• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2008 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  */
25 
26 #ifndef MacroAssembler_h
27 #define MacroAssembler_h
28 
29 #include <wtf/Platform.h>
30 
31 #if ENABLE(ASSEMBLER)
32 
33 #include "X86Assembler.h"
34 
35 namespace JSC {
36 
37 class MacroAssembler {
38 protected:
39     X86Assembler m_assembler;
40 
41 #if PLATFORM(X86_64)
42     static const X86::RegisterID scratchRegister = X86::r11;
43 #endif
44 
45 public:
46     typedef X86::RegisterID RegisterID;
47 
48     // Note: do not rely on values in this enum, these will change (to 0..3).
49     enum Scale {
50         TimesOne = 1,
51         TimesTwo = 2,
52         TimesFour = 4,
53         TimesEight = 8,
54 #if PLATFORM(X86)
55         ScalePtr = TimesFour
56 #endif
57 #if PLATFORM(X86_64)
58         ScalePtr = TimesEight
59 #endif
60     };
61 
    // Constructs a macro assembler with an empty instruction buffer.
    MacroAssembler()
    {
    }
65 
    // Returns the number of bytes of code currently in the instruction buffer.
    size_t size() { return m_assembler.size(); }
    // Copies the generated code into executable memory obtained from
    // 'allocator', returning a pointer to the copied code.
    void* copyCode(ExecutablePool* allocator)
    {
        return m_assembler.executableCopy(allocator);
    }
71 
72 
    // Address:
    //
    // Describes a simple base-offset address.
    struct Address {
        // 'explicit' prevents a bare RegisterID from silently becoming an
        // Address; use ImplicitAddress where that conversion is wanted.
        explicit Address(RegisterID base, int32_t offset = 0)
            : base(base)
            , offset(offset)
        {
        }

        RegisterID base;
        int32_t offset;
    };
86 
    // ImplicitAddress:
    //
    // This class is used for explicit 'load' and 'store' operations
    // (as opposed to situations in which a memory operand is provided
    // to a generic operation, such as an integer arithmetic instruction).
    //
    // In the case of a load (or store) operation we want to permit
    // addresses to be implicitly constructed, e.g. the two calls:
    //
    //     load32(Address(addrReg), destReg);
    //     load32(addrReg, destReg);
    //
    // Are equivalent, and the explicit wrapping of the Address in the former
    // is unnecessary.
    struct ImplicitAddress {
        // Implicit conversion from a bare register: offset 0.
        ImplicitAddress(RegisterID base)
            : base(base)
            , offset(0)
        {
        }

        // Implicit conversion from an explicit Address.
        ImplicitAddress(Address address)
            : base(address.base)
            , offset(address.offset)
        {
        }

        RegisterID base;
        int32_t offset;
    };
117 
    // BaseIndex:
    //
    // Describes a complex addressing mode: base + (index * scale) + offset.
    struct BaseIndex {
        BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
            : base(base)
            , index(index)
            , scale(scale)
            , offset(offset)
        {
        }

        RegisterID base;
        RegisterID index;
        Scale scale;
        int32_t offset;
    };
135 
    // AbsoluteAddress:
    //
    // Describes a memory operand given by a pointer.  For regular load & store
    // operations an unwrapped void* will be used, rather than using this.
    struct AbsoluteAddress {
        explicit AbsoluteAddress(void* ptr)
            : m_ptr(ptr)
        {
        }

        void* m_ptr;
    };
148 
149 
    class Jump;
    class PatchBuffer;

    // DataLabelPtr:
    //
    // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
    // patched after the code has been generated.
    class DataLabelPtr {
        friend class MacroAssembler;
        friend class PatchBuffer;

    public:
        // A default-constructed label refers to no location until assigned.
        DataLabelPtr()
        {
        }

        // Records the assembler's current output position as the label.
        DataLabelPtr(MacroAssembler* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        // Overwrites the pointer embedded at 'address' in already-generated
        // code with 'value'.
        static void patch(void* address, void* value)
        {
            X86Assembler::patchPointer(reinterpret_cast<intptr_t>(address), reinterpret_cast<intptr_t>(value));
        }

    private:
        X86Assembler::JmpDst m_label;
    };
179 
    // DataLabel32:
    //
    // A DataLabel32 is used to refer to a location in the code containing a
    // 32-bit immediate to be patched after the code has been generated.
    // (The original comment here said "DataLabelPtr" - a copy/paste slip.)
    class DataLabel32 {
        friend class MacroAssembler;
        friend class PatchBuffer;

    public:
        // A default-constructed label refers to no location until assigned.
        DataLabel32()
        {
        }

        // Records the assembler's current output position as the label.
        DataLabel32(MacroAssembler* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        // Overwrites the 32-bit immediate at 'address' in already-generated
        // code with 'value'.
        static void patch(void* address, int32_t value)
        {
            X86Assembler::patchImmediate(reinterpret_cast<intptr_t>(address), value);
        }

    private:
        X86Assembler::JmpDst m_label;
    };
206 
    // Label:
    //
    // A Label records a point in the generated instruction stream, typically such that
    // it may be used as a destination for a jump.
    class Label {
        friend class Jump;
        friend class MacroAssembler;
        friend class PatchBuffer;

    public:
        // A default-constructed label refers to no location until assigned.
        Label()
        {
        }

        // Records the assembler's current output position as the label.
        Label(MacroAssembler* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        // FIXME: transitionary method, while we replace JmpSrces with Jumps.
        operator X86Assembler::JmpDst()
        {
            return m_label;
        }

    private:
        X86Assembler::JmpDst m_label;
    };
235 
236 
    // Jump:
    //
    // A jump object is a reference to a jump instruction that has been planted
    // into the code buffer - it is typically used to link the jump, setting the
    // relative offset such that when executed it will jump to the desired
    // destination.
    //
    // Note: a Jump stores only the X86Assembler::JmpSrc; it does not retain a
    // pointer to the assembler, so the assembler is passed in when linking:
    //
    //     Jump forwardsBranch = jne32(Imm32(0), reg1);
    //     // ...
    //     forwardsBranch.link(masm);
    //
    // Jumps may also be linked to a Label.
    class Jump {
        friend class PatchBuffer;
        friend class MacroAssembler;

    public:
        // A default-constructed Jump refers to no instruction until assigned.
        Jump()
        {
        }

        // FIXME: transitionary method, while we replace JmpSrces with Jumps.
        Jump(X86Assembler::JmpSrc jmp)
            : m_jmp(jmp)
        {
        }

        // Links this jump to the assembler's current output position.
        void link(MacroAssembler* masm)
        {
            masm->m_assembler.link(m_jmp, masm->m_assembler.label());
        }

        // Links this jump to a previously recorded label.
        void linkTo(Label label, MacroAssembler* masm)
        {
            masm->m_assembler.link(m_jmp, label.m_label);
        }

        // FIXME: transitionary method, while we replace JmpSrces with Jumps.
        operator X86Assembler::JmpSrc()
        {
            return m_jmp;
        }

        // Re-points the jump instruction at 'address' in already-generated
        // code to 'destination'.
        static void patch(void* address, void* destination)
        {
            X86Assembler::patchBranchOffset(reinterpret_cast<intptr_t>(address), destination);
        }

    private:
        X86Assembler::JmpSrc m_jmp;
    };
291 
    // JumpList:
    //
    // A JumpList is a set of Jump objects.
    // All jumps in the set will be linked to the same destination.
    class JumpList {
        friend class PatchBuffer;

    public:
        // Links every jump in the set to the assembler's current output
        // position, then empties the list.
        void link(MacroAssembler* masm)
        {
            size_t size = m_jumps.size();
            for (size_t i = 0; i < size; ++i)
                m_jumps[i].link(masm);
            m_jumps.clear();
        }

        // Links every jump in the set to 'label', then empties the list.
        void linkTo(Label label, MacroAssembler* masm)
        {
            size_t size = m_jumps.size();
            for (size_t i = 0; i < size; ++i)
                m_jumps[i].linkTo(label, masm);
            m_jumps.clear();
        }

        // Adds a single jump to the set.
        void append(Jump jump)
        {
            m_jumps.append(jump);
        }

        // Adds all jumps from 'other' to this set ('other' is not cleared).
        void append(JumpList& other)
        {
            m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
        }

        // True if no jumps have been appended (or all have been linked).
        bool empty()
        {
            return !m_jumps.size();
        }

    private:
        Vector<Jump, 16> m_jumps;
    };
334 
335 
    // PatchBuffer:
    //
    // This class assists in linking code generated by the macro assembler, once code generation
    // has been completed, and the code has been copied to is final location in memory.  At this
    // time pointers to labels within the code may be resolved, and relative offsets to external
    // addresses may be fixed.
    //
    // Specifically:
    //   * Jump objects may be linked to external targets,
    //   * The address of Jump objects may taken, such that it can later be relinked.
    //   * The return address of a Jump object representing a call may be acquired.
    //   * The address of a Label pointing into the code may be resolved.
    //   * The value referenced by a DataLabel may be fixed.
    //
    // FIXME: distinguish between Calls & Jumps (make a specific call to obtain the return
    // address of calls, as opposed to a point that can be used to later relink a Jump -
    // possibly wrap the later up in an object that can do just that).
    class PatchBuffer {
    public:
        // 'code' is the base address the generated code was copied to.
        PatchBuffer(void* code)
            : m_code(code)
        {
        }

        // Points 'jump' (within the copied code) at an external target.
        void link(Jump jump, void* target)
        {
            X86Assembler::link(m_code, jump.m_jmp, target);
        }

        // Points every jump in 'list' at the same external target.
        void link(JumpList list, void* target)
        {
            for (unsigned i = 0; i < list.m_jumps.size(); ++i)
                X86Assembler::link(m_code, list.m_jumps[i], target);
        }

        // Resolves a Jump to its absolute address within the copied code.
        void* addressOf(Jump jump)
        {
            return X86Assembler::getRelocatedAddress(m_code, jump.m_jmp);
        }

        // Resolves a Label to its absolute address within the copied code.
        void* addressOf(Label label)
        {
            return X86Assembler::getRelocatedAddress(m_code, label.m_label);
        }

        // Resolves a DataLabelPtr to its absolute address within the copied code.
        void* addressOf(DataLabelPtr label)
        {
            return X86Assembler::getRelocatedAddress(m_code, label.m_label);
        }

        // Resolves a DataLabel32 to its absolute address within the copied code.
        void* addressOf(DataLabel32 label)
        {
            return X86Assembler::getRelocatedAddress(m_code, label.m_label);
        }

        // Fixes the pointer value referenced by 'label' in the copied code.
        void setPtr(DataLabelPtr label, void* value)
        {
            X86Assembler::patchAddress(m_code, label.m_label, value);
        }

    private:
        void* m_code;
    };
399 
400 
    // ImmPtr:
    //
    // A pointer sized immediate operand to an instruction - this is wrapped
    // in a class requiring explicit construction in order to differentiate
    // from pointers used as absolute addresses to memory operations
    struct ImmPtr {
        explicit ImmPtr(void* value)
            : m_value(value)
        {
        }

        // The pointer reinterpreted as a signed integer of pointer width.
        intptr_t asIntptr()
        {
            return reinterpret_cast<intptr_t>(m_value);
        }

        void* m_value;
    };
419 
420 
    // Imm32:
    //
    // A 32bit immediate operand to an instruction - this is wrapped in a
    // class requiring explicit construction in order to prevent RegisterIDs
    // (which are implemented as an enum) from accidentally being passed as
    // immediate values.
    struct Imm32 {
        explicit Imm32(int32_t value)
            : m_value(value)
        {
        }

#if PLATFORM(X86)
        // On 32-bit x86 a pointer fits in a 32-bit immediate, so allow
        // explicit conversion from ImmPtr; deliberately absent on X86_64.
        explicit Imm32(ImmPtr ptr)
            : m_value(ptr.asIntptr())
        {
        }
#endif

        int32_t m_value;
    };
442 
    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst)
    // For many operations the source may be an Imm32, the srcDst operand
    // may often be a memory location (explicitly described using an Address
    // object).

    // dest += src, at pointer width.
    void addPtr(RegisterID src, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.addq_rr(src, dest);
#else
        add32(src, dest);
#endif
    }

    // srcDest += imm, at pointer width.
    void addPtr(Imm32 imm, RegisterID srcDest)
    {
#if PLATFORM(X86_64)
        m_assembler.addq_ir(imm.m_value, srcDest);
#else
        add32(imm, srcDest);
#endif
    }

    // dest += imm, where imm is a full pointer-width immediate.
    void addPtr(ImmPtr imm, RegisterID dest)
    {
#if PLATFORM(X86_64)
        // x86-64 add has no 64-bit immediate form; go via the scratch register.
        move(imm, scratchRegister);
        m_assembler.addq_rr(scratchRegister, dest);
#else
        add32(Imm32(imm), dest);
#endif
    }

    // dest = src + imm, computed with lea.
    // NOTE(review): this emits a 32-bit lea on both platforms; on X86_64 the
    // result is truncated/zero-extended to 32 bits - confirm callers only use
    // this for 32-bit quantities, or that X86Assembler provides a leaq variant.
    void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leal_mr(imm.m_value, src, dest);
    }
482 
    // dest += src (32-bit).
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }

    // [address] += imm (32-bit, read-modify-write on memory).
    void add32(Imm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }

    // dest += imm (32-bit).
    void add32(Imm32 imm, RegisterID dest)
    {
        m_assembler.addl_ir(imm.m_value, dest);
    }

    // [absolute address] += imm (32-bit).
    void add32(Imm32 imm, AbsoluteAddress address)
    {
#if PLATFORM(X86_64)
        // No 64-bit absolute addressing for add; materialize the address first.
        move(ImmPtr(address.m_ptr), scratchRegister);
        add32(imm, Address(scratchRegister));
#else
        m_assembler.addl_im(imm.m_value, address.m_ptr);
#endif
    }

    // dest += [src] (32-bit load-and-add).
    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }
512 
    // dest &= src, at pointer width.
    void andPtr(RegisterID src, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.andq_rr(src, dest);
#else
        and32(src, dest);
#endif
    }

    // srcDest &= imm, at pointer width (imm is sign-extended on X86_64).
    void andPtr(Imm32 imm, RegisterID srcDest)
    {
#if PLATFORM(X86_64)
        m_assembler.andq_ir(imm.m_value, srcDest);
#else
        and32(imm, srcDest);
#endif
    }
530 
    // dest &= src (32-bit).
    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }

    // dest &= imm (32-bit).
    void and32(Imm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }
540 
    // dest <<= imm (32-bit shift by an immediate count).
    void lshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }
545 
lshift32(RegisterID shift_amount,RegisterID dest)546     void lshift32(RegisterID shift_amount, RegisterID dest)
547     {
548         // On x86 we can only shift by ecx; if asked to shift by another register we'll
549         // need rejig the shift amount into ecx first, and restore the registers afterwards.
550         if (shift_amount != X86::ecx) {
551             swap(shift_amount, X86::ecx);
552 
553             // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
554             if (dest == shift_amount)
555                 m_assembler.shll_CLr(X86::ecx);
556             // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
557             else if (dest == X86::ecx)
558                 m_assembler.shll_CLr(shift_amount);
559             // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
560             else
561                 m_assembler.shll_CLr(dest);
562 
563             swap(shift_amount, X86::ecx);
564         } else
565             m_assembler.shll_CLr(dest);
566     }
567 
    // Take the value from dividend, divide it by divisor, and put the remainder in remainder.
    // For now, this operation has specific register requirements, and the three registers must
    // be unique.  It is unfortunate to expose this in the MacroAssembler interface, however
    // given the complexity to fix, the fact that it is not uncommon for processors to have
    // specific register requirements on this operation (e.g. Mips result in 'hi'), or to not
    // support a hardware divide at all, it may not be worth abstracting further.
    void mod32(RegisterID divisor, RegisterID dividend, RegisterID remainder)
    {
#ifdef NDEBUG
#pragma unused(dividend,remainder)
#else
        // idivl implicitly takes its dividend in eax and leaves the remainder
        // in edx, so the caller must pass exactly those registers.
        ASSERT((dividend == X86::eax) && (remainder == X86::edx));
        ASSERT((dividend != divisor) && (remainder != divisor));
#endif

        // cdq sign-extends eax into edx:eax ahead of the signed divide.
        m_assembler.cdq();
        m_assembler.idivl_r(divisor);
    }
586 
    // dest *= src (32-bit signed multiply).
    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }

    // dest = src * imm (32-bit signed multiply by immediate).
    void mul32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }
596 
    // srcDest = ~srcDest (32-bit bitwise complement).
    void not32(RegisterID srcDest)
    {
        m_assembler.notl_r(srcDest);
    }
601 
    // dest |= src, at pointer width.
    void orPtr(RegisterID src, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.orq_rr(src, dest);
#else
        or32(src, dest);
#endif
    }

    // dest |= imm, where imm is a full pointer-width immediate.
    void orPtr(ImmPtr imm, RegisterID dest)
    {
#if PLATFORM(X86_64)
        // x86-64 or has no 64-bit immediate form; go via the scratch register.
        move(imm, scratchRegister);
        m_assembler.orq_rr(scratchRegister, dest);
#else
        or32(Imm32(imm), dest);
#endif
    }

    // dest |= imm, at pointer width (imm is sign-extended on X86_64).
    void orPtr(Imm32 imm, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.orq_ir(imm.m_value, dest);
#else
        or32(imm, dest);
#endif
    }
629 
    // dest |= src (32-bit).
    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }

    // dest |= imm (32-bit).
    void or32(Imm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }
639 
rshiftPtr(RegisterID shift_amount,RegisterID dest)640     void rshiftPtr(RegisterID shift_amount, RegisterID dest)
641     {
642 #if PLATFORM(X86_64)
643         // On x86 we can only shift by ecx; if asked to shift by another register we'll
644         // need rejig the shift amount into ecx first, and restore the registers afterwards.
645         if (shift_amount != X86::ecx) {
646             swap(shift_amount, X86::ecx);
647 
648             // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
649             if (dest == shift_amount)
650                 m_assembler.sarq_CLr(X86::ecx);
651             // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
652             else if (dest == X86::ecx)
653                 m_assembler.sarq_CLr(shift_amount);
654             // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
655             else
656                 m_assembler.sarq_CLr(dest);
657 
658             swap(shift_amount, X86::ecx);
659         } else
660             m_assembler.sarq_CLr(dest);
661 #else
662         rshift32(shift_amount, dest);
663 #endif
664     }
665 
    // dest >>= imm (arithmetic right shift at pointer width, immediate count).
    void rshiftPtr(Imm32 imm, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.sarq_i8r(imm.m_value, dest);
#else
        rshift32(imm, dest);
#endif
    }
674 
rshift32(RegisterID shift_amount,RegisterID dest)675     void rshift32(RegisterID shift_amount, RegisterID dest)
676     {
677         // On x86 we can only shift by ecx; if asked to shift by another register we'll
678         // need rejig the shift amount into ecx first, and restore the registers afterwards.
679         if (shift_amount != X86::ecx) {
680             swap(shift_amount, X86::ecx);
681 
682             // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
683             if (dest == shift_amount)
684                 m_assembler.sarl_CLr(X86::ecx);
685             // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
686             else if (dest == X86::ecx)
687                 m_assembler.sarl_CLr(shift_amount);
688             // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
689             else
690                 m_assembler.sarl_CLr(dest);
691 
692             swap(shift_amount, X86::ecx);
693         } else
694             m_assembler.sarl_CLr(dest);
695     }
696 
    // dest >>= imm (32-bit arithmetic right shift, immediate count).
    void rshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }
701 
    // dest -= src, at pointer width.
    void subPtr(RegisterID src, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.subq_rr(src, dest);
#else
        sub32(src, dest);
#endif
    }

    // dest -= imm, at pointer width (imm is sign-extended on X86_64).
    void subPtr(Imm32 imm, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.subq_ir(imm.m_value, dest);
#else
        sub32(imm, dest);
#endif
    }

    // dest -= imm, where imm is a full pointer-width immediate.
    void subPtr(ImmPtr imm, RegisterID dest)
    {
#if PLATFORM(X86_64)
        // x86-64 sub has no 64-bit immediate form; go via the scratch register.
        move(imm, scratchRegister);
        m_assembler.subq_rr(scratchRegister, dest);
#else
        sub32(Imm32(imm), dest);
#endif
    }
729 
    // dest -= src (32-bit).
    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }

    // dest -= imm (32-bit).
    void sub32(Imm32 imm, RegisterID dest)
    {
        m_assembler.subl_ir(imm.m_value, dest);
    }

    // [address] -= imm (32-bit, read-modify-write on memory).
    void sub32(Imm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }

    // [absolute address] -= imm (32-bit).
    void sub32(Imm32 imm, AbsoluteAddress address)
    {
#if PLATFORM(X86_64)
        // No 64-bit absolute addressing for sub; materialize the address first.
        move(ImmPtr(address.m_ptr), scratchRegister);
        sub32(imm, Address(scratchRegister));
#else
        m_assembler.subl_im(imm.m_value, address.m_ptr);
#endif
    }

    // dest -= [src] (32-bit load-and-subtract).
    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }
759 
    // dest ^= src, at pointer width.
    void xorPtr(RegisterID src, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.xorq_rr(src, dest);
#else
        xor32(src, dest);
#endif
    }

    // srcDest ^= imm, at pointer width (imm is sign-extended on X86_64).
    void xorPtr(Imm32 imm, RegisterID srcDest)
    {
#if PLATFORM(X86_64)
        m_assembler.xorq_ir(imm.m_value, srcDest);
#else
        xor32(imm, srcDest);
#endif
    }
777 
    // dest ^= src (32-bit).
    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }

    // srcDest ^= imm (32-bit).
    void xor32(Imm32 imm, RegisterID srcDest)
    {
        m_assembler.xorl_ir(imm.m_value, srcDest);
    }
787 
788 
    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address).  The source for a store may be an Imm32.  Address
    // operand objects to loads and store will be implicitly constructed if a
    // register is passed.

    // dest = [address], at pointer width.
    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.movq_mr(address.offset, address.base, dest);
#else
        load32(address, dest);
#endif
    }
804 
    // As loadPtr, but forces a 32-bit displacement encoding and returns a
    // DataLabel32 so the displacement can be patched after code generation.
    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.movq_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
#else
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
#endif
    }
815 
    // dest = [base + index*scale + offset], at pointer width.
    void loadPtr(BaseIndex address, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
#else
        load32(address, dest);
#endif
    }
824 
loadPtr(void * address,RegisterID dest)825     void loadPtr(void* address, RegisterID dest)
826     {
827 #if PLATFORM(X86_64)
828         if (dest == X86::eax)
829             m_assembler.movq_mEAX(address);
830         else {
831             move(X86::eax, dest);
832             m_assembler.movq_mEAX(address);
833             swap(X86::eax, dest);
834         }
835 #else
836         load32(address, dest);
837 #endif
838     }
839 
    // dest = [address] (32-bit).
    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    // dest = [base + index*scale + offset] (32-bit).
    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    // dest = [absolute address] (32-bit).
    void load32(void* address, RegisterID dest)
    {
#if PLATFORM(X86_64)
        // On X86_64 a 64-bit absolute load is only encodable into %rax
        // (movl_mEAX); save eax into dest, load, then exchange so eax is
        // restored and dest receives the loaded value.
        if (dest == X86::eax)
            m_assembler.movl_mEAX(address);
        else {
            move(X86::eax, dest);
            m_assembler.movl_mEAX(address);
            swap(X86::eax, dest);
        }
#else
        m_assembler.movl_mr(address, dest);
#endif
    }
864 
    // dest = zero-extended 16-bit load from [base + index*scale + offset].
    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }
869 
    // [address] = src, at pointer width.
    void storePtr(RegisterID src, ImplicitAddress address)
    {
#if PLATFORM(X86_64)
        m_assembler.movq_rm(src, address.offset, address.base);
#else
        store32(src, address);
#endif
    }
878 
    // As storePtr, but forces a 32-bit displacement encoding and returns a
    // DataLabel32 so the displacement can be patched after code generation.
    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
#if PLATFORM(X86_64)
        m_assembler.movq_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
#else
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
#endif
    }
889 
    // [base + index*scale + offset] = src, at pointer width.
    void storePtr(RegisterID src, BaseIndex address)
    {
#if PLATFORM(X86_64)
        m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
#else
        store32(src, address);
#endif
    }
898 
    // [address] = imm, where imm is a pointer-width immediate.
    void storePtr(ImmPtr imm, ImplicitAddress address)
    {
#if PLATFORM(X86_64)
        // No 64-bit immediate-to-memory move; go via the scratch register.
        move(imm, scratchRegister);
        storePtr(scratchRegister, address);
#else
        m_assembler.movl_i32m(imm.asIntptr(), address.offset, address.base);
#endif
    }
908 
#if !PLATFORM(X86_64)
    // [absolute address] = imm; only available on 32-bit, where a pointer
    // immediate fits in an Imm32.
    void storePtr(ImmPtr imm, void* address)
    {
        store32(Imm32(imm), address);
    }
#endif
915 
    // Stores a placeholder pointer (0) to [address] and returns a DataLabelPtr
    // referring to the immediate, so the real pointer can be patched in later.
    DataLabelPtr storePtrWithPatch(Address address)
    {
#if PLATFORM(X86_64)
        // The 64-bit immediate lives in the movq into the scratch register;
        // record the label there, before the store.
        m_assembler.movq_i64r(0, scratchRegister);
        DataLabelPtr label(this);
        storePtr(scratchRegister, address);
        return label;
#else
        m_assembler.movl_i32m(0, address.offset, address.base);
        return DataLabelPtr(this);
#endif
    }
928 
    // [address] = src (32-bit).
    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    // [base + index*scale + offset] = src (32-bit).
    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    // [address] = imm (32-bit immediate store).
    void store32(Imm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }

    // [absolute address] = imm (32-bit).
    void store32(Imm32 imm, void* address)
    {
#if PLATFORM(X86_64)
        // A 64-bit absolute store is only encodable from %eax (movl_EAXm);
        // preserve eax in the scratch register around the store.
        move(X86::eax, scratchRegister);
        move(imm, X86::eax);
        m_assembler.movl_EAXm(address);
        move(scratchRegister, X86::eax);
#else
        m_assembler.movl_i32m(imm.m_value, address);
#endif
    }
955 
956 
957     // Stack manipulation operations:
958     //
959     // The ABI is assumed to provide a stack abstraction to memory,
960     // containing machine word sized units of data.  Push and pop
961     // operations add and remove a single register sized unit of data
962     // to or from the stack.  Peek and poke operations read or write
963     // values on the stack, without moving the current stack position.
964 
    // Pop the top stack slot into dest.
    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    // Push a register onto the stack.
    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    // Push a word loaded from base + offset onto the stack.
    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }

    // Push a 32-bit immediate onto the stack.
    void push(Imm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }

    // Discard the top stack slot by bumping the stack pointer (no load).
    void pop()
    {
        addPtr(Imm32(sizeof(void*)), X86::esp);
    }

    // Read the stack slot at word index 'index' from the stack pointer,
    // without moving the stack pointer.
    void peek(RegisterID dest, int index = 0)
    {
        loadPtr(Address(X86::esp, (index * sizeof(void *))), dest);
    }

    // Write a register to the stack slot at word index 'index'.
    void poke(RegisterID src, int index = 0)
    {
        storePtr(src, Address(X86::esp, (index * sizeof(void *))));
    }

    // Write a 32-bit immediate to the stack slot at word index 'index'.
    void poke(Imm32 value, int index = 0)
    {
        store32(value, Address(X86::esp, (index * sizeof(void *))));
    }

    // Write an immediate pointer to the stack slot at word index 'index'.
    void poke(ImmPtr imm, int index = 0)
    {
        storePtr(imm, Address(X86::esp, (index * sizeof(void *))));
    }
1009 
1010     // Register move operations:
1011     //
1012     // Move values in registers.
1013 
move(Imm32 imm,RegisterID dest)1014     void move(Imm32 imm, RegisterID dest)
1015     {
1016         // Note: on 64-bit the Imm32 value is zero extended into the register, it
1017         // may be useful to have a separate version that sign extends the value?
1018         if (!imm.m_value)
1019             m_assembler.xorl_rr(dest, dest);
1020         else
1021             m_assembler.movl_i32r(imm.m_value, dest);
1022     }
1023 
    // Register-to-register move.
    // Note: on 64-bit this is a full register move; perhaps it would be
    // useful to have separate move32 & movePtr, with move32 zero extending?
    void move(RegisterID src, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.movq_rr(src, dest);
#else
        m_assembler.movl_rr(src, dest);
#endif
    }
1034 
    // Load an immediate pointer into dest.  On x86-64, values representable
    // as an unsigned 32-bit quantity use the shorter movl encoding (which
    // zero-extends into the full register); otherwise a full movq imm64 is
    // emitted.
    void move(ImmPtr imm, RegisterID dest)
    {
#if PLATFORM(X86_64)
        if (CAN_SIGN_EXTEND_U32_64(imm.asIntptr()))
            m_assembler.movl_i32r(static_cast<int32_t>(imm.asIntptr()), dest);
        else
            m_assembler.movq_i64r(imm.asIntptr(), dest);
#else
        m_assembler.movl_i32r(imm.asIntptr(), dest);
#endif
    }
1046 
    // Exchange the contents of two registers (pointer-width).
    void swap(RegisterID reg1, RegisterID reg2)
    {
#if PLATFORM(X86_64)
        m_assembler.xchgq_rr(reg1, reg2);
#else
        m_assembler.xchgl_rr(reg1, reg2);
#endif
    }

    // Sign-extend a 32-bit value to pointer width.  On 32-bit this is just a
    // register move (elided when src == dest).
    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.movsxd_rr(src, dest);
#else
        if (src != dest)
            move(src, dest);
#endif
    }

    // Zero-extend a 32-bit value to pointer width.  On x86-64 a movl clears
    // the upper 32 bits of the destination; on 32-bit this is a plain move.
    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.movl_rr(src, dest);
#else
        if (src != dest)
            move(src, dest);
#endif
    }
1075 
1076 
1077     // Forwards / external control flow operations:
1078     //
1079     // This set of jump and conditional branch operations return a Jump
    // object which may be linked at a later point, allowing forwards jumps,
1081     // or jumps that will require external linkage (after the code has been
1082     // relocated).
1083     //
1084     // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively, for unsigned comparisons the names b, a, be, and ae are
1086     // used (representing the names 'below' and 'above').
1087     //
    // Operands to the comparison are provided in the expected order, e.g.
1089     // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when
1090     // treated as a signed 32bit value, is less than or equal to 5.
1091     //
1092     // jz and jnz test whether the first operand is equal to zero, and take
1093     // an optional second operand of a mask under which to perform the test.
1094 
private:
    // Helpers shared by the branch operations below: each emits only the
    // compare/test, leaving the caller to emit the conditional jump.

    // Emit cmp for an ordered (signed/unsigned) comparison of reg vs imm.
    void compareImm32ForBranch(RegisterID left, int32_t right)
    {
        m_assembler.cmpl_ir(right, left);
    }

    // Emit a compare suitable for equality branches; comparing against zero
    // is done with the shorter test-against-self encoding.
    void compareImm32ForBranchEquality(RegisterID reg, int32_t imm)
    {
        if (!imm)
            m_assembler.testl_rr(reg, reg);
        else
            m_assembler.cmpl_ir(imm, reg);
    }

    // Memory operand form of the above; no test-reg shortcut is available.
    void compareImm32ForBranchEquality(Address address, int32_t imm)
    {
        m_assembler.cmpl_im(imm, address.offset, address.base);
    }

    // Emit a test of reg under mask; mask -1 means "any bit set".
    void testImm32(RegisterID reg, Imm32 mask)
    {
        // if we are only interested in the low seven bits, this can be tested with a testb
        if (mask.m_value == -1)
            m_assembler.testl_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testl_i32r(mask.m_value, reg);
    }

    // Memory operand form: mask -1 becomes a compare against zero.
    void testImm32(Address address, Imm32 mask)
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
    }

    // Base + scaled-index form of the memory test.
    void testImm32(BaseIndex address, Imm32 mask)
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
    }
1140 
#if PLATFORM(X86_64)
    // 64-bit counterparts of the compare/test helpers above.

    void compareImm64ForBranch(RegisterID left, int32_t right)
    {
        m_assembler.cmpq_ir(right, left);
    }

    // Equality compare; zero uses the shorter test-against-self encoding.
    void compareImm64ForBranchEquality(RegisterID reg, int32_t imm)
    {
        if (!imm)
            m_assembler.testq_rr(reg, reg);
        else
            m_assembler.cmpq_ir(imm, reg);
    }

    // Test reg under mask; -1 means "any bit set".
    void testImm64(RegisterID reg, Imm32 mask)
    {
        // if we are only interested in the low seven bits, this can be tested with a testb
        if (mask.m_value == -1)
            m_assembler.testq_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testq_i32r(mask.m_value, reg);
    }

    void testImm64(Address address, Imm32 mask)
    {
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
    }

    void testImm64(BaseIndex address, Imm32 mask)
    {
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
    }
#endif
1182 
public:
    // Branch if left > right, unsigned ('above').
    Jump ja32(RegisterID left, Imm32 right)
    {
        compareImm32ForBranch(left, right.m_value);
        return Jump(m_assembler.ja());
    }

    // Branch if left >= right, unsigned, pointer-width.
    Jump jaePtr(RegisterID left, RegisterID right)
    {
#if PLATFORM(X86_64)
        m_assembler.cmpq_rr(right, left);
        return Jump(m_assembler.jae());
#else
        return jae32(left, right);
#endif
    }

    Jump jaePtr(RegisterID reg, ImmPtr ptr)
    {
#if PLATFORM(X86_64)
        intptr_t imm = ptr.asIntptr();
        if (CAN_SIGN_EXTEND_32_64(imm)) {
            compareImm64ForBranch(reg, imm);
            return Jump(m_assembler.jae());
        } else {
            // Immediate won't fit in the cmp encoding; stage it through the
            // scratch register.
            move(ptr, scratchRegister);
            return jaePtr(reg, scratchRegister);
        }
#else
        return jae32(reg, Imm32(ptr));
#endif
    }

    // Branch if left >= right, unsigned, 32-bit.
    Jump jae32(RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jae());
    }

    Jump jae32(RegisterID left, Imm32 right)
    {
        compareImm32ForBranch(left, right.m_value);
        return Jump(m_assembler.jae());
    }

    Jump jae32(RegisterID left, Address right)
    {
        m_assembler.cmpl_mr(right.offset, right.base, left);
        return Jump(m_assembler.jae());
    }

    Jump jae32(Address left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.offset, left.base);
        return Jump(m_assembler.jae());
    }
1239 
    // Branch if left < right, unsigned ('below'), pointer-width.
    Jump jbPtr(RegisterID left, RegisterID right)
    {
#if PLATFORM(X86_64)
        m_assembler.cmpq_rr(right, left);
        return Jump(m_assembler.jb());
#else
        return jb32(left, right);
#endif
    }

    Jump jbPtr(RegisterID reg, ImmPtr ptr)
    {
#if PLATFORM(X86_64)
        intptr_t imm = ptr.asIntptr();
        if (CAN_SIGN_EXTEND_32_64(imm)) {
            compareImm64ForBranch(reg, imm);
            return Jump(m_assembler.jb());
        } else {
            // Immediate won't fit in the cmp encoding; stage it through the
            // scratch register.
            move(ptr, scratchRegister);
            return jbPtr(reg, scratchRegister);
        }
#else
        return jb32(reg, Imm32(ptr));
#endif
    }

    // Branch if left < right, unsigned, 32-bit.
    Jump jb32(RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jb());
    }

    Jump jb32(RegisterID left, Imm32 right)
    {
        compareImm32ForBranch(left, right.m_value);
        return Jump(m_assembler.jb());
    }

    Jump jb32(RegisterID left, Address right)
    {
        m_assembler.cmpl_mr(right.offset, right.base, left);
        return Jump(m_assembler.jb());
    }
1283 
    // Branch if the two operands are equal, pointer-width.  Operand order is
    // immaterial for equality.
    Jump jePtr(RegisterID op1, RegisterID op2)
    {
#if PLATFORM(X86_64)
        m_assembler.cmpq_rr(op1, op2);
        return Jump(m_assembler.je());
#else
        return je32(op1, op2);
#endif
    }

    Jump jePtr(RegisterID reg, Address address)
    {
#if PLATFORM(X86_64)
        m_assembler.cmpq_rm(reg, address.offset, address.base);
#else
        m_assembler.cmpl_rm(reg, address.offset, address.base);
#endif
        return Jump(m_assembler.je());
    }

    Jump jePtr(RegisterID reg, ImmPtr ptr)
    {
#if PLATFORM(X86_64)
        intptr_t imm = ptr.asIntptr();
        if (CAN_SIGN_EXTEND_32_64(imm)) {
            compareImm64ForBranchEquality(reg, imm);
            return Jump(m_assembler.je());
        } else {
            // Immediate won't fit in the cmp encoding; stage it through the
            // scratch register (equality, so swapped operand order is fine).
            move(ptr, scratchRegister);
            return jePtr(scratchRegister, reg);
        }
#else
        return je32(reg, Imm32(ptr));
#endif
    }

    Jump jePtr(Address address, ImmPtr imm)
    {
#if PLATFORM(X86_64)
        move(imm, scratchRegister);
        return jePtr(scratchRegister, address);
#else
        return je32(address, Imm32(imm));
#endif
    }

    // Branch if equal, 32-bit.
    Jump je32(RegisterID op1, RegisterID op2)
    {
        m_assembler.cmpl_rr(op1, op2);
        return Jump(m_assembler.je());
    }

    Jump je32(Address op1, RegisterID op2)
    {
        m_assembler.cmpl_mr(op1.offset, op1.base, op2);
        return Jump(m_assembler.je());
    }

    Jump je32(RegisterID reg, Imm32 imm)
    {
        compareImm32ForBranchEquality(reg, imm.m_value);
        return Jump(m_assembler.je());
    }

    Jump je32(Address address, Imm32 imm)
    {
        compareImm32ForBranchEquality(address, imm.m_value);
        return Jump(m_assembler.je());
    }

    // Branch if equal, comparing a register against a 16-bit memory operand.
    Jump je16(RegisterID op1, BaseIndex op2)
    {
        m_assembler.cmpw_rm(op1, op2.offset, op2.base, op2.index, op2.scale);
        return Jump(m_assembler.je());
    }
1359 
    // Branch if left > right, signed, 32-bit.
    Jump jg32(RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jg());
    }

    Jump jg32(RegisterID reg, Address address)
    {
        m_assembler.cmpl_mr(address.offset, address.base, reg);
        return Jump(m_assembler.jg());
    }

    // Branch if left >= right, signed, pointer-width.
    Jump jgePtr(RegisterID left, RegisterID right)
    {
#if PLATFORM(X86_64)
        m_assembler.cmpq_rr(right, left);
        return Jump(m_assembler.jge());
#else
        return jge32(left, right);
#endif
    }

    Jump jgePtr(RegisterID reg, ImmPtr ptr)
    {
#if PLATFORM(X86_64)
        intptr_t imm = ptr.asIntptr();
        if (CAN_SIGN_EXTEND_32_64(imm)) {
            compareImm64ForBranch(reg, imm);
            return Jump(m_assembler.jge());
        } else {
            // Immediate won't fit in the cmp encoding; stage it through the
            // scratch register.
            move(ptr, scratchRegister);
            return jgePtr(reg, scratchRegister);
        }
#else
        return jge32(reg, Imm32(ptr));
#endif
    }

    // Branch if left >= right, signed, 32-bit.
    Jump jge32(RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jge());
    }

    Jump jge32(RegisterID left, Imm32 right)
    {
        compareImm32ForBranch(left, right.m_value);
        return Jump(m_assembler.jge());
    }
1409 
    // Branch if left < right, signed, pointer-width.
    Jump jlPtr(RegisterID left, RegisterID right)
    {
#if PLATFORM(X86_64)
        m_assembler.cmpq_rr(right, left);
        return Jump(m_assembler.jl());
#else
        return jl32(left, right);
#endif
    }

    Jump jlPtr(RegisterID reg, ImmPtr ptr)
    {
#if PLATFORM(X86_64)
        intptr_t imm = ptr.asIntptr();
        if (CAN_SIGN_EXTEND_32_64(imm)) {
            compareImm64ForBranch(reg, imm);
            return Jump(m_assembler.jl());
        } else {
            // Immediate won't fit in the cmp encoding; stage it through the
            // scratch register.
            move(ptr, scratchRegister);
            return jlPtr(reg, scratchRegister);
        }
#else
        return jl32(reg, Imm32(ptr));
#endif
    }

    // Branch if left < right, signed, 32-bit.
    Jump jl32(RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jl());
    }

    Jump jl32(RegisterID left, Imm32 right)
    {
        compareImm32ForBranch(left, right.m_value);
        return Jump(m_assembler.jl());
    }

    // Branch if left <= right, signed, pointer-width.
    Jump jlePtr(RegisterID left, RegisterID right)
    {
#if PLATFORM(X86_64)
        m_assembler.cmpq_rr(right, left);
        return Jump(m_assembler.jle());
#else
        return jle32(left, right);
#endif
    }

    Jump jlePtr(RegisterID reg, ImmPtr ptr)
    {
#if PLATFORM(X86_64)
        intptr_t imm = ptr.asIntptr();
        if (CAN_SIGN_EXTEND_32_64(imm)) {
            compareImm64ForBranch(reg, imm);
            return Jump(m_assembler.jle());
        } else {
            // Immediate won't fit in the cmp encoding; stage it through the
            // scratch register.
            move(ptr, scratchRegister);
            return jlePtr(reg, scratchRegister);
        }
#else
        return jle32(reg, Imm32(ptr));
#endif
    }

    // Branch if left <= right, signed, 32-bit.
    Jump jle32(RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jle());
    }

    Jump jle32(RegisterID left, Imm32 right)
    {
        compareImm32ForBranch(left, right.m_value);
        return Jump(m_assembler.jle());
    }
1485 
    // Branch if the two operands are not equal, pointer-width.  Operand
    // order is immaterial for inequality.
    Jump jnePtr(RegisterID op1, RegisterID op2)
    {
#if PLATFORM(X86_64)
        m_assembler.cmpq_rr(op1, op2);
        return Jump(m_assembler.jne());
#else
        return jne32(op1, op2);
#endif
    }

    Jump jnePtr(RegisterID reg, Address address)
    {
#if PLATFORM(X86_64)
        m_assembler.cmpq_rm(reg, address.offset, address.base);
#else
        m_assembler.cmpl_rm(reg, address.offset, address.base);
#endif
        return Jump(m_assembler.jne());
    }

    Jump jnePtr(RegisterID reg, AbsoluteAddress address)
    {
#if PLATFORM(X86_64)
        // No 64-bit cmp-to-absolute form; load the address into the scratch
        // register and compare indirectly through it.
        move(ImmPtr(address.m_ptr), scratchRegister);
        return jnePtr(reg, Address(scratchRegister));
#else
        m_assembler.cmpl_rm(reg, address.m_ptr);
        return Jump(m_assembler.jne());
#endif
    }

    Jump jnePtr(RegisterID reg, ImmPtr ptr)
    {
#if PLATFORM(X86_64)
        intptr_t imm = ptr.asIntptr();
        if (CAN_SIGN_EXTEND_32_64(imm)) {
            compareImm64ForBranchEquality(reg, imm);
            return Jump(m_assembler.jne());
        } else {
            // Immediate won't fit in the cmp encoding; stage it through the
            // scratch register (inequality, so swapped operand order is fine).
            move(ptr, scratchRegister);
            return jnePtr(scratchRegister, reg);
        }
#else
        return jne32(reg, Imm32(ptr));
#endif
    }

    Jump jnePtr(Address address, ImmPtr imm)
    {
#if PLATFORM(X86_64)
        move(imm, scratchRegister);
        return jnePtr(scratchRegister, address);
#else
        return jne32(address, Imm32(imm));
#endif
    }

#if !PLATFORM(X86_64)
    // 32-bit only: compare an absolute address against an immediate pointer.
    Jump jnePtr(AbsoluteAddress address, ImmPtr imm)
    {
        m_assembler.cmpl_im(imm.asIntptr(), address.m_ptr);
        return Jump(m_assembler.jne());
    }
#endif
1550 
    // Branch-if-not-equal against a repatchable pointer constant.  dataLabel
    // is set to mark the patchable immediate; it is taken immediately after
    // the instruction containing it so the constant can be located later.
    // The force32 forms below ensure a full 32-bit immediate is encoded even
    // for small initial values.
    Jump jnePtrWithPatch(RegisterID reg, DataLabelPtr& dataLabel, ImmPtr initialValue = ImmPtr(0))
    {
#if PLATFORM(X86_64)
        m_assembler.movq_i64r(initialValue.asIntptr(), scratchRegister);
        dataLabel = DataLabelPtr(this);
        return jnePtr(scratchRegister, reg);
#else
        m_assembler.cmpl_ir_force32(initialValue.asIntptr(), reg);
        dataLabel = DataLabelPtr(this);
        return Jump(m_assembler.jne());
#endif
    }

    // Memory-operand form of the above.
    Jump jnePtrWithPatch(Address address, DataLabelPtr& dataLabel, ImmPtr initialValue = ImmPtr(0))
    {
#if PLATFORM(X86_64)
        m_assembler.movq_i64r(initialValue.asIntptr(), scratchRegister);
        dataLabel = DataLabelPtr(this);
        return jnePtr(scratchRegister, address);
#else
        m_assembler.cmpl_im_force32(initialValue.asIntptr(), address.offset, address.base);
        dataLabel = DataLabelPtr(this);
        return Jump(m_assembler.jne());
#endif
    }
1576 
    // Branch if not equal, 32-bit.
    Jump jne32(RegisterID op1, RegisterID op2)
    {
        m_assembler.cmpl_rr(op1, op2);
        return Jump(m_assembler.jne());
    }

    Jump jne32(RegisterID reg, Imm32 imm)
    {
        compareImm32ForBranchEquality(reg, imm.m_value);
        return Jump(m_assembler.jne());
    }

    Jump jne32(Address address, Imm32 imm)
    {
        compareImm32ForBranchEquality(address, imm.m_value);
        return Jump(m_assembler.jne());
    }

    Jump jne32(Address address, RegisterID reg)
    {
        m_assembler.cmpl_rm(reg, address.offset, address.base);
        return Jump(m_assembler.jne());
    }
1600 
    // Branch if (reg & mask) is non-zero, pointer-width.
    Jump jnzPtr(RegisterID reg, RegisterID mask)
    {
#if PLATFORM(X86_64)
        m_assembler.testq_rr(reg, mask);
        return Jump(m_assembler.jne());
#else
        return jnz32(reg, mask);
#endif
    }

    // Default mask of -1 tests whether reg itself is non-zero.
    Jump jnzPtr(RegisterID reg, Imm32 mask = Imm32(-1))
    {
#if PLATFORM(X86_64)
        testImm64(reg, mask);
        return Jump(m_assembler.jne());
#else
        return jnz32(reg, mask);
#endif
    }

    Jump jnzPtr(RegisterID reg, ImmPtr mask)
    {
#if PLATFORM(X86_64)
        // Pointer-sized mask won't fit in a test immediate; stage it through
        // the scratch register (test is symmetric in its operands).
        move(mask, scratchRegister);
        m_assembler.testq_rr(scratchRegister, reg);
        return Jump(m_assembler.jne());
#else
        return jnz32(reg, Imm32(mask));
#endif
    }

    Jump jnzPtr(Address address, Imm32 mask = Imm32(-1))
    {
#if PLATFORM(X86_64)
        testImm64(address, mask);
        return Jump(m_assembler.jne());
#else
        return jnz32(address, mask);
#endif
    }

    // Branch if (reg & mask) is non-zero, 32-bit.
    Jump jnz32(RegisterID reg, RegisterID mask)
    {
        m_assembler.testl_rr(reg, mask);
        return Jump(m_assembler.jne());
    }

    Jump jnz32(RegisterID reg, Imm32 mask = Imm32(-1))
    {
        testImm32(reg, mask);
        return Jump(m_assembler.jne());
    }

    Jump jnz32(Address address, Imm32 mask = Imm32(-1))
    {
        testImm32(address, mask);
        return Jump(m_assembler.jne());
    }
1659 
    // Branch if (reg & mask) is zero, pointer-width.
    Jump jzPtr(RegisterID reg, RegisterID mask)
    {
#if PLATFORM(X86_64)
        m_assembler.testq_rr(reg, mask);
        return Jump(m_assembler.je());
#else
        return jz32(reg, mask);
#endif
    }

    // Default mask of -1 tests whether reg itself is zero.
    Jump jzPtr(RegisterID reg, Imm32 mask = Imm32(-1))
    {
#if PLATFORM(X86_64)
        testImm64(reg, mask);
        return Jump(m_assembler.je());
#else
        return jz32(reg, mask);
#endif
    }

    Jump jzPtr(RegisterID reg, ImmPtr mask)
    {
#if PLATFORM(X86_64)
        // Pointer-sized mask won't fit in a test immediate; stage it through
        // the scratch register (test is symmetric in its operands).
        move(mask, scratchRegister);
        m_assembler.testq_rr(scratchRegister, reg);
        return Jump(m_assembler.je());
#else
        return jz32(reg, Imm32(mask));
#endif
    }

    Jump jzPtr(Address address, Imm32 mask = Imm32(-1))
    {
#if PLATFORM(X86_64)
        testImm64(address, mask);
        return Jump(m_assembler.je());
#else
        return jz32(address, mask);
#endif
    }

    Jump jzPtr(BaseIndex address, Imm32 mask = Imm32(-1))
    {
#if PLATFORM(X86_64)
        testImm64(address, mask);
        return Jump(m_assembler.je());
#else
        return jz32(address, mask);
#endif
    }

    // Branch if (reg & mask) is zero, 32-bit.
    Jump jz32(RegisterID reg, RegisterID mask)
    {
        m_assembler.testl_rr(reg, mask);
        return Jump(m_assembler.je());
    }

    Jump jz32(RegisterID reg, Imm32 mask = Imm32(-1))
    {
        testImm32(reg, mask);
        return Jump(m_assembler.je());
    }

    Jump jz32(Address address, Imm32 mask = Imm32(-1))
    {
        testImm32(address, mask);
        return Jump(m_assembler.je());
    }

    Jump jz32(BaseIndex address, Imm32 mask = Imm32(-1))
    {
        testImm32(address, mask);
        return Jump(m_assembler.je());
    }
1734 
    // Unconditional jump, to be linked later.
    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }
1739 
1740 
1741     // Backwards, local control flow operations:
1742     //
1743     // These operations provide a shorter notation for local
1744     // backwards branches, which may be both more convenient
1745     // for the user, and for the programmer, and for the
1746     // assembler (allowing shorter values to be used in
1747     // relative offsets).
1748     //
1749     // The code sequence:
1750     //
1751     //     Label topOfLoop(this);
1752     //     // ...
1753     //     jne32(reg1, reg2, topOfLoop);
1754     //
1755     // Is equivalent to the longer, potentially less efficient form:
1756     //
1757     //     Label topOfLoop(this);
1758     //     // ...
1759     //     jne32(reg1, reg2).linkTo(topOfLoop);
1760 
    // Shorthand forms: emit the comparison branch and immediately link it to
    // an already-bound (backwards) label.

    void jae32(RegisterID left, Address right, Label target)
    {
        jae32(left, right).linkTo(target, this);
    }

    void je32(RegisterID op1, Imm32 imm, Label target)
    {
        je32(op1, imm).linkTo(target, this);
    }

    void je16(RegisterID op1, BaseIndex op2, Label target)
    {
        je16(op1, op2).linkTo(target, this);
    }

    void jl32(RegisterID left, Imm32 right, Label target)
    {
        jl32(left, right).linkTo(target, this);
    }

    void jle32(RegisterID left, RegisterID right, Label target)
    {
        jle32(left, right).linkTo(target, this);
    }

    void jnePtr(RegisterID op1, ImmPtr imm, Label target)
    {
        jnePtr(op1, imm).linkTo(target, this);
    }

    void jne32(RegisterID op1, RegisterID op2, Label target)
    {
        jne32(op1, op2).linkTo(target, this);
    }

    void jne32(RegisterID op1, Imm32 imm, Label target)
    {
        jne32(op1, imm).linkTo(target, this);
    }

    void jzPtr(RegisterID reg, Label target)
    {
        jzPtr(reg).linkTo(target, this);
    }
1805 
    // Unconditional jump, linked immediately to an already-bound label.
    void jump(Label target)
    {
        m_assembler.link(m_assembler.jmp(), target.m_label);
    }

    // Indirect jump to the address held in a register.
    void jump(RegisterID target)
    {
        m_assembler.jmp_r(target);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        m_assembler.jmp_m(address.offset, address.base);
    }
1821 
1822 
1823     // Arithmetic control flow operations:
1824     //
1825     // This set of conditional branch operations branch based
1826     // on the result of an arithmetic operation.  The operation
1827     // is performed as normal, storing the result.
1828     //
1829     // * jz operations branch if the result is zero.
1830     // * jo operations branch if the (signed) arithmetic
1831     //   operation caused an overflow to occur.
1832 
    // Arithmetic-and-branch forms: perform the operation (result is stored
    // as normal), then branch on the resulting flags — jnz*/jz* on the zero
    // flag, jo* on signed overflow.

    Jump jnzSubPtr(Imm32 imm, RegisterID dest)
    {
        subPtr(imm, dest);
        return Jump(m_assembler.jne());
    }

    Jump jnzSub32(Imm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jne());
    }

    Jump joAddPtr(RegisterID src, RegisterID dest)
    {
        addPtr(src, dest);
        return Jump(m_assembler.jo());
    }

    Jump joAdd32(RegisterID src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jo());
    }

    Jump joAdd32(Imm32 imm, RegisterID dest)
    {
        add32(imm, dest);
        return Jump(m_assembler.jo());
    }

    Jump joMul32(RegisterID src, RegisterID dest)
    {
        mul32(src, dest);
        return Jump(m_assembler.jo());
    }

    Jump joMul32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        mul32(imm, src, dest);
        return Jump(m_assembler.jo());
    }

    Jump joSub32(RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jo());
    }

    Jump joSub32(Imm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jo());
    }

    Jump jzSubPtr(Imm32 imm, RegisterID dest)
    {
        subPtr(imm, dest);
        return Jump(m_assembler.je());
    }

    Jump jzSub32(Imm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.je());
    }
1898 
1899 
1900     // Miscellaneous operations:
1901 
    // Emit an int3 trap instruction; executing it halts the process in the
    // debugger at this point in the generated code.
    void breakpoint()
    {
        m_assembler.int3();
    }
1906 
call()1907     Jump call()
1908     {
1909         return Jump(m_assembler.call());
1910     }
1911 
1912     // FIXME: why does this return a Jump object? - it can't be linked.
1913     // This may be to get a reference to the return address of the call.
1914     //
1915     // This should probably be handled by a separate label type to a regular
1916     // jump.  Todo: add a CallLabel type, for the regular call - can be linked
1917     // like a jump (possibly a subclass of jump?, or possibly casts to a Jump).
1918     // Also add a CallReturnLabel type for this to return (just a more JmpDsty
1919     // form of label, can get the void* after the code has been linked, but can't
1920     // try to link it like a Jump object), and let the CallLabel be cast into a
1921     // CallReturnLabel.
call(RegisterID target)1922     Jump call(RegisterID target)
1923     {
1924         return Jump(m_assembler.call(target));
1925     }
1926 
label()1927     Label label()
1928     {
1929         return Label(this);
1930     }
1931 
align()1932     Label align()
1933     {
1934         m_assembler.align(16);
1935         return Label(this);
1936     }
1937 
differenceBetween(Label from,Jump to)1938     ptrdiff_t differenceBetween(Label from, Jump to)
1939     {
1940         return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
1941     }
1942 
differenceBetween(Label from,Label to)1943     ptrdiff_t differenceBetween(Label from, Label to)
1944     {
1945         return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_label);
1946     }
1947 
differenceBetween(Label from,DataLabelPtr to)1948     ptrdiff_t differenceBetween(Label from, DataLabelPtr to)
1949     {
1950         return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_label);
1951     }
1952 
differenceBetween(Label from,DataLabel32 to)1953     ptrdiff_t differenceBetween(Label from, DataLabel32 to)
1954     {
1955         return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_label);
1956     }
1957 
differenceBetween(DataLabelPtr from,Jump to)1958     ptrdiff_t differenceBetween(DataLabelPtr from, Jump to)
1959     {
1960         return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
1961     }
1962 
    // Emit a return instruction, transferring control back to the caller.
    void ret()
    {
        m_assembler.ret();
    }
1967 
    // Compare src and srcDest; srcDest becomes 1 if they are equal, 0 otherwise.
    void sete32(RegisterID src, RegisterID srcDest)
    {
        m_assembler.cmpl_rr(srcDest, src);
        m_assembler.sete_r(srcDest);
        // sete only writes the low byte; zero-extend to the full 32-bit register.
        m_assembler.movzbl_rr(srcDest, srcDest);
    }
1974 
    // Compare srcDest against the immediate; srcDest becomes 1 if equal, 0 otherwise.
    void sete32(Imm32 imm, RegisterID srcDest)
    {
        // Emits an equality comparison of srcDest with imm (helper may pick
        // the most compact encoding for the immediate).
        compareImm32ForBranchEquality(srcDest, imm.m_value);
        m_assembler.sete_r(srcDest);
        // sete only writes the low byte; zero-extend to the full 32-bit register.
        m_assembler.movzbl_rr(srcDest, srcDest);
    }
1981 
    // Compare src and srcDest; srcDest becomes 1 if they differ, 0 if equal.
    void setne32(RegisterID src, RegisterID srcDest)
    {
        m_assembler.cmpl_rr(srcDest, src);
        m_assembler.setne_r(srcDest);
        // setne only writes the low byte; zero-extend to the full 32-bit register.
        m_assembler.movzbl_rr(srcDest, srcDest);
    }
1988 
    // Compare srcDest against the immediate; srcDest becomes 1 if they differ,
    // 0 if equal.
    void setne32(Imm32 imm, RegisterID srcDest)
    {
        compareImm32ForBranchEquality(srcDest, imm.m_value);
        m_assembler.setne_r(srcDest);
        // setne only writes the low byte; zero-extend to the full 32-bit register.
        m_assembler.movzbl_rr(srcDest, srcDest);
    }
1995 
1996     // FIXME:
    // The mask should be optional... perhaps the argument order should be
1998     // dest-src, operations always have a dest? ... possibly not true, considering
1999     // asm ops like test, or pseudo ops like pop().
    // Test the 32-bit value in memory at address against mask; dest becomes
    // 1 if any masked bit is set, 0 otherwise.
    void setnz32(Address address, Imm32 mask, RegisterID dest)
    {
        testImm32(address, mask);
        m_assembler.setnz_r(dest);
        // setnz only writes the low byte; zero-extend to the full 32-bit register.
        m_assembler.movzbl_rr(dest, dest);
    }
2006 
    // Test the 32-bit value in memory at address against mask; dest becomes
    // 1 if no masked bit is set, 0 otherwise.
    void setz32(Address address, Imm32 mask, RegisterID dest)
    {
        testImm32(address, mask);
        m_assembler.setz_r(dest);
        // setz only writes the low byte; zero-extend to the full 32-bit register.
        m_assembler.movzbl_rr(dest, dest);
    }
2013 };
2014 
2015 } // namespace JSC
2016 
2017 #endif // ENABLE(ASSEMBLER)
2018 
2019 #endif // MacroAssembler_h
2020