• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2008 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  */
25 
26 #ifndef MacroAssemblerX86Common_h
27 #define MacroAssemblerX86Common_h
28 
29 #if ENABLE(ASSEMBLER)
30 
31 #include "X86Assembler.h"
32 #include "AbstractMacroAssembler.h"
33 
34 namespace JSC {
35 
36 class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
    // Flag bits ORed into DoubleCondition values below. They occupy bit positions
    // that never collide with real X86Assembler::Condition codes (checked by the
    // COMPILE_ASSERT after the enum).
    static const int DoubleConditionBitInvert = 0x10;
    static const int DoubleConditionBitSpecial = 0x20;
    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;
40 
41 public:
42     typedef X86Assembler::FPRegisterID FPRegisterID;
43 
    // Integer branch conditions, mapped directly onto x86 condition codes.
    // Zero/NonZero alias Equal/NotEqual for use after test instructions.
    enum Condition {
        Equal = X86Assembler::ConditionE,
        NotEqual = X86Assembler::ConditionNE,
        Above = X86Assembler::ConditionA,
        AboveOrEqual = X86Assembler::ConditionAE,
        Below = X86Assembler::ConditionB,
        BelowOrEqual = X86Assembler::ConditionBE,
        GreaterThan = X86Assembler::ConditionG,
        GreaterThanOrEqual = X86Assembler::ConditionGE,
        LessThan = X86Assembler::ConditionL,
        LessThanOrEqual = X86Assembler::ConditionLE,
        Overflow = X86Assembler::ConditionO,
        Signed = X86Assembler::ConditionS,
        Zero = X86Assembler::ConditionE,
        NonZero = X86Assembler::ConditionNE
    };
60 
    // Floating-point branch conditions. DoubleConditionBitInvert requests that
    // branchDouble swap the operand order of the compare; DoubleConditionBitSpecial
    // marks conditions that need extra parity-flag handling for NaN operands.
    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
        DoubleNotEqual = X86Assembler::ConditionNE,
        DoubleGreaterThan = X86Assembler::ConditionA,
        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = X86Assembler::ConditionE,
        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
    };
    // Guarantee the flag bits stay disjoint from every condition code used above.
    COMPILE_ASSERT(
        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);
80 
    // esp is the stack pointer on both x86 and x86-64.
    static const RegisterID stackPointerRegister = X86Registers::esp;
82 
83     // Integer arithmetic operations:
84     //
85     // Operations are typically two operand - operation(source, srcDst)
86     // For many operations the source may be an TrustedImm32, the srcDst operand
87     // may often be a memory location (explictly described using an Address
88     // object).
89 
    // dest += src.
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }
94 
    // [address] += imm.
    void add32(TrustedImm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }
99 
    // dest += imm.
    void add32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.addl_ir(imm.m_value, dest);
    }
104 
    // dest += [src].
    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }
109 
    // [dest] += src.
    void add32(RegisterID src, Address dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base);
    }
114 
    // dest &= src.
    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }
119 
    // dest &= imm.
    void and32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }
124 
    // [dest] &= src.
    void and32(RegisterID src, Address dest)
    {
        m_assembler.andl_rm(src, dest.offset, dest.base);
    }
129 
    // dest &= [src].
    void and32(Address src, RegisterID dest)
    {
        m_assembler.andl_mr(src.offset, src.base, dest);
    }
134 
    // [address] &= imm.
    void and32(TrustedImm32 imm, Address address)
    {
        m_assembler.andl_im(imm.m_value, address.offset, address.base);
    }
139 
and32(RegisterID op1,RegisterID op2,RegisterID dest)140     void and32(RegisterID op1, RegisterID op2, RegisterID dest)
141     {
142         if (op1 == op2)
143             zeroExtend32ToPtr(op1, dest);
144         else if (op1 == dest)
145             and32(op2, dest);
146         else {
147             move(op2, dest);
148             and32(op1, dest);
149         }
150     }
151 
    // dest = src & imm.
    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        and32(imm, dest);
    }
157 
    // dest <<= shift_amount. shift_amount must not alias dest.
    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shll_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }
173 
    // dest = src << shift_amount. shift_amount must not alias dest.
    void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        lshift32(shift_amount, dest);
    }
182 
    // dest <<= imm.
    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }
187 
    // dest = src << imm.
    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        lshift32(imm, dest);
    }
194 
    // dest *= src (signed 32-bit multiply).
    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }
199 
    // dest *= [src] (signed 32-bit multiply).
    void mul32(Address src, RegisterID dest)
    {
        m_assembler.imull_mr(src.offset, src.base, dest);
    }
204 
    // dest = src * imm (signed 32-bit multiply with immediate).
    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }
209 
    // srcDest = -srcDest (two's-complement negation).
    void neg32(RegisterID srcDest)
    {
        m_assembler.negl_r(srcDest);
    }
214 
    // [srcDest] = -[srcDest] (two's-complement negation in memory).
    void neg32(Address srcDest)
    {
        m_assembler.negl_m(srcDest.offset, srcDest.base);
    }
219 
    // srcDest = ~srcDest (bitwise NOT).
    void not32(RegisterID srcDest)
    {
        m_assembler.notl_r(srcDest);
    }
224 
    // [srcDest] = ~[srcDest] (bitwise NOT in memory).
    void not32(Address srcDest)
    {
        m_assembler.notl_m(srcDest.offset, srcDest.base);
    }
229 
    // dest |= src.
    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }
234 
    // dest |= imm.
    void or32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }
239 
    // [dest] |= src.
    void or32(RegisterID src, Address dest)
    {
        m_assembler.orl_rm(src, dest.offset, dest.base);
    }
244 
    // dest |= [src].
    void or32(Address src, RegisterID dest)
    {
        m_assembler.orl_mr(src.offset, src.base, dest);
    }
249 
    // [address] |= imm.
    void or32(TrustedImm32 imm, Address address)
    {
        m_assembler.orl_im(imm.m_value, address.offset, address.base);
    }
254 
or32(RegisterID op1,RegisterID op2,RegisterID dest)255     void or32(RegisterID op1, RegisterID op2, RegisterID dest)
256     {
257         if (op1 == op2)
258             zeroExtend32ToPtr(op1, dest);
259         else if (op1 == dest)
260             or32(op2, dest);
261         else {
262             move(op2, dest);
263             or32(op1, dest);
264         }
265     }
266 
    // dest = src | imm.
    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        or32(imm, dest);
    }
272 
    // dest >>= shift_amount (arithmetic/sign-preserving). shift_amount must not alias dest.
    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.sarl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }
288 
    // dest = src >> shift_amount (arithmetic). shift_amount must not alias dest.
    void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        rshift32(shift_amount, dest);
    }
297 
    // dest >>= imm (arithmetic).
    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }
302 
    // dest = src >> imm (arithmetic).
    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        rshift32(imm, dest);
    }
309 
    // dest >>>= shift_amount (logical/zero-filling). shift_amount must not alias dest.
    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shrl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }
325 
    // dest = src >>> shift_amount (logical). shift_amount must not alias dest.
    void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        urshift32(shift_amount, dest);
    }
334 
    // dest >>>= imm (logical).
    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shrl_i8r(imm.m_value, dest);
    }
339 
    // dest = src >>> imm (logical).
    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        urshift32(imm, dest);
    }
346 
    // dest -= src.
    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }
351 
    // dest -= imm.
    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subl_ir(imm.m_value, dest);
    }
356 
    // [address] -= imm.
    void sub32(TrustedImm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }
361 
    // dest -= [src].
    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }
366 
    // [dest] -= src.
    void sub32(RegisterID src, Address dest)
    {
        m_assembler.subl_rm(src, dest.offset, dest.base);
    }
371 
372 
    // dest ^= src.
    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }
377 
    // [dest] ^= imm.
    void xor32(TrustedImm32 imm, Address dest)
    {
        m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
    }
382 
    // dest ^= imm.
    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.xorl_ir(imm.m_value, dest);
    }
387 
    // [dest] ^= src.
    void xor32(RegisterID src, Address dest)
    {
        m_assembler.xorl_rm(src, dest.offset, dest.base);
    }
392 
    // dest ^= [src].
    void xor32(Address src, RegisterID dest)
    {
        m_assembler.xorl_mr(src.offset, src.base, dest);
    }
397 
xor32(RegisterID op1,RegisterID op2,RegisterID dest)398     void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
399     {
400         if (op1 == op2)
401             move(TrustedImm32(0), dest);
402         else if (op1 == dest)
403             xor32(op2, dest);
404         else {
405             move(op2, dest);
406             xor32(op1, dest);
407         }
408     }
409 
    // dest = src ^ imm.
    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        xor32(imm, dest);
    }
415 
    // dst = sqrt(src), double precision (SSE2 sqrtsd).
    void sqrtDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.sqrtsd_rr(src, dst);
    }
420 
421     // Memory access operations:
422     //
423     // Loads are of the form load(address, destination) and stores of the form
    // store(source, address).  The source for a store may be a TrustedImm32.  Address
425     // operand objects to loads and store will be implicitly constructed if a
426     // register is passed.
427 
    // dest = [address] (32-bit load).
    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }
432 
    // dest = [base + index * scale + offset] (32-bit load).
    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }
437 
    // x86 permits unaligned memory access, so this simply forwards to load32.
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }
442 
    // 32-bit load emitted with a full 32-bit displacement so the offset can be
    // patched later; returns a DataLabel32 identifying the patch location.
    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }
448 
    // dest = zero-extended 16-bit load from [base + index * scale + offset].
    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }
453 
    // dest = zero-extended 16-bit load from [address].
    void load16(Address address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, dest);
    }
458 
    // 32-bit store emitted with a full 32-bit displacement so the offset can be
    // patched later; returns a DataLabel32 identifying the patch location.
    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }
464 
    // [address] = src (32-bit store).
    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }
469 
    // [base + index * scale + offset] = src (32-bit store).
    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }
474 
    // [address] = imm (32-bit immediate store).
    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }
479 
480 
481     // Floating-point operation:
482     //
483     // Presently only supports SSE, not x87 floating point.
484 
    // dest = src (double register move; no-op when they already alias).
    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (src != dest)
            m_assembler.movsd_rr(src, dest);
    }
491 
    // dest = double loaded from [address].
    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, dest);
    }
497 
    // [address] = src (double store).
    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base);
    }
503 
    // dest += src (double).
    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_rr(src, dest);
    }
509 
    // dest = op1 + op2 (double). Addition commutes, so whichever operand already
    // lives in dest is folded directly.
    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            addDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            addDouble(op1, dest);
        }
    }
520 
    // dest += double loaded from [src].
    void addDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_mr(src.offset, src.base, dest);
    }
526 
    // dest /= src (double).
    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_rr(src, dest);
    }
532 
divDouble(FPRegisterID op1,FPRegisterID op2,FPRegisterID dest)533     void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
534     {
535         // B := A / B is invalid.
536         ASSERT(op1 == dest || op2 != dest);
537 
538         moveDouble(op1, dest);
539         divDouble(op2, dest);
540     }
541 
    // dest /= double loaded from [src].
    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_mr(src.offset, src.base, dest);
    }
547 
    // dest -= src (double).
    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_rr(src, dest);
    }
553 
subDouble(FPRegisterID op1,FPRegisterID op2,FPRegisterID dest)554     void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
555     {
556         // B := A - B is invalid.
557         ASSERT(op1 == dest || op2 != dest);
558 
559         moveDouble(op1, dest);
560         subDouble(op2, dest);
561     }
562 
    // dest -= double loaded from [src].
    void subDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_mr(src.offset, src.base, dest);
    }
568 
    // dest *= src (double).
    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_rr(src, dest);
    }
574 
mulDouble(FPRegisterID op1,FPRegisterID op2,FPRegisterID dest)575     void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
576     {
577         ASSERT(isSSE2Present());
578         if (op1 == dest)
579             mulDouble(op2, dest);
580         else {
581             moveDouble(op2, dest);
582             mulDouble(op1, dest);
583         }
584     }
585 
    // dest *= double loaded from [src].
    void mulDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_mr(src.offset, src.base, dest);
    }
591 
    // dest = (double)src, converting a signed 32-bit integer.
    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_rr(src, dest);
    }
597 
    // dest = (double) of the signed 32-bit integer at [src].
    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
    }
603 
    // Compare two doubles and return a Jump taken when 'cond' holds.
    // DoubleConditionBitInvert requests the swapped operand order for ucomisd so
    // that all conditions can be expressed with the unsigned condition codes.
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        ASSERT(isSSE2Present());

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);

        // ucomisd signals an unordered (NaN) comparison via the parity flag; the
        // two DoubleConditionBitSpecial conditions handle that case explicitly.
        if (cond == DoubleEqual) {
            // Unordered must NOT take the branch: jp routes around the je.
            Jump isUnordered(m_assembler.jp());
            Jump result = Jump(m_assembler.je());
            isUnordered.link(this);
            return result;
        } else if (cond == DoubleNotEqualOrUnordered) {
            // Unordered MUST take the branch: jp falls into the unconditional jump.
            Jump isUnordered(m_assembler.jp());
            Jump isEqual(m_assembler.je());
            isUnordered.link(this);
            Jump result = jump();
            isEqual.link(this);
            return result;
        }

        ASSERT(!(cond & DoubleConditionBitSpecial));
        // Strip the helper flag bits to recover the raw x86 condition code.
        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
    }
630 
    // Truncates 'src' to an integer, and places the result in 'dest'.
632     // If the result is not representable as a 32 bit value, branch.
633     // May also branch for some values that are representable in 32 bits
634     // (specifically, in this case, INT_MIN).
635     enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
636     Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
637     {
638         ASSERT(isSSE2Present());
639         m_assembler.cvttsd2si_rr(src, dest);
640         return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
641     }
642 
    // Converts 'src' to an integer, and places the result in 'dest'.
644     // If the result is not representable as a 32 bit value, branch.
645     // May also branch for some values that are representable in 32 bits
646     // (specifically, in this case, 0).
    // Converts 'src' to an int32 in 'dest'; appends to failureCases every branch
    // taken when the conversion is lossy (fractional part, out of range, NaN, -0.0).
    // fpTemp is clobbered as scratch for the round-trip comparison.
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        failureCases.append(branchTest32(Zero, dest));

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        convertInt32ToDouble(dest, fpTemp);
        m_assembler.ucomisd_rr(fpTemp, src);
        failureCases.append(m_assembler.jp());
        failureCases.append(m_assembler.jne());
    }
661 
    // Branch taken when reg != 0.0 and reg is not NaN. scratch is clobbered (zeroed).
    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }
668 
    // Branch taken when reg == 0.0 or reg is NaN. scratch is clobbered (zeroed).
    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }
675 
676     // Stack manipulation operations:
677     //
678     // The ABI is assumed to provide a stack abstraction to memory,
679     // containing machine word sized units of data.  Push and pop
680     // operations add and remove a single register sized unit of data
681     // to or from the stack.  Peek and poke operations read or write
682     // values on the stack, without moving the current stack position.
683 
    // Pop one machine word from the stack into dest.
    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }
688 
    // Push the contents of src onto the stack.
    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }
693 
    // Push the machine word at [address] onto the stack.
    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }
698 
    // Push a 32-bit immediate onto the stack.
    void push(TrustedImm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }
703 
704 
705     // Register move operations:
706     //
707     // Move values in registers.
708 
    // dest = imm.
    void move(TrustedImm32 imm, RegisterID dest)
    {
        // Note: on 64-bit the TrustedImm32 value is zero extended into the register, it
        // may be useful to have a separate version that sign extends the value?
        if (!imm.m_value)
            // xor reg,reg is the shorter canonical encoding for zeroing a register.
            m_assembler.xorl_rr(dest, dest);
        else
            m_assembler.movl_i32r(imm.m_value, dest);
    }
718 
719 #if CPU(X86_64)
    // dest = src (full 64-bit register move; no-op when they already alias).
    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
        if (src != dest)
            m_assembler.movq_rr(src, dest);
    }
727 
    // dest = imm (full 64-bit pointer immediate).
    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.asIntptr(), dest);
    }
732 
    // Exchange the (64-bit) contents of reg1 and reg2; no-op when they alias.
    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgq_rr(reg1, reg2);
    }
738 
    // dest = sign-extension of the low 32 bits of src to 64 bits.
    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movsxd_rr(src, dest);
    }
743 
    // dest = zero-extension of the low 32 bits of src (a 32-bit mov clears the upper half).
    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movl_rr(src, dest);
    }
748 #else
    // dest = src (32-bit register move; no-op when they already alias).
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movl_rr(src, dest);
    }
754 
    // dest = imm (pointer-sized immediate; 32 bits on this target).
    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movl_i32r(imm.asIntptr(), dest);
    }
759 
    // Exchange the (32-bit) contents of reg1 and reg2; no-op when they alias.
    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgl_rr(reg1, reg2);
    }
765 
    // Pointers are 32 bits on this target, so extension is a plain move.
    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }
770 
    // Pointers are 32 bits on this target, so extension is a plain move.
    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }
775 #endif
776 
777 
778     // Forwards / external control flow operations:
779     //
780     // This set of jump and conditional branch operations return a Jump
781     // object which may linked at a later point, allow forwards jump,
782     // or jumps that will require external linkage (after the code has been
783     // relocated).
784     //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively, for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
788     //
    // Operands to the comparison are provided in the expected order, e.g.
790     // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
791     // treated as a signed 32bit value, is less than or equal to 5.
792     //
793     // jz and jnz test whether the first operand is equal to zero, and take
794     // an optional second operand of a mask under which to perform the test.
795 
796 public:
    // Branch taken when (8-bit)[left] compares to imm under 'cond'.
    Jump branch8(Condition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
802 
    // Branch taken when left compares to right under 'cond' (32-bit).
    Jump branch32(Condition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
808 
branch32(Condition cond,RegisterID left,TrustedImm32 right)809     Jump branch32(Condition cond, RegisterID left, TrustedImm32 right)
810     {
811         if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
812             m_assembler.testl_rr(left, left);
813         else
814             m_assembler.cmpl_ir(right.m_value, left);
815         return Jump(m_assembler.jCC(x86Condition(cond)));
816     }
817 
branch32(Condition cond,RegisterID left,Address right)818     Jump branch32(Condition cond, RegisterID left, Address right)
819     {
820         m_assembler.cmpl_mr(right.offset, right.base, left);
821         return Jump(m_assembler.jCC(x86Condition(cond)));
822     }
823 
branch32(Condition cond,Address left,RegisterID right)824     Jump branch32(Condition cond, Address left, RegisterID right)
825     {
826         m_assembler.cmpl_rm(right, left.offset, left.base);
827         return Jump(m_assembler.jCC(x86Condition(cond)));
828     }
829 
branch32(Condition cond,Address left,TrustedImm32 right)830     Jump branch32(Condition cond, Address left, TrustedImm32 right)
831     {
832         m_assembler.cmpl_im(right.m_value, left.offset, left.base);
833         return Jump(m_assembler.jCC(x86Condition(cond)));
834     }
835 
branch32(Condition cond,BaseIndex left,TrustedImm32 right)836     Jump branch32(Condition cond, BaseIndex left, TrustedImm32 right)
837     {
838         m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
839         return Jump(m_assembler.jCC(x86Condition(cond)));
840     }
841 
branch32WithUnalignedHalfWords(Condition cond,BaseIndex left,TrustedImm32 right)842     Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, TrustedImm32 right)
843     {
844         return branch32(cond, left, right);
845     }
846 
    // Compare the 16-bit value at 'left' against the low word of 'right',
    // then branch on 'cond'.
    Jump branch16(Condition cond, BaseIndex left, RegisterID right)
    {
        m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Compare the 16-bit value at 'left' against a 16-bit immediate; the
    // immediate must already fit in 16 bits.
    Jump branch16(Condition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(right.m_value & 0xFFFF0000));

        m_assembler.cmpw_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
860 
    // Branch on the flags produced by 'reg & mask'. Only the conditions that
    // read flags a TEST sets meaningfully are permitted (Zero/NonZero/Signed).
    Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed));
        m_assembler.testl_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Branch on 'reg & mask'. mask == -1 (the default) tests the whole register
    // with the shorter 'test reg, reg'; a mask confined to the low seven bits
    // uses a byte-sized test for a smaller encoding.
    // NOTE(review): on 32-bit x86 a byte-sized test whose register field is
    // esp/ebp/esi/edi encodes ah/ch/dh/bh rather than the register's low byte --
    // confirm testb_i8r (or all callers) guards against those registers.
    Jump branchTest32(Condition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed));
        // if we are only interested in the low seven bits, this can be tested with a testb
        if (mask.m_value == -1)
            m_assembler.testl_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testl_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Branch on '[address] & mask'. For the all-ones mask a compare against
    // zero is emitted instead; it sets ZF/SF identically for these conditions.
    Jump branchTest32(Condition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed));
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // As above, with a base + index*scale + offset addressing mode.
    Jump branchTest32(Condition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed));
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
900 
    // Branch on the low byte of 'reg' masked by 'mask'.
    // NOTE(review): as with branchTest32's testb peephole, byte-sized register
    // operands esp/ebp/esi/edi select ah/ch/dh/bh on 32-bit x86 -- confirm the
    // assembler or callers restrict 'reg' to byte-addressable registers.
    Jump branchTest8(Condition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed));
        if (mask.m_value == -1)
            m_assembler.testb_rr(reg, reg);
        else
            m_assembler.testb_i8r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Branch on the byte at 'address' masked by 'mask'; the all-ones mask is
    // emitted as a byte compare against zero (equivalent flags for Zero/NonZero/Signed).
    Jump branchTest8(Condition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed));
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // As above, with a base + index*scale + offset addressing mode.
    Jump branchTest8(Condition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed));
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
936 
    // Unconditional relative jump; the returned Jump is linked to its target later.
    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    // Indirect jump to the address held in 'target'.
    void jump(RegisterID target)
    {
        m_assembler.jmp_r(target);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        m_assembler.jmp_m(address.offset, address.base);
    }
952 
953 
954     // Arithmetic control flow operations:
955     //
956     // This set of conditional branch operations branch based
957     // on the result of an arithmetic operation.  The operation
958     // is performed as normal, storing the result.
959     //
960     // * jz operations branch if the result is zero.
961     // * jo operations branch if the (signed) arithmetic
962     //   operation caused an overflow to occur.
963 
    // Perform 'dest += src' and branch on the flags the add produces.
    Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Perform 'dest += imm' and branch on the resulting flags.
    Jump branchAdd32(Condition cond, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Add an immediate into memory and branch on the resulting flags.
    Jump branchAdd32(Condition cond, TrustedImm32 src, Address dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Add a register into memory and branch on the resulting flags.
    Jump branchAdd32(Condition cond, RegisterID src, Address dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Add a memory operand into a register and branch on the resulting flags.
    Jump branchAdd32(Condition cond, Address src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Three-operand form: dest = src1 + src2. Addition commutes, so when src1
    // already occupies dest we just add src2; otherwise src2 is copied into
    // dest first (a no-op move when src2 == dest) and src1 is added.
    Jump branchAdd32(Condition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchAdd32(cond, src2, dest);
        move(src2, dest);
        return branchAdd32(cond, src1, dest);
    }

    // Three-operand form with an immediate: dest = src + imm. The move is a
    // no-op when src == dest.
    Jump branchAdd32(Condition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        move(src, dest);
        return branchAdd32(cond, imm, dest);
    }
1012 
    // Perform 'dest *= src' and branch if the signed multiply overflowed.
    Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT(cond == Overflow);
        mul32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Multiply a memory operand into 'dest' and branch on the chosen condition.
    // NOTE(review): imul leaves ZF/SF undefined, so the Zero/NonZero conditions
    // this ASSERT permits (unlike the register overload above, which allows
    // Overflow only) look unreliable -- confirm no caller relies on them.
    Jump branchMul32(Condition cond, Address src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        mul32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // dest = src * imm; branch if the signed multiply overflowed.
    Jump branchMul32(Condition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT(cond == Overflow);
        mul32(imm, src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Three-operand form: dest = src1 * src2. Multiplication commutes, so the
    // operand already in dest (if any) can be kept in place.
    Jump branchMul32(Condition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchMul32(cond, src2, dest);
        move(src2, dest);
        return branchMul32(cond, src1, dest);
    }
1041 
    // Perform 'dest -= src' and branch on the flags the subtract produces.
    Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Perform 'dest -= imm' and branch on the resulting flags.
    Jump branchSub32(Condition cond, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Subtract an immediate from memory and branch on the resulting flags.
    Jump branchSub32(Condition cond, TrustedImm32 imm, Address dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Subtract a register from memory and branch on the resulting flags.
    Jump branchSub32(Condition cond, RegisterID src, Address dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Subtract a memory operand from a register and branch on the resulting flags.
    Jump branchSub32(Condition cond, Address src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Three-operand form: dest = src1 - src2. Subtraction does not commute, so
    // src2 must not alias dest unless src1 does too; the move is a no-op when
    // src1 == dest.
    Jump branchSub32(Condition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(src1 == dest || src2 != dest);

        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    // Three-operand form with an immediate subtrahend: dest = src1 - src2.
    Jump branchSub32(Condition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }
1091 
    // Negate 'srcDest' in place and branch on the flags neg produces.
    Jump branchNeg32(Condition cond, RegisterID srcDest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        neg32(srcDest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Perform 'dest |= src' and branch on the flags the or produces. OR never
    // overflows, so only the sign/zero conditions are permitted.
    Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
        or32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
1105 
1106 
1107     // Miscellaneous operations:
1108 
    // Emit an int3 software breakpoint; execution traps here under a debugger.
    void breakpoint()
    {
        m_assembler.int3();
    }

    // Emit a near call with a to-be-linked relative target.
    Call nearCall()
    {
        return Call(m_assembler.call(), Call::LinkableNear);
    }

    // Indirect call through the address held in 'target'; not linkable.
    Call call(RegisterID target)
    {
        return Call(m_assembler.call(target), Call::None);
    }

    // Indirect call through a call-target stored in memory.
    void call(Address address)
    {
        m_assembler.call_m(address.offset, address.base);
    }

    // Return to the caller (pops the return address).
    void ret()
    {
        m_assembler.ret();
    }
1133 
    // Compare two registers and materialize 'cond' as 0/1 in the low byte of
    // 'dest'. Only the low byte is written; the upper 24 bits are left as-is.
    // NOTE(review): setCC needs a byte-addressable destination -- on 32-bit x86
    // that excludes esp/ebp/esi/edi; confirm callers honour this.
    void set8Compare32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
    }

    // As above, comparing a memory operand against a register.
    void set8Compare32(Condition cond, Address left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_mr(left.offset, left.base, right);
        m_assembler.setCC_r(x86Condition(cond), dest);
    }

    // As above, comparing a register against an immediate; equality against
    // zero uses the shorter 'test reg, reg' encoding.
    void set8Compare32(Condition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
    }

    // Compare two registers and materialize 'cond' as a full 32-bit 0/1 in
    // 'dest': setCC writes the low byte, movzbl zero-extends it.
    void set32Compare32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    // 32-bit 0/1 result of comparing a register against an immediate, with the
    // same test-vs-cmp peephole for equality against zero.
    void set32Compare32(Condition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }
1171 
1172     // FIXME:
    // The mask should be optional... perhaps the argument order should be
1174     // dest-src, operations always have a dest? ... possibly not true, considering
1175     // asm ops like test, or pseudo ops like pop().
1176 
    // Materialize 'cond' of '[address] & mask' (byte-sized test) as a 32-bit
    // 0/1 in 'dest'. The all-ones mask degenerates to a byte compare against zero.
    void set32Test8(Condition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    // As above for a full 32-bit test of '[address] & mask'.
    void set32Test32(Condition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }
1196 
1197 protected:
    // The macro-assembler Condition values are defined to match the assembler's
    // x86 condition-code encoding, so the mapping is a plain cast.
    X86Assembler::Condition x86Condition(Condition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }
1202 
1203 private:
1204     // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
1205     // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
1206     friend class MacroAssemblerX86;
1207 
1208 #if CPU(X86)
1209 #if OS(MAC_OS_X)
1210 
    // All X86 Macs are guaranteed to support at least SSE2, so no runtime
    // cpuid check is needed on this platform.
    static bool isSSE2Present()
    {
        return true;
    }
1216 
1217 #else // OS(MAC_OS_X)
1218 
    // Tri-state cache for the runtime SSE2 probe below.
    enum SSE2CheckState {
        NotCheckedSSE2, // cpuid not yet executed
        HasSSE2,        // cpuid reported SSE2 support
        NoSSE2          // cpuid reported no SSE2 (or compiler had no cpuid path)
    };

    // Detect SSE2 via 'cpuid' function 1 (feature flags in edx, SSE2 = bit 26),
    // caching the answer in s_sse2CheckState so cpuid runs at most once.
    static bool isSSE2Present()
    {
        if (s_sse2CheckState == NotCheckedSSE2) {
            // Default the flags value to zero; if the compiler is
            // not MSVC or GCC we will read this as SSE2 not present.
            int flags = 0;
#if COMPILER(MSVC)
            _asm {
                mov eax, 1 // cpuid function 1 gives us the standard feature set
                cpuid;
                mov flags, edx;
            }
#elif COMPILER(GCC)
            // ebx is saved/restored around cpuid because it may hold the PIC
            // base pointer and cpuid clobbers it.
            asm (
                 "movl $0x1, %%eax;"
                 "pushl %%ebx;"
                 "cpuid;"
                 "popl %%ebx;"
                 "movl %%edx, %0;"
                 : "=g" (flags)
                 :
                 : "%eax", "%ecx", "%edx"
                 );
#endif
            static const int SSE2FeatureBit = 1 << 26;
            s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
        }
        // Only check once.
        ASSERT(s_sse2CheckState != NotCheckedSSE2);

        return s_sse2CheckState == HasSSE2;
    }

    // Cached cpuid result; presumably defined out-of-line in a .cpp -- the
    // definition is not visible in this chunk.
    static SSE2CheckState s_sse2CheckState;
1259 
1260 #endif // OS(MAC_OS_X)
1261 #elif !defined(NDEBUG) // CPU(X86)
1262 
1263     // On x86-64 we should never be checking for SSE2 in a non-debug build,
1264     // but non debug add this method to keep the asserts above happy.
    static bool isSSE2Present()
    {
        // SSE2 is architecturally guaranteed here (see comment above); this
        // stub only exists to satisfy debug-build assertions.
        return true;
    }
1269 
1270 #endif
1271 };
1272 
1273 } // namespace JSC
1274 
1275 #endif // ENABLE(ASSEMBLER)
1276 
1277 #endif // MacroAssemblerX86Common_h
1278