/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86Common_h
#define MacroAssemblerX86Common_h

#include <wtf/Platform.h>

#if ENABLE(ASSEMBLER)

#include "X86Assembler.h"
#include "AbstractMacroAssembler.h"

namespace JSC {

class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
public:

    enum Condition {
        Equal = X86Assembler::ConditionE,
        NotEqual = X86Assembler::ConditionNE,
        Above = X86Assembler::ConditionA,
        AboveOrEqual = X86Assembler::ConditionAE,
        Below = X86Assembler::ConditionB,
        BelowOrEqual = X86Assembler::ConditionBE,
        GreaterThan = X86Assembler::ConditionG,
        GreaterThanOrEqual = X86Assembler::ConditionGE,
        LessThan = X86Assembler::ConditionL,
        LessThanOrEqual = X86Assembler::ConditionLE,
        Overflow = X86Assembler::ConditionO,
        Signed = X86Assembler::ConditionS,
        Zero = X86Assembler::ConditionE,
        NonZero = X86Assembler::ConditionNE
    };

    enum DoubleCondition {
        DoubleEqual = X86Assembler::ConditionE,
        DoubleNotEqual = X86Assembler::ConditionNE,
        DoubleGreaterThan = X86Assembler::ConditionA,
        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
        DoubleLessThan = X86Assembler::ConditionB,
        DoubleLessThanOrEqual = X86Assembler::ConditionBE
    };

    static const RegisterID stackPointerRegister = X86::esp;

    // Integer arithmetic operations:
    //
    // Operations are typically two-operand: operation(source, srcDst).
    // For many operations the source may be an Imm32, and the srcDst operand
    // may often be a memory location (explicitly described using an Address
    // object).

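    // A minimal usage sketch of the two-operand forms above (illustrative only;
    // the client-side assembler object 'masm' and the choice of registers are
    // assumptions, not part of this header):
    //
    //     MacroAssemblerX86Common masm;
    //     masm.add32(Imm32(4), X86::eax);                    // eax += 4
    //     masm.sub32(X86::ecx, X86::eax);                    // eax -= ecx
    //     masm.and32(Imm32(0xff), Address(X86::esp, 8));     // [esp + 8] &= 0xff
    //
    // Each call simply emits the corresponding x86 instruction into the
    // underlying X86Assembler buffer.
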
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }

    void add32(Imm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }

    void add32(Imm32 imm, RegisterID dest)
    {
        m_assembler.addl_ir(imm.m_value, dest);
    }

    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }

    void add32(RegisterID src, Address dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }

    void and32(Imm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }

    void and32(RegisterID src, Address dest)
    {
        m_assembler.andl_rm(src, dest.offset, dest.base);
    }

    void and32(Address src, RegisterID dest)
    {
        m_assembler.andl_mr(src.offset, src.base, dest);
    }

    void and32(Imm32 imm, Address address)
    {
        m_assembler.andl_im(imm.m_value, address.offset, address.base);
    }

    void lshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }

    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        // On x86 we can only shift by ecx; if asked to shift by another register we'll
        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
        if (shift_amount != X86::ecx) {
            swap(shift_amount, X86::ecx);

            // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
            if (dest == shift_amount)
                m_assembler.shll_CLr(X86::ecx);
            // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
            else if (dest == X86::ecx)
                m_assembler.shll_CLr(shift_amount);
            // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
            else
                m_assembler.shll_CLr(dest);

            swap(shift_amount, X86::ecx);
        } else
            m_assembler.shll_CLr(dest);
    }
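
    // Illustrative note on the ecx constraint above (a sketch; the register
    // choices are assumptions): a request such as
    //
    //     masm.lshift32(X86::edx, X86::eax);     // eax <<= (edx & 31)
    //
    // is emitted roughly as "xchgl %edx, %ecx; shll %cl, %eax; xchgl %edx, %ecx",
    // because variable shift counts on x86 must live in the cl register.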

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }

    void mul32(Address src, RegisterID dest)
    {
        m_assembler.imull_mr(src.offset, src.base, dest);
    }

    void mul32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.negl_r(srcDest);
    }

    void neg32(Address srcDest)
    {
        m_assembler.negl_m(srcDest.offset, srcDest.base);
    }

    void not32(RegisterID srcDest)
    {
        m_assembler.notl_r(srcDest);
    }

    void not32(Address srcDest)
    {
        m_assembler.notl_m(srcDest.offset, srcDest.base);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }

    void or32(Imm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }

    void or32(RegisterID src, Address dest)
    {
        m_assembler.orl_rm(src, dest.offset, dest.base);
    }

    void or32(Address src, RegisterID dest)
    {
        m_assembler.orl_mr(src.offset, src.base, dest);
    }

    void or32(Imm32 imm, Address address)
    {
        m_assembler.orl_im(imm.m_value, address.offset, address.base);
    }

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        // On x86 we can only shift by ecx; if asked to shift by another register we'll
        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
        if (shift_amount != X86::ecx) {
            swap(shift_amount, X86::ecx);

            // E.g. transform "sarl %eax, %eax" -> "xchgl %eax, %ecx; sarl %ecx, %ecx; xchgl %eax, %ecx"
            if (dest == shift_amount)
                m_assembler.sarl_CLr(X86::ecx);
            // E.g. transform "sarl %eax, %ecx" -> "xchgl %eax, %ecx; sarl %ecx, %eax; xchgl %eax, %ecx"
            else if (dest == X86::ecx)
                m_assembler.sarl_CLr(shift_amount);
            // E.g. transform "sarl %eax, %ebx" -> "xchgl %eax, %ecx; sarl %ecx, %ebx; xchgl %eax, %ecx"
            else
                m_assembler.sarl_CLr(dest);

            swap(shift_amount, X86::ecx);
        } else
            m_assembler.sarl_CLr(dest);
    }

    void rshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }

    void sub32(Imm32 imm, RegisterID dest)
    {
        m_assembler.subl_ir(imm.m_value, dest);
    }

    void sub32(Imm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }

    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }

    void sub32(RegisterID src, Address dest)
    {
        m_assembler.subl_rm(src, dest.offset, dest.base);
    }


    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }

    void xor32(Imm32 imm, Address dest)
    {
        m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
    }

    void xor32(Imm32 imm, RegisterID dest)
    {
        m_assembler.xorl_ir(imm.m_value, dest);
    }

    void xor32(RegisterID src, Address dest)
    {
        m_assembler.xorl_rm(src, dest.offset, dest.base);
    }

    void xor32(Address src, RegisterID dest)
    {
        m_assembler.xorl_mr(src.offset, src.base, dest);
    }


    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address).  The source for a store may be an Imm32.  Address
    // operand objects to loads and stores will be implicitly constructed if a
    // register is passed.

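    // A minimal sketch of the load/store forms described above (illustrative
    // only; 'masm' and the particular registers and offsets are assumptions):
    //
    //     masm.load32(Address(X86::ebp, 16), X86::eax);      // eax = [ebp + 16]
    //     masm.store32(Imm32(0), Address(X86::ebp, 16));     // [ebp + 16] = 0
    //     masm.load32(X86::esi, X86::edx);                   // edx = [esi], via implicit Address
    //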
    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store32(Imm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }


    // Floating-point operations:
    //
    // Presently only supports SSE, not x87 floating point.

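    // A minimal sketch of an SSE2 double-precision sequence (illustrative only;
    // 'masm', the chosen XMM register, and the stack offsets are assumptions):
    //
    //     masm.loadDouble(Address(X86::esp, 0), X86::xmm0);  // xmm0 = [esp]
    //     masm.addDouble(Address(X86::esp, 8), X86::xmm0);   // xmm0 += [esp + 8]
    //     masm.storeDouble(X86::xmm0, Address(X86::esp, 0)); // [esp] = xmm0
    //
    // These operations assert isSSE2Present(), so callers are expected to have
    // checked floating-point support first.
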
    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, dest);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_rr(src, dest);
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_mr(src.offset, src.base, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_rr(src, dest);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_mr(src.offset, src.base, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_rr(src, dest);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_mr(src.offset, src.base, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_rr(src, dest);
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_mr(src.offset, src.base, dest);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_rr(src, dest);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
    }

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        ASSERT(isSSE2Present());
        m_assembler.ucomisd_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, Address right)
    {
        m_assembler.ucomisd_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Truncates 'src' to an integer, and places the resulting value in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(Equal, dest, Imm32(0x80000000));
    }

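    // A sketch of the intended calling pattern (illustrative; names are
    // assumptions): the returned Jump is taken when cvttsd2si produces the
    // 0x80000000 sentinel, so callers typically link it to a slower, exact
    // conversion path.
    //
    //     Jump notRepresentable = masm.branchTruncateDoubleToInt32(X86::xmm0, X86::eax);
    //     // ... fast path using eax; 'notRepresentable' is later linked to slow-path code.
    //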
    void zeroDouble(FPRegisterID srcDest)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(srcDest, srcDest);
    }


    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data.  Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack.  Peek and poke operations read or write
    // values on the stack, without moving the current stack position.

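    // A minimal sketch (illustrative; 'masm' and the registers are assumptions):
    //
    //     masm.push(X86::ebp);       // esp moves down one word; [esp] = ebp
    //     masm.push(Imm32(42));      // push an immediate machine word
    //     masm.pop(X86::eax);        // eax = 42; esp moves back up one word
    //     masm.pop(X86::ebp);        // restore ebp
    //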
    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }

    void push(Imm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }


    // Register move operations:
    //
    // Move values in registers.

    void move(Imm32 imm, RegisterID dest)
    {
        // Note: on 64-bit the Imm32 value is zero extended into the register; it
        // may be useful to have a separate version that sign extends the value?
        if (!imm.m_value)
            m_assembler.xorl_rr(dest, dest);
        else
            m_assembler.movl_i32r(imm.m_value, dest);
    }

#if PLATFORM(X86_64)
    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
        if (src != dest)
            m_assembler.movq_rr(src, dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        if (CAN_SIGN_EXTEND_U32_64(imm.asIntptr()))
            m_assembler.movl_i32r(static_cast<int32_t>(imm.asIntptr()), dest);
        else
            m_assembler.movq_i64r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        m_assembler.xchgq_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movsxd_rr(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movl_rr(src, dest);
    }
#else
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movl_rr(src, dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        m_assembler.movl_i32r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgl_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }
#endif


    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations return a Jump
    // object which may be linked at a later point, allowing forward jumps,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively; for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.

public:
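    // A minimal branching sketch (illustrative; 'masm' and the control-flow
    // shape are assumptions): a forward branch is created first, and linked
    // once the target location is known.
    //
    //     Jump done = masm.branch32(LessThanOrEqual, X86::eax, Imm32(0));
    //     masm.sub32(Imm32(1), X86::eax);    // only reached when eax > 0
    //     // ... later, 'done' is linked to the code following this block.
    //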
    Jump branch32(Condition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, RegisterID left, Imm32 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, RegisterID left, Address right)
    {
        m_assembler.cmpl_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, Address left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, Address left, Imm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, BaseIndex left, Imm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch16(Condition cond, BaseIndex left, RegisterID right)
    {
        m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch16(Condition cond, BaseIndex left, Imm32 right)
    {
        ASSERT(!(right.m_value & 0xFFFF0000));

        m_assembler.cmpw_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        m_assembler.testl_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        // If we are only interested in the low seven bits, this can be tested with a testb.
        if (mask.m_value == -1)
            m_assembler.testl_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testl_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.jmp_r(target);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        m_assembler.jmp_m(address.offset, address.base);
    }


    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation.  The operation
    // is performed as normal, storing the result.
    //
    // * jz operations branch if the result is zero.
    // * jo operations branch if the (signed) arithmetic
    //   operation caused an overflow to occur.

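    // A minimal overflow-checked sketch (illustrative; 'masm' and the register
    // are assumptions): the add is emitted normally, and the returned Jump is
    // taken only if the flag selected by 'cond' is set afterwards.
    //
    //     Jump overflowed = masm.branchAdd32(Overflow, Imm32(1), X86::eax);   // eax += 1, branch on OF
    //     // ... fast path continues; 'overflowed' is linked to slow-path code.
    //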
    Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(Condition cond, Imm32 src, Address dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(Condition cond, RegisterID src, Address dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(Condition cond, Address src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT(cond == Overflow);
        mul32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(Condition cond, Address src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        mul32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT(cond == Overflow);
        mul32(imm, src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, Imm32 imm, Address dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, RegisterID src, Address dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, Address src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
        or32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }


    // Miscellaneous operations:

    void breakpoint()
    {
        m_assembler.int3();
    }

    Call nearCall()
    {
        return Call(m_assembler.call(), Call::LinkableNear);
    }

    Call call(RegisterID target)
    {
        return Call(m_assembler.call(target), Call::None);
    }

    void call(Address address)
    {
        m_assembler.call_m(address.offset, address.base);
    }

    void ret()
    {
        m_assembler.ret();
    }

    void set8(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
    }

    void set8(Condition cond, Address left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_mr(left.offset, left.base, right);
        m_assembler.setCC_r(x86Condition(cond), dest);
    }

    void set8(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
    }

    void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().

    void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        m_assembler.setCC_r(x86Condition(cond), dest);
    }

    void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

protected:
    X86Assembler::Condition x86Condition(Condition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    X86Assembler::Condition x86Condition(DoubleCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

private:
    // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
    // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
    friend class MacroAssemblerX86;

#if PLATFORM(X86)
#if PLATFORM(MAC)

    // All X86 Macs are guaranteed to support at least SSE2.
    static bool isSSE2Present()
    {
        return true;
    }

#else // PLATFORM(MAC)

    enum SSE2CheckState {
        NotCheckedSSE2,
        HasSSE2,
        NoSSE2
    };

    static bool isSSE2Present()
    {
        if (s_sse2CheckState == NotCheckedSSE2) {
            // Default the flags value to zero; if the compiler is
            // not MSVC or GCC we will read this as SSE2 not present.
            int flags = 0;
#if COMPILER(MSVC)
            _asm {
                mov eax, 1 // cpuid function 1 gives us the standard feature set
                cpuid;
                mov flags, edx;
            }
#elif COMPILER(GCC)
            asm (
                 "movl $0x1, %%eax;"
                 "pushl %%ebx;"
                 "cpuid;"
                 "popl %%ebx;"
                 "movl %%edx, %0;"
                 : "=g" (flags)
                 :
                 : "%eax", "%ecx", "%edx"
                 );
#endif
            static const int SSE2FeatureBit = 1 << 26;
            s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
        }
        // Only check once.
        ASSERT(s_sse2CheckState != NotCheckedSSE2);

        return s_sse2CheckState == HasSSE2;
    }

    static SSE2CheckState s_sse2CheckState;

#endif // PLATFORM(MAC)
#elif !defined(NDEBUG) // PLATFORM(X86)

    // On x86-64 we should never be checking for SSE2 in a non-debug build,
    // but debug builds define this method to keep the asserts above happy.
    static bool isSSE2Present()
    {
        return true;
    }

#endif
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerX86Common_h