1 /*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #include "config.h"
27 #include "JIT.h"
28
29 #if ENABLE(JIT)
30
31 #include "CodeBlock.h"
32 #include "JITInlineMethods.h"
33 #include "JITStubCall.h"
34 #include "JSArray.h"
35 #include "JSFunction.h"
36 #include "Interpreter.h"
37 #include "ResultType.h"
38 #include "SamplingTool.h"
39
40 #ifndef NDEBUG
41 #include <stdio.h>
42 #endif
43
44 using namespace std;
45
46 namespace JSC {
47
48 #if USE(JSVALUE32_64)
49
emit_op_negate(Instruction * currentInstruction)50 void JIT::emit_op_negate(Instruction* currentInstruction)
51 {
52 unsigned dst = currentInstruction[1].u.operand;
53 unsigned src = currentInstruction[2].u.operand;
54
55 emitLoad(src, regT1, regT0);
56
57 Jump srcNotInt = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
58 addSlowCase(branch32(Equal, regT0, Imm32(0)));
59
60 neg32(regT0);
61 emitStoreInt32(dst, regT0, (dst == src));
62
63 Jump end = jump();
64
65 srcNotInt.link(this);
66 addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
67
68 xor32(Imm32(1 << 31), regT1);
69 store32(regT1, tagFor(dst));
70 if (dst != src)
71 store32(regT0, payloadFor(dst));
72
73 end.link(this);
74 }
75
emitSlow_op_negate(Instruction * currentInstruction,Vector<SlowCaseEntry>::iterator & iter)76 void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
77 {
78 unsigned dst = currentInstruction[1].u.operand;
79
80 linkSlowCase(iter); // 0 check
81 linkSlowCase(iter); // double check
82
83 JITStubCall stubCall(this, cti_op_negate);
84 stubCall.addArgument(regT1, regT0);
85 stubCall.call(dst);
86 }
87
// op_jnless: jump to 'target' when !(op1 < op2). The negated form is what the
// bytecode generator emits for loop/branch conditions, so the jump is taken
// on the *false* outcome of the comparison.
void JIT::emit_op_jnless(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Int32 less.
    if (isOperandConstantImmediateInt(op1)) {
        // !(const < op2)  <=>  op2 <= const.
        emitLoad(op2, regT3, regT2);
        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
        addJump(branch32(LessThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target + 3);
    } else if (isOperandConstantImmediateInt(op2)) {
        // !(op1 < const)  <=>  op1 >= const.
        emitLoad(op1, regT1, regT0);
        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target + 3);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
        addJump(branch32(GreaterThanOrEqual, regT0, regT2), target + 3);
    }

    // Without FP support any non-int32 operand goes straight to the stub.
    // emitSlow_op_jnless must link exactly the slow cases added here.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double less. Note 'target' is passed in the dst slot of
    // emitBinaryDoubleOp, which branches to dst + 3 for the jump opcodes.
    emitBinaryDoubleOp(op_jnless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
    end.link(this);
}
124
// Slow path for op_jnless. The conditional linkSlowCase sequence below must
// mirror, in count and order, the addSlowCase calls made by emit_op_jnless
// and by emitBinaryDoubleOp for this operand/constant combination.
void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    if (!supportsFloatingPoint()) {
        if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
            linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // int32 check
    } else {
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // double check
            linkSlowCase(iter); // int32 check
        }
        if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
            linkSlowCase(iter); // double check
    }

    // The stub computes (op1 < op2); op_jnless jumps on the negation, so
    // branch when the stub's boolean result is zero.
    JITStubCall stubCall(this, cti_op_jless);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
}
150
// op_jnlesseq: jump to 'target' when !(op1 <= op2). Structure parallels
// emit_op_jnless with the comparison conditions tightened by one.
void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Int32 less.
    if (isOperandConstantImmediateInt(op1)) {
        // !(const <= op2)  <=>  op2 < const.
        emitLoad(op2, regT3, regT2);
        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
        addJump(branch32(LessThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target + 3);
    } else if (isOperandConstantImmediateInt(op2)) {
        // !(op1 <= const)  <=>  op1 > const.
        emitLoad(op1, regT1, regT0);
        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        addJump(branch32(GreaterThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target + 3);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
        addJump(branch32(GreaterThan, regT0, regT2), target + 3);
    }

    // Without FP support any non-int32 operand goes straight to the stub;
    // emitSlow_op_jnlesseq links exactly the slow cases added here.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double less. 'target' rides in the dst slot; emitBinaryDoubleOp
    // branches to dst + 3 for the jump opcodes.
    emitBinaryDoubleOp(op_jnlesseq, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
    end.link(this);
}
187
// Slow path for op_jnlesseq. The conditional linkSlowCase sequence must
// mirror, in count and order, the addSlowCase calls made by
// emit_op_jnlesseq and emitBinaryDoubleOp for this operand combination.
void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    if (!supportsFloatingPoint()) {
        if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
            linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // int32 check
    } else {
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // double check
            linkSlowCase(iter); // int32 check
        }
        if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
            linkSlowCase(iter); // double check
    }

    // The stub computes (op1 <= op2); jump when it returns false.
    JITStubCall stubCall(this, cti_op_jlesseq);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
}
213
214 // LeftShift (<<)
215
// op_lshift fast path: both operands must be int32s; otherwise fall back to
// cti_op_lshift via the slow cases linked in emitSlow_op_lshift.
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // Constant shift amount: only the value operand needs a tag check.
    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        // NOTE(review): the shift count is used unmasked here; presumably the
        // macro assembler reduces it mod 32 as ECMAScript requires — confirm
        // per target architecture.
        lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
        emitStoreInt32(dst, regT0, dst == op1);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    // A constant-int op1 has a known tag, so its check is skipped; the slow
    // path links a matching conditional number of cases.
    if (!isOperandConstantImmediateInt(op1))
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    lshift32(regT2, regT0);
    emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
}
237
emitSlow_op_lshift(Instruction * currentInstruction,Vector<SlowCaseEntry>::iterator & iter)238 void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
239 {
240 unsigned dst = currentInstruction[1].u.operand;
241 unsigned op1 = currentInstruction[2].u.operand;
242 unsigned op2 = currentInstruction[3].u.operand;
243
244 if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
245 linkSlowCase(iter); // int32 check
246 linkSlowCase(iter); // int32 check
247
248 JITStubCall stubCall(this, cti_op_lshift);
249 stubCall.addArgument(op1);
250 stubCall.addArgument(op2);
251 stubCall.call(dst);
252 }
253
254 // RightShift (>>)
255
// op_rshift (signed >>) fast path: both operands must be int32s; otherwise
// fall back to cti_op_rshift via the slow cases linked in emitSlow_op_rshift.
void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // Constant shift amount: only the value operand needs a tag check.
    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        // NOTE(review): shift count used unmasked; presumably the macro
        // assembler reduces it mod 32 as ECMAScript requires — confirm.
        rshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
        emitStoreInt32(dst, regT0, dst == op1);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    // A constant-int op1 has a known tag, so its check is skipped; the slow
    // path links a matching conditional number of cases.
    if (!isOperandConstantImmediateInt(op1))
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    rshift32(regT2, regT0);
    emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
}
277
emitSlow_op_rshift(Instruction * currentInstruction,Vector<SlowCaseEntry>::iterator & iter)278 void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
279 {
280 unsigned dst = currentInstruction[1].u.operand;
281 unsigned op1 = currentInstruction[2].u.operand;
282 unsigned op2 = currentInstruction[3].u.operand;
283
284 if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
285 linkSlowCase(iter); // int32 check
286 linkSlowCase(iter); // int32 check
287
288 JITStubCall stubCall(this, cti_op_rshift);
289 stubCall.addArgument(op1);
290 stubCall.addArgument(op2);
291 stubCall.call(dst);
292 }
293
294 // BitAnd (&)
295
// op_bitand fast path: int32 & int32; anything else goes to the stub.
void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    // If either operand is a constant int32, 'op' receives the non-constant
    // operand and 'constant' the immediate, so only one tag check is needed.
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        and32(Imm32(constant), regT0);
        // When dst aliases the checked operand its tag is already Int32Tag,
        // so only the payload needs storing.
        emitStoreInt32(dst, regT0, (op == dst));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    and32(regT2, regT0);
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
}
318
emitSlow_op_bitand(Instruction * currentInstruction,Vector<SlowCaseEntry>::iterator & iter)319 void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
320 {
321 unsigned dst = currentInstruction[1].u.operand;
322 unsigned op1 = currentInstruction[2].u.operand;
323 unsigned op2 = currentInstruction[3].u.operand;
324
325 if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
326 linkSlowCase(iter); // int32 check
327 linkSlowCase(iter); // int32 check
328
329 JITStubCall stubCall(this, cti_op_bitand);
330 stubCall.addArgument(op1);
331 stubCall.addArgument(op2);
332 stubCall.call(dst);
333 }
334
335 // BitOr (|)
336
// op_bitor fast path: int32 | int32; anything else goes to the stub.
void JIT::emit_op_bitor(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    // Fold a constant-int operand into the immediate form: 'op' is the
    // non-constant operand, 'constant' the immediate; one tag check suffices.
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        or32(Imm32(constant), regT0);
        // dst aliasing the checked operand already carries Int32Tag.
        emitStoreInt32(dst, regT0, (op == dst));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    or32(regT2, regT0);
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
}
359
emitSlow_op_bitor(Instruction * currentInstruction,Vector<SlowCaseEntry>::iterator & iter)360 void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
361 {
362 unsigned dst = currentInstruction[1].u.operand;
363 unsigned op1 = currentInstruction[2].u.operand;
364 unsigned op2 = currentInstruction[3].u.operand;
365
366 if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
367 linkSlowCase(iter); // int32 check
368 linkSlowCase(iter); // int32 check
369
370 JITStubCall stubCall(this, cti_op_bitor);
371 stubCall.addArgument(op1);
372 stubCall.addArgument(op2);
373 stubCall.call(dst);
374 }
375
376 // BitXor (^)
377
// op_bitxor fast path: int32 ^ int32; anything else goes to the stub.
void JIT::emit_op_bitxor(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    // Fold a constant-int operand into the immediate form: 'op' is the
    // non-constant operand, 'constant' the immediate; one tag check suffices.
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        xor32(Imm32(constant), regT0);
        // dst aliasing the checked operand already carries Int32Tag.
        emitStoreInt32(dst, regT0, (op == dst));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    xor32(regT2, regT0);
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
}
400
emitSlow_op_bitxor(Instruction * currentInstruction,Vector<SlowCaseEntry>::iterator & iter)401 void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
402 {
403 unsigned dst = currentInstruction[1].u.operand;
404 unsigned op1 = currentInstruction[2].u.operand;
405 unsigned op2 = currentInstruction[3].u.operand;
406
407 if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
408 linkSlowCase(iter); // int32 check
409 linkSlowCase(iter); // int32 check
410
411 JITStubCall stubCall(this, cti_op_bitxor);
412 stubCall.addArgument(op1);
413 stubCall.addArgument(op2);
414 stubCall.call(dst);
415 }
416
417 // BitNot (~)
418
// op_bitnot (~x) fast path: int32 bitwise complement in place.
void JIT::emit_op_bitnot(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);
    // Non-int32 operands go to the cti_op_bitnot stub.
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));

    not32(regT0);
    // When dst == src the tag is already Int32Tag, so only the payload is written.
    emitStoreInt32(dst, regT0, (dst == src));
}
430
emitSlow_op_bitnot(Instruction * currentInstruction,Vector<SlowCaseEntry>::iterator & iter)431 void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
432 {
433 unsigned dst = currentInstruction[1].u.operand;
434
435 linkSlowCase(iter); // int32 check
436
437 JITStubCall stubCall(this, cti_op_bitnot);
438 stubCall.addArgument(regT1, regT0);
439 stubCall.call(dst);
440 }
441
442 // PostInc (i++)
443
// op_post_inc (i++): dst receives the old value, srcDst the incremented one.
void JIT::emit_op_post_inc(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitLoad(srcDst, regT1, regT0);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));

    if (dst == srcDst) // x = x++ is a noop for ints.
        return;

    // Store the pre-increment value to dst...
    emitStoreInt32(dst, regT0);

    // ...then increment and write back, bailing out on overflow. Note that
    // when dst == srcDst only one slow case is emitted; emitSlow_op_post_inc
    // mirrors this with a conditional link.
    addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
    emitStoreInt32(srcDst, regT0, true);
}
460
// Slow path for op_post_inc: non-int32 operand or int32 overflow.
void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter); // int32 check
    // The fast path only emits the overflow slow case when dst != srcDst.
    if (dst != srcDst)
        linkSlowCase(iter); // overflow check

    // The stub receives the boxed value plus the srcDst virtual-register
    // index (as an immediate) so it can write the incremented value back.
    JITStubCall stubCall(this, cti_op_post_inc);
    stubCall.addArgument(srcDst);
    stubCall.addArgument(Imm32(srcDst));
    stubCall.call(dst);
}
475
476 // PostDec (i--)
477
// op_post_dec (i--): dst receives the old value, srcDst the decremented one.
void JIT::emit_op_post_dec(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitLoad(srcDst, regT1, regT0);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));

    if (dst == srcDst) // x = x-- is a noop for ints.
        return;

    // Store the pre-decrement value to dst...
    emitStoreInt32(dst, regT0);

    // ...then decrement and write back, bailing out on overflow. When
    // dst == srcDst only one slow case exists; the slow path mirrors this.
    addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
    emitStoreInt32(srcDst, regT0, true);
}
494
// Slow path for op_post_dec: non-int32 operand or int32 overflow.
void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter); // int32 check
    // The fast path only emits the overflow slow case when dst != srcDst.
    if (dst != srcDst)
        linkSlowCase(iter); // overflow check

    // The stub receives the boxed value plus the srcDst virtual-register
    // index (as an immediate) so it can write the decremented value back.
    JITStubCall stubCall(this, cti_op_post_dec);
    stubCall.addArgument(srcDst);
    stubCall.addArgument(Imm32(srcDst));
    stubCall.call(dst);
}
509
510 // PreInc (++i)
511
// op_pre_inc (++i) fast path: int32 increment with overflow bail-out.
void JIT::emit_op_pre_inc(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
    // srcDst is known to already carry Int32Tag, so only the payload is stored.
    emitStoreInt32(srcDst, regT0, true);
}
522
emitSlow_op_pre_inc(Instruction * currentInstruction,Vector<SlowCaseEntry>::iterator & iter)523 void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
524 {
525 unsigned srcDst = currentInstruction[1].u.operand;
526
527 linkSlowCase(iter); // int32 check
528 linkSlowCase(iter); // overflow check
529
530 JITStubCall stubCall(this, cti_op_pre_inc);
531 stubCall.addArgument(srcDst);
532 stubCall.call(srcDst);
533 }
534
535 // PreDec (--i)
536
// op_pre_dec (--i) fast path: int32 decrement with overflow bail-out.
void JIT::emit_op_pre_dec(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
    // srcDst is known to already carry Int32Tag, so only the payload is stored.
    emitStoreInt32(srcDst, regT0, true);
}
547
emitSlow_op_pre_dec(Instruction * currentInstruction,Vector<SlowCaseEntry>::iterator & iter)548 void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
549 {
550 unsigned srcDst = currentInstruction[1].u.operand;
551
552 linkSlowCase(iter); // int32 check
553 linkSlowCase(iter); // overflow check
554
555 JITStubCall stubCall(this, cti_op_pre_dec);
556 stubCall.addArgument(srcDst);
557 stubCall.call(srcDst);
558 }
559
560 // Addition (+)
561
// op_add fast path: int32 + int32 with overflow check, falling through to the
// shared double path when either operand holds a double.
void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    // Static operand-type info from the bytecode generator, used to elide
    // tag checks for operands that are provably numbers.
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    unsigned op;
    int32_t constant;
    // Constant-int operand: use the immediate form (types.first() belongs to
    // op1, types.second() to op2).
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchAdd32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    // Without FP support non-int32 operands go straight to the stub;
    // emitSlow_op_add links a matching number of cases.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}
598
// Adds an int32 constant to virtual register 'op', storing the result in
// 'dst'. Emits an int32 fast path plus (when FP is supported) an inline
// double path; 'opType' is the static type of the non-constant operand.
void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT0);
    Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
    addSlowCase(branchAdd32(Overflow, Imm32(constant), regT0));
    emitStoreInt32(dst, regT0, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    // Unless the operand is statically a number, bail for non-double tags
    // (anything above LowestTag is not a double).
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
    // fpRegT0 = (double)constant; fpRegT0 += op; store into dst.
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    addDouble(fpRegT1, fpRegT0);
    emitStoreDouble(dst, fpRegT0);

    end.link(this);
}
625
// Slow path for op_add. The conditional linkSlowCase sequence must mirror,
// in count and order, the addSlowCase calls made by emit_op_add /
// emitAdd32Constant / emitBinaryDoubleOp for this operand combination.
void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        // Immediate form (emitAdd32Constant).
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // non-sse case
            return;
        }

        ResultType opType = op == op1 ? types.first() : types.second();
        if (!opType.definitelyIsNumber())
            linkSlowCase(iter); // double check
    } else {
        // Register form (emit_op_add + emitBinaryDoubleOp).
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITStubCall stubCall(this, cti_op_add);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
668
669 // Subtraction (-)
670
// op_sub fast path: int32 - int32 with overflow check, falling through to the
// shared double path when either operand holds a double.
void JIT::emit_op_sub(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    // Static operand-type info; elides tag checks for provable numbers.
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Subtraction is not commutative, so only a constant right-hand side can
    // use the immediate form.
    if (isOperandConstantImmediateInt(op2)) {
        emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchSub32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    // Without FP support non-int32 operands go straight to the stub;
    // emitSlow_op_sub links a matching number of cases.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}
705
// Subtracts an int32 constant from virtual register 'op', storing the result
// in 'dst'. Emits an int32 fast path plus (when FP is supported) an inline
// double path; 'opType' is the static type of the non-constant operand.
void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT0);
    Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
    addSlowCase(branchSub32(Overflow, Imm32(constant), regT0));
    emitStoreInt32(dst, regT0, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    // Unless the operand is statically a number, bail for non-double tags.
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
    // fpRegT1 = op - constant (subDouble subtracts src from dest), then store.
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    subDouble(fpRegT0, fpRegT1);
    emitStoreDouble(dst, fpRegT1);

    end.link(this);
}
732
// Slow path for op_sub. The conditional linkSlowCase sequence must mirror,
// in count and order, the addSlowCase calls made by emit_op_sub /
// emitSub32Constant / emitBinaryDoubleOp for this operand combination.
void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateInt(op2)) {
        // Immediate form (emitSub32Constant).
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
            linkSlowCase(iter); // int32 or double check
    } else {
        // Register form (emit_op_sub + emitBinaryDoubleOp).
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITStubCall stubCall(this, cti_op_sub);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
767
// Shared double-path emitter for the binary arithmetic and jump-compare ops.
// For arithmetic ops 'dst' is the result virtual register; for op_jnless /
// op_jnlesseq it is the jump target (the branch goes to dst + 3).
// Double case 1 handles "op1 not int32, op2 unknown"; double case 2 handles
// "op1 int32, op2 not int32". The addSlowCase calls emitted here must stay
// in sync with the linkSlowCase sequences of the per-op emitSlow_* functions.
void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
{
    JumpList end;

    if (!notInt32Op1.empty()) {
        // Double case 1: Op1 is not int32; Op2 is unknown.
        notInt32Op1.link(this);

        ASSERT(op1IsInRegisters);

        // Verify Op1 is double.
        if (!types.first().definitelyIsNumber())
            addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));

        if (!op2IsInRegisters)
            emitLoad(op2, regT3, regT2);

        // Tags below LowestTag are doubles; otherwise op2 must be int32.
        Jump doubleOp2 = branch32(Below, regT3, Imm32(JSValue::LowestTag));

        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));

        // Op2 is an int32: promote its payload to a double in fpRegT0.
        convertInt32ToDouble(regT2, fpRegT0);
        Jump doTheMath = jump();

        // Load Op2 as double into double register.
        doubleOp2.link(this);
        emitLoadDouble(op2, fpRegT0);

        // Do the math. At this point fpRegT0 holds op2 as a double.
        doTheMath.link(this);
        switch (opcodeID) {
        case op_mul:
            emitLoadDouble(op1, fpRegT2);
            mulDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_add:
            emitLoadDouble(op1, fpRegT2);
            addDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_sub:
            // fpRegT1 = op1 - op2 (subDouble subtracts src from dest).
            emitLoadDouble(op1, fpRegT1);
            subDouble(fpRegT0, fpRegT1);
            emitStoreDouble(dst, fpRegT1);
            break;
        case op_div:
            // fpRegT1 = op1 / op2 (divDouble divides dest by src).
            emitLoadDouble(op1, fpRegT1);
            divDouble(fpRegT0, fpRegT1);
            emitStoreDouble(dst, fpRegT1);
            break;
        case op_jnless:
            // Jump when !(op1 < op2), i.e. op2 <= op1.
            // NOTE(review): if DoubleLessThanOrEqual is unordered-false, a NaN
            // operand falls through without jumping, whereas !(op1 < op2) is
            // true for NaN — confirm the condition's NaN semantics.
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT2), dst + 3);
            break;
        case op_jnlesseq:
            // Jump when !(op1 <= op2), i.e. op2 < op1.
            // NOTE(review): same NaN concern as op_jnless above.
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT2), dst + 3);
            break;
        default:
            ASSERT_NOT_REACHED();
        }

        if (!notInt32Op2.empty())
            end.append(jump());
    }

    if (!notInt32Op2.empty()) {
        // Double case 2: Op1 is int32; Op2 is not int32.
        notInt32Op2.link(this);

        ASSERT(op2IsInRegisters);

        if (!op1IsInRegisters)
            emitLoadPayload(op1, regT0);

        // Promote the int32 op1 to a double in fpRegT0.
        convertInt32ToDouble(regT0, fpRegT0);

        // Verify op2 is double.
        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(Above, regT3, Imm32(JSValue::LowestTag)));

        // Do the math. Here fpRegT0 holds op1; op2 is loaded per-case.
        switch (opcodeID) {
        case op_mul:
            emitLoadDouble(op2, fpRegT2);
            mulDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_add:
            emitLoadDouble(op2, fpRegT2);
            addDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_sub:
            // fpRegT0 = op1 - op2.
            emitLoadDouble(op2, fpRegT2);
            subDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_div:
            // fpRegT0 = op1 / op2.
            emitLoadDouble(op2, fpRegT2);
            divDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_jnless:
            // Jump when op2 <= op1 (see NaN note in case 1).
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), dst + 3);
            break;
        case op_jnlesseq:
            // Jump when op2 < op1 (see NaN note in case 1).
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), dst + 3);
            break;
        default:
            ASSERT_NOT_REACHED();
        }
    }

    end.link(this);
}
888
889 // Multiplication (*)
890
// op_mul fast path: int32 * int32 with overflow and negative-zero detection.
void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    // Static operand-type info; elides tag checks for provable numbers.
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));

    // Int32 case. Save op1's payload in regT3 (its tag role is done, both
    // tag branches above have been emitted) so the slow path can inspect the
    // operand signs for the -0 case.
    move(regT0, regT3);
    addSlowCase(branchMul32(Overflow, regT2, regT0));
    // A zero product may actually be -0 (e.g. -1 * 0); resolve it slowly.
    addSlowCase(branchTest32(Zero, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    // Without FP support non-int32 operands go straight to the stub;
    // emitSlow_op_mul links a matching number of cases.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}
922
emitSlow_op_mul(Instruction * currentInstruction,Vector<SlowCaseEntry>::iterator & iter)923 void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
924 {
925 unsigned dst = currentInstruction[1].u.operand;
926 unsigned op1 = currentInstruction[2].u.operand;
927 unsigned op2 = currentInstruction[3].u.operand;
928 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
929
930 Jump overflow = getSlowCase(iter); // overflow check
931 linkSlowCase(iter); // zero result check
932
933 Jump negZero = branchOr32(Signed, regT2, regT3);
934 emitStoreInt32(dst, Imm32(0), (op1 == dst || op2 == dst));
935
936 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));
937
938 negZero.link(this);
939 overflow.link(this);
940
941 if (!supportsFloatingPoint()) {
942 linkSlowCase(iter); // int32 check
943 linkSlowCase(iter); // int32 check
944 }
945
946 if (supportsFloatingPoint()) {
947 if (!types.first().definitelyIsNumber())
948 linkSlowCase(iter); // double check
949
950 if (!types.second().definitelyIsNumber()) {
951 linkSlowCase(iter); // int32 check
952 linkSlowCase(iter); // double check
953 }
954 }
955
956 Label jitStubCall(this);
957 JITStubCall stubCall(this, cti_op_mul);
958 stubCall.addArgument(op1);
959 stubCall.addArgument(op2);
960 stubCall.call(dst);
961 }
962
963 // Division (/)
964
// Fast path for op_div. Both int32 operands are converted to doubles and
// divided; the quotient is stored back as an int32 only when it round-trips
// exactly through int conversion and is non-zero (a zero quotient could be -0).
// Requires FP support; otherwise everything is a slow case.
// NOTE(review): cvttsd2si/ucomisd/jne/jp are emitted through m_assembler
// directly, so this fast path looks x86-specific — confirm against the
// platforms this JSVALUE32_64 JIT is built for.
void JIT::emit_op_div(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!supportsFloatingPoint()) {
        // No FPU: bail straight to the stub.
        addSlowCase(jump());
        return;
    }

    // Int32 divide.
    JumpList notInt32Op1;
    JumpList notInt32Op2;

    JumpList end;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);

    notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));

    convertInt32ToDouble(regT0, fpRegT0);
    convertInt32ToDouble(regT2, fpRegT1);
    divDouble(fpRegT1, fpRegT0);

    JumpList doubleResult;
    // Only attempt an int32 result when op1 is not a known constant <= 1 —
    // presumably a heuristic for when an integral quotient is worth checking
    // for; TODO(review) confirm the intent.
    if (!isOperandConstantImmediateInt(op1) || getConstantOperand(op1).asInt32() > 1) {
        // Truncate the quotient to int32, convert back, and compare: unequal
        // (jne) or unordered/NaN (jp) means the quotient was not integral.
        m_assembler.cvttsd2si_rr(fpRegT0, regT0);
        convertInt32ToDouble(regT0, fpRegT1);
        m_assembler.ucomisd_rr(fpRegT1, fpRegT0);

        doubleResult.append(m_assembler.jne());
        doubleResult.append(m_assembler.jp());

        // A zero quotient may be -0, so store it as a double instead.
        doubleResult.append(branchTest32(Zero, regT0));

        // Int32 result.
        emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
        end.append(jump());
    }

    // Double result.
    doubleResult.link(this);
    emitStoreDouble(dst, fpRegT0);
    end.append(jump());

    // Double divide.
    emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}
1017
emitSlow_op_div(Instruction * currentInstruction,Vector<SlowCaseEntry>::iterator & iter)1018 void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1019 {
1020 unsigned dst = currentInstruction[1].u.operand;
1021 unsigned op1 = currentInstruction[2].u.operand;
1022 unsigned op2 = currentInstruction[3].u.operand;
1023 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1024
1025 if (!supportsFloatingPoint())
1026 linkSlowCase(iter);
1027 else {
1028 if (!types.first().definitelyIsNumber())
1029 linkSlowCase(iter); // double check
1030
1031 if (!types.second().definitelyIsNumber()) {
1032 linkSlowCase(iter); // int32 check
1033 linkSlowCase(iter); // double check
1034 }
1035 }
1036
1037 JITStubCall stubCall(this, cti_op_div);
1038 stubCall.addArgument(op1);
1039 stubCall.addArgument(op2);
1040 stubCall.call(dst);
1041 }
1042
1043 // Mod (%)
1044
1045 /* ------------------------------ BEGIN: OP_MOD ------------------------------ */
1046
1047 #if PLATFORM(X86) || PLATFORM(X86_64)
1048
// Fast path for op_mod (x86/x86-64 only — relies on cdq/idivl). Computes the
// int32 remainder with explicit guards for the cases where idiv would fault
// (divide by zero, INT_MIN / -1) and for a remainder that should be -0.
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
        // Known non-zero constant divisor: no op2 tag check or zero check needed.
        emitLoad(op1, X86::edx, X86::eax);
        move(Imm32(getConstantOperand(op2).asInt32()), X86::ecx);
        addSlowCase(branch32(NotEqual, X86::edx, Imm32(JSValue::Int32Tag)));
        // The INT_MIN guard is only needed when the divisor is exactly -1.
        if (getConstantOperand(op2).asInt32() == -1)
            addSlowCase(branch32(Equal, X86::eax, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
    } else {
        emitLoad2(op1, X86::edx, X86::eax, op2, X86::ebx, X86::ecx);
        addSlowCase(branch32(NotEqual, X86::edx, Imm32(JSValue::Int32Tag)));
        addSlowCase(branch32(NotEqual, X86::ebx, Imm32(JSValue::Int32Tag)));

        // Conservative: guard INT_MIN for any divisor, then guard zero.
        addSlowCase(branch32(Equal, X86::eax, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
        addSlowCase(branch32(Equal, X86::ecx, Imm32(0))); // divide by 0
    }

    move(X86::eax, X86::ebx); // Save dividend payload, in case of 0.
    m_assembler.cdq();
    m_assembler.idivl_r(X86::ecx);

    // If the remainder is zero and the dividend is negative, the result is -0.
    Jump storeResult1 = branchTest32(NonZero, X86::edx);
    Jump storeResult2 = branchTest32(Zero, X86::ebx, Imm32(0x80000000)); // not negative
    emitStore(dst, jsNumber(m_globalData, -0.0));
    Jump end = jump();

    storeResult1.link(this);
    storeResult2.link(this);
    emitStoreInt32(dst, X86::edx, (op1 == dst || op2 == dst)); // idivl leaves the remainder in edx
    end.link(this);
}
1085
// Slow path for the x86 op_mod above: link the slow cases in the order the
// fast path added them, then call the cti_op_mod stub.
// Review fix (comments only): the fast path adds the 0x80000000 check BEFORE
// the divide-by-0 check, so the third/fourth link comments below are corrected
// to match; both link to the same target, so behavior is unaffected.
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
        linkSlowCase(iter); // int32 check
        if (getConstantOperand(op2).asInt32() == -1)
            linkSlowCase(iter); // 0x80000000 check
    } else {
        linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // 0x80000000 check
        linkSlowCase(iter); // 0 check
    }

    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
1108
1109 #else // PLATFORM(X86) || PLATFORM(X86_64)
1110
emit_op_mod(Instruction * currentInstruction)1111 void JIT::emit_op_mod(Instruction* currentInstruction)
1112 {
1113 unsigned dst = currentInstruction[1].u.operand;
1114 unsigned op1 = currentInstruction[2].u.operand;
1115 unsigned op2 = currentInstruction[3].u.operand;
1116
1117 JITStubCall stubCall(this, cti_op_mod);
1118 stubCall.addArgument(op1);
1119 stubCall.addArgument(op2);
1120 stubCall.call(dst);
1121 }
1122
// The stub-only emit_op_mod above records no slow cases, so there is nothing
// to link here.
void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
}
1126
1127 #endif // PLATFORM(X86) || PLATFORM(X86_64)
1128
1129 /* ------------------------------ END: OP_MOD ------------------------------ */
1130
1131 #else // USE(JSVALUE32_64)
1132
// Fast path for op_lshift: left shift of two immediate ints. The shift count
// is masked to 5 bits per ECMA-262, except on 32-bit x86 where the hardware
// masks it implicitly.
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT2);
#if !PLATFORM(X86)
    // Mask with 0x1f as per ecma-262 11.7.2 step 7.
    // On 32-bit x86 this is not necessary, since the shift amount is implicitly masked in the instruction.
    and32(Imm32(0x1f), regT2);
#endif
    lshift32(regT2, regT0);
#if !USE(JSVALUE64)
    // 31-bit immediates: doubling shifts the value into tagged-payload
    // position (payload shift is 1); overflow means it won't fit as an
    // immediate, so take the slow case.
    addSlowCase(branchAdd32(Overflow, regT0, regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(result);
}
1158
// Slow path for op_lshift: reload the operands where the fast path may have
// munged them, then call the cti_op_lshift stub.
void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

#if USE(JSVALUE64)
    UNUSED_PARAM(op1);
    UNUSED_PARAM(op2);
    linkSlowCase(iter); // not-immediate-int op1
    linkSlowCase(iter); // not-immediate-int op2
#else
    // If we are limited to 32-bit immediates there is a third slow case, which required the operands to have been reloaded.
    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);
    linkSlowCase(iter); // overflow: regT0/regT2 were modified, so reload them
    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    // The not-immediate cases still have the original values in the registers.
    notImm1.link(this);
    notImm2.link(this);
#endif
    JITStubCall stubCall(this, cti_op_lshift);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT2);
    stubCall.call(result);
}
1184
// Fast path for op_rshift: signed right shift. Handles a constant shift count
// specially, and — when FP truncation is supported — also accepts a double
// left-hand side by truncating it to int32 inline.
void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        // isOperandConstantImmediateInt(op2) => 1 SlowCase
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
#if USE(JSVALUE64)
        rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
#else
        // Tagged 31-bit immediates: shift the whole tagged word; the tag is
        // restored by the orPtr below.
        rshiftPtr(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
#endif
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT2);
        if (supportsFloatingPointTruncate()) {
            Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
#if USE(JSVALUE64)
            // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
            addSlowCase(emitJumpIfNotImmediateNumber(regT0));
            addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
#else
            // supportsFloatingPoint() && !USE(JSVALUE64) => 5 SlowCases (of which 1 IfNotJSCell)
            emitJumpSlowCaseIfNotJSCell(regT0, op1);
            addSlowCase(checkStructure(regT0, m_globalData->numberStructure.get()));
            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
            addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
            addSlowCase(branchAdd32(Overflow, regT0, regT0));
#endif
            lhsIsInt.link(this);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        } else {
            // !supportsFloatingPoint() => 2 SlowCases
            emitJumpSlowCaseIfNotImmediateInteger(regT0);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        }
        emitFastArithImmToInt(regT2);
#if !PLATFORM(X86)
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
        // On 32-bit x86 this is not necessary, since the shift amount is implicitly masked in the instruction.
        and32(Imm32(0x1f), regT2);
#endif
#if USE(JSVALUE64)
        rshift32(regT2, regT0);
#else
        rshiftPtr(regT2, regT0);
#endif
    }
#if USE(JSVALUE64)
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    // Signed right shift preserves the sign bit, so re-tagging is just
    // re-setting the tag bit.
    orPtr(Imm32(JSImmediate::TagTypeNumber), regT0);
#endif
    emitPutVirtualRegister(result);
}
1245
// Slow path for op_rshift: link the slow cases matching the counts documented
// in the fast path, reload operands where necessary, and call cti_op_rshift.
void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_rshift);

    if (isOperandConstantImmediateInt(op2)) {
        // Constant shift count: only the not-immediate-int slow case.
        linkSlowCase(iter);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
    } else {
        if (supportsFloatingPointTruncate()) {
#if USE(JSVALUE64)
            linkSlowCase(iter);
            linkSlowCase(iter);
            linkSlowCase(iter);
#else
            linkSlowCaseIfNotJSCell(iter, op1);
            linkSlowCase(iter);
            linkSlowCase(iter);
            linkSlowCase(iter);
            linkSlowCase(iter);
#endif
            // We're reloading op1 to regT0 as we can no longer guarantee that
            // we have not munged the operand. It may have already been shifted
            // correctly, but it still will not have been tagged.
            stubCall.addArgument(op1, regT0);
            stubCall.addArgument(regT2);
        } else {
            linkSlowCase(iter);
            linkSlowCase(iter);
            stubCall.addArgument(regT0);
            stubCall.addArgument(regT2);
        }
    }

    stubCall.call(result);
}
1286
// Fast path for op_jnless: jump to `target` when op1 < op2 is FALSE, so the
// emitted comparisons are the inverted (GreaterThanOrEqual / LessThanOrEqual)
// forms. `target + 3` follows this file's convention for branch offsets.
void JIT::emit_op_jnless(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
        // Compare the raw tagged encodings directly; tags are equal, so the
        // comparison order is preserved.
        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(op2imm)), target + 3);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
        int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
#endif
        // op1 <= op2-side register means !(op1 < op2) with operands swapped.
        addJump(branch32(LessThanOrEqual, regT1, Imm32(op1imm)), target + 3);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(GreaterThanOrEqual, regT0, regT1), target + 3);
    }
}
1324
1325 void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1326 {
1327 unsigned op1 = currentInstruction[1].u.operand;
1328 unsigned op2 = currentInstruction[2].u.operand;
1329 unsigned target = currentInstruction[3].u.operand;
1330
1331 // We generate inline code for the following cases in the slow path:
1332 // - floating-point number to constant int immediate
1333 // - constant int immediate to floating-point number
1334 // - floating-point number to floating-point number.
1335
1336 if (isOperandConstantImmediateInt(op2)) {
1337 linkSlowCase(iter);
1338
1339 if (supportsFloatingPoint()) {
1340 #if USE(JSVALUE64)
1341 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
1342 addPtr(tagTypeNumberRegister, regT0);
1343 movePtrToDouble(regT0, fpRegT0);
1344 #else
1345 Jump fail1;
1346 if (!m_codeBlock->isKnownNotImmediate(op1))
1347 fail1 = emitJumpIfNotJSCell(regT0);
1348
1349 Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
1350 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1351 #endif
1352
1353 int32_t op2imm = getConstantOperand(op2).asInt32();;
1354
1355 move(Imm32(op2imm), regT1);
1356 convertInt32ToDouble(regT1, fpRegT1);
1357
1358 emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), target + 3);
1359
1360 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
1361
1362 #if USE(JSVALUE64)
1363 fail1.link(this);
1364 #else
1365 if (!m_codeBlock->isKnownNotImmediate(op1))
1366 fail1.link(this);
1367 fail2.link(this);
1368 #endif
1369 }
1370
1371 JITStubCall stubCall(this, cti_op_jless);
1372 stubCall.addArgument(regT0);
1373 stubCall.addArgument(op2, regT2);
1374 stubCall.call();
1375 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
1376
1377 } else if (isOperandConstantImmediateInt(op1)) {
1378 linkSlowCase(iter);
1379
1380 if (supportsFloatingPoint()) {
1381 #if USE(JSVALUE64)
1382 Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
1383 addPtr(tagTypeNumberRegister, regT1);
1384 movePtrToDouble(regT1, fpRegT1);
1385 #else
1386 Jump fail1;
1387 if (!m_codeBlock->isKnownNotImmediate(op2))
1388 fail1 = emitJumpIfNotJSCell(regT1);
1389
1390 Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
1391 loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
1392 #endif
1393
1394 int32_t op1imm = getConstantOperand(op1).asInt32();;
1395
1396 move(Imm32(op1imm), regT0);
1397 convertInt32ToDouble(regT0, fpRegT0);
1398
1399 emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), target + 3);
1400
1401 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
1402
1403 #if USE(JSVALUE64)
1404 fail1.link(this);
1405 #else
1406 if (!m_codeBlock->isKnownNotImmediate(op2))
1407 fail1.link(this);
1408 fail2.link(this);
1409 #endif
1410 }
1411
1412 JITStubCall stubCall(this, cti_op_jless);
1413 stubCall.addArgument(op1, regT2);
1414 stubCall.addArgument(regT1);
1415 stubCall.call();
1416 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
1417
1418 } else {
1419 linkSlowCase(iter);
1420
1421 if (supportsFloatingPoint()) {
1422 #if USE(JSVALUE64)
1423 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
1424 Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
1425 Jump fail3 = emitJumpIfImmediateInteger(regT1);
1426 addPtr(tagTypeNumberRegister, regT0);
1427 addPtr(tagTypeNumberRegister, regT1);
1428 movePtrToDouble(regT0, fpRegT0);
1429 movePtrToDouble(regT1, fpRegT1);
1430 #else
1431 Jump fail1;
1432 if (!m_codeBlock->isKnownNotImmediate(op1))
1433 fail1 = emitJumpIfNotJSCell(regT0);
1434
1435 Jump fail2;
1436 if (!m_codeBlock->isKnownNotImmediate(op2))
1437 fail2 = emitJumpIfNotJSCell(regT1);
1438
1439 Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
1440 Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
1441 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1442 loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
1443 #endif
1444
1445 emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), target + 3);
1446
1447 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
1448
1449 #if USE(JSVALUE64)
1450 fail1.link(this);
1451 fail2.link(this);
1452 fail3.link(this);
1453 #else
1454 if (!m_codeBlock->isKnownNotImmediate(op1))
1455 fail1.link(this);
1456 if (!m_codeBlock->isKnownNotImmediate(op2))
1457 fail2.link(this);
1458 fail3.link(this);
1459 fail4.link(this);
1460 #endif
1461 }
1462
1463 linkSlowCase(iter);
1464 JITStubCall stubCall(this, cti_op_jless);
1465 stubCall.addArgument(regT0);
1466 stubCall.addArgument(regT1);
1467 stubCall.call();
1468 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
1469 }
1470 }
1471
// Fast path for op_jnlesseq: jump to `target` when op1 <= op2 is FALSE, using
// the inverted (GreaterThan / LessThan) comparisons.
void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
        // Compare raw tagged encodings directly; equal tags preserve ordering.
        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
        addJump(branch32(GreaterThan, regT0, Imm32(op2imm)), target + 3);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
        int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
#endif
        addJump(branch32(LessThan, regT1, Imm32(op1imm)), target + 3);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(GreaterThan, regT0, regT1), target + 3);
    }
}
1509
1510 void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1511 {
1512 unsigned op1 = currentInstruction[1].u.operand;
1513 unsigned op2 = currentInstruction[2].u.operand;
1514 unsigned target = currentInstruction[3].u.operand;
1515
1516 // We generate inline code for the following cases in the slow path:
1517 // - floating-point number to constant int immediate
1518 // - constant int immediate to floating-point number
1519 // - floating-point number to floating-point number.
1520
1521 if (isOperandConstantImmediateInt(op2)) {
1522 linkSlowCase(iter);
1523
1524 if (supportsFloatingPoint()) {
1525 #if USE(JSVALUE64)
1526 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
1527 addPtr(tagTypeNumberRegister, regT0);
1528 movePtrToDouble(regT0, fpRegT0);
1529 #else
1530 Jump fail1;
1531 if (!m_codeBlock->isKnownNotImmediate(op1))
1532 fail1 = emitJumpIfNotJSCell(regT0);
1533
1534 Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
1535 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1536 #endif
1537
1538 int32_t op2imm = getConstantOperand(op2).asInt32();;
1539
1540 move(Imm32(op2imm), regT1);
1541 convertInt32ToDouble(regT1, fpRegT1);
1542
1543 emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), target + 3);
1544
1545 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
1546
1547 #if USE(JSVALUE64)
1548 fail1.link(this);
1549 #else
1550 if (!m_codeBlock->isKnownNotImmediate(op1))
1551 fail1.link(this);
1552 fail2.link(this);
1553 #endif
1554 }
1555
1556 JITStubCall stubCall(this, cti_op_jlesseq);
1557 stubCall.addArgument(regT0);
1558 stubCall.addArgument(op2, regT2);
1559 stubCall.call();
1560 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
1561
1562 } else if (isOperandConstantImmediateInt(op1)) {
1563 linkSlowCase(iter);
1564
1565 if (supportsFloatingPoint()) {
1566 #if USE(JSVALUE64)
1567 Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
1568 addPtr(tagTypeNumberRegister, regT1);
1569 movePtrToDouble(regT1, fpRegT1);
1570 #else
1571 Jump fail1;
1572 if (!m_codeBlock->isKnownNotImmediate(op2))
1573 fail1 = emitJumpIfNotJSCell(regT1);
1574
1575 Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
1576 loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
1577 #endif
1578
1579 int32_t op1imm = getConstantOperand(op1).asInt32();;
1580
1581 move(Imm32(op1imm), regT0);
1582 convertInt32ToDouble(regT0, fpRegT0);
1583
1584 emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), target + 3);
1585
1586 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
1587
1588 #if USE(JSVALUE64)
1589 fail1.link(this);
1590 #else
1591 if (!m_codeBlock->isKnownNotImmediate(op2))
1592 fail1.link(this);
1593 fail2.link(this);
1594 #endif
1595 }
1596
1597 JITStubCall stubCall(this, cti_op_jlesseq);
1598 stubCall.addArgument(op1, regT2);
1599 stubCall.addArgument(regT1);
1600 stubCall.call();
1601 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
1602
1603 } else {
1604 linkSlowCase(iter);
1605
1606 if (supportsFloatingPoint()) {
1607 #if USE(JSVALUE64)
1608 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
1609 Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
1610 Jump fail3 = emitJumpIfImmediateInteger(regT1);
1611 addPtr(tagTypeNumberRegister, regT0);
1612 addPtr(tagTypeNumberRegister, regT1);
1613 movePtrToDouble(regT0, fpRegT0);
1614 movePtrToDouble(regT1, fpRegT1);
1615 #else
1616 Jump fail1;
1617 if (!m_codeBlock->isKnownNotImmediate(op1))
1618 fail1 = emitJumpIfNotJSCell(regT0);
1619
1620 Jump fail2;
1621 if (!m_codeBlock->isKnownNotImmediate(op2))
1622 fail2 = emitJumpIfNotJSCell(regT1);
1623
1624 Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
1625 Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
1626 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1627 loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
1628 #endif
1629
1630 emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), target + 3);
1631
1632 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
1633
1634 #if USE(JSVALUE64)
1635 fail1.link(this);
1636 fail2.link(this);
1637 fail3.link(this);
1638 #else
1639 if (!m_codeBlock->isKnownNotImmediate(op1))
1640 fail1.link(this);
1641 if (!m_codeBlock->isKnownNotImmediate(op2))
1642 fail2.link(this);
1643 fail3.link(this);
1644 fail4.link(this);
1645 #endif
1646 }
1647
1648 linkSlowCase(iter);
1649 JITStubCall stubCall(this, cti_op_jlesseq);
1650 stubCall.addArgument(regT0);
1651 stubCall.addArgument(regT1);
1652 stubCall.call();
1653 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
1654 }
1655 }
1656
// Fast path for op_bitand: bitwise AND of immediate ints. With a constant
// operand, the AND is performed against the constant's encoding directly.
void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t imm = getConstantOperandImmediateInt(op1);
        andPtr(Imm32(imm), regT0);
        // Presumably a negative imm sign-extends to all-ones in the upper tag
        // bits (preserving the tag), while a non-negative imm clears them —
        // hence the re-tag only for imm >= 0. TODO(review): confirm.
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
#else
        // AND against the raw tagged encoding; both operands carry the tag,
        // so it survives the AND.
        andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)))), regT0);
#endif
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t imm = getConstantOperandImmediateInt(op2);
        andPtr(Imm32(imm), regT0);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
#else
        andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)))), regT0);
#endif
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        andPtr(regT1, regT0);
        // ANDing two tagged ints keeps the tag, so checking the result checks
        // both operands at once.
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    }
    emitPutVirtualRegister(result);
}
1692
1693 void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1694 {
1695 unsigned result = currentInstruction[1].u.operand;
1696 unsigned op1 = currentInstruction[2].u.operand;
1697 unsigned op2 = currentInstruction[3].u.operand;
1698
1699 linkSlowCase(iter);
1700 if (isOperandConstantImmediateInt(op1)) {
1701 JITStubCall stubCall(this, cti_op_bitand);
1702 stubCall.addArgument(op1, regT2);
1703 stubCall.addArgument(regT0);
1704 stubCall.call(result);
1705 } else if (isOperandConstantImmediateInt(op2)) {
1706 JITStubCall stubCall(this, cti_op_bitand);
1707 stubCall.addArgument(regT0);
1708 stubCall.addArgument(op2, regT2);
1709 stubCall.call(result);
1710 } else {
1711 JITStubCall stubCall(this, cti_op_bitand);
1712 stubCall.addArgument(op1, regT2);
1713 stubCall.addArgument(regT1);
1714 stubCall.call(result);
1715 }
1716 }
1717
// Fast path for op_post_inc: result = srcDst, then srcDst += 1, for immediate
// ints only.
void JIT::emit_op_post_inc(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    // Keep the pre-increment value in regT0; increment the copy in regT1.
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
    addSlowCase(branchAdd32(Overflow, Imm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
#else
    // Tagged immediates: bump the payload in place (payload is shifted left
    // by IntegerPayloadShift).
    addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
    signExtend32ToPtr(regT1, regT1);
#endif
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result); // stores regT0, the pre-increment value
}
1736
1737 void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1738 {
1739 unsigned result = currentInstruction[1].u.operand;
1740 unsigned srcDst = currentInstruction[2].u.operand;
1741
1742 linkSlowCase(iter);
1743 linkSlowCase(iter);
1744 JITStubCall stubCall(this, cti_op_post_inc);
1745 stubCall.addArgument(regT0);
1746 stubCall.addArgument(Imm32(srcDst));
1747 stubCall.call(result);
1748 }
1749
// Fast path for op_post_dec: result = srcDst, then srcDst -= 1, for immediate
// ints only.
void JIT::emit_op_post_dec(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    // Keep the pre-decrement value in regT0; decrement the copy in regT1.
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
    // NOTE(review): bails on the Zero condition rather than Overflow —
    // presumably to also catch the boundary case of the subtraction on the
    // tagged/untagged encoding; confirm before relying on it.
    addSlowCase(branchSub32(Zero, Imm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
#else
    addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
    signExtend32ToPtr(regT1, regT1);
#endif
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result); // stores regT0, the pre-decrement value
}
1768
1769 void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1770 {
1771 unsigned result = currentInstruction[1].u.operand;
1772 unsigned srcDst = currentInstruction[2].u.operand;
1773
1774 linkSlowCase(iter);
1775 linkSlowCase(iter);
1776 JITStubCall stubCall(this, cti_op_post_dec);
1777 stubCall.addArgument(regT0);
1778 stubCall.addArgument(Imm32(srcDst));
1779 stubCall.call(result);
1780 }
1781
// Fast path for op_pre_inc: srcDst += 1 in place, for immediate ints only.
void JIT::emit_op_pre_inc(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
    addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    // Tagged immediates: bump the payload in place (payload is shifted left
    // by IntegerPayloadShift).
    addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitPutVirtualRegister(srcDst);
}
1797
// Slow path for op_pre_inc. The two slow cases need different entry points:
// on the overflow case the add already modified regT0, so the value must be
// reloaded; on the not-immediate case regT0 is still intact.
void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    Jump notImm = getSlowCase(iter); // value was not an immediate int
    linkSlowCase(iter); // overflow: reload the original value
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    JITStubCall stubCall(this, cti_op_pre_inc);
    stubCall.addArgument(regT0);
    stubCall.call(srcDst);
}
1810
// Fast path for op_pre_dec: srcDst -= 1 in place, for immediate ints only.
void JIT::emit_op_pre_dec(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
    // NOTE(review): bails on the Zero condition rather than Overflow, same as
    // op_post_dec above — confirm the intended boundary case.
    addSlowCase(branchSub32(Zero, Imm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitPutVirtualRegister(srcDst);
}
1826
// Slow path for op_pre_dec. Identical structure to emitSlow_op_pre_inc:
// reload the operand on the bailout entry (regT0 was clobbered by the
// subtract), skip the reload on the not-immediate entry.
void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    Jump notImm = getSlowCase(iter);       // entry 1: not an immediate integer
    linkSlowCase(iter);                    // entry 2: decrement bailed out
    emitGetVirtualRegister(srcDst, regT0); // reload the unmodified operand
    notImm.link(this);
    JITStubCall stubCall(this, cti_op_pre_dec);
    stubCall.addArgument(regT0);
    stubCall.call(srcDst); // stub result is written back to srcDst
}
1839
1840 /* ------------------------------ BEGIN: OP_MOD ------------------------------ */
1841
1842 #if PLATFORM(X86) || PLATFORM(X86_64)
1843
// op_mod fast path (x86/x86_64 only): integer remainder via idiv.
// idiv's operand constraints are hard-coded: dividend in eax (sign-extended
// into edx:eax by cdq), divisor in ecx, remainder delivered in edx.
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
    emitJumpSlowCaseIfNotImmediateInteger(X86::eax); // slow case 1
    emitJumpSlowCaseIfNotImmediateInteger(X86::ecx); // slow case 2
#if USE(JSVALUE64)
    // Slow case 3: guard a zero divisor before idiv (which would fault).
    addSlowCase(branchPtr(Equal, X86::ecx, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))));
    m_assembler.cdq();             // sign-extend eax into edx:eax
    m_assembler.idivl_r(X86::ecx); // remainder left in edx
#else
    emitFastArithDeTagImmediate(X86::eax);
    // De-tag the divisor; slow case 3 if the de-tagged divisor is zero.
    addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86::ecx));
    m_assembler.cdq();
    m_assembler.idivl_r(X86::ecx);
    signExtend32ToPtr(X86::edx, X86::edx);
#endif
    emitFastArithReTagImmediate(X86::edx, X86::eax); // tag the remainder into eax
    emitPutVirtualRegister(result);
}
1867
// Slow path for op_mod (x86/x86_64). On JSVALUE64 all three entries simply
// fall through to the stub. On the 32-bit immediate encoding, the fast path
// de-tagged both operands before the zero-divisor check, so that entry must
// re-tag eax and ecx before the stub call; the two not-immediate entries
// (notImm1/notImm2) arrive before any de-tagging and skip the fixup.
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;

#if USE(JSVALUE64)
    linkSlowCase(iter); // op1 not an immediate integer
    linkSlowCase(iter); // op2 not an immediate integer
    linkSlowCase(iter); // zero divisor
#else
    Jump notImm1 = getSlowCase(iter); // op1 not an immediate integer
    Jump notImm2 = getSlowCase(iter); // op2 not an immediate integer
    linkSlowCase(iter);               // zero divisor — operands already de-tagged
    emitFastArithReTagImmediate(X86::eax, X86::eax); // restore op1's tag
    emitFastArithReTagImmediate(X86::ecx, X86::ecx); // restore op2's tag
    notImm1.link(this);
    notImm2.link(this);
#endif
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(X86::eax);
    stubCall.addArgument(X86::ecx);
    stubCall.call(result);
}
1890
1891 #else // PLATFORM(X86) || PLATFORM(X86_64)
1892
// op_mod on non-x86 platforms: no inline fast path is planted; the opcode
// is compiled as an unconditional call to the cti_op_mod stub.
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1, regT2); // regT2 used as a scratch register for the load
    stubCall.addArgument(op2, regT2);
    stubCall.call(result);
}
1904
// The non-x86 emit_op_mod above registers no slow cases, so this slow-path
// handler can never be invoked.
void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}
1909
1910 #endif // PLATFORM(X86) || PLATFORM(X86_64)
1911
1912 /* ------------------------------ END: OP_MOD ------------------------------ */
1913
1914 #if USE(JSVALUE64)
1915
1916 /* ------------------------------ BEGIN: USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
1917
// Shared JSVALUE64 fast path for op_add/op_sub/op_mul: require both operands
// to be immediate integers, perform the 32-bit operation in regT0, and
// re-tag the result. Slow cases: operand 1 not an int, operand 2 not an int,
// arithmetic overflow, and (mul only) a zero result.
// The dst and OperandTypes parameters are unused here; the caller writes
// regT0 to the destination register and the types inform the slow path.
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
{
    emitGetVirtualRegisters(op1, regT0, op2, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    if (opcodeID == op_add)
        addSlowCase(branchAdd32(Overflow, regT1, regT0));
    else if (opcodeID == op_sub)
        addSlowCase(branchSub32(Overflow, regT1, regT0));
    else {
        ASSERT(opcodeID == op_mul);
        addSlowCase(branchMul32(Overflow, regT1, regT0));
        // A zero product also goes slow: it may really be -0 (e.g. 0 * -1),
        // which an immediate integer cannot represent.
        addSlowCase(branchTest32(Zero, regT0));
    }
    emitFastArithIntToImmNoCheck(regT0, regT0);
}
1934
// JSVALUE64 slow path for op_add/op_sub/op_mul. Attempts an inline double
// computation before falling back to the stub: each operand is either an
// immediate double (converted by adding tagTypeNumberRegister and moving the
// bits to an FP register) or an immediate int (converted with
// convertInt32ToDouble); anything else jumps to stubFunctionCall.
void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned result, unsigned op1, unsigned, OperandTypes types)
{
    // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
    COMPILE_ASSERT(((JSImmediate::TagTypeNumber + JSImmediate::DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);

    Jump notImm1 = getSlowCase(iter); // op1 was not an immediate integer
    Jump notImm2 = getSlowCase(iter); // op2 was not an immediate integer

    linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
    if (opcodeID == op_mul) // op_mul has an extra slow case to handle 0 * negative number.
        linkSlowCase(iter);
    // The fast path's arithmetic clobbered regT0; reload op1 for the stub.
    emitGetVirtualRegister(op1, regT0);

    Label stubFunctionCall(this);
    JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(result);
    Jump end = jump();

    // if we get here, eax is not an int32, edx not yet checked.
    notImm1.link(this);
    if (!types.first().definitelyIsNumber())
        emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
    if (!types.second().definitelyIsNumber())
        emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
    // Decode op1's immediate double into fpRegT1 (see COMPILE_ASSERT above).
    addPtr(tagTypeNumberRegister, regT0);
    movePtrToDouble(regT0, fpRegT1);
    Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
    convertInt32ToDouble(regT1, fpRegT2);
    Jump op2wasInteger = jump();

    // if we get here, eax IS an int32, edx is not.
    notImm2.link(this);
    if (!types.second().definitelyIsNumber())
        emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
    convertInt32ToDouble(regT0, fpRegT1);
    // Shared with the notImm1 path: decode op2's immediate double into fpRegT2.
    op2isDouble.link(this);
    addPtr(tagTypeNumberRegister, regT1);
    movePtrToDouble(regT1, fpRegT2);
    op2wasInteger.link(this);

    if (opcodeID == op_add)
        addDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_sub)
        subDouble(fpRegT2, fpRegT1);
    else {
        ASSERT(opcodeID == op_mul);
        mulDouble(fpRegT2, fpRegT1);
    }
    // Re-encode the double result as an immediate and store it.
    moveDoubleToPtr(fpRegT1, regT0);
    subPtr(tagTypeNumberRegister, regT0);
    emitPutVirtualRegister(result, regT0);

    end.link(this);
}
1991
// op_add fast path (JSVALUE64). If static operand types say either side can
// never be a number, compile a direct stub call (addition may be string
// concatenation etc.). Otherwise plant an inline integer add, folding a
// constant-int operand into the immediate when possible.
void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        // No numeric fast path is worthwhile; go straight to the stub.
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
        return;
    }

    if (isOperandConstantImmediateInt(op1)) {
        // Constant + register: two slow cases (not-int, overflow).
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), regT0));
        emitFastArithIntToImmNoCheck(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), regT0));
        emitFastArithIntToImmNoCheck(regT0, regT0);
    } else
        compileBinaryArithOp(op_add, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
2022
// Slow path for op_add (JSVALUE64). The constant-operand fast path registered
// exactly two slow cases (not-int, overflow); link both and call the stub.
// The general path defers to compileBinaryArithOpSlowCase, which also needs
// the static operand types to decide which double conversions are safe.
void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1) || isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter); // operand not an immediate integer
        linkSlowCase(iter); // addition overflowed
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else
        compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
2039
// op_mul fast path (JSVALUE64). A constant operand is folded into the
// multiply only when it is strictly positive: with a non-positive constant
// the product could be -0 (not representable as an immediate int), which the
// general compileBinaryArithOp path guards for explicitly.
void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    // For now, only plant a fast int case if the constant operand is greater than zero.
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        emitFastArithReTagImmediate(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        emitFastArithReTagImmediate(regT0, regT0);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
2064
// Slow path for op_mul (JSVALUE64). Must mirror the fast path's choice: the
// positive-constant fast path registered two slow cases (not-int, overflow);
// anything else used compileBinaryArithOp and takes the shared slow path.
void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
        || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
        linkSlowCase(iter); // operand not an immediate integer
        linkSlowCase(iter); // multiply overflowed
        // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
        JITStubCall stubCall(this, cti_op_mul);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else
        compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, types);
}
2084
// op_sub (JSVALUE64): no constant-folded variant; always uses the shared
// integer fast path, then stores regT0 to the result register.
void JIT::emit_op_sub(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    compileBinaryArithOp(op_sub, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
2096
// Slow path for op_sub (JSVALUE64): unconditionally defers to the shared
// binary-arith slow path (the fast path has no special-cased variants).
void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types);
}
2106
2107 #else // USE(JSVALUE64)
2108
2109 /* ------------------------------ BEGIN: !USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
2110
2111 void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
2112 {
2113 Structure* numberStructure = m_globalData->numberStructure.get();
2114 Jump wasJSNumberCell1;
2115 Jump wasJSNumberCell2;
2116
2117 emitGetVirtualRegisters(src1, regT0, src2, regT1);
2118
2119 if (types.second().isReusable() && supportsFloatingPoint()) {
2120 ASSERT(types.second().mightBeNumber());
2121
2122 // Check op2 is a number
2123 Jump op2imm = emitJumpIfImmediateInteger(regT1);
2124 if (!types.second().definitelyIsNumber()) {
2125 emitJumpSlowCaseIfNotJSCell(regT1, src2);
2126 addSlowCase(checkStructure(regT1, numberStructure));
2127 }
2128
2129 // (1) In this case src2 is a reusable number cell.
2130 // Slow case if src1 is not a number type.
2131 Jump op1imm = emitJumpIfImmediateInteger(regT0);
2132 if (!types.first().definitelyIsNumber()) {
2133 emitJumpSlowCaseIfNotJSCell(regT0, src1);
2134 addSlowCase(checkStructure(regT0, numberStructure));
2135 }
2136
2137 // (1a) if we get here, src1 is also a number cell
2138 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
2139 Jump loadedDouble = jump();
2140 // (1b) if we get here, src1 is an immediate
2141 op1imm.link(this);
2142 emitFastArithImmToInt(regT0);
2143 convertInt32ToDouble(regT0, fpRegT0);
2144 // (1c)
2145 loadedDouble.link(this);
2146 if (opcodeID == op_add)
2147 addDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
2148 else if (opcodeID == op_sub)
2149 subDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
2150 else {
2151 ASSERT(opcodeID == op_mul);
2152 mulDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
2153 }
2154
2155 // Store the result to the JSNumberCell and jump.
2156 storeDouble(fpRegT0, Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)));
2157 move(regT1, regT0);
2158 emitPutVirtualRegister(dst);
2159 wasJSNumberCell2 = jump();
2160
2161 // (2) This handles cases where src2 is an immediate number.
2162 // Two slow cases - either src1 isn't an immediate, or the subtract overflows.
2163 op2imm.link(this);
2164 emitJumpSlowCaseIfNotImmediateInteger(regT0);
2165 } else if (types.first().isReusable() && supportsFloatingPoint()) {
2166 ASSERT(types.first().mightBeNumber());
2167
2168 // Check op1 is a number
2169 Jump op1imm = emitJumpIfImmediateInteger(regT0);
2170 if (!types.first().definitelyIsNumber()) {
2171 emitJumpSlowCaseIfNotJSCell(regT0, src1);
2172 addSlowCase(checkStructure(regT0, numberStructure));
2173 }
2174
2175 // (1) In this case src1 is a reusable number cell.
2176 // Slow case if src2 is not a number type.
2177 Jump op2imm = emitJumpIfImmediateInteger(regT1);
2178 if (!types.second().definitelyIsNumber()) {
2179 emitJumpSlowCaseIfNotJSCell(regT1, src2);
2180 addSlowCase(checkStructure(regT1, numberStructure));
2181 }
2182
2183 // (1a) if we get here, src2 is also a number cell
2184 loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
2185 Jump loadedDouble = jump();
2186 // (1b) if we get here, src2 is an immediate
2187 op2imm.link(this);
2188 emitFastArithImmToInt(regT1);
2189 convertInt32ToDouble(regT1, fpRegT1);
2190 // (1c)
2191 loadedDouble.link(this);
2192 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
2193 if (opcodeID == op_add)
2194 addDouble(fpRegT1, fpRegT0);
2195 else if (opcodeID == op_sub)
2196 subDouble(fpRegT1, fpRegT0);
2197 else {
2198 ASSERT(opcodeID == op_mul);
2199 mulDouble(fpRegT1, fpRegT0);
2200 }
2201 storeDouble(fpRegT0, Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)));
2202 emitPutVirtualRegister(dst);
2203
2204 // Store the result to the JSNumberCell and jump.
2205 storeDouble(fpRegT0, Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)));
2206 emitPutVirtualRegister(dst);
2207 wasJSNumberCell1 = jump();
2208
2209 // (2) This handles cases where src1 is an immediate number.
2210 // Two slow cases - either src2 isn't an immediate, or the subtract overflows.
2211 op1imm.link(this);
2212 emitJumpSlowCaseIfNotImmediateInteger(regT1);
2213 } else
2214 emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
2215
2216 if (opcodeID == op_add) {
2217 emitFastArithDeTagImmediate(regT0);
2218 addSlowCase(branchAdd32(Overflow, regT1, regT0));
2219 } else if (opcodeID == op_sub) {
2220 addSlowCase(branchSub32(Overflow, regT1, regT0));
2221 signExtend32ToPtr(regT0, regT0);
2222 emitFastArithReTagImmediate(regT0, regT0);
2223 } else {
2224 ASSERT(opcodeID == op_mul);
2225 // convert eax & edx from JSImmediates to ints, and check if either are zero
2226 emitFastArithImmToInt(regT1);
2227 Jump op1Zero = emitFastArithDeTagImmediateJumpIfZero(regT0);
2228 Jump op2NonZero = branchTest32(NonZero, regT1);
2229 op1Zero.link(this);
2230 // if either input is zero, add the two together, and check if the result is < 0.
2231 // If it is, we have a problem (N < 0), (N * 0) == -0, not representatble as a JSImmediate.
2232 move(regT0, regT2);
2233 addSlowCase(branchAdd32(Signed, regT1, regT2));
2234 // Skip the above check if neither input is zero
2235 op2NonZero.link(this);
2236 addSlowCase(branchMul32(Overflow, regT1, regT0));
2237 signExtend32ToPtr(regT0, regT0);
2238 emitFastArithReTagImmediate(regT0, regT0);
2239 }
2240 emitPutVirtualRegister(dst);
2241
2242 if (types.second().isReusable() && supportsFloatingPoint())
2243 wasJSNumberCell2.link(this);
2244 else if (types.first().isReusable() && supportsFloatingPoint())
2245 wasJSNumberCell1.link(this);
2246 }
2247
// !JSVALUE64 slow path for op_add/op_sub/op_mul: link every slow case the
// fast path may have registered, then call the stub. All entries land on the
// same stub call, but the conditional links must mirror the conditional
// addSlowCase calls in compileBinaryArithOp so the iterator stays aligned
// (linkSlowCaseIfNotJSCell consumes an entry only when the fast path's
// emitJumpSlowCaseIfNotJSCell added one).
void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
{
    linkSlowCase(iter);
    if (types.second().isReusable() && supportsFloatingPoint()) {
        // Mirror the src2-reusable branch's number-cell checks.
        if (!types.first().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src1);
            linkSlowCase(iter); // structure check failed
        }
        if (!types.second().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src2);
            linkSlowCase(iter); // structure check failed
        }
    } else if (types.first().isReusable() && supportsFloatingPoint()) {
        // Mirror the src1-reusable branch's number-cell checks.
        if (!types.first().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src1);
            linkSlowCase(iter); // structure check failed
        }
        if (!types.second().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src2);
            linkSlowCase(iter); // structure check failed
        }
    }
    linkSlowCase(iter); // integer-path arithmetic bailout

    // additional entry point to handle -0 cases.
    if (opcodeID == op_mul)
        linkSlowCase(iter);

    JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
    stubCall.addArgument(src1, regT2);
    stubCall.addArgument(src2, regT2);
    stubCall.call(dst);
}
2281
// op_add fast path (!JSVALUE64). With a constant-int operand, add the
// constant pre-shifted into the payload position (adding value << shift to a
// tagged int preserves the tag); otherwise use the shared path, or a direct
// stub call when static types rule out numbers entirely.
void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0));
        signExtend32ToPtr(regT0, regT0);
        emitPutVirtualRegister(result);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0));
        signExtend32ToPtr(regT0, regT0);
        emitPutVirtualRegister(result);
    } else {
        OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
        if (types.first().mightBeNumber() && types.second().mightBeNumber())
            compileBinaryArithOp(op_add, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
        else {
            // Neither side can be a number (e.g. string concatenation): stub only.
            JITStubCall stubCall(this, cti_op_add);
            stubCall.addArgument(op1, regT2);
            stubCall.addArgument(op2, regT2);
            stubCall.call(result);
        }
    }
}
2312
// Slow path for op_add (!JSVALUE64). For the constant-operand fast path:
// on overflow (second entry) the speculative shifted add has already been
// applied to regT0, so it is undone with sub32 before calling the stub; the
// not-immediate entry (notImm) skips that fixup. regT0 is then passed as the
// non-constant operand.
void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1)) {
        Jump notImm = getSlowCase(iter); // operand not an immediate integer
        linkSlowCase(iter);              // addition overflowed
        sub32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0); // undo the speculative add
        notImm.link(this);
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT0);
        stubCall.call(result);
    } else if (isOperandConstantImmediateInt(op2)) {
        Jump notImm = getSlowCase(iter); // operand not an immediate integer
        linkSlowCase(iter);              // addition overflowed
        sub32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0); // undo the speculative add
        notImm.link(this);
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else {
        OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
        // The might-not-be-number case compiled a plain stub call with no slow cases.
        ASSERT(types.first().mightBeNumber() && types.second().mightBeNumber());
        compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);
    }
}
2343
// op_mul fast path (!JSVALUE64). A constant operand is folded only when
// strictly positive (a non-positive constant could produce -0, which the
// immediate encoding cannot represent). The variable operand is de-tagged,
// multiplied, sign-extended, and re-tagged.
void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // For now, only plant a fast int case if the constant operand is greater than zero.
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitFastArithDeTagImmediate(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        signExtend32ToPtr(regT0, regT0);
        emitFastArithReTagImmediate(regT0, regT0);
        emitPutVirtualRegister(result);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitFastArithDeTagImmediate(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        signExtend32ToPtr(regT0, regT0);
        emitFastArithReTagImmediate(regT0, regT0);
        emitPutVirtualRegister(result);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
2371
// Slow path for op_mul (!JSVALUE64). Mirrors the fast path's choice: the
// positive-constant variant registered exactly two slow cases (not-int,
// overflow); otherwise defer to the shared binary-arith slow path.
void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
        || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
        linkSlowCase(iter); // operand not an immediate integer
        linkSlowCase(iter); // multiply overflowed
        // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
        JITStubCall stubCall(this, cti_op_mul);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else
        compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
2390
// op_sub (!JSVALUE64): thin wrapper over the shared binary-arith fast path
// (operands: [1] dst, [2] src1, [3] src2, [4] static operand types).
void JIT::emit_op_sub(Instruction* currentInstruction)
{
    compileBinaryArithOp(op_sub, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
2395
// Slow path for op_sub (!JSVALUE64): thin wrapper over the shared
// binary-arith slow path; operand layout matches emit_op_sub above.
void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileBinaryArithOpSlowCase(op_sub, iter, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
2400
2401 #endif // USE(JSVALUE64)
2402
2403 /* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */
2404
2405 #endif // USE(JSVALUE32_64)
2406
2407 } // namespace JSC
2408
2409 #endif // ENABLE(JIT)
2410