// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_IA32

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/ia32/frames-ia32.h"
#include "src/ia32/macro-assembler-ia32.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// MacroAssembler implementation.

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
                               CodeObjectRequired create_code_object)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      has_frame_(false) {
  if (create_code_object == CodeObjectRequired::kYes) {
    code_object_ =
        Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
  }
}


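// Loads |src| into |dst| with a move sized and extended according to the
// representation |r|: sign-extending for Integer8/Integer16, zero-extending
// for UInteger8/UInteger16, and a plain 32-bit move otherwise. Store() below
// is the symmetric counterpart for writes.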
void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8()) {
    movsx_b(dst, src);
  } else if (r.IsUInteger8()) {
    movzx_b(dst, src);
  } else if (r.IsInteger16()) {
    movsx_w(dst, src);
  } else if (r.IsUInteger16()) {
    movzx_w(dst, src);
  } else {
    mov(dst, src);
  }
}


void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    mov_b(dst, src);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    mov_w(dst, src);
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    mov(dst, src);
  }
}


void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
  if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
    mov(destination, isolate()->heap()->root_handle(index));
    return;
  }
  ExternalReference roots_array_start =
      ExternalReference::roots_array_start(isolate());
  mov(destination, Immediate(index));
  mov(destination, Operand::StaticArray(destination,
                                        times_pointer_size,
                                        roots_array_start));
}


void MacroAssembler::StoreRoot(Register source,
                               Register scratch,
                               Heap::RootListIndex index) {
  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
  ExternalReference roots_array_start =
      ExternalReference::roots_array_start(isolate());
  mov(scratch, Immediate(index));
  mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
      source);
}


void MacroAssembler::CompareRoot(Register with,
                                 Register scratch,
                                 Heap::RootListIndex index) {
  ExternalReference roots_array_start =
      ExternalReference::roots_array_start(isolate());
  mov(scratch, Immediate(index));
  cmp(with, Operand::StaticArray(scratch,
                                 times_pointer_size,
                                 roots_array_start));
}


void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
  DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
  cmp(with, isolate()->heap()->root_handle(index));
}


void MacroAssembler::CompareRoot(const Operand& with,
                                 Heap::RootListIndex index) {
  DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
  cmp(with, isolate()->heap()->root_handle(index));
}


void MacroAssembler::PushRoot(Heap::RootListIndex index) {
  DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
  Push(isolate()->heap()->root_handle(index));
}

#define REG(Name) \
  { Register::kCode_##Name }

static const Register saved_regs[] = {REG(eax), REG(ecx), REG(edx)};

#undef REG

static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);

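// Pushes the caller-saved general-purpose registers (eax, ecx, edx), minus
// any given exclusions, and, when |fp_mode| is kSaveFPRegs, spills XMM1-XMM7
// to the stack as well. PopCallerSaved() must be called with the same
// arguments to restore them.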
void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
                                     Register exclusion1, Register exclusion2,
                                     Register exclusion3) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  for (int i = 0; i < kNumberOfSavedRegs; i++) {
    Register reg = saved_regs[i];
    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
      push(reg);
    }
  }
  if (fp_mode == kSaveFPRegs) {
    sub(esp, Immediate(kDoubleSize * (XMMRegister::kMaxNumRegisters - 1)));
    // Save all XMM registers except XMM0.
    for (int i = XMMRegister::kMaxNumRegisters - 1; i > 0; i--) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
    }
  }
}

void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                    Register exclusion2, Register exclusion3) {
  if (fp_mode == kSaveFPRegs) {
    // Restore all XMM registers except XMM0.
    for (int i = XMMRegister::kMaxNumRegisters - 1; i > 0; i--) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(reg, Operand(esp, (i - 1) * kDoubleSize));
    }
    add(esp, Immediate(kDoubleSize * (XMMRegister::kMaxNumRegisters - 1)));
  }

  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
    Register reg = saved_regs[i];
    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
      pop(reg);
    }
  }
}
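
// A minimal usage sketch (hypothetical call site): preserve the caller-saved
// registers around a call that may clobber them, keeping the register that
// carries the result out of the save set:
//   PushCallerSaved(kSaveFPRegs, eax);  // saves ecx, edx and XMM1-XMM7
//   ... emit the clobbering call ...
//   PopCallerSaved(kSaveFPRegs, eax);   // restores in reverse order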

void MacroAssembler::InNewSpace(Register object, Register scratch, Condition cc,
                                Label* condition_met,
                                Label::Distance distance) {
  const int mask =
      (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
  CheckPageFlag(object, scratch, mask, cc, condition_met, distance);
}


void MacroAssembler::RememberedSetHelper(
    Register object,  // Only used for debug checks.
    Register addr,
    Register scratch,
    SaveFPRegsMode save_fp,
    MacroAssembler::RememberedSetFinalAction and_then) {
  Label done;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
    int3();
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  mov(scratch, Operand::StaticVariable(store_buffer));
  // Store pointer to buffer.
  mov(Operand(scratch, 0), addr);
  // Increment buffer top.
  add(scratch, Immediate(kPointerSize));
  // Write back new top of buffer.
  mov(Operand::StaticVariable(store_buffer), scratch);
  // Check for end of buffer; the store buffer overflow stub is called below
  // when the end has been reached.
  test(scratch, Immediate(StoreBuffer::kStoreBufferMask));
  if (and_then == kReturnAtEnd) {
    Label buffer_overflowed;
    j(equal, &buffer_overflowed, Label::kNear);
    ret(0);
    bind(&buffer_overflowed);
  } else {
    DCHECK(and_then == kFallThroughAtEnd);
    j(not_equal, &done, Label::kNear);
  }
  StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
  CallStub(&store_buffer_overflow);
  if (and_then == kReturnAtEnd) {
    ret(0);
  } else {
    DCHECK(and_then == kFallThroughAtEnd);
    bind(&done);
  }
}


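// Clamps the double in |input_reg| to the uint8 range: NaN and negative
// inputs produce 0, inputs above 255 produce 255, and everything else is the
// converted integer. |scratch_reg| is zeroed for the NaN/negative comparison;
// cvtsd2si yields 0x80000000 on an invalid conversion, which the overflow
// check after the cmp against 1 detects.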
void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
                                        XMMRegister scratch_reg,
                                        Register result_reg) {
  Label done;
  Label conv_failure;
  xorps(scratch_reg, scratch_reg);
  cvtsd2si(result_reg, input_reg);
  test(result_reg, Immediate(0xFFFFFF00));
  j(zero, &done, Label::kNear);
  cmp(result_reg, Immediate(0x1));
  j(overflow, &conv_failure, Label::kNear);
  mov(result_reg, Immediate(0));
  setcc(sign, result_reg);
  sub(result_reg, Immediate(1));
  and_(result_reg, Immediate(255));
  jmp(&done, Label::kNear);
  bind(&conv_failure);
  Move(result_reg, Immediate(0));
  ucomisd(input_reg, scratch_reg);
  j(below, &done, Label::kNear);
  Move(result_reg, Immediate(255));
  bind(&done);
}


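// Clamps the signed 32-bit value in |reg| to the uint8 range [0, 255]:
// in-range values are left untouched, negative values become 0, and values
// above 255 become 255 (via the setcc/dec_b trick commented below).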
void MacroAssembler::ClampUint8(Register reg) {
  Label done;
  test(reg, Immediate(0xFFFFFF00));
  j(zero, &done, Label::kNear);
  setcc(negative, reg);  // 1 if negative, 0 if positive.
  dec_b(reg);  // 0 if negative, 255 if positive.
  bind(&done);
}


void MacroAssembler::SlowTruncateToI(Register result_reg,
                                     Register input_reg,
                                     int offset) {
  DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
  call(stub.GetCode(), RelocInfo::CODE_TARGET);
}


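// Truncates the double in |input_reg| to an int32 in |result_reg|. cvttsd2si
// produces 0x80000000 when the input cannot be represented as an int32 (NaN
// or out of range); comparing that sentinel against 1 sets the overflow flag,
// in which case the value is spilled and the slow-path DoubleToIStub is used.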
void MacroAssembler::TruncateDoubleToI(Register result_reg,
                                       XMMRegister input_reg) {
  Label done;
  cvttsd2si(result_reg, Operand(input_reg));
  cmp(result_reg, 0x1);
  j(no_overflow, &done, Label::kNear);

  sub(esp, Immediate(kDoubleSize));
  movsd(MemOperand(esp, 0), input_reg);
  SlowTruncateToI(result_reg, esp, 0);
  add(esp, Immediate(kDoubleSize));
  bind(&done);
}


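// Converts the double in |input_reg| to an int32 in |result_reg|, jumping to
// |lost_precision| when the value does not round-trip exactly, to |is_nan|
// for NaN inputs, and (under FAIL_ON_MINUS_ZERO) to |minus_zero| when the
// input is -0.0, which converts to 0 but carries a set sign bit.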
void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
                               XMMRegister scratch,
                               MinusZeroMode minus_zero_mode,
                               Label* lost_precision, Label* is_nan,
                               Label* minus_zero, Label::Distance dst) {
  DCHECK(!input_reg.is(scratch));
  cvttsd2si(result_reg, Operand(input_reg));
  Cvtsi2sd(scratch, Operand(result_reg));
  ucomisd(scratch, input_reg);
  j(not_equal, lost_precision, dst);
  j(parity_even, is_nan, dst);
  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
    Label done;
    // The integer converted back is equal to the original. We
    // only have to test if we got -0 as an input.
    test(result_reg, Operand(result_reg));
    j(not_zero, &done, Label::kNear);
    movmskpd(result_reg, input_reg);
    // Bit 0 contains the sign of the double in input_reg.
    // If input was positive, we are ok and return 0, otherwise
    // jump to minus_zero.
    and_(result_reg, 1);
    j(not_zero, minus_zero, dst);
    bind(&done);
  }
}


void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
                                           Register input_reg) {
  Label done, slow_case;

  if (CpuFeatures::IsSupported(SSE3)) {
    CpuFeatureScope scope(this, SSE3);
    Label convert;
    // Use the more powerful SSE3 conversion when it is available.
    // Load x87 register with heap number.
    fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
    // Get exponent alone and check for too-big exponent.
    mov(result_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
    and_(result_reg, HeapNumber::kExponentMask);
    const uint32_t kTooBigExponent =
        (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
    cmp(Operand(result_reg), Immediate(kTooBigExponent));
    j(greater_equal, &slow_case, Label::kNear);

    // Reserve space for 64 bit answer.
    sub(Operand(esp), Immediate(kDoubleSize));
    // Do conversion, which cannot fail because we checked the exponent.
    fisttp_d(Operand(esp, 0));
    mov(result_reg, Operand(esp, 0));  // Low word of answer is the result.
    add(Operand(esp), Immediate(kDoubleSize));
    jmp(&done, Label::kNear);

    // Slow case.
    bind(&slow_case);
    if (input_reg.is(result_reg)) {
      // Input is clobbered. Restore number from fpu stack.
      sub(Operand(esp), Immediate(kDoubleSize));
      fstp_d(Operand(esp, 0));
      SlowTruncateToI(result_reg, esp, 0);
      add(esp, Immediate(kDoubleSize));
    } else {
      fstp(0);
      SlowTruncateToI(result_reg, input_reg);
    }
  } else {
    movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
    cvttsd2si(result_reg, Operand(xmm0));
    cmp(result_reg, 0x1);
    j(no_overflow, &done, Label::kNear);
    // Check if the input was 0x80000000 (kMinInt), the only value for which
    // the overflow result is correct. If not, fall back to the slow case.
    ExternalReference min_int = ExternalReference::address_of_min_int();
    ucomisd(xmm0, Operand::StaticVariable(min_int));
    j(not_equal, &slow_case, Label::kNear);
    j(parity_even, &slow_case, Label::kNear);  // NaN.
    jmp(&done, Label::kNear);

    // Slow case.
    bind(&slow_case);
    if (input_reg.is(result_reg)) {
      // Input is clobbered. Restore number from double scratch.
      sub(esp, Immediate(kDoubleSize));
      movsd(MemOperand(esp, 0), xmm0);
      SlowTruncateToI(result_reg, esp, 0);
      add(esp, Immediate(kDoubleSize));
    } else {
      SlowTruncateToI(result_reg, input_reg);
    }
  }
  bind(&done);
}


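// Loads the uint32 in |src| into the double register |dst|. Cvtsi2sd treats
// the value as signed, so when the sign flag indicates the top bit is set,
// 2^32 is added back via a precomputed bias constant.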
void MacroAssembler::LoadUint32(XMMRegister dst, const Operand& src) {
  Label done;
  cmp(src, Immediate(0));
  ExternalReference uint32_bias = ExternalReference::address_of_uint32_bias();
  Cvtsi2sd(dst, src);
  j(not_sign, &done, Label::kNear);
  addsd(dst, Operand::StaticVariable(uint32_bias));
  bind(&done);
}


void MacroAssembler::RecordWriteArray(
    Register object,
    Register value,
    Register index,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    DCHECK_EQ(0, kSmiTag);
    test(value, Immediate(kSmiTagMask));
    j(zero, &done);
  }

  // Array access: calculate the destination address in the same manner as
  // KeyedStoreIC::GenerateGeneric.  Multiply a smi by 2 to get an offset
  // into an array of words.
  Register dst = index;
  lea(dst, Operand(object, index, times_half_pointer_size,
                   FixedArray::kHeaderSize - kHeapObjectTag));

  RecordWrite(object, dst, value, save_fp, remembered_set_action,
              OMIT_SMI_CHECK, pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
    mov(index, Immediate(bit_cast<int32_t>(kZapValue)));
  }
}


void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done, Label::kNear);
  }

  // Although the object register is tagged, the offset is relative to the start
  // of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  lea(dst, FieldOperand(object, offset));
  if (emit_debug_code()) {
    Label ok;
    test_b(dst, Immediate((1 << kPointerSizeLog2) - 1));
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  RecordWrite(object, dst, value, save_fp, remembered_set_action,
              OMIT_SMI_CHECK, pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
    mov(dst, Immediate(bit_cast<int32_t>(kZapValue)));
  }
}


void MacroAssembler::RecordWriteForMap(
    Register object,
    Handle<Map> map,
    Register scratch1,
    Register scratch2,
    SaveFPRegsMode save_fp) {
  Label done;

  Register address = scratch1;
  Register value = scratch2;
  if (emit_debug_code()) {
    Label ok;
    lea(address, FieldOperand(object, HeapObject::kMapOffset));
    test_b(address, Immediate((1 << kPointerSizeLog2) - 1));
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  DCHECK(!object.is(value));
  DCHECK(!object.is(address));
  DCHECK(!value.is(address));
  AssertNotSmi(object);

  if (!FLAG_incremental_marking) {
    return;
  }

  // Compute the address.
  lea(address, FieldOperand(object, HeapObject::kMapOffset));

  // A single check of the map page's interesting flag suffices, since it is
  // only set during incremental collection, and then it is also guaranteed
  // that the interesting flag of the page holding the source object is set.
  // This optimization relies on the fact that maps can never be in new space.
  DCHECK(!isolate()->heap()->InNewSpace(*map));
  CheckPageFlagForMap(map,
                      MemoryChunk::kPointersToHereAreInterestingMask,
                      zero,
                      &done,
                      Label::kNear);

  RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET,
                       save_fp);
  CallStub(&stub);

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
    mov(scratch1, Immediate(bit_cast<int32_t>(kZapValue)));
    mov(scratch2, Immediate(bit_cast<int32_t>(kZapValue)));
  }
}


void MacroAssembler::RecordWrite(
    Register object,
    Register address,
    Register value,
    SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!object.is(value));
  DCHECK(!object.is(address));
  DCHECK(!value.is(address));
  AssertNotSmi(object);

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    Label ok;
    cmp(value, Operand(address, 0));
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis and stores into young gen.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    // Skip barrier if writing a smi.
    JumpIfSmi(value, &done, Label::kNear);
  }

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlag(value,
                  value,  // Used as scratch.
                  MemoryChunk::kPointersToHereAreInterestingMask,
                  zero,
                  &done,
                  Label::kNear);
  }
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                zero,
                &done,
                Label::kNear);

  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(address, Immediate(bit_cast<int32_t>(kZapValue)));
    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
  }
}

void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
                                               Register code_entry,
                                               Register scratch) {
  const int offset = JSFunction::kCodeEntryOffset;

  // Since a code entry (value) is always in old space, we don't need to
  // update the remembered set. If incremental marking is off, there is
  // nothing for us to do.
  if (!FLAG_incremental_marking) return;

  DCHECK(!js_function.is(code_entry));
  DCHECK(!js_function.is(scratch));
  DCHECK(!code_entry.is(scratch));
  AssertNotSmi(js_function);

  if (emit_debug_code()) {
    Label ok;
    lea(scratch, FieldOperand(js_function, offset));
    cmp(code_entry, Operand(scratch, 0));
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis and stores into young gen.
  Label done;

  CheckPageFlag(code_entry, scratch,
                MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
                Label::kNear);
  CheckPageFlag(js_function, scratch,
                MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
                Label::kNear);

  // Save input registers.
  push(js_function);
  push(code_entry);

  const Register dst = scratch;
  lea(dst, FieldOperand(js_function, offset));

  // Save caller-saved registers.
  PushCallerSaved(kDontSaveFPRegs, js_function, code_entry);

  int argument_count = 3;
  PrepareCallCFunction(argument_count, code_entry);
  mov(Operand(esp, 0 * kPointerSize), js_function);
  mov(Operand(esp, 1 * kPointerSize), dst);  // Slot.
  mov(Operand(esp, 2 * kPointerSize),
      Immediate(ExternalReference::isolate_address(isolate())));

  {
    AllowExternalCallThatCantCauseGC scope(this);
    CallCFunction(
        ExternalReference::incremental_marking_record_write_code_entry_function(
            isolate()),
        argument_count);
  }

  // Restore caller-saved registers.
  PopCallerSaved(kDontSaveFPRegs, js_function, code_entry);

  // Restore input registers.
  pop(code_entry);
  pop(js_function);

  bind(&done);
}

void MacroAssembler::DebugBreak() {
  Move(eax, Immediate(0));
  mov(ebx, Immediate(ExternalReference(Runtime::kHandleDebuggerStatement,
                                       isolate())));
  CEntryStub ces(isolate(), 1);
  call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
}

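// Converts an int32 to a double. cvtsi2sd does not modify the upper bits of
// |dst|, which leaves a dependency on the register's previous contents, so
// the destination is zeroed with xorps first to break it.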
void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
  xorps(dst, dst);
  cvtsi2sd(dst, src);
}


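// Converts a uint32 to a float. For values with the top bit set, which
// cvtsi2ss would misinterpret as negative, the value is halved (with the
// shifted-out low bit folded back in so rounding stays correct), converted,
// and then doubled.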
void MacroAssembler::Cvtui2ss(XMMRegister dst, Register src, Register tmp) {
  Label msb_set_src;
  Label jmp_return;
  test(src, src);
  j(sign, &msb_set_src, Label::kNear);
  cvtsi2ss(dst, src);
  jmp(&jmp_return, Label::kNear);
  bind(&msb_set_src);
  mov(tmp, src);
  shr(src, 1);
  // Recover the least significant bit to avoid rounding errors.
  and_(tmp, Immediate(1));
  or_(src, tmp);
  cvtsi2ss(dst, src);
  addss(dst, dst);
  bind(&jmp_return);
}

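// The *Pair helpers implement 64-bit shifts on a high:low register pair.
// ShlPair handles a constant shift amount: counts of 32 or more move the low
// word into the high word and shift the remainder there; smaller counts use
// shld to carry bits from low into high.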
void MacroAssembler::ShlPair(Register high, Register low, uint8_t shift) {
  if (shift >= 32) {
    mov(high, low);
    shl(high, shift - 32);
    xor_(low, low);
  } else {
    shld(high, low, shift);
    shl(low, shift);
  }
}

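// The _cl variants shift by the amount in ecx. The hardware masks the shift
// count to five bits (shifts of 32-bit operands operate modulo 32), so counts
// of 32 or more are detected by testing bit 5 of ecx and handled by moving
// words explicitly. ShrPair_cl and SarPair_cl below follow the same pattern.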
void MacroAssembler::ShlPair_cl(Register high, Register low) {
  shld_cl(high, low);
  shl_cl(low);
  Label done;
  test(ecx, Immediate(0x20));
  j(equal, &done, Label::kNear);
  mov(high, low);
  xor_(low, low);
  bind(&done);
}

void MacroAssembler::ShrPair(Register high, Register low, uint8_t shift) {
  if (shift >= 32) {
    mov(low, high);
    shr(low, shift - 32);
    xor_(high, high);
  } else {
    shrd(high, low, shift);
    shr(high, shift);
  }
}

void MacroAssembler::ShrPair_cl(Register high, Register low) {
  shrd_cl(low, high);
  shr_cl(high);
  Label done;
  test(ecx, Immediate(0x20));
  j(equal, &done, Label::kNear);
  mov(low, high);
  xor_(high, high);
  bind(&done);
}

void MacroAssembler::SarPair(Register high, Register low, uint8_t shift) {
  if (shift >= 32) {
    mov(low, high);
    sar(low, shift - 32);
    sar(high, 31);
  } else {
    shrd(high, low, shift);
    sar(high, shift);
  }
}

void MacroAssembler::SarPair_cl(Register high, Register low) {
  shrd_cl(low, high);
  sar_cl(high);
  Label done;
  test(ecx, Immediate(0x20));
  j(equal, &done, Label::kNear);
  mov(low, high);
  sar(high, 31);
  bind(&done);
}

bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
  static const int kMaxImmediateBits = 17;
  if (!RelocInfo::IsNone(x.rmode_)) return false;
  return !is_intn(x.x_, kMaxImmediateBits);
}


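// SafeMove/SafePush mitigate JIT spraying: a large immediate that carries no
// relocation information is emitted XORed with a random JIT cookie and then
// un-XORed at runtime, so attacker-chosen constants never appear verbatim in
// the generated code.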
void MacroAssembler::SafeMove(Register dst, const Immediate& x) {
  if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
    Move(dst, Immediate(x.x_ ^ jit_cookie()));
    xor_(dst, jit_cookie());
  } else {
    Move(dst, x);
  }
}


void MacroAssembler::SafePush(const Immediate& x) {
  if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
    push(Immediate(x.x_ ^ jit_cookie()));
    xor_(Operand(esp, 0), Immediate(jit_cookie()));
  } else {
    push(x);
  }
}


void MacroAssembler::CmpObjectType(Register heap_object,
                                   InstanceType type,
                                   Register map) {
  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  CmpInstanceType(map, type);
}


void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
  cmpb(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}


void MacroAssembler::CheckFastElements(Register map,
                                       Label* fail,
                                       Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Immediate(Map::kMaximumBitField2FastHoleyElementValue));
  j(above, fail, distance);
}


void MacroAssembler::CheckFastObjectElements(Register map,
                                             Label* fail,
                                             Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
  j(below_equal, fail, distance);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Immediate(Map::kMaximumBitField2FastHoleyElementValue));
  j(above, fail, distance);
}


void MacroAssembler::CheckFastSmiElements(Register map,
                                          Label* fail,
                                          Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
  j(above, fail, distance);
}


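// Stores the number in |maybe_number| (a smi or a HeapNumber) into the
// FixedDoubleArray |elements| at |key|, jumping to |fail| for any other kind
// of object. Heap-number values are multiplied by 1.0 so that a signalling
// NaN is canonicalized to a quiet NaN before being stored.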
void MacroAssembler::StoreNumberToDoubleElements(
    Register maybe_number,
    Register elements,
    Register key,
    Register scratch1,
    XMMRegister scratch2,
    Label* fail,
    int elements_offset) {
  Label smi_value, done;
  JumpIfSmi(maybe_number, &smi_value, Label::kNear);

  CheckMap(maybe_number,
           isolate()->factory()->heap_number_map(),
           fail,
           DONT_DO_SMI_CHECK);

  // Double value, turn potential sNaN into qNaN.
  Move(scratch2, 1.0);
  mulsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
  jmp(&done, Label::kNear);

  bind(&smi_value);
  // Value is a smi. Convert to a double and store.
  // Preserve original value.
  mov(scratch1, maybe_number);
  SmiUntag(scratch1);
  Cvtsi2sd(scratch2, scratch1);
  bind(&done);
  movsd(FieldOperand(elements, key, times_4,
                     FixedDoubleArray::kHeaderSize - elements_offset),
        scratch2);
}


void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
  cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
}


void MacroAssembler::CheckMap(Register obj,
                              Handle<Map> map,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }

  CompareMap(obj, map);
  j(not_equal, fail);
}


void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
                                     Register scratch2, Handle<WeakCell> cell,
                                     Handle<Code> success,
                                     SmiCheckType smi_check_type) {
  Label fail;
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, &fail);
  }
  mov(scratch1, FieldOperand(obj, HeapObject::kMapOffset));
  CmpWeakValue(scratch1, cell, scratch2);
  j(equal, success);

  bind(&fail);
}


Condition MacroAssembler::IsObjectStringType(Register heap_object,
                                             Register map,
                                             Register instance_type) {
  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kNotStringTag != 0);
  test(instance_type, Immediate(kIsNotStringMask));
  return zero;
}


Condition MacroAssembler::IsObjectNameType(Register heap_object,
                                           Register map,
                                           Register instance_type) {
  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  cmpb(instance_type, Immediate(LAST_NAME_TYPE));
  return below_equal;
}


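// Compares the two values on top of the x87 stack. fucomip compares ST(0)
// with ST(1), sets EFLAGS directly, and pops once; the following fstp(0)
// pops the remaining operand so the x87 stack is left empty.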
void MacroAssembler::FCmp() {
  fucomip();
  fstp(0);
}


void MacroAssembler::AssertNumber(Register object) {
  if (emit_debug_code()) {
    Label ok;
    JumpIfSmi(object, &ok);
    cmp(FieldOperand(object, HeapObject::kMapOffset),
        isolate()->factory()->heap_number_map());
    Check(equal, kOperandNotANumber);
    bind(&ok);
  }
}

void MacroAssembler::AssertNotNumber(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsANumber);
    cmp(FieldOperand(object, HeapObject::kMapOffset),
        isolate()->factory()->heap_number_map());
    Check(not_equal, kOperandIsANumber);
  }
}

void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(equal, kOperandIsNotASmi);
  }
}


void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAString);
    push(object);
    mov(object, FieldOperand(object, HeapObject::kMapOffset));
    CmpInstanceType(object, FIRST_NONSTRING_TYPE);
    pop(object);
    Check(below, kOperandIsNotAString);
  }
}


void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAName);
    push(object);
    mov(object, FieldOperand(object, HeapObject::kMapOffset));
    CmpInstanceType(object, LAST_NAME_TYPE);
    pop(object);
    Check(below_equal, kOperandIsNotAName);
  }
}


void MacroAssembler::AssertFunction(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAFunction);
    Push(object);
    CmpObjectType(object, JS_FUNCTION_TYPE, object);
    Pop(object);
    Check(equal, kOperandIsNotAFunction);
  }
}


void MacroAssembler::AssertBoundFunction(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotABoundFunction);
    Push(object);
    CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
    Pop(object);
    Check(equal, kOperandIsNotABoundFunction);
  }
}

void MacroAssembler::AssertGeneratorObject(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAGeneratorObject);
    Push(object);
    CmpObjectType(object, JS_GENERATOR_OBJECT_TYPE, object);
    Pop(object);
    Check(equal, kOperandIsNotAGeneratorObject);
  }
}

void MacroAssembler::AssertReceiver(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAReceiver);
    Push(object);
    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
    CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, object);
    Pop(object);
    Check(above_equal, kOperandIsNotAReceiver);
  }
}


void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    cmp(object, isolate()->factory()->undefined_value());
    j(equal, &done_checking);
    cmp(FieldOperand(object, 0),
        Immediate(isolate()->factory()->allocation_site_map()));
    Assert(equal, kExpectedUndefinedOrCell);
    bind(&done_checking);
  }
}


void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmi);
  }
}

void MacroAssembler::StubPrologue(StackFrame::Type type) {
  push(ebp);  // Caller's frame pointer.
  mov(ebp, esp);
  push(Immediate(Smi::FromInt(type)));
}

void MacroAssembler::Prologue(bool code_pre_aging) {
  PredictableCodeSizeScope predictable_code_size_scope(
      this, kNoCodeAgeSequenceLength);
  if (code_pre_aging) {
    // Pre-age the code.
    call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
         RelocInfo::CODE_AGE_SEQUENCE);
    Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
  } else {
    push(ebp);  // Caller's frame pointer.
    mov(ebp, esp);
    push(esi);  // Callee's context.
    push(edi);  // Callee's JS function.
  }
}


void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
  mov(vector, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  mov(vector, FieldOperand(vector, JSFunction::kLiteralsOffset));
  mov(vector, FieldOperand(vector, LiteralsArray::kFeedbackVectorOffset));
}


void MacroAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool_pointer_reg) {
  // Out-of-line constant pool not implemented on ia32.
  UNREACHABLE();
}


void MacroAssembler::EnterFrame(StackFrame::Type type) {
  push(ebp);
  mov(ebp, esp);
  push(Immediate(Smi::FromInt(type)));
  if (type == StackFrame::INTERNAL) {
    push(Immediate(CodeObject()));
  }
  if (emit_debug_code()) {
    cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
    Check(not_equal, kCodeObjectNotProperlyPatched);
  }
}


void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  if (emit_debug_code()) {
    cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
        Immediate(Smi::FromInt(type)));
    Check(equal, kStackFrameTypesMustMatch);
  }
  leave();
}


void MacroAssembler::EnterExitFramePrologue() {
  // Set up the frame structure on the stack.
  DCHECK_EQ(+2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
  DCHECK_EQ(+1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
  push(ebp);
  mov(ebp, esp);

  // Reserve room for entry stack pointer and push the code object.
  push(Immediate(Smi::FromInt(StackFrame::EXIT)));
  DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
  push(Immediate(0));  // Saved entry sp, patched before call.
  DCHECK_EQ(-3 * kPointerSize, ExitFrameConstants::kCodeOffset);
  push(Immediate(CodeObject()));  // Accessed from ExitFrame::code_slot.

  // Save the frame pointer and the context in top.
  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
  ExternalReference context_address(Isolate::kContextAddress, isolate());
  ExternalReference c_function_address(Isolate::kCFunctionAddress, isolate());
  mov(Operand::StaticVariable(c_entry_fp_address), ebp);
  mov(Operand::StaticVariable(context_address), esi);
  mov(Operand::StaticVariable(c_function_address), ebx);
}


void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
  // Optionally save all XMM registers.
  if (save_doubles) {
    int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
                argc * kPointerSize;
    sub(esp, Immediate(space));
    const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
    }
  } else {
    sub(esp, Immediate(argc * kPointerSize));
  }

  // Get the required frame alignment for the OS.
  const int kFrameAlignment = base::OS::ActivationFrameAlignment();
  if (kFrameAlignment > 0) {
    DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
    and_(esp, -kFrameAlignment);
  }

  // Patch the saved entry sp.
  mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
}


void MacroAssembler::EnterExitFrame(int argc, bool save_doubles) {
  EnterExitFramePrologue();

  // Set up argc and argv in callee-saved registers.
  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
  mov(edi, eax);
  lea(esi, Operand(ebp, eax, times_4, offset));

  // Reserve space for argc, argv and isolate.
  EnterExitFrameEpilogue(argc, save_doubles);
}


void MacroAssembler::EnterApiExitFrame(int argc) {
  EnterExitFramePrologue();
  EnterExitFrameEpilogue(argc, false);
}


void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
  // Optionally restore all XMM registers.
  if (save_doubles) {
    const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
    }
  }

  if (pop_arguments) {
    // Get the return address from the stack and restore the frame pointer.
    mov(ecx, Operand(ebp, 1 * kPointerSize));
    mov(ebp, Operand(ebp, 0 * kPointerSize));

    // Pop the arguments and the receiver from the caller stack.
    lea(esp, Operand(esi, 1 * kPointerSize));

    // Push the return address to get ready to return.
    push(ecx);
  } else {
    // Otherwise just leave the exit frame.
    leave();
  }

  LeaveExitFrameEpilogue(true);
}


void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
  // Restore current context from top and clear it in debug mode.
  ExternalReference context_address(Isolate::kContextAddress, isolate());
  if (restore_context) {
    mov(esi, Operand::StaticVariable(context_address));
  }
#ifdef DEBUG
  mov(Operand::StaticVariable(context_address), Immediate(0));
#endif

  // Clear the top frame.
  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
                                       isolate());
  mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
}


void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
  mov(esp, ebp);
  pop(ebp);

  LeaveExitFrameEpilogue(restore_context);
}


void MacroAssembler::PushStackHandler() {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);

  // Link the current handler as the next handler.
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  push(Operand::StaticVariable(handler_address));

  // Set this new handler as the current one.
  mov(Operand::StaticVariable(handler_address), esp);
}


void MacroAssembler::PopStackHandler() {
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  pop(Operand::StaticVariable(handler_address));
  add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}


void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch1,
                                            Register scratch2,
                                            Label* miss) {
  Label same_contexts;

  DCHECK(!holder_reg.is(scratch1));
  DCHECK(!holder_reg.is(scratch2));
  DCHECK(!scratch1.is(scratch2));

  // Load current lexical context from the active StandardFrame, which
  // may require crawling past STUB frames.
  Label load_context;
  Label has_context;
  mov(scratch2, ebp);
  bind(&load_context);
  mov(scratch1,
      MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
  JumpIfNotSmi(scratch1, &has_context);
  mov(scratch2, MemOperand(scratch2, CommonFrameConstants::kCallerFPOffset));
  jmp(&load_context);
  bind(&has_context);

  // When generating debug code, make sure the lexical context is set.
  if (emit_debug_code()) {
    cmp(scratch1, Immediate(0));
    Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
  }
  // Load the native context of the current context.
  mov(scratch1, ContextOperand(scratch1, Context::NATIVE_CONTEXT_INDEX));

  // Check the context is a native context.
  if (emit_debug_code()) {
    // Read the first word and compare to native_context_map.
    cmp(FieldOperand(scratch1, HeapObject::kMapOffset),
        isolate()->factory()->native_context_map());
    Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
  }

  // Check if both contexts are the same.
  cmp(scratch1, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  j(equal, &same_contexts);

  // Compare security tokens.
  //
  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  mov(scratch2,
      FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));

  // Check the context is a native context.
  if (emit_debug_code()) {
    cmp(scratch2, isolate()->factory()->null_value());
    Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);

    // Read the first word and compare to native_context_map().
    cmp(FieldOperand(scratch2, HeapObject::kMapOffset),
        isolate()->factory()->native_context_map());
    Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
  }

  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
  mov(scratch1, FieldOperand(scratch1, token_offset));
  cmp(scratch1, FieldOperand(scratch2, token_offset));
  j(not_equal, miss);

  bind(&same_contexts);
}


// Compute the hash code from the untagged key. This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stub-hydrogen.cc.
//
// Note: r0 will contain the hash code.
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
  // Xor original key with a seed.
  if (serializer_enabled()) {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    mov(scratch, Immediate(Heap::kHashSeedRootIndex));
    mov(scratch,
        Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
    SmiUntag(scratch);
    xor_(r0, scratch);
  } else {
    int32_t seed = isolate()->heap()->HashSeed();
    xor_(r0, Immediate(seed));
  }

  // hash = ~hash + (hash << 15);
  mov(scratch, r0);
  not_(r0);
  shl(scratch, 15);
  add(r0, scratch);
  // hash = hash ^ (hash >> 12);
  mov(scratch, r0);
  shr(scratch, 12);
  xor_(r0, scratch);
  // hash = hash + (hash << 2);
  lea(r0, Operand(r0, r0, times_4, 0));
  // hash = hash ^ (hash >> 4);
  mov(scratch, r0);
  shr(scratch, 4);
  xor_(r0, scratch);
  // hash = hash * 2057;
  imul(r0, r0, 2057);
  // hash = hash ^ (hash >> 16);
  mov(scratch, r0);
  shr(scratch, 16);
  xor_(r0, scratch);
  and_(r0, 0x3fffffff);
}


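// Looks up the smi |key| in the SeededNumberDictionary |elements| using
// quadratic probing (offsets hash + i + i*i, per the loop comment below),
// with the first kNumberDictionaryProbes probes unrolled inline. Jumps to
// |miss| when the key is absent or the entry found is not a data property.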
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register r0,
                                              Register r1,
                                              Register r2,
                                              Register result) {
  // Register use:
  //
  // elements - holds the slow-case elements of the receiver and is unchanged.
  //
  // key - holds the smi key on entry and is unchanged.
  //
  // Scratch registers:
  //
  // r0 - holds the untagged key on entry and holds the hash once computed.
  //
  // r1 - used to hold the capacity mask of the dictionary.
  //
  // r2 - used for the index into the dictionary.
  //
  // result - holds the result on exit if the load succeeds and we fall through.

  Label done;

  GetNumberHash(r0, r1);

  // Compute capacity mask.
  mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
  shr(r1, kSmiTagSize);  // convert smi to int
  dec(r1);

  // Generate an unrolled loop that performs a few probes before giving up.
  for (int i = 0; i < kNumberDictionaryProbes; i++) {
    // Use r2 for index calculations and keep the hash intact in r0.
    mov(r2, r0);
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
    }
    and_(r2, r1);

    // Scale the index by multiplying by the entry size.
    DCHECK(SeededNumberDictionary::kEntrySize == 3);
    lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3

    // Check if the key matches.
    cmp(key, FieldOperand(elements,
                          r2,
                          times_pointer_size,
                          SeededNumberDictionary::kElementsStartOffset));
    if (i != (kNumberDictionaryProbes - 1)) {
      j(equal, &done);
    } else {
      j(not_equal, miss);
    }
  }

  bind(&done);
  // Check that the value is a field property.
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  DCHECK_EQ(DATA, 0);
  test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
       Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
  j(not_zero, miss);

  // Get the value at the masked, scaled index.
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
  mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}


void MacroAssembler::LoadAllocationTopHelper(Register result,
                                             Register scratch,
                                             AllocationFlags flags) {
  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);

  // Just return if allocation top is already known.
  if ((flags & RESULT_CONTAINS_TOP) != 0) {
    // No use of scratch if allocation top is provided.
    DCHECK(scratch.is(no_reg));
#ifdef DEBUG
    // Assert that result actually contains top on entry.
    cmp(result, Operand::StaticVariable(allocation_top));
    Check(equal, kUnexpectedAllocationTop);
#endif
    return;
  }

  // Move address of new object to result. Use scratch register if available.
  if (scratch.is(no_reg)) {
    mov(result, Operand::StaticVariable(allocation_top));
  } else {
    mov(scratch, Immediate(allocation_top));
    mov(result, Operand(scratch, 0));
  }
}


void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
                                               Register scratch,
                                               AllocationFlags flags) {
  if (emit_debug_code()) {
    test(result_end, Immediate(kObjectAlignmentMask));
    Check(zero, kUnalignedAllocationInNewSpace);
  }

  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);

  // Update new top. Use scratch if available.
  if (scratch.is(no_reg)) {
    mov(Operand::StaticVariable(allocation_top), result_end);
  } else {
    mov(Operand(scratch, 0), result_end);
  }
}


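// Allocates an object of the statically-known |object_size| by bumping the
// allocation top pointer of the selected space, leaving the tagged object
// address in |result| and branching to |gc_required| when the space is
// exhausted. The two overloads that follow compute the size as
// header + element_count * element_size and from a size held in a register,
// respectively.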
Allocate(int object_size,Register result,Register result_end,Register scratch,Label * gc_required,AllocationFlags flags)1522 void MacroAssembler::Allocate(int object_size,
1523 Register result,
1524 Register result_end,
1525 Register scratch,
1526 Label* gc_required,
1527 AllocationFlags flags) {
1528 DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
1529 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
1530 DCHECK((flags & ALLOCATION_FOLDED) == 0);
1531 if (!FLAG_inline_new) {
1532 if (emit_debug_code()) {
1533 // Trash the registers to simulate an allocation failure.
1534 mov(result, Immediate(0x7091));
1535 if (result_end.is_valid()) {
1536 mov(result_end, Immediate(0x7191));
1537 }
1538 if (scratch.is_valid()) {
1539 mov(scratch, Immediate(0x7291));
1540 }
1541 }
1542 jmp(gc_required);
1543 return;
1544 }
1545 DCHECK(!result.is(result_end));
1546
1547 // Load address of new object into result.
1548 LoadAllocationTopHelper(result, scratch, flags);
1549
1550 ExternalReference allocation_limit =
1551 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1552
1553 // Align the next allocation. Storing the filler map without checking top is
1554 // safe in new-space because the limit of the heap is aligned there.
1555 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1556 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1557 Label aligned;
1558 test(result, Immediate(kDoubleAlignmentMask));
1559 j(zero, &aligned, Label::kNear);
1560 if ((flags & PRETENURE) != 0) {
1561 cmp(result, Operand::StaticVariable(allocation_limit));
1562 j(above_equal, gc_required);
1563 }
1564 mov(Operand(result, 0),
1565 Immediate(isolate()->factory()->one_pointer_filler_map()));
1566 add(result, Immediate(kDoubleSize / 2));
1567 bind(&aligned);
1568 }
1569
1570 // Calculate new top and bail out if space is exhausted.
1571 Register top_reg = result_end.is_valid() ? result_end : result;
1572
1573 if (!top_reg.is(result)) {
1574 mov(top_reg, result);
1575 }
1576 add(top_reg, Immediate(object_size));
1577 cmp(top_reg, Operand::StaticVariable(allocation_limit));
1578 j(above, gc_required);
1579
1580 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
1581 // The top pointer is not updated for allocation folding dominators.
1582 UpdateAllocationTopHelper(top_reg, scratch, flags);
1583 }
1584
1585 if (top_reg.is(result)) {
1586 sub(result, Immediate(object_size - kHeapObjectTag));
1587 } else {
1588 // Tag the result.
1589 DCHECK(kHeapObjectTag == 1);
1590 inc(result);
1591 }
1592 }
1593
1594
Allocate(int header_size,ScaleFactor element_size,Register element_count,RegisterValueType element_count_type,Register result,Register result_end,Register scratch,Label * gc_required,AllocationFlags flags)1595 void MacroAssembler::Allocate(int header_size,
1596 ScaleFactor element_size,
1597 Register element_count,
1598 RegisterValueType element_count_type,
1599 Register result,
1600 Register result_end,
1601 Register scratch,
1602 Label* gc_required,
1603 AllocationFlags flags) {
1604 DCHECK((flags & SIZE_IN_WORDS) == 0);
1605 DCHECK((flags & ALLOCATION_FOLDING_DOMINATOR) == 0);
1606 DCHECK((flags & ALLOCATION_FOLDED) == 0);
1607 if (!FLAG_inline_new) {
1608 if (emit_debug_code()) {
1609 // Trash the registers to simulate an allocation failure.
1610 mov(result, Immediate(0x7091));
1611 mov(result_end, Immediate(0x7191));
1612 if (scratch.is_valid()) {
1613 mov(scratch, Immediate(0x7291));
1614 }
1615 // Register element_count is not modified by the function.
1616 }
1617 jmp(gc_required);
1618 return;
1619 }
1620 DCHECK(!result.is(result_end));
1621
1622 // Load address of new object into result.
1623 LoadAllocationTopHelper(result, scratch, flags);
1624
1625 ExternalReference allocation_limit =
1626 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1627
1628 // Align the next allocation. Storing the filler map without checking top is
1629 // safe in new-space because the limit of the heap is aligned there.
1630 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1631 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1632 Label aligned;
1633 test(result, Immediate(kDoubleAlignmentMask));
1634 j(zero, &aligned, Label::kNear);
1635 if ((flags & PRETENURE) != 0) {
1636 cmp(result, Operand::StaticVariable(allocation_limit));
1637 j(above_equal, gc_required);
1638 }
1639 mov(Operand(result, 0),
1640 Immediate(isolate()->factory()->one_pointer_filler_map()));
1641 add(result, Immediate(kDoubleSize / 2));
1642 bind(&aligned);
1643 }
1644
1645 // Calculate new top and bail out if space is exhausted.
1646 // We assume that element_count*element_size + header_size does not
1647 // overflow.
1648 if (element_count_type == REGISTER_VALUE_IS_SMI) {
1649 STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1);
1650 STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2);
1651 STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4);
1652 DCHECK(element_size >= times_2);
1653 DCHECK(kSmiTagSize == 1);
1654 element_size = static_cast<ScaleFactor>(element_size - 1);
1655 } else {
1656 DCHECK(element_count_type == REGISTER_VALUE_IS_INT32);
1657 }
1658
1659 lea(result_end, Operand(element_count, element_size, header_size));
1660 add(result_end, result);
1661 cmp(result_end, Operand::StaticVariable(allocation_limit));
1662 j(above, gc_required);
1663
1664 // Tag result.
1665 DCHECK(kHeapObjectTag == 1);
1666 inc(result);
1667
1668 UpdateAllocationTopHelper(result_end, scratch, flags);
1669 }
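// Editorial note, a worked instance of the smi scale adjustment above: on
// ia32 a smi stores its value shifted left by kSmiTagSize == 1. If
// element_count holds Smi(8) (raw register value 16) and element_size is
// times_2, halving the scale to times_1 makes the lea compute
// 16 * 1 + header_size, the same 16 bytes as 8 two-byte elements.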
1670
1671
1672 void MacroAssembler::Allocate(Register object_size,
1673 Register result,
1674 Register result_end,
1675 Register scratch,
1676 Label* gc_required,
1677 AllocationFlags flags) {
1678 DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
1679 DCHECK((flags & ALLOCATION_FOLDED) == 0);
1680 if (!FLAG_inline_new) {
1681 if (emit_debug_code()) {
1682 // Trash the registers to simulate an allocation failure.
1683 mov(result, Immediate(0x7091));
1684 mov(result_end, Immediate(0x7191));
1685 if (scratch.is_valid()) {
1686 mov(scratch, Immediate(0x7291));
1687 }
1688 // object_size is left unchanged by this function.
1689 }
1690 jmp(gc_required);
1691 return;
1692 }
1693 DCHECK(!result.is(result_end));
1694
1695 // Load address of new object into result.
1696 LoadAllocationTopHelper(result, scratch, flags);
1697
1698 ExternalReference allocation_limit =
1699 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1700
1701 // Align the next allocation. Storing the filler map without checking top is
1702 // safe in new-space because the limit of the heap is aligned there.
1703 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1704 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1705 Label aligned;
1706 test(result, Immediate(kDoubleAlignmentMask));
1707 j(zero, &aligned, Label::kNear);
1708 if ((flags & PRETENURE) != 0) {
1709 cmp(result, Operand::StaticVariable(allocation_limit));
1710 j(above_equal, gc_required);
1711 }
1712 mov(Operand(result, 0),
1713 Immediate(isolate()->factory()->one_pointer_filler_map()));
1714 add(result, Immediate(kDoubleSize / 2));
1715 bind(&aligned);
1716 }
1717
1718 // Calculate new top and bail out if space is exhausted.
1719 if (!object_size.is(result_end)) {
1720 mov(result_end, object_size);
1721 }
1722 add(result_end, result);
1723 cmp(result_end, Operand::StaticVariable(allocation_limit));
1724 j(above, gc_required);
1725
1726 // Tag result.
1727 DCHECK(kHeapObjectTag == 1);
1728 inc(result);
1729
1730 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
1731 // The top pointer is not updated for allocation folding dominators.
1732 UpdateAllocationTopHelper(result_end, scratch, flags);
1733 }
1734 }
1735
1736 void MacroAssembler::FastAllocate(int object_size, Register result,
1737 Register result_end, AllocationFlags flags) {
1738 DCHECK(!result.is(result_end));
1739 // Load address of new object into result.
1740 LoadAllocationTopHelper(result, no_reg, flags);
1741
1742 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1743 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1744 Label aligned;
1745 test(result, Immediate(kDoubleAlignmentMask));
1746 j(zero, &aligned, Label::kNear);
1747 mov(Operand(result, 0),
1748 Immediate(isolate()->factory()->one_pointer_filler_map()));
1749 add(result, Immediate(kDoubleSize / 2));
1750 bind(&aligned);
1751 }
1752
1753 lea(result_end, Operand(result, object_size));
1754 UpdateAllocationTopHelper(result_end, no_reg, flags);
1755
1756 DCHECK(kHeapObjectTag == 1);
1757 inc(result);
1758 }
1759
1760 void MacroAssembler::FastAllocate(Register object_size, Register result,
1761 Register result_end, AllocationFlags flags) {
1762 DCHECK(!result.is(result_end));
1763 // Load address of new object into result.
1764 LoadAllocationTopHelper(result, no_reg, flags);
1765
1766 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1767 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1768 Label aligned;
1769 test(result, Immediate(kDoubleAlignmentMask));
1770 j(zero, &aligned, Label::kNear);
1771 mov(Operand(result, 0),
1772 Immediate(isolate()->factory()->one_pointer_filler_map()));
1773 add(result, Immediate(kDoubleSize / 2));
1774 bind(&aligned);
1775 }
1776
1777 lea(result_end, Operand(result, object_size, times_1, 0));
1778 UpdateAllocationTopHelper(result_end, no_reg, flags);
1779
1780 DCHECK(kHeapObjectTag == 1);
1781 inc(result);
1782 }
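// Editorial note, a worked instance of the DOUBLE_ALIGNMENT fixup above:
// with kDoubleAlignment == 8 and kPointerAlignment == 4, an unaligned top is
// off by exactly 4. If result == 0x1004, the test against
// kDoubleAlignmentMask (7) is non-zero, a one-pointer filler map is stored
// at 0x1004, and result advances by kDoubleSize / 2 == 4 to the aligned
// address 0x1008. The filler keeps the heap iterable across the gap.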
1783
1784
1785 void MacroAssembler::AllocateHeapNumber(Register result,
1786 Register scratch1,
1787 Register scratch2,
1788 Label* gc_required,
1789 MutableMode mode) {
1790 // Allocate heap number in new space.
1791 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
1792 NO_ALLOCATION_FLAGS);
1793
1794 Handle<Map> map = mode == MUTABLE
1795 ? isolate()->factory()->mutable_heap_number_map()
1796 : isolate()->factory()->heap_number_map();
1797
1798 // Set the map.
1799 mov(FieldOperand(result, HeapObject::kMapOffset), Immediate(map));
1800 }
1801
1802
1803 void MacroAssembler::AllocateTwoByteString(Register result,
1804 Register length,
1805 Register scratch1,
1806 Register scratch2,
1807 Register scratch3,
1808 Label* gc_required) {
1809 // Calculate the number of bytes needed for the characters in the string while
1810 // observing object alignment.
1811 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1812 DCHECK(kShortSize == 2);
1813 // scratch1 = length * 2 + kObjectAlignmentMask.
1814 lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
1815 and_(scratch1, Immediate(~kObjectAlignmentMask));
1816
1817 // Allocate two byte string in new space.
1818 Allocate(SeqTwoByteString::kHeaderSize, times_1, scratch1,
1819 REGISTER_VALUE_IS_INT32, result, scratch2, scratch3, gc_required,
1820 NO_ALLOCATION_FLAGS);
1821
1822 // Set the map, length and hash field.
1823 mov(FieldOperand(result, HeapObject::kMapOffset),
1824 Immediate(isolate()->factory()->string_map()));
1825 mov(scratch1, length);
1826 SmiTag(scratch1);
1827 mov(FieldOperand(result, String::kLengthOffset), scratch1);
1828 mov(FieldOperand(result, String::kHashFieldOffset),
1829 Immediate(String::kEmptyHashField));
1830 }
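// Editorial note, a worked instance of the size computation above (assuming
// 4-byte object alignment on ia32, i.e. kObjectAlignmentMask == 3): for
// length == 5 the lea computes 5 + 5 + 3 == 13 and the and_ rounds down to
// 12, i.e. 10 bytes of two-byte characters rounded up to an aligned size.
// The header is accounted for separately via SeqTwoByteString::kHeaderSize.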
1831
1832
1833 void MacroAssembler::AllocateOneByteString(Register result, Register length,
1834 Register scratch1, Register scratch2,
1835 Register scratch3,
1836 Label* gc_required) {
1837 // Calculate the number of bytes needed for the characters in the string while
1838 // observing object alignment.
1839 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1840 mov(scratch1, length);
1841 DCHECK(kCharSize == 1);
1842 add(scratch1, Immediate(kObjectAlignmentMask));
1843 and_(scratch1, Immediate(~kObjectAlignmentMask));
1844
1845 // Allocate one-byte string in new space.
1846 Allocate(SeqOneByteString::kHeaderSize, times_1, scratch1,
1847 REGISTER_VALUE_IS_INT32, result, scratch2, scratch3, gc_required,
1848 NO_ALLOCATION_FLAGS);
1849
1850 // Set the map, length and hash field.
1851 mov(FieldOperand(result, HeapObject::kMapOffset),
1852 Immediate(isolate()->factory()->one_byte_string_map()));
1853 mov(scratch1, length);
1854 SmiTag(scratch1);
1855 mov(FieldOperand(result, String::kLengthOffset), scratch1);
1856 mov(FieldOperand(result, String::kHashFieldOffset),
1857 Immediate(String::kEmptyHashField));
1858 }
1859
1860
1861 void MacroAssembler::AllocateOneByteString(Register result, int length,
1862 Register scratch1, Register scratch2,
1863 Label* gc_required) {
1864 DCHECK(length > 0);
1865
1866 // Allocate one-byte string in new space.
1867 Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
1868 gc_required, NO_ALLOCATION_FLAGS);
1869
1870 // Set the map, length and hash field.
1871 mov(FieldOperand(result, HeapObject::kMapOffset),
1872 Immediate(isolate()->factory()->one_byte_string_map()));
1873 mov(FieldOperand(result, String::kLengthOffset),
1874 Immediate(Smi::FromInt(length)));
1875 mov(FieldOperand(result, String::kHashFieldOffset),
1876 Immediate(String::kEmptyHashField));
1877 }
1878
1879
1880 void MacroAssembler::AllocateTwoByteConsString(Register result,
1881 Register scratch1,
1882 Register scratch2,
1883 Label* gc_required) {
1884   // Allocate cons string in new space.
1885 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1886 NO_ALLOCATION_FLAGS);
1887
1888 // Set the map. The other fields are left uninitialized.
1889 mov(FieldOperand(result, HeapObject::kMapOffset),
1890 Immediate(isolate()->factory()->cons_string_map()));
1891 }
1892
1893
1894 void MacroAssembler::AllocateOneByteConsString(Register result,
1895 Register scratch1,
1896 Register scratch2,
1897 Label* gc_required) {
1898 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1899 NO_ALLOCATION_FLAGS);
1900
1901 // Set the map. The other fields are left uninitialized.
1902 mov(FieldOperand(result, HeapObject::kMapOffset),
1903 Immediate(isolate()->factory()->cons_one_byte_string_map()));
1904 }
1905
1906
1907 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
1908 Register scratch1,
1909 Register scratch2,
1910 Label* gc_required) {
1911   // Allocate sliced string in new space.
1912 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
1913 NO_ALLOCATION_FLAGS);
1914
1915 // Set the map. The other fields are left uninitialized.
1916 mov(FieldOperand(result, HeapObject::kMapOffset),
1917 Immediate(isolate()->factory()->sliced_string_map()));
1918 }
1919
1920
1921 void MacroAssembler::AllocateOneByteSlicedString(Register result,
1922 Register scratch1,
1923 Register scratch2,
1924 Label* gc_required) {
1925   // Allocate sliced string in new space.
1926 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
1927 NO_ALLOCATION_FLAGS);
1928
1929 // Set the map. The other fields are left uninitialized.
1930 mov(FieldOperand(result, HeapObject::kMapOffset),
1931 Immediate(isolate()->factory()->sliced_one_byte_string_map()));
1932 }
1933
1934
1935 void MacroAssembler::AllocateJSValue(Register result, Register constructor,
1936 Register value, Register scratch,
1937 Label* gc_required) {
1938 DCHECK(!result.is(constructor));
1939 DCHECK(!result.is(scratch));
1940 DCHECK(!result.is(value));
1941
1942 // Allocate JSValue in new space.
1943 Allocate(JSValue::kSize, result, scratch, no_reg, gc_required,
1944 NO_ALLOCATION_FLAGS);
1945
1946 // Initialize the JSValue.
1947 LoadGlobalFunctionInitialMap(constructor, scratch);
1948 mov(FieldOperand(result, HeapObject::kMapOffset), scratch);
1949 LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
1950 mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
1951 mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
1952 mov(FieldOperand(result, JSValue::kValueOffset), value);
1953 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
1954 }
1955
1956
1957 // Copy memory, byte-by-byte, from source to destination. Not optimized for
1958 // long or aligned copies. The contents of scratch and length are destroyed.
1959 // Source and destination are incremented by length.
1960 // Many variants of movsb, loop unrolling, word moves, and indexed operands
1961 // have been tried here already, and this is fastest.
1962 // A simpler loop is faster on small copies, but 30% slower on large ones.
1963 // The cld() instruction must have been emitted, to clear the direction flag,
1964 // before calling this function.
1965 void MacroAssembler::CopyBytes(Register source,
1966 Register destination,
1967 Register length,
1968 Register scratch) {
1969 Label short_loop, len4, len8, len12, done, short_string;
1970 DCHECK(source.is(esi));
1971 DCHECK(destination.is(edi));
1972 DCHECK(length.is(ecx));
1973 cmp(length, Immediate(4));
1974 j(below, &short_string, Label::kNear);
1975
1976 // Because source is 4-byte aligned in our uses of this function,
1977 // we keep source aligned for the rep_movs call by copying the odd bytes
1978 // at the end of the ranges.
1979 mov(scratch, Operand(source, length, times_1, -4));
1980 mov(Operand(destination, length, times_1, -4), scratch);
1981
1982 cmp(length, Immediate(8));
1983 j(below_equal, &len4, Label::kNear);
1984 cmp(length, Immediate(12));
1985 j(below_equal, &len8, Label::kNear);
1986 cmp(length, Immediate(16));
1987 j(below_equal, &len12, Label::kNear);
1988
1989 mov(scratch, ecx);
1990 shr(ecx, 2);
1991 rep_movs();
1992 and_(scratch, Immediate(0x3));
1993 add(destination, scratch);
1994 jmp(&done, Label::kNear);
1995
1996 bind(&len12);
1997 mov(scratch, Operand(source, 8));
1998 mov(Operand(destination, 8), scratch);
1999 bind(&len8);
2000 mov(scratch, Operand(source, 4));
2001 mov(Operand(destination, 4), scratch);
2002 bind(&len4);
2003 mov(scratch, Operand(source, 0));
2004 mov(Operand(destination, 0), scratch);
2005 add(destination, length);
2006 jmp(&done, Label::kNear);
2007
2008 bind(&short_string);
2009 test(length, length);
2010 j(zero, &done, Label::kNear);
2011
2012 bind(&short_loop);
2013 mov_b(scratch, Operand(source, 0));
2014 mov_b(Operand(destination, 0), scratch);
2015 inc(source);
2016 inc(destination);
2017 dec(length);
2018 j(not_zero, &short_loop);
2019
2020 bind(&done);
2021 }
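// Editorial note, a worked instance of the strategy above: for length == 11
// the dword at [source + 7] is copied first, covering the unaligned tail
// bytes 7..10. Since 11 <= 12, control reaches len8, which copies the dwords
// at offsets 4 and 0. Every byte 0..10 is written (byte 7 twice, with the
// same value), and destination is advanced by length as documented.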
2022
2023
2024 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
2025 Register end_address,
2026 Register filler) {
2027 Label loop, entry;
2028 jmp(&entry, Label::kNear);
2029 bind(&loop);
2030 mov(Operand(current_address, 0), filler);
2031 add(current_address, Immediate(kPointerSize));
2032 bind(&entry);
2033 cmp(current_address, end_address);
2034 j(below, &loop, Label::kNear);
2035 }
2036
2037
2038 void MacroAssembler::BooleanBitTest(Register object,
2039 int field_offset,
2040 int bit_index) {
2041 bit_index += kSmiTagSize + kSmiShiftSize;
2042 DCHECK(base::bits::IsPowerOfTwo32(kBitsPerByte));
2043 int byte_index = bit_index / kBitsPerByte;
2044 int byte_bit_index = bit_index & (kBitsPerByte - 1);
2045 test_b(FieldOperand(object, field_offset + byte_index),
2046 Immediate(1 << byte_bit_index));
2047 }
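// Editorial note, a worked instance of BooleanBitTest above (assuming the
// ia32 smi constants kSmiTagSize == 1 and kSmiShiftSize == 0): a request for
// bit_index 10 of a smi-encoded bitfield really tests bit 11 of the raw
// word, so byte_index == 1 and byte_bit_index == 3, and the emitted code is
// a single test_b of (1 << 3) against the field's second byte.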
2048
2049
2051 void MacroAssembler::NegativeZeroTest(Register result,
2052 Register op,
2053 Label* then_label) {
2054 Label ok;
2055 test(result, result);
2056 j(not_zero, &ok, Label::kNear);
2057 test(op, op);
2058 j(sign, then_label, Label::kNear);
2059 bind(&ok);
2060 }
2061
2062
2063 void MacroAssembler::NegativeZeroTest(Register result,
2064 Register op1,
2065 Register op2,
2066 Register scratch,
2067 Label* then_label) {
2068 Label ok;
2069 test(result, result);
2070 j(not_zero, &ok, Label::kNear);
2071 mov(scratch, op1);
2072 or_(scratch, op2);
2073 j(sign, then_label, Label::kNear);
2074 bind(&ok);
2075 }
2076
2077
2078 void MacroAssembler::GetMapConstructor(Register result, Register map,
2079 Register temp) {
2080 Label done, loop;
2081 mov(result, FieldOperand(map, Map::kConstructorOrBackPointerOffset));
2082 bind(&loop);
2083 JumpIfSmi(result, &done, Label::kNear);
2084 CmpObjectType(result, MAP_TYPE, temp);
2085 j(not_equal, &done, Label::kNear);
2086 mov(result, FieldOperand(result, Map::kConstructorOrBackPointerOffset));
2087 jmp(&loop);
2088 bind(&done);
2089 }
2090
2091
2092 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
2093 Register scratch, Label* miss) {
2094 // Get the prototype or initial map from the function.
2095 mov(result,
2096 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2097
2098 // If the prototype or initial map is the hole, don't return it and
2099 // simply miss the cache instead. This will allow us to allocate a
2100 // prototype object on-demand in the runtime system.
2101 cmp(result, Immediate(isolate()->factory()->the_hole_value()));
2102 j(equal, miss);
2103
2104 // If the function does not have an initial map, we're done.
2105 Label done;
2106 CmpObjectType(result, MAP_TYPE, scratch);
2107 j(not_equal, &done, Label::kNear);
2108
2109 // Get the prototype from the initial map.
2110 mov(result, FieldOperand(result, Map::kPrototypeOffset));
2111
2112 // All done.
2113 bind(&done);
2114 }
2115
2116
2117 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
2118 DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
2119 call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
2120 }
2121
2122
2123 void MacroAssembler::TailCallStub(CodeStub* stub) {
2124 jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
2125 }
2126
2127
2128 void MacroAssembler::StubReturn(int argc) {
2129 DCHECK(argc >= 1 && generating_stub());
2130 ret((argc - 1) * kPointerSize);
2131 }
2132
2133
2134 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2135 return has_frame_ || !stub->SometimesSetsUpAFrame();
2136 }
2137
2138
2139 void MacroAssembler::IndexFromHash(Register hash, Register index) {
2140 // The assert checks that the constants for the maximum number of digits
2141 // for an array index cached in the hash field and the number of bits
2142   // reserved for it do not conflict.
2143 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
2144 (1 << String::kArrayIndexValueBits));
2145 if (!index.is(hash)) {
2146 mov(index, hash);
2147 }
2148 DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
2149 }
2150
2151
2152 void MacroAssembler::CallRuntime(const Runtime::Function* f,
2153 int num_arguments,
2154 SaveFPRegsMode save_doubles) {
2155 // If the expected number of arguments of the runtime function is
2156 // constant, we check that the actual number of arguments match the
2157 // expectation.
2158 CHECK(f->nargs < 0 || f->nargs == num_arguments);
2159
2160 // TODO(1236192): Most runtime routines don't need the number of
2161 // arguments passed in because it is constant. At some point we
2162 // should remove this need and make the runtime routine entry code
2163 // smarter.
2164 Move(eax, Immediate(num_arguments));
2165 mov(ebx, Immediate(ExternalReference(f, isolate())));
2166 CEntryStub ces(isolate(), 1, save_doubles);
2167 CallStub(&ces);
2168 }
2169
2170
2171 void MacroAssembler::CallExternalReference(ExternalReference ref,
2172 int num_arguments) {
2173 mov(eax, Immediate(num_arguments));
2174 mov(ebx, Immediate(ref));
2175
2176 CEntryStub stub(isolate(), 1);
2177 CallStub(&stub);
2178 }
2179
2180
2181 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
2182 // ----------- S t a t e -------------
2183 // -- esp[0] : return address
2184   // -- esp[4] : argument num_arguments - 1
2185 // ...
2186   // -- esp[4 * num_arguments] : argument 0 (receiver)
2187 //
2188 // For runtime functions with variable arguments:
2189 // -- eax : number of arguments
2190 // -----------------------------------
2191
2192 const Runtime::Function* function = Runtime::FunctionForId(fid);
2193 DCHECK_EQ(1, function->result_size);
2194 if (function->nargs >= 0) {
2195 // TODO(1236192): Most runtime routines don't need the number of
2196 // arguments passed in because it is constant. At some point we
2197 // should remove this need and make the runtime routine entry code
2198 // smarter.
2199 mov(eax, Immediate(function->nargs));
2200 }
2201 JumpToExternalReference(ExternalReference(fid, isolate()));
2202 }
2203
2204
2205 void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
2206 // Set the entry point and jump to the C entry runtime stub.
2207 mov(ebx, Immediate(ext));
2208 CEntryStub ces(isolate(), 1);
2209 jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
2210 }
2211
2212 void MacroAssembler::PrepareForTailCall(
2213 const ParameterCount& callee_args_count, Register caller_args_count_reg,
2214 Register scratch0, Register scratch1, ReturnAddressState ra_state,
2215 int number_of_temp_values_after_return_address) {
2216 #if DEBUG
2217 if (callee_args_count.is_reg()) {
2218 DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
2219 scratch1));
2220 } else {
2221 DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
2222 }
2223 DCHECK(ra_state != ReturnAddressState::kNotOnStack ||
2224 number_of_temp_values_after_return_address == 0);
2225 #endif
2226
2227 // Calculate the destination address where we will put the return address
2228   // after we drop the current frame.
2229 Register new_sp_reg = scratch0;
2230 if (callee_args_count.is_reg()) {
2231 sub(caller_args_count_reg, callee_args_count.reg());
2232 lea(new_sp_reg,
2233 Operand(ebp, caller_args_count_reg, times_pointer_size,
2234 StandardFrameConstants::kCallerPCOffset -
2235 number_of_temp_values_after_return_address * kPointerSize));
2236 } else {
2237 lea(new_sp_reg, Operand(ebp, caller_args_count_reg, times_pointer_size,
2238 StandardFrameConstants::kCallerPCOffset -
2239 (callee_args_count.immediate() +
2240 number_of_temp_values_after_return_address) *
2241 kPointerSize));
2242 }
2243
2244 if (FLAG_debug_code) {
2245 cmp(esp, new_sp_reg);
2246 Check(below, kStackAccessBelowStackPointer);
2247 }
2248
2249   // Copy the return address from the caller's frame to the current frame's
2250   // return address slot so that it is not trashed, and let the following
2251   // loop copy it to the right place.
2252 Register tmp_reg = scratch1;
2253 if (ra_state == ReturnAddressState::kOnStack) {
2254 mov(tmp_reg, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
2255 mov(Operand(esp, number_of_temp_values_after_return_address * kPointerSize),
2256 tmp_reg);
2257 } else {
2258 DCHECK(ReturnAddressState::kNotOnStack == ra_state);
2259 DCHECK_EQ(0, number_of_temp_values_after_return_address);
2260 Push(Operand(ebp, StandardFrameConstants::kCallerPCOffset));
2261 }
2262
2263 // Restore caller's frame pointer now as it could be overwritten by
2264 // the copying loop.
2265 mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
2266
2267 // +2 here is to copy both receiver and return address.
2268 Register count_reg = caller_args_count_reg;
2269 if (callee_args_count.is_reg()) {
2270 lea(count_reg, Operand(callee_args_count.reg(),
2271 2 + number_of_temp_values_after_return_address));
2272 } else {
2273 mov(count_reg, Immediate(callee_args_count.immediate() + 2 +
2274 number_of_temp_values_after_return_address));
2275 // TODO(ishell): Unroll copying loop for small immediate values.
2276 }
2277
2278 // Now copy callee arguments to the caller frame going backwards to avoid
2279 // callee arguments corruption (source and destination areas could overlap).
2280 Label loop, entry;
2281 jmp(&entry, Label::kNear);
2282 bind(&loop);
2283 dec(count_reg);
2284 mov(tmp_reg, Operand(esp, count_reg, times_pointer_size, 0));
2285 mov(Operand(new_sp_reg, count_reg, times_pointer_size, 0), tmp_reg);
2286 bind(&entry);
2287 cmp(count_reg, Immediate(0));
2288 j(not_equal, &loop, Label::kNear);
2289
2290 // Leave current frame.
2291 mov(esp, new_sp_reg);
2292 }
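// Editorial sketch of the net effect of PrepareForTailCall (simplified): the
// caller's frame is unlinked, the return address plus the callee's receiver
// and arguments are slid up to where the caller's frame began, and esp is
// left pointing at the copied return address, so the subsequent jump to the
// callee behaves as if the caller's own caller had invoked it directly.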
2293
2294 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2295 const ParameterCount& actual,
2296 Label* done,
2297 bool* definitely_mismatches,
2298 InvokeFlag flag,
2299 Label::Distance done_near,
2300 const CallWrapper& call_wrapper) {
2301 bool definitely_matches = false;
2302 *definitely_mismatches = false;
2303 Label invoke;
2304 if (expected.is_immediate()) {
2305 DCHECK(actual.is_immediate());
2306 mov(eax, actual.immediate());
2307 if (expected.immediate() == actual.immediate()) {
2308 definitely_matches = true;
2309 } else {
2310 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
2311 if (expected.immediate() == sentinel) {
2312 // Don't worry about adapting arguments for builtins that
2313       // don't want that done. Skip the adaptation code by making it look
2314 // like we have a match between expected and actual number of
2315 // arguments.
2316 definitely_matches = true;
2317 } else {
2318 *definitely_mismatches = true;
2319 mov(ebx, expected.immediate());
2320 }
2321 }
2322 } else {
2323 if (actual.is_immediate()) {
2324 // Expected is in register, actual is immediate. This is the
2325 // case when we invoke function values without going through the
2326 // IC mechanism.
2327 mov(eax, actual.immediate());
2328 cmp(expected.reg(), actual.immediate());
2329 j(equal, &invoke);
2330 DCHECK(expected.reg().is(ebx));
2331 } else if (!expected.reg().is(actual.reg())) {
2332 // Both expected and actual are in (different) registers. This
2333 // is the case when we invoke functions using call and apply.
2334 cmp(expected.reg(), actual.reg());
2335 j(equal, &invoke);
2336 DCHECK(actual.reg().is(eax));
2337 DCHECK(expected.reg().is(ebx));
2338 } else {
2339 Move(eax, actual.reg());
2340 }
2341 }
2342
2343 if (!definitely_matches) {
2344 Handle<Code> adaptor =
2345 isolate()->builtins()->ArgumentsAdaptorTrampoline();
2346 if (flag == CALL_FUNCTION) {
2347 call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
2348 call(adaptor, RelocInfo::CODE_TARGET);
2349 call_wrapper.AfterCall();
2350 if (!*definitely_mismatches) {
2351 jmp(done, done_near);
2352 }
2353 } else {
2354 jmp(adaptor, RelocInfo::CODE_TARGET);
2355 }
2356 bind(&invoke);
2357 }
2358 }
2359
2360
2361 void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
2362 const ParameterCount& expected,
2363 const ParameterCount& actual) {
2364 Label skip_flooding;
2365 ExternalReference last_step_action =
2366 ExternalReference::debug_last_step_action_address(isolate());
2367 STATIC_ASSERT(StepFrame > StepIn);
2368 cmpb(Operand::StaticVariable(last_step_action), Immediate(StepIn));
2369 j(less, &skip_flooding);
2370 {
2371 FrameScope frame(this,
2372 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
2373 if (expected.is_reg()) {
2374 SmiTag(expected.reg());
2375 Push(expected.reg());
2376 }
2377 if (actual.is_reg()) {
2378 SmiTag(actual.reg());
2379 Push(actual.reg());
2380 }
2381 if (new_target.is_valid()) {
2382 Push(new_target);
2383 }
2384 Push(fun);
2385 Push(fun);
2386 CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
2387 Pop(fun);
2388 if (new_target.is_valid()) {
2389 Pop(new_target);
2390 }
2391 if (actual.is_reg()) {
2392 Pop(actual.reg());
2393 SmiUntag(actual.reg());
2394 }
2395 if (expected.is_reg()) {
2396 Pop(expected.reg());
2397 SmiUntag(expected.reg());
2398 }
2399 }
2400 bind(&skip_flooding);
2401 }
2402
2403
2404 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
2405 const ParameterCount& expected,
2406 const ParameterCount& actual,
2407 InvokeFlag flag,
2408 const CallWrapper& call_wrapper) {
2409 // You can't call a function without a valid frame.
2410 DCHECK(flag == JUMP_FUNCTION || has_frame());
2411 DCHECK(function.is(edi));
2412 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(edx));
2413
2414 if (call_wrapper.NeedsDebugStepCheck()) {
2415 FloodFunctionIfStepping(function, new_target, expected, actual);
2416 }
2417
2418 // Clear the new.target register if not given.
2419 if (!new_target.is_valid()) {
2420 mov(edx, isolate()->factory()->undefined_value());
2421 }
2422
2423 Label done;
2424 bool definitely_mismatches = false;
2425 InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
2426 Label::kNear, call_wrapper);
2427 if (!definitely_mismatches) {
2428 // We call indirectly through the code field in the function to
2429 // allow recompilation to take effect without changing any of the
2430 // call sites.
2431 Operand code = FieldOperand(function, JSFunction::kCodeEntryOffset);
2432 if (flag == CALL_FUNCTION) {
2433 call_wrapper.BeforeCall(CallSize(code));
2434 call(code);
2435 call_wrapper.AfterCall();
2436 } else {
2437 DCHECK(flag == JUMP_FUNCTION);
2438 jmp(code);
2439 }
2440 bind(&done);
2441 }
2442 }
2443
2444
2445 void MacroAssembler::InvokeFunction(Register fun,
2446 Register new_target,
2447 const ParameterCount& actual,
2448 InvokeFlag flag,
2449 const CallWrapper& call_wrapper) {
2450 // You can't call a function without a valid frame.
2451 DCHECK(flag == JUMP_FUNCTION || has_frame());
2452
2453 DCHECK(fun.is(edi));
2454 mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
2455 mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
2456 mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
2457 SmiUntag(ebx);
2458
2459 ParameterCount expected(ebx);
2460 InvokeFunctionCode(edi, new_target, expected, actual, flag, call_wrapper);
2461 }
2462
2463
2464 void MacroAssembler::InvokeFunction(Register fun,
2465 const ParameterCount& expected,
2466 const ParameterCount& actual,
2467 InvokeFlag flag,
2468 const CallWrapper& call_wrapper) {
2469 // You can't call a function without a valid frame.
2470 DCHECK(flag == JUMP_FUNCTION || has_frame());
2471
2472 DCHECK(fun.is(edi));
2473 mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
2474
2475 InvokeFunctionCode(edi, no_reg, expected, actual, flag, call_wrapper);
2476 }
2477
2478
2479 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
2480 const ParameterCount& expected,
2481 const ParameterCount& actual,
2482 InvokeFlag flag,
2483 const CallWrapper& call_wrapper) {
2484 LoadHeapObject(edi, function);
2485 InvokeFunction(edi, expected, actual, flag, call_wrapper);
2486 }
2487
2488
2489 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2490 if (context_chain_length > 0) {
2491 // Move up the chain of contexts to the context containing the slot.
2492 mov(dst, Operand(esi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2493 for (int i = 1; i < context_chain_length; i++) {
2494 mov(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2495 }
2496 } else {
2497 // Slot is in the current function context. Move it into the
2498 // destination register in case we store into it (the write barrier
2499 // cannot be allowed to destroy the context in esi).
2500 mov(dst, esi);
2501 }
2502
2503 // We should not have found a with context by walking the context chain
2504 // (i.e., the static scope chain and runtime context chain do not agree).
2505 // A variable occurring in such a scope should have slot type LOOKUP and
2506 // not CONTEXT.
2507 if (emit_debug_code()) {
2508 cmp(FieldOperand(dst, HeapObject::kMapOffset),
2509 isolate()->factory()->with_context_map());
2510 Check(not_equal, kVariableResolvedToWithContext);
2511 }
2512 }
2513
2514
2515 void MacroAssembler::LoadGlobalProxy(Register dst) {
2516 mov(dst, NativeContextOperand());
2517 mov(dst, ContextOperand(dst, Context::GLOBAL_PROXY_INDEX));
2518 }
2519
2520
2521 void MacroAssembler::LoadTransitionedArrayMapConditional(
2522 ElementsKind expected_kind,
2523 ElementsKind transitioned_kind,
2524 Register map_in_out,
2525 Register scratch,
2526 Label* no_map_match) {
2527 DCHECK(IsFastElementsKind(expected_kind));
2528 DCHECK(IsFastElementsKind(transitioned_kind));
2529
2530 // Check that the function's map is the same as the expected cached map.
2531 mov(scratch, NativeContextOperand());
2532 cmp(map_in_out,
2533 ContextOperand(scratch, Context::ArrayMapIndex(expected_kind)));
2534 j(not_equal, no_map_match);
2535
2536 // Use the transitioned cached map.
2537 mov(map_in_out,
2538 ContextOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
2539 }
2540
2541
2542 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2543 // Load the native context from the current context.
2544 mov(function, NativeContextOperand());
2545 // Load the function from the native context.
2546 mov(function, ContextOperand(function, index));
2547 }
2548
2549
2550 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2551 Register map) {
2552 // Load the initial map. The global functions all have initial maps.
2553 mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2554 if (emit_debug_code()) {
2555 Label ok, fail;
2556 CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
2557 jmp(&ok);
2558 bind(&fail);
2559 Abort(kGlobalFunctionsMustHaveInitialMap);
2560 bind(&ok);
2561 }
2562 }
2563
2564
2565 // Store the value in register src in the safepoint register stack
2566 // slot for register dst.
2567 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
2568 mov(SafepointRegisterSlot(dst), src);
2569 }
2570
2571
2572 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Immediate src) {
2573 mov(SafepointRegisterSlot(dst), src);
2574 }
2575
2576
2577 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
2578 mov(dst, SafepointRegisterSlot(src));
2579 }
2580
2581
2582 Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
2583 return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
2584 }
2585
2586
2587 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
2588 // The registers are pushed starting with the lowest encoding,
2589 // which means that lowest encodings are furthest away from
2590 // the stack pointer.
2591 DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
2592 return kNumSafepointRegisters - reg_code - 1;
2593 }
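// Editorial note, a worked instance: with kNumSafepointRegisters == 8, a
// pushad-style save pushes eax (code 0) first, so it lands farthest from
// esp and SafepointRegisterStackIndex(0) == 7. SafepointRegisterSlot then
// addresses it as Operand(esp, 7 * kPointerSize).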
2594
2595
2596 void MacroAssembler::LoadHeapObject(Register result,
2597 Handle<HeapObject> object) {
2598 AllowDeferredHandleDereference embedding_raw_address;
2599 if (isolate()->heap()->InNewSpace(*object)) {
2600 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2601 mov(result, Operand::ForCell(cell));
2602 } else {
2603 mov(result, object);
2604 }
2605 }
2606
2607
2608 void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
2609 AllowDeferredHandleDereference using_raw_address;
2610 if (isolate()->heap()->InNewSpace(*object)) {
2611 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2612 cmp(reg, Operand::ForCell(cell));
2613 } else {
2614 cmp(reg, object);
2615 }
2616 }
2617
2618
2619 void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
2620 AllowDeferredHandleDereference using_raw_address;
2621 if (isolate()->heap()->InNewSpace(*object)) {
2622 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2623 push(Operand::ForCell(cell));
2624 } else {
2625 Push(object);
2626 }
2627 }
2628
2629
2630 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
2631 Register scratch) {
2632 mov(scratch, cell);
2633 cmp(value, FieldOperand(scratch, WeakCell::kValueOffset));
2634 }
2635
2636
2637 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
2638 mov(value, cell);
2639 mov(value, FieldOperand(value, WeakCell::kValueOffset));
2640 }
2641
2642
2643 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
2644 Label* miss) {
2645 GetWeakValue(value, cell);
2646 JumpIfSmi(value, miss);
2647 }
2648
2649
2650 void MacroAssembler::Ret() {
2651 ret(0);
2652 }
2653
2654
2655 void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
2656 if (is_uint16(bytes_dropped)) {
2657 ret(bytes_dropped);
2658 } else {
2659 pop(scratch);
2660 add(esp, Immediate(bytes_dropped));
2661 push(scratch);
2662 ret(0);
2663 }
2664 }
2665
2666
2667 void MacroAssembler::Drop(int stack_elements) {
2668 if (stack_elements > 0) {
2669 add(esp, Immediate(stack_elements * kPointerSize));
2670 }
2671 }
2672
2673
2674 void MacroAssembler::Move(Register dst, Register src) {
2675 if (!dst.is(src)) {
2676 mov(dst, src);
2677 }
2678 }
2679
2680
2681 void MacroAssembler::Move(Register dst, const Immediate& x) {
2682 if (x.is_zero() && RelocInfo::IsNone(x.rmode_)) {
2683 xor_(dst, dst); // Shorter than mov of 32-bit immediate 0.
2684 } else {
2685 mov(dst, x);
2686 }
2687 }
2688
2689
2690 void MacroAssembler::Move(const Operand& dst, const Immediate& x) {
2691 mov(dst, x);
2692 }
2693
2694
2695 void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
2696 if (src == 0) {
2697 pxor(dst, dst);
2698 } else {
2699 unsigned cnt = base::bits::CountPopulation32(src);
2700 unsigned nlz = base::bits::CountLeadingZeros32(src);
2701 unsigned ntz = base::bits::CountTrailingZeros32(src);
2702 if (nlz + cnt + ntz == 32) {
2703 pcmpeqd(dst, dst);
2704 if (ntz == 0) {
2705 psrld(dst, 32 - cnt);
2706 } else {
2707 pslld(dst, 32 - cnt);
2708 if (nlz != 0) psrld(dst, nlz);
2709 }
2710 } else {
2711 push(eax);
2712 mov(eax, Immediate(src));
2713 movd(dst, Operand(eax));
2714 pop(eax);
2715 }
2716 }
2717 }
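// Editorial note, a worked instance of the contiguous-ones fast path above:
// src == 0x00FF0000 has cnt == 8, nlz == 8, ntz == 16, so nlz + cnt + ntz
// == 32. pcmpeqd sets the lane to all ones, pslld(dst, 24) leaves
// 0xFF000000, and psrld(dst, 8) yields 0x00FF0000, all without touching a
// general purpose register.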
2718
2719
2720 void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
2721 if (src == 0) {
2722 pxor(dst, dst);
2723 } else {
2724 uint32_t lower = static_cast<uint32_t>(src);
2725 uint32_t upper = static_cast<uint32_t>(src >> 32);
2726 unsigned cnt = base::bits::CountPopulation64(src);
2727 unsigned nlz = base::bits::CountLeadingZeros64(src);
2728 unsigned ntz = base::bits::CountTrailingZeros64(src);
2729 if (nlz + cnt + ntz == 64) {
2730 pcmpeqd(dst, dst);
2731 if (ntz == 0) {
2732 psrlq(dst, 64 - cnt);
2733 } else {
2734 psllq(dst, 64 - cnt);
2735 if (nlz != 0) psrlq(dst, nlz);
2736 }
2737 } else if (lower == 0) {
2738 Move(dst, upper);
2739 psllq(dst, 32);
2740 } else if (CpuFeatures::IsSupported(SSE4_1)) {
2741 CpuFeatureScope scope(this, SSE4_1);
2742 push(eax);
2743 Move(eax, Immediate(lower));
2744 movd(dst, Operand(eax));
2745 Move(eax, Immediate(upper));
2746 pinsrd(dst, Operand(eax), 1);
2747 pop(eax);
2748 } else {
2749 push(Immediate(upper));
2750 push(Immediate(lower));
2751 movsd(dst, Operand(esp, 0));
2752 add(esp, Immediate(kDoubleSize));
2753 }
2754 }
2755 }
2756
2757
2758 void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
2759 if (imm8 == 0) {
2760 movd(dst, src);
2761 return;
2762 }
2763 DCHECK_EQ(1, imm8);
2764 if (CpuFeatures::IsSupported(SSE4_1)) {
2765 CpuFeatureScope sse_scope(this, SSE4_1);
2766 pextrd(dst, src, imm8);
2767 return;
2768 }
2769 pshufd(xmm0, src, 1);
2770 movd(dst, xmm0);
2771 }
2772
2773
2774 void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
2775 DCHECK(imm8 == 0 || imm8 == 1);
2776 if (CpuFeatures::IsSupported(SSE4_1)) {
2777 CpuFeatureScope sse_scope(this, SSE4_1);
2778 pinsrd(dst, src, imm8);
2779 return;
2780 }
2781 movd(xmm0, src);
2782 if (imm8 == 1) {
2783 punpckldq(dst, xmm0);
2784 } else {
2785 DCHECK_EQ(0, imm8);
2786 psrlq(dst, 32);
2787 punpckldq(xmm0, dst);
2788 movaps(dst, xmm0);
2789 }
2790 }
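// Editorial note, a worked instance of the SSE2 fallback above: for
// imm8 == 1, movd puts src in xmm0's low lane and punpckldq(dst, xmm0)
// interleaves the low dwords, giving [dst0, src, ...]. For imm8 == 0, psrlq
// moves dst's old lane 1 down, punpckldq(xmm0, dst) assembles
// [src, old dst1, ...] in xmm0, and movaps copies it back into dst.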
2791
2792
2793 void MacroAssembler::Lzcnt(Register dst, const Operand& src) {
2794 if (CpuFeatures::IsSupported(LZCNT)) {
2795 CpuFeatureScope scope(this, LZCNT);
2796 lzcnt(dst, src);
2797 return;
2798 }
2799 Label not_zero_src;
2800 bsr(dst, src);
2801   j(not_zero, &not_zero_src, Label::kNear);
2802 Move(dst, Immediate(63)); // 63^31 == 32
2803   bind(&not_zero_src);
2804 xor_(dst, Immediate(31)); // for x in [0..31], 31^x == 31-x.
2805 }
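// Editorial note, a worked instance of the bsr fallback above: bsr returns
// the index of the highest set bit, so src == 0x10 yields 4 and 4 ^ 31 == 27
// leading zeros. For src == 0, bsr sets ZF and leaves dst undefined, so dst
// is forced to 63 and 63 ^ 31 == 32, the value lzcnt defines for zero input.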
2806
2807
2808 void MacroAssembler::Tzcnt(Register dst, const Operand& src) {
2809 if (CpuFeatures::IsSupported(BMI1)) {
2810 CpuFeatureScope scope(this, BMI1);
2811 tzcnt(dst, src);
2812 return;
2813 }
2814 Label not_zero_src;
2815 bsf(dst, src);
2816   j(not_zero, &not_zero_src, Label::kNear);
2817 Move(dst, Immediate(32)); // The result of tzcnt is 32 if src = 0.
2818   bind(&not_zero_src);
2819 }
2820
2821
2822 void MacroAssembler::Popcnt(Register dst, const Operand& src) {
2823 if (CpuFeatures::IsSupported(POPCNT)) {
2824 CpuFeatureScope scope(this, POPCNT);
2825 popcnt(dst, src);
2826 return;
2827 }
2828 UNREACHABLE();
2829 }
2830
2831
2832 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
2833 if (FLAG_native_code_counters && counter->Enabled()) {
2834 mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
2835 }
2836 }
2837
2838
2839 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
2840 DCHECK(value > 0);
2841 if (FLAG_native_code_counters && counter->Enabled()) {
2842 Operand operand = Operand::StaticVariable(ExternalReference(counter));
2843 if (value == 1) {
2844 inc(operand);
2845 } else {
2846 add(operand, Immediate(value));
2847 }
2848 }
2849 }
2850
2851
2852 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
2853 DCHECK(value > 0);
2854 if (FLAG_native_code_counters && counter->Enabled()) {
2855 Operand operand = Operand::StaticVariable(ExternalReference(counter));
2856 if (value == 1) {
2857 dec(operand);
2858 } else {
2859 sub(operand, Immediate(value));
2860 }
2861 }
2862 }
2863
2864
2865 void MacroAssembler::IncrementCounter(Condition cc,
2866 StatsCounter* counter,
2867 int value) {
2868 DCHECK(value > 0);
2869 if (FLAG_native_code_counters && counter->Enabled()) {
2870 Label skip;
2871 j(NegateCondition(cc), &skip);
2872 pushfd();
2873 IncrementCounter(counter, value);
2874 popfd();
2875 bind(&skip);
2876 }
2877 }
2878
2879
2880 void MacroAssembler::DecrementCounter(Condition cc,
2881 StatsCounter* counter,
2882 int value) {
2883 DCHECK(value > 0);
2884 if (FLAG_native_code_counters && counter->Enabled()) {
2885 Label skip;
2886 j(NegateCondition(cc), &skip);
2887 pushfd();
2888 DecrementCounter(counter, value);
2889 popfd();
2890 bind(&skip);
2891 }
2892 }
2893
2894
2895 void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
2896 if (emit_debug_code()) Check(cc, reason);
2897 }
2898
2899
2900 void MacroAssembler::AssertFastElements(Register elements) {
2901 if (emit_debug_code()) {
2902 Factory* factory = isolate()->factory();
2903 Label ok;
2904 cmp(FieldOperand(elements, HeapObject::kMapOffset),
2905 Immediate(factory->fixed_array_map()));
2906 j(equal, &ok);
2907 cmp(FieldOperand(elements, HeapObject::kMapOffset),
2908 Immediate(factory->fixed_double_array_map()));
2909 j(equal, &ok);
2910 cmp(FieldOperand(elements, HeapObject::kMapOffset),
2911 Immediate(factory->fixed_cow_array_map()));
2912 j(equal, &ok);
2913 Abort(kJSObjectWithFastElementsMapHasSlowElements);
2914 bind(&ok);
2915 }
2916 }
2917
2918
2919 void MacroAssembler::Check(Condition cc, BailoutReason reason) {
2920 Label L;
2921 j(cc, &L);
2922 Abort(reason);
2923 // will not return here
2924 bind(&L);
2925 }
2926
2927
2928 void MacroAssembler::CheckStackAlignment() {
2929 int frame_alignment = base::OS::ActivationFrameAlignment();
2930 int frame_alignment_mask = frame_alignment - 1;
2931 if (frame_alignment > kPointerSize) {
2932 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
2933 Label alignment_as_expected;
2934 test(esp, Immediate(frame_alignment_mask));
2935 j(zero, &alignment_as_expected);
2936 // Abort if stack is not aligned.
2937 int3();
2938 bind(&alignment_as_expected);
2939 }
2940 }
2941
2942
2943 void MacroAssembler::Abort(BailoutReason reason) {
2944 #ifdef DEBUG
2945 const char* msg = GetBailoutReason(reason);
2946 if (msg != NULL) {
2947 RecordComment("Abort message: ");
2948 RecordComment(msg);
2949 }
2950
2951 if (FLAG_trap_on_abort) {
2952 int3();
2953 return;
2954 }
2955 #endif
2956
2957 push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(reason))));
2958 // Disable stub call restrictions to always allow calls to abort.
2959 if (!has_frame_) {
2960 // We don't actually want to generate a pile of code for this, so just
2961 // claim there is a stack frame, without generating one.
2962 FrameScope scope(this, StackFrame::NONE);
2963 CallRuntime(Runtime::kAbort);
2964 } else {
2965 CallRuntime(Runtime::kAbort);
2966 }
2967 // will not return here
2968 int3();
2969 }
2970
2971
2972 void MacroAssembler::LoadInstanceDescriptors(Register map,
2973 Register descriptors) {
2974 mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
2975 }
2976
2977
2978 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
2979 mov(dst, FieldOperand(map, Map::kBitField3Offset));
2980 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
2981 }
2982
2983
2984 void MacroAssembler::LoadAccessor(Register dst, Register holder,
2985 int accessor_index,
2986 AccessorComponent accessor) {
2987 mov(dst, FieldOperand(holder, HeapObject::kMapOffset));
2988 LoadInstanceDescriptors(dst, dst);
2989 mov(dst, FieldOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
2990 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
2991 : AccessorPair::kSetterOffset;
2992 mov(dst, FieldOperand(dst, offset));
2993 }
2994
2995
2996 void MacroAssembler::LoadPowerOf2(XMMRegister dst,
2997 Register scratch,
2998 int power) {
2999 DCHECK(is_uintn(power + HeapNumber::kExponentBias,
3000 HeapNumber::kExponentBits));
3001 mov(scratch, Immediate(power + HeapNumber::kExponentBias));
3002 movd(dst, scratch);
3003 psllq(dst, HeapNumber::kMantissaBits);
3004 }
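// Editorial note, a worked instance: an IEEE 754 double with a zero mantissa
// encodes 2^(exponent - kExponentBias). For power == 3 the scratch register
// receives 3 + 1023 == 1026, and shifting left by kMantissaBits (52) builds
// the bit pattern of 8.0 directly in the XMM register.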
3005
3006
3007 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
3008 Register instance_type, Register scratch, Label* failure) {
3009 if (!scratch.is(instance_type)) {
3010 mov(scratch, instance_type);
3011 }
3012 and_(scratch,
3013 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
3014 cmp(scratch, kStringTag | kSeqStringTag | kOneByteStringTag);
3015 j(not_equal, failure);
3016 }
3017
3018
3019 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register object1,
3020 Register object2,
3021 Register scratch1,
3022 Register scratch2,
3023 Label* failure) {
3024 // Check that both objects are not smis.
3025 STATIC_ASSERT(kSmiTag == 0);
3026 mov(scratch1, object1);
3027 and_(scratch1, object2);
3028 JumpIfSmi(scratch1, failure);
3029
3030 // Load instance type for both strings.
3031 mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
3032 mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset));
3033 movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
3034 movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
3035
3036 // Check that both are flat one-byte strings.
3037 const int kFlatOneByteStringMask =
3038 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
3039 const int kFlatOneByteStringTag =
3040 kStringTag | kOneByteStringTag | kSeqStringTag;
3041 // Interleave bits from both instance types and compare them in one check.
3042 DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
3043 and_(scratch1, kFlatOneByteStringMask);
3044 and_(scratch2, kFlatOneByteStringMask);
3045 lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
3046 cmp(scratch1, kFlatOneByteStringTag | (kFlatOneByteStringTag << 3));
3047 j(not_equal, failure);
3048 }
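// Editorial note on the interleaving trick above: the DCHECK guarantees the
// masked bits and the same bits shifted left by three do not overlap, so the
// add in the lea acts as a bitwise or. If both strings are sequential
// one-byte with masked type t == kFlatOneByteStringTag, the lea produces
// t | (t << 3), exactly the constant compared against, so a single cmp
// validates both instance types at once.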
3049
3050
3051 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
3052 Label* not_unique_name,
3053 Label::Distance distance) {
3054 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3055 Label succeed;
3056 test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
3057 j(zero, &succeed);
3058 cmpb(operand, Immediate(SYMBOL_TYPE));
3059 j(not_equal, not_unique_name, distance);
3060
3061 bind(&succeed);
3062 }
3063
3064
3065 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
3066 Register index,
3067 Register value,
3068 uint32_t encoding_mask) {
3069 Label is_object;
3070 JumpIfNotSmi(string, &is_object, Label::kNear);
3071 Abort(kNonObject);
3072 bind(&is_object);
3073
3074 push(value);
3075 mov(value, FieldOperand(string, HeapObject::kMapOffset));
3076 movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
3077
3078 and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
3079 cmp(value, Immediate(encoding_mask));
3080 pop(value);
3081 Check(equal, kUnexpectedStringType);
3082
3083   // The index is assumed to come in untagged. Tag it to compare with the
3084   // string length without using a temp register; it is restored at the end
3085   // of this function.
3086 SmiTag(index);
3087 Check(no_overflow, kIndexIsTooLarge);
3088
3089 cmp(index, FieldOperand(string, String::kLengthOffset));
3090 Check(less, kIndexIsTooLarge);
3091
3092 cmp(index, Immediate(Smi::FromInt(0)));
3093 Check(greater_equal, kIndexIsNegative);
3094
3095 // Restore the index
3096 SmiUntag(index);
3097 }
3098
3099
3100 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
3101 int frame_alignment = base::OS::ActivationFrameAlignment();
3102 if (frame_alignment != 0) {
3103 // Make stack end at alignment and make room for num_arguments words
3104 // and the original value of esp.
3105 mov(scratch, esp);
3106 sub(esp, Immediate((num_arguments + 1) * kPointerSize));
3107 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
3108 and_(esp, -frame_alignment);
3109 mov(Operand(esp, num_arguments * kPointerSize), scratch);
3110 } else {
3111 sub(esp, Immediate(num_arguments * kPointerSize));
3112 }
3113 }
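// Editorial note, a worked instance: with a 16-byte activation frame
// alignment and num_arguments == 2, the code saves esp in scratch, reserves
// (2 + 1) * kPointerSize == 12 bytes, rounds esp down to a 16-byte boundary,
// and stores the saved esp at Operand(esp, 2 * kPointerSize), which is
// exactly where CallCFunction reloads it from after the call.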
3114
3115
3116 void MacroAssembler::CallCFunction(ExternalReference function,
3117 int num_arguments) {
3118 // Trashing eax is ok as it will be the return value.
3119 mov(eax, Immediate(function));
3120 CallCFunction(eax, num_arguments);
3121 }
3122
3123
3124 void MacroAssembler::CallCFunction(Register function,
3125 int num_arguments) {
3126 DCHECK(has_frame());
3127 // Check stack alignment.
3128 if (emit_debug_code()) {
3129 CheckStackAlignment();
3130 }
3131
3132 call(function);
3133 if (base::OS::ActivationFrameAlignment() != 0) {
3134 mov(esp, Operand(esp, num_arguments * kPointerSize));
3135 } else {
3136 add(esp, Immediate(num_arguments * kPointerSize));
3137 }
3138 }
3139
3140
3141 #ifdef DEBUG
3142 bool AreAliased(Register reg1,
3143 Register reg2,
3144 Register reg3,
3145 Register reg4,
3146 Register reg5,
3147 Register reg6,
3148 Register reg7,
3149 Register reg8) {
3150 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
3151 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
3152 reg7.is_valid() + reg8.is_valid();
3153
3154 RegList regs = 0;
3155 if (reg1.is_valid()) regs |= reg1.bit();
3156 if (reg2.is_valid()) regs |= reg2.bit();
3157 if (reg3.is_valid()) regs |= reg3.bit();
3158 if (reg4.is_valid()) regs |= reg4.bit();
3159 if (reg5.is_valid()) regs |= reg5.bit();
3160 if (reg6.is_valid()) regs |= reg6.bit();
3161 if (reg7.is_valid()) regs |= reg7.bit();
3162 if (reg8.is_valid()) regs |= reg8.bit();
3163 int n_of_non_aliasing_regs = NumRegs(regs);
3164
3165 return n_of_valid_regs != n_of_non_aliasing_regs;
3166 }
3167 #endif
3168
3169
3170 CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size)
3171 : address_(address),
3172 size_(size),
3173 masm_(isolate, address, size + Assembler::kGap, CodeObjectRequired::kNo) {
3174 // Create a new macro assembler pointing to the address of the code to patch.
3175 // The size is adjusted with kGap on order for the assembler to generate size
3176 // bytes of instructions without failing with buffer size constraints.
3177 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3178 }


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  Assembler::FlushICache(masm_.isolate(), address_, size_);

  // Check that the code was patched as expected.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
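
// A minimal usage sketch (the instruction patched in is illustrative):
// construct a CodePatcher over existing code, emit exactly `size` bytes
// through its assembler, and let the destructor flush the instruction cache:
//
//   CodePatcher patcher(isolate, pc, 1);
//   patcher.masm()->int3();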


void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  DCHECK(cc == zero || cc == not_zero);
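  // Clearing the low kPageAlignmentMask bits of the object address yields
  // the start of its page, where the MemoryChunk header (and with it the
  // flags word) is located.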
  if (scratch.is(object)) {
    and_(scratch, Immediate(~Page::kPageAlignmentMask));
  } else {
    mov(scratch, Immediate(~Page::kPageAlignmentMask));
    and_(scratch, object);
  }
  // A byte test suffices when the mask fits in the low byte of the flags.
  if (mask < (1 << kBitsPerByte)) {
    test_b(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
  } else {
    test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}


void MacroAssembler::CheckPageFlagForMap(
    Handle<Map> map,
    int mask,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  DCHECK(cc == zero || cc == not_zero);
  Page* page = Page::FromAddress(map->address());
  DCHECK(!serializer_enabled());  // Serializer cannot match page_flags.
  ExternalReference reference(ExternalReference::page_flags(page));
  // The inlined static address check of the page's flags relies
  // on maps never being compacted.
  DCHECK(!isolate()->heap()->mark_compact_collector()->
             IsOnEvacuationCandidate(*map));
  if (mask < (1 << kBitsPerByte)) {
    test_b(Operand::StaticVariable(reference), Immediate(mask));
  } else {
    test(Operand::StaticVariable(reference), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}


void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black,
                                 Label::Distance on_black_near) {
  HasColor(object, scratch0, scratch1, on_black, on_black_near,
           1, 1);  // kBlackBitPattern is "11".
  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
}


void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register mask_scratch,
                              Label* has_color,
                              Label::Distance has_color_distance,
                              int first_bit,
                              int second_bit) {
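  // An object's color is encoded in two adjacent mark bits: black is "11",
  // grey is "10", white is "00" (see the pattern DCHECKs in JumpIfWhite
  // below). first_bit and second_bit select the pattern to match.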
  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));

  GetMarkBits(object, bitmap_scratch, mask_scratch);

  Label other_color, word_boundary;
  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
  add(mask_scratch, mask_scratch);  // Shift left 1 by adding.
  j(zero, &word_boundary, Label::kNear);
  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
  jmp(&other_color, Label::kNear);

  bind(&word_boundary);
  // The shifted mask overflowed to zero: the second mark bit is bit 0 of the
  // next bitmap cell.
  test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize),
         Immediate(1));
  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
  bind(&other_color);
}


void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  DCHECK(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
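  // The mark bitmap lives at MemoryChunk::kHeaderSize within the page and
  // holds one bit per pointer-size word. With 32-bit cells and 4-byte words,
  // one bitmap byte covers 32 bytes of heap: the object's cell is at byte
  // offset ((addr & kPageAlignmentMask) >> shift) & ~(kBytesPerCell - 1),
  // and its bit within the cell is (addr >> kPointerSizeLog2) & 31, which is
  // exactly what the code below computes.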
  mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
  and_(bitmap_reg, addr_reg);  // bitmap_reg = start of the object's page.
  mov(ecx, addr_reg);
  int shift =
      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
  shr(ecx, shift);
  and_(ecx,
       (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));

  add(bitmap_reg, ecx);  // Page start + cell offset; callers add kHeaderSize.
  mov(ecx, addr_reg);
  shr(ecx, kPointerSizeLog2);
  and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
  mov(mask_reg, Immediate(1));
  shl_cl(mask_reg);  // mask_reg = 1 << (word index within the cell).
}


void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
                                 Register mask_scratch, Label* value_is_white,
                                 Label::Distance distance) {
  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  j(zero, value_is_white, distance);
}


void MacroAssembler::EnumLength(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  mov(dst, FieldOperand(map, Map::kBitField3Offset));
  and_(dst, Immediate(Map::EnumLengthBits::kMask));
  SmiTag(dst);
}


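// Verifies that a for-in over the object in eax can use the enum cache: the
// receiver's map must have a valid enum length, every prototype's map must
// have an empty enum cache, and each object's elements must be the empty
// fixed array or the empty slow element dictionary. Jumps to call_runtime
// otherwise. Clobbers ebx, ecx and edx.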
void MacroAssembler::CheckEnumCache(Label* call_runtime) {
  Label next, start;
  mov(ecx, eax);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));

  EnumLength(edx, ebx);
  cmp(edx, Immediate(Smi::FromInt(kInvalidEnumCacheSentinel)));
  j(equal, call_runtime);

  jmp(&start);

  bind(&next);
  mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(edx, ebx);
  cmp(edx, Immediate(Smi::FromInt(0)));
  j(not_equal, call_runtime);

  bind(&start);

  // Check that there are no elements. Register ecx contains the current JS
  // object we've reached through the prototype chain.
  Label no_elements;
  mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
  cmp(ecx, isolate()->factory()->empty_fixed_array());
  j(equal, &no_elements);

  // Second chance, the object may be using the empty slow element dictionary.
  cmp(ecx, isolate()->factory()->empty_slow_element_dictionary());
  j(not_equal, call_runtime);

  bind(&no_elements);
  mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
  cmp(ecx, isolate()->factory()->null_value());
  j(not_equal, &next);
}


void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg,
    Label* no_memento_found) {
  Label map_check;
  Label top_check;
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
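  // An AllocationMemento, if present, sits immediately after the JSArray, so
  // its map word is at JSArray::kSize past the array's start; subtracting
  // kHeapObjectTag compensates for the tagged receiver pointer.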

  // Bail out if the object is not in new space.
  JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
  // If the object is in new space, we need to check whether it is on the same
  // page as the current top.
  lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
  xor_(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
  test(scratch_reg, Immediate(~Page::kPageAlignmentMask));
  j(zero, &top_check);
  // The object is on a different page than allocation top. Bail out if the
  // object sits on the page boundary as no memento can follow and we cannot
  // touch the memory following it.
  lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
  xor_(scratch_reg, receiver_reg);
  test(scratch_reg, Immediate(~Page::kPageAlignmentMask));
  j(not_zero, no_memento_found);
  // Continue with the actual map check.
  jmp(&map_check);
  // If top is on the same page as the current object, we need to check whether
  // we are below top.
  bind(&top_check);
  lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
  cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
  j(greater, no_memento_found);
  // Memento map check.
  bind(&map_check);
  mov(scratch_reg, Operand(receiver_reg, kMementoMapOffset));
  cmp(scratch_reg, Immediate(isolate()->factory()->allocation_memento_map()));
}


void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!scratch1.is(scratch0));
  Factory* factory = isolate()->factory();
  Register current = scratch0;
  Label loop_again, end;

  // Start with the object's prototype; `current` walks up the chain.
  mov(current, object);
  mov(current, FieldOperand(current, HeapObject::kMapOffset));
  mov(current, FieldOperand(current, Map::kPrototypeOffset));
  cmp(current, Immediate(factory->null_value()));
  j(equal, &end);

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  mov(current, FieldOperand(current, HeapObject::kMapOffset));
  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  CmpInstanceType(current, JS_OBJECT_TYPE);
  j(below, found);
  mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
  j(equal, found);
  mov(current, FieldOperand(current, Map::kPrototypeOffset));
  cmp(current, Immediate(factory->null_value()));
  j(not_equal, &loop_again);

  bind(&end);
}


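// Computes the truncated signed quotient dividend / divisor and leaves it in
// edx, clobbering eax, using the multiply-by-magic-number technique (Hacker's
// Delight, ch. 10): take the high half of dividend * multiplier, apply the
// sign corrections, then shift. For example, division by 3 uses multiplier
// 0x55555556 with shift 0, so edx = hi32(dividend * 0x55555556), plus one
// for negative dividends.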
void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
  DCHECK(!dividend.is(eax));
  DCHECK(!dividend.is(edx));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  mov(eax, Immediate(mag.multiplier));
  imul(dividend);  // edx:eax = multiplier * dividend (signed).
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) add(edx, dividend);
  if (divisor < 0 && !neg && mag.multiplier > 0) sub(edx, dividend);
  if (mag.shift > 0) sar(edx, mag.shift);
  // Add the dividend's sign bit so negative quotients round toward zero.
  mov(eax, dividend);
  shr(eax, 31);
  add(edx, eax);
}


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_IA32