// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_X64

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/heap/heap.h"
#include "src/register-configuration.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
                               CodeObjectRequired create_code_object)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      has_frame_(false),
      root_array_available_(true) {
  if (create_code_object == CodeObjectRequired::kYes) {
    code_object_ =
        Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
  }
}


static const int64_t kInvalidRootRegisterDelta = -1;


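// Computes the displacement of |other| from the value kept in
// kRootRegister (the heap's roots array start, offset by
// kRootRegisterBias), so callers can address external references as
// Operand(kRootRegister, delta) instead of materializing a full 64-bit
// address. Returns kInvalidRootRegisterDelta when the reference lies
// outside the isolate and the generated code must have a predictable size.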
int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
  if (predictable_code_size() &&
      (other.address() < reinterpret_cast<Address>(isolate()) ||
       other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
    return kInvalidRootRegisterDelta;
  }
  Address roots_register_value = kRootRegisterBias +
      reinterpret_cast<Address>(isolate()->heap()->roots_array_start());

  int64_t delta = kInvalidRootRegisterDelta;  // Bogus initialization.
  if (kPointerSize == kInt64Size) {
    delta = other.address() - roots_register_value;
  } else {
    // For x32, zero extend the address to 64-bit and calculate the delta.
    uint64_t o = static_cast<uint32_t>(
        reinterpret_cast<intptr_t>(other.address()));
    uint64_t r = static_cast<uint32_t>(
        reinterpret_cast<intptr_t>(roots_register_value));
    delta = o - r;
  }
  return delta;
}


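// Returns the cheapest operand that addresses |target|: a
// kRootRegister-relative operand when the delta fits in an int32 (no
// scratch register needed), otherwise the full address loaded into
// |scratch|.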
Operand MacroAssembler::ExternalOperand(ExternalReference target,
                                        Register scratch) {
  if (root_array_available_ && !serializer_enabled()) {
    int64_t delta = RootRegisterDelta(target);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      return Operand(kRootRegister, static_cast<int32_t>(delta));
    }
  }
  Move(scratch, target);
  return Operand(scratch, 0);
}


void MacroAssembler::Load(Register destination, ExternalReference source) {
  if (root_array_available_ && !serializer_enabled()) {
    int64_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
      return;
    }
  }
  // Safe code.
  if (destination.is(rax)) {
    load_rax(source);
  } else {
    Move(kScratchRegister, source);
    movp(destination, Operand(kScratchRegister, 0));
  }
}


void MacroAssembler::Store(ExternalReference destination, Register source) {
  if (root_array_available_ && !serializer_enabled()) {
    int64_t delta = RootRegisterDelta(destination);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
      return;
    }
  }
  // Safe code.
  if (source.is(rax)) {
    store_rax(destination);
  } else {
    Move(kScratchRegister, destination);
    movp(Operand(kScratchRegister, 0), source);
  }
}


void MacroAssembler::LoadAddress(Register destination,
                                 ExternalReference source) {
  if (root_array_available_ && !serializer_enabled()) {
    int64_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
      return;
    }
  }
  // Safe code.
  Move(destination, source);
}


int MacroAssembler::LoadAddressSize(ExternalReference source) {
  if (root_array_available_ && !serializer_enabled()) {
    // This calculation depends on the internals of LoadAddress.
    // Its correctness is ensured by the asserts in the Call
    // instruction below.
    int64_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      // Operand is leap(scratch, Operand(kRootRegister, delta));
      // Opcodes : REX.W 8D ModRM Disp8/Disp32  - 4 or 7.
      int size = 4;
      if (!is_int8(static_cast<int32_t>(delta))) {
        size += 3;  // Need full four-byte displacement in lea.
      }
      return size;
    }
  }
  // Size of movp(destination, src);
  return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
}


void MacroAssembler::PushAddress(ExternalReference source) {
  int64_t address = reinterpret_cast<int64_t>(source.address());
  if (is_int32(address) && !serializer_enabled()) {
    if (emit_debug_code()) {
      Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
    }
    Push(Immediate(static_cast<int32_t>(address)));
    return;
  }
  LoadAddress(kScratchRegister, source);
  Push(kScratchRegister);
}


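// The root-list accessors below address entries directly off
// kRootRegister, which holds roots_array_start + kRootRegisterBias, so
// entry |index| lives at displacement
// (index << kPointerSizeLog2) - kRootRegisterBias. For example, with
// 8-byte pointers and a bias of 128, index 20 maps to displacement
// 20 * 8 - 128 = 32.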
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  movp(destination, Operand(kRootRegister,
                            (index << kPointerSizeLog2) - kRootRegisterBias));
}


void MacroAssembler::LoadRootIndexed(Register destination,
                                     Register variable_offset,
                                     int fixed_offset) {
  DCHECK(root_array_available_);
  movp(destination,
       Operand(kRootRegister,
               variable_offset, times_pointer_size,
               (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
}


void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
  DCHECK(root_array_available_);
  movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
       source);
}


void MacroAssembler::PushRoot(Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
}


void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  cmpp(with, Operand(kRootRegister,
                     (index << kPointerSizeLog2) - kRootRegisterBias));
}


void MacroAssembler::CompareRoot(const Operand& with,
                                 Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  DCHECK(!with.AddressUsesRegister(kScratchRegister));
  LoadRoot(kScratchRegister, index);
  cmpp(with, kScratchRegister);
}


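// Appends |addr| to the store buffer: load the current top through an
// external reference, store the slot address, bump the top, and write it
// back. When the masked new top wraps to zero (the buffer is full), the
// overflow stub is called to process it; |and_then| selects whether the
// helper returns or falls through afterwards.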
void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register addr,
                                         Register scratch,
                                         SaveFPRegsMode save_fp,
                                         RememberedSetFinalAction and_then) {
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
    int3();
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  movp(scratch, ExternalOperand(store_buffer));
  // Store pointer to buffer.
  movp(Operand(scratch, 0), addr);
  // Increment buffer top.
  addp(scratch, Immediate(kPointerSize));
  // Write back new top of buffer.
  movp(ExternalOperand(store_buffer), scratch);
  // Call stub on end of buffer.
  Label done;
  // Check for end of buffer.
  testp(scratch, Immediate(StoreBuffer::kStoreBufferMask));
  if (and_then == kReturnAtEnd) {
    Label buffer_overflowed;
    j(equal, &buffer_overflowed, Label::kNear);
    ret(0);
    bind(&buffer_overflowed);
  } else {
    DCHECK(and_then == kFallThroughAtEnd);
    j(not_equal, &done, Label::kNear);
  }
  StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
  CallStub(&store_buffer_overflow);
  if (and_then == kReturnAtEnd) {
    ret(0);
  } else {
    DCHECK(and_then == kFallThroughAtEnd);
    bind(&done);
  }
}


void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cc,
                                Label* branch,
                                Label::Distance distance) {
  CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc, branch,
                distance);
}


void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));
282 
283   leap(dst, FieldOperand(object, offset));
284   if (emit_debug_code()) {
285     Label ok;
286     testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
287     j(zero, &ok, Label::kNear);
288     int3();
289     bind(&ok);
290   }
291 
292   RecordWrite(object, dst, value, save_fp, remembered_set_action,
293               OMIT_SMI_CHECK, pointers_to_here_check_for_value);
294 
295   bind(&done);
296 
297   // Clobber clobbered input registers when running with the debug-code flag
298   // turned on to provoke errors.
299   if (emit_debug_code()) {
300     Move(value, kZapValue, Assembler::RelocInfoNone());
301     Move(dst, kZapValue, Assembler::RelocInfoNone());
302   }
303 }
304 
305 
RecordWriteArray(Register object,Register value,Register index,SaveFPRegsMode save_fp,RememberedSetAction remembered_set_action,SmiCheck smi_check,PointersToHereCheck pointers_to_here_check_for_value)306 void MacroAssembler::RecordWriteArray(
307     Register object,
308     Register value,
309     Register index,
310     SaveFPRegsMode save_fp,
311     RememberedSetAction remembered_set_action,
312     SmiCheck smi_check,
313     PointersToHereCheck pointers_to_here_check_for_value) {
314   // First, check if a write barrier is even needed. The tests below
315   // catch stores of Smis.
316   Label done;
317 
318   // Skip barrier if writing a smi.
319   if (smi_check == INLINE_SMI_CHECK) {
320     JumpIfSmi(value, &done);
321   }
322 
323   // Array access: calculate the destination address. Index is not a smi.
324   Register dst = index;
325   leap(dst, Operand(object, index, times_pointer_size,
326                    FixedArray::kHeaderSize - kHeapObjectTag));
327 
328   RecordWrite(object, dst, value, save_fp, remembered_set_action,
329               OMIT_SMI_CHECK, pointers_to_here_check_for_value);
330 
331   bind(&done);
332 
333   // Clobber clobbered input registers when running with the debug-code flag
334   // turned on to provoke errors.
335   if (emit_debug_code()) {
336     Move(value, kZapValue, Assembler::RelocInfoNone());
337     Move(index, kZapValue, Assembler::RelocInfoNone());
338   }
339 }
340 
341 
RecordWriteForMap(Register object,Register map,Register dst,SaveFPRegsMode fp_mode)342 void MacroAssembler::RecordWriteForMap(Register object,
343                                        Register map,
344                                        Register dst,
345                                        SaveFPRegsMode fp_mode) {
346   DCHECK(!object.is(kScratchRegister));
347   DCHECK(!object.is(map));
348   DCHECK(!object.is(dst));
349   DCHECK(!map.is(dst));
350   AssertNotSmi(object);
351 
352   if (emit_debug_code()) {
353     Label ok;
354     if (map.is(kScratchRegister)) pushq(map);
355     CompareMap(map, isolate()->factory()->meta_map());
356     if (map.is(kScratchRegister)) popq(map);
357     j(equal, &ok, Label::kNear);
358     int3();
359     bind(&ok);
360   }
361 
362   if (!FLAG_incremental_marking) {
363     return;
364   }
365 
366   if (emit_debug_code()) {
367     Label ok;
368     if (map.is(kScratchRegister)) pushq(map);
369     cmpp(map, FieldOperand(object, HeapObject::kMapOffset));
370     if (map.is(kScratchRegister)) popq(map);
371     j(equal, &ok, Label::kNear);
372     int3();
373     bind(&ok);
374   }
375 
376   // Compute the address.
377   leap(dst, FieldOperand(object, HeapObject::kMapOffset));
378 
379   // First, check if a write barrier is even needed. The tests below
380   // catch stores of smis and stores into the young generation.
381   Label done;
382 
383   // A single check of the map's pages interesting flag suffices, since it is
384   // only set during incremental collection, and then it's also guaranteed that
385   // the from object's page's interesting flag is also set.  This optimization
386   // relies on the fact that maps can never be in new space.
387   CheckPageFlag(map,
388                 map,  // Used as scratch.
389                 MemoryChunk::kPointersToHereAreInterestingMask,
390                 zero,
391                 &done,
392                 Label::kNear);
393 
394   RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
395                        fp_mode);
396   CallStub(&stub);
397 
398   bind(&done);
399 
400   // Count number of write barriers in generated code.
401   isolate()->counters()->write_barriers_static()->Increment();
402   IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
403 
404   // Clobber clobbered registers when running with the debug-code flag
405   // turned on to provoke errors.
406   if (emit_debug_code()) {
407     Move(dst, kZapValue, Assembler::RelocInfoNone());
408     Move(map, kZapValue, Assembler::RelocInfoNone());
409   }
410 }
411 
412 
RecordWrite(Register object,Register address,Register value,SaveFPRegsMode fp_mode,RememberedSetAction remembered_set_action,SmiCheck smi_check,PointersToHereCheck pointers_to_here_check_for_value)413 void MacroAssembler::RecordWrite(
414     Register object,
415     Register address,
416     Register value,
417     SaveFPRegsMode fp_mode,
418     RememberedSetAction remembered_set_action,
419     SmiCheck smi_check,
420     PointersToHereCheck pointers_to_here_check_for_value) {
421   DCHECK(!object.is(value));
422   DCHECK(!object.is(address));
423   DCHECK(!value.is(address));
424   AssertNotSmi(object);
425 
426   if (remembered_set_action == OMIT_REMEMBERED_SET &&
427       !FLAG_incremental_marking) {
428     return;
429   }
430 
431   if (emit_debug_code()) {
432     Label ok;
433     cmpp(value, Operand(address, 0));
434     j(equal, &ok, Label::kNear);
435     int3();
436     bind(&ok);
437   }
438 
439   // First, check if a write barrier is even needed. The tests below
440   // catch stores of smis and stores into the young generation.
441   Label done;
442 
443   if (smi_check == INLINE_SMI_CHECK) {
444     // Skip barrier if writing a smi.
445     JumpIfSmi(value, &done);
446   }
447 
448   if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
449     CheckPageFlag(value,
450                   value,  // Used as scratch.
451                   MemoryChunk::kPointersToHereAreInterestingMask,
452                   zero,
453                   &done,
454                   Label::kNear);
455   }
456 
457   CheckPageFlag(object,
458                 value,  // Used as scratch.
459                 MemoryChunk::kPointersFromHereAreInterestingMask,
460                 zero,
461                 &done,
462                 Label::kNear);
463 
464   RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
465                        fp_mode);
466   CallStub(&stub);
467 
468   bind(&done);
469 
470   // Count number of write barriers in generated code.
471   isolate()->counters()->write_barriers_static()->Increment();
472   IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
473 
474   // Clobber clobbered registers when running with the debug-code flag
475   // turned on to provoke errors.
476   if (emit_debug_code()) {
477     Move(address, kZapValue, Assembler::RelocInfoNone());
478     Move(value, kZapValue, Assembler::RelocInfoNone());
479   }
480 }
481 
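// Write barrier for the JSFunction::code_entry field. Unlike RecordWrite
// above, this calls the C entry point for incremental marking directly
// instead of going through a stub, which is why the inputs are pinned to
// rdi/rcx/r15 below.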
void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
                                               Register code_entry,
                                               Register scratch) {
  const int offset = JSFunction::kCodeEntryOffset;

  // The input registers are fixed to make calling the C write barrier function
  // easier.
  DCHECK(js_function.is(rdi));
  DCHECK(code_entry.is(rcx));
  DCHECK(scratch.is(r15));

  // Since a code entry (value) is always in old space, we don't need to update
  // the remembered set. If incremental marking is off, there is nothing for us
  // to do.
  if (!FLAG_incremental_marking) return;

  AssertNotSmi(js_function);

  if (emit_debug_code()) {
    Label ok;
    leap(scratch, FieldOperand(js_function, offset));
    cmpp(code_entry, Operand(scratch, 0));
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis and stores into young gen.
  Label done;

  CheckPageFlag(code_entry, scratch,
                MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
                Label::kNear);
  CheckPageFlag(js_function, scratch,
                MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
                Label::kNear);

  // Save input registers.
  Push(js_function);
  Push(code_entry);

  const Register dst = scratch;
  leap(dst, FieldOperand(js_function, offset));

  // Save caller-saved registers.
  PushCallerSaved(kDontSaveFPRegs, js_function, code_entry);

  int argument_count = 3;
  PrepareCallCFunction(argument_count);

  // Load the argument registers.
  if (arg_reg_1.is(rcx)) {
    // Windows calling convention.
    DCHECK(arg_reg_2.is(rdx) && arg_reg_3.is(r8));

    movp(arg_reg_1, js_function);  // rcx gets rdi.
    movp(arg_reg_2, dst);          // rdx gets r15.
  } else {
    // AMD64 calling convention.
    DCHECK(arg_reg_1.is(rdi) && arg_reg_2.is(rsi) && arg_reg_3.is(rdx));

    // rdi is already loaded with js_function.
    movp(arg_reg_2, dst);  // rsi gets r15.
  }
  Move(arg_reg_3, ExternalReference::isolate_address(isolate()));

  {
    AllowExternalCallThatCantCauseGC scope(this);
    CallCFunction(
        ExternalReference::incremental_marking_record_write_code_entry_function(
            isolate()),
        argument_count);
  }

  // Restore caller-saved registers.
  PopCallerSaved(kDontSaveFPRegs, js_function, code_entry);

  // Restore input registers.
  Pop(code_entry);
  Pop(js_function);

  bind(&done);
}

void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
  if (emit_debug_code()) Check(cc, reason);
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    Label ok;
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedDoubleArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedCOWArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
    bind(&ok);
  }
}


void MacroAssembler::Check(Condition cc, BailoutReason reason) {
  Label L;
  j(cc, &L, Label::kNear);
  Abort(reason);
  // Control will not return here.
  bind(&L);
}


void MacroAssembler::CheckStackAlignment() {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (frame_alignment > kPointerSize) {
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    Label alignment_as_expected;
    testp(rsp, Immediate(frame_alignment_mask));
    j(zero, &alignment_as_expected, Label::kNear);
    // Abort if stack is not aligned.
    int3();
    bind(&alignment_as_expected);
  }
}


void MacroAssembler::NegativeZeroTest(Register result,
                                      Register op,
                                      Label* then_label) {
  Label ok;
  testl(result, result);
  j(not_zero, &ok, Label::kNear);
  testl(op, op);
  j(sign, then_label);
  bind(&ok);
}


void MacroAssembler::Abort(BailoutReason reason) {
#ifdef DEBUG
  const char* msg = GetBailoutReason(reason);
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }

  if (FLAG_trap_on_abort) {
    int3();
    return;
  }
#endif

  // Check if Abort() has already been initialized.
  DCHECK(isolate()->builtins()->Abort()->IsHeapObject());

  Move(rdx, Smi::FromInt(static_cast<int>(reason)));

  if (!has_frame_) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
  } else {
    Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
  }
  // Control will not return here.
  int3();
}


void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
  DCHECK(AllowThisStubCall(stub));  // Calls are not allowed in some stubs
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}


void MacroAssembler::TailCallStub(CodeStub* stub) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::StubReturn(int argc) {
  DCHECK(argc >= 1 && generating_stub());
  ret((argc - 1) * kPointerSize);
}


bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  return has_frame_ || !stub->SometimesSetsUpAFrame();
}

void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, num_arguments);
  LoadAddress(rbx, ExternalReference(f, isolate()));
  CEntryStub ces(isolate(), f->result_size, save_doubles);
  CallStub(&ces);
}


void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  Set(rax, num_arguments);
  LoadAddress(rbx, ext);

  CEntryStub stub(isolate(), 1);
  CallStub(&stub);
}


void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
  // ----------- S t a t e -------------
  //  -- rsp[0]                 : return address
  //  -- rsp[8]                 : argument num_arguments - 1
  //  ...
  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
  //
  //  For runtime functions with variable arguments:
  //  -- rax                    : number of arguments
  // -----------------------------------

  const Runtime::Function* function = Runtime::FunctionForId(fid);
  DCHECK_EQ(1, function->result_size);
  if (function->nargs >= 0) {
    Set(rax, function->nargs);
  }
  JumpToExternalReference(ExternalReference(fid, isolate()));
}

void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
                                             bool builtin_exit_frame) {
  // Set the entry point and jump to the C entry runtime stub.
  LoadAddress(rbx, ext);
  CEntryStub ces(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
                 builtin_exit_frame);
  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}

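// Register set saved by PushCallerSaved/PopCallerSaved below. rsp is
// naturally excluded, and r12-r15 are omitted because they are
// callee-saved (r13 additionally serves as kRootRegister). rbx and rbp
// are included even though the C ABIs treat them as callee-saved,
// presumably because the list tracks everything V8's generated code may
// clobber.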
#define REG(Name) \
  { Register::kCode_##Name }

static const Register saved_regs[] = {
  REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
  REG(r9), REG(r10), REG(r11)
};

#undef REG

static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);


void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
                                     Register exclusion1,
                                     Register exclusion2,
                                     Register exclusion3) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  for (int i = 0; i < kNumberOfSavedRegs; i++) {
    Register reg = saved_regs[i];
    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
      pushq(reg);
    }
  }
  // r12 to r15 are callee-saved on all platforms.
  if (fp_mode == kSaveFPRegs) {
    subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      Movsd(Operand(rsp, i * kDoubleSize), reg);
    }
  }
}


void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
                                    Register exclusion1,
                                    Register exclusion2,
                                    Register exclusion3) {
  if (fp_mode == kSaveFPRegs) {
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      Movsd(reg, Operand(rsp, i * kDoubleSize));
    }
    addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
  }
  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
    Register reg = saved_regs[i];
    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
      popq(reg);
    }
  }
}


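// The scalar conversion helpers below pick the VEX-encoded AVX form when
// available and fall back to the SSE2 encoding otherwise. The AVX forms
// pass the source twice (or clear the destination first) so that the
// instruction does not carry a false dependency on the previous contents
// of |dst|.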
void MacroAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvtss2sd(dst, src, src);
  } else {
    cvtss2sd(dst, src);
  }
}


void MacroAssembler::Cvtss2sd(XMMRegister dst, const Operand& src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvtss2sd(dst, dst, src);
  } else {
    cvtss2sd(dst, src);
  }
}


void MacroAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvtsd2ss(dst, src, src);
  } else {
    cvtsd2ss(dst, src);
  }
}


void MacroAssembler::Cvtsd2ss(XMMRegister dst, const Operand& src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvtsd2ss(dst, dst, src);
  } else {
    cvtsd2ss(dst, src);
  }
}


void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorpd(dst, dst, dst);
    vcvtlsi2sd(dst, dst, src);
  } else {
    xorpd(dst, dst);
    cvtlsi2sd(dst, src);
  }
}


void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorpd(dst, dst, dst);
    vcvtlsi2sd(dst, dst, src);
  } else {
    xorpd(dst, dst);
    cvtlsi2sd(dst, src);
  }
}


void MacroAssembler::Cvtlsi2ss(XMMRegister dst, Register src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorps(dst, dst, dst);
    vcvtlsi2ss(dst, dst, src);
  } else {
    xorps(dst, dst);
    cvtlsi2ss(dst, src);
  }
}


void MacroAssembler::Cvtlsi2ss(XMMRegister dst, const Operand& src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorps(dst, dst, dst);
    vcvtlsi2ss(dst, dst, src);
  } else {
    xorps(dst, dst);
    cvtlsi2ss(dst, src);
  }
}


void MacroAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorps(dst, dst, dst);
    vcvtqsi2ss(dst, dst, src);
  } else {
    xorps(dst, dst);
    cvtqsi2ss(dst, src);
  }
}


void MacroAssembler::Cvtqsi2ss(XMMRegister dst, const Operand& src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorps(dst, dst, dst);
    vcvtqsi2ss(dst, dst, src);
  } else {
    xorps(dst, dst);
    cvtqsi2ss(dst, src);
  }
}


void MacroAssembler::Cvtqsi2sd(XMMRegister dst, Register src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorpd(dst, dst, dst);
    vcvtqsi2sd(dst, dst, src);
  } else {
    xorpd(dst, dst);
    cvtqsi2sd(dst, src);
  }
}


void MacroAssembler::Cvtqsi2sd(XMMRegister dst, const Operand& src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorpd(dst, dst, dst);
    vcvtqsi2sd(dst, dst, src);
  } else {
    xorpd(dst, dst);
    cvtqsi2sd(dst, src);
  }
}


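// Unsigned 64-bit to float conversion. The hardware only converts signed
// values, so when the top bit of |src| is set the value is halved (with
// the dropped low bit OR'ed back in so rounding stays correct), converted
// as a signed number, and then doubled. For example, 2^63 becomes 2^62,
// converts exactly, and the final addss restores 2^63.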
void MacroAssembler::Cvtqui2ss(XMMRegister dst, Register src, Register tmp) {
  Label msb_set_src;
  Label jmp_return;
  testq(src, src);
  j(sign, &msb_set_src, Label::kNear);
  Cvtqsi2ss(dst, src);
  jmp(&jmp_return, Label::kNear);
  bind(&msb_set_src);
  movq(tmp, src);
  shrq(src, Immediate(1));
  // Recover the least significant bit to avoid rounding errors.
  andq(tmp, Immediate(1));
  orq(src, tmp);
  Cvtqsi2ss(dst, src);
  addss(dst, dst);
  bind(&jmp_return);
}


void MacroAssembler::Cvtqui2sd(XMMRegister dst, Register src, Register tmp) {
  Label msb_set_src;
  Label jmp_return;
  testq(src, src);
  j(sign, &msb_set_src, Label::kNear);
  Cvtqsi2sd(dst, src);
  jmp(&jmp_return, Label::kNear);
  bind(&msb_set_src);
  movq(tmp, src);
  shrq(src, Immediate(1));
  andq(tmp, Immediate(1));
  orq(src, tmp);
  Cvtqsi2sd(dst, src);
  addsd(dst, dst);
  bind(&jmp_return);
}


void MacroAssembler::Cvtsd2si(Register dst, XMMRegister src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvtsd2si(dst, src);
  } else {
    cvtsd2si(dst, src);
  }
}


void MacroAssembler::Cvttss2si(Register dst, XMMRegister src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvttss2si(dst, src);
  } else {
    cvttss2si(dst, src);
  }
}


void MacroAssembler::Cvttss2si(Register dst, const Operand& src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvttss2si(dst, src);
  } else {
    cvttss2si(dst, src);
  }
}


void MacroAssembler::Cvttsd2si(Register dst, XMMRegister src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvttsd2si(dst, src);
  } else {
    cvttsd2si(dst, src);
  }
}


void MacroAssembler::Cvttsd2si(Register dst, const Operand& src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvttsd2si(dst, src);
  } else {
    cvttsd2si(dst, src);
  }
}


void MacroAssembler::Cvttss2siq(Register dst, XMMRegister src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvttss2siq(dst, src);
  } else {
    cvttss2siq(dst, src);
  }
}


void MacroAssembler::Cvttss2siq(Register dst, const Operand& src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvttss2siq(dst, src);
  } else {
    cvttss2siq(dst, src);
  }
}


void MacroAssembler::Cvttsd2siq(Register dst, XMMRegister src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvttsd2siq(dst, src);
  } else {
    cvttsd2siq(dst, src);
  }
}


void MacroAssembler::Cvttsd2siq(Register dst, const Operand& src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvttsd2siq(dst, src);
  } else {
    cvttsd2siq(dst, src);
  }
}


void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8()) {
    movsxbq(dst, src);
  } else if (r.IsUInteger8()) {
    movzxbl(dst, src);
  } else if (r.IsInteger16()) {
    movsxwq(dst, src);
  } else if (r.IsUInteger16()) {
    movzxwl(dst, src);
  } else if (r.IsInteger32()) {
    movl(dst, src);
  } else {
    movp(dst, src);
  }
}


void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    movb(dst, src);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    movw(dst, src);
  } else if (r.IsInteger32()) {
    movl(dst, src);
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    movp(dst, src);
  }
}


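// Set picks the shortest encoding for the constant: xorl for zero, a
// zero-extending 32-bit movl when the value fits in a uint32, a
// sign-extended 32-bit immediate when it fits in an int32, and a full
// 64-bit movq otherwise.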
void MacroAssembler::Set(Register dst, int64_t x) {
  if (x == 0) {
    xorl(dst, dst);
  } else if (is_uint32(x)) {
    movl(dst, Immediate(static_cast<uint32_t>(x)));
  } else if (is_int32(x)) {
    movq(dst, Immediate(static_cast<int32_t>(x)));
  } else {
    movq(dst, x);
  }
}

void MacroAssembler::Set(const Operand& dst, intptr_t x) {
  if (kPointerSize == kInt64Size) {
    if (is_int32(x)) {
      movp(dst, Immediate(static_cast<int32_t>(x)));
    } else {
      Set(kScratchRegister, x);
      movp(dst, kScratchRegister);
    }
  } else {
    movp(dst, Immediate(static_cast<int32_t>(x)));
  }
}


// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.

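// With 32-bit smi values (the usual x64 configuration) a smi keeps its
// payload in the upper 32 bits of the word (kSmiShift == 32), with the
// tag bits below it all zero; with 31-bit smi values the payload sits
// above a one-bit tag in a 32-bit word. Most helpers in this section
// branch on SmiValuesAre32Bits() to pick the right shift and operand
// width.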
bool MacroAssembler::IsUnsafeInt(const int32_t x) {
  static const int kMaxBits = 17;
  return !is_intn(x, kMaxBits);
}


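// SafeMove and SafePush guard against JIT spraying: a constant large
// enough to be useful as attacker-controlled instruction bytes is emitted
// XOR'ed with the per-process jit cookie and un-XOR'ed at runtime, so the
// raw value never appears verbatim in the instruction stream.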
void MacroAssembler::SafeMove(Register dst, Smi* src) {
  DCHECK(!dst.is(kScratchRegister));
  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
    if (SmiValuesAre32Bits()) {
      // JIT cookie can be converted to Smi.
      Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
      Move(kScratchRegister, Smi::FromInt(jit_cookie()));
      xorp(dst, kScratchRegister);
    } else {
      DCHECK(SmiValuesAre31Bits());
      int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
      movp(dst, Immediate(value ^ jit_cookie()));
      xorp(dst, Immediate(jit_cookie()));
    }
  } else {
    Move(dst, src);
  }
}


void MacroAssembler::SafePush(Smi* src) {
  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
    if (SmiValuesAre32Bits()) {
      // JIT cookie can be converted to Smi.
      Push(Smi::FromInt(src->value() ^ jit_cookie()));
      Move(kScratchRegister, Smi::FromInt(jit_cookie()));
      xorp(Operand(rsp, 0), kScratchRegister);
    } else {
      DCHECK(SmiValuesAre31Bits());
      int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
      Push(Immediate(value ^ jit_cookie()));
      xorp(Operand(rsp, 0), Immediate(jit_cookie()));
    }
  } else {
    Push(src);
  }
}


Register MacroAssembler::GetSmiConstant(Smi* source) {
  STATIC_ASSERT(kSmiTag == 0);
  int value = source->value();
  if (value == 0) {
    xorl(kScratchRegister, kScratchRegister);
    return kScratchRegister;
  }
  LoadSmiConstant(kScratchRegister, source);
  return kScratchRegister;
}


void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
  STATIC_ASSERT(kSmiTag == 0);
  int value = source->value();
  if (value == 0) {
    xorl(dst, dst);
  } else {
    Move(dst, source, Assembler::RelocInfoNone());
  }
}


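// Tagging is a plain left shift. With 32-bit smi values, for example, the
// integer 5 becomes the word 5 << 32 == 0x500000000.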
void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movl(dst, src);
  }
  shlp(dst, Immediate(kSmiShift));
}


void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
  if (emit_debug_code()) {
    testb(dst, Immediate(0x01));
    Label ok;
    j(zero, &ok, Label::kNear);
    Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
    bind(&ok);
  }

  if (SmiValuesAre32Bits()) {
    DCHECK(kSmiShift % kBitsPerByte == 0);
    movl(Operand(dst, kSmiShift / kBitsPerByte), src);
  } else {
    DCHECK(SmiValuesAre31Bits());
    Integer32ToSmi(kScratchRegister, src);
    movp(dst, kScratchRegister);
  }
}


void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
                                                Register src,
                                                int constant) {
  if (dst.is(src)) {
    addl(dst, Immediate(constant));
  } else {
    leal(dst, Operand(src, constant));
  }
  shlp(dst, Immediate(kSmiShift));
}


void MacroAssembler::SmiToInteger32(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movp(dst, src);
  }

  if (SmiValuesAre32Bits()) {
    shrp(dst, Immediate(kSmiShift));
  } else {
    DCHECK(SmiValuesAre31Bits());
    sarl(dst, Immediate(kSmiShift));
  }
}


void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
  if (SmiValuesAre32Bits()) {
    movl(dst, Operand(src, kSmiShift / kBitsPerByte));
  } else {
    DCHECK(SmiValuesAre31Bits());
    movl(dst, src);
    sarl(dst, Immediate(kSmiShift));
  }
}


void MacroAssembler::SmiToInteger64(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movp(dst, src);
  }
  sarp(dst, Immediate(kSmiShift));
  if (kPointerSize == kInt32Size) {
    // Sign extend to 64-bit.
    movsxlq(dst, dst);
  }
}


void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
  if (SmiValuesAre32Bits()) {
    movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
  } else {
    DCHECK(SmiValuesAre31Bits());
    movp(dst, src);
    SmiToInteger64(dst, dst);
  }
}


void MacroAssembler::SmiTest(Register src) {
  AssertSmi(src);
  testp(src, src);
}


void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
  AssertSmi(smi1);
  AssertSmi(smi2);
  cmpp(smi1, smi2);
}


void MacroAssembler::SmiCompare(Register dst, Smi* src) {
  AssertSmi(dst);
  Cmp(dst, src);
}


void MacroAssembler::Cmp(Register dst, Smi* src) {
  DCHECK(!dst.is(kScratchRegister));
  if (src->value() == 0) {
    testp(dst, dst);
  } else {
    Register constant_reg = GetSmiConstant(src);
    cmpp(dst, constant_reg);
  }
}


void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
  AssertSmi(dst);
  AssertSmi(src);
  cmpp(dst, src);
}


void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
  AssertSmi(dst);
  AssertSmi(src);
  cmpp(dst, src);
}


void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
  AssertSmi(dst);
  if (SmiValuesAre32Bits()) {
    cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
  } else {
    DCHECK(SmiValuesAre31Bits());
    cmpl(dst, Immediate(src));
  }
}


void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
  // The Operand cannot use the smi register.
  Register smi_reg = GetSmiConstant(src);
  DCHECK(!dst.AddressUsesRegister(smi_reg));
  cmpp(dst, smi_reg);
}


void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
  if (SmiValuesAre32Bits()) {
    cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
  } else {
    DCHECK(SmiValuesAre31Bits());
    SmiToInteger32(kScratchRegister, dst);
    cmpl(kScratchRegister, src);
  }
}


void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                                           Register src,
                                                           int power) {
  DCHECK(power >= 0);
  DCHECK(power < 64);
  if (power == 0) {
    SmiToInteger64(dst, src);
    return;
  }
  if (!dst.is(src)) {
    movp(dst, src);
  }
  if (power < kSmiShift) {
    sarp(dst, Immediate(kSmiShift - power));
  } else if (power > kSmiShift) {
    shlp(dst, Immediate(power - kSmiShift));
  }
}


void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
                                                         Register src,
                                                         int power) {
  DCHECK((0 <= power) && (power < 32));
  if (dst.is(src)) {
    shrp(dst, Immediate(power + kSmiShift));
  } else {
    UNIMPLEMENTED();  // Not used.
  }
}


void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
                                 Label* on_not_smis,
                                 Label::Distance near_jump) {
  if (dst.is(src1) || dst.is(src2)) {
    DCHECK(!src1.is(kScratchRegister));
    DCHECK(!src2.is(kScratchRegister));
    movp(kScratchRegister, src1);
    orp(kScratchRegister, src2);
    JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
    movp(dst, kScratchRegister);
  } else {
    movp(dst, src1);
    orp(dst, src2);
    JumpIfNotSmi(dst, on_not_smis, near_jump);
  }
}


Condition MacroAssembler::CheckSmi(Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}


Condition MacroAssembler::CheckSmi(const Operand& src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}


Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  // Test that both bits of the mask 0x8000000000000001 are zero.
  movp(kScratchRegister, src);
  rolp(kScratchRegister, Immediate(1));
  testb(kScratchRegister, Immediate(3));
  return zero;
}


Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
  if (first.is(second)) {
    return CheckSmi(first);
  }
  STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
  if (SmiValuesAre32Bits()) {
    leal(kScratchRegister, Operand(first, second, times_1, 0));
    testb(kScratchRegister, Immediate(0x03));
  } else {
    DCHECK(SmiValuesAre31Bits());
    movl(kScratchRegister, first);
    orl(kScratchRegister, second);
    testb(kScratchRegister, Immediate(kSmiTagMask));
  }
  return zero;
}


Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
                                                  Register second) {
  if (first.is(second)) {
    return CheckNonNegativeSmi(first);
  }
  movp(kScratchRegister, first);
  orp(kScratchRegister, second);
  rolp(kScratchRegister, Immediate(1));
  testl(kScratchRegister, Immediate(3));
  return zero;
}


Condition MacroAssembler::CheckEitherSmi(Register first,
                                         Register second,
                                         Register scratch) {
  if (first.is(second)) {
    return CheckSmi(first);
  }
  if (scratch.is(second)) {
    andl(scratch, first);
  } else {
    if (!scratch.is(first)) {
      movl(scratch, first);
    }
    andl(scratch, second);
  }
  testb(scratch, Immediate(kSmiTagMask));
  return zero;
}


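// With 31-bit smi values only integers in [-2^30, 2^30) are
// representable. The cmpl below computes src - (int32)0xc0000000, i.e.
// src + 2^30, whose sign bit is clear exactly for in-range values, hence
// the "positive" condition.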
CheckInteger32ValidSmiValue(Register src)1479 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
1480   if (SmiValuesAre32Bits()) {
1481     // A 32-bit integer value can always be converted to a smi.
1482     return always;
1483   } else {
1484     DCHECK(SmiValuesAre31Bits());
1485     cmpl(src, Immediate(0xc0000000));
1486     return positive;
1487   }
1488 }
1489 
1490 
CheckUInteger32ValidSmiValue(Register src)1491 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
1492   if (SmiValuesAre32Bits()) {
1493     // An unsigned 32-bit integer value is valid as long as the high bit
1494     // is not set.
1495     testl(src, src);
1496     return positive;
1497   } else {
1498     DCHECK(SmiValuesAre31Bits());
1499     testl(src, Immediate(0xc0000000));
1500     return zero;
1501   }
1502 }
1503 
1504 
CheckSmiToIndicator(Register dst,Register src)1505 void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
1506   if (dst.is(src)) {
1507     andl(dst, Immediate(kSmiTagMask));
1508   } else {
1509     movl(dst, Immediate(kSmiTagMask));
1510     andl(dst, src);
1511   }
1512 }
1513 
1514 
CheckSmiToIndicator(Register dst,const Operand & src)1515 void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
1516   if (!(src.AddressUsesRegister(dst))) {
1517     movl(dst, Immediate(kSmiTagMask));
1518     andl(dst, src);
1519   } else {
1520     movl(dst, src);
1521     andl(dst, Immediate(kSmiTagMask));
1522   }
1523 }
1524 
1525 
JumpIfValidSmiValue(Register src,Label * on_valid,Label::Distance near_jump)1526 void MacroAssembler::JumpIfValidSmiValue(Register src,
1527                                          Label* on_valid,
1528                                          Label::Distance near_jump) {
1529   Condition is_valid = CheckInteger32ValidSmiValue(src);
1530   j(is_valid, on_valid, near_jump);
1531 }
1532 
1533 
JumpIfNotValidSmiValue(Register src,Label * on_invalid,Label::Distance near_jump)1534 void MacroAssembler::JumpIfNotValidSmiValue(Register src,
1535                                             Label* on_invalid,
1536                                             Label::Distance near_jump) {
1537   Condition is_valid = CheckInteger32ValidSmiValue(src);
1538   j(NegateCondition(is_valid), on_invalid, near_jump);
1539 }
1540 
1541 
JumpIfUIntValidSmiValue(Register src,Label * on_valid,Label::Distance near_jump)1542 void MacroAssembler::JumpIfUIntValidSmiValue(Register src,
1543                                              Label* on_valid,
1544                                              Label::Distance near_jump) {
1545   Condition is_valid = CheckUInteger32ValidSmiValue(src);
1546   j(is_valid, on_valid, near_jump);
1547 }
1548 
1549 
JumpIfUIntNotValidSmiValue(Register src,Label * on_invalid,Label::Distance near_jump)1550 void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
1551                                                 Label* on_invalid,
1552                                                 Label::Distance near_jump) {
1553   Condition is_valid = CheckUInteger32ValidSmiValue(src);
1554   j(NegateCondition(is_valid), on_invalid, near_jump);
1555 }
1556 
1557 
JumpIfSmi(Register src,Label * on_smi,Label::Distance near_jump)1558 void MacroAssembler::JumpIfSmi(Register src,
1559                                Label* on_smi,
1560                                Label::Distance near_jump) {
1561   Condition smi = CheckSmi(src);
1562   j(smi, on_smi, near_jump);
1563 }
1564 
1565 
JumpIfNotSmi(Register src,Label * on_not_smi,Label::Distance near_jump)1566 void MacroAssembler::JumpIfNotSmi(Register src,
1567                                   Label* on_not_smi,
1568                                   Label::Distance near_jump) {
1569   Condition smi = CheckSmi(src);
1570   j(NegateCondition(smi), on_not_smi, near_jump);
1571 }
1572 
1573 
JumpUnlessNonNegativeSmi(Register src,Label * on_not_smi_or_negative,Label::Distance near_jump)1574 void MacroAssembler::JumpUnlessNonNegativeSmi(
1575     Register src, Label* on_not_smi_or_negative,
1576     Label::Distance near_jump) {
1577   Condition non_negative_smi = CheckNonNegativeSmi(src);
1578   j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
1579 }
1580 
1581 
JumpIfSmiEqualsConstant(Register src,Smi * constant,Label * on_equals,Label::Distance near_jump)1582 void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
1583                                              Smi* constant,
1584                                              Label* on_equals,
1585                                              Label::Distance near_jump) {
1586   SmiCompare(src, constant);
1587   j(equal, on_equals, near_jump);
1588 }
1589 
1590 
1591 void MacroAssembler::JumpIfNotBothSmi(Register src1,
1592                                       Register src2,
1593                                       Label* on_not_both_smi,
1594                                       Label::Distance near_jump) {
1595   Condition both_smi = CheckBothSmi(src1, src2);
1596   j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1597 }
1598 
1599 
1600 void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
1601                                                   Register src2,
1602                                                   Label* on_not_both_smi,
1603                                                   Label::Distance near_jump) {
1604   Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
1605   j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1606 }
1607 
1608 
1609 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
1610   if (constant->value() == 0) {
1611     if (!dst.is(src)) {
1612       movp(dst, src);
1613     }
1614     return;
1615   } else if (dst.is(src)) {
1616     DCHECK(!dst.is(kScratchRegister));
1617     Register constant_reg = GetSmiConstant(constant);
1618     addp(dst, constant_reg);
1619   } else {
1620     LoadSmiConstant(dst, constant);
1621     addp(dst, src);
1622   }
1623 }
1624 
1625 
1626 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
1627   if (constant->value() != 0) {
1628     if (SmiValuesAre32Bits()) {
1629       addl(Operand(dst, kSmiShift / kBitsPerByte),
1630            Immediate(constant->value()));
1631     } else {
1632       DCHECK(SmiValuesAre31Bits());
1633       addp(dst, Immediate(constant));
1634     }
1635   }
1636 }
1637 
1638 
1639 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant,
1640                                     SmiOperationConstraints constraints,
1641                                     Label* bailout_label,
1642                                     Label::Distance near_jump) {
1643   if (constant->value() == 0) {
1644     if (!dst.is(src)) {
1645       movp(dst, src);
1646     }
1647   } else if (dst.is(src)) {
1648     DCHECK(!dst.is(kScratchRegister));
1649     LoadSmiConstant(kScratchRegister, constant);
1650     addp(dst, kScratchRegister);
1651     if (constraints & SmiOperationConstraint::kBailoutOnNoOverflow) {
1652       j(no_overflow, bailout_label, near_jump);
1653       DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
1654       subp(dst, kScratchRegister);
1655     } else if (constraints & SmiOperationConstraint::kBailoutOnOverflow) {
1656       if (constraints & SmiOperationConstraint::kPreserveSourceRegister) {
1657         Label done;
1658         j(no_overflow, &done, Label::kNear);
1659         subp(dst, kScratchRegister);
1660         jmp(bailout_label, near_jump);
1661         bind(&done);
1662       } else {
1663         // Bail out on overflow without preserving src.
1664         j(overflow, bailout_label, near_jump);
1665       }
1666     } else {
1667       UNREACHABLE();
1668     }
1669   } else {
1670     DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
1671     DCHECK(constraints & SmiOperationConstraint::kBailoutOnOverflow);
1672     LoadSmiConstant(dst, constant);
1673     addp(dst, src);
1674     j(overflow, bailout_label, near_jump);
1675   }
1676 }
1677 
1678 
1679 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
1680   if (constant->value() == 0) {
1681     if (!dst.is(src)) {
1682       movp(dst, src);
1683     }
1684   } else if (dst.is(src)) {
1685     DCHECK(!dst.is(kScratchRegister));
1686     Register constant_reg = GetSmiConstant(constant);
1687     subp(dst, constant_reg);
1688   } else {
1689     if (constant->value() == Smi::kMinValue) {
1690       LoadSmiConstant(dst, constant);
1691       // Adding and subtracting the min-value give the same result; they
1692       // differ only in the overflow flag, which we don't check here.
1693       addp(dst, src);
1694     } else {
1695       // Subtract by adding the negation.
1696       LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
1697       addp(dst, src);
1698     }
1699   }
1700 }
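
// A sketch of why Smi::kMinValue is special above (illustration only):
// two's-complement negation has no positive counterpart for the minimum
// value, so "subtract by adding the negation" would be wrong for it:
//
//   int32_t v = INT32_MIN;   // stand-in for Smi::kMinValue's payload
//   // -v == v, while x - v and x + v produce the same bit pattern and
//   // differ only in the overflow flag, which this overload ignores.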
1701 
1702 
1703 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant,
1704                                     SmiOperationConstraints constraints,
1705                                     Label* bailout_label,
1706                                     Label::Distance near_jump) {
1707   if (constant->value() == 0) {
1708     if (!dst.is(src)) {
1709       movp(dst, src);
1710     }
1711   } else if (dst.is(src)) {
1712     DCHECK(!dst.is(kScratchRegister));
1713     LoadSmiConstant(kScratchRegister, constant);
1714     subp(dst, kScratchRegister);
1715     if (constraints & SmiOperationConstraint::kBailoutOnNoOverflow) {
1716       j(no_overflow, bailout_label, near_jump);
1717       DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
1718       addp(dst, kScratchRegister);
1719     } else if (constraints & SmiOperationConstraint::kBailoutOnOverflow) {
1720       if (constraints & SmiOperationConstraint::kPreserveSourceRegister) {
1721         Label done;
1722         j(no_overflow, &done, Label::kNear);
1723         addp(dst, kScratchRegister);
1724         jmp(bailout_label, near_jump);
1725         bind(&done);
1726       } else {
1727         // Bail out on overflow without preserving src.
1728         j(overflow, bailout_label, near_jump);
1729       }
1730     } else {
1731       UNREACHABLE();
1732     }
1733   } else {
1734     DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
1735     DCHECK(constraints & SmiOperationConstraint::kBailoutOnOverflow);
1736     if (constant->value() == Smi::kMinValue) {
1737       DCHECK(!dst.is(kScratchRegister));
1738       movp(dst, src);
1739       LoadSmiConstant(kScratchRegister, constant);
1740       subp(dst, kScratchRegister);
1741       j(overflow, bailout_label, near_jump);
1742     } else {
1743       // Subtract by adding the negation.
1744       LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
1745       addp(dst, src);
1746       j(overflow, bailout_label, near_jump);
1747     }
1748   }
1749 }
1750 
1751 
1752 void MacroAssembler::SmiNeg(Register dst,
1753                             Register src,
1754                             Label* on_smi_result,
1755                             Label::Distance near_jump) {
1756   if (dst.is(src)) {
1757     DCHECK(!dst.is(kScratchRegister));
1758     movp(kScratchRegister, src);
1759     negp(dst);  // Low 32 bits are retained as zero by negation.
1760     // Test if result is zero or Smi::kMinValue.
1761     cmpp(dst, kScratchRegister);
1762     j(not_equal, on_smi_result, near_jump);
1763     movp(src, kScratchRegister);
1764   } else {
1765     movp(dst, src);
1766     negp(dst);
1767     cmpp(dst, src);
1768     // If the result is zero or Smi::kMinValue, negation failed to create a smi.
1769     j(not_equal, on_smi_result, near_jump);
1770   }
1771 }
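
// A sketch (illustration only): comparing the negated value against the
// original catches exactly the two fixed points of negation,
//
//   bool negation_failed = (-x == x);   // true only for 0 and the minimum
//
// 0 goes to the slow path because JavaScript -0 is not a smi; the minimum
// value because its negation overflows.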
1772 
1773 
1774 template<class T>
1775 static void SmiAddHelper(MacroAssembler* masm,
1776                          Register dst,
1777                          Register src1,
1778                          T src2,
1779                          Label* on_not_smi_result,
1780                          Label::Distance near_jump) {
1781   if (dst.is(src1)) {
1782     Label done;
1783     masm->addp(dst, src2);
1784     masm->j(no_overflow, &done, Label::kNear);
1785     // Restore src1.
1786     masm->subp(dst, src2);
1787     masm->jmp(on_not_smi_result, near_jump);
1788     masm->bind(&done);
1789   } else {
1790     masm->movp(dst, src1);
1791     masm->addp(dst, src2);
1792     masm->j(overflow, on_not_smi_result, near_jump);
1793   }
1794 }
1795 
1796 
1797 void MacroAssembler::SmiAdd(Register dst,
1798                             Register src1,
1799                             Register src2,
1800                             Label* on_not_smi_result,
1801                             Label::Distance near_jump) {
1802   DCHECK_NOT_NULL(on_not_smi_result);
1803   DCHECK(!dst.is(src2));
1804   SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
1805 }
1806 
1807 
1808 void MacroAssembler::SmiAdd(Register dst,
1809                             Register src1,
1810                             const Operand& src2,
1811                             Label* on_not_smi_result,
1812                             Label::Distance near_jump) {
1813   DCHECK_NOT_NULL(on_not_smi_result);
1814   DCHECK(!src2.AddressUsesRegister(dst));
1815   SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
1816 }
1817 
1818 
1819 void MacroAssembler::SmiAdd(Register dst,
1820                             Register src1,
1821                             Register src2) {
1822   // No overflow checking. Use only when it's known that
1823   // overflowing is impossible.
1824   if (!dst.is(src1)) {
1825     if (emit_debug_code()) {
1826       movp(kScratchRegister, src1);
1827       addp(kScratchRegister, src2);
1828       Check(no_overflow, kSmiAdditionOverflow);
1829     }
1830     leap(dst, Operand(src1, src2, times_1, 0));
1831   } else {
1832     addp(dst, src2);
1833     Assert(no_overflow, kSmiAdditionOverflow);
1834   }
1835 }
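
// A sketch (illustration only): the leap above uses the address-generation
// unit as a three-operand add, so neither source register is clobbered and
// the flags are left untouched:
//
//   lea dst, [src1 + src2*1]   ; dst = src1 + src2, EFLAGS and sources intact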
1836 
1837 
1838 template<class T>
1839 static void SmiSubHelper(MacroAssembler* masm,
1840                          Register dst,
1841                          Register src1,
1842                          T src2,
1843                          Label* on_not_smi_result,
1844                          Label::Distance near_jump) {
1845   if (dst.is(src1)) {
1846     Label done;
1847     masm->subp(dst, src2);
1848     masm->j(no_overflow, &done, Label::kNear);
1849     // Restore src1.
1850     masm->addp(dst, src2);
1851     masm->jmp(on_not_smi_result, near_jump);
1852     masm->bind(&done);
1853   } else {
1854     masm->movp(dst, src1);
1855     masm->subp(dst, src2);
1856     masm->j(overflow, on_not_smi_result, near_jump);
1857   }
1858 }
1859 
1860 
1861 void MacroAssembler::SmiSub(Register dst,
1862                             Register src1,
1863                             Register src2,
1864                             Label* on_not_smi_result,
1865                             Label::Distance near_jump) {
1866   DCHECK_NOT_NULL(on_not_smi_result);
1867   DCHECK(!dst.is(src2));
1868   SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
1869 }
1870 
1871 
1872 void MacroAssembler::SmiSub(Register dst,
1873                             Register src1,
1874                             const Operand& src2,
1875                             Label* on_not_smi_result,
1876                             Label::Distance near_jump) {
1877   DCHECK_NOT_NULL(on_not_smi_result);
1878   DCHECK(!src2.AddressUsesRegister(dst));
1879   SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
1880 }
1881 
1882 
1883 template<class T>
1884 static void SmiSubNoOverflowHelper(MacroAssembler* masm,
1885                                    Register dst,
1886                                    Register src1,
1887                                    T src2) {
1888   // No overflow checking. Use only when it's known that
1889   // overflowing is impossible (e.g., subtracting two positive smis).
1890   if (!dst.is(src1)) {
1891     masm->movp(dst, src1);
1892   }
1893   masm->subp(dst, src2);
1894   masm->Assert(no_overflow, kSmiSubtractionOverflow);
1895 }
1896 
1897 
1898 void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
1899   DCHECK(!dst.is(src2));
1900   SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
1901 }
1902 
1903 
1904 void MacroAssembler::SmiSub(Register dst,
1905                             Register src1,
1906                             const Operand& src2) {
1907   SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
1908 }
1909 
1910 
1911 void MacroAssembler::SmiMul(Register dst,
1912                             Register src1,
1913                             Register src2,
1914                             Label* on_not_smi_result,
1915                             Label::Distance near_jump) {
1916   DCHECK(!dst.is(src2));
1917   DCHECK(!dst.is(kScratchRegister));
1918   DCHECK(!src1.is(kScratchRegister));
1919   DCHECK(!src2.is(kScratchRegister));
1920 
1921   if (dst.is(src1)) {
1922     Label failure, zero_correct_result;
1923     movp(kScratchRegister, src1);  // Create backup for later testing.
1924     SmiToInteger64(dst, src1);
1925     imulp(dst, src2);
1926     j(overflow, &failure, Label::kNear);
1927 
1928     // Check for negative zero result.  If product is zero, and one
1929     // argument is negative, go to slow case.
1930     Label correct_result;
1931     testp(dst, dst);
1932     j(not_zero, &correct_result, Label::kNear);
1933 
1934     movp(dst, kScratchRegister);
1935     xorp(dst, src2);
1936     // Result was positive zero.
1937     j(positive, &zero_correct_result, Label::kNear);
1938 
1939     bind(&failure);  // Reused failure exit, restores src1.
1940     movp(src1, kScratchRegister);
1941     jmp(on_not_smi_result, near_jump);
1942 
1943     bind(&zero_correct_result);
1944     Set(dst, 0);
1945 
1946     bind(&correct_result);
1947   } else {
1948     SmiToInteger64(dst, src1);
1949     imulp(dst, src2);
1950     j(overflow, on_not_smi_result, near_jump);
1951     // Check for negative zero result.  If product is zero, and one
1952     // argument is negative, go to slow case.
1953     Label correct_result;
1954     testp(dst, dst);
1955     j(not_zero, &correct_result, Label::kNear);
1956     // One of src1 and src2 is zero; check whether the other is
1957     // negative.
1958     movp(kScratchRegister, src1);
1959     xorp(kScratchRegister, src2);
1960     j(negative, on_not_smi_result, near_jump);
1961     bind(&correct_result);
1962   }
1963 }
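
// A sketch (illustration only) of the negative-zero test above, which keys
// on the sign of the xor of the operands:
//
//   int64_t a = 0, b = -3;
//   int64_t p = a * b;                          // 0, but JavaScript wants -0
//   bool minus_zero = p == 0 && (a ^ b) < 0;    // signs differ => slow case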
1964 
1965 
1966 void MacroAssembler::SmiDiv(Register dst,
1967                             Register src1,
1968                             Register src2,
1969                             Label* on_not_smi_result,
1970                             Label::Distance near_jump) {
1971   DCHECK(!src1.is(kScratchRegister));
1972   DCHECK(!src2.is(kScratchRegister));
1973   DCHECK(!dst.is(kScratchRegister));
1974   DCHECK(!src2.is(rax));
1975   DCHECK(!src2.is(rdx));
1976   DCHECK(!src1.is(rdx));
1977 
1978   // Check for 0 divisor (result is +/-Infinity).
1979   testp(src2, src2);
1980   j(zero, on_not_smi_result, near_jump);
1981 
1982   if (src1.is(rax)) {
1983     movp(kScratchRegister, src1);
1984   }
1985   SmiToInteger32(rax, src1);
1986   // We need to rule out dividing Smi::kMinValue by -1, since that would
1987   // overflow in idiv and raise an exception.
1988   // We combine this with negative zero test (negative zero only happens
1989   // when dividing zero by a negative number).
1990 
1991   // We overshoot a little and go to slow case if we divide min-value
1992   // by any negative value, not just -1.
1993   Label safe_div;
1994   testl(rax, Immediate(~Smi::kMinValue));
1995   j(not_zero, &safe_div, Label::kNear);
1996   testp(src2, src2);
1997   if (src1.is(rax)) {
1998     j(positive, &safe_div, Label::kNear);
1999     movp(src1, kScratchRegister);
2000     jmp(on_not_smi_result, near_jump);
2001   } else {
2002     j(negative, on_not_smi_result, near_jump);
2003   }
2004   bind(&safe_div);
2005 
2006   SmiToInteger32(src2, src2);
2007   // Sign extend eax (the untagged src1) into edx:eax.
2008   cdq();
2009   idivl(src2);
2010   Integer32ToSmi(src2, src2);
2011   // Check that the remainder is zero.
2012   testl(rdx, rdx);
2013   if (src1.is(rax)) {
2014     Label smi_result;
2015     j(zero, &smi_result, Label::kNear);
2016     movp(src1, kScratchRegister);
2017     jmp(on_not_smi_result, near_jump);
2018     bind(&smi_result);
2019   } else {
2020     j(not_zero, on_not_smi_result, near_jump);
2021   }
2022   if (!dst.is(src1) && src1.is(rax)) {
2023     movp(src1, kScratchRegister);
2024   }
2025   Integer32ToSmi(dst, rax);
2026 }
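
// A sketch (illustration only): the testl against ~Smi::kMinValue above is a
// one-instruction test for "dividend is 0 or Smi::kMinValue", the only valid
// payloads with no bits outside the sign bit. With INT32_MIN standing in for
// Smi::kMinValue in the 32-bit-payload configuration:
//
//   bool maybe_bad = (x & ~INT32_MIN) == 0;   // x == 0 || x == INT32_MIN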
2027 
2028 
2029 void MacroAssembler::SmiMod(Register dst,
2030                             Register src1,
2031                             Register src2,
2032                             Label* on_not_smi_result,
2033                             Label::Distance near_jump) {
2034   DCHECK(!dst.is(kScratchRegister));
2035   DCHECK(!src1.is(kScratchRegister));
2036   DCHECK(!src2.is(kScratchRegister));
2037   DCHECK(!src2.is(rax));
2038   DCHECK(!src2.is(rdx));
2039   DCHECK(!src1.is(rdx));
2040   DCHECK(!src1.is(src2));
2041 
2042   testp(src2, src2);
2043   j(zero, on_not_smi_result, near_jump);
2044 
2045   if (src1.is(rax)) {
2046     movp(kScratchRegister, src1);
2047   }
2048   SmiToInteger32(rax, src1);
2049   SmiToInteger32(src2, src2);
2050 
2051   // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
2052   Label safe_div;
2053   cmpl(rax, Immediate(Smi::kMinValue));
2054   j(not_equal, &safe_div, Label::kNear);
2055   cmpl(src2, Immediate(-1));
2056   j(not_equal, &safe_div, Label::kNear);
2057   // Retag inputs and go to the slow case.
2058   Integer32ToSmi(src2, src2);
2059   if (src1.is(rax)) {
2060     movp(src1, kScratchRegister);
2061   }
2062   jmp(on_not_smi_result, near_jump);
2063   bind(&safe_div);
2064 
2065   // Sign extend eax into edx:eax.
2066   cdq();
2067   idivl(src2);
2068   // Restore smi tags on inputs.
2069   Integer32ToSmi(src2, src2);
2070   if (src1.is(rax)) {
2071     movp(src1, kScratchRegister);
2072   }
2073   // Check for a negative zero result.  If the result is zero, and the
2074   // dividend is negative, go slow to return a floating point negative zero.
2075   Label smi_result;
2076   testl(rdx, rdx);
2077   j(not_zero, &smi_result, Label::kNear);
2078   testp(src1, src1);
2079   j(negative, on_not_smi_result, near_jump);
2080   bind(&smi_result);
2081   Integer32ToSmi(dst, rdx);
2082 }
2083 
2084 
2085 void MacroAssembler::SmiNot(Register dst, Register src) {
2086   DCHECK(!dst.is(kScratchRegister));
2087   DCHECK(!src.is(kScratchRegister));
2088   if (SmiValuesAre32Bits()) {
2089     // Set tag and padding bits before negating, so that they are zero
2090     // afterwards.
2091     movl(kScratchRegister, Immediate(~0));
2092   } else {
2093     DCHECK(SmiValuesAre31Bits());
2094     movl(kScratchRegister, Immediate(1));
2095   }
2096   if (dst.is(src)) {
2097     xorp(dst, kScratchRegister);
2098   } else {
2099     leap(dst, Operand(src, kScratchRegister, times_1, 0));
2100   }
2101   notp(dst);
2102 }
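
// A sketch (illustration only, assuming the 32-bit-payload encoding where a
// smi is value << 32): setting the tag/padding bits to 1 before the
// complement makes them come out as 0, so the result is again a valid smi:
//
//   uint32_t v = ...;                  // payload
//   uint64_t smi = uint64_t{v} << 32;
//   uint64_t t = smi ^ 0xFFFFFFFFu;    // or smi + 0xFFFFFFFF, as leap does;
//                                      // the low 32 bits are zero, no carry
//   uint64_t result = ~t;              // == (~v) << 32, still a valid smi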
2103 
2104 
2105 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
2106   DCHECK(!dst.is(src2));
2107   if (!dst.is(src1)) {
2108     movp(dst, src1);
2109   }
2110   andp(dst, src2);
2111 }
2112 
2113 
2114 void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
2115   if (constant->value() == 0) {
2116     Set(dst, 0);
2117   } else if (dst.is(src)) {
2118     DCHECK(!dst.is(kScratchRegister));
2119     Register constant_reg = GetSmiConstant(constant);
2120     andp(dst, constant_reg);
2121   } else {
2122     LoadSmiConstant(dst, constant);
2123     andp(dst, src);
2124   }
2125 }
2126 
2127 
2128 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
2129   if (!dst.is(src1)) {
2130     DCHECK(!src1.is(src2));
2131     movp(dst, src1);
2132   }
2133   orp(dst, src2);
2134 }
2135 
2136 
2137 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
2138   if (dst.is(src)) {
2139     DCHECK(!dst.is(kScratchRegister));
2140     Register constant_reg = GetSmiConstant(constant);
2141     orp(dst, constant_reg);
2142   } else {
2143     LoadSmiConstant(dst, constant);
2144     orp(dst, src);
2145   }
2146 }
2147 
2148 
2149 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
2150   if (!dst.is(src1)) {
2151     DCHECK(!src1.is(src2));
2152     movp(dst, src1);
2153   }
2154   xorp(dst, src2);
2155 }
2156 
2157 
2158 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
2159   if (dst.is(src)) {
2160     DCHECK(!dst.is(kScratchRegister));
2161     Register constant_reg = GetSmiConstant(constant);
2162     xorp(dst, constant_reg);
2163   } else {
2164     LoadSmiConstant(dst, constant);
2165     xorp(dst, src);
2166   }
2167 }
2168 
2169 
2170 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
2171                                                      Register src,
2172                                                      int shift_value) {
2173   DCHECK(is_uint5(shift_value));
2174   if (shift_value > 0) {
2175     if (dst.is(src)) {
2176       sarp(dst, Immediate(shift_value + kSmiShift));
2177       shlp(dst, Immediate(kSmiShift));
2178     } else {
2179       UNIMPLEMENTED();  // Not used.
2180     }
2181   }
2182 }
2183 
2184 
2185 void MacroAssembler::SmiShiftLeftConstant(Register dst,
2186                                           Register src,
2187                                           int shift_value,
2188                                           Label* on_not_smi_result,
2189                                           Label::Distance near_jump) {
2190   if (SmiValuesAre32Bits()) {
2191     if (!dst.is(src)) {
2192       movp(dst, src);
2193     }
2194     if (shift_value > 0) {
2195       // The shift amount is given by the lower 5 bits, not six as for shl.
2196       shlq(dst, Immediate(shift_value & 0x1f));
2197     }
2198   } else {
2199     DCHECK(SmiValuesAre31Bits());
2200     if (dst.is(src)) {
2201       UNIMPLEMENTED();  // Not used.
2202     } else {
2203       SmiToInteger32(dst, src);
2204       shll(dst, Immediate(shift_value));
2205       JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump);
2206       Integer32ToSmi(dst, dst);
2207     }
2208   }
2209 }
2210 
2211 
2212 void MacroAssembler::SmiShiftLogicalRightConstant(
2213     Register dst, Register src, int shift_value,
2214     Label* on_not_smi_result, Label::Distance near_jump) {
2215   // Logical right shift interprets its result as an *unsigned* number.
2216   if (dst.is(src)) {
2217     UNIMPLEMENTED();  // Not used.
2218   } else {
2219     if (shift_value == 0) {
2220       testp(src, src);
2221       j(negative, on_not_smi_result, near_jump);
2222     }
2223     if (SmiValuesAre32Bits()) {
2224       movp(dst, src);
2225       shrp(dst, Immediate(shift_value + kSmiShift));
2226       shlp(dst, Immediate(kSmiShift));
2227     } else {
2228       DCHECK(SmiValuesAre31Bits());
2229       SmiToInteger32(dst, src);
2230       shrp(dst, Immediate(shift_value));
2231       JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
2232       Integer32ToSmi(dst, dst);
2233     }
2234   }
2235 }
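
// A sketch (illustration only) of why shift_value == 0 bails out on negative
// inputs above: a logical shift by zero still reinterprets the value as
// unsigned,
//
//   int32_t v = -1;
//   uint32_t u = static_cast<uint32_t>(v) >> 0;   // 4294967295
//
// which is too large for a smi, so negative inputs take the slow path.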
2236 
2237 
2238 void MacroAssembler::SmiShiftLeft(Register dst,
2239                                   Register src1,
2240                                   Register src2,
2241                                   Label* on_not_smi_result,
2242                                   Label::Distance near_jump) {
2243   if (SmiValuesAre32Bits()) {
2244     DCHECK(!dst.is(rcx));
2245     if (!dst.is(src1)) {
2246       movp(dst, src1);
2247     }
2248     // Untag shift amount.
2249     SmiToInteger32(rcx, src2);
2250     // The shift amount is given by the lower 5 bits, not six as for shl.
2251     andp(rcx, Immediate(0x1f));
2252     shlq_cl(dst);
2253   } else {
2254     DCHECK(SmiValuesAre31Bits());
2255     DCHECK(!dst.is(kScratchRegister));
2256     DCHECK(!src1.is(kScratchRegister));
2257     DCHECK(!src2.is(kScratchRegister));
2258     DCHECK(!dst.is(src2));
2259     DCHECK(!dst.is(rcx));
2260 
2261     if (src1.is(rcx) || src2.is(rcx)) {
2262       movq(kScratchRegister, rcx);
2263     }
2264     if (dst.is(src1)) {
2265       UNIMPLEMENTED();  // Not used.
2266     } else {
2267       Label valid_result;
2268       SmiToInteger32(dst, src1);
2269       SmiToInteger32(rcx, src2);
2270       shll_cl(dst);
2271       JumpIfValidSmiValue(dst, &valid_result, Label::kNear);
2272       // Since neither src1 nor src2 can be dst, we do not need to
2273       // restore them after clobbering dst.
2274       if (src1.is(rcx) || src2.is(rcx)) {
2275         if (src1.is(rcx)) {
2276           movq(src1, kScratchRegister);
2277         } else {
2278           movq(src2, kScratchRegister);
2279         }
2280       }
2281       jmp(on_not_smi_result, near_jump);
2282       bind(&valid_result);
2283       Integer32ToSmi(dst, dst);
2284     }
2285   }
2286 }
2287 
2288 
2289 void MacroAssembler::SmiShiftLogicalRight(Register dst,
2290                                           Register src1,
2291                                           Register src2,
2292                                           Label* on_not_smi_result,
2293                                           Label::Distance near_jump) {
2294   DCHECK(!dst.is(kScratchRegister));
2295   DCHECK(!src1.is(kScratchRegister));
2296   DCHECK(!src2.is(kScratchRegister));
2297   DCHECK(!dst.is(src2));
2298   DCHECK(!dst.is(rcx));
2299   if (src1.is(rcx) || src2.is(rcx)) {
2300     movq(kScratchRegister, rcx);
2301   }
2302   if (dst.is(src1)) {
2303     UNIMPLEMENTED();  // Not used.
2304   } else {
2305     Label valid_result;
2306     SmiToInteger32(dst, src1);
2307     SmiToInteger32(rcx, src2);
2308     shrl_cl(dst);
2309     JumpIfUIntValidSmiValue(dst, &valid_result, Label::kNear);
2310     // Since neither src1 nor src2 can be dst, we do not need to
2311     // restore them after clobbering dst.
2312     if (src1.is(rcx) || src2.is(rcx)) {
2313       if (src1.is(rcx)) {
2314         movq(src1, kScratchRegister);
2315       } else {
2316         movq(src2, kScratchRegister);
2317       }
2318     }
2319     jmp(on_not_smi_result, near_jump);
2320     bind(&valid_result);
2321     Integer32ToSmi(dst, dst);
2322   }
2323 }
2324 
2325 
2326 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
2327                                              Register src1,
2328                                              Register src2) {
2329   DCHECK(!dst.is(kScratchRegister));
2330   DCHECK(!src1.is(kScratchRegister));
2331   DCHECK(!src2.is(kScratchRegister));
2332   DCHECK(!dst.is(rcx));
2333 
2334   SmiToInteger32(rcx, src2);
2335   if (!dst.is(src1)) {
2336     movp(dst, src1);
2337   }
2338   SmiToInteger32(dst, dst);
2339   sarl_cl(dst);
2340   Integer32ToSmi(dst, dst);
2341 }
2342 
2343 
2344 void MacroAssembler::SelectNonSmi(Register dst,
2345                                   Register src1,
2346                                   Register src2,
2347                                   Label* on_not_smis,
2348                                   Label::Distance near_jump) {
2349   DCHECK(!dst.is(kScratchRegister));
2350   DCHECK(!src1.is(kScratchRegister));
2351   DCHECK(!src2.is(kScratchRegister));
2352   DCHECK(!dst.is(src1));
2353   DCHECK(!dst.is(src2));
2354   // The operands must not both be smis.
2355 #ifdef DEBUG
2356   Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
2357   Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
2358 #endif
2359   STATIC_ASSERT(kSmiTag == 0);
2360   DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
2361   movl(kScratchRegister, Immediate(kSmiTagMask));
2362   andp(kScratchRegister, src1);
2363   testl(kScratchRegister, src2);
2364   // If non-zero then both are smis.
2365   j(not_zero, on_not_smis, near_jump);
2366 
2367   // Exactly one operand is a smi.
2368   DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
2369   // kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
2370   subp(kScratchRegister, Immediate(1));
2371   // If src1 is a smi, the scratch register is all 1s, else it is all 0s.
2372   movp(dst, src1);
2373   xorp(dst, src2);
2374   andp(dst, kScratchRegister);
2375   // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
2376   xorp(dst, src1);
2377   // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
2378 }
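
// A sketch (illustration only) of the branch-free select above, in plain
// integer arithmetic:
//
//   uint64_t m = (src1 & 1) - 1;                    // ~0 if src1 is a smi
//   uint64_t non_smi = ((src1 ^ src2) & m) ^ src1;  // src2 if src1 is a smi,
//                                                   // otherwise src1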
2379 
2380 
2381 SmiIndex MacroAssembler::SmiToIndex(Register dst,
2382                                     Register src,
2383                                     int shift) {
2384   if (SmiValuesAre32Bits()) {
2385     DCHECK(is_uint6(shift));
2386     // There is a possible optimization if shift is in the range 60-63, but that
2387     // will (and must) never happen.
2388     if (!dst.is(src)) {
2389       movp(dst, src);
2390     }
2391     if (shift < kSmiShift) {
2392       sarp(dst, Immediate(kSmiShift - shift));
2393     } else {
2394       shlp(dst, Immediate(shift - kSmiShift));
2395     }
2396     return SmiIndex(dst, times_1);
2397   } else {
2398     DCHECK(SmiValuesAre31Bits());
2399     DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
2400     if (!dst.is(src)) {
2401       movp(dst, src);
2402     }
2403     // We have to sign-extend the index register to 64 bits, as the smi
2404     // might be negative.
2405     movsxlq(dst, dst);
2406     if (shift == times_1) {
2407       sarq(dst, Immediate(kSmiShift));
2408       return SmiIndex(dst, times_1);
2409     }
2410     return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
2411   }
2412 }
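
// A sketch (illustration only, assuming 32-bit payloads with kSmiShift == 32):
// untagging and scaling collapse into a single shift, e.g. for a times_8 index:
//
//   int64_t tagged = int64_t{v} << 32;         // smi encoding of v
//   int64_t byte_offset = tagged >> (32 - 3);  // == v << 3, i.e. v * 8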
2413 
2414 
2415 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
2416                                             Register src,
2417                                             int shift) {
2418   if (SmiValuesAre32Bits()) {
2419     // Register src holds a positive smi.
2420     DCHECK(is_uint6(shift));
2421     if (!dst.is(src)) {
2422       movp(dst, src);
2423     }
2424     negp(dst);
2425     if (shift < kSmiShift) {
2426       sarp(dst, Immediate(kSmiShift - shift));
2427     } else {
2428       shlp(dst, Immediate(shift - kSmiShift));
2429     }
2430     return SmiIndex(dst, times_1);
2431   } else {
2432     DCHECK(SmiValuesAre31Bits());
2433     DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
2434     if (!dst.is(src)) {
2435       movp(dst, src);
2436     }
2437     negq(dst);
2438     if (shift == times_1) {
2439       sarq(dst, Immediate(kSmiShift));
2440       return SmiIndex(dst, times_1);
2441     }
2442     return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
2443   }
2444 }
2445 
2446 
2447 void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
2448   if (SmiValuesAre32Bits()) {
2449     DCHECK_EQ(0, kSmiShift % kBitsPerByte);
2450     addl(dst, Operand(src, kSmiShift / kBitsPerByte));
2451   } else {
2452     DCHECK(SmiValuesAre31Bits());
2453     SmiToInteger32(kScratchRegister, src);
2454     addl(dst, kScratchRegister);
2455   }
2456 }
2457 
2458 
2459 void MacroAssembler::Push(Smi* source) {
2460   intptr_t smi = reinterpret_cast<intptr_t>(source);
2461   if (is_int32(smi)) {
2462     Push(Immediate(static_cast<int32_t>(smi)));
2463   } else {
2464     Register constant = GetSmiConstant(source);
2465     Push(constant);
2466   }
2467 }
2468 
2469 
2470 void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
2471   DCHECK(!src.is(scratch));
2472   movp(scratch, src);
2473   // High bits.
2474   shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
2475   shlp(src, Immediate(kSmiShift));
2476   Push(src);
2477   // Low bits.
2478   shlp(scratch, Immediate(kSmiShift));
2479   Push(scratch);
2480 }
2481 
2482 
2483 void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
2484   DCHECK(!dst.is(scratch));
2485   Pop(scratch);
2486   // Low bits.
2487   shrp(scratch, Immediate(kSmiShift));
2488   Pop(dst);
2489   shrp(dst, Immediate(kSmiShift));
2490   // High bits.
2491   shlp(dst, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
2492   orp(dst, scratch);
2493 }
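
// A sketch (illustration only, assuming kSmiShift == 32) of the two-smi round
// trip above, which splits a 64-bit word into two halves, each carried in the
// payload bits of a valid smi:
//
//   uint64_t high_smi = (w >> 32) << 32;        // top half, already smi-tagged
//   uint64_t low_smi = w << 32;                 // bottom half as a smi
//   uint64_t w2 = (low_smi >> 32) | high_smi;   // == w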
2494 
2495 
2496 void MacroAssembler::Test(const Operand& src, Smi* source) {
2497   if (SmiValuesAre32Bits()) {
2498     testl(Operand(src, kIntSize), Immediate(source->value()));
2499   } else {
2500     DCHECK(SmiValuesAre31Bits());
2501     testl(src, Immediate(source));
2502   }
2503 }
2504 
2505 
2506 // ----------------------------------------------------------------------------
2507 
2508 
2509 void MacroAssembler::JumpIfNotString(Register object,
2510                                      Register object_map,
2511                                      Label* not_string,
2512                                      Label::Distance near_jump) {
2513   Condition is_smi = CheckSmi(object);
2514   j(is_smi, not_string, near_jump);
2515   CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
2516   j(above_equal, not_string, near_jump);
2517 }
2518 
2519 
2520 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(
2521     Register first_object, Register second_object, Register scratch1,
2522     Register scratch2, Label* on_fail, Label::Distance near_jump) {
2523   // Check that both objects are not smis.
2524   Condition either_smi = CheckEitherSmi(first_object, second_object);
2525   j(either_smi, on_fail, near_jump);
2526 
2527   // Load instance type for both strings.
2528   movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
2529   movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
2530   movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
2531   movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
2532 
2533   // Check that both are flat one-byte strings.
2534   DCHECK(kNotStringTag != 0);
2535   const int kFlatOneByteStringMask =
2536       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2537   const int kFlatOneByteStringTag =
2538       kStringTag | kOneByteStringTag | kSeqStringTag;
2539 
2540   andl(scratch1, Immediate(kFlatOneByteStringMask));
2541   andl(scratch2, Immediate(kFlatOneByteStringMask));
2542   // Pack the two masked types into disjoint bit fields and check both in one test.
2543   DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
2544   leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2545   cmpl(scratch1,
2546        Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
2547   j(not_equal, on_fail, near_jump);
2548 }
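
// A sketch (illustration only): because the mask and the mask shifted left by
// 3 do not overlap (see the DCHECK above), one lea packs both masked types
// into disjoint bit fields and a single compare checks them together:
//
//   int combined = (t1 & kMask) + ((t2 & kMask) << 3);   // t2 scaled by 8
//   bool both_flat_one_byte = combined == kTag + (kTag << 3);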
2549 
2550 
2551 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
2552     Register instance_type, Register scratch, Label* failure,
2553     Label::Distance near_jump) {
2554   if (!scratch.is(instance_type)) {
2555     movl(scratch, instance_type);
2556   }
2557 
2558   const int kFlatOneByteStringMask =
2559       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2560 
2561   andl(scratch, Immediate(kFlatOneByteStringMask));
2562   cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
2563   j(not_equal, failure, near_jump);
2564 }
2565 
2566 
2567 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
2568     Register first_object_instance_type, Register second_object_instance_type,
2569     Register scratch1, Register scratch2, Label* on_fail,
2570     Label::Distance near_jump) {
2571   // Load instance type for both strings.
2572   movp(scratch1, first_object_instance_type);
2573   movp(scratch2, second_object_instance_type);
2574 
2575   // Check that both are flat one-byte strings.
2576   DCHECK(kNotStringTag != 0);
2577   const int kFlatOneByteStringMask =
2578       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2579   const int kFlatOneByteStringTag =
2580       kStringTag | kOneByteStringTag | kSeqStringTag;
2581 
2582   andl(scratch1, Immediate(kFlatOneByteStringMask));
2583   andl(scratch2, Immediate(kFlatOneByteStringMask));
2584   // Pack the two masked types into disjoint bit fields and check both in one test.
2585   DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
2586   leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2587   cmpl(scratch1,
2588        Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
2589   j(not_equal, on_fail, near_jump);
2590 }
2591 
2592 
2593 template<class T>
2594 static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
2595                                       T operand_or_register,
2596                                       Label* not_unique_name,
2597                                       Label::Distance distance) {
2598   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2599   Label succeed;
2600   masm->testb(operand_or_register,
2601               Immediate(kIsNotStringMask | kIsNotInternalizedMask));
2602   masm->j(zero, &succeed, Label::kNear);
2603   masm->cmpb(operand_or_register, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
2604   masm->j(not_equal, not_unique_name, distance);
2605 
2606   masm->bind(&succeed);
2607 }
2608 
2609 
2610 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
2611                                                      Label* not_unique_name,
2612                                                      Label::Distance distance) {
2613   JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
2614 }
2615 
2616 
2617 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
2618                                                      Label* not_unique_name,
2619                                                      Label::Distance distance) {
2620   JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
2621 }
2622 
2623 
2624 void MacroAssembler::Move(Register dst, Register src) {
2625   if (!dst.is(src)) {
2626     movp(dst, src);
2627   }
2628 }
2629 
2630 
2631 void MacroAssembler::Move(Register dst, Handle<Object> source) {
2632   AllowDeferredHandleDereference smi_check;
2633   if (source->IsSmi()) {
2634     Move(dst, Smi::cast(*source));
2635   } else {
2636     MoveHeapObject(dst, source);
2637   }
2638 }
2639 
2640 
2641 void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
2642   AllowDeferredHandleDereference smi_check;
2643   if (source->IsSmi()) {
2644     Move(dst, Smi::cast(*source));
2645   } else {
2646     MoveHeapObject(kScratchRegister, source);
2647     movp(dst, kScratchRegister);
2648   }
2649 }
2650 
2651 
2652 void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
2653   if (src == 0) {
2654     Xorpd(dst, dst);
2655   } else {
2656     unsigned pop = base::bits::CountPopulation32(src);
2657     DCHECK_NE(0u, pop);
2658     if (pop == 32) {
2659       Pcmpeqd(dst, dst);
2660     } else {
2661       movl(kScratchRegister, Immediate(src));
2662       Movq(dst, kScratchRegister);
2663     }
2664   }
2665 }
2666 
2667 
2668 void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
2669   if (src == 0) {
2670     Xorpd(dst, dst);
2671   } else {
2672     unsigned nlz = base::bits::CountLeadingZeros64(src);
2673     unsigned ntz = base::bits::CountTrailingZeros64(src);
2674     unsigned pop = base::bits::CountPopulation64(src);
2675     DCHECK_NE(0u, pop);
2676     if (pop == 64) {
2677       Pcmpeqd(dst, dst);
2678     } else if (pop + ntz == 64) {
2679       Pcmpeqd(dst, dst);
2680       Psllq(dst, ntz);
2681     } else if (pop + nlz == 64) {
2682       Pcmpeqd(dst, dst);
2683       Psrlq(dst, nlz);
2684     } else {
2685       uint32_t lower = static_cast<uint32_t>(src);
2686       uint32_t upper = static_cast<uint32_t>(src >> 32);
2687       if (upper == 0) {
2688         Move(dst, lower);
2689       } else {
2690         movq(kScratchRegister, src);
2691         Movq(dst, kScratchRegister);
2692       }
2693     }
2694   }
2695 }
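
// A sketch (illustration only): the popcount/zero-count cases above recognize
// constants that are one contiguous run of ones anchored at bit 0 or bit 63,
// which pcmpeqd (all ones) plus a single shift can materialize:
//
//   uint64_t src = 0xFFFFFFFF00000000;  // pop == 32, ntz == 32, pop + ntz == 64
//   uint64_t v = ~uint64_t{0} << 32;    // all-ones shifted by ntz; v == src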
2696 
2697 
2698 void MacroAssembler::Movaps(XMMRegister dst, XMMRegister src) {
2699   if (CpuFeatures::IsSupported(AVX)) {
2700     CpuFeatureScope scope(this, AVX);
2701     vmovaps(dst, src);
2702   } else {
2703     movaps(dst, src);
2704   }
2705 }
2706 
2707 void MacroAssembler::Movups(XMMRegister dst, XMMRegister src) {
2708   if (CpuFeatures::IsSupported(AVX)) {
2709     CpuFeatureScope scope(this, AVX);
2710     vmovups(dst, src);
2711   } else {
2712     movups(dst, src);
2713   }
2714 }
2715 
2716 void MacroAssembler::Movups(XMMRegister dst, const Operand& src) {
2717   if (CpuFeatures::IsSupported(AVX)) {
2718     CpuFeatureScope scope(this, AVX);
2719     vmovups(dst, src);
2720   } else {
2721     movups(dst, src);
2722   }
2723 }
2724 
2725 void MacroAssembler::Movups(const Operand& dst, XMMRegister src) {
2726   if (CpuFeatures::IsSupported(AVX)) {
2727     CpuFeatureScope scope(this, AVX);
2728     vmovups(dst, src);
2729   } else {
2730     movups(dst, src);
2731   }
2732 }
2733 
2734 void MacroAssembler::Movapd(XMMRegister dst, XMMRegister src) {
2735   if (CpuFeatures::IsSupported(AVX)) {
2736     CpuFeatureScope scope(this, AVX);
2737     vmovapd(dst, src);
2738   } else {
2739     movapd(dst, src);
2740   }
2741 }
2742 
2743 void MacroAssembler::Movupd(XMMRegister dst, const Operand& src) {
2744   if (CpuFeatures::IsSupported(AVX)) {
2745     CpuFeatureScope scope(this, AVX);
2746     vmovupd(dst, src);
2747   } else {
2748     movupd(dst, src);
2749   }
2750 }
2751 
2752 void MacroAssembler::Movupd(const Operand& dst, XMMRegister src) {
2753   if (CpuFeatures::IsSupported(AVX)) {
2754     CpuFeatureScope scope(this, AVX);
2755     vmovupd(dst, src);
2756   } else {
2757     movupd(dst, src);
2758   }
2759 }
2760 
2761 void MacroAssembler::Movsd(XMMRegister dst, XMMRegister src) {
2762   if (CpuFeatures::IsSupported(AVX)) {
2763     CpuFeatureScope scope(this, AVX);
2764     vmovsd(dst, dst, src);
2765   } else {
2766     movsd(dst, src);
2767   }
2768 }
2769 
2770 
2771 void MacroAssembler::Movsd(XMMRegister dst, const Operand& src) {
2772   if (CpuFeatures::IsSupported(AVX)) {
2773     CpuFeatureScope scope(this, AVX);
2774     vmovsd(dst, src);
2775   } else {
2776     movsd(dst, src);
2777   }
2778 }
2779 
2780 
2781 void MacroAssembler::Movsd(const Operand& dst, XMMRegister src) {
2782   if (CpuFeatures::IsSupported(AVX)) {
2783     CpuFeatureScope scope(this, AVX);
2784     vmovsd(dst, src);
2785   } else {
2786     movsd(dst, src);
2787   }
2788 }
2789 
2790 
2791 void MacroAssembler::Movss(XMMRegister dst, XMMRegister src) {
2792   if (CpuFeatures::IsSupported(AVX)) {
2793     CpuFeatureScope scope(this, AVX);
2794     vmovss(dst, dst, src);
2795   } else {
2796     movss(dst, src);
2797   }
2798 }
2799 
2800 
2801 void MacroAssembler::Movss(XMMRegister dst, const Operand& src) {
2802   if (CpuFeatures::IsSupported(AVX)) {
2803     CpuFeatureScope scope(this, AVX);
2804     vmovss(dst, src);
2805   } else {
2806     movss(dst, src);
2807   }
2808 }
2809 
2810 
2811 void MacroAssembler::Movss(const Operand& dst, XMMRegister src) {
2812   if (CpuFeatures::IsSupported(AVX)) {
2813     CpuFeatureScope scope(this, AVX);
2814     vmovss(dst, src);
2815   } else {
2816     movss(dst, src);
2817   }
2818 }
2819 
2820 
2821 void MacroAssembler::Movd(XMMRegister dst, Register src) {
2822   if (CpuFeatures::IsSupported(AVX)) {
2823     CpuFeatureScope scope(this, AVX);
2824     vmovd(dst, src);
2825   } else {
2826     movd(dst, src);
2827   }
2828 }
2829 
2830 
2831 void MacroAssembler::Movd(XMMRegister dst, const Operand& src) {
2832   if (CpuFeatures::IsSupported(AVX)) {
2833     CpuFeatureScope scope(this, AVX);
2834     vmovd(dst, src);
2835   } else {
2836     movd(dst, src);
2837   }
2838 }
2839 
2840 
2841 void MacroAssembler::Movd(Register dst, XMMRegister src) {
2842   if (CpuFeatures::IsSupported(AVX)) {
2843     CpuFeatureScope scope(this, AVX);
2844     vmovd(dst, src);
2845   } else {
2846     movd(dst, src);
2847   }
2848 }
2849 
2850 
2851 void MacroAssembler::Movq(XMMRegister dst, Register src) {
2852   if (CpuFeatures::IsSupported(AVX)) {
2853     CpuFeatureScope scope(this, AVX);
2854     vmovq(dst, src);
2855   } else {
2856     movq(dst, src);
2857   }
2858 }
2859 
2860 
2861 void MacroAssembler::Movq(Register dst, XMMRegister src) {
2862   if (CpuFeatures::IsSupported(AVX)) {
2863     CpuFeatureScope scope(this, AVX);
2864     vmovq(dst, src);
2865   } else {
2866     movq(dst, src);
2867   }
2868 }
2869 
2870 void MacroAssembler::Movmskps(Register dst, XMMRegister src) {
2871   if (CpuFeatures::IsSupported(AVX)) {
2872     CpuFeatureScope scope(this, AVX);
2873     vmovmskps(dst, src);
2874   } else {
2875     movmskps(dst, src);
2876   }
2877 }
2878 
2879 void MacroAssembler::Movmskpd(Register dst, XMMRegister src) {
2880   if (CpuFeatures::IsSupported(AVX)) {
2881     CpuFeatureScope scope(this, AVX);
2882     vmovmskpd(dst, src);
2883   } else {
2884     movmskpd(dst, src);
2885   }
2886 }
2887 
2888 void MacroAssembler::Xorps(XMMRegister dst, XMMRegister src) {
2889   if (CpuFeatures::IsSupported(AVX)) {
2890     CpuFeatureScope scope(this, AVX);
2891     vxorps(dst, dst, src);
2892   } else {
2893     xorps(dst, src);
2894   }
2895 }
2896 
2897 void MacroAssembler::Xorps(XMMRegister dst, const Operand& src) {
2898   if (CpuFeatures::IsSupported(AVX)) {
2899     CpuFeatureScope scope(this, AVX);
2900     vxorps(dst, dst, src);
2901   } else {
2902     xorps(dst, src);
2903   }
2904 }
2905 
2906 void MacroAssembler::Roundss(XMMRegister dst, XMMRegister src,
2907                              RoundingMode mode) {
2908   if (CpuFeatures::IsSupported(AVX)) {
2909     CpuFeatureScope scope(this, AVX);
2910     vroundss(dst, dst, src, mode);
2911   } else {
2912     roundss(dst, src, mode);
2913   }
2914 }
2915 
2916 
2917 void MacroAssembler::Roundsd(XMMRegister dst, XMMRegister src,
2918                              RoundingMode mode) {
2919   if (CpuFeatures::IsSupported(AVX)) {
2920     CpuFeatureScope scope(this, AVX);
2921     vroundsd(dst, dst, src, mode);
2922   } else {
2923     roundsd(dst, src, mode);
2924   }
2925 }
2926 
2927 
2928 void MacroAssembler::Sqrtsd(XMMRegister dst, XMMRegister src) {
2929   if (CpuFeatures::IsSupported(AVX)) {
2930     CpuFeatureScope scope(this, AVX);
2931     vsqrtsd(dst, dst, src);
2932   } else {
2933     sqrtsd(dst, src);
2934   }
2935 }
2936 
2937 
2938 void MacroAssembler::Sqrtsd(XMMRegister dst, const Operand& src) {
2939   if (CpuFeatures::IsSupported(AVX)) {
2940     CpuFeatureScope scope(this, AVX);
2941     vsqrtsd(dst, dst, src);
2942   } else {
2943     sqrtsd(dst, src);
2944   }
2945 }
2946 
2947 
2948 void MacroAssembler::Ucomiss(XMMRegister src1, XMMRegister src2) {
2949   if (CpuFeatures::IsSupported(AVX)) {
2950     CpuFeatureScope scope(this, AVX);
2951     vucomiss(src1, src2);
2952   } else {
2953     ucomiss(src1, src2);
2954   }
2955 }
2956 
2957 
2958 void MacroAssembler::Ucomiss(XMMRegister src1, const Operand& src2) {
2959   if (CpuFeatures::IsSupported(AVX)) {
2960     CpuFeatureScope scope(this, AVX);
2961     vucomiss(src1, src2);
2962   } else {
2963     ucomiss(src1, src2);
2964   }
2965 }
2966 
2967 
2968 void MacroAssembler::Ucomisd(XMMRegister src1, XMMRegister src2) {
2969   if (CpuFeatures::IsSupported(AVX)) {
2970     CpuFeatureScope scope(this, AVX);
2971     vucomisd(src1, src2);
2972   } else {
2973     ucomisd(src1, src2);
2974   }
2975 }
2976 
2977 
2978 void MacroAssembler::Ucomisd(XMMRegister src1, const Operand& src2) {
2979   if (CpuFeatures::IsSupported(AVX)) {
2980     CpuFeatureScope scope(this, AVX);
2981     vucomisd(src1, src2);
2982   } else {
2983     ucomisd(src1, src2);
2984   }
2985 }
2986 
2987 // ----------------------------------------------------------------------------
2988 
2989 void MacroAssembler::Absps(XMMRegister dst) {
2990   Andps(dst,
2991         ExternalOperand(ExternalReference::address_of_float_abs_constant()));
2992 }
2993 
2994 void MacroAssembler::Negps(XMMRegister dst) {
2995   Xorps(dst,
2996         ExternalOperand(ExternalReference::address_of_float_neg_constant()));
2997 }
2998 
2999 void MacroAssembler::Abspd(XMMRegister dst) {
3000   Andps(dst,
3001         ExternalOperand(ExternalReference::address_of_double_abs_constant()));
3002 }
3003 
3004 void MacroAssembler::Negpd(XMMRegister dst) {
3005   Xorps(dst,
3006         ExternalOperand(ExternalReference::address_of_double_neg_constant()));
3007 }
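
// A sketch (illustration only): the abs/neg constants referenced above are
// sign-bit masks; IEEE-754 abs is an AND and negation an XOR on each lane's
// bit pattern:
//
//   uint32_t abs_bits = bits & 0x7FFFFFFFu;   // clear the sign bit
//   uint32_t neg_bits = bits ^ 0x80000000u;   // flip the sign bit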
3008 
3009 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
3010   AllowDeferredHandleDereference smi_check;
3011   if (source->IsSmi()) {
3012     Cmp(dst, Smi::cast(*source));
3013   } else {
3014     MoveHeapObject(kScratchRegister, source);
3015     cmpp(dst, kScratchRegister);
3016   }
3017 }
3018 
3019 
3020 void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
3021   AllowDeferredHandleDereference smi_check;
3022   if (source->IsSmi()) {
3023     Cmp(dst, Smi::cast(*source));
3024   } else {
3025     MoveHeapObject(kScratchRegister, source);
3026     cmpp(dst, kScratchRegister);
3027   }
3028 }
3029 
3030 
3031 void MacroAssembler::Push(Handle<Object> source) {
3032   AllowDeferredHandleDereference smi_check;
3033   if (source->IsSmi()) {
3034     Push(Smi::cast(*source));
3035   } else {
3036     MoveHeapObject(kScratchRegister, source);
3037     Push(kScratchRegister);
3038   }
3039 }
3040 
3041 
3042 void MacroAssembler::MoveHeapObject(Register result,
3043                                     Handle<Object> object) {
3044   DCHECK(object->IsHeapObject());
3045   Move(result, object, RelocInfo::EMBEDDED_OBJECT);
3046 }
3047 
3048 
3049 void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
3050   if (dst.is(rax)) {
3051     AllowDeferredHandleDereference embedding_raw_address;
3052     load_rax(cell.location(), RelocInfo::CELL);
3053   } else {
3054     Move(dst, cell, RelocInfo::CELL);
3055     movp(dst, Operand(dst, 0));
3056   }
3057 }
3058 
3059 
3060 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
3061                                   Register scratch) {
3062   Move(scratch, cell, RelocInfo::EMBEDDED_OBJECT);
3063   cmpp(value, FieldOperand(scratch, WeakCell::kValueOffset));
3064 }
3065 
3066 
3067 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
3068   Move(value, cell, RelocInfo::EMBEDDED_OBJECT);
3069   movp(value, FieldOperand(value, WeakCell::kValueOffset));
3070 }
3071 
3072 
3073 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
3074                                    Label* miss) {
3075   GetWeakValue(value, cell);
3076   JumpIfSmi(value, miss);
3077 }
3078 
3079 
3080 void MacroAssembler::Drop(int stack_elements) {
3081   if (stack_elements > 0) {
3082     addp(rsp, Immediate(stack_elements * kPointerSize));
3083   }
3084 }
3085 
3086 
DropUnderReturnAddress(int stack_elements,Register scratch)3087 void MacroAssembler::DropUnderReturnAddress(int stack_elements,
3088                                             Register scratch) {
3089   DCHECK(stack_elements > 0);
3090   if (kPointerSize == kInt64Size && stack_elements == 1) {
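    // A single popq with a memory destination reads the return address and,
    // because rsp has already been incremented when the destination address
    // is computed, writes it back one slot higher -- dropping the element
    // underneath the return address in one instruction.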
    popq(MemOperand(rsp, 0));
    return;
  }

  PopReturnAddressTo(scratch);
  Drop(stack_elements);
  PushReturnAddressFrom(scratch);
}


void MacroAssembler::Push(Register src) {
  if (kPointerSize == kInt64Size) {
    pushq(src);
  } else {
    // x32 uses 64-bit push for rbp in the prologue.
    DCHECK(src.code() != rbp.code());
    leal(rsp, Operand(rsp, -4));
    movp(Operand(rsp, 0), src);
  }
}


void MacroAssembler::Push(const Operand& src) {
  if (kPointerSize == kInt64Size) {
    pushq(src);
  } else {
    movp(kScratchRegister, src);
    leal(rsp, Operand(rsp, -4));
    movp(Operand(rsp, 0), kScratchRegister);
  }
}


void MacroAssembler::PushQuad(const Operand& src) {
  if (kPointerSize == kInt64Size) {
    pushq(src);
  } else {
    movp(kScratchRegister, src);
    pushq(kScratchRegister);
  }
}


void MacroAssembler::Push(Immediate value) {
  if (kPointerSize == kInt64Size) {
    pushq(value);
  } else {
    leal(rsp, Operand(rsp, -4));
    movp(Operand(rsp, 0), value);
  }
}


void MacroAssembler::PushImm32(int32_t imm32) {
  if (kPointerSize == kInt64Size) {
    pushq_imm32(imm32);
  } else {
    leal(rsp, Operand(rsp, -4));
    movp(Operand(rsp, 0), Immediate(imm32));
  }
}


void MacroAssembler::Pop(Register dst) {
  if (kPointerSize == kInt64Size) {
    popq(dst);
  } else {
    // x32 uses 64-bit pop for rbp in the epilogue.
    DCHECK(dst.code() != rbp.code());
    movp(dst, Operand(rsp, 0));
    leal(rsp, Operand(rsp, 4));
  }
}


void MacroAssembler::Pop(const Operand& dst) {
  if (kPointerSize == kInt64Size) {
    popq(dst);
  } else {
    Register scratch = dst.AddressUsesRegister(kScratchRegister)
        ? kRootRegister : kScratchRegister;
    movp(scratch, Operand(rsp, 0));
    movp(dst, scratch);
    leal(rsp, Operand(rsp, 4));
    if (scratch.is(kRootRegister)) {
      // Restore kRootRegister.
      InitializeRootRegister();
    }
  }
}


void MacroAssembler::PopQuad(const Operand& dst) {
  if (kPointerSize == kInt64Size) {
    popq(dst);
  } else {
    popq(kScratchRegister);
    movp(dst, kScratchRegister);
  }
}
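// SharedFunctionInfo "special fields" are raw 32-bit ints. On 64-bit targets
// a sign-extending 32-bit load suffices; the odd-index DCHECKs below reflect
// how these ints are laid out in pairs within pointer-sized slots. On x32
// the field is stored as a Smi and must be untagged after loading.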
void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst,
                                                        Register base,
                                                        int offset) {
  DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
         offset <= SharedFunctionInfo::kSize &&
         (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
  if (kPointerSize == kInt64Size) {
    movsxlq(dst, FieldOperand(base, offset));
  } else {
    movp(dst, FieldOperand(base, offset));
    SmiToInteger32(dst, dst);
  }
}
void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base,
                                                           int offset,
                                                           int bits) {
  DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
         offset <= SharedFunctionInfo::kSize &&
         (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
  if (kPointerSize == kInt32Size) {
    // On x32, this field is represented as a Smi.
    bits += kSmiShift;
  }
  int byte_offset = bits / kBitsPerByte;
  int bit_in_byte = bits & (kBitsPerByte - 1);
  testb(FieldOperand(base, offset + byte_offset), Immediate(1 << bit_in_byte));
}
void MacroAssembler::Jump(ExternalReference ext) {
  LoadAddress(kScratchRegister, ext);
  jmp(kScratchRegister);
}


void MacroAssembler::Jump(const Operand& op) {
  if (kPointerSize == kInt64Size) {
    jmp(op);
  } else {
    movp(kScratchRegister, op);
    jmp(kScratchRegister);
  }
}


void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
  Move(kScratchRegister, destination, rmode);
  jmp(kScratchRegister);
}


void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
  // TODO(X64): Inline this
  jmp(code_object, rmode);
}
int MacroAssembler::CallSize(ExternalReference ext) {
  // A call through kScratchRegister (REX.B FF /2) is three bytes long.
  return LoadAddressSize(ext) +
         Assembler::kCallScratchRegisterInstructionLength;
}
void MacroAssembler::Call(ExternalReference ext) {
#ifdef DEBUG
  int end_position = pc_offset() + CallSize(ext);
#endif
  LoadAddress(kScratchRegister, ext);
  call(kScratchRegister);
#ifdef DEBUG
  CHECK_EQ(end_position, pc_offset());
#endif
}


void MacroAssembler::Call(const Operand& op) {
  if (kPointerSize == kInt64Size && !CpuFeatures::IsSupported(ATOM)) {
    call(op);
  } else {
    movp(kScratchRegister, op);
    call(kScratchRegister);
  }
}


void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
#ifdef DEBUG
  int end_position = pc_offset() + CallSize(destination);
#endif
  Move(kScratchRegister, destination, rmode);
  call(kScratchRegister);
#ifdef DEBUG
  CHECK_EQ(pc_offset(), end_position);
#endif
}


void MacroAssembler::Call(Handle<Code> code_object,
                          RelocInfo::Mode rmode,
                          TypeFeedbackId ast_id) {
#ifdef DEBUG
  int end_position = pc_offset() + CallSize(code_object);
#endif
  DCHECK(RelocInfo::IsCodeTarget(rmode) ||
      rmode == RelocInfo::CODE_AGE_SEQUENCE);
  call(code_object, rmode, ast_id);
#ifdef DEBUG
  CHECK_EQ(end_position, pc_offset());
#endif
}
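// Pextrd/Pinsrd wrap the SSE4.1 instructions and fall back to plain SSE2
// sequences when SSE4.1 is unavailable: lane extraction via movq+shrq, lane
// insertion via movd plus punpckldq/movss. The fallbacks only support lanes
// 0 and 1, which the DCHECKs enforce.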
void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
  if (imm8 == 0) {
    Movd(dst, src);
    return;
  }
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope sse_scope(this, SSE4_1);
    pextrd(dst, src, imm8);
    return;
  }
  DCHECK_EQ(1, imm8);
  movq(dst, src);
  shrq(dst, Immediate(32));
}


void MacroAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope sse_scope(this, SSE4_1);
    pinsrd(dst, src, imm8);
    return;
  }
  Movd(kScratchDoubleReg, src);
  if (imm8 == 1) {
    punpckldq(dst, kScratchDoubleReg);
  } else {
    DCHECK_EQ(0, imm8);
    Movss(dst, kScratchDoubleReg);
  }
}


void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
  DCHECK(imm8 == 0 || imm8 == 1);
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope sse_scope(this, SSE4_1);
    pinsrd(dst, src, imm8);
    return;
  }
  Movd(kScratchDoubleReg, src);
  if (imm8 == 1) {
    punpckldq(dst, kScratchDoubleReg);
  } else {
    DCHECK_EQ(0, imm8);
    Movss(dst, kScratchDoubleReg);
  }
}
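// LZCNT fallback: for nonzero x, bsr yields the index of the highest set
// bit, and lzcnt(x) == 31 - bsr(x) == 31 ^ bsr(x) since bsr(x) <= 31.
// Seeding the destination with 63 before the xor makes the zero case (where
// bsr leaves the destination undefined) come out as 63 ^ 31 == 32, matching
// lzcnt's defined result for a zero input.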
void MacroAssembler::Lzcntl(Register dst, Register src) {
  if (CpuFeatures::IsSupported(LZCNT)) {
    CpuFeatureScope scope(this, LZCNT);
    lzcntl(dst, src);
    return;
  }
  Label not_zero_src;
  bsrl(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 63);  // 63^31 == 32
  bind(&not_zero_src);
  xorl(dst, Immediate(31));  // for x in [0..31], 31^x == 31 - x
}


void MacroAssembler::Lzcntl(Register dst, const Operand& src) {
  if (CpuFeatures::IsSupported(LZCNT)) {
    CpuFeatureScope scope(this, LZCNT);
    lzcntl(dst, src);
    return;
  }
  Label not_zero_src;
  bsrl(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 63);  // 63^31 == 32
  bind(&not_zero_src);
  xorl(dst, Immediate(31));  // for x in [0..31], 31^x == 31 - x
}


void MacroAssembler::Lzcntq(Register dst, Register src) {
  if (CpuFeatures::IsSupported(LZCNT)) {
    CpuFeatureScope scope(this, LZCNT);
    lzcntq(dst, src);
    return;
  }
  Label not_zero_src;
  bsrq(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 127);  // 127^63 == 64
  bind(&not_zero_src);
  xorl(dst, Immediate(63));  // for x in [0..63], 63^x == 63 - x
}


void MacroAssembler::Lzcntq(Register dst, const Operand& src) {
  if (CpuFeatures::IsSupported(LZCNT)) {
    CpuFeatureScope scope(this, LZCNT);
    lzcntq(dst, src);
    return;
  }
  Label not_zero_src;
  bsrq(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 127);  // 127^63 == 64
  bind(&not_zero_src);
  xorl(dst, Immediate(63));  // for x in [0..63], 63^x == 63 - x
}
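// TZCNT fallback: bsf already returns the trailing-zero count for any
// nonzero input, so only the zero case (where bsf leaves the destination
// undefined) needs to be patched to the architectural result of 64 or 32.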
void MacroAssembler::Tzcntq(Register dst, Register src) {
  if (CpuFeatures::IsSupported(BMI1)) {
    CpuFeatureScope scope(this, BMI1);
    tzcntq(dst, src);
    return;
  }
  Label not_zero_src;
  bsfq(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
  Set(dst, 64);
  bind(&not_zero_src);
}


void MacroAssembler::Tzcntq(Register dst, const Operand& src) {
  if (CpuFeatures::IsSupported(BMI1)) {
    CpuFeatureScope scope(this, BMI1);
    tzcntq(dst, src);
    return;
  }
  Label not_zero_src;
  bsfq(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
  Set(dst, 64);
  bind(&not_zero_src);
}


void MacroAssembler::Tzcntl(Register dst, Register src) {
  if (CpuFeatures::IsSupported(BMI1)) {
    CpuFeatureScope scope(this, BMI1);
    tzcntl(dst, src);
    return;
  }
  Label not_zero_src;
  bsfl(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 32);  // The result of tzcnt is 32 if src = 0.
  bind(&not_zero_src);
}


void MacroAssembler::Tzcntl(Register dst, const Operand& src) {
  if (CpuFeatures::IsSupported(BMI1)) {
    CpuFeatureScope scope(this, BMI1);
    tzcntl(dst, src);
    return;
  }
  Label not_zero_src;
  bsfl(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 32);  // The result of tzcnt is 32 if src = 0.
  bind(&not_zero_src);
}
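// Unlike Lzcnt/Tzcnt there is no software fallback for Popcnt here; callers
// are expected to guard on CpuFeatures::IsSupported(POPCNT), and reaching
// the UNREACHABLE() below means that contract was violated.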
void MacroAssembler::Popcntl(Register dst, Register src) {
  if (CpuFeatures::IsSupported(POPCNT)) {
    CpuFeatureScope scope(this, POPCNT);
    popcntl(dst, src);
    return;
  }
  UNREACHABLE();
}


void MacroAssembler::Popcntl(Register dst, const Operand& src) {
  if (CpuFeatures::IsSupported(POPCNT)) {
    CpuFeatureScope scope(this, POPCNT);
    popcntl(dst, src);
    return;
  }
  UNREACHABLE();
}


void MacroAssembler::Popcntq(Register dst, Register src) {
  if (CpuFeatures::IsSupported(POPCNT)) {
    CpuFeatureScope scope(this, POPCNT);
    popcntq(dst, src);
    return;
  }
  UNREACHABLE();
}


void MacroAssembler::Popcntq(Register dst, const Operand& src) {
  if (CpuFeatures::IsSupported(POPCNT)) {
    CpuFeatureScope scope(this, POPCNT);
    popcntq(dst, src);
    return;
  }
  UNREACHABLE();
}


void MacroAssembler::Pushad() {
  Push(rax);
  Push(rcx);
  Push(rdx);
  Push(rbx);
  // Not pushing rsp or rbp.
  Push(rsi);
  Push(rdi);
  Push(r8);
  Push(r9);
  // r10 is kScratchRegister.
  Push(r11);
  Push(r12);
  // r13 is kRootRegister.
  Push(r14);
  Push(r15);
  STATIC_ASSERT(12 == kNumSafepointSavedRegisters);
  // Use lea for symmetry with Popad.
  int sp_delta =
      (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
  leap(rsp, Operand(rsp, -sp_delta));
}


void MacroAssembler::Popad() {
  // Popad must not change the flags, so use lea instead of addq.
  int sp_delta =
      (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
  leap(rsp, Operand(rsp, sp_delta));
  Pop(r15);
  Pop(r14);
  Pop(r12);
  Pop(r11);
  Pop(r9);
  Pop(r8);
  Pop(rdi);
  Pop(rsi);
  Pop(rbx);
  Pop(rdx);
  Pop(rcx);
  Pop(rax);
}


void MacroAssembler::Dropad() {
  addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
}
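// The table below maps a register code to its slot in the Pushad layout;
// -1 marks registers that Pushad does not save (rsp, rbp, the scratch
// register r10, and the root register r13).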
// Order general registers are pushed by Pushad:
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
const int
MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
    0,
    1,
    2,
    3,
    -1,
    -1,
    4,
    5,
    6,
    7,
    -1,
    8,
    9,
    -1,
    10,
    11
};
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
                                                  const Immediate& imm) {
  movp(SafepointRegisterSlot(dst), imm);
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
  movp(SafepointRegisterSlot(dst), src);
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  movp(dst, SafepointRegisterSlot(src));
}


Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


void MacroAssembler::PushStackHandler() {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);

  // Link the current handler as the next handler.
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  Push(ExternalOperand(handler_address));

  // Set this new handler as the current one.
  movp(ExternalOperand(handler_address), rsp);
}


void MacroAssembler::PopStackHandler() {
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  Pop(ExternalOperand(handler_address));
  addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}


void MacroAssembler::Ret() {
  ret(0);
}


void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
  if (is_uint16(bytes_dropped)) {
    ret(bytes_dropped);
  } else {
    PopReturnAddressTo(scratch);
    addp(rsp, Immediate(bytes_dropped));
    PushReturnAddressFrom(scratch);
    ret(0);
  }
}
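// fucomip compares ST(0) against ST(1), sets ZF/PF/CF directly, and pops
// once; the following fstp(0) pops again, leaving the x87 stack empty with
// the comparison result in EFLAGS.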
void MacroAssembler::FCmp() {
  fucomip();
  fstp(0);
}


void MacroAssembler::CmpObjectType(Register heap_object,
                                   InstanceType type,
                                   Register map) {
  movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  CmpInstanceType(map, type);
}


void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
       Immediate(static_cast<int8_t>(type)));
}

void MacroAssembler::CheckFastObjectElements(Register map,
                                             Label* fail,
                                             Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
  j(below_equal, fail, distance);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Immediate(Map::kMaximumBitField2FastHoleyElementValue));
  j(above, fail, distance);
}


void MacroAssembler::CheckFastSmiElements(Register map,
                                          Label* fail,
                                          Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
  j(above, fail, distance);
}
void MacroAssembler::StoreNumberToDoubleElements(
    Register maybe_number,
    Register elements,
    Register index,
    XMMRegister xmm_scratch,
    Label* fail,
    int elements_offset) {
  Label smi_value, done;

  JumpIfSmi(maybe_number, &smi_value, Label::kNear);

  CheckMap(maybe_number,
           isolate()->factory()->heap_number_map(),
           fail,
           DONT_DO_SMI_CHECK);

  // Double value, turn potential sNaN into qNaN.
  Move(xmm_scratch, 1.0);
  mulsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
  jmp(&done, Label::kNear);

  bind(&smi_value);
  // Value is a smi. Convert to a double and store.
  // Preserve original value.
  SmiToInteger32(kScratchRegister, maybe_number);
  Cvtlsi2sd(xmm_scratch, kScratchRegister);
  bind(&done);
  Movsd(FieldOperand(elements, index, times_8,
                     FixedDoubleArray::kHeaderSize - elements_offset),
        xmm_scratch);
}
void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
}


void MacroAssembler::CheckMap(Register obj,
                              Handle<Map> map,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }

  CompareMap(obj, map);
  j(not_equal, fail);
}


void MacroAssembler::ClampUint8(Register reg) {
  Label done;
  testl(reg, Immediate(0xFFFFFF00));
  j(zero, &done, Label::kNear);
  setcc(negative, reg);  // 1 if negative, 0 if positive.
  decb(reg);  // 0 if negative, 255 if positive.
  bind(&done);
}
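// Note on the overflow check below: cvtsd2si returns the "integer
// indefinite" value 0x80000000 when the double cannot be represented, and
// cmpl(reg, 1) sets the overflow flag only for that value (INT_MIN - 1
// wraps), so j(overflow) cheaply detects a failed conversion.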
void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
                                        XMMRegister temp_xmm_reg,
                                        Register result_reg) {
  Label done;
  Label conv_failure;
  Xorpd(temp_xmm_reg, temp_xmm_reg);
  Cvtsd2si(result_reg, input_reg);
  testl(result_reg, Immediate(0xFFFFFF00));
  j(zero, &done, Label::kNear);
  cmpl(result_reg, Immediate(1));
  j(overflow, &conv_failure, Label::kNear);
  movl(result_reg, Immediate(0));
  setcc(sign, result_reg);
  subl(result_reg, Immediate(1));
  andl(result_reg, Immediate(255));
  jmp(&done, Label::kNear);
  bind(&conv_failure);
  Set(result_reg, 0);
  Ucomisd(input_reg, temp_xmm_reg);
  j(below, &done, Label::kNear);
  Set(result_reg, 255);
  bind(&done);
}


void MacroAssembler::LoadUint32(XMMRegister dst,
                                Register src) {
  if (FLAG_debug_code) {
    cmpq(src, Immediate(0xffffffff));
    Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
  }
  Cvtqsi2sd(dst, src);
}


void MacroAssembler::SlowTruncateToI(Register result_reg,
                                     Register input_reg,
                                     int offset) {
  DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
  call(stub.GetCode(), RelocInfo::CODE_TARGET);
}
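// The two truncation helpers below use the same trick: Cvttsd2siq yields
// 0x8000000000000000 on overflow or NaN, and cmpq(result, 1) overflows only
// for that bit pattern, so j(no_overflow) takes the fast path and everything
// else is routed through the DoubleToIStub slow path.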
void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
                                           Register input_reg) {
  Label done;
  Movsd(kScratchDoubleReg, FieldOperand(input_reg, HeapNumber::kValueOffset));
  Cvttsd2siq(result_reg, kScratchDoubleReg);
  cmpq(result_reg, Immediate(1));
  j(no_overflow, &done, Label::kNear);

  // Slow case.
  if (input_reg.is(result_reg)) {
    subp(rsp, Immediate(kDoubleSize));
    Movsd(MemOperand(rsp, 0), kScratchDoubleReg);
    SlowTruncateToI(result_reg, rsp, 0);
    addp(rsp, Immediate(kDoubleSize));
  } else {
    SlowTruncateToI(result_reg, input_reg);
  }

  bind(&done);
  // Keep our invariant that the upper 32 bits are zero.
  movl(result_reg, result_reg);
}


void MacroAssembler::TruncateDoubleToI(Register result_reg,
                                       XMMRegister input_reg) {
  Label done;
  Cvttsd2siq(result_reg, input_reg);
  cmpq(result_reg, Immediate(1));
  j(no_overflow, &done, Label::kNear);

  subp(rsp, Immediate(kDoubleSize));
  Movsd(MemOperand(rsp, 0), input_reg);
  SlowTruncateToI(result_reg, rsp, 0);
  addp(rsp, Immediate(kDoubleSize));

  bind(&done);
  // Keep our invariant that the upper 32 bits are zero.
  movl(result_reg, result_reg);
}


void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
                               XMMRegister scratch,
                               MinusZeroMode minus_zero_mode,
                               Label* lost_precision, Label* is_nan,
                               Label* minus_zero, Label::Distance dst) {
  Cvttsd2si(result_reg, input_reg);
  Cvtlsi2sd(kScratchDoubleReg, result_reg);
  Ucomisd(kScratchDoubleReg, input_reg);
  j(not_equal, lost_precision, dst);
  j(parity_even, is_nan, dst);  // NaN.
  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
    Label done;
    // The integer converted back is equal to the original. We
    // only have to test if we got -0 as an input.
    testl(result_reg, result_reg);
    j(not_zero, &done, Label::kNear);
    Movmskpd(result_reg, input_reg);
    // Bit 0 contains the sign of the double in input_reg.
    // If input was positive, we are ok and return 0, otherwise
    // jump to minus_zero.
    andl(result_reg, Immediate(1));
    j(not_zero, minus_zero, dst);
    bind(&done);
  }
}


void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
}


void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  movl(dst, FieldOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}


void MacroAssembler::EnumLength(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  movl(dst, FieldOperand(map, Map::kBitField3Offset));
  andl(dst, Immediate(Map::EnumLengthBits::kMask));
  Integer32ToSmi(dst, dst);
}


void MacroAssembler::LoadAccessor(Register dst, Register holder,
                                  int accessor_index,
                                  AccessorComponent accessor) {
  movp(dst, FieldOperand(holder, HeapObject::kMapOffset));
  LoadInstanceDescriptors(dst, dst);
  movp(dst, FieldOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
  int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
                                           : AccessorPair::kSetterOffset;
  movp(dst, FieldOperand(dst, offset));
}


void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
                                     Register scratch2, Handle<WeakCell> cell,
                                     Handle<Code> success,
                                     SmiCheckType smi_check_type) {
  Label fail;
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, &fail);
  }
  movq(scratch1, FieldOperand(obj, HeapObject::kMapOffset));
  CmpWeakValue(scratch1, cell, scratch2);
  j(equal, success, RelocInfo::CODE_TARGET);
  bind(&fail);
}


void MacroAssembler::AssertNumber(Register object) {
  if (emit_debug_code()) {
    Label ok;
    Condition is_smi = CheckSmi(object);
    j(is_smi, &ok, Label::kNear);
    Cmp(FieldOperand(object, HeapObject::kMapOffset),
        isolate()->factory()->heap_number_map());
    Check(equal, kOperandIsNotANumber);
    bind(&ok);
  }
}
void MacroAssembler::AssertNotNumber(Register object) {
  if (emit_debug_code()) {
    Condition is_smi = CheckSmi(object);
    Check(NegateCondition(is_smi), kOperandIsANumber);
    Cmp(FieldOperand(object, HeapObject::kMapOffset),
        isolate()->factory()->heap_number_map());
    Check(not_equal, kOperandIsANumber);
  }
}

void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    Condition is_smi = CheckSmi(object);
    Check(NegateCondition(is_smi), kOperandIsASmi);
  }
}


void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    Condition is_smi = CheckSmi(object);
    Check(is_smi, kOperandIsNotASmi);
  }
}


void MacroAssembler::AssertSmi(const Operand& object) {
  if (emit_debug_code()) {
    Condition is_smi = CheckSmi(object);
    Check(is_smi, kOperandIsNotASmi);
  }
}


void MacroAssembler::AssertZeroExtended(Register int32_register) {
  if (emit_debug_code()) {
    DCHECK(!int32_register.is(kScratchRegister));
    movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
    cmpq(kScratchRegister, int32_register);
    Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
  }
}


void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    testb(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAString);
    Push(object);
    movp(object, FieldOperand(object, HeapObject::kMapOffset));
    CmpInstanceType(object, FIRST_NONSTRING_TYPE);
    Pop(object);
    Check(below, kOperandIsNotAString);
  }
}


void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    testb(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAName);
    Push(object);
    movp(object, FieldOperand(object, HeapObject::kMapOffset));
    CmpInstanceType(object, LAST_NAME_TYPE);
    Pop(object);
    Check(below_equal, kOperandIsNotAName);
  }
}


void MacroAssembler::AssertFunction(Register object) {
  if (emit_debug_code()) {
    testb(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAFunction);
    Push(object);
    CmpObjectType(object, JS_FUNCTION_TYPE, object);
    Pop(object);
    Check(equal, kOperandIsNotAFunction);
  }
}


void MacroAssembler::AssertBoundFunction(Register object) {
  if (emit_debug_code()) {
    testb(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotABoundFunction);
    Push(object);
    CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
    Pop(object);
    Check(equal, kOperandIsNotABoundFunction);
  }
}

void MacroAssembler::AssertGeneratorObject(Register object) {
  if (emit_debug_code()) {
    testb(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAGeneratorObject);
    Push(object);
    CmpObjectType(object, JS_GENERATOR_OBJECT_TYPE, object);
    Pop(object);
    Check(equal, kOperandIsNotAGeneratorObject);
  }
}

void MacroAssembler::AssertReceiver(Register object) {
  if (emit_debug_code()) {
    testb(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAReceiver);
    Push(object);
    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
    CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, object);
    Pop(object);
    Check(above_equal, kOperandIsNotAReceiver);
  }
}


void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    Cmp(object, isolate()->factory()->undefined_value());
    j(equal, &done_checking);
    Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
    Assert(equal, kExpectedUndefinedOrCell);
    bind(&done_checking);
  }
}
void MacroAssembler::AssertRootValue(Register src,
                                     Heap::RootListIndex root_value_index,
                                     BailoutReason reason) {
  if (emit_debug_code()) {
    DCHECK(!src.is(kScratchRegister));
    LoadRoot(kScratchRegister, root_value_index);
    cmpp(src, kScratchRegister);
    Check(equal, reason);
  }
}


Condition MacroAssembler::IsObjectStringType(Register heap_object,
                                             Register map,
                                             Register instance_type) {
  movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kNotStringTag != 0);
  testb(instance_type, Immediate(kIsNotStringMask));
  return zero;
}


Condition MacroAssembler::IsObjectNameType(Register heap_object,
                                           Register map,
                                           Register instance_type) {
  movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
  return below_equal;
}


void MacroAssembler::GetMapConstructor(Register result, Register map,
                                       Register temp) {
  Label done, loop;
  movp(result, FieldOperand(map, Map::kConstructorOrBackPointerOffset));
  bind(&loop);
  JumpIfSmi(result, &done, Label::kNear);
  CmpObjectType(result, MAP_TYPE, temp);
  j(not_equal, &done, Label::kNear);
  movp(result, FieldOperand(result, Map::kConstructorOrBackPointerOffset));
  jmp(&loop);
  bind(&done);
}


void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
                                             Label* miss) {
  // Get the prototype or initial map from the function.
  movp(result,
       FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // If the prototype or initial map is the hole, don't return it and
  // simply miss the cache instead. This will allow us to allocate a
  // prototype object on-demand in the runtime system.
  CompareRoot(result, Heap::kTheHoleValueRootIndex);
  j(equal, miss);

  // If the function does not have an initial map, we're done.
  Label done;
  CmpObjectType(result, MAP_TYPE, kScratchRegister);
  j(not_equal, &done, Label::kNear);

  // Get the prototype from the initial map.
  movp(result, FieldOperand(result, Map::kPrototypeOffset));

  // All done.
  bind(&done);
}


void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand counter_operand = ExternalOperand(ExternalReference(counter));
    movl(counter_operand, Immediate(value));
  }
}


void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
  DCHECK(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand counter_operand = ExternalOperand(ExternalReference(counter));
    if (value == 1) {
      incl(counter_operand);
    } else {
      addl(counter_operand, Immediate(value));
    }
  }
}


void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
  DCHECK(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand counter_operand = ExternalOperand(ExternalReference(counter));
    if (value == 1) {
      decl(counter_operand);
    } else {
      subl(counter_operand, Immediate(value));
    }
  }
}


void MacroAssembler::DebugBreak() {
  Set(rax, 0);  // No arguments.
  LoadAddress(rbx,
              ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
  CEntryStub ces(isolate(), 1);
  DCHECK(AllowThisStubCall(&ces));
  Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
}
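// PrepareForTailCall tears down the current frame before a tail call: it
// computes where the stack pointer must end up, saves the return address
// from the caller's frame, restores the caller's frame pointer, and then
// copies the callee arguments (plus receiver and return address) over the
// dying frame, iterating backwards because the source and destination
// regions may overlap.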
void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
                                        Register caller_args_count_reg,
                                        Register scratch0, Register scratch1,
                                        ReturnAddressState ra_state) {
#if DEBUG
  if (callee_args_count.is_reg()) {
    DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
                       scratch1));
  } else {
    DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
  }
#endif

  // Calculate the destination address where we will put the return address
  // after we drop the current frame.
  Register new_sp_reg = scratch0;
  if (callee_args_count.is_reg()) {
    subp(caller_args_count_reg, callee_args_count.reg());
    leap(new_sp_reg, Operand(rbp, caller_args_count_reg, times_pointer_size,
                             StandardFrameConstants::kCallerPCOffset));
  } else {
    leap(new_sp_reg, Operand(rbp, caller_args_count_reg, times_pointer_size,
                             StandardFrameConstants::kCallerPCOffset -
                                 callee_args_count.immediate() * kPointerSize));
  }

  if (FLAG_debug_code) {
    cmpp(rsp, new_sp_reg);
    Check(below, kStackAccessBelowStackPointer);
  }

  // Copy the return address from the caller's frame to the current frame's
  // return address slot so it is not trashed, and let the following loop
  // copy it to the right place.
  Register tmp_reg = scratch1;
  if (ra_state == ReturnAddressState::kOnStack) {
    movp(tmp_reg, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
    movp(Operand(rsp, 0), tmp_reg);
  } else {
    DCHECK(ReturnAddressState::kNotOnStack == ra_state);
    Push(Operand(rbp, StandardFrameConstants::kCallerPCOffset));
  }

  // Restore the caller's frame pointer now, as it could be overwritten by
  // the copying loop.
  movp(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));

  // +2 here is to copy both the receiver and the return address.
  Register count_reg = caller_args_count_reg;
  if (callee_args_count.is_reg()) {
    leap(count_reg, Operand(callee_args_count.reg(), 2));
  } else {
    movp(count_reg, Immediate(callee_args_count.immediate() + 2));
    // TODO(ishell): Unroll copying loop for small immediate values.
  }

  // Now copy the callee arguments to the caller frame, going backwards to
  // avoid corrupting them (the source and destination areas could overlap).
  Label loop, entry;
  jmp(&entry, Label::kNear);
  bind(&loop);
  decp(count_reg);
  movp(tmp_reg, Operand(rsp, count_reg, times_pointer_size, 0));
  movp(Operand(new_sp_reg, count_reg, times_pointer_size, 0), tmp_reg);
  bind(&entry);
  cmpp(count_reg, Immediate(0));
  j(not_equal, &loop, Label::kNear);

  // Leave the current frame.
  movp(rsp, new_sp_reg);
}
void MacroAssembler::InvokeFunction(Register function,
                                    Register new_target,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  movp(rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
  LoadSharedFunctionInfoSpecialField(
      rbx, rbx, SharedFunctionInfo::kFormalParameterCountOffset);

  ParameterCount expected(rbx);
  InvokeFunction(function, new_target, expected, actual, flag, call_wrapper);
}


void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  Move(rdi, function);
  InvokeFunction(rdi, no_reg, expected, actual, flag, call_wrapper);
}


void MacroAssembler::InvokeFunction(Register function,
                                    Register new_target,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  DCHECK(function.is(rdi));
  movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
  InvokeFunctionCode(rdi, new_target, expected, actual, flag, call_wrapper);
}


void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
                                        const ParameterCount& expected,
                                        const ParameterCount& actual,
                                        InvokeFlag flag,
                                        const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());
  DCHECK(function.is(rdi));
  DCHECK_IMPLIES(new_target.is_valid(), new_target.is(rdx));

  if (call_wrapper.NeedsDebugStepCheck()) {
    FloodFunctionIfStepping(function, new_target, expected, actual);
  }

  // Clear the new.target register if not given.
  if (!new_target.is_valid()) {
    LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
  }

  Label done;
  bool definitely_mismatches = false;
  InvokePrologue(expected,
                 actual,
                 &done,
                 &definitely_mismatches,
                 flag,
                 Label::kNear,
                 call_wrapper);
  if (!definitely_mismatches) {
    // We call indirectly through the code field in the function to
    // allow recompilation to take effect without changing any of the
    // call sites.
    Operand code = FieldOperand(function, JSFunction::kCodeEntryOffset);
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(code));
      call(code);
      call_wrapper.AfterCall();
    } else {
      DCHECK(flag == JUMP_FUNCTION);
      jmp(code);
    }
    bind(&done);
  }
}
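// InvokePrologue implements the argument-count handshake used by the Invoke*
// helpers: rax carries the actual count and rbx the expected count (the
// DCHECKs below pin down this register assignment), and any mismatch is
// routed through the ArgumentsAdaptorTrampoline rather than the function's
// own entry point.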
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Label* done,
                                    bool* definitely_mismatches,
                                    InvokeFlag flag,
                                    Label::Distance near_jump,
                                    const CallWrapper& call_wrapper) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label invoke;
  if (expected.is_immediate()) {
    DCHECK(actual.is_immediate());
    Set(rax, actual.immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      if (expected.immediate() ==
              SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
        // Don't worry about adapting arguments for built-ins that
        // don't want that done. Skip the adaptation code by making it
        // look like we have a match between the expected and actual
        // number of arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        Set(rbx, expected.immediate());
      }
    }
  } else {
    if (actual.is_immediate()) {
      // Expected is in a register, actual is an immediate. This is the
      // case when we invoke function values without going through the
      // IC mechanism.
      Set(rax, actual.immediate());
      cmpp(expected.reg(), Immediate(actual.immediate()));
      j(equal, &invoke, Label::kNear);
      DCHECK(expected.reg().is(rbx));
    } else if (!expected.reg().is(actual.reg())) {
      // Both expected and actual are in (different) registers. This
      // is the case when we invoke functions using call and apply.
      cmpp(expected.reg(), actual.reg());
      j(equal, &invoke, Label::kNear);
      DCHECK(actual.reg().is(rax));
      DCHECK(expected.reg().is(rbx));
    } else {
      Move(rax, actual.reg());
    }
  }

  if (!definitely_matches) {
    Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      Call(adaptor, RelocInfo::CODE_TARGET);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        jmp(done, near_jump);
      }
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&invoke);
  }
}
void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
                                             const ParameterCount& expected,
                                             const ParameterCount& actual) {
  Label skip_flooding;
  ExternalReference last_step_action =
      ExternalReference::debug_last_step_action_address(isolate());
  Operand last_step_action_operand = ExternalOperand(last_step_action);
  STATIC_ASSERT(StepFrame > StepIn);
  cmpb(last_step_action_operand, Immediate(StepIn));
  j(less, &skip_flooding);
  {
    FrameScope frame(this,
                     has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
    if (expected.is_reg()) {
      Integer32ToSmi(expected.reg(), expected.reg());
      Push(expected.reg());
    }
    if (actual.is_reg()) {
      Integer32ToSmi(actual.reg(), actual.reg());
      Push(actual.reg());
    }
    if (new_target.is_valid()) {
      Push(new_target);
    }
    Push(fun);
    Push(fun);
    CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
    Pop(fun);
    if (new_target.is_valid()) {
      Pop(new_target);
    }
    if (actual.is_reg()) {
      Pop(actual.reg());
      SmiToInteger64(actual.reg(), actual.reg());
    }
    if (expected.is_reg()) {
      Pop(expected.reg());
      SmiToInteger64(expected.reg(), expected.reg());
    }
  }
  bind(&skip_flooding);
}
void MacroAssembler::StubPrologue(StackFrame::Type type) {
  pushq(rbp);  // Caller's frame pointer.
  movp(rbp, rsp);
  Push(Smi::FromInt(type));
}
void MacroAssembler::Prologue(bool code_pre_aging) {
  PredictableCodeSizeScope predictable_code_size_scope(this,
      kNoCodeAgeSequenceLength);
  if (code_pre_aging) {
    // Pre-age the code.
    Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
         RelocInfo::CODE_AGE_SEQUENCE);
    Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
  } else {
    pushq(rbp);  // Caller's frame pointer.
    movp(rbp, rsp);
    Push(rsi);  // Callee's context.
    Push(rdi);  // Callee's JS function.
  }
}
void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
  movp(vector, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
  movp(vector, FieldOperand(vector, JSFunction::kLiteralsOffset));
  movp(vector, FieldOperand(vector, LiteralsArray::kFeedbackVectorOffset));
}


void MacroAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool_pointer_reg) {
  // Out-of-line constant pool not implemented on x64.
  UNREACHABLE();
}


void MacroAssembler::EnterFrame(StackFrame::Type type) {
  pushq(rbp);
  movp(rbp, rsp);
  Push(Smi::FromInt(type));
  if (type == StackFrame::INTERNAL) {
    Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
    Push(kScratchRegister);
  }
  if (emit_debug_code()) {
    Move(kScratchRegister,
         isolate()->factory()->undefined_value(),
         RelocInfo::EMBEDDED_OBJECT);
    cmpp(Operand(rsp, 0), kScratchRegister);
    Check(not_equal, kCodeObjectNotProperlyPatched);
  }
}


void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  if (emit_debug_code()) {
    Move(kScratchRegister, Smi::FromInt(type));
    cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
         kScratchRegister);
    Check(equal, kStackFrameTypesMustMatch);
  }
  movp(rsp, rbp);
  popq(rbp);
}

void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
                                       Register argc) {
  Push(rbp);
  Move(rbp, rsp);
  Push(context);
  Push(target);
  Push(argc);
}

void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
                                       Register argc) {
  Pop(argc);
  Pop(target);
  Pop(context);
  leave();
}
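// Frame layout built below, relative to the new rbp (the DCHECKed
// ExitFrameConstants encode the same offsets):
//   rbp + 16 : caller's stack pointer (kCallerSPDisplacement)
//   rbp +  8 : return address (kCallerPCOffset)
//   rbp +  0 : saved rbp (kCallerFPOffset)
//   rbp -  8 : frame type (as a Smi)
//   rbp - 16 : saved entry sp slot, patched in EnterExitFrameEpilogue
//   rbp - 24 : code object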
void MacroAssembler::EnterExitFramePrologue(bool save_rax,
                                            StackFrame::Type frame_type) {
  DCHECK(frame_type == StackFrame::EXIT ||
         frame_type == StackFrame::BUILTIN_EXIT);

  // Set up the frame structure on the stack.
  // All constants are relative to the frame pointer of the exit frame.
  DCHECK_EQ(kFPOnStackSize + kPCOnStackSize,
            ExitFrameConstants::kCallerSPDisplacement);
  DCHECK_EQ(kFPOnStackSize, ExitFrameConstants::kCallerPCOffset);
  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
  pushq(rbp);
  movp(rbp, rsp);

  // Reserve room for the entry stack pointer and push the code object.
  Push(Smi::FromInt(frame_type));
  DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
  Push(Immediate(0));  // Saved entry sp, patched before call.
  Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
  Push(kScratchRegister);  // Accessed from ExitFrame::code_slot.

  // Save the frame pointer, the context, and the C function in top.
  if (save_rax) {
    movp(r14, rax);  // Back up rax in a callee-saved register.
  }

  Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
  Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
  Store(ExternalReference(Isolate::kCFunctionAddress, isolate()), rbx);
}


void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
                                            bool save_doubles) {
#ifdef _WIN64
  const int kShadowSpace = 4;
  arg_stack_space += kShadowSpace;
#endif
  // Optionally save all XMM registers.
  if (save_doubles) {
    int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
                arg_stack_space * kRegisterSize;
    subp(rsp, Immediate(space));
    int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
    const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
    for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
      DoubleRegister reg =
          DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
      Movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
    }
  } else if (arg_stack_space > 0) {
    subp(rsp, Immediate(arg_stack_space * kRegisterSize));
  }

  // Get the required frame alignment for the OS.
  const int kFrameAlignment = base::OS::ActivationFrameAlignment();
  if (kFrameAlignment > 0) {
    DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
    DCHECK(is_int8(kFrameAlignment));
    andp(rsp, Immediate(-kFrameAlignment));
  }

  // Patch the saved entry sp.
  movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
}
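
// With the usual 16-byte activation frame alignment, andp(rsp, Immediate(-16))
// clears the low four bits of rsp, rounding it down to the next 16-byte
// boundary. Rounding down only grows the reservation made above, so the saved
// doubles and argument slots stay inside the frame.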

void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles,
                                    StackFrame::Type frame_type) {
  EnterExitFramePrologue(true, frame_type);

  // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
  // so it must be retained across the C call.
  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
  leap(r15, Operand(rbp, r14, times_pointer_size, offset));

  EnterExitFrameEpilogue(arg_stack_space, save_doubles);
}


void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
  EnterExitFramePrologue(false, StackFrame::EXIT);
  EnterExitFrameEpilogue(arg_stack_space, false);
}


void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
  // Registers:
  // r15 : argv
  if (save_doubles) {
    int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
    const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
    for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
      DoubleRegister reg =
          DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
      Movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
    }
  }

  if (pop_arguments) {
    // Get the return address from the stack and restore the frame pointer.
    movp(rcx, Operand(rbp, kFPOnStackSize));
    movp(rbp, Operand(rbp, 0 * kPointerSize));

    // Drop everything up to and including the arguments and the receiver
    // from the caller stack.
    leap(rsp, Operand(r15, 1 * kPointerSize));

    PushReturnAddressFrom(rcx);
  } else {
    // Otherwise just leave the exit frame.
    leave();
  }

  LeaveExitFrameEpilogue(true);
}


void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
  movp(rsp, rbp);
  popq(rbp);

  LeaveExitFrameEpilogue(restore_context);
}


void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
  // Restore current context from top and clear it in debug mode.
  ExternalReference context_address(Isolate::kContextAddress, isolate());
  Operand context_operand = ExternalOperand(context_address);
  if (restore_context) {
    movp(rsi, context_operand);
  }
#ifdef DEBUG
  movp(context_operand, Immediate(0));
#endif

  // Clear the top frame.
  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
                                       isolate());
  Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
  movp(c_entry_fp_operand, Immediate(0));
}


// Compute the hash code from the untagged key.  This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stubs-hydrogen.cc.
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
  // First of all we assign the hash seed to scratch.
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  SmiToInteger32(scratch, scratch);

  // Xor original key with a seed.
  xorl(r0, scratch);

  // Compute the hash code from the untagged key.  This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  movl(scratch, r0);
  notl(r0);
  shll(scratch, Immediate(15));
  addl(r0, scratch);
  // hash = hash ^ (hash >> 12);
  movl(scratch, r0);
  shrl(scratch, Immediate(12));
  xorl(r0, scratch);
  // hash = hash + (hash << 2);
  leal(r0, Operand(r0, r0, times_4, 0));
  // hash = hash ^ (hash >> 4);
  movl(scratch, r0);
  shrl(scratch, Immediate(4));
  xorl(r0, scratch);
  // hash = hash * 2057;
  imull(r0, r0, Immediate(2057));
  // hash = hash ^ (hash >> 16);
  movl(scratch, r0);
  shrl(scratch, Immediate(16));
  xorl(r0, scratch);
  andl(r0, Immediate(0x3fffffff));
}
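
// For reference, the sequence above computes the following function (a sketch
// assembled from the step comments; ComputeIntegerHash in utils.h is the
// authoritative version):
//   uint32_t Hash(uint32_t key, uint32_t seed) {
//     uint32_t hash = key ^ seed;
//     hash = ~hash + (hash << 15);
//     hash ^= hash >> 12;
//     hash += hash << 2;
//     hash ^= hash >> 4;
//     hash *= 2057;
//     hash ^= hash >> 16;
//     return hash & 0x3fffffff;
//   }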

void MacroAssembler::LoadAllocationTopHelper(Register result,
                                             Register scratch,
                                             AllocationFlags flags) {
  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);

  // Just return if allocation top is already known.
  if ((flags & RESULT_CONTAINS_TOP) != 0) {
    // No use of scratch if allocation top is provided.
    DCHECK(!scratch.is_valid());
#ifdef DEBUG
    // Assert that result actually contains top on entry.
    Operand top_operand = ExternalOperand(allocation_top);
    cmpp(result, top_operand);
    Check(equal, kUnexpectedAllocationTop);
#endif
    return;
  }

  // Move address of new object to result. Use scratch register if available,
  // and keep address in scratch until call to UpdateAllocationTopHelper.
  if (scratch.is_valid()) {
    LoadAddress(scratch, allocation_top);
    movp(result, Operand(scratch, 0));
  } else {
    Load(result, allocation_top);
  }
}


void MacroAssembler::MakeSureDoubleAlignedHelper(Register result,
                                                 Register scratch,
                                                 Label* gc_required,
                                                 AllocationFlags flags) {
  if (kPointerSize == kDoubleSize) {
    if (FLAG_debug_code) {
      testl(result, Immediate(kDoubleAlignmentMask));
      Check(zero, kAllocationIsNotDoubleAligned);
    }
  } else {
    // Align the next allocation. Storing the filler map without checking top
    // is safe in new-space because the limit of the heap is aligned there.
    DCHECK(kPointerSize * 2 == kDoubleSize);
    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
    // Make sure scratch is not clobbered by this function as it might be
    // used in UpdateAllocationTopHelper later.
    DCHECK(!scratch.is(kScratchRegister));
    Label aligned;
    testl(result, Immediate(kDoubleAlignmentMask));
    j(zero, &aligned, Label::kNear);
    if (((flags & ALLOCATION_FOLDED) == 0) && ((flags & PRETENURE) != 0)) {
      ExternalReference allocation_limit =
          AllocationUtils::GetAllocationLimitReference(isolate(), flags);
      cmpp(result, ExternalOperand(allocation_limit));
      j(above_equal, gc_required);
    }
    LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex);
    movp(Operand(result, 0), kScratchRegister);
    addp(result, Immediate(kDoubleSize / 2));
    bind(&aligned);
  }
}
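
// On x32 (4-byte pointers, 8-byte doubles) the helper above bumps an
// unaligned allocation top by one word and plants a one-pointer filler map in
// the skipped word so the heap stays iterable; on x64 the two sizes coincide
// and only the debug check runs.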


void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
                                               Register scratch,
                                               AllocationFlags flags) {
  if (emit_debug_code()) {
    testp(result_end, Immediate(kObjectAlignmentMask));
    Check(zero, kUnalignedAllocationInNewSpace);
  }

  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);

  // Update new top.
  if (scratch.is_valid()) {
    // Scratch already contains address of allocation top.
    movp(Operand(scratch, 0), result_end);
  } else {
    Store(allocation_top, result_end);
  }
}


void MacroAssembler::Allocate(int object_size,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
  DCHECK(object_size <= kMaxRegularHeapObjectSize);
  DCHECK((flags & ALLOCATION_FOLDED) == 0);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      movl(result, Immediate(0x7091));
      if (result_end.is_valid()) {
        movl(result_end, Immediate(0x7191));
      }
      if (scratch.is_valid()) {
        movl(scratch, Immediate(0x7291));
      }
    }
    jmp(gc_required);
    return;
  }
  DCHECK(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
  }

  // Calculate new top and bail out if new space is exhausted.
  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  Register top_reg = result_end.is_valid() ? result_end : result;

  if (!top_reg.is(result)) {
    movp(top_reg, result);
  }
  addp(top_reg, Immediate(object_size));
  Operand limit_operand = ExternalOperand(allocation_limit);
  cmpp(top_reg, limit_operand);
  j(above, gc_required);

  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
    // The top pointer is not updated for allocation folding dominators.
    UpdateAllocationTopHelper(top_reg, scratch, flags);
  }

  if (top_reg.is(result)) {
    subp(result, Immediate(object_size - kHeapObjectTag));
  } else {
    // Tag the result.
    DCHECK(kHeapObjectTag == 1);
    incp(result);
  }
}
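
// The fast path above is bump-pointer allocation; conceptually (a sketch):
//   result = *allocation_top;
//   if (result + object_size > *allocation_limit) goto gc_required;
//   *allocation_top = result + object_size;
//   return result + kHeapObjectTag;  // tagged heap object pointer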


void MacroAssembler::Allocate(int header_size,
                              ScaleFactor element_size,
                              Register element_count,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  DCHECK((flags & SIZE_IN_WORDS) == 0);
  DCHECK((flags & ALLOCATION_FOLDING_DOMINATOR) == 0);
  DCHECK((flags & ALLOCATION_FOLDED) == 0);
  leap(result_end, Operand(element_count, element_size, header_size));
  Allocate(result_end, result, result_end, scratch, gc_required, flags);
}


void MacroAssembler::Allocate(Register object_size,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  DCHECK((flags & SIZE_IN_WORDS) == 0);
  DCHECK((flags & ALLOCATION_FOLDED) == 0);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      movl(result, Immediate(0x7091));
      movl(result_end, Immediate(0x7191));
      if (scratch.is_valid()) {
        movl(scratch, Immediate(0x7291));
      }
      // object_size is left unchanged by this function.
    }
    jmp(gc_required);
    return;
  }
  DCHECK(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
  }

  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
  if (!object_size.is(result_end)) {
    movp(result_end, object_size);
  }
  addp(result_end, result);
  Operand limit_operand = ExternalOperand(allocation_limit);
  cmpp(result_end, limit_operand);
  j(above, gc_required);

  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
    // The top pointer is not updated for allocation folding dominators.
    UpdateAllocationTopHelper(result_end, scratch, flags);
  }

  // Tag the result.
  addp(result, Immediate(kHeapObjectTag));
}

void MacroAssembler::FastAllocate(int object_size, Register result,
                                  Register result_end, AllocationFlags flags) {
  DCHECK(!result.is(result_end));
  // Load address of new object into result.
  LoadAllocationTopHelper(result, no_reg, flags);

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    MakeSureDoubleAlignedHelper(result, no_reg, NULL, flags);
  }

  leap(result_end, Operand(result, object_size));

  UpdateAllocationTopHelper(result_end, no_reg, flags);

  addp(result, Immediate(kHeapObjectTag));
}

void MacroAssembler::FastAllocate(Register object_size, Register result,
                                  Register result_end, AllocationFlags flags) {
  DCHECK(!result.is(result_end));
  // Load address of new object into result.
  LoadAllocationTopHelper(result, no_reg, flags);

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    MakeSureDoubleAlignedHelper(result, no_reg, NULL, flags);
  }

  leap(result_end, Operand(result, object_size, times_1, 0));

  UpdateAllocationTopHelper(result_end, no_reg, flags);

  addp(result, Immediate(kHeapObjectTag));
}

void MacroAssembler::AllocateHeapNumber(Register result,
                                        Register scratch,
                                        Label* gc_required,
                                        MutableMode mode) {
  // Allocate heap number in new space.
  Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required,
           NO_ALLOCATION_FLAGS);

  Heap::RootListIndex map_index = mode == MUTABLE
      ? Heap::kMutableHeapNumberMapRootIndex
      : Heap::kHeapNumberMapRootIndex;

  // Set the map.
  LoadRoot(kScratchRegister, map_index);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}


void MacroAssembler::AllocateTwoByteString(Register result,
                                           Register length,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
                               kObjectAlignmentMask;
  DCHECK(kShortSize == 2);
  // scratch1 = length * 2 + kObjectAlignmentMask + kHeaderAlignment.
  leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
                kHeaderAlignment));
  andp(scratch1, Immediate(~kObjectAlignmentMask));
  if (kHeaderAlignment > 0) {
    subp(scratch1, Immediate(kHeaderAlignment));
  }

  // Allocate two-byte string in new space.
  Allocate(SeqTwoByteString::kHeaderSize, times_1, scratch1, result, scratch2,
           scratch3, gc_required, NO_ALLOCATION_FLAGS);

  // Set the map, length and hash field.
  LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
  Integer32ToSmi(scratch1, length);
  movp(FieldOperand(result, String::kLengthOffset), scratch1);
  movp(FieldOperand(result, String::kHashFieldOffset),
       Immediate(String::kEmptyHashField));
}


void MacroAssembler::AllocateOneByteString(Register result, Register length,
                                           Register scratch1, Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
                               kObjectAlignmentMask;
  movl(scratch1, length);
  DCHECK(kCharSize == 1);
  addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
  andp(scratch1, Immediate(~kObjectAlignmentMask));
  if (kHeaderAlignment > 0) {
    subp(scratch1, Immediate(kHeaderAlignment));
  }

  // Allocate one-byte string in new space.
  Allocate(SeqOneByteString::kHeaderSize, times_1, scratch1, result, scratch2,
           scratch3, gc_required, NO_ALLOCATION_FLAGS);

  // Set the map, length and hash field.
  LoadRoot(kScratchRegister, Heap::kOneByteStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
  Integer32ToSmi(scratch1, length);
  movp(FieldOperand(result, String::kLengthOffset), scratch1);
  movp(FieldOperand(result, String::kHashFieldOffset),
       Immediate(String::kEmptyHashField));
}


void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  // Allocate a cons string in new space.
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
           NO_ALLOCATION_FLAGS);

  // Set the map. The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}


void MacroAssembler::AllocateOneByteConsString(Register result,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  // Allocate a cons string in new space.
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
           NO_ALLOCATION_FLAGS);

  // Set the map. The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kConsOneByteStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}


void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  // Allocate a sliced string in new space.
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           NO_ALLOCATION_FLAGS);

  // Set the map. The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}


void MacroAssembler::AllocateOneByteSlicedString(Register result,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  // Allocate a sliced string in new space.
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           NO_ALLOCATION_FLAGS);

  // Set the map. The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kSlicedOneByteStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}


void MacroAssembler::AllocateJSValue(Register result, Register constructor,
                                     Register value, Register scratch,
                                     Label* gc_required) {
  DCHECK(!result.is(constructor));
  DCHECK(!result.is(scratch));
  DCHECK(!result.is(value));

  // Allocate JSValue in new space.
  Allocate(JSValue::kSize, result, scratch, no_reg, gc_required,
           NO_ALLOCATION_FLAGS);

  // Initialize the JSValue.
  LoadGlobalFunctionInitialMap(constructor, scratch);
  movp(FieldOperand(result, HeapObject::kMapOffset), scratch);
  LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
  movp(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
  movp(FieldOperand(result, JSObject::kElementsOffset), scratch);
  movp(FieldOperand(result, JSValue::kValueOffset), value);
  STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}

void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
                                                Register end_address,
                                                Register filler) {
  Label loop, entry;
  jmp(&entry, Label::kNear);
  bind(&loop);
  movp(Operand(current_address, 0), filler);
  addp(current_address, Immediate(kPointerSize));
  bind(&entry);
  cmpp(current_address, end_address);
  j(below, &loop, Label::kNear);
}
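
// Equivalent C loop (a sketch):
//   for (; current_address < end_address; current_address += kPointerSize)
//     *current_address = filler;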


void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    movp(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      movp(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // Slot is in the current function context.  Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in rsi).
    movp(dst, rsi);
  }

  // We should not have found a with context by walking the context
  // chain (i.e., the static scope chain and runtime context chain do
  // not agree).  A variable occurring in such a scope should have
  // slot type LOOKUP and not CONTEXT.
  if (emit_debug_code()) {
    CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
                Heap::kWithContextMapRootIndex);
    Check(not_equal, kVariableResolvedToWithContext);
  }
}


void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch,
    Label* no_map_match) {
  DCHECK(IsFastElementsKind(expected_kind));
  DCHECK(IsFastElementsKind(transitioned_kind));

  // Check that the function's map is the same as the expected cached map.
  movp(scratch, NativeContextOperand());
  cmpp(map_in_out,
       ContextOperand(scratch, Context::ArrayMapIndex(expected_kind)));
  j(not_equal, no_map_match);

  // Use the transitioned cached map.
  movp(map_in_out,
       ContextOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}


#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
static const int kRegisterPassedArguments = 6;
#endif


void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
  movp(dst, NativeContextOperand());
  movp(dst, ContextOperand(dst, index));
}


void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map) {
  // Load the initial map.  The global functions all have initial maps.
  movp(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
    jmp(&ok);
    bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    bind(&ok);
  }
}


int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
  // On Windows 64, stack slots are reserved by the caller for all arguments
  // including the ones passed in registers, and space is always allocated for
  // the four register arguments even if the function takes fewer than four
  // arguments.
  // On the AMD64 ABI (Linux/Mac) the first six arguments are passed in
  // registers and the caller does not reserve stack slots for them.
  DCHECK(num_arguments >= 0);
#ifdef _WIN64
  const int kMinimumStackSlots = kRegisterPassedArguments;
  if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
  return num_arguments;
#else
  if (num_arguments < kRegisterPassedArguments) return 0;
  return num_arguments - kRegisterPassedArguments;
#endif
}
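
// For example (illustrative only): with num_arguments == 3, Windows 64 still
// reserves the 4 shadow-space slots while the AMD64 ABI needs 0 stack slots;
// with num_arguments == 8, Windows 64 needs 8 slots and the AMD64 ABI needs
// 8 - 6 == 2.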


void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
                                               Register index,
                                               Register value,
                                               uint32_t encoding_mask) {
  Label is_object;
  JumpIfNotSmi(string, &is_object);
  Abort(kNonObject);
  bind(&is_object);

  Push(value);
  movp(value, FieldOperand(string, HeapObject::kMapOffset));
  movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));

  andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
  cmpp(value, Immediate(encoding_mask));
  Pop(value);
  Check(equal, kUnexpectedStringType);

  // The index is assumed to be untagged coming in. Tag it to compare with the
  // string length without using a temp register; it is restored at the end of
  // this function.
  Integer32ToSmi(index, index);
  SmiCompare(index, FieldOperand(string, String::kLengthOffset));
  Check(less, kIndexIsTooLarge);

  SmiCompare(index, Smi::kZero);
  Check(greater_equal, kIndexIsNegative);

  // Restore the index.
  SmiToInteger32(index, index);
}


void MacroAssembler::PrepareCallCFunction(int num_arguments) {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  DCHECK(frame_alignment != 0);
  DCHECK(num_arguments >= 0);

  // Make stack end at alignment and allocate space for arguments and old rsp.
  movp(kScratchRegister, rsp);
  DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
  andp(rsp, Immediate(-frame_alignment));
  movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
}
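
// After PrepareCallCFunction(n) the stack looks like this (a sketch):
//   [rsp + slots * kRegisterSize] : the old rsp, reloaded by CallCFunction
//   [rsp .. rsp + slots * kRegisterSize) : argument slots (including any
//       Win64 shadow space), aligned to the OS activation frame alignment
// where slots == ArgumentStackSlotsForCFunctionCall(n).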


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  LoadAddress(rax, function);
  CallCFunction(rax, num_arguments);
}


void MacroAssembler::CallCFunction(Register function, int num_arguments) {
  DCHECK(has_frame());
  // Check stack alignment.
  if (emit_debug_code()) {
    CheckStackAlignment();
  }

  call(function);
  DCHECK(base::OS::ActivationFrameAlignment() != 0);
  DCHECK(num_arguments >= 0);
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
}


#ifdef DEBUG
bool AreAliased(Register reg1,
                Register reg2,
                Register reg3,
                Register reg4,
                Register reg5,
                Register reg6,
                Register reg7,
                Register reg8) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
      reg7.is_valid() + reg8.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  if (reg7.is_valid()) regs |= reg7.bit();
  if (reg8.is_valid()) regs |= reg8.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}
#endif
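
// AreAliased returns true iff two of the valid registers passed in are the
// same: a duplicate collapses to a single bit in the RegList, so the bit
// count comes out smaller than the number of valid registers.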


CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size)
    : address_(address),
      size_(size),
      masm_(isolate, address, size + Assembler::kGap, CodeObjectRequired::kNo) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  Assembler::FlushICache(masm_.isolate(), address_, size_);

  // Check that the code was patched as expected.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  DCHECK(cc == zero || cc == not_zero);
  if (scratch.is(object)) {
    andp(scratch, Immediate(~Page::kPageAlignmentMask));
  } else {
    movp(scratch, Immediate(~Page::kPageAlignmentMask));
    andp(scratch, object);
  }
  if (mask < (1 << kBitsPerByte)) {
    testb(Operand(scratch, MemoryChunk::kFlagsOffset),
          Immediate(static_cast<uint8_t>(mask)));
  } else {
    testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}
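
// Masking off the low page bits turns an object address into the address of
// its page header, so the flag word sits at a fixed offset (a sketch):
//   chunk = object & ~Page::kPageAlignmentMask;
//   test *(chunk + MemoryChunk::kFlagsOffset) against mask
// testb suffices whenever the mask fits in the low byte.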


void MacroAssembler::JumpIfBlack(Register object,
                                 Register bitmap_scratch,
                                 Register mask_scratch,
                                 Label* on_black,
                                 Label::Distance on_black_distance) {
  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));

  GetMarkBits(object, bitmap_scratch, mask_scratch);

  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
  // The mask_scratch register contains a 1 at the position of the first bit
  // and a 1 at the position of the second bit. All other positions are zero.
  movp(rcx, mask_scratch);
  andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  cmpp(mask_scratch, rcx);
  j(equal, on_black, on_black_distance);
}


void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
  movp(bitmap_reg, addr_reg);
  // Sign-extended 32-bit immediate.
  andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
  movp(rcx, addr_reg);
  int shift =
      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
  shrl(rcx, Immediate(shift));
  andp(rcx,
       Immediate((Page::kPageAlignmentMask >> shift) &
                 ~(Bitmap::kBytesPerCell - 1)));

  addp(bitmap_reg, rcx);
  movp(rcx, addr_reg);
  shrl(rcx, Immediate(kPointerSizeLog2));
  andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
  movl(mask_reg, Immediate(3));
  shlp_cl(mask_reg);
}
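
// The mark bitmap holds one bit per pointer-sized heap word. The code above
// splits the page offset of addr_reg into the byte offset of the bitmap cell
// (added to the page start in bitmap_reg; callers add MemoryChunk::kHeaderSize
// when dereferencing) and the bit index within that cell (in rcx), then
// builds mask_reg = 3 << bit_index so it covers the two consecutive mark bits
// that encode an object's color.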


void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
                                 Register mask_scratch, Label* value_is_white,
                                 Label::Distance distance) {
  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there, we only need to check one bit.
  testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
  j(zero, value_is_white, distance);
}


void MacroAssembler::CheckEnumCache(Label* call_runtime) {
  Label next, start;
  Register empty_fixed_array_value = r8;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  movp(rcx, rax);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));

  EnumLength(rdx, rbx);
  Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
  j(equal, call_runtime);

  jmp(&start);

  bind(&next);

  movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(rdx, rbx);
  Cmp(rdx, Smi::kZero);
  j(not_equal, call_runtime);

  bind(&start);

  // Check that there are no elements. Register rcx contains the current JS
  // object we've reached through the prototype chain.
  Label no_elements;
  cmpp(empty_fixed_array_value,
       FieldOperand(rcx, JSObject::kElementsOffset));
  j(equal, &no_elements);

  // Second chance, the object may be using the empty slow element dictionary.
  LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
  cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
  j(not_equal, call_runtime);

  bind(&no_elements);
  movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
  CompareRoot(rcx, Heap::kNullValueRootIndex);
  j(not_equal, &next);
}
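
// In pseudo-code, the walk above checks roughly (a sketch):
//   if (EnumLength(receiver->map()) == kInvalidEnumCacheSentinel) bail;
//   for (obj = receiver; obj != null; obj = obj->map()->prototype()) {
//     if (obj != receiver && EnumLength(obj->map()) != 0) bail;
//     if (obj->elements() != empty_fixed_array &&
//         obj->elements() != empty_slow_element_dictionary) bail;
//   }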


void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg,
    Label* no_memento_found) {
  Label map_check;
  Label top_check;
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
  const int kMementoLastWordOffset =
      kMementoMapOffset + AllocationMemento::kSize - kPointerSize;

  // Bail out if the object is not in new space.
  JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
  // If the object is in new space, we need to check whether it is on the same
  // page as the current top.
  leap(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
  xorp(scratch_reg, ExternalOperand(new_space_allocation_top));
  testp(scratch_reg, Immediate(~Page::kPageAlignmentMask));
  j(zero, &top_check);
  // The object is on a different page than allocation top. Bail out if the
  // object sits on the page boundary as no memento can follow and we cannot
  // touch the memory following it.
  leap(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
  xorp(scratch_reg, receiver_reg);
  testp(scratch_reg, Immediate(~Page::kPageAlignmentMask));
  j(not_zero, no_memento_found);
  // Continue with the actual map check.
  jmp(&map_check);
  // If top is on the same page as the current object, we need to check whether
  // we are below top.
  bind(&top_check);
  leap(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
  cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
  j(greater_equal, no_memento_found);
  // Memento map check.
  bind(&map_check);
  CompareRoot(MemOperand(receiver_reg, kMementoMapOffset),
              Heap::kAllocationMementoMapRootIndex);
}
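
// Note on the same-page tests above: two addresses lie on the same page
// exactly when their XOR has no bits above the page-alignment mask, i.e.
//   ((a ^ b) & ~Page::kPageAlignmentMask) == 0
// which is what each xorp/testp pair computes.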


void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
  DCHECK(!scratch1.is(scratch0));
  Register current = scratch0;
  Label loop_again, end;

  movp(current, object);
  movp(current, FieldOperand(current, HeapObject::kMapOffset));
  movp(current, FieldOperand(current, Map::kPrototypeOffset));
  CompareRoot(current, Heap::kNullValueRootIndex);
  j(equal, &end);

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  movp(current, FieldOperand(current, HeapObject::kMapOffset));
  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  CmpInstanceType(current, JS_OBJECT_TYPE);
  j(below, found);
  movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
  j(equal, found);
  movp(current, FieldOperand(current, Map::kPrototypeOffset));
  CompareRoot(current, Heap::kNullValueRootIndex);
  j(not_equal, &loop_again);

  bind(&end);
}


void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
  DCHECK(!dividend.is(rax));
  DCHECK(!dividend.is(rdx));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  movl(rax, Immediate(mag.multiplier));
  imull(dividend);
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) addl(rdx, dividend);
  if (divisor < 0 && !neg && mag.multiplier > 0) subl(rdx, dividend);
  if (mag.shift > 0) sarl(rdx, Immediate(mag.shift));
  movl(rax, dividend);
  shrl(rax, Immediate(31));
  addl(rdx, rax);
}
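
// TruncatingDiv leaves dividend / divisor, truncated toward zero, in rdx
// without a div instruction, using the multiply-by-magic-number scheme
// described in Hacker's Delight: imull puts the high 32 bits of
// dividend * multiplier in rdx, the two conditional add/sub steps correct
// for a multiplier whose sign does not match the divisor's, sarl applies the
// precomputed shift, and adding the dividend's sign bit (shrl by 31) rounds
// quotients of negative dividends toward zero.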


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X64