// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_X64

#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"
#include "src/serialize.h"
#include "src/debug.h"
#include "src/heap.h"
#include "src/isolate-inl.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      has_frame_(false),
      root_array_available_(true) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}


static const int64_t kInvalidRootRegisterDelta = -1;


int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
  if (predictable_code_size() &&
      (other.address() < reinterpret_cast<Address>(isolate()) ||
       other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
    return kInvalidRootRegisterDelta;
  }
  Address roots_register_value = kRootRegisterBias +
      reinterpret_cast<Address>(isolate()->heap()->roots_array_start());

  int64_t delta = kInvalidRootRegisterDelta;  // Bogus initialization.
  if (kPointerSize == kInt64Size) {
    delta = other.address() - roots_register_value;
  } else {
    // For x32, zero extend the address to 64-bit and calculate the delta.
    uint64_t o = static_cast<uint32_t>(
        reinterpret_cast<intptr_t>(other.address()));
    uint64_t r = static_cast<uint32_t>(
        reinterpret_cast<intptr_t>(roots_register_value));
    delta = o - r;
  }
  return delta;
}
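
// A standalone model of the computation above (an illustrative sketch, not
// part of this file; Delta() and its parameter names are hypothetical):
// kRootRegister holds roots_array_start + kRootRegisterBias, so an external
// address whose signed distance from that value fits in 32 bits can be
// reached with a single [kRootRegister + disp32] operand.
//
//   int64_t Delta(uint64_t external_address, uint64_t roots_array_start,
//                 int64_t bias) {
//     uint64_t root_register_value = roots_array_start + bias;
//     return static_cast<int64_t>(external_address - root_register_value);
//   }
//
// When the delta fits in an int32, ExternalOperand() below encodes the
// access as Operand(kRootRegister, static_cast<int32_t>(delta)) and saves
// moving the full 64-bit address into a scratch register first.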


Operand MacroAssembler::ExternalOperand(ExternalReference target,
                                        Register scratch) {
  if (root_array_available_ && !serializer_enabled()) {
    int64_t delta = RootRegisterDelta(target);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      return Operand(kRootRegister, static_cast<int32_t>(delta));
    }
  }
  Move(scratch, target);
  return Operand(scratch, 0);
}


void MacroAssembler::Load(Register destination, ExternalReference source) {
  if (root_array_available_ && !serializer_enabled()) {
    int64_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
      return;
    }
  }
  // Safe code.
  if (destination.is(rax)) {
    load_rax(source);
  } else {
    Move(kScratchRegister, source);
    movp(destination, Operand(kScratchRegister, 0));
  }
}


void MacroAssembler::Store(ExternalReference destination, Register source) {
  if (root_array_available_ && !serializer_enabled()) {
    int64_t delta = RootRegisterDelta(destination);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
      return;
    }
  }
  // Safe code.
  if (source.is(rax)) {
    store_rax(destination);
  } else {
    Move(kScratchRegister, destination);
    movp(Operand(kScratchRegister, 0), source);
  }
}


void MacroAssembler::LoadAddress(Register destination,
                                 ExternalReference source) {
  if (root_array_available_ && !serializer_enabled()) {
    int64_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
      return;
    }
  }
  // Safe code.
  Move(destination, source);
}


int MacroAssembler::LoadAddressSize(ExternalReference source) {
  if (root_array_available_ && !serializer_enabled()) {
    // This calculation depends on the internals of LoadAddress.
    // Its correctness is ensured by the asserts in the Call
    // instruction below.
    int64_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      // Operand is leap(scratch, Operand(kRootRegister, delta));
      // Opcodes : REX.W 8D ModRM Disp8/Disp32  - 4 or 7 bytes.
      int size = 4;
      if (!is_int8(static_cast<int32_t>(delta))) {
        size += 3;  // Need full four-byte displacement in lea.
      }
      return size;
    }
  }
  // Size of movp(destination, src);
  return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
}
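
// An illustrative size computation matching the opcode comment above (a
// hypothetical helper, not part of this file): REX.W (1) + opcode 8D (1) +
// ModRM (1) + disp8 (1) gives 4 bytes, and widening disp8 to disp32 adds
// 3 more bytes.
//
//   int LeaFromRootRegisterSize(int32_t delta) {
//     return (delta >= -128 && delta <= 127) ? 4 : 7;
//   }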


void MacroAssembler::PushAddress(ExternalReference source) {
  int64_t address = reinterpret_cast<int64_t>(source.address());
  if (is_int32(address) && !serializer_enabled()) {
    if (emit_debug_code()) {
      Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
    }
    Push(Immediate(static_cast<int32_t>(address)));
    return;
  }
  LoadAddress(kScratchRegister, source);
  Push(kScratchRegister);
}


void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  movp(destination, Operand(kRootRegister,
                            (index << kPointerSizeLog2) - kRootRegisterBias));
}


void MacroAssembler::LoadRootIndexed(Register destination,
                                     Register variable_offset,
                                     int fixed_offset) {
  ASSERT(root_array_available_);
  movp(destination,
       Operand(kRootRegister,
               variable_offset, times_pointer_size,
               (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
}


void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
       source);
}


void MacroAssembler::PushRoot(Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
}


void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  cmpp(with, Operand(kRootRegister,
                     (index << kPointerSizeLog2) - kRootRegisterBias));
}


void MacroAssembler::CompareRoot(const Operand& with,
                                 Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  ASSERT(!with.AddressUsesRegister(kScratchRegister));
  LoadRoot(kScratchRegister, index);
  cmpp(with, kScratchRegister);
}


void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register addr,
                                         Register scratch,
                                         SaveFPRegsMode save_fp,
                                         RememberedSetFinalAction and_then) {
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
    int3();
    bind(&ok);
  }
  // Load store buffer top.
  LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
  // Store pointer to buffer.
  movp(Operand(scratch, 0), addr);
  // Increment buffer top.
  addp(scratch, Immediate(kPointerSize));
  // Write back new top of buffer.
  StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
  // Call stub on end of buffer.
  Label done;
  // Check for end of buffer.
  testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kReturnAtEnd) {
    Label buffer_overflowed;
    j(not_equal, &buffer_overflowed, Label::kNear);
    ret(0);
    bind(&buffer_overflowed);
  } else {
    ASSERT(and_then == kFallThroughAtEnd);
    j(equal, &done, Label::kNear);
  }
  StoreBufferOverflowStub store_buffer_overflow =
      StoreBufferOverflowStub(isolate(), save_fp);
  CallStub(&store_buffer_overflow);
  if (and_then == kReturnAtEnd) {
    ret(0);
  } else {
    ASSERT(and_then == kFallThroughAtEnd);
    bind(&done);
  }
}
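
// The generated code above behaves like this sketch (illustrative only; the
// names are hypothetical): append the slot address to the store buffer, then
// call the overflow stub when the new top address has the overflow bit set.
//
//   void RememberedSetAppend(Address*& top, Address slot) {
//     *top++ = slot;                        // movp + addp on the top pointer
//     if (reinterpret_cast<uintptr_t>(top) & kStoreBufferOverflowBit) {
//       HandleStoreBufferOverflow();        // CallStub(&store_buffer_overflow)
//     }
//   }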


void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cc,
                                Label* branch,
                                Label::Distance distance) {
  if (serializer_enabled()) {
    // Can't do arithmetic on external references if it might get serialized.
    // The mask isn't really an address.  We load it as an external reference in
    // case the size of the new space is different between the snapshot maker
    // and the running system.
    if (scratch.is(object)) {
      Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
      andp(scratch, kScratchRegister);
    } else {
      Move(scratch, ExternalReference::new_space_mask(isolate()));
      andp(scratch, object);
    }
    Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
    cmpp(scratch, kScratchRegister);
    j(cc, branch, distance);
  } else {
    ASSERT(kPointerSize == kInt64Size
        ? is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))
        : kPointerSize == kInt32Size);
    intptr_t new_space_start =
        reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
    Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
         Assembler::RelocInfoNone());
    if (scratch.is(object)) {
      addp(scratch, kScratchRegister);
    } else {
      leap(scratch, Operand(object, kScratchRegister, times_1, 0));
    }
    andp(scratch,
         Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
    j(cc, branch, distance);
  }
}
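
// Both branches compute the same predicate; a plain C++ sketch (the function
// and parameter names are hypothetical, and the equivalence assumes the new
// space base is aligned so that (start & mask) == start):
//
//   bool InNewSpace(uintptr_t object, uintptr_t start, uintptr_t mask) {
//     // Serializer path:     (object & mask) == start
//     // Non-serializer path: ((object - start) & mask) == 0
//     return ((object - start) & mask) == 0;
//   }
//
// The non-serializer path folds the subtraction into addp/leap with the
// precomputed -start, so the final andp needs no extra compare against start.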


void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  ASSERT(IsAligned(offset, kPointerSize));

  leap(dst, FieldOperand(object, offset));
  if (emit_debug_code()) {
    Label ok;
    testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  RecordWrite(object, dst, value, save_fp, remembered_set_action,
              OMIT_SMI_CHECK, pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(value, kZapValue, Assembler::RelocInfoNone());
    Move(dst, kZapValue, Assembler::RelocInfoNone());
  }
}


void MacroAssembler::RecordWriteArray(
    Register object,
    Register value,
    Register index,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Array access: calculate the destination address. Index is not a smi.
  Register dst = index;
  leap(dst, Operand(object, index, times_pointer_size,
                    FixedArray::kHeaderSize - kHeapObjectTag));

  RecordWrite(object, dst, value, save_fp, remembered_set_action,
              OMIT_SMI_CHECK, pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(value, kZapValue, Assembler::RelocInfoNone());
    Move(index, kZapValue, Assembler::RelocInfoNone());
  }
}


void MacroAssembler::RecordWriteForMap(Register object,
                                       Register map,
                                       Register dst,
                                       SaveFPRegsMode fp_mode) {
  ASSERT(!object.is(kScratchRegister));
  ASSERT(!object.is(map));
  ASSERT(!object.is(dst));
  ASSERT(!map.is(dst));
  AssertNotSmi(object);

  if (emit_debug_code()) {
    Label ok;
    if (map.is(kScratchRegister)) pushq(map);
    CompareMap(map, isolate()->factory()->meta_map());
    if (map.is(kScratchRegister)) popq(map);
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  if (!FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    Label ok;
    if (map.is(kScratchRegister)) pushq(map);
    cmpp(map, FieldOperand(object, HeapObject::kMapOffset));
    if (map.is(kScratchRegister)) popq(map);
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  // Compute the address.
  leap(dst, FieldOperand(object, HeapObject::kMapOffset));

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  // A single check of the map's pages interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set.  This optimization
  // relies on the fact that maps can never be in new space.
  CheckPageFlag(map,
                map,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                zero,
                &done,
                Label::kNear);

  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
                       fp_mode);
  CallStub(&stub);

  bind(&done);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(dst, kZapValue, Assembler::RelocInfoNone());
    Move(map, kZapValue, Assembler::RelocInfoNone());
  }
}


void MacroAssembler::RecordWrite(
    Register object,
    Register address,
    Register value,
    SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  ASSERT(!object.is(value));
  ASSERT(!object.is(address));
  ASSERT(!value.is(address));
  AssertNotSmi(object);

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    Label ok;
    cmpp(value, Operand(address, 0));
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    // Skip barrier if writing a smi.
    JumpIfSmi(value, &done);
  }

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlag(value,
                  value,  // Used as scratch.
                  MemoryChunk::kPointersToHereAreInterestingMask,
                  zero,
                  &done,
                  Label::kNear);
  }

  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                zero,
                &done,
                Label::kNear);

  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);

  bind(&done);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(address, kZapValue, Assembler::RelocInfoNone());
    Move(value, kZapValue, Assembler::RelocInfoNone());
  }
}
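
// The barrier's fast path, as a sketch (illustrative only; the helper names
// are hypothetical stand-ins for the generated checks):
//
//   void RecordWriteSlow(HeapObject* object, Object** slot, Object* value);
//
//   void RecordWriteBarrier(HeapObject* object, Object** slot, Object* value) {
//     if (value->IsSmi()) return;                          // INLINE_SMI_CHECK
//     if (!PageOf(value)->PointersToHereAreInteresting()) return;
//     if (!PageOf(object)->PointersFromHereAreInteresting()) return;
//     RecordWriteSlow(object, slot, value);                // CallStub(&stub)
//   }
//
// The page flags live in the MemoryChunk header, which CheckPageFlag reaches
// by masking the tagged pointer, so each filter is a single masked test.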


void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
  if (emit_debug_code()) Check(cc, reason);
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    Label ok;
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedDoubleArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedCOWArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
    bind(&ok);
  }
}


void MacroAssembler::Check(Condition cc, BailoutReason reason) {
  Label L;
  j(cc, &L, Label::kNear);
  Abort(reason);
  // Control will not return here.
  bind(&L);
}


void MacroAssembler::CheckStackAlignment() {
  int frame_alignment = OS::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (frame_alignment > kPointerSize) {
    ASSERT(IsPowerOf2(frame_alignment));
    Label alignment_as_expected;
    testp(rsp, Immediate(frame_alignment_mask));
    j(zero, &alignment_as_expected, Label::kNear);
    // Abort if stack is not aligned.
    int3();
    bind(&alignment_as_expected);
  }
}


void MacroAssembler::NegativeZeroTest(Register result,
                                      Register op,
                                      Label* then_label) {
  Label ok;
  testl(result, result);
  j(not_zero, &ok, Label::kNear);
  testl(op, op);
  j(sign, then_label);
  bind(&ok);
}


void MacroAssembler::Abort(BailoutReason reason) {
#ifdef DEBUG
  const char* msg = GetBailoutReason(reason);
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }

  if (FLAG_trap_on_abort) {
    int3();
    return;
  }
#endif

  Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
       Assembler::RelocInfoNone());
  Push(kScratchRegister);

  if (!has_frame_) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    CallRuntime(Runtime::kAbort, 1);
  } else {
    CallRuntime(Runtime::kAbort, 1);
  }
  // Control will not return here.
  int3();
}


void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}


void MacroAssembler::TailCallStub(CodeStub* stub) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::StubReturn(int argc) {
  ASSERT(argc >= 1 && generating_stub());
  ret((argc - 1) * kPointerSize);
}


bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  return has_frame_ || !stub->SometimesSetsUpAFrame();
}


void MacroAssembler::IndexFromHash(Register hash, Register index) {
  // The assert checks that the constants for the maximum number of digits
  // for an array index cached in the hash field and the number of bits
  // reserved for it do not conflict.
  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  if (!hash.is(index)) {
    movl(index, hash);
  }
  DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
}


void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, num_arguments);
  LoadAddress(rbx, ExternalReference(f, isolate()));
  CEntryStub ces(isolate(), f->result_size, save_doubles);
  CallStub(&ces);
}


void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  Set(rax, num_arguments);
  LoadAddress(rbx, ext);

  CEntryStub stub(isolate(), 1);
  CallStub(&stub);
}


void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  // ----------- S t a t e -------------
  //  -- rsp[0]                 : return address
  //  -- rsp[8]                 : argument num_arguments - 1
  //  ...
  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
  // -----------------------------------

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, num_arguments);
  JumpToExternalReference(ext, result_size);
}


void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid, isolate()),
                            num_arguments,
                            result_size);
}


static int Offset(ExternalReference ref0, ExternalReference ref1) {
  int64_t offset = (ref0.address() - ref1.address());
  // Check that it fits into an int.
  ASSERT(static_cast<int>(offset) == offset);
  return static_cast<int>(offset);
}


void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
  EnterApiExitFrame(arg_stack_space);
}


void MacroAssembler::CallApiFunctionAndReturn(
    Register function_address,
    ExternalReference thunk_ref,
    Register thunk_last_arg,
    int stack_space,
    Operand return_value_operand,
    Operand* context_restore_operand) {
  Label prologue;
  Label promote_scheduled_exception;
  Label exception_handled;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label write_back;

  Factory* factory = isolate()->factory();
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate());
  const int kNextOffset = 0;
  const int kLimitOffset = Offset(
      ExternalReference::handle_scope_limit_address(isolate()),
      next_address);
  const int kLevelOffset = Offset(
      ExternalReference::handle_scope_level_address(isolate()),
      next_address);
  ExternalReference scheduled_exception_address =
      ExternalReference::scheduled_exception_address(isolate());

  ASSERT(rdx.is(function_address) || r8.is(function_address));
  // Allocate HandleScope in callee-save registers.
  Register prev_next_address_reg = r14;
  Register prev_limit_reg = rbx;
  Register base_reg = r15;
  Move(base_reg, next_address);
  movp(prev_next_address_reg, Operand(base_reg, kNextOffset));
  movp(prev_limit_reg, Operand(base_reg, kLimitOffset));
  addl(Operand(base_reg, kLevelOffset), Immediate(1));

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    PrepareCallCFunction(1);
    LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
    PopSafepointRegisters();
  }


  Label profiler_disabled;
  Label end_profiler_check;
  Move(rax, ExternalReference::is_profiling_address(isolate()));
  cmpb(Operand(rax, 0), Immediate(0));
  j(zero, &profiler_disabled);

  // Third parameter is the address of the actual getter function.
  Move(thunk_last_arg, function_address);
  Move(rax, thunk_ref);
  jmp(&end_profiler_check);

  bind(&profiler_disabled);
  // Profiling is disabled: call the api function directly.
  Move(rax, function_address);

  bind(&end_profiler_check);

  // Call the api function!
  call(rax);

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    PrepareCallCFunction(1);
    LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
    PopSafepointRegisters();
  }

  // Load the value from ReturnValue.
  movp(rax, return_value_operand);
  bind(&prologue);

  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  subl(Operand(base_reg, kLevelOffset), Immediate(1));
  movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
  cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
  j(not_equal, &delete_allocated_handles);
  bind(&leave_exit_frame);

  // Check if the function scheduled an exception.
  Move(rsi, scheduled_exception_address);
  Cmp(Operand(rsi, 0), factory->the_hole_value());
  j(not_equal, &promote_scheduled_exception);
  bind(&exception_handled);

#if ENABLE_EXTRA_CHECKS
  // Check if the function returned a valid JavaScript value.
  Label ok;
  Register return_value = rax;
  Register map = rcx;

  JumpIfSmi(return_value, &ok, Label::kNear);
  movp(map, FieldOperand(return_value, HeapObject::kMapOffset));

  CmpInstanceType(map, FIRST_NONSTRING_TYPE);
  j(below, &ok, Label::kNear);

  CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
  j(above_equal, &ok, Label::kNear);

  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
  j(equal, &ok, Label::kNear);

  CompareRoot(return_value, Heap::kUndefinedValueRootIndex);
  j(equal, &ok, Label::kNear);

  CompareRoot(return_value, Heap::kTrueValueRootIndex);
  j(equal, &ok, Label::kNear);

  CompareRoot(return_value, Heap::kFalseValueRootIndex);
  j(equal, &ok, Label::kNear);

  CompareRoot(return_value, Heap::kNullValueRootIndex);
  j(equal, &ok, Label::kNear);

  Abort(kAPICallReturnedInvalidObject);

  bind(&ok);
#endif

  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    movp(rsi, *context_restore_operand);
  }
  LeaveApiExitFrame(!restore_context);
  ret(stack_space * kPointerSize);

  bind(&promote_scheduled_exception);
  {
    FrameScope frame(this, StackFrame::INTERNAL);
    CallRuntime(Runtime::kHiddenPromoteScheduledException, 0);
  }
  jmp(&exception_handled);

  // HandleScope limit has changed. Delete allocated extensions.
  bind(&delete_allocated_handles);
  movp(Operand(base_reg, kLimitOffset), prev_limit_reg);
  movp(prev_limit_reg, rax);
  LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
  LoadAddress(rax,
              ExternalReference::delete_handle_scope_extensions(isolate()));
  call(rax);
  movp(rax, prev_limit_reg);
  jmp(&leave_exit_frame);
}
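
// The HandleScope bookkeeping around the call, as a sketch (illustrative;
// the struct and names are hypothetical stand-ins for the isolate data that
// base_reg points at):
//
//   struct HandleScopeData { Object** next; Object** limit; int level; };
//
//   void CallApi(HandleScopeData* d) {
//     Object** prev_next = d->next;    // movp(prev_next_address_reg, ...)
//     Object** prev_limit = d->limit;  // movp(prev_limit_reg, ...)
//     d->level++;                      // addl(Operand(base_reg, kLevelOffset))
//     InvokeApiFunction();             // call(rax)
//     d->level--;                      // subl(Operand(base_reg, kLevelOffset))
//     d->next = prev_next;             // movp(Operand(base_reg, kNextOffset))
//     if (d->limit != prev_limit) DeleteExtensions();  // slow path above
//   }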


void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
                                             int result_size) {
  // Set the entry point and jump to the C entry runtime stub.
  LoadAddress(rbx, ext);
  CEntryStub ces(isolate(), result_size);
  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  // You can't call a builtin without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  // Rely on the assertion to check that the number of provided
  // arguments matches the expected number of arguments. Fake a
  // parameter count to avoid emitting code to do the check.
  ParameterCount expected(0);
  GetBuiltinEntry(rdx, id);
  InvokeCode(rdx, expected, expected, flag, call_wrapper);
}


void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the builtins object into target register.
  movp(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  movp(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
  movp(target, FieldOperand(target,
                            JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}


void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  ASSERT(!target.is(rdi));
  // Load the JavaScript builtin function from the builtins object.
  GetBuiltinFunction(rdi, id);
  movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}


#define REG(Name) { kRegister_ ## Name ## _Code }

static const Register saved_regs[] = {
  REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
  REG(r9), REG(r10), REG(r11)
};

#undef REG

static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);


void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
                                     Register exclusion1,
                                     Register exclusion2,
                                     Register exclusion3) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  for (int i = 0; i < kNumberOfSavedRegs; i++) {
    Register reg = saved_regs[i];
    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
      pushq(reg);
    }
  }
  // r12 to r15 are callee-save on all platforms.
  if (fp_mode == kSaveFPRegs) {
    subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(Operand(rsp, i * kDoubleSize), reg);
    }
  }
}


void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
                                    Register exclusion1,
                                    Register exclusion2,
                                    Register exclusion3) {
  if (fp_mode == kSaveFPRegs) {
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(reg, Operand(rsp, i * kDoubleSize));
    }
    addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
  }
  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
    Register reg = saved_regs[i];
    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
      popq(reg);
    }
  }
}


void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
  xorps(dst, dst);
  cvtlsi2sd(dst, src);
}


void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
  xorps(dst, dst);
  cvtlsi2sd(dst, src);
}


void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
  ASSERT(!r.IsDouble());
  if (r.IsInteger8()) {
    movsxbq(dst, src);
  } else if (r.IsUInteger8()) {
    movzxbl(dst, src);
  } else if (r.IsInteger16()) {
    movsxwq(dst, src);
  } else if (r.IsUInteger16()) {
    movzxwl(dst, src);
  } else if (r.IsInteger32()) {
    movl(dst, src);
  } else {
    movp(dst, src);
  }
}


void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
  ASSERT(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    movb(dst, src);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    movw(dst, src);
  } else if (r.IsInteger32()) {
    movl(dst, src);
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    movp(dst, src);
  }
}


void MacroAssembler::Set(Register dst, int64_t x) {
  if (x == 0) {
    xorl(dst, dst);
  } else if (is_uint32(x)) {
    movl(dst, Immediate(static_cast<uint32_t>(x)));
  } else if (is_int32(x)) {
    movq(dst, Immediate(static_cast<int32_t>(x)));
  } else {
    movq(dst, x);
  }
}


void MacroAssembler::Set(const Operand& dst, intptr_t x) {
  if (kPointerSize == kInt64Size) {
    if (is_int32(x)) {
      movp(dst, Immediate(static_cast<int32_t>(x)));
    } else {
      Set(kScratchRegister, x);
      movp(dst, kScratchRegister);
    }
  } else {
    movp(dst, Immediate(static_cast<int32_t>(x)));
  }
}


// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.

bool MacroAssembler::IsUnsafeInt(const int32_t x) {
  static const int kMaxBits = 17;
  return !is_intn(x, kMaxBits);
}


void MacroAssembler::SafeMove(Register dst, Smi* src) {
  ASSERT(!dst.is(kScratchRegister));
  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
    if (SmiValuesAre32Bits()) {
      // JIT cookie can be converted to Smi.
      Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
      Move(kScratchRegister, Smi::FromInt(jit_cookie()));
      xorp(dst, kScratchRegister);
    } else {
      ASSERT(SmiValuesAre31Bits());
      int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
      movp(dst, Immediate(value ^ jit_cookie()));
      xorp(dst, Immediate(jit_cookie()));
    }
  } else {
    Move(dst, src);
  }
}


void MacroAssembler::SafePush(Smi* src) {
  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
    if (SmiValuesAre32Bits()) {
      // JIT cookie can be converted to Smi.
      Push(Smi::FromInt(src->value() ^ jit_cookie()));
      Move(kScratchRegister, Smi::FromInt(jit_cookie()));
      xorp(Operand(rsp, 0), kScratchRegister);
    } else {
      ASSERT(SmiValuesAre31Bits());
      int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
      Push(Immediate(value ^ jit_cookie()));
      xorp(Operand(rsp, 0), Immediate(jit_cookie()));
    }
  } else {
    Push(src);
  }
}
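
// SafeMove/SafePush defend against JIT spraying: a "wide" immediate never
// appears verbatim in the instruction stream. The idea in two lines
// (an illustrative model; the variable names are hypothetical):
//
//   int32_t masked = value ^ jit_cookie;       // emitted as the immediate
//   int32_t restored = masked ^ jit_cookie;    // xorp undoes the mask
//
// Since x ^ c ^ c == x, the register or stack slot ends up holding the
// original value, but an attacker cannot choose the bytes that get emitted.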


Register MacroAssembler::GetSmiConstant(Smi* source) {
  int value = source->value();
  if (value == 0) {
    xorl(kScratchRegister, kScratchRegister);
    return kScratchRegister;
  }
  if (value == 1) {
    return kSmiConstantRegister;
  }
  LoadSmiConstant(kScratchRegister, source);
  return kScratchRegister;
}


void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
  if (emit_debug_code()) {
    Move(dst, Smi::FromInt(kSmiConstantRegisterValue),
         Assembler::RelocInfoNone());
    cmpq(dst, kSmiConstantRegister);
    Assert(equal, kUninitializedKSmiConstantRegister);
  }
  int value = source->value();
  if (value == 0) {
    xorl(dst, dst);
    return;
  }
  bool negative = value < 0;
  unsigned int uvalue = negative ? -value : value;

  switch (uvalue) {
    case 9:
      leap(dst,
           Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
      break;
    case 8:
      xorl(dst, dst);
      leap(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
      break;
    case 4:
      xorl(dst, dst);
      leap(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
      break;
    case 5:
      leap(dst,
           Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
      break;
    case 3:
      leap(dst,
           Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
      break;
    case 2:
      leap(dst,
           Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
      break;
    case 1:
      movp(dst, kSmiConstantRegister);
      break;
    case 0:
      UNREACHABLE();
      return;
    default:
      Move(dst, source, Assembler::RelocInfoNone());
      return;
  }
  if (negative) {
    negp(dst);
  }
}
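
// kSmiConstantRegister permanently holds the tagged smi 1
// (kSmiConstantRegisterValue), so small smi constants can be synthesized
// with one lea instead of loading a 64-bit immediate. The switch above is
// the base + index * scale decomposition (k stands for the register value):
//
//   k*9 = k + k*8    k*5 = k + k*4    k*3 = k + k*2    k*2 = k + k*1
//   k*8 = 0 + k*8    k*4 = 0 + k*4    (after zeroing dst with xorl)
//
// Negative constants reuse the same table and fix the sign with negp.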


void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movl(dst, src);
  }
  shlp(dst, Immediate(kSmiShift));
}
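
// The tagging scheme in one line (a model, not V8 API): with 32-bit smi
// values on x64, kSmiShift is 32, so a tagged smi is the value parked in the
// upper half of the word, with the low tag bit (kSmiTag == 0) clear.
//
//   int64_t TagSmi(int32_t value) { return static_cast<int64_t>(value) << 32; }
//   int32_t UntagSmi(int64_t smi) { return static_cast<int32_t>(smi >> 32); }
//
// With 31-bit smi values (the x32 configuration), kSmiShift is 1 and the
// same shl/sar pattern applies within 32 bits.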


void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
  if (emit_debug_code()) {
    testb(dst, Immediate(0x01));
    Label ok;
    j(zero, &ok, Label::kNear);
    Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
    bind(&ok);
  }

  if (SmiValuesAre32Bits()) {
    ASSERT(kSmiShift % kBitsPerByte == 0);
    movl(Operand(dst, kSmiShift / kBitsPerByte), src);
  } else {
    ASSERT(SmiValuesAre31Bits());
    Integer32ToSmi(kScratchRegister, src);
    movp(dst, kScratchRegister);
  }
}


void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
                                                Register src,
                                                int constant) {
  if (dst.is(src)) {
    addl(dst, Immediate(constant));
  } else {
    leal(dst, Operand(src, constant));
  }
  shlp(dst, Immediate(kSmiShift));
}


void MacroAssembler::SmiToInteger32(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movp(dst, src);
  }

  if (SmiValuesAre32Bits()) {
    shrp(dst, Immediate(kSmiShift));
  } else {
    ASSERT(SmiValuesAre31Bits());
    sarl(dst, Immediate(kSmiShift));
  }
}


void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
  if (SmiValuesAre32Bits()) {
    movl(dst, Operand(src, kSmiShift / kBitsPerByte));
  } else {
    ASSERT(SmiValuesAre31Bits());
    movl(dst, src);
    sarl(dst, Immediate(kSmiShift));
  }
}


void MacroAssembler::SmiToInteger64(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movp(dst, src);
  }
  sarp(dst, Immediate(kSmiShift));
  if (kPointerSize == kInt32Size) {
    // Sign extend to 64-bit.
    movsxlq(dst, dst);
  }
}


void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
  if (SmiValuesAre32Bits()) {
    movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
  } else {
    ASSERT(SmiValuesAre31Bits());
    movp(dst, src);
    SmiToInteger64(dst, dst);
  }
}


void MacroAssembler::SmiTest(Register src) {
  AssertSmi(src);
  testp(src, src);
}


void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
  AssertSmi(smi1);
  AssertSmi(smi2);
  cmpp(smi1, smi2);
}


void MacroAssembler::SmiCompare(Register dst, Smi* src) {
  AssertSmi(dst);
  Cmp(dst, src);
}


void MacroAssembler::Cmp(Register dst, Smi* src) {
  ASSERT(!dst.is(kScratchRegister));
  if (src->value() == 0) {
    testp(dst, dst);
  } else {
    Register constant_reg = GetSmiConstant(src);
    cmpp(dst, constant_reg);
  }
}


void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
  AssertSmi(dst);
  AssertSmi(src);
  cmpp(dst, src);
}


void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
  AssertSmi(dst);
  AssertSmi(src);
  cmpp(dst, src);
}


void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
  AssertSmi(dst);
  if (SmiValuesAre32Bits()) {
    cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
  } else {
    ASSERT(SmiValuesAre31Bits());
    cmpl(dst, Immediate(src));
  }
}


void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
  // The Operand cannot use the smi register.
  Register smi_reg = GetSmiConstant(src);
  ASSERT(!dst.AddressUsesRegister(smi_reg));
  cmpp(dst, smi_reg);
}


void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
  if (SmiValuesAre32Bits()) {
    cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
  } else {
    ASSERT(SmiValuesAre31Bits());
    SmiToInteger32(kScratchRegister, dst);
    cmpl(kScratchRegister, src);
  }
}


void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                                           Register src,
                                                           int power) {
  ASSERT(power >= 0);
  ASSERT(power < 64);
  if (power == 0) {
    SmiToInteger64(dst, src);
    return;
  }
  if (!dst.is(src)) {
    movp(dst, src);
  }
  if (power < kSmiShift) {
    sarp(dst, Immediate(kSmiShift - power));
  } else if (power > kSmiShift) {
    shlp(dst, Immediate(power - kSmiShift));
  }
}
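
// The shift pairing above is just exponent arithmetic (a worked model):
// untagging divides by 2^kSmiShift and the multiply contributes 2^power, so
//
//   result = (smi >> kSmiShift) << power
//          = smi >> (kSmiShift - power)   when power < kSmiShift
//          = smi << (power - kSmiShift)   when power > kSmiShift
//
// and the two shifts cancel entirely when power == kSmiShift.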


void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
                                                         Register src,
                                                         int power) {
  ASSERT((0 <= power) && (power < 32));
  if (dst.is(src)) {
    shrp(dst, Immediate(power + kSmiShift));
  } else {
    UNIMPLEMENTED();  // Not used.
  }
}


void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
                                 Label* on_not_smis,
                                 Label::Distance near_jump) {
  if (dst.is(src1) || dst.is(src2)) {
    ASSERT(!src1.is(kScratchRegister));
    ASSERT(!src2.is(kScratchRegister));
    movp(kScratchRegister, src1);
    orp(kScratchRegister, src2);
    JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
    movp(dst, kScratchRegister);
  } else {
    movp(dst, src1);
    orp(dst, src2);
    JumpIfNotSmi(dst, on_not_smis, near_jump);
  }
}


Condition MacroAssembler::CheckSmi(Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}


Condition MacroAssembler::CheckSmi(const Operand& src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}


Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  // Test that both bits of the mask 0x8000000000000001 are zero.
  movp(kScratchRegister, src);
  rolp(kScratchRegister, Immediate(1));
  testb(kScratchRegister, Immediate(3));
  return zero;
}
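
// The rotate trick above folds two tests into one: a word is a non-negative
// smi iff the sign bit (bit 63) and the tag bit (bit 0) are both clear.
// Rotating left by one moves bit 63 into bit 0 and bit 0 into bit 1, so a
// single testb against 3 checks both at once (a sketch, not V8 API):
//
//   bool IsNonNegativeSmi(uint64_t word) {
//     uint64_t rotated = (word << 1) | (word >> 63);  // rolp(..., 1)
//     return (rotated & 3) == 0;                      // testb(..., 3)
//   }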


Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
  if (first.is(second)) {
    return CheckSmi(first);
  }
  STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
  if (SmiValuesAre32Bits()) {
    leal(kScratchRegister, Operand(first, second, times_1, 0));
    testb(kScratchRegister, Immediate(0x03));
  } else {
    ASSERT(SmiValuesAre31Bits());
    movl(kScratchRegister, first);
    orl(kScratchRegister, second);
    testb(kScratchRegister, Immediate(kSmiTagMask));
  }
  return zero;
}


Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
                                                  Register second) {
  if (first.is(second)) {
    return CheckNonNegativeSmi(first);
  }
  movp(kScratchRegister, first);
  orp(kScratchRegister, second);
  rolp(kScratchRegister, Immediate(1));
  testl(kScratchRegister, Immediate(3));
  return zero;
}


Condition MacroAssembler::CheckEitherSmi(Register first,
                                         Register second,
                                         Register scratch) {
  if (first.is(second)) {
    return CheckSmi(first);
  }
  if (scratch.is(second)) {
    andl(scratch, first);
  } else {
    if (!scratch.is(first)) {
      movl(scratch, first);
    }
    andl(scratch, second);
  }
  testb(scratch, Immediate(kSmiTagMask));
  return zero;
}


Condition MacroAssembler::CheckIsMinSmi(Register src) {
  ASSERT(!src.is(kScratchRegister));
  // If we overflow by subtracting one, it's the minimal smi value.
  cmpp(src, kSmiConstantRegister);
  return overflow;
}


Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
  if (SmiValuesAre32Bits()) {
    // A 32-bit integer value can always be converted to a smi.
    return always;
  } else {
    ASSERT(SmiValuesAre31Bits());
    cmpl(src, Immediate(0xc0000000));
    return positive;
  }
}


Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
  if (SmiValuesAre32Bits()) {
    // An unsigned 32-bit integer value is valid as long as the high bit
    // is not set.
    testl(src, src);
    return positive;
  } else {
    ASSERT(SmiValuesAre31Bits());
    testl(src, Immediate(0xc0000000));
    return zero;
  }
}
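
// For 31-bit smis the valid range is [-2^30, 2^30 - 1]. The signed check
// compares against 0xc0000000 (-2^30): cmpl computes src - (-2^30) in
// wrapping 32-bit arithmetic, and the sign flag is clear exactly for
// in-range values. The unsigned check tests the top two bits instead
// (a sketch, not V8 API):
//
//   bool Int32FitsSmi31(int32_t v) {
//     return static_cast<int32_t>(v + 0x40000000u) >= 0;  // cmpl, "positive"
//   }
//   bool Uint32FitsSmi31(uint32_t v) {
//     return (v & 0xc0000000u) == 0;                      // testl, "zero"
//   }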


void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
  if (dst.is(src)) {
    andl(dst, Immediate(kSmiTagMask));
  } else {
    movl(dst, Immediate(kSmiTagMask));
    andl(dst, src);
  }
}


void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
  if (!(src.AddressUsesRegister(dst))) {
    movl(dst, Immediate(kSmiTagMask));
    andl(dst, src);
  } else {
    movl(dst, src);
    andl(dst, Immediate(kSmiTagMask));
  }
}


void MacroAssembler::JumpIfValidSmiValue(Register src,
                                         Label* on_valid,
                                         Label::Distance near_jump) {
  Condition is_valid = CheckInteger32ValidSmiValue(src);
  j(is_valid, on_valid, near_jump);
}


void MacroAssembler::JumpIfNotValidSmiValue(Register src,
                                            Label* on_invalid,
                                            Label::Distance near_jump) {
  Condition is_valid = CheckInteger32ValidSmiValue(src);
  j(NegateCondition(is_valid), on_invalid, near_jump);
}


void MacroAssembler::JumpIfUIntValidSmiValue(Register src,
                                             Label* on_valid,
                                             Label::Distance near_jump) {
  Condition is_valid = CheckUInteger32ValidSmiValue(src);
  j(is_valid, on_valid, near_jump);
}


void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
                                                Label* on_invalid,
                                                Label::Distance near_jump) {
  Condition is_valid = CheckUInteger32ValidSmiValue(src);
  j(NegateCondition(is_valid), on_invalid, near_jump);
}


void MacroAssembler::JumpIfSmi(Register src,
                               Label* on_smi,
                               Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(smi, on_smi, near_jump);
}


void MacroAssembler::JumpIfNotSmi(Register src,
                                  Label* on_not_smi,
                                  Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(NegateCondition(smi), on_not_smi, near_jump);
}


void MacroAssembler::JumpUnlessNonNegativeSmi(
    Register src, Label* on_not_smi_or_negative,
    Label::Distance near_jump) {
  Condition non_negative_smi = CheckNonNegativeSmi(src);
  j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
}


void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
                                             Smi* constant,
                                             Label* on_equals,
                                             Label::Distance near_jump) {
  SmiCompare(src, constant);
  j(equal, on_equals, near_jump);
}


void MacroAssembler::JumpIfNotBothSmi(Register src1,
                                      Register src2,
                                      Label* on_not_both_smi,
                                      Label::Distance near_jump) {
  Condition both_smi = CheckBothSmi(src1, src2);
  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
}


void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
                                                  Register src2,
                                                  Label* on_not_both_smi,
                                                  Label::Distance near_jump) {
  Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
}


void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movp(dst, src);
    }
    return;
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    switch (constant->value()) {
      case 1:
        addp(dst, kSmiConstantRegister);
        return;
      case 2:
        leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
        return;
      case 4:
        leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
        return;
      case 8:
        leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
        return;
      default:
        Register constant_reg = GetSmiConstant(constant);
        addp(dst, constant_reg);
        return;
    }
  } else {
    switch (constant->value()) {
      case 1:
        leap(dst, Operand(src, kSmiConstantRegister, times_1, 0));
1612         return;
1613       case 2:
1614         leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
1615         return;
1616       case 4:
1617         leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
1618         return;
1619       case 8:
1620         leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
1621         return;
1622       default:
1623         LoadSmiConstant(dst, constant);
1624         addp(dst, src);
1625         return;
1626     }
1627   }
1628 }
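
// Illustrative note on the lea-based cases above (a sketch, assuming
// kSmiConstantRegister holds Smi::FromInt(1), as the case-1 addp
// suggests): smi addition is linear in the tagged representation, so
// src + 2 * Smi::FromInt(1) equals src + Smi::FromInt(2).  Hence e.g.
//   leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
// adds the smi constant 2 in a single instruction, without first
// materializing the constant in a scratch register.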


void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
  if (constant->value() != 0) {
    if (SmiValuesAre32Bits()) {
      addl(Operand(dst, kSmiShift / kBitsPerByte),
           Immediate(constant->value()));
    } else {
      ASSERT(SmiValuesAre31Bits());
      addp(dst, Immediate(constant));
    }
  }
}
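
// Worked example for the 32-bit-smi branch above (illustrative): with
// kSmiShift == 32 the payload occupies the upper half of the 64-bit
// word, so Operand(dst, kSmiShift / kBitsPerByte) addresses the memory
// four bytes past dst.  A 32-bit addl there adds the untagged value
// directly to the payload while leaving the tag and padding bits alone.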


void MacroAssembler::SmiAddConstant(Register dst,
                                    Register src,
                                    Smi* constant,
                                    SmiOperationExecutionMode mode,
                                    Label* bailout_label,
                                    Label::Distance near_jump) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movp(dst, src);
    }
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    LoadSmiConstant(kScratchRegister, constant);
    addp(dst, kScratchRegister);
    if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
      j(no_overflow, bailout_label, near_jump);
      ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
      subp(dst, kScratchRegister);
    } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
      if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
        Label done;
        j(no_overflow, &done, Label::kNear);
        subp(dst, kScratchRegister);
        jmp(bailout_label, near_jump);
        bind(&done);
      } else {
        // Bail out on overflow without preserving src.
        j(overflow, bailout_label, near_jump);
      }
    } else {
      CHECK(mode.IsEmpty());
    }
  } else {
    ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
    ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
    LoadSmiConstant(dst, constant);
    addp(dst, src);
    j(overflow, bailout_label, near_jump);
  }
}


void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movp(dst, src);
    }
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    subp(dst, constant_reg);
  } else {
    if (constant->value() == Smi::kMinValue) {
      LoadSmiConstant(dst, constant);
      // Adding and subtracting the min-value gives the same result; they
      // only differ in the overflow flag, which we don't check here.
      addp(dst, src);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
      addp(dst, src);
    }
  }
}


void MacroAssembler::SmiSubConstant(Register dst,
                                    Register src,
                                    Smi* constant,
                                    SmiOperationExecutionMode mode,
                                    Label* bailout_label,
                                    Label::Distance near_jump) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movp(dst, src);
    }
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    LoadSmiConstant(kScratchRegister, constant);
    subp(dst, kScratchRegister);
    if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
      j(no_overflow, bailout_label, near_jump);
      ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
      addp(dst, kScratchRegister);
    } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
      if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
        Label done;
        j(no_overflow, &done, Label::kNear);
        addp(dst, kScratchRegister);
        jmp(bailout_label, near_jump);
        bind(&done);
      } else {
        // Bail out on overflow without preserving src.
        j(overflow, bailout_label, near_jump);
      }
    } else {
      CHECK(mode.IsEmpty());
    }
  } else {
    ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
    ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
    if (constant->value() == Smi::kMinValue) {
      ASSERT(!dst.is(kScratchRegister));
      movp(dst, src);
      LoadSmiConstant(kScratchRegister, constant);
      subp(dst, kScratchRegister);
      j(overflow, bailout_label, near_jump);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
      addp(dst, src);
      j(overflow, bailout_label, near_jump);
    }
  }
}


void MacroAssembler::SmiNeg(Register dst,
                            Register src,
                            Label* on_smi_result,
                            Label::Distance near_jump) {
  if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    movp(kScratchRegister, src);
    negp(dst);  // Low 32 bits are retained as zero by negation.
    // Test if result is zero or Smi::kMinValue.
    cmpp(dst, kScratchRegister);
    j(not_equal, on_smi_result, near_jump);
    movp(src, kScratchRegister);
  } else {
    movp(dst, src);
    negp(dst);
    cmpp(dst, src);
    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
    j(not_equal, on_smi_result, near_jump);
  }
}
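
// Failure-detection note for SmiNeg (illustrative): the only values
// that negate to themselves in two's complement are 0 and
// Smi::kMinValue (whose negation overflows back to itself).  Neither
// has a valid smi negation -- negating 0 should give -0, and negating
// kMinValue leaves the smi range -- so "result != original" is exactly
// the condition for jumping to on_smi_result.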


template<class T>
static void SmiAddHelper(MacroAssembler* masm,
                         Register dst,
                         Register src1,
                         T src2,
                         Label* on_not_smi_result,
                         Label::Distance near_jump) {
  if (dst.is(src1)) {
    Label done;
    masm->addp(dst, src2);
    masm->j(no_overflow, &done, Label::kNear);
    // Restore src1.
    masm->subp(dst, src2);
    masm->jmp(on_not_smi_result, near_jump);
    masm->bind(&done);
  } else {
    masm->movp(dst, src1);
    masm->addp(dst, src2);
    masm->j(overflow, on_not_smi_result, near_jump);
  }
}


void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT_NOT_NULL(on_not_smi_result);
  ASSERT(!dst.is(src2));
  SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}


void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            const Operand& src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT_NOT_NULL(on_not_smi_result);
  ASSERT(!src2.AddressUsesRegister(dst));
  SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}


void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            Register src2) {
  // No overflow checking. Use only when it's known that
  // overflowing is impossible.
  if (!dst.is(src1)) {
    if (emit_debug_code()) {
      movp(kScratchRegister, src1);
      addp(kScratchRegister, src2);
      Check(no_overflow, kSmiAdditionOverflow);
    }
    leap(dst, Operand(src1, src2, times_1, 0));
  } else {
    addp(dst, src2);
    Assert(no_overflow, kSmiAdditionOverflow);
  }
}
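
// A note on the unchecked SmiAdd above (illustrative): tagged smis add
// linearly, so leap(dst, Operand(src1, src2, times_1, 0)) computes
// dst = src1 + src2 in one instruction and, unlike addp, leaves the
// flags untouched -- the caller has already guaranteed that the
// addition cannot overflow.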


template<class T>
static void SmiSubHelper(MacroAssembler* masm,
                         Register dst,
                         Register src1,
                         T src2,
                         Label* on_not_smi_result,
                         Label::Distance near_jump) {
  if (dst.is(src1)) {
    Label done;
    masm->subp(dst, src2);
    masm->j(no_overflow, &done, Label::kNear);
    // Restore src1.
    masm->addp(dst, src2);
    masm->jmp(on_not_smi_result, near_jump);
    masm->bind(&done);
  } else {
    masm->movp(dst, src1);
    masm->subp(dst, src2);
    masm->j(overflow, on_not_smi_result, near_jump);
  }
}


void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT_NOT_NULL(on_not_smi_result);
  ASSERT(!dst.is(src2));
  SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}


void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            const Operand& src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT_NOT_NULL(on_not_smi_result);
  ASSERT(!src2.AddressUsesRegister(dst));
  SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}


template<class T>
static void SmiSubNoOverflowHelper(MacroAssembler* masm,
                                   Register dst,
                                   Register src1,
                                   T src2) {
  // No overflow checking. Use only when it's known that
  // overflowing is impossible (e.g., subtracting two positive smis).
  if (!dst.is(src1)) {
    masm->movp(dst, src1);
  }
  masm->subp(dst, src2);
  masm->Assert(no_overflow, kSmiSubtractionOverflow);
}


void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
  ASSERT(!dst.is(src2));
  SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
}


void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            const Operand& src2) {
  SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
}


void MacroAssembler::SmiMul(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT(!dst.is(src2));
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));

  if (dst.is(src1)) {
    Label failure, zero_correct_result;
    movp(kScratchRegister, src1);  // Create backup for later testing.
    SmiToInteger64(dst, src1);
    imulp(dst, src2);
    j(overflow, &failure, Label::kNear);

    // Check for negative zero result.  If the product is zero and one
    // argument is negative, go to the slow case.
    Label correct_result;
    testp(dst, dst);
    j(not_zero, &correct_result, Label::kNear);

    movp(dst, kScratchRegister);
    xorp(dst, src2);
    // Result was positive zero.
    j(positive, &zero_correct_result, Label::kNear);

    bind(&failure);  // Reused failure exit, restores src1.
    movp(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);

    bind(&zero_correct_result);
    Set(dst, 0);

    bind(&correct_result);
  } else {
    SmiToInteger64(dst, src1);
    imulp(dst, src2);
    j(overflow, on_not_smi_result, near_jump);
    // Check for negative zero result.  If the product is zero and one
    // argument is negative, go to the slow case.
    Label correct_result;
    testp(dst, dst);
    j(not_zero, &correct_result, Label::kNear);
    // One of src1 and src2 is zero; check whether the other is negative.
    movp(kScratchRegister, src1);
    xorp(kScratchRegister, src2);
    j(negative, on_not_smi_result, near_jump);
    bind(&correct_result);
  }
}
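
// Sign-test detail for the negative-zero check above (illustrative):
// when the product is zero, one operand may still be negative (e.g.
// -4 * 0, which must evaluate to -0 rather than the smi 0).  After
// xorp, the sign bit of the result is the xor of the operands' sign
// bits, so "negative" means the signs differed and the slow path has
// to produce a heap-number -0.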


void MacroAssembler::SmiDiv(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src2.is(rax));
  ASSERT(!src2.is(rdx));
  ASSERT(!src1.is(rdx));

  // Check for 0 divisor (result is +/-Infinity).
  testp(src2, src2);
  j(zero, on_not_smi_result, near_jump);

  if (src1.is(rax)) {
    movp(kScratchRegister, src1);
  }
  SmiToInteger32(rax, src1);
  // We need to rule out dividing Smi::kMinValue by -1, since that would
  // overflow in idiv and raise an exception.
  // We combine this with negative zero test (negative zero only happens
  // when dividing zero by a negative number).

  // We overshoot a little and go to slow case if we divide min-value
  // by any negative value, not just -1.
  Label safe_div;
  testl(rax, Immediate(~Smi::kMinValue));
  j(not_zero, &safe_div, Label::kNear);
  testp(src2, src2);
  if (src1.is(rax)) {
    j(positive, &safe_div, Label::kNear);
    movp(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);
  } else {
    j(negative, on_not_smi_result, near_jump);
  }
  bind(&safe_div);

  SmiToInteger32(src2, src2);
  // Sign extend src1 into edx:eax.
  cdq();
  idivl(src2);
  Integer32ToSmi(src2, src2);
  // Check that the remainder is zero.
  testl(rdx, rdx);
  if (src1.is(rax)) {
    Label smi_result;
    j(zero, &smi_result, Label::kNear);
    movp(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);
    bind(&smi_result);
  } else {
    j(not_zero, on_not_smi_result, near_jump);
  }
  if (!dst.is(src1) && src1.is(rax)) {
    movp(src1, kScratchRegister);
  }
  Integer32ToSmi(dst, rax);
}
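
// Guard detail (illustrative): after untagging, testl(rax,
// Immediate(~Smi::kMinValue)) is zero only when rax holds 0 or
// Smi::kMinValue, and both cases hinge on the divisor's sign: 0 divided
// by a negative number is -0 (not a smi), and Smi::kMinValue divided by
// -1 overflows idivl.  Bailing out for any negative divisor covers
// both, at the cost of also sending kMinValue / other-negative to the
// slow path, as the comment above notes.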


void MacroAssembler::SmiMod(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!src2.is(rax));
  ASSERT(!src2.is(rdx));
  ASSERT(!src1.is(rdx));
  ASSERT(!src1.is(src2));

  testp(src2, src2);
  j(zero, on_not_smi_result, near_jump);

  if (src1.is(rax)) {
    movp(kScratchRegister, src1);
  }
  SmiToInteger32(rax, src1);
  SmiToInteger32(src2, src2);

  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
  Label safe_div;
  cmpl(rax, Immediate(Smi::kMinValue));
  j(not_equal, &safe_div, Label::kNear);
  cmpl(src2, Immediate(-1));
  j(not_equal, &safe_div, Label::kNear);
  // Retag inputs and go to the slow case.
  Integer32ToSmi(src2, src2);
  if (src1.is(rax)) {
    movp(src1, kScratchRegister);
  }
  jmp(on_not_smi_result, near_jump);
  bind(&safe_div);

  // Sign extend eax into edx:eax.
  cdq();
  idivl(src2);
  // Restore smi tags on inputs.
  Integer32ToSmi(src2, src2);
  if (src1.is(rax)) {
    movp(src1, kScratchRegister);
  }
  // Check for a negative zero result.  If the result is zero, and the
  // dividend is negative, go slow to return a floating point negative zero.
  Label smi_result;
  testl(rdx, rdx);
  j(not_zero, &smi_result, Label::kNear);
  testp(src1, src1);
  j(negative, on_not_smi_result, near_jump);
  bind(&smi_result);
  Integer32ToSmi(dst, rdx);
}
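
// Negative-zero example for the remainder check above (illustrative):
// in JavaScript, -4 % 2 evaluates to -0, not the smi 0, because the
// result of % takes the sign of the dividend.  idivl leaves a zero
// remainder in rdx for such inputs, so the extra sign test on the
// dividend routes them to the slow path that can return a
// heap-number -0.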


void MacroAssembler::SmiNot(Register dst, Register src) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src.is(kScratchRegister));
  if (SmiValuesAre32Bits()) {
    // Set tag and padding bits before negating, so that they are zero
    // afterwards.
    movl(kScratchRegister, Immediate(~0));
  } else {
    ASSERT(SmiValuesAre31Bits());
    movl(kScratchRegister, Immediate(1));
  }
  if (dst.is(src)) {
    xorp(dst, kScratchRegister);
  } else {
    leap(dst, Operand(src, kScratchRegister, times_1, 0));
  }
  notp(dst);
}
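
// Bit-level sketch for the 32-bit-smi case above (illustrative): movl
// zero-extends, so kScratchRegister becomes 0x00000000ffffffff.  The
// xor (or the equivalent lea-add, since the low half of a tagged smi
// is zero) sets the tag and padding bits to ones without touching the
// payload; the final notp then complements the payload and returns the
// low 32 bits to zero, yielding the tagged representation of ~value.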


void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
  ASSERT(!dst.is(src2));
  if (!dst.is(src1)) {
    movp(dst, src1);
  }
  andp(dst, src2);
}


void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    Set(dst, 0);
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    andp(dst, constant_reg);
  } else {
    LoadSmiConstant(dst, constant);
    andp(dst, src);
  }
}


void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
  if (!dst.is(src1)) {
    ASSERT(!src1.is(src2));
    movp(dst, src1);
  }
  orp(dst, src2);
}


void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
  if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    orp(dst, constant_reg);
  } else {
    LoadSmiConstant(dst, constant);
    orp(dst, src);
  }
}


void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
  if (!dst.is(src1)) {
    ASSERT(!src1.is(src2));
    movp(dst, src1);
  }
  xorp(dst, src2);
}


void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
  if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    xorp(dst, constant_reg);
  } else {
    LoadSmiConstant(dst, constant);
    xorp(dst, src);
  }
}


void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
                                                     Register src,
                                                     int shift_value) {
  ASSERT(is_uint5(shift_value));
  if (shift_value > 0) {
    if (dst.is(src)) {
      sarp(dst, Immediate(shift_value + kSmiShift));
      shlp(dst, Immediate(kSmiShift));
    } else {
      UNIMPLEMENTED();  // Not used.
    }
  }
}


void MacroAssembler::SmiShiftLeftConstant(Register dst,
                                          Register src,
                                          int shift_value,
                                          Label* on_not_smi_result,
                                          Label::Distance near_jump) {
  if (SmiValuesAre32Bits()) {
    if (!dst.is(src)) {
      movp(dst, src);
    }
    if (shift_value > 0) {
      // The shift amount is specified by the lower 5 bits, not six as
      // for the shl opcode.
      shlq(dst, Immediate(shift_value & 0x1f));
    }
  } else {
    ASSERT(SmiValuesAre31Bits());
    if (dst.is(src)) {
      UNIMPLEMENTED();  // Not used.
    } else {
      SmiToInteger32(dst, src);
      shll(dst, Immediate(shift_value));
      JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump);
      Integer32ToSmi(dst, dst);
    }
  }
}


void MacroAssembler::SmiShiftLogicalRightConstant(
    Register dst, Register src, int shift_value,
    Label* on_not_smi_result, Label::Distance near_jump) {
  // Logical right shift interprets its result as an *unsigned* number.
  if (dst.is(src)) {
    UNIMPLEMENTED();  // Not used.
  } else {
    if (shift_value == 0) {
      testp(src, src);
      j(negative, on_not_smi_result, near_jump);
    }
    if (SmiValuesAre32Bits()) {
      movp(dst, src);
      shrp(dst, Immediate(shift_value + kSmiShift));
      shlp(dst, Immediate(kSmiShift));
    } else {
      ASSERT(SmiValuesAre31Bits());
      SmiToInteger32(dst, src);
      shrp(dst, Immediate(shift_value));
      JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
      Integer32ToSmi(dst, dst);
    }
  }
}


void MacroAssembler::SmiShiftLeft(Register dst,
                                  Register src1,
                                  Register src2,
                                  Label* on_not_smi_result,
                                  Label::Distance near_jump) {
  if (SmiValuesAre32Bits()) {
    ASSERT(!dst.is(rcx));
    if (!dst.is(src1)) {
      movp(dst, src1);
    }
    // Untag shift amount.
    SmiToInteger32(rcx, src2);
    // The shift amount is specified by the lower 5 bits, not six as for
    // the shl opcode.
    andp(rcx, Immediate(0x1f));
    shlq_cl(dst);
  } else {
    ASSERT(SmiValuesAre31Bits());
    ASSERT(!dst.is(kScratchRegister));
    ASSERT(!src1.is(kScratchRegister));
    ASSERT(!src2.is(kScratchRegister));
    ASSERT(!dst.is(src2));
    ASSERT(!dst.is(rcx));

    if (src1.is(rcx) || src2.is(rcx)) {
      movq(kScratchRegister, rcx);
    }
    if (dst.is(src1)) {
      UNIMPLEMENTED();  // Not used.
    } else {
      Label valid_result;
      SmiToInteger32(dst, src1);
      SmiToInteger32(rcx, src2);
      shll_cl(dst);
      JumpIfValidSmiValue(dst, &valid_result, Label::kNear);
      // Since neither src1 nor src2 can be dst, clobbering dst does not
      // corrupt them; only rcx (saved in kScratchRegister) needs restoring.
      if (src1.is(rcx) || src2.is(rcx)) {
        if (src1.is(rcx)) {
          movq(src1, kScratchRegister);
        } else {
          movq(src2, kScratchRegister);
        }
      }
      jmp(on_not_smi_result, near_jump);
      bind(&valid_result);
      Integer32ToSmi(dst, dst);
    }
  }
}


void MacroAssembler::SmiShiftLogicalRight(Register dst,
                                          Register src1,
                                          Register src2,
                                          Label* on_not_smi_result,
                                          Label::Distance near_jump) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(src2));
  ASSERT(!dst.is(rcx));
  if (src1.is(rcx) || src2.is(rcx)) {
    movq(kScratchRegister, rcx);
  }
  if (dst.is(src1)) {
    UNIMPLEMENTED();  // Not used.
  } else {
    Label valid_result;
    SmiToInteger32(dst, src1);
    SmiToInteger32(rcx, src2);
    shrl_cl(dst);
    JumpIfUIntValidSmiValue(dst, &valid_result, Label::kNear);
    // Since neither src1 nor src2 can be dst, clobbering dst does not
    // corrupt them; only rcx (saved in kScratchRegister) needs restoring.
    if (src1.is(rcx) || src2.is(rcx)) {
      if (src1.is(rcx)) {
        movq(src1, kScratchRegister);
      } else {
        movq(src2, kScratchRegister);
      }
    }
    jmp(on_not_smi_result, near_jump);
    bind(&valid_result);
    Integer32ToSmi(dst, dst);
  }
}


void MacroAssembler::SmiShiftArithmeticRight(Register dst,
                                             Register src1,
                                             Register src2) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(rcx));

  SmiToInteger32(rcx, src2);
  if (!dst.is(src1)) {
    movp(dst, src1);
  }
  SmiToInteger32(dst, dst);
  sarl_cl(dst);
  Integer32ToSmi(dst, dst);
}


void MacroAssembler::SelectNonSmi(Register dst,
                                  Register src1,
                                  Register src2,
                                  Label* on_not_smis,
                                  Label::Distance near_jump) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(src1));
  ASSERT(!dst.is(src2));
  // Both operands must not be smis.
#ifdef DEBUG
  Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
  Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
#endif
  STATIC_ASSERT(kSmiTag == 0);
  ASSERT_EQ(0, Smi::FromInt(0));
  movl(kScratchRegister, Immediate(kSmiTagMask));
  andp(kScratchRegister, src1);
  testl(kScratchRegister, src2);
  // If non-zero, both tag bits are set, i.e. neither operand is a smi.
  j(not_zero, on_not_smis, near_jump);

  // Exactly one operand is a smi.
  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
  // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
  subp(kScratchRegister, Immediate(1));
  // If src1 is a smi, the scratch register is now all 1s; otherwise all 0s.
  movp(dst, src1);
  xorp(dst, src2);
  andp(dst, kScratchRegister);
  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
  xorp(dst, src1);
  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
}
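
// Branch-free selection sketch (illustrative): with
//   mask = (src1 is a smi) ? ~0 : 0,
// the sequence computes dst = ((src1 ^ src2) & mask) ^ src1, which is
// src1 ^ src2 ^ src1 == src2 when src1 is the smi, and 0 ^ src1 == src1
// when src2 is the smi -- in both cases the non-smi operand, selected
// without a conditional jump.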


SmiIndex MacroAssembler::SmiToIndex(Register dst,
                                    Register src,
                                    int shift) {
  if (SmiValuesAre32Bits()) {
    ASSERT(is_uint6(shift));
    // There is a possible optimization if shift is in the range 60-63, but that
    // will (and must) never happen.
    if (!dst.is(src)) {
      movp(dst, src);
    }
    if (shift < kSmiShift) {
      sarp(dst, Immediate(kSmiShift - shift));
    } else {
      shlp(dst, Immediate(shift - kSmiShift));
    }
    return SmiIndex(dst, times_1);
  } else {
    ASSERT(SmiValuesAre31Bits());
    ASSERT(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
    if (!dst.is(src)) {
      movp(dst, src);
    }
    // We have to sign extend the index register to 64-bit as the SMI might
    // be negative.
    movsxlq(dst, dst);
    if (shift == times_1) {
      sarq(dst, Immediate(kSmiShift));
      return SmiIndex(dst, times_1);
    }
    return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
  }
}
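
// Index-conversion example for the 32-bit-smi case above (illustrative):
// a caller wanting value * 2^shift starts from the tagged word
// value << kSmiShift.  For shift == 3 and kSmiShift == 32 the code
// arithmetic-shifts right by 32 - 3 == 29 and turns value << 32 into
// value << 3 in a single instruction, with sarp preserving the sign of
// negative smis.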


SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
                                            Register src,
                                            int shift) {
  if (SmiValuesAre32Bits()) {
    // Register src holds a positive smi.
    ASSERT(is_uint6(shift));
    if (!dst.is(src)) {
      movp(dst, src);
    }
    negp(dst);
    if (shift < kSmiShift) {
      sarp(dst, Immediate(kSmiShift - shift));
    } else {
      shlp(dst, Immediate(shift - kSmiShift));
    }
    return SmiIndex(dst, times_1);
  } else {
    ASSERT(SmiValuesAre31Bits());
    ASSERT(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
    if (!dst.is(src)) {
      movp(dst, src);
    }
    negq(dst);
    if (shift == times_1) {
      sarq(dst, Immediate(kSmiShift));
      return SmiIndex(dst, times_1);
    }
    return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
  }
}


void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
  if (SmiValuesAre32Bits()) {
    ASSERT_EQ(0, kSmiShift % kBitsPerByte);
    addl(dst, Operand(src, kSmiShift / kBitsPerByte));
  } else {
    ASSERT(SmiValuesAre31Bits());
    SmiToInteger32(kScratchRegister, src);
    addl(dst, kScratchRegister);
  }
}


void MacroAssembler::Push(Smi* source) {
  intptr_t smi = reinterpret_cast<intptr_t>(source);
  if (is_int32(smi)) {
    Push(Immediate(static_cast<int32_t>(smi)));
  } else {
    Register constant = GetSmiConstant(source);
    Push(constant);
  }
}


void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
  ASSERT(!src.is(scratch));
  movp(scratch, src);
  // High bits.
  shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
  shlp(src, Immediate(kSmiShift));
  Push(src);
  // Low bits.
  shlp(scratch, Immediate(kSmiShift));
  Push(scratch);
}


void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
  ASSERT(!dst.is(scratch));
  Pop(scratch);
  // Low bits.
  shrp(scratch, Immediate(kSmiShift));
  Pop(dst);
  shrp(dst, Immediate(kSmiShift));
  // High bits.
  shlp(dst, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
  orp(dst, scratch);
}
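
// Round-trip sketch for the pair above (illustrative, assuming 64-bit
// pointers and kSmiShift == 32): the raw word 0xAAAAAAAABBBBBBBB is
// pushed as two tagged halves, 0xAAAAAAAA00000000 (high) and
// 0xBBBBBBBB00000000 (low).  Pop reverses this: untag the low half,
// untag the high half and shift it back into the upper 32 bits, then
// or the two together.  Splitting a raw pointer-sized value into two
// smis keeps the GC from misreading it as a heap pointer on the stack.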


void MacroAssembler::Test(const Operand& src, Smi* source) {
  if (SmiValuesAre32Bits()) {
    testl(Operand(src, kIntSize), Immediate(source->value()));
  } else {
    ASSERT(SmiValuesAre31Bits());
    testl(src, Immediate(source));
  }
}


// ----------------------------------------------------------------------------


void MacroAssembler::LookupNumberStringCache(Register object,
                                             Register result,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* not_found) {
  // Register use: result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch1;
  Register scratch = scratch2;

  // Load the number string cache.
  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);

  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  SmiToInteger32(
      mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
  shrl(mask, Immediate(1));
  subp(mask, Immediate(1));  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
  Label is_smi;
  Label load_result_from_cache;
  JumpIfSmi(object, &is_smi);
  CheckMap(object,
           isolate()->factory()->heap_number_map(),
           not_found,
           DONT_DO_SMI_CHECK);

  STATIC_ASSERT(8 == kDoubleSize);
  movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
  xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset));
  andp(scratch, mask);
  // Each entry in the string cache consists of two pointer-sized fields,
  // but the times_twice_pointer_size (multiplication by 16) scale factor
  // is not supported by the addressing mode on the x64 platform, so the
  // entry index has to be premultiplied before the lookup.
  shlp(scratch, Immediate(kPointerSizeLog2 + 1));

  Register index = scratch;
  Register probe = mask;
  movp(probe,
       FieldOperand(number_string_cache,
                    index,
                    times_1,
                    FixedArray::kHeaderSize));
  JumpIfSmi(probe, not_found);
  movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
  ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
  j(parity_even, not_found);  // Bail out if NaN is involved.
  j(not_equal, not_found);  // The cache did not contain this value.
  jmp(&load_result_from_cache);

  bind(&is_smi);
  SmiToInteger32(scratch, object);
  andp(scratch, mask);
  // Premultiply the entry index before the lookup, as above.
  shlp(scratch, Immediate(kPointerSizeLog2 + 1));

  // Check if the entry is the smi we are looking for.
  cmpp(object,
       FieldOperand(number_string_cache,
                    index,
                    times_1,
                    FixedArray::kHeaderSize));
  j(not_equal, not_found);

  // Get the result from the cache.
  bind(&load_result_from_cache);
  movp(result,
       FieldOperand(number_string_cache,
                    index,
                    times_1,
                    FixedArray::kHeaderSize + kPointerSize));
  IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
}


void MacroAssembler::JumpIfNotString(Register object,
                                     Register object_map,
                                     Label* not_string,
                                     Label::Distance near_jump) {
  Condition is_smi = CheckSmi(object);
  j(is_smi, not_string, near_jump);
  CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
  j(above_equal, not_string, near_jump);
}


void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
    Register first_object,
    Register second_object,
    Register scratch1,
    Register scratch2,
    Label* on_fail,
    Label::Distance near_jump) {
  // Check that both objects are not smis.
  Condition either_smi = CheckEitherSmi(first_object, second_object);
  j(either_smi, on_fail, near_jump);

  // Load instance type for both strings.
  movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
  movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
  movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
  movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));

  // Check that both are flat ASCII strings.
  ASSERT(kNotStringTag != 0);
  const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
  const int kFlatAsciiStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;

  andl(scratch1, Immediate(kFlatAsciiStringMask));
  andl(scratch2, Immediate(kFlatAsciiStringMask));
  // Interleave the bits to check both scratch1 and scratch2 in one test.
  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
  leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
  cmpl(scratch1,
       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
  j(not_equal, on_fail, near_jump);
}
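
// How the combined compare above works (illustrative): the ASSERT_EQ
// guarantees that kFlatAsciiStringMask and the same mask shifted left
// by 3 share no bits, so
//   leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
// (i.e. scratch1 + scratch2 * 8) packs both masked instance types into
// disjoint bit fields of one register, and a single cmpl against
// kFlatAsciiStringTag + (kFlatAsciiStringTag << 3) checks both strings
// at once.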


void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
    Register instance_type,
    Register scratch,
    Label* failure,
    Label::Distance near_jump) {
  if (!scratch.is(instance_type)) {
    movl(scratch, instance_type);
  }

  const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;

  andl(scratch, Immediate(kFlatAsciiStringMask));
  cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
  j(not_equal, failure, near_jump);
}


void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
    Register first_object_instance_type,
    Register second_object_instance_type,
    Register scratch1,
    Register scratch2,
    Label* on_fail,
    Label::Distance near_jump) {
  // Load instance type for both strings.
  movp(scratch1, first_object_instance_type);
  movp(scratch2, second_object_instance_type);

  // Check that both are flat ASCII strings.
  ASSERT(kNotStringTag != 0);
  const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
  const int kFlatAsciiStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;

  andl(scratch1, Immediate(kFlatAsciiStringMask));
  andl(scratch2, Immediate(kFlatAsciiStringMask));
  // Interleave the bits to check both scratch1 and scratch2 in one test.
  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
  leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
  cmpl(scratch1,
       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
  j(not_equal, on_fail, near_jump);
}


template<class T>
static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
                                      T operand_or_register,
                                      Label* not_unique_name,
                                      Label::Distance distance) {
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  Label succeed;
  masm->testb(operand_or_register,
              Immediate(kIsNotStringMask | kIsNotInternalizedMask));
  masm->j(zero, &succeed, Label::kNear);
  masm->cmpb(operand_or_register, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
  masm->j(not_equal, not_unique_name, distance);

  masm->bind(&succeed);
}


void MacroAssembler::JumpIfNotUniqueName(Operand operand,
                                         Label* not_unique_name,
                                         Label::Distance distance) {
  JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
}


void MacroAssembler::JumpIfNotUniqueName(Register reg,
                                         Label* not_unique_name,
                                         Label::Distance distance) {
  JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
}


void MacroAssembler::Move(Register dst, Register src) {
  if (!dst.is(src)) {
    movp(dst, src);
  }
}


void MacroAssembler::Move(Register dst, Handle<Object> source) {
  AllowDeferredHandleDereference smi_check;
  if (source->IsSmi()) {
    Move(dst, Smi::cast(*source));
  } else {
    MoveHeapObject(dst, source);
  }
}


void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
  AllowDeferredHandleDereference smi_check;
  if (source->IsSmi()) {
    Move(dst, Smi::cast(*source));
  } else {
    MoveHeapObject(kScratchRegister, source);
    movp(dst, kScratchRegister);
  }
}


void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
  AllowDeferredHandleDereference smi_check;
  if (source->IsSmi()) {
    Cmp(dst, Smi::cast(*source));
  } else {
    MoveHeapObject(kScratchRegister, source);
    cmpp(dst, kScratchRegister);
  }
}


void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
  AllowDeferredHandleDereference smi_check;
  if (source->IsSmi()) {
    Cmp(dst, Smi::cast(*source));
  } else {
    MoveHeapObject(kScratchRegister, source);
    cmpp(dst, kScratchRegister);
  }
}


void MacroAssembler::Push(Handle<Object> source) {
  AllowDeferredHandleDereference smi_check;
  if (source->IsSmi()) {
    Push(Smi::cast(*source));
  } else {
    MoveHeapObject(kScratchRegister, source);
    Push(kScratchRegister);
  }
}


void MacroAssembler::MoveHeapObject(Register result,
                                    Handle<Object> object) {
  AllowDeferredHandleDereference using_raw_address;
  ASSERT(object->IsHeapObject());
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    Move(result, cell, RelocInfo::CELL);
    movp(result, Operand(result, 0));
  } else {
    Move(result, object, RelocInfo::EMBEDDED_OBJECT);
  }
}


void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
  if (dst.is(rax)) {
    AllowDeferredHandleDereference embedding_raw_address;
    load_rax(cell.location(), RelocInfo::CELL);
  } else {
    Move(dst, cell, RelocInfo::CELL);
    movp(dst, Operand(dst, 0));
  }
}


void MacroAssembler::Drop(int stack_elements) {
  if (stack_elements > 0) {
    addp(rsp, Immediate(stack_elements * kPointerSize));
  }
}


void MacroAssembler::DropUnderReturnAddress(int stack_elements,
                                            Register scratch) {
  ASSERT(stack_elements > 0);
  if (kPointerSize == kInt64Size && stack_elements == 1) {
    popq(MemOperand(rsp, 0));
    return;
  }

  PopReturnAddressTo(scratch);
  Drop(stack_elements);
  PushReturnAddressFrom(scratch);
}


void MacroAssembler::Push(Register src) {
  if (kPointerSize == kInt64Size) {
    pushq(src);
  } else {
    // x32 uses 64-bit push for rbp in the prologue.
    ASSERT(src.code() != rbp.code());
    leal(rsp, Operand(rsp, -4));
    movp(Operand(rsp, 0), src);
  }
}


void MacroAssembler::Push(const Operand& src) {
  if (kPointerSize == kInt64Size) {
    pushq(src);
  } else {
    movp(kScratchRegister, src);
    leal(rsp, Operand(rsp, -4));
    movp(Operand(rsp, 0), kScratchRegister);
  }
}


void MacroAssembler::PushQuad(const Operand& src) {
  if (kPointerSize == kInt64Size) {
    pushq(src);
  } else {
    movp(kScratchRegister, src);
    pushq(kScratchRegister);
  }
}


void MacroAssembler::Push(Immediate value) {
  if (kPointerSize == kInt64Size) {
    pushq(value);
  } else {
    leal(rsp, Operand(rsp, -4));
    movp(Operand(rsp, 0), value);
  }
}


void MacroAssembler::PushImm32(int32_t imm32) {
  if (kPointerSize == kInt64Size) {
    pushq_imm32(imm32);
  } else {
    leal(rsp, Operand(rsp, -4));
    movp(Operand(rsp, 0), Immediate(imm32));
  }
}


void MacroAssembler::Pop(Register dst) {
  if (kPointerSize == kInt64Size) {
    popq(dst);
  } else {
    // x32 uses 64-bit pop for rbp in the epilogue.
    ASSERT(dst.code() != rbp.code());
    movp(dst, Operand(rsp, 0));
    leal(rsp, Operand(rsp, 4));
  }
}


void MacroAssembler::Pop(const Operand& dst) {
  if (kPointerSize == kInt64Size) {
    popq(dst);
  } else {
    Register scratch = dst.AddressUsesRegister(kScratchRegister)
        ? kSmiConstantRegister : kScratchRegister;
    movp(scratch, Operand(rsp, 0));
    movp(dst, scratch);
    leal(rsp, Operand(rsp, 4));
    if (scratch.is(kSmiConstantRegister)) {
      // Restore kSmiConstantRegister.
      movp(kSmiConstantRegister,
           reinterpret_cast<void*>(Smi::FromInt(kSmiConstantRegisterValue)),
           Assembler::RelocInfoNone());
    }
  }
}


void MacroAssembler::PopQuad(const Operand& dst) {
  if (kPointerSize == kInt64Size) {
    popq(dst);
  } else {
    popq(kScratchRegister);
    movp(dst, kScratchRegister);
  }
}


void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst,
                                                        Register base,
                                                        int offset) {
  ASSERT(offset > SharedFunctionInfo::kLengthOffset &&
         offset <= SharedFunctionInfo::kSize &&
         (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
  if (kPointerSize == kInt64Size) {
    movsxlq(dst, FieldOperand(base, offset));
  } else {
    movp(dst, FieldOperand(base, offset));
    SmiToInteger32(dst, dst);
  }
}


void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base,
                                                           int offset,
                                                           int bits) {
  ASSERT(offset > SharedFunctionInfo::kLengthOffset &&
         offset <= SharedFunctionInfo::kSize &&
         (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
  if (kPointerSize == kInt32Size) {
    // On x32, this field is represented as a smi.
    bits += kSmiShift;
  }
  int byte_offset = bits / kBitsPerByte;
  int bit_in_byte = bits & (kBitsPerByte - 1);
  testb(FieldOperand(base, offset + byte_offset), Immediate(1 << bit_in_byte));
}
2956 
2957 
Jump(ExternalReference ext)2958 void MacroAssembler::Jump(ExternalReference ext) {
2959   LoadAddress(kScratchRegister, ext);
2960   jmp(kScratchRegister);
2961 }
2962 
2963 
Jump(const Operand & op)2964 void MacroAssembler::Jump(const Operand& op) {
2965   if (kPointerSize == kInt64Size) {
2966     jmp(op);
2967   } else {
2968     movp(kScratchRegister, op);
2969     jmp(kScratchRegister);
2970   }
2971 }
2972 
2973 
Jump(Address destination,RelocInfo::Mode rmode)2974 void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
2975   Move(kScratchRegister, destination, rmode);
2976   jmp(kScratchRegister);
2977 }
2978 
2979 
Jump(Handle<Code> code_object,RelocInfo::Mode rmode)2980 void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
2981   // TODO(X64): Inline this
2982   jmp(code_object, rmode);
2983 }
2984 
2985 
CallSize(ExternalReference ext)2986 int MacroAssembler::CallSize(ExternalReference ext) {
2987   // Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
2988   return LoadAddressSize(ext) +
2989          Assembler::kCallScratchRegisterInstructionLength;
2990 }
2991 
2992 
Call(ExternalReference ext)2993 void MacroAssembler::Call(ExternalReference ext) {
2994 #ifdef DEBUG
2995   int end_position = pc_offset() + CallSize(ext);
2996 #endif
2997   LoadAddress(kScratchRegister, ext);
2998   call(kScratchRegister);
2999 #ifdef DEBUG
3000   CHECK_EQ(end_position, pc_offset());
3001 #endif
3002 }
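// Note: the DEBUG-only end_position bookkeeping above cross-checks CallSize()
// against the bytes actually emitted; any drift between the predicted and
// real instruction lengths trips the CHECK_EQ at assembly time instead of
// silently breaking code that patches call sites by size.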
3003 
3004 
3005 void MacroAssembler::Call(const Operand& op) {
3006   if (kPointerSize == kInt64Size) {
3007     call(op);
3008   } else {
3009     movp(kScratchRegister, op);
3010     call(kScratchRegister);
3011   }
3012 }
3013 
3014 
3015 void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
3016 #ifdef DEBUG
3017   int end_position = pc_offset() + CallSize(destination);
3018 #endif
3019   Move(kScratchRegister, destination, rmode);
3020   call(kScratchRegister);
3021 #ifdef DEBUG
3022   CHECK_EQ(pc_offset(), end_position);
3023 #endif
3024 }
3025 
3026 
3027 void MacroAssembler::Call(Handle<Code> code_object,
3028                           RelocInfo::Mode rmode,
3029                           TypeFeedbackId ast_id) {
3030 #ifdef DEBUG
3031   int end_position = pc_offset() + CallSize(code_object);
3032 #endif
3033   ASSERT(RelocInfo::IsCodeTarget(rmode) ||
3034       rmode == RelocInfo::CODE_AGE_SEQUENCE);
3035   call(code_object, rmode, ast_id);
3036 #ifdef DEBUG
3037   CHECK_EQ(end_position, pc_offset());
3038 #endif
3039 }
3040 
3041 
3042 void MacroAssembler::Pushad() {
3043   Push(rax);
3044   Push(rcx);
3045   Push(rdx);
3046   Push(rbx);
3047   // Not pushing rsp or rbp.
3048   Push(rsi);
3049   Push(rdi);
3050   Push(r8);
3051   Push(r9);
3052   // r10 is kScratchRegister.
3053   Push(r11);
3054   // r12 is kSmiConstantRegister.
3055   // r13 is kRootRegister.
3056   Push(r14);
3057   Push(r15);
3058   STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
3059   // Use lea for symmetry with Popad.
3060   int sp_delta =
3061       (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
3062   leap(rsp, Operand(rsp, -sp_delta));
3063 }
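// Illustrative: the index table below has 16 entries and 11 registers are
// actually pushed, so the leap above reserves the remaining 5 * kPointerSize
// bytes and safepoint slot indices stay dense regardless of which registers
// are skipped.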
3064 
3065 
3066 void MacroAssembler::Popad() {
3067   // Popad must not change the flags, so use lea instead of addq.
3068   int sp_delta =
3069       (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
3070   leap(rsp, Operand(rsp, sp_delta));
3071   Pop(r15);
3072   Pop(r14);
3073   Pop(r11);
3074   Pop(r9);
3075   Pop(r8);
3076   Pop(rdi);
3077   Pop(rsi);
3078   Pop(rbx);
3079   Pop(rdx);
3080   Pop(rcx);
3081   Pop(rax);
3082 }
3083 
3084 
3085 void MacroAssembler::Dropad() {
3086   addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
3087 }
3088 
3089 
3090 // Order in which general registers are pushed by Pushad:
3091 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
3092 const int
3093 MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
3094     0,
3095     1,
3096     2,
3097     3,
3098     -1,
3099     -1,
3100     4,
3101     5,
3102     6,
3103     7,
3104     -1,
3105     8,
3106     -1,
3107     -1,
3108     9,
3109     10
3110 };
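// Reading the table above (illustrative): entry 0 (rax) is pushed first,
// entry 2 (rdx) third, and the -1 entries mark registers without a safepoint
// slot: rsp, rbp, r10 (kScratchRegister), r12 (kSmiConstantRegister) and
// r13 (kRootRegister).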
3111 
3112 
3113 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
3114                                                   const Immediate& imm) {
3115   movp(SafepointRegisterSlot(dst), imm);
3116 }
3117 
3118 
3119 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
3120   movp(SafepointRegisterSlot(dst), src);
3121 }
3122 
3123 
3124 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
3125   movp(dst, SafepointRegisterSlot(src));
3126 }
3127 
3128 
3129 Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
3130   return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
3131 }
3132 
3133 
3134 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
3135                                     int handler_index) {
3136   // Adjust this code if not the case.
3137   STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
3138                                                 kFPOnStackSize);
3139   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3140   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3141   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3142   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3143   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3144 
3145   // We will build up the handler from the bottom by pushing on the stack.
3146   // First push the frame pointer and context.
3147   if (kind == StackHandler::JS_ENTRY) {
3148     // The frame pointer does not point to a JS frame so we save NULL for
3149     // rbp. We expect the code throwing an exception to check rbp before
3150     // dereferencing it to restore the context.
3151     pushq(Immediate(0));  // NULL frame pointer.
3152     Push(Smi::FromInt(0));  // No context.
3153   } else {
3154     pushq(rbp);
3155     Push(rsi);
3156   }
3157 
3158   // Push the state and the code object.
3159   unsigned state =
3160       StackHandler::IndexField::encode(handler_index) |
3161       StackHandler::KindField::encode(kind);
3162   Push(Immediate(state));
3163   Push(CodeObject());
3164 
3165   // Link the current handler as the next handler.
3166   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3167   Push(ExternalOperand(handler_address));
3168   // Set this new handler as the current one.
3169   movp(ExternalOperand(handler_address), rsp);
3170 }
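// Resulting handler layout, lowest address first (matching the STATIC_ASSERTs
// above): next handler, code object, state, context, saved frame pointer.
// The final movp links the new handler in by storing rsp into the isolate's
// handler address.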
3171 
3172 
3173 void MacroAssembler::PopTryHandler() {
3174   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3175   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3176   Pop(ExternalOperand(handler_address));
3177   addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
3178 }
3179 
3180 
3181 void MacroAssembler::JumpToHandlerEntry() {
3182   // Compute the handler entry address and jump to it.  The handler table is
3183   // a fixed array of (smi-tagged) code offsets.
3184   // rax = exception, rdi = code object, rdx = state.
3185   movp(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
3186   shrp(rdx, Immediate(StackHandler::kKindWidth));
3187   movp(rdx,
3188        FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
3189   SmiToInteger64(rdx, rdx);
3190   leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
3191   jmp(rdi);
3192 }
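// Sketch of the dispatch above: rdx arrives as (index << kKindWidth) | kind,
// the shift isolates the handler-table index, the FixedArray load yields a
// smi-tagged offset into the code object, and the leap forms the absolute
// entry address for the indirect jump.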
3193 
3194 
3195 void MacroAssembler::Throw(Register value) {
3196   // Adjust this code if not the case.
3197   STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
3198                                                 kFPOnStackSize);
3199   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3200   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3201   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3202   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3203   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3204 
3205   // The exception is expected in rax.
3206   if (!value.is(rax)) {
3207     movp(rax, value);
3208   }
3209   // Drop the stack pointer to the top of the top handler.
3210   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3211   movp(rsp, ExternalOperand(handler_address));
3212   // Restore the next handler.
3213   Pop(ExternalOperand(handler_address));
3214 
3215   // Remove the code object and state, compute the handler address in rdi.
3216   Pop(rdi);  // Code object.
3217   Pop(rdx);  // Offset and state.
3218 
3219   // Restore the context and frame pointer.
3220   Pop(rsi);  // Context.
3221   popq(rbp);  // Frame pointer.
3222 
3223   // If the handler is a JS frame, restore the context to the frame.
3224   // (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
3225   // rbp or rsi.
3226   Label skip;
3227   testp(rsi, rsi);
3228   j(zero, &skip, Label::kNear);
3229   movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
3230   bind(&skip);
3231 
3232   JumpToHandlerEntry();
3233 }
3234 
3235 
3236 void MacroAssembler::ThrowUncatchable(Register value) {
3237   // Adjust this code if not the case.
3238   STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
3239                                                 kFPOnStackSize);
3240   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3241   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3242   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3243   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3244   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3245 
3246   // The exception is expected in rax.
3247   if (!value.is(rax)) {
3248     movp(rax, value);
3249   }
3250   // Drop the stack pointer to the top of the top stack handler.
3251   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3252   Load(rsp, handler_address);
3253 
3254   // Unwind the handlers until the top ENTRY handler is found.
3255   Label fetch_next, check_kind;
3256   jmp(&check_kind, Label::kNear);
3257   bind(&fetch_next);
3258   movp(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
3259 
3260   bind(&check_kind);
3261   STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
3262   testl(Operand(rsp, StackHandlerConstants::kStateOffset),
3263         Immediate(StackHandler::KindField::kMask));
3264   j(not_zero, &fetch_next);
3265 
3266   // Set the top handler address to next handler past the top ENTRY handler.
3267   Pop(ExternalOperand(handler_address));
3268 
3269   // Remove the code object and state, compute the handler address in rdi.
3270   Pop(rdi);  // Code object.
3271   Pop(rdx);  // Offset and state.
3272 
3273   // Clear the context pointer and frame pointer (0 was saved in the handler).
3274   Pop(rsi);
3275   popq(rbp);
3276 
3277   JumpToHandlerEntry();
3278 }
3279 
3280 
3281 void MacroAssembler::Ret() {
3282   ret(0);
3283 }
3284 
3285 
3286 void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
3287   if (is_uint16(bytes_dropped)) {
3288     ret(bytes_dropped);
3289   } else {
3290     PopReturnAddressTo(scratch);
3291     addp(rsp, Immediate(bytes_dropped));
3292     PushReturnAddressFrom(scratch);
3293     ret(0);
3294   }
3295 }
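// The split above exists because the "ret imm16" encoding can only pop up to
// 0xFFFF bytes of arguments; larger adjustments stash the return address in
// scratch, bump rsp explicitly, and push the address back before a plain ret.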
3296 
3297 
3298 void MacroAssembler::FCmp() {
3299   fucomip();
3300   fstp(0);
3301 }
3302 
3303 
3304 void MacroAssembler::CmpObjectType(Register heap_object,
3305                                    InstanceType type,
3306                                    Register map) {
3307   movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3308   CmpInstanceType(map, type);
3309 }
3310 
3311 
3312 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
3313   cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
3314        Immediate(static_cast<int8_t>(type)));
3315 }
3316 
3317 
3318 void MacroAssembler::CheckFastElements(Register map,
3319                                        Label* fail,
3320                                        Label::Distance distance) {
3321   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3322   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3323   STATIC_ASSERT(FAST_ELEMENTS == 2);
3324   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3325   cmpb(FieldOperand(map, Map::kBitField2Offset),
3326        Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3327   j(above, fail, distance);
3328 }
3329 
3330 
3331 void MacroAssembler::CheckFastObjectElements(Register map,
3332                                              Label* fail,
3333                                              Label::Distance distance) {
3334   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3335   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3336   STATIC_ASSERT(FAST_ELEMENTS == 2);
3337   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3338   cmpb(FieldOperand(map, Map::kBitField2Offset),
3339        Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3340   j(below_equal, fail, distance);
3341   cmpb(FieldOperand(map, Map::kBitField2Offset),
3342        Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3343   j(above, fail, distance);
3344 }
3345 
3346 
3347 void MacroAssembler::CheckFastSmiElements(Register map,
3348                                           Label* fail,
3349                                           Label::Distance distance) {
3350   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3351   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3352   cmpb(FieldOperand(map, Map::kBitField2Offset),
3353        Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3354   j(above, fail, distance);
3355 }
3356 
3357 
3358 void MacroAssembler::StoreNumberToDoubleElements(
3359     Register maybe_number,
3360     Register elements,
3361     Register index,
3362     XMMRegister xmm_scratch,
3363     Label* fail,
3364     int elements_offset) {
3365   Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
3366 
3367   JumpIfSmi(maybe_number, &smi_value, Label::kNear);
3368 
3369   CheckMap(maybe_number,
3370            isolate()->factory()->heap_number_map(),
3371            fail,
3372            DONT_DO_SMI_CHECK);
3373 
3374   // Double value, canonicalize NaN.
3375   uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
3376   cmpl(FieldOperand(maybe_number, offset),
3377        Immediate(kNaNOrInfinityLowerBoundUpper32));
3378   j(greater_equal, &maybe_nan, Label::kNear);
3379 
3380   bind(&not_nan);
3381   movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
3382   bind(&have_double_value);
3383   movsd(FieldOperand(elements, index, times_8,
3384                      FixedDoubleArray::kHeaderSize - elements_offset),
3385         xmm_scratch);
3386   jmp(&done);
3387 
3388   bind(&maybe_nan);
3389   // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
3390   // it's an Infinity, and the non-NaN code path applies.
3391   j(greater, &is_nan, Label::kNear);
3392   cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
3393   j(zero, &not_nan);
3394   bind(&is_nan);
3395   // Convert all NaNs to the same canonical NaN value when they are stored in
3396   // the double array.
3397   Set(kScratchRegister, BitCast<uint64_t>(
3398       FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
3399   movq(xmm_scratch, kScratchRegister);
3400   jmp(&have_double_value, Label::kNear);
3401 
3402   bind(&smi_value);
3403   // Value is a smi. Convert to a double and store.
3404   // Preserve original value.
3405   SmiToInteger32(kScratchRegister, maybe_number);
3406   Cvtlsi2sd(xmm_scratch, kScratchRegister);
3407   movsd(FieldOperand(elements, index, times_8,
3408                      FixedDoubleArray::kHeaderSize - elements_offset),
3409         xmm_scratch);
3410   bind(&done);
3411 }
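// Rationale (sketch): comparing the upper 32 bits against
// kNaNOrInfinityLowerBoundUpper32 routes all NaN/Infinity candidates through
// maybe_nan; a zero fraction word means Infinity and is stored unchanged,
// while every NaN is rewritten to the one canonical pattern so stored doubles
// can never alias the hole NaN used by FixedDoubleArray.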
3412 
3413 
3414 void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
3415   Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
3416 }
3417 
3418 
3419 void MacroAssembler::CheckMap(Register obj,
3420                               Handle<Map> map,
3421                               Label* fail,
3422                               SmiCheckType smi_check_type) {
3423   if (smi_check_type == DO_SMI_CHECK) {
3424     JumpIfSmi(obj, fail);
3425   }
3426 
3427   CompareMap(obj, map);
3428   j(not_equal, fail);
3429 }
3430 
3431 
3432 void MacroAssembler::ClampUint8(Register reg) {
3433   Label done;
3434   testl(reg, Immediate(0xFFFFFF00));
3435   j(zero, &done, Label::kNear);
3436   setcc(negative, reg);  // 1 if negative, 0 if positive.
3437   decb(reg);  // 0 if negative, 255 if positive.
3438   bind(&done);
3439 }
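// Worked example (illustrative): for reg == -5 the testl result is negative,
// setcc writes 1 and decb yields 0; for reg == 300 the result is positive,
// setcc writes 0 and decb wraps the low byte to 255; values already in
// 0..255 skip both instructions via the early jump.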
3440 
3441 
3442 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
3443                                         XMMRegister temp_xmm_reg,
3444                                         Register result_reg) {
3445   Label done;
3446   Label conv_failure;
3447   xorps(temp_xmm_reg, temp_xmm_reg);
3448   cvtsd2si(result_reg, input_reg);
3449   testl(result_reg, Immediate(0xFFFFFF00));
3450   j(zero, &done, Label::kNear);
3451   cmpl(result_reg, Immediate(1));
3452   j(overflow, &conv_failure, Label::kNear);
3453   movl(result_reg, Immediate(0));
3454   setcc(sign, result_reg);
3455   subl(result_reg, Immediate(1));
3456   andl(result_reg, Immediate(255));
3457   jmp(&done, Label::kNear);
3458   bind(&conv_failure);
3459   Set(result_reg, 0);
3460   ucomisd(input_reg, temp_xmm_reg);
3461   j(below, &done, Label::kNear);
3462   Set(result_reg, 255);
3463   bind(&done);
3464 }
3465 
3466 
3467 void MacroAssembler::LoadUint32(XMMRegister dst,
3468                                 Register src) {
3469   if (FLAG_debug_code) {
3470     cmpq(src, Immediate(0xffffffff));
3471     Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
3472   }
3473   cvtqsi2sd(dst, src);
3474 }
3475 
3476 
3477 void MacroAssembler::SlowTruncateToI(Register result_reg,
3478                                      Register input_reg,
3479                                      int offset) {
3480   DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
3481   call(stub.GetCode(), RelocInfo::CODE_TARGET);
3482 }
3483 
3484 
3485 void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
3486                                            Register input_reg) {
3487   Label done;
3488   movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3489   cvttsd2siq(result_reg, xmm0);
3490   cmpq(result_reg, Immediate(1));
3491   j(no_overflow, &done, Label::kNear);
3492 
3493   // Slow case.
3494   if (input_reg.is(result_reg)) {
3495     subp(rsp, Immediate(kDoubleSize));
3496     movsd(MemOperand(rsp, 0), xmm0);
3497     SlowTruncateToI(result_reg, rsp, 0);
3498     addp(rsp, Immediate(kDoubleSize));
3499   } else {
3500     SlowTruncateToI(result_reg, input_reg);
3501   }
3502 
3503   bind(&done);
3504   // Keep our invariant that the upper 32 bits are zero.
3505   movl(result_reg, result_reg);
3506 }
3507 
3508 
3509 void MacroAssembler::TruncateDoubleToI(Register result_reg,
3510                                        XMMRegister input_reg) {
3511   Label done;
3512   cvttsd2siq(result_reg, input_reg);
3513   cmpq(result_reg, Immediate(1));
3514   j(no_overflow, &done, Label::kNear);
3515 
3516   subp(rsp, Immediate(kDoubleSize));
3517   movsd(MemOperand(rsp, 0), input_reg);
3518   SlowTruncateToI(result_reg, rsp, 0);
3519   addp(rsp, Immediate(kDoubleSize));
3520 
3521   bind(&done);
3522   // Keep our invariant that the upper 32 bits are zero.
3523   movl(result_reg, result_reg);
3524 }
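// Note on the overflow trick shared by both truncations above: cvttsd2siq
// produces 0x8000000000000000 for NaN and out-of-range inputs, and that is
// the only value for which "cmpq reg, 1" sets the overflow flag, so
// j(no_overflow) cheaply selects the fast path over the stub call.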
3525 
3526 
3527 void MacroAssembler::DoubleToI(Register result_reg,
3528                                XMMRegister input_reg,
3529                                XMMRegister scratch,
3530                                MinusZeroMode minus_zero_mode,
3531                                Label* conversion_failed,
3532                                Label::Distance dst) {
3533   cvttsd2si(result_reg, input_reg);
3534   Cvtlsi2sd(xmm0, result_reg);
3535   ucomisd(xmm0, input_reg);
3536   j(not_equal, conversion_failed, dst);
3537   j(parity_even, conversion_failed, dst);  // NaN.
3538   if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
3539     Label done;
3540     // The integer converted back is equal to the original. We
3541     // only have to test if we got -0 as an input.
3542     testl(result_reg, result_reg);
3543     j(not_zero, &done, Label::kNear);
3544     movmskpd(result_reg, input_reg);
3545     // Bit 0 contains the sign of the double in input_reg.
3546     // If input was positive, we are ok and return 0, otherwise
3547     // jump to conversion_failed.
3548     andl(result_reg, Immediate(1));
3549     j(not_zero, conversion_failed, dst);
3550     bind(&done);
3551   }
3552 }
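// Illustrative failure cases for the round-trip check above: 1.5 truncates
// to 1 and converts back to 1.0 != 1.5 (precision loss); NaN compares
// unordered and exits via parity_even; -0.0 round-trips to 0 but is caught
// by the movmskpd sign-bit test when FAIL_ON_MINUS_ZERO is requested.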
3553 
3554 
3555 void MacroAssembler::TaggedToI(Register result_reg,
3556                                Register input_reg,
3557                                XMMRegister temp,
3558                                MinusZeroMode minus_zero_mode,
3559                                Label* lost_precision,
3560                                Label::Distance dst) {
3561   Label done;
3562   ASSERT(!temp.is(xmm0));
3563 
3564   // Heap number map check.
3565   CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
3566               Heap::kHeapNumberMapRootIndex);
3567   j(not_equal, lost_precision, dst);
3568 
3569   movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3570   cvttsd2si(result_reg, xmm0);
3571   Cvtlsi2sd(temp, result_reg);
3572   ucomisd(xmm0, temp);
3573   RecordComment("Deferred TaggedToI: lost precision");
3574   j(not_equal, lost_precision, dst);
3575   RecordComment("Deferred TaggedToI: NaN");
3576   j(parity_even, lost_precision, dst);  // NaN.
3577   if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
3578     testl(result_reg, result_reg);
3579     j(not_zero, &done, Label::kNear);
3580     movmskpd(result_reg, xmm0);
3581     andl(result_reg, Immediate(1));
3582     j(not_zero, lost_precision, dst);
3583   }
3584   bind(&done);
3585 }
3586 
3587 
3588 void MacroAssembler::LoadInstanceDescriptors(Register map,
3589                                              Register descriptors) {
3590   movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
3591 }
3592 
3593 
3594 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3595   movl(dst, FieldOperand(map, Map::kBitField3Offset));
3596   DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3597 }
3598 
3599 
3600 void MacroAssembler::EnumLength(Register dst, Register map) {
3601   STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3602   movl(dst, FieldOperand(map, Map::kBitField3Offset));
3603   andl(dst, Immediate(Map::EnumLengthBits::kMask));
3604   Integer32ToSmi(dst, dst);
3605 }
3606 
3607 
3608 void MacroAssembler::DispatchMap(Register obj,
3609                                  Register unused,
3610                                  Handle<Map> map,
3611                                  Handle<Code> success,
3612                                  SmiCheckType smi_check_type) {
3613   Label fail;
3614   if (smi_check_type == DO_SMI_CHECK) {
3615     JumpIfSmi(obj, &fail);
3616   }
3617   Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
3618   j(equal, success, RelocInfo::CODE_TARGET);
3619 
3620   bind(&fail);
3621 }
3622 
3623 
3624 void MacroAssembler::AssertNumber(Register object) {
3625   if (emit_debug_code()) {
3626     Label ok;
3627     Condition is_smi = CheckSmi(object);
3628     j(is_smi, &ok, Label::kNear);
3629     Cmp(FieldOperand(object, HeapObject::kMapOffset),
3630         isolate()->factory()->heap_number_map());
3631     Check(equal, kOperandIsNotANumber);
3632     bind(&ok);
3633   }
3634 }
3635 
3636 
3637 void MacroAssembler::AssertNotSmi(Register object) {
3638   if (emit_debug_code()) {
3639     Condition is_smi = CheckSmi(object);
3640     Check(NegateCondition(is_smi), kOperandIsASmi);
3641   }
3642 }
3643 
3644 
3645 void MacroAssembler::AssertSmi(Register object) {
3646   if (emit_debug_code()) {
3647     Condition is_smi = CheckSmi(object);
3648     Check(is_smi, kOperandIsNotASmi);
3649   }
3650 }
3651 
3652 
3653 void MacroAssembler::AssertSmi(const Operand& object) {
3654   if (emit_debug_code()) {
3655     Condition is_smi = CheckSmi(object);
3656     Check(is_smi, kOperandIsNotASmi);
3657   }
3658 }
3659 
3660 
3661 void MacroAssembler::AssertZeroExtended(Register int32_register) {
3662   if (emit_debug_code()) {
3663     ASSERT(!int32_register.is(kScratchRegister));
3664     movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
3665     cmpq(kScratchRegister, int32_register);
3666     Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
3667   }
3668 }
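// Illustrative: the constant above is 2^32, so any properly zero-extended
// 32-bit value compares below it and the unsigned above_equal check passes.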
3669 
3670 
3671 void MacroAssembler::AssertString(Register object) {
3672   if (emit_debug_code()) {
3673     testb(object, Immediate(kSmiTagMask));
3674     Check(not_equal, kOperandIsASmiAndNotAString);
3675     Push(object);
3676     movp(object, FieldOperand(object, HeapObject::kMapOffset));
3677     CmpInstanceType(object, FIRST_NONSTRING_TYPE);
3678     Pop(object);
3679     Check(below, kOperandIsNotAString);
3680   }
3681 }
3682 
3683 
3684 void MacroAssembler::AssertName(Register object) {
3685   if (emit_debug_code()) {
3686     testb(object, Immediate(kSmiTagMask));
3687     Check(not_equal, kOperandIsASmiAndNotAName);
3688     Push(object);
3689     movp(object, FieldOperand(object, HeapObject::kMapOffset));
3690     CmpInstanceType(object, LAST_NAME_TYPE);
3691     Pop(object);
3692     Check(below_equal, kOperandIsNotAName);
3693   }
3694 }
3695 
3696 
3697 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
3698   if (emit_debug_code()) {
3699     Label done_checking;
3700     AssertNotSmi(object);
3701     Cmp(object, isolate()->factory()->undefined_value());
3702     j(equal, &done_checking);
3703     Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
3704     Assert(equal, kExpectedUndefinedOrCell);
3705     bind(&done_checking);
3706   }
3707 }
3708 
3709 
3710 void MacroAssembler::AssertRootValue(Register src,
3711                                      Heap::RootListIndex root_value_index,
3712                                      BailoutReason reason) {
3713   if (emit_debug_code()) {
3714     ASSERT(!src.is(kScratchRegister));
3715     LoadRoot(kScratchRegister, root_value_index);
3716     cmpp(src, kScratchRegister);
3717     Check(equal, reason);
3718   }
3719 }
3720 
3721 
3722 
3723 Condition MacroAssembler::IsObjectStringType(Register heap_object,
3724                                              Register map,
3725                                              Register instance_type) {
3726   movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3727   movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3728   STATIC_ASSERT(kNotStringTag != 0);
3729   testb(instance_type, Immediate(kIsNotStringMask));
3730   return zero;
3731 }
3732 
3733 
3734 Condition MacroAssembler::IsObjectNameType(Register heap_object,
3735                                            Register map,
3736                                            Register instance_type) {
3737   movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3738   movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3739   cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
3740   return below_equal;
3741 }
3742 
3743 
3744 void MacroAssembler::TryGetFunctionPrototype(Register function,
3745                                              Register result,
3746                                              Label* miss,
3747                                              bool miss_on_bound_function) {
3748   // Check that the receiver isn't a smi.
3749   testl(function, Immediate(kSmiTagMask));
3750   j(zero, miss);
3751 
3752   // Check that the function really is a function.
3753   CmpObjectType(function, JS_FUNCTION_TYPE, result);
3754   j(not_equal, miss);
3755 
3756   if (miss_on_bound_function) {
3757     movp(kScratchRegister,
3758          FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3759     // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
3760     // field).
3761     TestBitSharedFunctionInfoSpecialField(kScratchRegister,
3762         SharedFunctionInfo::kCompilerHintsOffset,
3763         SharedFunctionInfo::kBoundFunction);
3764     j(not_zero, miss);
3765   }
3766 
3767   // Make sure that the function has an instance prototype.
3768   Label non_instance;
3769   testb(FieldOperand(result, Map::kBitFieldOffset),
3770         Immediate(1 << Map::kHasNonInstancePrototype));
3771   j(not_zero, &non_instance, Label::kNear);
3772 
3773   // Get the prototype or initial map from the function.
3774   movp(result,
3775        FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3776 
3777   // If the prototype or initial map is the hole, don't return it and
3778   // simply miss the cache instead. This will allow us to allocate a
3779   // prototype object on-demand in the runtime system.
3780   CompareRoot(result, Heap::kTheHoleValueRootIndex);
3781   j(equal, miss);
3782 
3783   // If the function does not have an initial map, we're done.
3784   Label done;
3785   CmpObjectType(result, MAP_TYPE, kScratchRegister);
3786   j(not_equal, &done, Label::kNear);
3787 
3788   // Get the prototype from the initial map.
3789   movp(result, FieldOperand(result, Map::kPrototypeOffset));
3790   jmp(&done, Label::kNear);
3791 
3792   // Non-instance prototype: Fetch prototype from constructor field
3793   // in initial map.
3794   bind(&non_instance);
3795   movp(result, FieldOperand(result, Map::kConstructorOffset));
3796 
3797   // All done.
3798   bind(&done);
3799 }
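// Outcome summary (sketch): result ends up holding either the function's
// prototype, the prototype slot of its initial map, or, for non-instance
// prototypes, the constructor stored in the initial map; the hole value and
// (optionally) bound functions divert to the miss label instead.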
3800 
3801 
3802 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
3803   if (FLAG_native_code_counters && counter->Enabled()) {
3804     Operand counter_operand = ExternalOperand(ExternalReference(counter));
3805     movl(counter_operand, Immediate(value));
3806   }
3807 }
3808 
3809 
3810 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
3811   ASSERT(value > 0);
3812   if (FLAG_native_code_counters && counter->Enabled()) {
3813     Operand counter_operand = ExternalOperand(ExternalReference(counter));
3814     if (value == 1) {
3815       incl(counter_operand);
3816     } else {
3817       addl(counter_operand, Immediate(value));
3818     }
3819   }
3820 }
3821 
3822 
3823 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
3824   ASSERT(value > 0);
3825   if (FLAG_native_code_counters && counter->Enabled()) {
3826     Operand counter_operand = ExternalOperand(ExternalReference(counter));
3827     if (value == 1) {
3828       decl(counter_operand);
3829     } else {
3830       subl(counter_operand, Immediate(value));
3831     }
3832   }
3833 }
3834 
3835 
3836 void MacroAssembler::DebugBreak() {
3837   Set(rax, 0);  // No arguments.
3838   LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
3839   CEntryStub ces(isolate(), 1);
3840   ASSERT(AllowThisStubCall(&ces));
3841   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
3842 }
3843 
3844 
3845 void MacroAssembler::InvokeCode(Register code,
3846                                 const ParameterCount& expected,
3847                                 const ParameterCount& actual,
3848                                 InvokeFlag flag,
3849                                 const CallWrapper& call_wrapper) {
3850   // You can't call a function without a valid frame.
3851   ASSERT(flag == JUMP_FUNCTION || has_frame());
3852 
3853   Label done;
3854   bool definitely_mismatches = false;
3855   InvokePrologue(expected,
3856                  actual,
3857                  Handle<Code>::null(),
3858                  code,
3859                  &done,
3860                  &definitely_mismatches,
3861                  flag,
3862                  Label::kNear,
3863                  call_wrapper);
3864   if (!definitely_mismatches) {
3865     if (flag == CALL_FUNCTION) {
3866       call_wrapper.BeforeCall(CallSize(code));
3867       call(code);
3868       call_wrapper.AfterCall();
3869     } else {
3870       ASSERT(flag == JUMP_FUNCTION);
3871       jmp(code);
3872     }
3873     bind(&done);
3874   }
3875 }
3876 
3877 
3878 void MacroAssembler::InvokeFunction(Register function,
3879                                     const ParameterCount& actual,
3880                                     InvokeFlag flag,
3881                                     const CallWrapper& call_wrapper) {
3882   // You can't call a function without a valid frame.
3883   ASSERT(flag == JUMP_FUNCTION || has_frame());
3884 
3885   ASSERT(function.is(rdi));
3886   movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3887   movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
3888   LoadSharedFunctionInfoSpecialField(rbx, rdx,
3889       SharedFunctionInfo::kFormalParameterCountOffset);
3890   // Advances rdx to the end of the Code object header, to the start of
3891   // the executable code.
3892   movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3893 
3894   ParameterCount expected(rbx);
3895   InvokeCode(rdx, expected, actual, flag, call_wrapper);
3896 }
3897 
3898 
3899 void MacroAssembler::InvokeFunction(Register function,
3900                                     const ParameterCount& expected,
3901                                     const ParameterCount& actual,
3902                                     InvokeFlag flag,
3903                                     const CallWrapper& call_wrapper) {
3904   // You can't call a function without a valid frame.
3905   ASSERT(flag == JUMP_FUNCTION || has_frame());
3906 
3907   ASSERT(function.is(rdi));
3908   movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
3909   // Advances rdx to the end of the Code object header, to the start of
3910   // the executable code.
3911   movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3912 
3913   InvokeCode(rdx, expected, actual, flag, call_wrapper);
3914 }
3915 
3916 
3917 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3918                                     const ParameterCount& expected,
3919                                     const ParameterCount& actual,
3920                                     InvokeFlag flag,
3921                                     const CallWrapper& call_wrapper) {
3922   Move(rdi, function);
3923   InvokeFunction(rdi, expected, actual, flag, call_wrapper);
3924 }
3925 
3926 
3927 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3928                                     const ParameterCount& actual,
3929                                     Handle<Code> code_constant,
3930                                     Register code_register,
3931                                     Label* done,
3932                                     bool* definitely_mismatches,
3933                                     InvokeFlag flag,
3934                                     Label::Distance near_jump,
3935                                     const CallWrapper& call_wrapper) {
3936   bool definitely_matches = false;
3937   *definitely_mismatches = false;
3938   Label invoke;
3939   if (expected.is_immediate()) {
3940     ASSERT(actual.is_immediate());
3941     if (expected.immediate() == actual.immediate()) {
3942       definitely_matches = true;
3943     } else {
3944       Set(rax, actual.immediate());
3945       if (expected.immediate() ==
3946               SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
3947         // Don't worry about adapting arguments for built-ins that
3948         // don't want that done. Skip adaptation code by making it look
3949         // like we have a match between expected and actual number of
3950         // arguments.
3951         definitely_matches = true;
3952       } else {
3953         *definitely_mismatches = true;
3954         Set(rbx, expected.immediate());
3955       }
3956     }
3957   } else {
3958     if (actual.is_immediate()) {
3959       // Expected is in register, actual is immediate. This is the
3960       // case when we invoke function values without going through the
3961       // IC mechanism.
3962       cmpp(expected.reg(), Immediate(actual.immediate()));
3963       j(equal, &invoke, Label::kNear);
3964       ASSERT(expected.reg().is(rbx));
3965       Set(rax, actual.immediate());
3966     } else if (!expected.reg().is(actual.reg())) {
3967       // Both expected and actual are in (different) registers. This
3968       // is the case when we invoke functions using call and apply.
3969       cmpp(expected.reg(), actual.reg());
3970       j(equal, &invoke, Label::kNear);
3971       ASSERT(actual.reg().is(rax));
3972       ASSERT(expected.reg().is(rbx));
3973     }
3974   }
3975 
3976   if (!definitely_matches) {
3977     Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
3978     if (!code_constant.is_null()) {
3979       Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
3980       addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
3981     } else if (!code_register.is(rdx)) {
3982       movp(rdx, code_register);
3983     }
3984 
3985     if (flag == CALL_FUNCTION) {
3986       call_wrapper.BeforeCall(CallSize(adaptor));
3987       Call(adaptor, RelocInfo::CODE_TARGET);
3988       call_wrapper.AfterCall();
3989       if (!*definitely_mismatches) {
3990         jmp(done, near_jump);
3991       }
3992     } else {
3993       Jump(adaptor, RelocInfo::CODE_TARGET);
3994     }
3995     bind(&invoke);
3996   }
3997 }
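// Register protocol assumed above (per the ASSERTs): rax carries the actual
// argument count, rbx the expected count, and rdx the code entry; when the
// counts cannot be proven equal, control reaches the callee through the
// ArgumentsAdaptorTrampoline rather than directly.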
3998 
3999 
4000 void MacroAssembler::StubPrologue() {
4001     pushq(rbp);  // Caller's frame pointer.
4002     movp(rbp, rsp);
4003     Push(rsi);  // Callee's context.
4004     Push(Smi::FromInt(StackFrame::STUB));
4005 }
4006 
4007 
4008 void MacroAssembler::Prologue(bool code_pre_aging) {
4009   PredictableCodeSizeScope predictable_code_size_scope(this,
4010       kNoCodeAgeSequenceLength);
4011   if (code_pre_aging) {
4012       // Pre-age the code.
4013     Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
4014          RelocInfo::CODE_AGE_SEQUENCE);
4015     Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
4016   } else {
4017     pushq(rbp);  // Caller's frame pointer.
4018     movp(rbp, rsp);
4019     Push(rsi);  // Callee's context.
4020     Push(rdi);  // Callee's JS function.
4021   }
4022 }
4023 
4024 
4025 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4026   pushq(rbp);
4027   movp(rbp, rsp);
4028   Push(rsi);  // Context.
4029   Push(Smi::FromInt(type));
4030   Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
4031   Push(kScratchRegister);
4032   if (emit_debug_code()) {
4033     Move(kScratchRegister,
4034          isolate()->factory()->undefined_value(),
4035          RelocInfo::EMBEDDED_OBJECT);
4036     cmpp(Operand(rsp, 0), kScratchRegister);
4037     Check(not_equal, kCodeObjectNotProperlyPatched);
4038   }
4039 }
4040 
4041 
4042 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4043   if (emit_debug_code()) {
4044     Move(kScratchRegister, Smi::FromInt(type));
4045     cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
4046     Check(equal, kStackFrameTypesMustMatch);
4047   }
4048   movp(rsp, rbp);
4049   popq(rbp);
4050 }
4051 
4052 
4053 void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
4054   // Set up the frame structure on the stack.
4055   // All constants are relative to the frame pointer of the exit frame.
4056   ASSERT(ExitFrameConstants::kCallerSPDisplacement ==
4057          kFPOnStackSize + kPCOnStackSize);
4058   ASSERT(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
4059   ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
4060   pushq(rbp);
4061   movp(rbp, rsp);
4062 
4063   // Reserve room for entry stack pointer and push the code object.
4064   ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
4065   Push(Immediate(0));  // Saved entry sp, patched before call.
4066   Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
4067   Push(kScratchRegister);  // Accessed from ExitFrame::code_slot.
4068 
4069   // Save the frame pointer and the context in top.
4070   if (save_rax) {
4071     movp(r14, rax);  // Backup rax in callee-save register.
4072   }
4073 
4074   Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
4075   Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
4076 }
4077 
4078 
4079 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
4080                                             bool save_doubles) {
4081 #ifdef _WIN64
4082   const int kShadowSpace = 4;
4083   arg_stack_space += kShadowSpace;
4084 #endif
4085   // Optionally save all XMM registers.
4086   if (save_doubles) {
4087     int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
4088         arg_stack_space * kRegisterSize;
4089     subp(rsp, Immediate(space));
4090     int offset = -2 * kPointerSize;
4091     for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
4092       XMMRegister reg = XMMRegister::FromAllocationIndex(i);
4093       movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
4094     }
4095   } else if (arg_stack_space > 0) {
4096     subp(rsp, Immediate(arg_stack_space * kRegisterSize));
4097   }
4098 
4099   // Get the required frame alignment for the OS.
4100   const int kFrameAlignment = OS::ActivationFrameAlignment();
4101   if (kFrameAlignment > 0) {
4102     ASSERT(IsPowerOf2(kFrameAlignment));
4103     ASSERT(is_int8(kFrameAlignment));
4104     andp(rsp, Immediate(-kFrameAlignment));
4105   }
4106 
4107   // Patch the saved entry sp.
4108   movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
4109 }
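// Alignment example (illustrative): with a 16-byte activation frame
// alignment, -kFrameAlignment is ~15, so the andp above clears the low four
// bits of rsp and rounds the stack pointer down to a 16-byte boundary.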
4110 
4111 
4112 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
4113   EnterExitFramePrologue(true);
4114 
4115   // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
4116   // so it must be retained across the C-call.
4117   int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
4118   leap(r15, Operand(rbp, r14, times_pointer_size, offset));
4119 
4120   EnterExitFrameEpilogue(arg_stack_space, save_doubles);
4121 }
4122 
4123 
4124 void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
4125   EnterExitFramePrologue(false);
4126   EnterExitFrameEpilogue(arg_stack_space, false);
4127 }
4128 
4129 
4130 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
4131   // Registers:
4132   // r15 : argv
4133   if (save_doubles) {
4134     int offset = -2 * kPointerSize;
4135     for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
4136       XMMRegister reg = XMMRegister::FromAllocationIndex(i);
4137       movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
4138     }
4139   }
4140   // Get the return address from the stack and restore the frame pointer.
4141   movp(rcx, Operand(rbp, kFPOnStackSize));
4142   movp(rbp, Operand(rbp, 0 * kPointerSize));
4143 
4144   // Drop everything up to and including the arguments and the receiver
4145   // from the caller stack.
4146   leap(rsp, Operand(r15, 1 * kPointerSize));
4147 
4148   PushReturnAddressFrom(rcx);
4149 
4150   LeaveExitFrameEpilogue(true);
4151 }
4152 
4153 
4154 void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
4155   movp(rsp, rbp);
4156   popq(rbp);
4157 
4158   LeaveExitFrameEpilogue(restore_context);
4159 }
4160 
4161 
4162 void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
4163   // Restore current context from top and clear it in debug mode.
4164   ExternalReference context_address(Isolate::kContextAddress, isolate());
4165   Operand context_operand = ExternalOperand(context_address);
4166   if (restore_context) {
4167     movp(rsi, context_operand);
4168   }
4169 #ifdef DEBUG
4170   movp(context_operand, Immediate(0));
4171 #endif
4172 
4173   // Clear the top frame.
4174   ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
4175                                        isolate());
4176   Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
4177   movp(c_entry_fp_operand, Immediate(0));
4178 }
4179 
4180 
4181 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
4182                                             Register scratch,
4183                                             Label* miss) {
4184   Label same_contexts;
4185 
4186   ASSERT(!holder_reg.is(scratch));
4187   ASSERT(!scratch.is(kScratchRegister));
4188   // Load current lexical context from the stack frame.
4189   movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
4190 
4191   // When generating debug code, make sure the lexical context is set.
4192   if (emit_debug_code()) {
4193     cmpp(scratch, Immediate(0));
4194     Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
4195   }
4196   // Load the native context of the current context.
4197   int offset =
4198       Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
4199   movp(scratch, FieldOperand(scratch, offset));
4200   movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
4201 
4202   // Check the context is a native context.
4203   if (emit_debug_code()) {
4204     Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
4205         isolate()->factory()->native_context_map());
4206     Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
4207   }
4208 
4209   // Check if both contexts are the same.
4210   cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4211   j(equal, &same_contexts);
4212 
4213   // Compare security tokens.
4214   // Check that the security token in the calling global object is
4215   // compatible with the security token in the receiving global
4216   // object.
4217 
4218   // Check the context is a native context.
4219   if (emit_debug_code()) {
4220     // Preserve original value of holder_reg.
4221     Push(holder_reg);
4222     movp(holder_reg,
4223          FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4224     CompareRoot(holder_reg, Heap::kNullValueRootIndex);
4225     Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
4226 
4227     // Read the first word and compare to native_context_map().
4228     movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
4229     CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
4230     Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
4231     Pop(holder_reg);
4232   }
4233 
4234   movp(kScratchRegister,
4235        FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4236   int token_offset =
4237       Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
4238   movp(scratch, FieldOperand(scratch, token_offset));
4239   cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
4240   j(not_equal, miss);
4241 
4242   bind(&same_contexts);
4243 }
4244 
4245 
4246 // Compute the hash code from the untagged key.  This must be kept in sync with
4247 // ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
4248 // code-stub-hydrogen.cc
4249 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
4250   // First of all we assign the hash seed to scratch.
4251   LoadRoot(scratch, Heap::kHashSeedRootIndex);
4252   SmiToInteger32(scratch, scratch);
4253 
4254   // Xor original key with a seed.
4255   xorl(r0, scratch);
4256 
4257   // Compute the hash code from the untagged key.  This must be kept in sync
4258   // with ComputeIntegerHash in utils.h.
4259   //
4260   // hash = ~hash + (hash << 15);
4261   movl(scratch, r0);
4262   notl(r0);
4263   shll(scratch, Immediate(15));
4264   addl(r0, scratch);
4265   // hash = hash ^ (hash >> 12);
4266   movl(scratch, r0);
4267   shrl(scratch, Immediate(12));
4268   xorl(r0, scratch);
4269   // hash = hash + (hash << 2);
4270   leal(r0, Operand(r0, r0, times_4, 0));
4271   // hash = hash ^ (hash >> 4);
4272   movl(scratch, r0);
4273   shrl(scratch, Immediate(4));
4274   xorl(r0, scratch);
4275   // hash = hash * 2057;
4276   imull(r0, r0, Immediate(2057));
4277   // hash = hash ^ (hash >> 16);
4278   movl(scratch, r0);
4279   shrl(scratch, Immediate(16));
4280   xorl(r0, scratch);
4281 }
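// Equivalent C sketch of the steps above (mirrors ComputeIntegerHash in
// utils.h; shown for reference only):
//   uint32_t hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash ^= hash >> 12;
//   hash += hash << 2;
//   hash ^= hash >> 4;
//   hash *= 2057;
//   hash ^= hash >> 16;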
4282 
4283 
4284 
4285 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
4286                                               Register elements,
4287                                               Register key,
4288                                               Register r0,
4289                                               Register r1,
4290                                               Register r2,
4291                                               Register result) {
4292   // Register use:
4293   //
4294   // elements - holds the slow-case elements of the receiver on entry.
4295   //            Unchanged unless 'result' is the same register.
4296   //
4297   // key      - holds the smi key on entry.
4298   //            Unchanged unless 'result' is the same register.
4299   //
4300   // Scratch registers:
4301   //
4302   // r0 - holds the untagged key on entry and holds the hash once computed.
4303   //
4304   // r1 - used to hold the capacity mask of the dictionary
4305   //
4306   // r2 - used for the index into the dictionary.
4307   //
4308   // result - holds the result on exit if the load succeeded.
4309   //          Allowed to be the same as 'key' or 'elements'.
4310   //          Unchanged on bailout so 'key' or 'elements' can be used
4311   //          in further computation.
4312 
4313   Label done;
4314 
4315   GetNumberHash(r0, r1);
4316 
4317   // Compute capacity mask.
4318   SmiToInteger32(r1, FieldOperand(elements,
4319                                   SeededNumberDictionary::kCapacityOffset));
4320   decl(r1);
4321 
4322   // Generate an unrolled loop that performs a few probes before giving up.
4323   for (int i = 0; i < kNumberDictionaryProbes; i++) {
4324     // Use r2 for index calculations and keep the hash intact in r0.
4325     movp(r2, r0);
4326     // Compute the masked index: (hash + i + i * i) & mask.
4327     if (i > 0) {
4328       addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
4329     }
4330     andp(r2, r1);
4331 
4332     // Scale the index by multiplying by the entry size.
4333     ASSERT(SeededNumberDictionary::kEntrySize == 3);
4334     leap(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
4335 
4336     // Check if the key matches.
4337     cmpp(key, FieldOperand(elements,
4338                            r2,
4339                            times_pointer_size,
4340                            SeededNumberDictionary::kElementsStartOffset));
4341     if (i != (kNumberDictionaryProbes - 1)) {
4342       j(equal, &done);
4343     } else {
4344       j(not_equal, miss);
4345     }
4346   }
4347 
4348   bind(&done);
4349   // Check that the value is a normal property.
4350   const int kDetailsOffset =
4351       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
4352   ASSERT_EQ(NORMAL, 0);
4353   Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
4354        Smi::FromInt(PropertyDetails::TypeField::kMask));
4355   j(not_zero, miss);
4356 
4357   // Get the value at the masked, scaled index.
4358   const int kValueOffset =
4359       SeededNumberDictionary::kElementsStartOffset + kPointerSize;
4360   movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
4361 }
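// Probe walk-through (illustrative): with mask r1 = capacity - 1, probe i
// checks the entry at (hash + i + i * i) & mask, scaled by the 3-word entry
// size; an equal key exits to done, and only the last failed probe jumps
// to miss.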
4362 
4363 
4364 void MacroAssembler::LoadAllocationTopHelper(Register result,
4365                                              Register scratch,
4366                                              AllocationFlags flags) {
4367   ExternalReference allocation_top =
4368       AllocationUtils::GetAllocationTopReference(isolate(), flags);
4369 
4370   // Just return if allocation top is already known.
4371   if ((flags & RESULT_CONTAINS_TOP) != 0) {
4372     // No use of scratch if allocation top is provided.
4373     ASSERT(!scratch.is_valid());
4374 #ifdef DEBUG
4375     // Assert that result actually contains top on entry.
4376     Operand top_operand = ExternalOperand(allocation_top);
4377     cmpp(result, top_operand);
4378     Check(equal, kUnexpectedAllocationTop);
4379 #endif
4380     return;
4381   }
4382 
4383   // Move address of new object to result. Use scratch register if available,
4384   // and keep address in scratch until call to UpdateAllocationTopHelper.
4385   if (scratch.is_valid()) {
4386     LoadAddress(scratch, allocation_top);
4387     movp(result, Operand(scratch, 0));
4388   } else {
4389     Load(result, allocation_top);
4390   }
4391 }
4392 
4393 
4394 void MacroAssembler::MakeSureDoubleAlignedHelper(Register result,
4395                                                  Register scratch,
4396                                                  Label* gc_required,
4397                                                  AllocationFlags flags) {
4398   if (kPointerSize == kDoubleSize) {
4399     if (FLAG_debug_code) {
4400       testl(result, Immediate(kDoubleAlignmentMask));
4401       Check(zero, kAllocationIsNotDoubleAligned);
4402     }
4403   } else {
4404     // Align the next allocation. Storing the filler map without checking top
4405     // is safe in new-space because the limit of the heap is aligned there.
4406     ASSERT(kPointerSize * 2 == kDoubleSize);
4407     ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
4408     ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
4409     // Make sure scratch is not clobbered by this function as it might be
4410     // used in UpdateAllocationTopHelper later.
4411     ASSERT(!scratch.is(kScratchRegister));
4412     Label aligned;
4413     testl(result, Immediate(kDoubleAlignmentMask));
4414     j(zero, &aligned, Label::kNear);
4415     if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
4416       ExternalReference allocation_limit =
4417           AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4418       cmpp(result, ExternalOperand(allocation_limit));
4419       j(above_equal, gc_required);
4420     }
4421     LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex);
4422     movp(Operand(result, 0), kScratchRegister);
4423     addp(result, Immediate(kDoubleSize / 2));
4424     bind(&aligned);
4425   }
4426 }
4427 
4428 
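// What the fix-up above does, as a C-style sketch (illustrative only; the
// filler-map name is shorthand for the root loaded via
// Heap::kOnePointerFillerMapRootIndex): when the bump pointer is only
// pointer-aligned, a one-word filler object is planted so the real object
// starts on a double-aligned boundary.
//
//   if ((top & kDoubleAlignmentMask) != 0) {
//     *reinterpret_cast<Object**>(top) = one_pointer_filler_map;
//     top += kDoubleSize / 2;  // Skip the filler word.
//   }
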
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
                                               Register scratch,
                                               AllocationFlags flags) {
  if (emit_debug_code()) {
    testp(result_end, Immediate(kObjectAlignmentMask));
    Check(zero, kUnalignedAllocationInNewSpace);
  }

  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);

  // Update new top.
  if (scratch.is_valid()) {
    // Scratch already contains address of allocation top.
    movp(Operand(scratch, 0), result_end);
  } else {
    Store(allocation_top, result_end);
  }
}

void MacroAssembler::Allocate(int object_size,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
  ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      movl(result, Immediate(0x7091));
      if (result_end.is_valid()) {
        movl(result_end, Immediate(0x7191));
      }
      if (scratch.is_valid()) {
        movl(scratch, Immediate(0x7291));
      }
    }
    jmp(gc_required);
    return;
  }
  ASSERT(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
  }

  // Calculate new top and bail out if new space is exhausted.
  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  Register top_reg = result_end.is_valid() ? result_end : result;

  if (!top_reg.is(result)) {
    movp(top_reg, result);
  }
  addp(top_reg, Immediate(object_size));
  j(carry, gc_required);
  Operand limit_operand = ExternalOperand(allocation_limit);
  cmpp(top_reg, limit_operand);
  j(above, gc_required);

  // Update allocation top.
  UpdateAllocationTopHelper(top_reg, scratch, flags);

  bool tag_result = (flags & TAG_OBJECT) != 0;
  if (top_reg.is(result)) {
    if (tag_result) {
      subp(result, Immediate(object_size - kHeapObjectTag));
    } else {
      subp(result, Immediate(object_size));
    }
  } else if (tag_result) {
    // Tag the result if requested.
    ASSERT(kHeapObjectTag == 1);
    incp(result);
  }
}

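// The inline fast path above, as a C++ sketch (illustrative only; the helper
// name and parameters are stand-ins). Allocation is a bump of the top
// pointer with an overflow and limit check; any failure corresponds to the
// jumps to gc_required.
static inline void* SketchBumpAllocate(uintptr_t* top,
                                       uintptr_t limit,
                                       size_t object_size) {
  uintptr_t result = *top;
  uintptr_t new_top = result + object_size;
  if (new_top < result) return NULL;  // Overflow: j(carry, gc_required).
  if (new_top > limit) return NULL;   // Exhausted: j(above, gc_required).
  *top = new_top;                     // UpdateAllocationTopHelper.
  return reinterpret_cast<void*>(result + kHeapObjectTag);  // TAG_OBJECT.
}
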
void MacroAssembler::Allocate(int header_size,
                              ScaleFactor element_size,
                              Register element_count,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  ASSERT((flags & SIZE_IN_WORDS) == 0);
  leap(result_end, Operand(element_count, element_size, header_size));
  Allocate(result_end, result, result_end, scratch, gc_required, flags);
}

void MacroAssembler::Allocate(Register object_size,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  ASSERT((flags & SIZE_IN_WORDS) == 0);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      movl(result, Immediate(0x7091));
      movl(result_end, Immediate(0x7191));
      if (scratch.is_valid()) {
        movl(scratch, Immediate(0x7291));
      }
      // object_size is left unchanged by this function.
    }
    jmp(gc_required);
    return;
  }
  ASSERT(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
  }

  // Calculate new top and bail out if new space is exhausted.
  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
  if (!object_size.is(result_end)) {
    movp(result_end, object_size);
  }
  addp(result_end, result);
  j(carry, gc_required);
  Operand limit_operand = ExternalOperand(allocation_limit);
  cmpp(result_end, limit_operand);
  j(above, gc_required);

  // Update allocation top.
  UpdateAllocationTopHelper(result_end, scratch, flags);

  // Tag the result if requested.
  if ((flags & TAG_OBJECT) != 0) {
    addp(result, Immediate(kHeapObjectTag));
  }
}

void MacroAssembler::UndoAllocationInNewSpace(Register object) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Make sure the object has no tag before resetting top.
  andp(object, Immediate(~kHeapObjectTagMask));
  Operand top_operand = ExternalOperand(new_space_allocation_top);
#ifdef DEBUG
  cmpp(object, top_operand);
  Check(below, kUndoAllocationOfNonAllocatedMemory);
#endif
  movp(top_operand, object);
}

void MacroAssembler::AllocateHeapNumber(Register result,
                                        Register scratch,
                                        Label* gc_required) {
  // Allocate heap number in new space.
  Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);

  // Set the map.
  LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}

void MacroAssembler::AllocateTwoByteString(Register result,
                                           Register length,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
                               kObjectAlignmentMask;
  ASSERT(kShortSize == 2);
  // scratch1 = length * 2 + kObjectAlignmentMask + kHeaderAlignment.
  leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
                kHeaderAlignment));
  andp(scratch1, Immediate(~kObjectAlignmentMask));
  if (kHeaderAlignment > 0) {
    subp(scratch1, Immediate(kHeaderAlignment));
  }

  // Allocate two byte string in new space.
  Allocate(SeqTwoByteString::kHeaderSize,
           times_1,
           scratch1,
           result,
           scratch2,
           scratch3,
           gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
  Integer32ToSmi(scratch1, length);
  movp(FieldOperand(result, String::kLengthOffset), scratch1);
  movp(FieldOperand(result, String::kHashFieldOffset),
       Immediate(String::kEmptyHashField));
}

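// The size computation above, as a C++ sketch (illustrative only; the helper
// name is a stand-in). The character payload is rounded up so that header
// plus payload stays object-aligned; kHeaderAlignment compensates for a
// header size that is not itself a multiple of the alignment.
static inline int SketchTwoByteStringDataSize(int length) {
  const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
                               kObjectAlignmentMask;
  int size = length * 2 + static_cast<int>(kObjectAlignmentMask) +
             kHeaderAlignment;
  size &= ~static_cast<int>(kObjectAlignmentMask);
  // This is the element-count argument handed to Allocate(); adding
  // SeqTwoByteString::kHeaderSize yields the aligned total object size.
  return size - kHeaderAlignment;
}
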
void MacroAssembler::AllocateAsciiString(Register result,
                                         Register length,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
                                         Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
                               kObjectAlignmentMask;
  movl(scratch1, length);
  ASSERT(kCharSize == 1);
  addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
  andp(scratch1, Immediate(~kObjectAlignmentMask));
  if (kHeaderAlignment > 0) {
    subp(scratch1, Immediate(kHeaderAlignment));
  }

  // Allocate ASCII string in new space.
  Allocate(SeqOneByteString::kHeaderSize,
           times_1,
           scratch1,
           result,
           scratch2,
           scratch3,
           gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
  Integer32ToSmi(scratch1, length);
  movp(FieldOperand(result, String::kLengthOffset), scratch1);
  movp(FieldOperand(result, String::kHashFieldOffset),
       Immediate(String::kEmptyHashField));
}

void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  // Allocate cons string in new space.
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}

void MacroAssembler::AllocateAsciiConsString(Register result,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* gc_required) {
  Allocate(ConsString::kSize,
           result,
           scratch1,
           scratch2,
           gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}

void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  // Allocate sliced string in new space.
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}

void MacroAssembler::AllocateAsciiSlicedString(Register result,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  // Allocate sliced string in new space.
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}

// Copy memory, byte-by-byte, from source to destination.  Not optimized for
// long or aligned copies.  The contents of scratch and length are destroyed.
// Destination is incremented by length; source, length and scratch are
// clobbered.
// A simpler loop is faster on small copies, but slower on large ones.
// The cld() instruction must have been emitted, to set the direction flag,
// before calling this function.
void MacroAssembler::CopyBytes(Register destination,
                               Register source,
                               Register length,
                               int min_length,
                               Register scratch) {
  ASSERT(min_length >= 0);
  if (emit_debug_code()) {
    cmpl(length, Immediate(min_length));
    Assert(greater_equal, kInvalidMinLength);
  }
  Label short_loop, len8, len16, len24, done, short_string;

  const int kLongStringLimit = 4 * kPointerSize;
  if (min_length <= kLongStringLimit) {
    cmpl(length, Immediate(kPointerSize));
    j(below, &short_string, Label::kNear);
  }

  ASSERT(source.is(rsi));
  ASSERT(destination.is(rdi));
  ASSERT(length.is(rcx));

  if (min_length <= kLongStringLimit) {
    cmpl(length, Immediate(2 * kPointerSize));
    j(below_equal, &len8, Label::kNear);
    cmpl(length, Immediate(3 * kPointerSize));
    j(below_equal, &len16, Label::kNear);
    cmpl(length, Immediate(4 * kPointerSize));
    j(below_equal, &len24, Label::kNear);
  }

  // Because source is 8-byte aligned in our uses of this function,
  // we keep source aligned for the rep movs operation by copying the odd bytes
  // at the end of the ranges.
  movp(scratch, length);
  shrl(length, Immediate(kPointerSizeLog2));
  repmovsp();
  // Move remaining bytes of length.
  andl(scratch, Immediate(kPointerSize - 1));
  movp(length, Operand(source, scratch, times_1, -kPointerSize));
  movp(Operand(destination, scratch, times_1, -kPointerSize), length);
  addp(destination, scratch);

  if (min_length <= kLongStringLimit) {
    jmp(&done, Label::kNear);
    bind(&len24);
    movp(scratch, Operand(source, 2 * kPointerSize));
    movp(Operand(destination, 2 * kPointerSize), scratch);
    bind(&len16);
    movp(scratch, Operand(source, kPointerSize));
    movp(Operand(destination, kPointerSize), scratch);
    bind(&len8);
    movp(scratch, Operand(source, 0));
    movp(Operand(destination, 0), scratch);
    // Move remaining bytes of length.
    movp(scratch, Operand(source, length, times_1, -kPointerSize));
    movp(Operand(destination, length, times_1, -kPointerSize), scratch);
    addp(destination, length);
    jmp(&done, Label::kNear);

    bind(&short_string);
    if (min_length == 0) {
      testl(length, length);
      j(zero, &done, Label::kNear);
    }

    bind(&short_loop);
    movb(scratch, Operand(source, 0));
    movb(Operand(destination, 0), scratch);
    incp(source);
    incp(destination);
    decl(length);
    j(not_zero, &short_loop);
  }

  bind(&done);
}

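// The long-copy tail trick above, as a C-style sketch (illustrative only):
// after rep movs has copied length / kPointerSize whole words, one more
// (possibly overlapping) word copy covers the 1..kPointerSize-1 remaining
// bytes without falling back to a byte loop. This is safe on this path
// because it is only taken when length exceeds kLongStringLimit.
//
//   size_t words = length >> kPointerSizeLog2;
//   memcpy(dst, src, words * kPointerSize);            // rep movs
//   memcpy(dst + length - kPointerSize,                // overlapping tail
//          src + length - kPointerSize, kPointerSize); // word copy
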
void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
                                                Register end_offset,
                                                Register filler) {
  Label loop, entry;
  jmp(&entry);
  bind(&loop);
  movp(Operand(start_offset, 0), filler);
  addp(start_offset, Immediate(kPointerSize));
  bind(&entry);
  cmpp(start_offset, end_offset);
  j(less, &loop);
}

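// Equivalent C++ loop for the code above (illustrative only; Address stands
// for a byte pointer):
//
//   for (Address p = start_offset; p < end_offset; p += kPointerSize) {
//     *reinterpret_cast<Object**>(p) = filler;
//   }
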
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    movp(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      movp(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // Slot is in the current function context.  Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in rsi).
    movp(dst, rsi);
  }

  // We should not have found a with context by walking the context
  // chain (i.e., the static scope chain and runtime context chain do
  // not agree).  A variable occurring in such a scope should have
  // slot type LOOKUP and not CONTEXT.
  if (emit_debug_code()) {
    CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
                Heap::kWithContextMapRootIndex);
    Check(not_equal, kVariableResolvedToWithContext);
  }
}

void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch,
    Label* no_map_match) {
  // Load the global or builtins object from the current context.
  movp(scratch,
       Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check that the function's map is the same as the expected cached map.
  movp(scratch, Operand(scratch,
                        Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));

  int offset = expected_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  cmpp(map_in_out, FieldOperand(scratch, offset));
  j(not_equal, no_map_match);

  // Use the transitioned cached map.
  offset = transitioned_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  movp(map_in_out, FieldOperand(scratch, offset));
}

#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
static const int kRegisterPassedArguments = 6;
#endif

void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  movp(function,
       Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  // Load the native context from the global or builtins object.
  movp(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
  // Load the function from the native context.
  movp(function, Operand(function, Context::SlotOffset(index)));
}

void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map) {
  // Load the initial map.  The global functions all have initial maps.
  movp(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
    jmp(&ok);
    bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    bind(&ok);
  }
}

int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
  // On Windows 64 stack slots are reserved by the caller for all arguments
  // including the ones passed in registers, and space is always allocated for
  // the four register arguments even if the function takes fewer than four
  // arguments.
  // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
  // and the caller does not reserve stack slots for them.
  ASSERT(num_arguments >= 0);
#ifdef _WIN64
  const int kMinimumStackSlots = kRegisterPassedArguments;
  if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
  return num_arguments;
#else
  if (num_arguments < kRegisterPassedArguments) return 0;
  return num_arguments - kRegisterPassedArguments;
#endif
}

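// Worked examples (illustrative only):
//   num_arguments == 3: Win64 reserves 4 slots (its fixed shadow space);
//                       the AMD64 ABI reserves 0 (all three in registers).
//   num_arguments == 7: Win64 reserves 7 slots; the AMD64 ABI reserves 1
//                       (six in registers, one on the stack).
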
void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
                                               Register index,
                                               Register value,
                                               uint32_t encoding_mask) {
  Label is_object;
  JumpIfNotSmi(string, &is_object);
  Abort(kNonObject);
  bind(&is_object);

  Push(value);
  movp(value, FieldOperand(string, HeapObject::kMapOffset));
  movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));

  andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
  cmpp(value, Immediate(encoding_mask));
  Pop(value);
  Check(equal, kUnexpectedStringType);

  // The index is assumed to come in untagged. Tag it to compare with the
  // string length without using a temp register; it is restored at the end
  // of this function.
  Integer32ToSmi(index, index);
  SmiCompare(index, FieldOperand(string, String::kLengthOffset));
  Check(less, kIndexIsTooLarge);

  SmiCompare(index, Smi::FromInt(0));
  Check(greater_equal, kIndexIsNegative);

  // Restore the index.
  SmiToInteger32(index, index);
}

void MacroAssembler::PrepareCallCFunction(int num_arguments) {
  int frame_alignment = OS::ActivationFrameAlignment();
  ASSERT(frame_alignment != 0);
  ASSERT(num_arguments >= 0);

  // Align the stack and allocate space for the arguments plus a slot in
  // which the old rsp is saved.
  movp(kScratchRegister, rsp);
  ASSERT(IsPowerOf2(frame_alignment));
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
  andp(rsp, Immediate(-frame_alignment));
  movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
}

void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  LoadAddress(rax, function);
  CallCFunction(rax, num_arguments);
}


void MacroAssembler::CallCFunction(Register function, int num_arguments) {
  ASSERT(has_frame());
  // Check stack alignment.
  if (emit_debug_code()) {
    CheckStackAlignment();
  }

  call(function);
  ASSERT(OS::ActivationFrameAlignment() != 0);
  ASSERT(num_arguments >= 0);
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
}

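// How PrepareCallCFunction and CallCFunction cooperate, as a C-style sketch
// (illustrative only): the old rsp is stashed in the highest reserved slot,
// so it can be reloaded after the call no matter how much padding the
// alignment step inserted.
//
//   old_rsp = rsp;                                        // kScratchRegister
//   rsp -= (argument_slots_on_stack + 1) * kRegisterSize; // args + save slot
//   rsp &= -frame_alignment;                              // align down
//   *(rsp + argument_slots_on_stack * kRegisterSize) = old_rsp;
//   ... call ...
//   rsp = *(rsp + argument_slots_on_stack * kRegisterSize);  // restore
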
bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
  if (r1.is(r2)) return true;
  if (r1.is(r3)) return true;
  if (r1.is(r4)) return true;
  if (r2.is(r3)) return true;
  if (r2.is(r4)) return true;
  if (r3.is(r4)) return true;
  return false;
}

CodePatcher::CodePatcher(byte* address, int size)
    : address_(address),
      size_(size),
      masm_(NULL, address, size + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate
  // size bytes of instructions without failing with buffer size constraints.
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CPU::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  ASSERT(masm_.pc_ == address_ + size_);
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}

void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  ASSERT(cc == zero || cc == not_zero);
  if (scratch.is(object)) {
    andp(scratch, Immediate(~Page::kPageAlignmentMask));
  } else {
    movp(scratch, Immediate(~Page::kPageAlignmentMask));
    andp(scratch, object);
  }
  if (mask < (1 << kBitsPerByte)) {
    testb(Operand(scratch, MemoryChunk::kFlagsOffset),
          Immediate(static_cast<uint8_t>(mask)));
  } else {
    testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}

void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
                                        Register scratch,
                                        Label* if_deprecated) {
  if (map->CanBeDeprecated()) {
    Move(scratch, map);
    movl(scratch, FieldOperand(scratch, Map::kBitField3Offset));
    andl(scratch, Immediate(Map::Deprecated::kMask));
    j(not_zero, if_deprecated);
  }
}

void MacroAssembler::JumpIfBlack(Register object,
                                 Register bitmap_scratch,
                                 Register mask_scratch,
                                 Label* on_black,
                                 Label::Distance on_black_distance) {
  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
  GetMarkBits(object, bitmap_scratch, mask_scratch);

  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  // The mask_scratch register contains a 1 at the position of the first bit
  // and a 0 at all other positions, including the position of the second bit.
  movp(rcx, mask_scratch);
  // Make rcx into a mask that covers both marking bits using the operation
  // rcx = mask | (mask << 1).
  leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
  // Note that we are using a 4-byte aligned 8-byte load.
  andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  cmpp(mask_scratch, rcx);
  j(equal, on_black, on_black_distance);
}

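// Why the final compare detects black (illustrative walk-through): mask has
// a single 1 at the first mark bit, so mask | (mask << 1), computed as
// mask * 3 by the lea above, covers the object's bit pair. After and'ing
// with the bitmap word, the result equals mask exactly when the first bit
// is 1 and the second bit is 0, which is the "10" black pattern.
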
// Detect some, but not all, common pointer-free objects.  This is used by the
// incremental write barrier which doesn't care about oddballs (they are always
// marked black immediately so this code is not hit).
void MacroAssembler::JumpIfDataObject(
    Register value,
    Register scratch,
    Label* not_data_object,
    Label::Distance not_data_object_distance) {
  Label is_data_object;
  movp(scratch, FieldOperand(value, HeapObject::kMapOffset));
  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
  j(equal, &is_data_object, Label::kNear);
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
        Immediate(kIsIndirectStringMask | kIsNotStringMask));
  j(not_zero, not_data_object, not_data_object_distance);
  bind(&is_data_object);
}

void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
  movp(bitmap_reg, addr_reg);
  // Sign extended 32 bit immediate.
  andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
  movp(rcx, addr_reg);
  int shift =
      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
  shrl(rcx, Immediate(shift));
  andp(rcx,
       Immediate((Page::kPageAlignmentMask >> shift) &
                 ~(Bitmap::kBytesPerCell - 1)));

  addp(bitmap_reg, rcx);
  movp(rcx, addr_reg);
  shrl(rcx, Immediate(kPointerSizeLog2));
  andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
  movl(mask_reg, Immediate(1));
  shlp_cl(mask_reg);
}

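// The bitmap addressing above, as a C++ sketch (illustrative only; the
// helper name is a stand-in). The marking bitmap near the start of each page
// has one bit per pointer-sized word, grouped into cells; an object's two
// mark bits are the bits belonging to its first two words. Callers access
// the cell at MemoryChunk::kHeaderSize past the returned page base, which is
// what the sketch folds in directly.
static inline void SketchGetMarkBits(uintptr_t addr,
                                     uint32_t** cell_out,
                                     uint32_t* mask_out) {
  uintptr_t page = addr & ~Page::kPageAlignmentMask;
  uintptr_t bit_index =
      (addr & Page::kPageAlignmentMask) >> kPointerSizeLog2;
  uintptr_t cell_index = bit_index >> Bitmap::kBitsPerCellLog2;
  *cell_out = reinterpret_cast<uint32_t*>(page + MemoryChunk::kHeaderSize) +
              cell_index;
  *mask_out = 1u << (bit_index & (Bitmap::kBitsPerCell - 1));
}
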
void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register mask_scratch,
    Label* value_is_white_and_not_data,
    Label::Distance distance) {
  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  Label done;

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
  j(not_zero, &done, Label::kNear);

  if (emit_debug_code()) {
    // Check for impossible bit pattern.
    Label ok;
    Push(mask_scratch);
    // shl.  May overflow making the check conservative.
    addp(mask_scratch, mask_scratch);
    testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
    Pop(mask_scratch);
  }

  // Value is white.  We check whether it is data that doesn't need scanning.
  // Currently only checks for HeapNumber and non-cons strings.
  Register map = rcx;  // Holds map while checking type.
  Register length = rcx;  // Holds length of object after checking type.
  Label not_heap_number;
  Label is_data_object;

  // Check for heap-number.
  movp(map, FieldOperand(value, HeapObject::kMapOffset));
  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
  j(not_equal, &not_heap_number, Label::kNear);
  movp(length, Immediate(HeapNumber::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_heap_number);
  // Check for strings.
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = rcx;
  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
  j(not_zero, value_is_white_and_not_data);
  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  Label not_external;
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
  testb(instance_type, Immediate(kExternalStringTag));
  j(zero, &not_external, Label::kNear);
  movp(length, Immediate(ExternalString::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_external);
  // Sequential string, either ASCII or UC16.
  ASSERT(kOneByteStringTag == 0x04);
  andp(length, Immediate(kStringEncodingMask));
  xorp(length, Immediate(kStringEncodingMask));
  addp(length, Immediate(0x04));
  // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
  imulp(length, FieldOperand(value, String::kLengthOffset));
  shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
  addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
  andp(length, Immediate(~kObjectAlignmentMask));

  bind(&is_data_object);
  // Value is a data object, and it is white.  Mark it black.  Since we know
  // that the object is white we can make it black by flipping one bit.
  orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);

  andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
  addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);

  bind(&done);
}

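// The encoding arithmetic above, step by step (illustrative only). For a
// sequential string, instance_type & kStringEncodingMask is 4 for one-byte
// and 0 for two-byte strings; xor'ing with the mask and adding 4 yields
// char-size shifted left by 2 (4 for ASCII, 8 for UC16). Multiplying by the
// smi-tagged length and shifting right by 2 + kSmiTagSize + kSmiShiftSize
// then gives length * char_size, which is rounded up to object alignment to
// obtain the byte size used for the live-bytes update.
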
void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
  Label next, start;
  Register empty_fixed_array_value = r8;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  movp(rcx, rax);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));

  EnumLength(rdx, rbx);
  Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
  j(equal, call_runtime);

  jmp(&start);

  bind(&next);

  movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(rdx, rbx);
  Cmp(rdx, Smi::FromInt(0));
  j(not_equal, call_runtime);

  bind(&start);

  // Check that there are no elements. Register rcx contains the current JS
  // object we've reached through the prototype chain.
  Label no_elements;
  cmpp(empty_fixed_array_value,
       FieldOperand(rcx, JSObject::kElementsOffset));
  j(equal, &no_elements);

  // Second chance, the object may be using the empty slow element dictionary.
  LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
  cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
  j(not_equal, call_runtime);

  bind(&no_elements);
  movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
  cmpp(rcx, null_value);
  j(not_equal, &next);
}

void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg,
    Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  leap(scratch_reg, Operand(receiver_reg,
      JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
  Move(kScratchRegister, new_space_start);
  cmpp(scratch_reg, kScratchRegister);
  j(less, no_memento_found);
  cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
  j(greater, no_memento_found);
  CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
              Heap::kAllocationMementoMapRootIndex);
}

void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  ASSERT(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
  ASSERT(!scratch1.is(scratch0));
  Register current = scratch0;
  Label loop_again;

  movp(current, object);

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  movp(current, FieldOperand(current, HeapObject::kMapOffset));
  movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
  j(equal, found);
  movp(current, FieldOperand(current, Map::kPrototypeOffset));
  CompareRoot(current, Heap::kNullValueRootIndex);
  j(not_equal, &loop_again);
}

void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
  ASSERT(!dividend.is(rax));
  ASSERT(!dividend.is(rdx));
  MultiplierAndShift ms(divisor);
  movl(rax, Immediate(ms.multiplier()));
  imull(dividend);
  if (divisor > 0 && ms.multiplier() < 0) addl(rdx, dividend);
  if (divisor < 0 && ms.multiplier() > 0) subl(rdx, dividend);
  if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift()));
  movl(rax, dividend);
  shrl(rax, Immediate(31));
  addl(rdx, rax);
}

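// A C++ sketch of the division-by-constant sequence above (illustrative
// only; the helper name is a stand-in, and an arithmetic right shift on
// signed values is assumed, as on the targets this file supports). The
// (multiplier, shift) pair plays the role of MultiplierAndShift's result;
// the final term adds 1 for negative dividends so the quotient truncates
// toward zero.
static inline int32_t SketchTruncatingDiv(int32_t dividend,
                                          int32_t divisor,
                                          int32_t multiplier,
                                          int shift) {
  int32_t high = static_cast<int32_t>(
      (static_cast<int64_t>(dividend) * multiplier) >> 32);  // imull -> rdx.
  if (divisor > 0 && multiplier < 0) high += dividend;  // addl(rdx, dividend)
  if (divisor < 0 && multiplier > 0) high -= dividend;  // subl(rdx, dividend)
  if (shift > 0) high >>= shift;                        // sarl(rdx, shift)
  return high + (static_cast<uint32_t>(dividend) >> 31);
}
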
} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64