1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #if V8_TARGET_ARCH_X64
31
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "cpu-profiler.h"
35 #include "assembler-x64.h"
36 #include "macro-assembler-x64.h"
37 #include "serialize.h"
38 #include "debug.h"
39 #include "heap.h"
40 #include "isolate-inl.h"
41
42 namespace v8 {
43 namespace internal {
44
45 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
46 : Assembler(arg_isolate, buffer, size),
47 generating_stub_(false),
48 has_frame_(false),
49 root_array_available_(true) {
50 if (isolate() != NULL) {
51 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
52 isolate());
53 }
54 }
55
56
57 static const int kInvalidRootRegisterDelta = -1;
58
59
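// kRootRegister (r13 on this port) is kept pointing kRootRegisterBias bytes
// past the start of the isolate's root array while V8 code runs.  The helpers
// below use that to reach external references living inside the Isolate as
// [kRootRegister + delta] with a 32-bit displacement, avoiding a 64-bit
// address that would need relocation.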
60 intptr_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
61 if (predictable_code_size() &&
62 (other.address() < reinterpret_cast<Address>(isolate()) ||
63 other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
64 return kInvalidRootRegisterDelta;
65 }
66 Address roots_register_value = kRootRegisterBias +
67 reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
68 intptr_t delta = other.address() - roots_register_value;
69 return delta;
70 }
71
72
73 Operand MacroAssembler::ExternalOperand(ExternalReference target,
74 Register scratch) {
75 if (root_array_available_ && !Serializer::enabled()) {
76 intptr_t delta = RootRegisterDelta(target);
77 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
78 Serializer::TooLateToEnableNow();
79 return Operand(kRootRegister, static_cast<int32_t>(delta));
80 }
81 }
82 Move(scratch, target);
83 return Operand(scratch, 0);
84 }
85
86
87 void MacroAssembler::Load(Register destination, ExternalReference source) {
88 if (root_array_available_ && !Serializer::enabled()) {
89 intptr_t delta = RootRegisterDelta(source);
90 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
91 Serializer::TooLateToEnableNow();
92 movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
93 return;
94 }
95 }
96 // Safe code.
97 if (destination.is(rax)) {
98 load_rax(source);
99 } else {
100 Move(kScratchRegister, source);
101 movq(destination, Operand(kScratchRegister, 0));
102 }
103 }
104
105
106 void MacroAssembler::Store(ExternalReference destination, Register source) {
107 if (root_array_available_ && !Serializer::enabled()) {
108 intptr_t delta = RootRegisterDelta(destination);
109 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
110 Serializer::TooLateToEnableNow();
111 movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
112 return;
113 }
114 }
115 // Safe code.
116 if (source.is(rax)) {
117 store_rax(destination);
118 } else {
119 Move(kScratchRegister, destination);
120 movq(Operand(kScratchRegister, 0), source);
121 }
122 }
123
124
125 void MacroAssembler::LoadAddress(Register destination,
126 ExternalReference source) {
127 if (root_array_available_ && !Serializer::enabled()) {
128 intptr_t delta = RootRegisterDelta(source);
129 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
130 Serializer::TooLateToEnableNow();
131 lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
132 return;
133 }
134 }
135 // Safe code.
136 Move(destination, source);
137 }
138
139
140 int MacroAssembler::LoadAddressSize(ExternalReference source) {
141 if (root_array_available_ && !Serializer::enabled()) {
142 // This calculation depends on the internals of LoadAddress.
143 // Its correctness is ensured by the asserts in the Call
144 // instruction below.
145 intptr_t delta = RootRegisterDelta(source);
146 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
147 Serializer::TooLateToEnableNow();
148 // Operand is lea(scratch, Operand(kRootRegister, delta));
149 // Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
150 int size = 4;
151 if (!is_int8(static_cast<int32_t>(delta))) {
152 size += 3; // Need full four-byte displacement in lea.
153 }
154 return size;
155 }
156 }
157 // Size of movq(destination, src);
158 return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
159 }
160
161
162 void MacroAssembler::PushAddress(ExternalReference source) {
163 int64_t address = reinterpret_cast<int64_t>(source.address());
164 if (is_int32(address) && !Serializer::enabled()) {
165 if (emit_debug_code()) {
166 movq(kScratchRegister, kZapValue, RelocInfo::NONE64);
167 }
168 push(Immediate(static_cast<int32_t>(address)));
169 return;
170 }
171 LoadAddress(kScratchRegister, source);
172 push(kScratchRegister);
173 }
174
175
176 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
177 ASSERT(root_array_available_);
178 movq(destination, Operand(kRootRegister,
179 (index << kPointerSizeLog2) - kRootRegisterBias));
180 }
181
182
183 void MacroAssembler::LoadRootIndexed(Register destination,
184 Register variable_offset,
185 int fixed_offset) {
186 ASSERT(root_array_available_);
187 movq(destination,
188 Operand(kRootRegister,
189 variable_offset, times_pointer_size,
190 (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
191 }
192
193
194 void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
195 ASSERT(root_array_available_);
196 movq(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
197 source);
198 }
199
200
201 void MacroAssembler::PushRoot(Heap::RootListIndex index) {
202 ASSERT(root_array_available_);
203 push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
204 }
205
206
207 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
208 ASSERT(root_array_available_);
209 cmpq(with, Operand(kRootRegister,
210 (index << kPointerSizeLog2) - kRootRegisterBias));
211 }
212
213
214 void MacroAssembler::CompareRoot(const Operand& with,
215 Heap::RootListIndex index) {
216 ASSERT(root_array_available_);
217 ASSERT(!with.AddressUsesRegister(kScratchRegister));
218 LoadRoot(kScratchRegister, index);
219 cmpq(with, kScratchRegister);
220 }
221
222
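// Records 'addr' (a slot in 'object', which must not itself be in new space)
// in the store buffer, i.e. the remembered set of old-to-new pointers, and
// calls StoreBufferOverflowStub when the new buffer top has the overflow bit
// set so the buffer can be processed.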
223 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
224 Register addr,
225 Register scratch,
226 SaveFPRegsMode save_fp,
227 RememberedSetFinalAction and_then) {
228 if (emit_debug_code()) {
229 Label ok;
230 JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
231 int3();
232 bind(&ok);
233 }
234 // Load store buffer top.
235 LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
236 // Store pointer to buffer.
237 movq(Operand(scratch, 0), addr);
238 // Increment buffer top.
239 addq(scratch, Immediate(kPointerSize));
240 // Write back new top of buffer.
241 StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
242 // Call stub on end of buffer.
243 Label done;
244 // Check for end of buffer.
245 testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
246 if (and_then == kReturnAtEnd) {
247 Label buffer_overflowed;
248 j(not_equal, &buffer_overflowed, Label::kNear);
249 ret(0);
250 bind(&buffer_overflowed);
251 } else {
252 ASSERT(and_then == kFallThroughAtEnd);
253 j(equal, &done, Label::kNear);
254 }
255 StoreBufferOverflowStub store_buffer_overflow =
256 StoreBufferOverflowStub(save_fp);
257 CallStub(&store_buffer_overflow);
258 if (and_then == kReturnAtEnd) {
259 ret(0);
260 } else {
261 ASSERT(and_then == kFallThroughAtEnd);
262 bind(&done);
263 }
264 }
265
266
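// An object is in new space iff ((object - new_space_start) & NewSpaceMask)
// is zero; 'cc' selects whether to branch when the object is in new space
// (equal) or when it is not (not_equal).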
267 void MacroAssembler::InNewSpace(Register object,
268 Register scratch,
269 Condition cc,
270 Label* branch,
271 Label::Distance distance) {
272 if (Serializer::enabled()) {
273 // Can't do arithmetic on external references if it might get serialized.
274 // The mask isn't really an address. We load it as an external reference in
275 // case the size of the new space is different between the snapshot maker
276 // and the running system.
277 if (scratch.is(object)) {
278 Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
279 and_(scratch, kScratchRegister);
280 } else {
281 Move(scratch, ExternalReference::new_space_mask(isolate()));
282 and_(scratch, object);
283 }
284 Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
285 cmpq(scratch, kScratchRegister);
286 j(cc, branch, distance);
287 } else {
288 ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
289 intptr_t new_space_start =
290 reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
291 movq(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
292 RelocInfo::NONE64);
293 if (scratch.is(object)) {
294 addq(scratch, kScratchRegister);
295 } else {
296 lea(scratch, Operand(object, kScratchRegister, times_1, 0));
297 }
298 and_(scratch,
299 Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
300 j(cc, branch, distance);
301 }
302 }
303
304
305 void MacroAssembler::RecordWriteField(
306 Register object,
307 int offset,
308 Register value,
309 Register dst,
310 SaveFPRegsMode save_fp,
311 RememberedSetAction remembered_set_action,
312 SmiCheck smi_check) {
313 // First, check if a write barrier is even needed. The tests below
314 // catch stores of Smis.
315 Label done;
316
317 // Skip barrier if writing a smi.
318 if (smi_check == INLINE_SMI_CHECK) {
319 JumpIfSmi(value, &done);
320 }
321
322 // Although the object register is tagged, the offset is relative to the start
323 // of the object, so the offset must be a multiple of kPointerSize.
324 ASSERT(IsAligned(offset, kPointerSize));
325
326 lea(dst, FieldOperand(object, offset));
327 if (emit_debug_code()) {
328 Label ok;
329 testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
330 j(zero, &ok, Label::kNear);
331 int3();
332 bind(&ok);
333 }
334
335 RecordWrite(
336 object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
337
338 bind(&done);
339
340 // Clobber clobbered input registers when running with the debug-code flag
341 // turned on to provoke errors.
342 if (emit_debug_code()) {
343 movq(value, kZapValue, RelocInfo::NONE64);
344 movq(dst, kZapValue, RelocInfo::NONE64);
345 }
346 }
347
348
349 void MacroAssembler::RecordWriteArray(Register object,
350 Register value,
351 Register index,
352 SaveFPRegsMode save_fp,
353 RememberedSetAction remembered_set_action,
354 SmiCheck smi_check) {
355 // First, check if a write barrier is even needed. The tests below
356 // catch stores of Smis.
357 Label done;
358
359 // Skip barrier if writing a smi.
360 if (smi_check == INLINE_SMI_CHECK) {
361 JumpIfSmi(value, &done);
362 }
363
364 // Array access: calculate the destination address. Index is not a smi.
365 Register dst = index;
366 lea(dst, Operand(object, index, times_pointer_size,
367 FixedArray::kHeaderSize - kHeapObjectTag));
368
369 RecordWrite(
370 object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
371
372 bind(&done);
373
374 // Clobber clobbered input registers when running with the debug-code flag
375 // turned on to provoke errors.
376 if (emit_debug_code()) {
377 movq(value, kZapValue, RelocInfo::NONE64);
378 movq(index, kZapValue, RelocInfo::NONE64);
379 }
380 }
381
382
383 void MacroAssembler::RecordWrite(Register object,
384 Register address,
385 Register value,
386 SaveFPRegsMode fp_mode,
387 RememberedSetAction remembered_set_action,
388 SmiCheck smi_check) {
389 ASSERT(!object.is(value));
390 ASSERT(!object.is(address));
391 ASSERT(!value.is(address));
392 AssertNotSmi(object);
393
394 if (remembered_set_action == OMIT_REMEMBERED_SET &&
395 !FLAG_incremental_marking) {
396 return;
397 }
398
399 if (emit_debug_code()) {
400 Label ok;
401 cmpq(value, Operand(address, 0));
402 j(equal, &ok, Label::kNear);
403 int3();
404 bind(&ok);
405 }
406
407 // Count number of write barriers in generated code.
408 isolate()->counters()->write_barriers_static()->Increment();
409 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
410
411 // First, check if a write barrier is even needed. The tests below
412 // catch stores of smis and stores into the young generation.
413 Label done;
414
415 if (smi_check == INLINE_SMI_CHECK) {
416 // Skip barrier if writing a smi.
417 JumpIfSmi(value, &done);
418 }
419
420 CheckPageFlag(value,
421 value, // Used as scratch.
422 MemoryChunk::kPointersToHereAreInterestingMask,
423 zero,
424 &done,
425 Label::kNear);
426
427 CheckPageFlag(object,
428 value, // Used as scratch.
429 MemoryChunk::kPointersFromHereAreInterestingMask,
430 zero,
431 &done,
432 Label::kNear);
433
434 RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
435 CallStub(&stub);
436
437 bind(&done);
438
439 // Clobber clobbered registers when running with the debug-code flag
440 // turned on to provoke errors.
441 if (emit_debug_code()) {
442 movq(address, kZapValue, RelocInfo::NONE64);
443 movq(value, kZapValue, RelocInfo::NONE64);
444 }
445 }
446
447
448 void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
449 if (emit_debug_code()) Check(cc, reason);
450 }
451
452
453 void MacroAssembler::AssertFastElements(Register elements) {
454 if (emit_debug_code()) {
455 Label ok;
456 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
457 Heap::kFixedArrayMapRootIndex);
458 j(equal, &ok, Label::kNear);
459 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
460 Heap::kFixedDoubleArrayMapRootIndex);
461 j(equal, &ok, Label::kNear);
462 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
463 Heap::kFixedCOWArrayMapRootIndex);
464 j(equal, &ok, Label::kNear);
465 Abort(kJSObjectWithFastElementsMapHasSlowElements);
466 bind(&ok);
467 }
468 }
469
470
471 void MacroAssembler::Check(Condition cc, BailoutReason reason) {
472 Label L;
473 j(cc, &L, Label::kNear);
474 Abort(reason);
475 // Control will not return here.
476 bind(&L);
477 }
478
479
480 void MacroAssembler::CheckStackAlignment() {
481 int frame_alignment = OS::ActivationFrameAlignment();
482 int frame_alignment_mask = frame_alignment - 1;
483 if (frame_alignment > kPointerSize) {
484 ASSERT(IsPowerOf2(frame_alignment));
485 Label alignment_as_expected;
486 testq(rsp, Immediate(frame_alignment_mask));
487 j(zero, &alignment_as_expected, Label::kNear);
488 // Abort if stack is not aligned.
489 int3();
490 bind(&alignment_as_expected);
491 }
492 }
493
494
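// Jumps to then_label when the integer result is zero but the operand was
// negative, i.e. when the mathematically correct result would be -0, which
// cannot be represented as an int32.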
495 void MacroAssembler::NegativeZeroTest(Register result,
496 Register op,
497 Label* then_label) {
498 Label ok;
499 testl(result, result);
500 j(not_zero, &ok, Label::kNear);
501 testl(op, op);
502 j(sign, then_label);
503 bind(&ok);
504 }
505
506
507 void MacroAssembler::Abort(BailoutReason reason) {
508 // We want to pass the msg string like a smi to avoid GC
509 // problems, however msg is not guaranteed to be aligned
510 // properly. Instead, we pass an aligned pointer that is
511 // a proper v8 smi, but also pass the alignment difference
512 // from the real pointer as a smi.
513 const char* msg = GetBailoutReason(reason);
514 intptr_t p1 = reinterpret_cast<intptr_t>(msg);
515 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
516 // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag.
517 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
518 #ifdef DEBUG
519 if (msg != NULL) {
520 RecordComment("Abort message: ");
521 RecordComment(msg);
522 }
523
524 if (FLAG_trap_on_abort) {
525 int3();
526 return;
527 }
528 #endif
529
530 push(rax);
531 movq(kScratchRegister, reinterpret_cast<Smi*>(p0), RelocInfo::NONE64);
532 push(kScratchRegister);
533 movq(kScratchRegister, Smi::FromInt(static_cast<int>(p1 - p0)),
534 RelocInfo::NONE64);
535 push(kScratchRegister);
536
537 if (!has_frame_) {
538 // We don't actually want to generate a pile of code for this, so just
539 // claim there is a stack frame, without generating one.
540 FrameScope scope(this, StackFrame::NONE);
541 CallRuntime(Runtime::kAbort, 2);
542 } else {
543 CallRuntime(Runtime::kAbort, 2);
544 }
545 // Control will not return here.
546 int3();
547 }
548
549
550 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
551 ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
552 Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
553 }
554
555
556 void MacroAssembler::TailCallStub(CodeStub* stub) {
557 Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
558 }
559
560
561 void MacroAssembler::StubReturn(int argc) {
562 ASSERT(argc >= 1 && generating_stub());
563 ret((argc - 1) * kPointerSize);
564 }
565
566
567 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
568 return has_frame_ || !stub->SometimesSetsUpAFrame();
569 }
570
571
572 void MacroAssembler::IllegalOperation(int num_arguments) {
573 if (num_arguments > 0) {
574 addq(rsp, Immediate(num_arguments * kPointerSize));
575 }
576 LoadRoot(rax, Heap::kUndefinedValueRootIndex);
577 }
578
579
580 void MacroAssembler::IndexFromHash(Register hash, Register index) {
581 // The assert checks that the constants for the maximum number of digits
582 // for an array index cached in the hash field and the number of bits
583 // reserved for it do not conflict.
584 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
585 (1 << String::kArrayIndexValueBits));
586 // We want the smi-tagged index in key. Even if we subsequently go to
587 // the slow case, converting the key to a smi is always valid.
588 // key: string key
589 // hash: key's hash field, including its array index value.
590 and_(hash, Immediate(String::kArrayIndexValueMask));
591 shr(hash, Immediate(String::kHashShift));
592 // Here we actually clobber the key which will be used if calling into
593 // runtime later. However as the new key is the numeric value of a string key
594 // there is no difference in using either key.
595 Integer32ToSmi(index, hash);
596 }
597
598
599 void MacroAssembler::CallRuntime(const Runtime::Function* f,
600 int num_arguments,
601 SaveFPRegsMode save_doubles) {
602 // If the expected number of arguments of the runtime function is
603 // constant, we check that the actual number of arguments matches the
604 // expectation.
605 if (f->nargs >= 0 && f->nargs != num_arguments) {
606 IllegalOperation(num_arguments);
607 return;
608 }
609
610 // TODO(1236192): Most runtime routines don't need the number of
611 // arguments passed in because it is constant. At some point we
612 // should remove this need and make the runtime routine entry code
613 // smarter.
614 Set(rax, num_arguments);
615 LoadAddress(rbx, ExternalReference(f, isolate()));
616 CEntryStub ces(f->result_size, save_doubles);
617 CallStub(&ces);
618 }
619
620
621 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
622 int num_arguments) {
623 Set(rax, num_arguments);
624 LoadAddress(rbx, ext);
625
626 CEntryStub stub(1);
627 CallStub(&stub);
628 }
629
630
631 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
632 int num_arguments,
633 int result_size) {
634 // ----------- S t a t e -------------
635 // -- rsp[0] : return address
636 // -- rsp[8] : argument num_arguments - 1
637 // ...
638 // -- rsp[8 * num_arguments] : argument 0 (receiver)
639 // -----------------------------------
640
641 // TODO(1236192): Most runtime routines don't need the number of
642 // arguments passed in because it is constant. At some point we
643 // should remove this need and make the runtime routine entry code
644 // smarter.
645 Set(rax, num_arguments);
646 JumpToExternalReference(ext, result_size);
647 }
648
649
650 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
651 int num_arguments,
652 int result_size) {
653 TailCallExternalReference(ExternalReference(fid, isolate()),
654 num_arguments,
655 result_size);
656 }
657
658
659 static int Offset(ExternalReference ref0, ExternalReference ref1) {
660 int64_t offset = (ref0.address() - ref1.address());
661 // Check that the offset fits into an int.
662 ASSERT(static_cast<int>(offset) == offset);
663 return static_cast<int>(offset);
664 }
665
666
667 void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
668 EnterApiExitFrame(arg_stack_space);
669 }
670
671
672 void MacroAssembler::CallApiFunctionAndReturn(
673 Address function_address,
674 Address thunk_address,
675 Register thunk_last_arg,
676 int stack_space,
677 Operand return_value_operand,
678 Operand* context_restore_operand) {
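  // Rough flow (callers are expected to have entered an API exit frame, see
  // PrepareCallApiFunction above): open a HandleScope in callee-saved
  // registers, call the API function (through the profiling thunk when the
  // CPU profiler is active), restore the HandleScope, promote any scheduled
  // exception, then return, popping 'stack_space' words.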
679 Label prologue;
680 Label promote_scheduled_exception;
681 Label exception_handled;
682 Label delete_allocated_handles;
683 Label leave_exit_frame;
684 Label write_back;
685
686 Factory* factory = isolate()->factory();
687 ExternalReference next_address =
688 ExternalReference::handle_scope_next_address(isolate());
689 const int kNextOffset = 0;
690 const int kLimitOffset = Offset(
691 ExternalReference::handle_scope_limit_address(isolate()),
692 next_address);
693 const int kLevelOffset = Offset(
694 ExternalReference::handle_scope_level_address(isolate()),
695 next_address);
696 ExternalReference scheduled_exception_address =
697 ExternalReference::scheduled_exception_address(isolate());
698
699 // Allocate HandleScope in callee-save registers.
700 Register prev_next_address_reg = r14;
701 Register prev_limit_reg = rbx;
702 Register base_reg = r15;
703 Move(base_reg, next_address);
704 movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
705 movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
706 addl(Operand(base_reg, kLevelOffset), Immediate(1));
707
708 if (FLAG_log_timer_events) {
709 FrameScope frame(this, StackFrame::MANUAL);
710 PushSafepointRegisters();
711 PrepareCallCFunction(1);
712 LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
713 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
714 PopSafepointRegisters();
715 }
716
717
718 Label profiler_disabled;
719 Label end_profiler_check;
720 bool* is_profiling_flag =
721 isolate()->cpu_profiler()->is_profiling_address();
722 STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
723 movq(rax, is_profiling_flag, RelocInfo::EXTERNAL_REFERENCE);
724 cmpb(Operand(rax, 0), Immediate(0));
725 j(zero, &profiler_disabled);
726
727 // Third parameter is the address of the actual getter function.
728 movq(thunk_last_arg, function_address, RelocInfo::EXTERNAL_REFERENCE);
729 movq(rax, thunk_address, RelocInfo::EXTERNAL_REFERENCE);
730 jmp(&end_profiler_check);
731
732 bind(&profiler_disabled);
733 // Profiler disabled: load the address of the api function directly.
734 movq(rax, reinterpret_cast<Address>(function_address),
735 RelocInfo::EXTERNAL_REFERENCE);
736
737 bind(&end_profiler_check);
738
739 // Call the api function!
740 call(rax);
741
742 if (FLAG_log_timer_events) {
743 FrameScope frame(this, StackFrame::MANUAL);
744 PushSafepointRegisters();
745 PrepareCallCFunction(1);
746 LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
747 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
748 PopSafepointRegisters();
749 }
750
751 // Load the value from ReturnValue
752 movq(rax, return_value_operand);
753 bind(&prologue);
754
755 // No more valid handles (the result handle was the last one). Restore
756 // previous handle scope.
757 subl(Operand(base_reg, kLevelOffset), Immediate(1));
758 movq(Operand(base_reg, kNextOffset), prev_next_address_reg);
759 cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
760 j(not_equal, &delete_allocated_handles);
761 bind(&leave_exit_frame);
762
763 // Check if the function scheduled an exception.
764 Move(rsi, scheduled_exception_address);
765 Cmp(Operand(rsi, 0), factory->the_hole_value());
766 j(not_equal, &promote_scheduled_exception);
767 bind(&exception_handled);
768
769 #if ENABLE_EXTRA_CHECKS
770 // Check if the function returned a valid JavaScript value.
771 Label ok;
772 Register return_value = rax;
773 Register map = rcx;
774
775 JumpIfSmi(return_value, &ok, Label::kNear);
776 movq(map, FieldOperand(return_value, HeapObject::kMapOffset));
777
778 CmpInstanceType(map, FIRST_NONSTRING_TYPE);
779 j(below, &ok, Label::kNear);
780
781 CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
782 j(above_equal, &ok, Label::kNear);
783
784 CompareRoot(map, Heap::kHeapNumberMapRootIndex);
785 j(equal, &ok, Label::kNear);
786
787 CompareRoot(return_value, Heap::kUndefinedValueRootIndex);
788 j(equal, &ok, Label::kNear);
789
790 CompareRoot(return_value, Heap::kTrueValueRootIndex);
791 j(equal, &ok, Label::kNear);
792
793 CompareRoot(return_value, Heap::kFalseValueRootIndex);
794 j(equal, &ok, Label::kNear);
795
796 CompareRoot(return_value, Heap::kNullValueRootIndex);
797 j(equal, &ok, Label::kNear);
798
799 Abort(kAPICallReturnedInvalidObject);
800
801 bind(&ok);
802 #endif
803
804 bool restore_context = context_restore_operand != NULL;
805 if (restore_context) {
806 movq(rsi, *context_restore_operand);
807 }
808 LeaveApiExitFrame(!restore_context);
809 ret(stack_space * kPointerSize);
810
811 bind(&promote_scheduled_exception);
812 {
813 FrameScope frame(this, StackFrame::INTERNAL);
814 CallRuntime(Runtime::kPromoteScheduledException, 0);
815 }
816 jmp(&exception_handled);
817
818 // HandleScope limit has changed. Delete allocated extensions.
819 bind(&delete_allocated_handles);
820 movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
821 movq(prev_limit_reg, rax);
822 LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
823 LoadAddress(rax,
824 ExternalReference::delete_handle_scope_extensions(isolate()));
825 call(rax);
826 movq(rax, prev_limit_reg);
827 jmp(&leave_exit_frame);
828 }
829
830
831 void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
832 int result_size) {
833 // Set the entry point and jump to the C entry runtime stub.
834 LoadAddress(rbx, ext);
835 CEntryStub ces(result_size);
836 jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET);
837 }
838
839
840 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
841 InvokeFlag flag,
842 const CallWrapper& call_wrapper) {
843 // You can't call a builtin without a valid frame.
844 ASSERT(flag == JUMP_FUNCTION || has_frame());
845
846 // Rely on the assertion to check that the number of provided
847 // arguments matches the expected number of arguments. Fake a
848 // parameter count to avoid emitting code to do the check.
849 ParameterCount expected(0);
850 GetBuiltinEntry(rdx, id);
851 InvokeCode(rdx, expected, expected, flag, call_wrapper, CALL_AS_METHOD);
852 }
853
854
855 void MacroAssembler::GetBuiltinFunction(Register target,
856 Builtins::JavaScript id) {
857 // Load the builtins object into target register.
858 movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
859 movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
860 movq(target, FieldOperand(target,
861 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
862 }
863
864
865 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
866 ASSERT(!target.is(rdi));
867 // Load the JavaScript builtin function from the builtins object.
868 GetBuiltinFunction(rdi, id);
869 movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
870 }
871
872
873 #define REG(Name) { kRegister_ ## Name ## _Code }
874
875 static const Register saved_regs[] = {
876 REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
877 REG(r9), REG(r10), REG(r11)
878 };
879
880 #undef REG
881
882 static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
883
884
885 void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
886 Register exclusion1,
887 Register exclusion2,
888 Register exclusion3) {
889 // We don't allow a GC during a store buffer overflow so there is no need to
890 // store the registers in any particular way, but we do have to store and
891 // restore them.
892 for (int i = 0; i < kNumberOfSavedRegs; i++) {
893 Register reg = saved_regs[i];
894 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
895 push(reg);
896 }
897 }
898 // r12 to r15 are callee-saved on all platforms.
899 if (fp_mode == kSaveFPRegs) {
900 subq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
901 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
902 XMMRegister reg = XMMRegister::from_code(i);
903 movsd(Operand(rsp, i * kDoubleSize), reg);
904 }
905 }
906 }
907
908
909 void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
910 Register exclusion1,
911 Register exclusion2,
912 Register exclusion3) {
913 if (fp_mode == kSaveFPRegs) {
914 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
915 XMMRegister reg = XMMRegister::from_code(i);
916 movsd(reg, Operand(rsp, i * kDoubleSize));
917 }
918 addq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
919 }
920 for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
921 Register reg = saved_regs[i];
922 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
923 pop(reg);
924 }
925 }
926 }
927
928
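// cvtsi2sd only writes the low quadword of the destination XMM register and
// leaves the upper bits untouched, which creates a false dependence on the
// register's previous contents.  Zeroing the register with xorps first
// breaks that dependence.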
929 void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
930 xorps(dst, dst);
931 cvtlsi2sd(dst, src);
932 }
933
934
935 void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
936 xorps(dst, dst);
937 cvtlsi2sd(dst, src);
938 }
939
940
941 void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
942 ASSERT(!r.IsDouble());
943 if (r.IsInteger8()) {
944 movsxbq(dst, src);
945 } else if (r.IsUInteger8()) {
946 movzxbl(dst, src);
947 } else if (r.IsInteger16()) {
948 movsxwq(dst, src);
949 } else if (r.IsUInteger16()) {
950 movzxwl(dst, src);
951 } else if (r.IsInteger32()) {
952 movl(dst, src);
953 } else {
954 movq(dst, src);
955 }
956 }
957
958
959 void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
960 ASSERT(!r.IsDouble());
961 if (r.IsInteger8() || r.IsUInteger8()) {
962 movb(dst, src);
963 } else if (r.IsInteger16() || r.IsUInteger16()) {
964 movw(dst, src);
965 } else if (r.IsInteger32()) {
966 movl(dst, src);
967 } else {
968 movq(dst, src);
969 }
970 }
971
972
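// Picks the shortest encoding for loading the constant: xorl for zero, a
// zero-extending movl for values that fit in an unsigned 32-bit immediate, a
// sign-extended 32-bit immediate for signed 32-bit values, and a full
// 64-bit movq otherwise.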
973 void MacroAssembler::Set(Register dst, int64_t x) {
974 if (x == 0) {
975 xorl(dst, dst);
976 } else if (is_uint32(x)) {
977 movl(dst, Immediate(static_cast<uint32_t>(x)));
978 } else if (is_int32(x)) {
979 movq(dst, Immediate(static_cast<int32_t>(x)));
980 } else {
981 movq(dst, x);
982 }
983 }
984
985
986 void MacroAssembler::Set(const Operand& dst, int64_t x) {
987 if (is_int32(x)) {
988 movq(dst, Immediate(static_cast<int32_t>(x)));
989 } else {
990 Set(kScratchRegister, x);
991 movq(dst, kScratchRegister);
992 }
993 }
994
995
996 // ----------------------------------------------------------------------------
997 // Smi tagging, untagging and tag detection.
998
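// On x64 a smi keeps its 32-bit payload in the upper half of the word and an
// all-zero tag in the low 32 bits, i.e. Smi::FromInt(v) == int64_t(v) << 32
// (kSmiShift).  For example, Smi::FromInt(5) is 0x0000000500000000.  Tagging
// is therefore a left shift by 32 and untagging an arithmetic (or, for
// known-positive values, logical) right shift by 32.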
999 bool MacroAssembler::IsUnsafeInt(const int32_t x) {
1000 static const int kMaxBits = 17;
1001 return !is_intn(x, kMaxBits);
1002 }
1003
1004
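// A constant needing more than kMaxBits bits (see IsUnsafeInt above) could be
// chosen by an attacker so that its bytes double as machine code (JIT
// spraying).  SafeMove and SafePush therefore emit such constants XORed with
// the per-isolate jit_cookie() and undo the XOR at run time.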
1005 void MacroAssembler::SafeMove(Register dst, Smi* src) {
1006 ASSERT(!dst.is(kScratchRegister));
1007 ASSERT(SmiValuesAre32Bits()); // JIT cookie can be converted to Smi.
1008 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
1009 Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
1010 Move(kScratchRegister, Smi::FromInt(jit_cookie()));
1011 xor_(dst, kScratchRegister);
1012 } else {
1013 Move(dst, src);
1014 }
1015 }
1016
1017
1018 void MacroAssembler::SafePush(Smi* src) {
1019 ASSERT(SmiValuesAre32Bits()); // JIT cookie can be converted to Smi.
1020 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
1021 Push(Smi::FromInt(src->value() ^ jit_cookie()));
1022 Move(kScratchRegister, Smi::FromInt(jit_cookie()));
1023 xor_(Operand(rsp, 0), kScratchRegister);
1024 } else {
1025 Push(src);
1026 }
1027 }
1028
1029
1030 Register MacroAssembler::GetSmiConstant(Smi* source) {
1031 int value = source->value();
1032 if (value == 0) {
1033 xorl(kScratchRegister, kScratchRegister);
1034 return kScratchRegister;
1035 }
1036 if (value == 1) {
1037 return kSmiConstantRegister;
1038 }
1039 LoadSmiConstant(kScratchRegister, source);
1040 return kScratchRegister;
1041 }
1042
1043
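// kSmiConstantRegister permanently holds Smi::FromInt(1), so small smi
// constants can be synthesized without a 10-byte movq: e.g. Smi 3 is
// kSmiConstantRegister + kSmiConstantRegister * 2 (a single lea), and
// negative values reuse the positive sequence followed by neg.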
1044 void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
1045 if (emit_debug_code()) {
1046 movq(dst, Smi::FromInt(kSmiConstantRegisterValue), RelocInfo::NONE64);
1047 cmpq(dst, kSmiConstantRegister);
1048 Assert(equal, kUninitializedKSmiConstantRegister);
1049 }
1050 int value = source->value();
1051 if (value == 0) {
1052 xorl(dst, dst);
1053 return;
1054 }
1055 bool negative = value < 0;
1056 unsigned int uvalue = negative ? -value : value;
1057
1058 switch (uvalue) {
1059 case 9:
1060 lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
1061 break;
1062 case 8:
1063 xorl(dst, dst);
1064 lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
1065 break;
1066 case 4:
1067 xorl(dst, dst);
1068 lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
1069 break;
1070 case 5:
1071 lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
1072 break;
1073 case 3:
1074 lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
1075 break;
1076 case 2:
1077 lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
1078 break;
1079 case 1:
1080 movq(dst, kSmiConstantRegister);
1081 break;
1082 case 0:
1083 UNREACHABLE();
1084 return;
1085 default:
1086 movq(dst, source, RelocInfo::NONE64);
1087 return;
1088 }
1089 if (negative) {
1090 neg(dst);
1091 }
1092 }
1093
1094
1095 void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
1096 STATIC_ASSERT(kSmiTag == 0);
1097 if (!dst.is(src)) {
1098 movl(dst, src);
1099 }
1100 shl(dst, Immediate(kSmiShift));
1101 }
1102
1103
1104 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
1105 if (emit_debug_code()) {
1106 testb(dst, Immediate(0x01));
1107 Label ok;
1108 j(zero, &ok, Label::kNear);
1109 Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
1110 bind(&ok);
1111 }
1112 ASSERT(kSmiShift % kBitsPerByte == 0);
1113 movl(Operand(dst, kSmiShift / kBitsPerByte), src);
1114 }
1115
1116
1117 void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
1118 Register src,
1119 int constant) {
1120 if (dst.is(src)) {
1121 addl(dst, Immediate(constant));
1122 } else {
1123 leal(dst, Operand(src, constant));
1124 }
1125 shl(dst, Immediate(kSmiShift));
1126 }
1127
1128
1129 void MacroAssembler::SmiToInteger32(Register dst, Register src) {
1130 STATIC_ASSERT(kSmiTag == 0);
1131 if (!dst.is(src)) {
1132 movq(dst, src);
1133 }
1134 shr(dst, Immediate(kSmiShift));
1135 }
1136
1137
1138 void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
1139 movl(dst, Operand(src, kSmiShift / kBitsPerByte));
1140 }
1141
1142
1143 void MacroAssembler::SmiToInteger64(Register dst, Register src) {
1144 STATIC_ASSERT(kSmiTag == 0);
1145 if (!dst.is(src)) {
1146 movq(dst, src);
1147 }
1148 sar(dst, Immediate(kSmiShift));
1149 }
1150
1151
1152 void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
1153 movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
1154 }
1155
1156
1157 void MacroAssembler::SmiTest(Register src) {
1158 AssertSmi(src);
1159 testq(src, src);
1160 }
1161
1162
1163 void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
1164 AssertSmi(smi1);
1165 AssertSmi(smi2);
1166 cmpq(smi1, smi2);
1167 }
1168
1169
1170 void MacroAssembler::SmiCompare(Register dst, Smi* src) {
1171 AssertSmi(dst);
1172 Cmp(dst, src);
1173 }
1174
1175
1176 void MacroAssembler::Cmp(Register dst, Smi* src) {
1177 ASSERT(!dst.is(kScratchRegister));
1178 if (src->value() == 0) {
1179 testq(dst, dst);
1180 } else {
1181 Register constant_reg = GetSmiConstant(src);
1182 cmpq(dst, constant_reg);
1183 }
1184 }
1185
1186
1187 void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
1188 AssertSmi(dst);
1189 AssertSmi(src);
1190 cmpq(dst, src);
1191 }
1192
1193
1194 void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
1195 AssertSmi(dst);
1196 AssertSmi(src);
1197 cmpq(dst, src);
1198 }
1199
1200
1201 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
1202 AssertSmi(dst);
1203 cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
1204 }
1205
1206
1207 void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
1208 // The Operand cannot use the smi register.
1209 Register smi_reg = GetSmiConstant(src);
1210 ASSERT(!dst.AddressUsesRegister(smi_reg));
1211 cmpq(dst, smi_reg);
1212 }
1213
1214
1215 void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
1216 cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
1217 }
1218
1219
1220 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
1221 Register src,
1222 int power) {
1223 ASSERT(power >= 0);
1224 ASSERT(power < 64);
1225 if (power == 0) {
1226 SmiToInteger64(dst, src);
1227 return;
1228 }
1229 if (!dst.is(src)) {
1230 movq(dst, src);
1231 }
1232 if (power < kSmiShift) {
1233 sar(dst, Immediate(kSmiShift - power));
1234 } else if (power > kSmiShift) {
1235 shl(dst, Immediate(power - kSmiShift));
1236 }
1237 }
1238
1239
1240 void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
1241 Register src,
1242 int power) {
1243 ASSERT((0 <= power) && (power < 32));
1244 if (dst.is(src)) {
1245 shr(dst, Immediate(power + kSmiShift));
1246 } else {
1247 UNIMPLEMENTED(); // Not used.
1248 }
1249 }
1250
1251
1252 void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
1253 Label* on_not_smis,
1254 Label::Distance near_jump) {
1255 if (dst.is(src1) || dst.is(src2)) {
1256 ASSERT(!src1.is(kScratchRegister));
1257 ASSERT(!src2.is(kScratchRegister));
1258 movq(kScratchRegister, src1);
1259 or_(kScratchRegister, src2);
1260 JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
1261 movq(dst, kScratchRegister);
1262 } else {
1263 movq(dst, src1);
1264 or_(dst, src2);
1265 JumpIfNotSmi(dst, on_not_smis, near_jump);
1266 }
1267 }
1268
1269
1270 Condition MacroAssembler::CheckSmi(Register src) {
1271 STATIC_ASSERT(kSmiTag == 0);
1272 testb(src, Immediate(kSmiTagMask));
1273 return zero;
1274 }
1275
1276
1277 Condition MacroAssembler::CheckSmi(const Operand& src) {
1278 STATIC_ASSERT(kSmiTag == 0);
1279 testb(src, Immediate(kSmiTagMask));
1280 return zero;
1281 }
1282
1283
1284 Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
1285 STATIC_ASSERT(kSmiTag == 0);
1286 // Test that both bits of the mask 0x8000000000000001 are zero.
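  // Rotating left by one moves the sign bit down to bit 0 and the smi tag up
  // to bit 1, so a single testb against 3 checks both at once.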
1287 movq(kScratchRegister, src);
1288 rol(kScratchRegister, Immediate(1));
1289 testb(kScratchRegister, Immediate(3));
1290 return zero;
1291 }
1292
1293
1294 Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
1295 if (first.is(second)) {
1296 return CheckSmi(first);
1297 }
1298 STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
1299 leal(kScratchRegister, Operand(first, second, times_1, 0));
1300 testb(kScratchRegister, Immediate(0x03));
1301 return zero;
1302 }
1303
1304
1305 Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
1306 Register second) {
1307 if (first.is(second)) {
1308 return CheckNonNegativeSmi(first);
1309 }
1310 movq(kScratchRegister, first);
1311 or_(kScratchRegister, second);
1312 rol(kScratchRegister, Immediate(1));
1313 testl(kScratchRegister, Immediate(3));
1314 return zero;
1315 }
1316
1317
1318 Condition MacroAssembler::CheckEitherSmi(Register first,
1319 Register second,
1320 Register scratch) {
1321 if (first.is(second)) {
1322 return CheckSmi(first);
1323 }
1324 if (scratch.is(second)) {
1325 andl(scratch, first);
1326 } else {
1327 if (!scratch.is(first)) {
1328 movl(scratch, first);
1329 }
1330 andl(scratch, second);
1331 }
1332 testb(scratch, Immediate(kSmiTagMask));
1333 return zero;
1334 }
1335
1336
1337 Condition MacroAssembler::CheckIsMinSmi(Register src) {
1338 ASSERT(!src.is(kScratchRegister));
1339 // If we overflow by subtracting one, it's the minimal smi value.
1340 cmpq(src, kSmiConstantRegister);
1341 return overflow;
1342 }
1343
1344
1345 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
1346 // A 32-bit integer value can always be converted to a smi.
1347 return always;
1348 }
1349
1350
1351 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
1352 // An unsigned 32-bit integer value is valid as long as the high bit
1353 // is not set.
1354 testl(src, src);
1355 return positive;
1356 }
1357
1358
1359 void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
1360 if (dst.is(src)) {
1361 andl(dst, Immediate(kSmiTagMask));
1362 } else {
1363 movl(dst, Immediate(kSmiTagMask));
1364 andl(dst, src);
1365 }
1366 }
1367
1368
1369 void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
1370 if (!(src.AddressUsesRegister(dst))) {
1371 movl(dst, Immediate(kSmiTagMask));
1372 andl(dst, src);
1373 } else {
1374 movl(dst, src);
1375 andl(dst, Immediate(kSmiTagMask));
1376 }
1377 }
1378
1379
1380 void MacroAssembler::JumpIfNotValidSmiValue(Register src,
1381 Label* on_invalid,
1382 Label::Distance near_jump) {
1383 Condition is_valid = CheckInteger32ValidSmiValue(src);
1384 j(NegateCondition(is_valid), on_invalid, near_jump);
1385 }
1386
1387
1388 void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
1389 Label* on_invalid,
1390 Label::Distance near_jump) {
1391 Condition is_valid = CheckUInteger32ValidSmiValue(src);
1392 j(NegateCondition(is_valid), on_invalid, near_jump);
1393 }
1394
1395
1396 void MacroAssembler::JumpIfSmi(Register src,
1397 Label* on_smi,
1398 Label::Distance near_jump) {
1399 Condition smi = CheckSmi(src);
1400 j(smi, on_smi, near_jump);
1401 }
1402
1403
1404 void MacroAssembler::JumpIfNotSmi(Register src,
1405 Label* on_not_smi,
1406 Label::Distance near_jump) {
1407 Condition smi = CheckSmi(src);
1408 j(NegateCondition(smi), on_not_smi, near_jump);
1409 }
1410
1411
1412 void MacroAssembler::JumpUnlessNonNegativeSmi(
1413 Register src, Label* on_not_smi_or_negative,
1414 Label::Distance near_jump) {
1415 Condition non_negative_smi = CheckNonNegativeSmi(src);
1416 j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
1417 }
1418
1419
1420 void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
1421 Smi* constant,
1422 Label* on_equals,
1423 Label::Distance near_jump) {
1424 SmiCompare(src, constant);
1425 j(equal, on_equals, near_jump);
1426 }
1427
1428
1429 void MacroAssembler::JumpIfNotBothSmi(Register src1,
1430 Register src2,
1431 Label* on_not_both_smi,
1432 Label::Distance near_jump) {
1433 Condition both_smi = CheckBothSmi(src1, src2);
1434 j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1435 }
1436
1437
1438 void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
1439 Register src2,
1440 Label* on_not_both_smi,
1441 Label::Distance near_jump) {
1442 Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
1443 j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1444 }
1445
1446
1447 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
1448 if (constant->value() == 0) {
1449 if (!dst.is(src)) {
1450 movq(dst, src);
1451 }
1452 return;
1453 } else if (dst.is(src)) {
1454 ASSERT(!dst.is(kScratchRegister));
1455 switch (constant->value()) {
1456 case 1:
1457 addq(dst, kSmiConstantRegister);
1458 return;
1459 case 2:
1460 lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
1461 return;
1462 case 4:
1463 lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
1464 return;
1465 case 8:
1466 lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
1467 return;
1468 default:
1469 Register constant_reg = GetSmiConstant(constant);
1470 addq(dst, constant_reg);
1471 return;
1472 }
1473 } else {
1474 switch (constant->value()) {
1475 case 1:
1476 lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
1477 return;
1478 case 2:
1479 lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
1480 return;
1481 case 4:
1482 lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
1483 return;
1484 case 8:
1485 lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
1486 return;
1487 default:
1488 LoadSmiConstant(dst, constant);
1489 addq(dst, src);
1490 return;
1491 }
1492 }
1493 }
1494
1495
1496 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
1497 if (constant->value() != 0) {
1498 addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
1499 }
1500 }
1501
1502
1503 void MacroAssembler::SmiAddConstant(Register dst,
1504 Register src,
1505 Smi* constant,
1506 SmiOperationExecutionMode mode,
1507 Label* bailout_label,
1508 Label::Distance near_jump) {
1509 if (constant->value() == 0) {
1510 if (!dst.is(src)) {
1511 movq(dst, src);
1512 }
1513 } else if (dst.is(src)) {
1514 ASSERT(!dst.is(kScratchRegister));
1515 LoadSmiConstant(kScratchRegister, constant);
1516 addq(dst, kScratchRegister);
1517 if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
1518 j(no_overflow, bailout_label, near_jump);
1519 ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
1520 subq(dst, kScratchRegister);
1521 } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
1522 if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
1523 Label done;
1524 j(no_overflow, &done, Label::kNear);
1525 subq(dst, kScratchRegister);
1526 jmp(bailout_label, near_jump);
1527 bind(&done);
1528 } else {
1529 // Bail out on overflow without preserving src.
1530 j(overflow, bailout_label, near_jump);
1531 }
1532 } else {
1533 CHECK(mode.IsEmpty());
1534 }
1535 } else {
1536 ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
1537 ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
1538 LoadSmiConstant(dst, constant);
1539 addq(dst, src);
1540 j(overflow, bailout_label, near_jump);
1541 }
1542 }
1543
1544
1545 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
1546 if (constant->value() == 0) {
1547 if (!dst.is(src)) {
1548 movq(dst, src);
1549 }
1550 } else if (dst.is(src)) {
1551 ASSERT(!dst.is(kScratchRegister));
1552 Register constant_reg = GetSmiConstant(constant);
1553 subq(dst, constant_reg);
1554 } else {
1555 if (constant->value() == Smi::kMinValue) {
1556 LoadSmiConstant(dst, constant);
1557 // Adding and subtracting the min-value gives the same result; it only
1558 // differs in the overflow flag, which we don't check here.
1559 addq(dst, src);
1560 } else {
1561 // Subtract by adding the negation.
1562 LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
1563 addq(dst, src);
1564 }
1565 }
1566 }
1567
1568
1569 void MacroAssembler::SmiSubConstant(Register dst,
1570 Register src,
1571 Smi* constant,
1572 SmiOperationExecutionMode mode,
1573 Label* bailout_label,
1574 Label::Distance near_jump) {
1575 if (constant->value() == 0) {
1576 if (!dst.is(src)) {
1577 movq(dst, src);
1578 }
1579 } else if (dst.is(src)) {
1580 ASSERT(!dst.is(kScratchRegister));
1581 LoadSmiConstant(kScratchRegister, constant);
1582 subq(dst, kScratchRegister);
1583 if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
1584 j(no_overflow, bailout_label, near_jump);
1585 ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
1586 addq(dst, kScratchRegister);
1587 } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
1588 if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
1589 Label done;
1590 j(no_overflow, &done, Label::kNear);
1591 addq(dst, kScratchRegister);
1592 jmp(bailout_label, near_jump);
1593 bind(&done);
1594 } else {
1595 // Bail out on overflow without preserving src.
1596 j(overflow, bailout_label, near_jump);
1597 }
1598 } else {
1599 CHECK(mode.IsEmpty());
1600 }
1601 } else {
1602 ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
1603 ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
1604 if (constant->value() == Smi::kMinValue) {
1605 ASSERT(!dst.is(kScratchRegister));
1606 movq(dst, src);
1607 LoadSmiConstant(kScratchRegister, constant);
1608 subq(dst, kScratchRegister);
1609 j(overflow, bailout_label, near_jump);
1610 } else {
1611 // Subtract by adding the negation.
1612 LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
1613 addq(dst, src);
1614 j(overflow, bailout_label, near_jump);
1615 }
1616 }
1617 }
1618
1619
1620 void MacroAssembler::SmiNeg(Register dst,
1621 Register src,
1622 Label* on_smi_result,
1623 Label::Distance near_jump) {
1624 if (dst.is(src)) {
1625 ASSERT(!dst.is(kScratchRegister));
1626 movq(kScratchRegister, src);
1627 neg(dst); // Low 32 bits are retained as zero by negation.
1628 // Test if result is zero or Smi::kMinValue.
1629 cmpq(dst, kScratchRegister);
1630 j(not_equal, on_smi_result, near_jump);
1631 movq(src, kScratchRegister);
1632 } else {
1633 movq(dst, src);
1634 neg(dst);
1635 cmpq(dst, src);
1636 // If the result is zero or Smi::kMinValue, negation failed to create a smi.
1637 j(not_equal, on_smi_result, near_jump);
1638 }
1639 }
1640
1641
1642 template<class T>
1643 static void SmiAddHelper(MacroAssembler* masm,
1644 Register dst,
1645 Register src1,
1646 T src2,
1647 Label* on_not_smi_result,
1648 Label::Distance near_jump) {
1649 if (dst.is(src1)) {
1650 Label done;
1651 masm->addq(dst, src2);
1652 masm->j(no_overflow, &done, Label::kNear);
1653 // Restore src1.
1654 masm->subq(dst, src2);
1655 masm->jmp(on_not_smi_result, near_jump);
1656 masm->bind(&done);
1657 } else {
1658 masm->movq(dst, src1);
1659 masm->addq(dst, src2);
1660 masm->j(overflow, on_not_smi_result, near_jump);
1661 }
1662 }
1663
1664
1665 void MacroAssembler::SmiAdd(Register dst,
1666 Register src1,
1667 Register src2,
1668 Label* on_not_smi_result,
1669 Label::Distance near_jump) {
1670 ASSERT_NOT_NULL(on_not_smi_result);
1671 ASSERT(!dst.is(src2));
1672 SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
1673 }
1674
1675
1676 void MacroAssembler::SmiAdd(Register dst,
1677 Register src1,
1678 const Operand& src2,
1679 Label* on_not_smi_result,
1680 Label::Distance near_jump) {
1681 ASSERT_NOT_NULL(on_not_smi_result);
1682 ASSERT(!src2.AddressUsesRegister(dst));
1683 SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
1684 }
1685
1686
1687 void MacroAssembler::SmiAdd(Register dst,
1688 Register src1,
1689 Register src2) {
1690 // No overflow checking. Use only when it's known that
1691 // overflowing is impossible.
1692 if (!dst.is(src1)) {
1693 if (emit_debug_code()) {
1694 movq(kScratchRegister, src1);
1695 addq(kScratchRegister, src2);
1696 Check(no_overflow, kSmiAdditionOverflow);
1697 }
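// lea computes src1 + src2 in a single instruction without clobbering
// either source register (and without affecting the flags).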
1698 lea(dst, Operand(src1, src2, times_1, 0));
1699 } else {
1700 addq(dst, src2);
1701 Assert(no_overflow, kSmiAdditionOverflow);
1702 }
1703 }
1704
1705
1706 template<class T>
1707 static void SmiSubHelper(MacroAssembler* masm,
1708 Register dst,
1709 Register src1,
1710 T src2,
1711 Label* on_not_smi_result,
1712 Label::Distance near_jump) {
1713 if (dst.is(src1)) {
1714 Label done;
1715 masm->subq(dst, src2);
1716 masm->j(no_overflow, &done, Label::kNear);
1717 // Restore src1.
1718 masm->addq(dst, src2);
1719 masm->jmp(on_not_smi_result, near_jump);
1720 masm->bind(&done);
1721 } else {
1722 masm->movq(dst, src1);
1723 masm->subq(dst, src2);
1724 masm->j(overflow, on_not_smi_result, near_jump);
1725 }
1726 }
1727
1728
1729 void MacroAssembler::SmiSub(Register dst,
1730 Register src1,
1731 Register src2,
1732 Label* on_not_smi_result,
1733 Label::Distance near_jump) {
1734 ASSERT_NOT_NULL(on_not_smi_result);
1735 ASSERT(!dst.is(src2));
1736 SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
1737 }
1738
1739
1740 void MacroAssembler::SmiSub(Register dst,
1741 Register src1,
1742 const Operand& src2,
1743 Label* on_not_smi_result,
1744 Label::Distance near_jump) {
1745 ASSERT_NOT_NULL(on_not_smi_result);
1746 ASSERT(!src2.AddressUsesRegister(dst));
1747 SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
1748 }
1749
1750
1751 template<class T>
1752 static void SmiSubNoOverflowHelper(MacroAssembler* masm,
1753 Register dst,
1754 Register src1,
1755 T src2) {
1756 // No overflow checking. Use only when it's known that
1757 // overflowing is impossible (e.g., subtracting two positive smis).
1758 if (!dst.is(src1)) {
1759 masm->movq(dst, src1);
1760 }
1761 masm->subq(dst, src2);
1762 masm->Assert(no_overflow, kSmiSubtractionOverflow);
1763 }
1764
1765
1766 void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
1767 ASSERT(!dst.is(src2));
1768 SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
1769 }
1770
1771
1772 void MacroAssembler::SmiSub(Register dst,
1773 Register src1,
1774 const Operand& src2) {
1775 SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
1776 }
1777
1778
1779 void MacroAssembler::SmiMul(Register dst,
1780 Register src1,
1781 Register src2,
1782 Label* on_not_smi_result,
1783 Label::Distance near_jump) {
1784 ASSERT(!dst.is(src2));
1785 ASSERT(!dst.is(kScratchRegister));
1786 ASSERT(!src1.is(kScratchRegister));
1787 ASSERT(!src2.is(kScratchRegister));
1788
1789 if (dst.is(src1)) {
1790 Label failure, zero_correct_result;
1791 movq(kScratchRegister, src1); // Create backup for later testing.
1792 SmiToInteger64(dst, src1);
1793 imul(dst, src2);
1794 j(overflow, &failure, Label::kNear);
1795
1796 // Check for negative zero result. If product is zero, and one
1797 // argument is negative, go to slow case.
1798 Label correct_result;
1799 testq(dst, dst);
1800 j(not_zero, &correct_result, Label::kNear);
1801
1802 movq(dst, kScratchRegister);
1803 xor_(dst, src2);
1804 // Result was positive zero.
1805 j(positive, &zero_correct_result, Label::kNear);
1806
1807 bind(&failure); // Reused failure exit, restores src1.
1808 movq(src1, kScratchRegister);
1809 jmp(on_not_smi_result, near_jump);
1810
1811 bind(&zero_correct_result);
1812 Set(dst, 0);
1813
1814 bind(&correct_result);
1815 } else {
1816 SmiToInteger64(dst, src1);
1817 imul(dst, src2);
1818 j(overflow, on_not_smi_result, near_jump);
1819 // Check for negative zero result. If product is zero, and one
1820 // argument is negative, go to slow case.
1821 Label correct_result;
1822 testq(dst, dst);
1823 j(not_zero, &correct_result, Label::kNear);
1824 // One of src1 and src2 is zero, so check whether the other one is
1825 // negative.
1826 movq(kScratchRegister, src1);
1827 xor_(kScratchRegister, src2);
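// One operand is the smi zero, so the xor reproduces the other operand and
// the sign flag now reflects its sign; a zero product with a negative
// operand would have to be -0.0, which cannot be represented as a smi.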
1828 j(negative, on_not_smi_result, near_jump);
1829 bind(&correct_result);
1830 }
1831 }
1832
1833
1834 void MacroAssembler::SmiDiv(Register dst,
1835 Register src1,
1836 Register src2,
1837 Label* on_not_smi_result,
1838 Label::Distance near_jump) {
1839 ASSERT(!src1.is(kScratchRegister));
1840 ASSERT(!src2.is(kScratchRegister));
1841 ASSERT(!dst.is(kScratchRegister));
1842 ASSERT(!src2.is(rax));
1843 ASSERT(!src2.is(rdx));
1844 ASSERT(!src1.is(rdx));
1845
1846 // Check for 0 divisor (result is +/-Infinity).
1847 testq(src2, src2);
1848 j(zero, on_not_smi_result, near_jump);
1849
1850 if (src1.is(rax)) {
1851 movq(kScratchRegister, src1);
1852 }
1853 SmiToInteger32(rax, src1);
1854 // We need to rule out dividing Smi::kMinValue by -1, since that would
1855 // overflow in idiv and raise an exception.
1856 // We combine this with negative zero test (negative zero only happens
1857 // when dividing zero by a negative number).
1858
1859 // We overshoot a little and go to slow case if we divide min-value
1860 // by any negative value, not just -1.
1861 Label safe_div;
1862 testl(rax, Immediate(0x7fffffff));
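// rax & 0x7fffffff is zero only when the untagged dividend is 0 or
// 0x80000000 (Smi::kMinValue), the two values that need the extra divisor
// sign check below.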
1863 j(not_zero, &safe_div, Label::kNear);
1864 testq(src2, src2);
1865 if (src1.is(rax)) {
1866 j(positive, &safe_div, Label::kNear);
1867 movq(src1, kScratchRegister);
1868 jmp(on_not_smi_result, near_jump);
1869 } else {
1870 j(negative, on_not_smi_result, near_jump);
1871 }
1872 bind(&safe_div);
1873
1874 SmiToInteger32(src2, src2);
1875 // Sign extend src1 into edx:eax.
1876 cdq();
1877 idivl(src2);
1878 Integer32ToSmi(src2, src2);
1879 // Check that the remainder is zero.
1880 testl(rdx, rdx);
1881 if (src1.is(rax)) {
1882 Label smi_result;
1883 j(zero, &smi_result, Label::kNear);
1884 movq(src1, kScratchRegister);
1885 jmp(on_not_smi_result, near_jump);
1886 bind(&smi_result);
1887 } else {
1888 j(not_zero, on_not_smi_result, near_jump);
1889 }
1890 if (!dst.is(src1) && src1.is(rax)) {
1891 movq(src1, kScratchRegister);
1892 }
1893 Integer32ToSmi(dst, rax);
1894 }
1895
1896
1897 void MacroAssembler::SmiMod(Register dst,
1898 Register src1,
1899 Register src2,
1900 Label* on_not_smi_result,
1901 Label::Distance near_jump) {
1902 ASSERT(!dst.is(kScratchRegister));
1903 ASSERT(!src1.is(kScratchRegister));
1904 ASSERT(!src2.is(kScratchRegister));
1905 ASSERT(!src2.is(rax));
1906 ASSERT(!src2.is(rdx));
1907 ASSERT(!src1.is(rdx));
1908 ASSERT(!src1.is(src2));
1909
1910 testq(src2, src2);
1911 j(zero, on_not_smi_result, near_jump);
1912
1913 if (src1.is(rax)) {
1914 movq(kScratchRegister, src1);
1915 }
1916 SmiToInteger32(rax, src1);
1917 SmiToInteger32(src2, src2);
1918
1919 // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
1920 Label safe_div;
1921 cmpl(rax, Immediate(Smi::kMinValue));
1922 j(not_equal, &safe_div, Label::kNear);
1923 cmpl(src2, Immediate(-1));
1924 j(not_equal, &safe_div, Label::kNear);
1925 // Retag inputs and go slow case.
1926 Integer32ToSmi(src2, src2);
1927 if (src1.is(rax)) {
1928 movq(src1, kScratchRegister);
1929 }
1930 jmp(on_not_smi_result, near_jump);
1931 bind(&safe_div);
1932
1933 // Sign extend eax into edx:eax.
1934 cdq();
1935 idivl(src2);
1936 // Restore smi tags on inputs.
1937 Integer32ToSmi(src2, src2);
1938 if (src1.is(rax)) {
1939 movq(src1, kScratchRegister);
1940 }
1941 // Check for a negative zero result. If the result is zero, and the
1942 // dividend is negative, go slow to return a floating point negative zero.
1943 Label smi_result;
1944 testl(rdx, rdx);
1945 j(not_zero, &smi_result, Label::kNear);
1946 testq(src1, src1);
1947 j(negative, on_not_smi_result, near_jump);
1948 bind(&smi_result);
1949 Integer32ToSmi(dst, rdx);
1950 }
1951
1952
1953 void MacroAssembler::SmiNot(Register dst, Register src) {
1954 ASSERT(!dst.is(kScratchRegister));
1955 ASSERT(!src.is(kScratchRegister));
1956 // Set tag and padding bits before negating, so that they are zero afterwards.
1957 movl(kScratchRegister, Immediate(~0));
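// movl zero-extends, so kScratchRegister holds 0x00000000FFFFFFFF: exactly
// the tag and padding bits of a smi (the payload lives in the upper half).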
1958 if (dst.is(src)) {
1959 xor_(dst, kScratchRegister);
1960 } else {
1961 lea(dst, Operand(src, kScratchRegister, times_1, 0));
1962 }
1963 not_(dst);
1964 }
1965
1966
1967 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
1968 ASSERT(!dst.is(src2));
1969 if (!dst.is(src1)) {
1970 movq(dst, src1);
1971 }
1972 and_(dst, src2);
1973 }
1974
1975
1976 void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
1977 if (constant->value() == 0) {
1978 Set(dst, 0);
1979 } else if (dst.is(src)) {
1980 ASSERT(!dst.is(kScratchRegister));
1981 Register constant_reg = GetSmiConstant(constant);
1982 and_(dst, constant_reg);
1983 } else {
1984 LoadSmiConstant(dst, constant);
1985 and_(dst, src);
1986 }
1987 }
1988
1989
1990 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
1991 if (!dst.is(src1)) {
1992 ASSERT(!src1.is(src2));
1993 movq(dst, src1);
1994 }
1995 or_(dst, src2);
1996 }
1997
1998
1999 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
2000 if (dst.is(src)) {
2001 ASSERT(!dst.is(kScratchRegister));
2002 Register constant_reg = GetSmiConstant(constant);
2003 or_(dst, constant_reg);
2004 } else {
2005 LoadSmiConstant(dst, constant);
2006 or_(dst, src);
2007 }
2008 }
2009
2010
2011 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
2012 if (!dst.is(src1)) {
2013 ASSERT(!src1.is(src2));
2014 movq(dst, src1);
2015 }
2016 xor_(dst, src2);
2017 }
2018
2019
2020 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
2021 if (dst.is(src)) {
2022 ASSERT(!dst.is(kScratchRegister));
2023 Register constant_reg = GetSmiConstant(constant);
2024 xor_(dst, constant_reg);
2025 } else {
2026 LoadSmiConstant(dst, constant);
2027 xor_(dst, src);
2028 }
2029 }
2030
2031
2032 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
2033 Register src,
2034 int shift_value) {
2035 ASSERT(is_uint5(shift_value));
2036 if (shift_value > 0) {
2037 if (dst.is(src)) {
2038 sar(dst, Immediate(shift_value + kSmiShift));
2039 shl(dst, Immediate(kSmiShift));
2040 } else {
2041 UNIMPLEMENTED(); // Not used.
2042 }
2043 }
2044 }
2045
2046
2047 void MacroAssembler::SmiShiftLeftConstant(Register dst,
2048 Register src,
2049 int shift_value) {
2050 if (!dst.is(src)) {
2051 movq(dst, src);
2052 }
2053 if (shift_value > 0) {
2054 shl(dst, Immediate(shift_value));
2055 }
2056 }
2057
2058
2059 void MacroAssembler::SmiShiftLogicalRightConstant(
2060 Register dst, Register src, int shift_value,
2061 Label* on_not_smi_result, Label::Distance near_jump) {
2062 // Logical right shift interprets its result as an *unsigned* number.
2063 if (dst.is(src)) {
2064 UNIMPLEMENTED(); // Not used.
2065 } else {
2066 movq(dst, src);
2067 if (shift_value == 0) {
2068 testq(dst, dst);
2069 j(negative, on_not_smi_result, near_jump);
2070 }
2071 shr(dst, Immediate(shift_value + kSmiShift));
2072 shl(dst, Immediate(kSmiShift));
2073 }
2074 }
2075
2076
2077 void MacroAssembler::SmiShiftLeft(Register dst,
2078 Register src1,
2079 Register src2) {
2080 ASSERT(!dst.is(rcx));
2081 // Untag shift amount.
2082 if (!dst.is(src1)) {
2083 movq(dst, src1);
2084 }
2085 SmiToInteger32(rcx, src2);
2086 // The shift amount is given by the lower 5 bits, not six as for the shl opcode.
2087 and_(rcx, Immediate(0x1f));
2088 shl_cl(dst);
2089 }
2090
2091
2092 void MacroAssembler::SmiShiftLogicalRight(Register dst,
2093 Register src1,
2094 Register src2,
2095 Label* on_not_smi_result,
2096 Label::Distance near_jump) {
2097 ASSERT(!dst.is(kScratchRegister));
2098 ASSERT(!src1.is(kScratchRegister));
2099 ASSERT(!src2.is(kScratchRegister));
2100 ASSERT(!dst.is(rcx));
2101 // dst and src1 can be the same, because the one case that bails out
2102 // is a shift by 0, which leaves dst, and therefore src1, unchanged.
2103 if (src1.is(rcx) || src2.is(rcx)) {
2104 movq(kScratchRegister, rcx);
2105 }
2106 if (!dst.is(src1)) {
2107 movq(dst, src1);
2108 }
2109 SmiToInteger32(rcx, src2);
2110 orl(rcx, Immediate(kSmiShift));
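// Folding kSmiShift into the count untags and shifts in a single shr:
// 64-bit shifts mask the count to 6 bits, so the effective amount is
// (rcx & 0x1f) + 32, as noted below.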
2111 shr_cl(dst); // Shift is (original rcx & 0x1f) + 32.
2112 shl(dst, Immediate(kSmiShift));
2113 testq(dst, dst);
2114 if (src1.is(rcx) || src2.is(rcx)) {
2115 Label positive_result;
2116 j(positive, &positive_result, Label::kNear);
2117 if (src1.is(rcx)) {
2118 movq(src1, kScratchRegister);
2119 } else {
2120 movq(src2, kScratchRegister);
2121 }
2122 jmp(on_not_smi_result, near_jump);
2123 bind(&positive_result);
2124 } else {
2125 // src2 was zero and src1 negative.
2126 j(negative, on_not_smi_result, near_jump);
2127 }
2128 }
2129
2130
2131 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
2132 Register src1,
2133 Register src2) {
2134 ASSERT(!dst.is(kScratchRegister));
2135 ASSERT(!src1.is(kScratchRegister));
2136 ASSERT(!src2.is(kScratchRegister));
2137 ASSERT(!dst.is(rcx));
2138 if (src1.is(rcx)) {
2139 movq(kScratchRegister, src1);
2140 } else if (src2.is(rcx)) {
2141 movq(kScratchRegister, src2);
2142 }
2143 if (!dst.is(src1)) {
2144 movq(dst, src1);
2145 }
2146 SmiToInteger32(rcx, src2);
2147 orl(rcx, Immediate(kSmiShift));
2148 sar_cl(dst); // Shift is 32 + (original rcx & 0x1f).
2149 shl(dst, Immediate(kSmiShift));
2150 if (src1.is(rcx)) {
2151 movq(src1, kScratchRegister);
2152 } else if (src2.is(rcx)) {
2153 movq(src2, kScratchRegister);
2154 }
2155 }
2156
2157
2158 void MacroAssembler::SelectNonSmi(Register dst,
2159 Register src1,
2160 Register src2,
2161 Label* on_not_smis,
2162 Label::Distance near_jump) {
2163 ASSERT(!dst.is(kScratchRegister));
2164 ASSERT(!src1.is(kScratchRegister));
2165 ASSERT(!src2.is(kScratchRegister));
2166 ASSERT(!dst.is(src1));
2167 ASSERT(!dst.is(src2));
2168 // The operands must not both be smis.
2169 #ifdef DEBUG
2170 Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
2171 Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
2172 #endif
2173 STATIC_ASSERT(kSmiTag == 0);
2174 ASSERT_EQ(0, Smi::FromInt(0));
2175 movl(kScratchRegister, Immediate(kSmiTagMask));
2176 and_(kScratchRegister, src1);
2177 testl(kScratchRegister, src2);
2178 // If non-zero then neither operand is a smi.
2179 j(not_zero, on_not_smis, near_jump);
2180
2181 // Exactly one operand is a smi.
2182 ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
2183 // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
2184 subq(kScratchRegister, Immediate(1));
2185 // If src1 is a smi, the scratch register is now all 1s, else it is all 0s.
2186 movq(dst, src1);
2187 xor_(dst, src2);
2188 and_(dst, kScratchRegister);
2189 // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
2190 xor_(dst, src1);
2191 // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
2192 }
2193
2194
2195 SmiIndex MacroAssembler::SmiToIndex(Register dst,
2196 Register src,
2197 int shift) {
2198 ASSERT(is_uint6(shift));
2199 // There is a possible optimization if shift is in the range 60-63, but that
2200 // will (and must) never happen.
2201 if (!dst.is(src)) {
2202 movq(dst, src);
2203 }
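// A smi stores its value shifted left by kSmiShift, so value << shift is
// obtained by shifting right by (kSmiShift - shift) or left by
// (shift - kSmiShift), whichever applies.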
2204 if (shift < kSmiShift) {
2205 sar(dst, Immediate(kSmiShift - shift));
2206 } else {
2207 shl(dst, Immediate(shift - kSmiShift));
2208 }
2209 return SmiIndex(dst, times_1);
2210 }
2211
2212 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
2213 Register src,
2214 int shift) {
2215 // Register src holds a positive smi.
2216 ASSERT(is_uint6(shift));
2217 if (!dst.is(src)) {
2218 movq(dst, src);
2219 }
2220 neg(dst);
2221 if (shift < kSmiShift) {
2222 sar(dst, Immediate(kSmiShift - shift));
2223 } else {
2224 shl(dst, Immediate(shift - kSmiShift));
2225 }
2226 return SmiIndex(dst, times_1);
2227 }
2228
2229
2230 void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
2231 ASSERT_EQ(0, kSmiShift % kBitsPerByte);
2232 addl(dst, Operand(src, kSmiShift / kBitsPerByte));
2233 }
2234
2235
2236 void MacroAssembler::Push(Smi* source) {
2237 intptr_t smi = reinterpret_cast<intptr_t>(source);
2238 if (is_int32(smi)) {
2239 push(Immediate(static_cast<int32_t>(smi)));
2240 } else {
2241 Register constant = GetSmiConstant(source);
2242 push(constant);
2243 }
2244 }
2245
2246
2247 void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) {
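// Split the 64-bit value into two smis: the upper half is pushed first,
// then the lower half, each held in the smi payload bits so that
// PopInt64AsTwoSmis can reassemble the original value.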
2248 movq(scratch, src);
2249 // High bits.
2250 shr(src, Immediate(64 - kSmiShift));
2251 shl(src, Immediate(kSmiShift));
2252 push(src);
2253 // Low bits.
2254 shl(scratch, Immediate(kSmiShift));
2255 push(scratch);
2256 }
2257
2258
2259 void MacroAssembler::PopInt64AsTwoSmis(Register dst, Register scratch) {
2260 pop(scratch);
2261 // Low bits.
2262 shr(scratch, Immediate(kSmiShift));
2263 pop(dst);
2264 shr(dst, Immediate(kSmiShift));
2265 // High bits.
2266 shl(dst, Immediate(64 - kSmiShift));
2267 or_(dst, scratch);
2268 }
2269
2270
2271 void MacroAssembler::Test(const Operand& src, Smi* source) {
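// The 32-bit smi payload sits in the upper half of the word, so offset the
// operand by kIntSize and compare only that half with the untagged value.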
2272 testl(Operand(src, kIntSize), Immediate(source->value()));
2273 }
2274
2275
2276 // ----------------------------------------------------------------------------
2277
2278
2279 void MacroAssembler::LookupNumberStringCache(Register object,
2280 Register result,
2281 Register scratch1,
2282 Register scratch2,
2283 Label* not_found) {
2284 // Use of registers. Register result is used as a temporary.
2285 Register number_string_cache = result;
2286 Register mask = scratch1;
2287 Register scratch = scratch2;
2288
2289 // Load the number string cache.
2290 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2291
2292 // Make the hash mask from the length of the number string cache. It
2293 // contains two elements (number and string) for each cache entry.
2294 SmiToInteger32(
2295 mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
2296 shrl(mask, Immediate(1));
2297 subq(mask, Immediate(1)); // Make mask.
2298
2299 // Calculate the entry in the number string cache. The hash value in the
2300 // number string cache for smis is just the smi value, and the hash for
2301 // doubles is the xor of the upper and lower words. See
2302 // Heap::GetNumberStringCache.
2303 Label is_smi;
2304 Label load_result_from_cache;
2305 JumpIfSmi(object, &is_smi);
2306 CheckMap(object,
2307 isolate()->factory()->heap_number_map(),
2308 not_found,
2309 DONT_DO_SMI_CHECK);
2310
2311 STATIC_ASSERT(8 == kDoubleSize);
2312 movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
2313 xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
2314 and_(scratch, mask);
2315 // Each entry in string cache consists of two pointer sized fields,
2316 // but times_twice_pointer_size (multiplication by 16) scale factor
2317 // is not supported by addrmode on x64 platform.
2318 // So we have to premultiply entry index before lookup.
2319 shl(scratch, Immediate(kPointerSizeLog2 + 1));
2320
2321 Register index = scratch;
2322 Register probe = mask;
2323 movq(probe,
2324 FieldOperand(number_string_cache,
2325 index,
2326 times_1,
2327 FixedArray::kHeaderSize));
2328 JumpIfSmi(probe, not_found);
2329 movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
2330 ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
2331 j(parity_even, not_found); // Bail out if NaN is involved.
2332 j(not_equal, not_found); // The cache did not contain this value.
2333 jmp(&load_result_from_cache);
2334
2335 bind(&is_smi);
2336 SmiToInteger32(scratch, object);
2337 and_(scratch, mask);
2338 // Each entry in string cache consists of two pointer sized fields,
2339 // but times_twice_pointer_size (multiplication by 16) scale factor
2340 // is not supported by addrmode on x64 platform.
2341 // So we have to premultiply entry index before lookup.
2342 shl(scratch, Immediate(kPointerSizeLog2 + 1));
2343
2344 // Check if the entry is the smi we are looking for.
2345 cmpq(object,
2346 FieldOperand(number_string_cache,
2347 index,
2348 times_1,
2349 FixedArray::kHeaderSize));
2350 j(not_equal, not_found);
2351
2352 // Get the result from the cache.
2353 bind(&load_result_from_cache);
2354 movq(result,
2355 FieldOperand(number_string_cache,
2356 index,
2357 times_1,
2358 FixedArray::kHeaderSize + kPointerSize));
2359 IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
2360 }
2361
2362
2363 void MacroAssembler::JumpIfNotString(Register object,
2364 Register object_map,
2365 Label* not_string,
2366 Label::Distance near_jump) {
2367 Condition is_smi = CheckSmi(object);
2368 j(is_smi, not_string, near_jump);
2369 CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
2370 j(above_equal, not_string, near_jump);
2371 }
2372
2373
2374 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
2375 Register first_object,
2376 Register second_object,
2377 Register scratch1,
2378 Register scratch2,
2379 Label* on_fail,
2380 Label::Distance near_jump) {
2381 // Check that both objects are not smis.
2382 Condition either_smi = CheckEitherSmi(first_object, second_object);
2383 j(either_smi, on_fail, near_jump);
2384
2385 // Load instance type for both strings.
2386 movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
2387 movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
2388 movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
2389 movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
2390
2391 // Check that both are flat ASCII strings.
2392 ASSERT(kNotStringTag != 0);
2393 const int kFlatAsciiStringMask =
2394 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2395 const int kFlatAsciiStringTag =
2396 kStringTag | kOneByteStringTag | kSeqStringTag;
2397
2398 andl(scratch1, Immediate(kFlatAsciiStringMask));
2399 andl(scratch2, Immediate(kFlatAsciiStringMask));
2400 // Interleave the bits to check both scratch1 and scratch2 in one test.
2401 ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
2402 lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
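// scratch1 + scratch2 * 8 packs both masked type bytes into one value; the
// ASSERT above guarantees the two bit patterns cannot overlap (so the add
// cannot carry between them), letting a single compare check both strings.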
2403 cmpl(scratch1,
2404 Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
2405 j(not_equal, on_fail, near_jump);
2406 }
2407
2408
2409 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
2410 Register instance_type,
2411 Register scratch,
2412 Label* failure,
2413 Label::Distance near_jump) {
2414 if (!scratch.is(instance_type)) {
2415 movl(scratch, instance_type);
2416 }
2417
2418 const int kFlatAsciiStringMask =
2419 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2420
2421 andl(scratch, Immediate(kFlatAsciiStringMask));
2422 cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
2423 j(not_equal, failure, near_jump);
2424 }
2425
2426
2427 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
2428 Register first_object_instance_type,
2429 Register second_object_instance_type,
2430 Register scratch1,
2431 Register scratch2,
2432 Label* on_fail,
2433 Label::Distance near_jump) {
2434 // Load instance type for both strings.
2435 movq(scratch1, first_object_instance_type);
2436 movq(scratch2, second_object_instance_type);
2437
2438 // Check that both are flat ASCII strings.
2439 ASSERT(kNotStringTag != 0);
2440 const int kFlatAsciiStringMask =
2441 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2442 const int kFlatAsciiStringTag =
2443 kStringTag | kOneByteStringTag | kSeqStringTag;
2444
2445 andl(scratch1, Immediate(kFlatAsciiStringMask));
2446 andl(scratch2, Immediate(kFlatAsciiStringMask));
2447 // Interleave the bits to check both scratch1 and scratch2 in one test.
2448 ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
2449 lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
2450 cmpl(scratch1,
2451 Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
2452 j(not_equal, on_fail, near_jump);
2453 }
2454
2455
2456 template<class T>
2457 static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
2458 T operand_or_register,
2459 Label* not_unique_name,
2460 Label::Distance distance) {
2461 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2462 Label succeed;
2463 masm->testb(operand_or_register,
2464 Immediate(kIsNotStringMask | kIsNotInternalizedMask));
2465 masm->j(zero, &succeed, Label::kNear);
2466 masm->cmpb(operand_or_register, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
2467 masm->j(not_equal, not_unique_name, distance);
2468
2469 masm->bind(&succeed);
2470 }
2471
2472
2473 void MacroAssembler::JumpIfNotUniqueName(Operand operand,
2474 Label* not_unique_name,
2475 Label::Distance distance) {
2476 JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
2477 }
2478
2479
2480 void MacroAssembler::JumpIfNotUniqueName(Register reg,
2481 Label* not_unique_name,
2482 Label::Distance distance) {
2483 JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
2484 }
2485
2486
2487 void MacroAssembler::Move(Register dst, Register src) {
2488 if (!dst.is(src)) {
2489 movq(dst, src);
2490 }
2491 }
2492
2493
2494 void MacroAssembler::Move(Register dst, Handle<Object> source) {
2495 AllowDeferredHandleDereference smi_check;
2496 if (source->IsSmi()) {
2497 Move(dst, Smi::cast(*source));
2498 } else {
2499 MoveHeapObject(dst, source);
2500 }
2501 }
2502
2503
2504 void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
2505 AllowDeferredHandleDereference smi_check;
2506 if (source->IsSmi()) {
2507 Move(dst, Smi::cast(*source));
2508 } else {
2509 MoveHeapObject(kScratchRegister, source);
2510 movq(dst, kScratchRegister);
2511 }
2512 }
2513
2514
2515 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
2516 AllowDeferredHandleDereference smi_check;
2517 if (source->IsSmi()) {
2518 Cmp(dst, Smi::cast(*source));
2519 } else {
2520 MoveHeapObject(kScratchRegister, source);
2521 cmpq(dst, kScratchRegister);
2522 }
2523 }
2524
2525
2526 void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
2527 AllowDeferredHandleDereference smi_check;
2528 if (source->IsSmi()) {
2529 Cmp(dst, Smi::cast(*source));
2530 } else {
2531 MoveHeapObject(kScratchRegister, source);
2532 cmpq(dst, kScratchRegister);
2533 }
2534 }
2535
2536
2537 void MacroAssembler::Push(Handle<Object> source) {
2538 AllowDeferredHandleDereference smi_check;
2539 if (source->IsSmi()) {
2540 Push(Smi::cast(*source));
2541 } else {
2542 MoveHeapObject(kScratchRegister, source);
2543 push(kScratchRegister);
2544 }
2545 }
2546
2547
2548 void MacroAssembler::MoveHeapObject(Register result,
2549 Handle<Object> object) {
2550 AllowDeferredHandleDereference using_raw_address;
2551 ASSERT(object->IsHeapObject());
2552 if (isolate()->heap()->InNewSpace(*object)) {
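// Objects in new space may move during GC, so embed the address of a cell
// that the GC keeps up to date and load the object through it instead of
// embedding the object address directly.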
2553 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2554 movq(result, cell, RelocInfo::CELL);
2555 movq(result, Operand(result, 0));
2556 } else {
2557 movq(result, object, RelocInfo::EMBEDDED_OBJECT);
2558 }
2559 }
2560
2561
2562 void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
2563 if (dst.is(rax)) {
2564 AllowDeferredHandleDereference embedding_raw_address;
2565 load_rax(cell.location(), RelocInfo::CELL);
2566 } else {
2567 movq(dst, cell, RelocInfo::CELL);
2568 movq(dst, Operand(dst, 0));
2569 }
2570 }
2571
2572
2573 void MacroAssembler::Drop(int stack_elements) {
2574 if (stack_elements > 0) {
2575 addq(rsp, Immediate(stack_elements * kPointerSize));
2576 }
2577 }
2578
2579
2580 void MacroAssembler::TestBit(const Operand& src, int bits) {
2581 int byte_offset = bits / kBitsPerByte;
2582 int bit_in_byte = bits & (kBitsPerByte - 1);
2583 testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
2584 }
2585
2586
2587 void MacroAssembler::Jump(ExternalReference ext) {
2588 LoadAddress(kScratchRegister, ext);
2589 jmp(kScratchRegister);
2590 }
2591
2592
2593 void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
2594 movq(kScratchRegister, destination, rmode);
2595 jmp(kScratchRegister);
2596 }
2597
2598
2599 void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
2600 // TODO(X64): Inline this
2601 jmp(code_object, rmode);
2602 }
2603
2604
2605 int MacroAssembler::CallSize(ExternalReference ext) {
2606 // Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
2607 return LoadAddressSize(ext) +
2608 Assembler::kCallScratchRegisterInstructionLength;
2609 }
2610
2611
2612 void MacroAssembler::Call(ExternalReference ext) {
2613 #ifdef DEBUG
2614 int end_position = pc_offset() + CallSize(ext);
2615 #endif
2616 LoadAddress(kScratchRegister, ext);
2617 call(kScratchRegister);
2618 #ifdef DEBUG
2619 CHECK_EQ(end_position, pc_offset());
2620 #endif
2621 }
2622
2623
2624 void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
2625 #ifdef DEBUG
2626 int end_position = pc_offset() + CallSize(destination, rmode);
2627 #endif
2628 movq(kScratchRegister, destination, rmode);
2629 call(kScratchRegister);
2630 #ifdef DEBUG
2631 CHECK_EQ(pc_offset(), end_position);
2632 #endif
2633 }
2634
2635
2636 void MacroAssembler::Call(Handle<Code> code_object,
2637 RelocInfo::Mode rmode,
2638 TypeFeedbackId ast_id) {
2639 #ifdef DEBUG
2640 int end_position = pc_offset() + CallSize(code_object);
2641 #endif
2642 ASSERT(RelocInfo::IsCodeTarget(rmode) ||
2643 rmode == RelocInfo::CODE_AGE_SEQUENCE);
2644 call(code_object, rmode, ast_id);
2645 #ifdef DEBUG
2646 CHECK_EQ(end_position, pc_offset());
2647 #endif
2648 }
2649
2650
2651 void MacroAssembler::Pushad() {
2652 push(rax);
2653 push(rcx);
2654 push(rdx);
2655 push(rbx);
2656 // Not pushing rsp or rbp.
2657 push(rsi);
2658 push(rdi);
2659 push(r8);
2660 push(r9);
2661 // r10 is kScratchRegister.
2662 push(r11);
2663 // r12 is kSmiConstantRegister.
2664 // r13 is kRootRegister.
2665 push(r14);
2666 push(r15);
2667 STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
2668 // Use lea for symmetry with Popad.
2669 int sp_delta =
2670 (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
2671 lea(rsp, Operand(rsp, -sp_delta));
2672 }
2673
2674
2675 void MacroAssembler::Popad() {
2676 // Popad must not change the flags, so use lea instead of addq.
2677 int sp_delta =
2678 (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
2679 lea(rsp, Operand(rsp, sp_delta));
2680 pop(r15);
2681 pop(r14);
2682 pop(r11);
2683 pop(r9);
2684 pop(r8);
2685 pop(rdi);
2686 pop(rsi);
2687 pop(rbx);
2688 pop(rdx);
2689 pop(rcx);
2690 pop(rax);
2691 }
2692
2693
2694 void MacroAssembler::Dropad() {
2695 addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
2696 }
2697
2698
2699 // Order general registers are pushed by Pushad:
2700 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
2701 const int
2702 MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
2703 0,
2704 1,
2705 2,
2706 3,
2707 -1,
2708 -1,
2709 4,
2710 5,
2711 6,
2712 7,
2713 -1,
2714 8,
2715 -1,
2716 -1,
2717 9,
2718 10
2719 };
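// -1 marks registers that Pushad does not save: rsp, rbp, r10
// (kScratchRegister), r12 (kSmiConstantRegister) and r13 (kRootRegister).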
2720
2721
2722 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
2723 const Immediate& imm) {
2724 movq(SafepointRegisterSlot(dst), imm);
2725 }
2726
2727
2728 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
2729 movq(SafepointRegisterSlot(dst), src);
2730 }
2731
2732
2733 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
2734 movq(dst, SafepointRegisterSlot(src));
2735 }
2736
2737
2738 Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
2739 return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
2740 }
2741
2742
2743 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
2744 int handler_index) {
2745 // Adjust this code if not the case.
2746 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
2747 kFPOnStackSize);
2748 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2749 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2750 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2751 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2752 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2753
2754 // We will build up the handler from the bottom by pushing on the stack.
2755 // First push the frame pointer and context.
2756 if (kind == StackHandler::JS_ENTRY) {
2757 // The frame pointer does not point to a JS frame so we save NULL for
2758 // rbp. We expect the code throwing an exception to check rbp before
2759 // dereferencing it to restore the context.
2760 push(Immediate(0)); // NULL frame pointer.
2761 Push(Smi::FromInt(0)); // No context.
2762 } else {
2763 push(rbp);
2764 push(rsi);
2765 }
2766
2767 // Push the state and the code object.
2768 unsigned state =
2769 StackHandler::IndexField::encode(handler_index) |
2770 StackHandler::KindField::encode(kind);
2771 push(Immediate(state));
2772 Push(CodeObject());
2773
2774 // Link the current handler as the next handler.
2775 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
2776 push(ExternalOperand(handler_address));
2777 // Set this new handler as the current one.
2778 movq(ExternalOperand(handler_address), rsp);
2779 }
2780
2781
2782 void MacroAssembler::PopTryHandler() {
2783 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2784 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
2785 pop(ExternalOperand(handler_address));
2786 addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
2787 }
2788
2789
2790 void MacroAssembler::JumpToHandlerEntry() {
2791 // Compute the handler entry address and jump to it. The handler table is
2792 // a fixed array of (smi-tagged) code offsets.
2793 // rax = exception, rdi = code object, rdx = state.
2794 movq(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
2795 shr(rdx, Immediate(StackHandler::kKindWidth));
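// The low kKindWidth bits of the state word encode the handler kind; the
// remaining bits are the handler index, used below to fetch the smi-tagged
// code offset from the handler table.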
2796 movq(rdx,
2797 FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
2798 SmiToInteger64(rdx, rdx);
2799 lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
2800 jmp(rdi);
2801 }
2802
2803
2804 void MacroAssembler::Throw(Register value) {
2805 // Adjust this code if not the case.
2806 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
2807 kFPOnStackSize);
2808 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2809 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2810 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2811 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2812 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2813
2814 // The exception is expected in rax.
2815 if (!value.is(rax)) {
2816 movq(rax, value);
2817 }
2818 // Drop the stack pointer to the top of the top handler.
2819 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
2820 movq(rsp, ExternalOperand(handler_address));
2821 // Restore the next handler.
2822 pop(ExternalOperand(handler_address));
2823
2824 // Remove the code object and state, compute the handler address in rdi.
2825 pop(rdi); // Code object.
2826 pop(rdx); // Offset and state.
2827
2828 // Restore the context and frame pointer.
2829 pop(rsi); // Context.
2830 pop(rbp); // Frame pointer.
2831
2832 // If the handler is a JS frame, restore the context to the frame.
2833 // (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
2834 // rbp or rsi.
2835 Label skip;
2836 testq(rsi, rsi);
2837 j(zero, &skip, Label::kNear);
2838 movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
2839 bind(&skip);
2840
2841 JumpToHandlerEntry();
2842 }
2843
2844
2845 void MacroAssembler::ThrowUncatchable(Register value) {
2846 // Adjust this code if not the case.
2847 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
2848 kFPOnStackSize);
2849 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2850 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2851 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2852 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2853 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2854
2855 // The exception is expected in rax.
2856 if (!value.is(rax)) {
2857 movq(rax, value);
2858 }
2859 // Drop the stack pointer to the top of the top stack handler.
2860 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
2861 Load(rsp, handler_address);
2862
2863 // Unwind the handlers until the top ENTRY handler is found.
2864 Label fetch_next, check_kind;
2865 jmp(&check_kind, Label::kNear);
2866 bind(&fetch_next);
2867 movq(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
2868
2869 bind(&check_kind);
2870 STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
2871 testl(Operand(rsp, StackHandlerConstants::kStateOffset),
2872 Immediate(StackHandler::KindField::kMask));
2873 j(not_zero, &fetch_next);
2874
2875 // Set the top handler address to next handler past the top ENTRY handler.
2876 pop(ExternalOperand(handler_address));
2877
2878 // Remove the code object and state, compute the handler address in rdi.
2879 pop(rdi); // Code object.
2880 pop(rdx); // Offset and state.
2881
2882 // Clear the context pointer and frame pointer (0 was saved in the handler).
2883 pop(rsi);
2884 pop(rbp);
2885
2886 JumpToHandlerEntry();
2887 }
2888
2889
2890 void MacroAssembler::Ret() {
2891 ret(0);
2892 }
2893
2894
2895 void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
2896 if (is_uint16(bytes_dropped)) {
2897 ret(bytes_dropped);
2898 } else {
2899 PopReturnAddressTo(scratch);
2900 addq(rsp, Immediate(bytes_dropped));
2901 PushReturnAddressFrom(scratch);
2902 ret(0);
2903 }
2904 }
2905
2906
2907 void MacroAssembler::FCmp() {
2908 fucomip();
2909 fstp(0);
2910 }
2911
2912
2913 void MacroAssembler::CmpObjectType(Register heap_object,
2914 InstanceType type,
2915 Register map) {
2916 movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
2917 CmpInstanceType(map, type);
2918 }
2919
2920
2921 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
2922 cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
2923 Immediate(static_cast<int8_t>(type)));
2924 }
2925
2926
2927 void MacroAssembler::CheckFastElements(Register map,
2928 Label* fail,
2929 Label::Distance distance) {
2930 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2931 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2932 STATIC_ASSERT(FAST_ELEMENTS == 2);
2933 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2934 cmpb(FieldOperand(map, Map::kBitField2Offset),
2935 Immediate(Map::kMaximumBitField2FastHoleyElementValue));
2936 j(above, fail, distance);
2937 }
2938
2939
2940 void MacroAssembler::CheckFastObjectElements(Register map,
2941 Label* fail,
2942 Label::Distance distance) {
2943 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2944 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2945 STATIC_ASSERT(FAST_ELEMENTS == 2);
2946 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2947 cmpb(FieldOperand(map, Map::kBitField2Offset),
2948 Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
2949 j(below_equal, fail, distance);
2950 cmpb(FieldOperand(map, Map::kBitField2Offset),
2951 Immediate(Map::kMaximumBitField2FastHoleyElementValue));
2952 j(above, fail, distance);
2953 }
2954
2955
2956 void MacroAssembler::CheckFastSmiElements(Register map,
2957 Label* fail,
2958 Label::Distance distance) {
2959 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2960 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2961 cmpb(FieldOperand(map, Map::kBitField2Offset),
2962 Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
2963 j(above, fail, distance);
2964 }
2965
2966
2967 void MacroAssembler::StoreNumberToDoubleElements(
2968 Register maybe_number,
2969 Register elements,
2970 Register index,
2971 XMMRegister xmm_scratch,
2972 Label* fail,
2973 int elements_offset) {
2974 Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
2975
2976 JumpIfSmi(maybe_number, &smi_value, Label::kNear);
2977
2978 CheckMap(maybe_number,
2979 isolate()->factory()->heap_number_map(),
2980 fail,
2981 DONT_DO_SMI_CHECK);
2982
2983 // Double value, canonicalize NaN.
2984 uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
2985 cmpl(FieldOperand(maybe_number, offset),
2986 Immediate(kNaNOrInfinityLowerBoundUpper32));
2987 j(greater_equal, &maybe_nan, Label::kNear);
2988
2989 bind(&not_nan);
2990 movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
2991 bind(&have_double_value);
2992 movsd(FieldOperand(elements, index, times_8,
2993 FixedDoubleArray::kHeaderSize - elements_offset),
2994 xmm_scratch);
2995 jmp(&done);
2996
2997 bind(&maybe_nan);
2998 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
2999 // it's an Infinity, and the non-NaN code path applies.
3000 j(greater, &is_nan, Label::kNear);
3001 cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
3002 j(zero, &not_nan);
3003 bind(&is_nan);
3004 // Convert all NaNs to the same canonical NaN value when they are stored in
3005 // the double array.
3006 Set(kScratchRegister, BitCast<uint64_t>(
3007 FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
3008 movq(xmm_scratch, kScratchRegister);
3009 jmp(&have_double_value, Label::kNear);
3010
3011 bind(&smi_value);
3012 // Value is a smi. Convert to a double and store.
3013 // Preserve original value.
3014 SmiToInteger32(kScratchRegister, maybe_number);
3015 Cvtlsi2sd(xmm_scratch, kScratchRegister);
3016 movsd(FieldOperand(elements, index, times_8,
3017 FixedDoubleArray::kHeaderSize - elements_offset),
3018 xmm_scratch);
3019 bind(&done);
3020 }
3021
3022
3023 void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
3024 Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
3025 }
3026
3027
3028 void MacroAssembler::CheckMap(Register obj,
3029 Handle<Map> map,
3030 Label* fail,
3031 SmiCheckType smi_check_type) {
3032 if (smi_check_type == DO_SMI_CHECK) {
3033 JumpIfSmi(obj, fail);
3034 }
3035
3036 CompareMap(obj, map);
3037 j(not_equal, fail);
3038 }
3039
3040
3041 void MacroAssembler::ClampUint8(Register reg) {
3042 Label done;
3043 testl(reg, Immediate(0xFFFFFF00));
3044 j(zero, &done, Label::kNear);
3045 setcc(negative, reg); // 1 if negative, 0 if positive.
3046 decb(reg); // 0 if negative, 255 if positive.
3047 bind(&done);
3048 }
3049
3050
3051 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
3052 XMMRegister temp_xmm_reg,
3053 Register result_reg) {
3054 Label done;
3055 Label conv_failure;
3056 xorps(temp_xmm_reg, temp_xmm_reg);
3057 cvtsd2si(result_reg, input_reg);
3058 testl(result_reg, Immediate(0xFFFFFF00));
3059 j(zero, &done, Label::kNear);
3060 cmpl(result_reg, Immediate(0x80000000));
3061 j(equal, &conv_failure, Label::kNear);
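// Branchless clamp: the flags still reflect the cmpl above, and 'above' is
// set exactly when the 32-bit result is negative (it reads as a large
// unsigned value). setcc/sub/and then produce 0 for negative inputs and
// 255 for inputs above 255.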
3062 movl(result_reg, Immediate(0));
3063 setcc(above, result_reg);
3064 subl(result_reg, Immediate(1));
3065 andl(result_reg, Immediate(255));
3066 jmp(&done, Label::kNear);
3067 bind(&conv_failure);
3068 Set(result_reg, 0);
3069 ucomisd(input_reg, temp_xmm_reg);
3070 j(below, &done, Label::kNear);
3071 Set(result_reg, 255);
3072 bind(&done);
3073 }
3074
3075
3076 void MacroAssembler::LoadUint32(XMMRegister dst,
3077 Register src,
3078 XMMRegister scratch) {
3079 if (FLAG_debug_code) {
3080 cmpq(src, Immediate(0xffffffff));
3081 Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
3082 }
3083 cvtqsi2sd(dst, src);
3084 }
3085
3086
3087 void MacroAssembler::SlowTruncateToI(Register result_reg,
3088 Register input_reg,
3089 int offset) {
3090 DoubleToIStub stub(input_reg, result_reg, offset, true);
3091 call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
3092 }
3093
3094
3095 void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
3096 Register input_reg) {
3097 Label done;
3098 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3099 cvttsd2siq(result_reg, xmm0);
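// cvttsd2siq yields 0x8000000000000000 (the "integer indefinite" value)
// when the input is NaN or out of range, so that pattern selects the slow
// path below.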
3100 Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
3101 cmpq(result_reg, kScratchRegister);
3102 j(not_equal, &done, Label::kNear);
3103
3104 // Slow case.
3105 if (input_reg.is(result_reg)) {
3106 subq(rsp, Immediate(kDoubleSize));
3107 movsd(MemOperand(rsp, 0), xmm0);
3108 SlowTruncateToI(result_reg, rsp, 0);
3109 addq(rsp, Immediate(kDoubleSize));
3110 } else {
3111 SlowTruncateToI(result_reg, input_reg);
3112 }
3113
3114 bind(&done);
3115 }
3116
3117
3118 void MacroAssembler::TruncateDoubleToI(Register result_reg,
3119 XMMRegister input_reg) {
3120 Label done;
3121 cvttsd2siq(result_reg, input_reg);
3122 movq(kScratchRegister, V8_INT64_C(0x8000000000000000));
3123 cmpq(result_reg, kScratchRegister);
3124 j(not_equal, &done, Label::kNear);
3125
3126 subq(rsp, Immediate(kDoubleSize));
3127 movsd(MemOperand(rsp, 0), input_reg);
3128 SlowTruncateToI(result_reg, rsp, 0);
3129 addq(rsp, Immediate(kDoubleSize));
3130
3131 bind(&done);
3132 }
3133
3134
3135 void MacroAssembler::DoubleToI(Register result_reg,
3136 XMMRegister input_reg,
3137 XMMRegister scratch,
3138 MinusZeroMode minus_zero_mode,
3139 Label* conversion_failed,
3140 Label::Distance dst) {
3141 cvttsd2si(result_reg, input_reg);
3142 Cvtlsi2sd(xmm0, result_reg);
3143 ucomisd(xmm0, input_reg);
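// ucomisd raises the parity flag when either operand is NaN, so the
// parity_even check below catches NaN; a round-trip mismatch (not_equal)
// catches lost precision.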
3144 j(not_equal, conversion_failed, dst);
3145 j(parity_even, conversion_failed, dst); // NaN.
3146 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
3147 Label done;
3148 // The integer converted back is equal to the original. We
3149 // only have to test if we got -0 as an input.
3150 testl(result_reg, result_reg);
3151 j(not_zero, &done, Label::kNear);
3152 movmskpd(result_reg, input_reg);
3153 // Bit 0 contains the sign of the double in input_reg.
3154 // If input was positive, we are ok and return 0, otherwise
3155 // jump to conversion_failed.
3156 andl(result_reg, Immediate(1));
3157 j(not_zero, conversion_failed, dst);
3158 bind(&done);
3159 }
3160 }
3161
3162
3163 void MacroAssembler::TaggedToI(Register result_reg,
3164 Register input_reg,
3165 XMMRegister temp,
3166 MinusZeroMode minus_zero_mode,
3167 Label* lost_precision,
3168 Label::Distance dst) {
3169 Label done;
3170 ASSERT(!temp.is(xmm0));
3171
3172 // Heap number map check.
3173 CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
3174 Heap::kHeapNumberMapRootIndex);
3175 j(not_equal, lost_precision, dst);
3176
3177 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3178 cvttsd2si(result_reg, xmm0);
3179 Cvtlsi2sd(temp, result_reg);
3180 ucomisd(xmm0, temp);
3181 RecordComment("Deferred TaggedToI: lost precision");
3182 j(not_equal, lost_precision, dst);
3183 RecordComment("Deferred TaggedToI: NaN");
3184 j(parity_even, lost_precision, dst); // NaN.
3185 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
3186 testl(result_reg, result_reg);
3187 j(not_zero, &done, Label::kNear);
3188 movmskpd(result_reg, xmm0);
3189 andl(result_reg, Immediate(1));
3190 j(not_zero, lost_precision, dst);
3191 }
3192 bind(&done);
3193 }
3194
3195
3196 void MacroAssembler::Throw(BailoutReason reason) {
3197 #ifdef DEBUG
3198 const char* msg = GetBailoutReason(reason);
3199 if (msg != NULL) {
3200 RecordComment("Throw message: ");
3201 RecordComment(msg);
3202 }
3203 #endif
3204
3205 push(rax);
3206 Push(Smi::FromInt(reason));
3207 if (!has_frame_) {
3208 // We don't actually want to generate a pile of code for this, so just
3209 // claim there is a stack frame, without generating one.
3210 FrameScope scope(this, StackFrame::NONE);
3211 CallRuntime(Runtime::kThrowMessage, 1);
3212 } else {
3213 CallRuntime(Runtime::kThrowMessage, 1);
3214 }
3215 // Control will not return here.
3216 int3();
3217 }
3218
3219
3220 void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
3221 Label L;
3222 j(NegateCondition(cc), &L);
3223 Throw(reason);
3224 // will not return here
3225 bind(&L);
3226 }
3227
3228
3229 void MacroAssembler::LoadInstanceDescriptors(Register map,
3230 Register descriptors) {
3231 movq(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
3232 }
3233
3234
3235 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3236 movq(dst, FieldOperand(map, Map::kBitField3Offset));
3237 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3238 }
3239
3240
3241 void MacroAssembler::EnumLength(Register dst, Register map) {
3242 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3243 movq(dst, FieldOperand(map, Map::kBitField3Offset));
3244 Move(kScratchRegister, Smi::FromInt(Map::EnumLengthBits::kMask));
3245 and_(dst, kScratchRegister);
3246 }
3247
3248
3249 void MacroAssembler::DispatchMap(Register obj,
3250 Register unused,
3251 Handle<Map> map,
3252 Handle<Code> success,
3253 SmiCheckType smi_check_type) {
3254 Label fail;
3255 if (smi_check_type == DO_SMI_CHECK) {
3256 JumpIfSmi(obj, &fail);
3257 }
3258 Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
3259 j(equal, success, RelocInfo::CODE_TARGET);
3260
3261 bind(&fail);
3262 }
3263
3264
3265 void MacroAssembler::AssertNumber(Register object) {
3266 if (emit_debug_code()) {
3267 Label ok;
3268 Condition is_smi = CheckSmi(object);
3269 j(is_smi, &ok, Label::kNear);
3270 Cmp(FieldOperand(object, HeapObject::kMapOffset),
3271 isolate()->factory()->heap_number_map());
3272 Check(equal, kOperandIsNotANumber);
3273 bind(&ok);
3274 }
3275 }
3276
3277
3278 void MacroAssembler::AssertNotSmi(Register object) {
3279 if (emit_debug_code()) {
3280 Condition is_smi = CheckSmi(object);
3281 Check(NegateCondition(is_smi), kOperandIsASmi);
3282 }
3283 }
3284
3285
3286 void MacroAssembler::AssertSmi(Register object) {
3287 if (emit_debug_code()) {
3288 Condition is_smi = CheckSmi(object);
3289 Check(is_smi, kOperandIsNotASmi);
3290 }
3291 }
3292
3293
3294 void MacroAssembler::AssertSmi(const Operand& object) {
3295 if (emit_debug_code()) {
3296 Condition is_smi = CheckSmi(object);
3297 Check(is_smi, kOperandIsNotASmi);
3298 }
3299 }
3300
3301
3302 void MacroAssembler::AssertZeroExtended(Register int32_register) {
3303 if (emit_debug_code()) {
3304 ASSERT(!int32_register.is(kScratchRegister));
3305 movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
3306 cmpq(kScratchRegister, int32_register);
3307 Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
3308 }
3309 }
3310
3311
3312 void MacroAssembler::AssertString(Register object) {
3313 if (emit_debug_code()) {
3314 testb(object, Immediate(kSmiTagMask));
3315 Check(not_equal, kOperandIsASmiAndNotAString);
3316 push(object);
3317 movq(object, FieldOperand(object, HeapObject::kMapOffset));
3318 CmpInstanceType(object, FIRST_NONSTRING_TYPE);
3319 pop(object);
3320 Check(below, kOperandIsNotAString);
3321 }
3322 }
3323
3324
3325 void MacroAssembler::AssertName(Register object) {
3326 if (emit_debug_code()) {
3327 testb(object, Immediate(kSmiTagMask));
3328 Check(not_equal, kOperandIsASmiAndNotAName);
3329 push(object);
3330 movq(object, FieldOperand(object, HeapObject::kMapOffset));
3331 CmpInstanceType(object, LAST_NAME_TYPE);
3332 pop(object);
3333 Check(below_equal, kOperandIsNotAName);
3334 }
3335 }
3336
3337
3338 void MacroAssembler::AssertRootValue(Register src,
3339 Heap::RootListIndex root_value_index,
3340 BailoutReason reason) {
3341 if (emit_debug_code()) {
3342 ASSERT(!src.is(kScratchRegister));
3343 LoadRoot(kScratchRegister, root_value_index);
3344 cmpq(src, kScratchRegister);
3345 Check(equal, reason);
3346 }
3347 }
3348
3349
3350
3351 Condition MacroAssembler::IsObjectStringType(Register heap_object,
3352 Register map,
3353 Register instance_type) {
3354 movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3355 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3356 STATIC_ASSERT(kNotStringTag != 0);
3357 testb(instance_type, Immediate(kIsNotStringMask));
3358 return zero;
3359 }
3360
3361
3362 Condition MacroAssembler::IsObjectNameType(Register heap_object,
3363 Register map,
3364 Register instance_type) {
3365 movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3366 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3367 cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
3368 return below_equal;
3369 }
3370
3371
3372 void MacroAssembler::TryGetFunctionPrototype(Register function,
3373 Register result,
3374 Label* miss,
3375 bool miss_on_bound_function) {
3376 // Check that the receiver isn't a smi.
3377 testl(function, Immediate(kSmiTagMask));
3378 j(zero, miss);
3379
3380 // Check that the function really is a function.
3381 CmpObjectType(function, JS_FUNCTION_TYPE, result);
3382 j(not_equal, miss);
3383
3384 if (miss_on_bound_function) {
3385 movq(kScratchRegister,
3386 FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3387 // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
3388 // field).
3389 TestBit(FieldOperand(kScratchRegister,
3390 SharedFunctionInfo::kCompilerHintsOffset),
3391 SharedFunctionInfo::kBoundFunction);
3392 j(not_zero, miss);
3393 }
3394
3395 // Make sure that the function has an instance prototype.
3396 Label non_instance;
3397 testb(FieldOperand(result, Map::kBitFieldOffset),
3398 Immediate(1 << Map::kHasNonInstancePrototype));
3399 j(not_zero, &non_instance, Label::kNear);
3400
3401 // Get the prototype or initial map from the function.
3402 movq(result,
3403 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3404
3405 // If the prototype or initial map is the hole, don't return it and
3406 // simply miss the cache instead. This will allow us to allocate a
3407 // prototype object on-demand in the runtime system.
3408 CompareRoot(result, Heap::kTheHoleValueRootIndex);
3409 j(equal, miss);
3410
3411 // If the function does not have an initial map, we're done.
3412 Label done;
3413 CmpObjectType(result, MAP_TYPE, kScratchRegister);
3414 j(not_equal, &done, Label::kNear);
3415
3416 // Get the prototype from the initial map.
3417 movq(result, FieldOperand(result, Map::kPrototypeOffset));
3418 jmp(&done, Label::kNear);
3419
3420 // Non-instance prototype: Fetch prototype from constructor field
3421 // in initial map.
3422 bind(&non_instance);
3423 movq(result, FieldOperand(result, Map::kConstructorOffset));
3424
3425 // All done.
3426 bind(&done);
3427 }
3428
3429
3430 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
3431 if (FLAG_native_code_counters && counter->Enabled()) {
3432 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3433 movl(counter_operand, Immediate(value));
3434 }
3435 }
3436
3437
3438 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
3439 ASSERT(value > 0);
3440 if (FLAG_native_code_counters && counter->Enabled()) {
3441 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3442 if (value == 1) {
3443 incl(counter_operand);
3444 } else {
3445 addl(counter_operand, Immediate(value));
3446 }
3447 }
3448 }
3449
3450
3451 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
3452 ASSERT(value > 0);
3453 if (FLAG_native_code_counters && counter->Enabled()) {
3454 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3455 if (value == 1) {
3456 decl(counter_operand);
3457 } else {
3458 subl(counter_operand, Immediate(value));
3459 }
3460 }
3461 }
3462
3463
3464 #ifdef ENABLE_DEBUGGER_SUPPORT
3465 void MacroAssembler::DebugBreak() {
3466 Set(rax, 0); // No arguments.
3467 LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
3468 CEntryStub ces(1);
3469 ASSERT(AllowThisStubCall(&ces));
3470 Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
3471 }
3472 #endif // ENABLE_DEBUGGER_SUPPORT
3473
3474
3475 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
3476 // This macro takes the dst register to make the code more readable
3477 // at the call sites. However, the dst register has to be rcx to
3478 // follow the calling convention which requires the call type to be
3479 // in rcx.
3480 ASSERT(dst.is(rcx));
3481 if (call_kind == CALL_AS_FUNCTION) {
3482 LoadSmiConstant(dst, Smi::FromInt(1));
3483 } else {
3484 LoadSmiConstant(dst, Smi::FromInt(0));
3485 }
3486 }
3487
3488
3489 void MacroAssembler::InvokeCode(Register code,
3490 const ParameterCount& expected,
3491 const ParameterCount& actual,
3492 InvokeFlag flag,
3493 const CallWrapper& call_wrapper,
3494 CallKind call_kind) {
3495 // You can't call a function without a valid frame.
3496 ASSERT(flag == JUMP_FUNCTION || has_frame());
3497
3498 Label done;
3499 bool definitely_mismatches = false;
3500 InvokePrologue(expected,
3501 actual,
3502 Handle<Code>::null(),
3503 code,
3504 &done,
3505 &definitely_mismatches,
3506 flag,
3507 Label::kNear,
3508 call_wrapper,
3509 call_kind);
3510 if (!definitely_mismatches) {
3511 if (flag == CALL_FUNCTION) {
3512 call_wrapper.BeforeCall(CallSize(code));
3513 SetCallKind(rcx, call_kind);
3514 call(code);
3515 call_wrapper.AfterCall();
3516 } else {
3517 ASSERT(flag == JUMP_FUNCTION);
3518 SetCallKind(rcx, call_kind);
3519 jmp(code);
3520 }
3521 bind(&done);
3522 }
3523 }
3524
3525
3526 void MacroAssembler::InvokeCode(Handle<Code> code,
3527 const ParameterCount& expected,
3528 const ParameterCount& actual,
3529 RelocInfo::Mode rmode,
3530 InvokeFlag flag,
3531 const CallWrapper& call_wrapper,
3532 CallKind call_kind) {
3533 // You can't call a function without a valid frame.
3534 ASSERT(flag == JUMP_FUNCTION || has_frame());
3535
3536 Label done;
3537 bool definitely_mismatches = false;
3538 Register dummy = rax;
3539 InvokePrologue(expected,
3540 actual,
3541 code,
3542 dummy,
3543 &done,
3544 &definitely_mismatches,
3545 flag,
3546 Label::kNear,
3547 call_wrapper,
3548 call_kind);
3549 if (!definitely_mismatches) {
3550 if (flag == CALL_FUNCTION) {
3551 call_wrapper.BeforeCall(CallSize(code));
3552 SetCallKind(rcx, call_kind);
3553 Call(code, rmode);
3554 call_wrapper.AfterCall();
3555 } else {
3556 ASSERT(flag == JUMP_FUNCTION);
3557 SetCallKind(rcx, call_kind);
3558 Jump(code, rmode);
3559 }
3560 bind(&done);
3561 }
3562 }
3563
3564
3565 void MacroAssembler::InvokeFunction(Register function,
3566 const ParameterCount& actual,
3567 InvokeFlag flag,
3568 const CallWrapper& call_wrapper,
3569 CallKind call_kind) {
3570 // You can't call a function without a valid frame.
3571 ASSERT(flag == JUMP_FUNCTION || has_frame());
3572
3573 ASSERT(function.is(rdi));
3574 movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3575 movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
3576 movsxlq(rbx,
3577 FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
3578 // Advances rdx to the end of the Code object header, to the start of
3579 // the executable code.
3580 movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3581
3582 ParameterCount expected(rbx);
3583 InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
3584 }
3585
3586
3587 void MacroAssembler::InvokeFunction(Register function,
3588 const ParameterCount& expected,
3589 const ParameterCount& actual,
3590 InvokeFlag flag,
3591 const CallWrapper& call_wrapper,
3592 CallKind call_kind) {
3593 // You can't call a function without a valid frame.
3594 ASSERT(flag == JUMP_FUNCTION || has_frame());
3595
3596 ASSERT(function.is(rdi));
3597 movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
3598 // Advances rdx to the end of the Code object header, to the start of
3599 // the executable code.
3600 movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3601
3602 InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
3603 }
3604
3605
3606 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3607 const ParameterCount& expected,
3608 const ParameterCount& actual,
3609 InvokeFlag flag,
3610 const CallWrapper& call_wrapper,
3611 CallKind call_kind) {
3612 Move(rdi, function);
3613 InvokeFunction(rdi, expected, actual, flag, call_wrapper, call_kind);
3614 }
3615
3616
3617 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3618 const ParameterCount& actual,
3619 Handle<Code> code_constant,
3620 Register code_register,
3621 Label* done,
3622 bool* definitely_mismatches,
3623 InvokeFlag flag,
3624 Label::Distance near_jump,
3625 const CallWrapper& call_wrapper,
3626 CallKind call_kind) {
3627 bool definitely_matches = false;
3628 *definitely_mismatches = false;
3629 Label invoke;
3630 if (expected.is_immediate()) {
3631 ASSERT(actual.is_immediate());
3632 if (expected.immediate() == actual.immediate()) {
3633 definitely_matches = true;
3634 } else {
3635 Set(rax, actual.immediate());
3636 if (expected.immediate() ==
3637 SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
3638 // Don't worry about adapting arguments for built-ins that
3639 // don't want that done. Skip adaptation code by making it look
3640 // like we have a match between expected and actual number of
3641 // arguments.
3642 definitely_matches = true;
3643 } else {
3644 *definitely_mismatches = true;
3645 Set(rbx, expected.immediate());
3646 }
3647 }
3648 } else {
3649 if (actual.is_immediate()) {
3650 // Expected is in register, actual is immediate. This is the
3651 // case when we invoke function values without going through the
3652 // IC mechanism.
3653 cmpq(expected.reg(), Immediate(actual.immediate()));
3654 j(equal, &invoke, Label::kNear);
3655 ASSERT(expected.reg().is(rbx));
3656 Set(rax, actual.immediate());
3657 } else if (!expected.reg().is(actual.reg())) {
3658 // Both expected and actual are in (different) registers. This
3659 // is the case when we invoke functions using call and apply.
3660 cmpq(expected.reg(), actual.reg());
3661 j(equal, &invoke, Label::kNear);
3662 ASSERT(actual.reg().is(rax));
3663 ASSERT(expected.reg().is(rbx));
3664 }
3665 }
3666
3667 if (!definitely_matches) {
3668 Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
3669 if (!code_constant.is_null()) {
3670 movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
3671 addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
3672 } else if (!code_register.is(rdx)) {
3673 movq(rdx, code_register);
3674 }
3675
3676 if (flag == CALL_FUNCTION) {
3677 call_wrapper.BeforeCall(CallSize(adaptor));
3678 SetCallKind(rcx, call_kind);
3679 Call(adaptor, RelocInfo::CODE_TARGET);
3680 call_wrapper.AfterCall();
3681 if (!*definitely_mismatches) {
3682 jmp(done, near_jump);
3683 }
3684 } else {
3685 SetCallKind(rcx, call_kind);
3686 Jump(adaptor, RelocInfo::CODE_TARGET);
3687 }
3688 bind(&invoke);
3689 }
3690 }
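// For reference, the decision InvokePrologue makes can be summarised by the
// following sketch (not generated code, just the logic of the block above):
//
//   if (expected == actual) {
//     // invoke the target directly
//   } else if (expected == SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
//     // builtin copes with any argument count; invoke directly
//   } else {
//     // go through the ArgumentsAdaptorTrampoline with rax = actual,
//     // rbx = expected and rdx = code entry
//   }
//
// When both counts are immediates and differ, *definitely_mismatches is set,
// so the callers above skip emitting the direct call path entirely.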
3691
3692
3693 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
3694 if (frame_mode == BUILD_STUB_FRAME) {
3695 push(rbp); // Caller's frame pointer.
3696 movq(rbp, rsp);
3697 push(rsi); // Callee's context.
3698 Push(Smi::FromInt(StackFrame::STUB));
3699 } else {
3700 PredictableCodeSizeScope predictable_code_size_scope(this,
3701 kNoCodeAgeSequenceLength);
3702 if (isolate()->IsCodePreAgingActive()) {
3703 // Pre-age the code.
3704 Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
3705 RelocInfo::CODE_AGE_SEQUENCE);
3706 Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
3707 } else {
3708 push(rbp); // Caller's frame pointer.
3709 movq(rbp, rsp);
3710 push(rsi); // Callee's context.
3711 push(rdi); // Callee's JS function.
3712 }
3713 }
3714 }
3715
3716
3717 void MacroAssembler::EnterFrame(StackFrame::Type type) {
3718 push(rbp);
3719 movq(rbp, rsp);
3720 push(rsi); // Context.
3721 Push(Smi::FromInt(type));
3722 movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
3723 push(kScratchRegister);
3724 if (emit_debug_code()) {
3725 movq(kScratchRegister,
3726 isolate()->factory()->undefined_value(),
3727 RelocInfo::EMBEDDED_OBJECT);
3728 cmpq(Operand(rsp, 0), kScratchRegister);
3729 Check(not_equal, kCodeObjectNotProperlyPatched);
3730 }
3731 }
3732
3733
3734 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
3735 if (emit_debug_code()) {
3736 Move(kScratchRegister, Smi::FromInt(type));
3737 cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
3738 Check(equal, kStackFrameTypesMustMatch);
3739 }
3740 movq(rsp, rbp);
3741 pop(rbp);
3742 }
3743
3744
3745 void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
3746 // Set up the frame structure on the stack.
3747 // All constants are relative to the frame pointer of the exit frame.
3748 ASSERT(ExitFrameConstants::kCallerSPDisplacement ==
3749 kFPOnStackSize + kPCOnStackSize);
3750 ASSERT(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
3751 ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
3752 push(rbp);
3753 movq(rbp, rsp);
3754
3755 // Reserve room for entry stack pointer and push the code object.
3756 ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
3757 push(Immediate(0)); // Saved entry sp, patched before call.
3758 movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
3759 push(kScratchRegister); // Accessed from ExitFrame::code_slot.
3760
3761 // Save the frame pointer and the context in top.
3762 if (save_rax) {
3763 movq(r14, rax); // Backup rax in callee-save register.
3764 }
3765
3766 Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
3767 Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
3768 }
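// Illustrative layout of the exit frame set up above, relative to the new rbp
// (matching the ExitFrameConstants asserts; offsets in bytes on x64):
//
//   rbp + 16 : caller SP, i.e. first stack argument   (kCallerSPDisplacement)
//   rbp +  8 : return address                         (kCallerPCOffset)
//   rbp +  0 : saved caller rbp                       (kCallerFPOffset)
//   rbp -  8 : saved entry sp slot, patched later     (kSPOffset)
//   rbp - 16 : code object
//
// EnterExitFrameEpilogue reserves the argument (and optional XMM) area below
// this and then stores the final rsp into the saved entry sp slot.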
3769
3770
3771 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
3772 bool save_doubles) {
3773 #ifdef _WIN64
3774 const int kShadowSpace = 4;
3775 arg_stack_space += kShadowSpace;
3776 #endif
3777 // Optionally save all XMM registers.
3778 if (save_doubles) {
3779 int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
3780 arg_stack_space * kPointerSize;
3781 subq(rsp, Immediate(space));
3782 int offset = -2 * kPointerSize;
3783 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
3784 XMMRegister reg = XMMRegister::FromAllocationIndex(i);
3785 movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
3786 }
3787 } else if (arg_stack_space > 0) {
3788 subq(rsp, Immediate(arg_stack_space * kPointerSize));
3789 }
3790
3791 // Get the required frame alignment for the OS.
3792 const int kFrameAlignment = OS::ActivationFrameAlignment();
3793 if (kFrameAlignment > 0) {
3794 ASSERT(IsPowerOf2(kFrameAlignment));
3795 ASSERT(is_int8(kFrameAlignment));
3796 and_(rsp, Immediate(-kFrameAlignment));
3797 }
3798
3799 // Patch the saved entry sp.
3800 movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
3801 }
3802
3803
3804 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
3805 EnterExitFramePrologue(true);
3806
3807 // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
3808 // so it must be retained across the C-call.
3809 int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
3810 lea(r15, Operand(rbp, r14, times_pointer_size, offset));
3811
3812 EnterExitFrameEpilogue(arg_stack_space, save_doubles);
3813 }
3814
3815
3816 void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
3817 EnterExitFramePrologue(false);
3818 EnterExitFrameEpilogue(arg_stack_space, false);
3819 }
3820
3821
3822 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
3823 // Registers:
3824 // r15 : argv
3825 if (save_doubles) {
3826 int offset = -2 * kPointerSize;
3827 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
3828 XMMRegister reg = XMMRegister::FromAllocationIndex(i);
3829 movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
3830 }
3831 }
3832 // Get the return address from the stack and restore the frame pointer.
3833 movq(rcx, Operand(rbp, 1 * kPointerSize));
3834 movq(rbp, Operand(rbp, 0 * kPointerSize));
3835
3836 // Drop everything up to and including the arguments and the receiver
3837 // from the caller stack.
3838 lea(rsp, Operand(r15, 1 * kPointerSize));
3839
3840 PushReturnAddressFrom(rcx);
3841
3842 LeaveExitFrameEpilogue(true);
3843 }
3844
3845
3846 void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
3847 movq(rsp, rbp);
3848 pop(rbp);
3849
3850 LeaveExitFrameEpilogue(restore_context);
3851 }
3852
3853
3854 void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
3855 // Restore current context from top and clear it in debug mode.
3856 ExternalReference context_address(Isolate::kContextAddress, isolate());
3857 Operand context_operand = ExternalOperand(context_address);
3858 if (restore_context) {
3859 movq(rsi, context_operand);
3860 }
3861 #ifdef DEBUG
3862 movq(context_operand, Immediate(0));
3863 #endif
3864
3865 // Clear the top frame.
3866 ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
3867 isolate());
3868 Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
3869 movq(c_entry_fp_operand, Immediate(0));
3870 }
3871
3872
3873 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
3874 Register scratch,
3875 Label* miss) {
3876 Label same_contexts;
3877
3878 ASSERT(!holder_reg.is(scratch));
3879 ASSERT(!scratch.is(kScratchRegister));
3880 // Load current lexical context from the stack frame.
3881 movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
3882
3883 // When generating debug code, make sure the lexical context is set.
3884 if (emit_debug_code()) {
3885 cmpq(scratch, Immediate(0));
3886 Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
3887 }
3888 // Load the native context of the current context.
3889 int offset =
3890 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
3891 movq(scratch, FieldOperand(scratch, offset));
3892 movq(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
3893
3894 // Check the context is a native context.
3895 if (emit_debug_code()) {
3896 Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
3897 isolate()->factory()->native_context_map());
3898 Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
3899 }
3900
3901 // Check if both contexts are the same.
3902 cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
3903 j(equal, &same_contexts);
3904
3905 // Compare security tokens.
3906 // Check that the security token in the calling global object is
3907 // compatible with the security token in the receiving global
3908 // object.
3909
3910 // Check the context is a native context.
3911 if (emit_debug_code()) {
3912 // Preserve original value of holder_reg.
3913 push(holder_reg);
3914 movq(holder_reg,
3915 FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
3916 CompareRoot(holder_reg, Heap::kNullValueRootIndex);
3917 Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
3918
3919 // Read the first word and compare to native_context_map().
3920 movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
3921 CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
3922 Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
3923 pop(holder_reg);
3924 }
3925
3926 movq(kScratchRegister,
3927 FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
3928 int token_offset =
3929 Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
3930 movq(scratch, FieldOperand(scratch, token_offset));
3931 cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
3932 j(not_equal, miss);
3933
3934 bind(&same_contexts);
3935 }
3936
3937
3938 // Compute the hash code from the untagged key. This must be kept in sync with
3939 // ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
3940 // code-stub-hydrogen.cc
3941 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
3942 // First of all we assign the hash seed to scratch.
3943 LoadRoot(scratch, Heap::kHashSeedRootIndex);
3944 SmiToInteger32(scratch, scratch);
3945
3946 // Xor original key with a seed.
3947 xorl(r0, scratch);
3948
3949 // Compute the hash code from the untagged key. This must be kept in sync
3950 // with ComputeIntegerHash in utils.h.
3951 //
3952 // hash = ~hash + (hash << 15);
3953 movl(scratch, r0);
3954 notl(r0);
3955 shll(scratch, Immediate(15));
3956 addl(r0, scratch);
3957 // hash = hash ^ (hash >> 12);
3958 movl(scratch, r0);
3959 shrl(scratch, Immediate(12));
3960 xorl(r0, scratch);
3961 // hash = hash + (hash << 2);
3962 leal(r0, Operand(r0, r0, times_4, 0));
3963 // hash = hash ^ (hash >> 4);
3964 movl(scratch, r0);
3965 shrl(scratch, Immediate(4));
3966 xorl(r0, scratch);
3967 // hash = hash * 2057;
3968 imull(r0, r0, Immediate(2057));
3969 // hash = hash ^ (hash >> 16);
3970 movl(scratch, r0);
3971 shrl(scratch, Immediate(16));
3972 xorl(r0, scratch);
3973 }
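// For reference, the same computation in plain C (a sketch that mirrors the
// instruction sequence above and ComputeIntegerHash in utils.h):
//
//   uint32_t hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash = hash ^ (hash >> 12);
//   hash = hash + (hash << 2);   // emitted above as lea r0, [r0 + r0 * 4]
//   hash = hash ^ (hash >> 4);
//   hash = hash * 2057;
//   hash = hash ^ (hash >> 16);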
3974
3975
3976
3977 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
3978 Register elements,
3979 Register key,
3980 Register r0,
3981 Register r1,
3982 Register r2,
3983 Register result) {
3984 // Register use:
3985 //
3986 // elements - holds the slow-case elements of the receiver on entry.
3987 // Unchanged unless 'result' is the same register.
3988 //
3989 // key - holds the smi key on entry.
3990 // Unchanged unless 'result' is the same register.
3991 //
3992 // Scratch registers:
3993 //
3994 // r0 - holds the untagged key on entry and holds the hash once computed.
3995 //
3996 // r1 - used to hold the capacity mask of the dictionary
3997 //
3998 // r2 - used for the index into the dictionary.
3999 //
4000 // result - holds the result on exit if the load succeeded.
4001 // Allowed to be the same as 'key' or 'result'.
4002 // Unchanged on bailout so 'key' or 'result' can be used
4003 // in further computation.
4004
4005 Label done;
4006
4007 GetNumberHash(r0, r1);
4008
4009 // Compute capacity mask.
4010 SmiToInteger32(r1, FieldOperand(elements,
4011 SeededNumberDictionary::kCapacityOffset));
4012 decl(r1);
4013
4014 // Generate an unrolled loop that performs a few probes before giving up.
4015 for (int i = 0; i < kNumberDictionaryProbes; i++) {
4016 // Use r2 for index calculations and keep the hash intact in r0.
4017 movq(r2, r0);
4018 // Compute the masked index: (hash + i + i * i) & mask.
4019 if (i > 0) {
4020 addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
4021 }
4022 and_(r2, r1);
4023
4024 // Scale the index by multiplying by the entry size.
4025 ASSERT(SeededNumberDictionary::kEntrySize == 3);
4026 lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
4027
4028 // Check if the key matches.
4029 cmpq(key, FieldOperand(elements,
4030 r2,
4031 times_pointer_size,
4032 SeededNumberDictionary::kElementsStartOffset));
4033 if (i != (kNumberDictionaryProbes - 1)) {
4034 j(equal, &done);
4035 } else {
4036 j(not_equal, miss);
4037 }
4038 }
4039
4040 bind(&done);
4041 // Check that the value is a normal property.
4042 const int kDetailsOffset =
4043 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
4044 ASSERT_EQ(NORMAL, 0);
4045 Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
4046 Smi::FromInt(PropertyDetails::TypeField::kMask));
4047 j(not_zero, miss);
4048
4049 // Get the value at the masked, scaled index.
4050 const int kValueOffset =
4051 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
4052 movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
4053 }
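// Sketch of the dictionary layout the offsets above rely on: each entry is
// kEntrySize (== 3) pointers wide, starting at kElementsStartOffset, so with
// r2 already scaled to entry_index * 3:
//
//   key     : elements + kElementsStartOffset + (r2 + 0) * kPointerSize
//   value   : elements + kElementsStartOffset + (r2 + 1) * kPointerSize
//   details : elements + kElementsStartOffset + (r2 + 2) * kPointerSize
//
// which is exactly where kValueOffset and kDetailsOffset point.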
4054
4055
4056 void MacroAssembler::LoadAllocationTopHelper(Register result,
4057 Register scratch,
4058 AllocationFlags flags) {
4059 ExternalReference allocation_top =
4060 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4061
4062 // Just return if allocation top is already known.
4063 if ((flags & RESULT_CONTAINS_TOP) != 0) {
4064 // No use of scratch if allocation top is provided.
4065 ASSERT(!scratch.is_valid());
4066 #ifdef DEBUG
4067 // Assert that result actually contains top on entry.
4068 Operand top_operand = ExternalOperand(allocation_top);
4069 cmpq(result, top_operand);
4070 Check(equal, kUnexpectedAllocationTop);
4071 #endif
4072 return;
4073 }
4074
4075 // Move address of new object to result. Use scratch register if available,
4076 // and keep address in scratch until call to UpdateAllocationTopHelper.
4077 if (scratch.is_valid()) {
4078 LoadAddress(scratch, allocation_top);
4079 movq(result, Operand(scratch, 0));
4080 } else {
4081 Load(result, allocation_top);
4082 }
4083 }
4084
4085
4086 void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
4087 Register scratch,
4088 AllocationFlags flags) {
4089 if (emit_debug_code()) {
4090 testq(result_end, Immediate(kObjectAlignmentMask));
4091 Check(zero, kUnalignedAllocationInNewSpace);
4092 }
4093
4094 ExternalReference allocation_top =
4095 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4096
4097 // Update new top.
4098 if (scratch.is_valid()) {
4099 // Scratch already contains address of allocation top.
4100 movq(Operand(scratch, 0), result_end);
4101 } else {
4102 Store(allocation_top, result_end);
4103 }
4104 }
4105
4106
4107 void MacroAssembler::Allocate(int object_size,
4108 Register result,
4109 Register result_end,
4110 Register scratch,
4111 Label* gc_required,
4112 AllocationFlags flags) {
4113 ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
4114 ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
4115 if (!FLAG_inline_new) {
4116 if (emit_debug_code()) {
4117 // Trash the registers to simulate an allocation failure.
4118 movl(result, Immediate(0x7091));
4119 if (result_end.is_valid()) {
4120 movl(result_end, Immediate(0x7191));
4121 }
4122 if (scratch.is_valid()) {
4123 movl(scratch, Immediate(0x7291));
4124 }
4125 }
4126 jmp(gc_required);
4127 return;
4128 }
4129 ASSERT(!result.is(result_end));
4130
4131 // Load address of new object into result.
4132 LoadAllocationTopHelper(result, scratch, flags);
4133
4134 // Align the next allocation. Storing the filler map without checking top is
4135 // safe in new-space because the limit of the heap is aligned there.
4136 if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
4137 testq(result, Immediate(kDoubleAlignmentMask));
4138 Check(zero, kAllocationIsNotDoubleAligned);
4139 }
4140
4141 // Calculate new top and bail out if new space is exhausted.
4142 ExternalReference allocation_limit =
4143 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4144
4145 Register top_reg = result_end.is_valid() ? result_end : result;
4146
4147 if (!top_reg.is(result)) {
4148 movq(top_reg, result);
4149 }
4150 addq(top_reg, Immediate(object_size));
4151 j(carry, gc_required);
4152 Operand limit_operand = ExternalOperand(allocation_limit);
4153 cmpq(top_reg, limit_operand);
4154 j(above, gc_required);
4155
4156 // Update allocation top.
4157 UpdateAllocationTopHelper(top_reg, scratch, flags);
4158
4159 bool tag_result = (flags & TAG_OBJECT) != 0;
4160 if (top_reg.is(result)) {
4161 if (tag_result) {
4162 subq(result, Immediate(object_size - kHeapObjectTag));
4163 } else {
4164 subq(result, Immediate(object_size));
4165 }
4166 } else if (tag_result) {
4167 // Tag the result if requested.
4168 ASSERT(kHeapObjectTag == 1);
4169 incq(result);
4170 }
4171 }
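// The fast path above is a plain bump-pointer allocation. In pseudocode
// (a sketch that ignores the debug-code and DOUBLE_ALIGNMENT branches):
//
//   result  = *allocation_top;
//   new_top = result + object_size;
//   if (overflowed || new_top > *allocation_limit) goto gc_required;
//   *allocation_top = new_top;
//   if (flags & TAG_OBJECT) result += kHeapObjectTag;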
4172
4173
4174 void MacroAssembler::Allocate(int header_size,
4175 ScaleFactor element_size,
4176 Register element_count,
4177 Register result,
4178 Register result_end,
4179 Register scratch,
4180 Label* gc_required,
4181 AllocationFlags flags) {
4182 ASSERT((flags & SIZE_IN_WORDS) == 0);
4183 lea(result_end, Operand(element_count, element_size, header_size));
4184 Allocate(result_end, result, result_end, scratch, gc_required, flags);
4185 }
4186
4187
4188 void MacroAssembler::Allocate(Register object_size,
4189 Register result,
4190 Register result_end,
4191 Register scratch,
4192 Label* gc_required,
4193 AllocationFlags flags) {
4194 ASSERT((flags & SIZE_IN_WORDS) == 0);
4195 if (!FLAG_inline_new) {
4196 if (emit_debug_code()) {
4197 // Trash the registers to simulate an allocation failure.
4198 movl(result, Immediate(0x7091));
4199 movl(result_end, Immediate(0x7191));
4200 if (scratch.is_valid()) {
4201 movl(scratch, Immediate(0x7291));
4202 }
4203 // object_size is left unchanged by this function.
4204 }
4205 jmp(gc_required);
4206 return;
4207 }
4208 ASSERT(!result.is(result_end));
4209
4210 // Load address of new object into result.
4211 LoadAllocationTopHelper(result, scratch, flags);
4212
4213 // Align the next allocation. Storing the filler map without checking top is
4214 // safe in new-space because the limit of the heap is aligned there.
4215 if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
4216 testq(result, Immediate(kDoubleAlignmentMask));
4217 Check(zero, kAllocationIsNotDoubleAligned);
4218 }
4219
4220 // Calculate new top and bail out if new space is exhausted.
4221 ExternalReference allocation_limit =
4222 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4223 if (!object_size.is(result_end)) {
4224 movq(result_end, object_size);
4225 }
4226 addq(result_end, result);
4227 j(carry, gc_required);
4228 Operand limit_operand = ExternalOperand(allocation_limit);
4229 cmpq(result_end, limit_operand);
4230 j(above, gc_required);
4231
4232 // Update allocation top.
4233 UpdateAllocationTopHelper(result_end, scratch, flags);
4234
4235 // Tag the result if requested.
4236 if ((flags & TAG_OBJECT) != 0) {
4237 addq(result, Immediate(kHeapObjectTag));
4238 }
4239 }
4240
4241
4242 void MacroAssembler::UndoAllocationInNewSpace(Register object) {
4243 ExternalReference new_space_allocation_top =
4244 ExternalReference::new_space_allocation_top_address(isolate());
4245
4246 // Make sure the object has no tag before resetting top.
4247 and_(object, Immediate(~kHeapObjectTagMask));
4248 Operand top_operand = ExternalOperand(new_space_allocation_top);
4249 #ifdef DEBUG
4250 cmpq(object, top_operand);
4251 Check(below, kUndoAllocationOfNonAllocatedMemory);
4252 #endif
4253 movq(top_operand, object);
4254 }
4255
4256
4257 void MacroAssembler::AllocateHeapNumber(Register result,
4258 Register scratch,
4259 Label* gc_required) {
4260 // Allocate heap number in new space.
4261 Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
4262
4263 // Set the map.
4264 LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
4265 movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4266 }
4267
4268
4269 void MacroAssembler::AllocateTwoByteString(Register result,
4270 Register length,
4271 Register scratch1,
4272 Register scratch2,
4273 Register scratch3,
4274 Label* gc_required) {
4275 // Calculate the number of bytes needed for the characters in the string while
4276 // observing object alignment.
4277 const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
4278 kObjectAlignmentMask;
4279 ASSERT(kShortSize == 2);
4280 // scratch1 = length * 2 + kObjectAlignmentMask.
4281 lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
4282 kHeaderAlignment));
4283 and_(scratch1, Immediate(~kObjectAlignmentMask));
4284 if (kHeaderAlignment > 0) {
4285 subq(scratch1, Immediate(kHeaderAlignment));
4286 }
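// At this point scratch1 + SeqTwoByteString::kHeaderSize equals
// RoundUp(SeqTwoByteString::kHeaderSize + length * 2, kObjectAlignment),
// which is the total object size; Allocate below adds the header size back
// in via its (header_size, element_size, element_count) form.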
4287
4288 // Allocate two byte string in new space.
4289 Allocate(SeqTwoByteString::kHeaderSize,
4290 times_1,
4291 scratch1,
4292 result,
4293 scratch2,
4294 scratch3,
4295 gc_required,
4296 TAG_OBJECT);
4297
4298 // Set the map, length and hash field.
4299 LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
4300 movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4301 Integer32ToSmi(scratch1, length);
4302 movq(FieldOperand(result, String::kLengthOffset), scratch1);
4303 movq(FieldOperand(result, String::kHashFieldOffset),
4304 Immediate(String::kEmptyHashField));
4305 }
4306
4307
4308 void MacroAssembler::AllocateAsciiString(Register result,
4309 Register length,
4310 Register scratch1,
4311 Register scratch2,
4312 Register scratch3,
4313 Label* gc_required) {
4314 // Calculate the number of bytes needed for the characters in the string while
4315 // observing object alignment.
4316 const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
4317 kObjectAlignmentMask;
4318 movl(scratch1, length);
4319 ASSERT(kCharSize == 1);
4320 addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
4321 and_(scratch1, Immediate(~kObjectAlignmentMask));
4322 if (kHeaderAlignment > 0) {
4323 subq(scratch1, Immediate(kHeaderAlignment));
4324 }
4325
4326 // Allocate ASCII string in new space.
4327 Allocate(SeqOneByteString::kHeaderSize,
4328 times_1,
4329 scratch1,
4330 result,
4331 scratch2,
4332 scratch3,
4333 gc_required,
4334 TAG_OBJECT);
4335
4336 // Set the map, length and hash field.
4337 LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
4338 movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4339 Integer32ToSmi(scratch1, length);
4340 movq(FieldOperand(result, String::kLengthOffset), scratch1);
4341 movq(FieldOperand(result, String::kHashFieldOffset),
4342 Immediate(String::kEmptyHashField));
4343 }
4344
4345
4346 void MacroAssembler::AllocateTwoByteConsString(Register result,
4347 Register scratch1,
4348 Register scratch2,
4349 Label* gc_required) {
4350 // Allocate a cons string object in new space.
4351 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
4352 TAG_OBJECT);
4353
4354 // Set the map. The other fields are left uninitialized.
4355 LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
4356 movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4357 }
4358
4359
4360 void MacroAssembler::AllocateAsciiConsString(Register result,
4361 Register scratch1,
4362 Register scratch2,
4363 Label* gc_required) {
4364 Label allocate_new_space, install_map;
4365 AllocationFlags flags = TAG_OBJECT;
4366
4367 ExternalReference high_promotion_mode = ExternalReference::
4368 new_space_high_promotion_mode_active_address(isolate());
4369
4370 Load(scratch1, high_promotion_mode);
4371 testb(scratch1, Immediate(1));
4372 j(zero, &allocate_new_space);
4373 Allocate(ConsString::kSize,
4374 result,
4375 scratch1,
4376 scratch2,
4377 gc_required,
4378 static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
4379
4380 jmp(&install_map);
4381
4382 bind(&allocate_new_space);
4383 Allocate(ConsString::kSize,
4384 result,
4385 scratch1,
4386 scratch2,
4387 gc_required,
4388 flags);
4389
4390 bind(&install_map);
4391
4392 // Set the map. The other fields are left uninitialized.
4393 LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
4394 movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4395 }
4396
4397
4398 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
4399 Register scratch1,
4400 Register scratch2,
4401 Label* gc_required) {
4402 // Allocate a sliced string object in new space.
4403 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4404 TAG_OBJECT);
4405
4406 // Set the map. The other fields are left uninitialized.
4407 LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
4408 movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4409 }
4410
4411
4412 void MacroAssembler::AllocateAsciiSlicedString(Register result,
4413 Register scratch1,
4414 Register scratch2,
4415 Label* gc_required) {
4416 // Allocate a sliced string object in new space.
4417 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4418 TAG_OBJECT);
4419
4420 // Set the map. The other fields are left uninitialized.
4421 LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
4422 movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4423 }
4424
4425
4426 // Copy memory, byte-by-byte, from source to destination. Not optimized for
4427 // long or aligned copies. The contents of scratch and length are destroyed.
4428 // Destination is incremented by length; source, length and scratch are
4429 // clobbered.
4430 // A simpler loop is faster on small copies, but slower on large ones.
4431 // The cld() instruction must have been emitted, to set the direction flag,
4432 // before calling this function.
4433 void MacroAssembler::CopyBytes(Register destination,
4434 Register source,
4435 Register length,
4436 int min_length,
4437 Register scratch) {
4438 ASSERT(min_length >= 0);
4439 if (emit_debug_code()) {
4440 cmpl(length, Immediate(min_length));
4441 Assert(greater_equal, kInvalidMinLength);
4442 }
4443 Label short_loop, len8, len16, len24, done, short_string;
4444
4445 const int kLongStringLimit = 4 * kPointerSize;
4446 if (min_length <= kLongStringLimit) {
4447 cmpl(length, Immediate(kPointerSize));
4448 j(below, &short_string, Label::kNear);
4449 }
4450
4451 ASSERT(source.is(rsi));
4452 ASSERT(destination.is(rdi));
4453 ASSERT(length.is(rcx));
4454
4455 if (min_length <= kLongStringLimit) {
4456 cmpl(length, Immediate(2 * kPointerSize));
4457 j(below_equal, &len8, Label::kNear);
4458 cmpl(length, Immediate(3 * kPointerSize));
4459 j(below_equal, &len16, Label::kNear);
4460 cmpl(length, Immediate(4 * kPointerSize));
4461 j(below_equal, &len24, Label::kNear);
4462 }
4463
4464 // Because source is 8-byte aligned in our uses of this function,
4465 // we keep source aligned for the rep movs operation by copying the odd bytes
4466 // at the end of the ranges.
4467 movq(scratch, length);
4468 shrl(length, Immediate(kPointerSizeLog2));
4469 repmovsq();
4470 // Move remaining bytes of length.
4471 andl(scratch, Immediate(kPointerSize - 1));
4472 movq(length, Operand(source, scratch, times_1, -kPointerSize));
4473 movq(Operand(destination, scratch, times_1, -kPointerSize), length);
4474 addq(destination, scratch);
4475
4476 if (min_length <= kLongStringLimit) {
4477 jmp(&done, Label::kNear);
4478 bind(&len24);
4479 movq(scratch, Operand(source, 2 * kPointerSize));
4480 movq(Operand(destination, 2 * kPointerSize), scratch);
4481 bind(&len16);
4482 movq(scratch, Operand(source, kPointerSize));
4483 movq(Operand(destination, kPointerSize), scratch);
4484 bind(&len8);
4485 movq(scratch, Operand(source, 0));
4486 movq(Operand(destination, 0), scratch);
4487 // Move remaining bytes of length.
4488 movq(scratch, Operand(source, length, times_1, -kPointerSize));
4489 movq(Operand(destination, length, times_1, -kPointerSize), scratch);
4490 addq(destination, length);
4491 jmp(&done, Label::kNear);
4492
4493 bind(&short_string);
4494 if (min_length == 0) {
4495 testl(length, length);
4496 j(zero, &done, Label::kNear);
4497 }
4498
4499 bind(&short_loop);
4500 movb(scratch, Operand(source, 0));
4501 movb(Operand(destination, 0), scratch);
4502 incq(source);
4503 incq(destination);
4504 decl(length);
4505 j(not_zero, &short_loop);
4506 }
4507
4508 bind(&done);
4509 }
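// Worked example of the rep-movs path above (sketch): for length == 37 and
// kPointerSize == 8, repmovsq() copies 4 quadwords (32 bytes) and advances
// source/destination past them; scratch is left holding 37 & 7 == 5, so the
// trailing movq pair re-copies the 8 bytes ending at the last source byte,
// i.e. bytes 29..36 of the range. The overlap with the already-copied region
// is harmless and avoids a byte-by-byte tail loop.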
4510
4511
4512 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
4513 Register end_offset,
4514 Register filler) {
4515 Label loop, entry;
4516 jmp(&entry);
4517 bind(&loop);
4518 movq(Operand(start_offset, 0), filler);
4519 addq(start_offset, Immediate(kPointerSize));
4520 bind(&entry);
4521 cmpq(start_offset, end_offset);
4522 j(less, &loop);
4523 }
4524
4525
4526 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4527 if (context_chain_length > 0) {
4528 // Move up the chain of contexts to the context containing the slot.
4529 movq(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4530 for (int i = 1; i < context_chain_length; i++) {
4531 movq(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4532 }
4533 } else {
4534 // Slot is in the current function context. Move it into the
4535 // destination register in case we store into it (the write barrier
4536 // cannot be allowed to destroy the context in rsi).
4537 movq(dst, rsi);
4538 }
4539
4540 // We should not have found a with context by walking the context
4541 // chain (i.e., the static scope chain and runtime context chain do
4542 // not agree). A variable occurring in such a scope should have
4543 // slot type LOOKUP and not CONTEXT.
4544 if (emit_debug_code()) {
4545 CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
4546 Heap::kWithContextMapRootIndex);
4547 Check(not_equal, kVariableResolvedToWithContext);
4548 }
4549 }
4550
4551
4552 void MacroAssembler::LoadTransitionedArrayMapConditional(
4553 ElementsKind expected_kind,
4554 ElementsKind transitioned_kind,
4555 Register map_in_out,
4556 Register scratch,
4557 Label* no_map_match) {
4558 // Load the global or builtins object from the current context.
4559 movq(scratch,
4560 Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4561 movq(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
4562
4563 // Check that the function's map is the same as the expected cached map.
4564 movq(scratch, Operand(scratch,
4565 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4566
4567 int offset = expected_kind * kPointerSize +
4568 FixedArrayBase::kHeaderSize;
4569 cmpq(map_in_out, FieldOperand(scratch, offset));
4570 j(not_equal, no_map_match);
4571
4572 // Use the transitioned cached map.
4573 offset = transitioned_kind * kPointerSize +
4574 FixedArrayBase::kHeaderSize;
4575 movq(map_in_out, FieldOperand(scratch, offset));
4576 }
4577
4578
4579 void MacroAssembler::LoadInitialArrayMap(
4580 Register function_in, Register scratch,
4581 Register map_out, bool can_have_holes) {
4582 ASSERT(!function_in.is(map_out));
4583 Label done;
4584 movq(map_out, FieldOperand(function_in,
4585 JSFunction::kPrototypeOrInitialMapOffset));
4586 if (!FLAG_smi_only_arrays) {
4587 ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
4588 LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
4589 kind,
4590 map_out,
4591 scratch,
4592 &done);
4593 } else if (can_have_holes) {
4594 LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
4595 FAST_HOLEY_SMI_ELEMENTS,
4596 map_out,
4597 scratch,
4598 &done);
4599 }
4600 bind(&done);
4601 }
4602
4603 #ifdef _WIN64
4604 static const int kRegisterPassedArguments = 4;
4605 #else
4606 static const int kRegisterPassedArguments = 6;
4607 #endif
4608
4609 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4610 // Load the global or builtins object from the current context.
4611 movq(function,
4612 Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4613 // Load the native context from the global or builtins object.
4614 movq(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
4615 // Load the function from the native context.
4616 movq(function, Operand(function, Context::SlotOffset(index)));
4617 }
4618
4619
4620 void MacroAssembler::LoadArrayFunction(Register function) {
4621 movq(function,
4622 Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4623 movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
4624 movq(function,
4625 Operand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
4626 }
4627
4628
4629 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4630 Register map) {
4631 // Load the initial map. The global functions all have initial maps.
4632 movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4633 if (emit_debug_code()) {
4634 Label ok, fail;
4635 CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
4636 jmp(&ok);
4637 bind(&fail);
4638 Abort(kGlobalFunctionsMustHaveInitialMap);
4639 bind(&ok);
4640 }
4641 }
4642
4643
4644 int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
4645 // On Windows 64 stack slots are reserved by the caller for all arguments
4646 // including the ones passed in registers, and space is always allocated for
4647 // the four register arguments even if the function takes fewer than four
4648 // arguments.
4649 // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
4650 // and the caller does not reserve stack slots for them.
4651 ASSERT(num_arguments >= 0);
4652 #ifdef _WIN64
4653 const int kMinimumStackSlots = kRegisterPassedArguments;
4654 if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
4655 return num_arguments;
4656 #else
4657 if (num_arguments < kRegisterPassedArguments) return 0;
4658 return num_arguments - kRegisterPassedArguments;
4659 #endif
4660 }
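// Examples (sketch): num_arguments == 3 gives 4 slots on Windows x64 (shadow
// space is always reserved for four register arguments) and 0 on the AMD64
// System V ABI; num_arguments == 8 gives 8 slots on Windows x64 and
// 8 - 6 == 2 on System V, where the first six arguments travel in registers.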
4661
4662
4663 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
4664 Register index,
4665 Register value,
4666 uint32_t encoding_mask) {
4667 Label is_object;
4668 JumpIfNotSmi(string, &is_object);
4669 Throw(kNonObject);
4670 bind(&is_object);
4671
4672 push(value);
4673 movq(value, FieldOperand(string, HeapObject::kMapOffset));
4674 movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
4675
4676 andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
4677 cmpq(value, Immediate(encoding_mask));
4678 pop(value);
4679 ThrowIf(not_equal, kUnexpectedStringType);
4680
4681 // The index is assumed to be untagged coming in; tag it to compare with the
4682 // string length without using a temp register. It is restored at the end of
4683 // this function.
4684 Integer32ToSmi(index, index);
4685 SmiCompare(index, FieldOperand(string, String::kLengthOffset));
4686 ThrowIf(greater_equal, kIndexIsTooLarge);
4687
4688 SmiCompare(index, Smi::FromInt(0));
4689 ThrowIf(less, kIndexIsNegative);
4690
4691 // Restore the index
4692 SmiToInteger32(index, index);
4693 }
4694
4695
4696 void MacroAssembler::PrepareCallCFunction(int num_arguments) {
4697 int frame_alignment = OS::ActivationFrameAlignment();
4698 ASSERT(frame_alignment != 0);
4699 ASSERT(num_arguments >= 0);
4700
4701 // Make stack end at alignment and allocate space for arguments and old rsp.
4702 movq(kScratchRegister, rsp);
4703 ASSERT(IsPowerOf2(frame_alignment));
4704 int argument_slots_on_stack =
4705 ArgumentStackSlotsForCFunctionCall(num_arguments);
4706 subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize));
4707 and_(rsp, Immediate(-frame_alignment));
4708 movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister);
4709 }
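// After PrepareCallCFunction(n) the stack looks like this (sketch), with
// slots == ArgumentStackSlotsForCFunctionCall(n):
//
//   [rsp + slots * kPointerSize]                   : original rsp
//   [rsp + 0 .. rsp + (slots - 1) * kPointerSize]  : outgoing argument slots
//
// rsp itself has been aligned down to OS::ActivationFrameAlignment();
// CallCFunction undoes all of this by reloading rsp from the saved slot
// instead of adding back a constant.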
4710
4711
4712 void MacroAssembler::CallCFunction(ExternalReference function,
4713 int num_arguments) {
4714 LoadAddress(rax, function);
4715 CallCFunction(rax, num_arguments);
4716 }
4717
4718
4719 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
4720 ASSERT(has_frame());
4721 // Check stack alignment.
4722 if (emit_debug_code()) {
4723 CheckStackAlignment();
4724 }
4725
4726 call(function);
4727 ASSERT(OS::ActivationFrameAlignment() != 0);
4728 ASSERT(num_arguments >= 0);
4729 int argument_slots_on_stack =
4730 ArgumentStackSlotsForCFunctionCall(num_arguments);
4731 movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
4732 }
4733
4734
4735 bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
4736 if (r1.is(r2)) return true;
4737 if (r1.is(r3)) return true;
4738 if (r1.is(r4)) return true;
4739 if (r2.is(r3)) return true;
4740 if (r2.is(r4)) return true;
4741 if (r3.is(r4)) return true;
4742 return false;
4743 }
4744
4745
4746 CodePatcher::CodePatcher(byte* address, int size)
4747 : address_(address),
4748 size_(size),
4749 masm_(NULL, address, size + Assembler::kGap) {
4750 // Create a new macro assembler pointing to the address of the code to patch.
4751 // The size is adjusted with kGap in order for the assembler to generate size
4752 // bytes of instructions without failing with buffer size constraints.
4753 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4754 }
4755
4756
4757 CodePatcher::~CodePatcher() {
4758 // Indicate that code has changed.
4759 CPU::FlushICache(address_, size_);
4760
4761 // Check that the code was patched as expected.
4762 ASSERT(masm_.pc_ == address_ + size_);
4763 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4764 }
4765
4766
4767 void MacroAssembler::CheckPageFlag(
4768 Register object,
4769 Register scratch,
4770 int mask,
4771 Condition cc,
4772 Label* condition_met,
4773 Label::Distance condition_met_distance) {
4774 ASSERT(cc == zero || cc == not_zero);
4775 if (scratch.is(object)) {
4776 and_(scratch, Immediate(~Page::kPageAlignmentMask));
4777 } else {
4778 movq(scratch, Immediate(~Page::kPageAlignmentMask));
4779 and_(scratch, object);
4780 }
4781 if (mask < (1 << kBitsPerByte)) {
4782 testb(Operand(scratch, MemoryChunk::kFlagsOffset),
4783 Immediate(static_cast<uint8_t>(mask)));
4784 } else {
4785 testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
4786 }
4787 j(cc, condition_met, condition_met_distance);
4788 }
4789
4790
4791 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
4792 Register scratch,
4793 Label* if_deprecated) {
4794 if (map->CanBeDeprecated()) {
4795 Move(scratch, map);
4796 movq(scratch, FieldOperand(scratch, Map::kBitField3Offset));
4797 SmiToInteger32(scratch, scratch);
4798 and_(scratch, Immediate(Map::Deprecated::kMask));
4799 j(not_zero, if_deprecated);
4800 }
4801 }
4802
4803
4804 void MacroAssembler::JumpIfBlack(Register object,
4805 Register bitmap_scratch,
4806 Register mask_scratch,
4807 Label* on_black,
4808 Label::Distance on_black_distance) {
4809 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
4810 GetMarkBits(object, bitmap_scratch, mask_scratch);
4811
4812 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
4813 // The mask_scratch register contains a 1 at the position of the first bit
4814 // and a 0 at all other positions, including the position of the second bit.
4815 movq(rcx, mask_scratch);
4816 // Make rcx into a mask that covers both marking bits using the operation
4817 // rcx = mask | (mask << 1).
4818 lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
4819 // Note that we are using a 4-byte aligned 8-byte load.
4820 and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
4821 cmpq(mask_scratch, rcx);
4822 j(equal, on_black, on_black_distance);
4823 }
4824
4825
4826 // Detect some, but not all, common pointer-free objects. This is used by the
4827 // incremental write barrier which doesn't care about oddballs (they are always
4828 // marked black immediately so this code is not hit).
4829 void MacroAssembler::JumpIfDataObject(
4830 Register value,
4831 Register scratch,
4832 Label* not_data_object,
4833 Label::Distance not_data_object_distance) {
4834 Label is_data_object;
4835 movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
4836 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
4837 j(equal, &is_data_object, Label::kNear);
4838 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
4839 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
4840 // If it's a string and it's not a cons string then it's an object containing
4841 // no GC pointers.
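  // The not-string bit catches any non-string heap object (heap numbers were
  // handled above); the indirect bit catches cons and sliced strings, which
  // point at other strings.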
  testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
        Immediate(kIsIndirectStringMask | kIsNotStringMask));
  j(not_zero, not_data_object, not_data_object_distance);
  bind(&is_data_object);
}


void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
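  // The computation below locates the marking-bitmap cell and bit for
  // addr_reg. As an illustration (assuming the usual x64 constants, not
  // restated here: 8-byte pointers and 32-bit bitmap cells, so
  // kPointerSizeLog2 == 3, Bitmap::kBitsPerCellLog2 == 5 and
  // Bitmap::kBytesPerCellLog2 == 2, giving shift == 6): an object at page
  // offset 0x1A0 covers mark bit 0x1A0 >> 3 == 52, which lives in cell
  // 52 >> 5 == 1, i.e. at byte offset (0x1A0 >> 6) & ~3 == 4 from the start
  // of the bitmap, with mask_reg == 1 << (52 & 31) == 1 << 20.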
  movq(bitmap_reg, addr_reg);
  // Sign extended 32 bit immediate.
  and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
  movq(rcx, addr_reg);
  int shift =
      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
  shrl(rcx, Immediate(shift));
  and_(rcx,
       Immediate((Page::kPageAlignmentMask >> shift) &
                 ~(Bitmap::kBytesPerCell - 1)));

  addq(bitmap_reg, rcx);
  movq(rcx, addr_reg);
  shrl(rcx, Immediate(kPointerSizeLog2));
  and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
  movl(mask_reg, Immediate(1));
  shl_cl(mask_reg);
}


void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register mask_scratch,
    Label* value_is_white_and_not_data,
    Label::Distance distance) {
  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  Label done;

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
  j(not_zero, &done, Label::kNear);

  if (emit_debug_code()) {
    // Check for impossible bit pattern.
    Label ok;
    push(mask_scratch);
    // Shift the mask to the second mark bit position (addq doubles it). The
    // shift may overflow, which only makes this check conservative.
    addq(mask_scratch, mask_scratch);
    testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
    pop(mask_scratch);
  }

  // Value is white. We check whether it is data that doesn't need scanning.
  // Currently only checks for HeapNumber and non-cons strings.
  Register map = rcx;  // Holds map while checking type.
  Register length = rcx;  // Holds length of object after checking type.
  Label not_heap_number;
  Label is_data_object;

  // Check for heap-number
  movq(map, FieldOperand(value, HeapObject::kMapOffset));
  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
  j(not_equal, &not_heap_number, Label::kNear);
  movq(length, Immediate(HeapNumber::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_heap_number);
  // Check for strings.
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = rcx;
  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
  j(not_zero, value_is_white_and_not_data);
  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  Label not_external;
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
  testb(instance_type, Immediate(kExternalStringTag));
  j(zero, &not_external, Label::kNear);
  movq(length, Immediate(ExternalString::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_external);
  // Sequential string, either ASCII or UC16.
  ASSERT(kOneByteStringTag == 0x04);
  and_(length, Immediate(kStringEncodingMask));
  xor_(length, Immediate(kStringEncodingMask));
  addq(length, Immediate(0x04));
  // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
  imul(length, FieldOperand(value, String::kLengthOffset));
  shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
  addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
  and_(length, Immediate(~kObjectAlignmentMask));
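  // Worked example (assuming x64 smis are the 32-bit value shifted left by
  // 32, i.e. kSmiTagSize + kSmiShiftSize == 32): for an ASCII string of
  // length 5, length == 4 * (5 << 32), and the shift by 2 + 32 leaves the 5
  // character bytes; adding SeqString::kHeaderSize and rounding up to the
  // object alignment gives the allocation size in bytes.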

  bind(&is_data_object);
  // Value is a data object, and it is white. Mark it black. Since we know
  // that the object is white we can make it black by flipping one bit.
  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);

  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
  addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);

  bind(&done);
}


void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
  Label next, start;
  Register empty_fixed_array_value = r8;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  movq(rcx, rax);
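  // rcx walks the prototype chain, starting from the receiver, which the
  // caller is expected to pass in rax; rbx holds the map of the current
  // object.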

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));

  EnumLength(rdx, rbx);
  Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
  j(equal, call_runtime);

  jmp(&start);

  bind(&next);

  movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(rdx, rbx);
  Cmp(rdx, Smi::FromInt(0));
  j(not_equal, call_runtime);

  bind(&start);

  // Check that there are no elements. Register rcx contains the current JS
  // object we've reached through the prototype chain.
  cmpq(empty_fixed_array_value,
       FieldOperand(rcx, JSObject::kElementsOffset));
  j(not_equal, call_runtime);

  movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
  cmpq(rcx, null_value);
  j(not_equal, &next);
}


void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg,
    Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  lea(scratch_reg, Operand(receiver_reg,
      JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
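  // scratch_reg now points just past where an AllocationMemento would sit if
  // it directly follows the JSArray; such a memento can only exist if that
  // address lies inside new space, below the current allocation top.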
  Move(kScratchRegister, new_space_start);
  cmpq(scratch_reg, kScratchRegister);
  j(less, no_memento_found);
  cmpq(scratch_reg, ExternalOperand(new_space_allocation_top));
  j(greater, no_memento_found);
  CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
              Heap::kAllocationMementoMapRootIndex);
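  // The condition flags are left for the caller to test: equal means the
  // candidate memento's map word matches the AllocationMemento map.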
}


void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  ASSERT(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
  ASSERT(!scratch1.is(scratch0));
  Register current = scratch0;
  Label loop_again;

  movq(current, object);

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  movq(current, FieldOperand(current, HeapObject::kMapOffset));
  movq(scratch1, FieldOperand(current, Map::kBitField2Offset));
  and_(scratch1, Immediate(Map::kElementsKindMask));
  shr(scratch1, Immediate(Map::kElementsKindShift));
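  // scratch1 now holds the ElementsKind of the current map; dictionary-mode
  // elements terminate the walk.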
  cmpq(scratch1, Immediate(DICTIONARY_ELEMENTS));
  j(equal, found);
  movq(current, FieldOperand(current, Map::kPrototypeOffset));
  CompareRoot(current, Heap::kNullValueRootIndex);
  j(not_equal, &loop_again);
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64