// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_X64

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/callable.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
#include "src/globals.h"
#include "src/heap/heap-inl.h"
#include "src/instruction-stream.h"
#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/snapshot/snapshot.h"
#include "src/x64/assembler-x64.h"

#include "src/x64/macro-assembler-x64.h"  // Cannot be the first include.

namespace v8 {
namespace internal {

Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
  DCHECK_GE(index, 0);
  int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;
  int displacement_to_last_argument =
      base_reg_ == rsp ? kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
  displacement_to_last_argument += extra_displacement_to_last_argument_;
  if (argument_count_reg_ == no_reg) {
    // argument[0] is at base_reg_ + displacement_to_last_argument +
    // (argument_count_immediate_ + receiver - 1) * kPointerSize.
    DCHECK_GT(argument_count_immediate_ + receiver, 0);
    return Operand(
        base_reg_,
        displacement_to_last_argument +
            (argument_count_immediate_ + receiver - 1 - index) * kPointerSize);
  } else {
    // argument[0] is at base_reg_ + displacement_to_last_argument +
    // argument_count_reg_ * times_pointer_size + (receiver - 1) * kPointerSize.
    return Operand(
        base_reg_, argument_count_reg_, times_pointer_size,
        displacement_to_last_argument + (receiver - 1 - index) * kPointerSize);
  }
}
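
// Worked example (added commentary, not in the original source): for an
// rsp-based accessor with an immediate count of 2 arguments plus a receiver
// and 8-byte pointers, argument 0 resolves to
//   Operand(rsp, kPCOnStackSize + (2 + 1 - 1 - 0) * kPointerSize)
// i.e. rsp + 24, and argument 1 to rsp + 16: higher indices sit closer to the
// return address. The register-count variant encodes the same address as
// base + count * times_pointer_size + constant instead.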

StackArgumentsAccessor::StackArgumentsAccessor(
    Register base_reg, const ParameterCount& parameter_count,
    StackArgumentsAccessorReceiverMode receiver_mode,
    int extra_displacement_to_last_argument)
    : base_reg_(base_reg),
      argument_count_reg_(parameter_count.is_reg() ? parameter_count.reg()
                                                   : no_reg),
      argument_count_immediate_(
          parameter_count.is_immediate() ? parameter_count.immediate() : 0),
      receiver_mode_(receiver_mode),
      extra_displacement_to_last_argument_(
          extra_displacement_to_last_argument) {}

MacroAssembler::MacroAssembler(Isolate* isolate,
                               const AssemblerOptions& options, void* buffer,
                               int size, CodeObjectRequired create_code_object)
    : TurboAssembler(isolate, options, buffer, size, create_code_object) {
  if (create_code_object == CodeObjectRequired::kYes) {
    // Unlike TurboAssembler, which can be used off the main thread and may not
    // allocate, macro assembler creates its own copy of the self-reference
    // marker in order to disambiguate between self-references during nested
    // code generation (e.g.: codegen of the current object triggers stub
    // compilation through CodeStub::GetCode()).
    code_object_ = Handle<HeapObject>::New(
        *isolate->factory()->NewSelfReferenceMarker(), isolate);
  }
}

static const int64_t kInvalidRootRegisterDelta = -1;

int64_t TurboAssembler::RootRegisterDelta(ExternalReference other) {
  if (predictable_code_size() &&
      (other.address() < reinterpret_cast<Address>(isolate()) ||
       other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
    return kInvalidRootRegisterDelta;
  }
  return RootRegisterOffsetForExternalReference(isolate(), other);
}

void MacroAssembler::Load(Register destination, ExternalReference source) {
  if (root_array_available_ && options().enable_root_array_delta_access) {
    int64_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
      return;
    }
  }
  // Safe code.
  if (FLAG_embedded_builtins) {
    if (root_array_available_ && options().isolate_independent_code) {
      IndirectLoadExternalReference(kScratchRegister, source);
      movp(destination, Operand(kScratchRegister, 0));
      return;
    }
  }
  if (destination == rax) {
    load_rax(source);
  } else {
    Move(kScratchRegister, source);
    movp(destination, Operand(kScratchRegister, 0));
  }
}

void MacroAssembler::Store(ExternalReference destination, Register source) {
  if (root_array_available_ && options().enable_root_array_delta_access) {
    int64_t delta = RootRegisterDelta(destination);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
      return;
    }
  }
  // Safe code.
  if (source == rax) {
    store_rax(destination);
  } else {
    Move(kScratchRegister, destination);
    movp(Operand(kScratchRegister, 0), source);
  }
}

void TurboAssembler::LoadFromConstantsTable(Register destination,
                                            int constant_index) {
  DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
      Heap::kBuiltinsConstantsTableRootIndex));
  LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex);
  movp(destination,
       FieldOperand(destination,
                    FixedArray::kHeaderSize + constant_index * kPointerSize));
}

void TurboAssembler::LoadRootRegisterOffset(Register destination,
                                            intptr_t offset) {
  DCHECK(is_int32(offset));
  if (offset == 0) {
    Move(destination, kRootRegister);
  } else {
    leap(destination, Operand(kRootRegister, static_cast<int32_t>(offset)));
  }
}

void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
  movp(destination, Operand(kRootRegister, offset));
}

void TurboAssembler::LoadAddress(Register destination,
                                 ExternalReference source) {
  if (root_array_available_ && options().enable_root_array_delta_access) {
    int64_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
      return;
    }
  }
  // Safe code.
  if (FLAG_embedded_builtins) {
    if (root_array_available_ && options().isolate_independent_code) {
      IndirectLoadExternalReference(destination, source);
      return;
    }
  }
  Move(destination, source);
}

Operand TurboAssembler::ExternalOperand(ExternalReference target,
                                        Register scratch) {
  if (root_array_available_ && options().enable_root_array_delta_access) {
    int64_t delta = RootRegisterDelta(target);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      return Operand(kRootRegister, static_cast<int32_t>(delta));
    }
  }
  Move(scratch, target);
  return Operand(scratch, 0);
}

void MacroAssembler::PushAddress(ExternalReference source) {
  LoadAddress(kScratchRegister, source);
  Push(kScratchRegister);
}

void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  movp(destination, Operand(kRootRegister, RootRegisterOffset(index)));
}

void MacroAssembler::PushRoot(Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  Push(Operand(kRootRegister, RootRegisterOffset(index)));
}

void TurboAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  cmpp(with, Operand(kRootRegister, RootRegisterOffset(index)));
}

void TurboAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  DCHECK(!with.AddressUsesRegister(kScratchRegister));
  LoadRoot(kScratchRegister, index);
  cmpp(with, kScratchRegister);
}

void MacroAssembler::RecordWriteField(Register object, int offset,
                                      Register value, Register dst,
                                      SaveFPRegsMode save_fp,
                                      RememberedSetAction remembered_set_action,
                                      SmiCheck smi_check) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  leap(dst, FieldOperand(object, offset));
  if (emit_debug_code()) {
    Label ok;
    testb(dst, Immediate(kPointerSize - 1));
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  RecordWrite(object, dst, value, save_fp, remembered_set_action,
              OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(value, kZapValue, RelocInfo::NONE);
    Move(dst, kZapValue, RelocInfo::NONE);
  }
}
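
// Illustrative usage (added commentary; the registers and field chosen here
// are hypothetical, not from the original source):
//   movp(FieldOperand(rbx, JSObject::kElementsOffset), rax);
//   RecordWriteField(rbx, JSObject::kElementsOffset, rax, rcx,
//                    kDontSaveFPRegs, EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
// Here rcx receives the slot address computed by the leap above; with debug
// code enabled, both rax and rcx are zapped afterwards.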

void TurboAssembler::SaveRegisters(RegList registers) {
  DCHECK_GT(NumRegs(registers), 0);
  for (int i = 0; i < Register::kNumRegisters; ++i) {
    if ((registers >> i) & 1u) {
      pushq(Register::from_code(i));
    }
  }
}

void TurboAssembler::RestoreRegisters(RegList registers) {
  DCHECK_GT(NumRegs(registers), 0);
  for (int i = Register::kNumRegisters - 1; i >= 0; --i) {
    if ((registers >> i) & 1u) {
      popq(Register::from_code(i));
    }
  }
}

void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
  Callable const callable =
      Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
  RegList registers = callable.descriptor().allocatable_registers();

  SaveRegisters(registers);

  Register object_parameter(callable.descriptor().GetRegisterParameter(
      RecordWriteDescriptor::kObject));
  Register slot_parameter(
      callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
  Register isolate_parameter(callable.descriptor().GetRegisterParameter(
      RecordWriteDescriptor::kIsolate));
  Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
      RecordWriteDescriptor::kRememberedSet));
  Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
      RecordWriteDescriptor::kFPMode));

  // Prepare argument registers for calling RecordWrite
  // slot_parameter   <= address
  // object_parameter <= object
  if (slot_parameter != object) {
    // Normal case
    Move(slot_parameter, address);
    Move(object_parameter, object);
  } else if (object_parameter != address) {
    // Only slot_parameter and object are the same register
    // object_parameter <= object
    // slot_parameter   <= address
    Move(object_parameter, object);
    Move(slot_parameter, address);
  } else {
    // slot_parameter   \/ address
    // object_parameter /\ object
    xchgq(slot_parameter, object_parameter);
  }

  LoadAddress(isolate_parameter, ExternalReference::isolate_address(isolate()));

  Smi* smi_rsa = Smi::FromEnum(remembered_set_action);
  Smi* smi_fm = Smi::FromEnum(fp_mode);
  Move(remembered_set_parameter, smi_rsa);
  if (smi_rsa != smi_fm) {
    Move(fp_mode_parameter, smi_fm);
  } else {
    movq(fp_mode_parameter, remembered_set_parameter);
  }
  Call(callable.code(), RelocInfo::CODE_TARGET);

  RestoreRegisters(registers);
}
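
// Added commentary (not in the original source): the three-way move above
// covers every aliasing case between the incoming {object, address} pair and
// the descriptor-assigned {object_parameter, slot_parameter} pair. In the
// worst case, object already sits in slot_parameter and address in
// object_parameter, so two plain moves would clobber a source value; a
// single xchgq swaps both without needing a third register.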

void MacroAssembler::RecordWrite(Register object, Register address,
                                 Register value, SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  DCHECK(object != value);
  DCHECK(object != address);
  DCHECK(value != address);
  AssertNotSmi(object);

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    Label ok;
    cmpp(value, Operand(address, 0));
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    // Skip barrier if writing a smi.
    JumpIfSmi(value, &done);
  }

  CheckPageFlag(value,
                value,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
                Label::kNear);

  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
                Label::kNear);

  CallRecordWriteStub(object, address, remembered_set_action, fp_mode);

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(address, kZapValue, RelocInfo::NONE);
    Move(value, kZapValue, RelocInfo::NONE);
  }
}
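
// Added commentary (not in the original source): the two CheckPageFlag tests
// above form the write-barrier filter. The first exits when the page holding
// {value} is not interesting as a pointer target
// (kPointersToHereAreInterestingMask clear), the second when the page
// holding {object} does not need its outgoing pointers tracked. Only stores
// that pass both tests pay for the CallRecordWriteStub call.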

void TurboAssembler::Assert(Condition cc, AbortReason reason) {
  if (emit_debug_code()) Check(cc, reason);
}

void TurboAssembler::AssertUnreachable(AbortReason reason) {
  if (emit_debug_code()) Abort(reason);
}

void TurboAssembler::Check(Condition cc, AbortReason reason) {
  Label L;
  j(cc, &L, Label::kNear);
  Abort(reason);
  // Control will not return here.
  bind(&L);
}

void TurboAssembler::CheckStackAlignment() {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (frame_alignment > kPointerSize) {
    DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
    Label alignment_as_expected;
    testp(rsp, Immediate(frame_alignment_mask));
    j(zero, &alignment_as_expected, Label::kNear);
    // Abort if stack is not aligned.
    int3();
    bind(&alignment_as_expected);
  }
}

void TurboAssembler::Abort(AbortReason reason) {
#ifdef DEBUG
  const char* msg = GetAbortReason(reason);
  RecordComment("Abort message: ");
  RecordComment(msg);
#endif

  // Avoid emitting call to builtin if requested.
  if (trap_on_abort()) {
    int3();
    return;
  }

  if (should_abort_hard()) {
    // We don't care if we constructed a frame. Just pretend we did.
    FrameScope assume_frame(this, StackFrame::NONE);
    movl(arg_reg_1, Immediate(static_cast<int>(reason)));
    PrepareCallCFunction(1);
    LoadAddress(rax, ExternalReference::abort_with_reason());
    call(rax);
    return;
  }

  Move(rdx, Smi::FromInt(static_cast<int>(reason)));

  if (!has_frame()) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
  } else {
    Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
  }
  // Control will not return here.
  int3();
}

void TurboAssembler::CallStubDelayed(CodeStub* stub) {
  DCHECK(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
  call(stub);
}

void MacroAssembler::CallStub(CodeStub* stub) {
  DCHECK(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET);
}

void MacroAssembler::TailCallStub(CodeStub* stub) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}

bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
  return has_frame() || !stub->SometimesSetsUpAFrame();
}

void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
                                           Register centry) {
  const Runtime::Function* f = Runtime::FunctionForId(fid);
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, f->nargs);
  LoadAddress(rbx, ExternalReference::Create(f));
  DCHECK(!AreAliased(centry, rax, rbx));
  addp(centry, Immediate(Code::kHeaderSize - kHeapObjectTag));
  Call(centry);
}
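
// Illustrative note (added, not in the original source): the convention set
// up above is rax = argument count, rbx = C entry point, with {centry}
// holding the CEntry code object whose header-skipped start is then called,
// e.g. (hypothetical register and runtime-function choice):
//   CallRuntimeWithCEntry(Runtime::kAllocateInNewSpace, rcx);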

void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, num_arguments);
  LoadAddress(rbx, ExternalReference::Create(f));
  Handle<Code> code =
      CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
  Call(code, RelocInfo::CODE_TARGET);
}

void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
  // ----------- S t a t e -------------
  //  -- rsp[0]                 : return address
  //  -- rsp[8]                 : argument num_arguments - 1
  //  ...
  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
  //
  //  For runtime functions with variable arguments:
  //  -- rax                    : number of arguments
  // -----------------------------------

  const Runtime::Function* function = Runtime::FunctionForId(fid);
  DCHECK_EQ(1, function->result_size);
  if (function->nargs >= 0) {
    Set(rax, function->nargs);
  }
  JumpToExternalReference(ExternalReference::Create(fid));
}

void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
                                             bool builtin_exit_frame) {
  // Set the entry point and jump to the C entry runtime stub.
  LoadAddress(rbx, ext);
  Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
                                          kArgvOnStack, builtin_exit_frame);
  Jump(code, RelocInfo::CODE_TARGET);
}

static constexpr Register saved_regs[] = {rax, rcx, rdx, rbx, rbp, rsi,
                                          rdi, r8,  r9,  r10, r11};

static constexpr int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);

int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                                    Register exclusion1,
                                                    Register exclusion2,
                                                    Register exclusion3) const {
  int bytes = 0;
  for (int i = 0; i < kNumberOfSavedRegs; i++) {
    Register reg = saved_regs[i];
    if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
      bytes += kPointerSize;
    }
  }

  // R12 to r15 are callee save on all platforms.
  if (fp_mode == kSaveFPRegs) {
    bytes += kDoubleSize * XMMRegister::kNumRegisters;
  }

  return bytes;
}

int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                    Register exclusion2, Register exclusion3) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  int bytes = 0;
  for (int i = 0; i < kNumberOfSavedRegs; i++) {
    Register reg = saved_regs[i];
    if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
      pushq(reg);
      bytes += kPointerSize;
    }
  }

  // R12 to r15 are callee save on all platforms.
  if (fp_mode == kSaveFPRegs) {
    int delta = kDoubleSize * XMMRegister::kNumRegisters;
    subp(rsp, Immediate(delta));
    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      Movsd(Operand(rsp, i * kDoubleSize), reg);
    }
    bytes += delta;
  }

  return bytes;
}

int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                   Register exclusion2, Register exclusion3) {
  int bytes = 0;
  if (fp_mode == kSaveFPRegs) {
    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      Movsd(reg, Operand(rsp, i * kDoubleSize));
    }
    int delta = kDoubleSize * XMMRegister::kNumRegisters;
    addp(rsp, Immediate(delta));
    bytes += delta;
  }

  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
    Register reg = saved_regs[i];
    if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
      popq(reg);
      bytes += kPointerSize;
    }
  }

  return bytes;
}
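
// Illustrative pairing (added, not in the original source): callers bracket
// a C call with matching push/pop sequences using the same exclusions, e.g.
//   int bytes = PushCallerSaved(kSaveFPRegs, rax);  // rax excluded: it will
//   ... call out to C ...                           // carry the result back
//   PopCallerSaved(kSaveFPRegs, rax);
// Excluding rax keeps the restore from overwriting the call's return value,
// and RequiredStackSizeForCallerSaved with the same arguments predicts
// {bytes} without emitting any code.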

void TurboAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvtss2sd(dst, src, src);
  } else {
    cvtss2sd(dst, src);
  }
}

void TurboAssembler::Cvtss2sd(XMMRegister dst, Operand src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvtss2sd(dst, dst, src);
  } else {
    cvtss2sd(dst, src);
  }
}

void TurboAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvtsd2ss(dst, src, src);
  } else {
    cvtsd2ss(dst, src);
  }
}

void TurboAssembler::Cvtsd2ss(XMMRegister dst, Operand src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvtsd2ss(dst, dst, src);
  } else {
    cvtsd2ss(dst, src);
  }
}

void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorpd(dst, dst, dst);
    vcvtlsi2sd(dst, dst, src);
  } else {
    xorpd(dst, dst);
    cvtlsi2sd(dst, src);
  }
}

void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Operand src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorpd(dst, dst, dst);
    vcvtlsi2sd(dst, dst, src);
  } else {
    xorpd(dst, dst);
    cvtlsi2sd(dst, src);
  }
}

void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Register src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorps(dst, dst, dst);
    vcvtlsi2ss(dst, dst, src);
  } else {
    xorps(dst, dst);
    cvtlsi2ss(dst, src);
  }
}

void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Operand src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorps(dst, dst, dst);
    vcvtlsi2ss(dst, dst, src);
  } else {
    xorps(dst, dst);
    cvtlsi2ss(dst, src);
  }
}

void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorps(dst, dst, dst);
    vcvtqsi2ss(dst, dst, src);
  } else {
    xorps(dst, dst);
    cvtqsi2ss(dst, src);
  }
}

void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Operand src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorps(dst, dst, dst);
    vcvtqsi2ss(dst, dst, src);
  } else {
    xorps(dst, dst);
    cvtqsi2ss(dst, src);
  }
}

void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Register src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorpd(dst, dst, dst);
    vcvtqsi2sd(dst, dst, src);
  } else {
    xorpd(dst, dst);
    cvtqsi2sd(dst, src);
  }
}

void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Operand src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorpd(dst, dst, dst);
    vcvtqsi2sd(dst, dst, src);
  } else {
    xorpd(dst, dst);
    cvtqsi2sd(dst, src);
  }
}

void TurboAssembler::Cvtlui2ss(XMMRegister dst, Register src) {
  // Zero-extend the 32-bit value to 64 bits.
  movl(kScratchRegister, src);
  Cvtqsi2ss(dst, kScratchRegister);
}

void TurboAssembler::Cvtlui2ss(XMMRegister dst, Operand src) {
  // Zero-extend the 32-bit value to 64 bits.
  movl(kScratchRegister, src);
  Cvtqsi2ss(dst, kScratchRegister);
}

void TurboAssembler::Cvtlui2sd(XMMRegister dst, Register src) {
  // Zero-extend the 32-bit value to 64 bits.
  movl(kScratchRegister, src);
  Cvtqsi2sd(dst, kScratchRegister);
}

void TurboAssembler::Cvtlui2sd(XMMRegister dst, Operand src) {
  // Zero-extend the 32-bit value to 64 bits.
  movl(kScratchRegister, src);
  Cvtqsi2sd(dst, kScratchRegister);
}

void TurboAssembler::Cvtqui2ss(XMMRegister dst, Register src) {
  Label done;
  Cvtqsi2ss(dst, src);
  testq(src, src);
  j(positive, &done, Label::kNear);

  // Compute {src/2 | (src&1)} (retain the LSB to avoid rounding errors).
  if (src != kScratchRegister) movq(kScratchRegister, src);
  shrq(kScratchRegister, Immediate(1));
  // The LSB is shifted into CF. If it is set, set the LSB in kScratchRegister.
  Label msb_not_set;
  j(not_carry, &msb_not_set, Label::kNear);
  orq(kScratchRegister, Immediate(1));
  bind(&msb_not_set);
  Cvtqsi2ss(dst, kScratchRegister);
  addss(dst, dst);
  bind(&done);
}
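
// Worked example (added commentary, not in the original source): for
// src = 2^63 + 1 = 0x8000000000000001 the signed conversion above sees a
// negative input, so the code shifts the value right by one
// (0x4000000000000000, CF = 1), ORs the carried-out LSB back in
// (0x4000000000000001), converts, and doubles the result with addss, giving
// 2^63, the correctly rounded float. Retaining the LSB ("round to odd")
// makes the halved value round the same way the full value would, avoiding
// a double-rounding error.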

void TurboAssembler::Cvtqui2ss(XMMRegister dst, Operand src) {
  movq(kScratchRegister, src);
  Cvtqui2ss(dst, kScratchRegister);
}

void TurboAssembler::Cvtqui2sd(XMMRegister dst, Register src) {
  Label done;
  Cvtqsi2sd(dst, src);
  testq(src, src);
  j(positive, &done, Label::kNear);

  // Compute {src/2 | (src&1)} (retain the LSB to avoid rounding errors).
  if (src != kScratchRegister) movq(kScratchRegister, src);
  shrq(kScratchRegister, Immediate(1));
  // The LSB is shifted into CF. If it is set, set the LSB in kScratchRegister.
  Label msb_not_set;
  j(not_carry, &msb_not_set, Label::kNear);
  orq(kScratchRegister, Immediate(1));
  bind(&msb_not_set);
  Cvtqsi2sd(dst, kScratchRegister);
  addsd(dst, dst);
  bind(&done);
}

void TurboAssembler::Cvtqui2sd(XMMRegister dst, Operand src) {
  movq(kScratchRegister, src);
  Cvtqui2sd(dst, kScratchRegister);
}

void TurboAssembler::Cvttss2si(Register dst, XMMRegister src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvttss2si(dst, src);
  } else {
    cvttss2si(dst, src);
  }
}

void TurboAssembler::Cvttss2si(Register dst, Operand src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvttss2si(dst, src);
  } else {
    cvttss2si(dst, src);
  }
}

void TurboAssembler::Cvttsd2si(Register dst, XMMRegister src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvttsd2si(dst, src);
  } else {
    cvttsd2si(dst, src);
  }
}

void TurboAssembler::Cvttsd2si(Register dst, Operand src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvttsd2si(dst, src);
  } else {
    cvttsd2si(dst, src);
  }
}

void TurboAssembler::Cvttss2siq(Register dst, XMMRegister src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvttss2siq(dst, src);
  } else {
    cvttss2siq(dst, src);
  }
}

void TurboAssembler::Cvttss2siq(Register dst, Operand src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvttss2siq(dst, src);
  } else {
    cvttss2siq(dst, src);
  }
}

void TurboAssembler::Cvttsd2siq(Register dst, XMMRegister src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvttsd2siq(dst, src);
  } else {
    cvttsd2siq(dst, src);
  }
}

void TurboAssembler::Cvttsd2siq(Register dst, Operand src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvttsd2siq(dst, src);
  } else {
    cvttsd2siq(dst, src);
  }
}

namespace {
template <typename OperandOrXMMRegister, bool is_double>
void ConvertFloatToUint64(TurboAssembler* tasm, Register dst,
                          OperandOrXMMRegister src, Label* fail) {
  Label success;
  // There is no native float-to-uint instruction, so we have to use a
  // float-to-int conversion and postprocess the result.
  if (is_double) {
    tasm->Cvttsd2siq(dst, src);
  } else {
    tasm->Cvttss2siq(dst, src);
  }
  // If the result of the conversion is positive, we are already done.
  tasm->testq(dst, dst);
  tasm->j(positive, &success);
  // The result of the first conversion was negative, which means that the
  // input value was not within the positive int64 range. We subtract 2^63
  // and convert it again to see if it is within the uint64 range.
  if (is_double) {
    tasm->Move(kScratchDoubleReg, -9223372036854775808.0);
    tasm->addsd(kScratchDoubleReg, src);
    tasm->Cvttsd2siq(dst, kScratchDoubleReg);
  } else {
    tasm->Move(kScratchDoubleReg, -9223372036854775808.0f);
    tasm->addss(kScratchDoubleReg, src);
    tasm->Cvttss2siq(dst, kScratchDoubleReg);
  }
  tasm->testq(dst, dst);
  // The only possible negative value here is 0x8000000000000000, which is
  // used on x64 to indicate an integer overflow.
  tasm->j(negative, fail ? fail : &success);
  // The input value is within uint64 range and the second conversion worked
  // successfully, but we still have to undo the subtraction we did
  // earlier.
  tasm->Set(kScratchRegister, 0x8000000000000000);
  tasm->orq(dst, kScratchRegister);
  tasm->bind(&success);
}
}  // namespace
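
// Worked example (added commentary, not in the original source): for the
// double input 2^63 the first Cvttsd2siq overflows and returns
// 0x8000000000000000, so the slow path adds -2^63, giving 0.0, which
// converts to 0; ORing 0x8000000000000000 back in produces the correct
// uint64 result 2^63. Inputs at or above 2^64 overflow the second conversion
// as well and take the {fail} branch.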

void TurboAssembler::Cvttsd2uiq(Register dst, Operand src, Label* fail) {
  ConvertFloatToUint64<Operand, true>(this, dst, src, fail);
}

void TurboAssembler::Cvttsd2uiq(Register dst, XMMRegister src, Label* fail) {
  ConvertFloatToUint64<XMMRegister, true>(this, dst, src, fail);
}

void TurboAssembler::Cvttss2uiq(Register dst, Operand src, Label* fail) {
  ConvertFloatToUint64<Operand, false>(this, dst, src, fail);
}

void TurboAssembler::Cvttss2uiq(Register dst, XMMRegister src, Label* fail) {
  ConvertFloatToUint64<XMMRegister, false>(this, dst, src, fail);
}

void MacroAssembler::Load(Register dst, Operand src, Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8()) {
    movsxbq(dst, src);
  } else if (r.IsUInteger8()) {
    movzxbl(dst, src);
  } else if (r.IsInteger16()) {
    movsxwq(dst, src);
  } else if (r.IsUInteger16()) {
    movzxwl(dst, src);
  } else if (r.IsInteger32()) {
    movl(dst, src);
  } else {
    movp(dst, src);
  }
}

void MacroAssembler::Store(Operand dst, Register src, Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    movb(dst, src);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    movw(dst, src);
  } else if (r.IsInteger32()) {
    movl(dst, src);
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    movp(dst, src);
  }
}

void TurboAssembler::Set(Register dst, int64_t x) {
  if (x == 0) {
    xorl(dst, dst);
  } else if (is_uint32(x)) {
    movl(dst, Immediate(static_cast<uint32_t>(x)));
  } else if (is_int32(x)) {
    movq(dst, Immediate(static_cast<int32_t>(x)));
  } else {
    movq(dst, x);
  }
}
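
// Added note on instruction selection (not in the original source): each
// branch above picks the shortest encoding that reproduces the constant,
// e.g. with the classic (non-REX-extended) registers:
//   Set(rax, 0);           // xorl rax, rax    (2 bytes)
//   Set(rax, 0xFFFFFFFF);  // movl rax, imm32  (5 bytes, zero-extends)
//   Set(rax, -1);          // movq rax, imm32  (7 bytes, sign-extends)
//   Set(rax, 1LL << 40);   // movq rax, imm64  (10 bytes)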

void TurboAssembler::Set(Operand dst, intptr_t x) {
  if (kPointerSize == kInt64Size) {
    if (is_int32(x)) {
      movp(dst, Immediate(static_cast<int32_t>(x)));
    } else {
      Set(kScratchRegister, x);
      movp(dst, kScratchRegister);
    }
  } else {
    movp(dst, Immediate(static_cast<int32_t>(x)));
  }
}

// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.

Register TurboAssembler::GetSmiConstant(Smi* source) {
  STATIC_ASSERT(kSmiTag == 0);
  int value = source->value();
  if (value == 0) {
    xorl(kScratchRegister, kScratchRegister);
    return kScratchRegister;
  }
  Move(kScratchRegister, source);
  return kScratchRegister;
}

void TurboAssembler::Move(Register dst, Smi* source) {
  STATIC_ASSERT(kSmiTag == 0);
  int value = source->value();
  if (value == 0) {
    xorl(dst, dst);
  } else {
    Move(dst, reinterpret_cast<Address>(source), RelocInfo::NONE);
  }
}

void TurboAssembler::Move(Register dst, ExternalReference ext) {
  if (FLAG_embedded_builtins) {
    if (root_array_available_ && options().isolate_independent_code) {
      IndirectLoadExternalReference(dst, ext);
      return;
    }
  }
  movp(dst, ext.address(), RelocInfo::EXTERNAL_REFERENCE);
}

void MacroAssembler::SmiTag(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (dst != src) {
    movp(dst, src);
  }
  DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
  shlp(dst, Immediate(kSmiShift));
}
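
// Illustrative layout (added commentary, not in the original source): with
// 32-bit smi values (kSmiShift == 32) the integer 5 is tagged as
// 0x0000000500000000; with 31-bit smi values (kSmiShift == 1) it becomes
// 0xA. In both schemes the low bit ends up 0, which is exactly what
// CheckSmi's testb against kSmiTagMask relies on.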

void TurboAssembler::SmiUntag(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (dst != src) {
    movp(dst, src);
  }
  DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
  sarp(dst, Immediate(kSmiShift));
}

void TurboAssembler::SmiUntag(Register dst, Operand src) {
  if (SmiValuesAre32Bits()) {
    movl(dst, Operand(src, kSmiShift / kBitsPerByte));
    // Sign extend to 64-bit.
    movsxlq(dst, dst);
  } else {
    DCHECK(SmiValuesAre31Bits());
    movp(dst, src);
    sarp(dst, Immediate(kSmiShift));
  }
}

void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
  AssertSmi(smi1);
  AssertSmi(smi2);
  cmpp(smi1, smi2);
}

void MacroAssembler::SmiCompare(Register dst, Smi* src) {
  AssertSmi(dst);
  Cmp(dst, src);
}

void MacroAssembler::Cmp(Register dst, Smi* src) {
  DCHECK_NE(dst, kScratchRegister);
  if (src->value() == 0) {
    testp(dst, dst);
  } else {
    Register constant_reg = GetSmiConstant(src);
    cmpp(dst, constant_reg);
  }
}

void MacroAssembler::SmiCompare(Register dst, Operand src) {
  AssertSmi(dst);
  AssertSmi(src);
  cmpp(dst, src);
}

void MacroAssembler::SmiCompare(Operand dst, Register src) {
  AssertSmi(dst);
  AssertSmi(src);
  cmpp(dst, src);
}

void MacroAssembler::SmiCompare(Operand dst, Smi* src) {
  AssertSmi(dst);
  if (SmiValuesAre32Bits()) {
    cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
  } else {
    DCHECK(SmiValuesAre31Bits());
    cmpl(dst, Immediate(src));
  }
}

void MacroAssembler::Cmp(Operand dst, Smi* src) {
  // The Operand cannot use the smi register.
  Register smi_reg = GetSmiConstant(src);
  DCHECK(!dst.AddressUsesRegister(smi_reg));
  cmpp(dst, smi_reg);
}

Condition TurboAssembler::CheckSmi(Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}

Condition TurboAssembler::CheckSmi(Operand src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}

void TurboAssembler::JumpIfSmi(Register src, Label* on_smi,
                               Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(smi, on_smi, near_jump);
}

void MacroAssembler::JumpIfNotSmi(Register src,
                                  Label* on_not_smi,
                                  Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(NegateCondition(smi), on_not_smi, near_jump);
}

void MacroAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi,
                                  Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(NegateCondition(smi), on_not_smi, near_jump);
}

void MacroAssembler::SmiAddConstant(Operand dst, Smi* constant) {
  if (constant->value() != 0) {
    if (SmiValuesAre32Bits()) {
      addl(Operand(dst, kSmiShift / kBitsPerByte),
           Immediate(constant->value()));
    } else {
      DCHECK(SmiValuesAre31Bits());
      if (kPointerSize == kInt64Size) {
        // Sign-extend the value after the addition.
        movl(kScratchRegister, dst);
        addl(kScratchRegister, Immediate(constant));
        movsxlq(kScratchRegister, kScratchRegister);
        movq(dst, kScratchRegister);
      } else {
        DCHECK_EQ(kSmiShiftSize, 32);
        addp(dst, Immediate(constant));
      }
    }
  }
}

SmiIndex MacroAssembler::SmiToIndex(Register dst,
                                    Register src,
                                    int shift) {
  if (SmiValuesAre32Bits()) {
    DCHECK(is_uint6(shift));
    // There is a possible optimization if shift is in the range 60-63, but
    // that will (and must) never happen.
    if (dst != src) {
      movp(dst, src);
    }
    if (shift < kSmiShift) {
      sarp(dst, Immediate(kSmiShift - shift));
    } else {
      shlp(dst, Immediate(shift - kSmiShift));
    }
    return SmiIndex(dst, times_1);
  } else {
    DCHECK(SmiValuesAre31Bits());
    if (dst != src) {
      movp(dst, src);
    }
    // We have to sign-extend the index register to 64 bits, as the smi might
    // be negative.
    movsxlq(dst, dst);
    if (shift < kSmiShift) {
      sarq(dst, Immediate(kSmiShift - shift));
    } else if (shift != kSmiShift) {
      if (shift - kSmiShift <= static_cast<int>(times_8)) {
        return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiShift));
      }
      shlq(dst, Immediate(shift - kSmiShift));
    }
    return SmiIndex(dst, times_1);
  }
}

void TurboAssembler::Push(Smi* source) {
  intptr_t smi = reinterpret_cast<intptr_t>(source);
  if (is_int32(smi)) {
    Push(Immediate(static_cast<int32_t>(smi)));
    return;
  }
  int first_byte_set = base::bits::CountTrailingZeros64(smi) / 8;
  int last_byte_set = (63 - base::bits::CountLeadingZeros64(smi)) / 8;
  if (first_byte_set == last_byte_set && kPointerSize == kInt64Size) {
    // This sequence has only 7 bytes, compared to the 12 bytes below.
    Push(Immediate(0));
    movb(Operand(rsp, first_byte_set),
         Immediate(static_cast<int8_t>(smi >> (8 * first_byte_set))));
    return;
  }
  Register constant = GetSmiConstant(source);
  Push(constant);
}
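
// Worked example (added commentary, not in the original source): under
// 32-bit smi values the smi for 1 is 0x0000000100000000, where only byte 4
// is non-zero. The short path emits "push 0" (2 bytes) plus the one-byte
// store "movb [rsp+4], 1" (5 bytes) for 7 bytes total, versus the 10-byte
// movq-immediate into kScratchRegister plus a 2-byte push.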

// ----------------------------------------------------------------------------

void TurboAssembler::Move(Register dst, Register src) {
  if (dst != src) {
    movp(dst, src);
  }
}

void TurboAssembler::MoveNumber(Register dst, double value) {
  int32_t smi;
  if (DoubleToSmiInteger(value, &smi)) {
    Move(dst, Smi::FromInt(smi));
  } else {
    movp_heap_number(dst, value);
  }
}

void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
  if (src == 0) {
    Xorps(dst, dst);
  } else {
    unsigned nlz = base::bits::CountLeadingZeros(src);
    unsigned ntz = base::bits::CountTrailingZeros(src);
    unsigned pop = base::bits::CountPopulation(src);
    DCHECK_NE(0u, pop);
    if (pop + ntz + nlz == 32) {
      Pcmpeqd(dst, dst);
      if (ntz) Pslld(dst, static_cast<byte>(ntz + nlz));
      if (nlz) Psrld(dst, static_cast<byte>(nlz));
    } else {
      movl(kScratchRegister, Immediate(src));
      Movd(dst, kScratchRegister);
    }
  }
}
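
// Worked example (added commentary, not in the original source):
// src = 0x007FFFE0 is a single contiguous run of 18 set bits with nlz == 9
// and ntz == 5 (9 + 5 + 18 == 32), so no load is needed: Pcmpeqd fills the
// register with ones, Pslld by ntz + nlz == 14 clears the low bits, and
// Psrld by nlz == 9 slides the run into place. Constants whose set bits are
// not contiguous fall back to materializing via kScratchRegister.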
1256 
Move(XMMRegister dst,uint64_t src)1257 void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
1258   if (src == 0) {
1259     Xorpd(dst, dst);
1260   } else {
1261     unsigned nlz = base::bits::CountLeadingZeros(src);
1262     unsigned ntz = base::bits::CountTrailingZeros(src);
1263     unsigned pop = base::bits::CountPopulation(src);
1264     DCHECK_NE(0u, pop);
1265     if (pop + ntz + nlz == 64) {
1266       Pcmpeqd(dst, dst);
1267       if (ntz) Psllq(dst, static_cast<byte>(ntz + nlz));
1268       if (nlz) Psrlq(dst, static_cast<byte>(nlz));
1269     } else {
1270       uint32_t lower = static_cast<uint32_t>(src);
1271       uint32_t upper = static_cast<uint32_t>(src >> 32);
1272       if (upper == 0) {
1273         Move(dst, lower);
1274       } else {
1275         movq(kScratchRegister, src);
1276         Movq(dst, kScratchRegister);
1277       }
1278     }
1279   }
1280 }
1281 
1282 // ----------------------------------------------------------------------------
1283 
Absps(XMMRegister dst)1284 void MacroAssembler::Absps(XMMRegister dst) {
1285   Andps(dst,
1286         ExternalOperand(ExternalReference::address_of_float_abs_constant()));
1287 }
1288 
Negps(XMMRegister dst)1289 void MacroAssembler::Negps(XMMRegister dst) {
1290   Xorps(dst,
1291         ExternalOperand(ExternalReference::address_of_float_neg_constant()));
1292 }
1293 
Abspd(XMMRegister dst)1294 void MacroAssembler::Abspd(XMMRegister dst) {
1295   Andps(dst,
1296         ExternalOperand(ExternalReference::address_of_double_abs_constant()));
1297 }
1298 
Negpd(XMMRegister dst)1299 void MacroAssembler::Negpd(XMMRegister dst) {
1300   Xorps(dst,
1301         ExternalOperand(ExternalReference::address_of_double_neg_constant()));
1302 }
1303 
Cmp(Register dst,Handle<Object> source)1304 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
1305   AllowDeferredHandleDereference smi_check;
1306   if (source->IsSmi()) {
1307     Cmp(dst, Smi::cast(*source));
1308   } else {
1309     Move(kScratchRegister, Handle<HeapObject>::cast(source));
1310     cmpp(dst, kScratchRegister);
1311   }
1312 }
1313 
Cmp(Operand dst,Handle<Object> source)1314 void MacroAssembler::Cmp(Operand dst, Handle<Object> source) {
1315   AllowDeferredHandleDereference smi_check;
1316   if (source->IsSmi()) {
1317     Cmp(dst, Smi::cast(*source));
1318   } else {
1319     Move(kScratchRegister, Handle<HeapObject>::cast(source));
1320     cmpp(dst, kScratchRegister);
1321   }
1322 }
1323 
Push(Handle<HeapObject> source)1324 void TurboAssembler::Push(Handle<HeapObject> source) {
1325   Move(kScratchRegister, source);
1326   Push(kScratchRegister);
1327 }
1328 
Move(Register result,Handle<HeapObject> object,RelocInfo::Mode rmode)1329 void TurboAssembler::Move(Register result, Handle<HeapObject> object,
1330                           RelocInfo::Mode rmode) {
1331   if (FLAG_embedded_builtins) {
1332     if (root_array_available_ && options().isolate_independent_code) {
1333       IndirectLoadConstant(result, object);
1334       return;
1335     }
1336   }
1337   movp(result, object.address(), rmode);
1338 }
1339 
Move(Operand dst,Handle<HeapObject> object,RelocInfo::Mode rmode)1340 void TurboAssembler::Move(Operand dst, Handle<HeapObject> object,
1341                           RelocInfo::Mode rmode) {
1342   Move(kScratchRegister, object, rmode);
1343   movp(dst, kScratchRegister);
1344 }
1345 
Drop(int stack_elements)1346 void MacroAssembler::Drop(int stack_elements) {
1347   if (stack_elements > 0) {
1348     addp(rsp, Immediate(stack_elements * kPointerSize));
1349   }
1350 }
1351 
1352 
DropUnderReturnAddress(int stack_elements,Register scratch)1353 void MacroAssembler::DropUnderReturnAddress(int stack_elements,
1354                                             Register scratch) {
1355   DCHECK_GT(stack_elements, 0);
1356   if (kPointerSize == kInt64Size && stack_elements == 1) {
1357     popq(MemOperand(rsp, 0));
1358     return;
1359   }
1360 
1361   PopReturnAddressTo(scratch);
1362   Drop(stack_elements);
1363   PushReturnAddressFrom(scratch);
1364 }
1365 
Push(Register src)1366 void TurboAssembler::Push(Register src) {
1367   if (kPointerSize == kInt64Size) {
1368     pushq(src);
1369   } else {
1370     // x32 uses 64-bit push for rbp in the prologue.
1371     DCHECK(src.code() != rbp.code());
1372     leal(rsp, Operand(rsp, -4));
1373     movp(Operand(rsp, 0), src);
1374   }
1375 }
1376 
Push(Operand src)1377 void TurboAssembler::Push(Operand src) {
1378   if (kPointerSize == kInt64Size) {
1379     pushq(src);
1380   } else {
1381     movp(kScratchRegister, src);
1382     leal(rsp, Operand(rsp, -4));
1383     movp(Operand(rsp, 0), kScratchRegister);
1384   }
1385 }
1386 
PushQuad(Operand src)1387 void MacroAssembler::PushQuad(Operand src) {
1388   if (kPointerSize == kInt64Size) {
1389     pushq(src);
1390   } else {
1391     movp(kScratchRegister, src);
1392     pushq(kScratchRegister);
1393   }
1394 }
1395 
Push(Immediate value)1396 void TurboAssembler::Push(Immediate value) {
1397   if (kPointerSize == kInt64Size) {
1398     pushq(value);
1399   } else {
1400     leal(rsp, Operand(rsp, -4));
1401     movp(Operand(rsp, 0), value);
1402   }
1403 }
1404 
1405 
PushImm32(int32_t imm32)1406 void MacroAssembler::PushImm32(int32_t imm32) {
1407   if (kPointerSize == kInt64Size) {
1408     pushq_imm32(imm32);
1409   } else {
1410     leal(rsp, Operand(rsp, -4));
1411     movp(Operand(rsp, 0), Immediate(imm32));
1412   }
1413 }
1414 
1415 
Pop(Register dst)1416 void MacroAssembler::Pop(Register dst) {
1417   if (kPointerSize == kInt64Size) {
1418     popq(dst);
1419   } else {
1420     // x32 uses 64-bit pop for rbp in the epilogue.
1421     DCHECK(dst.code() != rbp.code());
1422     movp(dst, Operand(rsp, 0));
1423     leal(rsp, Operand(rsp, 4));
1424   }
1425 }
1426 
Pop(Operand dst)1427 void MacroAssembler::Pop(Operand dst) {
1428   if (kPointerSize == kInt64Size) {
1429     popq(dst);
1430   } else {
1431     Register scratch = dst.AddressUsesRegister(kScratchRegister)
1432         ? kRootRegister : kScratchRegister;
1433     movp(scratch, Operand(rsp, 0));
1434     movp(dst, scratch);
1435     leal(rsp, Operand(rsp, 4));
1436     if (scratch == kRootRegister) {
1437       // Restore kRootRegister.
1438       InitializeRootRegister();
1439     }
1440   }
1441 }
1442 
PopQuad(Operand dst)1443 void MacroAssembler::PopQuad(Operand dst) {
1444   if (kPointerSize == kInt64Size) {
1445     popq(dst);
1446   } else {
1447     popq(kScratchRegister);
1448     movp(dst, kScratchRegister);
1449   }
1450 }
1451 
Jump(ExternalReference ext)1452 void TurboAssembler::Jump(ExternalReference ext) {
1453   LoadAddress(kScratchRegister, ext);
1454   jmp(kScratchRegister);
1455 }
1456 
Jump(Operand op)1457 void TurboAssembler::Jump(Operand op) {
1458   if (kPointerSize == kInt64Size) {
1459     jmp(op);
1460   } else {
1461     movp(kScratchRegister, op);
1462     jmp(kScratchRegister);
1463   }
1464 }
1465 
Jump(Address destination,RelocInfo::Mode rmode)1466 void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
1467   Move(kScratchRegister, destination, rmode);
1468   jmp(kScratchRegister);
1469 }
1470 
1471 void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
1472                           Condition cc) {
1473   // TODO(X64): Inline this
1474   if (FLAG_embedded_builtins) {
1475     if (root_array_available_ && options().isolate_independent_code &&
1476         !Builtins::IsIsolateIndependentBuiltin(*code_object)) {
1477       // Calls to embedded targets are initially generated as standard
1478       // pc-relative calls below. When creating the embedded blob, call offsets
1479       // are patched up to point directly to the off-heap instruction start.
1480       // Note: It is safe to dereference code_object above since code generation
1481       // for builtins and code stubs happens on the main thread.
1482       Label skip;
1483       if (cc != always) {
1484         if (cc == never) return;
1485         j(NegateCondition(cc), &skip, Label::kNear);
1486       }
1487       IndirectLoadConstant(kScratchRegister, code_object);
1488       leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
1489       jmp(kScratchRegister);
1490       bind(&skip);
1491       return;
1492     } else if (options().inline_offheap_trampolines) {
1493       int builtin_index = Builtins::kNoBuiltinId;
1494       if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
1495           Builtins::IsIsolateIndependent(builtin_index)) {
1496         // Inline the trampoline.
1497         RecordCommentForOffHeapTrampoline(builtin_index);
1498         CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
1499         EmbeddedData d = EmbeddedData::FromBlob();
1500         Address entry = d.InstructionStartOfBuiltin(builtin_index);
1501         Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
1502         jmp(kScratchRegister);
1503         return;
1504       }
1505     }
1506   }
1507   j(cc, code_object, rmode);
1508 }
1509 
1510 void MacroAssembler::JumpToInstructionStream(Address entry) {
1511   Move(kOffHeapTrampolineRegister, entry, RelocInfo::OFF_HEAP_TARGET);
1512   jmp(kOffHeapTrampolineRegister);
1513 }
1514 
1515 void TurboAssembler::Call(ExternalReference ext) {
1516   LoadAddress(kScratchRegister, ext);
1517   call(kScratchRegister);
1518 }
1519 
1520 void TurboAssembler::Call(Operand op) {
1521   if (kPointerSize == kInt64Size && !CpuFeatures::IsSupported(ATOM)) {
1522     call(op);
1523   } else {
1524     movp(kScratchRegister, op);
1525     call(kScratchRegister);
1526   }
1527 }
1528 
1529 void TurboAssembler::Call(Address destination, RelocInfo::Mode rmode) {
1530   Move(kScratchRegister, destination, rmode);
1531   call(kScratchRegister);
1532 }
1533 
1534 void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
1535   if (FLAG_embedded_builtins) {
1536     if (root_array_available_ && options().isolate_independent_code &&
1537         !Builtins::IsIsolateIndependentBuiltin(*code_object)) {
1538       // Calls to embedded targets are initially generated as standard
1539       // pc-relative calls below. When creating the embedded blob, call offsets
1540       // are patched up to point directly to the off-heap instruction start.
1541       // Note: It is safe to dereference code_object above since code generation
1542       // for builtins and code stubs happens on the main thread.
1543       IndirectLoadConstant(kScratchRegister, code_object);
1544       leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
1545       call(kScratchRegister);
1546       return;
1547     } else if (options().inline_offheap_trampolines) {
1548       int builtin_index = Builtins::kNoBuiltinId;
1549       if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
1550           Builtins::IsIsolateIndependent(builtin_index)) {
1551         // Inline the trampoline.
1552         RecordCommentForOffHeapTrampoline(builtin_index);
1553         CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
1554         EmbeddedData d = EmbeddedData::FromBlob();
1555         Address entry = d.InstructionStartOfBuiltin(builtin_index);
1556         Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
1557         call(kScratchRegister);
1558         return;
1559       }
1560     }
1561   }
1562   DCHECK(RelocInfo::IsCodeTarget(rmode));
1563   call(code_object, rmode);
1564 }
1565 
1566 void TurboAssembler::RetpolineCall(Register reg) {
1567   Label setup_return, setup_target, inner_indirect_branch, capture_spec;
1568 
1569   jmp(&setup_return);  // Jump past the entire retpoline below.
1570 
1571   bind(&inner_indirect_branch);
1572   call(&setup_target);
1573 
1574   bind(&capture_spec);
1575   pause();
1576   jmp(&capture_spec);
1577 
1578   bind(&setup_target);
1579   movq(Operand(rsp, 0), reg);
1580   ret(0);
1581 
1582   bind(&setup_return);
1583   call(&inner_indirect_branch);  // Callee will return after this instruction.
1584 }
1585 
1586 void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
1587   Move(kScratchRegister, destination, rmode);
1588   RetpolineCall(kScratchRegister);
1589 }
1590 
1591 void TurboAssembler::RetpolineJump(Register reg) {
1592   Label setup_target, capture_spec;
1593 
1594   call(&setup_target);
1595 
1596   bind(&capture_spec);
1597   pause();
1598   jmp(&capture_spec);
1599 
1600   bind(&setup_target);
1601   movq(Operand(rsp, 0), reg);
1602   ret(0);
1603 }
1604 
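// [Editor's note] How the retpoline sequences above defuse indirect-branch
// speculation, in outline: call(&setup_target) pushes the address of the
// pause/jmp loop as the return address; setup_target then overwrites that
// stack slot with the real target and executes ret. Architecturally control
// reaches the target, but a return predicted from the return-stack buffer
// speculates into the pause/jmp loop, so misprediction is captured
// harmlessly. Hedged usage sketch: emit RetpolineCall(rbx) wherever
// call(rbx) would otherwise be emitted, at the cost of a few extra
// instructions per indirect branch.
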
1605 void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
1606   if (imm8 == 0) {
1607     Movd(dst, src);
1608     return;
1609   }
1610   if (CpuFeatures::IsSupported(SSE4_1)) {
1611     CpuFeatureScope sse_scope(this, SSE4_1);
1612     pextrd(dst, src, imm8);
1613     return;
1614   }
1615   DCHECK_EQ(1, imm8);
1616   movq(dst, src);
1617   shrq(dst, Immediate(32));
1618 }
1619 
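// [Editor's note] Worked example of the pre-SSE4.1 fallback above:
// Pextrd(rax, xmm0, 1) emits movq rax, xmm0 followed by shrq rax, 32,
// leaving lane 1 of xmm0 zero-extended in rax. Lane 0 always takes the
// Movd path, so only imm8 == 1 reaches the shift sequence.
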
1620 void TurboAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
1621   if (CpuFeatures::IsSupported(SSE4_1)) {
1622     CpuFeatureScope sse_scope(this, SSE4_1);
1623     pinsrd(dst, src, imm8);
1624     return;
1625   }
1626   Movd(kScratchDoubleReg, src);
1627   if (imm8 == 1) {
1628     punpckldq(dst, kScratchDoubleReg);
1629   } else {
1630     DCHECK_EQ(0, imm8);
1631     Movss(dst, kScratchDoubleReg);
1632   }
1633 }
1634 
1635 void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
1636   DCHECK(imm8 == 0 || imm8 == 1);
1637   if (CpuFeatures::IsSupported(SSE4_1)) {
1638     CpuFeatureScope sse_scope(this, SSE4_1);
1639     pinsrd(dst, src, imm8);
1640     return;
1641   }
1642   Movd(kScratchDoubleReg, src);
1643   if (imm8 == 1) {
1644     punpckldq(dst, kScratchDoubleReg);
1645   } else {
1646     DCHECK_EQ(0, imm8);
1647     Movss(dst, kScratchDoubleReg);
1648   }
1649 }
1650 
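// [Editor's note] Semantics of the SSE2 fallback shared by both Pinsrd
// overloads: Movd leaves the new value in lane 0 of kScratchDoubleReg with
// the upper lanes zeroed. For imm8 == 1, punpckldq interleaves the low
// dwords, yielding {dst[0], value, dst[1], 0}, so the old lane 1 spills
// into lane 2; callers that only consume the low 64 bits are unaffected.
// For imm8 == 0, Movss replaces lane 0 and preserves the rest.
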
1651 void TurboAssembler::Lzcntl(Register dst, Register src) {
1652   if (CpuFeatures::IsSupported(LZCNT)) {
1653     CpuFeatureScope scope(this, LZCNT);
1654     lzcntl(dst, src);
1655     return;
1656   }
1657   Label not_zero_src;
1658   bsrl(dst, src);
1659   j(not_zero, &not_zero_src, Label::kNear);
1660   Set(dst, 63);  // 63^31 == 32
1661   bind(&not_zero_src);
1662   xorl(dst, Immediate(31));  // for x in [0..31], 31^x == 31 - x
1663 }
1664 
1665 void TurboAssembler::Lzcntl(Register dst, Operand src) {
1666   if (CpuFeatures::IsSupported(LZCNT)) {
1667     CpuFeatureScope scope(this, LZCNT);
1668     lzcntl(dst, src);
1669     return;
1670   }
1671   Label not_zero_src;
1672   bsrl(dst, src);
1673   j(not_zero, &not_zero_src, Label::kNear);
1674   Set(dst, 63);  // 63^31 == 32
1675   bind(&not_zero_src);
1676   xorl(dst, Immediate(31));  // for x in [0..31], 31^x == 31 - x
1677 }
1678 
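// [Editor's note] Worked example of the BSR-based fallback used by the
// Lzcnt variants when LZCNT is unavailable: for src == 0x0000F000, bsrl
// writes the highest set bit index, 15, and 15 ^ 31 == 16, the 32-bit
// leading-zero count. For src == 0, bsr leaves dst undefined and sets ZF,
// so dst is forced to 63 first and 63 ^ 31 == 32 (or 127 ^ 63 == 64 in the
// 64-bit variants below), matching lzcnt's defined result for zero input.
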
1679 void TurboAssembler::Lzcntq(Register dst, Register src) {
1680   if (CpuFeatures::IsSupported(LZCNT)) {
1681     CpuFeatureScope scope(this, LZCNT);
1682     lzcntq(dst, src);
1683     return;
1684   }
1685   Label not_zero_src;
1686   bsrq(dst, src);
1687   j(not_zero, &not_zero_src, Label::kNear);
1688   Set(dst, 127);  // 127^63 == 64
1689   bind(&not_zero_src);
1690   xorl(dst, Immediate(63));  // for x in [0..63], 63^x == 63 - x
1691 }
1692 
1693 void TurboAssembler::Lzcntq(Register dst, Operand src) {
1694   if (CpuFeatures::IsSupported(LZCNT)) {
1695     CpuFeatureScope scope(this, LZCNT);
1696     lzcntq(dst, src);
1697     return;
1698   }
1699   Label not_zero_src;
1700   bsrq(dst, src);
1701   j(not_zero, &not_zero_src, Label::kNear);
1702   Set(dst, 127);  // 127^63 == 64
1703   bind(&not_zero_src);
1704   xorl(dst, Immediate(63));  // for x in [0..63], 63^x == 63 - x
1705 }
1706 
1707 void TurboAssembler::Tzcntq(Register dst, Register src) {
1708   if (CpuFeatures::IsSupported(BMI1)) {
1709     CpuFeatureScope scope(this, BMI1);
1710     tzcntq(dst, src);
1711     return;
1712   }
1713   Label not_zero_src;
1714   bsfq(dst, src);
1715   j(not_zero, &not_zero_src, Label::kNear);
1716   // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
1717   Set(dst, 64);
1718   bind(&not_zero_src);
1719 }
1720 
1721 void TurboAssembler::Tzcntq(Register dst, Operand src) {
1722   if (CpuFeatures::IsSupported(BMI1)) {
1723     CpuFeatureScope scope(this, BMI1);
1724     tzcntq(dst, src);
1725     return;
1726   }
1727   Label not_zero_src;
1728   bsfq(dst, src);
1729   j(not_zero, &not_zero_src, Label::kNear);
1730   // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
1731   Set(dst, 64);
1732   bind(&not_zero_src);
1733 }
1734 
1735 void TurboAssembler::Tzcntl(Register dst, Register src) {
1736   if (CpuFeatures::IsSupported(BMI1)) {
1737     CpuFeatureScope scope(this, BMI1);
1738     tzcntl(dst, src);
1739     return;
1740   }
1741   Label not_zero_src;
1742   bsfl(dst, src);
1743   j(not_zero, &not_zero_src, Label::kNear);
1744   Set(dst, 32);  // The result of tzcnt is 32 if src = 0.
1745   bind(&not_zero_src);
1746 }
1747 
1748 void TurboAssembler::Tzcntl(Register dst, Operand src) {
1749   if (CpuFeatures::IsSupported(BMI1)) {
1750     CpuFeatureScope scope(this, BMI1);
1751     tzcntl(dst, src);
1752     return;
1753   }
1754   Label not_zero_src;
1755   bsfl(dst, src);
1756   j(not_zero, &not_zero_src, Label::kNear);
1757   Set(dst, 32);  // The result of tzcnt is 32 if src = 0.
1758   bind(&not_zero_src);
1759 }
1760 
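// [Editor's note] The BSF-based Tzcnt fallbacks above mirror the Lzcnt ones
// but need no XOR: bsf already yields the trailing-zero count for nonzero
// input (e.g. src == 0x0000F000 gives 12). Only the zero case must be
// patched, since bsf(0) leaves dst undefined; tzcnt defines it as the
// operand width, hence Set(dst, 64) and Set(dst, 32).
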
1761 void TurboAssembler::Popcntl(Register dst, Register src) {
1762   if (CpuFeatures::IsSupported(POPCNT)) {
1763     CpuFeatureScope scope(this, POPCNT);
1764     popcntl(dst, src);
1765     return;
1766   }
1767   UNREACHABLE();
1768 }
1769 
1770 void TurboAssembler::Popcntl(Register dst, Operand src) {
1771   if (CpuFeatures::IsSupported(POPCNT)) {
1772     CpuFeatureScope scope(this, POPCNT);
1773     popcntl(dst, src);
1774     return;
1775   }
1776   UNREACHABLE();
1777 }
1778 
1779 void TurboAssembler::Popcntq(Register dst, Register src) {
1780   if (CpuFeatures::IsSupported(POPCNT)) {
1781     CpuFeatureScope scope(this, POPCNT);
1782     popcntq(dst, src);
1783     return;
1784   }
1785   UNREACHABLE();
1786 }
1787 
1788 void TurboAssembler::Popcntq(Register dst, Operand src) {
1789   if (CpuFeatures::IsSupported(POPCNT)) {
1790     CpuFeatureScope scope(this, POPCNT);
1791     popcntq(dst, src);
1792     return;
1793   }
1794   UNREACHABLE();
1795 }
1796 
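// [Editor's note] Unlike Lzcnt/Tzcnt, the Popcnt wrappers above have no
// software fallback; UNREACHABLE() documents that callers must gate code
// generation on the feature. A hedged usage sketch:
//
//   if (CpuFeatures::IsSupported(POPCNT)) {
//     masm->Popcntl(rax, rbx);
//   } else {
//     // select a C helper or a bit-twiddling sequence instead
//   }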
1797 
1798 void MacroAssembler::Pushad() {
1799   Push(rax);
1800   Push(rcx);
1801   Push(rdx);
1802   Push(rbx);
1803   // Not pushing rsp or rbp.
1804   Push(rsi);
1805   Push(rdi);
1806   Push(r8);
1807   Push(r9);
1808   // r10 is kScratchRegister.
1809   Push(r11);
1810   Push(r12);
1811   // r13 is kRootRegister.
1812   Push(r14);
1813   Push(r15);
1814   STATIC_ASSERT(12 == kNumSafepointSavedRegisters);
1815   // Use lea for symmetry with Popad.
1816   int sp_delta =
1817       (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
1818   leap(rsp, Operand(rsp, -sp_delta));
1819 }
1820 
1821 
1822 void MacroAssembler::Popad() {
1823   // Popad must not change the flags, so use lea instead of addq.
1824   int sp_delta =
1825       (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
1826   leap(rsp, Operand(rsp, sp_delta));
1827   Pop(r15);
1828   Pop(r14);
1829   Pop(r12);
1830   Pop(r11);
1831   Pop(r9);
1832   Pop(r8);
1833   Pop(rdi);
1834   Pop(rsi);
1835   Pop(rbx);
1836   Pop(rdx);
1837   Pop(rcx);
1838   Pop(rax);
1839 }
1840 
1841 
1842 // Order in which general registers are pushed by Pushad:
1843 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
1844 const int
1845 MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
1846     0,
1847     1,
1848     2,
1849     3,
1850     -1,
1851     -1,
1852     4,
1853     5,
1854     6,
1855     7,
1856     -1,
1857     8,
1858     9,
1859     -1,
1860     10,
1861     11
1862 };
1863 
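// [Editor's note] Reading the table above: it maps a register code to its
// slot in the Pushad layout, and -1 marks registers Pushad skips. The -1
// entries are codes 4 and 5 (rsp, rbp), 10 (r10, kScratchRegister) and
// 13 (r13, kRootRegister). For example, rbx (code 3) lands at push index 3
// and r11 (code 11) at index 8.
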
1864 void MacroAssembler::PushStackHandler() {
1865   // Adjust this code if the asserted layout constants below change.
1866   STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
1867   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1868 
1869   Push(Immediate(0));  // Padding.
1870 
1871   // Link the current handler as the next handler.
1872   ExternalReference handler_address =
1873       ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
1874   Push(ExternalOperand(handler_address));
1875 
1876   // Set this new handler as the current one.
1877   movp(ExternalOperand(handler_address), rsp);
1878 }
1879 
1880 
1881 void MacroAssembler::PopStackHandler() {
1882   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1883   ExternalReference handler_address =
1884       ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
1885   Pop(ExternalOperand(handler_address));
1886   addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
1887 }
1888 
1889 void TurboAssembler::Ret() { ret(0); }
1890 
1891 void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
1892   if (is_uint16(bytes_dropped)) {
1893     ret(bytes_dropped);
1894   } else {
1895     PopReturnAddressTo(scratch);
1896     addp(rsp, Immediate(bytes_dropped));
1897     PushReturnAddressFrom(scratch);
1898     ret(0);
1899   }
1900 }
1901 
1902 void MacroAssembler::CmpObjectType(Register heap_object,
1903                                    InstanceType type,
1904                                    Register map) {
1905   movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
1906   CmpInstanceType(map, type);
1907 }
1908 
1909 
1910 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
1911   cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
1912 }
1913 
1914 void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
1915                                XMMRegister scratch, Label* lost_precision,
1916                                Label* is_nan, Label::Distance dst) {
1917   Cvttsd2si(result_reg, input_reg);
1918   Cvtlsi2sd(kScratchDoubleReg, result_reg);
1919   Ucomisd(kScratchDoubleReg, input_reg);
1920   j(not_equal, lost_precision, dst);
1921   j(parity_even, is_nan, dst);  // NaN.
1922 }
1923 
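// [Editor's note] Worked example of the round-trip check above: for input
// 2.5, Cvttsd2si truncates to 2, Cvtlsi2sd converts back to 2.0, and the
// Ucomisd compare of 2.0 against 2.5 takes the not_equal branch to
// lost_precision. A NaN input makes the compare unordered, which sets the
// parity flag, hence the parity_even jump to is_nan.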
1924 
1925 void MacroAssembler::AssertNotSmi(Register object) {
1926   if (emit_debug_code()) {
1927     Condition is_smi = CheckSmi(object);
1928     Check(NegateCondition(is_smi), AbortReason::kOperandIsASmi);
1929   }
1930 }
1931 
1932 
1933 void MacroAssembler::AssertSmi(Register object) {
1934   if (emit_debug_code()) {
1935     Condition is_smi = CheckSmi(object);
1936     Check(is_smi, AbortReason::kOperandIsNotASmi);
1937   }
1938 }
1939 
1940 void MacroAssembler::AssertSmi(Operand object) {
1941   if (emit_debug_code()) {
1942     Condition is_smi = CheckSmi(object);
1943     Check(is_smi, AbortReason::kOperandIsNotASmi);
1944   }
1945 }
1946 
1947 void TurboAssembler::AssertZeroExtended(Register int32_register) {
1948   if (emit_debug_code()) {
1949     DCHECK_NE(int32_register, kScratchRegister);
1950     movq(kScratchRegister, int64_t{0x0000000100000000});
1951     cmpq(kScratchRegister, int32_register);
1952     Check(above_equal, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
1953   }
1954 }
1955 
1956 void MacroAssembler::AssertConstructor(Register object) {
1957   if (emit_debug_code()) {
1958     testb(object, Immediate(kSmiTagMask));
1959     Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor);
1960     Push(object);
1961     movq(object, FieldOperand(object, HeapObject::kMapOffset));
1962     testb(FieldOperand(object, Map::kBitFieldOffset),
1963           Immediate(Map::IsConstructorBit::kMask));
1964     Pop(object);
1965     Check(not_zero, AbortReason::kOperandIsNotAConstructor);
1966   }
1967 }
1968 
1969 void MacroAssembler::AssertFunction(Register object) {
1970   if (emit_debug_code()) {
1971     testb(object, Immediate(kSmiTagMask));
1972     Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
1973     Push(object);
1974     CmpObjectType(object, JS_FUNCTION_TYPE, object);
1975     Pop(object);
1976     Check(equal, AbortReason::kOperandIsNotAFunction);
1977   }
1978 }
1979 
1980 
1981 void MacroAssembler::AssertBoundFunction(Register object) {
1982   if (emit_debug_code()) {
1983     testb(object, Immediate(kSmiTagMask));
1984     Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
1985     Push(object);
1986     CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
1987     Pop(object);
1988     Check(equal, AbortReason::kOperandIsNotABoundFunction);
1989   }
1990 }
1991 
1992 void MacroAssembler::AssertGeneratorObject(Register object) {
1993   if (!emit_debug_code()) return;
1994   testb(object, Immediate(kSmiTagMask));
1995   Check(not_equal, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
1996 
1997   // Load map
1998   Register map = object;
1999   Push(object);
2000   movp(map, FieldOperand(object, HeapObject::kMapOffset));
2001 
2002   Label do_check;
2003   // Check if JSGeneratorObject
2004   CmpInstanceType(map, JS_GENERATOR_OBJECT_TYPE);
2005   j(equal, &do_check);
2006 
2007   // Check if JSAsyncGeneratorObject
2008   CmpInstanceType(map, JS_ASYNC_GENERATOR_OBJECT_TYPE);
2009 
2010   bind(&do_check);
2011   // Restore generator object to register and perform assertion
2012   Pop(object);
2013   Check(equal, AbortReason::kOperandIsNotAGeneratorObject);
2014 }
2015 
2016 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
2017   if (emit_debug_code()) {
2018     Label done_checking;
2019     AssertNotSmi(object);
2020     Cmp(object, isolate()->factory()->undefined_value());
2021     j(equal, &done_checking);
2022     Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
2023     Assert(equal, AbortReason::kExpectedUndefinedOrCell);
2024     bind(&done_checking);
2025   }
2026 }
2027 
2028 void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
2029   cmpp(in_out, Immediate(kClearedWeakHeapObject));
2030   j(equal, target_if_cleared);
2031 
2032   andp(in_out, Immediate(~kWeakHeapObjectMask));
2033 }
2034 
2035 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
2036   DCHECK_GT(value, 0);
2037   if (FLAG_native_code_counters && counter->Enabled()) {
2038     Operand counter_operand =
2039         ExternalOperand(ExternalReference::Create(counter));
2040     if (value == 1) {
2041       incl(counter_operand);
2042     } else {
2043       addl(counter_operand, Immediate(value));
2044     }
2045   }
2046 }
2047 
2048 
2049 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
2050   DCHECK_GT(value, 0);
2051   if (FLAG_native_code_counters && counter->Enabled()) {
2052     Operand counter_operand =
2053         ExternalOperand(ExternalReference::Create(counter));
2054     if (value == 1) {
2055       decl(counter_operand);
2056     } else {
2057       subl(counter_operand, Immediate(value));
2058     }
2059   }
2060 }
2061 
2062 void MacroAssembler::MaybeDropFrames() {
2063   // Check whether we need to drop frames to restart a function on the stack.
2064   ExternalReference restart_fp =
2065       ExternalReference::debug_restart_fp_address(isolate());
2066   Load(rbx, restart_fp);
2067   testp(rbx, rbx);
2068 
2069   Label dont_drop;
2070   j(zero, &dont_drop, Label::kNear);
2071   Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET);
2072 
2073   bind(&dont_drop);
2074 }
2075 
2076 void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
2077                                         Register caller_args_count_reg,
2078                                         Register scratch0, Register scratch1) {
2079 #if DEBUG
2080   if (callee_args_count.is_reg()) {
2081     DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
2082                        scratch1));
2083   } else {
2084     DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
2085   }
2086 #endif
2087 
2088   // Calculate the destination address where we will put the return address
2089   // after we drop current frame.
2090   Register new_sp_reg = scratch0;
2091   if (callee_args_count.is_reg()) {
2092     subp(caller_args_count_reg, callee_args_count.reg());
2093     leap(new_sp_reg, Operand(rbp, caller_args_count_reg, times_pointer_size,
2094                              StandardFrameConstants::kCallerPCOffset));
2095   } else {
2096     leap(new_sp_reg, Operand(rbp, caller_args_count_reg, times_pointer_size,
2097                              StandardFrameConstants::kCallerPCOffset -
2098                                  callee_args_count.immediate() * kPointerSize));
2099   }
2100 
2101   if (FLAG_debug_code) {
2102     cmpp(rsp, new_sp_reg);
2103     Check(below, AbortReason::kStackAccessBelowStackPointer);
2104   }
2105 
2106   // Copy the return address from the caller's frame into the current frame's
2107   // return-address slot so it is not clobbered, and let the following loop
2108   // move it to the right place.
2109   Register tmp_reg = scratch1;
2110   movp(tmp_reg, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
2111   movp(Operand(rsp, 0), tmp_reg);
2112 
2113   // Restore caller's frame pointer now as it could be overwritten by
2114   // the copying loop.
2115   movp(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
2116 
2117   // +2 here is to copy both receiver and return address.
2118   Register count_reg = caller_args_count_reg;
2119   if (callee_args_count.is_reg()) {
2120     leap(count_reg, Operand(callee_args_count.reg(), 2));
2121   } else {
2122     movp(count_reg, Immediate(callee_args_count.immediate() + 2));
2123     // TODO(ishell): Unroll copying loop for small immediate values.
2124   }
2125 
2126   // Now copy callee arguments to the caller frame going backwards to avoid
2127   // callee arguments corruption (source and destination areas could overlap).
2128   Label loop, entry;
2129   jmp(&entry, Label::kNear);
2130   bind(&loop);
2131   decp(count_reg);
2132   movp(tmp_reg, Operand(rsp, count_reg, times_pointer_size, 0));
2133   movp(Operand(new_sp_reg, count_reg, times_pointer_size, 0), tmp_reg);
2134   bind(&entry);
2135   cmpp(count_reg, Immediate(0));
2136   j(not_equal, &loop, Label::kNear);
2137 
2138   // Leave current frame.
2139   movp(rsp, new_sp_reg);
2140 }
2141 
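// [Editor's note] A small worked example for the frame-dropping loop above,
// assuming an immediate callee_args_count of 1: count_reg becomes 3 (one
// argument, plus receiver and return address), and the loop moves slots
// count_reg - 1 down to 0 from rsp to new_sp_reg. Walking from the highest
// slot downward matters because the source and destination ranges overlap
// when the caller's frame is larger than the callee's argument area.
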
2142 void MacroAssembler::InvokeFunction(Register function, Register new_target,
2143                                     const ParameterCount& actual,
2144                                     InvokeFlag flag) {
2145   movp(rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
2146   movzxwq(rbx,
2147           FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset));
2148 
2149   ParameterCount expected(rbx);
2150   InvokeFunction(function, new_target, expected, actual, flag);
2151 }
2152 
2153 void MacroAssembler::InvokeFunction(Register function, Register new_target,
2154                                     const ParameterCount& expected,
2155                                     const ParameterCount& actual,
2156                                     InvokeFlag flag) {
2157   DCHECK(function == rdi);
2158   movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
2159   InvokeFunctionCode(rdi, new_target, expected, actual, flag);
2160 }
2161 
2162 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
2163                                         const ParameterCount& expected,
2164                                         const ParameterCount& actual,
2165                                         InvokeFlag flag) {
2166   // You can't call a function without a valid frame.
2167   DCHECK(flag == JUMP_FUNCTION || has_frame());
2168   DCHECK(function == rdi);
2169   DCHECK_IMPLIES(new_target.is_valid(), new_target == rdx);
2170 
2171   // On function call, call into the debugger if necessary.
2172   CheckDebugHook(function, new_target, expected, actual);
2173 
2174   // Clear the new.target register if not given.
2175   if (!new_target.is_valid()) {
2176     LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
2177   }
2178 
2179   Label done;
2180   bool definitely_mismatches = false;
2181   InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
2182                  Label::kNear);
2183   if (!definitely_mismatches) {
2184     // We call indirectly through the code field in the function to
2185     // allow recompilation to take effect without changing any of the
2186     // call sites.
2187     static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
2188     movp(rcx, FieldOperand(function, JSFunction::kCodeOffset));
2189     addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
2190     if (flag == CALL_FUNCTION) {
2191       call(rcx);
2192     } else {
2193       DCHECK(flag == JUMP_FUNCTION);
2194       jmp(rcx);
2195     }
2196     bind(&done);
2197   }
2198 }
2199 
2200 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2201                                     const ParameterCount& actual, Label* done,
2202                                     bool* definitely_mismatches,
2203                                     InvokeFlag flag,
2204                                     Label::Distance near_jump) {
2205   bool definitely_matches = false;
2206   *definitely_mismatches = false;
2207   Label invoke;
2208   if (expected.is_immediate()) {
2209     DCHECK(actual.is_immediate());
2210     Set(rax, actual.immediate());
2211     if (expected.immediate() == actual.immediate()) {
2212       definitely_matches = true;
2213     } else {
2214       if (expected.immediate() ==
2215               SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
2216         // Don't worry about adapting arguments for built-ins that
2217         // don't want that done. Skip adaptation code by making it look
2218         // like we have a match between expected and actual number of
2219         // arguments.
2220         definitely_matches = true;
2221       } else {
2222         *definitely_mismatches = true;
2223         Set(rbx, expected.immediate());
2224       }
2225     }
2226   } else {
2227     if (actual.is_immediate()) {
2228       // Expected is in register, actual is immediate. This is the
2229       // case when we invoke function values without going through the
2230       // IC mechanism.
2231       Set(rax, actual.immediate());
2232       cmpp(expected.reg(), Immediate(actual.immediate()));
2233       j(equal, &invoke, Label::kNear);
2234       DCHECK(expected.reg() == rbx);
2235     } else if (expected.reg() != actual.reg()) {
2236       // Both expected and actual are in (different) registers. This
2237       // is the case when we invoke functions using call and apply.
2238       cmpp(expected.reg(), actual.reg());
2239       j(equal, &invoke, Label::kNear);
2240       DCHECK(actual.reg() == rax);
2241       DCHECK(expected.reg() == rbx);
2242     } else {
2243       definitely_matches = true;
2244       Move(rax, actual.reg());
2245     }
2246   }
2247 
2248   if (!definitely_matches) {
2249     Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
2250     if (flag == CALL_FUNCTION) {
2251       Call(adaptor, RelocInfo::CODE_TARGET);
2252       if (!*definitely_mismatches) {
2253         jmp(done, near_jump);
2254       }
2255     } else {
2256       Jump(adaptor, RelocInfo::CODE_TARGET);
2257     }
2258     bind(&invoke);
2259   }
2260 }
2261 
2262 void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
2263                                     const ParameterCount& expected,
2264                                     const ParameterCount& actual) {
2265   Label skip_hook;
2266   ExternalReference debug_hook_active =
2267       ExternalReference::debug_hook_on_function_call_address(isolate());
2268   Operand debug_hook_active_operand = ExternalOperand(debug_hook_active);
2269   cmpb(debug_hook_active_operand, Immediate(0));
2270   j(equal, &skip_hook);
2271 
2272   {
2273     FrameScope frame(this,
2274                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
2275     if (expected.is_reg()) {
2276       SmiTag(expected.reg(), expected.reg());
2277       Push(expected.reg());
2278     }
2279     if (actual.is_reg()) {
2280       SmiTag(actual.reg(), actual.reg());
2281       Push(actual.reg());
2282       SmiUntag(actual.reg(), actual.reg());
2283     }
2284     if (new_target.is_valid()) {
2285       Push(new_target);
2286     }
2287     Push(fun);
2288     Push(fun);
2289     Push(StackArgumentsAccessor(rbp, actual).GetReceiverOperand());
2290     CallRuntime(Runtime::kDebugOnFunctionCall);
2291     Pop(fun);
2292     if (new_target.is_valid()) {
2293       Pop(new_target);
2294     }
2295     if (actual.is_reg()) {
2296       Pop(actual.reg());
2297       SmiUntag(actual.reg(), actual.reg());
2298     }
2299     if (expected.is_reg()) {
2300       Pop(expected.reg());
2301       SmiUntag(expected.reg(), expected.reg());
2302     }
2303   }
2304   bind(&skip_hook);
2305 }
2306 
2307 void TurboAssembler::StubPrologue(StackFrame::Type type) {
2308   pushq(rbp);  // Caller's frame pointer.
2309   movp(rbp, rsp);
2310   Push(Immediate(StackFrame::TypeToMarker(type)));
2311 }
2312 
2313 void TurboAssembler::Prologue() {
2314   pushq(rbp);  // Caller's frame pointer.
2315   movp(rbp, rsp);
2316   Push(rsi);  // Callee's context.
2317   Push(rdi);  // Callee's JS function.
2318 }
2319 
2320 void TurboAssembler::EnterFrame(StackFrame::Type type) {
2321   pushq(rbp);
2322   movp(rbp, rsp);
2323   Push(Immediate(StackFrame::TypeToMarker(type)));
2324 }
2325 
2326 void TurboAssembler::LeaveFrame(StackFrame::Type type) {
2327   if (emit_debug_code()) {
2328     cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
2329          Immediate(StackFrame::TypeToMarker(type)));
2330     Check(equal, AbortReason::kStackFrameTypesMustMatch);
2331   }
2332   movp(rsp, rbp);
2333   popq(rbp);
2334 }
2335 
2336 void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
2337                                        Register argc) {
2338   Push(rbp);
2339   Move(rbp, rsp);
2340   Push(context);
2341   Push(target);
2342   Push(argc);
2343 }
2344 
2345 void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
2346                                        Register argc) {
2347   Pop(argc);
2348   Pop(target);
2349   Pop(context);
2350   leave();
2351 }
2352 
2353 void MacroAssembler::EnterExitFramePrologue(bool save_rax,
2354                                             StackFrame::Type frame_type) {
2355   DCHECK(frame_type == StackFrame::EXIT ||
2356          frame_type == StackFrame::BUILTIN_EXIT);
2357 
2358   // Set up the frame structure on the stack.
2359   // All constants are relative to the frame pointer of the exit frame.
2360   DCHECK_EQ(kFPOnStackSize + kPCOnStackSize,
2361             ExitFrameConstants::kCallerSPDisplacement);
2362   DCHECK_EQ(kFPOnStackSize, ExitFrameConstants::kCallerPCOffset);
2363   DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
2364   pushq(rbp);
2365   movp(rbp, rsp);
2366 
2367   // Reserve room for entry stack pointer and push the code object.
2368   Push(Immediate(StackFrame::TypeToMarker(frame_type)));
2369   DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
2370   Push(Immediate(0));  // Saved entry sp, patched before call.
2371   Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
2372   Push(kScratchRegister);  // Accessed from ExitFrame::code_slot.
2373 
2374   // Save the frame pointer and the context in top.
2375   if (save_rax) {
2376     movp(r14, rax);  // Backup rax in callee-save register.
2377   }
2378 
2379   Store(
2380       ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()),
2381       rbp);
2382   Store(ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()),
2383         rsi);
2384   Store(
2385       ExternalReference::Create(IsolateAddressId::kCFunctionAddress, isolate()),
2386       rbx);
2387 }
2388 
2389 
2390 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
2391                                             bool save_doubles) {
2392 #ifdef _WIN64
2393   const int kShadowSpace = 4;
2394   arg_stack_space += kShadowSpace;
2395 #endif
2396   // Optionally save all XMM registers.
2397   if (save_doubles) {
2398     int space = XMMRegister::kNumRegisters * kDoubleSize +
2399                 arg_stack_space * kRegisterSize;
2400     subp(rsp, Immediate(space));
2401     int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
2402     const RegisterConfiguration* config = RegisterConfiguration::Default();
2403     for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
2404       DoubleRegister reg =
2405           DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
2406       Movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
2407     }
2408   } else if (arg_stack_space > 0) {
2409     subp(rsp, Immediate(arg_stack_space * kRegisterSize));
2410   }
2411 
2412   // Get the required frame alignment for the OS.
2413   const int kFrameAlignment = base::OS::ActivationFrameAlignment();
2414   if (kFrameAlignment > 0) {
2415     DCHECK(base::bits::IsPowerOfTwo(kFrameAlignment));
2416     DCHECK(is_int8(kFrameAlignment));
2417     andp(rsp, Immediate(-kFrameAlignment));
2418   }
2419 
2420   // Patch the saved entry sp.
2421   movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
2422 }
2423 
2424 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles,
2425                                     StackFrame::Type frame_type) {
2426   EnterExitFramePrologue(true, frame_type);
2427 
2428   // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
2429   // so it must be retained across the C-call.
2430   int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
2431   leap(r15, Operand(rbp, r14, times_pointer_size, offset));
2432 
2433   EnterExitFrameEpilogue(arg_stack_space, save_doubles);
2434 }
2435 
2436 
2437 void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
2438   EnterExitFramePrologue(false, StackFrame::EXIT);
2439   EnterExitFrameEpilogue(arg_stack_space, false);
2440 }
2441 
2442 
2443 void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
2444   // Registers:
2445   // r15 : argv
2446   if (save_doubles) {
2447     int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
2448     const RegisterConfiguration* config = RegisterConfiguration::Default();
2449     for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
2450       DoubleRegister reg =
2451           DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
2452       Movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
2453     }
2454   }
2455 
2456   if (pop_arguments) {
2457     // Get the return address from the stack and restore the frame pointer.
2458     movp(rcx, Operand(rbp, kFPOnStackSize));
2459     movp(rbp, Operand(rbp, 0 * kPointerSize));
2460 
2461     // Drop everything up to and including the arguments and the receiver
2462     // from the caller stack.
2463     leap(rsp, Operand(r15, 1 * kPointerSize));
2464 
2465     PushReturnAddressFrom(rcx);
2466   } else {
2467     // Otherwise just leave the exit frame.
2468     leave();
2469   }
2470 
2471   LeaveExitFrameEpilogue();
2472 }
2473 
2474 void MacroAssembler::LeaveApiExitFrame() {
2475   movp(rsp, rbp);
2476   popq(rbp);
2477 
2478   LeaveExitFrameEpilogue();
2479 }
2480 
2481 void MacroAssembler::LeaveExitFrameEpilogue() {
2482   // Restore current context from top and clear it in debug mode.
2483   ExternalReference context_address =
2484       ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
2485   Operand context_operand = ExternalOperand(context_address);
2486   movp(rsi, context_operand);
2487 #ifdef DEBUG
2488   movp(context_operand, Immediate(Context::kInvalidContext));
2489 #endif
2490 
2491   // Clear the top frame.
2492   ExternalReference c_entry_fp_address =
2493       ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
2494   Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
2495   movp(c_entry_fp_operand, Immediate(0));
2496 }
2497 
2498 
2499 #ifdef _WIN64
2500 static const int kRegisterPassedArguments = 4;
2501 #else
2502 static const int kRegisterPassedArguments = 6;
2503 #endif
2504 
2505 
2506 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
2507   movp(dst, NativeContextOperand());
2508   movp(dst, ContextOperand(dst, index));
2509 }
2510 
2511 
2512 int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
2513   // On Windows 64 stack slots are reserved by the caller for all arguments
2514   // including the ones passed in registers, and space is always allocated for
2515   // the four register arguments even if the function takes fewer than four
2516   // arguments.
2517   // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
2518   // and the caller does not reserve stack slots for them.
2519   DCHECK_GE(num_arguments, 0);
2520 #ifdef _WIN64
2521   const int kMinimumStackSlots = kRegisterPassedArguments;
2522   if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
2523   return num_arguments;
2524 #else
2525   if (num_arguments < kRegisterPassedArguments) return 0;
2526   return num_arguments - kRegisterPassedArguments;
2527 #endif
2528 }
2529 
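// [Editor's note] Worked examples of the slot arithmetic above: with three
// arguments, Windows x64 still reserves the four-slot shadow space, so the
// function returns 4, while the System V ABI passes all three in registers
// and returns 0. With seven arguments the results are 7 and 1 respectively,
// since System V only spills what does not fit in its six argument
// registers.
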
2530 void TurboAssembler::PrepareCallCFunction(int num_arguments) {
2531   int frame_alignment = base::OS::ActivationFrameAlignment();
2532   DCHECK_NE(frame_alignment, 0);
2533   DCHECK_GE(num_arguments, 0);
2534 
2535   // Make stack end at alignment and allocate space for arguments and old rsp.
2536   movp(kScratchRegister, rsp);
2537   DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
2538   int argument_slots_on_stack =
2539       ArgumentStackSlotsForCFunctionCall(num_arguments);
2540   subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
2541   andp(rsp, Immediate(-frame_alignment));
2542   movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
2543 }
2544 
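// [Editor's note] How PrepareCallCFunction pairs with CallCFunction below:
// the old rsp is saved in the slot just above the outgoing argument area,
// then rsp is aligned down. For example, on System V with two register
// arguments there are zero stack slots, so (0 + 1) * kRegisterSize bytes
// are reserved and the saved rsp sits at Operand(rsp, 0), which is exactly
// where CallCFunction reloads it from after the call returns.
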
2545 void TurboAssembler::CallCFunction(ExternalReference function,
2546                                    int num_arguments) {
2547   LoadAddress(rax, function);
2548   CallCFunction(rax, num_arguments);
2549 }
2550 
2551 void TurboAssembler::CallCFunction(Register function, int num_arguments) {
2552   DCHECK_LE(num_arguments, kMaxCParameters);
2553   DCHECK(has_frame());
2554   // Check stack alignment.
2555   if (emit_debug_code()) {
2556     CheckStackAlignment();
2557   }
2558 
2559   call(function);
2560   DCHECK_NE(base::OS::ActivationFrameAlignment(), 0);
2561   DCHECK_GE(num_arguments, 0);
2562   int argument_slots_on_stack =
2563       ArgumentStackSlotsForCFunctionCall(num_arguments);
2564   movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
2565 }
2566 
2567 void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
2568                                    Condition cc, Label* condition_met,
2569                                    Label::Distance condition_met_distance) {
2570   DCHECK(cc == zero || cc == not_zero);
2571   if (scratch == object) {
2572     andp(scratch, Immediate(~kPageAlignmentMask));
2573   } else {
2574     movp(scratch, Immediate(~kPageAlignmentMask));
2575     andp(scratch, object);
2576   }
2577   if (mask < (1 << kBitsPerByte)) {
2578     testb(Operand(scratch, MemoryChunk::kFlagsOffset),
2579           Immediate(static_cast<uint8_t>(mask)));
2580   } else {
2581     testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
2582   }
2583   j(cc, condition_met, condition_met_distance);
2584 }
2585 
2586 void TurboAssembler::ComputeCodeStartAddress(Register dst) {
2587   Label current;
2588   bind(&current);
2589   int pc = pc_offset();
2590   // Load effective address to get the address of the current instruction.
2591   leaq(dst, Operand(&current, -pc));
2592 }
2593 
2594 void TurboAssembler::ResetSpeculationPoisonRegister() {
2595   // TODO(tebbi): Perhaps, we want to put an lfence here.
2596   Set(kSpeculationPoisonRegister, -1);
2597 }
2598 
2599 }  // namespace internal
2600 }  // namespace v8
2601 
2602 #endif  // V8_TARGET_ARCH_X64
2603