// Copyright 2012 the V8 project authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #if V8_TARGET_ARCH_X64 #include "src/base/bits.h" #include "src/base/division-by-constant.h" #include "src/base/utils/random-number-generator.h" #include "src/bootstrapper.h" #include "src/callable.h" #include "src/code-factory.h" #include "src/code-stubs.h" #include "src/counters.h" #include "src/debug/debug.h" #include "src/external-reference-table.h" #include "src/frames-inl.h" #include "src/globals.h" #include "src/heap/heap-inl.h" #include "src/instruction-stream.h" #include "src/objects-inl.h" #include "src/register-configuration.h" #include "src/snapshot/snapshot.h" #include "src/x64/assembler-x64.h" #include "src/x64/macro-assembler-x64.h" // Cannot be the first include. namespace v8 { namespace internal { Operand StackArgumentsAccessor::GetArgumentOperand(int index) { DCHECK_GE(index, 0); int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0; int displacement_to_last_argument = base_reg_ == rsp ? kPCOnStackSize : kFPOnStackSize + kPCOnStackSize; displacement_to_last_argument += extra_displacement_to_last_argument_; if (argument_count_reg_ == no_reg) { // argument[0] is at base_reg_ + displacement_to_last_argument + // (argument_count_immediate_ + receiver - 1) * kPointerSize. DCHECK_GT(argument_count_immediate_ + receiver, 0); return Operand( base_reg_, displacement_to_last_argument + (argument_count_immediate_ + receiver - 1 - index) * kPointerSize); } else { // argument[0] is at base_reg_ + displacement_to_last_argument + // argument_count_reg_ * times_pointer_size + (receiver - 1) * kPointerSize. return Operand( base_reg_, argument_count_reg_, times_pointer_size, displacement_to_last_argument + (receiver - 1 - index) * kPointerSize); } } StackArgumentsAccessor::StackArgumentsAccessor( Register base_reg, const ParameterCount& parameter_count, StackArgumentsAccessorReceiverMode receiver_mode, int extra_displacement_to_last_argument) : base_reg_(base_reg), argument_count_reg_(parameter_count.is_reg() ? parameter_count.reg() : no_reg), argument_count_immediate_( parameter_count.is_immediate() ? parameter_count.immediate() : 0), receiver_mode_(receiver_mode), extra_displacement_to_last_argument_( extra_displacement_to_last_argument) {} MacroAssembler::MacroAssembler(Isolate* isolate, const AssemblerOptions& options, void* buffer, int size, CodeObjectRequired create_code_object) : TurboAssembler(isolate, options, buffer, size, create_code_object) { if (create_code_object == CodeObjectRequired::kYes) { // Unlike TurboAssembler, which can be used off the main thread and may not // allocate, macro assembler creates its own copy of the self-reference // marker in order to disambiguate between self-references during nested // code generation (e.g.: codegen of the current object triggers stub // compilation through CodeStub::GetCode()). code_object_ = Handle::New( *isolate->factory()->NewSelfReferenceMarker(), isolate); } } static const int64_t kInvalidRootRegisterDelta = -1; int64_t TurboAssembler::RootRegisterDelta(ExternalReference other) { if (predictable_code_size() && (other.address() < reinterpret_cast
<Address>(isolate()) || other.address() >= reinterpret_cast<Address>
(isolate() + 1))) { return kInvalidRootRegisterDelta; } return RootRegisterOffsetForExternalReference(isolate(), other); } void MacroAssembler::Load(Register destination, ExternalReference source) { if (root_array_available_ && options().enable_root_array_delta_access) { int64_t delta = RootRegisterDelta(source); if (delta != kInvalidRootRegisterDelta && is_int32(delta)) { movp(destination, Operand(kRootRegister, static_cast(delta))); return; } } // Safe code. if (FLAG_embedded_builtins) { if (root_array_available_ && options().isolate_independent_code) { IndirectLoadExternalReference(kScratchRegister, source); movp(destination, Operand(kScratchRegister, 0)); return; } } if (destination == rax) { load_rax(source); } else { Move(kScratchRegister, source); movp(destination, Operand(kScratchRegister, 0)); } } void MacroAssembler::Store(ExternalReference destination, Register source) { if (root_array_available_ && options().enable_root_array_delta_access) { int64_t delta = RootRegisterDelta(destination); if (delta != kInvalidRootRegisterDelta && is_int32(delta)) { movp(Operand(kRootRegister, static_cast(delta)), source); return; } } // Safe code. if (source == rax) { store_rax(destination); } else { Move(kScratchRegister, destination); movp(Operand(kScratchRegister, 0), source); } } void TurboAssembler::LoadFromConstantsTable(Register destination, int constant_index) { DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant( Heap::kBuiltinsConstantsTableRootIndex)); LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex); movp(destination, FieldOperand(destination, FixedArray::kHeaderSize + constant_index * kPointerSize)); } void TurboAssembler::LoadRootRegisterOffset(Register destination, intptr_t offset) { DCHECK(is_int32(offset)); if (offset == 0) { Move(destination, kRootRegister); } else { leap(destination, Operand(kRootRegister, static_cast(offset))); } } void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) { movp(destination, Operand(kRootRegister, offset)); } void TurboAssembler::LoadAddress(Register destination, ExternalReference source) { if (root_array_available_ && options().enable_root_array_delta_access) { int64_t delta = RootRegisterDelta(source); if (delta != kInvalidRootRegisterDelta && is_int32(delta)) { leap(destination, Operand(kRootRegister, static_cast(delta))); return; } } // Safe code. 
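// When isolate-independent (embedded builtins) code is generated, raw external addresses must not be embedded in the instruction stream; the reference is instead loaded indirectly off the root register, as done by IndirectLoadExternalReference below.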
if (FLAG_embedded_builtins) { if (root_array_available_ && options().isolate_independent_code) { IndirectLoadExternalReference(destination, source); return; } } Move(destination, source); } Operand TurboAssembler::ExternalOperand(ExternalReference target, Register scratch) { if (root_array_available_ && options().enable_root_array_delta_access) { int64_t delta = RootRegisterDelta(target); if (delta != kInvalidRootRegisterDelta && is_int32(delta)) { return Operand(kRootRegister, static_cast(delta)); } } Move(scratch, target); return Operand(scratch, 0); } void MacroAssembler::PushAddress(ExternalReference source) { LoadAddress(kScratchRegister, source); Push(kScratchRegister); } void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) { DCHECK(root_array_available_); movp(destination, Operand(kRootRegister, RootRegisterOffset(index))); } void MacroAssembler::PushRoot(Heap::RootListIndex index) { DCHECK(root_array_available_); Push(Operand(kRootRegister, RootRegisterOffset(index))); } void TurboAssembler::CompareRoot(Register with, Heap::RootListIndex index) { DCHECK(root_array_available_); cmpp(with, Operand(kRootRegister, RootRegisterOffset(index))); } void TurboAssembler::CompareRoot(Operand with, Heap::RootListIndex index) { DCHECK(root_array_available_); DCHECK(!with.AddressUsesRegister(kScratchRegister)); LoadRoot(kScratchRegister, index); cmpp(with, kScratchRegister); } void MacroAssembler::RecordWriteField(Register object, int offset, Register value, Register dst, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action, SmiCheck smi_check) { // First, check if a write barrier is even needed. The tests below // catch stores of Smis. Label done; // Skip barrier if writing a smi. if (smi_check == INLINE_SMI_CHECK) { JumpIfSmi(value, &done); } // Although the object register is tagged, the offset is relative to the start // of the object, so so offset must be a multiple of kPointerSize. DCHECK(IsAligned(offset, kPointerSize)); leap(dst, FieldOperand(object, offset)); if (emit_debug_code()) { Label ok; testb(dst, Immediate(kPointerSize - 1)); j(zero, &ok, Label::kNear); int3(); bind(&ok); } RecordWrite(object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK); bind(&done); // Clobber clobbered input registers when running with the debug-code flag // turned on to provoke errors. 
if (emit_debug_code()) { Move(value, kZapValue, RelocInfo::NONE); Move(dst, kZapValue, RelocInfo::NONE); } } void TurboAssembler::SaveRegisters(RegList registers) { DCHECK_GT(NumRegs(registers), 0); for (int i = 0; i < Register::kNumRegisters; ++i) { if ((registers >> i) & 1u) { pushq(Register::from_code(i)); } } } void TurboAssembler::RestoreRegisters(RegList registers) { DCHECK_GT(NumRegs(registers), 0); for (int i = Register::kNumRegisters - 1; i >= 0; --i) { if ((registers >> i) & 1u) { popq(Register::from_code(i)); } } } void TurboAssembler::CallRecordWriteStub( Register object, Register address, RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) { Callable const callable = Builtins::CallableFor(isolate(), Builtins::kRecordWrite); RegList registers = callable.descriptor().allocatable_registers(); SaveRegisters(registers); Register object_parameter(callable.descriptor().GetRegisterParameter( RecordWriteDescriptor::kObject)); Register slot_parameter( callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot)); Register isolate_parameter(callable.descriptor().GetRegisterParameter( RecordWriteDescriptor::kIsolate)); Register remembered_set_parameter(callable.descriptor().GetRegisterParameter( RecordWriteDescriptor::kRememberedSet)); Register fp_mode_parameter(callable.descriptor().GetRegisterParameter( RecordWriteDescriptor::kFPMode)); // Prepare argument registers for calling RecordWrite // slot_parameter <= address // object_parameter <= object if (slot_parameter != object) { // Normal case Move(slot_parameter, address); Move(object_parameter, object); } else if (object_parameter != address) { // Only slot_parameter and object are the same register // object_parameter <= object // slot_parameter <= address Move(object_parameter, object); Move(slot_parameter, address); } else { // slot_parameter \/ address // object_parameter /\ object xchgq(slot_parameter, object_parameter); } LoadAddress(isolate_parameter, ExternalReference::isolate_address(isolate())); Smi* smi_rsa = Smi::FromEnum(remembered_set_action); Smi* smi_fm = Smi::FromEnum(fp_mode); Move(remembered_set_parameter, smi_rsa); if (smi_rsa != smi_fm) { Move(fp_mode_parameter, smi_fm); } else { movq(fp_mode_parameter, remembered_set_parameter); } Call(callable.code(), RelocInfo::CODE_TARGET); RestoreRegisters(registers); } void MacroAssembler::RecordWrite(Register object, Register address, Register value, SaveFPRegsMode fp_mode, RememberedSetAction remembered_set_action, SmiCheck smi_check) { DCHECK(object != value); DCHECK(object != address); DCHECK(value != address); AssertNotSmi(object); if (remembered_set_action == OMIT_REMEMBERED_SET && !FLAG_incremental_marking) { return; } if (emit_debug_code()) { Label ok; cmpp(value, Operand(address, 0)); j(equal, &ok, Label::kNear); int3(); bind(&ok); } // First, check if a write barrier is even needed. The tests below // catch stores of smis and stores into the young generation. Label done; if (smi_check == INLINE_SMI_CHECK) { // Skip barrier if writing a smi. JumpIfSmi(value, &done); } CheckPageFlag(value, value, // Used as scratch. MemoryChunk::kPointersToHereAreInterestingMask, zero, &done, Label::kNear); CheckPageFlag(object, value, // Used as scratch. MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done, Label::kNear); CallRecordWriteStub(object, address, remembered_set_action, fp_mode); bind(&done); // Count number of write barriers in generated code. 
isolate()->counters()->write_barriers_static()->Increment();
IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);

// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
  Move(address, kZapValue, RelocInfo::NONE);
  Move(value, kZapValue, RelocInfo::NONE);
}
}

void TurboAssembler::Assert(Condition cc, AbortReason reason) {
  if (emit_debug_code()) Check(cc, reason);
}

void TurboAssembler::AssertUnreachable(AbortReason reason) {
  if (emit_debug_code()) Abort(reason);
}

void TurboAssembler::Check(Condition cc, AbortReason reason) {
  Label L;
  j(cc, &L, Label::kNear);
  Abort(reason);
  // Control will not return here.
  bind(&L);
}

void TurboAssembler::CheckStackAlignment() {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (frame_alignment > kPointerSize) {
    DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
    Label alignment_as_expected;
    testp(rsp, Immediate(frame_alignment_mask));
    j(zero, &alignment_as_expected, Label::kNear);
    // Abort if stack is not aligned.
    int3();
    bind(&alignment_as_expected);
  }
}

void TurboAssembler::Abort(AbortReason reason) {
#ifdef DEBUG
  const char* msg = GetAbortReason(reason);
  RecordComment("Abort message: ");
  RecordComment(msg);
#endif

  // Avoid emitting call to builtin if requested.
  if (trap_on_abort()) {
    int3();
    return;
  }

  if (should_abort_hard()) {
    // We don't care if we constructed a frame. Just pretend we did.
    FrameScope assume_frame(this, StackFrame::NONE);
    movl(arg_reg_1, Immediate(static_cast<int>(reason)));
    PrepareCallCFunction(1);
    LoadAddress(rax, ExternalReference::abort_with_reason());
    call(rax);
    return;
  }

  Move(rdx, Smi::FromInt(static_cast<int>(reason)));

  if (!has_frame()) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
  } else {
    Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
  }
  // Control will not return here.
  int3();
}

void TurboAssembler::CallStubDelayed(CodeStub* stub) {
  DCHECK(AllowThisStubCall(stub));  // Calls are not allowed in some stubs
  call(stub);
}

void MacroAssembler::CallStub(CodeStub* stub) {
  DCHECK(AllowThisStubCall(stub));  // Calls are not allowed in some stubs
  Call(stub->GetCode(), RelocInfo::CODE_TARGET);
}

void MacroAssembler::TailCallStub(CodeStub* stub) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}

bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
  return has_frame() || !stub->SometimesSetsUpAFrame();
}

void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
                                           Register centry) {
  const Runtime::Function* f = Runtime::FunctionForId(fid);
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, f->nargs);
  LoadAddress(rbx, ExternalReference::Create(f));
  DCHECK(!AreAliased(centry, rax, rbx));
  addp(centry, Immediate(Code::kHeaderSize - kHeapObjectTag));
  Call(centry);
}

void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments match the
  // expectation.
CHECK(f->nargs < 0 || f->nargs == num_arguments); // TODO(1236192): Most runtime routines don't need the number of // arguments passed in because it is constant. At some point we // should remove this need and make the runtime routine entry code // smarter. Set(rax, num_arguments); LoadAddress(rbx, ExternalReference::Create(f)); Handle code = CodeFactory::CEntry(isolate(), f->result_size, save_doubles); Call(code, RelocInfo::CODE_TARGET); } void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) { // ----------- S t a t e ------------- // -- rsp[0] : return address // -- rsp[8] : argument num_arguments - 1 // ... // -- rsp[8 * num_arguments] : argument 0 (receiver) // // For runtime functions with variable arguments: // -- rax : number of arguments // ----------------------------------- const Runtime::Function* function = Runtime::FunctionForId(fid); DCHECK_EQ(1, function->result_size); if (function->nargs >= 0) { Set(rax, function->nargs); } JumpToExternalReference(ExternalReference::Create(fid)); } void MacroAssembler::JumpToExternalReference(const ExternalReference& ext, bool builtin_exit_frame) { // Set the entry point and jump to the C entry runtime stub. LoadAddress(rbx, ext); Handle code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs, kArgvOnStack, builtin_exit_frame); Jump(code, RelocInfo::CODE_TARGET); } static constexpr Register saved_regs[] = {rax, rcx, rdx, rbx, rbp, rsi, rdi, r8, r9, r10, r11}; static constexpr int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register); int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) const { int bytes = 0; for (int i = 0; i < kNumberOfSavedRegs; i++) { Register reg = saved_regs[i]; if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) { bytes += kPointerSize; } } // R12 to r15 are callee save on all platforms. if (fp_mode == kSaveFPRegs) { bytes += kDoubleSize * XMMRegister::kNumRegisters; } return bytes; } int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) { // We don't allow a GC during a store buffer overflow so there is no need to // store the registers in any particular way, but we do have to store and // restore them. int bytes = 0; for (int i = 0; i < kNumberOfSavedRegs; i++) { Register reg = saved_regs[i]; if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) { pushq(reg); bytes += kPointerSize; } } // R12 to r15 are callee save on all platforms. 
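// If requested, also spill all XMM registers into the newly reserved stack area; PopCallerSaved restores them from the same slots in the same order.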
if (fp_mode == kSaveFPRegs) { int delta = kDoubleSize * XMMRegister::kNumRegisters; subp(rsp, Immediate(delta)); for (int i = 0; i < XMMRegister::kNumRegisters; i++) { XMMRegister reg = XMMRegister::from_code(i); Movsd(Operand(rsp, i * kDoubleSize), reg); } bytes += delta; } return bytes; } int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) { int bytes = 0; if (fp_mode == kSaveFPRegs) { for (int i = 0; i < XMMRegister::kNumRegisters; i++) { XMMRegister reg = XMMRegister::from_code(i); Movsd(reg, Operand(rsp, i * kDoubleSize)); } int delta = kDoubleSize * XMMRegister::kNumRegisters; addp(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); bytes += delta; } for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) { Register reg = saved_regs[i]; if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) { popq(reg); bytes += kPointerSize; } } return bytes; } void TurboAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtss2sd(dst, src, src); } else { cvtss2sd(dst, src); } } void TurboAssembler::Cvtss2sd(XMMRegister dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtss2sd(dst, dst, src); } else { cvtss2sd(dst, src); } } void TurboAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtsd2ss(dst, src, src); } else { cvtsd2ss(dst, src); } } void TurboAssembler::Cvtsd2ss(XMMRegister dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtsd2ss(dst, dst, src); } else { cvtsd2ss(dst, src); } } void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Register src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vxorpd(dst, dst, dst); vcvtlsi2sd(dst, dst, src); } else { xorpd(dst, dst); cvtlsi2sd(dst, src); } } void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vxorpd(dst, dst, dst); vcvtlsi2sd(dst, dst, src); } else { xorpd(dst, dst); cvtlsi2sd(dst, src); } } void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Register src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vxorps(dst, dst, dst); vcvtlsi2ss(dst, dst, src); } else { xorps(dst, dst); cvtlsi2ss(dst, src); } } void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vxorps(dst, dst, dst); vcvtlsi2ss(dst, dst, src); } else { xorps(dst, dst); cvtlsi2ss(dst, src); } } void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Register src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vxorps(dst, dst, dst); vcvtqsi2ss(dst, dst, src); } else { xorps(dst, dst); cvtqsi2ss(dst, src); } } void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vxorps(dst, dst, dst); vcvtqsi2ss(dst, dst, src); } else { xorps(dst, dst); cvtqsi2ss(dst, src); } } void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Register src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vxorpd(dst, dst, dst); vcvtqsi2sd(dst, dst, src); } else { xorpd(dst, dst); cvtqsi2sd(dst, src); } } void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vxorpd(dst, dst, dst); vcvtqsi2sd(dst, dst, src); 
} else { xorpd(dst, dst); cvtqsi2sd(dst, src); } } void TurboAssembler::Cvtlui2ss(XMMRegister dst, Register src) { // Zero-extend the 32 bit value to 64 bit. movl(kScratchRegister, src); Cvtqsi2ss(dst, kScratchRegister); } void TurboAssembler::Cvtlui2ss(XMMRegister dst, Operand src) { // Zero-extend the 32 bit value to 64 bit. movl(kScratchRegister, src); Cvtqsi2ss(dst, kScratchRegister); } void TurboAssembler::Cvtlui2sd(XMMRegister dst, Register src) { // Zero-extend the 32 bit value to 64 bit. movl(kScratchRegister, src); Cvtqsi2sd(dst, kScratchRegister); } void TurboAssembler::Cvtlui2sd(XMMRegister dst, Operand src) { // Zero-extend the 32 bit value to 64 bit. movl(kScratchRegister, src); Cvtqsi2sd(dst, kScratchRegister); } void TurboAssembler::Cvtqui2ss(XMMRegister dst, Register src) { Label done; Cvtqsi2ss(dst, src); testq(src, src); j(positive, &done, Label::kNear); // Compute {src/2 | (src&1)} (retain the LSB to avoid rounding errors). if (src != kScratchRegister) movq(kScratchRegister, src); shrq(kScratchRegister, Immediate(1)); // The LSB is shifted into CF. If it is set, set the LSB in {tmp}. Label msb_not_set; j(not_carry, &msb_not_set, Label::kNear); orq(kScratchRegister, Immediate(1)); bind(&msb_not_set); Cvtqsi2ss(dst, kScratchRegister); addss(dst, dst); bind(&done); } void TurboAssembler::Cvtqui2ss(XMMRegister dst, Operand src) { movq(kScratchRegister, src); Cvtqui2ss(dst, kScratchRegister); } void TurboAssembler::Cvtqui2sd(XMMRegister dst, Register src) { Label done; Cvtqsi2sd(dst, src); testq(src, src); j(positive, &done, Label::kNear); // Compute {src/2 | (src&1)} (retain the LSB to avoid rounding errors). if (src != kScratchRegister) movq(kScratchRegister, src); shrq(kScratchRegister, Immediate(1)); // The LSB is shifted into CF. If it is set, set the LSB in {tmp}. 
Label msb_not_set; j(not_carry, &msb_not_set, Label::kNear); orq(kScratchRegister, Immediate(1)); bind(&msb_not_set); Cvtqsi2sd(dst, kScratchRegister); addsd(dst, dst); bind(&done); } void TurboAssembler::Cvtqui2sd(XMMRegister dst, Operand src) { movq(kScratchRegister, src); Cvtqui2sd(dst, kScratchRegister); } void TurboAssembler::Cvttss2si(Register dst, XMMRegister src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvttss2si(dst, src); } else { cvttss2si(dst, src); } } void TurboAssembler::Cvttss2si(Register dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvttss2si(dst, src); } else { cvttss2si(dst, src); } } void TurboAssembler::Cvttsd2si(Register dst, XMMRegister src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvttsd2si(dst, src); } else { cvttsd2si(dst, src); } } void TurboAssembler::Cvttsd2si(Register dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvttsd2si(dst, src); } else { cvttsd2si(dst, src); } } void TurboAssembler::Cvttss2siq(Register dst, XMMRegister src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvttss2siq(dst, src); } else { cvttss2siq(dst, src); } } void TurboAssembler::Cvttss2siq(Register dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvttss2siq(dst, src); } else { cvttss2siq(dst, src); } } void TurboAssembler::Cvttsd2siq(Register dst, XMMRegister src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvttsd2siq(dst, src); } else { cvttsd2siq(dst, src); } } void TurboAssembler::Cvttsd2siq(Register dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvttsd2siq(dst, src); } else { cvttsd2siq(dst, src); } } namespace { template void ConvertFloatToUint64(TurboAssembler* tasm, Register dst, OperandOrXMMRegister src, Label* fail) { Label success; // There does not exist a native float-to-uint instruction, so we have to use // a float-to-int, and postprocess the result. if (is_double) { tasm->Cvttsd2siq(dst, src); } else { tasm->Cvttss2siq(dst, src); } // If the result of the conversion is positive, we are already done. tasm->testq(dst, dst); tasm->j(positive, &success); // The result of the first conversion was negative, which means that the // input value was not within the positive int64 range. We subtract 2^63 // and convert it again to see if it is within the uint64 range. if (is_double) { tasm->Move(kScratchDoubleReg, -9223372036854775808.0); tasm->addsd(kScratchDoubleReg, src); tasm->Cvttsd2siq(dst, kScratchDoubleReg); } else { tasm->Move(kScratchDoubleReg, -9223372036854775808.0f); tasm->addss(kScratchDoubleReg, src); tasm->Cvttss2siq(dst, kScratchDoubleReg); } tasm->testq(dst, dst); // The only possible negative value here is 0x80000000000000000, which is // used on x64 to indicate an integer overflow. tasm->j(negative, fail ? fail : &success); // The input value is within uint64 range and the second conversion worked // successfully, but we still have to undo the subtraction we did // earlier. 
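// Setting the sign bit of the truncated result adds back the 2^63 that was subtracted from the input before the second conversion.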
tasm->Set(kScratchRegister, 0x8000000000000000); tasm->orq(dst, kScratchRegister); tasm->bind(&success); } } // namespace void TurboAssembler::Cvttsd2uiq(Register dst, Operand src, Label* success) { ConvertFloatToUint64(this, dst, src, success); } void TurboAssembler::Cvttsd2uiq(Register dst, XMMRegister src, Label* success) { ConvertFloatToUint64(this, dst, src, success); } void TurboAssembler::Cvttss2uiq(Register dst, Operand src, Label* success) { ConvertFloatToUint64(this, dst, src, success); } void TurboAssembler::Cvttss2uiq(Register dst, XMMRegister src, Label* success) { ConvertFloatToUint64(this, dst, src, success); } void MacroAssembler::Load(Register dst, Operand src, Representation r) { DCHECK(!r.IsDouble()); if (r.IsInteger8()) { movsxbq(dst, src); } else if (r.IsUInteger8()) { movzxbl(dst, src); } else if (r.IsInteger16()) { movsxwq(dst, src); } else if (r.IsUInteger16()) { movzxwl(dst, src); } else if (r.IsInteger32()) { movl(dst, src); } else { movp(dst, src); } } void MacroAssembler::Store(Operand dst, Register src, Representation r) { DCHECK(!r.IsDouble()); if (r.IsInteger8() || r.IsUInteger8()) { movb(dst, src); } else if (r.IsInteger16() || r.IsUInteger16()) { movw(dst, src); } else if (r.IsInteger32()) { movl(dst, src); } else { if (r.IsHeapObject()) { AssertNotSmi(src); } else if (r.IsSmi()) { AssertSmi(src); } movp(dst, src); } } void TurboAssembler::Set(Register dst, int64_t x) { if (x == 0) { xorl(dst, dst); } else if (is_uint32(x)) { movl(dst, Immediate(static_cast(x))); } else if (is_int32(x)) { movq(dst, Immediate(static_cast(x))); } else { movq(dst, x); } } void TurboAssembler::Set(Operand dst, intptr_t x) { if (kPointerSize == kInt64Size) { if (is_int32(x)) { movp(dst, Immediate(static_cast(x))); } else { Set(kScratchRegister, x); movp(dst, kScratchRegister); } } else { movp(dst, Immediate(static_cast(x))); } } // ---------------------------------------------------------------------------- // Smi tagging, untagging and tag detection. Register TurboAssembler::GetSmiConstant(Smi* source) { STATIC_ASSERT(kSmiTag == 0); int value = source->value(); if (value == 0) { xorl(kScratchRegister, kScratchRegister); return kScratchRegister; } Move(kScratchRegister, source); return kScratchRegister; } void TurboAssembler::Move(Register dst, Smi* source) { STATIC_ASSERT(kSmiTag == 0); int value = source->value(); if (value == 0) { xorl(dst, dst); } else { Move(dst, reinterpret_cast
<Address>
(source), RelocInfo::NONE); } } void TurboAssembler::Move(Register dst, ExternalReference ext) { if (FLAG_embedded_builtins) { if (root_array_available_ && options().isolate_independent_code) { IndirectLoadExternalReference(dst, ext); return; } } movp(dst, ext.address(), RelocInfo::EXTERNAL_REFERENCE); } void MacroAssembler::SmiTag(Register dst, Register src) { STATIC_ASSERT(kSmiTag == 0); if (dst != src) { movp(dst, src); } DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits()); shlp(dst, Immediate(kSmiShift)); } void TurboAssembler::SmiUntag(Register dst, Register src) { STATIC_ASSERT(kSmiTag == 0); if (dst != src) { movp(dst, src); } DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits()); sarp(dst, Immediate(kSmiShift)); } void TurboAssembler::SmiUntag(Register dst, Operand src) { if (SmiValuesAre32Bits()) { movl(dst, Operand(src, kSmiShift / kBitsPerByte)); // Sign extend to 64-bit. movsxlq(dst, dst); } else { DCHECK(SmiValuesAre31Bits()); movp(dst, src); sarp(dst, Immediate(kSmiShift)); } } void MacroAssembler::SmiCompare(Register smi1, Register smi2) { AssertSmi(smi1); AssertSmi(smi2); cmpp(smi1, smi2); } void MacroAssembler::SmiCompare(Register dst, Smi* src) { AssertSmi(dst); Cmp(dst, src); } void MacroAssembler::Cmp(Register dst, Smi* src) { DCHECK_NE(dst, kScratchRegister); if (src->value() == 0) { testp(dst, dst); } else { Register constant_reg = GetSmiConstant(src); cmpp(dst, constant_reg); } } void MacroAssembler::SmiCompare(Register dst, Operand src) { AssertSmi(dst); AssertSmi(src); cmpp(dst, src); } void MacroAssembler::SmiCompare(Operand dst, Register src) { AssertSmi(dst); AssertSmi(src); cmpp(dst, src); } void MacroAssembler::SmiCompare(Operand dst, Smi* src) { AssertSmi(dst); if (SmiValuesAre32Bits()) { cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value())); } else { DCHECK(SmiValuesAre31Bits()); cmpl(dst, Immediate(src)); } } void MacroAssembler::Cmp(Operand dst, Smi* src) { // The Operand cannot use the smi register. 
Register smi_reg = GetSmiConstant(src); DCHECK(!dst.AddressUsesRegister(smi_reg)); cmpp(dst, smi_reg); } Condition TurboAssembler::CheckSmi(Register src) { STATIC_ASSERT(kSmiTag == 0); testb(src, Immediate(kSmiTagMask)); return zero; } Condition TurboAssembler::CheckSmi(Operand src) { STATIC_ASSERT(kSmiTag == 0); testb(src, Immediate(kSmiTagMask)); return zero; } void TurboAssembler::JumpIfSmi(Register src, Label* on_smi, Label::Distance near_jump) { Condition smi = CheckSmi(src); j(smi, on_smi, near_jump); } void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi, Label::Distance near_jump) { Condition smi = CheckSmi(src); j(NegateCondition(smi), on_not_smi, near_jump); } void MacroAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi, Label::Distance near_jump) { Condition smi = CheckSmi(src); j(NegateCondition(smi), on_not_smi, near_jump); } void MacroAssembler::SmiAddConstant(Operand dst, Smi* constant) { if (constant->value() != 0) { if (SmiValuesAre32Bits()) { addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value())); } else { DCHECK(SmiValuesAre31Bits()); if (kPointerSize == kInt64Size) { // Sign-extend value after addition movl(kScratchRegister, dst); addl(kScratchRegister, Immediate(constant)); movsxlq(kScratchRegister, kScratchRegister); movq(dst, kScratchRegister); } else { DCHECK_EQ(kSmiShiftSize, 32); addp(dst, Immediate(constant)); } } } } SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) { if (SmiValuesAre32Bits()) { DCHECK(is_uint6(shift)); // There is a possible optimization if shift is in the range 60-63, but that // will (and must) never happen. if (dst != src) { movp(dst, src); } if (shift < kSmiShift) { sarp(dst, Immediate(kSmiShift - shift)); } else { shlp(dst, Immediate(shift - kSmiShift)); } return SmiIndex(dst, times_1); } else { DCHECK(SmiValuesAre31Bits()); if (dst != src) { movp(dst, src); } // We have to sign extend the index register to 64-bit as the SMI might // be negative. movsxlq(dst, dst); if (shift < kSmiShift) { sarq(dst, Immediate(kSmiShift - shift)); } else if (shift != kSmiShift) { if (shift - kSmiShift <= static_cast(times_8)) { return SmiIndex(dst, static_cast(shift - kSmiShift)); } shlq(dst, Immediate(shift - kSmiShift)); } return SmiIndex(dst, times_1); } } void TurboAssembler::Push(Smi* source) { intptr_t smi = reinterpret_cast(source); if (is_int32(smi)) { Push(Immediate(static_cast(smi))); return; } int first_byte_set = base::bits::CountTrailingZeros64(smi) / 8; int last_byte_set = (63 - base::bits::CountLeadingZeros64(smi)) / 8; if (first_byte_set == last_byte_set && kPointerSize == kInt64Size) { // This sequence has only 7 bytes, compared to the 12 bytes below. 
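// Push a zero word, then patch the single non-zero byte of the Smi directly in the stack slot.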
Push(Immediate(0)); movb(Operand(rsp, first_byte_set), Immediate(static_cast(smi >> (8 * first_byte_set)))); return; } Register constant = GetSmiConstant(source); Push(constant); } // ---------------------------------------------------------------------------- void TurboAssembler::Move(Register dst, Register src) { if (dst != src) { movp(dst, src); } } void TurboAssembler::MoveNumber(Register dst, double value) { int32_t smi; if (DoubleToSmiInteger(value, &smi)) { Move(dst, Smi::FromInt(smi)); } else { movp_heap_number(dst, value); } } void TurboAssembler::Move(XMMRegister dst, uint32_t src) { if (src == 0) { Xorps(dst, dst); } else { unsigned nlz = base::bits::CountLeadingZeros(src); unsigned ntz = base::bits::CountTrailingZeros(src); unsigned pop = base::bits::CountPopulation(src); DCHECK_NE(0u, pop); if (pop + ntz + nlz == 32) { Pcmpeqd(dst, dst); if (ntz) Pslld(dst, static_cast(ntz + nlz)); if (nlz) Psrld(dst, static_cast(nlz)); } else { movl(kScratchRegister, Immediate(src)); Movd(dst, kScratchRegister); } } } void TurboAssembler::Move(XMMRegister dst, uint64_t src) { if (src == 0) { Xorpd(dst, dst); } else { unsigned nlz = base::bits::CountLeadingZeros(src); unsigned ntz = base::bits::CountTrailingZeros(src); unsigned pop = base::bits::CountPopulation(src); DCHECK_NE(0u, pop); if (pop + ntz + nlz == 64) { Pcmpeqd(dst, dst); if (ntz) Psllq(dst, static_cast(ntz + nlz)); if (nlz) Psrlq(dst, static_cast(nlz)); } else { uint32_t lower = static_cast(src); uint32_t upper = static_cast(src >> 32); if (upper == 0) { Move(dst, lower); } else { movq(kScratchRegister, src); Movq(dst, kScratchRegister); } } } } // ---------------------------------------------------------------------------- void MacroAssembler::Absps(XMMRegister dst) { Andps(dst, ExternalOperand(ExternalReference::address_of_float_abs_constant())); } void MacroAssembler::Negps(XMMRegister dst) { Xorps(dst, ExternalOperand(ExternalReference::address_of_float_neg_constant())); } void MacroAssembler::Abspd(XMMRegister dst) { Andps(dst, ExternalOperand(ExternalReference::address_of_double_abs_constant())); } void MacroAssembler::Negpd(XMMRegister dst) { Xorps(dst, ExternalOperand(ExternalReference::address_of_double_neg_constant())); } void MacroAssembler::Cmp(Register dst, Handle source) { AllowDeferredHandleDereference smi_check; if (source->IsSmi()) { Cmp(dst, Smi::cast(*source)); } else { Move(kScratchRegister, Handle::cast(source)); cmpp(dst, kScratchRegister); } } void MacroAssembler::Cmp(Operand dst, Handle source) { AllowDeferredHandleDereference smi_check; if (source->IsSmi()) { Cmp(dst, Smi::cast(*source)); } else { Move(kScratchRegister, Handle::cast(source)); cmpp(dst, kScratchRegister); } } void TurboAssembler::Push(Handle source) { Move(kScratchRegister, source); Push(kScratchRegister); } void TurboAssembler::Move(Register result, Handle object, RelocInfo::Mode rmode) { if (FLAG_embedded_builtins) { if (root_array_available_ && options().isolate_independent_code) { IndirectLoadConstant(result, object); return; } } movp(result, object.address(), rmode); } void TurboAssembler::Move(Operand dst, Handle object, RelocInfo::Mode rmode) { Move(kScratchRegister, object, rmode); movp(dst, kScratchRegister); } void MacroAssembler::Drop(int stack_elements) { if (stack_elements > 0) { addp(rsp, Immediate(stack_elements * kPointerSize)); } } void MacroAssembler::DropUnderReturnAddress(int stack_elements, Register scratch) { DCHECK_GT(stack_elements, 0); if (kPointerSize == kInt64Size && stack_elements == 1) { popq(MemOperand(rsp, 
0)); return; } PopReturnAddressTo(scratch); Drop(stack_elements); PushReturnAddressFrom(scratch); } void TurboAssembler::Push(Register src) { if (kPointerSize == kInt64Size) { pushq(src); } else { // x32 uses 64-bit push for rbp in the prologue. DCHECK(src.code() != rbp.code()); leal(rsp, Operand(rsp, -4)); movp(Operand(rsp, 0), src); } } void TurboAssembler::Push(Operand src) { if (kPointerSize == kInt64Size) { pushq(src); } else { movp(kScratchRegister, src); leal(rsp, Operand(rsp, -4)); movp(Operand(rsp, 0), kScratchRegister); } } void MacroAssembler::PushQuad(Operand src) { if (kPointerSize == kInt64Size) { pushq(src); } else { movp(kScratchRegister, src); pushq(kScratchRegister); } } void TurboAssembler::Push(Immediate value) { if (kPointerSize == kInt64Size) { pushq(value); } else { leal(rsp, Operand(rsp, -4)); movp(Operand(rsp, 0), value); } } void MacroAssembler::PushImm32(int32_t imm32) { if (kPointerSize == kInt64Size) { pushq_imm32(imm32); } else { leal(rsp, Operand(rsp, -4)); movp(Operand(rsp, 0), Immediate(imm32)); } } void MacroAssembler::Pop(Register dst) { if (kPointerSize == kInt64Size) { popq(dst); } else { // x32 uses 64-bit pop for rbp in the epilogue. DCHECK(dst.code() != rbp.code()); movp(dst, Operand(rsp, 0)); leal(rsp, Operand(rsp, 4)); } } void MacroAssembler::Pop(Operand dst) { if (kPointerSize == kInt64Size) { popq(dst); } else { Register scratch = dst.AddressUsesRegister(kScratchRegister) ? kRootRegister : kScratchRegister; movp(scratch, Operand(rsp, 0)); movp(dst, scratch); leal(rsp, Operand(rsp, 4)); if (scratch == kRootRegister) { // Restore kRootRegister. InitializeRootRegister(); } } } void MacroAssembler::PopQuad(Operand dst) { if (kPointerSize == kInt64Size) { popq(dst); } else { popq(kScratchRegister); movp(dst, kScratchRegister); } } void TurboAssembler::Jump(ExternalReference ext) { LoadAddress(kScratchRegister, ext); jmp(kScratchRegister); } void TurboAssembler::Jump(Operand op) { if (kPointerSize == kInt64Size) { jmp(op); } else { movp(kScratchRegister, op); jmp(kScratchRegister); } } void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode) { Move(kScratchRegister, destination, rmode); jmp(kScratchRegister); } void TurboAssembler::Jump(Handle code_object, RelocInfo::Mode rmode, Condition cc) { // TODO(X64): Inline this if (FLAG_embedded_builtins) { if (root_array_available_ && options().isolate_independent_code && !Builtins::IsIsolateIndependentBuiltin(*code_object)) { // Calls to embedded targets are initially generated as standard // pc-relative calls below. When creating the embedded blob, call offsets // are patched up to point directly to the off-heap instruction start. // Note: It is safe to dereference code_object above since code generation // for builtins and code stubs happens on the main thread. Label skip; if (cc != always) { if (cc == never) return; j(NegateCondition(cc), &skip, Label::kNear); } IndirectLoadConstant(kScratchRegister, code_object); leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize)); jmp(kScratchRegister); bind(&skip); return; } else if (options().inline_offheap_trampolines) { int builtin_index = Builtins::kNoBuiltinId; if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) && Builtins::IsIsolateIndependent(builtin_index)) { // Inline the trampoline. 
RecordCommentForOffHeapTrampoline(builtin_index); CHECK_NE(builtin_index, Builtins::kNoBuiltinId); EmbeddedData d = EmbeddedData::FromBlob(); Address entry = d.InstructionStartOfBuiltin(builtin_index); Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET); jmp(kScratchRegister); return; } } } j(cc, code_object, rmode); } void MacroAssembler::JumpToInstructionStream(Address entry) { Move(kOffHeapTrampolineRegister, entry, RelocInfo::OFF_HEAP_TARGET); jmp(kOffHeapTrampolineRegister); } void TurboAssembler::Call(ExternalReference ext) { LoadAddress(kScratchRegister, ext); call(kScratchRegister); } void TurboAssembler::Call(Operand op) { if (kPointerSize == kInt64Size && !CpuFeatures::IsSupported(ATOM)) { call(op); } else { movp(kScratchRegister, op); call(kScratchRegister); } } void TurboAssembler::Call(Address destination, RelocInfo::Mode rmode) { Move(kScratchRegister, destination, rmode); call(kScratchRegister); } void TurboAssembler::Call(Handle code_object, RelocInfo::Mode rmode) { if (FLAG_embedded_builtins) { if (root_array_available_ && options().isolate_independent_code && !Builtins::IsIsolateIndependentBuiltin(*code_object)) { // Calls to embedded targets are initially generated as standard // pc-relative calls below. When creating the embedded blob, call offsets // are patched up to point directly to the off-heap instruction start. // Note: It is safe to dereference code_object above since code generation // for builtins and code stubs happens on the main thread. IndirectLoadConstant(kScratchRegister, code_object); leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize)); call(kScratchRegister); return; } else if (options().inline_offheap_trampolines) { int builtin_index = Builtins::kNoBuiltinId; if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) && Builtins::IsIsolateIndependent(builtin_index)) { // Inline the trampoline. RecordCommentForOffHeapTrampoline(builtin_index); CHECK_NE(builtin_index, Builtins::kNoBuiltinId); EmbeddedData d = EmbeddedData::FromBlob(); Address entry = d.InstructionStartOfBuiltin(builtin_index); Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET); call(kScratchRegister); return; } } } DCHECK(RelocInfo::IsCodeTarget(rmode)); call(code_object, rmode); } void TurboAssembler::RetpolineCall(Register reg) { Label setup_return, setup_target, inner_indirect_branch, capture_spec; jmp(&setup_return); // Jump past the entire retpoline below. bind(&inner_indirect_branch); call(&setup_target); bind(&capture_spec); pause(); jmp(&capture_spec); bind(&setup_target); movq(Operand(rsp, 0), reg); ret(0); bind(&setup_return); call(&inner_indirect_branch); // Callee will return after this instruction. 
}

void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
  Move(kScratchRegister, destination, rmode);
  RetpolineCall(kScratchRegister);
}

void TurboAssembler::RetpolineJump(Register reg) {
  Label setup_target, capture_spec;
  call(&setup_target);
  bind(&capture_spec);
  pause();
  jmp(&capture_spec);
  bind(&setup_target);
  movq(Operand(rsp, 0), reg);
  ret(0);
}

void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
  if (imm8 == 0) {
    Movd(dst, src);
    return;
  }
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope sse_scope(this, SSE4_1);
    pextrd(dst, src, imm8);
    return;
  }
  DCHECK_EQ(1, imm8);
  movq(dst, src);
  shrq(dst, Immediate(32));
}

void TurboAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope sse_scope(this, SSE4_1);
    pinsrd(dst, src, imm8);
    return;
  }
  Movd(kScratchDoubleReg, src);
  if (imm8 == 1) {
    punpckldq(dst, kScratchDoubleReg);
  } else {
    DCHECK_EQ(0, imm8);
    Movss(dst, kScratchDoubleReg);
  }
}

void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
  DCHECK(imm8 == 0 || imm8 == 1);
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope sse_scope(this, SSE4_1);
    pinsrd(dst, src, imm8);
    return;
  }
  Movd(kScratchDoubleReg, src);
  if (imm8 == 1) {
    punpckldq(dst, kScratchDoubleReg);
  } else {
    DCHECK_EQ(0, imm8);
    Movss(dst, kScratchDoubleReg);
  }
}

void TurboAssembler::Lzcntl(Register dst, Register src) {
  if (CpuFeatures::IsSupported(LZCNT)) {
    CpuFeatureScope scope(this, LZCNT);
    lzcntl(dst, src);
    return;
  }
  Label not_zero_src;
  bsrl(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 63);  // 63^31 == 32
  bind(&not_zero_src);
  xorl(dst, Immediate(31));  // for x in [0..31], 31^x == 31 - x
}

void TurboAssembler::Lzcntl(Register dst, Operand src) {
  if (CpuFeatures::IsSupported(LZCNT)) {
    CpuFeatureScope scope(this, LZCNT);
    lzcntl(dst, src);
    return;
  }
  Label not_zero_src;
  bsrl(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 63);  // 63^31 == 32
  bind(&not_zero_src);
  xorl(dst, Immediate(31));  // for x in [0..31], 31^x == 31 - x
}

void TurboAssembler::Lzcntq(Register dst, Register src) {
  if (CpuFeatures::IsSupported(LZCNT)) {
    CpuFeatureScope scope(this, LZCNT);
    lzcntq(dst, src);
    return;
  }
  Label not_zero_src;
  bsrq(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 127);  // 127^63 == 64
  bind(&not_zero_src);
  xorl(dst, Immediate(63));  // for x in [0..63], 63^x == 63 - x
}

void TurboAssembler::Lzcntq(Register dst, Operand src) {
  if (CpuFeatures::IsSupported(LZCNT)) {
    CpuFeatureScope scope(this, LZCNT);
    lzcntq(dst, src);
    return;
  }
  Label not_zero_src;
  bsrq(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 127);  // 127^63 == 64
  bind(&not_zero_src);
  xorl(dst, Immediate(63));  // for x in [0..63], 63^x == 63 - x
}

void TurboAssembler::Tzcntq(Register dst, Register src) {
  if (CpuFeatures::IsSupported(BMI1)) {
    CpuFeatureScope scope(this, BMI1);
    tzcntq(dst, src);
    return;
  }
  Label not_zero_src;
  bsfq(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
  Set(dst, 64);
  bind(&not_zero_src);
}

void TurboAssembler::Tzcntq(Register dst, Operand src) {
  if (CpuFeatures::IsSupported(BMI1)) {
    CpuFeatureScope scope(this, BMI1);
    tzcntq(dst, src);
    return;
  }
  Label not_zero_src;
  bsfq(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
  Set(dst, 64);
  bind(&not_zero_src);
}

void TurboAssembler::Tzcntl(Register dst, Register src) {
  if (CpuFeatures::IsSupported(BMI1)) {
    CpuFeatureScope scope(this, BMI1);
    tzcntl(dst, src);
    return;
  }
  Label not_zero_src;
  bsfl(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 32);  // The result of tzcnt is 32 if src = 0.
  bind(&not_zero_src);
}

void TurboAssembler::Tzcntl(Register dst, Operand src) {
  if (CpuFeatures::IsSupported(BMI1)) {
    CpuFeatureScope scope(this, BMI1);
    tzcntl(dst, src);
    return;
  }
  Label not_zero_src;
  bsfl(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 32);  // The result of tzcnt is 32 if src = 0.
  bind(&not_zero_src);
}

void TurboAssembler::Popcntl(Register dst, Register src) {
  if (CpuFeatures::IsSupported(POPCNT)) {
    CpuFeatureScope scope(this, POPCNT);
    popcntl(dst, src);
    return;
  }
  UNREACHABLE();
}

void TurboAssembler::Popcntl(Register dst, Operand src) {
  if (CpuFeatures::IsSupported(POPCNT)) {
    CpuFeatureScope scope(this, POPCNT);
    popcntl(dst, src);
    return;
  }
  UNREACHABLE();
}

void TurboAssembler::Popcntq(Register dst, Register src) {
  if (CpuFeatures::IsSupported(POPCNT)) {
    CpuFeatureScope scope(this, POPCNT);
    popcntq(dst, src);
    return;
  }
  UNREACHABLE();
}

void TurboAssembler::Popcntq(Register dst, Operand src) {
  if (CpuFeatures::IsSupported(POPCNT)) {
    CpuFeatureScope scope(this, POPCNT);
    popcntq(dst, src);
    return;
  }
  UNREACHABLE();
}

void MacroAssembler::Pushad() {
  Push(rax);
  Push(rcx);
  Push(rdx);
  Push(rbx);
  // Not pushing rsp or rbp.
  Push(rsi);
  Push(rdi);
  Push(r8);
  Push(r9);
  // r10 is kScratchRegister.
  Push(r11);
  Push(r12);
  // r13 is kRootRegister.
  Push(r14);
  Push(r15);
  STATIC_ASSERT(12 == kNumSafepointSavedRegisters);
  // Use lea for symmetry with Popad.
  int sp_delta =
      (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
  leap(rsp, Operand(rsp, -sp_delta));
}

void MacroAssembler::Popad() {
  // Popad must not change the flags, so use lea instead of addq.
  int sp_delta =
      (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
  leap(rsp, Operand(rsp, sp_delta));
  Pop(r15);
  Pop(r14);
  Pop(r12);
  Pop(r11);
  Pop(r9);
  Pop(r8);
  Pop(rdi);
  Pop(rsi);
  Pop(rbx);
  Pop(rdx);
  Pop(rcx);
  Pop(rax);
}

// Order general registers are pushed by Pushad:
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
const int
    MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
        0, 1, 2, 3, -1, -1, 4, 5, 6, 7, -1, 8, 9, -1, 10, 11};

void MacroAssembler::PushStackHandler() {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);

  Push(Immediate(0));  // Padding.

  // Link the current handler as the next handler.
  ExternalReference handler_address =
      ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
  Push(ExternalOperand(handler_address));

  // Set this new handler as the current one.
movp(ExternalOperand(handler_address), rsp); } void MacroAssembler::PopStackHandler() { STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); ExternalReference handler_address = ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()); Pop(ExternalOperand(handler_address)); addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize)); } void TurboAssembler::Ret() { ret(0); } void TurboAssembler::Ret(int bytes_dropped, Register scratch) { if (is_uint16(bytes_dropped)) { ret(bytes_dropped); } else { PopReturnAddressTo(scratch); addp(rsp, Immediate(bytes_dropped)); PushReturnAddressFrom(scratch); ret(0); } } void MacroAssembler::CmpObjectType(Register heap_object, InstanceType type, Register map) { movp(map, FieldOperand(heap_object, HeapObject::kMapOffset)); CmpInstanceType(map, type); } void MacroAssembler::CmpInstanceType(Register map, InstanceType type) { cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type)); } void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg, XMMRegister scratch, Label* lost_precision, Label* is_nan, Label::Distance dst) { Cvttsd2si(result_reg, input_reg); Cvtlsi2sd(kScratchDoubleReg, result_reg); Ucomisd(kScratchDoubleReg, input_reg); j(not_equal, lost_precision, dst); j(parity_even, is_nan, dst); // NaN. } void MacroAssembler::AssertNotSmi(Register object) { if (emit_debug_code()) { Condition is_smi = CheckSmi(object); Check(NegateCondition(is_smi), AbortReason::kOperandIsASmi); } } void MacroAssembler::AssertSmi(Register object) { if (emit_debug_code()) { Condition is_smi = CheckSmi(object); Check(is_smi, AbortReason::kOperandIsNotASmi); } } void MacroAssembler::AssertSmi(Operand object) { if (emit_debug_code()) { Condition is_smi = CheckSmi(object); Check(is_smi, AbortReason::kOperandIsNotASmi); } } void TurboAssembler::AssertZeroExtended(Register int32_register) { if (emit_debug_code()) { DCHECK_NE(int32_register, kScratchRegister); movq(kScratchRegister, int64_t{0x0000000100000000}); cmpq(kScratchRegister, int32_register); Check(above_equal, AbortReason::k32BitValueInRegisterIsNotZeroExtended); } } void MacroAssembler::AssertConstructor(Register object) { if (emit_debug_code()) { testb(object, Immediate(kSmiTagMask)); Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor); Push(object); movq(object, FieldOperand(object, HeapObject::kMapOffset)); testb(FieldOperand(object, Map::kBitFieldOffset), Immediate(Map::IsConstructorBit::kMask)); Pop(object); Check(not_zero, AbortReason::kOperandIsNotAConstructor); } } void MacroAssembler::AssertFunction(Register object) { if (emit_debug_code()) { testb(object, Immediate(kSmiTagMask)); Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction); Push(object); CmpObjectType(object, JS_FUNCTION_TYPE, object); Pop(object); Check(equal, AbortReason::kOperandIsNotAFunction); } } void MacroAssembler::AssertBoundFunction(Register object) { if (emit_debug_code()) { testb(object, Immediate(kSmiTagMask)); Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction); Push(object); CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object); Pop(object); Check(equal, AbortReason::kOperandIsNotABoundFunction); } } void MacroAssembler::AssertGeneratorObject(Register object) { if (!emit_debug_code()) return; testb(object, Immediate(kSmiTagMask)); Check(not_equal, AbortReason::kOperandIsASmiAndNotAGeneratorObject); // Load map Register map = object; Push(object); movp(map, FieldOperand(object, HeapObject::kMapOffset)); Label do_check; // Check if JSGeneratorObject 
CmpInstanceType(map, JS_GENERATOR_OBJECT_TYPE); j(equal, &do_check); // Check if JSAsyncGeneratorObject CmpInstanceType(map, JS_ASYNC_GENERATOR_OBJECT_TYPE); bind(&do_check); // Restore generator object to register and perform assertion Pop(object); Check(equal, AbortReason::kOperandIsNotAGeneratorObject); } void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) { if (emit_debug_code()) { Label done_checking; AssertNotSmi(object); Cmp(object, isolate()->factory()->undefined_value()); j(equal, &done_checking); Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map()); Assert(equal, AbortReason::kExpectedUndefinedOrCell); bind(&done_checking); } } void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) { cmpp(in_out, Immediate(kClearedWeakHeapObject)); j(equal, target_if_cleared); andp(in_out, Immediate(~kWeakHeapObjectMask)); } void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) { DCHECK_GT(value, 0); if (FLAG_native_code_counters && counter->Enabled()) { Operand counter_operand = ExternalOperand(ExternalReference::Create(counter)); if (value == 1) { incl(counter_operand); } else { addl(counter_operand, Immediate(value)); } } } void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) { DCHECK_GT(value, 0); if (FLAG_native_code_counters && counter->Enabled()) { Operand counter_operand = ExternalOperand(ExternalReference::Create(counter)); if (value == 1) { decl(counter_operand); } else { subl(counter_operand, Immediate(value)); } } } void MacroAssembler::MaybeDropFrames() { // Check whether we need to drop frames to restart a function on the stack. ExternalReference restart_fp = ExternalReference::debug_restart_fp_address(isolate()); Load(rbx, restart_fp); testp(rbx, rbx); Label dont_drop; j(zero, &dont_drop, Label::kNear); Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET); bind(&dont_drop); } void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count, Register caller_args_count_reg, Register scratch0, Register scratch1) { #if DEBUG if (callee_args_count.is_reg()) { DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0, scratch1)); } else { DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1)); } #endif // Calculate the destination address where we will put the return address // after we drop current frame. Register new_sp_reg = scratch0; if (callee_args_count.is_reg()) { subp(caller_args_count_reg, callee_args_count.reg()); leap(new_sp_reg, Operand(rbp, caller_args_count_reg, times_pointer_size, StandardFrameConstants::kCallerPCOffset)); } else { leap(new_sp_reg, Operand(rbp, caller_args_count_reg, times_pointer_size, StandardFrameConstants::kCallerPCOffset - callee_args_count.immediate() * kPointerSize)); } if (FLAG_debug_code) { cmpp(rsp, new_sp_reg); Check(below, AbortReason::kStackAccessBelowStackPointer); } // Copy return address from caller's frame to current frame's return address // to avoid its trashing and let the following loop copy it to the right // place. Register tmp_reg = scratch1; movp(tmp_reg, Operand(rbp, StandardFrameConstants::kCallerPCOffset)); movp(Operand(rsp, 0), tmp_reg); // Restore caller's frame pointer now as it could be overwritten by // the copying loop. movp(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); // +2 here is to copy both receiver and return address. 
Register count_reg = caller_args_count_reg; if (callee_args_count.is_reg()) { leap(count_reg, Operand(callee_args_count.reg(), 2)); } else { movp(count_reg, Immediate(callee_args_count.immediate() + 2)); // TODO(ishell): Unroll copying loop for small immediate values. } // Now copy callee arguments to the caller frame going backwards to avoid // callee arguments corruption (source and destination areas could overlap). Label loop, entry; jmp(&entry, Label::kNear); bind(&loop); decp(count_reg); movp(tmp_reg, Operand(rsp, count_reg, times_pointer_size, 0)); movp(Operand(new_sp_reg, count_reg, times_pointer_size, 0), tmp_reg); bind(&entry); cmpp(count_reg, Immediate(0)); j(not_equal, &loop, Label::kNear); // Leave current frame. movp(rsp, new_sp_reg); } void MacroAssembler::InvokeFunction(Register function, Register new_target, const ParameterCount& actual, InvokeFlag flag) { movp(rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); movzxwq(rbx, FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset)); ParameterCount expected(rbx); InvokeFunction(function, new_target, expected, actual, flag); } void MacroAssembler::InvokeFunction(Register function, Register new_target, const ParameterCount& expected, const ParameterCount& actual, InvokeFlag flag) { DCHECK(function == rdi); movp(rsi, FieldOperand(function, JSFunction::kContextOffset)); InvokeFunctionCode(rdi, new_target, expected, actual, flag); } void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, const ParameterCount& expected, const ParameterCount& actual, InvokeFlag flag) { // You can't call a function without a valid frame. DCHECK(flag == JUMP_FUNCTION || has_frame()); DCHECK(function == rdi); DCHECK_IMPLIES(new_target.is_valid(), new_target == rdx); // On function call, call into the debugger if necessary. CheckDebugHook(function, new_target, expected, actual); // Clear the new.target register if not given. if (!new_target.is_valid()) { LoadRoot(rdx, Heap::kUndefinedValueRootIndex); } Label done; bool definitely_mismatches = false; InvokePrologue(expected, actual, &done, &definitely_mismatches, flag, Label::kNear); if (!definitely_mismatches) { // We call indirectly through the code field in the function to // allow recompilation to take effect without changing any of the // call sites. static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch"); movp(rcx, FieldOperand(function, JSFunction::kCodeOffset)); addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag)); if (flag == CALL_FUNCTION) { call(rcx); } else { DCHECK(flag == JUMP_FUNCTION); jmp(rcx); } bind(&done); } } void MacroAssembler::InvokePrologue(const ParameterCount& expected, const ParameterCount& actual, Label* done, bool* definitely_mismatches, InvokeFlag flag, Label::Distance near_jump) { bool definitely_matches = false; *definitely_mismatches = false; Label invoke; if (expected.is_immediate()) { DCHECK(actual.is_immediate()); Set(rax, actual.immediate()); if (expected.immediate() == actual.immediate()) { definitely_matches = true; } else { if (expected.immediate() == SharedFunctionInfo::kDontAdaptArgumentsSentinel) { // Don't worry about adapting arguments for built-ins that // don't want that done. Skip adaption code by making it look // like we have a match between expected and actual number of // arguments. definitely_matches = true; } else { *definitely_mismatches = true; Set(rbx, expected.immediate()); } } } else { if (actual.is_immediate()) { // Expected is in register, actual is immediate. 
      // case when we invoke function values without going through the
      // IC mechanism.
      Set(rax, actual.immediate());
      cmpp(expected.reg(), Immediate(actual.immediate()));
      j(equal, &invoke, Label::kNear);
      DCHECK(expected.reg() == rbx);
    } else if (expected.reg() != actual.reg()) {
      // Both expected and actual are in (different) registers. This
      // is the case when we invoke functions using call and apply.
      cmpp(expected.reg(), actual.reg());
      j(equal, &invoke, Label::kNear);
      DCHECK(actual.reg() == rax);
      DCHECK(expected.reg() == rbx);
    } else {
      definitely_matches = true;
      Move(rax, actual.reg());
    }
  }

  if (!definitely_matches) {
    Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
    if (flag == CALL_FUNCTION) {
      Call(adaptor, RelocInfo::CODE_TARGET);
      if (!*definitely_mismatches) {
        jmp(done, near_jump);
      }
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&invoke);
  }
}

void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual) {
  Label skip_hook;
  ExternalReference debug_hook_active =
      ExternalReference::debug_hook_on_function_call_address(isolate());
  Operand debug_hook_active_operand = ExternalOperand(debug_hook_active);
  cmpb(debug_hook_active_operand, Immediate(0));
  j(equal, &skip_hook);

  {
    FrameScope frame(this,
                     has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
    if (expected.is_reg()) {
      SmiTag(expected.reg(), expected.reg());
      Push(expected.reg());
    }
    if (actual.is_reg()) {
      SmiTag(actual.reg(), actual.reg());
      Push(actual.reg());
      SmiUntag(actual.reg(), actual.reg());
    }
    if (new_target.is_valid()) {
      Push(new_target);
    }
    Push(fun);
    Push(fun);
    Push(StackArgumentsAccessor(rbp, actual).GetReceiverOperand());
    CallRuntime(Runtime::kDebugOnFunctionCall);
    Pop(fun);
    if (new_target.is_valid()) {
      Pop(new_target);
    }
    if (actual.is_reg()) {
      Pop(actual.reg());
      SmiUntag(actual.reg(), actual.reg());
    }
    if (expected.is_reg()) {
      Pop(expected.reg());
      SmiUntag(expected.reg(), expected.reg());
    }
  }
  bind(&skip_hook);
}

void TurboAssembler::StubPrologue(StackFrame::Type type) {
  pushq(rbp);  // Caller's frame pointer.
  movp(rbp, rsp);
  Push(Immediate(StackFrame::TypeToMarker(type)));
}

void TurboAssembler::Prologue() {
  pushq(rbp);  // Caller's frame pointer.
  movp(rbp, rsp);
  Push(rsi);  // Callee's context.
  Push(rdi);  // Callee's JS function.
}

void TurboAssembler::EnterFrame(StackFrame::Type type) {
  pushq(rbp);
  movp(rbp, rsp);
  Push(Immediate(StackFrame::TypeToMarker(type)));
}

void TurboAssembler::LeaveFrame(StackFrame::Type type) {
  if (emit_debug_code()) {
    cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
         Immediate(StackFrame::TypeToMarker(type)));
    Check(equal, AbortReason::kStackFrameTypesMustMatch);
  }
  movp(rsp, rbp);
  popq(rbp);
}

void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
                                       Register argc) {
  Push(rbp);
  Move(rbp, rsp);
  Push(context);
  Push(target);
  Push(argc);
}

void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
                                       Register argc) {
  Pop(argc);
  Pop(target);
  Pop(context);
  leave();
}

void MacroAssembler::EnterExitFramePrologue(bool save_rax,
                                            StackFrame::Type frame_type) {
  DCHECK(frame_type == StackFrame::EXIT ||
         frame_type == StackFrame::BUILTIN_EXIT);

  // Set up the frame structure on the stack.
  // All constants are relative to the frame pointer of the exit frame.
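  //
  // Resulting layout sketch, derived from the pushes and offset checks below
  // (offsets relative to the new rbp):
  //   rbp + kFPOnStackSize + kPCOnStackSize : caller's stack parameters
  //   rbp + kFPOnStackSize                  : return address (caller PC)
  //   rbp + 0                               : saved caller rbp
  //   rbp - 1 * kPointerSize                : frame type marker
  //   rbp - 2 * kPointerSize (kSPOffset)    : saved entry sp, patched later
  //   rbp - 3 * kPointerSize                : code object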
  DCHECK_EQ(kFPOnStackSize + kPCOnStackSize,
            ExitFrameConstants::kCallerSPDisplacement);
  DCHECK_EQ(kFPOnStackSize, ExitFrameConstants::kCallerPCOffset);
  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
  pushq(rbp);
  movp(rbp, rsp);

  // Reserve room for entry stack pointer and push the code object.
  Push(Immediate(StackFrame::TypeToMarker(frame_type)));
  DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
  Push(Immediate(0));  // Saved entry sp, patched before call.
  Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
  Push(kScratchRegister);  // Accessed from ExitFrame::code_slot.

  // Save the frame pointer and the context in top.
  if (save_rax) {
    movp(r14, rax);  // Backup rax in callee-save register.
  }
  Store(
      ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()),
      rbp);
  Store(
      ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()),
      rsi);
  Store(
      ExternalReference::Create(IsolateAddressId::kCFunctionAddress, isolate()),
      rbx);
}

void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
                                            bool save_doubles) {
#ifdef _WIN64
  const int kShadowSpace = 4;
  arg_stack_space += kShadowSpace;
#endif
  // Optionally save all XMM registers.
  if (save_doubles) {
    int space = XMMRegister::kNumRegisters * kDoubleSize +
                arg_stack_space * kRegisterSize;
    subp(rsp, Immediate(space));
    int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
    const RegisterConfiguration* config = RegisterConfiguration::Default();
    for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
      DoubleRegister reg =
          DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
      Movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
    }
  } else if (arg_stack_space > 0) {
    subp(rsp, Immediate(arg_stack_space * kRegisterSize));
  }

  // Get the required frame alignment for the OS.
  const int kFrameAlignment = base::OS::ActivationFrameAlignment();
  if (kFrameAlignment > 0) {
    DCHECK(base::bits::IsPowerOfTwo(kFrameAlignment));
    DCHECK(is_int8(kFrameAlignment));
    andp(rsp, Immediate(-kFrameAlignment));
  }

  // Patch the saved entry sp.
  movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
}

void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles,
                                    StackFrame::Type frame_type) {
  EnterExitFramePrologue(true, frame_type);

  // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
  // so it must be retained across the C-call.
  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
  leap(r15, Operand(rbp, r14, times_pointer_size, offset));

  EnterExitFrameEpilogue(arg_stack_space, save_doubles);
}

void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
  EnterExitFramePrologue(false, StackFrame::EXIT);
  EnterExitFrameEpilogue(arg_stack_space, false);
}

void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
  // Registers:
  // r15 : argv
  if (save_doubles) {
    int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
    const RegisterConfiguration* config = RegisterConfiguration::Default();
    for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
      DoubleRegister reg =
          DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
      Movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
    }
  }

  if (pop_arguments) {
    // Get the return address from the stack and restore the frame pointer.
    movp(rcx, Operand(rbp, kFPOnStackSize));
    movp(rbp, Operand(rbp, 0 * kPointerSize));

    // Drop everything up to and including the arguments and the receiver
    // from the caller stack.
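    // (r15 still holds argv: it was set up in EnterExitFrame and, being
    // callee-saved, was retained across the C call.)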
    leap(rsp, Operand(r15, 1 * kPointerSize));

    PushReturnAddressFrom(rcx);
  } else {
    // Otherwise just leave the exit frame.
    leave();
  }

  LeaveExitFrameEpilogue();
}

void MacroAssembler::LeaveApiExitFrame() {
  movp(rsp, rbp);
  popq(rbp);

  LeaveExitFrameEpilogue();
}

void MacroAssembler::LeaveExitFrameEpilogue() {
  // Restore current context from top and clear it in debug mode.
  ExternalReference context_address =
      ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
  Operand context_operand = ExternalOperand(context_address);
  movp(rsi, context_operand);
#ifdef DEBUG
  movp(context_operand, Immediate(Context::kInvalidContext));
#endif

  // Clear the top frame.
  ExternalReference c_entry_fp_address =
      ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
  Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
  movp(c_entry_fp_operand, Immediate(0));
}

#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
static const int kRegisterPassedArguments = 6;
#endif

void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
  movp(dst, NativeContextOperand());
  movp(dst, ContextOperand(dst, index));
}

int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
  // On Windows 64 stack slots are reserved by the caller for all arguments
  // including the ones passed in registers, and space is always allocated for
  // the four register arguments even if the function takes fewer than four
  // arguments.
  // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
  // and the caller does not reserve stack slots for them.
  DCHECK_GE(num_arguments, 0);
#ifdef _WIN64
  const int kMinimumStackSlots = kRegisterPassedArguments;
  if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
  return num_arguments;
#else
  if (num_arguments < kRegisterPassedArguments) return 0;
  return num_arguments - kRegisterPassedArguments;
#endif
}

void TurboAssembler::PrepareCallCFunction(int num_arguments) {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  DCHECK_NE(frame_alignment, 0);
  DCHECK_GE(num_arguments, 0);

  // Make stack end at alignment and allocate space for arguments and old rsp.
  movp(kScratchRegister, rsp);
  DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
  andp(rsp, Immediate(-frame_alignment));
  movp(Operand(rsp, argument_slots_on_stack * kRegisterSize),
       kScratchRegister);
}

void TurboAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  LoadAddress(rax, function);
  CallCFunction(rax, num_arguments);
}

void TurboAssembler::CallCFunction(Register function, int num_arguments) {
  DCHECK_LE(num_arguments, kMaxCParameters);
  DCHECK(has_frame());
  // Check stack alignment.
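  // (Note: PrepareCallCFunction aligned rsp and stashed the caller's rsp in
  // the slot just above the argument area; the movp after the call below
  // restores it.)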
  if (emit_debug_code()) {
    CheckStackAlignment();
  }

  call(function);
  DCHECK_NE(base::OS::ActivationFrameAlignment(), 0);
  DCHECK_GE(num_arguments, 0);
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
}

void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
                                   Condition cc, Label* condition_met,
                                   Label::Distance condition_met_distance) {
  DCHECK(cc == zero || cc == not_zero);
  if (scratch == object) {
    andp(scratch, Immediate(~kPageAlignmentMask));
  } else {
    movp(scratch, Immediate(~kPageAlignmentMask));
    andp(scratch, object);
  }
  if (mask < (1 << kBitsPerByte)) {
    testb(Operand(scratch, MemoryChunk::kFlagsOffset),
          Immediate(static_cast<uint8_t>(mask)));
  } else {
    testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}

void TurboAssembler::ComputeCodeStartAddress(Register dst) {
  Label current;
  bind(&current);
  int pc = pc_offset();
  // Load effective address to get the address of the current instruction.
  leaq(dst, Operand(&current, -pc));
}

void TurboAssembler::ResetSpeculationPoisonRegister() {
  // TODO(tebbi): Perhaps, we want to put an lfence here.
  Set(kSpeculationPoisonRegister, -1);
}

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X64