/* * Copyright (C) 2014 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "code_generator_x86.h" #include "arch/x86/jni_frame_x86.h" #include "art_method-inl.h" #include "class_table.h" #include "code_generator_utils.h" #include "compiled_method.h" #include "entrypoints/quick/quick_entrypoints.h" #include "entrypoints/quick/quick_entrypoints_enum.h" #include "gc/accounting/card_table.h" #include "gc/space/image_space.h" #include "heap_poisoning.h" #include "interpreter/mterp/nterp.h" #include "intrinsics.h" #include "intrinsics_x86.h" #include "jit/profiling_info.h" #include "linker/linker_patch.h" #include "lock_word.h" #include "mirror/array-inl.h" #include "mirror/class-inl.h" #include "mirror/var_handle.h" #include "scoped_thread_state_change-inl.h" #include "thread.h" #include "utils/assembler.h" #include "utils/stack_checks.h" #include "utils/x86/assembler_x86.h" #include "utils/x86/managed_register_x86.h" namespace art { template class GcRoot; namespace x86 { static constexpr int kCurrentMethodStackOffset = 0; static constexpr Register kMethodRegisterArgument = EAX; static constexpr Register kCoreCalleeSaves[] = { EBP, ESI, EDI }; static constexpr int kC2ConditionMask = 0x400; static constexpr int kFakeReturnRegister = Register(8); static constexpr int64_t kDoubleNaN = INT64_C(0x7FF8000000000000); static constexpr int32_t kFloatNaN = INT32_C(0x7FC00000); static RegisterSet OneRegInReferenceOutSaveEverythingCallerSaves() { InvokeRuntimeCallingConvention calling_convention; RegisterSet caller_saves = RegisterSet::Empty(); caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0))); // TODO: Add GetReturnLocation() to the calling convention so that we can DCHECK() // that the the kPrimNot result register is the same as the first argument register. return caller_saves; } // NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy. #define __ down_cast(codegen->GetAssembler())-> // NOLINT #define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, x).Int32Value() class NullCheckSlowPathX86 : public SlowPathCode { public: explicit NullCheckSlowPathX86(HNullCheck* instruction) : SlowPathCode(instruction) {} void EmitNativeCode(CodeGenerator* codegen) override { CodeGeneratorX86* x86_codegen = down_cast(codegen); __ Bind(GetEntryLabel()); if (instruction_->CanThrowIntoCatchBlock()) { // Live registers will be restored in the catch block if caught. 
SaveLiveRegisters(codegen, instruction_->GetLocations()); } x86_codegen->InvokeRuntime(kQuickThrowNullPointer, instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes(); } bool IsFatal() const override { return true; } const char* GetDescription() const override { return "NullCheckSlowPathX86"; } private: DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86); }; class DivZeroCheckSlowPathX86 : public SlowPathCode { public: explicit DivZeroCheckSlowPathX86(HDivZeroCheck* instruction) : SlowPathCode(instruction) {} void EmitNativeCode(CodeGenerator* codegen) override { CodeGeneratorX86* x86_codegen = down_cast(codegen); __ Bind(GetEntryLabel()); x86_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes(); } bool IsFatal() const override { return true; } const char* GetDescription() const override { return "DivZeroCheckSlowPathX86"; } private: DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathX86); }; class DivRemMinusOneSlowPathX86 : public SlowPathCode { public: DivRemMinusOneSlowPathX86(HInstruction* instruction, Register reg, bool is_div) : SlowPathCode(instruction), reg_(reg), is_div_(is_div) {} void EmitNativeCode(CodeGenerator* codegen) override { __ Bind(GetEntryLabel()); if (is_div_) { __ negl(reg_); } else { __ movl(reg_, Immediate(0)); } __ jmp(GetExitLabel()); } const char* GetDescription() const override { return "DivRemMinusOneSlowPathX86"; } private: Register reg_; bool is_div_; DISALLOW_COPY_AND_ASSIGN(DivRemMinusOneSlowPathX86); }; class BoundsCheckSlowPathX86 : public SlowPathCode { public: explicit BoundsCheckSlowPathX86(HBoundsCheck* instruction) : SlowPathCode(instruction) {} void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); CodeGeneratorX86* x86_codegen = down_cast(codegen); __ Bind(GetEntryLabel()); // We're moving two locations to locations that could overlap, so we need a parallel // move resolver. if (instruction_->CanThrowIntoCatchBlock()) { // Live registers will be restored in the catch block if caught. SaveLiveRegisters(codegen, instruction_->GetLocations()); } // Are we using an array length from memory? HInstruction* array_length = instruction_->InputAt(1); Location length_loc = locations->InAt(1); InvokeRuntimeCallingConvention calling_convention; if (array_length->IsArrayLength() && array_length->IsEmittedAtUseSite()) { // Load the array length into our temporary. HArrayLength* length = array_length->AsArrayLength(); uint32_t len_offset = CodeGenerator::GetArrayLengthOffset(length); Location array_loc = array_length->GetLocations()->InAt(0); Address array_len(array_loc.AsRegister(), len_offset); length_loc = Location::RegisterLocation(calling_convention.GetRegisterAt(1)); // Check for conflicts with index. if (length_loc.Equals(locations->InAt(0))) { // We know we aren't using parameter 2. length_loc = Location::RegisterLocation(calling_convention.GetRegisterAt(2)); } __ movl(length_loc.AsRegister(), array_len); if (mirror::kUseStringCompression && length->IsStringLength()) { __ shrl(length_loc.AsRegister(), Immediate(1)); } } x86_codegen->EmitParallelMoves( locations->InAt(0), Location::RegisterLocation(calling_convention.GetRegisterAt(0)), DataType::Type::kInt32, length_loc, Location::RegisterLocation(calling_convention.GetRegisterAt(1)), DataType::Type::kInt32); QuickEntrypointEnum entrypoint = instruction_->AsBoundsCheck()->IsStringCharAt() ? 
kQuickThrowStringBounds : kQuickThrowArrayBounds; x86_codegen->InvokeRuntime(entrypoint, instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes(); CheckEntrypointTypes(); } bool IsFatal() const override { return true; } const char* GetDescription() const override { return "BoundsCheckSlowPathX86"; } private: DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86); }; class SuspendCheckSlowPathX86 : public SlowPathCode { public: SuspendCheckSlowPathX86(HSuspendCheck* instruction, HBasicBlock* successor) : SlowPathCode(instruction), successor_(successor) {} void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); CodeGeneratorX86* x86_codegen = down_cast(codegen); __ Bind(GetEntryLabel()); SaveLiveRegisters(codegen, locations); // Only saves full width XMM for SIMD. x86_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes(); RestoreLiveRegisters(codegen, locations); // Only restores full width XMM for SIMD. if (successor_ == nullptr) { __ jmp(GetReturnLabel()); } else { __ jmp(x86_codegen->GetLabelOf(successor_)); } } Label* GetReturnLabel() { DCHECK(successor_ == nullptr); return &return_label_; } HBasicBlock* GetSuccessor() const { return successor_; } const char* GetDescription() const override { return "SuspendCheckSlowPathX86"; } private: HBasicBlock* const successor_; Label return_label_; DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathX86); }; class LoadStringSlowPathX86 : public SlowPathCode { public: explicit LoadStringSlowPathX86(HLoadString* instruction): SlowPathCode(instruction) {} void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); CodeGeneratorX86* x86_codegen = down_cast(codegen); __ Bind(GetEntryLabel()); SaveLiveRegisters(codegen, locations); InvokeRuntimeCallingConvention calling_convention; const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex(); __ movl(calling_convention.GetRegisterAt(0), Immediate(string_index.index_)); x86_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes(); x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX)); RestoreLiveRegisters(codegen, locations); __ jmp(GetExitLabel()); } const char* GetDescription() const override { return "LoadStringSlowPathX86"; } private: DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86); }; class LoadClassSlowPathX86 : public SlowPathCode { public: LoadClassSlowPathX86(HLoadClass* cls, HInstruction* at) : SlowPathCode(at), cls_(cls) { DCHECK(at->IsLoadClass() || at->IsClinitCheck()); DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_); } void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); Location out = locations->Out(); const uint32_t dex_pc = instruction_->GetDexPc(); bool must_resolve_type = instruction_->IsLoadClass() && cls_->MustResolveTypeOnSlowPath(); bool must_do_clinit = instruction_->IsClinitCheck() || cls_->MustGenerateClinitCheck(); CodeGeneratorX86* x86_codegen = down_cast(codegen); __ Bind(GetEntryLabel()); SaveLiveRegisters(codegen, locations); InvokeRuntimeCallingConvention calling_convention; if (must_resolve_type) { DCHECK(IsSameDexFile(cls_->GetDexFile(), x86_codegen->GetGraph()->GetDexFile())); dex::TypeIndex type_index = cls_->GetTypeIndex(); __ 
movl(calling_convention.GetRegisterAt(0), Immediate(type_index.index_)); if (cls_->NeedsAccessCheck()) { CheckEntrypointTypes(); x86_codegen->InvokeRuntime(kQuickResolveTypeAndVerifyAccess, instruction_, dex_pc, this); } else { CheckEntrypointTypes(); x86_codegen->InvokeRuntime(kQuickResolveType, instruction_, dex_pc, this); } // If we also must_do_clinit, the resolved type is now in the correct register. } else { DCHECK(must_do_clinit); Location source = instruction_->IsLoadClass() ? out : locations->InAt(0); x86_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), source); } if (must_do_clinit) { x86_codegen->InvokeRuntime(kQuickInitializeStaticStorage, instruction_, dex_pc, this); CheckEntrypointTypes(); } // Move the class to the desired location. if (out.IsValid()) { DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg())); x86_codegen->Move32(out, Location::RegisterLocation(EAX)); } RestoreLiveRegisters(codegen, locations); __ jmp(GetExitLabel()); } const char* GetDescription() const override { return "LoadClassSlowPathX86"; } private: // The class this slow path will load. HLoadClass* const cls_; DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathX86); }; class TypeCheckSlowPathX86 : public SlowPathCode { public: TypeCheckSlowPathX86(HInstruction* instruction, bool is_fatal) : SlowPathCode(instruction), is_fatal_(is_fatal) {} void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); DCHECK(instruction_->IsCheckCast() || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); CodeGeneratorX86* x86_codegen = down_cast(codegen); __ Bind(GetEntryLabel()); if (kPoisonHeapReferences && instruction_->IsCheckCast() && instruction_->AsCheckCast()->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck) { // First, unpoison the `cls` reference that was poisoned for direct memory comparison. __ UnpoisonHeapReference(locations->InAt(1).AsRegister()); } if (!is_fatal_ || instruction_->CanThrowIntoCatchBlock()) { SaveLiveRegisters(codegen, locations); } // We're moving two locations to locations that could overlap, so we need a parallel // move resolver. 
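// For example, the checked object in InAt(0) may already sit in the register that the // convention assigns to the second argument while the class in InAt(1) occupies the first // argument's register; emitting the two moves in sequence would clobber an input, so the // resolver orders or swaps them as needed.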
InvokeRuntimeCallingConvention calling_convention; x86_codegen->EmitParallelMoves(locations->InAt(0), Location::RegisterLocation(calling_convention.GetRegisterAt(0)), DataType::Type::kReference, locations->InAt(1), Location::RegisterLocation(calling_convention.GetRegisterAt(1)), DataType::Type::kReference); if (instruction_->IsInstanceOf()) { x86_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes(); } else { DCHECK(instruction_->IsCheckCast()); x86_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes(); } if (!is_fatal_) { if (instruction_->IsInstanceOf()) { x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX)); } RestoreLiveRegisters(codegen, locations); __ jmp(GetExitLabel()); } } const char* GetDescription() const override { return "TypeCheckSlowPathX86"; } bool IsFatal() const override { return is_fatal_; } private: const bool is_fatal_; DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86); }; class DeoptimizationSlowPathX86 : public SlowPathCode { public: explicit DeoptimizationSlowPathX86(HDeoptimize* instruction) : SlowPathCode(instruction) {} void EmitNativeCode(CodeGenerator* codegen) override { CodeGeneratorX86* x86_codegen = down_cast(codegen); __ Bind(GetEntryLabel()); LocationSummary* locations = instruction_->GetLocations(); SaveLiveRegisters(codegen, locations); InvokeRuntimeCallingConvention calling_convention; x86_codegen->Load32BitValue( calling_convention.GetRegisterAt(0), static_cast(instruction_->AsDeoptimize()->GetDeoptimizationKind())); x86_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes(); } const char* GetDescription() const override { return "DeoptimizationSlowPathX86"; } private: DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86); }; class ArraySetSlowPathX86 : public SlowPathCode { public: explicit ArraySetSlowPathX86(HInstruction* instruction) : SlowPathCode(instruction) {} void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); __ Bind(GetEntryLabel()); SaveLiveRegisters(codegen, locations); InvokeRuntimeCallingConvention calling_convention; HParallelMove parallel_move(codegen->GetGraph()->GetAllocator()); parallel_move.AddMove( locations->InAt(0), Location::RegisterLocation(calling_convention.GetRegisterAt(0)), DataType::Type::kReference, nullptr); parallel_move.AddMove( locations->InAt(1), Location::RegisterLocation(calling_convention.GetRegisterAt(1)), DataType::Type::kInt32, nullptr); parallel_move.AddMove( locations->InAt(2), Location::RegisterLocation(calling_convention.GetRegisterAt(2)), DataType::Type::kReference, nullptr); codegen->GetMoveResolver()->EmitNativeCode(&parallel_move); CodeGeneratorX86* x86_codegen = down_cast(codegen); x86_codegen->InvokeRuntime(kQuickAputObject, instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes(); RestoreLiveRegisters(codegen, locations); __ jmp(GetExitLabel()); } const char* GetDescription() const override { return "ArraySetSlowPathX86"; } private: DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86); }; // Slow path marking an object reference `ref` during a read // barrier. The field `obj.field` in the object `obj` holding this // reference does not get updated by this slow path after marking (see // ReadBarrierMarkAndUpdateFieldSlowPathX86 below for that). 
// // This means that after the execution of this slow path, `ref` will // always be up-to-date, but `obj.field` may not; i.e., after the // flip, `ref` will be a to-space reference, but `obj.field` will // probably still be a from-space reference (unless it gets updated by // another thread, or if another thread installed another object // reference (different from `ref`) in `obj.field`). class ReadBarrierMarkSlowPathX86 : public SlowPathCode { public: ReadBarrierMarkSlowPathX86(HInstruction* instruction, Location ref, bool unpoison_ref_before_marking) : SlowPathCode(instruction), ref_(ref), unpoison_ref_before_marking_(unpoison_ref_before_marking) { DCHECK(kEmitCompilerReadBarrier); } const char* GetDescription() const override { return "ReadBarrierMarkSlowPathX86"; } void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); Register ref_reg = ref_.AsRegister(); DCHECK(locations->CanCall()); DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg; DCHECK(instruction_->IsInstanceFieldGet() || instruction_->IsPredicatedInstanceFieldGet() || instruction_->IsStaticFieldGet() || instruction_->IsArrayGet() || instruction_->IsArraySet() || instruction_->IsLoadClass() || instruction_->IsLoadString() || instruction_->IsInstanceOf() || instruction_->IsCheckCast() || (instruction_->IsInvoke() && instruction_->GetLocations()->Intrinsified())) << "Unexpected instruction in read barrier marking slow path: " << instruction_->DebugName(); __ Bind(GetEntryLabel()); if (unpoison_ref_before_marking_) { // Object* ref = ref_addr->AsMirrorPtr() __ MaybeUnpoisonHeapReference(ref_reg); } // No need to save live registers; it's taken care of by the // entrypoint. Also, there is no need to update the stack mask, // as this runtime call will not trigger a garbage collection. CodeGeneratorX86* x86_codegen = down_cast(codegen); DCHECK_NE(ref_reg, ESP); DCHECK(0 <= ref_reg && ref_reg < kNumberOfCpuRegisters) << ref_reg; // "Compact" slow path, saving two moves. // // Instead of using the standard runtime calling convention (input // and output in EAX): // // EAX <- ref // EAX <- ReadBarrierMark(EAX) // ref <- EAX // // we just use rX (the register containing `ref`) as input and output // of a dedicated entrypoint: // // rX <- ReadBarrierMarkRegX(rX) // int32_t entry_point_offset = Thread::ReadBarrierMarkEntryPointsOffset(ref_reg); // This runtime call does not require a stack map. x86_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this); __ jmp(GetExitLabel()); } private: // The location (register) of the marked object reference. const Location ref_; // Should the reference in `ref_` be unpoisoned prior to marking it? const bool unpoison_ref_before_marking_; DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathX86); }; // Slow path marking an object reference `ref` during a read barrier, // and if needed, atomically updating the field `obj.field` in the // object `obj` holding this reference after marking (contrary to // ReadBarrierMarkSlowPathX86 above, which never tries to update // `obj.field`). // // This means that after the execution of this slow path, both `ref` // and `obj.field` will be up-to-date; i.e., after the flip, both will // hold the same to-space reference (unless another thread installed // another object reference (different from `ref`) in `obj.field`). 
class ReadBarrierMarkAndUpdateFieldSlowPathX86 : public SlowPathCode { public: ReadBarrierMarkAndUpdateFieldSlowPathX86(HInstruction* instruction, Location ref, Register obj, const Address& field_addr, bool unpoison_ref_before_marking, Register temp) : SlowPathCode(instruction), ref_(ref), obj_(obj), field_addr_(field_addr), unpoison_ref_before_marking_(unpoison_ref_before_marking), temp_(temp) { DCHECK(kEmitCompilerReadBarrier); } const char* GetDescription() const override { return "ReadBarrierMarkAndUpdateFieldSlowPathX86"; } void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); Register ref_reg = ref_.AsRegister(); DCHECK(locations->CanCall()); DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg; // This slow path is only used by the UnsafeCASObject intrinsic. DCHECK((instruction_->IsInvoke() && instruction_->GetLocations()->Intrinsified())) << "Unexpected instruction in read barrier marking and field updating slow path: " << instruction_->DebugName(); DCHECK(instruction_->GetLocations()->Intrinsified()); Intrinsics intrinsic = instruction_->AsInvoke()->GetIntrinsic(); static constexpr auto kVarHandleCAS = mirror::VarHandle::AccessModeTemplate::kCompareAndSet; static constexpr auto kVarHandleGetAndSet = mirror::VarHandle::AccessModeTemplate::kGetAndUpdate; static constexpr auto kVarHandleCAX = mirror::VarHandle::AccessModeTemplate::kCompareAndExchange; DCHECK(intrinsic == Intrinsics::kUnsafeCASObject || mirror::VarHandle::GetAccessModeTemplateByIntrinsic(intrinsic) == kVarHandleCAS || mirror::VarHandle::GetAccessModeTemplateByIntrinsic(intrinsic) == kVarHandleGetAndSet || mirror::VarHandle::GetAccessModeTemplateByIntrinsic(intrinsic) == kVarHandleCAX); __ Bind(GetEntryLabel()); if (unpoison_ref_before_marking_) { // Object* ref = ref_addr->AsMirrorPtr() __ MaybeUnpoisonHeapReference(ref_reg); } // Save the old (unpoisoned) reference. __ movl(temp_, ref_reg); // No need to save live registers; it's taken care of by the // entrypoint. Also, there is no need to update the stack mask, // as this runtime call will not trigger a garbage collection. CodeGeneratorX86* x86_codegen = down_cast(codegen); DCHECK_NE(ref_reg, ESP); DCHECK(0 <= ref_reg && ref_reg < kNumberOfCpuRegisters) << ref_reg; // "Compact" slow path, saving two moves. // // Instead of using the standard runtime calling convention (input // and output in EAX): // // EAX <- ref // EAX <- ReadBarrierMark(EAX) // ref <- EAX // // we just use rX (the register containing `ref`) as input and output // of a dedicated entrypoint: // // rX <- ReadBarrierMarkRegX(rX) // int32_t entry_point_offset = Thread::ReadBarrierMarkEntryPointsOffset(ref_reg); // This runtime call does not require a stack map. x86_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this); // If the new reference is different from the old reference, // update the field in the holder (`*field_addr`). // // Note that this field could also hold a different object, if // another thread had concurrently changed it. In that case, the // LOCK CMPXCHGL instruction in the compare-and-set (CAS) // operation below would abort the CAS, leaving the field as-is. NearLabel done; __ cmpl(temp_, ref_reg); __ j(kEqual, &done); // Update the the holder's field atomically. This may fail if // mutator updates before us, but it's OK. 
This is achieved // using a strong compare-and-set (CAS) operation with relaxed // memory synchronization ordering, where the expected value is // the old reference and the desired value is the new reference. // This operation is implemented with a 32-bit LOCK CMPXCHG // instruction, which requires the expected value (the old // reference) to be in EAX. Save EAX beforehand, and move the // expected value (stored in `temp_`) into EAX. __ pushl(EAX); __ movl(EAX, temp_); // Convenience aliases. Register base = obj_; Register expected = EAX; Register value = ref_reg; bool base_equals_value = (base == value); if (kPoisonHeapReferences) { if (base_equals_value) { // If `base` and `value` are the same register location, move // `value` to a temporary register. This way, poisoning // `value` won't invalidate `base`. value = temp_; __ movl(value, base); } // Check that the register allocator did not assign the location // of `expected` (EAX) to `value` nor to `base`, so that heap // poisoning (when enabled) works as intended below. // - If `value` were equal to `expected`, both references would // be poisoned twice, meaning they would not be poisoned at // all, as heap poisoning uses address negation. // - If `base` were equal to `expected`, poisoning `expected` // would invalidate `base`. DCHECK_NE(value, expected); DCHECK_NE(base, expected); __ PoisonHeapReference(expected); __ PoisonHeapReference(value); } __ LockCmpxchgl(field_addr_, value); // If heap poisoning is enabled, we need to unpoison the values // that were poisoned earlier. if (kPoisonHeapReferences) { if (base_equals_value) { // `value` has been moved to a temporary register, no need // to unpoison it. } else { __ UnpoisonHeapReference(value); } // No need to unpoison `expected` (EAX), as it will be overwritten below. } // Restore EAX. __ popl(EAX); __ Bind(&done); __ jmp(GetExitLabel()); } private: // The location (register) of the marked object reference. const Location ref_; // The register containing the object holding the marked object reference field. const Register obj_; // The address of the marked reference field. The base of this address must be `obj_`. const Address field_addr_; // Should the reference in `ref_` be unpoisoned prior to marking it? const bool unpoison_ref_before_marking_; const Register temp_; DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkAndUpdateFieldSlowPathX86); }; // Slow path generating a read barrier for a heap reference. class ReadBarrierForHeapReferenceSlowPathX86 : public SlowPathCode { public: ReadBarrierForHeapReferenceSlowPathX86(HInstruction* instruction, Location out, Location ref, Location obj, uint32_t offset, Location index) : SlowPathCode(instruction), out_(out), ref_(ref), obj_(obj), offset_(offset), index_(index) { DCHECK(kEmitCompilerReadBarrier); // If `obj` is equal to `out` or `ref`, it means the initial object // has been overwritten by (or after) the heap object reference load // to be instrumented, e.g.: // // __ movl(out, Address(out, offset)); // codegen_->GenerateReadBarrierSlow(instruction, out_loc, out_loc, out_loc, offset); // // In that case, we have lost the information about the original // object, and the emitted read barrier cannot work properly. 
DCHECK(!obj.Equals(out)) << "obj=" << obj << " out=" << out; DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref; } void EmitNativeCode(CodeGenerator* codegen) override { CodeGeneratorX86* x86_codegen = down_cast(codegen); LocationSummary* locations = instruction_->GetLocations(); Register reg_out = out_.AsRegister(); DCHECK(locations->CanCall()); DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out)); DCHECK(instruction_->IsInstanceFieldGet() || instruction_->IsPredicatedInstanceFieldGet() || instruction_->IsStaticFieldGet() || instruction_->IsArrayGet() || instruction_->IsInstanceOf() || instruction_->IsCheckCast() || (instruction_->IsInvoke() && instruction_->GetLocations()->Intrinsified())) << "Unexpected instruction in read barrier for heap reference slow path: " << instruction_->DebugName(); __ Bind(GetEntryLabel()); SaveLiveRegisters(codegen, locations); // We may have to change the index's value, but as `index_` is a // constant member (like other "inputs" of this slow path), // introduce a copy of it, `index`. Location index = index_; if (index_.IsValid()) { // Handle `index_` for HArrayGet and UnsafeGetObject/UnsafeGetObjectVolatile intrinsics. if (instruction_->IsArrayGet()) { // Compute the actual memory offset and store it in `index`. Register index_reg = index_.AsRegister(); DCHECK(locations->GetLiveRegisters()->ContainsCoreRegister(index_reg)); if (codegen->IsCoreCalleeSaveRegister(index_reg)) { // We are about to change the value of `index_reg` (see the // calls to art::x86::X86Assembler::shll and // art::x86::X86Assembler::AddImmediate below), but it has // not been saved by the previous call to // art::SlowPathCode::SaveLiveRegisters, as it is a // callee-save register -- // art::SlowPathCode::SaveLiveRegisters does not consider // callee-save registers, as it has been designed with the // assumption that callee-save registers are supposed to be // handled by the called function. So, as a callee-save // register, `index_reg` _would_ eventually be saved onto // the stack, but it would be too late: we would have // changed its value earlier. Therefore, we manually save // it here into another freely available register, // `free_reg`, chosen of course among the caller-save // registers (as a callee-save `free_reg` register would // exhibit the same problem). // // Note we could have requested a temporary register from // the register allocator instead; but we prefer not to, as // this is a slow path, and we know we can find a // caller-save register that is available. Register free_reg = FindAvailableCallerSaveRegister(codegen); __ movl(free_reg, index_reg); index_reg = free_reg; index = Location::RegisterLocation(index_reg); } else { // The initial register stored in `index_` has already been // saved in the call to art::SlowPathCode::SaveLiveRegisters // (as it is not a callee-save register), so we can freely // use it. } // Shifting the index value contained in `index_reg` by the scale // factor (2) cannot overflow in practice, as the runtime is // unable to allocate object arrays with a size larger than // 2^26 - 1 (that is, 2^28 - 4 bytes). 
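// (2^26 - 1 references of 4 bytes each occupy 2^28 - 4 bytes, so the shifted index plus // `offset_` always fits comfortably in a 32-bit value.)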
__ shll(index_reg, Immediate(TIMES_4)); static_assert( sizeof(mirror::HeapReference) == sizeof(int32_t), "art::mirror::HeapReference and int32_t have different sizes."); __ AddImmediate(index_reg, Immediate(offset_)); } else { // In the case of the UnsafeGetObject/UnsafeGetObjectVolatile // intrinsics, `index_` is not shifted by a scale factor of 2 // (as in the case of ArrayGet), as it is actually an offset // to an object field within an object. DCHECK(instruction_->IsInvoke()) << instruction_->DebugName(); DCHECK(instruction_->GetLocations()->Intrinsified()); DCHECK((instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObject) || (instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile)) << instruction_->AsInvoke()->GetIntrinsic(); DCHECK_EQ(offset_, 0U); DCHECK(index_.IsRegisterPair()); // UnsafeGet's offset location is a register pair, the low // part contains the correct offset. index = index_.ToLow(); } } // We're moving two or three locations to locations that could // overlap, so we need a parallel move resolver. InvokeRuntimeCallingConvention calling_convention; HParallelMove parallel_move(codegen->GetGraph()->GetAllocator()); parallel_move.AddMove(ref_, Location::RegisterLocation(calling_convention.GetRegisterAt(0)), DataType::Type::kReference, nullptr); parallel_move.AddMove(obj_, Location::RegisterLocation(calling_convention.GetRegisterAt(1)), DataType::Type::kReference, nullptr); if (index.IsValid()) { parallel_move.AddMove(index, Location::RegisterLocation(calling_convention.GetRegisterAt(2)), DataType::Type::kInt32, nullptr); codegen->GetMoveResolver()->EmitNativeCode(&parallel_move); } else { codegen->GetMoveResolver()->EmitNativeCode(&parallel_move); __ movl(calling_convention.GetRegisterAt(2), Immediate(offset_)); } x86_codegen->InvokeRuntime(kQuickReadBarrierSlow, instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes< kQuickReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t>(); x86_codegen->Move32(out_, Location::RegisterLocation(EAX)); RestoreLiveRegisters(codegen, locations); __ jmp(GetExitLabel()); } const char* GetDescription() const override { return "ReadBarrierForHeapReferenceSlowPathX86"; } private: Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) { size_t ref = static_cast(ref_.AsRegister()); size_t obj = static_cast(obj_.AsRegister()); for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) { if (i != ref && i != obj && !codegen->IsCoreCalleeSaveRegister(i)) { return static_cast(i); } } // We shall never fail to find a free caller-save register, as // there are more than two core caller-save registers on x86 // (meaning it is possible to find one which is different from // `ref` and `obj`). DCHECK_GT(codegen->GetNumberOfCoreCallerSaveRegisters(), 2u); LOG(FATAL) << "Could not find a free caller-save register"; UNREACHABLE(); } const Location out_; const Location ref_; const Location obj_; const uint32_t offset_; // An additional location containing an index to an array. // Only used for HArrayGet and the UnsafeGetObject & // UnsafeGetObjectVolatile intrinsics. const Location index_; DISALLOW_COPY_AND_ASSIGN(ReadBarrierForHeapReferenceSlowPathX86); }; // Slow path generating a read barrier for a GC root. 
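// Unlike the heap-reference slow path above, a GC root is not a field inside a holder object, // so there is no holder or offset to marshal: the entrypoint receives only the root itself and // the result is moved back from EAX.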
class ReadBarrierForRootSlowPathX86 : public SlowPathCode { public: ReadBarrierForRootSlowPathX86(HInstruction* instruction, Location out, Location root) : SlowPathCode(instruction), out_(out), root_(root) { DCHECK(kEmitCompilerReadBarrier); } void EmitNativeCode(CodeGenerator* codegen) override { LocationSummary* locations = instruction_->GetLocations(); Register reg_out = out_.AsRegister(); DCHECK(locations->CanCall()); DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out)); DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString()) << "Unexpected instruction in read barrier for GC root slow path: " << instruction_->DebugName(); __ Bind(GetEntryLabel()); SaveLiveRegisters(codegen, locations); InvokeRuntimeCallingConvention calling_convention; CodeGeneratorX86* x86_codegen = down_cast(codegen); x86_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), root_); x86_codegen->InvokeRuntime(kQuickReadBarrierForRootSlow, instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes*>(); x86_codegen->Move32(out_, Location::RegisterLocation(EAX)); RestoreLiveRegisters(codegen, locations); __ jmp(GetExitLabel()); } const char* GetDescription() const override { return "ReadBarrierForRootSlowPathX86"; } private: const Location out_; const Location root_; DISALLOW_COPY_AND_ASSIGN(ReadBarrierForRootSlowPathX86); }; #undef __ // NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy. #define __ down_cast(GetAssembler())-> // NOLINT inline Condition X86Condition(IfCondition cond) { switch (cond) { case kCondEQ: return kEqual; case kCondNE: return kNotEqual; case kCondLT: return kLess; case kCondLE: return kLessEqual; case kCondGT: return kGreater; case kCondGE: return kGreaterEqual; case kCondB: return kBelow; case kCondBE: return kBelowEqual; case kCondA: return kAbove; case kCondAE: return kAboveEqual; } LOG(FATAL) << "Unreachable"; UNREACHABLE(); } // Maps signed condition to unsigned condition and FP condition to x86 name. inline Condition X86UnsignedOrFPCondition(IfCondition cond) { switch (cond) { case kCondEQ: return kEqual; case kCondNE: return kNotEqual; // Signed to unsigned, and FP to x86 name. case kCondLT: return kBelow; case kCondLE: return kBelowEqual; case kCondGT: return kAbove; case kCondGE: return kAboveEqual; // Unsigned remain unchanged. 
case kCondB: return kBelow; case kCondBE: return kBelowEqual; case kCondA: return kAbove; case kCondAE: return kAboveEqual; } LOG(FATAL) << "Unreachable"; UNREACHABLE(); } void CodeGeneratorX86::DumpCoreRegister(std::ostream& stream, int reg) const { stream << Register(reg); } void CodeGeneratorX86::DumpFloatingPointRegister(std::ostream& stream, int reg) const { stream << XmmRegister(reg); } const X86InstructionSetFeatures& CodeGeneratorX86::GetInstructionSetFeatures() const { return *GetCompilerOptions().GetInstructionSetFeatures()->AsX86InstructionSetFeatures(); } size_t CodeGeneratorX86::SaveCoreRegister(size_t stack_index, uint32_t reg_id) { __ movl(Address(ESP, stack_index), static_cast(reg_id)); return kX86WordSize; } size_t CodeGeneratorX86::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) { __ movl(static_cast(reg_id), Address(ESP, stack_index)); return kX86WordSize; } size_t CodeGeneratorX86::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) { if (GetGraph()->HasSIMD()) { __ movups(Address(ESP, stack_index), XmmRegister(reg_id)); } else { __ movsd(Address(ESP, stack_index), XmmRegister(reg_id)); } return GetSlowPathFPWidth(); } size_t CodeGeneratorX86::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) { if (GetGraph()->HasSIMD()) { __ movups(XmmRegister(reg_id), Address(ESP, stack_index)); } else { __ movsd(XmmRegister(reg_id), Address(ESP, stack_index)); } return GetSlowPathFPWidth(); } void CodeGeneratorX86::InvokeRuntime(QuickEntrypointEnum entrypoint, HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path) { ValidateInvokeRuntime(entrypoint, instruction, slow_path); GenerateInvokeRuntime(GetThreadOffset(entrypoint).Int32Value()); if (EntrypointRequiresStackMap(entrypoint)) { RecordPcInfo(instruction, dex_pc, slow_path); } } void CodeGeneratorX86::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset, HInstruction* instruction, SlowPathCode* slow_path) { ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction, slow_path); GenerateInvokeRuntime(entry_point_offset); } void CodeGeneratorX86::GenerateInvokeRuntime(int32_t entry_point_offset) { __ fs()->call(Address::Absolute(entry_point_offset)); } CodeGeneratorX86::CodeGeneratorX86(HGraph* graph, const CompilerOptions& compiler_options, OptimizingCompilerStats* stats) : CodeGenerator(graph, kNumberOfCpuRegisters, kNumberOfXmmRegisters, kNumberOfRegisterPairs, ComputeRegisterMask(reinterpret_cast(kCoreCalleeSaves), arraysize(kCoreCalleeSaves)) | (1 << kFakeReturnRegister), 0, compiler_options, stats), block_labels_(nullptr), location_builder_(graph, this), instruction_visitor_(graph, this), move_resolver_(graph->GetAllocator(), this), assembler_(graph->GetAllocator()), boot_image_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), boot_image_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), public_type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), package_type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), boot_image_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), boot_image_jni_entrypoint_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), 
boot_image_other_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), jit_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), jit_class_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), constant_area_start_(-1), fixups_to_jump_tables_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), method_address_offset_(std::less(), graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) { // Use a fake return address register to mimic Quick. AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister)); } void CodeGeneratorX86::SetupBlockedRegisters() const { // Stack register is always reserved. blocked_core_registers_[ESP] = true; } InstructionCodeGeneratorX86::InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen) : InstructionCodeGenerator(graph, codegen), assembler_(codegen->GetAssembler()), codegen_(codegen) {} static dwarf::Reg DWARFReg(Register reg) { return dwarf::Reg::X86Core(static_cast(reg)); } void CodeGeneratorX86::MaybeIncrementHotness(bool is_frame_entry) { if (GetCompilerOptions().CountHotnessInCompiledCode()) { Register reg = EAX; if (is_frame_entry) { reg = kMethodRegisterArgument; } else { __ pushl(EAX); __ cfi().AdjustCFAOffset(4); __ movl(EAX, Address(ESP, kX86WordSize)); } NearLabel overflow; __ cmpw(Address(reg, ArtMethod::HotnessCountOffset().Int32Value()), Immediate(ArtMethod::MaxCounter())); __ j(kEqual, &overflow); __ addw(Address(reg, ArtMethod::HotnessCountOffset().Int32Value()), Immediate(1)); __ Bind(&overflow); if (!is_frame_entry) { __ popl(EAX); __ cfi().AdjustCFAOffset(-4); } } if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) { ScopedProfilingInfoUse spiu( Runtime::Current()->GetJit(), GetGraph()->GetArtMethod(), Thread::Current()); ProfilingInfo* info = spiu.GetProfilingInfo(); if (info != nullptr) { uint32_t address = reinterpret_cast32(info); NearLabel done; if (HasEmptyFrame()) { CHECK(is_frame_entry); // Alignment IncreaseFrame(8); // We need a temporary. The stub also expects the method at bottom of stack. __ pushl(EAX); __ cfi().AdjustCFAOffset(4); __ movl(EAX, Immediate(address)); __ addw(Address(EAX, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()), Immediate(1)); __ andw(Address(EAX, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()), Immediate(interpreter::kTieredHotnessMask)); __ j(kNotZero, &done); GenerateInvokeRuntime( GetThreadOffset(kQuickCompileOptimized).Int32Value()); __ Bind(&done); // We don't strictly require to restore EAX, but this makes the generated // code easier to reason about. __ popl(EAX); __ cfi().AdjustCFAOffset(-4); DecreaseFrame(8); } else { if (!RequiresCurrentMethod()) { CHECK(is_frame_entry); __ movl(Address(ESP, kCurrentMethodStackOffset), kMethodRegisterArgument); } // We need a temporary. __ pushl(EAX); __ cfi().AdjustCFAOffset(4); __ movl(EAX, Immediate(address)); __ addw(Address(EAX, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()), Immediate(1)); __ popl(EAX); // Put stack as expected before exiting or calling stub. 
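// Neither popl nor the CFI bookkeeping touches EFLAGS, so the carry produced by the 16-bit // addw above is still valid here: a set carry means the counter just wrapped around, in which // case we fall through to the kQuickCompileOptimized stub.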
__ cfi().AdjustCFAOffset(-4); __ j(kCarryClear, &done); GenerateInvokeRuntime( GetThreadOffset(kQuickCompileOptimized).Int32Value()); __ Bind(&done); } } } } void CodeGeneratorX86::GenerateFrameEntry() { __ cfi().SetCurrentCFAOffset(kX86WordSize); // return address __ Bind(&frame_entry_label_); bool skip_overflow_check = IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86); DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks()); if (!skip_overflow_check) { size_t reserved_bytes = GetStackOverflowReservedBytes(InstructionSet::kX86); __ testl(EAX, Address(ESP, -static_cast(reserved_bytes))); RecordPcInfo(nullptr, 0); } if (!HasEmptyFrame()) { for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) { Register reg = kCoreCalleeSaves[i]; if (allocated_registers_.ContainsCoreRegister(reg)) { __ pushl(reg); __ cfi().AdjustCFAOffset(kX86WordSize); __ cfi().RelOffset(DWARFReg(reg), 0); } } int adjust = GetFrameSize() - FrameEntrySpillSize(); IncreaseFrame(adjust); // Save the current method if we need it. Note that we do not // do this in HCurrentMethod, as the instruction might have been removed // in the SSA graph. if (RequiresCurrentMethod()) { __ movl(Address(ESP, kCurrentMethodStackOffset), kMethodRegisterArgument); } if (GetGraph()->HasShouldDeoptimizeFlag()) { // Initialize should_deoptimize flag to 0. __ movl(Address(ESP, GetStackOffsetOfShouldDeoptimizeFlag()), Immediate(0)); } } MaybeIncrementHotness(/* is_frame_entry= */ true); } void CodeGeneratorX86::GenerateFrameExit() { __ cfi().RememberState(); if (!HasEmptyFrame()) { int adjust = GetFrameSize() - FrameEntrySpillSize(); DecreaseFrame(adjust); for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) { Register reg = kCoreCalleeSaves[i]; if (allocated_registers_.ContainsCoreRegister(reg)) { __ popl(reg); __ cfi().AdjustCFAOffset(-static_cast(kX86WordSize)); __ cfi().Restore(DWARFReg(reg)); } } } __ ret(); __ cfi().RestoreState(); __ cfi().DefCFAOffset(GetFrameSize()); } void CodeGeneratorX86::Bind(HBasicBlock* block) { __ Bind(GetLabelOf(block)); } Location InvokeDexCallingConventionVisitorX86::GetReturnLocation(DataType::Type type) const { switch (type) { case DataType::Type::kReference: case DataType::Type::kBool: case DataType::Type::kUint8: case DataType::Type::kInt8: case DataType::Type::kUint16: case DataType::Type::kInt16: case DataType::Type::kUint32: case DataType::Type::kInt32: return Location::RegisterLocation(EAX); case DataType::Type::kUint64: case DataType::Type::kInt64: return Location::RegisterPairLocation(EAX, EDX); case DataType::Type::kVoid: return Location::NoLocation(); case DataType::Type::kFloat64: case DataType::Type::kFloat32: return Location::FpuRegisterLocation(XMM0); } UNREACHABLE(); } Location InvokeDexCallingConventionVisitorX86::GetMethodLocation() const { return Location::RegisterLocation(kMethodRegisterArgument); } Location InvokeDexCallingConventionVisitorX86::GetNextLocation(DataType::Type type) { switch (type) { case DataType::Type::kReference: case DataType::Type::kBool: case DataType::Type::kUint8: case DataType::Type::kInt8: case DataType::Type::kUint16: case DataType::Type::kInt16: case DataType::Type::kInt32: { uint32_t index = gp_index_++; stack_index_++; if (index < calling_convention.GetNumberOfRegisters()) { return Location::RegisterLocation(calling_convention.GetRegisterAt(index)); } else { return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index_ - 1)); } } case DataType::Type::kInt64: { uint32_t index = gp_index_; gp_index_ += 2; 
stack_index_ += 2; if (index + 1 < calling_convention.GetNumberOfRegisters()) { X86ManagedRegister pair = X86ManagedRegister::FromRegisterPair( calling_convention.GetRegisterPairAt(index)); return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh()); } else { return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index_ - 2)); } } case DataType::Type::kFloat32: { uint32_t index = float_index_++; stack_index_++; if (index < calling_convention.GetNumberOfFpuRegisters()) { return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(index)); } else { return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index_ - 1)); } } case DataType::Type::kFloat64: { uint32_t index = float_index_++; stack_index_ += 2; if (index < calling_convention.GetNumberOfFpuRegisters()) { return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(index)); } else { return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index_ - 2)); } } case DataType::Type::kUint32: case DataType::Type::kUint64: case DataType::Type::kVoid: LOG(FATAL) << "Unexpected parameter type " << type; UNREACHABLE(); } return Location::NoLocation(); } Location CriticalNativeCallingConventionVisitorX86::GetNextLocation(DataType::Type type) { DCHECK_NE(type, DataType::Type::kReference); Location location; if (DataType::Is64BitType(type)) { location = Location::DoubleStackSlot(stack_offset_); stack_offset_ += 2 * kFramePointerSize; } else { location = Location::StackSlot(stack_offset_); stack_offset_ += kFramePointerSize; } if (for_register_allocation_) { location = Location::Any(); } return location; } Location CriticalNativeCallingConventionVisitorX86::GetReturnLocation(DataType::Type type) const { // We perform conversion to the managed ABI return register after the call if needed. InvokeDexCallingConventionVisitorX86 dex_calling_convention; return dex_calling_convention.GetReturnLocation(type); } Location CriticalNativeCallingConventionVisitorX86::GetMethodLocation() const { // Pass the method in the hidden argument EAX. 
return Location::RegisterLocation(EAX); } void CodeGeneratorX86::Move32(Location destination, Location source) { if (source.Equals(destination)) { return; } if (destination.IsRegister()) { if (source.IsRegister()) { __ movl(destination.AsRegister(), source.AsRegister()); } else if (source.IsFpuRegister()) { __ movd(destination.AsRegister(), source.AsFpuRegister()); } else if (source.IsConstant()) { int32_t value = GetInt32ValueOf(source.GetConstant()); __ movl(destination.AsRegister(), Immediate(value)); } else { DCHECK(source.IsStackSlot()); __ movl(destination.AsRegister(), Address(ESP, source.GetStackIndex())); } } else if (destination.IsFpuRegister()) { if (source.IsRegister()) { __ movd(destination.AsFpuRegister(), source.AsRegister()); } else if (source.IsFpuRegister()) { __ movaps(destination.AsFpuRegister(), source.AsFpuRegister()); } else { DCHECK(source.IsStackSlot()); __ movss(destination.AsFpuRegister(), Address(ESP, source.GetStackIndex())); } } else { DCHECK(destination.IsStackSlot()) << destination; if (source.IsRegister()) { __ movl(Address(ESP, destination.GetStackIndex()), source.AsRegister()); } else if (source.IsFpuRegister()) { __ movss(Address(ESP, destination.GetStackIndex()), source.AsFpuRegister()); } else if (source.IsConstant()) { HConstant* constant = source.GetConstant(); int32_t value = GetInt32ValueOf(constant); __ movl(Address(ESP, destination.GetStackIndex()), Immediate(value)); } else { DCHECK(source.IsStackSlot()); __ pushl(Address(ESP, source.GetStackIndex())); __ popl(Address(ESP, destination.GetStackIndex())); } } } void CodeGeneratorX86::Move64(Location destination, Location source) { if (source.Equals(destination)) { return; } if (destination.IsRegisterPair()) { if (source.IsRegisterPair()) { EmitParallelMoves( Location::RegisterLocation(source.AsRegisterPairHigh()), Location::RegisterLocation(destination.AsRegisterPairHigh()), DataType::Type::kInt32, Location::RegisterLocation(source.AsRegisterPairLow()), Location::RegisterLocation(destination.AsRegisterPairLow()), DataType::Type::kInt32); } else if (source.IsFpuRegister()) { XmmRegister src_reg = source.AsFpuRegister(); __ movd(destination.AsRegisterPairLow(), src_reg); __ psrlq(src_reg, Immediate(32)); __ movd(destination.AsRegisterPairHigh(), src_reg); } else { // No conflict possible, so just do the moves. DCHECK(source.IsDoubleStackSlot()); __ movl(destination.AsRegisterPairLow(), Address(ESP, source.GetStackIndex())); __ movl(destination.AsRegisterPairHigh(), Address(ESP, source.GetHighStackIndex(kX86WordSize))); } } else if (destination.IsFpuRegister()) { if (source.IsFpuRegister()) { __ movaps(destination.AsFpuRegister(), source.AsFpuRegister()); } else if (source.IsDoubleStackSlot()) { __ movsd(destination.AsFpuRegister(), Address(ESP, source.GetStackIndex())); } else if (source.IsRegisterPair()) { size_t elem_size = DataType::Size(DataType::Type::kInt32); // Push the 2 source registers to the stack. __ pushl(source.AsRegisterPairHigh()); __ cfi().AdjustCFAOffset(elem_size); __ pushl(source.AsRegisterPairLow()); __ cfi().AdjustCFAOffset(elem_size); __ movsd(destination.AsFpuRegister(), Address(ESP, 0)); // And remove the temporary stack space we allocated. DecreaseFrame(2 * elem_size); } else { LOG(FATAL) << "Unimplemented"; } } else { DCHECK(destination.IsDoubleStackSlot()) << destination; if (source.IsRegisterPair()) { // No conflict possible, so just do the moves. 
__ movl(Address(ESP, destination.GetStackIndex()), source.AsRegisterPairLow()); __ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)), source.AsRegisterPairHigh()); } else if (source.IsFpuRegister()) { __ movsd(Address(ESP, destination.GetStackIndex()), source.AsFpuRegister()); } else if (source.IsConstant()) { HConstant* constant = source.GetConstant(); DCHECK(constant->IsLongConstant() || constant->IsDoubleConstant()); int64_t value = GetInt64ValueOf(constant); __ movl(Address(ESP, destination.GetStackIndex()), Immediate(Low32Bits(value))); __ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)), Immediate(High32Bits(value))); } else { DCHECK(source.IsDoubleStackSlot()) << source; EmitParallelMoves( Location::StackSlot(source.GetStackIndex()), Location::StackSlot(destination.GetStackIndex()), DataType::Type::kInt32, Location::StackSlot(source.GetHighStackIndex(kX86WordSize)), Location::StackSlot(destination.GetHighStackIndex(kX86WordSize)), DataType::Type::kInt32); } } } static Address CreateAddress(Register base, Register index = Register::kNoRegister, ScaleFactor scale = TIMES_1, int32_t disp = 0) { if (index == Register::kNoRegister) { return Address(base, disp); } return Address(base, index, scale, disp); } void CodeGeneratorX86::LoadFromMemoryNoBarrier(DataType::Type dst_type, Location dst, Address src, XmmRegister temp, bool is_atomic_load) { switch (dst_type) { case DataType::Type::kBool: case DataType::Type::kUint8: __ movzxb(dst.AsRegister(), src); break; case DataType::Type::kInt8: __ movsxb(dst.AsRegister(), src); break; case DataType::Type::kInt16: __ movsxw(dst.AsRegister(), src); break; case DataType::Type::kUint16: __ movzxw(dst.AsRegister(), src); break; case DataType::Type::kInt32: __ movl(dst.AsRegister(), src); break; case DataType::Type::kInt64: { if (is_atomic_load) { __ movsd(temp, src); __ movd(dst.AsRegisterPairLow(), temp); __ psrlq(temp, Immediate(32)); __ movd(dst.AsRegisterPairHigh(), temp); } else { DCHECK_NE(src.GetBaseRegister(), dst.AsRegisterPairLow()); Address src_high = src.displaceBy(kX86WordSize); __ movl(dst.AsRegisterPairLow(), src); __ movl(dst.AsRegisterPairHigh(), src_high); } break; } case DataType::Type::kFloat32: __ movss(dst.AsFpuRegister(), src); break; case DataType::Type::kFloat64: __ movsd(dst.AsFpuRegister(), src); break; case DataType::Type::kReference: __ movl(dst.AsRegister(), src); __ MaybeUnpoisonHeapReference(dst.AsRegister()); break; default: LOG(FATAL) << "Unreachable type " << dst_type; } } void CodeGeneratorX86::MoveToMemory(DataType::Type src_type, Location src, Register dst_base, Register dst_index, ScaleFactor dst_scale, int32_t dst_disp) { DCHECK(dst_base != Register::kNoRegister); Address dst = CreateAddress(dst_base, dst_index, dst_scale, dst_disp); switch (src_type) { case DataType::Type::kBool: case DataType::Type::kUint8: case DataType::Type::kInt8: { if (src.IsConstant()) { __ movb(dst, Immediate(CodeGenerator::GetInt8ValueOf(src.GetConstant()))); } else { __ movb(dst, src.AsRegister()); } break; } case DataType::Type::kUint16: case DataType::Type::kInt16: { if (src.IsConstant()) { __ movw(dst, Immediate(CodeGenerator::GetInt16ValueOf(src.GetConstant()))); } else { __ movw(dst, src.AsRegister()); } break; } case DataType::Type::kUint32: case DataType::Type::kInt32: { if (src.IsConstant()) { int32_t v = CodeGenerator::GetInt32ValueOf(src.GetConstant()); __ movl(dst, Immediate(v)); } else { __ movl(dst, src.AsRegister()); } break; } case DataType::Type::kUint64: case DataType::Type::kInt64: { 
Address dst_next_4_bytes = CreateAddress(dst_base, dst_index, dst_scale, dst_disp + 4); if (src.IsConstant()) { int64_t v = CodeGenerator::GetInt64ValueOf(src.GetConstant()); __ movl(dst, Immediate(Low32Bits(v))); __ movl(dst_next_4_bytes, Immediate(High32Bits(v))); } else { __ movl(dst, src.AsRegisterPairLow()); __ movl(dst_next_4_bytes, src.AsRegisterPairHigh()); } break; } case DataType::Type::kFloat32: { if (src.IsConstant()) { int32_t v = CodeGenerator::GetInt32ValueOf(src.GetConstant()); __ movl(dst, Immediate(v)); } else { __ movss(dst, src.AsFpuRegister()); } break; } case DataType::Type::kFloat64: { Address dst_next_4_bytes = CreateAddress(dst_base, dst_index, dst_scale, dst_disp + 4); if (src.IsConstant()) { int64_t v = CodeGenerator::GetInt64ValueOf(src.GetConstant()); __ movl(dst, Immediate(Low32Bits(v))); __ movl(dst_next_4_bytes, Immediate(High32Bits(v))); } else { __ movsd(dst, src.AsFpuRegister()); } break; } case DataType::Type::kVoid: case DataType::Type::kReference: LOG(FATAL) << "Unreachable type " << src_type; } } void CodeGeneratorX86::MoveConstant(Location location, int32_t value) { DCHECK(location.IsRegister()); __ movl(location.AsRegister(), Immediate(value)); } void CodeGeneratorX86::MoveLocation(Location dst, Location src, DataType::Type dst_type) { HParallelMove move(GetGraph()->GetAllocator()); if (dst_type == DataType::Type::kInt64 && !src.IsConstant() && !src.IsFpuRegister()) { move.AddMove(src.ToLow(), dst.ToLow(), DataType::Type::kInt32, nullptr); move.AddMove(src.ToHigh(), dst.ToHigh(), DataType::Type::kInt32, nullptr); } else { move.AddMove(src, dst, dst_type, nullptr); } GetMoveResolver()->EmitNativeCode(&move); } void CodeGeneratorX86::AddLocationAsTemp(Location location, LocationSummary* locations) { if (location.IsRegister()) { locations->AddTemp(location); } else if (location.IsRegisterPair()) { locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairLow())); locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairHigh())); } else { UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location; } } void InstructionCodeGeneratorX86::HandleGoto(HInstruction* got, HBasicBlock* successor) { if (successor->IsExitBlock()) { DCHECK(got->GetPrevious()->AlwaysThrows()); return; // no code needed } HBasicBlock* block = got->GetBlock(); HInstruction* previous = got->GetPrevious(); HLoopInformation* info = block->GetLoopInformation(); if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) { codegen_->MaybeIncrementHotness(/* is_frame_entry= */ false); GenerateSuspendCheck(info->GetSuspendCheck(), successor); return; } if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) { GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr); } if (!codegen_->GoesToNextBlock(got->GetBlock(), successor)) { __ jmp(codegen_->GetLabelOf(successor)); } } void LocationsBuilderX86::VisitGoto(HGoto* got) { got->SetLocations(nullptr); } void InstructionCodeGeneratorX86::VisitGoto(HGoto* got) { HandleGoto(got, got->GetSuccessor()); } void LocationsBuilderX86::VisitTryBoundary(HTryBoundary* try_boundary) { try_boundary->SetLocations(nullptr); } void InstructionCodeGeneratorX86::VisitTryBoundary(HTryBoundary* try_boundary) { HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor(); if (!successor->IsExitBlock()) { HandleGoto(try_boundary, successor); } } void LocationsBuilderX86::VisitExit(HExit* exit) { exit->SetLocations(nullptr); } void 
template <class LabelType>
void InstructionCodeGeneratorX86::GenerateFPJumps(HCondition* cond,
                                                  LabelType* true_label,
                                                  LabelType* false_label) {
  if (cond->IsFPConditionTrueIfNaN()) {
    __ j(kUnordered, true_label);
  } else if (cond->IsFPConditionFalseIfNaN()) {
    __ j(kUnordered, false_label);
  }
  __ j(X86UnsignedOrFPCondition(cond->GetCondition()), true_label);
}

template <class LabelType>
void InstructionCodeGeneratorX86::GenerateLongComparesAndJumps(HCondition* cond,
                                                               LabelType* true_label,
                                                               LabelType* false_label) {
  LocationSummary* locations = cond->GetLocations();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);
  IfCondition if_cond = cond->GetCondition();

  Register left_high = left.AsRegisterPairHigh<Register>();
  Register left_low = left.AsRegisterPairLow<Register>();
  IfCondition true_high_cond = if_cond;
  IfCondition false_high_cond = cond->GetOppositeCondition();
  Condition final_condition = X86UnsignedOrFPCondition(if_cond);  // unsigned on lower part

  // Set the conditions for the test, remembering that == needs to be
  // decided using the low words.
  switch (if_cond) {
    case kCondEQ:
    case kCondNE:
      // Nothing to do.
      break;
    case kCondLT:
      false_high_cond = kCondGT;
      break;
    case kCondLE:
      true_high_cond = kCondLT;
      break;
    case kCondGT:
      false_high_cond = kCondLT;
      break;
    case kCondGE:
      true_high_cond = kCondGT;
      break;
    case kCondB:
      false_high_cond = kCondA;
      break;
    case kCondBE:
      true_high_cond = kCondB;
      break;
    case kCondA:
      false_high_cond = kCondB;
      break;
    case kCondAE:
      true_high_cond = kCondA;
      break;
  }

  if (right.IsConstant()) {
    int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
    int32_t val_high = High32Bits(value);
    int32_t val_low = Low32Bits(value);

    codegen_->Compare32BitValue(left_high, val_high);
    if (if_cond == kCondNE) {
      __ j(X86Condition(true_high_cond), true_label);
    } else if (if_cond == kCondEQ) {
      __ j(X86Condition(false_high_cond), false_label);
    } else {
      __ j(X86Condition(true_high_cond), true_label);
      __ j(X86Condition(false_high_cond), false_label);
    }
    // Must be equal high, so compare the lows.
    codegen_->Compare32BitValue(left_low, val_low);
  } else if (right.IsRegisterPair()) {
    Register right_high = right.AsRegisterPairHigh<Register>();
    Register right_low = right.AsRegisterPairLow<Register>();

    __ cmpl(left_high, right_high);
    if (if_cond == kCondNE) {
      __ j(X86Condition(true_high_cond), true_label);
    } else if (if_cond == kCondEQ) {
      __ j(X86Condition(false_high_cond), false_label);
    } else {
      __ j(X86Condition(true_high_cond), true_label);
      __ j(X86Condition(false_high_cond), false_label);
    }
    // Must be equal high, so compare the lows.
    __ cmpl(left_low, right_low);
  } else {
    DCHECK(right.IsDoubleStackSlot());
    __ cmpl(left_high, Address(ESP, right.GetHighStackIndex(kX86WordSize)));
    if (if_cond == kCondNE) {
      __ j(X86Condition(true_high_cond), true_label);
    } else if (if_cond == kCondEQ) {
      __ j(X86Condition(false_high_cond), false_label);
    } else {
      __ j(X86Condition(true_high_cond), true_label);
      __ j(X86Condition(false_high_cond), false_label);
    }
    // Must be equal high, so compare the lows.
    __ cmpl(left_low, Address(ESP, right.GetStackIndex()));
  }
  // The last comparison might be unsigned.
  __ j(final_condition, true_label);
}
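// Illustrative note: the long compare above is split into a signed compare of the high
// words followed by an unsigned compare of the low words. For `a < b` with both operands
// in register pairs and explicit true/false labels, the emitted pattern is roughly:
//   cmpl a_hi, b_hi
//   jl   true_label    // high words already decide the result
//   jg   false_label
//   cmpl a_lo, b_lo    // high words are equal: decide on the low words...
//   jb   true_label    // ...using the unsigned form of the condition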
void InstructionCodeGeneratorX86::GenerateFPCompare(Location lhs,
                                                    Location rhs,
                                                    HInstruction* insn,
                                                    bool is_double) {
  HX86LoadFromConstantTable* const_area = insn->InputAt(1)->AsX86LoadFromConstantTable();
  if (is_double) {
    if (rhs.IsFpuRegister()) {
      __ ucomisd(lhs.AsFpuRegister<XmmRegister>(), rhs.AsFpuRegister<XmmRegister>());
    } else if (const_area != nullptr) {
      DCHECK(const_area->IsEmittedAtUseSite());
      __ ucomisd(lhs.AsFpuRegister<XmmRegister>(),
                 codegen_->LiteralDoubleAddress(
                     const_area->GetConstant()->AsDoubleConstant()->GetValue(),
                     const_area->GetBaseMethodAddress(),
                     const_area->GetLocations()->InAt(0).AsRegister<Register>()));
    } else {
      DCHECK(rhs.IsDoubleStackSlot());
      __ ucomisd(lhs.AsFpuRegister<XmmRegister>(), Address(ESP, rhs.GetStackIndex()));
    }
  } else {
    if (rhs.IsFpuRegister()) {
      __ ucomiss(lhs.AsFpuRegister<XmmRegister>(), rhs.AsFpuRegister<XmmRegister>());
    } else if (const_area != nullptr) {
      DCHECK(const_area->IsEmittedAtUseSite());
      __ ucomiss(lhs.AsFpuRegister<XmmRegister>(),
                 codegen_->LiteralFloatAddress(
                     const_area->GetConstant()->AsFloatConstant()->GetValue(),
                     const_area->GetBaseMethodAddress(),
                     const_area->GetLocations()->InAt(0).AsRegister<Register>()));
    } else {
      DCHECK(rhs.IsStackSlot());
      __ ucomiss(lhs.AsFpuRegister<XmmRegister>(), Address(ESP, rhs.GetStackIndex()));
    }
  }
}

template <class LabelType>
void InstructionCodeGeneratorX86::GenerateCompareTestAndBranch(HCondition* condition,
                                                               LabelType* true_target_in,
                                                               LabelType* false_target_in) {
  // Generated branching requires both targets to be explicit. If either of the
  // targets is nullptr (fallthrough) use and bind `fallthrough_target` instead.
  LabelType fallthrough_target;
  LabelType* true_target = true_target_in == nullptr ? &fallthrough_target : true_target_in;
  LabelType* false_target = false_target_in == nullptr ? &fallthrough_target : false_target_in;

  LocationSummary* locations = condition->GetLocations();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);

  DataType::Type type = condition->InputAt(0)->GetType();
  switch (type) {
    case DataType::Type::kInt64:
      GenerateLongComparesAndJumps(condition, true_target, false_target);
      break;
    case DataType::Type::kFloat32:
      GenerateFPCompare(left, right, condition, false);
      GenerateFPJumps(condition, true_target, false_target);
      break;
    case DataType::Type::kFloat64:
      GenerateFPCompare(left, right, condition, true);
      GenerateFPJumps(condition, true_target, false_target);
      break;
    default:
      LOG(FATAL) << "Unexpected compare type " << type;
  }

  if (false_target != &fallthrough_target) {
    __ jmp(false_target);
  }

  if (fallthrough_target.IsLinked()) {
    __ Bind(&fallthrough_target);
  }
}

static bool AreEflagsSetFrom(HInstruction* cond, HInstruction* branch) {
  // Moves may affect the eflags register (move zero uses xorl), so the EFLAGS
  // are set only strictly before `branch`. We can't use the eflags on long/FP
  // conditions if they are materialized due to the complex branching.
  return cond->IsCondition() &&
         cond->GetNext() == branch &&
         cond->InputAt(0)->GetType() != DataType::Type::kInt64 &&
         !DataType::IsFloatingPointType(cond->InputAt(0)->GetType());
}
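// Illustrative note: when AreEflagsSetFrom() holds, the condition instruction sits
// directly before the branch and nothing in between can have clobbered EFLAGS, so
// GenerateTestAndBranch() below can emit a single jcc on the flags left by that
// condition's compare. Otherwise a materialized boolean has to be re-tested first,
// roughly:
//   testl cond_reg, cond_reg
//   jnz   true_label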
template <class LabelType>
void InstructionCodeGeneratorX86::GenerateTestAndBranch(HInstruction* instruction,
                                                        size_t condition_input_index,
                                                        LabelType* true_target,
                                                        LabelType* false_target) {
  HInstruction* cond = instruction->InputAt(condition_input_index);

  if (true_target == nullptr && false_target == nullptr) {
    // Nothing to do. The code always falls through.
    return;
  } else if (cond->IsIntConstant()) {
    // Constant condition, statically compared against "true" (integer value 1).
    if (cond->AsIntConstant()->IsTrue()) {
      if (true_target != nullptr) {
        __ jmp(true_target);
      }
    } else {
      DCHECK(cond->AsIntConstant()->IsFalse()) << cond->AsIntConstant()->GetValue();
      if (false_target != nullptr) {
        __ jmp(false_target);
      }
    }
    return;
  }

  // The following code generates these patterns:
  //   (1) true_target == nullptr && false_target != nullptr
  //         - opposite condition true => branch to false_target
  //   (2) true_target != nullptr && false_target == nullptr
  //         - condition true => branch to true_target
  //   (3) true_target != nullptr && false_target != nullptr
  //         - condition true => branch to true_target
  //         - branch to false_target
  if (IsBooleanValueOrMaterializedCondition(cond)) {
    if (AreEflagsSetFrom(cond, instruction)) {
      if (true_target == nullptr) {
        __ j(X86Condition(cond->AsCondition()->GetOppositeCondition()), false_target);
      } else {
        __ j(X86Condition(cond->AsCondition()->GetCondition()), true_target);
      }
    } else {
      // Materialized condition, compare against 0.
      Location lhs = instruction->GetLocations()->InAt(condition_input_index);
      if (lhs.IsRegister()) {
        __ testl(lhs.AsRegister<Register>(), lhs.AsRegister<Register>());
      } else {
        __ cmpl(Address(ESP, lhs.GetStackIndex()), Immediate(0));
      }
      if (true_target == nullptr) {
        __ j(kEqual, false_target);
      } else {
        __ j(kNotEqual, true_target);
      }
    }
  } else {
    // Condition has not been materialized, use its inputs as the comparison and
    // its condition as the branch condition.
    HCondition* condition = cond->AsCondition();

    // If this is a long or FP comparison that has been folded into
    // the HCondition, generate the comparison directly.
    DataType::Type type = condition->InputAt(0)->GetType();
    if (type == DataType::Type::kInt64 || DataType::IsFloatingPointType(type)) {
      GenerateCompareTestAndBranch(condition, true_target, false_target);
      return;
    }

    Location lhs = condition->GetLocations()->InAt(0);
    Location rhs = condition->GetLocations()->InAt(1);
    // LHS is guaranteed to be in a register (see LocationsBuilderX86::HandleCondition).
    codegen_->GenerateIntCompare(lhs, rhs);
    if (true_target == nullptr) {
      __ j(X86Condition(condition->GetOppositeCondition()), false_target);
    } else {
      __ j(X86Condition(condition->GetCondition()), true_target);
    }
  }

  // If neither branch falls through (case 3), the conditional branch to `true_target`
  // was already emitted (case 2) and we need to emit a jump to `false_target`.
  if (true_target != nullptr && false_target != nullptr) {
    __ jmp(false_target);
  }
}

void LocationsBuilderX86::VisitIf(HIf* if_instr) {
  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr);
  if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
    locations->SetInAt(0, Location::Any());
  }
}

void InstructionCodeGeneratorX86::VisitIf(HIf* if_instr) {
  HBasicBlock* true_successor = if_instr->IfTrueSuccessor();
  HBasicBlock* false_successor = if_instr->IfFalseSuccessor();
  Label* true_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), true_successor) ?
      nullptr : codegen_->GetLabelOf(true_successor);
  Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
      nullptr : codegen_->GetLabelOf(false_successor);
  GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
}
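// Illustrative note: VisitIf() above passes nullptr for a successor that is the block
// emitted immediately next, so GenerateTestAndBranch() can fall through instead of
// emitting a redundant jmp. HDeoptimize below reuses the same branch generation, except
// its "true" target is a slow path that hands execution back to the interpreter.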
void LocationsBuilderX86::VisitDeoptimize(HDeoptimize* deoptimize) {
  LocationSummary* locations = new (GetGraph()->GetAllocator())
      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
  InvokeRuntimeCallingConvention calling_convention;
  RegisterSet caller_saves = RegisterSet::Empty();
  caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetCustomSlowPathCallerSaves(caller_saves);
  if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
    locations->SetInAt(0, Location::Any());
  }
}

void InstructionCodeGeneratorX86::VisitDeoptimize(HDeoptimize* deoptimize) {
  SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathX86>(deoptimize);
  GenerateTestAndBranch