// Copyright 2020 the V8 project authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "src/compiler/backend/mid-tier-register-allocator.h" #include #include "src/base/bits.h" #include "src/base/logging.h" #include "src/base/macros.h" #include "src/base/optional.h" #include "src/codegen/machine-type.h" #include "src/codegen/register-configuration.h" #include "src/codegen/tick-counter.h" #include "src/common/globals.h" #include "src/compiler/backend/instruction.h" #include "src/compiler/linkage.h" #include "src/logging/counters.h" #include "src/utils/bit-vector.h" #include "src/zone/zone-containers.h" namespace v8 { namespace internal { namespace compiler { class RegisterState; class DeferredBlocksRegion; // BlockState stores details associated with a particular basic block. class BlockState final { public: BlockState(int block_count, Zone* zone) : general_registers_in_state_(nullptr), double_registers_in_state_(nullptr), deferred_blocks_region_(nullptr), dominated_blocks_(block_count, zone), successors_phi_index_(-1), is_deferred_block_boundary_(false) {} // Returns the RegisterState that applies to the input of this block. Can be // |nullptr| if the no registers of |kind| have been allocated up to this // point. RegisterState* register_in_state(RegisterKind kind); void set_register_in_state(RegisterState* register_state, RegisterKind kind); // Returns a bitvector representing all the basic blocks that are dominated // by this basic block. BitVector* dominated_blocks() { return &dominated_blocks_; } // Set / get this block's index for successor's phi operations. Will return // -1 if this block has no successor's with phi operations. int successors_phi_index() const { return successors_phi_index_; } void set_successors_phi_index(int index) { DCHECK_EQ(successors_phi_index_, -1); successors_phi_index_ = index; } // If this block is deferred, this represents region of deferred blocks // that are directly reachable from this block. DeferredBlocksRegion* deferred_blocks_region() const { return deferred_blocks_region_; } void set_deferred_blocks_region(DeferredBlocksRegion* region) { DCHECK_NULL(deferred_blocks_region_); deferred_blocks_region_ = region; } // Returns true if this block represents either a transition from // non-deferred to deferred or vice versa. bool is_deferred_block_boundary() const { return is_deferred_block_boundary_; } void MarkAsDeferredBlockBoundary() { is_deferred_block_boundary_ = true; } MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(BlockState); private: RegisterState* general_registers_in_state_; RegisterState* double_registers_in_state_; RegisterState* simd128_registers_in_state_; DeferredBlocksRegion* deferred_blocks_region_; BitVector dominated_blocks_; int successors_phi_index_; bool is_deferred_block_boundary_; }; RegisterState* BlockState::register_in_state(RegisterKind kind) { switch (kind) { case RegisterKind::kGeneral: return general_registers_in_state_; case RegisterKind::kDouble: return double_registers_in_state_; case RegisterKind::kSimd128: return simd128_registers_in_state_; } } void BlockState::set_register_in_state(RegisterState* register_state, RegisterKind kind) { switch (kind) { case RegisterKind::kGeneral: DCHECK_NULL(general_registers_in_state_); general_registers_in_state_ = register_state; break; case RegisterKind::kDouble: DCHECK_NULL(double_registers_in_state_); double_registers_in_state_ = register_state; break; case RegisterKind::kSimd128: DCHECK_NULL(simd128_registers_in_state_); simd128_registers_in_state_ = register_state; break; } } MidTierRegisterAllocationData::MidTierRegisterAllocationData( const RegisterConfiguration* config, Zone* zone, Frame* frame, InstructionSequence* code, TickCounter* tick_counter, const char* debug_name) : RegisterAllocationData(Type::kMidTier), allocation_zone_(zone), frame_(frame), code_(code), debug_name_(debug_name), config_(config), virtual_register_data_(code->VirtualRegisterCount(), allocation_zone()), block_states_(allocation_zone()), reference_map_instructions_(allocation_zone()), spilled_virtual_registers_(code->VirtualRegisterCount(), allocation_zone()), tick_counter_(tick_counter) { int basic_block_count = code->InstructionBlockCount(); block_states_.reserve(basic_block_count); for (int i = 0; i < basic_block_count; i++) { block_states_.emplace_back(basic_block_count, allocation_zone()); } } MoveOperands* MidTierRegisterAllocationData::AddGapMove( int instr_index, Instruction::GapPosition position, const InstructionOperand& from, const InstructionOperand& to) { Instruction* instr = code()->InstructionAt(instr_index); ParallelMove* moves = instr->GetOrCreateParallelMove(position, code_zone()); return moves->AddMove(from, to); } MoveOperands* MidTierRegisterAllocationData::AddPendingOperandGapMove( int instr_index, Instruction::GapPosition position) { return AddGapMove(instr_index, position, PendingOperand(), PendingOperand()); } BlockState& MidTierRegisterAllocationData::block_state(RpoNumber rpo_number) { return block_states_[rpo_number.ToInt()]; } const InstructionBlock* MidTierRegisterAllocationData::GetBlock( RpoNumber rpo_number) { return code()->InstructionBlockAt(rpo_number); } const InstructionBlock* MidTierRegisterAllocationData::GetBlock( int instr_index) { return code()->InstructionAt(instr_index)->block(); } const BitVector* MidTierRegisterAllocationData::GetBlocksDominatedBy( const InstructionBlock* block) { return block_state(block->rpo_number()).dominated_blocks(); } // RegisterIndex represents a particular register of a given kind (depending // on the RegisterKind of the allocator). class RegisterIndex final { public: RegisterIndex() : index_(kInvalidIndex) {} explicit RegisterIndex(int index) : index_(index) {} static RegisterIndex Invalid() { return RegisterIndex(); } bool is_valid() const { return index_ != kInvalidIndex; } int ToInt() const { DCHECK(is_valid()); return index_; } uintptr_t ToBit(MachineRepresentation rep) const { if (kFPAliasing != AliasingKind::kCombine || rep != MachineRepresentation::kSimd128) { return 1ull << ToInt(); } else { DCHECK_EQ(rep, MachineRepresentation::kSimd128); return 3ull << ToInt(); } } bool operator==(const RegisterIndex& rhs) const { return index_ == rhs.index_; } bool operator!=(const RegisterIndex& rhs) const { return index_ != rhs.index_; } class Iterator { public: explicit Iterator(int index) : index_(index) {} bool operator!=(const Iterator& rhs) const { return index_ != rhs.index_; } void operator++() { index_++; } RegisterIndex operator*() const { return RegisterIndex(index_); } private: int index_; }; private: static const int kInvalidIndex = -1; int8_t index_; }; // A Range from [start, end] of instructions, inclusive of start and end. class Range { public: Range() : start_(kMaxInt), end_(0) {} Range(int start, int end) : start_(start), end_(end) {} void AddInstr(int index) { start_ = std::min(start_, index); end_ = std::max(end_, index); } void AddRange(const Range& other) { start_ = std::min(start_, other.start_); end_ = std::max(end_, other.end_); } // Returns true if index is greater than start and less than or equal to end. bool Contains(int index) { return index >= start_ && index <= end_; } int start() const { return start_; } int end() const { return end_; } private: int start_; int end_; }; // Represents a connected region of deferred basic blocks. class DeferredBlocksRegion final { public: explicit DeferredBlocksRegion(Zone* zone, int number_of_blocks) : spilled_vregs_(zone), blocks_covered_(number_of_blocks, zone), is_frozen_(false) {} void AddBlock(RpoNumber block, MidTierRegisterAllocationData* data) { DCHECK(data->GetBlock(block)->IsDeferred()); blocks_covered_.Add(block.ToInt()); data->block_state(block).set_deferred_blocks_region(this); } // Trys to adds |vreg| to the list of variables to potentially defer their // output to a spill slot until we enter this deferred block region. Returns // true if successful. bool TryDeferSpillOutputUntilEntry(int vreg) { if (spilled_vregs_.count(vreg) != 0) return true; if (is_frozen_) return false; spilled_vregs_.insert(vreg); return true; } void FreezeDeferredSpills() { is_frozen_ = true; } ZoneSet::const_iterator begin() const { return spilled_vregs_.begin(); } ZoneSet::const_iterator end() const { return spilled_vregs_.end(); } const BitVector* blocks_covered() const { return &blocks_covered_; } private: ZoneSet spilled_vregs_; BitVector blocks_covered_; bool is_frozen_; }; // VirtualRegisterData stores data specific to a particular virtual register, // and tracks spilled operands for that virtual register. class VirtualRegisterData final { public: VirtualRegisterData() = default; // Define VirtualRegisterData with the type of output that produces this // virtual register. void DefineAsUnallocatedOperand(int virtual_register, MachineRepresentation rep, int instr_index, bool is_deferred_block, bool is_exceptional_call_output); void DefineAsFixedSpillOperand(AllocatedOperand* operand, int virtual_register, MachineRepresentation rep, int instr_index, bool is_deferred_block, bool is_exceptional_call_output); void DefineAsConstantOperand(ConstantOperand* operand, MachineRepresentation rep, int instr_index, bool is_deferred_block); void DefineAsPhi(int virtual_register, MachineRepresentation rep, int instr_index, bool is_deferred_block); // Spill an operand that is assigned to this virtual register. void SpillOperand(InstructionOperand* operand, int instr_index, bool has_constant_policy, MidTierRegisterAllocationData* data); // Emit gap moves to / from the spill slot. void EmitGapMoveToInputFromSpillSlot(InstructionOperand to_operand, int instr_index, MidTierRegisterAllocationData* data); void EmitGapMoveFromOutputToSpillSlot(InstructionOperand from_operand, const InstructionBlock* current_block, int instr_index, MidTierRegisterAllocationData* data); void EmitGapMoveToSpillSlot(InstructionOperand from_operand, int instr_index, MidTierRegisterAllocationData* data); // Adds pending spills for deferred-blocks. void AddDeferredSpillUse(int instr_index, MidTierRegisterAllocationData* data); void AddDeferredSpillOutput(AllocatedOperand allocated_op, int instr_index, MidTierRegisterAllocationData* data); // Accessors for spill operand, which may still be pending allocation. bool HasSpillOperand() const { return spill_operand_ != nullptr; } InstructionOperand* spill_operand() const { DCHECK(HasSpillOperand()); return spill_operand_; } bool HasPendingSpillOperand() const { return HasSpillOperand() && spill_operand_->IsPending(); } bool HasAllocatedSpillOperand() const { return HasSpillOperand() && spill_operand_->IsAllocated(); } bool HasConstantSpillOperand() const { return HasSpillOperand() && spill_operand_->IsConstant(); } // Returns true if the virtual register should be spilled when it is output. bool NeedsSpillAtOutput() const { return needs_spill_at_output_; } void MarkAsNeedsSpillAtOutput() { if (HasConstantSpillOperand()) return; needs_spill_at_output_ = true; if (HasSpillRange()) spill_range()->ClearDeferredBlockSpills(); } // Returns true if the virtual register should be spilled at entry to deferred // blocks in which it is spilled (to avoid spilling on output on // non-deferred blocks). bool NeedsSpillAtDeferredBlocks() const; void EmitDeferredSpillOutputs(MidTierRegisterAllocationData* data); bool IsSpilledAt(int instr_index, MidTierRegisterAllocationData* data) { DCHECK_GE(instr_index, output_instr_index()); if (NeedsSpillAtOutput() || HasConstantSpillOperand()) return true; if (HasSpillOperand() && data->GetBlock(instr_index)->IsDeferred()) { return true; } return false; } // Allocates pending spill operands to the |allocated| spill slot. void AllocatePendingSpillOperand(const AllocatedOperand& allocated); int vreg() const { return vreg_; } MachineRepresentation rep() const { return rep_; } int output_instr_index() const { return output_instr_index_; } bool is_constant() const { return is_constant_; } bool is_phi() const { return is_phi_; } bool is_defined_in_deferred_block() const { return is_defined_in_deferred_block_; } bool is_exceptional_call_output() const { return is_exceptional_call_output_; } struct DeferredSpillSlotOutput { public: explicit DeferredSpillSlotOutput(int instr, AllocatedOperand op, const BitVector* blocks) : instr_index(instr), operand(op), live_blocks(blocks) {} int instr_index; AllocatedOperand operand; const BitVector* live_blocks; }; // Represents the range of instructions for which this virtual register needs // to be spilled on the stack. class SpillRange : public ZoneObject { public: // Defines a spill range for an output operand. SpillRange(int definition_instr_index, const InstructionBlock* definition_block, MidTierRegisterAllocationData* data) : live_range_(definition_instr_index, definition_instr_index), live_blocks_(data->GetBlocksDominatedBy(definition_block)), deferred_spill_outputs_(nullptr) {} // Defines a spill range for a Phi variable. SpillRange(const InstructionBlock* phi_block, MidTierRegisterAllocationData* data) : live_range_(phi_block->first_instruction_index(), phi_block->first_instruction_index()), live_blocks_(data->GetBlocksDominatedBy(phi_block)), deferred_spill_outputs_(nullptr) { // For phis, add the gap move instructions in the predecssor blocks to // the live range. for (RpoNumber pred_rpo : phi_block->predecessors()) { const InstructionBlock* block = data->GetBlock(pred_rpo); live_range_.AddInstr(block->last_instruction_index()); } } SpillRange(const SpillRange&) = delete; SpillRange& operator=(const SpillRange&) = delete; bool IsLiveAt(int instr_index, InstructionBlock* block) { if (!live_range_.Contains(instr_index)) return false; int block_rpo = block->rpo_number().ToInt(); if (!live_blocks_->Contains(block_rpo)) return false; if (!HasDeferredBlockSpills()) { return true; } else { // If this spill range is only output for deferred block, then the spill // slot will only be live for the deferred blocks, not all blocks that // the virtual register is live. for (auto deferred_spill_output : *deferred_spill_outputs()) { if (deferred_spill_output.live_blocks->Contains(block_rpo)) { return true; } } return false; } } void ExtendRangeTo(int instr_index) { live_range_.AddInstr(instr_index); } void AddDeferredSpillOutput(AllocatedOperand allocated_op, int instr_index, MidTierRegisterAllocationData* data) { if (deferred_spill_outputs_ == nullptr) { Zone* zone = data->allocation_zone(); deferred_spill_outputs_ = zone->New>(zone); } const InstructionBlock* block = data->GetBlock(instr_index); DCHECK_EQ(block->first_instruction_index(), instr_index); BlockState& block_state = data->block_state(block->rpo_number()); const BitVector* deferred_blocks = block_state.deferred_blocks_region()->blocks_covered(); deferred_spill_outputs_->emplace_back(instr_index, allocated_op, deferred_blocks); } void ClearDeferredBlockSpills() { deferred_spill_outputs_ = nullptr; } bool HasDeferredBlockSpills() const { return deferred_spill_outputs_ != nullptr; } const ZoneVector* deferred_spill_outputs() const { DCHECK(HasDeferredBlockSpills()); return deferred_spill_outputs_; } Range& live_range() { return live_range_; } private: Range live_range_; const BitVector* live_blocks_; ZoneVector* deferred_spill_outputs_; }; bool HasSpillRange() const { return spill_range_ != nullptr; } SpillRange* spill_range() const { DCHECK(HasSpillRange()); return spill_range_; } private: void Initialize(int virtual_register, MachineRepresentation rep, InstructionOperand* spill_operand, int instr_index, bool is_phi, bool is_constant, bool is_defined_in_deferred_block, bool is_exceptional_call_output); void AddSpillUse(int instr_index, MidTierRegisterAllocationData* data); void AddPendingSpillOperand(PendingOperand* pending_operand); void EnsureSpillRange(MidTierRegisterAllocationData* data); bool TrySpillOnEntryToDeferred(MidTierRegisterAllocationData* data, const InstructionBlock* block); InstructionOperand* spill_operand_; SpillRange* spill_range_; int output_instr_index_; int vreg_; MachineRepresentation rep_; bool is_phi_ : 1; bool is_constant_ : 1; bool is_defined_in_deferred_block_ : 1; bool needs_spill_at_output_ : 1; bool is_exceptional_call_output_ : 1; }; VirtualRegisterData& MidTierRegisterAllocationData::VirtualRegisterDataFor( int virtual_register) { DCHECK_GE(virtual_register, 0); DCHECK_LT(virtual_register, virtual_register_data_.size()); return virtual_register_data_[virtual_register]; } void VirtualRegisterData::Initialize(int virtual_register, MachineRepresentation rep, InstructionOperand* spill_operand, int instr_index, bool is_phi, bool is_constant, bool is_defined_in_deferred_block, bool is_exceptional_call_output) { vreg_ = virtual_register; rep_ = rep; spill_operand_ = spill_operand; spill_range_ = nullptr; output_instr_index_ = instr_index; is_phi_ = is_phi; is_constant_ = is_constant; is_defined_in_deferred_block_ = is_defined_in_deferred_block; needs_spill_at_output_ = !is_constant_ && spill_operand_ != nullptr; is_exceptional_call_output_ = is_exceptional_call_output; } void VirtualRegisterData::DefineAsConstantOperand(ConstantOperand* operand, MachineRepresentation rep, int instr_index, bool is_deferred_block) { Initialize(operand->virtual_register(), rep, operand, instr_index, false, true, is_deferred_block, false); } void VirtualRegisterData::DefineAsFixedSpillOperand( AllocatedOperand* operand, int virtual_register, MachineRepresentation rep, int instr_index, bool is_deferred_block, bool is_exceptional_call_output) { Initialize(virtual_register, rep, operand, instr_index, false, false, is_deferred_block, is_exceptional_call_output); } void VirtualRegisterData::DefineAsUnallocatedOperand( int virtual_register, MachineRepresentation rep, int instr_index, bool is_deferred_block, bool is_exceptional_call_output) { Initialize(virtual_register, rep, nullptr, instr_index, false, false, is_deferred_block, is_exceptional_call_output); } void VirtualRegisterData::DefineAsPhi(int virtual_register, MachineRepresentation rep, int instr_index, bool is_deferred_block) { Initialize(virtual_register, rep, nullptr, instr_index, true, false, is_deferred_block, false); } void VirtualRegisterData::EnsureSpillRange( MidTierRegisterAllocationData* data) { DCHECK(!HasConstantSpillOperand()); if (HasSpillRange()) return; const InstructionBlock* definition_block = data->GetBlock(output_instr_index_); if (is_phi()) { // Define a spill slot that is defined for the phi's range. spill_range_ = data->allocation_zone()->New(definition_block, data); } else { if (is_exceptional_call_output()) { // If this virtual register is output by a call which has an exception // catch handler, then the output will only be live in the IfSuccess // successor block, not the IfException side, so make the definition block // the IfSuccess successor block explicitly. DCHECK_EQ(output_instr_index_, definition_block->last_instruction_index() - 1); DCHECK_EQ(definition_block->SuccessorCount(), 2); DCHECK(data->GetBlock(definition_block->successors()[1])->IsHandler()); definition_block = data->GetBlock(definition_block->successors()[0]); } // The spill slot will be defined after the instruction that outputs it. spill_range_ = data->allocation_zone()->New( output_instr_index_ + 1, definition_block, data); } data->spilled_virtual_registers().Add(vreg()); } void VirtualRegisterData::AddSpillUse(int instr_index, MidTierRegisterAllocationData* data) { if (HasConstantSpillOperand()) return; EnsureSpillRange(data); spill_range_->ExtendRangeTo(instr_index); const InstructionBlock* block = data->GetBlock(instr_index); if (!TrySpillOnEntryToDeferred(data, block)) { MarkAsNeedsSpillAtOutput(); } } void VirtualRegisterData::AddDeferredSpillUse( int instr_index, MidTierRegisterAllocationData* data) { DCHECK(data->GetBlock(instr_index)->IsDeferred()); DCHECK(!is_defined_in_deferred_block()); AddSpillUse(instr_index, data); } bool VirtualRegisterData::TrySpillOnEntryToDeferred( MidTierRegisterAllocationData* data, const InstructionBlock* block) { BlockState& block_state = data->block_state(block->rpo_number()); if (!NeedsSpillAtOutput() && block->IsDeferred() && !is_defined_in_deferred_block() && !is_constant()) { return block_state.deferred_blocks_region()->TryDeferSpillOutputUntilEntry( vreg()); } return false; } void VirtualRegisterData::AddDeferredSpillOutput( AllocatedOperand allocated_op, int instr_index, MidTierRegisterAllocationData* data) { DCHECK(!NeedsSpillAtOutput()); DCHECK(HasSpillRange()); spill_range_->AddDeferredSpillOutput(allocated_op, instr_index, data); } void VirtualRegisterData::SpillOperand(InstructionOperand* operand, int instr_index, bool has_constant_policy, MidTierRegisterAllocationData* data) { if (!has_constant_policy && HasConstantSpillOperand()) { // Reset the constant spill operand to force a real spill slot since this // operand can't use the constant spill operand. spill_operand_ = nullptr; DCHECK(!HasConstantSpillOperand()); } AddSpillUse(instr_index, data); if (HasAllocatedSpillOperand() || HasConstantSpillOperand()) { InstructionOperand::ReplaceWith(operand, spill_operand()); } else { PendingOperand pending_op; InstructionOperand::ReplaceWith(operand, &pending_op); AddPendingSpillOperand(PendingOperand::cast(operand)); } } bool VirtualRegisterData::NeedsSpillAtDeferredBlocks() const { return HasSpillRange() && spill_range()->HasDeferredBlockSpills(); } void VirtualRegisterData::EmitDeferredSpillOutputs( MidTierRegisterAllocationData* data) { DCHECK(NeedsSpillAtDeferredBlocks()); for (auto deferred_spill : *spill_range()->deferred_spill_outputs()) { EmitGapMoveToSpillSlot(deferred_spill.operand, deferred_spill.instr_index, data); } } void VirtualRegisterData::EmitGapMoveToInputFromSpillSlot( InstructionOperand to_operand, int instr_index, MidTierRegisterAllocationData* data) { AddSpillUse(instr_index, data); DCHECK(!to_operand.IsPending()); if (HasAllocatedSpillOperand() || HasConstantSpillOperand()) { data->AddGapMove(instr_index, Instruction::END, *spill_operand(), to_operand); } else { MoveOperands* move_ops = data->AddPendingOperandGapMove(instr_index, Instruction::END); AddPendingSpillOperand(PendingOperand::cast(&move_ops->source())); InstructionOperand::ReplaceWith(&move_ops->destination(), &to_operand); } } void VirtualRegisterData::EmitGapMoveToSpillSlot( InstructionOperand from_operand, int instr_index, MidTierRegisterAllocationData* data) { AddSpillUse(instr_index, data); if (HasAllocatedSpillOperand() || HasConstantSpillOperand()) { data->AddGapMove(instr_index, Instruction::START, from_operand, *spill_operand()); } else { MoveOperands* move_ops = data->AddPendingOperandGapMove(instr_index, Instruction::START); InstructionOperand::ReplaceWith(&move_ops->source(), &from_operand); AddPendingSpillOperand(PendingOperand::cast(&move_ops->destination())); } } void VirtualRegisterData::EmitGapMoveFromOutputToSpillSlot( InstructionOperand from_operand, const InstructionBlock* current_block, int instr_index, MidTierRegisterAllocationData* data) { DCHECK_EQ(data->GetBlock(instr_index), current_block); if (instr_index == current_block->last_instruction_index()) { // Add gap move to the first instruction of every successor block. for (const RpoNumber& succ : current_block->successors()) { const InstructionBlock* successor = data->GetBlock(succ); DCHECK_EQ(1, successor->PredecessorCount()); EmitGapMoveToSpillSlot(from_operand, successor->first_instruction_index(), data); } } else { // Add gap move to the next instruction. EmitGapMoveToSpillSlot(from_operand, instr_index + 1, data); } } void VirtualRegisterData::AddPendingSpillOperand(PendingOperand* pending_op) { DCHECK(HasSpillRange()); DCHECK_NULL(pending_op->next()); if (HasSpillOperand()) { pending_op->set_next(PendingOperand::cast(spill_operand())); } spill_operand_ = pending_op; } void VirtualRegisterData::AllocatePendingSpillOperand( const AllocatedOperand& allocated) { DCHECK(!HasAllocatedSpillOperand() && !HasConstantSpillOperand()); PendingOperand* current = PendingOperand::cast(spill_operand_); while (current) { PendingOperand* next = current->next(); InstructionOperand::ReplaceWith(current, &allocated); current = next; } } // RegisterState represents the state of the |kind| registers at a particular // point in program execution. The RegisterState can be cloned or merged with // other RegisterStates to model branches and merges in program control flow. class RegisterState final : public ZoneObject { public: static RegisterState* New(RegisterKind kind, int num_allocatable_registers, Zone* zone) { return zone->New(kind, num_allocatable_registers, zone); } RegisterState(RegisterKind kind, int num_allocatable_registers, Zone* zone); RegisterState(const RegisterState& other) V8_NOEXCEPT; bool IsAllocated(RegisterIndex reg); bool IsShared(RegisterIndex reg); int VirtualRegisterForRegister(RegisterIndex reg); // Commit the |reg| with the |allocated| operand. void Commit(RegisterIndex reg, AllocatedOperand allocated, InstructionOperand* operand, MidTierRegisterAllocationData* data); // Spill the contents of |reg| for an instruction in |current_block| using // the |allocated| operand to commit the spill gap move. void Spill(RegisterIndex reg, AllocatedOperand allocated, const InstructionBlock* current_block, MidTierRegisterAllocationData* data); // Add a pending spill of the contents of |reg| at the exit point of a // deferred block at |instr_index| using |allocated| operand to commit the // spill gap move, if the register never gets spilled in a non-deferred block. void SpillForDeferred(RegisterIndex reg, AllocatedOperand allocated, int instr_index, MidTierRegisterAllocationData* data); // Add a pending gap move from |reg| to |virtual_register|'s spill at the // entry point of a deferred block at |instr_index|, if the |virtual_register| // never spilled in a non-deferred block. void MoveToSpillSlotOnDeferred(RegisterIndex reg, int virtual_register, int instr_index, MidTierRegisterAllocationData* data); // Allocate |reg| to |virtual_register| for the instruction at |instr_index|. // If the register is later spilled, a gap move will be added immediately // before |instr_index| to move |virtual_register| into this register. void AllocateUse(RegisterIndex reg, int virtual_register, InstructionOperand* operand, int instr_index, MidTierRegisterAllocationData* data); // Allocate |reg| as a pending use of |virtual_register| for |operand| in the // instruction at |instr_index|. If |virtual_register| later gets committed to // this register, then |operand| will be too, otherwise |operand| will be // replaced with |virtual_register|'s spill operand. void AllocatePendingUse(RegisterIndex reg, int virtual_register, InstructionOperand* operand, bool can_be_constant, int instr_index); // Mark that the register is holding a phi operand that is yet to be allocated // by the source block in the gap just before the last instruction in the // source block. void UseForPhiGapMove(RegisterIndex reg); bool IsPhiGapMove(RegisterIndex reg); // Returns true if |reg| only has pending uses allocated to it. bool HasPendingUsesOnly(RegisterIndex reg); // Clone this RegisterState for a successor block. RegisterState* Clone(); // Copy register details for |reg| from |source| to |this| RegisterState. void CopyFrom(RegisterIndex reg, RegisterState* source); // Returns true if the register details for |reg| are equal in |source| and // |this| RegisterStates. bool Equals(RegisterIndex reg, RegisterState* source); // Signals that the registers in this state are going to be shared across // |shared_use_count| blocks. void AddSharedUses(int shared_use_count); // When merging multiple block's RegisterState into the successor block with // |this| RegisterState, commit |reg| as being merged from a given predecessor // block. void CommitAtMerge(RegisterIndex reg); // Resets |reg| if it has register data that was shared with other basic // blocks and was spilled in those blocks. void ResetIfSpilledWhileShared(RegisterIndex reg); // Enable range-based for on allocatable register indices. RegisterIndex::Iterator begin() const { return RegisterIndex::Iterator(0); } RegisterIndex::Iterator end() const { return RegisterIndex::Iterator(num_allocatable_registers()); } private: // Represents a particular register and details of what virtual_register it is // currently holding, and how it should be updated if committed or spilled. class Register final : public ZoneObject { public: Register(); void Reset(); // Operations for committing, spilling and allocating uses of the register. void Commit(AllocatedOperand allocated_operand, MidTierRegisterAllocationData* data); void Spill(AllocatedOperand allocated_op, const InstructionBlock* current_block, MidTierRegisterAllocationData* data); void Use(int virtual_register, int instr_index); void PendingUse(InstructionOperand* operand, int virtual_register, bool can_be_constant, int instr_index); void SpillForDeferred(AllocatedOperand allocated, int instr_index, MidTierRegisterAllocationData* data); void MoveToSpillSlotOnDeferred(int virtual_register, int instr_index, MidTierRegisterAllocationData* data); // Mark register as holding a phi. void MarkAsPhiMove(); bool is_phi_gap_move() const { return is_phi_gap_move_; } // The register has deferred block spills, that will be emitted if the // register is committed without having been spilled in a non-deferred block void AddDeferredBlockSpill(int instr_index, bool on_exit, Zone* zone); bool has_deferred_block_spills() const { return deferred_block_spills_.has_value(); } // Operations related to dealing with a Register that is shared across // multiple basic blocks. void CommitAtMerge(); void AddSharedUses(int shared_use_count); bool is_shared() const { return is_shared_; } bool was_spilled_while_shared() const { return is_shared() && !is_allocated(); } bool is_allocated() const { return virtual_register_ != InstructionOperand::kInvalidVirtualRegister; } // The current virtual register held by this register. int virtual_register() const { return virtual_register_; } // The instruction index for the last use of the current in-progress // allocation of this register in the instruction stream. Used both // as the instruction too add a gap move if |needs_gap_move_on_spill| and // the intruction which the virtual register's spill range should be // extended too if the register is spilled. int last_use_instr_index() const { return last_use_instr_index_; } // Returns true if a gap move should be added if the register is spilled. bool needs_gap_move_on_spill() const { return needs_gap_move_on_spill_; } // Returns a threaded list of the operands that have pending uses of this // register and will be resolved either to the register, or a spill slot // depending on whether this register is spilled or committed. PendingOperand* pending_uses() const { return pending_uses_; } private: struct DeferredBlockSpill { DeferredBlockSpill(int instr, bool on_exit) : instr_index(instr), on_deferred_exit(on_exit) {} int instr_index; bool on_deferred_exit; }; void SpillPendingUses(MidTierRegisterAllocationData* data); void SpillPhiGapMove(AllocatedOperand allocated_op, const InstructionBlock* block, MidTierRegisterAllocationData* data); bool needs_gap_move_on_spill_; bool is_shared_; bool is_phi_gap_move_; bool pending_uses_can_use_constant_; int last_use_instr_index_; int num_commits_required_; int virtual_register_; PendingOperand* pending_uses_; base::Optional> deferred_block_spills_; }; void ResetDataFor(RegisterIndex reg); bool HasRegisterData(RegisterIndex reg); void EnsureRegisterData(RegisterIndex reg); int num_allocatable_registers() const { return static_cast(register_data_.size()); } Register& reg_data(RegisterIndex reg); Zone* zone() const { return zone_; } ZoneVector register_data_; Zone* zone_; }; RegisterState::Register::Register() { Reset(); } void RegisterState::Register::Reset() { is_shared_ = false; is_phi_gap_move_ = false; needs_gap_move_on_spill_ = false; pending_uses_can_use_constant_ = true; last_use_instr_index_ = -1; num_commits_required_ = 0; virtual_register_ = InstructionOperand::kInvalidVirtualRegister; pending_uses_ = nullptr; deferred_block_spills_.reset(); } void RegisterState::Register::Use(int virtual_register, int instr_index) { // A register can have many pending uses, but should only ever have a single // non-pending use, since any subsiquent use will commit the preceeding use // first. DCHECK(!is_allocated()); DCHECK(!is_shared()); needs_gap_move_on_spill_ = true; virtual_register_ = virtual_register; last_use_instr_index_ = instr_index; num_commits_required_ = 1; } void RegisterState::Register::PendingUse(InstructionOperand* operand, int virtual_register, bool can_be_constant, int instr_index) { DCHECK(!was_spilled_while_shared()); if (!is_allocated()) { virtual_register_ = virtual_register; last_use_instr_index_ = instr_index; num_commits_required_ = 1; } DCHECK_EQ(virtual_register_, virtual_register); pending_uses_can_use_constant_ &= can_be_constant; PendingOperand pending_op(pending_uses()); InstructionOperand::ReplaceWith(operand, &pending_op); pending_uses_ = PendingOperand::cast(operand); } void RegisterState::Register::MarkAsPhiMove() { DCHECK(is_allocated()); is_phi_gap_move_ = true; } void RegisterState::Register::AddDeferredBlockSpill(int instr_index, bool on_exit, Zone* zone) { DCHECK(is_allocated()); if (!deferred_block_spills_) { deferred_block_spills_.emplace(zone); } deferred_block_spills_->emplace_back(instr_index, on_exit); } void RegisterState::Register::AddSharedUses(int shared_use_count) { DCHECK(!was_spilled_while_shared()); is_shared_ = true; num_commits_required_ += shared_use_count; } void RegisterState::Register::CommitAtMerge() { DCHECK(is_shared()); DCHECK(is_allocated()); --num_commits_required_; // We should still have commits required that will be resolved in the merge // block. DCHECK_GT(num_commits_required_, 0); } void RegisterState::Register::Commit(AllocatedOperand allocated_op, MidTierRegisterAllocationData* data) { DCHECK(is_allocated()); DCHECK_GT(num_commits_required_, 0); if (--num_commits_required_ == 0) { // Allocate all pending uses to |allocated_op| if this commit is non-shared, // or if it is the final commit required on a register data shared across // blocks. PendingOperand* pending_use = pending_uses(); while (pending_use) { PendingOperand* next = pending_use->next(); InstructionOperand::ReplaceWith(pending_use, &allocated_op); pending_use = next; } pending_uses_ = nullptr; VirtualRegisterData& vreg_data = data->VirtualRegisterDataFor(virtual_register()); // If there are deferred block gap moves pending, emit them now that the // register has been committed. if (has_deferred_block_spills()) { for (DeferredBlockSpill& spill : *deferred_block_spills_) { if (spill.on_deferred_exit) { vreg_data.EmitGapMoveToInputFromSpillSlot(allocated_op, spill.instr_index, data); } else if (!vreg_data.NeedsSpillAtOutput()) { vreg_data.AddDeferredSpillOutput(allocated_op, spill.instr_index, data); } } } // If this register was used as a phi gap move, then it being commited // is the point at which we have output the Phi. if (is_phi_gap_move() && vreg_data.NeedsSpillAtDeferredBlocks()) { vreg_data.EmitDeferredSpillOutputs(data); } } DCHECK_IMPLIES(num_commits_required_ > 0, is_shared()); } void RegisterState::Register::Spill(AllocatedOperand allocated_op, const InstructionBlock* current_block, MidTierRegisterAllocationData* data) { VirtualRegisterData& vreg_data = data->VirtualRegisterDataFor(virtual_register()); SpillPendingUses(data); if (is_phi_gap_move()) { SpillPhiGapMove(allocated_op, current_block, data); } if (needs_gap_move_on_spill()) { vreg_data.EmitGapMoveToInputFromSpillSlot(allocated_op, last_use_instr_index(), data); } if (has_deferred_block_spills() || !current_block->IsDeferred()) { vreg_data.MarkAsNeedsSpillAtOutput(); } // TODO(1180335): Doing a full reset here shouldn't be necessary, but // investigate if it fixes crbug.com/1180335. bool is_shared = is_shared_; Reset(); is_shared_ = is_shared; DCHECK_IMPLIES(is_shared_, was_spilled_while_shared()); } void RegisterState::Register::SpillPhiGapMove( AllocatedOperand allocated_op, const InstructionBlock* current_block, MidTierRegisterAllocationData* data) { DCHECK_EQ(current_block->SuccessorCount(), 1); const InstructionBlock* phi_block = data->GetBlock(current_block->successors()[0]); // Add gap moves to the spilled phi for all blocks we previously allocated // assuming the the phi was in a register. VirtualRegisterData& vreg_data = data->VirtualRegisterDataFor(virtual_register()); for (RpoNumber predecessor : phi_block->predecessors()) { // If the predecessor has a lower rpo number than the current block, then // we have already processed it, so add the required gap move. if (predecessor > current_block->rpo_number()) { const InstructionBlock* predecessor_block = data->GetBlock(predecessor); vreg_data.EmitGapMoveToSpillSlot( allocated_op, predecessor_block->last_instruction_index(), data); } } } void RegisterState::Register::SpillPendingUses( MidTierRegisterAllocationData* data) { VirtualRegisterData& vreg_data = data->VirtualRegisterDataFor(virtual_register()); PendingOperand* pending_use = pending_uses(); while (pending_use) { // Spill all the pending operands associated with this register. PendingOperand* next = pending_use->next(); vreg_data.SpillOperand(pending_use, last_use_instr_index(), pending_uses_can_use_constant_, data); pending_use = next; } pending_uses_ = nullptr; } void RegisterState::Register::SpillForDeferred( AllocatedOperand allocated, int instr_index, MidTierRegisterAllocationData* data) { DCHECK(is_allocated()); DCHECK(is_shared()); // Add a pending deferred spill, then commit the register (with the commit // being fullfilled by the deferred spill if the register is fully commited). data->VirtualRegisterDataFor(virtual_register()) .AddDeferredSpillUse(instr_index, data); AddDeferredBlockSpill(instr_index, true, data->allocation_zone()); Commit(allocated, data); } void RegisterState::Register::MoveToSpillSlotOnDeferred( int virtual_register, int instr_index, MidTierRegisterAllocationData* data) { DCHECK(!was_spilled_while_shared()); if (!is_allocated()) { virtual_register_ = virtual_register; last_use_instr_index_ = instr_index; num_commits_required_ = 1; } AddDeferredBlockSpill(instr_index, false, data->allocation_zone()); } RegisterState::RegisterState(RegisterKind kind, int num_allocatable_registers, Zone* zone) : register_data_(num_allocatable_registers, zone), zone_(zone) {} RegisterState::RegisterState(const RegisterState& other) V8_NOEXCEPT : register_data_(other.register_data_.begin(), other.register_data_.end(), other.zone_), zone_(other.zone_) {} int RegisterState::VirtualRegisterForRegister(RegisterIndex reg) { if (IsAllocated(reg)) { return reg_data(reg).virtual_register(); } else { return InstructionOperand::kInvalidVirtualRegister; } } bool RegisterState::IsPhiGapMove(RegisterIndex reg) { DCHECK(IsAllocated(reg)); return reg_data(reg).is_phi_gap_move(); } void RegisterState::Commit(RegisterIndex reg, AllocatedOperand allocated, InstructionOperand* operand, MidTierRegisterAllocationData* data) { InstructionOperand::ReplaceWith(operand, &allocated); if (IsAllocated(reg)) { reg_data(reg).Commit(allocated, data); ResetDataFor(reg); } } void RegisterState::Spill(RegisterIndex reg, AllocatedOperand allocated, const InstructionBlock* current_block, MidTierRegisterAllocationData* data) { DCHECK(IsAllocated(reg)); reg_data(reg).Spill(allocated, current_block, data); ResetDataFor(reg); } void RegisterState::SpillForDeferred(RegisterIndex reg, AllocatedOperand allocated, int instr_index, MidTierRegisterAllocationData* data) { DCHECK(IsAllocated(reg)); reg_data(reg).SpillForDeferred(allocated, instr_index, data); ResetDataFor(reg); } void RegisterState::MoveToSpillSlotOnDeferred( RegisterIndex reg, int virtual_register, int instr_index, MidTierRegisterAllocationData* data) { EnsureRegisterData(reg); reg_data(reg).MoveToSpillSlotOnDeferred(virtual_register, instr_index, data); } void RegisterState::AllocateUse(RegisterIndex reg, int virtual_register, InstructionOperand* operand, int instr_index, MidTierRegisterAllocationData* data) { EnsureRegisterData(reg); reg_data(reg).Use(virtual_register, instr_index); } void RegisterState::AllocatePendingUse(RegisterIndex reg, int virtual_register, InstructionOperand* operand, bool can_be_constant, int instr_index) { EnsureRegisterData(reg); reg_data(reg).PendingUse(operand, virtual_register, can_be_constant, instr_index); } void RegisterState::UseForPhiGapMove(RegisterIndex reg) { DCHECK(IsAllocated(reg)); reg_data(reg).MarkAsPhiMove(); } RegisterState::Register& RegisterState::reg_data(RegisterIndex reg) { DCHECK(HasRegisterData(reg)); return *register_data_[reg.ToInt()]; } bool RegisterState::IsShared(RegisterIndex reg) { return HasRegisterData(reg) && reg_data(reg).is_shared(); } bool RegisterState::IsAllocated(RegisterIndex reg) { return HasRegisterData(reg) && reg_data(reg).is_allocated(); } bool RegisterState::HasPendingUsesOnly(RegisterIndex reg) { DCHECK(IsAllocated(reg)); return !reg_data(reg).needs_gap_move_on_spill(); } void RegisterState::ResetDataFor(RegisterIndex reg) { DCHECK(HasRegisterData(reg)); if (reg_data(reg).is_shared()) { register_data_[reg.ToInt()] = nullptr; } else { reg_data(reg).Reset(); } } bool RegisterState::HasRegisterData(RegisterIndex reg) { DCHECK_LT(reg.ToInt(), register_data_.size()); return register_data_[reg.ToInt()] != nullptr; } void RegisterState::EnsureRegisterData(RegisterIndex reg) { if (!HasRegisterData(reg)) { register_data_[reg.ToInt()] = zone()->New(); } } void RegisterState::ResetIfSpilledWhileShared(RegisterIndex reg) { if (HasRegisterData(reg) && reg_data(reg).was_spilled_while_shared()) { ResetDataFor(reg); } } void RegisterState::CommitAtMerge(RegisterIndex reg) { DCHECK(IsAllocated(reg)); reg_data(reg).CommitAtMerge(); } void RegisterState::CopyFrom(RegisterIndex reg, RegisterState* source) { register_data_[reg.ToInt()] = source->register_data_[reg.ToInt()]; } bool RegisterState::Equals(RegisterIndex reg, RegisterState* other) { return register_data_[reg.ToInt()] == other->register_data_[reg.ToInt()]; } void RegisterState::AddSharedUses(int shared_use_count) { for (RegisterIndex reg : *this) { if (HasRegisterData(reg)) { reg_data(reg).AddSharedUses(shared_use_count); } } } RegisterState* RegisterState::Clone() { return zone_->New(*this); } class RegisterBitVector { public: RegisterBitVector() : bits_(0) {} bool operator==(const RegisterBitVector& other) const { return bits_ == other.bits_; } bool Contains(RegisterIndex reg, MachineRepresentation rep) const { return bits_ & reg.ToBit(rep); } RegisterIndex GetFirstSet() const { return RegisterIndex(base::bits::CountTrailingZeros(bits_)); } RegisterIndex GetFirstCleared(int max_reg) const { int reg_index = base::bits::CountTrailingZeros(~bits_); if (reg_index < max_reg) { return RegisterIndex(reg_index); } else { return RegisterIndex::Invalid(); } } void Add(RegisterIndex reg, MachineRepresentation rep) { bits_ |= reg.ToBit(rep); } void Clear(RegisterIndex reg, MachineRepresentation rep) { bits_ &= ~reg.ToBit(rep); } RegisterBitVector Union(const RegisterBitVector& other) { return RegisterBitVector(bits_ | other.bits_); } void Reset() { bits_ = 0; } bool IsEmpty() const { return bits_ == 0; } private: friend std::ostream& operator<<(std::ostream&, RegisterBitVector); explicit RegisterBitVector(uintptr_t bits) : bits_(bits) {} static_assert(RegisterConfiguration::kMaxRegisters <= sizeof(uintptr_t) * 8, "Maximum registers must fit in uintptr_t bitmap"); uintptr_t bits_; }; std::ostream& operator<<(std::ostream& os, RegisterBitVector register_bits) { return os << std::hex << register_bits.bits_ << std::dec; } // A SinglePassRegisterAllocator is a fast register allocator that does a single // pass through the instruction stream without performing any live-range // analysis beforehand. It deals with a single RegisterKind, either general or // double registers, with the MidTierRegisterAllocator choosing the correct // SinglePassRegisterAllocator based on a values representation. class SinglePassRegisterAllocator final { public: SinglePassRegisterAllocator(RegisterKind kind, MidTierRegisterAllocationData* data); // Convert to / from a register code and a register index. RegisterIndex FromRegCode(int reg_code, MachineRepresentation rep) const; int ToRegCode(RegisterIndex index, MachineRepresentation rep) const; // Allocation routines used to allocate a particular operand to either a // register or a spill slot. void AllocateConstantOutput(ConstantOperand* operand, VirtualRegisterData& vreg, int instr_index); void AllocateOutput(UnallocatedOperand* operand, VirtualRegisterData& vreg, int instr_index); void AllocateInput(UnallocatedOperand* operand, VirtualRegisterData& vreg, int instr_index); void AllocateSameInputOutput(UnallocatedOperand* output, UnallocatedOperand* input, VirtualRegisterData& output_vreg, VirtualRegisterData& input_vreg, int instr_index); void AllocateGapMoveInput(UnallocatedOperand* operand, VirtualRegisterData& vreg, int instr_index); void AllocateTemp(UnallocatedOperand* operand, int virtual_register, MachineRepresentation rep, int instr_index); void AllocatePhi(VirtualRegisterData& virtual_register, const InstructionBlock* block); void AllocatePhiGapMove(VirtualRegisterData& to_vreg, VirtualRegisterData& from_vreg, int instr_index); // Reserve any fixed registers for the operands on an instruction before doing // allocation on the operands. void ReserveFixedInputRegister(const UnallocatedOperand* operand, int virtual_register, MachineRepresentation rep, int instr_index); void ReserveFixedTempRegister(const UnallocatedOperand* operand, int virtual_register, MachineRepresentation rep, int instr_index); void ReserveFixedOutputRegister(const UnallocatedOperand* operand, int virtual_register, MachineRepresentation rep, int instr_index); // Spills all registers that are currently holding data, for example, due to // an instruction that clobbers all registers. void SpillAllRegisters(); // Inform the allocator that we are starting / ending a block or ending // allocation for the current instruction. void StartBlock(const InstructionBlock* block); void EndBlock(const InstructionBlock* block); void EndInstruction(); void UpdateForDeferredBlock(int instr_index); void AllocateDeferredBlockSpillOutput(int instr_index, RpoNumber deferred_block, VirtualRegisterData& virtual_register); RegisterKind kind() const { return kind_; } BitVector* assigned_registers() const { return assigned_registers_; } private: enum class UsePosition { // Operand used at start of instruction. kStart, // Operand used at end of instruction. kEnd, // Operand is used at both the start and end of instruction. kAll, // Operand is not used in the instruction (used when initializing register // state on block entry). kNone, }; // The allocator is initialized without any RegisterState by default to avoid // having to allocate per-block allocator state for functions that don't // allocate registers of a particular type. All allocation functions should // call EnsureRegisterState to allocate a RegisterState if necessary. void EnsureRegisterState(); // Clone the register state from |successor| into the current register state. void CloneStateFrom(RpoNumber successor); // Merge the register state of |successors| into the current register state. void MergeStateFrom(const InstructionBlock::Successors& successors); // Spill a register in a previously processed successor block when merging // state into the current block. void SpillRegisterAtMerge(RegisterState* reg_state, RegisterIndex reg, MachineRepresentation rep); // Introduce a gap move to move |virtual_register| from reg |from| to reg |to| // on entry to a |successor| block. void MoveRegisterOnMerge(RegisterIndex from, RegisterIndex to, VirtualRegisterData& virtual_register, RpoNumber successor, RegisterState* succ_state); // Update the virtual register data with the data in register_state_. void UpdateVirtualRegisterState(); // Returns true if |virtual_register| is defined after use position |pos| at // |instr_index|. bool DefinedAfter(int virtual_register, int instr_index, UsePosition pos); // Allocate |reg| to |virtual_register| for |operand| of the instruction at // |instr_index|. The register will be reserved for this use for the specified // |pos| use position. void AllocateUse(RegisterIndex reg, VirtualRegisterData& virtual_register, InstructionOperand* operand, int instr_index, UsePosition pos); // Allocate |reg| to |virtual_register| as a pending use (i.e., only if the // register is not subsequently spilled) for |operand| of the instruction at // |instr_index|. void AllocatePendingUse(RegisterIndex reg, VirtualRegisterData& virtual_register, InstructionOperand* operand, bool can_be_constant, int instr_index); // Allocate |operand| to |reg| and add a gap move to move |virtual_register| // to this register for the instruction at |instr_index|. |reg| will be // reserved for this use for the specified |pos| use position. void AllocateUseWithMove(RegisterIndex reg, VirtualRegisterData& virtual_register, UnallocatedOperand* operand, int instr_index, UsePosition pos); void CommitRegister(RegisterIndex reg, int virtual_register, MachineRepresentation rep, InstructionOperand* operand, UsePosition pos); void SpillRegister(RegisterIndex reg); void SpillRegisterAndPotentialSimdSibling(RegisterIndex reg, MachineRepresentation rep); void SpillRegisterForVirtualRegister(int virtual_register); // Pre-emptively spill the register at the exit of deferred blocks such that // uses of this register in non-deferred blocks don't need to be spilled. void SpillRegisterForDeferred(RegisterIndex reg, int instr_index); // Returns an AllocatedOperand corresponding to the use of |reg| for // |virtual_register|. AllocatedOperand AllocatedOperandForReg(RegisterIndex reg, MachineRepresentation rep); void ReserveFixedRegister(const UnallocatedOperand* operand, int virtual_register, MachineRepresentation rep, int instr_index, UsePosition pos); RegisterIndex AllocateOutput(UnallocatedOperand* operand, VirtualRegisterData& vreg_data, int instr_index, UsePosition pos); void EmitGapMoveFromOutput(InstructionOperand from, InstructionOperand to, int instr_index); // Helper functions to choose the best register for a given operand. V8_INLINE RegisterIndex ChooseRegisterFor(VirtualRegisterData& virtual_register, int instr_index, UsePosition pos, bool must_use_register); V8_INLINE RegisterIndex ChooseRegisterFor(MachineRepresentation rep, UsePosition pos, bool must_use_register); V8_INLINE RegisterIndex ChooseFreeRegister(MachineRepresentation rep, UsePosition pos); V8_INLINE RegisterIndex ChooseFreeRegister( const RegisterBitVector& allocated_regs, MachineRepresentation rep); V8_INLINE RegisterIndex ChooseRegisterToSpill(MachineRepresentation rep, UsePosition pos); // Assign, free and mark use's of |reg| for a |virtual_register| at use // position |pos|. V8_INLINE void AssignRegister(RegisterIndex reg, int virtual_register, MachineRepresentation rep, UsePosition pos); V8_INLINE void FreeRegister(RegisterIndex reg, int virtual_register, MachineRepresentation rep); V8_INLINE void MarkRegisterUse(RegisterIndex reg, MachineRepresentation rep, UsePosition pos); V8_INLINE RegisterBitVector InUseBitmap(UsePosition pos); V8_INLINE bool IsValidForRep(RegisterIndex reg, MachineRepresentation rep); // Return the register allocated to |virtual_register|, if any. RegisterIndex RegisterForVirtualRegister(int virtual_register); // Return the virtual register being held by |reg|, or kInvalidVirtualRegister // if |reg| is unallocated. int VirtualRegisterForRegister(RegisterIndex reg); // Returns true if |reg| is unallocated or holds |virtual_register|. bool IsFreeOrSameVirtualRegister(RegisterIndex reg, int virtual_register); // Returns true if |virtual_register| is unallocated or is allocated to |reg|. bool VirtualRegisterIsUnallocatedOrInReg(int virtual_register, RegisterIndex reg); // If {if kFPAliasing kind is COMBINE}, two FP registers alias one SIMD // register. This returns the index of the higher aliasing FP register from // the SIMD register index (which is the same as the lower register index). RegisterIndex simdSibling(RegisterIndex reg) const { CHECK_EQ(kFPAliasing, AliasingKind::kCombine); // Statically evaluated. RegisterIndex sibling = RegisterIndex{reg.ToInt() + 1}; #ifdef DEBUG // Check that {reg} is indeed the lower SIMD half and {sibling} is the // upper half. int double_reg_base_code; DCHECK_EQ(2, data_->config()->GetAliases( MachineRepresentation::kSimd128, ToRegCode(reg, MachineRepresentation::kSimd128), MachineRepresentation::kFloat64, &double_reg_base_code)); DCHECK_EQ(reg, FromRegCode(double_reg_base_code, MachineRepresentation::kFloat64)); DCHECK_EQ(sibling, FromRegCode(double_reg_base_code + 1, MachineRepresentation::kFloat64)); #endif // DEBUG return sibling; } // Returns a RegisterBitVector representing the allocated registers in // reg_state. RegisterBitVector GetAllocatedRegBitVector(RegisterState* reg_state); // Check the consistency of reg->vreg and vreg->reg mappings if a debug build. void CheckConsistency(); VirtualRegisterData& VirtualRegisterDataFor(int virtual_register) const { return data_->VirtualRegisterDataFor(virtual_register); } // Virtual register to register mapping. ZoneVector virtual_register_to_reg_; // Current register state during allocation. RegisterState* register_state_; // The current block being processed. const InstructionBlock* current_block_; const RegisterKind kind_; const int num_allocatable_registers_; ZoneVector reg_code_to_index_; const int* index_to_reg_code_; BitVector* assigned_registers_; MidTierRegisterAllocationData* data_; RegisterBitVector in_use_at_instr_start_bits_; RegisterBitVector in_use_at_instr_end_bits_; RegisterBitVector allocated_registers_bits_; RegisterBitVector same_input_output_registers_bits_; // These fields are only used when kFPAliasing == COMBINE. base::Optional> float32_reg_code_to_index_; base::Optional> index_to_float32_reg_code_; base::Optional> simd128_reg_code_to_index_; base::Optional> index_to_simd128_reg_code_; }; SinglePassRegisterAllocator::SinglePassRegisterAllocator( RegisterKind kind, MidTierRegisterAllocationData* data) : virtual_register_to_reg_(data->code()->VirtualRegisterCount(), data->allocation_zone()), register_state_(nullptr), current_block_(nullptr), kind_(kind), num_allocatable_registers_( GetAllocatableRegisterCount(data->config(), kind)), reg_code_to_index_(GetRegisterCount(data->config(), kind), data->allocation_zone()), index_to_reg_code_(GetAllocatableRegisterCodes(data->config(), kind)), assigned_registers_(data->code_zone()->New( GetRegisterCount(data->config(), kind), data->code_zone())), data_(data), in_use_at_instr_start_bits_(), in_use_at_instr_end_bits_(), allocated_registers_bits_(), same_input_output_registers_bits_() { for (int i = 0; i < num_allocatable_registers_; i++) { int reg_code = index_to_reg_code_[i]; reg_code_to_index_[reg_code] = RegisterIndex(i); } // If the architecture has COMBINE FP aliasing, initialize float and // simd128 specific register details. if (kFPAliasing == AliasingKind::kCombine && kind == RegisterKind::kDouble) { const RegisterConfiguration* config = data->config(); // Float registers. float32_reg_code_to_index_.emplace(config->num_float_registers(), data->allocation_zone()); index_to_float32_reg_code_.emplace(num_allocatable_registers_, -1, data->allocation_zone()); for (int i = 0; i < config->num_allocatable_float_registers(); i++) { int reg_code = config->allocatable_float_codes()[i]; // Only add even float register codes to avoid overlapping multiple float // registers on each RegisterIndex. if (reg_code % 2 != 0) continue; int double_reg_base_code; CHECK_EQ(1, config->GetAliases(MachineRepresentation::kFloat32, reg_code, MachineRepresentation::kFloat64, &double_reg_base_code)); RegisterIndex double_reg(reg_code_to_index_[double_reg_base_code]); float32_reg_code_to_index_->at(reg_code) = double_reg; index_to_float32_reg_code_->at(double_reg.ToInt()) = reg_code; } // Simd128 registers. simd128_reg_code_to_index_.emplace(config->num_simd128_registers(), data->allocation_zone()); index_to_simd128_reg_code_.emplace(num_allocatable_registers_, -1, data->allocation_zone()); for (int i = 0; i < config->num_allocatable_simd128_registers(); i++) { int reg_code = config->allocatable_simd128_codes()[i]; int double_reg_base_code; CHECK_EQ(2, config->GetAliases(MachineRepresentation::kSimd128, reg_code, MachineRepresentation::kFloat64, &double_reg_base_code)); RegisterIndex double_reg{reg_code_to_index_[double_reg_base_code]}; // We later rely on the fact that the two aliasing double registers are at // consecutive indexes. DCHECK_EQ(double_reg.ToInt() + 1, reg_code_to_index_[double_reg_base_code + 1].ToInt()); simd128_reg_code_to_index_->at(reg_code) = double_reg; index_to_simd128_reg_code_->at(double_reg.ToInt()) = reg_code; } } } int SinglePassRegisterAllocator::VirtualRegisterForRegister(RegisterIndex reg) { return register_state_->VirtualRegisterForRegister(reg); } RegisterIndex SinglePassRegisterAllocator::RegisterForVirtualRegister( int virtual_register) { DCHECK_NE(virtual_register, InstructionOperand::kInvalidVirtualRegister); return virtual_register_to_reg_[virtual_register]; } void SinglePassRegisterAllocator::UpdateForDeferredBlock(int instr_index) { if (!register_state_) return; for (RegisterIndex reg : *register_state_) { SpillRegisterForDeferred(reg, instr_index); } } void SinglePassRegisterAllocator::EndInstruction() { in_use_at_instr_end_bits_.Reset(); in_use_at_instr_start_bits_.Reset(); same_input_output_registers_bits_.Reset(); } void SinglePassRegisterAllocator::StartBlock(const InstructionBlock* block) { DCHECK_NULL(register_state_); DCHECK_NULL(current_block_); DCHECK(in_use_at_instr_start_bits_.IsEmpty()); DCHECK(in_use_at_instr_end_bits_.IsEmpty()); DCHECK(allocated_registers_bits_.IsEmpty()); DCHECK(same_input_output_registers_bits_.IsEmpty()); // Update the current block we are processing. current_block_ = block; if (block->SuccessorCount() == 1) { // If we have only a single successor, we can directly clone our state // from that successor. CloneStateFrom(block->successors()[0]); } else if (block->SuccessorCount() > 1) { // If we have multiple successors, merge the state from all the successors // into our block. MergeStateFrom(block->successors()); } } void SinglePassRegisterAllocator::EndBlock(const InstructionBlock* block) { DCHECK(in_use_at_instr_start_bits_.IsEmpty()); DCHECK(in_use_at_instr_end_bits_.IsEmpty()); DCHECK(same_input_output_registers_bits_.IsEmpty()); // If we didn't allocate any registers of this kind, or we have reached the // start, nothing to do here. if (!register_state_ || block->PredecessorCount() == 0) { current_block_ = nullptr; return; } if (block->PredecessorCount() > 1) { register_state_->AddSharedUses(static_cast(block->PredecessorCount()) - 1); } BlockState& block_state = data_->block_state(block->rpo_number()); block_state.set_register_in_state(register_state_, kind()); // Remove virtual register to register mappings and clear register state. // We will update the register state when starting the next block. while (!allocated_registers_bits_.IsEmpty()) { RegisterIndex reg = allocated_registers_bits_.GetFirstSet(); VirtualRegisterData& vreg_data = data_->VirtualRegisterDataFor(VirtualRegisterForRegister(reg)); FreeRegister(reg, vreg_data.vreg(), vreg_data.rep()); } current_block_ = nullptr; register_state_ = nullptr; } void SinglePassRegisterAllocator::CloneStateFrom(RpoNumber successor) { BlockState& block_state = data_->block_state(successor); RegisterState* successor_registers = block_state.register_in_state(kind()); if (successor_registers != nullptr) { if (data_->GetBlock(successor)->PredecessorCount() == 1) { // Avoids cloning for successors where we are the only predecessor. register_state_ = successor_registers; } else { register_state_ = successor_registers->Clone(); } UpdateVirtualRegisterState(); } } void SinglePassRegisterAllocator::MergeStateFrom( const InstructionBlock::Successors& successors) { for (RpoNumber successor : successors) { BlockState& block_state = data_->block_state(successor); RegisterState* successor_registers = block_state.register_in_state(kind()); if (successor_registers == nullptr) { continue; } if (register_state_ == nullptr) { // If we haven't merged any register state yet, just use successor's // register directly. register_state_ = successor_registers; UpdateVirtualRegisterState(); } else { // Otherwise try to merge our state with the existing state. RegisterBitVector processed_regs; RegisterBitVector succ_allocated_regs = GetAllocatedRegBitVector(successor_registers); for (RegisterIndex reg : *successor_registers) { // If |reg| isn't allocated in successor registers, nothing to do. if (!successor_registers->IsAllocated(reg)) continue; int virtual_register = successor_registers->VirtualRegisterForRegister(reg); VirtualRegisterData& vreg_data = VirtualRegisterDataFor(virtual_register); MachineRepresentation rep = vreg_data.rep(); // If we have already processed |reg|, e.g., adding gap move to that // register, then we can continue. if (processed_regs.Contains(reg, rep)) continue; processed_regs.Add(reg, rep); bool reg_in_use = register_state_->IsAllocated(reg); // For COMBINE FP aliasing, the register is also "in use" if the // FP register for the upper half is allocated. if (kFPAliasing == AliasingKind::kCombine && rep == MachineRepresentation::kSimd128) { reg_in_use |= register_state_->IsAllocated(simdSibling(reg)); } // Similarly (but the other way around), the register might be the upper // half of a SIMD register that is allocated. if (kFPAliasing == AliasingKind::kCombine && (rep == MachineRepresentation::kFloat64 || rep == MachineRepresentation::kFloat32)) { int simd_reg_code; CHECK_EQ(1, data_->config()->GetAliases( rep, ToRegCode(reg, rep), MachineRepresentation::kSimd128, &simd_reg_code)); // Sanity check: The SIMD reg code should be the shifted FP reg code. DCHECK_EQ(simd_reg_code, ToRegCode(reg, rep) >> (rep == MachineRepresentation::kFloat64 ? 1 : 2)); RegisterIndex simd_reg = FromRegCode(simd_reg_code, MachineRepresentation::kSimd128); reg_in_use |= simd_reg.is_valid() && register_state_->IsAllocated(simd_reg) && VirtualRegisterDataFor(VirtualRegisterForRegister(simd_reg)) .rep() == MachineRepresentation::kSimd128; } if (!reg_in_use) { DCHECK(successor_registers->IsAllocated(reg)); if (RegisterForVirtualRegister(virtual_register).is_valid()) { // If we already hold the virtual register in a different register // then spill this register in the sucessor block to avoid // invalidating the 1:1 vreg<->reg mapping. // TODO(rmcilroy): Add a gap move to avoid spilling. SpillRegisterAtMerge(successor_registers, reg, rep); continue; } // Register is free in our current register state, so merge the // successor block's register details into it. register_state_->CopyFrom(reg, successor_registers); AssignRegister(reg, virtual_register, rep, UsePosition::kNone); continue; } // Register is in use in the current register state. if (successor_registers->Equals(reg, register_state_)) { // Both match, keep the merged register data. register_state_->CommitAtMerge(reg); continue; } // Try to find a new register for this successor register in the // merge block, and add a gap move on entry of the successor block. RegisterIndex new_reg = RegisterForVirtualRegister(virtual_register); if (!new_reg.is_valid()) { new_reg = ChooseFreeRegister( allocated_registers_bits_.Union(succ_allocated_regs), rep); } else if (new_reg != reg) { // Spill the |new_reg| in the successor block to be able to use it // for this gap move. It would be spilled anyway since it contains // a different virtual register than the merge block. SpillRegisterAtMerge(successor_registers, new_reg, rep); } if (new_reg.is_valid()) { MoveRegisterOnMerge(new_reg, reg, vreg_data, successor, successor_registers); processed_regs.Add(new_reg, rep); } else { SpillRegisterAtMerge(successor_registers, reg, rep); } } } } } RegisterBitVector SinglePassRegisterAllocator::GetAllocatedRegBitVector( RegisterState* reg_state) { RegisterBitVector allocated_regs; for (RegisterIndex reg : *reg_state) { if (reg_state->IsAllocated(reg)) { VirtualRegisterData virtual_register = VirtualRegisterDataFor(reg_state->VirtualRegisterForRegister(reg)); allocated_regs.Add(reg, virtual_register.rep()); } } return allocated_regs; } void SinglePassRegisterAllocator::SpillRegisterAtMerge( RegisterState* reg_state, RegisterIndex reg, MachineRepresentation rep) { DCHECK_NE(reg_state, register_state_); if (reg_state->IsAllocated(reg)) { int virtual_register = reg_state->VirtualRegisterForRegister(reg); VirtualRegisterData& vreg_data = data_->VirtualRegisterDataFor(virtual_register); AllocatedOperand allocated = AllocatedOperandForReg(reg, vreg_data.rep()); reg_state->Spill(reg, allocated, current_block_, data_); } // Also spill the "simd sibling" register if we want to use {reg} for SIMD. if (kFPAliasing == AliasingKind::kCombine && rep == MachineRepresentation::kSimd128) { RegisterIndex sibling = simdSibling(reg); if (reg_state->IsAllocated(sibling)) { int virtual_register = reg_state->VirtualRegisterForRegister(sibling); VirtualRegisterData& vreg_data = data_->VirtualRegisterDataFor(virtual_register); AllocatedOperand allocated = AllocatedOperandForReg(sibling, vreg_data.rep()); reg_state->Spill(sibling, allocated, current_block_, data_); } } // Similarly, spill the whole SIMD register if we want to use a part of it. if (kFPAliasing == AliasingKind::kCombine && (rep == MachineRepresentation::kFloat64 || rep == MachineRepresentation::kFloat32)) { int simd_reg_code; CHECK_EQ(1, data_->config()->GetAliases(rep, ToRegCode(reg, rep), MachineRepresentation::kSimd128, &simd_reg_code)); // Sanity check: The SIMD register code should be the shifted {reg_code}. DCHECK_EQ(simd_reg_code, ToRegCode(reg, rep) >> (rep == MachineRepresentation::kFloat64 ? 1 : 2)); RegisterIndex simd_reg = FromRegCode(simd_reg_code, MachineRepresentation::kSimd128); DCHECK(!simd_reg.is_valid() || simd_reg == reg || simdSibling(simd_reg) == reg); if (simd_reg.is_valid() && reg_state->IsAllocated(simd_reg)) { int virtual_register = reg_state->VirtualRegisterForRegister(simd_reg); VirtualRegisterData& vreg_data = data_->VirtualRegisterDataFor(virtual_register); if (vreg_data.rep() == MachineRepresentation::kSimd128) { AllocatedOperand allocated = AllocatedOperandForReg(simd_reg, vreg_data.rep()); reg_state->Spill(simd_reg, allocated, current_block_, data_); } } } } void SinglePassRegisterAllocator::MoveRegisterOnMerge( RegisterIndex from, RegisterIndex to, VirtualRegisterData& virtual_register, RpoNumber successor, RegisterState* succ_state) { int instr_index = data_->GetBlock(successor)->first_instruction_index(); MoveOperands* move = data_->AddPendingOperandGapMove(instr_index, Instruction::START); succ_state->Commit(to, AllocatedOperandForReg(to, virtual_register.rep()), &move->destination(), data_); AllocatePendingUse(from, virtual_register, &move->source(), true, instr_index); } void SinglePassRegisterAllocator::UpdateVirtualRegisterState() { // Update to the new register state and update vreg_to_register map and // resetting any shared registers that were spilled by another block. DCHECK_NOT_NULL(register_state_); for (RegisterIndex reg : *register_state_) { register_state_->ResetIfSpilledWhileShared(reg); int virtual_register = VirtualRegisterForRegister(reg); if (virtual_register != InstructionOperand::kInvalidVirtualRegister) { MachineRepresentation rep = data_->VirtualRegisterDataFor(virtual_register).rep(); AssignRegister(reg, virtual_register, rep, UsePosition::kNone); } } CheckConsistency(); } void SinglePassRegisterAllocator::CheckConsistency() { #ifdef DEBUG int virtual_register = -1; for (RegisterIndex reg : virtual_register_to_reg_) { ++virtual_register; if (!reg.is_valid()) continue; DCHECK_NOT_NULL(register_state_); // The register must be set to allocated. DCHECK(register_state_->IsAllocated(reg)); // reg <-> vreg linking is consistent. DCHECK_EQ(virtual_register, VirtualRegisterForRegister(reg)); } DCHECK_EQ(data_->code()->VirtualRegisterCount() - 1, virtual_register); RegisterBitVector used_registers; for (RegisterIndex reg : *register_state_) { if (!register_state_->IsAllocated(reg)) continue; int virtual_register = VirtualRegisterForRegister(reg); // reg <-> vreg linking is consistent. DCHECK_EQ(reg, RegisterForVirtualRegister(virtual_register)); MachineRepresentation rep = VirtualRegisterDataFor(virtual_register).rep(); // Allocated registers do not overlap. DCHECK(!used_registers.Contains(reg, rep)); used_registers.Add(reg, rep); } // The {allocated_registers_bits_} bitvector is accurate. DCHECK_EQ(used_registers, allocated_registers_bits_); #endif } RegisterIndex SinglePassRegisterAllocator::FromRegCode( int reg_code, MachineRepresentation rep) const { if (kFPAliasing == AliasingKind::kCombine && kind() == RegisterKind::kDouble) { if (rep == MachineRepresentation::kFloat32) { return RegisterIndex(float32_reg_code_to_index_->at(reg_code)); } else if (rep == MachineRepresentation::kSimd128) { return RegisterIndex(simd128_reg_code_to_index_->at(reg_code)); } DCHECK_EQ(rep, MachineRepresentation::kFloat64); } return RegisterIndex(reg_code_to_index_[reg_code]); } int SinglePassRegisterAllocator::ToRegCode(RegisterIndex reg, MachineRepresentation rep) const { if (kFPAliasing == AliasingKind::kCombine && kind() == RegisterKind::kDouble) { if (rep == MachineRepresentation::kFloat32) { DCHECK_NE(-1, index_to_float32_reg_code_->at(reg.ToInt())); return index_to_float32_reg_code_->at(reg.ToInt()); } else if (rep == MachineRepresentation::kSimd128) { DCHECK_NE(-1, index_to_simd128_reg_code_->at(reg.ToInt())); return index_to_simd128_reg_code_->at(reg.ToInt()); } DCHECK_EQ(rep, MachineRepresentation::kFloat64); } return index_to_reg_code_[reg.ToInt()]; } bool SinglePassRegisterAllocator::VirtualRegisterIsUnallocatedOrInReg( int virtual_register, RegisterIndex reg) { RegisterIndex existing_reg = RegisterForVirtualRegister(virtual_register); return !existing_reg.is_valid() || existing_reg == reg; } bool SinglePassRegisterAllocator::IsFreeOrSameVirtualRegister( RegisterIndex reg, int virtual_register) { int allocated_vreg = VirtualRegisterForRegister(reg); return allocated_vreg == InstructionOperand::kInvalidVirtualRegister || allocated_vreg == virtual_register; } void SinglePassRegisterAllocator::EmitGapMoveFromOutput(InstructionOperand from, InstructionOperand to, int instr_index) { DCHECK(from.IsAllocated()); DCHECK(to.IsAllocated()); const InstructionBlock* block = current_block_; DCHECK_EQ(data_->GetBlock(instr_index), block); if (instr_index == block->last_instruction_index()) { // Add gap move to the first instruction of every successor block. for (const RpoNumber& succ : block->successors()) { const InstructionBlock* successor = data_->GetBlock(succ); DCHECK_EQ(1, successor->PredecessorCount()); data_->AddGapMove(successor->first_instruction_index(), Instruction::START, from, to); } } else { data_->AddGapMove(instr_index + 1, Instruction::START, from, to); } } void SinglePassRegisterAllocator::AssignRegister(RegisterIndex reg, int virtual_register, MachineRepresentation rep, UsePosition pos) { assigned_registers()->Add(ToRegCode(reg, rep)); allocated_registers_bits_.Add(reg, rep); MarkRegisterUse(reg, rep, pos); if (virtual_register != InstructionOperand::kInvalidVirtualRegister) { virtual_register_to_reg_[virtual_register] = reg; } } void SinglePassRegisterAllocator::MarkRegisterUse(RegisterIndex reg, MachineRepresentation rep, UsePosition pos) { if (pos == UsePosition::kStart || pos == UsePosition::kAll) { in_use_at_instr_start_bits_.Add(reg, rep); } if (pos == UsePosition::kEnd || pos == UsePosition::kAll) { in_use_at_instr_end_bits_.Add(reg, rep); } } void SinglePassRegisterAllocator::FreeRegister(RegisterIndex reg, int virtual_register, MachineRepresentation rep) { allocated_registers_bits_.Clear(reg, rep); if (virtual_register != InstructionOperand::kInvalidVirtualRegister) { virtual_register_to_reg_[virtual_register] = RegisterIndex::Invalid(); } } RegisterIndex SinglePassRegisterAllocator::ChooseRegisterFor( VirtualRegisterData& virtual_register, int instr_index, UsePosition pos, bool must_use_register) { DCHECK_NE(pos, UsePosition::kNone); MachineRepresentation rep = virtual_register.rep(); // If register is already allocated to the virtual register, use that. RegisterIndex reg = RegisterForVirtualRegister(virtual_register.vreg()); // If we don't need a register, only try to allocate one if the virtual // register hasn't yet been spilled, to try to avoid spilling it. if (!reg.is_valid() && (must_use_register || !virtual_register.IsSpilledAt(instr_index, data_))) { reg = ChooseRegisterFor(rep, pos, must_use_register); } else if (reg.is_valid() && same_input_output_registers_bits_.Contains(reg, rep) && pos != UsePosition::kStart) { // If we are trying to allocate a register that was used as a // same_input_output operand, then we can't use it for an input that expands // past UsePosition::kStart. if (must_use_register) { // Use a new register instead. reg = ChooseRegisterFor(rep, pos, must_use_register); } else { // Use a spill slot. reg = RegisterIndex::Invalid(); } } return reg; } RegisterIndex SinglePassRegisterAllocator::ChooseRegisterFor( MachineRepresentation rep, UsePosition pos, bool must_use_register) { DCHECK_NE(pos, UsePosition::kNone); RegisterIndex reg = ChooseFreeRegister(rep, pos); if (!reg.is_valid() && must_use_register) { reg = ChooseRegisterToSpill(rep, pos); SpillRegisterAndPotentialSimdSibling(reg, rep); } return reg; } RegisterBitVector SinglePassRegisterAllocator::InUseBitmap(UsePosition pos) { switch (pos) { case UsePosition::kStart: return in_use_at_instr_start_bits_; case UsePosition::kEnd: return in_use_at_instr_end_bits_; case UsePosition::kAll: return in_use_at_instr_start_bits_.Union(in_use_at_instr_end_bits_); case UsePosition::kNone: UNREACHABLE(); } } bool SinglePassRegisterAllocator::IsValidForRep(RegisterIndex reg, MachineRepresentation rep) { if (kFPAliasing != AliasingKind::kCombine || kind() == RegisterKind::kGeneral) { return true; } else { switch (rep) { case MachineRepresentation::kFloat32: return index_to_float32_reg_code_->at(reg.ToInt()) != -1; case MachineRepresentation::kFloat64: return true; case MachineRepresentation::kSimd128: return index_to_simd128_reg_code_->at(reg.ToInt()) != -1; default: UNREACHABLE(); } } } RegisterIndex SinglePassRegisterAllocator::ChooseFreeRegister( MachineRepresentation rep, UsePosition pos) { // Take the first free, non-blocked register, if available. // TODO(rmcilroy): Consider a better heuristic. RegisterBitVector allocated_or_in_use = InUseBitmap(pos).Union(allocated_registers_bits_); return ChooseFreeRegister(allocated_or_in_use, rep); } RegisterIndex SinglePassRegisterAllocator::ChooseFreeRegister( const RegisterBitVector& allocated_regs, MachineRepresentation rep) { RegisterIndex chosen_reg = RegisterIndex::Invalid(); if (kFPAliasing != AliasingKind::kCombine || kind() == RegisterKind::kGeneral) { chosen_reg = allocated_regs.GetFirstCleared(num_allocatable_registers_); } else { // If we don't have simple fp aliasing, we need to check each register // individually to get one with the required representation. for (RegisterIndex reg : *register_state_) { if (IsValidForRep(reg, rep) && !allocated_regs.Contains(reg, rep)) { chosen_reg = reg; break; } } } DCHECK_IMPLIES(chosen_reg.is_valid(), IsValidForRep(chosen_reg, rep)); return chosen_reg; } RegisterIndex SinglePassRegisterAllocator::ChooseRegisterToSpill( MachineRepresentation rep, UsePosition pos) { RegisterBitVector in_use = InUseBitmap(pos); // Choose a register that will need to be spilled. Preferentially choose: // - A register with only pending uses, to avoid having to add a gap move for // a non-pending use. // - A register holding a virtual register that has already been spilled, to // avoid adding a new gap move to spill the virtual register when it is // output. // - Prefer the register holding the virtual register with the earliest // definition point, since it is more likely to be spilled anyway. RegisterIndex chosen_reg; int earliest_definition = kMaxInt; bool pending_only_use = false; bool already_spilled = false; for (RegisterIndex reg : *register_state_) { // Skip if register is in use, or not valid for representation. if (!IsValidForRep(reg, rep) || in_use.Contains(reg, rep)) continue; // With non-simple FP aliasing, a SIMD register might block more than one FP // register. DCHECK_IMPLIES(kFPAliasing != AliasingKind::kCombine, register_state_->IsAllocated(reg)); if (kFPAliasing == AliasingKind::kCombine && !register_state_->IsAllocated(reg)) continue; VirtualRegisterData& vreg_data = VirtualRegisterDataFor(VirtualRegisterForRegister(reg)); if ((!pending_only_use && register_state_->HasPendingUsesOnly(reg)) || (!already_spilled && vreg_data.HasSpillOperand()) || vreg_data.output_instr_index() < earliest_definition) { chosen_reg = reg; earliest_definition = vreg_data.output_instr_index(); pending_only_use = register_state_->HasPendingUsesOnly(reg); already_spilled = vreg_data.HasSpillOperand(); } } // There should always be an unblocked register available. DCHECK(chosen_reg.is_valid()); DCHECK(IsValidForRep(chosen_reg, rep)); return chosen_reg; } void SinglePassRegisterAllocator::CommitRegister(RegisterIndex reg, int virtual_register, MachineRepresentation rep, InstructionOperand* operand, UsePosition pos) { // Committing the output operation, and mark the register use in this // instruction, then mark it as free going forward. AllocatedOperand allocated = AllocatedOperandForReg(reg, rep); register_state_->Commit(reg, allocated, operand, data_); MarkRegisterUse(reg, rep, pos); FreeRegister(reg, virtual_register, rep); CheckConsistency(); } void SinglePassRegisterAllocator::SpillRegister(RegisterIndex reg) { if (!register_state_->IsAllocated(reg)) return; // Spill the register and free register. int virtual_register = VirtualRegisterForRegister(reg); MachineRepresentation rep = VirtualRegisterDataFor(virtual_register).rep(); AllocatedOperand allocated = AllocatedOperandForReg(reg, rep); register_state_->Spill(reg, allocated, current_block_, data_); FreeRegister(reg, virtual_register, rep); } void SinglePassRegisterAllocator::SpillRegisterAndPotentialSimdSibling( RegisterIndex reg, MachineRepresentation rep) { SpillRegister(reg); if (kFPAliasing == AliasingKind::kCombine && rep == MachineRepresentation::kSimd128) { SpillRegister(simdSibling(reg)); } } void SinglePassRegisterAllocator::SpillAllRegisters() { if (!register_state_) return; for (RegisterIndex reg : *register_state_) { SpillRegister(reg); } } void SinglePassRegisterAllocator::SpillRegisterForVirtualRegister( int virtual_register) { DCHECK_NE(virtual_register, InstructionOperand::kInvalidVirtualRegister); RegisterIndex reg = RegisterForVirtualRegister(virtual_register); if (reg.is_valid()) { SpillRegister(reg); } } void SinglePassRegisterAllocator::SpillRegisterForDeferred(RegisterIndex reg, int instr_index) { // Committing the output operation, and mark the register use in this // instruction, then mark it as free going forward. if (register_state_->IsAllocated(reg) && register_state_->IsShared(reg)) { VirtualRegisterData& virtual_register = data_->VirtualRegisterDataFor(VirtualRegisterForRegister(reg)); AllocatedOperand allocated = AllocatedOperandForReg(reg, virtual_register.rep()); register_state_->SpillForDeferred(reg, allocated, instr_index, data_); FreeRegister(reg, virtual_register.vreg(), virtual_register.rep()); } CheckConsistency(); } void SinglePassRegisterAllocator::AllocateDeferredBlockSpillOutput( int instr_index, RpoNumber deferred_block, VirtualRegisterData& virtual_register) { DCHECK(data_->GetBlock(deferred_block)->IsDeferred()); DCHECK(virtual_register.HasSpillRange()); if (!virtual_register.NeedsSpillAtOutput() && !DefinedAfter(virtual_register.vreg(), instr_index, UsePosition::kEnd)) { // If a register has been assigned to the virtual register, and the virtual // register still doesn't need to be spilled at it's output, and add a // pending move to output the virtual register to it's spill slot on entry // of the deferred block (to avoid spilling on in non-deferred code). // TODO(rmcilroy): Consider assigning a register even if the virtual // register isn't yet assigned - currently doing this regresses performance. RegisterIndex reg = RegisterForVirtualRegister(virtual_register.vreg()); if (reg.is_valid()) { int deferred_block_start = data_->GetBlock(deferred_block)->first_instruction_index(); register_state_->MoveToSpillSlotOnDeferred(reg, virtual_register.vreg(), deferred_block_start, data_); return; } else { virtual_register.MarkAsNeedsSpillAtOutput(); } } } AllocatedOperand SinglePassRegisterAllocator::AllocatedOperandForReg( RegisterIndex reg, MachineRepresentation rep) { return AllocatedOperand(AllocatedOperand::REGISTER, rep, ToRegCode(reg, rep)); } void SinglePassRegisterAllocator::AllocateUse( RegisterIndex reg, VirtualRegisterData& virtual_register, InstructionOperand* operand, int instr_index, UsePosition pos) { DCHECK(IsFreeOrSameVirtualRegister(reg, virtual_register.vreg())); AllocatedOperand allocated = AllocatedOperandForReg(reg, virtual_register.rep()); register_state_->Commit(reg, allocated, operand, data_); register_state_->AllocateUse(reg, virtual_register.vreg(), operand, instr_index, data_); AssignRegister(reg, virtual_register.vreg(), virtual_register.rep(), pos); CheckConsistency(); } void SinglePassRegisterAllocator::AllocatePendingUse( RegisterIndex reg, VirtualRegisterData& virtual_register, InstructionOperand* operand, bool can_be_constant, int instr_index) { DCHECK(IsFreeOrSameVirtualRegister(reg, virtual_register.vreg())); register_state_->AllocatePendingUse(reg, virtual_register.vreg(), operand, can_be_constant, instr_index); // Since this is a pending use and the operand doesn't need to use a register, // allocate with UsePosition::kNone to avoid blocking it's use by other // operands in this instruction. AssignRegister(reg, virtual_register.vreg(), virtual_register.rep(), UsePosition::kNone); CheckConsistency(); } void SinglePassRegisterAllocator::AllocateUseWithMove( RegisterIndex reg, VirtualRegisterData& virtual_register, UnallocatedOperand* operand, int instr_index, UsePosition pos) { AllocatedOperand to = AllocatedOperandForReg(reg, virtual_register.rep()); UnallocatedOperand from = UnallocatedOperand(UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT, virtual_register.vreg()); data_->AddGapMove(instr_index, Instruction::END, from, to); InstructionOperand::ReplaceWith(operand, &to); MarkRegisterUse(reg, virtual_register.rep(), pos); CheckConsistency(); } void SinglePassRegisterAllocator::AllocateInput( UnallocatedOperand* operand, VirtualRegisterData& virtual_register, int instr_index) { EnsureRegisterState(); // Spill slot policy operands. if (operand->HasFixedSlotPolicy()) { // If the operand is from a fixed slot, allocate it to that fixed slot, // then add a gap move from an unconstrained copy of that input operand, // and spill the gap move's input operand. // TODO(rmcilroy): We could allocate a register for the gap move however // we would need to wait until we've done all the allocations for the // instruction since the allocation needs to reflect the state before // the instruction (at the gap move). For now spilling is fine since // fixed slot inputs are uncommon. UnallocatedOperand input_copy( UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT, virtual_register.vreg()); AllocatedOperand allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT, virtual_register.rep(), operand->fixed_slot_index()); InstructionOperand::ReplaceWith(operand, &allocated); MoveOperands* move_op = data_->AddGapMove(instr_index, Instruction::END, input_copy, *operand); virtual_register.SpillOperand(&move_op->source(), instr_index, true, data_); return; } else if (operand->HasSlotPolicy()) { virtual_register.SpillOperand(operand, instr_index, false, data_); return; } // Otherwise try to allocate a register for the operation. UsePosition pos = operand->IsUsedAtStart() ? UsePosition::kStart : UsePosition::kAll; if (operand->HasFixedRegisterPolicy() || operand->HasFixedFPRegisterPolicy()) { // With a fixed register operand, we must use that register. RegisterIndex reg = FromRegCode(operand->fixed_register_index(), virtual_register.rep()); if (!VirtualRegisterIsUnallocatedOrInReg(virtual_register.vreg(), reg)) { // If the virtual register is already in a different register, then just // add a gap move from that register to the fixed register. AllocateUseWithMove(reg, virtual_register, operand, instr_index, pos); } else { // Otherwise allocate a use of the fixed register for |virtual_register|. AllocateUse(reg, virtual_register, operand, instr_index, pos); } } else { bool must_use_register = operand->HasRegisterPolicy(); RegisterIndex reg = ChooseRegisterFor(virtual_register, instr_index, pos, must_use_register); if (!reg.is_valid()) { // The register will have been spilled at this use. virtual_register.SpillOperand( operand, instr_index, operand->HasRegisterOrSlotOrConstantPolicy(), data_); } else if (!must_use_register) { // We might later dedice to spill this register; allocate a pending use. AllocatePendingUse(reg, virtual_register, operand, operand->HasRegisterOrSlotOrConstantPolicy(), instr_index); } else if (VirtualRegisterIsUnallocatedOrInReg(virtual_register.vreg(), reg)) { // The register is directly usable. AllocateUse(reg, virtual_register, operand, instr_index, pos); } else { // We assigned another register to the vreg before. {ChooseRegisterFor} // chose a different one (e.g. to fulfill a "unique register" constraint // for a vreg that was previously used for the input corresponding to the // "same as input" output), so add a gap move to copy the input value to // that new register. AllocateUseWithMove(reg, virtual_register, operand, instr_index, pos); } } } void SinglePassRegisterAllocator::AllocateGapMoveInput( UnallocatedOperand* operand, VirtualRegisterData& vreg_data, int instr_index) { EnsureRegisterState(); // Gap move inputs should be unconstrained. DCHECK(operand->HasRegisterOrSlotOrConstantPolicy()); RegisterIndex reg = ChooseRegisterFor(vreg_data, instr_index, UsePosition::kStart, false); if (reg.is_valid()) { AllocatePendingUse(reg, vreg_data, operand, true, instr_index); } else { vreg_data.SpillOperand(operand, instr_index, true, data_); } } void SinglePassRegisterAllocator::AllocateConstantOutput( ConstantOperand* operand, VirtualRegisterData& vreg_data, int instr_index) { EnsureRegisterState(); // If the constant is allocated to a register, spill it now to add the // necessary gap moves from the constant operand to the register. SpillRegisterForVirtualRegister(vreg_data.vreg()); if (vreg_data.NeedsSpillAtOutput()) { vreg_data.EmitGapMoveFromOutputToSpillSlot(*operand, current_block_, instr_index, data_); } } void SinglePassRegisterAllocator::AllocateOutput(UnallocatedOperand* operand, VirtualRegisterData& vreg_data, int instr_index) { AllocateOutput(operand, vreg_data, instr_index, UsePosition::kEnd); } RegisterIndex SinglePassRegisterAllocator::AllocateOutput( UnallocatedOperand* operand, VirtualRegisterData& vreg_data, int instr_index, UsePosition pos) { EnsureRegisterState(); int virtual_register = vreg_data.vreg(); RegisterIndex reg; if (operand->HasSlotPolicy() || operand->HasFixedSlotPolicy()) { // We can't allocate a register for output given the policy, so make sure // to spill the register holding this virtual register if any. SpillRegisterForVirtualRegister(virtual_register); reg = RegisterIndex::Invalid(); } else if (operand->HasFixedPolicy()) { reg = FromRegCode(operand->fixed_register_index(), vreg_data.rep()); } else { reg = ChooseRegisterFor(vreg_data, instr_index, pos, operand->HasRegisterPolicy()); } // TODO(rmcilroy): support secondary storage. if (!reg.is_valid()) { vreg_data.SpillOperand(operand, instr_index, false, data_); } else { InstructionOperand move_output_to; if (!VirtualRegisterIsUnallocatedOrInReg(virtual_register, reg)) { // If the |virtual register| was in a different register (e.g., due to // the output having a fixed register), then commit its use in that // register here, and move it from the output operand below. RegisterIndex existing_reg = RegisterForVirtualRegister(virtual_register); // Don't mark |existing_reg| as used in this instruction, since it is used // in the (already allocated) following instruction's gap-move. CommitRegister(existing_reg, vreg_data.vreg(), vreg_data.rep(), &move_output_to, UsePosition::kNone); } CommitRegister(reg, vreg_data.vreg(), vreg_data.rep(), operand, pos); if (move_output_to.IsAllocated()) { // Emit a move from output to the register that the |virtual_register| was // allocated to. EmitGapMoveFromOutput(*operand, move_output_to, instr_index); } if (vreg_data.NeedsSpillAtOutput()) { vreg_data.EmitGapMoveFromOutputToSpillSlot( *AllocatedOperand::cast(operand), current_block_, instr_index, data_); } else if (vreg_data.NeedsSpillAtDeferredBlocks()) { vreg_data.EmitDeferredSpillOutputs(data_); } } return reg; } void SinglePassRegisterAllocator::AllocateSameInputOutput( UnallocatedOperand* output, UnallocatedOperand* input, VirtualRegisterData& output_vreg_data, VirtualRegisterData& input_vreg_data, int instr_index) { EnsureRegisterState(); int input_vreg = input_vreg_data.vreg(); int output_vreg = output_vreg_data.vreg(); // The input operand has the details of the register constraints, so replace // the output operand with a copy of the input, with the output's vreg. UnallocatedOperand output_as_input(*input, output_vreg); InstructionOperand::ReplaceWith(output, &output_as_input); RegisterIndex reg = AllocateOutput(output, output_vreg_data, instr_index, UsePosition::kAll); if (reg.is_valid()) { // Replace the input operand with an unallocated fixed register policy for // the same register. UnallocatedOperand::ExtendedPolicy policy = kind() == RegisterKind::kGeneral ? UnallocatedOperand::FIXED_REGISTER : UnallocatedOperand::FIXED_FP_REGISTER; MachineRepresentation rep = input_vreg_data.rep(); UnallocatedOperand fixed_input(policy, ToRegCode(reg, rep), input_vreg); InstructionOperand::ReplaceWith(input, &fixed_input); same_input_output_registers_bits_.Add(reg, rep); } else { // Output was spilled. Due to the SameAsInput allocation policy, we need to // make the input operand the same as the output, i.e., the output virtual // register's spill slot. As such, spill this input operand using the output // virtual register's spill slot, then add a gap-move to move the input // value into this spill slot. output_vreg_data.SpillOperand(input, instr_index, false, data_); // Add an unconstrained gap move for the input virtual register. UnallocatedOperand unconstrained_input( UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT, input_vreg); MoveOperands* move_ops = data_->AddGapMove( instr_index, Instruction::END, unconstrained_input, PendingOperand()); output_vreg_data.SpillOperand(&move_ops->destination(), instr_index, true, data_); } } void SinglePassRegisterAllocator::AllocateTemp(UnallocatedOperand* operand, int virtual_register, MachineRepresentation rep, int instr_index) { EnsureRegisterState(); RegisterIndex reg; DCHECK(!operand->HasFixedSlotPolicy()); if (operand->HasSlotPolicy()) { reg = RegisterIndex::Invalid(); } else if (operand->HasFixedRegisterPolicy() || operand->HasFixedFPRegisterPolicy()) { reg = FromRegCode(operand->fixed_register_index(), rep); } else { reg = ChooseRegisterFor(rep, UsePosition::kAll, operand->HasRegisterPolicy()); } if (reg.is_valid()) { DCHECK(virtual_register == InstructionOperand::kInvalidVirtualRegister || VirtualRegisterIsUnallocatedOrInReg(virtual_register, reg)); CommitRegister(reg, virtual_register, rep, operand, UsePosition::kAll); } else { VirtualRegisterData& vreg_data = VirtualRegisterDataFor(virtual_register); vreg_data.SpillOperand(operand, instr_index, operand->HasRegisterOrSlotOrConstantPolicy(), data_); } } bool SinglePassRegisterAllocator::DefinedAfter(int virtual_register, int instr_index, UsePosition pos) { if (virtual_register == InstructionOperand::kInvalidVirtualRegister) return false; int defined_at = VirtualRegisterDataFor(virtual_register).output_instr_index(); return defined_at > instr_index || (defined_at == instr_index && pos == UsePosition::kStart); } void SinglePassRegisterAllocator::ReserveFixedInputRegister( const UnallocatedOperand* operand, int virtual_register, MachineRepresentation rep, int instr_index) { ReserveFixedRegister( operand, virtual_register, rep, instr_index, operand->IsUsedAtStart() ? UsePosition::kStart : UsePosition::kAll); } void SinglePassRegisterAllocator::ReserveFixedTempRegister( const UnallocatedOperand* operand, int virtual_register, MachineRepresentation rep, int instr_index) { ReserveFixedRegister(operand, virtual_register, rep, instr_index, UsePosition::kAll); } void SinglePassRegisterAllocator::ReserveFixedOutputRegister( const UnallocatedOperand* operand, int virtual_register, MachineRepresentation rep, int instr_index) { ReserveFixedRegister(operand, virtual_register, rep, instr_index, UsePosition::kEnd); } void SinglePassRegisterAllocator::ReserveFixedRegister( const UnallocatedOperand* operand, int virtual_register, MachineRepresentation rep, int instr_index, UsePosition pos) { EnsureRegisterState(); int reg_code = operand->fixed_register_index(); RegisterIndex reg = FromRegCode(reg_code, rep); if (!IsFreeOrSameVirtualRegister(reg, virtual_register) && !DefinedAfter(virtual_register, instr_index, pos)) { // If register is in-use by a different virtual register, spill it now. // TODO(rmcilroy): Consider moving to a unconstrained register instead of // spilling. SpillRegister(reg); } // Also potentially spill the "sibling SIMD register" on architectures where a // SIMD register aliases two FP registers. if (kFPAliasing == AliasingKind::kCombine && rep == MachineRepresentation::kSimd128) { if (register_state_->IsAllocated(simdSibling(reg)) && !DefinedAfter(virtual_register, instr_index, pos)) { SpillRegister(simdSibling(reg)); } } // Similarly (but the other way around), spill a SIMD register that (partly) // overlaps with a fixed FP register. if (kFPAliasing == AliasingKind::kCombine && (rep == MachineRepresentation::kFloat64 || rep == MachineRepresentation::kFloat32)) { int simd_reg_code; CHECK_EQ( 1, data_->config()->GetAliases( rep, reg_code, MachineRepresentation::kSimd128, &simd_reg_code)); // Sanity check: The SIMD register code should be the shifted {reg_code}. DCHECK_EQ(simd_reg_code, reg_code >> (rep == MachineRepresentation::kFloat64 ? 1 : 2)); RegisterIndex simd_reg = FromRegCode(simd_reg_code, MachineRepresentation::kSimd128); DCHECK(simd_reg == reg || simdSibling(simd_reg) == reg); int allocated_vreg = VirtualRegisterForRegister(simd_reg); if (simd_reg != reg && allocated_vreg != InstructionOperand::kInvalidVirtualRegister && VirtualRegisterDataFor(allocated_vreg).rep() == MachineRepresentation::kSimd128 && !DefinedAfter(virtual_register, instr_index, pos)) { SpillRegister(simd_reg); } } MarkRegisterUse(reg, rep, pos); } void SinglePassRegisterAllocator::AllocatePhiGapMove( VirtualRegisterData& to_vreg, VirtualRegisterData& from_vreg, int instr_index) { EnsureRegisterState(); RegisterIndex from_register = RegisterForVirtualRegister(from_vreg.vreg()); RegisterIndex to_register = RegisterForVirtualRegister(to_vreg.vreg()); // If to_register isn't marked as a phi gap move, we can't use it as such. if (to_register.is_valid() && !register_state_->IsPhiGapMove(to_register)) { to_register = RegisterIndex::Invalid(); } if (to_register.is_valid() && !from_register.is_valid()) { // If |to| virtual register is allocated to a register, and the |from| // virtual register isn't allocated, then commit this register and // re-allocate it to the |from| virtual register. InstructionOperand operand; CommitRegister(to_register, to_vreg.vreg(), to_vreg.rep(), &operand, UsePosition::kAll); AllocateUse(to_register, from_vreg, &operand, instr_index, UsePosition::kAll); } else { // Otherwise add a gap move. MoveOperands* move = data_->AddPendingOperandGapMove(instr_index, Instruction::END); PendingOperand* to_operand = PendingOperand::cast(&move->destination()); PendingOperand* from_operand = PendingOperand::cast(&move->source()); // Commit the |to| side to either a register or the pending spills. if (to_register.is_valid()) { CommitRegister(to_register, to_vreg.vreg(), to_vreg.rep(), to_operand, UsePosition::kAll); } else { to_vreg.SpillOperand(to_operand, instr_index, true, data_); } // The from side is unconstrained. UnallocatedOperand unconstrained_input( UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT, from_vreg.vreg()); InstructionOperand::ReplaceWith(from_operand, &unconstrained_input); } } void SinglePassRegisterAllocator::AllocatePhi( VirtualRegisterData& virtual_register, const InstructionBlock* block) { if (virtual_register.NeedsSpillAtOutput() || block->IsLoopHeader()) { // If the Phi needs to be spilled, just spill here directly so that all // gap moves into the Phi move into the spill slot. SpillRegisterForVirtualRegister(virtual_register.vreg()); } else { RegisterIndex reg = RegisterForVirtualRegister(virtual_register.vreg()); if (reg.is_valid()) { // If the register is valid, assign it as a phi gap move to be processed // at the successor blocks. If no register or spill slot was used then // the virtual register was never used. register_state_->UseForPhiGapMove(reg); } } } void SinglePassRegisterAllocator::EnsureRegisterState() { if (V8_UNLIKELY(!register_state_)) { register_state_ = RegisterState::New(kind(), num_allocatable_registers_, data_->allocation_zone()); } } class MidTierOutputProcessor final { public: explicit MidTierOutputProcessor(MidTierRegisterAllocationData* data); void InitializeBlockState(const InstructionBlock* block); void DefineOutputs(const InstructionBlock* block); private: void PopulateDeferredBlockRegion(RpoNumber initial_block); VirtualRegisterData& VirtualRegisterDataFor(int virtual_register) const { return data_->VirtualRegisterDataFor(virtual_register); } MachineRepresentation RepresentationFor(int virtual_register) const { DCHECK_NE(virtual_register, InstructionOperand::kInvalidVirtualRegister); DCHECK_LT(virtual_register, code()->VirtualRegisterCount()); return code()->GetRepresentation(virtual_register); } bool IsDeferredBlockBoundary(const ZoneVector& blocks) { return blocks.size() == 1 && !data_->GetBlock(blocks[0])->IsDeferred(); } InstructionSequence* code() const { return data_->code(); } Zone* zone() const { return data_->allocation_zone(); } MidTierRegisterAllocationData* const data_; ZoneQueue deferred_blocks_worklist_; ZoneSet deferred_blocks_processed_; }; MidTierOutputProcessor::MidTierOutputProcessor( MidTierRegisterAllocationData* data) : data_(data), deferred_blocks_worklist_(data->allocation_zone()), deferred_blocks_processed_(data->allocation_zone()) {} void MidTierOutputProcessor::PopulateDeferredBlockRegion( RpoNumber initial_block) { DeferredBlocksRegion* deferred_blocks_region = zone()->New(zone(), code()->InstructionBlockCount()); DCHECK(deferred_blocks_worklist_.empty()); deferred_blocks_worklist_.push(initial_block); deferred_blocks_processed_.insert(initial_block); while (!deferred_blocks_worklist_.empty()) { RpoNumber current = deferred_blocks_worklist_.front(); deferred_blocks_worklist_.pop(); deferred_blocks_region->AddBlock(current, data_); const InstructionBlock* curr_block = data_->GetBlock(current); // Check for whether the predecessor blocks are still deferred. if (IsDeferredBlockBoundary(curr_block->predecessors())) { // If not, mark the predecessor as having a deferred successor. data_->block_state(curr_block->predecessors()[0]) .MarkAsDeferredBlockBoundary(); } else { // Otherwise process predecessors. for (RpoNumber pred : curr_block->predecessors()) { if (deferred_blocks_processed_.count(pred) == 0) { deferred_blocks_worklist_.push(pred); deferred_blocks_processed_.insert(pred); } } } // Check for whether the successor blocks are still deferred. // Process any unprocessed successors if we aren't at a boundary. if (IsDeferredBlockBoundary(curr_block->successors())) { // If not, mark the predecessor as having a deferred successor. data_->block_state(current).MarkAsDeferredBlockBoundary(); } else { // Otherwise process successors. for (RpoNumber succ : curr_block->successors()) { if (deferred_blocks_processed_.count(succ) == 0) { deferred_blocks_worklist_.push(succ); deferred_blocks_processed_.insert(succ); } } } } } void MidTierOutputProcessor::InitializeBlockState( const InstructionBlock* block) { // Update our predecessor blocks with their successors_phi_index if we have // phis. if (block->phis().size()) { for (int i = 0; i < static_cast(block->PredecessorCount()); ++i) { data_->block_state(block->predecessors()[i]).set_successors_phi_index(i); } } BlockState& block_state = data_->block_state(block->rpo_number()); if (block->IsDeferred() && !block_state.deferred_blocks_region()) { PopulateDeferredBlockRegion(block->rpo_number()); } // Mark this block as dominating itself. block_state.dominated_blocks()->Add(block->rpo_number().ToInt()); if (block->dominator().IsValid()) { // Add all the blocks this block dominates to its dominator. BlockState& dominator_block_state = data_->block_state(block->dominator()); dominator_block_state.dominated_blocks()->Union( *block_state.dominated_blocks()); } else { // Only the first block shouldn't have a dominator. DCHECK_EQ(block, code()->instruction_blocks().front()); } } void MidTierOutputProcessor::DefineOutputs(const InstructionBlock* block) { int block_start = block->first_instruction_index(); bool is_deferred = block->IsDeferred(); for (int index = block->last_instruction_index(); index >= block_start; index--) { Instruction* instr = code()->InstructionAt(index); // For each instruction, define details of the output with the associated // virtual register data. for (size_t i = 0; i < instr->OutputCount(); i++) { InstructionOperand* output = instr->OutputAt(i); if (output->IsConstant()) { ConstantOperand* constant_operand = ConstantOperand::cast(output); int virtual_register = constant_operand->virtual_register(); MachineRepresentation rep = RepresentationFor(virtual_register); VirtualRegisterDataFor(virtual_register) .DefineAsConstantOperand(constant_operand, rep, index, is_deferred); } else { DCHECK(output->IsUnallocated()); UnallocatedOperand* unallocated_operand = UnallocatedOperand::cast(output); int virtual_register = unallocated_operand->virtual_register(); MachineRepresentation rep = RepresentationFor(virtual_register); bool is_exceptional_call_output = instr->IsCallWithDescriptorFlags() && instr->HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler); if (unallocated_operand->HasFixedSlotPolicy()) { // If output has a fixed slot policy, allocate its spill operand now // so that the register allocator can use this knowledge. AllocatedOperand* fixed_spill_operand = AllocatedOperand::New(zone(), AllocatedOperand::STACK_SLOT, rep, unallocated_operand->fixed_slot_index()); VirtualRegisterDataFor(virtual_register) .DefineAsFixedSpillOperand(fixed_spill_operand, virtual_register, rep, index, is_deferred, is_exceptional_call_output); } else { VirtualRegisterDataFor(virtual_register) .DefineAsUnallocatedOperand(virtual_register, rep, index, is_deferred, is_exceptional_call_output); } } } // Mark any instructions that require reference maps for later reference map // processing. if (instr->HasReferenceMap()) { data_->reference_map_instructions().push_back(index); } } // Define phi output operands. for (PhiInstruction* phi : block->phis()) { int virtual_register = phi->virtual_register(); MachineRepresentation rep = RepresentationFor(virtual_register); VirtualRegisterDataFor(virtual_register) .DefineAsPhi(virtual_register, rep, block->first_instruction_index(), is_deferred); } } void DefineOutputs(MidTierRegisterAllocationData* data) { MidTierOutputProcessor processor(data); for (const InstructionBlock* block : base::Reversed(data->code()->instruction_blocks())) { data->tick_counter()->TickAndMaybeEnterSafepoint(); processor.InitializeBlockState(block); processor.DefineOutputs(block); } } class MidTierRegisterAllocator final { public: explicit MidTierRegisterAllocator(MidTierRegisterAllocationData* data); MidTierRegisterAllocator(const MidTierRegisterAllocator&) = delete; MidTierRegisterAllocator& operator=(const MidTierRegisterAllocator&) = delete; void AllocateRegisters(const InstructionBlock* block); void UpdateSpillRangesForLoops(); SinglePassRegisterAllocator& general_reg_allocator() { return general_reg_allocator_; } SinglePassRegisterAllocator& double_reg_allocator() { return double_reg_allocator_; } private: void AllocatePhis(const InstructionBlock* block); void AllocatePhiGapMoves(const InstructionBlock* block); bool IsFixedRegisterPolicy(const UnallocatedOperand* operand); void ReserveFixedRegisters(int instr_index); SinglePassRegisterAllocator& AllocatorFor(MachineRepresentation rep); VirtualRegisterData& VirtualRegisterDataFor(int virtual_register) const { return data_->VirtualRegisterDataFor(virtual_register); } InstructionSequence* code() const { return data_->code(); } Zone* allocation_zone() const { return data_->allocation_zone(); } MidTierRegisterAllocationData* const data_; SinglePassRegisterAllocator general_reg_allocator_; SinglePassRegisterAllocator double_reg_allocator_; }; MidTierRegisterAllocator::MidTierRegisterAllocator( MidTierRegisterAllocationData* data) : data_(data), general_reg_allocator_(RegisterKind::kGeneral, data), double_reg_allocator_(RegisterKind::kDouble, data) {} void MidTierRegisterAllocator::AllocateRegisters( const InstructionBlock* block) { RpoNumber block_rpo = block->rpo_number(); bool is_deferred_block_boundary = data_->block_state(block_rpo).is_deferred_block_boundary(); general_reg_allocator_.StartBlock(block); double_reg_allocator_.StartBlock(block); // If the block is not deferred but has deferred successors, then try to // output spill slots for virtual_registers that are only spilled in the // deferred blocks at the start of those deferred blocks to avoid spilling // them at their output in non-deferred blocks. if (is_deferred_block_boundary && !block->IsDeferred()) { for (RpoNumber successor : block->successors()) { if (!data_->GetBlock(successor)->IsDeferred()) continue; DCHECK_GT(successor, block_rpo); DeferredBlocksRegion* deferred_region = data_->block_state(successor).deferred_blocks_region(); // Freeze the deferred spills on the region to ensure no more are added to // this region after the spills for this entry point have already been // emitted. deferred_region->FreezeDeferredSpills(); for (const int virtual_register : *deferred_region) { VirtualRegisterData& vreg_data = VirtualRegisterDataFor(virtual_register); AllocatorFor(vreg_data.rep()) .AllocateDeferredBlockSpillOutput(block->last_instruction_index(), successor, vreg_data); } } } // Allocate registers for instructions in reverse, from the end of the block // to the start. int block_start = block->first_instruction_index(); for (int instr_index = block->last_instruction_index(); instr_index >= block_start; instr_index--) { Instruction* instr = code()->InstructionAt(instr_index); // Reserve any fixed register operands to prevent the register being // allocated to another operand. ReserveFixedRegisters(instr_index); // Allocate outputs. for (size_t i = 0; i < instr->OutputCount(); i++) { InstructionOperand* output = instr->OutputAt(i); DCHECK(!output->IsAllocated()); if (output->IsConstant()) { ConstantOperand* constant_operand = ConstantOperand::cast(output); VirtualRegisterData& vreg_data = VirtualRegisterDataFor(constant_operand->virtual_register()); AllocatorFor(vreg_data.rep()) .AllocateConstantOutput(constant_operand, vreg_data, instr_index); } else { UnallocatedOperand* unallocated_output = UnallocatedOperand::cast(output); VirtualRegisterData& output_vreg_data = VirtualRegisterDataFor(unallocated_output->virtual_register()); if (unallocated_output->HasSameAsInputPolicy()) { DCHECK_EQ(i, 0); UnallocatedOperand* unallocated_input = UnallocatedOperand::cast( instr->InputAt(unallocated_output->input_index())); VirtualRegisterData& input_vreg_data = VirtualRegisterDataFor(unallocated_input->virtual_register()); DCHECK_EQ(AllocatorFor(output_vreg_data.rep()).kind(), AllocatorFor(input_vreg_data.rep()).kind()); AllocatorFor(output_vreg_data.rep()) .AllocateSameInputOutput(unallocated_output, unallocated_input, output_vreg_data, input_vreg_data, instr_index); } else { AllocatorFor(output_vreg_data.rep()) .AllocateOutput(unallocated_output, output_vreg_data, instr_index); } } } if (instr->ClobbersRegisters()) { general_reg_allocator_.SpillAllRegisters(); } if (instr->ClobbersDoubleRegisters()) { double_reg_allocator_.SpillAllRegisters(); } // Allocate temporaries. for (size_t i = 0; i < instr->TempCount(); i++) { UnallocatedOperand* temp = UnallocatedOperand::cast(instr->TempAt(i)); int virtual_register = temp->virtual_register(); MachineRepresentation rep = virtual_register == InstructionOperand::kInvalidVirtualRegister ? InstructionSequence::DefaultRepresentation() : code()->GetRepresentation(virtual_register); AllocatorFor(rep).AllocateTemp(temp, virtual_register, rep, instr_index); } // Allocate inputs that are used across the whole instruction. for (size_t i = 0; i < instr->InputCount(); i++) { if (!instr->InputAt(i)->IsUnallocated()) continue; UnallocatedOperand* input = UnallocatedOperand::cast(instr->InputAt(i)); if (input->IsUsedAtStart()) continue; VirtualRegisterData& vreg_data = VirtualRegisterDataFor(input->virtual_register()); AllocatorFor(vreg_data.rep()) .AllocateInput(input, vreg_data, instr_index); } // Then allocate inputs that are only used at the start of the instruction. for (size_t i = 0; i < instr->InputCount(); i++) { if (!instr->InputAt(i)->IsUnallocated()) continue; UnallocatedOperand* input = UnallocatedOperand::cast(instr->InputAt(i)); DCHECK(input->IsUsedAtStart()); VirtualRegisterData& vreg_data = VirtualRegisterDataFor(input->virtual_register()); AllocatorFor(vreg_data.rep()) .AllocateInput(input, vreg_data, instr_index); } // If we are allocating for the last instruction in the block, allocate any // phi gap move operations that are needed to resolve phis in our successor. if (instr_index == block->last_instruction_index()) { AllocatePhiGapMoves(block); // If this block is deferred but it's successor isn't, update the state to // limit spills to the deferred blocks where possible. if (is_deferred_block_boundary && block->IsDeferred()) { general_reg_allocator_.UpdateForDeferredBlock(instr_index); double_reg_allocator_.UpdateForDeferredBlock(instr_index); } } // Allocate any unallocated gap move inputs. ParallelMove* moves = instr->GetParallelMove(Instruction::END); if (moves != nullptr) { for (MoveOperands* move : *moves) { DCHECK(!move->destination().IsUnallocated()); if (move->source().IsUnallocated()) { UnallocatedOperand* source = UnallocatedOperand::cast(&move->source()); VirtualRegisterData& vreg_data = VirtualRegisterDataFor(source->virtual_register()); AllocatorFor(vreg_data.rep()) .AllocateGapMoveInput(source, vreg_data, instr_index); } } } general_reg_allocator_.EndInstruction(); double_reg_allocator_.EndInstruction(); } // For now we spill all registers at a loop header. // TODO(rmcilroy): Add support for register allocations across loops. if (block->IsLoopHeader()) { general_reg_allocator_.SpillAllRegisters(); double_reg_allocator_.SpillAllRegisters(); } AllocatePhis(block); general_reg_allocator_.EndBlock(block); double_reg_allocator_.EndBlock(block); } SinglePassRegisterAllocator& MidTierRegisterAllocator::AllocatorFor( MachineRepresentation rep) { return IsFloatingPoint(rep) ? double_reg_allocator_ : general_reg_allocator_; } bool MidTierRegisterAllocator::IsFixedRegisterPolicy( const UnallocatedOperand* operand) { return operand->HasFixedRegisterPolicy() || operand->HasFixedFPRegisterPolicy(); } void MidTierRegisterAllocator::ReserveFixedRegisters(int instr_index) { Instruction* instr = code()->InstructionAt(instr_index); for (size_t i = 0; i < instr->OutputCount(); i++) { if (!instr->OutputAt(i)->IsUnallocated()) continue; const UnallocatedOperand* operand = UnallocatedOperand::cast(instr->OutputAt(i)); if (operand->HasSameAsInputPolicy()) { DCHECK_EQ(i, 0); // Input operand has the register constraints, use it here to reserve the // register for the output (it will be reserved for input below). operand = UnallocatedOperand::cast(instr->InputAt(operand->input_index())); } if (IsFixedRegisterPolicy(operand)) { VirtualRegisterData& vreg_data = VirtualRegisterDataFor(operand->virtual_register()); AllocatorFor(vreg_data.rep()) .ReserveFixedOutputRegister(operand, vreg_data.vreg(), vreg_data.rep(), instr_index); } } for (size_t i = 0; i < instr->TempCount(); i++) { if (!instr->TempAt(i)->IsUnallocated()) continue; const UnallocatedOperand* operand = UnallocatedOperand::cast(instr->TempAt(i)); if (IsFixedRegisterPolicy(operand)) { int virtual_register = operand->virtual_register(); MachineRepresentation rep = virtual_register == InstructionOperand::kInvalidVirtualRegister ? InstructionSequence::DefaultRepresentation() : code()->GetRepresentation(virtual_register); AllocatorFor(rep).ReserveFixedTempRegister(operand, virtual_register, rep, instr_index); } } for (size_t i = 0; i < instr->InputCount(); i++) { if (!instr->InputAt(i)->IsUnallocated()) continue; const UnallocatedOperand* operand = UnallocatedOperand::cast(instr->InputAt(i)); if (IsFixedRegisterPolicy(operand)) { VirtualRegisterData& vreg_data = VirtualRegisterDataFor(operand->virtual_register()); AllocatorFor(vreg_data.rep()) .ReserveFixedInputRegister(operand, vreg_data.vreg(), vreg_data.rep(), instr_index); } } } void MidTierRegisterAllocator::AllocatePhiGapMoves( const InstructionBlock* block) { int successors_phi_index = data_->block_state(block->rpo_number()).successors_phi_index(); // If successors_phi_index is -1 there are no phi's in the successor. if (successors_phi_index == -1) return; // The last instruction of a block with phis can't require reference maps // since we won't record phi gap moves that get spilled when populating the // reference maps int instr_index = block->last_instruction_index(); DCHECK(!code()->InstructionAt(instr_index)->HasReferenceMap()); // If there are phis, we only have a single successor due to edge-split form. DCHECK_EQ(block->SuccessorCount(), 1); const InstructionBlock* successor = data_->GetBlock(block->successors()[0]); for (PhiInstruction* phi : successor->phis()) { VirtualRegisterData& to_vreg = VirtualRegisterDataFor(phi->virtual_register()); VirtualRegisterData& from_vreg = VirtualRegisterDataFor(phi->operands()[successors_phi_index]); AllocatorFor(to_vreg.rep()) .AllocatePhiGapMove(to_vreg, from_vreg, instr_index); } } void MidTierRegisterAllocator::AllocatePhis(const InstructionBlock* block) { for (PhiInstruction* phi : block->phis()) { VirtualRegisterData& virtual_register = VirtualRegisterDataFor(phi->virtual_register()); AllocatorFor(virtual_register.rep()).AllocatePhi(virtual_register, block); } } void MidTierRegisterAllocator::UpdateSpillRangesForLoops() { // Extend the spill range of any spill that crosses a loop header to // the full loop. for (InstructionBlock* block : code()->instruction_blocks()) { if (block->IsLoopHeader()) { RpoNumber last_loop_block = RpoNumber::FromInt(block->loop_end().ToInt() - 1); int last_loop_instr = data_->GetBlock(last_loop_block)->last_instruction_index(); // Extend spill range for all spilled values that are live on entry to the // loop header. for (int vreg : data_->spilled_virtual_registers()) { const VirtualRegisterData& vreg_data = VirtualRegisterDataFor(vreg); if (vreg_data.HasSpillRange() && vreg_data.spill_range()->IsLiveAt(block->first_instruction_index(), block)) { vreg_data.spill_range()->ExtendRangeTo(last_loop_instr); } } } } } void AllocateRegisters(MidTierRegisterAllocationData* data) { MidTierRegisterAllocator allocator(data); for (InstructionBlock* block : base::Reversed(data->code()->instruction_blocks())) { data->tick_counter()->TickAndMaybeEnterSafepoint(); allocator.AllocateRegisters(block); } allocator.UpdateSpillRangesForLoops(); data->frame()->SetAllocatedRegisters( allocator.general_reg_allocator().assigned_registers()); data->frame()->SetAllocatedDoubleRegisters( allocator.double_reg_allocator().assigned_registers()); } // Spill slot allocator for mid-tier register allocation. class MidTierSpillSlotAllocator final { public: explicit MidTierSpillSlotAllocator(MidTierRegisterAllocationData* data); MidTierSpillSlotAllocator(const MidTierSpillSlotAllocator&) = delete; MidTierSpillSlotAllocator& operator=(const MidTierSpillSlotAllocator&) = delete; void Allocate(VirtualRegisterData* virtual_register); private: class SpillSlot; void AdvanceTo(int instr_index); SpillSlot* GetFreeSpillSlot(int byte_width); InstructionSequence* code() const { return data_->code(); } Frame* frame() const { return data_->frame(); } Zone* zone() const { return data_->allocation_zone(); } struct OrderByLastUse { bool operator()(const SpillSlot* a, const SpillSlot* b) const; }; MidTierRegisterAllocationData* const data_; ZonePriorityQueue allocated_slots_; ZoneLinkedList free_slots_; int position_; }; class MidTierSpillSlotAllocator::SpillSlot : public ZoneObject { public: SpillSlot(int stack_slot, int byte_width) : stack_slot_(stack_slot), byte_width_(byte_width), range_() {} SpillSlot(const SpillSlot&) = delete; SpillSlot& operator=(const SpillSlot&) = delete; void AddRange(const Range& range) { range_.AddRange(range); } AllocatedOperand ToOperand(MachineRepresentation rep) const { return AllocatedOperand(AllocatedOperand::STACK_SLOT, rep, stack_slot_); } int byte_width() const { return byte_width_; } int last_use() const { return range_.end(); } private: int stack_slot_; int byte_width_; Range range_; }; bool MidTierSpillSlotAllocator::OrderByLastUse::operator()( const SpillSlot* a, const SpillSlot* b) const { return a->last_use() > b->last_use(); } MidTierSpillSlotAllocator::MidTierSpillSlotAllocator( MidTierRegisterAllocationData* data) : data_(data), allocated_slots_(data->allocation_zone()), free_slots_(data->allocation_zone()), position_(0) {} void MidTierSpillSlotAllocator::AdvanceTo(int instr_index) { // Move any slots that are no longer in use to the free slots list. DCHECK_LE(position_, instr_index); while (!allocated_slots_.empty() && instr_index > allocated_slots_.top()->last_use()) { free_slots_.push_front(allocated_slots_.top()); allocated_slots_.pop(); } position_ = instr_index; } MidTierSpillSlotAllocator::SpillSlot* MidTierSpillSlotAllocator::GetFreeSpillSlot(int byte_width) { for (auto it = free_slots_.begin(); it != free_slots_.end(); ++it) { SpillSlot* slot = *it; if (slot->byte_width() == byte_width) { free_slots_.erase(it); return slot; } } return nullptr; } void MidTierSpillSlotAllocator::Allocate( VirtualRegisterData* virtual_register) { DCHECK(virtual_register->HasPendingSpillOperand()); VirtualRegisterData::SpillRange* spill_range = virtual_register->spill_range(); MachineRepresentation rep = virtual_register->rep(); int byte_width = ByteWidthForStackSlot(rep); Range live_range = spill_range->live_range(); AdvanceTo(live_range.start()); // Try to re-use an existing free spill slot. SpillSlot* slot = GetFreeSpillSlot(byte_width); if (slot == nullptr) { // Otherwise allocate a new slot. int stack_slot_ = frame()->AllocateSpillSlot(byte_width); slot = zone()->New(stack_slot_, byte_width); } // Extend the range of the slot to include this spill range, and allocate the // pending spill operands with this slot. slot->AddRange(live_range); virtual_register->AllocatePendingSpillOperand(slot->ToOperand(rep)); allocated_slots_.push(slot); } void AllocateSpillSlots(MidTierRegisterAllocationData* data) { ZoneVector spilled(data->allocation_zone()); for (int vreg : data->spilled_virtual_registers()) { VirtualRegisterData& vreg_data = data->VirtualRegisterDataFor(vreg); if (vreg_data.HasPendingSpillOperand()) { spilled.push_back(&vreg_data); } } // Sort the spill ranges by order of their first use to enable linear // allocation of spill slots. std::sort(spilled.begin(), spilled.end(), [](const VirtualRegisterData* a, const VirtualRegisterData* b) { return a->spill_range()->live_range().start() < b->spill_range()->live_range().start(); }); // Allocate a spill slot for each virtual register with a spill range. MidTierSpillSlotAllocator allocator(data); for (VirtualRegisterData* spill : spilled) { allocator.Allocate(spill); } } // Populates reference maps for mid-tier register allocation. class MidTierReferenceMapPopulator final { public: explicit MidTierReferenceMapPopulator(MidTierRegisterAllocationData* data); MidTierReferenceMapPopulator(const MidTierReferenceMapPopulator&) = delete; MidTierReferenceMapPopulator& operator=(const MidTierReferenceMapPopulator&) = delete; void RecordReferences(const VirtualRegisterData& virtual_register); private: InstructionSequence* code() const { return data_->code(); } MidTierRegisterAllocationData* const data_; }; MidTierReferenceMapPopulator::MidTierReferenceMapPopulator( MidTierRegisterAllocationData* data) : data_(data) {} void MidTierReferenceMapPopulator::RecordReferences( const VirtualRegisterData& virtual_register) { if (!virtual_register.HasAllocatedSpillOperand()) return; if (!code()->IsReference(virtual_register.vreg())) return; VirtualRegisterData::SpillRange* spill_range = virtual_register.spill_range(); Range& live_range = spill_range->live_range(); AllocatedOperand allocated = *AllocatedOperand::cast(virtual_register.spill_operand()); for (int instr_index : data_->reference_map_instructions()) { if (instr_index > live_range.end() || instr_index < live_range.start()) continue; Instruction* instr = data_->code()->InstructionAt(instr_index); DCHECK(instr->HasReferenceMap()); if (spill_range->IsLiveAt(instr_index, instr->block())) { instr->reference_map()->RecordReference(allocated); } } } void PopulateReferenceMaps(MidTierRegisterAllocationData* data) { MidTierReferenceMapPopulator populator(data); for (int vreg : data->spilled_virtual_registers()) { populator.RecordReferences(data->VirtualRegisterDataFor(vreg)); } } } // namespace compiler } // namespace internal } // namespace v8