1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/compiler/backend/register-allocator.h"
6 
7 #include <iomanip>
8 
9 #include "src/base/iterator.h"
10 #include "src/base/small-vector.h"
11 #include "src/codegen/assembler-inl.h"
12 #include "src/codegen/tick-counter.h"
13 #include "src/compiler/backend/spill-placer.h"
14 #include "src/compiler/linkage.h"
15 #include "src/strings/string-stream.h"
16 #include "src/utils/vector.h"
17 
18 namespace v8 {
19 namespace internal {
20 namespace compiler {
21 
22 #define TRACE_COND(cond, ...)      \
23   do {                             \
24     if (cond) PrintF(__VA_ARGS__); \
25   } while (false)
26 
27 #define TRACE(...) TRACE_COND(data()->is_trace_alloc(), __VA_ARGS__)
28 
29 namespace {
30 
31 static constexpr int kFloat32Bit =
32     RepresentationBit(MachineRepresentation::kFloat32);
33 static constexpr int kSimd128Bit =
34     RepresentationBit(MachineRepresentation::kSimd128);
35 
36 
37 const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence,
38                                           const InstructionBlock* block) {
39   RpoNumber index = block->loop_header();
40   if (!index.IsValid()) return nullptr;
41   return sequence->InstructionBlockAt(index);
42 }
43 
44 const InstructionBlock* GetInstructionBlock(const InstructionSequence* code,
45                                             LifetimePosition pos) {
46   return code->GetInstructionBlock(pos.ToInstructionIndex());
47 }
48 
49 Instruction* GetLastInstruction(InstructionSequence* code,
50                                 const InstructionBlock* block) {
51   return code->InstructionAt(block->last_instruction_index());
52 }
53 
54 }  // namespace
55 
56 void LiveRangeBoundArray::Initialize(Zone* zone, TopLevelLiveRange* range) {
57   size_t max_child_count = range->GetMaxChildCount();
58 
59   start_ = zone->NewArray<LiveRangeBound>(max_child_count);
60   length_ = 0;
61   LiveRangeBound* curr = start_;
62   // The primary loop in ResolveControlFlow is not responsible for inserting
63   // connecting moves for spilled ranges.
64   for (LiveRange* i = range; i != nullptr; i = i->next(), ++curr, ++length_) {
65     new (curr) LiveRangeBound(i, i->spilled());
66   }
67 }
68 
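// Binary search over the bounds (sorted by start position) for the bound
// whose [start, end) interval contains `position`. A containing bound is
// assumed to exist.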
69 LiveRangeBound* LiveRangeBoundArray::Find(
70     const LifetimePosition position) const {
71   size_t left_index = 0;
72   size_t right_index = length_;
73   while (true) {
74     size_t current_index = left_index + (right_index - left_index) / 2;
75     DCHECK(right_index > current_index);
76     LiveRangeBound* bound = &start_[current_index];
77     if (bound->start_ <= position) {
78       if (position < bound->end_) return bound;
79       DCHECK(left_index < current_index);
80       left_index = current_index;
81     } else {
82       right_index = current_index;
83     }
84   }
85 }
86 
87 LiveRangeBound* LiveRangeBoundArray::FindPred(const InstructionBlock* pred) {
88   LifetimePosition pred_end = LifetimePosition::InstructionFromInstructionIndex(
89       pred->last_instruction_index());
90   return Find(pred_end);
91 }
92 
93 LiveRangeBound* LiveRangeBoundArray::FindSucc(const InstructionBlock* succ) {
94   LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex(
95       succ->first_instruction_index());
96   return Find(succ_start);
97 }
98 
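// Determines whether the live range is covered by two different children at
// the end of `pred` and at the start of `block`. Returns true only when the
// two covering ranges differ (and the one covering `block` is not a spilled
// bound), i.e. when a connecting move may be required.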
99 bool LiveRangeBoundArray::FindConnectableSubranges(
100     const InstructionBlock* block, const InstructionBlock* pred,
101     FindResult* result) const {
102   LifetimePosition pred_end = LifetimePosition::InstructionFromInstructionIndex(
103       pred->last_instruction_index());
104   LiveRangeBound* bound = Find(pred_end);
105   result->pred_cover_ = bound->range_;
106   LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex(
107       block->first_instruction_index());
108 
109   if (bound->CanCover(cur_start)) {
110     // Both blocks are covered by the same range, so there is nothing to
111     // connect.
112     return false;
113   }
114   bound = Find(cur_start);
115   if (bound->skip_) {
116     return false;
117   }
118   result->cur_cover_ = bound->range_;
119   DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
120   return (result->cur_cover_ != result->pred_cover_);
121 }
122 
123 LiveRangeFinder::LiveRangeFinder(const TopTierRegisterAllocationData* data,
124                                  Zone* zone)
125     : data_(data),
126       bounds_length_(static_cast<int>(data_->live_ranges().size())),
127       bounds_(zone->NewArray<LiveRangeBoundArray>(bounds_length_)),
128       zone_(zone) {
129   for (int i = 0; i < bounds_length_; ++i) {
130     new (&bounds_[i]) LiveRangeBoundArray();
131   }
132 }
133 
134 LiveRangeBoundArray* LiveRangeFinder::ArrayFor(int operand_index) {
135   DCHECK(operand_index < bounds_length_);
136   TopLevelLiveRange* range = data_->live_ranges()[operand_index];
137   DCHECK(range != nullptr && !range->IsEmpty());
138   LiveRangeBoundArray* array = &bounds_[operand_index];
139   if (array->ShouldInitialize()) {
140     array->Initialize(zone_, range);
141   }
142   return array;
143 }
144 
145 using DelayedInsertionMapKey = std::pair<ParallelMove*, InstructionOperand>;
146 
147 struct DelayedInsertionMapCompare {
148   bool operator()(const DelayedInsertionMapKey& a,
149                   const DelayedInsertionMapKey& b) const {
150     if (a.first == b.first) {
151       return a.second.Compare(b.second);
152     }
153     return a.first < b.first;
154   }
155 };
156 
157 using DelayedInsertionMap = ZoneMap<DelayedInsertionMapKey, InstructionOperand,
158                                     DelayedInsertionMapCompare>;
159 
160 UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
161                          void* hint, UsePositionHintType hint_type)
162     : operand_(operand), hint_(hint), next_(nullptr), pos_(pos), flags_(0) {
163   DCHECK_IMPLIES(hint == nullptr, hint_type == UsePositionHintType::kNone);
164   bool register_beneficial = true;
165   UsePositionType type = UsePositionType::kRegisterOrSlot;
166   if (operand_ != nullptr && operand_->IsUnallocated()) {
167     const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_);
168     if (unalloc->HasRegisterPolicy()) {
169       type = UsePositionType::kRequiresRegister;
170     } else if (unalloc->HasSlotPolicy()) {
171       type = UsePositionType::kRequiresSlot;
172       register_beneficial = false;
173     } else if (unalloc->HasRegisterOrSlotOrConstantPolicy()) {
174       type = UsePositionType::kRegisterOrSlotOrConstant;
175       register_beneficial = false;
176     } else {
177       register_beneficial = !unalloc->HasRegisterOrSlotPolicy();
178     }
179   }
180   flags_ = TypeField::encode(type) | HintTypeField::encode(hint_type) |
181            RegisterBeneficialField::encode(register_beneficial) |
182            AssignedRegisterField::encode(kUnassignedRegister);
183   DCHECK(pos_.IsValid());
184 }
185 
186 bool UsePosition::HasHint() const {
187   int hint_register;
188   return HintRegister(&hint_register);
189 }
190 
191 bool UsePosition::HintRegister(int* register_code) const {
192   if (hint_ == nullptr) return false;
193   switch (HintTypeField::decode(flags_)) {
194     case UsePositionHintType::kNone:
195     case UsePositionHintType::kUnresolved:
196       return false;
197     case UsePositionHintType::kUsePos: {
198       UsePosition* use_pos = reinterpret_cast<UsePosition*>(hint_);
199       int assigned_register = AssignedRegisterField::decode(use_pos->flags_);
200       if (assigned_register == kUnassignedRegister) return false;
201       *register_code = assigned_register;
202       return true;
203     }
204     case UsePositionHintType::kOperand: {
205       InstructionOperand* operand =
206           reinterpret_cast<InstructionOperand*>(hint_);
207       *register_code = LocationOperand::cast(operand)->register_code();
208       return true;
209     }
210     case UsePositionHintType::kPhi: {
211       TopTierRegisterAllocationData::PhiMapValue* phi =
212           reinterpret_cast<TopTierRegisterAllocationData::PhiMapValue*>(hint_);
213       int assigned_register = phi->assigned_register();
214       if (assigned_register == kUnassignedRegister) return false;
215       *register_code = assigned_register;
216       return true;
217     }
218   }
219   UNREACHABLE();
220 }
221 
222 UsePositionHintType UsePosition::HintTypeForOperand(
223     const InstructionOperand& op) {
224   switch (op.kind()) {
225     case InstructionOperand::CONSTANT:
226     case InstructionOperand::IMMEDIATE:
227       return UsePositionHintType::kNone;
228     case InstructionOperand::UNALLOCATED:
229       return UsePositionHintType::kUnresolved;
230     case InstructionOperand::ALLOCATED:
231       if (op.IsRegister() || op.IsFPRegister()) {
232         return UsePositionHintType::kOperand;
233       } else {
234         DCHECK(op.IsStackSlot() || op.IsFPStackSlot());
235         return UsePositionHintType::kNone;
236       }
237     case InstructionOperand::PENDING:
238     case InstructionOperand::INVALID:
239       break;
240   }
241   UNREACHABLE();
242 }
243 
244 void UsePosition::SetHint(UsePosition* use_pos) {
245   DCHECK_NOT_NULL(use_pos);
246   hint_ = use_pos;
247   flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos);
248 }
249 
250 void UsePosition::ResolveHint(UsePosition* use_pos) {
251   DCHECK_NOT_NULL(use_pos);
252   if (HintTypeField::decode(flags_) != UsePositionHintType::kUnresolved) return;
253   hint_ = use_pos;
254   flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos);
255 }
256 
257 void UsePosition::set_type(UsePositionType type, bool register_beneficial) {
258   DCHECK_IMPLIES(type == UsePositionType::kRequiresSlot, !register_beneficial);
259   DCHECK_EQ(kUnassignedRegister, AssignedRegisterField::decode(flags_));
260   flags_ = TypeField::encode(type) |
261            RegisterBeneficialField::encode(register_beneficial) |
262            HintTypeField::encode(HintTypeField::decode(flags_)) |
263            AssignedRegisterField::encode(kUnassignedRegister);
264 }
265 
266 UseInterval* UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
267   DCHECK(Contains(pos) && pos != start());
268   UseInterval* after = zone->New<UseInterval>(pos, end_);
269   after->next_ = next_;
270   next_ = nullptr;
271   end_ = pos;
272   return after;
273 }
274 
275 void LifetimePosition::Print() const { StdoutStream{} << *this << std::endl; }
276 
277 std::ostream& operator<<(std::ostream& os, const LifetimePosition pos) {
278   os << '@' << pos.ToInstructionIndex();
279   if (pos.IsGapPosition()) {
280     os << 'g';
281   } else {
282     os << 'i';
283   }
284   if (pos.IsStart()) {
285     os << 's';
286   } else {
287     os << 'e';
288   }
289   return os;
290 }
291 
292 LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
293                      TopLevelLiveRange* top_level)
294     : relative_id_(relative_id),
295       bits_(0),
296       last_interval_(nullptr),
297       first_interval_(nullptr),
298       first_pos_(nullptr),
299       top_level_(top_level),
300       next_(nullptr),
301       current_interval_(nullptr),
302       last_processed_use_(nullptr),
303       current_hint_position_(nullptr) {
304   DCHECK(AllocatedOperand::IsSupportedRepresentation(rep));
305   bits_ = AssignedRegisterField::encode(kUnassignedRegister) |
306           RepresentationField::encode(rep) |
307           ControlFlowRegisterHint::encode(kUnassignedRegister);
308 }
309 
310 void LiveRange::VerifyPositions() const {
311   // Walk the positions, verifying that each is in an interval.
312   UseInterval* interval = first_interval_;
313   for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
314     CHECK(Start() <= pos->pos());
315     CHECK(pos->pos() <= End());
316     CHECK_NOT_NULL(interval);
317     while (!interval->Contains(pos->pos()) && interval->end() != pos->pos()) {
318       interval = interval->next();
319       CHECK_NOT_NULL(interval);
320     }
321   }
322 }
323 
324 void LiveRange::VerifyIntervals() const {
325   DCHECK(first_interval()->start() == Start());
326   LifetimePosition last_end = first_interval()->end();
327   for (UseInterval* interval = first_interval()->next(); interval != nullptr;
328        interval = interval->next()) {
329     DCHECK(last_end <= interval->start());
330     last_end = interval->end();
331   }
332   DCHECK(last_end == End());
333 }
334 
335 void LiveRange::set_assigned_register(int reg) {
336   DCHECK(!HasRegisterAssigned() && !spilled());
337   bits_ = AssignedRegisterField::update(bits_, reg);
338 }
339 
340 void LiveRange::UnsetAssignedRegister() {
341   DCHECK(HasRegisterAssigned() && !spilled());
342   bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
343 }
344 
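// Splices the intervals and use positions of the next child onto the end of
// this range and removes that child from the chain.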
345 void LiveRange::AttachToNext() {
346   DCHECK_NOT_NULL(next_);
347   DCHECK_NE(TopLevel()->last_child_covers_, next_);
348   last_interval_->set_next(next_->first_interval());
349   next_->first_interval_ = nullptr;
350   last_interval_ = next_->last_interval_;
351   next_->last_interval_ = nullptr;
352   if (first_pos() == nullptr) {
353     first_pos_ = next_->first_pos();
354   } else {
355     UsePosition* ptr = first_pos_;
356     while (ptr->next() != nullptr) {
357       ptr = ptr->next();
358     }
359     ptr->set_next(next_->first_pos());
360   }
361   next_->first_pos_ = nullptr;
362   LiveRange* old_next = next_;
363   next_ = next_->next_;
364   old_next->next_ = nullptr;
365 }
366 
367 void LiveRange::Unspill() {
368   DCHECK(spilled());
369   set_spilled(false);
370   bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
371 }
372 
373 void LiveRange::Spill() {
374   DCHECK(!spilled());
375   DCHECK(!TopLevel()->HasNoSpillType());
376   set_spilled(true);
377   bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
378 }
379 
380 RegisterKind LiveRange::kind() const {
381   return IsFloatingPoint(representation()) ? RegisterKind::kDouble
382                                            : RegisterKind::kGeneral;
383 }
384 
385 UsePosition* LiveRange::FirstHintPosition(int* register_index) {
386   if (!first_pos_) return nullptr;
387   if (current_hint_position_) {
388     if (current_hint_position_->pos() < first_pos_->pos()) {
389       current_hint_position_ = first_pos_;
390     }
391     if (current_hint_position_->pos() > End()) {
392       current_hint_position_ = nullptr;
393     }
394   }
395   bool needs_revisit = false;
396   UsePosition* pos = current_hint_position_;
397   for (; pos != nullptr; pos = pos->next()) {
398     if (pos->HintRegister(register_index)) {
399       break;
400     }
401     // Phi and use position hints can be assigned during allocation which
402     // would invalidate the cached hint position. Make sure we revisit them.
403     needs_revisit = needs_revisit ||
404                     pos->hint_type() == UsePositionHintType::kPhi ||
405                     pos->hint_type() == UsePositionHintType::kUsePos;
406   }
407   if (!needs_revisit) {
408     current_hint_position_ = pos;
409   }
410 #ifdef DEBUG
411   UsePosition* pos_check = first_pos_;
412   for (; pos_check != nullptr; pos_check = pos_check->next()) {
413     if (pos_check->HasHint()) {
414       break;
415     }
416   }
417   CHECK_EQ(pos, pos_check);
418 #endif
419   return pos;
420 }
421 
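// Returns the first use position at or after `start`. The last result is
// cached, so repeated queries with non-decreasing positions stay cheap.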
422 UsePosition* LiveRange::NextUsePosition(LifetimePosition start) const {
423   UsePosition* use_pos = last_processed_use_;
424   if (use_pos == nullptr || use_pos->pos() > start) {
425     use_pos = first_pos();
426   }
427   while (use_pos != nullptr && use_pos->pos() < start) {
428     use_pos = use_pos->next();
429   }
430   last_processed_use_ = use_pos;
431   return use_pos;
432 }
433 
434 UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
435     LifetimePosition start) const {
436   UsePosition* pos = NextUsePosition(start);
437   while (pos != nullptr && !pos->RegisterIsBeneficial()) {
438     pos = pos->next();
439   }
440   return pos;
441 }
442 
443 LifetimePosition LiveRange::NextLifetimePositionRegisterIsBeneficial(
444     const LifetimePosition& start) const {
445   UsePosition* next_use = NextUsePositionRegisterIsBeneficial(start);
446   if (next_use == nullptr) return End();
447   return next_use->pos();
448 }
449 
450 UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
451     LifetimePosition start) const {
452   UsePosition* pos = first_pos();
453   UsePosition* prev = nullptr;
454   while (pos != nullptr && pos->pos() < start) {
455     if (pos->RegisterIsBeneficial()) prev = pos;
456     pos = pos->next();
457   }
458   return prev;
459 }
460 
461 UsePosition* LiveRange::NextUsePositionSpillDetrimental(
462     LifetimePosition start) const {
463   UsePosition* pos = NextUsePosition(start);
464   while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister &&
465          !pos->SpillDetrimental()) {
466     pos = pos->next();
467   }
468   return pos;
469 }
470 
471 UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const {
472   UsePosition* pos = NextUsePosition(start);
473   while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister) {
474     pos = pos->next();
475   }
476   return pos;
477 }
478 
479 UsePosition* LiveRange::NextSlotPosition(LifetimePosition start) const {
480   for (UsePosition* pos = NextUsePosition(start); pos != nullptr;
481        pos = pos->next()) {
482     if (pos->type() != UsePositionType::kRequiresSlot) continue;
483     return pos;
484   }
485   return nullptr;
486 }
487 
488 bool LiveRange::CanBeSpilled(LifetimePosition pos) const {
489   // We cannot spill a live range that has a use requiring a register
490   // at the current or the immediate next position.
491   UsePosition* use_pos = NextRegisterPosition(pos);
492   if (use_pos == nullptr) return true;
493   return use_pos->pos() > pos.NextStart().End();
494 }
495 
496 bool LiveRange::IsTopLevel() const { return top_level_ == this; }
497 
498 InstructionOperand LiveRange::GetAssignedOperand() const {
499   DCHECK(!IsEmpty());
500   if (HasRegisterAssigned()) {
501     DCHECK(!spilled());
502     return AllocatedOperand(LocationOperand::REGISTER, representation(),
503                             assigned_register());
504   }
505   DCHECK(spilled());
506   DCHECK(!HasRegisterAssigned());
507   if (TopLevel()->HasSpillOperand()) {
508     InstructionOperand* op = TopLevel()->GetSpillOperand();
509     DCHECK(!op->IsUnallocated());
510     return *op;
511   }
512   return TopLevel()->GetSpillRangeOperand();
513 }
514 
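// Returns an interval from which a search for `position` may start: the
// cached current interval if it does not begin after `position`, otherwise
// the first interval of the range.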
515 UseInterval* LiveRange::FirstSearchIntervalForPosition(
516     LifetimePosition position) const {
517   if (current_interval_ == nullptr) return first_interval_;
518   if (current_interval_->start() > position) {
519     current_interval_ = nullptr;
520     return first_interval_;
521   }
522   return current_interval_;
523 }
524 
525 void LiveRange::AdvanceLastProcessedMarker(
526     UseInterval* to_start_of, LifetimePosition but_not_past) const {
527   if (to_start_of == nullptr) return;
528   if (to_start_of->start() > but_not_past) return;
529   LifetimePosition start = current_interval_ == nullptr
530                                ? LifetimePosition::Invalid()
531                                : current_interval_->start();
532   if (to_start_of->start() > start) {
533     current_interval_ = to_start_of;
534   }
535 }
536 
537 LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
538   int new_id = TopLevel()->GetNextChildId();
539   LiveRange* child = zone->New<LiveRange>(new_id, representation(), TopLevel());
540   child->set_bundle(bundle_);
541   // If we split, we do so because we're about to switch registers or move
542   // to/from a slot, so there's no value in connecting hints.
543   DetachAt(position, child, zone, DoNotConnectHints);
544 
545   child->top_level_ = TopLevel();
546   child->next_ = next_;
547   next_ = child;
548   return child;
549 }
550 
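// Splits the interval chain and use position list around `position`, moving
// the later part into `result`, and returns the last use position that
// remains with this range (or nullptr if there is none).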
551 UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
552                                  Zone* zone,
553                                  HintConnectionOption connect_hints) {
554   DCHECK(Start() < position);
555   DCHECK(End() > position);
556   DCHECK(result->IsEmpty());
557   // Find the last interval that ends before the position. If the
558   // position is contained in one of the intervals in the chain, we
559   // split that interval and use the first part.
560   UseInterval* current = FirstSearchIntervalForPosition(position);
561 
562   // If the split position coincides with the beginning of a use interval
563   // we need to split use positions in a special way.
564   bool split_at_start = false;
565 
566   if (current->start() == position) {
567     // When splitting at start we need to locate the previous use interval.
568     current = first_interval_;
569   }
570 
571   UseInterval* after = nullptr;
572   while (current != nullptr) {
573     if (current->Contains(position)) {
574       after = current->SplitAt(position, zone);
575       break;
576     }
577     UseInterval* next = current->next();
578     if (next->start() >= position) {
579       split_at_start = (next->start() == position);
580       after = next;
581       current->set_next(nullptr);
582       break;
583     }
584     current = next;
585   }
586   DCHECK_NOT_NULL(after);
587 
588   // Partition original use intervals to the two live ranges.
589   UseInterval* before = current;
590   result->last_interval_ =
591       (last_interval_ == before)
592           ? after            // Only interval in the range after split.
593           : last_interval_;  // Last interval of the original range.
594   result->first_interval_ = after;
595   last_interval_ = before;
596 
597   // Find the last use position before the split and the first use
598   // position after it.
599   UsePosition* use_after = first_pos();
600   UsePosition* use_before = nullptr;
601   if (split_at_start) {
602     // The split position coincides with the beginning of a use interval (the
603     // end of a lifetime hole). A use at this position should be attributed to
604     // the split child, because the split child owns the use interval covering it.
605     while (use_after != nullptr && use_after->pos() < position) {
606       use_before = use_after;
607       use_after = use_after->next();
608     }
609   } else {
610     while (use_after != nullptr && use_after->pos() <= position) {
611       use_before = use_after;
612       use_after = use_after->next();
613     }
614   }
615 
616   // Partition original use positions to the two live ranges.
617   if (use_before != nullptr) {
618     use_before->set_next(nullptr);
619   } else {
620     first_pos_ = nullptr;
621   }
622   result->first_pos_ = use_after;
623   result->current_hint_position_ = current_hint_position_;
624 
625   // Discard cached iteration state. It might be pointing
626   // to a use that no longer belongs to this live range.
627   last_processed_use_ = nullptr;
628   current_interval_ = nullptr;
629 
630   if (connect_hints == ConnectHints && use_before != nullptr &&
631       use_after != nullptr) {
632     use_after->SetHint(use_before);
633     result->current_hint_position_ = use_after;
634   }
635 #ifdef DEBUG
636   VerifyChildStructure();
637   result->VerifyChildStructure();
638 #endif
639   return use_before;
640 }
641 
642 void LiveRange::UpdateParentForAllChildren(TopLevelLiveRange* new_top_level) {
643   LiveRange* child = this;
644   for (; child != nullptr; child = child->next()) {
645     child->top_level_ = new_top_level;
646   }
647 }
648 
649 void LiveRange::ConvertUsesToOperand(const InstructionOperand& op,
650                                      const InstructionOperand& spill_op) {
651   for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
652     DCHECK(Start() <= pos->pos() && pos->pos() <= End());
653     if (!pos->HasOperand()) continue;
654     switch (pos->type()) {
655       case UsePositionType::kRequiresSlot:
656         DCHECK(spill_op.IsStackSlot() || spill_op.IsFPStackSlot());
657         InstructionOperand::ReplaceWith(pos->operand(), &spill_op);
658         break;
659       case UsePositionType::kRequiresRegister:
660         DCHECK(op.IsRegister() || op.IsFPRegister());
661         V8_FALLTHROUGH;
662       case UsePositionType::kRegisterOrSlot:
663       case UsePositionType::kRegisterOrSlotOrConstant:
664         InstructionOperand::ReplaceWith(pos->operand(), &op);
665         break;
666     }
667   }
668 }
669 
670 // This implements an ordering on live ranges so that they are ordered by their
671 // start positions.  This is needed for the correctness of the register
672 // allocation algorithm.  If two live ranges start at the same offset then there
673 // is a tie breaker based on where the value is first used.  This part of the
674 // ordering is merely a heuristic.
675 bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
676   LifetimePosition start = Start();
677   LifetimePosition other_start = other->Start();
678   if (start == other_start) {
679     // Prefer the register that has a controlflow hint to make sure it gets
680     // allocated first. This allows the control-flow-aware allocation to
681     // just put ranges back into the queue without other ranges interfering.
682     if (controlflow_hint() < other->controlflow_hint()) {
683       return true;
684     }
685     // The other has a smaller hint.
686     if (controlflow_hint() > other->controlflow_hint()) {
687       return false;
688     }
689     // Both have the same hint or no hint at all. Use first use position.
690     UsePosition* pos = first_pos();
691     UsePosition* other_pos = other->first_pos();
692     // To make the order total, handle the case where both positions are null.
693     if (pos == other_pos) return TopLevel()->vreg() < other->TopLevel()->vreg();
694     if (pos == nullptr) return false;
695     if (other_pos == nullptr) return true;
696     // To make the order total, handle the case where both positions are equal.
697     if (pos->pos() == other_pos->pos())
698       return TopLevel()->vreg() < other->TopLevel()->vreg();
699     return pos->pos() < other_pos->pos();
700   }
701   return start < other_start;
702 }
703 
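// Stores `register_index` as the assigned register of every use position of
// this range that may live in a register (i.e. everything except slot-only
// uses).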
704 void LiveRange::SetUseHints(int register_index) {
705   for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
706     if (!pos->HasOperand()) continue;
707     switch (pos->type()) {
708       case UsePositionType::kRequiresSlot:
709         break;
710       case UsePositionType::kRequiresRegister:
711       case UsePositionType::kRegisterOrSlot:
712       case UsePositionType::kRegisterOrSlotOrConstant:
713         pos->set_assigned_register(register_index);
714         break;
715     }
716   }
717 }
718 
719 bool LiveRange::CanCover(LifetimePosition position) const {
720   if (IsEmpty()) return false;
721   return Start() <= position && position < End();
722 }
723 
724 bool LiveRange::Covers(LifetimePosition position) const {
725   if (!CanCover(position)) return false;
726   UseInterval* start_search = FirstSearchIntervalForPosition(position);
727   for (UseInterval* interval = start_search; interval != nullptr;
728        interval = interval->next()) {
729     DCHECK(interval->next() == nullptr ||
730            interval->next()->start() >= interval->start());
731     AdvanceLastProcessedMarker(interval, position);
732     if (interval->Contains(position)) return true;
733     if (interval->start() > position) return false;
734   }
735   return false;
736 }
737 
738 LifetimePosition LiveRange::NextEndAfter(LifetimePosition position) const {
739   UseInterval* start_search = FirstSearchIntervalForPosition(position);
740   while (start_search->end() < position) {
741     start_search = start_search->next();
742   }
743   return start_search->end();
744 }
745 
746 LifetimePosition LiveRange::NextStartAfter(LifetimePosition position) {
747   UseInterval* start_search = FirstSearchIntervalForPosition(position);
748   while (start_search->start() < position) {
749     start_search = start_search->next();
750   }
751   next_start_ = start_search->start();
752   return next_start_;
753 }
754 
755 LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const {
756   UseInterval* b = other->first_interval();
757   if (b == nullptr) return LifetimePosition::Invalid();
758   LifetimePosition advance_last_processed_up_to = b->start();
759   UseInterval* a = FirstSearchIntervalForPosition(b->start());
760   while (a != nullptr && b != nullptr) {
761     if (a->start() > other->End()) break;
762     if (b->start() > End()) break;
763     LifetimePosition cur_intersection = a->Intersect(b);
764     if (cur_intersection.IsValid()) {
765       return cur_intersection;
766     }
767     if (a->start() < b->start()) {
768       a = a->next();
769       if (a == nullptr || a->start() > other->End()) break;
770       AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
771     } else {
772       b = b->next();
773     }
774   }
775   return LifetimePosition::Invalid();
776 }
777 
778 void LiveRange::Print(const RegisterConfiguration* config,
779                       bool with_children) const {
780   StdoutStream os;
781   PrintableLiveRange wrapper;
782   wrapper.register_configuration_ = config;
783   for (const LiveRange* i = this; i != nullptr; i = i->next()) {
784     wrapper.range_ = i;
785     os << wrapper << std::endl;
786     if (!with_children) break;
787   }
788 }
789 
790 void LiveRange::Print(bool with_children) const {
791   Print(RegisterConfiguration::Default(), with_children);
792 }
793 
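// Reports, via `hint`, the register already chosen for this range's bundle.
// Returns false if the range has no bundle or no register has been picked yet.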
794 bool LiveRange::RegisterFromBundle(int* hint) const {
795   if (bundle_ == nullptr || bundle_->reg() == kUnassignedRegister) return false;
796   *hint = bundle_->reg();
797   return true;
798 }
799 
800 void LiveRange::UpdateBundleRegister(int reg) const {
801   if (bundle_ == nullptr || bundle_->reg() != kUnassignedRegister) return;
802   bundle_->set_reg(reg);
803 }
804 
805 struct TopLevelLiveRange::SpillMoveInsertionList : ZoneObject {
806   SpillMoveInsertionList(int gap_index, InstructionOperand* operand,
807                          SpillMoveInsertionList* next)
808       : gap_index(gap_index), operand(operand), next(next) {}
809   const int gap_index;
810   InstructionOperand* const operand;
811   SpillMoveInsertionList* next;
812 };
813 
814 TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineRepresentation rep)
815     : LiveRange(0, rep, this),
816       vreg_(vreg),
817       last_child_id_(0),
818       spill_operand_(nullptr),
819       spill_move_insertion_locations_(nullptr),
820       spilled_in_deferred_blocks_(false),
821       has_preassigned_slot_(false),
822       spill_start_index_(kMaxInt),
823       last_pos_(nullptr),
824       last_child_covers_(this) {
825   bits_ |= SpillTypeField::encode(SpillType::kNoSpillType);
826 }
827 
828 void TopLevelLiveRange::RecordSpillLocation(Zone* zone, int gap_index,
829                                             InstructionOperand* operand) {
830   DCHECK(HasNoSpillType());
831   spill_move_insertion_locations_ = zone->New<SpillMoveInsertionList>(
832       gap_index, operand, spill_move_insertion_locations_);
833 }
834 
835 void TopLevelLiveRange::CommitSpillMoves(TopTierRegisterAllocationData* data,
836                                          const InstructionOperand& op) {
837   DCHECK_IMPLIES(op.IsConstant(),
838                  GetSpillMoveInsertionLocations(data) == nullptr);
839 
840   if (HasGeneralSpillRange()) {
841     SetLateSpillingSelected(false);
842   }
843 
844   InstructionSequence* sequence = data->code();
845   Zone* zone = sequence->zone();
846 
847   for (SpillMoveInsertionList* to_spill = GetSpillMoveInsertionLocations(data);
848        to_spill != nullptr; to_spill = to_spill->next) {
849     Instruction* instr = sequence->InstructionAt(to_spill->gap_index);
850     ParallelMove* move =
851         instr->GetOrCreateParallelMove(Instruction::START, zone);
852     move->AddMove(*to_spill->operand, op);
853     instr->block()->mark_needs_frame();
854   }
855 }
856 
857 void TopLevelLiveRange::FilterSpillMoves(TopTierRegisterAllocationData* data,
858                                          const InstructionOperand& op) {
859   DCHECK_IMPLIES(op.IsConstant(),
860                  GetSpillMoveInsertionLocations(data) == nullptr);
861   bool might_be_duplicated = has_slot_use() || spilled();
862   InstructionSequence* sequence = data->code();
863 
864   SpillMoveInsertionList* previous = nullptr;
865   for (SpillMoveInsertionList* to_spill = GetSpillMoveInsertionLocations(data);
866        to_spill != nullptr; previous = to_spill, to_spill = to_spill->next) {
867     Instruction* instr = sequence->InstructionAt(to_spill->gap_index);
868     ParallelMove* move = instr->GetParallelMove(Instruction::START);
869     // Skip insertion if it's possible that the move exists already as a
870     // constraint move from a fixed output register to a slot.
871     bool found = false;
872     if (move != nullptr && (might_be_duplicated || has_preassigned_slot())) {
873       for (MoveOperands* move_op : *move) {
874         if (move_op->IsEliminated()) continue;
875         if (move_op->source().Equals(*to_spill->operand) &&
876             move_op->destination().Equals(op)) {
877           found = true;
878           if (has_preassigned_slot()) move_op->Eliminate();
879           break;
880         }
881       }
882     }
883     if (found || has_preassigned_slot()) {
884       // Remove the item from the list.
885       if (previous == nullptr) {
886         spill_move_insertion_locations_ = to_spill->next;
887       } else {
888         previous->next = to_spill->next;
889       }
890       // Even though this location doesn't need a spill instruction, the
891       // block does require a frame.
892       instr->block()->mark_needs_frame();
893     }
894   }
895 }
896 
897 void TopLevelLiveRange::SetSpillOperand(InstructionOperand* operand) {
898   DCHECK(HasNoSpillType());
899   DCHECK(!operand->IsUnallocated() && !operand->IsImmediate());
900   set_spill_type(SpillType::kSpillOperand);
901   spill_operand_ = operand;
902 }
903 
904 void TopLevelLiveRange::SetSpillRange(SpillRange* spill_range) {
905   DCHECK(!HasSpillOperand());
906   DCHECK(spill_range);
907   spill_range_ = spill_range;
908 }
909 
910 AllocatedOperand TopLevelLiveRange::GetSpillRangeOperand() const {
911   SpillRange* spill_range = GetSpillRange();
912   int index = spill_range->assigned_slot();
913   return AllocatedOperand(LocationOperand::STACK_SLOT, representation(), index);
914 }
915 
916 void TopLevelLiveRange::VerifyChildrenInOrder() const {
917   LifetimePosition last_end = End();
918   for (const LiveRange* child = this->next(); child != nullptr;
919        child = child->next()) {
920     DCHECK(last_end <= child->Start());
921     last_end = child->End();
922   }
923 }
924 
925 LiveRange* TopLevelLiveRange::GetChildCovers(LifetimePosition pos) {
926   LiveRange* child = last_child_covers_;
927   DCHECK_NE(child, nullptr);
928   if (pos < child->Start()) {
929     // Cached value has advanced too far; start from the top.
930     child = this;
931   }
932   LiveRange* previous_child = nullptr;
933   while (child != nullptr && child->End() <= pos) {
934     previous_child = child;
935     child = child->next();
936   }
937 
938   // If we've walked past the end, cache the last child instead. This allows
939   // future calls that are also past the end to be fast, since they will know
940   // that there is no need to reset the search to the beginning.
941   last_child_covers_ = child == nullptr ? previous_child : child;
942 
943   return !child || !child->Covers(pos) ? nullptr : child;
944 }
945 
946 void TopLevelLiveRange::Verify() const {
947   VerifyChildrenInOrder();
948   for (const LiveRange* child = this; child != nullptr; child = child->next()) {
949     VerifyChildStructure();
950   }
951 }
952 
953 void TopLevelLiveRange::ShortenTo(LifetimePosition start, bool trace_alloc) {
954   TRACE_COND(trace_alloc, "Shorten live range %d to [%d\n", vreg(),
955              start.value());
956   DCHECK_NOT_NULL(first_interval_);
957   DCHECK(first_interval_->start() <= start);
958   DCHECK(start < first_interval_->end());
959   first_interval_->set_start(start);
960 }
961 
962 void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
963                                        LifetimePosition end, Zone* zone,
964                                        bool trace_alloc) {
965   TRACE_COND(trace_alloc, "Ensure live range %d in interval [%d %d[\n", vreg(),
966              start.value(), end.value());
967   LifetimePosition new_end = end;
968   while (first_interval_ != nullptr && first_interval_->start() <= end) {
969     if (first_interval_->end() > end) {
970       new_end = first_interval_->end();
971     }
972     first_interval_ = first_interval_->next();
973   }
974 
975   UseInterval* new_interval = zone->New<UseInterval>(start, new_end);
976   new_interval->set_next(first_interval_);
977   first_interval_ = new_interval;
978   if (new_interval->next() == nullptr) {
979     last_interval_ = new_interval;
980   }
981 }
982 
983 void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
984                                        LifetimePosition end, Zone* zone,
985                                        bool trace_alloc) {
986   TRACE_COND(trace_alloc, "Add to live range %d interval [%d %d[\n", vreg(),
987              start.value(), end.value());
988   if (first_interval_ == nullptr) {
989     UseInterval* interval = zone->New<UseInterval>(start, end);
990     first_interval_ = interval;
991     last_interval_ = interval;
992   } else {
993     if (end == first_interval_->start()) {
994       first_interval_->set_start(start);
995     } else if (end < first_interval_->start()) {
996       UseInterval* interval = zone->New<UseInterval>(start, end);
997       interval->set_next(first_interval_);
998       first_interval_ = interval;
999     } else {
1000       // The order of instruction processing (see ProcessInstructions) guarantees
1001       // that each new use interval either precedes, intersects with or touches
1002       // the last added interval.
1003       DCHECK(start <= first_interval_->end());
1004       first_interval_->set_start(std::min(start, first_interval_->start()));
1005       first_interval_->set_end(std::max(end, first_interval_->end()));
1006     }
1007   }
1008 }
1009 
1010 void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos, bool trace_alloc) {
1011   LifetimePosition pos = use_pos->pos();
1012   TRACE_COND(trace_alloc, "Add to live range %d use position %d\n", vreg(),
1013              pos.value());
1014   UsePosition* prev_hint = nullptr;
1015   UsePosition* prev = nullptr;
1016   UsePosition* current = first_pos_;
1017   while (current != nullptr && current->pos() < pos) {
1018     prev_hint = current->HasHint() ? current : prev_hint;
1019     prev = current;
1020     current = current->next();
1021   }
1022 
1023   if (prev == nullptr) {
1024     use_pos->set_next(first_pos_);
1025     first_pos_ = use_pos;
1026   } else {
1027     use_pos->set_next(prev->next());
1028     prev->set_next(use_pos);
1029   }
1030 
1031   if (prev_hint == nullptr && use_pos->HasHint()) {
1032     current_hint_position_ = use_pos;
1033   }
1034 }
1035 
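// Returns true if the two interval lists, each sorted by ascending start
// position, overlap anywhere.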
1036 static bool AreUseIntervalsIntersecting(UseInterval* interval1,
1037                                         UseInterval* interval2) {
1038   while (interval1 != nullptr && interval2 != nullptr) {
1039     if (interval1->start() < interval2->start()) {
1040       if (interval1->end() > interval2->start()) {
1041         return true;
1042       }
1043       interval1 = interval1->next();
1044     } else {
1045       if (interval2->end() > interval1->start()) {
1046         return true;
1047       }
1048       interval2 = interval2->next();
1049     }
1050   }
1051   return false;
1052 }
1053 
1054 std::ostream& operator<<(std::ostream& os,
1055                          const PrintableLiveRange& printable_range) {
1056   const LiveRange* range = printable_range.range_;
1057   os << "Range: " << range->TopLevel()->vreg() << ":" << range->relative_id()
1058      << " ";
1059   if (range->TopLevel()->is_phi()) os << "phi ";
1060   if (range->TopLevel()->is_non_loop_phi()) os << "nlphi ";
1061 
1062   os << "{" << std::endl;
1063   UseInterval* interval = range->first_interval();
1064   UsePosition* use_pos = range->first_pos();
1065   while (use_pos != nullptr) {
1066     if (use_pos->HasOperand()) {
1067       os << *use_pos->operand() << use_pos->pos() << " ";
1068     }
1069     use_pos = use_pos->next();
1070   }
1071   os << std::endl;
1072 
1073   while (interval != nullptr) {
1074     os << '[' << interval->start() << ", " << interval->end() << ')'
1075        << std::endl;
1076     interval = interval->next();
1077   }
1078   os << "}";
1079   return os;
1080 }
1081 
1082 namespace {
1083 void PrintBlockRow(std::ostream& os, const InstructionBlocks& blocks) {
1084   os << "     ";
1085   for (auto block : blocks) {
1086     LifetimePosition start_pos = LifetimePosition::GapFromInstructionIndex(
1087         block->first_instruction_index());
1088     LifetimePosition end_pos = LifetimePosition::GapFromInstructionIndex(
1089                                    block->last_instruction_index())
1090                                    .NextFullStart();
1091     int length = end_pos.value() - start_pos.value();
1092     constexpr int kMaxPrefixLength = 32;
1093     char buffer[kMaxPrefixLength];
1094     int rpo_number = block->rpo_number().ToInt();
1095     const char* deferred_marker = block->IsDeferred() ? "(deferred)" : "";
1096     int max_prefix_length = std::min(length, kMaxPrefixLength);
1097     int prefix = snprintf(buffer, max_prefix_length, "[-B%d-%s", rpo_number,
1098                           deferred_marker);
1099     os << buffer;
1100     int remaining = length - std::min(prefix, max_prefix_length) - 1;
1101     for (int i = 0; i < remaining; ++i) os << '-';
1102     os << ']';
1103   }
1104   os << '\n';
1105 }
1106 }  // namespace
1107 
1108 void LinearScanAllocator::PrintRangeRow(std::ostream& os,
1109                                         const TopLevelLiveRange* toplevel) {
1110   int position = 0;
1111   os << std::setw(3) << toplevel->vreg() << ": ";
1112 
1113   const char* kind_string;
1114   switch (toplevel->spill_type()) {
1115     case TopLevelLiveRange::SpillType::kSpillRange:
1116       kind_string = "ss";
1117       break;
1118     case TopLevelLiveRange::SpillType::kDeferredSpillRange:
1119       kind_string = "sd";
1120       break;
1121     case TopLevelLiveRange::SpillType::kSpillOperand:
1122       kind_string = "so";
1123       break;
1124     default:
1125       kind_string = "s?";
1126   }
1127 
1128   for (const LiveRange* range = toplevel; range != nullptr;
1129        range = range->next()) {
1130     for (UseInterval* interval = range->first_interval(); interval != nullptr;
1131          interval = interval->next()) {
1132       LifetimePosition start = interval->start();
1133       LifetimePosition end = interval->end();
1134       CHECK_GE(start.value(), position);
1135       for (; start.value() > position; position++) {
1136         os << ' ';
1137       }
1138       int length = end.value() - start.value();
1139       constexpr int kMaxPrefixLength = 32;
1140       char buffer[kMaxPrefixLength];
1141       int max_prefix_length = std::min(length + 1, kMaxPrefixLength);
1142       int prefix;
1143       if (range->spilled()) {
1144         prefix = snprintf(buffer, max_prefix_length, "|%s", kind_string);
1145       } else {
1146         prefix = snprintf(buffer, max_prefix_length, "|%s",
1147                           RegisterName(range->assigned_register()));
1148       }
1149       os << buffer;
1150       position += std::min(prefix, max_prefix_length - 1);
1151       CHECK_GE(end.value(), position);
1152       const char line_style = range->spilled() ? '-' : '=';
1153       for (; end.value() > position; position++) {
1154         os << line_style;
1155       }
1156     }
1157   }
1158   os << '\n';
1159 }
1160 
1161 void LinearScanAllocator::PrintRangeOverview(std::ostream& os) {
1162   PrintBlockRow(os, code()->instruction_blocks());
1163   for (auto const toplevel : data()->fixed_live_ranges()) {
1164     if (toplevel == nullptr) continue;
1165     PrintRangeRow(os, toplevel);
1166   }
1167   int rowcount = 0;
1168   for (auto toplevel : data()->live_ranges()) {
1169     if (!CanProcessRange(toplevel)) continue;
1170     if (rowcount++ % 10 == 0) PrintBlockRow(os, code()->instruction_blocks());
1171     PrintRangeRow(os, toplevel);
1172   }
1173 }
1174 
1175 SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
1176     : live_ranges_(zone),
1177       assigned_slot_(kUnassignedSlot),
1178       byte_width_(ByteWidthForStackSlot(parent->representation())) {
1179   // Spill ranges are created for top-level ranges. This is so that, when merging
1180   // decisions are made, we consider the full extent of the virtual register,
1181   // and avoid clobbering it.
1182   UseInterval* result = nullptr;
1183   UseInterval* node = nullptr;
1184   // Copy the intervals for all ranges.
1185   for (LiveRange* range = parent; range != nullptr; range = range->next()) {
1186     UseInterval* src = range->first_interval();
1187     while (src != nullptr) {
1188       UseInterval* new_node = zone->New<UseInterval>(src->start(), src->end());
1189       if (result == nullptr) {
1190         result = new_node;
1191       } else {
1192         node->set_next(new_node);
1193       }
1194       node = new_node;
1195       src = src->next();
1196     }
1197   }
1198   use_interval_ = result;
1199   live_ranges().push_back(parent);
1200   end_position_ = node->end();
1201   parent->SetSpillRange(this);
1202 }
1203 
1204 bool SpillRange::IsIntersectingWith(SpillRange* other) const {
1205   if (this->use_interval_ == nullptr || other->use_interval_ == nullptr ||
1206       this->End() <= other->use_interval_->start() ||
1207       other->End() <= this->use_interval_->start()) {
1208     return false;
1209   }
1210   return AreUseIntervalsIntersecting(use_interval_, other->use_interval_);
1211 }
1212 
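// Merges `other` into this spill range if neither has been assigned a slot,
// both have the same byte width, and their use intervals do not intersect.
// On success, `other` is emptied and its live ranges are re-pointed here.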
1213 bool SpillRange::TryMerge(SpillRange* other) {
1214   if (HasSlot() || other->HasSlot()) return false;
1215   if (byte_width() != other->byte_width() || IsIntersectingWith(other))
1216     return false;
1217 
1218   LifetimePosition max = LifetimePosition::MaxPosition();
1219   if (End() < other->End() && other->End() != max) {
1220     end_position_ = other->End();
1221   }
1222   other->end_position_ = max;
1223 
1224   MergeDisjointIntervals(other->use_interval_);
1225   other->use_interval_ = nullptr;
1226 
1227   for (TopLevelLiveRange* range : other->live_ranges()) {
1228     DCHECK(range->GetSpillRange() == other);
1229     range->SetSpillRange(this);
1230   }
1231 
1232   live_ranges().insert(live_ranges().end(), other->live_ranges().begin(),
1233                        other->live_ranges().end());
1234   other->live_ranges().clear();
1235 
1236   return true;
1237 }
1238 
1239 void SpillRange::MergeDisjointIntervals(UseInterval* other) {
1240   UseInterval* tail = nullptr;
1241   UseInterval* current = use_interval_;
1242   while (other != nullptr) {
1243     // Make sure the 'current' list starts first
1244     if (current == nullptr || current->start() > other->start()) {
1245       std::swap(current, other);
1246     }
1247     // Check disjointness
1248     DCHECK(other == nullptr || current->end() <= other->start());
1249     // Append the 'current' node to the result accumulator and move forward
1250     if (tail == nullptr) {
1251       use_interval_ = current;
1252     } else {
1253       tail->set_next(current);
1254     }
1255     tail = current;
1256     current = current->next();
1257   }
1258   // Other list is empty => we are done
1259 }
1260 
1261 void SpillRange::Print() const {
1262   StdoutStream os;
1263   os << "{" << std::endl;
1264   for (TopLevelLiveRange* range : live_ranges()) {
1265     os << range->vreg() << " ";
1266   }
1267   os << std::endl;
1268 
1269   for (UseInterval* i = interval(); i != nullptr; i = i->next()) {
1270     os << '[' << i->start() << ", " << i->end() << ')' << std::endl;
1271   }
1272   os << "}" << std::endl;
1273 }
1274 
1275 TopTierRegisterAllocationData::PhiMapValue::PhiMapValue(
1276     PhiInstruction* phi, const InstructionBlock* block, Zone* zone)
1277     : phi_(phi),
1278       block_(block),
1279       incoming_operands_(zone),
1280       assigned_register_(kUnassignedRegister) {
1281   incoming_operands_.reserve(phi->operands().size());
1282 }
1283 
1284 void TopTierRegisterAllocationData::PhiMapValue::AddOperand(
1285     InstructionOperand* operand) {
1286   incoming_operands_.push_back(operand);
1287 }
1288 
1289 void TopTierRegisterAllocationData::PhiMapValue::CommitAssignment(
1290     const InstructionOperand& assigned) {
1291   for (InstructionOperand* operand : incoming_operands_) {
1292     InstructionOperand::ReplaceWith(operand, &assigned);
1293   }
1294 }
1295 
1296 TopTierRegisterAllocationData::TopTierRegisterAllocationData(
1297     const RegisterConfiguration* config, Zone* zone, Frame* frame,
1298     InstructionSequence* code, RegisterAllocationFlags flags,
1299     TickCounter* tick_counter, const char* debug_name)
1300     : RegisterAllocationData(Type::kTopTier),
1301       allocation_zone_(zone),
1302       frame_(frame),
1303       code_(code),
1304       debug_name_(debug_name),
1305       config_(config),
1306       phi_map_(allocation_zone()),
1307       live_in_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
1308       live_out_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
1309       live_ranges_(code->VirtualRegisterCount() * 2, nullptr,
1310                    allocation_zone()),
1311       fixed_live_ranges_(kNumberOfFixedRangesPerRegister *
1312                              this->config()->num_general_registers(),
1313                          nullptr, allocation_zone()),
1314       fixed_float_live_ranges_(allocation_zone()),
1315       fixed_double_live_ranges_(kNumberOfFixedRangesPerRegister *
1316                                     this->config()->num_double_registers(),
1317                                 nullptr, allocation_zone()),
1318       fixed_simd128_live_ranges_(allocation_zone()),
1319       spill_ranges_(code->VirtualRegisterCount(), nullptr, allocation_zone()),
1320       delayed_references_(allocation_zone()),
1321       assigned_registers_(nullptr),
1322       assigned_double_registers_(nullptr),
1323       virtual_register_count_(code->VirtualRegisterCount()),
1324       preassigned_slot_ranges_(zone),
1325       spill_state_(code->InstructionBlockCount(), ZoneVector<LiveRange*>(zone),
1326                    zone),
1327       flags_(flags),
1328       tick_counter_(tick_counter) {
1329   if (!kSimpleFPAliasing) {
1330     fixed_float_live_ranges_.resize(
1331         kNumberOfFixedRangesPerRegister * this->config()->num_float_registers(),
1332         nullptr);
1333     fixed_simd128_live_ranges_.resize(
1334         kNumberOfFixedRangesPerRegister *
1335             this->config()->num_simd128_registers(),
1336         nullptr);
1337   }
1338 
1339   assigned_registers_ = code_zone()->New<BitVector>(
1340       this->config()->num_general_registers(), code_zone());
1341   assigned_double_registers_ = code_zone()->New<BitVector>(
1342       this->config()->num_double_registers(), code_zone());
1343   fixed_register_use_ = code_zone()->New<BitVector>(
1344       this->config()->num_general_registers(), code_zone());
1345   fixed_fp_register_use_ = code_zone()->New<BitVector>(
1346       this->config()->num_double_registers(), code_zone());
1347 
1348   this->frame()->SetAllocatedRegisters(assigned_registers_);
1349   this->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
1350 }
1351 
1352 MoveOperands* TopTierRegisterAllocationData::AddGapMove(
1353     int index, Instruction::GapPosition position,
1354     const InstructionOperand& from, const InstructionOperand& to) {
1355   Instruction* instr = code()->InstructionAt(index);
1356   ParallelMove* moves = instr->GetOrCreateParallelMove(position, code_zone());
1357   return moves->AddMove(from, to);
1358 }
1359 
1360 MachineRepresentation TopTierRegisterAllocationData::RepresentationFor(
1361     int virtual_register) {
1362   DCHECK_LT(virtual_register, code()->VirtualRegisterCount());
1363   return code()->GetRepresentation(virtual_register);
1364 }
1365 
1366 TopLevelLiveRange* TopTierRegisterAllocationData::GetOrCreateLiveRangeFor(
1367     int index) {
1368   if (index >= static_cast<int>(live_ranges().size())) {
1369     live_ranges().resize(index + 1, nullptr);
1370   }
1371   TopLevelLiveRange* result = live_ranges()[index];
1372   if (result == nullptr) {
1373     result = NewLiveRange(index, RepresentationFor(index));
1374     live_ranges()[index] = result;
1375   }
1376   return result;
1377 }
1378 
1379 TopLevelLiveRange* TopTierRegisterAllocationData::NewLiveRange(
1380     int index, MachineRepresentation rep) {
1381   return allocation_zone()->New<TopLevelLiveRange>(index, rep);
1382 }
1383 
1384 int TopTierRegisterAllocationData::GetNextLiveRangeId() {
1385   int vreg = virtual_register_count_++;
1386   if (vreg >= static_cast<int>(live_ranges().size())) {
1387     live_ranges().resize(vreg + 1, nullptr);
1388   }
1389   return vreg;
1390 }
1391 
1392 TopLevelLiveRange* TopTierRegisterAllocationData::NextLiveRange(
1393     MachineRepresentation rep) {
1394   int vreg = GetNextLiveRangeId();
1395   TopLevelLiveRange* ret = NewLiveRange(vreg, rep);
1396   return ret;
1397 }
1398 
1399 TopTierRegisterAllocationData::PhiMapValue*
1400 TopTierRegisterAllocationData::InitializePhiMap(const InstructionBlock* block,
1401                                                 PhiInstruction* phi) {
1402   TopTierRegisterAllocationData::PhiMapValue* map_value =
1403       allocation_zone()->New<TopTierRegisterAllocationData::PhiMapValue>(
1404           phi, block, allocation_zone());
1405   auto res =
1406       phi_map_.insert(std::make_pair(phi->virtual_register(), map_value));
1407   DCHECK(res.second);
1408   USE(res);
1409   return map_value;
1410 }
1411 
1412 TopTierRegisterAllocationData::PhiMapValue*
1413 TopTierRegisterAllocationData::GetPhiMapValueFor(int virtual_register) {
1414   auto it = phi_map_.find(virtual_register);
1415   DCHECK(it != phi_map_.end());
1416   return it->second;
1417 }
1418 
1419 TopTierRegisterAllocationData::PhiMapValue*
1420 TopTierRegisterAllocationData::GetPhiMapValueFor(TopLevelLiveRange* top_range) {
1421   return GetPhiMapValueFor(top_range->vreg());
1422 }
1423 
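// A value that is still live on entry to the first block has a use that is
// never preceded by a definition; the loop below reports each such virtual
// register.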
1424 bool TopTierRegisterAllocationData::ExistsUseWithoutDefinition() {
1425   bool found = false;
1426   BitVector::Iterator iterator(live_in_sets()[0]);
1427   while (!iterator.Done()) {
1428     found = true;
1429     int operand_index = iterator.Current();
1430     PrintF("Register allocator error: live v%d reached first block.\n",
1431            operand_index);
1432     LiveRange* range = GetOrCreateLiveRangeFor(operand_index);
1433     PrintF("  (first use is at %d)\n", range->first_pos()->pos().value());
1434     if (debug_name() == nullptr) {
1435       PrintF("\n");
1436     } else {
1437       PrintF("  (function: %s)\n", debug_name());
1438     }
1439     iterator.Advance();
1440   }
1441   return found;
1442 }
1443 
1444 // If a range is defined in a deferred block, we expect the entire range to
1445 // cover only positions in deferred blocks. Otherwise, a block on the hot
1446 // path would be dominated by a deferred block, meaning it would be
1447 // unreachable without passing through the deferred block, which is
1448 // contradictory. In particular, when such a range contributes a result back
1449 // to the hot path, it does so as one of the inputs of a phi. In that case,
1450 // the value is transferred via a move in the END gap position of the last
1451 // instruction of a deferred block.
1452 bool TopTierRegisterAllocationData::RangesDefinedInDeferredStayInDeferred() {
1453   const size_t live_ranges_size = live_ranges().size();
1454   for (const TopLevelLiveRange* range : live_ranges()) {
1455     CHECK_EQ(live_ranges_size,
1456              live_ranges().size());  // TODO(neis): crbug.com/831822
1457     if (range == nullptr || range->IsEmpty() ||
1458         !code()
1459              ->GetInstructionBlock(range->Start().ToInstructionIndex())
1460              ->IsDeferred()) {
1461       continue;
1462     }
1463     for (const UseInterval* i = range->first_interval(); i != nullptr;
1464          i = i->next()) {
1465       int first = i->FirstGapIndex();
1466       int last = i->LastGapIndex();
1467       for (int instr = first; instr <= last;) {
1468         const InstructionBlock* block = code()->GetInstructionBlock(instr);
1469         if (!block->IsDeferred()) return false;
1470         instr = block->last_instruction_index() + 1;
1471       }
1472     }
1473   }
1474   return true;
1475 }
1476 
1477 SpillRange* TopTierRegisterAllocationData::AssignSpillRangeToLiveRange(
1478     TopLevelLiveRange* range, SpillMode spill_mode) {
1479   using SpillType = TopLevelLiveRange::SpillType;
1480   DCHECK(!range->HasSpillOperand());
1481 
1482   SpillRange* spill_range = range->GetAllocatedSpillRange();
1483   if (spill_range == nullptr) {
1484     spill_range = allocation_zone()->New<SpillRange>(range, allocation_zone());
1485   }
1486   if (spill_mode == SpillMode::kSpillDeferred &&
1487       (range->spill_type() != SpillType::kSpillRange)) {
1488     range->set_spill_type(SpillType::kDeferredSpillRange);
1489   } else {
1490     range->set_spill_type(SpillType::kSpillRange);
1491   }
1492 
1493   spill_ranges()[range->vreg()] = spill_range;
1494   return spill_range;
1495 }
1496 
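// Track fixed-register uses imposed by instruction constraints. On targets
// without simple FP aliasing (e.g. ARM, where float32/simd128 registers
// overlap float64 registers), a kFloat32 or kSimd128 use is recorded against
// every aliased double register reported by
// RegisterConfiguration::GetAliases.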
1497 void TopTierRegisterAllocationData::MarkFixedUse(MachineRepresentation rep,
1498                                                  int index) {
1499   switch (rep) {
1500     case MachineRepresentation::kFloat32:
1501     case MachineRepresentation::kSimd128:
1502       if (kSimpleFPAliasing) {
1503         fixed_fp_register_use_->Add(index);
1504       } else {
1505         int alias_base_index = -1;
1506         int aliases = config()->GetAliases(
1507             rep, index, MachineRepresentation::kFloat64, &alias_base_index);
1508         DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
1509         while (aliases--) {
1510           int aliased_reg = alias_base_index + aliases;
1511           fixed_fp_register_use_->Add(aliased_reg);
1512         }
1513       }
1514       break;
1515     case MachineRepresentation::kFloat64:
1516       fixed_fp_register_use_->Add(index);
1517       break;
1518     default:
1519       DCHECK(!IsFloatingPoint(rep));
1520       fixed_register_use_->Add(index);
1521       break;
1522   }
1523 }
1524 
1525 bool TopTierRegisterAllocationData::HasFixedUse(MachineRepresentation rep,
1526                                                 int index) {
1527   switch (rep) {
1528     case MachineRepresentation::kFloat32:
1529     case MachineRepresentation::kSimd128:
1530       if (kSimpleFPAliasing) {
1531         return fixed_fp_register_use_->Contains(index);
1532       } else {
1533         int alias_base_index = -1;
1534         int aliases = config()->GetAliases(
1535             rep, index, MachineRepresentation::kFloat64, &alias_base_index);
1536         DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
1537         bool result = false;
1538         while (aliases-- && !result) {
1539           int aliased_reg = alias_base_index + aliases;
1540           result |= fixed_fp_register_use_->Contains(aliased_reg);
1541         }
1542         return result;
1543       }
1544       break;
1545     case MachineRepresentation::kFloat64:
1546       return fixed_fp_register_use_->Contains(index);
1547       break;
1548     default:
1549       DCHECK(!IsFloatingPoint(rep));
1550       return fixed_register_use_->Contains(index);
1551       break;
1552   }
1553 }
1554 
1555 void TopTierRegisterAllocationData::MarkAllocated(MachineRepresentation rep,
1556                                                   int index) {
1557   switch (rep) {
1558     case MachineRepresentation::kFloat32:
1559     case MachineRepresentation::kSimd128:
1560       if (kSimpleFPAliasing) {
1561         assigned_double_registers_->Add(index);
1562       } else {
1563         int alias_base_index = -1;
1564         int aliases = config()->GetAliases(
1565             rep, index, MachineRepresentation::kFloat64, &alias_base_index);
1566         DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
1567         while (aliases--) {
1568           int aliased_reg = alias_base_index + aliases;
1569           assigned_double_registers_->Add(aliased_reg);
1570         }
1571       }
1572       break;
1573     case MachineRepresentation::kFloat64:
1574       assigned_double_registers_->Add(index);
1575       break;
1576     default:
1577       DCHECK(!IsFloatingPoint(rep));
1578       assigned_registers_->Add(index);
1579       break;
1580   }
1581 }
1582 
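// A position is a block boundary if it is a full-start gap position that is
// either one past the last instruction of the sequence or the first
// instruction of some block.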
1583 bool TopTierRegisterAllocationData::IsBlockBoundary(
1584     LifetimePosition pos) const {
1585   return pos.IsFullStart() &&
1586          (static_cast<size_t>(pos.ToInstructionIndex()) ==
1587               code()->instructions().size() ||
1588           code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
1589               pos.ToInstructionIndex());
1590 }
1591 
1592 ConstraintBuilder::ConstraintBuilder(TopTierRegisterAllocationData* data)
1593     : data_(data) {}
1594 
1595 InstructionOperand* ConstraintBuilder::AllocateFixed(
1596     UnallocatedOperand* operand, int pos, bool is_tagged, bool is_input) {
1597   TRACE("Allocating fixed reg for op %d\n", operand->virtual_register());
1598   DCHECK(operand->HasFixedPolicy());
1599   InstructionOperand allocated;
1600   MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
1601   int virtual_register = operand->virtual_register();
1602   if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
1603     rep = data()->RepresentationFor(virtual_register);
1604   }
1605   if (operand->HasFixedSlotPolicy()) {
1606     allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT, rep,
1607                                  operand->fixed_slot_index());
1608   } else if (operand->HasFixedRegisterPolicy()) {
1609     DCHECK(!IsFloatingPoint(rep));
1610     DCHECK(data()->config()->IsAllocatableGeneralCode(
1611         operand->fixed_register_index()));
1612     allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
1613                                  operand->fixed_register_index());
1614   } else if (operand->HasFixedFPRegisterPolicy()) {
1615     DCHECK(IsFloatingPoint(rep));
1616     DCHECK_NE(InstructionOperand::kInvalidVirtualRegister, virtual_register);
1617     allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
1618                                  operand->fixed_register_index());
1619   } else {
1620     UNREACHABLE();
1621   }
1622   if (is_input && allocated.IsAnyRegister()) {
1623     data()->MarkFixedUse(rep, operand->fixed_register_index());
1624   }
1625   InstructionOperand::ReplaceWith(operand, &allocated);
1626   if (is_tagged) {
1627     TRACE("Fixed reg is tagged at %d\n", pos);
1628     Instruction* instr = code()->InstructionAt(pos);
1629     if (instr->HasReferenceMap()) {
1630       instr->reference_map()->RecordReference(*AllocatedOperand::cast(operand));
1631     }
1632   }
1633   return operand;
1634 }
1635 
1636 void ConstraintBuilder::MeetRegisterConstraints() {
1637   for (InstructionBlock* block : code()->instruction_blocks()) {
1638     data_->tick_counter()->TickAndMaybeEnterSafepoint();
1639     MeetRegisterConstraints(block);
1640   }
1641 }
1642 
1643 void ConstraintBuilder::MeetRegisterConstraints(const InstructionBlock* block) {
1644   int start = block->first_instruction_index();
1645   int end = block->last_instruction_index();
1646   DCHECK_NE(-1, start);
1647   for (int i = start; i <= end; ++i) {
1648     MeetConstraintsBefore(i);
1649     if (i != end) MeetConstraintsAfter(i);
1650   }
1651   // Meet register constraints for the last instruction in the block.
1652   MeetRegisterConstraintsForLastInstructionInBlock(block);
1653 }
1654 
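// The last instruction of a block may define values with fixed constraints
// (for example, results pinned to specific registers). Because those values
// flow into successor blocks, an unconstrained copy of each fixed output is
// created via a gap move at the START of every successor (each successor has
// exactly one predecessor here), and for outputs not pinned to a stack slot
// the successor's first gap is recorded as a possible spill location.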
1655 void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
1656     const InstructionBlock* block) {
1657   int end = block->last_instruction_index();
1658   Instruction* last_instruction = code()->InstructionAt(end);
1659   for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
1660     InstructionOperand* output_operand = last_instruction->OutputAt(i);
1661     DCHECK(!output_operand->IsConstant());
1662     UnallocatedOperand* output = UnallocatedOperand::cast(output_operand);
1663     int output_vreg = output->virtual_register();
1664     TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
1665     bool assigned = false;
1666     if (output->HasFixedPolicy()) {
1667       AllocateFixed(output, -1, false, false);
1668       // This value is produced on the stack; we never need to spill it.
1669       if (output->IsStackSlot()) {
1670         DCHECK(LocationOperand::cast(output)->index() <
1671                data()->frame()->GetSpillSlotCount());
1672         range->SetSpillOperand(LocationOperand::cast(output));
1673         range->SetSpillStartIndex(end);
1674         assigned = true;
1675       }
1676 
1677       for (const RpoNumber& succ : block->successors()) {
1678         const InstructionBlock* successor = code()->InstructionBlockAt(succ);
1679         DCHECK_EQ(1, successor->PredecessorCount());
1680         int gap_index = successor->first_instruction_index();
1681         // Create an unconstrained operand for the same virtual register
1682         // and insert a gap move from the fixed output to the operand.
1683         UnallocatedOperand output_copy(UnallocatedOperand::REGISTER_OR_SLOT,
1684                                        output_vreg);
1685         data()->AddGapMove(gap_index, Instruction::START, *output, output_copy);
1686       }
1687     }
1688 
1689     if (!assigned) {
1690       for (const RpoNumber& succ : block->successors()) {
1691         const InstructionBlock* successor = code()->InstructionBlockAt(succ);
1692         DCHECK_EQ(1, successor->PredecessorCount());
1693         int gap_index = successor->first_instruction_index();
1694         range->RecordSpillLocation(allocation_zone(), gap_index, output);
1695         range->SetSpillStartIndex(gap_index);
1696       }
1697     }
1698   }
1699 }
1700 
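// Constraints that take effect after an instruction executes: fixed temps are
// pinned directly, constant outputs become the range's spill operand, and
// fixed outputs get a gap move to an unconstrained copy at the START of the
// following instruction, which is also where spilling may begin.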
1701 void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
1702   Instruction* first = code()->InstructionAt(instr_index);
1703   // Handle fixed temporaries.
1704   for (size_t i = 0; i < first->TempCount(); i++) {
1705     UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i));
1706     if (temp->HasFixedPolicy()) AllocateFixed(temp, instr_index, false, false);
1707   }
1708   // Handle constant/fixed output operands.
1709   for (size_t i = 0; i < first->OutputCount(); i++) {
1710     InstructionOperand* output = first->OutputAt(i);
1711     if (output->IsConstant()) {
1712       int output_vreg = ConstantOperand::cast(output)->virtual_register();
1713       TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
1714       range->SetSpillStartIndex(instr_index + 1);
1715       range->SetSpillOperand(output);
1716       continue;
1717     }
1718     UnallocatedOperand* first_output = UnallocatedOperand::cast(output);
1719     TopLevelLiveRange* range =
1720         data()->GetOrCreateLiveRangeFor(first_output->virtual_register());
1721     bool assigned = false;
1722     if (first_output->HasFixedPolicy()) {
1723       int output_vreg = first_output->virtual_register();
1724       UnallocatedOperand output_copy(UnallocatedOperand::REGISTER_OR_SLOT,
1725                                      output_vreg);
1726       bool is_tagged = code()->IsReference(output_vreg);
1727       if (first_output->HasSecondaryStorage()) {
1728         range->MarkHasPreassignedSlot();
1729         data()->preassigned_slot_ranges().push_back(
1730             std::make_pair(range, first_output->GetSecondaryStorage()));
1731       }
1732       AllocateFixed(first_output, instr_index, is_tagged, false);
1733 
1734       // This value is produced on the stack; we never need to spill it.
1735       if (first_output->IsStackSlot()) {
1736         DCHECK(LocationOperand::cast(first_output)->index() <
1737                data()->frame()->GetTotalFrameSlotCount());
1738         range->SetSpillOperand(LocationOperand::cast(first_output));
1739         range->SetSpillStartIndex(instr_index + 1);
1740         assigned = true;
1741       }
1742       data()->AddGapMove(instr_index + 1, Instruction::START, *first_output,
1743                          output_copy);
1744     }
1745     // Make sure we add a gap move for spilling (if we have not done
1746     // so already).
1747     if (!assigned) {
1748       range->RecordSpillLocation(allocation_zone(), instr_index + 1,
1749                                  first_output);
1750       range->SetSpillStartIndex(instr_index + 1);
1751     }
1752   }
1753 }
1754 
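// Constraints that must hold before an instruction executes: fixed inputs are
// satisfied by a gap move at the END position of this instruction's gap, and
// "output same as input" instructions get their first input rewritten to the
// output's virtual register, again filled by a move just before the
// instruction runs.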
1755 void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
1756   Instruction* second = code()->InstructionAt(instr_index);
1757   // Handle fixed input operands of second instruction.
1758   for (size_t i = 0; i < second->InputCount(); i++) {
1759     InstructionOperand* input = second->InputAt(i);
1760     if (input->IsImmediate()) {
1761       continue;  // Ignore immediates.
1762     }
1763     UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
1764     if (cur_input->HasFixedPolicy()) {
1765       int input_vreg = cur_input->virtual_register();
1766       UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
1767                                     input_vreg);
1768       bool is_tagged = code()->IsReference(input_vreg);
1769       AllocateFixed(cur_input, instr_index, is_tagged, true);
1770       data()->AddGapMove(instr_index, Instruction::END, input_copy, *cur_input);
1771     }
1772   }
1773   // Handle "output same as input" for second instruction.
1774   for (size_t i = 0; i < second->OutputCount(); i++) {
1775     InstructionOperand* output = second->OutputAt(i);
1776     if (!output->IsUnallocated()) continue;
1777     UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
1778     if (!second_output->HasSameAsInputPolicy()) continue;
1779     DCHECK_EQ(0, i);  // Only valid for first output.
1780     UnallocatedOperand* cur_input =
1781         UnallocatedOperand::cast(second->InputAt(0));
1782     int output_vreg = second_output->virtual_register();
1783     int input_vreg = cur_input->virtual_register();
1784     UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
1785                                   input_vreg);
1786     *cur_input =
1787         UnallocatedOperand(*cur_input, second_output->virtual_register());
1788     MoveOperands* gap_move = data()->AddGapMove(instr_index, Instruction::END,
1789                                                 input_copy, *cur_input);
1790     DCHECK_NOT_NULL(gap_move);
1791     if (code()->IsReference(input_vreg) && !code()->IsReference(output_vreg)) {
1792       if (second->HasReferenceMap()) {
1793         TopTierRegisterAllocationData::DelayedReference delayed_reference = {
1794             second->reference_map(), &gap_move->source()};
1795         data()->delayed_references().push_back(delayed_reference);
1796       }
1797     }
1798   }
1799 }
1800 
1801 void ConstraintBuilder::ResolvePhis() {
1802   // Process the blocks in reverse order.
1803   for (InstructionBlock* block : base::Reversed(code()->instruction_blocks())) {
1804     data_->tick_counter()->TickAndMaybeEnterSafepoint();
1805     ResolvePhis(block);
1806   }
1807 }
1808 
1809 void ConstraintBuilder::ResolvePhis(const InstructionBlock* block) {
1810   for (PhiInstruction* phi : block->phis()) {
1811     int phi_vreg = phi->virtual_register();
1812     TopTierRegisterAllocationData::PhiMapValue* map_value =
1813         data()->InitializePhiMap(block, phi);
1814     InstructionOperand& output = phi->output();
1815     // Map the destination operands, so the commitment phase can find them.
1816     for (size_t i = 0; i < phi->operands().size(); ++i) {
1817       InstructionBlock* cur_block =
1818           code()->InstructionBlockAt(block->predecessors()[i]);
1819       UnallocatedOperand input(UnallocatedOperand::REGISTER_OR_SLOT,
1820                                phi->operands()[i]);
1821       MoveOperands* move = data()->AddGapMove(
1822           cur_block->last_instruction_index(), Instruction::END, input, output);
1823       map_value->AddOperand(&move->destination());
1824       DCHECK(!code()
1825                   ->InstructionAt(cur_block->last_instruction_index())
1826                   ->HasReferenceMap());
1827     }
1828     TopLevelLiveRange* live_range = data()->GetOrCreateLiveRangeFor(phi_vreg);
1829     int gap_index = block->first_instruction_index();
1830     live_range->RecordSpillLocation(allocation_zone(), gap_index, &output);
1831     live_range->SetSpillStartIndex(gap_index);
1832     // We use the phi-ness of some nodes in some later heuristics.
1833     live_range->set_is_phi(true);
1834     live_range->set_is_non_loop_phi(!block->IsLoopHeader());
1835   }
1836 }
1837 
1838 LiveRangeBuilder::LiveRangeBuilder(TopTierRegisterAllocationData* data,
1839                                    Zone* local_zone)
1840     : data_(data), phi_hints_(local_zone) {}
1841 
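// Live-out for a block is the union of the live-in sets of its forward
// successors plus the phi inputs corresponding to this block's edge into each
// successor. Backward (loop) edges are deliberately skipped here; liveness
// across loops is completed when the loop header is processed.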
1842 BitVector* LiveRangeBuilder::ComputeLiveOut(
1843     const InstructionBlock* block, TopTierRegisterAllocationData* data) {
1844   size_t block_index = block->rpo_number().ToSize();
1845   BitVector* live_out = data->live_out_sets()[block_index];
1846   if (live_out == nullptr) {
1847     // Compute the live-out set for the given block, ignoring backward
1848     // successor edges.
1849     Zone* zone = data->allocation_zone();
1850     const InstructionSequence* code = data->code();
1851 
1852     live_out = zone->New<BitVector>(code->VirtualRegisterCount(), zone);
1853 
1854     // Process all successor blocks.
1855     for (const RpoNumber& succ : block->successors()) {
1856       // Add values live on entry to the successor.
1857       if (succ <= block->rpo_number()) continue;
1858       BitVector* live_in = data->live_in_sets()[succ.ToSize()];
1859       if (live_in != nullptr) live_out->Union(*live_in);
1860 
1861       // All phi input operands corresponding to this successor edge are live
1862       // out from this block.
1863       const InstructionBlock* successor = code->InstructionBlockAt(succ);
1864       size_t index = successor->PredecessorIndexOf(block->rpo_number());
1865       DCHECK(index < successor->PredecessorCount());
1866       for (PhiInstruction* phi : successor->phis()) {
1867         live_out->Add(phi->operands()[index]);
1868       }
1869     }
1870     data->live_out_sets()[block_index] = live_out;
1871   }
1872   return live_out;
1873 }
1874 
1875 void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block,
1876                                            BitVector* live_out) {
1877   // Add an interval that includes the entire block to the live range for
1878   // each live_out value.
1879   LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
1880       block->first_instruction_index());
1881   LifetimePosition end = LifetimePosition::InstructionFromInstructionIndex(
1882                              block->last_instruction_index())
1883                              .NextStart();
1884   BitVector::Iterator iterator(live_out);
1885   while (!iterator.Done()) {
1886     int operand_index = iterator.Current();
1887     TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
1888     range->AddUseInterval(start, end, allocation_zone(),
1889                           data()->is_trace_alloc());
1890     iterator.Advance();
1891   }
1892 }
1893 
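// Fixed FP live ranges are given negative IDs, with a distinct block of IDs
// per representation. Following the fall-through cases below
// (K = kNumberOfFixedRangesPerRegister), the computed ID is:
//   kFloat64 index i -> -i - 1 - K * num_general_registers
//   kFloat32 index i -> -i - 1 - K * (num_general + num_double registers)
//   kSimd128 index i -> -i - 1 - K * (num_general + num_double + num_float)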
1894 int LiveRangeBuilder::FixedFPLiveRangeID(int index, MachineRepresentation rep) {
1895   int result = -index - 1;
1896   switch (rep) {
1897     case MachineRepresentation::kSimd128:
1898       result -=
1899           kNumberOfFixedRangesPerRegister * config()->num_float_registers();
1900       V8_FALLTHROUGH;
1901     case MachineRepresentation::kFloat32:
1902       result -=
1903           kNumberOfFixedRangesPerRegister * config()->num_double_registers();
1904       V8_FALLTHROUGH;
1905     case MachineRepresentation::kFloat64:
1906       result -=
1907           kNumberOfFixedRangesPerRegister * config()->num_general_registers();
1908       break;
1909     default:
1910       UNREACHABLE();
1911   }
1912   return result;
1913 }
1914 
1915 TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index,
1916                                                        SpillMode spill_mode) {
1917   int offset = spill_mode == SpillMode::kSpillAtDefinition
1918                    ? 0
1919                    : config()->num_general_registers();
1920   DCHECK(index < config()->num_general_registers());
1921   TopLevelLiveRange* result = data()->fixed_live_ranges()[offset + index];
1922   if (result == nullptr) {
1923     MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
1924     result = data()->NewLiveRange(FixedLiveRangeID(offset + index), rep);
1925     DCHECK(result->IsFixed());
1926     result->set_assigned_register(index);
1927     data()->MarkAllocated(rep, index);
1928     if (spill_mode == SpillMode::kSpillDeferred) {
1929       result->set_deferred_fixed();
1930     }
1931     data()->fixed_live_ranges()[offset + index] = result;
1932   }
1933   return result;
1934 }
1935 
1936 TopLevelLiveRange* LiveRangeBuilder::FixedFPLiveRangeFor(
1937     int index, MachineRepresentation rep, SpillMode spill_mode) {
1938   int num_regs = config()->num_double_registers();
1939   ZoneVector<TopLevelLiveRange*>* live_ranges =
1940       &data()->fixed_double_live_ranges();
1941   if (!kSimpleFPAliasing) {
1942     switch (rep) {
1943       case MachineRepresentation::kFloat32:
1944         num_regs = config()->num_float_registers();
1945         live_ranges = &data()->fixed_float_live_ranges();
1946         break;
1947       case MachineRepresentation::kSimd128:
1948         num_regs = config()->num_simd128_registers();
1949         live_ranges = &data()->fixed_simd128_live_ranges();
1950         break;
1951       default:
1952         break;
1953     }
1954   }
1955 
1956   int offset = spill_mode == SpillMode::kSpillAtDefinition ? 0 : num_regs;
1957 
1958   DCHECK(index < num_regs);
1959   USE(num_regs);
1960   TopLevelLiveRange* result = (*live_ranges)[offset + index];
1961   if (result == nullptr) {
1962     result = data()->NewLiveRange(FixedFPLiveRangeID(offset + index, rep), rep);
1963     DCHECK(result->IsFixed());
1964     result->set_assigned_register(index);
1965     data()->MarkAllocated(rep, index);
1966     if (spill_mode == SpillMode::kSpillDeferred) {
1967       result->set_deferred_fixed();
1968     }
1969     (*live_ranges)[offset + index] = result;
1970   }
1971   return result;
1972 }
1973 
1974 TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand,
1975                                                   SpillMode spill_mode) {
1976   if (operand->IsUnallocated()) {
1977     return data()->GetOrCreateLiveRangeFor(
1978         UnallocatedOperand::cast(operand)->virtual_register());
1979   } else if (operand->IsConstant()) {
1980     return data()->GetOrCreateLiveRangeFor(
1981         ConstantOperand::cast(operand)->virtual_register());
1982   } else if (operand->IsRegister()) {
1983     return FixedLiveRangeFor(
1984         LocationOperand::cast(operand)->GetRegister().code(), spill_mode);
1985   } else if (operand->IsFPRegister()) {
1986     LocationOperand* op = LocationOperand::cast(operand);
1987     return FixedFPLiveRangeFor(op->register_code(), op->representation(),
1988                                spill_mode);
1989   } else {
1990     return nullptr;
1991   }
1992 }
1993 
1994 UsePosition* LiveRangeBuilder::NewUsePosition(LifetimePosition pos,
1995                                               InstructionOperand* operand,
1996                                               void* hint,
1997                                               UsePositionHintType hint_type) {
1998   return allocation_zone()->New<UsePosition>(pos, operand, hint, hint_type);
1999 }
2000 
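// Define records a definition of `operand` at `position`. If the range is
// empty or starts after the definition (a def without a prior use in this
// block), a minimal interval is created; otherwise the existing interval,
// added by AddInitialIntervals or by later uses, is shortened so that it
// starts at the definition.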
2001 UsePosition* LiveRangeBuilder::Define(LifetimePosition position,
2002                                       InstructionOperand* operand, void* hint,
2003                                       UsePositionHintType hint_type,
2004                                       SpillMode spill_mode) {
2005   TopLevelLiveRange* range = LiveRangeFor(operand, spill_mode);
2006   if (range == nullptr) return nullptr;
2007 
2008   if (range->IsEmpty() || range->Start() > position) {
2009     // Can happen if there is a definition without use.
2010     range->AddUseInterval(position, position.NextStart(), allocation_zone(),
2011                           data()->is_trace_alloc());
2012     range->AddUsePosition(NewUsePosition(position.NextStart()),
2013                           data()->is_trace_alloc());
2014   } else {
2015     range->ShortenTo(position, data()->is_trace_alloc());
2016   }
2017   if (!operand->IsUnallocated()) return nullptr;
2018   UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
2019   UsePosition* use_pos =
2020       NewUsePosition(position, unalloc_operand, hint, hint_type);
2021   range->AddUsePosition(use_pos, data()->is_trace_alloc());
2022   return use_pos;
2023 }
2024 
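// Use records a use of `operand` at `position` and conservatively extends the
// live interval back to `block_start`; when the defining instruction is later
// visited by the backward scan, Define shortens the interval to the actual
// definition point.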
2025 UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start,
2026                                    LifetimePosition position,
2027                                    InstructionOperand* operand, void* hint,
2028                                    UsePositionHintType hint_type,
2029                                    SpillMode spill_mode) {
2030   TopLevelLiveRange* range = LiveRangeFor(operand, spill_mode);
2031   if (range == nullptr) return nullptr;
2032   UsePosition* use_pos = nullptr;
2033   if (operand->IsUnallocated()) {
2034     UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
2035     use_pos = NewUsePosition(position, unalloc_operand, hint, hint_type);
2036     range->AddUsePosition(use_pos, data()->is_trace_alloc());
2037   }
2038   range->AddUseInterval(block_start, position, allocation_zone(),
2039                         data()->is_trace_alloc());
2040   return use_pos;
2041 }
2042 
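// Walk the block's instructions in reverse order. Outputs kill liveness
// (Define); inputs, temps, and gap-move sources make values live (Use); and
// instructions that clobber registers pin the fixed ranges at their position.
// After the scan, `live` holds this block's live-in set.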
2043 void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
2044                                            BitVector* live) {
2045   int block_start = block->first_instruction_index();
2046   LifetimePosition block_start_position =
2047       LifetimePosition::GapFromInstructionIndex(block_start);
2048   bool fixed_float_live_ranges = false;
2049   bool fixed_simd128_live_ranges = false;
2050   if (!kSimpleFPAliasing) {
2051     int mask = data()->code()->representation_mask();
2052     fixed_float_live_ranges = (mask & kFloat32Bit) != 0;
2053     fixed_simd128_live_ranges = (mask & kSimd128Bit) != 0;
2054   }
2055   SpillMode spill_mode = SpillModeForBlock(block);
2056 
2057   for (int index = block->last_instruction_index(); index >= block_start;
2058        index--) {
2059     LifetimePosition curr_position =
2060         LifetimePosition::InstructionFromInstructionIndex(index);
2061     Instruction* instr = code()->InstructionAt(index);
2062     DCHECK_NOT_NULL(instr);
2063     DCHECK(curr_position.IsInstructionPosition());
2064     // Process output, inputs, and temps of this instruction.
2065     for (size_t i = 0; i < instr->OutputCount(); i++) {
2066       InstructionOperand* output = instr->OutputAt(i);
2067       if (output->IsUnallocated()) {
2068         // Unsupported.
2069         DCHECK(!UnallocatedOperand::cast(output)->HasSlotPolicy());
2070         int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
2071         live->Remove(out_vreg);
2072       } else if (output->IsConstant()) {
2073         int out_vreg = ConstantOperand::cast(output)->virtual_register();
2074         live->Remove(out_vreg);
2075       }
2076       if (block->IsHandler() && index == block_start && output->IsAllocated() &&
2077           output->IsRegister() &&
2078           AllocatedOperand::cast(output)->GetRegister() ==
2079               v8::internal::kReturnRegister0) {
2080         // The register defined here is blocked from gap start - it is the
2081         // exception value.
2082         // TODO(mtrofin): should we explore an explicit opcode for
2083         // the first instruction in the handler?
2084         Define(LifetimePosition::GapFromInstructionIndex(index), output,
2085                spill_mode);
2086       } else {
2087         Define(curr_position, output, spill_mode);
2088       }
2089     }
2090 
2091     if (instr->ClobbersRegisters()) {
2092       for (int i = 0; i < config()->num_allocatable_general_registers(); ++i) {
2093         // Create a UseInterval at this instruction for all fixed registers
2094         // (including the instruction outputs). Adding another UseInterval here
2095         // is OK because AddUseInterval will just merge it with the existing
2096         // one at the end of the range.
2097         int code = config()->GetAllocatableGeneralCode(i);
2098         TopLevelLiveRange* range = FixedLiveRangeFor(code, spill_mode);
2099         range->AddUseInterval(curr_position, curr_position.End(),
2100                               allocation_zone(), data()->is_trace_alloc());
2101       }
2102     }
2103 
2104     if (instr->ClobbersDoubleRegisters()) {
2105       for (int i = 0; i < config()->num_allocatable_double_registers(); ++i) {
2106         // Add a UseInterval for all DoubleRegisters. See comment above for
2107         // general registers.
2108         int code = config()->GetAllocatableDoubleCode(i);
2109         TopLevelLiveRange* range = FixedFPLiveRangeFor(
2110             code, MachineRepresentation::kFloat64, spill_mode);
2111         range->AddUseInterval(curr_position, curr_position.End(),
2112                               allocation_zone(), data()->is_trace_alloc());
2113       }
2114       // Clobber fixed float registers on archs with non-simple aliasing.
2115       if (!kSimpleFPAliasing) {
2116         if (fixed_float_live_ranges) {
2117           for (int i = 0; i < config()->num_allocatable_float_registers();
2118                ++i) {
2119             // Add a UseInterval for all FloatRegisters. See comment above for
2120             // general registers.
2121             int code = config()->GetAllocatableFloatCode(i);
2122             TopLevelLiveRange* range = FixedFPLiveRangeFor(
2123                 code, MachineRepresentation::kFloat32, spill_mode);
2124             range->AddUseInterval(curr_position, curr_position.End(),
2125                                   allocation_zone(), data()->is_trace_alloc());
2126           }
2127         }
2128         if (fixed_simd128_live_ranges) {
2129           for (int i = 0; i < config()->num_allocatable_simd128_registers();
2130                ++i) {
2131             int code = config()->GetAllocatableSimd128Code(i);
2132             TopLevelLiveRange* range = FixedFPLiveRangeFor(
2133                 code, MachineRepresentation::kSimd128, spill_mode);
2134             range->AddUseInterval(curr_position, curr_position.End(),
2135                                   allocation_zone(), data()->is_trace_alloc());
2136           }
2137         }
2138       }
2139     }
2140 
2141     for (size_t i = 0; i < instr->InputCount(); i++) {
2142       InstructionOperand* input = instr->InputAt(i);
2143       if (input->IsImmediate()) {
2144         continue;  // Ignore immediates.
2145       }
2146       LifetimePosition use_pos;
2147       if (input->IsUnallocated() &&
2148           UnallocatedOperand::cast(input)->IsUsedAtStart()) {
2149         use_pos = curr_position;
2150       } else {
2151         use_pos = curr_position.End();
2152       }
2153 
2154       if (input->IsUnallocated()) {
2155         UnallocatedOperand* unalloc = UnallocatedOperand::cast(input);
2156         int vreg = unalloc->virtual_register();
2157         live->Add(vreg);
2158         if (unalloc->HasSlotPolicy()) {
2159           data()->GetOrCreateLiveRangeFor(vreg)->register_slot_use(
2160               block->IsDeferred()
2161                   ? TopLevelLiveRange::SlotUseKind::kDeferredSlotUse
2162                   : TopLevelLiveRange::SlotUseKind::kGeneralSlotUse);
2163         }
2164       }
2165       Use(block_start_position, use_pos, input, spill_mode);
2166     }
2167 
2168     for (size_t i = 0; i < instr->TempCount(); i++) {
2169       InstructionOperand* temp = instr->TempAt(i);
2170       // Unsupported.
2171       DCHECK_IMPLIES(temp->IsUnallocated(),
2172                      !UnallocatedOperand::cast(temp)->HasSlotPolicy());
2173       if (instr->ClobbersTemps()) {
2174         if (temp->IsRegister()) continue;
2175         if (temp->IsUnallocated()) {
2176           UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp);
2177           if (temp_unalloc->HasFixedPolicy()) {
2178             continue;
2179           }
2180         }
2181       }
2182       Use(block_start_position, curr_position.End(), temp, spill_mode);
2183       Define(curr_position, temp, spill_mode);
2184     }
2185 
2186     // Process the moves of the instruction's gaps, making their sources live.
2187     const Instruction::GapPosition kPositions[] = {Instruction::END,
2188                                                    Instruction::START};
2189     curr_position = curr_position.PrevStart();
2190     DCHECK(curr_position.IsGapPosition());
2191     for (const Instruction::GapPosition& position : kPositions) {
2192       ParallelMove* move = instr->GetParallelMove(position);
2193       if (move == nullptr) continue;
2194       if (position == Instruction::END) {
2195         curr_position = curr_position.End();
2196       } else {
2197         curr_position = curr_position.Start();
2198       }
2199       for (MoveOperands* cur : *move) {
2200         InstructionOperand& from = cur->source();
2201         InstructionOperand& to = cur->destination();
2202         void* hint = &to;
2203         UsePositionHintType hint_type = UsePosition::HintTypeForOperand(to);
2204         UsePosition* to_use = nullptr;
2205         int phi_vreg = -1;
2206         if (to.IsUnallocated()) {
2207           int to_vreg = UnallocatedOperand::cast(to).virtual_register();
2208           TopLevelLiveRange* to_range =
2209               data()->GetOrCreateLiveRangeFor(to_vreg);
2210           if (to_range->is_phi()) {
2211             phi_vreg = to_vreg;
2212             if (to_range->is_non_loop_phi()) {
2213               hint = to_range->current_hint_position();
2214               hint_type = hint == nullptr ? UsePositionHintType::kNone
2215                                           : UsePositionHintType::kUsePos;
2216             } else {
2217               hint_type = UsePositionHintType::kPhi;
2218               hint = data()->GetPhiMapValueFor(to_vreg);
2219             }
2220           } else {
2221             if (live->Contains(to_vreg)) {
2222               to_use =
2223                   Define(curr_position, &to, &from,
2224                          UsePosition::HintTypeForOperand(from), spill_mode);
2225               live->Remove(to_vreg);
2226             } else {
2227               cur->Eliminate();
2228               continue;
2229             }
2230           }
2231         } else {
2232           Define(curr_position, &to, spill_mode);
2233         }
2234         UsePosition* from_use = Use(block_start_position, curr_position, &from,
2235                                     hint, hint_type, spill_mode);
2236         // Mark range live.
2237         if (from.IsUnallocated()) {
2238           live->Add(UnallocatedOperand::cast(from).virtual_register());
2239         }
2240         // When the value is moved to a register to meet input constraints,
2241         // we should treat this use like a register use in the backward
2242         // spilling heuristics, even though the use itself is not
2243         // register-beneficial at the AllocateBlockedReg stage.
2244         if (to.IsAnyRegister() ||
2245             (to.IsUnallocated() &&
2246              UnallocatedOperand::cast(&to)->HasRegisterPolicy())) {
2247           from_use->set_spill_detrimental();
2248         }
2249         // Resolve use position hints just created.
2250         if (to_use != nullptr && from_use != nullptr) {
2251           to_use->ResolveHint(from_use);
2252           from_use->ResolveHint(to_use);
2253         }
2254         DCHECK_IMPLIES(to_use != nullptr, to_use->IsResolved());
2255         DCHECK_IMPLIES(from_use != nullptr, from_use->IsResolved());
2256         // Potentially resolve phi hint.
2257         if (phi_vreg != -1) ResolvePhiHint(&from, from_use);
2258       }
2259     }
2260   }
2261 }
2262 
2263 void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
2264                                    BitVector* live) {
2265   for (PhiInstruction* phi : block->phis()) {
2266     // The live range interval already ends at the first instruction of the
2267     // block.
2268     int phi_vreg = phi->virtual_register();
2269     live->Remove(phi_vreg);
2270     // Select a hint from a predecessor block that precedes this block in the
2271     // rpo order. In order of priority:
2272     // - Avoid hints from deferred blocks.
2273     // - Prefer hints from allocated (or explicit) operands.
2274     // - Prefer hints from empty blocks (containing just parallel moves and a
2275     //   jump). In these cases, if we can elide the moves, the jump threader
2276     //   is likely to be able to elide the jump.
2277     // The enforcement of hinting in rpo order is required because hint
2278     // resolution that happens later in the compiler pipeline visits
2279     // instructions in reverse rpo order, relying on the fact that phis are
2280     // encountered before their hints.
2281     InstructionOperand* hint = nullptr;
2282     int hint_preference = 0;
2283 
2284     // The cost of hinting increases with the number of predecessors. At the
2285     // same time, the typical benefit decreases, since this hinting only
2286     // optimises the execution path through one predecessor. A limit of 2 is
2287     // sufficient to hit the common if/else pattern.
2288     int predecessor_limit = 2;
2289 
2290     for (RpoNumber predecessor : block->predecessors()) {
2291       const InstructionBlock* predecessor_block =
2292           code()->InstructionBlockAt(predecessor);
2293       DCHECK_EQ(predecessor_block->rpo_number(), predecessor);
2294 
2295       // Only take hints from earlier rpo numbers.
2296       if (predecessor >= block->rpo_number()) continue;
2297 
2298       // Look up the predecessor instruction.
2299       const Instruction* predecessor_instr =
2300           GetLastInstruction(code(), predecessor_block);
2301       InstructionOperand* predecessor_hint = nullptr;
2302       // Phis are assigned in the END position of the last instruction in each
2303       // predecessor block.
2304       for (MoveOperands* move :
2305            *predecessor_instr->GetParallelMove(Instruction::END)) {
2306         InstructionOperand& to = move->destination();
2307         if (to.IsUnallocated() &&
2308             UnallocatedOperand::cast(to).virtual_register() == phi_vreg) {
2309           predecessor_hint = &move->source();
2310           break;
2311         }
2312       }
2313       DCHECK_NOT_NULL(predecessor_hint);
2314 
2315       // For each predecessor, generate a score according to the priorities
2316       // described above, and pick the best one. Flags in higher-order bits have
2317       // a higher priority than those in lower-order bits.
2318       int predecessor_hint_preference = 0;
2319       const int kNotDeferredBlockPreference = (1 << 2);
2320       const int kMoveIsAllocatedPreference = (1 << 1);
2321       const int kBlockIsEmptyPreference = (1 << 0);
2322 
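      // Illustrative scoring under the flags above: a non-deferred, empty
      // predecessor whose END move source is already allocated scores
      // 4 + 2 + 1 = 7 and beats any deferred predecessor, which can score at
      // most 2 + 1 = 3.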
2323       // - Avoid hints from deferred blocks.
2324       if (!predecessor_block->IsDeferred()) {
2325         predecessor_hint_preference |= kNotDeferredBlockPreference;
2326       }
2327 
2328       // - Prefer hints from allocated operands.
2329       //
2330       // Already-allocated operands are typically assigned using the parallel
2331       // moves on the last instruction. For example:
2332       //
2333       //      gap (v101 = [x0|R|w32]) (v100 = v101)
2334       //      ArchJmp
2335       //    ...
2336       //    phi: v100 = v101 v102
2337       //
2338       // We have already found the END move, so look for a matching START move
2339       // from an allocated operand.
2340       //
2341       // Note that we cannot simply look up data()->live_ranges()[vreg] here
2342       // because the live ranges are still being built when this function is
2343       // called.
2344       // TODO(v8): Find a way to separate hinting from live range analysis in
2345       // BuildLiveRanges so that we can use the O(1) live-range look-up.
2346       auto moves = predecessor_instr->GetParallelMove(Instruction::START);
2347       if (moves != nullptr) {
2348         for (MoveOperands* move : *moves) {
2349           InstructionOperand& to = move->destination();
2350           if (predecessor_hint->Equals(to)) {
2351             if (move->source().IsAllocated()) {
2352               predecessor_hint_preference |= kMoveIsAllocatedPreference;
2353             }
2354             break;
2355           }
2356         }
2357       }
2358 
2359       // - Prefer hints from empty blocks.
2360       if (predecessor_block->last_instruction_index() ==
2361           predecessor_block->first_instruction_index()) {
2362         predecessor_hint_preference |= kBlockIsEmptyPreference;
2363       }
2364 
2365       if ((hint == nullptr) ||
2366           (predecessor_hint_preference > hint_preference)) {
2367         // Take the hint from this predecessor.
2368         hint = predecessor_hint;
2369         hint_preference = predecessor_hint_preference;
2370       }
2371 
2372       if (--predecessor_limit <= 0) break;
2373     }
2374     DCHECK_NOT_NULL(hint);
2375 
2376     LifetimePosition block_start = LifetimePosition::GapFromInstructionIndex(
2377         block->first_instruction_index());
2378     UsePosition* use_pos = Define(block_start, &phi->output(), hint,
2379                                   UsePosition::HintTypeForOperand(*hint),
2380                                   SpillModeForBlock(block));
2381     MapPhiHint(hint, use_pos);
2382   }
2383 }
2384 
2385 void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block,
2386                                          BitVector* live) {
2387   DCHECK(block->IsLoopHeader());
2388   // Add a live range stretching from the first loop instruction to the last
2389   // for each value live on entry to the header.
2390   BitVector::Iterator iterator(live);
2391   LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
2392       block->first_instruction_index());
2393   LifetimePosition end = LifetimePosition::GapFromInstructionIndex(
2394                              code()->LastLoopInstructionIndex(block))
2395                              .NextFullStart();
2396   while (!iterator.Done()) {
2397     int operand_index = iterator.Current();
2398     TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
2399     range->EnsureInterval(start, end, allocation_zone(),
2400                           data()->is_trace_alloc());
2401     iterator.Advance();
2402   }
2403   // Insert all values into the live in sets of all blocks in the loop.
2404   for (int i = block->rpo_number().ToInt() + 1; i < block->loop_end().ToInt();
2405        ++i) {
2406     live_in_sets()[i]->Union(*live);
2407   }
2408 }
2409 
2410 void LiveRangeBuilder::BuildLiveRanges() {
2411   // Process the blocks in reverse order.
2412   for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
2413        --block_id) {
2414     data_->tick_counter()->TickAndMaybeEnterSafepoint();
2415     InstructionBlock* block =
2416         code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
2417     BitVector* live = ComputeLiveOut(block, data());
2418     // Initially consider all live_out values live for the entire block. We
2419     // will shorten these intervals if necessary.
2420     AddInitialIntervals(block, live);
2421     // Process the instructions in reverse order, generating and killing
2422     // live values.
2423     ProcessInstructions(block, live);
2424     // All phi output operands are killed by this block.
2425     ProcessPhis(block, live);
2426     // Now live is the live-in set for this block, excluding values that are
2427     // live only across backward successor edges.
2428     if (block->IsLoopHeader()) ProcessLoopHeader(block, live);
2429     live_in_sets()[block_id] = live;
2430   }
2431   // Postprocess the ranges.
2432   const size_t live_ranges_size = data()->live_ranges().size();
2433   for (TopLevelLiveRange* range : data()->live_ranges()) {
2434     data_->tick_counter()->TickAndMaybeEnterSafepoint();
2435     CHECK_EQ(live_ranges_size,
2436              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
2437     if (range == nullptr) continue;
2438     // Give slots to all ranges with a non-fixed slot use.
2439     if (range->has_slot_use() && range->HasNoSpillType()) {
2440       SpillMode spill_mode =
2441           range->slot_use_kind() ==
2442                   TopLevelLiveRange::SlotUseKind::kDeferredSlotUse
2443               ? SpillMode::kSpillDeferred
2444               : SpillMode::kSpillAtDefinition;
2445       data()->AssignSpillRangeToLiveRange(range, spill_mode);
2446     }
2447     // TODO(bmeurer): This is a horrible hack to make sure that for constant
2448     // live ranges, every use requires the constant to be in a register.
2449     // Without this hack, all uses with "any" policy would get the constant
2450     // operand assigned.
2451     if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
2452       for (UsePosition* pos = range->first_pos(); pos != nullptr;
2453            pos = pos->next()) {
2454         if (pos->type() == UsePositionType::kRequiresSlot ||
2455             pos->type() == UsePositionType::kRegisterOrSlotOrConstant) {
2456           continue;
2457         }
2458         UsePositionType new_type = UsePositionType::kRegisterOrSlot;
2459         // Can't mark phis as needing a register.
2460         if (!pos->pos().IsGapPosition()) {
2461           new_type = UsePositionType::kRequiresRegister;
2462         }
2463         pos->set_type(new_type, true);
2464       }
2465     }
2466     range->ResetCurrentHintPosition();
2467   }
2468   for (auto preassigned : data()->preassigned_slot_ranges()) {
2469     TopLevelLiveRange* range = preassigned.first;
2470     int slot_id = preassigned.second;
2471     SpillRange* spill = range->HasSpillRange()
2472                             ? range->GetSpillRange()
2473                             : data()->AssignSpillRangeToLiveRange(
2474                                   range, SpillMode::kSpillAtDefinition);
2475     spill->set_assigned_slot(slot_id);
2476   }
2477 #ifdef DEBUG
2478   Verify();
2479 #endif
2480 }
2481 
2482 void LiveRangeBuilder::MapPhiHint(InstructionOperand* operand,
2483                                   UsePosition* use_pos) {
2484   DCHECK(!use_pos->IsResolved());
2485   auto res = phi_hints_.insert(std::make_pair(operand, use_pos));
2486   DCHECK(res.second);
2487   USE(res);
2488 }
2489 
2490 void LiveRangeBuilder::ResolvePhiHint(InstructionOperand* operand,
2491                                       UsePosition* use_pos) {
2492   auto it = phi_hints_.find(operand);
2493   if (it == phi_hints_.end()) return;
2494   DCHECK(!it->second->IsResolved());
2495   it->second->ResolveHint(use_pos);
2496 }
2497 
2498 void LiveRangeBuilder::Verify() const {
2499   for (auto& hint : phi_hints_) {
2500     CHECK(hint.second->IsResolved());
2501   }
2502   for (const TopLevelLiveRange* current : data()->live_ranges()) {
2503     if (current != nullptr && !current->IsEmpty()) {
2504       // New LiveRanges should not be split.
2505       CHECK_NULL(current->next());
2506       // General integrity check.
2507       current->Verify();
2508       const UseInterval* first = current->first_interval();
2509       if (first->next() == nullptr) continue;
2510 
2511       // Consecutive intervals should not end and start in the same block,
2512       // otherwise the intervals should have been joined, because the
2513       // variable is live throughout that block.
2514       CHECK(NextIntervalStartsInDifferentBlocks(first));
2515 
2516       for (const UseInterval* i = first->next(); i != nullptr; i = i->next()) {
2517         // Except for the first interval, the other intervals must start at
2518         // a block boundary, otherwise data wouldn't flow to them.
2519         CHECK(IntervalStartsAtBlockBoundary(i));
2520         // The last instruction of each predecessor of the block where the
2521         // interval starts must be covered by the range.
2522         CHECK(IntervalPredecessorsCoveredByRange(i, current));
2523         if (i->next() != nullptr) {
2524           // Check the consecutive intervals property, except for the last
2525           // interval, where it doesn't apply.
2526           CHECK(NextIntervalStartsInDifferentBlocks(i));
2527         }
2528       }
2529     }
2530   }
2531 }
2532 
2533 bool LiveRangeBuilder::IntervalStartsAtBlockBoundary(
2534     const UseInterval* interval) const {
2535   LifetimePosition start = interval->start();
2536   if (!start.IsFullStart()) return false;
2537   int instruction_index = start.ToInstructionIndex();
2538   const InstructionBlock* block =
2539       data()->code()->GetInstructionBlock(instruction_index);
2540   return block->first_instruction_index() == instruction_index;
2541 }
2542 
2543 bool LiveRangeBuilder::IntervalPredecessorsCoveredByRange(
2544     const UseInterval* interval, const TopLevelLiveRange* range) const {
2545   LifetimePosition start = interval->start();
2546   int instruction_index = start.ToInstructionIndex();
2547   const InstructionBlock* block =
2548       data()->code()->GetInstructionBlock(instruction_index);
2549   for (RpoNumber pred_index : block->predecessors()) {
2550     const InstructionBlock* predecessor =
2551         data()->code()->InstructionBlockAt(pred_index);
2552     LifetimePosition last_pos = LifetimePosition::GapFromInstructionIndex(
2553         predecessor->last_instruction_index());
2554     last_pos = last_pos.NextStart().End();
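    // last_pos now denotes the end position of the predecessor's last
    // instruction; the range has to cover it so the value can flow across the
    // control-flow edge into this interval.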
2555     if (!range->Covers(last_pos)) return false;
2556   }
2557   return true;
2558 }
2559 
2560 bool LiveRangeBuilder::NextIntervalStartsInDifferentBlocks(
2561     const UseInterval* interval) const {
2562   DCHECK_NOT_NULL(interval->next());
2563   LifetimePosition end = interval->end();
2564   LifetimePosition next_start = interval->next()->start();
2565   // Since end is not covered, but the previous position is, move back a
2566   // position
2567   end = end.IsStart() ? end.PrevStart().End() : end.Start();
2568   int last_covered_index = end.ToInstructionIndex();
2569   const InstructionBlock* block =
2570       data()->code()->GetInstructionBlock(last_covered_index);
2571   const InstructionBlock* next_block =
2572       data()->code()->GetInstructionBlock(next_start.ToInstructionIndex());
2573   return block->rpo_number() < next_block->rpo_number();
2574 }
2575 
2576 void BundleBuilder::BuildBundles() {
2577   TRACE("Build bundles\n");
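  // A LiveRangeBundle groups live ranges whose use intervals do not overlap,
  // so ranges in the same bundle can later share a spill range (see
  // LiveRangeBundle::MergeSpillRanges below). Phi outputs and their inputs are
  // the natural candidates for such sharing.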
2578   // Process the blocks in reverse order.
2579   for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
2580        --block_id) {
2581     InstructionBlock* block =
2582         code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
2583     TRACE("Block B%d\n", block_id);
2584     for (auto phi : block->phis()) {
2585       LiveRange* out_range =
2586           data()->GetOrCreateLiveRangeFor(phi->virtual_register());
2587       LiveRangeBundle* out = out_range->get_bundle();
2588       if (out == nullptr) {
2589         out = data()->allocation_zone()->New<LiveRangeBundle>(
2590             data()->allocation_zone(), next_bundle_id_++);
2591         out->TryAddRange(out_range);
2592       }
2593       TRACE("Processing phi for v%d with %d:%d\n", phi->virtual_register(),
2594             out_range->TopLevel()->vreg(), out_range->relative_id());
2595       bool phi_interferes_with_backedge_input = false;
2596       for (auto input : phi->operands()) {
2597         LiveRange* input_range = data()->GetOrCreateLiveRangeFor(input);
2598         TRACE("Input value v%d with range %d:%d\n", input,
2599               input_range->TopLevel()->vreg(), input_range->relative_id());
2600         LiveRangeBundle* input_bundle = input_range->get_bundle();
2601         if (input_bundle != nullptr) {
2602           TRACE("Merge\n");
2603           if (out->TryMerge(input_bundle, data()->is_trace_alloc())) {
2604             TRACE("Merged %d and %d to %d\n", phi->virtual_register(), input,
2605                   out->id());
2606           } else if (input_range->Start() > out_range->Start()) {
2607             // We are only interested in values defined after the phi, because
2608             // those are values that will go over a back-edge.
2609             phi_interferes_with_backedge_input = true;
2610           }
2611         } else {
2612           TRACE("Add\n");
2613           if (out->TryAddRange(input_range)) {
2614             TRACE("Added %d and %d to %d\n", phi->virtual_register(), input,
2615                   out->id());
2616           } else if (input_range->Start() > out_range->Start()) {
2617             // We are only interested in values defined after the phi, because
2618             // those are values that will go over a back-edge.
2619             phi_interferes_with_backedge_input = true;
2620           }
2621         }
2622       }
2623       // Spilling the phi at the loop header is not beneficial if there is
2624       // a back-edge with an input for the phi that interferes with the phi's
2625       // value, because if that input gets spilled it might introduce
2626       // a stack-to-stack move at the back-edge.
2627       if (phi_interferes_with_backedge_input)
2628         out_range->TopLevel()->set_spilling_at_loop_header_not_beneficial();
2629     }
2630     TRACE("Done block B%d\n", block_id);
2631   }
2632 }
2633 
2634 bool LiveRangeBundle::TryAddRange(LiveRange* range) {
2635   DCHECK_NULL(range->get_bundle());
2636   // We may only add a new live range if its use intervals do not
2637   // overlap with existing intervals in the bundle.
2638   if (UsesOverlap(range->first_interval())) return false;
2639   ranges_.insert(range);
2640   range->set_bundle(this);
2641   InsertUses(range->first_interval());
2642   return true;
2643 }
2644 bool LiveRangeBundle::TryMerge(LiveRangeBundle* other, bool trace_alloc) {
2645   if (other == this) return true;
2646 
2647   auto iter1 = uses_.begin();
2648   auto iter2 = other->uses_.begin();
2649 
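  // Both use sets are kept ordered by start position (the sweep below relies
  // on this), so a single forward pass over the two of them is enough to
  // detect any overlap.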
2650   while (iter1 != uses_.end() && iter2 != other->uses_.end()) {
2651     if (iter1->start >= iter2->end) {
2652       ++iter2;
2653     } else if (iter2->start >= iter1->end) {
2654       ++iter1;
2655     } else {
2656       TRACE_COND(trace_alloc, "No merge %d:%d %d:%d\n", iter1->start,
2657                  iter1->end, iter2->start, iter2->end);
2658       return false;
2659     }
2660   }
2661   // Uses are disjoint, merging is possible.
2662   for (auto it = other->ranges_.begin(); it != other->ranges_.end(); ++it) {
2663     (*it)->set_bundle(this);
2664     InsertUses((*it)->first_interval());
2665   }
2666   ranges_.insert(other->ranges_.begin(), other->ranges_.end());
2667   other->ranges_.clear();
2668 
2669   return true;
2670 }
2671 
2672 void LiveRangeBundle::MergeSpillRanges() {
2673   SpillRange* target = nullptr;
2674   for (auto range : ranges_) {
2675     if (range->TopLevel()->HasSpillRange()) {
2676       SpillRange* current = range->TopLevel()->GetSpillRange();
2677       if (target == nullptr) {
2678         target = current;
2679       } else if (target != current) {
2680         target->TryMerge(current);
2681       }
2682     }
2683   }
2684 }
2685 
2686 RegisterAllocator::RegisterAllocator(TopTierRegisterAllocationData* data,
2687                                      RegisterKind kind)
2688     : data_(data),
2689       mode_(kind),
2690       num_registers_(GetRegisterCount(data->config(), kind)),
2691       num_allocatable_registers_(
2692           GetAllocatableRegisterCount(data->config(), kind)),
2693       allocatable_register_codes_(
2694           GetAllocatableRegisterCodes(data->config(), kind)),
2695       check_fp_aliasing_(false) {
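  // Aliasing checks are only needed if the code actually contains float32 or
  // simd128 values; on targets where kSimpleFPAliasing is false those
  // representations can alias double registers.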
2696   if (!kSimpleFPAliasing && kind == RegisterKind::kDouble) {
2697     check_fp_aliasing_ = (data->code()->representation_mask() &
2698                           (kFloat32Bit | kSimd128Bit)) != 0;
2699   }
2700 }
2701 
2702 LifetimePosition RegisterAllocator::GetSplitPositionForInstruction(
2703     const LiveRange* range, int instruction_index) {
2704   LifetimePosition ret = LifetimePosition::Invalid();
2705 
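  // The split position is the gap before the instruction, so a connecting
  // move between the two halves can be inserted there later if needed.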
2706   ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
2707   if (range->Start() >= ret || ret >= range->End()) {
2708     return LifetimePosition::Invalid();
2709   }
2710   return ret;
2711 }
2712 
2713 void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
2714   size_t initial_range_count = data()->live_ranges().size();
2715   for (size_t i = 0; i < initial_range_count; ++i) {
2716     CHECK_EQ(initial_range_count,
2717              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
2718     TopLevelLiveRange* range = data()->live_ranges()[i];
2719     if (!CanProcessRange(range)) continue;
2720     // Only assume defined by memory operand if we are guaranteed to spill it or
2721     // it has a spill operand.
2722     if (range->HasNoSpillType() ||
2723         (range->HasSpillRange() && !range->has_non_deferred_slot_use())) {
2724       continue;
2725     }
2726     LifetimePosition start = range->Start();
2727     TRACE("Live range %d:%d is defined by a spill operand.\n",
2728           range->TopLevel()->vreg(), range->relative_id());
2729     LifetimePosition next_pos = start;
2730     if (next_pos.IsGapPosition()) {
2731       next_pos = next_pos.NextStart();
2732     }
2733 
2734     UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
2735     // If the range already has a spill operand and it doesn't need a
2736     // register immediately, split it and spill the first part of the range.
2737     if (pos == nullptr) {
2738       Spill(range, SpillMode::kSpillAtDefinition);
2739     } else if (pos->pos() > range->Start().NextStart()) {
2740       // Do not spill the live range eagerly if the use position that can
2741       // benefit from the register is too close to the start of the live range.
2742       LifetimePosition split_pos = GetSplitPositionForInstruction(
2743           range, pos->pos().ToInstructionIndex());
2744       // There is no place to split, so we can't split and spill.
2745       if (!split_pos.IsValid()) continue;
2746 
2747       split_pos =
2748           FindOptimalSplitPos(range->Start().NextFullStart(), split_pos);
2749 
2750       SplitRangeAt(range, split_pos);
2751       Spill(range, SpillMode::kSpillAtDefinition);
2752     }
2753   }
2754 }
2755 
2756 LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
2757                                            LifetimePosition pos) {
2758   DCHECK(!range->TopLevel()->IsFixed());
2759   TRACE("Splitting live range %d:%d at %d\n", range->TopLevel()->vreg(),
2760         range->relative_id(), pos.value());
2761 
2762   if (pos <= range->Start()) return range;
2763 
2764   // We can't properly connect live ranges if splitting occurred at the end
2765   // of a block.
2766   DCHECK(pos.IsStart() || pos.IsGapPosition() ||
2767          (GetInstructionBlock(code(), pos)->last_instruction_index() !=
2768           pos.ToInstructionIndex()));
2769 
2770   LiveRange* result = range->SplitAt(pos, allocation_zone());
2771   return result;
2772 }
2773 
2774 LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
2775                                            LifetimePosition start,
2776                                            LifetimePosition end) {
2777   DCHECK(!range->TopLevel()->IsFixed());
2778   TRACE("Splitting live range %d:%d in position between [%d, %d]\n",
2779         range->TopLevel()->vreg(), range->relative_id(), start.value(),
2780         end.value());
2781 
2782   LifetimePosition split_pos = FindOptimalSplitPos(start, end);
2783   DCHECK(split_pos >= start);
2784   return SplitRangeAt(range, split_pos);
2785 }
2786 
2787 LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
2788                                                         LifetimePosition end) {
2789   int start_instr = start.ToInstructionIndex();
2790   int end_instr = end.ToInstructionIndex();
2791   DCHECK_LE(start_instr, end_instr);
2792 
2793   // We have no choice: start and end fall within the same instruction.
2794   if (start_instr == end_instr) return end;
2795 
2796   const InstructionBlock* start_block = GetInstructionBlock(code(), start);
2797   const InstructionBlock* end_block = GetInstructionBlock(code(), end);
2798 
2799   if (end_block == start_block) {
2800     // The interval is split in the same basic block. Split at the latest
2801     // possible position.
2802     return end;
2803   }
2804 
2805   const InstructionBlock* block = end_block;
2806   // Find header of outermost loop.
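  // Illustrative example: if the range starts in B1 and end lies inside a
  // loop nest whose outermost header B3 comes after B1, block ends up at B3,
  // so the split is placed in the gap before B3 rather than inside the loop.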
2807   do {
2808     const InstructionBlock* loop = GetContainingLoop(code(), block);
2809     if (loop == nullptr ||
2810         loop->rpo_number().ToInt() <= start_block->rpo_number().ToInt()) {
2811       // No more loops or loop starts before the lifetime start.
2812       break;
2813     }
2814     block = loop;
2815   } while (true);
2816 
2817   // We did not find any suitable outer loop. Split at the latest possible
2818   // position unless end_block is a loop header itself.
2819   if (block == end_block && !end_block->IsLoopHeader()) return end;
2820 
2821   return LifetimePosition::GapFromInstructionIndex(
2822       block->first_instruction_index());
2823 }
2824 
2825 LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
2826     LiveRange* range, LifetimePosition pos, SpillMode spill_mode,
2827     LiveRange** begin_spill_out) {
2828   *begin_spill_out = range;
2829   // TODO(herhut): Be more clever here as long as we do not move pos out of
2830   // deferred code.
2831   if (spill_mode == SpillMode::kSpillDeferred) return pos;
2832   const InstructionBlock* block = GetInstructionBlock(code(), pos.Start());
2833   const InstructionBlock* loop_header =
2834       block->IsLoopHeader() ? block : GetContainingLoop(code(), block);
2835   if (loop_header == nullptr) return pos;
2836 
2837   while (loop_header != nullptr) {
2838     // We are going to spill the live range inside the loop.
2839     // If possible, try to move the spilling position backwards to the loop
2840     // header. This will reduce the number of memory moves on the back edge.
2841     LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex(
2842         loop_header->first_instruction_index());
2843     // Stop if we moved to a loop header before the value is defined, or to
2844     // its definition position when spilling there is not beneficial.
2845     if (range->TopLevel()->Start() > loop_start ||
2846         (range->TopLevel()->Start() == loop_start &&
2847          range->TopLevel()->SpillAtLoopHeaderNotBeneficial()))
2848       return pos;
2849 
2850     LiveRange* live_at_header = range->TopLevel()->GetChildCovers(loop_start);
2851 
2852     if (live_at_header != nullptr && !live_at_header->spilled()) {
2853       for (LiveRange* check_use = live_at_header;
2854            check_use != nullptr && check_use->Start() < pos;
2855            check_use = check_use->next()) {
2856         // If we find a use for which spilling is detrimental, don't spill
2857         // at the loop header
2858         UsePosition* next_use =
2859             check_use->NextUsePositionSpillDetrimental(loop_start);
2860         // UsePosition at the end of a UseInterval may
2861         // have the same value as the start of the next range.
2862         if (next_use != nullptr && next_use->pos() <= pos) {
2863           return pos;
2864         }
2865       }
2866       // No register beneficial use inside the loop before the pos.
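      // Report the child range live at the hoisted position via
      // begin_spill_out, presumably so the caller can also spill the child
      // ranges between it and the original position (cf.
      // MaybeSpillPreviousRanges below).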
2867       *begin_spill_out = live_at_header;
2868       pos = loop_start;
2869     }
2870 
2871     // Try hoisting out to an outer loop.
2872     loop_header = GetContainingLoop(code(), loop_header);
2873   }
2874   return pos;
2875 }
2876 
2877 void RegisterAllocator::Spill(LiveRange* range, SpillMode spill_mode) {
2878   DCHECK(!range->spilled());
2879   DCHECK(spill_mode == SpillMode::kSpillAtDefinition ||
2880          GetInstructionBlock(code(), range->Start())->IsDeferred());
2881   TopLevelLiveRange* first = range->TopLevel();
2882   TRACE("Spilling live range %d:%d mode %d\n", first->vreg(),
2883         range->relative_id(), spill_mode);
2884 
2885   TRACE("Starting spill type is %d\n", static_cast<int>(first->spill_type()));
2886   if (first->HasNoSpillType()) {
2887     TRACE("New spill range needed");
2888     data()->AssignSpillRangeToLiveRange(first, spill_mode);
2889   }
2890   // Upgrade the spillmode, in case this was only spilled in deferred code so
2891   // far.
2892   if ((spill_mode == SpillMode::kSpillAtDefinition) &&
2893       (first->spill_type() ==
2894        TopLevelLiveRange::SpillType::kDeferredSpillRange)) {
2895     TRACE("Upgrading\n");
2896     first->set_spill_type(TopLevelLiveRange::SpillType::kSpillRange);
2897   }
2898   TRACE("Final spill type is %d\n", static_cast<int>(first->spill_type()));
2899   range->Spill();
2900 }
2901 
2902 const char* RegisterAllocator::RegisterName(int register_code) const {
2903   if (register_code == kUnassignedRegister) return "unassigned";
2904   return mode() == RegisterKind::kGeneral
2905              ? i::RegisterName(Register::from_code(register_code))
2906              : i::RegisterName(DoubleRegister::from_code(register_code));
2907 }
2908 
2909 LinearScanAllocator::LinearScanAllocator(TopTierRegisterAllocationData* data,
2910                                          RegisterKind kind, Zone* local_zone)
2911     : RegisterAllocator(data, kind),
2912       unhandled_live_ranges_(local_zone),
2913       active_live_ranges_(local_zone),
2914       inactive_live_ranges_(num_registers(), InactiveLiveRangeQueue(local_zone),
2915                             local_zone),
2916       next_active_ranges_change_(LifetimePosition::Invalid()),
2917       next_inactive_ranges_change_(LifetimePosition::Invalid()) {
2918   active_live_ranges().reserve(8);
2919 }
2920 
2921 void LinearScanAllocator::MaybeSpillPreviousRanges(LiveRange* begin_range,
2922                                                    LifetimePosition begin_pos,
2923                                                    LiveRange* end_range) {
2924   // Spill begin_range after begin_pos, then spill every live range of this
2925   // virtual register until but excluding end_range.
2926   DCHECK(begin_range->Covers(begin_pos));
2927   DCHECK_EQ(begin_range->TopLevel(), end_range->TopLevel());
2928 
2929   if (begin_range != end_range) {
2930     DCHECK_LE(begin_range->End(), end_range->Start());
2931     if (!begin_range->spilled()) {
2932       SpillAfter(begin_range, begin_pos, SpillMode::kSpillAtDefinition);
2933     }
2934     for (LiveRange* range = begin_range->next(); range != end_range;
2935          range = range->next()) {
2936       if (!range->spilled()) {
2937         range->Spill();
2938       }
2939     }
2940   }
2941 }
2942 
2943 void LinearScanAllocator::MaybeUndoPreviousSplit(LiveRange* range) {
2944   if (range->next() != nullptr && range->next()->ShouldRecombine()) {
2945     LiveRange* to_remove = range->next();
2946     TRACE("Recombining %d:%d with %d\n", range->TopLevel()->vreg(),
2947           range->relative_id(), to_remove->relative_id());
2948 
2949     // Remove the range from unhandled, as attaching it will change its
2950     // state and hence ordering in the unhandled set.
2951     auto removed_cnt = unhandled_live_ranges().erase(to_remove);
2952     DCHECK_EQ(removed_cnt, 1);
2953     USE(removed_cnt);
2954 
2955     range->AttachToNext();
2956   } else if (range->next() != nullptr) {
2957     TRACE("No recombine for %d:%d to %d\n", range->TopLevel()->vreg(),
2958           range->relative_id(), range->next()->relative_id());
2959   }
2960 }
2961 
2962 void LinearScanAllocator::SpillNotLiveRanges(RangeWithRegisterSet* to_be_live,
2963                                              LifetimePosition position,
2964                                              SpillMode spill_mode) {
2965   for (auto it = active_live_ranges().begin();
2966        it != active_live_ranges().end();) {
2967     LiveRange* active_range = *it;
2968     TopLevelLiveRange* toplevel = (*it)->TopLevel();
2969     auto found = to_be_live->find({toplevel, kUnassignedRegister});
2970     if (found == to_be_live->end()) {
2971       // Is not contained in {to_be_live}, spill it.
2972       // Fixed registers are exempt from this. They might have been
2973       // added from inactive at the block boundary but we know that
2974       // they cannot conflict as they are built before register
2975       // allocation starts. It would be algorithmically fine to split
2976       // them and reschedule but the code does not allow to do this.
2977       if (toplevel->IsFixed()) {
2978         TRACE("Keeping reactivated fixed range for %s\n",
2979               RegisterName(toplevel->assigned_register()));
2980         ++it;
2981       } else {
2982         // When spilling a previously spilled/reloaded range, we add back the
2983         // tail that we might have split off when we reloaded/spilled it
2984         // previously. Otherwise we might keep generating small split-offs.
2985         MaybeUndoPreviousSplit(active_range);
2986         TRACE("Putting back %d:%d\n", toplevel->vreg(),
2987               active_range->relative_id());
2988         LiveRange* split = SplitRangeAt(active_range, position);
2989         DCHECK_NE(split, active_range);
2990 
2991         // Make sure we revisit this range once it has a use that requires
2992         // a register.
2993         UsePosition* next_use = split->NextRegisterPosition(position);
2994         if (next_use != nullptr) {
2995           // Move to the start of the gap before the use so that we have space
2996           // to perform the potential reload. Otherwise, do not spill but add
2997           // to unhandled for reallocation.
2998           LifetimePosition revisit_at = next_use->pos().FullStart();
2999           TRACE("Next use at %d\n", revisit_at.value());
3000           if (!data()->IsBlockBoundary(revisit_at)) {
3001             // Leave some space so we have enough gap room.
3002             revisit_at = revisit_at.PrevStart().FullStart();
3003           }
3004           // If this range became live right at the block boundary that we are
3005           // currently processing, we do not need to split it. Instead move it
3006           // to unhandled right away.
3007           if (position < revisit_at) {
3008             LiveRange* third_part = SplitRangeAt(split, revisit_at);
3009             DCHECK_NE(split, third_part);
3010             Spill(split, spill_mode);
3011             TRACE("Marking %d:%d to recombine\n", toplevel->vreg(),
3012                   third_part->relative_id());
3013             third_part->SetRecombine();
3014             AddToUnhandled(third_part);
3015           } else {
3016             AddToUnhandled(split);
3017           }
3018         } else {
3019           Spill(split, spill_mode);
3020         }
3021         it = ActiveToHandled(it);
3022       }
3023     } else {
3024       // This range is contained in {to_be_live}, so we can keep it.
3025       int expected_register = (*found).expected_register;
3026       to_be_live->erase(found);
3027       if (expected_register == active_range->assigned_register()) {
3028         // Was live and in the correct register, simply pass through.
3029         TRACE("Keeping %d:%d in %s\n", toplevel->vreg(),
3030               active_range->relative_id(),
3031               RegisterName(active_range->assigned_register()));
3032         ++it;
3033       } else {
3034         // Was live but in the wrong register. Split and schedule for
3035         // allocation.
3036         TRACE("Scheduling %d:%d\n", toplevel->vreg(),
3037               active_range->relative_id());
3038         LiveRange* split = SplitRangeAt(active_range, position);
3039         split->set_controlflow_hint(expected_register);
3040         AddToUnhandled(split);
3041         it = ActiveToHandled(it);
3042       }
3043     }
3044   }
3045 }
3046 
3047 LiveRange* LinearScanAllocator::AssignRegisterOnReload(LiveRange* range,
3048                                                        int reg) {
3049   // We know the register is currently free but it might be in
3050   // use by a currently inactive range. So we might not be able
3051   // to reload for the full distance. In such a case, split here.
3052   // TODO(herhut):
3053   // It might be better if we could use the normal unhandled queue and
3054   // give reloading registers precedence. That way we would compute the
3055   // intersection for the entire future.
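  // Scan the inactive ranges on reg (and, with FP aliasing, on every
  // potentially aliasing register) and clip the reload at the first future
  // intersection; the clipped-off tail is rescheduled below.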
3056   LifetimePosition new_end = range->End();
3057   for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) {
3058     if ((kSimpleFPAliasing || !check_fp_aliasing()) && cur_reg != reg) {
3059       continue;
3060     }
3061     for (const LiveRange* cur_inactive : inactive_live_ranges(cur_reg)) {
3062       if (!kSimpleFPAliasing && check_fp_aliasing() &&
3063           !data()->config()->AreAliases(cur_inactive->representation(), cur_reg,
3064                                         range->representation(), reg)) {
3065         continue;
3066       }
3067       if (new_end <= cur_inactive->NextStart()) {
3068         // Inactive ranges are sorted by their next start, so the remaining
3069         // ranges cannot contribute to new_end.
3070         break;
3071       }
3072       auto next_intersection = cur_inactive->FirstIntersection(range);
3073       if (!next_intersection.IsValid()) continue;
3074       new_end = std::min(new_end, next_intersection);
3075     }
3076   }
3077   if (new_end != range->End()) {
3078     TRACE("Found new end for %d:%d at %d\n", range->TopLevel()->vreg(),
3079           range->relative_id(), new_end.value());
3080     LiveRange* tail = SplitRangeAt(range, new_end);
3081     AddToUnhandled(tail);
3082   }
3083   SetLiveRangeAssignedRegister(range, reg);
3084   return range;
3085 }
3086 
3087 void LinearScanAllocator::ReloadLiveRanges(
3088     RangeWithRegisterSet const& to_be_live, LifetimePosition position) {
3089   // Assumption: All ranges in {to_be_live} are currently spilled and there are
3090   // no conflicting registers in the active ranges.
3091   // The former is ensured by SpillNotLiveRanges, the latter is by construction
3092   // of the to_be_live set.
3093   for (RangeWithRegister range_with_register : to_be_live) {
3094     TopLevelLiveRange* range = range_with_register.range;
3095     int reg = range_with_register.expected_register;
3096     LiveRange* to_resurrect = range->GetChildCovers(position);
3097     if (to_resurrect == nullptr) {
3098       // While the range was live until the end of the predecessor block, it is
3099       // not live in this block. Either there is a lifetime gap or the range
3100       // died.
3101       TRACE("No candidate for %d at %d\n", range->vreg(), position.value());
3102     } else {
3103       // We might be resurrecting a range that we spilled until its next use
3104       // before. In such cases, we have to unsplit it before processing as
3105       // otherwise we might get register changes from one range to the other
3106       // in the middle of blocks.
3107       // If there is a gap between this range and the next, we can just keep
3108       // it as a register change won't hurt.
3109       MaybeUndoPreviousSplit(to_resurrect);
3110       if (to_resurrect->Start() == position) {
3111         // This range already starts at this block. It might have been spilled,
3112         // so we have to unspill it. Otherwise, it is already in the unhandled
3113         // queue waiting for processing.
3114         DCHECK(!to_resurrect->HasRegisterAssigned());
3115         TRACE("Reload %d:%d starting at %d itself\n", range->vreg(),
3116               to_resurrect->relative_id(), position.value());
3117         if (to_resurrect->spilled()) {
3118           to_resurrect->Unspill();
3119           to_resurrect->set_controlflow_hint(reg);
3120           AddToUnhandled(to_resurrect);
3121         } else {
3122           // Assign the preassigned register if we know it. Otherwise, there is
3123           // nothing to do, as the range is already in unhandled.
3124           if (reg != kUnassignedRegister) {
3125             auto erased_cnt = unhandled_live_ranges().erase(to_resurrect);
3126             DCHECK_EQ(erased_cnt, 1);
3127             USE(erased_cnt);
3128             // We know that there is no conflict with active ranges, so just
3129             // assign the register to the range.
3130             to_resurrect = AssignRegisterOnReload(to_resurrect, reg);
3131             AddToActive(to_resurrect);
3132           }
3133         }
3134       } else {
3135         // This range was spilled before. We have to split it and schedule the
3136         // second part for allocation (or assign the register if we know).
3137         DCHECK(to_resurrect->spilled());
3138         LiveRange* split = SplitRangeAt(to_resurrect, position);
3139         TRACE("Reload %d:%d starting at %d as %d\n", range->vreg(),
3140               to_resurrect->relative_id(), split->Start().value(),
3141               split->relative_id());
3142         DCHECK_NE(split, to_resurrect);
3143         if (reg != kUnassignedRegister) {
3144           // We know that there is no conflict with active ranges, so just
3145           // assign the register to the range.
3146           split = AssignRegisterOnReload(split, reg);
3147           AddToActive(split);
3148         } else {
3149           // Let normal register assignment find a suitable register.
3150           split->set_controlflow_hint(reg);
3151           AddToUnhandled(split);
3152         }
3153       }
3154     }
3155   }
3156 }
3157 
3158 RpoNumber LinearScanAllocator::ChooseOneOfTwoPredecessorStates(
3159     InstructionBlock* current_block, LifetimePosition boundary) {
3160   using SmallRangeVector =
3161       base::SmallVector<TopLevelLiveRange*,
3162                         RegisterConfiguration::kMaxRegisters>;
3163   // Pick the state that would generate the least spill/reloads.
3164   // Compute vectors of ranges with imminent use for both sides.
3165   // As GetChildCovers is cached, it is cheaper to repeatedly
3166   // call it rather than compute a shared set first.
3167   auto& left = data()->GetSpillState(current_block->predecessors()[0]);
3168   auto& right = data()->GetSpillState(current_block->predecessors()[1]);
3169   SmallRangeVector left_used;
3170   for (const auto item : left) {
3171     LiveRange* at_next_block = item->TopLevel()->GetChildCovers(boundary);
3172     if (at_next_block != nullptr &&
3173         at_next_block->NextUsePositionRegisterIsBeneficial(boundary) !=
3174             nullptr) {
3175       left_used.emplace_back(item->TopLevel());
3176     }
3177   }
3178   SmallRangeVector right_used;
3179   for (const auto item : right) {
3180     LiveRange* at_next_block = item->TopLevel()->GetChildCovers(boundary);
3181     if (at_next_block != nullptr &&
3182         at_next_block->NextUsePositionRegisterIsBeneficial(boundary) !=
3183             nullptr) {
3184       right_used.emplace_back(item->TopLevel());
3185     }
3186   }
3187   if (left_used.empty() && right_used.empty()) {
3188     // There are no beneficial register uses. Look at any use at
3189     // all. We do not account for all uses, like flowing into a phi.
3190     // So we just look at ranges that are still live.
3191     TRACE("Looking at only uses\n");
3192     for (const auto item : left) {
3193       LiveRange* at_next_block = item->TopLevel()->GetChildCovers(boundary);
3194       if (at_next_block != nullptr &&
3195           at_next_block->NextUsePosition(boundary) != nullptr) {
3196         left_used.emplace_back(item->TopLevel());
3197       }
3198     }
3199     for (const auto item : right) {
3200       LiveRange* at_next_block = item->TopLevel()->GetChildCovers(boundary);
3201       if (at_next_block != nullptr &&
3202           at_next_block->NextUsePosition(boundary) != nullptr) {
3203         right_used.emplace_back(item->TopLevel());
3204       }
3205     }
3206   }
3207   // Now left_used and right_used contain the ranges that matter.
3208   // Pick the side that contributes more of them.
3209   TRACE("Vote went %zu vs %zu\n", left_used.size(), right_used.size());
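  // Note: a tie falls through to the second predecessor.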
3210   return left_used.size() > right_used.size()
3211              ? current_block->predecessors()[0]
3212              : current_block->predecessors()[1];
3213 }
3214 
3215 bool LinearScanAllocator::CheckConflict(MachineRepresentation rep, int reg,
3216                                         RangeWithRegisterSet* to_be_live) {
3217   for (RangeWithRegister range_with_reg : *to_be_live) {
3218     if (data()->config()->AreAliases(range_with_reg.range->representation(),
3219                                      range_with_reg.expected_register, rep,
3220                                      reg)) {
3221       return true;
3222     }
3223   }
3224   return false;
3225 }
3226 
3227 void LinearScanAllocator::ComputeStateFromManyPredecessors(
3228     InstructionBlock* current_block, RangeWithRegisterSet* to_be_live) {
3229   struct Vote {
3230     size_t count;
3231     int used_registers[RegisterConfiguration::kMaxRegisters];
3232   };
3233   struct TopLevelLiveRangeComparator {
3234     bool operator()(const TopLevelLiveRange* lhs,
3235                     const TopLevelLiveRange* rhs) const {
3236       return lhs->vreg() < rhs->vreg();
3237     }
3238   };
3239   ZoneMap<TopLevelLiveRange*, Vote, TopLevelLiveRangeComparator> counts(
3240       data()->allocation_zone());
3241   int deferred_blocks = 0;
3242   for (RpoNumber pred : current_block->predecessors()) {
3243     if (!ConsiderBlockForControlFlow(current_block, pred)) {
3244       // Back edges of a loop count as deferred here too.
3245       deferred_blocks++;
3246       continue;
3247     }
3248     const auto& pred_state = data()->GetSpillState(pred);
3249     for (LiveRange* range : pred_state) {
3250       // We might have spilled the register backwards, so the range we
3251       // stored might have lost its register. Ignore those.
3252       if (!range->HasRegisterAssigned()) continue;
3253       TopLevelLiveRange* toplevel = range->TopLevel();
3254       auto previous = counts.find(toplevel);
3255       if (previous == counts.end()) {
3256         auto result = counts.emplace(std::make_pair(toplevel, Vote{1, {0}}));
3257         CHECK(result.second);
3258         result.first->second.used_registers[range->assigned_register()]++;
3259       } else {
3260         previous->second.count++;
3261         previous->second.used_registers[range->assigned_register()]++;
3262       }
3263     }
3264   }
3265 
3266   // Choose the live ranges from the majority.
3267   const size_t majority =
3268       (current_block->PredecessorCount() + 2 - deferred_blocks) / 2;
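  // With integer division this is floor(n / 2) + 1 for n considered
  // (non-deferred) predecessors, i.e. a strict majority.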
3269   bool taken_registers[RegisterConfiguration::kMaxRegisters] = {false};
3270   auto assign_to_live = [this, counts, majority](
3271                             std::function<bool(TopLevelLiveRange*)> filter,
3272                             RangeWithRegisterSet* to_be_live,
3273                             bool* taken_registers) {
3274     bool check_aliasing = !kSimpleFPAliasing && check_fp_aliasing();
3275     for (const auto& val : counts) {
3276       if (!filter(val.first)) continue;
3277       if (val.second.count >= majority) {
3278         int register_max = 0;
3279         int reg = kUnassignedRegister;
3280         bool conflict = false;
3281         int num_regs = num_registers();
3282         int num_codes = num_allocatable_registers();
3283         const int* codes = allocatable_register_codes();
3284         MachineRepresentation rep = val.first->representation();
3285         if (check_aliasing && (rep == MachineRepresentation::kFloat32 ||
3286                                rep == MachineRepresentation::kSimd128))
3287           GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
3288         for (int idx = 0; idx < num_regs; idx++) {
3289           int uses = val.second.used_registers[idx];
3290           if (uses == 0) continue;
3291           if (uses > register_max || (conflict && uses == register_max)) {
3292             reg = idx;
3293             register_max = uses;
3294             conflict = check_aliasing ? CheckConflict(rep, reg, to_be_live)
3295                                       : taken_registers[reg];
3296           }
3297         }
3298         if (conflict) {
3299           reg = kUnassignedRegister;
3300         } else if (!check_aliasing) {
3301           taken_registers[reg] = true;
3302         }
3303         to_be_live->emplace(val.first, reg);
3304         TRACE("Reset %d as live due to vote %zu in %s\n",
3305               val.first->TopLevel()->vreg(), val.second.count,
3306               RegisterName(reg));
3307       }
3308     }
3309   };
3310   // First round, process fixed registers, as these have precedence.
3311   // There is only one fixed range per register, so we cannot have
3312   // conflicts.
3313   assign_to_live([](TopLevelLiveRange* r) { return r->IsFixed(); }, to_be_live,
3314                  taken_registers);
3315   // Second round, process the rest.
3316   assign_to_live([](TopLevelLiveRange* r) { return !r->IsFixed(); }, to_be_live,
3317                  taken_registers);
3318 }
3319 
3320 bool LinearScanAllocator::ConsiderBlockForControlFlow(
3321     InstructionBlock* current_block, RpoNumber predecessor) {
3322   // We ignore predecessors on back edges when looking for control flow effects,
3323   // as those lie in the future of allocation and we have no data yet. Also,
3324   // deferred blocks are ignored on deferred to non-deferred boundaries, as we do
3325   // not want them to influence allocation of non-deferred code.
3326   return (predecessor < current_block->rpo_number()) &&
3327          (current_block->IsDeferred() ||
3328           !code()->InstructionBlockAt(predecessor)->IsDeferred());
3329 }
3330 
3331 void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode,
3332                                                     InstructionBlock* block) {
3333   if (spill_mode == SpillMode::kSpillDeferred) {
3334     LifetimePosition max = LifetimePosition::InstructionFromInstructionIndex(
3335         LastDeferredInstructionIndex(block));
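    // max bounds the current stretch of deferred code; conflicts that only
    // start beyond it are ignored below.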
3336     // Adds range back to inactive, resolving resulting conflicts.
3337     auto add_to_inactive = [this, max](LiveRange* range) {
3338       AddToInactive(range);
3339       // Splits other if it conflicts with range. Other is placed in unhandled
3340       // for later reallocation.
3341       auto split_conflicting = [this, max](LiveRange* range, LiveRange* other,
3342                                            std::function<void(LiveRange*)>
3343                                                update_caches) {
3344         if (other->TopLevel()->IsFixed()) return;
3345         int reg = range->assigned_register();
3346         if (kSimpleFPAliasing || !check_fp_aliasing()) {
3347           if (other->assigned_register() != reg) {
3348             return;
3349           }
3350         } else {
3351           if (!data()->config()->AreAliases(range->representation(), reg,
3352                                             other->representation(),
3353                                             other->assigned_register())) {
3354             return;
3355           }
3356         }
3357         // The inactive range might conflict, so check whether we need to
3358         // split and spill. We can look for the first intersection, as there
3359         // cannot be any intersections in the past, as those would have been a
3360         // conflict then.
3361         LifetimePosition next_start = range->FirstIntersection(other);
3362         if (!next_start.IsValid() || (next_start > max)) {
3363           // There is no conflict or the conflict is outside of the current
3364           // stretch of deferred code. In either case we can ignore the
3365           // inactive range.
3366           return;
3367         }
3368         // They overlap, so we need to split the conflicting range and
3369         // reschedule it for allocation.
3370         TRACE("Resolving conflict of %d with deferred fixed for register %s\n",
3371               other->TopLevel()->vreg(),
3372               RegisterName(other->assigned_register()));
3373         LiveRange* split_off =
3374             other->SplitAt(next_start, data()->allocation_zone());
3375         // Try to get the same register after the deferred block.
3376         split_off->set_controlflow_hint(other->assigned_register());
3377         DCHECK_NE(split_off, other);
3378         AddToUnhandled(split_off);
3379         update_caches(other);
3380       };
3381       // Now check for conflicts in active and inactive ranges. We might have
3382       // conflicts in inactive, as we do not do this check on every block
3383       // boundary but only on deferred/non-deferred changes, yet inactive
3384       // live ranges might become live on any block boundary.
3385       for (auto active : active_live_ranges()) {
3386         split_conflicting(range, active, [this](LiveRange* updated) {
3387           next_active_ranges_change_ =
3388               std::min(updated->End(), next_active_ranges_change_);
3389         });
3390       }
3391       for (int reg = 0; reg < num_registers(); ++reg) {
3392         if ((kSimpleFPAliasing || !check_fp_aliasing()) &&
3393             reg != range->assigned_register()) {
3394           continue;
3395         }
3396         for (auto inactive : inactive_live_ranges(reg)) {
3397           split_conflicting(range, inactive, [this](LiveRange* updated) {
3398             next_inactive_ranges_change_ =
3399                 std::min(updated->End(), next_inactive_ranges_change_);
3400           });
3401         }
3402       }
3403     };
3404     if (mode() == RegisterKind::kGeneral) {
3405       for (TopLevelLiveRange* current : data()->fixed_live_ranges()) {
3406         if (current != nullptr) {
3407           if (current->IsDeferredFixed()) {
3408             add_to_inactive(current);
3409           }
3410         }
3411       }
3412     } else {
3413       for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
3414         if (current != nullptr) {
3415           if (current->IsDeferredFixed()) {
3416             add_to_inactive(current);
3417           }
3418         }
3419       }
3420       if (!kSimpleFPAliasing && check_fp_aliasing()) {
3421         for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) {
3422           if (current != nullptr) {
3423             if (current->IsDeferredFixed()) {
3424               add_to_inactive(current);
3425             }
3426           }
3427         }
3428         for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) {
3429           if (current != nullptr) {
3430             if (current->IsDeferredFixed()) {
3431               add_to_inactive(current);
3432             }
3433           }
3434         }
3435       }
3436     }
3437   } else {
3438     // Remove all ranges.
3439     for (int reg = 0; reg < num_registers(); ++reg) {
3440       for (auto it = inactive_live_ranges(reg).begin();
3441            it != inactive_live_ranges(reg).end();) {
3442         if ((*it)->TopLevel()->IsDeferredFixed()) {
3443           it = inactive_live_ranges(reg).erase(it);
3444         } else {
3445           ++it;
3446         }
3447       }
3448     }
3449   }
3450 }
3451 
3452 bool LinearScanAllocator::BlockIsDeferredOrImmediatePredecessorIsNotDeferred(
3453     const InstructionBlock* block) {
3454   if (block->IsDeferred()) return true;
3455   if (block->PredecessorCount() == 0) return true;
3456   bool pred_is_deferred = false;
3457   for (auto pred : block->predecessors()) {
3458     if (pred.IsNext(block->rpo_number())) {
3459       pred_is_deferred = code()->InstructionBlockAt(pred)->IsDeferred();
3460       break;
3461     }
3462   }
3463   return !pred_is_deferred;
3464 }
3465 
3466 bool LinearScanAllocator::HasNonDeferredPredecessor(InstructionBlock* block) {
3467   for (auto pred : block->predecessors()) {
3468     InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
3469     if (!pred_block->IsDeferred()) return true;
3470   }
3471   return false;
3472 }
3473 
3474 void LinearScanAllocator::AllocateRegisters() {
3475   DCHECK(unhandled_live_ranges().empty());
3476   DCHECK(active_live_ranges().empty());
3477   for (int reg = 0; reg < num_registers(); ++reg) {
3478     DCHECK(inactive_live_ranges(reg).empty());
3479   }
3480 
3481   SplitAndSpillRangesDefinedByMemoryOperand();
3482   data()->ResetSpillState();
3483 
3484   if (data()->is_trace_alloc()) {
3485     PrintRangeOverview(std::cout);
3486   }
3487 
3488   const size_t live_ranges_size = data()->live_ranges().size();
3489   for (TopLevelLiveRange* range : data()->live_ranges()) {
3490     CHECK_EQ(live_ranges_size,
3491              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
3492     if (!CanProcessRange(range)) continue;
3493     for (LiveRange* to_add = range; to_add != nullptr;
3494          to_add = to_add->next()) {
3495       if (!to_add->spilled()) {
3496         AddToUnhandled(to_add);
3497       }
3498     }
3499   }
3500 
3501   if (mode() == RegisterKind::kGeneral) {
3502     for (TopLevelLiveRange* current : data()->fixed_live_ranges()) {
3503       if (current != nullptr) {
3504         if (current->IsDeferredFixed()) continue;
3505         AddToInactive(current);
3506       }
3507     }
3508   } else {
3509     for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
3510       if (current != nullptr) {
3511         if (current->IsDeferredFixed()) continue;
3512         AddToInactive(current);
3513       }
3514     }
3515     if (!kSimpleFPAliasing && check_fp_aliasing()) {
3516       for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) {
3517         if (current != nullptr) {
3518           if (current->IsDeferredFixed()) continue;
3519           AddToInactive(current);
3520         }
3521       }
3522       for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) {
3523         if (current != nullptr) {
3524           if (current->IsDeferredFixed()) continue;
3525           AddToInactive(current);
3526         }
3527       }
3528     }
3529   }
3530 
3531   RpoNumber last_block = RpoNumber::FromInt(0);
3532   RpoNumber max_blocks =
3533       RpoNumber::FromInt(code()->InstructionBlockCount() - 1);
3534   LifetimePosition next_block_boundary =
3535       LifetimePosition::InstructionFromInstructionIndex(
3536           data()
3537               ->code()
3538               ->InstructionBlockAt(last_block)
3539               ->last_instruction_index())
3540           .NextFullStart();
3541   SpillMode spill_mode = SpillMode::kSpillAtDefinition;
3542 
3543   // Process all ranges. We also need to ensure that we have seen all block
3544   // boundaries. Linear scan might have assigned and spilled ranges before
3545   // reaching the last block and hence we would ignore control flow effects for
3546   // those. Not only does this produce a potentially bad assignment, it also
3547   // breaks with the invariant that we undo spills that happen in deferred code
3548   // when crossing a deferred/non-deferred boundary.
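  // Each iteration of the loop below therefore handles whichever comes first:
  // the next block boundary (restoring or merging predecessor state) or the
  // start of the next unhandled live range.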
3549   while (!unhandled_live_ranges().empty() || last_block < max_blocks) {
3550     data()->tick_counter()->TickAndMaybeEnterSafepoint();
3551     LiveRange* current = unhandled_live_ranges().empty()
3552                              ? nullptr
3553                              : *unhandled_live_ranges().begin();
3554     LifetimePosition position =
3555         current ? current->Start() : next_block_boundary;
3556 #ifdef DEBUG
3557     allocation_finger_ = position;
3558 #endif
3559     // Check whether we just moved across a block boundary. This will trigger
3560     // for the first range that is past the current boundary.
3561     if (position >= next_block_boundary) {
3562       TRACE("Processing boundary at %d leaving %d\n",
3563             next_block_boundary.value(), last_block.ToInt());
3564 
3565       // Forward state to before block boundary
3566       LifetimePosition end_of_block = next_block_boundary.PrevStart().End();
3567       ForwardStateTo(end_of_block);
3568 
3569       // Remember this state.
3570       InstructionBlock* current_block = data()->code()->GetInstructionBlock(
3571           next_block_boundary.ToInstructionIndex());
3572 
3573       // Store current spill state (as the state at end of block). For
3574       // simplicity, we store the active ranges, e.g., the live ranges that
3575       // are not spilled.
3576       data()->RememberSpillState(last_block, active_live_ranges());
3577 
3578       // Only reset the state if this was not a direct fallthrough. Otherwise
3579       // control flow resolution will get confused (it does not expect changes
3580       // across fallthrough edges).
3581       bool fallthrough =
3582           (current_block->PredecessorCount() == 1) &&
3583           current_block->predecessors()[0].IsNext(current_block->rpo_number());
3584 
3585       // When crossing a deferred/non-deferred boundary, we have to load or
3586       // remove the deferred fixed ranges from inactive.
3587       if ((spill_mode == SpillMode::kSpillDeferred) !=
3588           current_block->IsDeferred()) {
3589         // Update spill mode.
3590         spill_mode = current_block->IsDeferred()
3591                          ? SpillMode::kSpillDeferred
3592                          : SpillMode::kSpillAtDefinition;
3593 
3594         ForwardStateTo(next_block_boundary);
3595 
3596 #ifdef DEBUG
3597         // Allow allocation at current position.
3598         allocation_finger_ = next_block_boundary;
3599 #endif
3600         UpdateDeferredFixedRanges(spill_mode, current_block);
3601       }
3602 
3603       // Allocation relies on the fact that each non-deferred block has at
3604       // least one non-deferred predecessor. Check this invariant here.
3605       DCHECK_IMPLIES(!current_block->IsDeferred(),
3606                      HasNonDeferredPredecessor(current_block));
3607 
3608       if (!fallthrough) {
3609 #ifdef DEBUG
3610         // Allow allocation at current position.
3611         allocation_finger_ = next_block_boundary;
3612 #endif
3613 
3614         // We are currently at next_block_boundary - 1. Move the state to the
3615         // actual block boundary position. In particular, we have to
3616         // reactivate inactive ranges so that they get rescheduled for
3617         // allocation if they were not live at the predecessors.
3618         ForwardStateTo(next_block_boundary);
3619 
3620         RangeWithRegisterSet to_be_live(data()->allocation_zone());
3621 
3622         // If we end up deciding to use the state of the immediate
3623         // predecessor, it is better not to perform a change. It would lead to
3624         // the same outcome anyway.
3625         // This may never happen on boundaries between deferred and
3626         // non-deferred code, as we rely on explicit respill to ensure we
3627         // spill at definition.
3628         bool no_change_required = false;
3629 
3630         auto pick_state_from = [this, current_block](
3631                                    RpoNumber pred,
3632                                    RangeWithRegisterSet* to_be_live) -> bool {
3633           TRACE("Using information from B%d\n", pred.ToInt());
3634           // If this is a fall-through that is not across a deferred
3635           // boundary, there is nothing to do.
3636           bool is_noop = pred.IsNext(current_block->rpo_number());
3637           if (!is_noop) {
3638             auto& spill_state = data()->GetSpillState(pred);
3639             TRACE("Not a fallthrough. Adding %zu elements...\n",
3640                   spill_state.size());
3641             LifetimePosition pred_end =
3642                 LifetimePosition::GapFromInstructionIndex(
3643                     this->code()->InstructionBlockAt(pred)->code_end());
3644             for (const auto range : spill_state) {
3645               // Filter out ranges that were split or had their register
3646               // stolen by backwards working spill heuristics. These have
3647               // been spilled after the fact, so ignore them.
3648               if (range->End() < pred_end || !range->HasRegisterAssigned())
3649                 continue;
3650               to_be_live->emplace(range);
3651             }
3652           }
3653           return is_noop;
3654         };
3655 
3656         // Multiple cases here:
3657         // 1) We have a single predecessor => this is a control flow split, so
3658         //     just restore the predecessor state.
3659         //     based on forward uses, trying to benefit
3660         //     based on what to do based on forward uses, trying to benefit
3661         //     the same branch if in doubt (make one path fast).
3662         // 3) We have many predecessors => this is a switch. Compute union
3663         //     based on majority, break ties by looking forward.
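        // In cases 2 and 3, back edges and deferred predecessors are filtered
        // out via ConsiderBlockForControlFlow.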
3664         if (current_block->PredecessorCount() == 1) {
3665           TRACE("Single predecessor for B%d\n",
3666                 current_block->rpo_number().ToInt());
3667           no_change_required =
3668               pick_state_from(current_block->predecessors()[0], &to_be_live);
3669         } else if (current_block->PredecessorCount() == 2) {
3670           TRACE("Two predecessors for B%d\n",
3671                 current_block->rpo_number().ToInt());
3672           // If one of the branches does not contribute any information,
3673           // e.g. because it is deferred or a back edge, we can short cut
3674           // here right away.
3675           RpoNumber chosen_predecessor = RpoNumber::Invalid();
3676           if (!ConsiderBlockForControlFlow(current_block,
3677                                            current_block->predecessors()[0])) {
3678             chosen_predecessor = current_block->predecessors()[1];
3679           } else if (!ConsiderBlockForControlFlow(
3680                          current_block, current_block->predecessors()[1])) {
3681             chosen_predecessor = current_block->predecessors()[0];
3682           } else {
3683             chosen_predecessor = ChooseOneOfTwoPredecessorStates(
3684                 current_block, next_block_boundary);
3685           }
3686           no_change_required = pick_state_from(chosen_predecessor, &to_be_live);
3687 
3688         } else {
3689           // Merge at the end of, e.g., a switch.
3690           ComputeStateFromManyPredecessors(current_block, &to_be_live);
3691         }
3692 
3693         if (!no_change_required) {
3694           SpillNotLiveRanges(&to_be_live, next_block_boundary, spill_mode);
3695           ReloadLiveRanges(to_be_live, next_block_boundary);
3696         }
3697       }
3698       // Update block information
3699       last_block = current_block->rpo_number();
3700       next_block_boundary = LifetimePosition::InstructionFromInstructionIndex(
3701                                 current_block->last_instruction_index())
3702                                 .NextFullStart();
3703 
3704       // We might have created new unhandled live ranges, so cycle around the
3705       // loop to make sure we pick the topmost range in unhandled for
3706       // processing.
3707       continue;
3708     }
3709 
3710     DCHECK_NOT_NULL(current);
3711 
3712     TRACE("Processing interval %d:%d start=%d\n", current->TopLevel()->vreg(),
3713           current->relative_id(), position.value());
3714 
3715     // Now we can erase current, as we are sure to process it.
3716     unhandled_live_ranges().erase(unhandled_live_ranges().begin());
3717 
3718     if (current->IsTopLevel() && TryReuseSpillForPhi(current->TopLevel()))
3719       continue;
3720 
3721     ForwardStateTo(position);
3722 
3723     DCHECK(!current->HasRegisterAssigned() && !current->spilled());
3724 
3725     ProcessCurrentRange(current, spill_mode);
3726   }
3727 
3728   if (data()->is_trace_alloc()) {
3729     PrintRangeOverview(std::cout);
3730   }
3731 }
3732 
3733 void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
3734                                                        int reg) {
3735   data()->MarkAllocated(range->representation(), reg);
3736   range->set_assigned_register(reg);
3737   range->SetUseHints(reg);
3738   range->UpdateBundleRegister(reg);
3739   if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
3740     data()->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg);
3741   }
3742 }
3743 
3744 void LinearScanAllocator::AddToActive(LiveRange* range) {
3745   TRACE("Add live range %d:%d in %s to active\n", range->TopLevel()->vreg(),
3746         range->relative_id(), RegisterName(range->assigned_register()));
3747   active_live_ranges().push_back(range);
3748   next_active_ranges_change_ =
3749       std::min(next_active_ranges_change_, range->NextEndAfter(range->Start()));
3750 }
3751 
3752 void LinearScanAllocator::AddToInactive(LiveRange* range) {
3753   TRACE("Add live range %d:%d to inactive\n", range->TopLevel()->vreg(),
3754         range->relative_id());
3755   next_inactive_ranges_change_ = std::min(
3756       next_inactive_ranges_change_, range->NextStartAfter(range->Start()));
3757   DCHECK(range->HasRegisterAssigned());
3758   inactive_live_ranges(range->assigned_register()).insert(range);
3759 }
3760 
3761 void LinearScanAllocator::AddToUnhandled(LiveRange* range) {
3762   if (range == nullptr || range->IsEmpty()) return;
3763   DCHECK(!range->HasRegisterAssigned() && !range->spilled());
3764   DCHECK(allocation_finger_ <= range->Start());
3765 
3766   TRACE("Add live range %d:%d to unhandled\n", range->TopLevel()->vreg(),
3767         range->relative_id());
3768   unhandled_live_ranges().insert(range);
3769 }
3770 
3771 ZoneVector<LiveRange*>::iterator LinearScanAllocator::ActiveToHandled(
3772     const ZoneVector<LiveRange*>::iterator it) {
3773   TRACE("Moving live range %d:%d from active to handled\n",
3774         (*it)->TopLevel()->vreg(), (*it)->relative_id());
3775   return active_live_ranges().erase(it);
3776 }
3777 
3778 ZoneVector<LiveRange*>::iterator LinearScanAllocator::ActiveToInactive(
3779     const ZoneVector<LiveRange*>::iterator it, LifetimePosition position) {
3780   LiveRange* range = *it;
3781   TRACE("Moving live range %d:%d from active to inactive\n",
3782         (range)->TopLevel()->vreg(), range->relative_id());
3783   LifetimePosition next_active = range->NextStartAfter(position);
3784   next_inactive_ranges_change_ =
3785       std::min(next_inactive_ranges_change_, next_active);
3786   DCHECK(range->HasRegisterAssigned());
3787   inactive_live_ranges(range->assigned_register()).insert(range);
3788   return active_live_ranges().erase(it);
3789 }
3790 
3791 LinearScanAllocator::InactiveLiveRangeQueue::iterator
3792 LinearScanAllocator::InactiveToHandled(InactiveLiveRangeQueue::iterator it) {
3793   LiveRange* range = *it;
3794   TRACE("Moving live range %d:%d from inactive to handled\n",
3795         range->TopLevel()->vreg(), range->relative_id());
3796   int reg = range->assigned_register();
3797   return inactive_live_ranges(reg).erase(it);
3798 }
3799 
3800 LinearScanAllocator::InactiveLiveRangeQueue::iterator
3801 LinearScanAllocator::InactiveToActive(InactiveLiveRangeQueue::iterator it,
3802                                       LifetimePosition position) {
3803   LiveRange* range = *it;
3804   active_live_ranges().push_back(range);
3805   TRACE("Moving live range %d:%d from inactive to active\n",
3806         range->TopLevel()->vreg(), range->relative_id());
3807   next_active_ranges_change_ =
3808       std::min(next_active_ranges_change_, range->NextEndAfter(position));
3809   int reg = range->assigned_register();
3810   return inactive_live_ranges(reg).erase(it);
3811 }
3812 
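// Overview: ForwardStateTo advances the linear-scan bookkeeping to |position|.
// Active ranges that have ended are retired to handled, active ranges with a
// lifetime hole at |position| become inactive, and inactive ranges that now
// cover |position| become active again. The next_active_ranges_change_ and
// next_inactive_ranges_change_ markers are refreshed so that later calls can
// skip these scans entirely when nothing can have changed yet.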
3813 void LinearScanAllocator::ForwardStateTo(LifetimePosition position) {
3814   if (position >= next_active_ranges_change_) {
3815     next_active_ranges_change_ = LifetimePosition::MaxPosition();
3816     for (auto it = active_live_ranges().begin();
3817          it != active_live_ranges().end();) {
3818       LiveRange* cur_active = *it;
3819       if (cur_active->End() <= position) {
3820         it = ActiveToHandled(it);
3821       } else if (!cur_active->Covers(position)) {
3822         it = ActiveToInactive(it, position);
3823       } else {
3824         next_active_ranges_change_ = std::min(
3825             next_active_ranges_change_, cur_active->NextEndAfter(position));
3826         ++it;
3827       }
3828     }
3829   }
3830 
3831   if (position >= next_inactive_ranges_change_) {
3832     next_inactive_ranges_change_ = LifetimePosition::MaxPosition();
3833     for (int reg = 0; reg < num_registers(); ++reg) {
3834       ZoneVector<LiveRange*> reorder(data()->allocation_zone());
3835       for (auto it = inactive_live_ranges(reg).begin();
3836            it != inactive_live_ranges(reg).end();) {
3837         LiveRange* cur_inactive = *it;
3838         if (cur_inactive->End() <= position) {
3839           it = InactiveToHandled(it);
3840         } else if (cur_inactive->Covers(position)) {
3841           it = InactiveToActive(it, position);
3842         } else {
3843           next_inactive_ranges_change_ =
3844               std::min(next_inactive_ranges_change_,
3845                        cur_inactive->NextStartAfter(position));
3846           it = inactive_live_ranges(reg).erase(it);
3847           reorder.push_back(cur_inactive);
3848         }
3849       }
3850       for (LiveRange* range : reorder) {
3851         inactive_live_ranges(reg).insert(range);
3852       }
3853     }
3854   }
3855 }
3856 
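// Overview: starting from a deferred block, walk forward in RPO order over the
// contiguous run of deferred blocks and return the last instruction index of
// that run.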
3857 int LinearScanAllocator::LastDeferredInstructionIndex(InstructionBlock* start) {
3858   DCHECK(start->IsDeferred());
3859   RpoNumber last_block =
3860       RpoNumber::FromInt(code()->InstructionBlockCount() - 1);
3861   while ((start->rpo_number() < last_block)) {
3862     InstructionBlock* next =
3863         code()->InstructionBlockAt(start->rpo_number().Next());
3864     if (!next->IsDeferred()) break;
3865     start = next;
3866   }
3867   return start->last_instruction_index();
3868 }
3869 
3870 void LinearScanAllocator::GetFPRegisterSet(MachineRepresentation rep,
3871                                            int* num_regs, int* num_codes,
3872                                            const int** codes) const {
3873   DCHECK(!kSimpleFPAliasing);
3874   if (rep == MachineRepresentation::kFloat32) {
3875     *num_regs = data()->config()->num_float_registers();
3876     *num_codes = data()->config()->num_allocatable_float_registers();
3877     *codes = data()->config()->allocatable_float_codes();
3878   } else if (rep == MachineRepresentation::kSimd128) {
3879     *num_regs = data()->config()->num_simd128_registers();
3880     *num_codes = data()->config()->num_allocatable_simd128_registers();
3881     *codes = data()->config()->allocatable_simd128_codes();
3882   } else {
3883     UNREACHABLE();
3884   }
3885 }
3886 
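// Overview: compute, for each register usable by |range|, the position until
// which it stays free. Registers held by active ranges are marked as free only
// until position 0 (i.e. not at all), while registers held by inactive ranges
// are free until their first intersection with |range|. With non-simple FP
// aliasing the same bookkeeping is done per aliased register.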
3887 void LinearScanAllocator::FindFreeRegistersForRange(
3888     LiveRange* range, Vector<LifetimePosition> positions) {
3889   int num_regs = num_registers();
3890   int num_codes = num_allocatable_registers();
3891   const int* codes = allocatable_register_codes();
3892   MachineRepresentation rep = range->representation();
3893   if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
3894                              rep == MachineRepresentation::kSimd128))
3895     GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
3896   DCHECK_GE(positions.length(), num_regs);
3897 
3898   for (int i = 0; i < num_regs; ++i) {
3899     positions[i] = LifetimePosition::MaxPosition();
3900   }
3901 
3902   for (LiveRange* cur_active : active_live_ranges()) {
3903     int cur_reg = cur_active->assigned_register();
3904     if (kSimpleFPAliasing || !check_fp_aliasing()) {
3905       positions[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
3906       TRACE("Register %s is free until pos %d (1) due to %d\n",
3907             RegisterName(cur_reg),
3908             LifetimePosition::GapFromInstructionIndex(0).value(),
3909             cur_active->TopLevel()->vreg());
3910     } else {
3911       int alias_base_index = -1;
3912       int aliases = data()->config()->GetAliases(
3913           cur_active->representation(), cur_reg, rep, &alias_base_index);
3914       DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
3915       while (aliases--) {
3916         int aliased_reg = alias_base_index + aliases;
3917         positions[aliased_reg] = LifetimePosition::GapFromInstructionIndex(0);
3918       }
3919     }
3920   }
3921 
3922   for (int cur_reg = 0; cur_reg < num_regs; ++cur_reg) {
3923     for (LiveRange* cur_inactive : inactive_live_ranges(cur_reg)) {
3924       DCHECK_GT(cur_inactive->End(), range->Start());
3925       CHECK_EQ(cur_inactive->assigned_register(), cur_reg);
3926       // No need to carry out intersections when this register won't be
3927       // interesting to this range anyway.
3928       // TODO(mtrofin): extend to aliased ranges, too.
3929       if ((kSimpleFPAliasing || !check_fp_aliasing()) &&
3930           positions[cur_reg] <= cur_inactive->NextStart()) {
3931         break;
3932       }
3933       LifetimePosition next_intersection =
3934           cur_inactive->FirstIntersection(range);
3935       if (!next_intersection.IsValid()) continue;
3936       if (kSimpleFPAliasing || !check_fp_aliasing()) {
3937         positions[cur_reg] = std::min(positions[cur_reg], next_intersection);
3938         TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
3939               positions[cur_reg].value());
3940       } else {
3941         int alias_base_index = -1;
3942         int aliases = data()->config()->GetAliases(
3943             cur_inactive->representation(), cur_reg, rep, &alias_base_index);
3944         DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
3945         while (aliases--) {
3946           int aliased_reg = alias_base_index + aliases;
3947           positions[aliased_reg] =
3948               std::min(positions[aliased_reg], next_intersection);
3949         }
3950       }
3951     }
3952   }
3953 }
3954 
3955 // High-level register allocation summary:
3956 //
3957 // We first attempt to allocate the preferred (hint) register. If that is not
3958 // possible, we find a free register and allocate that. If that's not
3959 // possible, we search for a register to steal from a range that was allocated.
3960 // The goal is to optimize for throughput by avoiding register-to-memory moves,
3961 // which are expensive.
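// Concretely, ProcessCurrentRange below first fills free_until_pos via
// FindFreeRegistersForRange, then tries the hinted register
// (TryAllocatePreferredReg), then any register that stays free long enough
// (TryAllocateFreeReg, splitting the range if that register becomes blocked
// early), and finally falls back to AllocateBlockedReg, which either spills
// the range or evicts a competing one.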
3962 void LinearScanAllocator::ProcessCurrentRange(LiveRange* current,
3963                                               SpillMode spill_mode) {
3964   EmbeddedVector<LifetimePosition, RegisterConfiguration::kMaxRegisters>
3965       free_until_pos;
3966   FindFreeRegistersForRange(current, free_until_pos);
3967   if (!TryAllocatePreferredReg(current, free_until_pos)) {
3968     if (!TryAllocateFreeReg(current, free_until_pos)) {
3969       AllocateBlockedReg(current, spill_mode);
3970     }
3971   }
3972   if (current->HasRegisterAssigned()) {
3973     AddToActive(current);
3974   }
3975 }
3976 
3977 bool LinearScanAllocator::TryAllocatePreferredReg(
3978     LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
3979   int hint_register;
3980   if (current->RegisterFromControlFlow(&hint_register) ||
3981       current->FirstHintPosition(&hint_register) != nullptr ||
3982       current->RegisterFromBundle(&hint_register)) {
3983     TRACE(
3984         "Found reg hint %s (free until [%d) for live range %d:%d (end %d[).\n",
3985         RegisterName(hint_register), free_until_pos[hint_register].value(),
3986         current->TopLevel()->vreg(), current->relative_id(),
3987         current->End().value());
3988 
3989     // The desired register is free until the end of the current live range.
3990     if (free_until_pos[hint_register] >= current->End()) {
3991       TRACE("Assigning preferred reg %s to live range %d:%d\n",
3992             RegisterName(hint_register), current->TopLevel()->vreg(),
3993             current->relative_id());
3994       SetLiveRangeAssignedRegister(current, hint_register);
3995       return true;
3996     }
3997   }
3998   return false;
3999 }
4000 
4001 int LinearScanAllocator::PickRegisterThatIsAvailableLongest(
4002     LiveRange* current, int hint_reg,
4003     const Vector<LifetimePosition>& free_until_pos) {
4004   int num_regs = 0;  // used only for the call to GetFPRegisterSet.
4005   int num_codes = num_allocatable_registers();
4006   const int* codes = allocatable_register_codes();
4007   MachineRepresentation rep = current->representation();
4008   if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
4009                              rep == MachineRepresentation::kSimd128)) {
4010     GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
4011   }
4012 
4013   DCHECK_GE(free_until_pos.length(), num_codes);
4014 
4015   // Find the register which stays free for the longest time. Check for
4016   // the hinted register first, as we might want to use that one. Only
4017   // count full instructions for free ranges, as an instruction's internal
4018   // positions do not help but might shadow a hinted register. This is
4019   // typically the case for function calls, where all registers are
4020   // clobbered after the call except for the argument registers, which are
4021   // set before the call. Hence, the argument registers always get ignored,
4022   // as their available time is shorter.
4023   int reg = (hint_reg == kUnassignedRegister) ? codes[0] : hint_reg;
4024   int current_free = free_until_pos[reg].ToInstructionIndex();
4025   for (int i = 0; i < num_codes; ++i) {
4026     int code = codes[i];
4027     // Prefer registers that have no fixed uses to avoid blocking later hints.
4028     // We use the first register that has no fixed uses to ensure we use
4029     // byte addressable registers in ia32 first.
4030     int candidate_free = free_until_pos[code].ToInstructionIndex();
4031     TRACE("Register %s is free until %d\n", RegisterName(code), candidate_free);
4032     if ((candidate_free > current_free) ||
4033         (candidate_free == current_free && reg != hint_reg &&
4034          (data()->HasFixedUse(current->representation(), reg) &&
4035           !data()->HasFixedUse(current->representation(), code)))) {
4036       reg = code;
4037       current_free = candidate_free;
4038     }
4039   }
4040 
4041   return reg;
4042 }
4043 
4044 bool LinearScanAllocator::TryAllocateFreeReg(
4045     LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
4046   // Compute the register hint, if one exists.
4047   int hint_reg = kUnassignedRegister;
4048   current->RegisterFromControlFlow(&hint_reg) ||
4049       current->FirstHintPosition(&hint_reg) != nullptr ||
4050       current->RegisterFromBundle(&hint_reg);
4051 
4052   int reg =
4053       PickRegisterThatIsAvailableLongest(current, hint_reg, free_until_pos);
4054 
4055   LifetimePosition pos = free_until_pos[reg];
4056 
4057   if (pos <= current->Start()) {
4058     // All registers are blocked.
4059     return false;
4060   }
4061 
4062   if (pos < current->End()) {
4063     // Register reg is available at the range start but becomes blocked before
4064     // the range end. Split current at position where it becomes blocked.
4065     LiveRange* tail = SplitRangeAt(current, pos);
4066     AddToUnhandled(tail);
4067 
4068     // Try to allocate preferred register once more.
4069     if (TryAllocatePreferredReg(current, free_until_pos)) return true;
4070   }
4071 
4072   // Register reg is available at the range start and is free until the range
4073   // end.
4074   DCHECK(pos >= current->End());
4075   TRACE("Assigning free reg %s to live range %d:%d\n", RegisterName(reg),
4076         current->TopLevel()->vreg(), current->relative_id());
4077   SetLiveRangeAssignedRegister(current, reg);
4078 
4079   return true;
4080 }
4081 
4082 void LinearScanAllocator::AllocateBlockedReg(LiveRange* current,
4083                                              SpillMode spill_mode) {
4084   UsePosition* register_use = current->NextRegisterPosition(current->Start());
4085   if (register_use == nullptr) {
4086     // There is no use in the current live range that requires a register.
4087     // We can just spill it.
4088     LiveRange* begin_spill = nullptr;
4089     LifetimePosition spill_pos = FindOptimalSpillingPos(
4090         current, current->Start(), spill_mode, &begin_spill);
4091     MaybeSpillPreviousRanges(begin_spill, spill_pos, current);
4092     Spill(current, spill_mode);
4093     return;
4094   }
4095 
4096   MachineRepresentation rep = current->representation();
4097 
4098   // use_pos keeps track of positions a register/alias is used at.
4099   // block_pos keeps track of positions where a register/alias is blocked
4100   // from.
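  // In other words: use_pos[r] is the earliest upcoming position at which
  // register r is wanted again by some other range, while block_pos[r] is the
  // position from which r is unavailable outright; fixed or unspillable active
  // ranges pin both to position 0 for their register.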
4101   EmbeddedVector<LifetimePosition, RegisterConfiguration::kMaxRegisters>
4102       use_pos(LifetimePosition::MaxPosition());
4103   EmbeddedVector<LifetimePosition, RegisterConfiguration::kMaxRegisters>
4104       block_pos(LifetimePosition::MaxPosition());
4105 
4106   for (LiveRange* range : active_live_ranges()) {
4107     int cur_reg = range->assigned_register();
4108     bool is_fixed_or_cant_spill =
4109         range->TopLevel()->IsFixed() || !range->CanBeSpilled(current->Start());
4110     if (kSimpleFPAliasing || !check_fp_aliasing()) {
4111       if (is_fixed_or_cant_spill) {
4112         block_pos[cur_reg] = use_pos[cur_reg] =
4113             LifetimePosition::GapFromInstructionIndex(0);
4114       } else {
4115         DCHECK_NE(LifetimePosition::GapFromInstructionIndex(0),
4116                   block_pos[cur_reg]);
4117         use_pos[cur_reg] =
4118             range->NextLifetimePositionRegisterIsBeneficial(current->Start());
4119       }
4120     } else {
4121       int alias_base_index = -1;
4122       int aliases = data()->config()->GetAliases(
4123           range->representation(), cur_reg, rep, &alias_base_index);
4124       DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
4125       while (aliases--) {
4126         int aliased_reg = alias_base_index + aliases;
4127         if (is_fixed_or_cant_spill) {
4128           block_pos[aliased_reg] = use_pos[aliased_reg] =
4129               LifetimePosition::GapFromInstructionIndex(0);
4130         } else {
4131           use_pos[aliased_reg] =
4132               std::min(block_pos[aliased_reg],
4133                        range->NextLifetimePositionRegisterIsBeneficial(
4134                            current->Start()));
4135         }
4136       }
4137     }
4138   }
4139 
4140   for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) {
4141     for (LiveRange* range : inactive_live_ranges(cur_reg)) {
4142       DCHECK(range->End() > current->Start());
4143       DCHECK_EQ(range->assigned_register(), cur_reg);
4144       bool is_fixed = range->TopLevel()->IsFixed();
4145 
4146       // Don't perform costly intersections if they are guaranteed to not update
4147       // block_pos or use_pos.
4148       // TODO(mtrofin): extend to aliased ranges, too.
4149       if ((kSimpleFPAliasing || !check_fp_aliasing())) {
4150         DCHECK_LE(use_pos[cur_reg], block_pos[cur_reg]);
4151         if (block_pos[cur_reg] <= range->NextStart()) break;
4152         if (!is_fixed && use_pos[cur_reg] <= range->NextStart()) continue;
4153       }
4154 
4155       LifetimePosition next_intersection = range->FirstIntersection(current);
4156       if (!next_intersection.IsValid()) continue;
4157 
4158       if (kSimpleFPAliasing || !check_fp_aliasing()) {
4159         if (is_fixed) {
4160           block_pos[cur_reg] = std::min(block_pos[cur_reg], next_intersection);
4161           use_pos[cur_reg] = std::min(block_pos[cur_reg], use_pos[cur_reg]);
4162         } else {
4163           use_pos[cur_reg] = std::min(use_pos[cur_reg], next_intersection);
4164         }
4165       } else {
4166         int alias_base_index = -1;
4167         int aliases = data()->config()->GetAliases(
4168             range->representation(), cur_reg, rep, &alias_base_index);
4169         DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
4170         while (aliases--) {
4171           int aliased_reg = alias_base_index + aliases;
4172           if (is_fixed) {
4173             block_pos[aliased_reg] =
4174                 std::min(block_pos[aliased_reg], next_intersection);
4175             use_pos[aliased_reg] =
4176                 std::min(block_pos[aliased_reg], use_pos[aliased_reg]);
4177           } else {
4178             use_pos[aliased_reg] =
4179                 std::min(use_pos[aliased_reg], next_intersection);
4180           }
4181         }
4182       }
4183     }
4184   }
4185 
4186   // Compute register hint if it exists.
4187   int hint_reg = kUnassignedRegister;
4188   current->RegisterFromControlFlow(&hint_reg) ||
4189       register_use->HintRegister(&hint_reg) ||
4190       current->RegisterFromBundle(&hint_reg);
4191   int reg = PickRegisterThatIsAvailableLongest(current, hint_reg, use_pos);
4192 
4193   if (use_pos[reg] < register_use->pos()) {
4194     // If there is a gap position before the next register use, we can
4195     // spill until there. The gap position will then fit the fill move.
4196     if (LifetimePosition::ExistsGapPositionBetween(current->Start(),
4197                                                    register_use->pos())) {
4198       SpillBetween(current, current->Start(), register_use->pos(), spill_mode);
4199       return;
4200     }
4201   }
4202 
4203   // When in deferred spilling mode avoid stealing registers beyond the current
4204   // deferred region. This is required as we otherwise might spill an inactive
4205   // range with a start outside of deferred code and that would not be reloaded.
4206   LifetimePosition new_end = current->End();
4207   if (spill_mode == SpillMode::kSpillDeferred) {
4208     InstructionBlock* deferred_block =
4209         code()->GetInstructionBlock(current->Start().ToInstructionIndex());
4210     new_end =
4211         std::min(new_end, LifetimePosition::GapFromInstructionIndex(
4212                               LastDeferredInstructionIndex(deferred_block)));
4213   }
4214 
4215   // We couldn't spill until the next register use. Split before the register
4216   // is blocked, if applicable.
4217   if (block_pos[reg] < new_end) {
4218     // Register becomes blocked before the current range end. Split before that
4219     // position.
4220     new_end = block_pos[reg].Start();
4221   }
4222 
4223   // If there is no register available at all, we can only spill this range.
4224   // Happens for instance on entry to deferred code where registers might
4225   // become blocked yet we aim to reload ranges.
4226   if (new_end == current->Start()) {
4227     SpillBetween(current, new_end, register_use->pos(), spill_mode);
4228     return;
4229   }
4230 
4231   // Split at the new end if we found one.
4232   if (new_end != current->End()) {
4233     LiveRange* tail = SplitBetween(current, current->Start(), new_end);
4234     AddToUnhandled(tail);
4235   }
4236 
4237   // Register reg is not blocked for the whole range.
4238   DCHECK(block_pos[reg] >= current->End());
4239   TRACE("Assigning blocked reg %s to live range %d:%d\n", RegisterName(reg),
4240         current->TopLevel()->vreg(), current->relative_id());
4241   SetLiveRangeAssignedRegister(current, reg);
4242 
4243   // This register was not free. Thus we need to find and spill
4244   // parts of active and inactive live regions that use the same register
4245   // at the same lifetime positions as current.
4246   SplitAndSpillIntersecting(current, spill_mode);
4247 }
4248 
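// Overview: |current| has just been assigned a register that was not free, so
// evict the competition. Every active or inactive range that uses (or, with FP
// aliasing, aliases) that register and intersects |current| is split and
// spilled around the conflict and then retired to handled.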
4249 void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current,
4250                                                     SpillMode spill_mode) {
4251   DCHECK(current->HasRegisterAssigned());
4252   int reg = current->assigned_register();
4253   LifetimePosition split_pos = current->Start();
4254   for (auto it = active_live_ranges().begin();
4255        it != active_live_ranges().end();) {
4256     LiveRange* range = *it;
4257     if (kSimpleFPAliasing || !check_fp_aliasing()) {
4258       if (range->assigned_register() != reg) {
4259         ++it;
4260         continue;
4261       }
4262     } else {
4263       if (!data()->config()->AreAliases(current->representation(), reg,
4264                                         range->representation(),
4265                                         range->assigned_register())) {
4266         ++it;
4267         continue;
4268       }
4269     }
4270 
4271     UsePosition* next_pos = range->NextRegisterPosition(current->Start());
4272     LiveRange* begin_spill = nullptr;
4273     LifetimePosition spill_pos =
4274         FindOptimalSpillingPos(range, split_pos, spill_mode, &begin_spill);
4275     MaybeSpillPreviousRanges(begin_spill, spill_pos, range);
4276     if (next_pos == nullptr) {
4277       SpillAfter(range, spill_pos, spill_mode);
4278     } else {
4279       // When spilling between spill_pos and next_pos ensure that the range
4280       // remains spilled at least until the start of the current live range.
4281       // This guarantees that we will not introduce new unhandled ranges that
4282       // start before the current range as this violates allocation invariants
4283       // and will lead to an inconsistent state of active and inactive
4284       // live-ranges: ranges are allocated in order of their start positions,
4285       // ranges are retired from active/inactive when the start of the
4286       // current live-range is larger than their end.
4287       DCHECK(LifetimePosition::ExistsGapPositionBetween(current->Start(),
4288                                                         next_pos->pos()));
4289       SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos(),
4290                         spill_mode);
4291     }
4292     it = ActiveToHandled(it);
4293   }
4294 
4295   for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) {
4296     if (kSimpleFPAliasing || !check_fp_aliasing()) {
4297       if (cur_reg != reg) continue;
4298     }
4299     for (auto it = inactive_live_ranges(cur_reg).begin();
4300          it != inactive_live_ranges(cur_reg).end();) {
4301       LiveRange* range = *it;
4302       if (!kSimpleFPAliasing && check_fp_aliasing() &&
4303           !data()->config()->AreAliases(current->representation(), reg,
4304                                         range->representation(), cur_reg)) {
4305         ++it;
4306         continue;
4307       }
4308       DCHECK(range->End() > current->Start());
4309       if (range->TopLevel()->IsFixed()) {
4310         ++it;
4311         continue;
4312       }
4313 
4314       LifetimePosition next_intersection = range->FirstIntersection(current);
4315       if (next_intersection.IsValid()) {
4316         UsePosition* next_pos = range->NextRegisterPosition(current->Start());
4317         if (next_pos == nullptr) {
4318           SpillAfter(range, split_pos, spill_mode);
4319         } else {
4320           next_intersection = std::min(next_intersection, next_pos->pos());
4321           SpillBetween(range, split_pos, next_intersection, spill_mode);
4322         }
4323         it = InactiveToHandled(it);
4324       } else {
4325         ++it;
4326       }
4327     }
4328   }
4329 }
4330 
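// Overview: for a phi whose inputs mostly come out of the same merged spill
// slot (i.e. belong to the same bundle), try to keep the phi result in that
// slot as well, by spilling it at or shortly after its definition instead of
// competing for a register right away. Returns true if the range was handled
// here.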
4331 bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
4332   if (!range->is_phi()) return false;
4333 
4334   DCHECK(!range->HasSpillOperand());
4335   // Check how many operands belong to the same bundle as the output.
4336   LiveRangeBundle* out_bundle = range->get_bundle();
4337   TopTierRegisterAllocationData::PhiMapValue* phi_map_value =
4338       data()->GetPhiMapValueFor(range);
4339   const PhiInstruction* phi = phi_map_value->phi();
4340   const InstructionBlock* block = phi_map_value->block();
4341   // Count the number of spilled operands.
4342   size_t spilled_count = 0;
4343   for (size_t i = 0; i < phi->operands().size(); i++) {
4344     int op = phi->operands()[i];
4345     LiveRange* op_range = data()->GetOrCreateLiveRangeFor(op);
4346     if (!op_range->TopLevel()->HasSpillRange()) continue;
4347     const InstructionBlock* pred =
4348         code()->InstructionBlockAt(block->predecessors()[i]);
4349     LifetimePosition pred_end =
4350         LifetimePosition::InstructionFromInstructionIndex(
4351             pred->last_instruction_index());
4352     while (op_range != nullptr && !op_range->CanCover(pred_end)) {
4353       op_range = op_range->next();
4354     }
4355     if (op_range != nullptr && op_range->spilled() &&
4356         op_range->get_bundle() == out_bundle) {
4357       spilled_count++;
4358     }
4359   }
4360 
4361   // Only continue if more than half of the operands are spilled to the same
4362   // slot (because they are part of the same bundle).
4363   if (spilled_count * 2 <= phi->operands().size()) {
4364     return false;
4365   }
4366 
4367   // If the range does not need a register soon, spill it to the merged
4368   // spill range.
4369   LifetimePosition next_pos = range->Start();
4370   if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
4371   UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
4372   if (pos == nullptr) {
4373     Spill(range, SpillMode::kSpillAtDefinition);
4374     return true;
4375   } else if (pos->pos() > range->Start().NextStart()) {
4376     SpillBetween(range, range->Start(), pos->pos(),
4377                  SpillMode::kSpillAtDefinition);
4378     return true;
4379   }
4380   return false;
4381 }
4382 
4383 void LinearScanAllocator::SpillAfter(LiveRange* range, LifetimePosition pos,
4384                                      SpillMode spill_mode) {
4385   LiveRange* second_part = SplitRangeAt(range, pos);
4386   Spill(second_part, spill_mode);
4387 }
4388 
4389 void LinearScanAllocator::SpillBetween(LiveRange* range, LifetimePosition start,
4390                                        LifetimePosition end,
4391                                        SpillMode spill_mode) {
4392   SpillBetweenUntil(range, start, start, end, spill_mode);
4393 }
4394 
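// Overview: SpillBetweenUntil splits |range| at |start|; if the resulting tail
// overlaps [start, end[, it is split once more (no earlier than |until|), the
// middle piece is spilled, and the final piece is requeued so it can still get
// a register at its next use. Otherwise the tail is requeued as a whole.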
4395 void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
4396                                             LifetimePosition start,
4397                                             LifetimePosition until,
4398                                             LifetimePosition end,
4399                                             SpillMode spill_mode) {
4400   CHECK(start < end);
4401   LiveRange* second_part = SplitRangeAt(range, start);
4402 
4403   if (second_part->Start() < end) {
4404     // The split result intersects with [start, end[.
4405     // Split it at position between ]start+1, end[, spill the middle part
4406     // and put the rest to unhandled.
4407 
4408     // Make sure that the third part always starts after the start of the
4409     // second part, as that likely is the current position of the register
4410     // allocator and we cannot add ranges to unhandled that start before
4411     // the current position.
4412     LifetimePosition split_start = std::max(second_part->Start().End(), until);
4413 
4414     // If end is an actual use (which it typically is), we have to split
4415     // so that there is a gap before it, giving us space for moving the
4416     // value into its position.
4417     // However, if we have no choice, split right where asked.
4418     LifetimePosition third_part_end =
4419         std::max(split_start, end.PrevStart().End());
4420     // Instead of splitting right after or even before the block boundary,
4421     // split on the boundary to avoid extra moves.
4422     if (data()->IsBlockBoundary(end.Start())) {
4423       third_part_end = std::max(split_start, end.Start());
4424     }
4425 
4426     LiveRange* third_part =
4427         SplitBetween(second_part, split_start, third_part_end);
4428     if (GetInstructionBlock(data()->code(), second_part->Start())
4429             ->IsDeferred()) {
4430       // Try to use the same register as before.
4431       TRACE("Setting control flow hint for %d:%d to %s\n",
4432             third_part->TopLevel()->vreg(), third_part->relative_id(),
4433             RegisterName(range->controlflow_hint()));
4434       third_part->set_controlflow_hint(range->controlflow_hint());
4435     }
4436 
4437     AddToUnhandled(third_part);
4438     // The third part can equal the second part, even though we checked for
4439     // start < end above, as we fiddle with the end location. However, we are
4440     // guaranteed to be after or at until, so this is fine.
4441     if (third_part != second_part) {
4442       Spill(second_part, spill_mode);
4443     }
4444   } else {
4445     // The split result does not intersect with [start, end[.
4446     // Nothing to spill. Just put it to unhandled as whole.
4447     AddToUnhandled(second_part);
4448   }
4449 }
4450 
4451 OperandAssigner::OperandAssigner(TopTierRegisterAllocationData* data)
4452     : data_(data) {}
4453 
4454 void OperandAssigner::DecideSpillingMode() {
4455   for (auto range : data()->live_ranges()) {
4456     data()->tick_counter()->TickAndMaybeEnterSafepoint();
4457     int max_blocks = data()->code()->InstructionBlockCount();
4458     if (range != nullptr && range->IsSpilledOnlyInDeferredBlocks(data())) {
4459       // If the range is spilled only in deferred blocks and starts in
4460       // a non-deferred block, we transition its representation here so
4461       // that the LiveRangeConnector processes it correctly. If,
4462       // however, it starts in a deferred block, we upgrade it to
4463       // spill at definition, as that definition is in a deferred block
4464       // anyway. While this is an optimization, the code in LiveRangeConnector
4465       // relies on it!
4466       if (GetInstructionBlock(data()->code(), range->Start())->IsDeferred()) {
4467         TRACE("Live range %d is spilled and alive in deferred code only\n",
4468               range->vreg());
4469         range->TransitionRangeToSpillAtDefinition();
4470       } else {
4471         TRACE("Live range %d is spilled deferred code only but alive outside\n",
4472               range->vreg());
4473         range->TransitionRangeToDeferredSpill(data()->allocation_zone(),
4474                                               max_blocks);
4475       }
4476     }
4477   }
4478 }
4479 
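// Overview: first let live-range bundles merge their spill ranges, then try to
// merge the remaining spill ranges with each other where they do not conflict,
// and finally allocate one frame slot per surviving non-empty spill range.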
4480 void OperandAssigner::AssignSpillSlots() {
4481   for (auto range : data()->live_ranges()) {
4482     data()->tick_counter()->TickAndMaybeEnterSafepoint();
4483     if (range != nullptr && range->get_bundle() != nullptr) {
4484       range->get_bundle()->MergeSpillRanges();
4485     }
4486   }
4487   ZoneVector<SpillRange*>& spill_ranges = data()->spill_ranges();
4488   // Merge disjoint spill ranges
4489   for (size_t i = 0; i < spill_ranges.size(); ++i) {
4490     data()->tick_counter()->TickAndMaybeEnterSafepoint();
4491     SpillRange* range = spill_ranges[i];
4492     if (range == nullptr) continue;
4493     if (range->IsEmpty()) continue;
4494     for (size_t j = i + 1; j < spill_ranges.size(); ++j) {
4495       SpillRange* other = spill_ranges[j];
4496       if (other != nullptr && !other->IsEmpty()) {
4497         range->TryMerge(other);
4498       }
4499     }
4500   }
4501   // Allocate slots for the merged spill ranges.
4502   for (SpillRange* range : spill_ranges) {
4503     data()->tick_counter()->TickAndMaybeEnterSafepoint();
4504     if (range == nullptr || range->IsEmpty()) continue;
4505     // Allocate a new operand referring to the spill slot.
4506     if (!range->HasSlot()) {
4507       int index = data()->frame()->AllocateSpillSlot(range->byte_width());
4508       range->set_assigned_slot(index);
4509     }
4510   }
4511 }
4512 
4513 void OperandAssigner::CommitAssignment() {
4514   const size_t live_ranges_size = data()->live_ranges().size();
4515   for (TopLevelLiveRange* top_range : data()->live_ranges()) {
4516     data()->tick_counter()->TickAndMaybeEnterSafepoint();
4517     CHECK_EQ(live_ranges_size,
4518              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
4519     if (top_range == nullptr || top_range->IsEmpty()) continue;
4520     InstructionOperand spill_operand;
4521     if (top_range->HasSpillOperand()) {
4522       spill_operand = *top_range->TopLevel()->GetSpillOperand();
4523     } else if (top_range->TopLevel()->HasSpillRange()) {
4524       spill_operand = top_range->TopLevel()->GetSpillRangeOperand();
4525     }
4526     if (top_range->is_phi()) {
4527       data()->GetPhiMapValueFor(top_range)->CommitAssignment(
4528           top_range->GetAssignedOperand());
4529     }
4530     for (LiveRange* range = top_range; range != nullptr;
4531          range = range->next()) {
4532       InstructionOperand assigned = range->GetAssignedOperand();
4533       DCHECK(!assigned.IsUnallocated());
4534       range->ConvertUsesToOperand(assigned, spill_operand);
4535     }
4536 
4537     if (!spill_operand.IsInvalid()) {
4538       // If this top level range has a child spilled in a deferred block, we use
4539       // the range and control flow connection mechanism instead of spilling at
4540       // definition. Refer to the ConnectLiveRanges and ResolveControlFlow
4541       // phases. Normally, when we spill at definition, we do not insert a
4542       // connecting move when a successor child range is spilled - because the
4543       // spilled range picks up its value from the slot which was assigned at
4544       // definition. For ranges that are determined to spill only in deferred
4545       // blocks, we let ConnectLiveRanges and ResolveControlFlow find the blocks
4546       // where a spill operand is expected, and then finalize by inserting the
4547       // spills in the deferred blocks' dominators.
4548       if (!top_range->IsSpilledOnlyInDeferredBlocks(data()) &&
4549           !top_range->HasGeneralSpillRange()) {
4550         // Spill at definition if the range isn't spilled in a way that will be
4551         // handled later.
4552         top_range->FilterSpillMoves(data(), spill_operand);
4553         top_range->CommitSpillMoves(data(), spill_operand);
4554       }
4555     }
4556   }
4557 }
4558 
4559 ReferenceMapPopulator::ReferenceMapPopulator(
4560     TopTierRegisterAllocationData* data)
4561     : data_(data) {}
4562 
4563 bool ReferenceMapPopulator::SafePointsAreInOrder() const {
4564   int safe_point = 0;
4565   for (ReferenceMap* map : *data()->code()->reference_maps()) {
4566     if (safe_point > map->instruction_position()) return false;
4567     safe_point = map->instruction_position();
4568   }
4569   return true;
4570 }
4571 
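// Overview: for every safe point, record where each live tagged value resides:
// the spill slot if the range is spilled by that point, and the assigned
// register if the covering child range still holds the value in a register.
// These reference maps are what the GC later uses to find and update pointers.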
4572 void ReferenceMapPopulator::PopulateReferenceMaps() {
4573   DCHECK(SafePointsAreInOrder());
4574   // Map all delayed references.
4575   for (TopTierRegisterAllocationData::DelayedReference& delayed_reference :
4576        data()->delayed_references()) {
4577     delayed_reference.map->RecordReference(
4578         AllocatedOperand::cast(*delayed_reference.operand));
4579   }
4580   // Iterate over all safe point positions and record a pointer
4581   // for all spilled live ranges at this point.
4582   int last_range_start = 0;
4583   const ReferenceMapDeque* reference_maps = data()->code()->reference_maps();
4584   ReferenceMapDeque::const_iterator first_it = reference_maps->begin();
4585   const size_t live_ranges_size = data()->live_ranges().size();
4586   for (TopLevelLiveRange* range : data()->live_ranges()) {
4587     CHECK_EQ(live_ranges_size,
4588              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
4589     if (range == nullptr) continue;
4590     // Skip non-reference values.
4591     if (!data()->code()->IsReference(range->vreg())) continue;
4592     // Skip empty live ranges.
4593     if (range->IsEmpty()) continue;
4594     if (range->has_preassigned_slot()) continue;
4595 
4596     // Find the extent of the range and its children.
4597     int start = range->Start().ToInstructionIndex();
4598     int end = 0;
4599     for (LiveRange* cur = range; cur != nullptr; cur = cur->next()) {
4600       LifetimePosition this_end = cur->End();
4601       if (this_end.ToInstructionIndex() > end)
4602         end = this_end.ToInstructionIndex();
4603       DCHECK(cur->Start().ToInstructionIndex() >= start);
4604     }
4605 
4606     // Most of the ranges are in order, but not all.  Keep an eye on when they
4607     // step backwards and reset the first_it so we don't miss any safe points.
4608     if (start < last_range_start) first_it = reference_maps->begin();
4609     last_range_start = start;
4610 
4611     // Step across all the safe points that are before the start of this range,
4612     // recording how far we step in order to save doing this for the next range.
4613     for (; first_it != reference_maps->end(); ++first_it) {
4614       ReferenceMap* map = *first_it;
4615       if (map->instruction_position() >= start) break;
4616     }
4617 
4618     InstructionOperand spill_operand;
4619     if (((range->HasSpillOperand() &&
4620           !range->GetSpillOperand()->IsConstant()) ||
4621          range->HasSpillRange())) {
4622       if (range->HasSpillOperand()) {
4623         spill_operand = *range->GetSpillOperand();
4624       } else {
4625         spill_operand = range->GetSpillRangeOperand();
4626       }
4627       DCHECK(spill_operand.IsStackSlot());
4628       DCHECK(CanBeTaggedOrCompressedPointer(
4629           AllocatedOperand::cast(spill_operand).representation()));
4630     }
4631 
4632     LiveRange* cur = range;
4633     // Step through the safe points to see whether they are in the range.
4634     for (auto it = first_it; it != reference_maps->end(); ++it) {
4635       ReferenceMap* map = *it;
4636       int safe_point = map->instruction_position();
4637 
4638       // The safe points are sorted so we can stop searching here.
4639       if (safe_point - 1 > end) break;
4640 
4641       // Advance to the next active range that covers the current
4642       // safe point position.
4643       LifetimePosition safe_point_pos =
4644           LifetimePosition::InstructionFromInstructionIndex(safe_point);
4645 
4646       // Search for the child range (cur) that covers safe_point_pos. If we
4647       // don't find it before the children pass safe_point_pos, keep cur at
4648       // the last child, because the next safe_point_pos may be covered by cur.
4649       // This may happen if cur has more than one interval, and the current
4650       // safe_point_pos is in between intervals.
4651       // For that reason, cur may be at most the last child.
4652       DCHECK_NOT_NULL(cur);
4653       DCHECK(safe_point_pos >= cur->Start() || range == cur);
4654       bool found = false;
4655       while (!found) {
4656         if (cur->Covers(safe_point_pos)) {
4657           found = true;
4658         } else {
4659           LiveRange* next = cur->next();
4660           if (next == nullptr || next->Start() > safe_point_pos) {
4661             break;
4662           }
4663           cur = next;
4664         }
4665       }
4666 
4667       if (!found) {
4668         continue;
4669       }
4670 
4671       // Check if the live range is spilled and the safe point is after
4672       // the spill position.
4673       int spill_index = range->IsSpilledOnlyInDeferredBlocks(data()) ||
4674                                 range->LateSpillingSelected()
4675                             ? cur->Start().ToInstructionIndex()
4676                             : range->spill_start_index();
4677 
4678       if (!spill_operand.IsInvalid() && safe_point >= spill_index) {
4679         TRACE("Pointer for range %d (spilled at %d) at safe point %d\n",
4680               range->vreg(), spill_index, safe_point);
4681         map->RecordReference(AllocatedOperand::cast(spill_operand));
4682       }
4683 
4684       if (!cur->spilled()) {
4685         TRACE(
4686             "Pointer in register for range %d:%d (start at %d) "
4687             "at safe point %d\n",
4688             range->vreg(), cur->relative_id(), cur->Start().value(),
4689             safe_point);
4690         InstructionOperand operand = cur->GetAssignedOperand();
4691         DCHECK(!operand.IsStackSlot());
4692         DCHECK(CanBeTaggedOrCompressedPointer(
4693             AllocatedOperand::cast(operand).representation()));
4694         map->RecordReference(AllocatedOperand::cast(operand));
4695       }
4696     }
4697   }
4698 }
4699 
4700 LiveRangeConnector::LiveRangeConnector(TopTierRegisterAllocationData* data)
4701     : data_(data) {}
4702 
4703 bool LiveRangeConnector::CanEagerlyResolveControlFlow(
4704     const InstructionBlock* block) const {
4705   if (block->PredecessorCount() != 1) return false;
4706   return block->predecessors()[0].IsNext(block->rpo_number());
4707 }
4708 
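// Overview: for every block that is not a trivial fall-through from its single
// predecessor, compare, per live-in vreg, the operand assigned at the end of
// each predecessor with the operand assigned at the block entry, and insert a
// gap move where they differ. Blocks that reload a range spilled only in
// deferred code are also recorded so the spill operand can be defined there.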
4709 void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
4710   // Lazily linearize live ranges in memory for fast lookup.
4711   LiveRangeFinder finder(data(), local_zone);
4712   ZoneVector<BitVector*>& live_in_sets = data()->live_in_sets();
4713   for (const InstructionBlock* block : code()->instruction_blocks()) {
4714     if (CanEagerlyResolveControlFlow(block)) continue;
4715     BitVector* live = live_in_sets[block->rpo_number().ToInt()];
4716     BitVector::Iterator iterator(live);
4717     while (!iterator.Done()) {
4718       data()->tick_counter()->TickAndMaybeEnterSafepoint();
4719       int vreg = iterator.Current();
4720       LiveRangeBoundArray* array = finder.ArrayFor(vreg);
4721       for (const RpoNumber& pred : block->predecessors()) {
4722         FindResult result;
4723         const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
4724         if (!array->FindConnectableSubranges(block, pred_block, &result)) {
4725           continue;
4726         }
4727         InstructionOperand pred_op = result.pred_cover_->GetAssignedOperand();
4728         InstructionOperand cur_op = result.cur_cover_->GetAssignedOperand();
4729         if (pred_op.Equals(cur_op)) continue;
4730         if (!pred_op.IsAnyRegister() && cur_op.IsAnyRegister()) {
4731           // We're doing a reload.
4732           // We don't need to, if:
4733           // 1) there's no register use in this block, and
4734           // 2) the range ends before the block does, and
4735           // 3) we don't have a successor, or the successor is spilled.
4736           LifetimePosition block_start =
4737               LifetimePosition::GapFromInstructionIndex(block->code_start());
4738           LifetimePosition block_end =
4739               LifetimePosition::GapFromInstructionIndex(block->code_end());
4740           const LiveRange* current = result.cur_cover_;
4741           // Note that this is not the successor if we have control flow!
4742           // However, in the following condition, we only refer to it if it
4743           // begins in the current block, in which case we can safely declare it
4744           // to be the successor.
4745           const LiveRange* successor = current->next();
4746           if (current->End() < block_end &&
4747               (successor == nullptr || successor->spilled())) {
4748             // verify point 1: no register use. We can go to the end of the
4749             // range, since it's all within the block.
4750 
4751             bool uses_reg = false;
4752             for (const UsePosition* use = current->NextUsePosition(block_start);
4753                  use != nullptr; use = use->next()) {
4754               if (use->operand()->IsAnyRegister()) {
4755                 uses_reg = true;
4756                 break;
4757               }
4758             }
4759             if (!uses_reg) continue;
4760           }
4761           if (current->TopLevel()->IsSpilledOnlyInDeferredBlocks(data()) &&
4762               pred_block->IsDeferred()) {
4763             // The spill location should be defined in pred_block, so add
4764             // pred_block to the list of blocks requiring a spill operand.
4765             TRACE("Adding B%d to list of spill blocks for %d\n",
4766                   pred_block->rpo_number().ToInt(),
4767                   current->TopLevel()->vreg());
4768             current->TopLevel()
4769                 ->GetListOfBlocksRequiringSpillOperands(data())
4770                 ->Add(pred_block->rpo_number().ToInt());
4771           }
4772         }
4773         int move_loc = ResolveControlFlow(block, cur_op, pred_block, pred_op);
4774         USE(move_loc);
4775         DCHECK_IMPLIES(
4776             result.cur_cover_->TopLevel()->IsSpilledOnlyInDeferredBlocks(
4777                 data()) &&
4778                 !(pred_op.IsAnyRegister() && cur_op.IsAnyRegister()),
4779             code()->GetInstructionBlock(move_loc)->IsDeferred());
4780       }
4781       iterator.Advance();
4782     }
4783   }
4784 
4785   // At this stage, we collected blocks needing a spill operand due to reloads
4786   // from ConnectRanges and from ResolveControlFlow. Time to commit the spills
4787   // for deferred blocks. This is a convenient time to commit spills for general
4788   // spill ranges also, because they need to use the LiveRangeFinder.
4789   const size_t live_ranges_size = data()->live_ranges().size();
4790   SpillPlacer spill_placer(&finder, data(), local_zone);
4791   for (TopLevelLiveRange* top : data()->live_ranges()) {
4792     CHECK_EQ(live_ranges_size,
4793              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
4794     if (top == nullptr || top->IsEmpty()) continue;
4795     if (top->IsSpilledOnlyInDeferredBlocks(data())) {
4796       CommitSpillsInDeferredBlocks(top, finder.ArrayFor(top->vreg()),
4797                                    local_zone);
4798     } else if (top->HasGeneralSpillRange()) {
4799       spill_placer.Add(top);
4800     }
4801   }
4802 }
4803 
4804 int LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
4805                                            const InstructionOperand& cur_op,
4806                                            const InstructionBlock* pred,
4807                                            const InstructionOperand& pred_op) {
4808   DCHECK(!pred_op.Equals(cur_op));
4809   int gap_index;
4810   Instruction::GapPosition position;
4811   if (block->PredecessorCount() == 1) {
4812     gap_index = block->first_instruction_index();
4813     position = Instruction::START;
4814   } else {
4815     DCHECK_EQ(1, pred->SuccessorCount());
4816     DCHECK(!code()
4817                 ->InstructionAt(pred->last_instruction_index())
4818                 ->HasReferenceMap());
4819     gap_index = pred->last_instruction_index();
4820     position = Instruction::END;
4821   }
4822   data()->AddGapMove(gap_index, position, pred_op, cur_op);
4823   return gap_index;
4824 }
4825 
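// Overview: ConnectRanges walks each top-level range's chain of children and,
// wherever two adjacent children touch inside a block but were assigned
// different operands, inserts a gap move from the first operand to the second.
// Moves that cannot be inserted immediately are gathered in
// delayed_insertion_map and committed per ParallelMove afterwards, using
// PrepareInsertAfter to keep each parallel move consistent.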
4826 void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
4827   DelayedInsertionMap delayed_insertion_map(local_zone);
4828   const size_t live_ranges_size = data()->live_ranges().size();
4829   for (TopLevelLiveRange* top_range : data()->live_ranges()) {
4830     CHECK_EQ(live_ranges_size,
4831              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
4832     if (top_range == nullptr) continue;
4833     bool connect_spilled = top_range->IsSpilledOnlyInDeferredBlocks(data());
4834     LiveRange* first_range = top_range;
4835     for (LiveRange *second_range = first_range->next(); second_range != nullptr;
4836          first_range = second_range, second_range = second_range->next()) {
4837       LifetimePosition pos = second_range->Start();
4838       // Add gap move if the two live ranges touch and there is no block
4839       // boundary.
4840       if (second_range->spilled()) continue;
4841       if (first_range->End() != pos) continue;
4842       if (data()->IsBlockBoundary(pos) &&
4843           !CanEagerlyResolveControlFlow(GetInstructionBlock(code(), pos))) {
4844         continue;
4845       }
4846       InstructionOperand prev_operand = first_range->GetAssignedOperand();
4847       InstructionOperand cur_operand = second_range->GetAssignedOperand();
4848       if (prev_operand.Equals(cur_operand)) continue;
4849       bool delay_insertion = false;
4850       Instruction::GapPosition gap_pos;
4851       int gap_index = pos.ToInstructionIndex();
4852       if (connect_spilled && !prev_operand.IsAnyRegister() &&
4853           cur_operand.IsAnyRegister()) {
4854         const InstructionBlock* block = code()->GetInstructionBlock(gap_index);
4855         DCHECK(block->IsDeferred());
4856         // Performing a reload in this block, meaning the spill operand must
4857         // be defined here.
4858         top_range->GetListOfBlocksRequiringSpillOperands(data())->Add(
4859             block->rpo_number().ToInt());
4860       }
4861 
4862       if (pos.IsGapPosition()) {
4863         gap_pos = pos.IsStart() ? Instruction::START : Instruction::END;
4864       } else {
4865         if (pos.IsStart()) {
4866           delay_insertion = true;
4867         } else {
4868           gap_index++;
4869         }
4870         gap_pos = delay_insertion ? Instruction::END : Instruction::START;
4871       }
4872       // Reloads or spills for ranges spilled only in deferred blocks must
4873       // happen only in deferred blocks.
4874       DCHECK_IMPLIES(connect_spilled && !(prev_operand.IsAnyRegister() &&
4875                                           cur_operand.IsAnyRegister()),
4876                      code()->GetInstructionBlock(gap_index)->IsDeferred());
4877 
4878       ParallelMove* move =
4879           code()->InstructionAt(gap_index)->GetOrCreateParallelMove(
4880               gap_pos, code_zone());
4881       if (!delay_insertion) {
4882         move->AddMove(prev_operand, cur_operand);
4883       } else {
4884         delayed_insertion_map.insert(
4885             std::make_pair(std::make_pair(move, prev_operand), cur_operand));
4886       }
4887     }
4888   }
4889   if (delayed_insertion_map.empty()) return;
4890   // Insert all the moves which should occur after the stored move.
4891   ZoneVector<MoveOperands*> to_insert(local_zone);
4892   ZoneVector<MoveOperands*> to_eliminate(local_zone);
4893   to_insert.reserve(4);
4894   to_eliminate.reserve(4);
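       // Walk the delayed moves grouped by their target ParallelMove: gather
       // the MoveOperands for one ParallelMove, then commit eliminations and
       // insertions as a batch before moving on to the next group.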
4895   ParallelMove* moves = delayed_insertion_map.begin()->first.first;
4896   for (auto it = delayed_insertion_map.begin();; ++it) {
4897     bool done = it == delayed_insertion_map.end();
4898     if (done || it->first.first != moves) {
4899       // Commit the MoveOperands for the current ParallelMove.
4900       for (MoveOperands* move : to_eliminate) {
4901         move->Eliminate();
4902       }
4903       for (MoveOperands* move : to_insert) {
4904         moves->push_back(move);
4905       }
4906       if (done) break;
4907       // Reset state.
4908       to_eliminate.clear();
4909       to_insert.clear();
4910       moves = it->first.first;
4911     }
4912     // Gather all MoveOperands for a single ParallelMove.
4913     MoveOperands* move =
4914         code_zone()->New<MoveOperands>(it->first.second, it->second);
4915     moves->PrepareInsertAfter(move, &to_eliminate);
4916     to_insert.push_back(move);
4917   }
4918 }
4919 
4920 void LiveRangeConnector::CommitSpillsInDeferredBlocks(
4921     TopLevelLiveRange* range, LiveRangeBoundArray* array, Zone* temp_zone) {
4922   DCHECK(range->IsSpilledOnlyInDeferredBlocks(data()));
4923   DCHECK(!range->spilled());
4924 
4925   InstructionSequence* code = data()->code();
4926   InstructionOperand spill_operand = range->GetSpillRangeOperand();
4927 
4928   TRACE("Live Range %d will be spilled only in deferred blocks.\n",
4929         range->vreg());
4930   // If we have ranges that aren't spilled but require the operand on the
4931   // stack, make sure we insert the spill.
4932   for (const LiveRange* child = range; child != nullptr;
4933        child = child->next()) {
4934     for (const UsePosition* pos = child->first_pos(); pos != nullptr;
4935          pos = pos->next()) {
4936       if (pos->type() != UsePositionType::kRequiresSlot && !child->spilled())
4937         continue;
4938       range->AddBlockRequiringSpillOperand(
4939           code->GetInstructionBlock(pos->pos().ToInstructionIndex())
4940               ->rpo_number(),
4941           data());
4942     }
4943   }
4944 
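       // Seed the worklist with every block that needs the spill operand; the
       // search below walks backwards through deferred predecessors to find
       // where the spills must be placed.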
4945   ZoneQueue<int> worklist(temp_zone);
4946 
4947   for (BitVector::Iterator iterator(
4948            range->GetListOfBlocksRequiringSpillOperands(data()));
4949        !iterator.Done(); iterator.Advance()) {
4950     worklist.push(iterator.Current());
4951   }
4952 
4953   ZoneSet<std::pair<RpoNumber, int>> done_moves(temp_zone);
4954   // Seek the deferred blocks that dominate locations requiring spill operands,
4955   // and spill there. We only need to spill at the start of such blocks.
4956   BitVector done_blocks(
4957       range->GetListOfBlocksRequiringSpillOperands(data())->length(),
4958       temp_zone);
4959   while (!worklist.empty()) {
4960     int block_id = worklist.front();
4961     worklist.pop();
4962     if (done_blocks.Contains(block_id)) continue;
4963     done_blocks.Add(block_id);
4964     InstructionBlock* spill_block =
4965         code->InstructionBlockAt(RpoNumber::FromInt(block_id));
4966 
4967     for (const RpoNumber& pred : spill_block->predecessors()) {
4968       const InstructionBlock* pred_block = code->InstructionBlockAt(pred);
4969 
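           // A deferred predecessor is pushed back onto the worklist so that
           // the search keeps walking towards the boundary with non-deferred
           // code. At that boundary, the operand assigned at the end of the
           // non-deferred predecessor is copied into the spill slot by a gap
           // move at the START of the deferred block, at most once per block
           // and range.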
4970       if (pred_block->IsDeferred()) {
4971         worklist.push(pred_block->rpo_number().ToInt());
4972       } else {
4973         LifetimePosition pred_end =
4974             LifetimePosition::InstructionFromInstructionIndex(
4975                 pred_block->last_instruction_index());
4976 
4977         LiveRangeBound* bound = array->Find(pred_end);
4978 
4979         InstructionOperand pred_op = bound->range_->GetAssignedOperand();
4980 
4981         RpoNumber spill_block_number = spill_block->rpo_number();
4982         if (done_moves.find(std::make_pair(
4983                 spill_block_number, range->vreg())) == done_moves.end()) {
4984           TRACE("Spilling deferred spill for range %d at B%d\n", range->vreg(),
4985                 spill_block_number.ToInt());
4986           data()->AddGapMove(spill_block->first_instruction_index(),
4987                              Instruction::GapPosition::START, pred_op,
4988                              spill_operand);
4989           done_moves.insert(std::make_pair(spill_block_number, range->vreg()));
4990           spill_block->mark_needs_frame();
4991         }
4992       }
4993     }
4994   }
4995 }
4996 
4997 #undef TRACE
4998 #undef TRACE_COND
4999 
5000 }  // namespace compiler
5001 }  // namespace internal
5002 }  // namespace v8
5003