1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/compiler/backend/register-allocator.h"
6 
7 #include <iomanip>
8 
9 #include "src/base/iterator.h"
10 #include "src/base/small-vector.h"
11 #include "src/base/vector.h"
12 #include "src/codegen/assembler-inl.h"
13 #include "src/codegen/tick-counter.h"
14 #include "src/compiler/backend/spill-placer.h"
15 #include "src/compiler/linkage.h"
16 #include "src/strings/string-stream.h"
17 
18 namespace v8 {
19 namespace internal {
20 namespace compiler {
21 
22 #define TRACE_COND(cond, ...)      \
23   do {                             \
24     if (cond) PrintF(__VA_ARGS__); \
25   } while (false)
26 
27 #define TRACE(...) TRACE_COND(data()->is_trace_alloc(), __VA_ARGS__)
28 
29 namespace {
30 
31 static constexpr int kFloat32Bit =
32     RepresentationBit(MachineRepresentation::kFloat32);
33 static constexpr int kSimd128Bit =
34     RepresentationBit(MachineRepresentation::kSimd128);
35 
36 
37 const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence,
38                                           const InstructionBlock* block) {
39   RpoNumber index = block->loop_header();
40   if (!index.IsValid()) return nullptr;
41   return sequence->InstructionBlockAt(index);
42 }
43 
44 const InstructionBlock* GetInstructionBlock(const InstructionSequence* code,
45                                             LifetimePosition pos) {
46   return code->GetInstructionBlock(pos.ToInstructionIndex());
47 }
48 
49 Instruction* GetLastInstruction(InstructionSequence* code,
50                                 const InstructionBlock* block) {
51   return code->InstructionAt(block->last_instruction_index());
52 }
53 
54 }  // namespace
55 
56 void LiveRangeBoundArray::Initialize(Zone* zone, TopLevelLiveRange* range) {
57   size_t max_child_count = range->GetMaxChildCount();
58 
59   start_ = zone->NewArray<LiveRangeBound>(max_child_count);
60   length_ = 0;
61   LiveRangeBound* curr = start_;
62   // The primary loop in ResolveControlFlow is not responsible for inserting
63   // connecting moves for spilled ranges.
64   for (LiveRange* i = range; i != nullptr; i = i->next(), ++curr, ++length_) {
65     new (curr) LiveRangeBound(i, i->spilled());
66   }
67 }
68 
69 LiveRangeBound* LiveRangeBoundArray::Find(
70     const LifetimePosition position) const {
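  // Binary search over the child ranges' bounds, which are sorted by start
  // position and do not overlap, for the bound whose [start, end) interval
  // covers 'position'.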
71   size_t left_index = 0;
72   size_t right_index = length_;
73   while (true) {
74     size_t current_index = left_index + (right_index - left_index) / 2;
75     DCHECK(right_index > current_index);
76     LiveRangeBound* bound = &start_[current_index];
77     if (bound->start_ <= position) {
78       if (position < bound->end_) return bound;
79       DCHECK(left_index < current_index);
80       left_index = current_index;
81     } else {
82       right_index = current_index;
83     }
84   }
85 }
86 
87 LiveRangeBound* LiveRangeBoundArray::FindPred(const InstructionBlock* pred) {
88   LifetimePosition pred_end = LifetimePosition::InstructionFromInstructionIndex(
89       pred->last_instruction_index());
90   return Find(pred_end);
91 }
92 
93 LiveRangeBound* LiveRangeBoundArray::FindSucc(const InstructionBlock* succ) {
94   LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex(
95       succ->first_instruction_index());
96   return Find(succ_start);
97 }
98 
99 bool LiveRangeBoundArray::FindConnectableSubranges(
100     const InstructionBlock* block, const InstructionBlock* pred,
101     FindResult* result) const {
102   LifetimePosition pred_end = LifetimePosition::InstructionFromInstructionIndex(
103       pred->last_instruction_index());
104   LiveRangeBound* bound = Find(pred_end);
105   result->pred_cover_ = bound->range_;
106   LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex(
107       block->first_instruction_index());
108 
109   if (bound->CanCover(cur_start)) {
110     // Both blocks are covered by the same range, so there is nothing to
111     // connect.
112     return false;
113   }
114   bound = Find(cur_start);
115   if (bound->skip_) {
116     return false;
117   }
118   result->cur_cover_ = bound->range_;
119   DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
120   return (result->cur_cover_ != result->pred_cover_);
121 }
122 
123 LiveRangeFinder::LiveRangeFinder(const TopTierRegisterAllocationData* data,
124                                  Zone* zone)
125     : data_(data),
126       bounds_length_(static_cast<int>(data_->live_ranges().size())),
127       bounds_(zone->NewArray<LiveRangeBoundArray>(bounds_length_)),
128       zone_(zone) {
129   for (int i = 0; i < bounds_length_; ++i) {
130     new (&bounds_[i]) LiveRangeBoundArray();
131   }
132 }
133 
134 LiveRangeBoundArray* LiveRangeFinder::ArrayFor(int operand_index) {
135   DCHECK(operand_index < bounds_length_);
136   TopLevelLiveRange* range = data_->live_ranges()[operand_index];
137   DCHECK(range != nullptr && !range->IsEmpty());
138   DCHECK_EQ(range->vreg(), operand_index);
139   LiveRangeBoundArray* array = &bounds_[operand_index];
140   if (array->ShouldInitialize()) {
141     array->Initialize(zone_, range);
142   }
143   return array;
144 }
145 
146 using DelayedInsertionMapKey = std::pair<ParallelMove*, InstructionOperand>;
147 
148 struct DelayedInsertionMapCompare {
149   bool operator()(const DelayedInsertionMapKey& a,
150                   const DelayedInsertionMapKey& b) const {
151     if (a.first == b.first) {
152       return a.second.Compare(b.second);
153     }
154     return a.first < b.first;
155   }
156 };
157 
158 using DelayedInsertionMap = ZoneMap<DelayedInsertionMapKey, InstructionOperand,
159                                     DelayedInsertionMapCompare>;
160 
161 UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
162                          void* hint, UsePositionHintType hint_type)
163     : operand_(operand), hint_(hint), next_(nullptr), pos_(pos), flags_(0) {
164   DCHECK_IMPLIES(hint == nullptr, hint_type == UsePositionHintType::kNone);
165   bool register_beneficial = true;
166   UsePositionType type = UsePositionType::kRegisterOrSlot;
167   if (operand_ != nullptr && operand_->IsUnallocated()) {
168     const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_);
169     if (unalloc->HasRegisterPolicy()) {
170       type = UsePositionType::kRequiresRegister;
171     } else if (unalloc->HasSlotPolicy()) {
172       type = UsePositionType::kRequiresSlot;
173       register_beneficial = false;
174     } else if (unalloc->HasRegisterOrSlotOrConstantPolicy()) {
175       type = UsePositionType::kRegisterOrSlotOrConstant;
176       register_beneficial = false;
177     } else {
178       register_beneficial = !unalloc->HasRegisterOrSlotPolicy();
179     }
180   }
181   flags_ = TypeField::encode(type) | HintTypeField::encode(hint_type) |
182            RegisterBeneficialField::encode(register_beneficial) |
183            AssignedRegisterField::encode(kUnassignedRegister);
184   DCHECK(pos_.IsValid());
185 }
186 
187 bool UsePosition::HasHint() const {
188   int hint_register;
189   return HintRegister(&hint_register);
190 }
191 
192 bool UsePosition::HintRegister(int* register_code) const {
193   if (hint_ == nullptr) return false;
194   switch (HintTypeField::decode(flags_)) {
195     case UsePositionHintType::kNone:
196     case UsePositionHintType::kUnresolved:
197       return false;
198     case UsePositionHintType::kUsePos: {
199       UsePosition* use_pos = reinterpret_cast<UsePosition*>(hint_);
200       int assigned_register = AssignedRegisterField::decode(use_pos->flags_);
201       if (assigned_register == kUnassignedRegister) return false;
202       *register_code = assigned_register;
203       return true;
204     }
205     case UsePositionHintType::kOperand: {
206       InstructionOperand* operand =
207           reinterpret_cast<InstructionOperand*>(hint_);
208       *register_code = LocationOperand::cast(operand)->register_code();
209       return true;
210     }
211     case UsePositionHintType::kPhi: {
212       TopTierRegisterAllocationData::PhiMapValue* phi =
213           reinterpret_cast<TopTierRegisterAllocationData::PhiMapValue*>(hint_);
214       int assigned_register = phi->assigned_register();
215       if (assigned_register == kUnassignedRegister) return false;
216       *register_code = assigned_register;
217       return true;
218     }
219   }
220   UNREACHABLE();
221 }
222 
223 UsePositionHintType UsePosition::HintTypeForOperand(
224     const InstructionOperand& op) {
225   switch (op.kind()) {
226     case InstructionOperand::CONSTANT:
227     case InstructionOperand::IMMEDIATE:
228       return UsePositionHintType::kNone;
229     case InstructionOperand::UNALLOCATED:
230       return UsePositionHintType::kUnresolved;
231     case InstructionOperand::ALLOCATED:
232       if (op.IsRegister() || op.IsFPRegister()) {
233         return UsePositionHintType::kOperand;
234       } else {
235         DCHECK(op.IsStackSlot() || op.IsFPStackSlot());
236         return UsePositionHintType::kNone;
237       }
238     case InstructionOperand::PENDING:
239     case InstructionOperand::INVALID:
240       break;
241   }
242   UNREACHABLE();
243 }
244 
245 void UsePosition::SetHint(UsePosition* use_pos) {
246   DCHECK_NOT_NULL(use_pos);
247   hint_ = use_pos;
248   flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos);
249 }
250 
251 void UsePosition::ResolveHint(UsePosition* use_pos) {
252   DCHECK_NOT_NULL(use_pos);
253   if (HintTypeField::decode(flags_) != UsePositionHintType::kUnresolved) return;
254   hint_ = use_pos;
255   flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos);
256 }
257 
258 void UsePosition::set_type(UsePositionType type, bool register_beneficial) {
259   DCHECK_IMPLIES(type == UsePositionType::kRequiresSlot, !register_beneficial);
260   DCHECK_EQ(kUnassignedRegister, AssignedRegisterField::decode(flags_));
261   flags_ = TypeField::encode(type) |
262            RegisterBeneficialField::encode(register_beneficial) |
263            HintTypeField::encode(HintTypeField::decode(flags_)) |
264            AssignedRegisterField::encode(kUnassignedRegister);
265 }
266 
267 UseInterval* UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
268   DCHECK(Contains(pos) && pos != start());
269   UseInterval* after = zone->New<UseInterval>(pos, end_);
270   after->next_ = next_;
271   next_ = nullptr;
272   end_ = pos;
273   return after;
274 }
275 
276 void LifetimePosition::Print() const { StdoutStream{} << *this << std::endl; }
277 
278 std::ostream& operator<<(std::ostream& os, const LifetimePosition pos) {
279   os << '@' << pos.ToInstructionIndex();
280   if (pos.IsGapPosition()) {
281     os << 'g';
282   } else {
283     os << 'i';
284   }
285   if (pos.IsStart()) {
286     os << 's';
287   } else {
288     os << 'e';
289   }
290   return os;
291 }
292 
293 LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
294                      TopLevelLiveRange* top_level)
295     : relative_id_(relative_id),
296       bits_(0),
297       last_interval_(nullptr),
298       first_interval_(nullptr),
299       first_pos_(nullptr),
300       top_level_(top_level),
301       next_(nullptr),
302       current_interval_(nullptr),
303       last_processed_use_(nullptr),
304       current_hint_position_(nullptr) {
305   DCHECK(AllocatedOperand::IsSupportedRepresentation(rep));
306   bits_ = AssignedRegisterField::encode(kUnassignedRegister) |
307           RepresentationField::encode(rep) |
308           ControlFlowRegisterHint::encode(kUnassignedRegister);
309 }
310 
311 void LiveRange::VerifyPositions() const {
312   // Walk the positions, verifying that each is in an interval.
313   UseInterval* interval = first_interval_;
314   for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
315     CHECK(Start() <= pos->pos());
316     CHECK(pos->pos() <= End());
317     CHECK_NOT_NULL(interval);
318     while (!interval->Contains(pos->pos()) && interval->end() != pos->pos()) {
319       interval = interval->next();
320       CHECK_NOT_NULL(interval);
321     }
322   }
323 }
324 
325 void LiveRange::VerifyIntervals() const {
326   DCHECK(first_interval()->start() == Start());
327   LifetimePosition last_end = first_interval()->end();
328   for (UseInterval* interval = first_interval()->next(); interval != nullptr;
329        interval = interval->next()) {
330     DCHECK(last_end <= interval->start());
331     last_end = interval->end();
332   }
333   DCHECK(last_end == End());
334 }
335 
336 void LiveRange::set_assigned_register(int reg) {
337   DCHECK(!HasRegisterAssigned() && !spilled());
338   bits_ = AssignedRegisterField::update(bits_, reg);
339 }
340 
341 void LiveRange::UnsetAssignedRegister() {
342   DCHECK(HasRegisterAssigned() && !spilled());
343   bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
344 }
345 
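// Splices the next child into this range: its use intervals and use positions
// are appended to this range's lists, and that child is then unlinked from the
// chain of children.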
346 void LiveRange::AttachToNext() {
347   DCHECK_NOT_NULL(next_);
348   DCHECK_NE(TopLevel()->last_child_covers_, next_);
349   last_interval_->set_next(next_->first_interval());
350   next_->first_interval_ = nullptr;
351   last_interval_ = next_->last_interval_;
352   next_->last_interval_ = nullptr;
353   if (first_pos() == nullptr) {
354     first_pos_ = next_->first_pos();
355   } else {
356     UsePosition* ptr = first_pos_;
357     while (ptr->next() != nullptr) {
358       ptr = ptr->next();
359     }
360     ptr->set_next(next_->first_pos());
361   }
362   next_->first_pos_ = nullptr;
363   LiveRange* old_next = next_;
364   next_ = next_->next_;
365   old_next->next_ = nullptr;
366 }
367 
368 void LiveRange::Unspill() {
369   DCHECK(spilled());
370   set_spilled(false);
371   bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
372 }
373 
374 void LiveRange::Spill() {
375   DCHECK(!spilled());
376   DCHECK(!TopLevel()->HasNoSpillType());
377   set_spilled(true);
378   bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
379 }
380 
381 RegisterKind LiveRange::kind() const {
382   if (kFPAliasing == AliasingKind::kIndependent &&
383       IsSimd128(representation())) {
384     return RegisterKind::kSimd128;
385   } else {
386     return IsFloatingPoint(representation()) ? RegisterKind::kDouble
387                                              : RegisterKind::kGeneral;
388   }
389 }
390 
391 UsePosition* LiveRange::FirstHintPosition(int* register_index) {
392   if (!first_pos_) return nullptr;
393   if (current_hint_position_) {
394     if (current_hint_position_->pos() < first_pos_->pos()) {
395       current_hint_position_ = first_pos_;
396     }
397     if (current_hint_position_->pos() > End()) {
398       current_hint_position_ = nullptr;
399     }
400   }
401   bool needs_revisit = false;
402   UsePosition* pos = current_hint_position_;
403   for (; pos != nullptr; pos = pos->next()) {
404     if (pos->HintRegister(register_index)) {
405       break;
406     }
407     // Phi and use position hints can be assigned during allocation which
408     // would invalidate the cached hint position. Make sure we revisit them.
409     needs_revisit = needs_revisit ||
410                     pos->hint_type() == UsePositionHintType::kPhi ||
411                     pos->hint_type() == UsePositionHintType::kUsePos;
412   }
413   if (!needs_revisit) {
414     current_hint_position_ = pos;
415   }
416 #ifdef DEBUG
417   UsePosition* pos_check = first_pos_;
418   for (; pos_check != nullptr; pos_check = pos_check->next()) {
419     if (pos_check->HasHint()) {
420       break;
421     }
422   }
423   CHECK_EQ(pos, pos_check);
424 #endif
425   return pos;
426 }
427 
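// 'last_processed_use_' caches the result of the previous query; when it still
// lies at or before 'start', the linear search resumes from there instead of
// from the first use position.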
428 UsePosition* LiveRange::NextUsePosition(LifetimePosition start) const {
429   UsePosition* use_pos = last_processed_use_;
430   if (use_pos == nullptr || use_pos->pos() > start) {
431     use_pos = first_pos();
432   }
433   while (use_pos != nullptr && use_pos->pos() < start) {
434     use_pos = use_pos->next();
435   }
436   last_processed_use_ = use_pos;
437   return use_pos;
438 }
439 
440 UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
441     LifetimePosition start) const {
442   UsePosition* pos = NextUsePosition(start);
443   while (pos != nullptr && !pos->RegisterIsBeneficial()) {
444     pos = pos->next();
445   }
446   return pos;
447 }
448 
449 LifetimePosition LiveRange::NextLifetimePositionRegisterIsBeneficial(
450     const LifetimePosition& start) const {
451   UsePosition* next_use = NextUsePositionRegisterIsBeneficial(start);
452   if (next_use == nullptr) return End();
453   return next_use->pos();
454 }
455 
456 UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
457     LifetimePosition start) const {
458   UsePosition* pos = first_pos();
459   UsePosition* prev = nullptr;
460   while (pos != nullptr && pos->pos() < start) {
461     if (pos->RegisterIsBeneficial()) prev = pos;
462     pos = pos->next();
463   }
464   return prev;
465 }
466 
467 UsePosition* LiveRange::NextUsePositionSpillDetrimental(
468     LifetimePosition start) const {
469   UsePosition* pos = NextUsePosition(start);
470   while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister &&
471          !pos->SpillDetrimental()) {
472     pos = pos->next();
473   }
474   return pos;
475 }
476 
477 UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const {
478   UsePosition* pos = NextUsePosition(start);
479   while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister) {
480     pos = pos->next();
481   }
482   return pos;
483 }
484 
485 bool LiveRange::CanBeSpilled(LifetimePosition pos) const {
486   // We cannot spill a live range that has a use requiring a register
487   // at the current or the immediate next position.
488   UsePosition* use_pos = NextRegisterPosition(pos);
489   if (use_pos == nullptr) return true;
490   return use_pos->pos() > pos.NextStart().End();
491 }
492 
493 bool LiveRange::IsTopLevel() const { return top_level_ == this; }
494 
495 InstructionOperand LiveRange::GetAssignedOperand() const {
496   DCHECK(!IsEmpty());
497   if (HasRegisterAssigned()) {
498     DCHECK(!spilled());
499     return AllocatedOperand(LocationOperand::REGISTER, representation(),
500                             assigned_register());
501   }
502   DCHECK(spilled());
503   DCHECK(!HasRegisterAssigned());
504   if (TopLevel()->HasSpillOperand()) {
505     InstructionOperand* op = TopLevel()->GetSpillOperand();
506     DCHECK(!op->IsUnallocated());
507     return *op;
508   }
509   return TopLevel()->GetSpillRangeOperand();
510 }
511 
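// 'current_interval_' caches the interval reached by earlier searches so that
// queries at non-decreasing positions can skip over preceding intervals; if
// the cached interval starts after 'position', the search restarts from the
// first interval.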
512 UseInterval* LiveRange::FirstSearchIntervalForPosition(
513     LifetimePosition position) const {
514   if (current_interval_ == nullptr) return first_interval_;
515   if (current_interval_->start() > position) {
516     current_interval_ = nullptr;
517     return first_interval_;
518   }
519   return current_interval_;
520 }
521 
522 void LiveRange::AdvanceLastProcessedMarker(
523     UseInterval* to_start_of, LifetimePosition but_not_past) const {
524   if (to_start_of == nullptr) return;
525   if (to_start_of->start() > but_not_past) return;
526   LifetimePosition start = current_interval_ == nullptr
527                                ? LifetimePosition::Invalid()
528                                : current_interval_->start();
529   if (to_start_of->start() > start) {
530     current_interval_ = to_start_of;
531   }
532 }
533 
534 LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
535   int new_id = TopLevel()->GetNextChildId();
536   LiveRange* child = zone->New<LiveRange>(new_id, representation(), TopLevel());
537   child->set_bundle(bundle_);
538   // If we split, we do so because we're about to switch registers or move
539   // to/from a slot, so there's no value in connecting hints.
540   DetachAt(position, child, zone, DoNotConnectHints);
541 
542   child->top_level_ = TopLevel();
543   child->next_ = next_;
544   next_ = child;
545   return child;
546 }
547 
548 UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
549                                  Zone* zone,
550                                  HintConnectionOption connect_hints) {
551   DCHECK(Start() < position);
552   DCHECK(End() > position);
553   DCHECK(result->IsEmpty());
554   // Find the last interval that ends before the position. If the
555   // position is contained in one of the intervals in the chain, we
556   // split that interval and use the first part.
557   UseInterval* current = FirstSearchIntervalForPosition(position);
558 
559   // If the split position coincides with the beginning of a use interval
560   // we need to split use positions in a special way.
561   bool split_at_start = false;
562 
563   if (current->start() == position) {
564     // When splitting at start we need to locate the previous use interval.
565     current = first_interval_;
566   }
567 
568   UseInterval* after = nullptr;
569   while (current != nullptr) {
570     if (current->Contains(position)) {
571       after = current->SplitAt(position, zone);
572       break;
573     }
574     UseInterval* next = current->next();
575     if (next->start() >= position) {
576       split_at_start = (next->start() == position);
577       after = next;
578       current->set_next(nullptr);
579       break;
580     }
581     current = next;
582   }
583   DCHECK_NOT_NULL(after);
584 
585   // Partition original use intervals to the two live ranges.
586   UseInterval* before = current;
587   result->last_interval_ =
588       (last_interval_ == before)
589           ? after            // Only interval in the range after split.
590           : last_interval_;  // Last interval of the original range.
591   result->first_interval_ = after;
592   last_interval_ = before;
593 
594   // Find the last use position before the split and the first use
595   // position after it.
596   UsePosition* use_after = first_pos();
597   UsePosition* use_before = nullptr;
598   if (split_at_start) {
599     // The split position coincides with the beginning of a use interval (the
600     // end of a lifetime hole). Use at this position should be attributed to
601     // the split child because the split child owns the use interval covering it.
602     while (use_after != nullptr && use_after->pos() < position) {
603       use_before = use_after;
604       use_after = use_after->next();
605     }
606   } else {
607     while (use_after != nullptr && use_after->pos() <= position) {
608       use_before = use_after;
609       use_after = use_after->next();
610     }
611   }
612 
613   // Partition original use positions to the two live ranges.
614   if (use_before != nullptr) {
615     use_before->set_next(nullptr);
616   } else {
617     first_pos_ = nullptr;
618   }
619   result->first_pos_ = use_after;
620   result->current_hint_position_ = current_hint_position_;
621 
622   // Discard cached iteration state. It might be pointing
623   // to the use that no longer belongs to this live range.
624   last_processed_use_ = nullptr;
625   current_interval_ = nullptr;
626 
627   if (connect_hints == ConnectHints && use_before != nullptr &&
628       use_after != nullptr) {
629     use_after->SetHint(use_before);
630     result->current_hint_position_ = use_after;
631   }
632 #ifdef DEBUG
633   VerifyChildStructure();
634   result->VerifyChildStructure();
635 #endif
636   return use_before;
637 }
638 
639 void LiveRange::UpdateParentForAllChildren(TopLevelLiveRange* new_top_level) {
640   LiveRange* child = this;
641   for (; child != nullptr; child = child->next()) {
642     child->top_level_ = new_top_level;
643   }
644 }
645 
646 void LiveRange::ConvertUsesToOperand(const InstructionOperand& op,
647                                      const InstructionOperand& spill_op) {
648   for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
649     DCHECK(Start() <= pos->pos() && pos->pos() <= End());
650     if (!pos->HasOperand()) continue;
651     switch (pos->type()) {
652       case UsePositionType::kRequiresSlot:
653         DCHECK(spill_op.IsStackSlot() || spill_op.IsFPStackSlot());
654         InstructionOperand::ReplaceWith(pos->operand(), &spill_op);
655         break;
656       case UsePositionType::kRequiresRegister:
657         DCHECK(op.IsRegister() || op.IsFPRegister());
658         V8_FALLTHROUGH;
659       case UsePositionType::kRegisterOrSlot:
660       case UsePositionType::kRegisterOrSlotOrConstant:
661         InstructionOperand::ReplaceWith(pos->operand(), &op);
662         break;
663     }
664   }
665 }
666 
667 // This implements an ordering on live ranges so that they are ordered by their
668 // start positions.  This is needed for the correctness of the register
669 // allocation algorithm.  If two live ranges start at the same offset then there
670 // is a tie breaker based on where the value is first used.  This part of the
671 // ordering is merely a heuristic.
672 bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
673   LifetimePosition start = Start();
674   LifetimePosition other_start = other->Start();
675   if (start == other_start) {
676     // Prefer the register that has a control flow hint to make sure it gets
677     // allocated first. This allows the control flow aware allocation to
678     // just put ranges back into the queue without other ranges interfering.
679     if (controlflow_hint() < other->controlflow_hint()) {
680       return true;
681     }
682     // The other has a smaller hint.
683     if (controlflow_hint() > other->controlflow_hint()) {
684       return false;
685     }
686     // Both have the same hint or no hint at all. Use first use position.
687     UsePosition* pos = first_pos();
688     UsePosition* other_pos = other->first_pos();
689     // To make the order total, handle the case where both positions are null.
690     if (pos == other_pos) return TopLevel()->vreg() < other->TopLevel()->vreg();
691     if (pos == nullptr) return false;
692     if (other_pos == nullptr) return true;
693     // To make the order total, handle the case where both positions are equal.
694     if (pos->pos() == other_pos->pos())
695       return TopLevel()->vreg() < other->TopLevel()->vreg();
696     return pos->pos() < other_pos->pos();
697   }
698   return start < other_start;
699 }
700 
701 void LiveRange::SetUseHints(int register_index) {
702   for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
703     if (!pos->HasOperand()) continue;
704     switch (pos->type()) {
705       case UsePositionType::kRequiresSlot:
706         break;
707       case UsePositionType::kRequiresRegister:
708       case UsePositionType::kRegisterOrSlot:
709       case UsePositionType::kRegisterOrSlotOrConstant:
710         pos->set_assigned_register(register_index);
711         break;
712     }
713   }
714 }
715 
716 bool LiveRange::CanCover(LifetimePosition position) const {
717   if (IsEmpty()) return false;
718   return Start() <= position && position < End();
719 }
720 
721 bool LiveRange::Covers(LifetimePosition position) const {
722   if (!CanCover(position)) return false;
723   UseInterval* start_search = FirstSearchIntervalForPosition(position);
724   for (UseInterval* interval = start_search; interval != nullptr;
725        interval = interval->next()) {
726     DCHECK(interval->next() == nullptr ||
727            interval->next()->start() >= interval->start());
728     AdvanceLastProcessedMarker(interval, position);
729     if (interval->Contains(position)) return true;
730     if (interval->start() > position) return false;
731   }
732   return false;
733 }
734 
735 LifetimePosition LiveRange::NextEndAfter(LifetimePosition position) const {
736   UseInterval* start_search = FirstSearchIntervalForPosition(position);
737   while (start_search->end() < position) {
738     start_search = start_search->next();
739   }
740   return start_search->end();
741 }
742 
743 LifetimePosition LiveRange::NextStartAfter(LifetimePosition position) {
744   UseInterval* start_search = FirstSearchIntervalForPosition(position);
745   while (start_search->start() < position) {
746     start_search = start_search->next();
747   }
748   next_start_ = start_search->start();
749   return next_start_;
750 }
751 
752 LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const {
753   UseInterval* b = other->first_interval();
754   if (b == nullptr) return LifetimePosition::Invalid();
755   LifetimePosition advance_last_processed_up_to = b->start();
756   UseInterval* a = FirstSearchIntervalForPosition(b->start());
757   while (a != nullptr && b != nullptr) {
758     if (a->start() > other->End()) break;
759     if (b->start() > End()) break;
760     LifetimePosition cur_intersection = a->Intersect(b);
761     if (cur_intersection.IsValid()) {
762       return cur_intersection;
763     }
764     if (a->start() < b->start()) {
765       a = a->next();
766       if (a == nullptr || a->start() > other->End()) break;
767       AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
768     } else {
769       b = b->next();
770     }
771   }
772   return LifetimePosition::Invalid();
773 }
774 
775 void LiveRange::Print(const RegisterConfiguration* config,
776                       bool with_children) const {
777   StdoutStream os;
778   PrintableLiveRange wrapper;
779   wrapper.register_configuration_ = config;
780   for (const LiveRange* i = this; i != nullptr; i = i->next()) {
781     wrapper.range_ = i;
782     os << wrapper << std::endl;
783     if (!with_children) break;
784   }
785 }
786 
787 void LiveRange::Print(bool with_children) const {
788   Print(RegisterConfiguration::Default(), with_children);
789 }
790 
791 bool LiveRange::RegisterFromBundle(int* hint) const {
792   if (bundle_ == nullptr || bundle_->reg() == kUnassignedRegister) return false;
793   *hint = bundle_->reg();
794   return true;
795 }
796 
797 void LiveRange::UpdateBundleRegister(int reg) const {
798   if (bundle_ == nullptr || bundle_->reg() != kUnassignedRegister) return;
799   bundle_->set_reg(reg);
800 }
801 
802 struct TopLevelLiveRange::SpillMoveInsertionList : ZoneObject {
803   SpillMoveInsertionList(int gap_index, InstructionOperand* operand,
804                          SpillMoveInsertionList* next)
805       : gap_index(gap_index), operand(operand), next(next) {}
806   const int gap_index;
807   InstructionOperand* const operand;
808   SpillMoveInsertionList* next;
809 };
810 
811 TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineRepresentation rep)
812     : LiveRange(0, rep, this),
813       vreg_(vreg),
814       last_child_id_(0),
815       spill_operand_(nullptr),
816       spill_move_insertion_locations_(nullptr),
817       spilled_in_deferred_blocks_(false),
818       has_preassigned_slot_(false),
819       spill_start_index_(kMaxInt),
820       last_pos_(nullptr),
821       last_child_covers_(this) {
822   bits_ |= SpillTypeField::encode(SpillType::kNoSpillType);
823 }
824 
825 void TopLevelLiveRange::RecordSpillLocation(Zone* zone, int gap_index,
826                                             InstructionOperand* operand) {
827   DCHECK(HasNoSpillType());
828   spill_move_insertion_locations_ = zone->New<SpillMoveInsertionList>(
829       gap_index, operand, spill_move_insertion_locations_);
830 }
831 
832 void TopLevelLiveRange::CommitSpillMoves(TopTierRegisterAllocationData* data,
833                                          const InstructionOperand& op) {
834   DCHECK_IMPLIES(op.IsConstant(),
835                  GetSpillMoveInsertionLocations(data) == nullptr);
836 
837   if (HasGeneralSpillRange()) {
838     SetLateSpillingSelected(false);
839   }
840 
841   InstructionSequence* sequence = data->code();
842   Zone* zone = sequence->zone();
843 
844   for (SpillMoveInsertionList* to_spill = GetSpillMoveInsertionLocations(data);
845        to_spill != nullptr; to_spill = to_spill->next) {
846     Instruction* instr = sequence->InstructionAt(to_spill->gap_index);
847     ParallelMove* move =
848         instr->GetOrCreateParallelMove(Instruction::START, zone);
849     move->AddMove(*to_spill->operand, op);
850     instr->block()->mark_needs_frame();
851   }
852 }
853 
854 void TopLevelLiveRange::FilterSpillMoves(TopTierRegisterAllocationData* data,
855                                          const InstructionOperand& op) {
856   DCHECK_IMPLIES(op.IsConstant(),
857                  GetSpillMoveInsertionLocations(data) == nullptr);
858   bool might_be_duplicated = has_slot_use() || spilled();
859   InstructionSequence* sequence = data->code();
860 
861   SpillMoveInsertionList* previous = nullptr;
862   for (SpillMoveInsertionList* to_spill = GetSpillMoveInsertionLocations(data);
863        to_spill != nullptr; previous = to_spill, to_spill = to_spill->next) {
864     Instruction* instr = sequence->InstructionAt(to_spill->gap_index);
865     ParallelMove* move = instr->GetParallelMove(Instruction::START);
866     // Skip insertion if it's possible that the move exists already as a
867     // constraint move from a fixed output register to a slot.
868     bool found = false;
869     if (move != nullptr && (might_be_duplicated || has_preassigned_slot())) {
870       for (MoveOperands* move_op : *move) {
871         if (move_op->IsEliminated()) continue;
872         if (move_op->source().Equals(*to_spill->operand) &&
873             move_op->destination().Equals(op)) {
874           found = true;
875           if (has_preassigned_slot()) move_op->Eliminate();
876           break;
877         }
878       }
879     }
880     if (found || has_preassigned_slot()) {
881       // Remove the item from the list.
882       if (previous == nullptr) {
883         spill_move_insertion_locations_ = to_spill->next;
884       } else {
885         previous->next = to_spill->next;
886       }
887       // Even though this location doesn't need a spill instruction, the
888       // block does require a frame.
889       instr->block()->mark_needs_frame();
890     }
891   }
892 }
893 
894 void TopLevelLiveRange::SetSpillOperand(InstructionOperand* operand) {
895   DCHECK(HasNoSpillType());
896   DCHECK(!operand->IsUnallocated() && !operand->IsImmediate());
897   set_spill_type(SpillType::kSpillOperand);
898   spill_operand_ = operand;
899 }
900 
901 void TopLevelLiveRange::SetSpillRange(SpillRange* spill_range) {
902   DCHECK(!HasSpillOperand());
903   DCHECK(spill_range);
904   spill_range_ = spill_range;
905 }
906 
907 AllocatedOperand TopLevelLiveRange::GetSpillRangeOperand() const {
908   SpillRange* spill_range = GetSpillRange();
909   int index = spill_range->assigned_slot();
910   return AllocatedOperand(LocationOperand::STACK_SLOT, representation(), index);
911 }
912 
913 void TopLevelLiveRange::VerifyChildrenInOrder() const {
914   LifetimePosition last_end = End();
915   for (const LiveRange* child = this->next(); child != nullptr;
916        child = child->next()) {
917     DCHECK(last_end <= child->Start());
918     last_end = child->End();
919   }
920 }
921 
922 LiveRange* TopLevelLiveRange::GetChildCovers(LifetimePosition pos) {
923   LiveRange* child = last_child_covers_;
924   DCHECK_NE(child, nullptr);
925   if (pos < child->Start()) {
926     // Cached value has advanced too far; start from the top.
927     child = this;
928   }
929   LiveRange* previous_child = nullptr;
930   while (child != nullptr && child->End() <= pos) {
931     previous_child = child;
932     child = child->next();
933   }
934 
935   // If we've walked past the end, cache the last child instead. This allows
936   // future calls that are also past the end to be fast, since they will know
937   // that there is no need to reset the search to the beginning.
938   last_child_covers_ = child == nullptr ? previous_child : child;
939 
940   return !child || !child->Covers(pos) ? nullptr : child;
941 }
942 
943 void TopLevelLiveRange::Verify() const {
944   VerifyChildrenInOrder();
945   for (const LiveRange* child = this; child != nullptr; child = child->next()) {
946     VerifyChildStructure();
947   }
948 }
949 
950 void TopLevelLiveRange::ShortenTo(LifetimePosition start, bool trace_alloc) {
951   TRACE_COND(trace_alloc, "Shorten live range %d to [%d\n", vreg(),
952              start.value());
953   DCHECK_NOT_NULL(first_interval_);
954   DCHECK(first_interval_->start() <= start);
955   DCHECK(start < first_interval_->end());
956   first_interval_->set_start(start);
957 }
958 
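// Guarantees that [start, end) is covered by this range: any existing leading
// intervals that begin at or before 'end' are absorbed into a single new
// interval starting at 'start'.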
959 void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
960                                        LifetimePosition end, Zone* zone,
961                                        bool trace_alloc) {
962   TRACE_COND(trace_alloc, "Ensure live range %d in interval [%d %d[\n", vreg(),
963              start.value(), end.value());
964   LifetimePosition new_end = end;
965   while (first_interval_ != nullptr && first_interval_->start() <= end) {
966     if (first_interval_->end() > end) {
967       new_end = first_interval_->end();
968     }
969     first_interval_ = first_interval_->next();
970   }
971 
972   UseInterval* new_interval = zone->New<UseInterval>(start, new_end);
973   new_interval->set_next(first_interval_);
974   first_interval_ = new_interval;
975   if (new_interval->next() == nullptr) {
976     last_interval_ = new_interval;
977   }
978 }
979 
980 void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
981                                        LifetimePosition end, Zone* zone,
982                                        bool trace_alloc) {
983   TRACE_COND(trace_alloc, "Add to live range %d interval [%d %d[\n", vreg(),
984              start.value(), end.value());
985   if (first_interval_ == nullptr) {
986     UseInterval* interval = zone->New<UseInterval>(start, end);
987     first_interval_ = interval;
988     last_interval_ = interval;
989   } else {
990     if (end == first_interval_->start()) {
991       first_interval_->set_start(start);
992     } else if (end < first_interval_->start()) {
993       UseInterval* interval = zone->New<UseInterval>(start, end);
994       interval->set_next(first_interval_);
995       first_interval_ = interval;
996     } else {
997       // Order of instruction processing (see ProcessInstructions) guarantees
998       // that each new use interval either precedes, intersects with or touches
999       // the last added interval.
1000       DCHECK(start <= first_interval_->end());
1001       first_interval_->set_start(std::min(start, first_interval_->start()));
1002       first_interval_->set_end(std::max(end, first_interval_->end()));
1003     }
1004   }
1005 }
1006 
1007 void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos, bool trace_alloc) {
1008   LifetimePosition pos = use_pos->pos();
1009   TRACE_COND(trace_alloc, "Add to live range %d use position %d\n", vreg(),
1010              pos.value());
1011   UsePosition* prev_hint = nullptr;
1012   UsePosition* prev = nullptr;
1013   UsePosition* current = first_pos_;
1014   while (current != nullptr && current->pos() < pos) {
1015     prev_hint = current->HasHint() ? current : prev_hint;
1016     prev = current;
1017     current = current->next();
1018   }
1019 
1020   if (prev == nullptr) {
1021     use_pos->set_next(first_pos_);
1022     first_pos_ = use_pos;
1023   } else {
1024     use_pos->set_next(prev->next());
1025     prev->set_next(use_pos);
1026   }
1027 
1028   if (prev_hint == nullptr && use_pos->HasHint()) {
1029     current_hint_position_ = use_pos;
1030   }
1031 }
1032 
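// Both interval lists are expected to be sorted by start position; the loop
// always examines the interval that starts earlier and reports an intersection
// as soon as it extends past the start of the other list's head interval.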
1033 static bool AreUseIntervalsIntersecting(UseInterval* interval1,
1034                                         UseInterval* interval2) {
1035   while (interval1 != nullptr && interval2 != nullptr) {
1036     if (interval1->start() < interval2->start()) {
1037       if (interval1->end() > interval2->start()) {
1038         return true;
1039       }
1040       interval1 = interval1->next();
1041     } else {
1042       if (interval2->end() > interval1->start()) {
1043         return true;
1044       }
1045       interval2 = interval2->next();
1046     }
1047   }
1048   return false;
1049 }
1050 
1051 std::ostream& operator<<(std::ostream& os,
1052                          const PrintableLiveRange& printable_range) {
1053   const LiveRange* range = printable_range.range_;
1054   os << "Range: " << range->TopLevel()->vreg() << ":" << range->relative_id()
1055      << " ";
1056   if (range->TopLevel()->is_phi()) os << "phi ";
1057   if (range->TopLevel()->is_non_loop_phi()) os << "nlphi ";
1058 
1059   os << "{" << std::endl;
1060   UseInterval* interval = range->first_interval();
1061   UsePosition* use_pos = range->first_pos();
1062   while (use_pos != nullptr) {
1063     if (use_pos->HasOperand()) {
1064       os << *use_pos->operand() << use_pos->pos() << " ";
1065     }
1066     use_pos = use_pos->next();
1067   }
1068   os << std::endl;
1069 
1070   while (interval != nullptr) {
1071     os << '[' << interval->start() << ", " << interval->end() << ')'
1072        << std::endl;
1073     interval = interval->next();
1074   }
1075   os << "}";
1076   return os;
1077 }
1078 
1079 namespace {
1080 void PrintBlockRow(std::ostream& os, const InstructionBlocks& blocks) {
1081   os << "     ";
1082   for (auto block : blocks) {
1083     LifetimePosition start_pos = LifetimePosition::GapFromInstructionIndex(
1084         block->first_instruction_index());
1085     LifetimePosition end_pos = LifetimePosition::GapFromInstructionIndex(
1086                                    block->last_instruction_index())
1087                                    .NextFullStart();
1088     int length = end_pos.value() - start_pos.value();
1089     constexpr int kMaxPrefixLength = 32;
1090     char buffer[kMaxPrefixLength];
1091     int rpo_number = block->rpo_number().ToInt();
1092     const char* deferred_marker = block->IsDeferred() ? "(deferred)" : "";
1093     int max_prefix_length = std::min(length, kMaxPrefixLength);
1094     int prefix = snprintf(buffer, max_prefix_length, "[-B%d-%s", rpo_number,
1095                           deferred_marker);
1096     os << buffer;
1097     int remaining = length - std::min(prefix, max_prefix_length) - 1;
1098     for (int i = 0; i < remaining; ++i) os << '-';
1099     os << ']';
1100   }
1101   os << '\n';
1102 }
1103 }  // namespace
1104 
1105 void LinearScanAllocator::PrintRangeRow(std::ostream& os,
1106                                         const TopLevelLiveRange* toplevel) {
1107   int position = 0;
1108   os << std::setw(3) << toplevel->vreg() << ": ";
1109 
1110   const char* kind_string;
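  // Two-character tag printed for spilled ranges: "ss" = spill range,
  // "sd" = deferred spill range, "so" = spill operand.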
1111   switch (toplevel->spill_type()) {
1112     case TopLevelLiveRange::SpillType::kSpillRange:
1113       kind_string = "ss";
1114       break;
1115     case TopLevelLiveRange::SpillType::kDeferredSpillRange:
1116       kind_string = "sd";
1117       break;
1118     case TopLevelLiveRange::SpillType::kSpillOperand:
1119       kind_string = "so";
1120       break;
1121     default:
1122       kind_string = "s?";
1123   }
1124 
1125   for (const LiveRange* range = toplevel; range != nullptr;
1126        range = range->next()) {
1127     for (UseInterval* interval = range->first_interval(); interval != nullptr;
1128          interval = interval->next()) {
1129       LifetimePosition start = interval->start();
1130       LifetimePosition end = interval->end();
1131       CHECK_GE(start.value(), position);
1132       for (; start.value() > position; position++) {
1133         os << ' ';
1134       }
1135       int length = end.value() - start.value();
1136       constexpr int kMaxPrefixLength = 32;
1137       char buffer[kMaxPrefixLength];
1138       int max_prefix_length = std::min(length + 1, kMaxPrefixLength);
1139       int prefix;
1140       if (range->spilled()) {
1141         prefix = snprintf(buffer, max_prefix_length, "|%s", kind_string);
1142       } else {
1143         prefix = snprintf(buffer, max_prefix_length, "|%s",
1144                           RegisterName(range->assigned_register()));
1145       }
1146       os << buffer;
1147       position += std::min(prefix, max_prefix_length - 1);
1148       CHECK_GE(end.value(), position);
1149       const char line_style = range->spilled() ? '-' : '=';
1150       for (; end.value() > position; position++) {
1151         os << line_style;
1152       }
1153     }
1154   }
1155   os << '\n';
1156 }
1157 
1158 void LinearScanAllocator::PrintRangeOverview() {
1159   std::ostringstream os;
1160   PrintBlockRow(os, code()->instruction_blocks());
1161   for (auto const toplevel : data()->fixed_live_ranges()) {
1162     if (toplevel == nullptr) continue;
1163     PrintRangeRow(os, toplevel);
1164   }
1165   int rowcount = 0;
1166   for (auto toplevel : data()->live_ranges()) {
1167     if (!CanProcessRange(toplevel)) continue;
1168     if (rowcount++ % 10 == 0) PrintBlockRow(os, code()->instruction_blocks());
1169     PrintRangeRow(os, toplevel);
1170   }
1171   PrintF("%s\n", os.str().c_str());
1172 }
1173 
1174 SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
1175     : live_ranges_(zone),
1176       assigned_slot_(kUnassignedSlot),
1177       byte_width_(ByteWidthForStackSlot(parent->representation())) {
1178   // Spill ranges are created for the top level range. This is so that, when
1179   // decisions are made, we consider the full extent of the virtual register,
1180   // and avoid clobbering it.
1181   UseInterval* result = nullptr;
1182   UseInterval* node = nullptr;
1183   // Copy the intervals for all ranges.
1184   for (LiveRange* range = parent; range != nullptr; range = range->next()) {
1185     UseInterval* src = range->first_interval();
1186     while (src != nullptr) {
1187       UseInterval* new_node = zone->New<UseInterval>(src->start(), src->end());
1188       if (result == nullptr) {
1189         result = new_node;
1190       } else {
1191         node->set_next(new_node);
1192       }
1193       node = new_node;
1194       src = src->next();
1195     }
1196   }
1197   use_interval_ = result;
1198   live_ranges().push_back(parent);
1199   end_position_ = node->end();
1200   parent->SetSpillRange(this);
1201 }
1202 
1203 bool SpillRange::IsIntersectingWith(SpillRange* other) const {
1204   if (this->use_interval_ == nullptr || other->use_interval_ == nullptr ||
1205       this->End() <= other->use_interval_->start() ||
1206       other->End() <= this->use_interval_->start()) {
1207     return false;
1208   }
1209   return AreUseIntervalsIntersecting(use_interval_, other->use_interval_);
1210 }
1211 
1212 bool SpillRange::TryMerge(SpillRange* other) {
1213   if (HasSlot() || other->HasSlot()) return false;
1214   if (byte_width() != other->byte_width() || IsIntersectingWith(other))
1215     return false;
1216 
1217   LifetimePosition max = LifetimePosition::MaxPosition();
1218   if (End() < other->End() && other->End() != max) {
1219     end_position_ = other->End();
1220   }
1221   other->end_position_ = max;
1222 
1223   MergeDisjointIntervals(other->use_interval_);
1224   other->use_interval_ = nullptr;
1225 
1226   for (TopLevelLiveRange* range : other->live_ranges()) {
1227     DCHECK(range->GetSpillRange() == other);
1228     range->SetSpillRange(this);
1229   }
1230 
1231   live_ranges().insert(live_ranges().end(), other->live_ranges().begin(),
1232                        other->live_ranges().end());
1233   other->live_ranges().clear();
1234 
1235   return true;
1236 }
1237 
1238 void SpillRange::MergeDisjointIntervals(UseInterval* other) {
1239   UseInterval* tail = nullptr;
1240   UseInterval* current = use_interval_;
1241   while (other != nullptr) {
1242     // Make sure the 'current' list starts first
1243     if (current == nullptr || current->start() > other->start()) {
1244       std::swap(current, other);
1245     }
1246     // Check disjointness
1247     DCHECK(other == nullptr || current->end() <= other->start());
1248     // Append the 'current' node to the result accumulator and move forward
1249     if (tail == nullptr) {
1250       use_interval_ = current;
1251     } else {
1252       tail->set_next(current);
1253     }
1254     tail = current;
1255     current = current->next();
1256   }
1257   // Other list is empty => we are done
1258 }
1259 
1260 void SpillRange::Print() const {
1261   StdoutStream os;
1262   os << "{" << std::endl;
1263   for (TopLevelLiveRange* range : live_ranges()) {
1264     os << range->vreg() << " ";
1265   }
1266   os << std::endl;
1267 
1268   for (UseInterval* i = interval(); i != nullptr; i = i->next()) {
1269     os << '[' << i->start() << ", " << i->end() << ')' << std::endl;
1270   }
1271   os << "}" << std::endl;
1272 }
1273 
1274 TopTierRegisterAllocationData::PhiMapValue::PhiMapValue(
1275     PhiInstruction* phi, const InstructionBlock* block, Zone* zone)
1276     : phi_(phi),
1277       block_(block),
1278       incoming_operands_(zone),
1279       assigned_register_(kUnassignedRegister) {
1280   incoming_operands_.reserve(phi->operands().size());
1281 }
1282 
1283 void TopTierRegisterAllocationData::PhiMapValue::AddOperand(
1284     InstructionOperand* operand) {
1285   incoming_operands_.push_back(operand);
1286 }
1287 
1288 void TopTierRegisterAllocationData::PhiMapValue::CommitAssignment(
1289     const InstructionOperand& assigned) {
1290   for (InstructionOperand* operand : incoming_operands_) {
1291     InstructionOperand::ReplaceWith(operand, &assigned);
1292   }
1293 }
1294 
1295 TopTierRegisterAllocationData::TopTierRegisterAllocationData(
1296     const RegisterConfiguration* config, Zone* zone, Frame* frame,
1297     InstructionSequence* code, RegisterAllocationFlags flags,
1298     TickCounter* tick_counter, const char* debug_name)
1299     : RegisterAllocationData(Type::kTopTier),
1300       allocation_zone_(zone),
1301       frame_(frame),
1302       code_(code),
1303       debug_name_(debug_name),
1304       config_(config),
1305       phi_map_(allocation_zone()),
1306       live_in_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
1307       live_out_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
1308       live_ranges_(code->VirtualRegisterCount() * 2, nullptr,
1309                    allocation_zone()),
1310       fixed_live_ranges_(kNumberOfFixedRangesPerRegister *
1311                              this->config()->num_general_registers(),
1312                          nullptr, allocation_zone()),
1313       fixed_float_live_ranges_(allocation_zone()),
1314       fixed_double_live_ranges_(kNumberOfFixedRangesPerRegister *
1315                                     this->config()->num_double_registers(),
1316                                 nullptr, allocation_zone()),
1317       fixed_simd128_live_ranges_(allocation_zone()),
1318       spill_ranges_(code->VirtualRegisterCount(), nullptr, allocation_zone()),
1319       delayed_references_(allocation_zone()),
1320       assigned_registers_(nullptr),
1321       assigned_double_registers_(nullptr),
1322       virtual_register_count_(code->VirtualRegisterCount()),
1323       preassigned_slot_ranges_(zone),
1324       spill_state_(code->InstructionBlockCount(), ZoneVector<LiveRange*>(zone),
1325                    zone),
1326       flags_(flags),
1327       tick_counter_(tick_counter),
1328       slot_for_const_range_(zone) {
1329   if (kFPAliasing == AliasingKind::kCombine) {
1330     fixed_float_live_ranges_.resize(
1331         kNumberOfFixedRangesPerRegister * this->config()->num_float_registers(),
1332         nullptr);
1333     fixed_simd128_live_ranges_.resize(
1334         kNumberOfFixedRangesPerRegister *
1335             this->config()->num_simd128_registers(),
1336         nullptr);
1337   } else if (kFPAliasing == AliasingKind::kIndependent) {
1338     fixed_simd128_live_ranges_.resize(
1339         kNumberOfFixedRangesPerRegister *
1340             this->config()->num_simd128_registers(),
1341         nullptr);
1342   }
1343 
1344   assigned_registers_ = code_zone()->New<BitVector>(
1345       this->config()->num_general_registers(), code_zone());
1346   assigned_double_registers_ = code_zone()->New<BitVector>(
1347       this->config()->num_double_registers(), code_zone());
1348   fixed_register_use_ = code_zone()->New<BitVector>(
1349       this->config()->num_general_registers(), code_zone());
1350   fixed_fp_register_use_ = code_zone()->New<BitVector>(
1351       this->config()->num_double_registers(), code_zone());
1352   if (kFPAliasing == AliasingKind::kIndependent) {
1353     assigned_simd128_registers_ = code_zone()->New<BitVector>(
1354         this->config()->num_simd128_registers(), code_zone());
1355     fixed_simd128_register_use_ = code_zone()->New<BitVector>(
1356         this->config()->num_simd128_registers(), code_zone());
1357   }
1358 
1359   this->frame()->SetAllocatedRegisters(assigned_registers_);
1360   this->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
1361 }
1362 
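// Appends a move to the parallel move at the given gap position of the
// instruction at `index`, creating the parallel move lazily if it does not
// exist yet.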
1363 MoveOperands* TopTierRegisterAllocationData::AddGapMove(
1364     int index, Instruction::GapPosition position,
1365     const InstructionOperand& from, const InstructionOperand& to) {
1366   Instruction* instr = code()->InstructionAt(index);
1367   ParallelMove* moves = instr->GetOrCreateParallelMove(position, code_zone());
1368   return moves->AddMove(from, to);
1369 }
1370 
1371 MachineRepresentation TopTierRegisterAllocationData::RepresentationFor(
1372     int virtual_register) {
1373   DCHECK_LT(virtual_register, code()->VirtualRegisterCount());
1374   return code()->GetRepresentation(virtual_register);
1375 }
1376 
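// Returns the live range for the given virtual register, growing the
// live_ranges vector and creating the range on first request.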
1377 TopLevelLiveRange* TopTierRegisterAllocationData::GetOrCreateLiveRangeFor(
1378     int index) {
1379   if (index >= static_cast<int>(live_ranges().size())) {
1380     live_ranges().resize(index + 1, nullptr);
1381   }
1382   TopLevelLiveRange* result = live_ranges()[index];
1383   if (result == nullptr) {
1384     result = NewLiveRange(index, RepresentationFor(index));
1385     live_ranges()[index] = result;
1386   }
1387   DCHECK_EQ(live_ranges()[index]->vreg(), index);
1388   return result;
1389 }
1390 
1391 TopLevelLiveRange* TopTierRegisterAllocationData::NewLiveRange(
1392     int index, MachineRepresentation rep) {
1393   return allocation_zone()->New<TopLevelLiveRange>(index, rep);
1394 }
1395 
1396 TopTierRegisterAllocationData::PhiMapValue*
1397 TopTierRegisterAllocationData::InitializePhiMap(const InstructionBlock* block,
1398                                                 PhiInstruction* phi) {
1399   TopTierRegisterAllocationData::PhiMapValue* map_value =
1400       allocation_zone()->New<TopTierRegisterAllocationData::PhiMapValue>(
1401           phi, block, allocation_zone());
1402   auto res =
1403       phi_map_.insert(std::make_pair(phi->virtual_register(), map_value));
1404   DCHECK(res.second);
1405   USE(res);
1406   return map_value;
1407 }
1408 
1409 TopTierRegisterAllocationData::PhiMapValue*
1410 TopTierRegisterAllocationData::GetPhiMapValueFor(int virtual_register) {
1411   auto it = phi_map_.find(virtual_register);
1412   DCHECK(it != phi_map_.end());
1413   return it->second;
1414 }
1415 
1416 TopTierRegisterAllocationData::PhiMapValue*
1417 TopTierRegisterAllocationData::GetPhiMapValueFor(TopLevelLiveRange* top_range) {
1418   return GetPhiMapValueFor(top_range->vreg());
1419 }
1420 
1421 bool TopTierRegisterAllocationData::ExistsUseWithoutDefinition() {
1422   bool found = false;
1423   for (int operand_index : *live_in_sets()[0]) {
1424     found = true;
1425     PrintF("Register allocator error: live v%d reached first block.\n",
1426            operand_index);
1427     LiveRange* range = GetOrCreateLiveRangeFor(operand_index);
1428     PrintF("  (first use is at %d)\n", range->first_pos()->pos().value());
1429     if (debug_name() == nullptr) {
1430       PrintF("\n");
1431     } else {
1432       PrintF("  (function: %s)\n", debug_name());
1433     }
1434   }
1435   return found;
1436 }
1437 
1438 // If a range is defined in a deferred block, we expect the entire range to
1439 // cover only positions in deferred blocks. Otherwise, a block on the hot
1440 // path would be dominated by a deferred block, meaning it is unreachable
1441 // without passing through the deferred block, which is contradictory.
1442 // In particular, when such a range contributes a result back to the hot
1443 // path, it does so as one of the inputs of a phi. In that case, the value
1444 // is transferred via a move in the Gap::END position of the last
1445 // instruction of a deferred block.
1446 bool TopTierRegisterAllocationData::RangesDefinedInDeferredStayInDeferred() {
1447   const size_t live_ranges_size = live_ranges().size();
1448   for (const TopLevelLiveRange* range : live_ranges()) {
1449     CHECK_EQ(live_ranges_size,
1450              live_ranges().size());  // TODO(neis): crbug.com/831822
1451     if (range == nullptr || range->IsEmpty() ||
1452         !code()
1453              ->GetInstructionBlock(range->Start().ToInstructionIndex())
1454              ->IsDeferred()) {
1455       continue;
1456     }
1457     for (const UseInterval* i = range->first_interval(); i != nullptr;
1458          i = i->next()) {
1459       int first = i->FirstGapIndex();
1460       int last = i->LastGapIndex();
1461       for (int instr = first; instr <= last;) {
1462         const InstructionBlock* block = code()->GetInstructionBlock(instr);
1463         if (!block->IsDeferred()) return false;
1464         instr = block->last_instruction_index() + 1;
1465       }
1466     }
1467   }
1468   return true;
1469 }
1470 
1471 SpillRange* TopTierRegisterAllocationData::AssignSpillRangeToLiveRange(
1472     TopLevelLiveRange* range, SpillMode spill_mode) {
1473   using SpillType = TopLevelLiveRange::SpillType;
1474   DCHECK(!range->HasSpillOperand());
1475 
1476   SpillRange* spill_range = range->GetAllocatedSpillRange();
1477   if (spill_range == nullptr) {
1478     spill_range = allocation_zone()->New<SpillRange>(range, allocation_zone());
1479   }
1480   if (spill_mode == SpillMode::kSpillDeferred &&
1481       (range->spill_type() != SpillType::kSpillRange)) {
1482     range->set_spill_type(SpillType::kDeferredSpillRange);
1483   } else {
1484     range->set_spill_type(SpillType::kSpillRange);
1485   }
1486 
1487   spill_ranges()[range->vreg()] = spill_range;
1488   return spill_range;
1489 }
1490 
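// Records that fixed register `index` is referenced with the given
// representation. Under combined FP aliasing, a kFloat32/kSimd128 use is
// expanded to every aliased kFloat64 register.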
1491 void TopTierRegisterAllocationData::MarkFixedUse(MachineRepresentation rep,
1492                                                  int index) {
1493   switch (rep) {
1494     case MachineRepresentation::kFloat32:
1495     case MachineRepresentation::kSimd128:
1496       if (kFPAliasing == AliasingKind::kOverlap) {
1497         fixed_fp_register_use_->Add(index);
1498       } else if (kFPAliasing == AliasingKind::kIndependent) {
1499         if (rep == MachineRepresentation::kFloat32) {
1500           fixed_fp_register_use_->Add(index);
1501         } else {
1502           fixed_simd128_register_use_->Add(index);
1503         }
1504       } else {
1505         int alias_base_index = -1;
1506         int aliases = config()->GetAliases(
1507             rep, index, MachineRepresentation::kFloat64, &alias_base_index);
1508         DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
1509         while (aliases--) {
1510           int aliased_reg = alias_base_index + aliases;
1511           fixed_fp_register_use_->Add(aliased_reg);
1512         }
1513       }
1514       break;
1515     case MachineRepresentation::kFloat64:
1516       fixed_fp_register_use_->Add(index);
1517       break;
1518     default:
1519       DCHECK(!IsFloatingPoint(rep));
1520       fixed_register_use_->Add(index);
1521       break;
1522   }
1523 }
1524 
1525 bool TopTierRegisterAllocationData::HasFixedUse(MachineRepresentation rep,
1526                                                 int index) {
1527   switch (rep) {
1528     case MachineRepresentation::kFloat32:
1529     case MachineRepresentation::kSimd128: {
1530       if (kFPAliasing == AliasingKind::kOverlap) {
1531         return fixed_fp_register_use_->Contains(index);
1532       } else if (kFPAliasing == AliasingKind::kIndependent) {
1533         if (rep == MachineRepresentation::kFloat32) {
1534           return fixed_fp_register_use_->Contains(index);
1535         } else {
1536           return fixed_simd128_register_use_->Contains(index);
1537         }
1538       } else {
1539         int alias_base_index = -1;
1540         int aliases = config()->GetAliases(
1541             rep, index, MachineRepresentation::kFloat64, &alias_base_index);
1542         DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
1543         bool result = false;
1544         while (aliases-- && !result) {
1545           int aliased_reg = alias_base_index + aliases;
1546           result |= fixed_fp_register_use_->Contains(aliased_reg);
1547         }
1548         return result;
1549       }
1550     }
1551     case MachineRepresentation::kFloat64:
1552       return fixed_fp_register_use_->Contains(index);
1553     default:
1554       DCHECK(!IsFloatingPoint(rep));
1555       return fixed_register_use_->Contains(index);
1556   }
1557 }
1558 
1559 void TopTierRegisterAllocationData::MarkAllocated(MachineRepresentation rep,
1560                                                   int index) {
1561   switch (rep) {
1562     case MachineRepresentation::kFloat32:
1563     case MachineRepresentation::kSimd128:
1564       if (kFPAliasing == AliasingKind::kOverlap) {
1565         assigned_double_registers_->Add(index);
1566       } else if (kFPAliasing == AliasingKind::kIndependent) {
1567         if (rep == MachineRepresentation::kFloat32) {
1568           assigned_double_registers_->Add(index);
1569         } else {
1570           assigned_simd128_registers_->Add(index);
1571         }
1572       } else {
1573         int alias_base_index = -1;
1574         int aliases = config()->GetAliases(
1575             rep, index, MachineRepresentation::kFloat64, &alias_base_index);
1576         DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
1577         while (aliases--) {
1578           int aliased_reg = alias_base_index + aliases;
1579           assigned_double_registers_->Add(aliased_reg);
1580         }
1581       }
1582       break;
1583     case MachineRepresentation::kFloat64:
1584       assigned_double_registers_->Add(index);
1585       break;
1586     default:
1587       DCHECK(!IsFloatingPoint(rep));
1588       assigned_registers_->Add(index);
1589       break;
1590   }
1591 }
1592 
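// A position is a block boundary if it is a full gap start that coincides
// with the first instruction of its block, or lies one past the last
// instruction of the sequence.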
1593 bool TopTierRegisterAllocationData::IsBlockBoundary(
1594     LifetimePosition pos) const {
1595   return pos.IsFullStart() &&
1596          (static_cast<size_t>(pos.ToInstructionIndex()) ==
1597               code()->instructions().size() ||
1598           code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
1599               pos.ToInstructionIndex());
1600 }
1601 
1602 ConstraintBuilder::ConstraintBuilder(TopTierRegisterAllocationData* data)
1603     : data_(data) {}
1604 
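// Rewrites an operand with a fixed policy (stack slot, general register, or
// FP register) into the corresponding AllocatedOperand. Fixed register inputs
// are recorded via MarkFixedUse, and tagged fixed operands are added to the
// instruction's reference map when one is present.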
1605 InstructionOperand* ConstraintBuilder::AllocateFixed(
1606     UnallocatedOperand* operand, int pos, bool is_tagged, bool is_input) {
1607   TRACE("Allocating fixed reg for op %d\n", operand->virtual_register());
1608   DCHECK(operand->HasFixedPolicy());
1609   InstructionOperand allocated;
1610   MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
1611   int virtual_register = operand->virtual_register();
1612   if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
1613     rep = data()->RepresentationFor(virtual_register);
1614   }
1615   if (operand->HasFixedSlotPolicy()) {
1616     allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT, rep,
1617                                  operand->fixed_slot_index());
1618   } else if (operand->HasFixedRegisterPolicy()) {
1619     DCHECK(!IsFloatingPoint(rep));
1620     DCHECK(data()->config()->IsAllocatableGeneralCode(
1621         operand->fixed_register_index()));
1622     allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
1623                                  operand->fixed_register_index());
1624   } else if (operand->HasFixedFPRegisterPolicy()) {
1625     DCHECK(IsFloatingPoint(rep));
1626     DCHECK_NE(InstructionOperand::kInvalidVirtualRegister, virtual_register);
1627     allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
1628                                  operand->fixed_register_index());
1629   } else {
1630     UNREACHABLE();
1631   }
1632   if (is_input && allocated.IsAnyRegister()) {
1633     data()->MarkFixedUse(rep, operand->fixed_register_index());
1634   }
1635   InstructionOperand::ReplaceWith(operand, &allocated);
1636   if (is_tagged) {
1637     TRACE("Fixed reg is tagged at %d\n", pos);
1638     Instruction* instr = code()->InstructionAt(pos);
1639     if (instr->HasReferenceMap()) {
1640       instr->reference_map()->RecordReference(*AllocatedOperand::cast(operand));
1641     }
1642   }
1643   return operand;
1644 }
1645 
1646 void ConstraintBuilder::MeetRegisterConstraints() {
1647   for (InstructionBlock* block : code()->instruction_blocks()) {
1648     data_->tick_counter()->TickAndMaybeEnterSafepoint();
1649     MeetRegisterConstraints(block);
1650   }
1651 }
1652 
1653 void ConstraintBuilder::MeetRegisterConstraints(const InstructionBlock* block) {
1654   int start = block->first_instruction_index();
1655   int end = block->last_instruction_index();
1656   DCHECK_NE(-1, start);
1657   for (int i = start; i <= end; ++i) {
1658     MeetConstraintsBefore(i);
1659     if (i != end) MeetConstraintsAfter(i);
1660   }
1661   // Meet register constraints for the last instruction in the block.
1662   MeetRegisterConstraintsForLastInstructionInBlock(block);
1663 }
1664 
1665 void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
1666     const InstructionBlock* block) {
1667   int end = block->last_instruction_index();
1668   Instruction* last_instruction = code()->InstructionAt(end);
1669   for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
1670     InstructionOperand* output_operand = last_instruction->OutputAt(i);
1671     DCHECK(!output_operand->IsConstant());
1672     UnallocatedOperand* output = UnallocatedOperand::cast(output_operand);
1673     int output_vreg = output->virtual_register();
1674     TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
1675     bool assigned = false;
1676     if (output->HasFixedPolicy()) {
1677       AllocateFixed(output, -1, false, false);
1678       // This value is produced on the stack; we never need to spill it.
1679       if (output->IsStackSlot()) {
1680         DCHECK(LocationOperand::cast(output)->index() <
1681                data()->frame()->GetSpillSlotCount());
1682         range->SetSpillOperand(LocationOperand::cast(output));
1683         range->SetSpillStartIndex(end);
1684         assigned = true;
1685       }
1686 
1687       for (const RpoNumber& succ : block->successors()) {
1688         const InstructionBlock* successor = code()->InstructionBlockAt(succ);
1689         DCHECK_EQ(1, successor->PredecessorCount());
1690         int gap_index = successor->first_instruction_index();
1691         // Create an unconstrained operand for the same virtual register
1692         // and insert a gap move from the fixed output to the operand.
1693         UnallocatedOperand output_copy(UnallocatedOperand::REGISTER_OR_SLOT,
1694                                        output_vreg);
1695         data()->AddGapMove(gap_index, Instruction::START, *output, output_copy);
1696       }
1697     }
1698 
1699     if (!assigned) {
1700       for (const RpoNumber& succ : block->successors()) {
1701         const InstructionBlock* successor = code()->InstructionBlockAt(succ);
1702         DCHECK_EQ(1, successor->PredecessorCount());
1703         int gap_index = successor->first_instruction_index();
1704         range->RecordSpillLocation(allocation_zone(), gap_index, output);
1705         range->SetSpillStartIndex(gap_index);
1706       }
1707     }
1708   }
1709 }
1710 
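// Constraints that take effect after the instruction at instr_index: fixed
// temps are allocated in place, constant outputs become the range's spill
// operand, and fixed outputs are copied into an unconstrained operand via a
// gap move in the START gap of the following instruction.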
1711 void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
1712   Instruction* first = code()->InstructionAt(instr_index);
1713   // Handle fixed temporaries.
1714   for (size_t i = 0; i < first->TempCount(); i++) {
1715     UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i));
1716     if (temp->HasFixedPolicy()) AllocateFixed(temp, instr_index, false, false);
1717   }
1718   // Handle constant/fixed output operands.
1719   for (size_t i = 0; i < first->OutputCount(); i++) {
1720     InstructionOperand* output = first->OutputAt(i);
1721     if (output->IsConstant()) {
1722       int output_vreg = ConstantOperand::cast(output)->virtual_register();
1723       TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
1724       range->SetSpillStartIndex(instr_index + 1);
1725       range->SetSpillOperand(output);
1726       continue;
1727     }
1728     UnallocatedOperand* first_output = UnallocatedOperand::cast(output);
1729     TopLevelLiveRange* range =
1730         data()->GetOrCreateLiveRangeFor(first_output->virtual_register());
1731     bool assigned = false;
1732     if (first_output->HasFixedPolicy()) {
1733       int output_vreg = first_output->virtual_register();
1734       UnallocatedOperand output_copy(UnallocatedOperand::REGISTER_OR_SLOT,
1735                                      output_vreg);
1736       bool is_tagged = code()->IsReference(output_vreg);
1737       if (first_output->HasSecondaryStorage()) {
1738         range->MarkHasPreassignedSlot();
1739         data()->preassigned_slot_ranges().push_back(
1740             std::make_pair(range, first_output->GetSecondaryStorage()));
1741       }
1742       AllocateFixed(first_output, instr_index, is_tagged, false);
1743 
1744       // This value is produced on the stack; we never need to spill it.
1745       if (first_output->IsStackSlot()) {
1746         DCHECK(LocationOperand::cast(first_output)->index() <
1747                data()->frame()->GetTotalFrameSlotCount());
1748         range->SetSpillOperand(LocationOperand::cast(first_output));
1749         range->SetSpillStartIndex(instr_index + 1);
1750         assigned = true;
1751       }
1752       data()->AddGapMove(instr_index + 1, Instruction::START, *first_output,
1753                          output_copy);
1754     }
1755     // Make sure we add a gap move for spilling (if we have not done
1756     // so already).
1757     if (!assigned) {
1758       range->RecordSpillLocation(allocation_zone(), instr_index + 1,
1759                                  first_output);
1760       range->SetSpillStartIndex(instr_index + 1);
1761     }
1762   }
1763 }
1764 
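// Constraints that take effect before the instruction at instr_index:
// constants feeding slot-policy inputs are spilled to a dedicated stack slot,
// fixed inputs get a gap move in this instruction's END gap, and "same as
// input" outputs are rewritten so the input carries the output's virtual
// register.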
1765 void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
1766   Instruction* second = code()->InstructionAt(instr_index);
1767   // Handle fixed input operands of second instruction.
1768   ZoneVector<TopLevelLiveRange*>* spilled_consts = nullptr;
1769   for (size_t i = 0; i < second->InputCount(); i++) {
1770     InstructionOperand* input = second->InputAt(i);
1771     if (input->IsImmediate()) {
1772       continue;  // Ignore immediates.
1773     }
1774     UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
1775     if (cur_input->HasSlotPolicy()) {
1776       TopLevelLiveRange* range =
1777           data()->GetOrCreateLiveRangeFor(cur_input->virtual_register());
1778       if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
1779         bool already_spilled = false;
1780         if (spilled_consts == nullptr) {
1781           spilled_consts =
1782               allocation_zone()->New<ZoneVector<TopLevelLiveRange*>>(
1783                   allocation_zone());
1784         } else {
1785           auto it =
1786               std::find(spilled_consts->begin(), spilled_consts->end(), range);
1787           already_spilled = it != spilled_consts->end();
1788         }
1789         auto it = data()->slot_for_const_range().find(range);
1790         if (it == data()->slot_for_const_range().end()) {
1791           DCHECK(!already_spilled);
1792           int width = ByteWidthForStackSlot(range->representation());
1793           int index = data()->frame()->AllocateSpillSlot(width);
1794           auto* slot = AllocatedOperand::New(allocation_zone(),
1795                                              LocationOperand::STACK_SLOT,
1796                                              range->representation(), index);
1797           it = data()->slot_for_const_range().emplace(range, slot).first;
1798         }
1799         if (!already_spilled) {
1800           auto* slot = it->second;
1801           int input_vreg = cur_input->virtual_register();
1802           UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
1803                                         input_vreg);
1804           // Spill at every use position for simplicity; this case is very rare.
1805           data()->AddGapMove(instr_index, Instruction::END, input_copy, *slot);
1806           spilled_consts->push_back(range);
1807         }
1808       }
1809     }
1810     if (cur_input->HasFixedPolicy()) {
1811       int input_vreg = cur_input->virtual_register();
1812       UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
1813                                     input_vreg);
1814       bool is_tagged = code()->IsReference(input_vreg);
1815       AllocateFixed(cur_input, instr_index, is_tagged, true);
1816       data()->AddGapMove(instr_index, Instruction::END, input_copy, *cur_input);
1817     }
1818   }
1819   // Handle "output same as input" for second instruction.
1820   for (size_t i = 0; i < second->OutputCount(); i++) {
1821     InstructionOperand* output = second->OutputAt(i);
1822     if (!output->IsUnallocated()) continue;
1823     UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
1824     if (!second_output->HasSameAsInputPolicy()) continue;
1825     DCHECK_EQ(0, i);  // Only valid for first output.
1826     UnallocatedOperand* cur_input =
1827         UnallocatedOperand::cast(second->InputAt(second_output->input_index()));
1828     int output_vreg = second_output->virtual_register();
1829     int input_vreg = cur_input->virtual_register();
1830     UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
1831                                   input_vreg);
1832     *cur_input =
1833         UnallocatedOperand(*cur_input, second_output->virtual_register());
1834     MoveOperands* gap_move = data()->AddGapMove(instr_index, Instruction::END,
1835                                                 input_copy, *cur_input);
1836     DCHECK_NOT_NULL(gap_move);
1837     if (code()->IsReference(input_vreg) && !code()->IsReference(output_vreg)) {
1838       if (second->HasReferenceMap()) {
1839         TopTierRegisterAllocationData::DelayedReference delayed_reference = {
1840             second->reference_map(), &gap_move->source()};
1841         data()->delayed_references().push_back(delayed_reference);
1842       }
1843     }
1844   }
1845 }
1846 
1847 void ConstraintBuilder::ResolvePhis() {
1848   // Process the blocks in reverse order.
1849   for (InstructionBlock* block : base::Reversed(code()->instruction_blocks())) {
1850     data_->tick_counter()->TickAndMaybeEnterSafepoint();
1851     ResolvePhis(block);
1852   }
1853 }
1854 
1855 void ConstraintBuilder::ResolvePhis(const InstructionBlock* block) {
1856   for (PhiInstruction* phi : block->phis()) {
1857     int phi_vreg = phi->virtual_register();
1858     TopTierRegisterAllocationData::PhiMapValue* map_value =
1859         data()->InitializePhiMap(block, phi);
1860     InstructionOperand& output = phi->output();
1861     // Map the destination operands, so the commitment phase can find them.
1862     for (size_t i = 0; i < phi->operands().size(); ++i) {
1863       InstructionBlock* cur_block =
1864           code()->InstructionBlockAt(block->predecessors()[i]);
1865       UnallocatedOperand input(UnallocatedOperand::REGISTER_OR_SLOT,
1866                                phi->operands()[i]);
1867       MoveOperands* move = data()->AddGapMove(
1868           cur_block->last_instruction_index(), Instruction::END, input, output);
1869       map_value->AddOperand(&move->destination());
1870       DCHECK(!code()
1871                   ->InstructionAt(cur_block->last_instruction_index())
1872                   ->HasReferenceMap());
1873     }
1874     TopLevelLiveRange* live_range = data()->GetOrCreateLiveRangeFor(phi_vreg);
1875     int gap_index = block->first_instruction_index();
1876     live_range->RecordSpillLocation(allocation_zone(), gap_index, &output);
1877     live_range->SetSpillStartIndex(gap_index);
1878     // We use the phi-ness of some nodes in some later heuristics.
1879     live_range->set_is_phi(true);
1880     live_range->set_is_non_loop_phi(!block->IsLoopHeader());
1881   }
1882 }
1883 
1884 LiveRangeBuilder::LiveRangeBuilder(TopTierRegisterAllocationData* data,
1885                                    Zone* local_zone)
1886     : data_(data), phi_hints_(local_zone) {}
1887 
1888 BitVector* LiveRangeBuilder::ComputeLiveOut(
1889     const InstructionBlock* block, TopTierRegisterAllocationData* data) {
1890   size_t block_index = block->rpo_number().ToSize();
1891   BitVector* live_out = data->live_out_sets()[block_index];
1892   if (live_out == nullptr) {
1893     // Compute live out for the given block, not taking backward successor
1894     // edges into account.
1895     Zone* zone = data->allocation_zone();
1896     const InstructionSequence* code = data->code();
1897 
1898     live_out = zone->New<BitVector>(code->VirtualRegisterCount(), zone);
1899 
1900     // Process all successor blocks.
1901     for (const RpoNumber& succ : block->successors()) {
1902       // Add values live on entry to the successor.
1903       if (succ <= block->rpo_number()) continue;
1904       BitVector* live_in = data->live_in_sets()[succ.ToSize()];
1905       if (live_in != nullptr) live_out->Union(*live_in);
1906 
1907       // All phi input operands corresponding to this successor edge are live
1908       // out from this block.
1909       const InstructionBlock* successor = code->InstructionBlockAt(succ);
1910       size_t index = successor->PredecessorIndexOf(block->rpo_number());
1911       DCHECK(index < successor->PredecessorCount());
1912       for (PhiInstruction* phi : successor->phis()) {
1913         live_out->Add(phi->operands()[index]);
1914       }
1915     }
1916     data->live_out_sets()[block_index] = live_out;
1917   }
1918   return live_out;
1919 }
1920 
1921 void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block,
1922                                            BitVector* live_out) {
1923   // Add an interval that includes the entire block to the live range for
1924   // each live_out value.
1925   LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
1926       block->first_instruction_index());
1927   LifetimePosition end = LifetimePosition::InstructionFromInstructionIndex(
1928                              block->last_instruction_index())
1929                              .NextStart();
1930   for (int operand_index : *live_out) {
1931     TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
1932     range->AddUseInterval(start, end, allocation_zone(),
1933                           data()->is_trace_alloc());
1934   }
1935 }
1936 
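// Fixed FP ranges are keyed by negative IDs, partitioned by representation:
// kFloat64 IDs follow the general registers, kFloat32 IDs follow the doubles,
// and kSimd128 IDs follow the floats (hence the intentional fallthroughs).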
1937 int LiveRangeBuilder::FixedFPLiveRangeID(int index, MachineRepresentation rep) {
1938   int result = -index - 1;
1939   switch (rep) {
1940     case MachineRepresentation::kSimd128:
1941       result -=
1942           kNumberOfFixedRangesPerRegister * config()->num_float_registers();
1943       V8_FALLTHROUGH;
1944     case MachineRepresentation::kFloat32:
1945       result -=
1946           kNumberOfFixedRangesPerRegister * config()->num_double_registers();
1947       V8_FALLTHROUGH;
1948     case MachineRepresentation::kFloat64:
1949       result -=
1950           kNumberOfFixedRangesPerRegister * config()->num_general_registers();
1951       break;
1952     default:
1953       UNREACHABLE();
1954   }
1955   return result;
1956 }
1957 
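// Each general register has two fixed ranges, one for spilling at definition
// and one for deferred spilling; the spill mode selects which half of the
// fixed_live_ranges table is used.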
1958 TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index,
1959                                                        SpillMode spill_mode) {
1960   int offset = spill_mode == SpillMode::kSpillAtDefinition
1961                    ? 0
1962                    : config()->num_general_registers();
1963   DCHECK(index < config()->num_general_registers());
1964   TopLevelLiveRange* result = data()->fixed_live_ranges()[offset + index];
1965   if (result == nullptr) {
1966     MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
1967     result = data()->NewLiveRange(FixedLiveRangeID(offset + index), rep);
1968     DCHECK(result->IsFixed());
1969     result->set_assigned_register(index);
1970     data()->MarkAllocated(rep, index);
1971     if (spill_mode == SpillMode::kSpillDeferred) {
1972       result->set_deferred_fixed();
1973     }
1974     data()->fixed_live_ranges()[offset + index] = result;
1975   }
1976   return result;
1977 }
1978 
1979 TopLevelLiveRange* LiveRangeBuilder::FixedFPLiveRangeFor(
1980     int index, MachineRepresentation rep, SpillMode spill_mode) {
1981   int num_regs = config()->num_double_registers();
1982   ZoneVector<TopLevelLiveRange*>* live_ranges =
1983       &data()->fixed_double_live_ranges();
1984   if (kFPAliasing == AliasingKind::kCombine) {
1985     switch (rep) {
1986       case MachineRepresentation::kFloat32:
1987         num_regs = config()->num_float_registers();
1988         live_ranges = &data()->fixed_float_live_ranges();
1989         break;
1990       case MachineRepresentation::kSimd128:
1991         num_regs = config()->num_simd128_registers();
1992         live_ranges = &data()->fixed_simd128_live_ranges();
1993         break;
1994       default:
1995         break;
1996     }
1997   }
1998 
1999   int offset = spill_mode == SpillMode::kSpillAtDefinition ? 0 : num_regs;
2000 
2001   DCHECK(index < num_regs);
2002   USE(num_regs);
2003   TopLevelLiveRange* result = (*live_ranges)[offset + index];
2004   if (result == nullptr) {
2005     result = data()->NewLiveRange(FixedFPLiveRangeID(offset + index, rep), rep);
2006     DCHECK(result->IsFixed());
2007     result->set_assigned_register(index);
2008     data()->MarkAllocated(rep, index);
2009     if (spill_mode == SpillMode::kSpillDeferred) {
2010       result->set_deferred_fixed();
2011     }
2012     (*live_ranges)[offset + index] = result;
2013   }
2014   return result;
2015 }
2016 
2017 TopLevelLiveRange* LiveRangeBuilder::FixedSIMD128LiveRangeFor(
2018     int index, SpillMode spill_mode) {
2019   DCHECK_EQ(kFPAliasing, AliasingKind::kIndependent);
2020   int num_regs = config()->num_simd128_registers();
2021   ZoneVector<TopLevelLiveRange*>* live_ranges =
2022       &data()->fixed_simd128_live_ranges();
2023   int offset = spill_mode == SpillMode::kSpillAtDefinition ? 0 : num_regs;
2024 
2025   DCHECK(index < num_regs);
2026   USE(num_regs);
2027   TopLevelLiveRange* result = (*live_ranges)[offset + index];
2028   if (result == nullptr) {
2029     result = data()->NewLiveRange(
2030         FixedFPLiveRangeID(offset + index, MachineRepresentation::kSimd128),
2031         MachineRepresentation::kSimd128);
2032     DCHECK(result->IsFixed());
2033     result->set_assigned_register(index);
2034     data()->MarkAllocated(MachineRepresentation::kSimd128, index);
2035     if (spill_mode == SpillMode::kSpillDeferred) {
2036       result->set_deferred_fixed();
2037     }
2038     (*live_ranges)[offset + index] = result;
2039   }
2040   return result;
2041 }
2042 
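// Maps an operand to the live range it belongs to: unallocated and constant
// operands use their virtual register, fixed general/FP register operands use
// the matching fixed range, and anything else (e.g. immediates) yields
// nullptr.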
2043 TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand,
2044                                                   SpillMode spill_mode) {
2045   if (operand->IsUnallocated()) {
2046     return data()->GetOrCreateLiveRangeFor(
2047         UnallocatedOperand::cast(operand)->virtual_register());
2048   } else if (operand->IsConstant()) {
2049     return data()->GetOrCreateLiveRangeFor(
2050         ConstantOperand::cast(operand)->virtual_register());
2051   } else if (operand->IsRegister()) {
2052     return FixedLiveRangeFor(
2053         LocationOperand::cast(operand)->GetRegister().code(), spill_mode);
2054   } else if (operand->IsFPRegister()) {
2055     LocationOperand* op = LocationOperand::cast(operand);
2056     if (kFPAliasing == AliasingKind::kIndependent &&
2057         op->representation() == MachineRepresentation::kSimd128) {
2058       return FixedSIMD128LiveRangeFor(op->register_code(), spill_mode);
2059     }
2060     return FixedFPLiveRangeFor(op->register_code(), op->representation(),
2061                                spill_mode);
2062   } else {
2063     return nullptr;
2064   }
2065 }
2066 
2067 UsePosition* LiveRangeBuilder::NewUsePosition(LifetimePosition pos,
2068                                               InstructionOperand* operand,
2069                                               void* hint,
2070                                               UsePositionHintType hint_type) {
2071   return allocation_zone()->New<UsePosition>(pos, operand, hint, hint_type);
2072 }
2073 
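// Records a definition at `position`: an existing range is shortened to start
// there, while a definition without any use gets a minimal one-step interval.
// For unallocated operands, a UsePosition carrying the hint is added and
// returned.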
2074 UsePosition* LiveRangeBuilder::Define(LifetimePosition position,
2075                                       InstructionOperand* operand, void* hint,
2076                                       UsePositionHintType hint_type,
2077                                       SpillMode spill_mode) {
2078   TopLevelLiveRange* range = LiveRangeFor(operand, spill_mode);
2079   if (range == nullptr) return nullptr;
2080 
2081   if (range->IsEmpty() || range->Start() > position) {
2082     // Can happen if there is a definition without a use.
2083     range->AddUseInterval(position, position.NextStart(), allocation_zone(),
2084                           data()->is_trace_alloc());
2085     range->AddUsePosition(NewUsePosition(position.NextStart()),
2086                           data()->is_trace_alloc());
2087   } else {
2088     range->ShortenTo(position, data()->is_trace_alloc());
2089   }
2090   if (!operand->IsUnallocated()) return nullptr;
2091   UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
2092   UsePosition* use_pos =
2093       NewUsePosition(position, unalloc_operand, hint, hint_type);
2094   range->AddUsePosition(use_pos, data()->is_trace_alloc());
2095   return use_pos;
2096 }
2097 
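// Records a use at `position`: the range's interval is extended back to
// `block_start`, and unallocated operands additionally get a UsePosition
// carrying the hint.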
2098 UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start,
2099                                    LifetimePosition position,
2100                                    InstructionOperand* operand, void* hint,
2101                                    UsePositionHintType hint_type,
2102                                    SpillMode spill_mode) {
2103   TopLevelLiveRange* range = LiveRangeFor(operand, spill_mode);
2104   if (range == nullptr) return nullptr;
2105   UsePosition* use_pos = nullptr;
2106   if (operand->IsUnallocated()) {
2107     UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
2108     use_pos = NewUsePosition(position, unalloc_operand, hint, hint_type);
2109     range->AddUsePosition(use_pos, data()->is_trace_alloc());
2110   }
2111   range->AddUseInterval(block_start, position, allocation_zone(),
2112                         data()->is_trace_alloc());
2113   return use_pos;
2114 }
2115 
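// Walks the block's instructions in reverse order: outputs are Define()d and
// removed from the live set, inputs and gap-move sources are Use()d and added
// to it, and register-clobbering instructions pin one-instruction intervals
// on the fixed ranges they clobber.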
2116 void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
2117                                            BitVector* live) {
2118   int block_start = block->first_instruction_index();
2119   LifetimePosition block_start_position =
2120       LifetimePosition::GapFromInstructionIndex(block_start);
2121   bool fixed_float_live_ranges = false;
2122   bool fixed_simd128_live_ranges = false;
2123   if (kFPAliasing == AliasingKind::kCombine) {
2124     int mask = data()->code()->representation_mask();
2125     fixed_float_live_ranges = (mask & kFloat32Bit) != 0;
2126     fixed_simd128_live_ranges = (mask & kSimd128Bit) != 0;
2127   } else if (kFPAliasing == AliasingKind::kIndependent) {
2128     int mask = data()->code()->representation_mask();
2129     fixed_simd128_live_ranges = (mask & kSimd128Bit) != 0;
2130   }
2131   SpillMode spill_mode = SpillModeForBlock(block);
2132 
2133   for (int index = block->last_instruction_index(); index >= block_start;
2134        index--) {
2135     LifetimePosition curr_position =
2136         LifetimePosition::InstructionFromInstructionIndex(index);
2137     Instruction* instr = code()->InstructionAt(index);
2138     DCHECK_NOT_NULL(instr);
2139     DCHECK(curr_position.IsInstructionPosition());
2140     // Process output, inputs, and temps of this instruction.
2141     for (size_t i = 0; i < instr->OutputCount(); i++) {
2142       InstructionOperand* output = instr->OutputAt(i);
2143       if (output->IsUnallocated()) {
2144         // Unsupported.
2145         DCHECK(!UnallocatedOperand::cast(output)->HasSlotPolicy());
2146         int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
2147         live->Remove(out_vreg);
2148       } else if (output->IsConstant()) {
2149         int out_vreg = ConstantOperand::cast(output)->virtual_register();
2150         live->Remove(out_vreg);
2151       }
2152       if (block->IsHandler() && index == block_start && output->IsAllocated() &&
2153           output->IsRegister() &&
2154           AllocatedOperand::cast(output)->GetRegister() ==
2155               v8::internal::kReturnRegister0) {
2156         // The register defined here is blocked from gap start - it is the
2157         // exception value.
2158         // TODO(mtrofin): should we explore an explicit opcode for
2159         // the first instruction in the handler?
2160         Define(LifetimePosition::GapFromInstructionIndex(index), output,
2161                spill_mode);
2162       } else {
2163         Define(curr_position, output, spill_mode);
2164       }
2165     }
2166 
2167     if (instr->ClobbersRegisters()) {
2168       for (int i = 0; i < config()->num_allocatable_general_registers(); ++i) {
2169         // Create a UseInterval at this instruction for all fixed registers,
2170         // (including the instruction outputs). Adding another UseInterval here
2171         // is OK because AddUseInterval will just merge it with the existing
2172         // one at the end of the range.
2173         int code = config()->GetAllocatableGeneralCode(i);
2174         TopLevelLiveRange* range = FixedLiveRangeFor(code, spill_mode);
2175         range->AddUseInterval(curr_position, curr_position.End(),
2176                               allocation_zone(), data()->is_trace_alloc());
2177       }
2178     }
2179 
2180     if (instr->ClobbersDoubleRegisters()) {
2181       for (int i = 0; i < config()->num_allocatable_double_registers(); ++i) {
2182         // Add a UseInterval for all DoubleRegisters. See comment above for
2183         // general registers.
2184         int code = config()->GetAllocatableDoubleCode(i);
2185         TopLevelLiveRange* range = FixedFPLiveRangeFor(
2186             code, MachineRepresentation::kFloat64, spill_mode);
2187         range->AddUseInterval(curr_position, curr_position.End(),
2188                               allocation_zone(), data()->is_trace_alloc());
2189       }
2190       // Clobber fixed float registers on archs with non-simple aliasing.
2191       if (kFPAliasing == AliasingKind::kCombine) {
2192         if (fixed_float_live_ranges) {
2193           for (int i = 0; i < config()->num_allocatable_float_registers();
2194                ++i) {
2195             // Add a UseInterval for all FloatRegisters. See comment above for
2196             // general registers.
2197             int code = config()->GetAllocatableFloatCode(i);
2198             TopLevelLiveRange* range = FixedFPLiveRangeFor(
2199                 code, MachineRepresentation::kFloat32, spill_mode);
2200             range->AddUseInterval(curr_position, curr_position.End(),
2201                                   allocation_zone(), data()->is_trace_alloc());
2202           }
2203         }
2204         if (fixed_simd128_live_ranges) {
2205           for (int i = 0; i < config()->num_allocatable_simd128_registers();
2206                ++i) {
2207             int code = config()->GetAllocatableSimd128Code(i);
2208             TopLevelLiveRange* range = FixedFPLiveRangeFor(
2209                 code, MachineRepresentation::kSimd128, spill_mode);
2210             range->AddUseInterval(curr_position, curr_position.End(),
2211                                   allocation_zone(), data()->is_trace_alloc());
2212           }
2213         }
2214       } else if (kFPAliasing == AliasingKind::kIndependent) {
2215         if (fixed_simd128_live_ranges) {
2216           for (int i = 0; i < config()->num_allocatable_simd128_registers();
2217                ++i) {
2218             int code = config()->GetAllocatableSimd128Code(i);
2219             TopLevelLiveRange* range =
2220                 FixedSIMD128LiveRangeFor(code, spill_mode);
2221             range->AddUseInterval(curr_position, curr_position.End(),
2222                                   allocation_zone(), data()->is_trace_alloc());
2223           }
2224         }
2225       }
2226     }
2227 
2228     for (size_t i = 0; i < instr->InputCount(); i++) {
2229       InstructionOperand* input = instr->InputAt(i);
2230       if (input->IsImmediate()) {
2231         continue;  // Ignore immediates.
2232       }
2233       LifetimePosition use_pos;
2234       if (input->IsUnallocated() &&
2235           UnallocatedOperand::cast(input)->IsUsedAtStart()) {
2236         use_pos = curr_position;
2237       } else {
2238         use_pos = curr_position.End();
2239       }
2240 
2241       if (input->IsUnallocated()) {
2242         UnallocatedOperand* unalloc = UnallocatedOperand::cast(input);
2243         int vreg = unalloc->virtual_register();
2244         live->Add(vreg);
2245         if (unalloc->HasSlotPolicy()) {
2246           data()->GetOrCreateLiveRangeFor(vreg)->register_slot_use(
2247               block->IsDeferred()
2248                   ? TopLevelLiveRange::SlotUseKind::kDeferredSlotUse
2249                   : TopLevelLiveRange::SlotUseKind::kGeneralSlotUse);
2250         }
2251       }
2252       Use(block_start_position, use_pos, input, spill_mode);
2253     }
2254 
2255     for (size_t i = 0; i < instr->TempCount(); i++) {
2256       InstructionOperand* temp = instr->TempAt(i);
2257       // Unsupported.
2258       DCHECK_IMPLIES(temp->IsUnallocated(),
2259                      !UnallocatedOperand::cast(temp)->HasSlotPolicy());
2260       if (instr->ClobbersTemps()) {
2261         if (temp->IsRegister()) continue;
2262         if (temp->IsUnallocated()) {
2263           UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp);
2264           if (temp_unalloc->HasFixedPolicy()) {
2265             continue;
2266           }
2267         }
2268       }
2269       Use(block_start_position, curr_position.End(), temp, spill_mode);
2270       Define(curr_position, temp, spill_mode);
2271     }
2272 
2273     // Process the moves of the instruction's gaps, making their sources live.
2274     const Instruction::GapPosition kPositions[] = {Instruction::END,
2275                                                    Instruction::START};
2276     curr_position = curr_position.PrevStart();
2277     DCHECK(curr_position.IsGapPosition());
2278     for (const Instruction::GapPosition& position : kPositions) {
2279       ParallelMove* move = instr->GetParallelMove(position);
2280       if (move == nullptr) continue;
2281       if (position == Instruction::END) {
2282         curr_position = curr_position.End();
2283       } else {
2284         curr_position = curr_position.Start();
2285       }
2286       for (MoveOperands* cur : *move) {
2287         InstructionOperand& from = cur->source();
2288         InstructionOperand& to = cur->destination();
2289         void* hint = &to;
2290         UsePositionHintType hint_type = UsePosition::HintTypeForOperand(to);
2291         UsePosition* to_use = nullptr;
2292         int phi_vreg = -1;
2293         if (to.IsUnallocated()) {
2294           int to_vreg = UnallocatedOperand::cast(to).virtual_register();
2295           TopLevelLiveRange* to_range =
2296               data()->GetOrCreateLiveRangeFor(to_vreg);
2297           if (to_range->is_phi()) {
2298             phi_vreg = to_vreg;
2299             if (to_range->is_non_loop_phi()) {
2300               hint = to_range->current_hint_position();
2301               hint_type = hint == nullptr ? UsePositionHintType::kNone
2302                                           : UsePositionHintType::kUsePos;
2303             } else {
2304               hint_type = UsePositionHintType::kPhi;
2305               hint = data()->GetPhiMapValueFor(to_vreg);
2306             }
2307           } else {
2308             if (live->Contains(to_vreg)) {
2309               to_use =
2310                   Define(curr_position, &to, &from,
2311                          UsePosition::HintTypeForOperand(from), spill_mode);
2312               live->Remove(to_vreg);
2313             } else {
2314               cur->Eliminate();
2315               continue;
2316             }
2317           }
2318         } else {
2319           Define(curr_position, &to, spill_mode);
2320         }
2321         UsePosition* from_use = Use(block_start_position, curr_position, &from,
2322                                     hint, hint_type, spill_mode);
2323         // Mark range live.
2324         if (from.IsUnallocated()) {
2325           live->Add(UnallocatedOperand::cast(from).virtual_register());
2326         }
2327         // When the value is moved to a register to meet input constraints,
2328         // we should treat this use like a register use in the backward
2329         // spilling heuristics, even though the use is not register
2330         // beneficial at the AllocateBlockedReg stage.
2331         if (to.IsAnyRegister() ||
2332             (to.IsUnallocated() &&
2333              UnallocatedOperand::cast(&to)->HasRegisterPolicy())) {
2334           from_use->set_spill_detrimental();
2335         }
2336         // Resolve use position hints just created.
2337         if (to_use != nullptr && from_use != nullptr) {
2338           to_use->ResolveHint(from_use);
2339           from_use->ResolveHint(to_use);
2340         }
2341         DCHECK_IMPLIES(to_use != nullptr, to_use->IsResolved());
2342         DCHECK_IMPLIES(from_use != nullptr, from_use->IsResolved());
2343         // Potentially resolve phi hint.
2344         if (phi_vreg != -1) ResolvePhiHint(&from, from_use);
2345       }
2346     }
2347   }
2348 }
2349 
2350 void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
2351                                    BitVector* live) {
2352   for (PhiInstruction* phi : block->phis()) {
2353     // The live range interval already ends at the first instruction of the
2354     // block.
2355     int phi_vreg = phi->virtual_register();
2356     live->Remove(phi_vreg);
2357     // Select a hint from a predecessor block that precedes this block in the
2358     // rpo order. In order of priority:
2359     // - Avoid hints from deferred blocks.
2360     // - Prefer hints from allocated (or explicit) operands.
2361     // - Prefer hints from empty blocks (containing just parallel moves and a
2362     //   jump). In these cases, if we can elide the moves, the jump threader
2363     //   is likely to be able to elide the jump.
2364     // The enforcement of hinting in rpo order is required because hint
2365     // resolution that happens later in the compiler pipeline visits
2366     // instructions in reverse rpo order, relying on the fact that phis are
2367     // encountered before their hints.
2368     InstructionOperand* hint = nullptr;
2369     int hint_preference = 0;
2370 
2371     // The cost of hinting increases with the number of predecessors. At the
2372     // same time, the typical benefit decreases, since this hinting only
2373     // optimises the execution path through one predecessor. A limit of 2 is
2374     // sufficient to hit the common if/else pattern.
2375     int predecessor_limit = 2;
2376 
2377     for (RpoNumber predecessor : block->predecessors()) {
2378       const InstructionBlock* predecessor_block =
2379           code()->InstructionBlockAt(predecessor);
2380       DCHECK_EQ(predecessor_block->rpo_number(), predecessor);
2381 
2382       // Only take hints from earlier rpo numbers.
2383       if (predecessor >= block->rpo_number()) continue;
2384 
2385       // Look up the predecessor instruction.
2386       const Instruction* predecessor_instr =
2387           GetLastInstruction(code(), predecessor_block);
2388       InstructionOperand* predecessor_hint = nullptr;
2389       // Phis are assigned in the END position of the last instruction in each
2390       // predecessor block.
2391       for (MoveOperands* move :
2392            *predecessor_instr->GetParallelMove(Instruction::END)) {
2393         InstructionOperand& to = move->destination();
2394         if (to.IsUnallocated() &&
2395             UnallocatedOperand::cast(to).virtual_register() == phi_vreg) {
2396           predecessor_hint = &move->source();
2397           break;
2398         }
2399       }
2400       DCHECK_NOT_NULL(predecessor_hint);
2401 
2402       // For each predecessor, generate a score according to the priorities
2403       // described above, and pick the best one. Flags in higher-order bits have
2404       // a higher priority than those in lower-order bits.
2405       int predecessor_hint_preference = 0;
2406       const int kNotDeferredBlockPreference = (1 << 2);
2407       const int kMoveIsAllocatedPreference = (1 << 1);
2408       const int kBlockIsEmptyPreference = (1 << 0);
2409 
2410       // - Avoid hints from deferred blocks.
2411       if (!predecessor_block->IsDeferred()) {
2412         predecessor_hint_preference |= kNotDeferredBlockPreference;
2413       }
2414 
2415       // - Prefer hints from allocated operands.
2416       //
2417       // Already-allocated operands are typically assigned using the parallel
2418       // moves on the last instruction. For example:
2419       //
2420       //      gap (v101 = [x0|R|w32]) (v100 = v101)
2421       //      ArchJmp
2422       //    ...
2423       //    phi: v100 = v101 v102
2424       //
2425       // We have already found the END move, so look for a matching START move
2426       // from an allocated operand.
2427       //
2428       // Note that we cannot simply look up data()->live_ranges()[vreg] here
2429       // because the live ranges are still being built when this function is
2430       // called.
2431       // TODO(v8): Find a way to separate hinting from live range analysis in
2432       // BuildLiveRanges so that we can use the O(1) live-range look-up.
2433       auto moves = predecessor_instr->GetParallelMove(Instruction::START);
2434       if (moves != nullptr) {
2435         for (MoveOperands* move : *moves) {
2436           InstructionOperand& to = move->destination();
2437           if (predecessor_hint->Equals(to)) {
2438             if (move->source().IsAllocated()) {
2439               predecessor_hint_preference |= kMoveIsAllocatedPreference;
2440             }
2441             break;
2442           }
2443         }
2444       }
2445 
2446       // - Prefer hints from empty blocks.
2447       if (predecessor_block->last_instruction_index() ==
2448           predecessor_block->first_instruction_index()) {
2449         predecessor_hint_preference |= kBlockIsEmptyPreference;
2450       }
2451 
2452       if ((hint == nullptr) ||
2453           (predecessor_hint_preference > hint_preference)) {
2454         // Take the hint from this predecessor.
2455         hint = predecessor_hint;
2456         hint_preference = predecessor_hint_preference;
2457       }
2458 
2459       if (--predecessor_limit <= 0) break;
2460     }
2461     DCHECK_NOT_NULL(hint);
2462 
2463     LifetimePosition block_start = LifetimePosition::GapFromInstructionIndex(
2464         block->first_instruction_index());
2465     UsePosition* use_pos = Define(block_start, &phi->output(), hint,
2466                                   UsePosition::HintTypeForOperand(*hint),
2467                                   SpillModeForBlock(block));
2468     MapPhiHint(hint, use_pos);
2469   }
2470 }
2471 
2472 void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block,
2473                                          BitVector* live) {
2474   DCHECK(block->IsLoopHeader());
2475   // Add a live range stretching from the first loop instruction to the last
2476   // for each value live on entry to the header.
2477   LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
2478       block->first_instruction_index());
2479   LifetimePosition end = LifetimePosition::GapFromInstructionIndex(
2480                              code()->LastLoopInstructionIndex(block))
2481                              .NextFullStart();
2482   for (int operand_index : *live) {
2483     TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
2484     range->EnsureInterval(start, end, allocation_zone(),
2485                           data()->is_trace_alloc());
2486   }
2487   // Insert all values into the live in sets of all blocks in the loop.
2488   for (int i = block->rpo_number().ToInt() + 1; i < block->loop_end().ToInt();
2489        ++i) {
2490     live_in_sets()[i]->Union(*live);
2491   }
2492 }
2493 
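// Driver for live range construction: builds intervals and use positions
// block by block in reverse order, then post-processes the ranges (spill
// ranges for slot uses, stricter use positions for constant ranges, and
// preassigned spill slots).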
2494 void LiveRangeBuilder::BuildLiveRanges() {
2495   // Process the blocks in reverse order.
2496   for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
2497        --block_id) {
2498     data_->tick_counter()->TickAndMaybeEnterSafepoint();
2499     InstructionBlock* block =
2500         code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
2501     BitVector* live = ComputeLiveOut(block, data());
2502     // Initially consider all live_out values live for the entire block. We
2503     // will shorten these intervals if necessary.
2504     AddInitialIntervals(block, live);
2505     // Process the instructions in reverse order, generating and killing
2506     // live values.
2507     ProcessInstructions(block, live);
2508     // All phi output operands are killed by this block.
2509     ProcessPhis(block, live);
2510     // Now live is live_in for this block, except that it does not include
2511     // values live out on backward successor edges.
2512     if (block->IsLoopHeader()) ProcessLoopHeader(block, live);
2513     live_in_sets()[block_id] = live;
2514   }
2515   // Postprocess the ranges.
2516   const size_t live_ranges_size = data()->live_ranges().size();
2517   for (TopLevelLiveRange* range : data()->live_ranges()) {
2518     data_->tick_counter()->TickAndMaybeEnterSafepoint();
2519     CHECK_EQ(live_ranges_size,
2520              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
2521     if (range == nullptr) continue;
2522     // Give slots to all ranges with a non-fixed slot use.
2523     if (range->has_slot_use() && range->HasNoSpillType()) {
2524       SpillMode spill_mode =
2525           range->slot_use_kind() ==
2526                   TopLevelLiveRange::SlotUseKind::kDeferredSlotUse
2527               ? SpillMode::kSpillDeferred
2528               : SpillMode::kSpillAtDefinition;
2529       data()->AssignSpillRangeToLiveRange(range, spill_mode);
2530     }
2531     // TODO(bmeurer): This is a horrible hack to make sure that for constant
2532     // live ranges, every use requires the constant to be in a register.
2533     // Without this hack, all uses with "any" policy would get the constant
2534     // operand assigned.
2535     if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
2536       for (UsePosition* pos = range->first_pos(); pos != nullptr;
2537            pos = pos->next()) {
2538         if (pos->type() == UsePositionType::kRequiresSlot ||
2539             pos->type() == UsePositionType::kRegisterOrSlotOrConstant) {
2540           continue;
2541         }
2542         UsePositionType new_type = UsePositionType::kRegisterOrSlot;
2543         // Can't mark phis as needing a register.
2544         if (!pos->pos().IsGapPosition()) {
2545           new_type = UsePositionType::kRequiresRegister;
2546         }
2547         pos->set_type(new_type, true);
2548       }
2549     }
2550     range->ResetCurrentHintPosition();
2551   }
2552   for (auto preassigned : data()->preassigned_slot_ranges()) {
2553     TopLevelLiveRange* range = preassigned.first;
2554     int slot_id = preassigned.second;
2555     SpillRange* spill = range->HasSpillRange()
2556                             ? range->GetSpillRange()
2557                             : data()->AssignSpillRangeToLiveRange(
2558                                   range, SpillMode::kSpillAtDefinition);
2559     spill->set_assigned_slot(slot_id);
2560   }
2561 #ifdef DEBUG
2562   Verify();
2563 #endif
2564 }
2565 
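// Phi hints: Define() records the hint operand chosen for a phi output as an
// unresolved UsePosition via MapPhiHint. When the corresponding input is
// processed later, ResolvePhiHint connects the hint to the actual use, and
// Verify() checks that every recorded hint was eventually resolved.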
2566 void LiveRangeBuilder::MapPhiHint(InstructionOperand* operand,
2567                                   UsePosition* use_pos) {
2568   DCHECK(!use_pos->IsResolved());
2569   auto res = phi_hints_.insert(std::make_pair(operand, use_pos));
2570   DCHECK(res.second);
2571   USE(res);
2572 }
2573 
2574 void LiveRangeBuilder::ResolvePhiHint(InstructionOperand* operand,
2575                                       UsePosition* use_pos) {
2576   auto it = phi_hints_.find(operand);
2577   if (it == phi_hints_.end()) return;
2578   DCHECK(!it->second->IsResolved());
2579   it->second->ResolveHint(use_pos);
2580 }
2581 
2582 void LiveRangeBuilder::Verify() const {
2583   for (auto& hint : phi_hints_) {
2584     CHECK(hint.second->IsResolved());
2585   }
2586   for (const TopLevelLiveRange* current : data()->live_ranges()) {
2587     if (current != nullptr && !current->IsEmpty()) {
2588       // New LiveRanges should not be split.
2589       CHECK_NULL(current->next());
2590       // General integrity check.
2591       current->Verify();
2592       const UseInterval* first = current->first_interval();
2593       if (first->next() == nullptr) continue;
2594 
2595       // Consecutive intervals should not end and start in the same block,
2596       // otherwise the intervals should have been joined, because the
2597       // variable is live throughout that block.
2598       CHECK(NextIntervalStartsInDifferentBlocks(first));
2599 
2600       for (const UseInterval* i = first->next(); i != nullptr; i = i->next()) {
2601         // Except for the first interval, the other intervals must start at
2602         // a block boundary, otherwise data wouldn't flow to them.
2603         CHECK(IntervalStartsAtBlockBoundary(i));
2604         // The last instruction of the predecessors of the block where the
2605         // interval starts must be covered by the range.
2606         CHECK(IntervalPredecessorsCoveredByRange(i, current));
2607         if (i->next() != nullptr) {
2608           // Check the consecutive intervals property, except for the last
2609           // interval, where it doesn't apply.
2610           CHECK(NextIntervalStartsInDifferentBlocks(i));
2611         }
2612       }
2613     }
2614   }
2615 }
2616 
2617 bool LiveRangeBuilder::IntervalStartsAtBlockBoundary(
2618     const UseInterval* interval) const {
2619   LifetimePosition start = interval->start();
2620   if (!start.IsFullStart()) return false;
2621   int instruction_index = start.ToInstructionIndex();
2622   const InstructionBlock* block =
2623       data()->code()->GetInstructionBlock(instruction_index);
2624   return block->first_instruction_index() == instruction_index;
2625 }
2626 
2627 bool LiveRangeBuilder::IntervalPredecessorsCoveredByRange(
2628     const UseInterval* interval, const TopLevelLiveRange* range) const {
2629   LifetimePosition start = interval->start();
2630   int instruction_index = start.ToInstructionIndex();
2631   const InstructionBlock* block =
2632       data()->code()->GetInstructionBlock(instruction_index);
2633   for (RpoNumber pred_index : block->predecessors()) {
2634     const InstructionBlock* predecessor =
2635         data()->code()->InstructionBlockAt(pred_index);
2636     LifetimePosition last_pos = LifetimePosition::GapFromInstructionIndex(
2637         predecessor->last_instruction_index());
2638     last_pos = last_pos.NextStart().End();
2639     if (!range->Covers(last_pos)) return false;
2640   }
2641   return true;
2642 }
2643 
2644 bool LiveRangeBuilder::NextIntervalStartsInDifferentBlocks(
2645     const UseInterval* interval) const {
2646   DCHECK_NOT_NULL(interval->next());
2647   LifetimePosition end = interval->end();
2648   LifetimePosition next_start = interval->next()->start();
2649       // Since end is not covered, but the previous position is, move back
2650       // one position.
2651   end = end.IsStart() ? end.PrevStart().End() : end.Start();
2652   int last_covered_index = end.ToInstructionIndex();
2653   const InstructionBlock* block =
2654       data()->code()->GetInstructionBlock(last_covered_index);
2655   const InstructionBlock* next_block =
2656       data()->code()->GetInstructionBlock(next_start.ToInstructionIndex());
2657   return block->rpo_number() < next_block->rpo_number();
2658 }
2659 
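// Bundle construction: for every phi, the output range and the input ranges
// are greedily merged into a LiveRangeBundle as long as their use intervals
// stay disjoint. Ranges in one bundle can later share a spill slot (see
// MergeSpillRangesAndClear below), which helps avoid stack-to-stack moves
// when phi inputs and outputs are spilled.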
2660 void BundleBuilder::BuildBundles() {
2661   TRACE("Build bundles\n");
2662   // Process the blocks in reverse order.
2663   for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
2664        --block_id) {
2665     InstructionBlock* block =
2666         code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
2667     TRACE("Block B%d\n", block_id);
2668     for (auto phi : block->phis()) {
2669       LiveRange* out_range =
2670           data()->GetOrCreateLiveRangeFor(phi->virtual_register());
2671       LiveRangeBundle* out = out_range->get_bundle();
2672       if (out == nullptr) {
2673         out = data()->allocation_zone()->New<LiveRangeBundle>(
2674             data()->allocation_zone(), next_bundle_id_++);
2675         out->TryAddRange(out_range);
2676       }
2677       TRACE("Processing phi for v%d with %d:%d\n", phi->virtual_register(),
2678             out_range->TopLevel()->vreg(), out_range->relative_id());
2679       bool phi_interferes_with_backedge_input = false;
2680       for (auto input : phi->operands()) {
2681         LiveRange* input_range = data()->GetOrCreateLiveRangeFor(input);
2682         TRACE("Input value v%d with range %d:%d\n", input,
2683               input_range->TopLevel()->vreg(), input_range->relative_id());
2684         LiveRangeBundle* input_bundle = input_range->get_bundle();
2685         if (input_bundle != nullptr) {
2686           TRACE("Merge\n");
2687           LiveRangeBundle* merged = LiveRangeBundle::TryMerge(
2688               out, input_bundle, data()->is_trace_alloc());
2689           if (merged != nullptr) {
2690             DCHECK_EQ(out_range->get_bundle(), merged);
2691             DCHECK_EQ(input_range->get_bundle(), merged);
2692             out = merged;
2693             TRACE("Merged %d and %d to %d\n", phi->virtual_register(), input,
2694                   out->id());
2695           } else if (input_range->Start() > out_range->Start()) {
2696             // We are only interested in values defined after the phi, because
2697             // those are values that will go over a back-edge.
2698             phi_interferes_with_backedge_input = true;
2699           }
2700         } else {
2701           TRACE("Add\n");
2702           if (out->TryAddRange(input_range)) {
2703             TRACE("Added %d and %d to %d\n", phi->virtual_register(), input,
2704                   out->id());
2705           } else if (input_range->Start() > out_range->Start()) {
2706             // We are only interested in values defined after the phi, because
2707             // those are values that will go over a back-edge.
2708             phi_interferes_with_backedge_input = true;
2709           }
2710         }
2711       }
2712       // Spilling the phi at the loop header is not beneficial if there is
2713       // a back-edge with an input for the phi that interferes with the phi's
2714       // value, because if that input gets spilled, it might introduce
2715       // a stack-to-stack move at the back-edge.
2716       if (phi_interferes_with_backedge_input)
2717         out_range->TopLevel()->set_spilling_at_loop_header_not_beneficial();
2718     }
2719     TRACE("Done block B%d\n", block_id);
2720   }
2721 }
2722 
2723 bool LiveRangeBundle::TryAddRange(LiveRange* range) {
2724   DCHECK_NULL(range->get_bundle());
2725   // We may only add a new live range if its use intervals do not
2726   // overlap with existing intervals in the bundle.
2727   if (UsesOverlap(range->first_interval())) return false;
2728   ranges_.insert(range);
2729   range->set_bundle(this);
2730   InsertUses(range->first_interval());
2731   return true;
2732 }
2733 
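// Attempts to merge two bundles. The sorted use lists of both bundles are
// walked in lockstep; if any pair of uses overlaps, merging fails and
// nullptr is returned. Otherwise the smaller bundle is folded into the
// larger one. For example (hypothetical positions), uses [2,4) and [8,10) on
// one side and [4,6) on the other are pairwise disjoint and would merge,
// whereas [2,6) and [4,8) would not.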
2734 LiveRangeBundle* LiveRangeBundle::TryMerge(LiveRangeBundle* lhs,
2735                                            LiveRangeBundle* rhs,
2736                                            bool trace_alloc) {
2737   if (rhs == lhs) return lhs;
2738 
2739   auto iter1 = lhs->uses_.begin();
2740   auto iter2 = rhs->uses_.begin();
2741 
2742   while (iter1 != lhs->uses_.end() && iter2 != rhs->uses_.end()) {
2743     if (iter1->start >= iter2->end) {
2744       ++iter2;
2745     } else if (iter2->start >= iter1->end) {
2746       ++iter1;
2747     } else {
2748       TRACE_COND(trace_alloc, "No merge %d:%d %d:%d\n", iter1->start,
2749                  iter1->end, iter2->start, iter2->end);
2750       return nullptr;
2751     }
2752   }
2753   // Uses are disjoint, merging is possible.
2754   if (lhs->uses_.size() < rhs->uses_.size()) {
2755     // Merge the smaller bundle into the bigger one.
2756     std::swap(lhs, rhs);
2757   }
2758   for (auto it = rhs->ranges_.begin(); it != rhs->ranges_.end(); ++it) {
2759     (*it)->set_bundle(lhs);
2760     lhs->InsertUses((*it)->first_interval());
2761   }
2762   lhs->ranges_.insert(rhs->ranges_.begin(), rhs->ranges_.end());
2763   rhs->ranges_.clear();
2764   return lhs;
2765 }
2766 
2767 void LiveRangeBundle::MergeSpillRangesAndClear() {
2768   DCHECK_IMPLIES(ranges_.empty(), uses_.empty());
2769   SpillRange* target = nullptr;
2770   for (auto range : ranges_) {
2771     if (range->TopLevel()->HasSpillRange()) {
2772       SpillRange* current = range->TopLevel()->GetSpillRange();
2773       if (target == nullptr) {
2774         target = current;
2775       } else if (target != current) {
2776         target->TryMerge(current);
2777       }
2778     }
2779   }
2780   // Clear the fields so that we don't try to merge the spill ranges again when
2781   // we hit the same bundle from a different LiveRange in AssignSpillSlots.
2782   // LiveRangeBundles are not used after this.
2783   ranges_.clear();
2784   uses_.clear();
2785 }
2786 
2787 RegisterAllocator::RegisterAllocator(TopTierRegisterAllocationData* data,
2788                                      RegisterKind kind)
2789     : data_(data),
2790       mode_(kind),
2791       num_registers_(GetRegisterCount(data->config(), kind)),
2792       num_allocatable_registers_(
2793           GetAllocatableRegisterCount(data->config(), kind)),
2794       allocatable_register_codes_(
2795           GetAllocatableRegisterCodes(data->config(), kind)),
2796       check_fp_aliasing_(false) {
2797   if (kFPAliasing == AliasingKind::kCombine && kind == RegisterKind::kDouble) {
2798     check_fp_aliasing_ = (data->code()->representation_mask() &
2799                           (kFloat32Bit | kSimd128Bit)) != 0;
2800   }
2801 }
2802 
2803 LifetimePosition RegisterAllocator::GetSplitPositionForInstruction(
2804     const LiveRange* range, int instruction_index) {
2805   LifetimePosition ret = LifetimePosition::Invalid();
2806 
2807   ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
2808   if (range->Start() >= ret || ret >= range->End()) {
2809     return LifetimePosition::Invalid();
2810   }
2811   return ret;
2812 }
2813 
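// For ranges that are known to live in memory at their definition (they
// already have a spill operand or are guaranteed to be spilled), the range
// is either spilled entirely (if no use benefits from a register) or split
// just before the first register-beneficial use, with the first part
// spilled. This keeps such values out of registers until they are actually
// needed.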
2814 void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
2815   size_t initial_range_count = data()->live_ranges().size();
2816   for (size_t i = 0; i < initial_range_count; ++i) {
2817     CHECK_EQ(initial_range_count,
2818              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
2819     TopLevelLiveRange* range = data()->live_ranges()[i];
2820     if (!CanProcessRange(range)) continue;
2821     // Only assume defined by memory operand if we are guaranteed to spill it or
2822     // it has a spill operand.
2823     if (range->HasNoSpillType() ||
2824         (range->HasSpillRange() && !range->has_non_deferred_slot_use())) {
2825       continue;
2826     }
2827     LifetimePosition start = range->Start();
2828     TRACE("Live range %d:%d is defined by a spill operand.\n",
2829           range->TopLevel()->vreg(), range->relative_id());
2830     LifetimePosition next_pos = start;
2831     if (next_pos.IsGapPosition()) {
2832       next_pos = next_pos.NextStart();
2833     }
2834 
2835     UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
2836     // If the range already has a spill operand and it doesn't need a
2837     // register immediately, split it and spill the first part of the range.
2838     if (pos == nullptr) {
2839       Spill(range, SpillMode::kSpillAtDefinition);
2840     } else if (pos->pos() > range->Start().NextStart()) {
2841       // Do not spill the live range eagerly if the use position that can
2842       // benefit from the register is too close to the start of the live range.
2843       LifetimePosition split_pos = GetSplitPositionForInstruction(
2844           range, pos->pos().ToInstructionIndex());
2845       // There is no place to split, so we can't split and spill.
2846       if (!split_pos.IsValid()) continue;
2847 
2848       split_pos =
2849           FindOptimalSplitPos(range->Start().NextFullStart(), split_pos);
2850 
2851       SplitRangeAt(range, split_pos);
2852       Spill(range, SpillMode::kSpillAtDefinition);
2853     }
2854   }
2855 }
2856 
2857 LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
2858                                            LifetimePosition pos) {
2859   DCHECK(!range->TopLevel()->IsFixed());
2860   TRACE("Splitting live range %d:%d at %d\n", range->TopLevel()->vreg(),
2861         range->relative_id(), pos.value());
2862 
2863   if (pos <= range->Start()) return range;
2864 
2865   // We can't properly connect live ranges if splitting occurred at the end
2866   // of a block.
2867   DCHECK(pos.IsStart() || pos.IsGapPosition() ||
2868          (GetInstructionBlock(code(), pos)->last_instruction_index() !=
2869           pos.ToInstructionIndex()));
2870 
2871   LiveRange* result = range->SplitAt(pos, allocation_zone());
2872   return result;
2873 }
2874 
2875 LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
2876                                            LifetimePosition start,
2877                                            LifetimePosition end) {
2878   DCHECK(!range->TopLevel()->IsFixed());
2879   TRACE("Splitting live range %d:%d in position between [%d, %d]\n",
2880         range->TopLevel()->vreg(), range->relative_id(), start.value(),
2881         end.value());
2882 
2883   LifetimePosition split_pos = FindOptimalSplitPos(start, end);
2884   DCHECK(split_pos >= start);
2885   return SplitRangeAt(range, split_pos);
2886 }
2887 
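// Chooses a split position between start and end. Within a single block the
// latest position (end) is used. Across blocks, the split is hoisted to the
// header of the outermost loop that lies strictly after the lifetime start,
// so that the resulting spill/reload code ends up outside of as many loops
// as possible. E.g. if end falls inside a loop nest whose outermost header
// comes after start, the gap position of that header's first instruction is
// returned instead of end.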
2888 LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
2889                                                         LifetimePosition end) {
2890   int start_instr = start.ToInstructionIndex();
2891   int end_instr = end.ToInstructionIndex();
2892   DCHECK_LE(start_instr, end_instr);
2893 
2894   // We have no choice
2895   if (start_instr == end_instr) return end;
2896 
2897   const InstructionBlock* start_block = GetInstructionBlock(code(), start);
2898   const InstructionBlock* end_block = GetInstructionBlock(code(), end);
2899 
2900   if (end_block == start_block) {
2901     // The interval is split in the same basic block. Split at the latest
2902     // possible position.
2903     return end;
2904   }
2905 
2906   const InstructionBlock* block = end_block;
2907   // Find header of outermost loop.
2908   do {
2909     const InstructionBlock* loop = GetContainingLoop(code(), block);
2910     if (loop == nullptr ||
2911         loop->rpo_number().ToInt() <= start_block->rpo_number().ToInt()) {
2912       // No more loops or loop starts before the lifetime start.
2913       break;
2914     }
2915     block = loop;
2916   } while (true);
2917 
2918   // We did not find any suitable outer loop. Split at the latest possible
2919   // position unless end_block is a loop header itself.
2920   if (block == end_block && !end_block->IsLoopHeader()) return end;
2921 
2922   return LifetimePosition::GapFromInstructionIndex(
2923       block->first_instruction_index());
2924 }
2925 
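// Tries to move a spill upwards from pos to an enclosing loop header, as
// long as the value is already defined there, spilling at the header is not
// known to be detrimental, and no use between the header and pos would
// suffer from the value being spilled. *begin_spill_out is set to the child
// range at which the spill actually begins. For deferred spills the position
// is left untouched.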
2926 LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
2927     LiveRange* range, LifetimePosition pos, SpillMode spill_mode,
2928     LiveRange** begin_spill_out) {
2929   *begin_spill_out = range;
2930   // TODO(herhut): Be more clever here as long as we do not move pos out of
2931   // deferred code.
2932   if (spill_mode == SpillMode::kSpillDeferred) return pos;
2933   const InstructionBlock* block = GetInstructionBlock(code(), pos.Start());
2934   const InstructionBlock* loop_header =
2935       block->IsLoopHeader() ? block : GetContainingLoop(code(), block);
2936   if (loop_header == nullptr) return pos;
2937 
2938   while (loop_header != nullptr) {
2939     // We are going to spill the live range inside the loop.
2940     // If possible, try to move the spilling position backwards to the loop
2941     // header. This will reduce the number of memory moves on the back edge.
2942     LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex(
2943         loop_header->first_instruction_index());
2944     // Stop if we moved to a loop header before the value is defined, or to
2945     // a definition position where spilling is not beneficial.
2946     if (range->TopLevel()->Start() > loop_start ||
2947         (range->TopLevel()->Start() == loop_start &&
2948          range->TopLevel()->SpillAtLoopHeaderNotBeneficial()))
2949       return pos;
2950 
2951     LiveRange* live_at_header = range->TopLevel()->GetChildCovers(loop_start);
2952 
2953     if (live_at_header != nullptr && !live_at_header->spilled()) {
2954       for (LiveRange* check_use = live_at_header;
2955            check_use != nullptr && check_use->Start() < pos;
2956            check_use = check_use->next()) {
2957         // If we find a use for which spilling is detrimental, don't spill
2958         // at the loop header
2959         UsePosition* next_use =
2960             check_use->NextUsePositionSpillDetrimental(loop_start);
2961         // UsePosition at the end of a UseInterval may
2962         // have the same value as the start of the next range.
2963         if (next_use != nullptr && next_use->pos() <= pos) {
2964           return pos;
2965         }
2966       }
2967       // No register beneficial use inside the loop before the pos.
2968       *begin_spill_out = live_at_header;
2969       pos = loop_start;
2970     }
2971 
2972     // Try hoisting out to an outer loop.
2973     loop_header = GetContainingLoop(code(), loop_header);
2974   }
2975   return pos;
2976 }
2977 
2978 void RegisterAllocator::Spill(LiveRange* range, SpillMode spill_mode) {
2979   DCHECK(!range->spilled());
2980   DCHECK(spill_mode == SpillMode::kSpillAtDefinition ||
2981          GetInstructionBlock(code(), range->Start())->IsDeferred());
2982   TopLevelLiveRange* first = range->TopLevel();
2983   TRACE("Spilling live range %d:%d mode %d\n", first->vreg(),
2984         range->relative_id(), spill_mode);
2985 
2986   TRACE("Starting spill type is %d\n", static_cast<int>(first->spill_type()));
2987   if (first->HasNoSpillType()) {
2988     TRACE("New spill range needed");
2989     data()->AssignSpillRangeToLiveRange(first, spill_mode);
2990   }
2991   // Upgrade the spill mode, in case this was only spilled in deferred code so
2992   // far.
2993   if ((spill_mode == SpillMode::kSpillAtDefinition) &&
2994       (first->spill_type() ==
2995        TopLevelLiveRange::SpillType::kDeferredSpillRange)) {
2996     TRACE("Upgrading\n");
2997     first->set_spill_type(TopLevelLiveRange::SpillType::kSpillRange);
2998   }
2999   TRACE("Final spill type is %d\n", static_cast<int>(first->spill_type()));
3000   range->Spill();
3001 }
3002 
3003 const char* RegisterAllocator::RegisterName(int register_code) const {
3004   if (register_code == kUnassignedRegister) return "unassigned";
3005   switch (mode()) {
3006     case RegisterKind::kGeneral:
3007       return i::RegisterName(Register::from_code(register_code));
3008     case RegisterKind::kDouble:
3009       return i::RegisterName(DoubleRegister::from_code(register_code));
3010     case RegisterKind::kSimd128:
3011       return i::RegisterName(Simd128Register::from_code(register_code));
3012   }
3013 }
3014 
3015 LinearScanAllocator::LinearScanAllocator(TopTierRegisterAllocationData* data,
3016                                          RegisterKind kind, Zone* local_zone)
3017     : RegisterAllocator(data, kind),
3018       unhandled_live_ranges_(local_zone),
3019       active_live_ranges_(local_zone),
3020       inactive_live_ranges_(num_registers(), InactiveLiveRangeQueue(local_zone),
3021                             local_zone),
3022       next_active_ranges_change_(LifetimePosition::Invalid()),
3023       next_inactive_ranges_change_(LifetimePosition::Invalid()) {
3024   active_live_ranges().reserve(8);
3025 }
3026 
3027 void LinearScanAllocator::MaybeSpillPreviousRanges(LiveRange* begin_range,
3028                                                    LifetimePosition begin_pos,
3029                                                    LiveRange* end_range) {
3030   // Spill begin_range after begin_pos, then spill every live range of this
3031   // virtual register until but excluding end_range.
3032   DCHECK(begin_range->Covers(begin_pos));
3033   DCHECK_EQ(begin_range->TopLevel(), end_range->TopLevel());
3034 
3035   if (begin_range != end_range) {
3036     DCHECK_LE(begin_range->End(), end_range->Start());
3037     if (!begin_range->spilled()) {
3038       SpillAfter(begin_range, begin_pos, SpillMode::kSpillAtDefinition);
3039     }
3040     for (LiveRange* range = begin_range->next(); range != end_range;
3041          range = range->next()) {
3042       if (!range->spilled()) {
3043         range->Spill();
3044       }
3045     }
3046   }
3047 }
3048 
3049 void LinearScanAllocator::MaybeUndoPreviousSplit(LiveRange* range) {
3050   if (range->next() != nullptr && range->next()->ShouldRecombine()) {
3051     LiveRange* to_remove = range->next();
3052     TRACE("Recombining %d:%d with %d\n", range->TopLevel()->vreg(),
3053           range->relative_id(), to_remove->relative_id());
3054 
3055     // Remove the range from unhandled, as attaching it will change its
3056     // state and hence ordering in the unhandled set.
3057     auto removed_cnt = unhandled_live_ranges().erase(to_remove);
3058     DCHECK_EQ(removed_cnt, 1);
3059     USE(removed_cnt);
3060 
3061     range->AttachToNext();
3062   } else if (range->next() != nullptr) {
3063     TRACE("No recombine for %d:%d to %d\n", range->TopLevel()->vreg(),
3064           range->relative_id(), range->next()->relative_id());
3065   }
3066 }
3067 
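// Called on (non-fallthrough) block boundaries: every active range that is
// not present in {to_be_live} is split at the boundary and the tail is
// either spilled or re-queued for allocation, while ranges that are in
// {to_be_live} but sit in the wrong register are split and rescheduled so
// they can be reloaded into the expected register.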
3068 void LinearScanAllocator::SpillNotLiveRanges(RangeWithRegisterSet* to_be_live,
3069                                              LifetimePosition position,
3070                                              SpillMode spill_mode) {
3071   for (auto it = active_live_ranges().begin();
3072        it != active_live_ranges().end();) {
3073     LiveRange* active_range = *it;
3074     TopLevelLiveRange* toplevel = (*it)->TopLevel();
3075     auto found = to_be_live->find({toplevel, kUnassignedRegister});
3076     if (found == to_be_live->end()) {
3077       // Is not contained in {to_be_live}, spill it.
3078       // Fixed registers are exempt from this. They might have been
3079       // added from inactive at the block boundary but we know that
3080       // they cannot conflict as they are built before register
3081       // allocation starts. It would be algorithmically fine to split
3082       // them and reschedule, but the code does not support this.
3083       if (toplevel->IsFixed()) {
3084         TRACE("Keeping reactivated fixed range for %s\n",
3085               RegisterName(toplevel->assigned_register()));
3086         ++it;
3087       } else {
3088         // When spilling a previously spilled/reloaded range, we add back the
3089         // tail that we might have split off when we reloaded/spilled it
3090         // previously. Otherwise we might keep generating small split-offs.
3091         MaybeUndoPreviousSplit(active_range);
3092         TRACE("Putting back %d:%d\n", toplevel->vreg(),
3093               active_range->relative_id());
3094         LiveRange* split = SplitRangeAt(active_range, position);
3095         DCHECK_NE(split, active_range);
3096 
3097         // Make sure we revisit this range once it has a use that requires
3098         // a register.
3099         UsePosition* next_use = split->NextRegisterPosition(position);
3100         if (next_use != nullptr) {
3101           // Move to the start of the gap before use so that we have a space
3102           // to perform the potential reload. Otherwise, do not spill but add
3103           // to unhandled for reallocation.
3104           LifetimePosition revisit_at = next_use->pos().FullStart();
3105           TRACE("Next use at %d\n", revisit_at.value());
3106           if (!data()->IsBlockBoundary(revisit_at)) {
3107             // Leave some space so we have enough gap room.
3108             revisit_at = revisit_at.PrevStart().FullStart();
3109           }
3110           // If this range became live right at the block boundary that we are
3111           // currently processing, we do not need to split it. Instead move it
3112           // to unhandled right away.
3113           if (position < revisit_at) {
3114             LiveRange* third_part = SplitRangeAt(split, revisit_at);
3115             DCHECK_NE(split, third_part);
3116             Spill(split, spill_mode);
3117             TRACE("Marking %d:%d to recombine\n", toplevel->vreg(),
3118                   third_part->relative_id());
3119             third_part->SetRecombine();
3120             AddToUnhandled(third_part);
3121           } else {
3122             AddToUnhandled(split);
3123           }
3124         } else {
3125           Spill(split, spill_mode);
3126         }
3127         it = ActiveToHandled(it);
3128       }
3129     } else {
3130       // This range is contained in {to_be_live}, so we can keep it.
3131       int expected_register = (*found).expected_register;
3132       to_be_live->erase(found);
3133       if (expected_register == active_range->assigned_register()) {
3134         // Was live and in the correct register; simply pass through.
3135         TRACE("Keeping %d:%d in %s\n", toplevel->vreg(),
3136               active_range->relative_id(),
3137               RegisterName(active_range->assigned_register()));
3138         ++it;
3139       } else {
3140         // Was live but in the wrong register. Split and schedule for
3141         // allocation.
3142         TRACE("Scheduling %d:%d\n", toplevel->vreg(),
3143               active_range->relative_id());
3144         LiveRange* split = SplitRangeAt(active_range, position);
3145         split->set_controlflow_hint(expected_register);
3146         AddToUnhandled(split);
3147         it = ActiveToHandled(it);
3148       }
3149     }
3150   }
3151 }
3152 
3153 LiveRange* LinearScanAllocator::AssignRegisterOnReload(LiveRange* range,
3154                                                        int reg) {
3155   // We know the register is currently free but it might be in
3156   // use by a currently inactive range. So we might not be able
3157   // to reload for the full distance. In such a case, split here.
3158   // TODO(herhut):
3159   // It might be better if we could use the normal unhandled queue and
3160   // give reloading registers precedence. That way we would compute the
3161   // intersection for the entire future.
3162   LifetimePosition new_end = range->End();
3163   for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) {
3164     if ((kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) &&
3165         cur_reg != reg) {
3166       continue;
3167     }
3168     for (const LiveRange* cur_inactive : inactive_live_ranges(cur_reg)) {
3169       if (kFPAliasing == AliasingKind::kCombine && check_fp_aliasing() &&
3170           !data()->config()->AreAliases(cur_inactive->representation(), cur_reg,
3171                                         range->representation(), reg)) {
3172         continue;
3173       }
3174       if (new_end <= cur_inactive->NextStart()) {
3175         // Inactive ranges are sorted by their next start, so the remaining
3176         // ranges cannot contribute to new_end.
3177         break;
3178       }
3179       auto next_intersection = cur_inactive->FirstIntersection(range);
3180       if (!next_intersection.IsValid()) continue;
3181       new_end = std::min(new_end, next_intersection);
3182     }
3183   }
3184   if (new_end != range->End()) {
3185     TRACE("Found new end for %d:%d at %d\n", range->TopLevel()->vreg(),
3186           range->relative_id(), new_end.value());
3187     LiveRange* tail = SplitRangeAt(range, new_end);
3188     AddToUnhandled(tail);
3189   }
3190   SetLiveRangeAssignedRegister(range, reg);
3191   return range;
3192 }
3193 
3194 void LinearScanAllocator::ReloadLiveRanges(
3195     RangeWithRegisterSet const& to_be_live, LifetimePosition position) {
3196   // Assumption: All ranges in {to_be_live} are currently spilled and there are
3197   // no conflicting registers in the active ranges.
3198   // The former is ensured by SpillNotLiveRanges, the latter is by construction
3199   // of the to_be_live set.
3200   for (RangeWithRegister range_with_register : to_be_live) {
3201     TopLevelLiveRange* range = range_with_register.range;
3202     int reg = range_with_register.expected_register;
3203     LiveRange* to_resurrect = range->GetChildCovers(position);
3204     if (to_resurrect == nullptr) {
3205       // While the range was live until the end of the predecessor block, it is
3206       // not live in this block. Either there is a lifetime gap or the range
3207       // died.
3208       TRACE("No candidate for %d at %d\n", range->vreg(), position.value());
3209     } else {
3210       // We might be resurrecting a range that we previously spilled until its
3211       // next use. In such cases, we have to unsplit it before processing, as
3212       // otherwise we might get register changes from one range to the other
3213       // in the middle of blocks.
3214       // If there is a gap between this range and the next, we can just keep
3215       // it as a register change won't hurt.
3216       MaybeUndoPreviousSplit(to_resurrect);
3217       if (to_resurrect->Start() == position) {
3218         // This range already starts at this block. It might have been spilled,
3219         // so we have to unspill it. Otherwise, it is already in the unhandled
3220         // queue waiting for processing.
3221         DCHECK(!to_resurrect->HasRegisterAssigned());
3222         TRACE("Reload %d:%d starting at %d itself\n", range->vreg(),
3223               to_resurrect->relative_id(), position.value());
3224         if (to_resurrect->spilled()) {
3225           to_resurrect->Unspill();
3226           to_resurrect->set_controlflow_hint(reg);
3227           AddToUnhandled(to_resurrect);
3228         } else {
3229           // Assign the preassigned register if we know it. Otherwise, there is
3230           // nothing to do, as the range is already in the unhandled queue.
3231           if (reg != kUnassignedRegister) {
3232             auto erased_cnt = unhandled_live_ranges().erase(to_resurrect);
3233             DCHECK_EQ(erased_cnt, 1);
3234             USE(erased_cnt);
3235             // We know that there is no conflict with active ranges, so just
3236             // assign the register to the range.
3237             to_resurrect = AssignRegisterOnReload(to_resurrect, reg);
3238             AddToActive(to_resurrect);
3239           }
3240         }
3241       } else {
3242         // This range was spilled before. We have to split it and schedule the
3243         // second part for allocation (or assign the register if we know).
3244         DCHECK(to_resurrect->spilled());
3245         LiveRange* split = SplitRangeAt(to_resurrect, position);
3246         TRACE("Reload %d:%d starting at %d as %d\n", range->vreg(),
3247               to_resurrect->relative_id(), split->Start().value(),
3248               split->relative_id());
3249         DCHECK_NE(split, to_resurrect);
3250         if (reg != kUnassignedRegister) {
3251           // We know that there is no conflict with active ranges, so just
3252           // assign the register to the range.
3253           split = AssignRegisterOnReload(split, reg);
3254           AddToActive(split);
3255         } else {
3256           // Let normal register assignment find a suitable register.
3257           split->set_controlflow_hint(reg);
3258           AddToUnhandled(split);
3259         }
3260       }
3261     }
3262   }
3263 }
3264 
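// With exactly two predecessors, pick the predecessor spill state whose live
// ranges have more register-beneficial uses (or, failing that, any uses)
// after the boundary; adopting that state requires the fewest spills and
// reloads.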
3265 RpoNumber LinearScanAllocator::ChooseOneOfTwoPredecessorStates(
3266     InstructionBlock* current_block, LifetimePosition boundary) {
3267   using SmallRangeVector =
3268       base::SmallVector<TopLevelLiveRange*,
3269                         RegisterConfiguration::kMaxRegisters>;
3270   // Pick the state that would generate the fewest spills/reloads.
3271   // Compute vectors of ranges with imminent use for both sides.
3272   // As GetChildCovers is cached, it is cheaper to repeatedly
3273   // call it rather than compute a shared set first.
3274   auto& left = data()->GetSpillState(current_block->predecessors()[0]);
3275   auto& right = data()->GetSpillState(current_block->predecessors()[1]);
3276   SmallRangeVector left_used;
3277   for (const auto item : left) {
3278     LiveRange* at_next_block = item->TopLevel()->GetChildCovers(boundary);
3279     if (at_next_block != nullptr &&
3280         at_next_block->NextUsePositionRegisterIsBeneficial(boundary) !=
3281             nullptr) {
3282       left_used.emplace_back(item->TopLevel());
3283     }
3284   }
3285   SmallRangeVector right_used;
3286   for (const auto item : right) {
3287     LiveRange* at_next_block = item->TopLevel()->GetChildCovers(boundary);
3288     if (at_next_block != nullptr &&
3289         at_next_block->NextUsePositionRegisterIsBeneficial(boundary) !=
3290             nullptr) {
3291       right_used.emplace_back(item->TopLevel());
3292     }
3293   }
3294   if (left_used.empty() && right_used.empty()) {
3295     // There are no beneficial register uses. Look at any use at
3296     // all. We do not account for all uses, like flowing into a phi.
3297     // So we just look at ranges still being live.
3298     TRACE("Looking at only uses\n");
3299     for (const auto item : left) {
3300       LiveRange* at_next_block = item->TopLevel()->GetChildCovers(boundary);
3301       if (at_next_block != nullptr &&
3302           at_next_block->NextUsePosition(boundary) != nullptr) {
3303         left_used.emplace_back(item->TopLevel());
3304       }
3305     }
3306     for (const auto item : right) {
3307       LiveRange* at_next_block = item->TopLevel()->GetChildCovers(boundary);
3308       if (at_next_block != nullptr &&
3309           at_next_block->NextUsePosition(boundary) != nullptr) {
3310         right_used.emplace_back(item->TopLevel());
3311       }
3312     }
3313   }
3314   // Now left_used and right_used contain the ranges that matter.
3315   // Pick the side with the larger count.
3316   TRACE("Vote went %zu vs %zu\n", left_used.size(), right_used.size());
3317   return left_used.size() > right_used.size()
3318              ? current_block->predecessors()[0]
3319              : current_block->predecessors()[1];
3320 }
3321 
3322 bool LinearScanAllocator::CheckConflict(MachineRepresentation rep, int reg,
3323                                         RangeWithRegisterSet* to_be_live) {
3324   for (RangeWithRegister range_with_reg : *to_be_live) {
3325     if (data()->config()->AreAliases(range_with_reg.range->representation(),
3326                                      range_with_reg.expected_register, rep,
3327                                      reg)) {
3328       return true;
3329     }
3330   }
3331   return false;
3332 }
3333 
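// With more than two predecessors, each non-deferred forward predecessor
// votes for the ranges it holds in registers at the block entry. Ranges
// reaching a majority are put into {to_be_live}, preferably in the register
// most predecessors agree on; conflicting register wishes fall back to
// kUnassignedRegister. For example, with five predecessors of which one is
// deferred, a range kept in a register by at least three of them would be
// added to {to_be_live}.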
3334 void LinearScanAllocator::ComputeStateFromManyPredecessors(
3335     InstructionBlock* current_block, RangeWithRegisterSet* to_be_live) {
3336   struct Vote {
3337     size_t count;
3338     int used_registers[RegisterConfiguration::kMaxRegisters];
3339   };
3340   struct TopLevelLiveRangeComparator {
3341     bool operator()(const TopLevelLiveRange* lhs,
3342                     const TopLevelLiveRange* rhs) const {
3343       return lhs->vreg() < rhs->vreg();
3344     }
3345   };
3346   ZoneMap<TopLevelLiveRange*, Vote, TopLevelLiveRangeComparator> counts(
3347       data()->allocation_zone());
3348   int deferred_blocks = 0;
3349   for (RpoNumber pred : current_block->predecessors()) {
3350     if (!ConsiderBlockForControlFlow(current_block, pred)) {
3351       // Back edges of a loop count as deferred here too.
3352       deferred_blocks++;
3353       continue;
3354     }
3355     const auto& pred_state = data()->GetSpillState(pred);
3356     for (LiveRange* range : pred_state) {
3357       // We might have spilled the register backwards, so the range we
3358       // stored might have lost its register. Ignore those.
3359       if (!range->HasRegisterAssigned()) continue;
3360       TopLevelLiveRange* toplevel = range->TopLevel();
3361       auto previous = counts.find(toplevel);
3362       if (previous == counts.end()) {
3363         auto result = counts.emplace(std::make_pair(toplevel, Vote{1, {0}}));
3364         CHECK(result.second);
3365         result.first->second.used_registers[range->assigned_register()]++;
3366       } else {
3367         previous->second.count++;
3368         previous->second.used_registers[range->assigned_register()]++;
3369       }
3370     }
3371   }
3372 
3373   // Choose the live ranges from the majority.
3374   const size_t majority =
3375       (current_block->PredecessorCount() + 2 - deferred_blocks) / 2;
3376   bool taken_registers[RegisterConfiguration::kMaxRegisters] = {false};
3377   auto assign_to_live = [this, counts, majority](
3378                             std::function<bool(TopLevelLiveRange*)> filter,
3379                             RangeWithRegisterSet* to_be_live,
3380                             bool* taken_registers) {
3381     bool check_aliasing =
3382         kFPAliasing == AliasingKind::kCombine && check_fp_aliasing();
3383     for (const auto& val : counts) {
3384       if (!filter(val.first)) continue;
3385       if (val.second.count >= majority) {
3386         int register_max = 0;
3387         int reg = kUnassignedRegister;
3388         bool conflict = false;
3389         int num_regs = num_registers();
3390         int num_codes = num_allocatable_registers();
3391         const int* codes = allocatable_register_codes();
3392         MachineRepresentation rep = val.first->representation();
3393         if (check_aliasing && (rep == MachineRepresentation::kFloat32 ||
3394                                rep == MachineRepresentation::kSimd128))
3395           GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
3396         for (int idx = 0; idx < num_regs; idx++) {
3397           int uses = val.second.used_registers[idx];
3398           if (uses == 0) continue;
3399           if (uses > register_max || (conflict && uses == register_max)) {
3400             reg = idx;
3401             register_max = uses;
3402             conflict = check_aliasing ? CheckConflict(rep, reg, to_be_live)
3403                                       : taken_registers[reg];
3404           }
3405         }
3406         if (conflict) {
3407           reg = kUnassignedRegister;
3408         } else if (!check_aliasing) {
3409           taken_registers[reg] = true;
3410         }
3411         to_be_live->emplace(val.first, reg);
3412         TRACE("Reset %d as live due vote %zu in %s\n",
3413               val.first->TopLevel()->vreg(), val.second.count,
3414               RegisterName(reg));
3415       }
3416     }
3417   };
3418   // First round, process fixed registers, as these have precedence.
3419   // There is only one fixed range per register, so we cannot have
3420   // conflicts.
3421   assign_to_live([](TopLevelLiveRange* r) { return r->IsFixed(); }, to_be_live,
3422                  taken_registers);
3423   // Second round, process the rest.
3424   assign_to_live([](TopLevelLiveRange* r) { return !r->IsFixed(); }, to_be_live,
3425                  taken_registers);
3426 }
3427 
3428 bool LinearScanAllocator::ConsiderBlockForControlFlow(
3429     InstructionBlock* current_block, RpoNumber predecessor) {
3430   // We ignore predecessors on back edges when looking for control flow effects,
3431   // as those lie in the future of allocation and we have no data yet. Also,
3432   // deferred blocks are ignored on deferred-to-non-deferred boundaries, as we
3433   // do not want them to influence the allocation of non-deferred code.
3434   return (predecessor < current_block->rpo_number()) &&
3435          (current_block->IsDeferred() ||
3436           !code()->InstructionBlockAt(predecessor)->IsDeferred());
3437 }
3438 
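// When entering deferred code, the fixed ranges that exist only for deferred
// blocks are added to the inactive sets, splitting and rescheduling any
// range that would now conflict with them; when leaving deferred code, they
// are removed again.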
3439 void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode,
3440                                                     InstructionBlock* block) {
3441   if (spill_mode == SpillMode::kSpillDeferred) {
3442     LifetimePosition max = LifetimePosition::InstructionFromInstructionIndex(
3443         LastDeferredInstructionIndex(block));
3444     // Adds range back to inactive, resolving resulting conflicts.
3445     auto add_to_inactive = [this, max](LiveRange* range) {
3446       AddToInactive(range);
3447       // Splits other if it conflicts with range. Other is placed in unhandled
3448       // for later reallocation.
3449       auto split_conflicting = [this, max](LiveRange* range, LiveRange* other,
3450                                            std::function<void(LiveRange*)>
3451                                                update_caches) {
3452         if (other->TopLevel()->IsFixed()) return;
3453         int reg = range->assigned_register();
3454         if (kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) {
3455           if (other->assigned_register() != reg) {
3456             return;
3457           }
3458         } else {
3459           if (!data()->config()->AreAliases(range->representation(), reg,
3460                                             other->representation(),
3461                                             other->assigned_register())) {
3462             return;
3463           }
3464         }
3465         // The inactive range might conflict, so check whether we need to
3466         // split and spill. We can look for the first intersection, as there
3467         // cannot be any intersections in the past, as those would have been a
3468         // conflict then.
3469         LifetimePosition next_start = range->FirstIntersection(other);
3470         if (!next_start.IsValid() || (next_start > max)) {
3471           // There is no conflict or the conflict is outside of the current
3472           // stretch of deferred code. In either case we can ignore the
3473           // inactive range.
3474           return;
3475         }
3476         // They overlap, so we need to split the conflicting range and
3477         // reschedule it for allocation.
3478         TRACE("Resolving conflict of %d with deferred fixed for register %s\n",
3479               other->TopLevel()->vreg(),
3480               RegisterName(other->assigned_register()));
3481         LiveRange* split_off =
3482             other->SplitAt(next_start, data()->allocation_zone());
3483         // Try to get the same register after the deferred block.
3484         split_off->set_controlflow_hint(other->assigned_register());
3485         DCHECK_NE(split_off, other);
3486         AddToUnhandled(split_off);
3487         update_caches(other);
3488       };
3489       // Now check for conflicts in active and inactive ranges. We might have
3490       // conflicts in inactive ranges because we only perform this check on
3491       // deferred/non-deferred changes rather than on every block boundary,
3492       // while inactive live ranges might become live on any block boundary.
3493       for (auto active : active_live_ranges()) {
3494         split_conflicting(range, active, [this](LiveRange* updated) {
3495           next_active_ranges_change_ =
3496               std::min(updated->End(), next_active_ranges_change_);
3497         });
3498       }
3499       for (int reg = 0; reg < num_registers(); ++reg) {
3500         if ((kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) &&
3501             reg != range->assigned_register()) {
3502           continue;
3503         }
3504         for (auto inactive : inactive_live_ranges(reg)) {
3505           if (inactive->NextStart() > max) break;
3506           split_conflicting(range, inactive, [this](LiveRange* updated) {
3507             next_inactive_ranges_change_ =
3508                 std::min(updated->End(), next_inactive_ranges_change_);
3509           });
3510         }
3511       }
3512     };
3513     if (mode() == RegisterKind::kGeneral) {
3514       for (TopLevelLiveRange* current : data()->fixed_live_ranges()) {
3515         if (current != nullptr) {
3516           if (current->IsDeferredFixed()) {
3517             add_to_inactive(current);
3518           }
3519         }
3520       }
3521     } else if (mode() == RegisterKind::kDouble) {
3522       for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
3523         if (current != nullptr) {
3524           if (current->IsDeferredFixed()) {
3525             add_to_inactive(current);
3526           }
3527         }
3528       }
3529       if (kFPAliasing == AliasingKind::kCombine && check_fp_aliasing()) {
3530         for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) {
3531           if (current != nullptr) {
3532             if (current->IsDeferredFixed()) {
3533               add_to_inactive(current);
3534             }
3535           }
3536         }
3537         for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) {
3538           if (current != nullptr) {
3539             if (current->IsDeferredFixed()) {
3540               add_to_inactive(current);
3541             }
3542           }
3543         }
3544       }
3545     } else {
3546       DCHECK_EQ(mode(), RegisterKind::kSimd128);
3547       for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) {
3548         if (current != nullptr) {
3549           if (current->IsDeferredFixed()) {
3550             add_to_inactive(current);
3551           }
3552         }
3553       }
3554     }
3555   } else {
3556     // Remove all ranges.
3557     for (int reg = 0; reg < num_registers(); ++reg) {
3558       for (auto it = inactive_live_ranges(reg).begin();
3559            it != inactive_live_ranges(reg).end();) {
3560         if ((*it)->TopLevel()->IsDeferredFixed()) {
3561           it = inactive_live_ranges(reg).erase(it);
3562         } else {
3563           ++it;
3564         }
3565       }
3566     }
3567   }
3568 }
3569 
3570 bool LinearScanAllocator::BlockIsDeferredOrImmediatePredecessorIsNotDeferred(
3571     const InstructionBlock* block) {
3572   if (block->IsDeferred()) return true;
3573   if (block->PredecessorCount() == 0) return true;
3574   bool pred_is_deferred = false;
3575   for (auto pred : block->predecessors()) {
3576     if (pred.IsNext(block->rpo_number())) {
3577       pred_is_deferred = code()->InstructionBlockAt(pred)->IsDeferred();
3578       break;
3579     }
3580   }
3581   return !pred_is_deferred;
3582 }
3583 
3584 bool LinearScanAllocator::HasNonDeferredPredecessor(InstructionBlock* block) {
3585   for (auto pred : block->predecessors()) {
3586     InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
3587     if (!pred_block->IsDeferred()) return true;
3588   }
3589   return false;
3590 }
3591 
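// Main linear-scan driver. All unspilled live ranges are queued by start
// position and processed in order; in addition, every block boundary is
// visited so that spill state can be recorded per block and control-flow
// aware decisions (see ComputeStateFromManyPredecessors and
// ChooseOneOfTwoPredecessorStates above) can be applied even when no range
// starts there.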
3592 void LinearScanAllocator::AllocateRegisters() {
3593   DCHECK(unhandled_live_ranges().empty());
3594   DCHECK(active_live_ranges().empty());
3595   for (int reg = 0; reg < num_registers(); ++reg) {
3596     DCHECK(inactive_live_ranges(reg).empty());
3597   }
3598 
3599   SplitAndSpillRangesDefinedByMemoryOperand();
3600   data()->ResetSpillState();
3601 
3602   if (data()->is_trace_alloc()) {
3603     PrintRangeOverview();
3604   }
3605 
3606   const size_t live_ranges_size = data()->live_ranges().size();
3607   for (TopLevelLiveRange* range : data()->live_ranges()) {
3608     CHECK_EQ(live_ranges_size,
3609              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
3610     if (!CanProcessRange(range)) continue;
3611     for (LiveRange* to_add = range; to_add != nullptr;
3612          to_add = to_add->next()) {
3613       if (!to_add->spilled()) {
3614         AddToUnhandled(to_add);
3615       }
3616     }
3617   }
3618 
3619   if (mode() == RegisterKind::kGeneral) {
3620     for (TopLevelLiveRange* current : data()->fixed_live_ranges()) {
3621       if (current != nullptr) {
3622         if (current->IsDeferredFixed()) continue;
3623         AddToInactive(current);
3624       }
3625     }
3626   } else if (mode() == RegisterKind::kDouble) {
3627     for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
3628       if (current != nullptr) {
3629         if (current->IsDeferredFixed()) continue;
3630         AddToInactive(current);
3631       }
3632     }
3633     if (kFPAliasing == AliasingKind::kCombine && check_fp_aliasing()) {
3634       for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) {
3635         if (current != nullptr) {
3636           if (current->IsDeferredFixed()) continue;
3637           AddToInactive(current);
3638         }
3639       }
3640       for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) {
3641         if (current != nullptr) {
3642           if (current->IsDeferredFixed()) continue;
3643           AddToInactive(current);
3644         }
3645       }
3646     }
3647   } else {
3648     DCHECK(mode() == RegisterKind::kSimd128);
3649     for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) {
3650       if (current != nullptr) {
3651         if (current->IsDeferredFixed()) continue;
3652         AddToInactive(current);
3653       }
3654     }
3655   }
3656 
3657   RpoNumber last_block = RpoNumber::FromInt(0);
3658   RpoNumber max_blocks =
3659       RpoNumber::FromInt(code()->InstructionBlockCount() - 1);
3660   LifetimePosition next_block_boundary =
3661       LifetimePosition::InstructionFromInstructionIndex(
3662           data()
3663               ->code()
3664               ->InstructionBlockAt(last_block)
3665               ->last_instruction_index())
3666           .NextFullStart();
3667   SpillMode spill_mode = SpillMode::kSpillAtDefinition;
3668 
3669   // Process all ranges. We also need to ensure that we have seen all block
3670   // boundaries. Linear scan might have assigned and spilled ranges before
3671   // reaching the last block and hence we would ignore control flow effects for
3672   // those. Not only does this produce a potentially bad assignment, it also
3673   // breaks the invariant that we undo spills that happen in deferred code
3674   // when crossing a deferred/non-deferred boundary.
3675   while (!unhandled_live_ranges().empty() || last_block < max_blocks) {
3676     data()->tick_counter()->TickAndMaybeEnterSafepoint();
3677     LiveRange* current = unhandled_live_ranges().empty()
3678                              ? nullptr
3679                              : *unhandled_live_ranges().begin();
3680     LifetimePosition position =
3681         current ? current->Start() : next_block_boundary;
3682 #ifdef DEBUG
3683     allocation_finger_ = position;
3684 #endif
3685     // Check whether we just moved across a block boundary. This will trigger
3686     // for the first range that is past the current boundary.
3687     if (position >= next_block_boundary) {
3688       TRACE("Processing boundary at %d leaving %d\n",
3689             next_block_boundary.value(), last_block.ToInt());
3690 
3691       // Forward state to before block boundary
3692       LifetimePosition end_of_block = next_block_boundary.PrevStart().End();
3693       ForwardStateTo(end_of_block);
3694 
3695       // Remember this state.
3696       InstructionBlock* current_block = data()->code()->GetInstructionBlock(
3697           next_block_boundary.ToInstructionIndex());
3698 
3699       // Store the current spill state (as the state at the end of the
3700       // block). For simplicity, we store the active ranges, i.e., the live
3701       // ranges that are not spilled.
3702       data()->RememberSpillState(last_block, active_live_ranges());
3703 
3704       // Only reset the state if this was not a direct fallthrough. Otherwise
3705       // control flow resolution will get confused (it does not expect changes
3706       // across fallthrough edges).
3707       bool fallthrough =
3708           (current_block->PredecessorCount() == 1) &&
3709           current_block->predecessors()[0].IsNext(current_block->rpo_number());
3710 
3711       // When crossing a deferred/non-deferred boundary, we have to load or
3712       // remove the deferred fixed ranges from inactive.
3713       if ((spill_mode == SpillMode::kSpillDeferred) !=
3714           current_block->IsDeferred()) {
3715         // Update spill mode.
3716         spill_mode = current_block->IsDeferred()
3717                          ? SpillMode::kSpillDeferred
3718                          : SpillMode::kSpillAtDefinition;
3719 
3720         ForwardStateTo(next_block_boundary);
3721 
3722 #ifdef DEBUG
3723         // Allow allocation at current position.
3724         allocation_finger_ = next_block_boundary;
3725 #endif
3726         UpdateDeferredFixedRanges(spill_mode, current_block);
3727       }
3728 
3729       // Allocation relies on the fact that each non-deferred block has at
3730       // least one non-deferred predecessor. Check this invariant here.
3731       DCHECK_IMPLIES(!current_block->IsDeferred(),
3732                      HasNonDeferredPredecessor(current_block));
3733 
3734       if (!fallthrough) {
3735 #ifdef DEBUG
3736         // Allow allocation at current position.
3737         allocation_finger_ = next_block_boundary;
3738 #endif
3739 
3740         // We are currently at next_block_boundary - 1. Move the state to the
3741         // actual block boundary position. In particular, we have to
3742         // reactivate inactive ranges so that they get rescheduled for
3743         // allocation if they were not live at the predecessors.
3744         ForwardStateTo(next_block_boundary);
3745 
3746         RangeWithRegisterSet to_be_live(data()->allocation_zone());
3747 
3748         // If we end up deciding to use the state of the immediate
3749         // predecessor, it is better not to perform a change. It would lead to
3750         // the same outcome anyway.
3751         // This may never happen on boundaries between deferred and
3752         // non-deferred code, as we rely on explicit respill to ensure we
3753         // spill at definition.
3754         bool no_change_required = false;
3755 
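        // pick_state_from copies the spill state remembered for the chosen
        // predecessor into to_be_live and returns whether that predecessor is
        // the immediate fallthrough block (in which case nothing changes).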
3756         auto pick_state_from = [this, current_block](
3757                                    RpoNumber pred,
3758                                    RangeWithRegisterSet* to_be_live) -> bool {
3759           TRACE("Using information from B%d\n", pred.ToInt());
3760           // If this is a fall-through that is not across a deferred
3761           // boundary, there is nothing to do.
3762           bool is_noop = pred.IsNext(current_block->rpo_number());
3763           if (!is_noop) {
3764             auto& spill_state = data()->GetSpillState(pred);
3765             TRACE("Not a fallthrough. Adding %zu elements...\n",
3766                   spill_state.size());
3767             LifetimePosition pred_end =
3768                 LifetimePosition::GapFromInstructionIndex(
3769                     this->code()->InstructionBlockAt(pred)->code_end());
3770             for (const auto range : spill_state) {
3771               // Filter out ranges that were split or had their register
3772               // stolen by backwards working spill heuristics. These have
3773               // been spilled after the fact, so ignore them.
3774               if (range->End() < pred_end || !range->HasRegisterAssigned())
3775                 continue;
3776               to_be_live->emplace(range);
3777             }
3778           }
3779           return is_noop;
3780         };
3781 
3782         // Multiple cases here:
3783         // 1) We have a single predecessor => this is a control flow split, so
3784         //     just restore the predecessor state.
3785         // 2) We have two predecessors => this is a conditional, so decide
3786         //     what to do based on forward uses, trying to benefit the same
3787         //     branch if in doubt (make one path fast).
3788         // 3) We have many predecessors => this is a switch. Compute union
3789         //     based on majority, break ties by looking forward.
3790         if (current_block->PredecessorCount() == 1) {
3791           TRACE("Single predecessor for B%d\n",
3792                 current_block->rpo_number().ToInt());
3793           no_change_required =
3794               pick_state_from(current_block->predecessors()[0], &to_be_live);
3795         } else if (current_block->PredecessorCount() == 2) {
3796           TRACE("Two predecessors for B%d\n",
3797                 current_block->rpo_number().ToInt());
3798           // If one of the branches does not contribute any information,
3799           // e.g. because it is deferred or a back edge, we can take a
3800           // shortcut here right away.
3801           RpoNumber chosen_predecessor = RpoNumber::Invalid();
3802           if (!ConsiderBlockForControlFlow(current_block,
3803                                            current_block->predecessors()[0])) {
3804             chosen_predecessor = current_block->predecessors()[1];
3805           } else if (!ConsiderBlockForControlFlow(
3806                          current_block, current_block->predecessors()[1])) {
3807             chosen_predecessor = current_block->predecessors()[0];
3808           } else {
3809             chosen_predecessor = ChooseOneOfTwoPredecessorStates(
3810                 current_block, next_block_boundary);
3811           }
3812           no_change_required = pick_state_from(chosen_predecessor, &to_be_live);
3813 
3814         } else {
3815           // Merge at the end of, e.g., a switch.
3816           ComputeStateFromManyPredecessors(current_block, &to_be_live);
3817         }
3818 
3819         if (!no_change_required) {
3820           SpillNotLiveRanges(&to_be_live, next_block_boundary, spill_mode);
3821           ReloadLiveRanges(to_be_live, next_block_boundary);
3822         }
3823       }
3824       // Update block information
3825       last_block = current_block->rpo_number();
3826       next_block_boundary = LifetimePosition::InstructionFromInstructionIndex(
3827                                 current_block->last_instruction_index())
3828                                 .NextFullStart();
3829 
3830       // We might have created new unhandled live ranges, so cycle around the
3831       // loop to make sure we pick the top most range in unhandled for
3832       // processing.
3833       continue;
3834     }
3835 
3836     DCHECK_NOT_NULL(current);
3837 
3838     TRACE("Processing interval %d:%d start=%d\n", current->TopLevel()->vreg(),
3839           current->relative_id(), position.value());
3840 
3841     // Now we can erase current, as we are sure to process it.
3842     unhandled_live_ranges().erase(unhandled_live_ranges().begin());
3843 
3844     if (current->IsTopLevel() && TryReuseSpillForPhi(current->TopLevel()))
3845       continue;
3846 
3847     ForwardStateTo(position);
3848 
3849     DCHECK(!current->HasRegisterAssigned() && !current->spilled());
3850 
3851     ProcessCurrentRange(current, spill_mode);
3852   }
3853 
3854   if (data()->is_trace_alloc()) {
3855     PrintRangeOverview();
3856   }
3857 }
3858 
3859 void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
3860                                                        int reg) {
3861   data()->MarkAllocated(range->representation(), reg);
3862   range->set_assigned_register(reg);
3863   range->SetUseHints(reg);
3864   range->UpdateBundleRegister(reg);
3865   if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
3866     data()->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg);
3867   }
3868 }
3869 
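// The helpers below keep next_active_ranges_change_ and
// next_inactive_ranges_change_ up to date; ForwardStateTo only rescans the
// active and inactive sets once the allocation position reaches one of these
// cached positions.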
3870 void LinearScanAllocator::AddToActive(LiveRange* range) {
3871   TRACE("Add live range %d:%d in %s to active\n", range->TopLevel()->vreg(),
3872         range->relative_id(), RegisterName(range->assigned_register()));
3873   active_live_ranges().push_back(range);
3874   next_active_ranges_change_ =
3875       std::min(next_active_ranges_change_, range->NextEndAfter(range->Start()));
3876 }
3877 
3878 void LinearScanAllocator::AddToInactive(LiveRange* range) {
3879   TRACE("Add live range %d:%d to inactive\n", range->TopLevel()->vreg(),
3880         range->relative_id());
3881   next_inactive_ranges_change_ = std::min(
3882       next_inactive_ranges_change_, range->NextStartAfter(range->Start()));
3883   DCHECK(range->HasRegisterAssigned());
3884   inactive_live_ranges(range->assigned_register()).insert(range);
3885 }
3886 
3887 void LinearScanAllocator::AddToUnhandled(LiveRange* range) {
3888   if (range == nullptr || range->IsEmpty()) return;
3889   DCHECK(!range->HasRegisterAssigned() && !range->spilled());
3890   DCHECK(allocation_finger_ <= range->Start());
3891 
3892   TRACE("Add live range %d:%d to unhandled\n", range->TopLevel()->vreg(),
3893         range->relative_id());
3894   unhandled_live_ranges().insert(range);
3895 }
3896 
3897 ZoneVector<LiveRange*>::iterator LinearScanAllocator::ActiveToHandled(
3898     const ZoneVector<LiveRange*>::iterator it) {
3899   TRACE("Moving live range %d:%d from active to handled\n",
3900         (*it)->TopLevel()->vreg(), (*it)->relative_id());
3901   return active_live_ranges().erase(it);
3902 }
3903 
3904 ZoneVector<LiveRange*>::iterator LinearScanAllocator::ActiveToInactive(
3905     const ZoneVector<LiveRange*>::iterator it, LifetimePosition position) {
3906   LiveRange* range = *it;
3907   TRACE("Moving live range %d:%d from active to inactive\n",
3908         (range)->TopLevel()->vreg(), range->relative_id());
3909   LifetimePosition next_active = range->NextStartAfter(position);
3910   next_inactive_ranges_change_ =
3911       std::min(next_inactive_ranges_change_, next_active);
3912   DCHECK(range->HasRegisterAssigned());
3913   inactive_live_ranges(range->assigned_register()).insert(range);
3914   return active_live_ranges().erase(it);
3915 }
3916 
3917 LinearScanAllocator::InactiveLiveRangeQueue::iterator
3918 LinearScanAllocator::InactiveToHandled(InactiveLiveRangeQueue::iterator it) {
3919   LiveRange* range = *it;
3920   TRACE("Moving live range %d:%d from inactive to handled\n",
3921         range->TopLevel()->vreg(), range->relative_id());
3922   int reg = range->assigned_register();
3923   return inactive_live_ranges(reg).erase(it);
3924 }
3925 
3926 LinearScanAllocator::InactiveLiveRangeQueue::iterator
3927 LinearScanAllocator::InactiveToActive(InactiveLiveRangeQueue::iterator it,
3928                                       LifetimePosition position) {
3929   LiveRange* range = *it;
3930   active_live_ranges().push_back(range);
3931   TRACE("Moving live range %d:%d from inactive to active\n",
3932         range->TopLevel()->vreg(), range->relative_id());
3933   next_active_ranges_change_ =
3934       std::min(next_active_ranges_change_, range->NextEndAfter(position));
3935   int reg = range->assigned_register();
3936   return inactive_live_ranges(reg).erase(it);
3937 }
3938 
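// Advance the allocator state to `position`: retire active and inactive
// ranges that have ended, move active ranges that no longer cover the
// position to inactive, and activate inactive ranges that now cover it.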
3939 void LinearScanAllocator::ForwardStateTo(LifetimePosition position) {
3940   if (position >= next_active_ranges_change_) {
3941     next_active_ranges_change_ = LifetimePosition::MaxPosition();
3942     for (auto it = active_live_ranges().begin();
3943          it != active_live_ranges().end();) {
3944       LiveRange* cur_active = *it;
3945       if (cur_active->End() <= position) {
3946         it = ActiveToHandled(it);
3947       } else if (!cur_active->Covers(position)) {
3948         it = ActiveToInactive(it, position);
3949       } else {
3950         next_active_ranges_change_ = std::min(
3951             next_active_ranges_change_, cur_active->NextEndAfter(position));
3952         ++it;
3953       }
3954     }
3955   }
3956 
3957   if (position >= next_inactive_ranges_change_) {
3958     next_inactive_ranges_change_ = LifetimePosition::MaxPosition();
3959     for (int reg = 0; reg < num_registers(); ++reg) {
3960       ZoneVector<LiveRange*> reorder(data()->allocation_zone());
3961       for (auto it = inactive_live_ranges(reg).begin();
3962            it != inactive_live_ranges(reg).end();) {
3963         LiveRange* cur_inactive = *it;
3964         if (cur_inactive->End() <= position) {
3965           it = InactiveToHandled(it);
3966         } else if (cur_inactive->Covers(position)) {
3967           it = InactiveToActive(it, position);
3968         } else {
3969           next_inactive_ranges_change_ =
3970               std::min(next_inactive_ranges_change_,
3971                        cur_inactive->NextStartAfter(position));
3972           it = inactive_live_ranges(reg).erase(it);
3973           reorder.push_back(cur_inactive);
3974         }
3975       }
3976       for (LiveRange* range : reorder) {
3977         inactive_live_ranges(reg).insert(range);
3978       }
3979     }
3980   }
3981 }
3982 
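// Returns the last instruction index of the run of consecutive deferred
// blocks that starts at `start`.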
3983 int LinearScanAllocator::LastDeferredInstructionIndex(InstructionBlock* start) {
3984   DCHECK(start->IsDeferred());
3985   RpoNumber last_block =
3986       RpoNumber::FromInt(code()->InstructionBlockCount() - 1);
3987   while ((start->rpo_number() < last_block)) {
3988     InstructionBlock* next =
3989         code()->InstructionBlockAt(start->rpo_number().Next());
3990     if (!next->IsDeferred()) break;
3991     start = next;
3992   }
3993   return start->last_instruction_index();
3994 }
3995 
3996 void LinearScanAllocator::GetFPRegisterSet(MachineRepresentation rep,
3997                                            int* num_regs, int* num_codes,
3998                                            const int** codes) const {
3999   DCHECK_EQ(kFPAliasing, AliasingKind::kCombine);
4000   if (rep == MachineRepresentation::kFloat32) {
4001     *num_regs = data()->config()->num_float_registers();
4002     *num_codes = data()->config()->num_allocatable_float_registers();
4003     *codes = data()->config()->allocatable_float_codes();
4004   } else if (rep == MachineRepresentation::kSimd128) {
4005     *num_regs = data()->config()->num_simd128_registers();
4006     *num_codes = data()->config()->num_allocatable_simd128_registers();
4007     *codes = data()->config()->allocatable_simd128_codes();
4008   } else {
4009     UNREACHABLE();
4010   }
4011 }
4012 
4013 void LinearScanAllocator::GetSIMD128RegisterSet(int* num_regs, int* num_codes,
4014                                                 const int** codes) const {
4015   DCHECK_EQ(kFPAliasing, AliasingKind::kIndependent);
4016 
4017   *num_regs = data()->config()->num_simd128_registers();
4018   *num_codes = data()->config()->num_allocatable_simd128_registers();
4019   *codes = data()->config()->allocatable_simd128_codes();
4020 }
4021 
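// For every allocatable register of the relevant kind, compute how long it
// stays free with respect to the active and inactive ranges and store the
// result in `positions`, indexed by register code.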
4022 void LinearScanAllocator::FindFreeRegistersForRange(
4023     LiveRange* range, base::Vector<LifetimePosition> positions) {
4024   int num_regs = num_registers();
4025   int num_codes = num_allocatable_registers();
4026   const int* codes = allocatable_register_codes();
4027   MachineRepresentation rep = range->representation();
4028   if (kFPAliasing == AliasingKind::kCombine &&
4029       (rep == MachineRepresentation::kFloat32 ||
4030        rep == MachineRepresentation::kSimd128)) {
4031     GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
4032   } else if (kFPAliasing == AliasingKind::kIndependent &&
4033              (rep == MachineRepresentation::kSimd128)) {
4034     GetSIMD128RegisterSet(&num_regs, &num_codes, &codes);
4035   }
4036   DCHECK_GE(positions.length(), num_regs);
4037 
4038   for (int i = 0; i < num_regs; ++i) {
4039     positions[i] = LifetimePosition::MaxPosition();
4040   }
4041 
4042   for (LiveRange* cur_active : active_live_ranges()) {
4043     int cur_reg = cur_active->assigned_register();
4044     if (kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) {
4045       positions[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
4046       TRACE("Register %s is free until pos %d (1) due to %d\n",
4047             RegisterName(cur_reg),
4048             LifetimePosition::GapFromInstructionIndex(0).value(),
4049             cur_active->TopLevel()->vreg());
4050     } else {
4051       int alias_base_index = -1;
4052       int aliases = data()->config()->GetAliases(
4053           cur_active->representation(), cur_reg, rep, &alias_base_index);
4054       DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
4055       while (aliases--) {
4056         int aliased_reg = alias_base_index + aliases;
4057         positions[aliased_reg] = LifetimePosition::GapFromInstructionIndex(0);
4058       }
4059     }
4060   }
4061 
4062   for (int cur_reg = 0; cur_reg < num_regs; ++cur_reg) {
4063     for (LiveRange* cur_inactive : inactive_live_ranges(cur_reg)) {
4064       DCHECK_GT(cur_inactive->End(), range->Start());
4065       CHECK_EQ(cur_inactive->assigned_register(), cur_reg);
4066       // No need to carry out intersections when this register won't be
4067       // interesting to this range anyway.
4068       // TODO(mtrofin): extend to aliased ranges, too.
4069       if ((kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) &&
4070           (positions[cur_reg] <= cur_inactive->NextStart() ||
4071            range->End() <= cur_inactive->NextStart())) {
4072         break;
4073       }
4074       LifetimePosition next_intersection =
4075           cur_inactive->FirstIntersection(range);
4076       if (!next_intersection.IsValid()) continue;
4077       if (kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) {
4078         positions[cur_reg] = std::min(positions[cur_reg], next_intersection);
4079         TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
4080               positions[cur_reg].value());
4081       } else {
4082         int alias_base_index = -1;
4083         int aliases = data()->config()->GetAliases(
4084             cur_inactive->representation(), cur_reg, rep, &alias_base_index);
4085         DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
4086         while (aliases--) {
4087           int aliased_reg = alias_base_index + aliases;
4088           positions[aliased_reg] =
4089               std::min(positions[aliased_reg], next_intersection);
4090         }
4091       }
4092     }
4093   }
4094 }
4095 
4096 // High-level register allocation summary:
4097 //
4098 // We attempt to first allocate the preferred (hint) register. If that is not
4099 // possible, we find a register that's free, and allocate that. If that's not
4100 // possible, we search for a register to steal from a range that was allocated.
4101 // The goal is to optimize for throughput by avoiding register-to-memory moves,
4102 // which are expensive.
4103 void LinearScanAllocator::ProcessCurrentRange(LiveRange* current,
4104                                               SpillMode spill_mode) {
4105   base::EmbeddedVector<LifetimePosition, RegisterConfiguration::kMaxRegisters>
4106       free_until_pos;
4107   FindFreeRegistersForRange(current, free_until_pos);
4108   if (!TryAllocatePreferredReg(current, free_until_pos)) {
4109     if (!TryAllocateFreeReg(current, free_until_pos)) {
4110       AllocateBlockedReg(current, spill_mode);
4111     }
4112   }
4113   if (current->HasRegisterAssigned()) {
4114     AddToActive(current);
4115   }
4116 }
4117 
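// Try to honor a register hint (from control flow, a hinted use position, or
// the range's bundle). Succeeds only if the hinted register stays free until
// the end of `current`.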
4118 bool LinearScanAllocator::TryAllocatePreferredReg(
4119     LiveRange* current, const base::Vector<LifetimePosition>& free_until_pos) {
4120   int hint_register;
4121   if (current->RegisterFromControlFlow(&hint_register) ||
4122       current->FirstHintPosition(&hint_register) != nullptr ||
4123       current->RegisterFromBundle(&hint_register)) {
4124     TRACE(
4125         "Found reg hint %s (free until [%d) for live range %d:%d (end %d[).\n",
4126         RegisterName(hint_register), free_until_pos[hint_register].value(),
4127         current->TopLevel()->vreg(), current->relative_id(),
4128         current->End().value());
4129 
4130     // The desired register is free until the end of the current live range.
4131     if (free_until_pos[hint_register] >= current->End()) {
4132       TRACE("Assigning preferred reg %s to live range %d:%d\n",
4133             RegisterName(hint_register), current->TopLevel()->vreg(),
4134             current->relative_id());
4135       SetLiveRangeAssignedRegister(current, hint_register);
4136       return true;
4137     }
4138   }
4139   return false;
4140 }
4141 
4142 int LinearScanAllocator::PickRegisterThatIsAvailableLongest(
4143     LiveRange* current, int hint_reg,
4144     const base::Vector<LifetimePosition>& free_until_pos) {
4145   int num_regs = 0;  // used only for the call to GetFPRegisterSet.
4146   int num_codes = num_allocatable_registers();
4147   const int* codes = allocatable_register_codes();
4148   MachineRepresentation rep = current->representation();
4149   if (kFPAliasing == AliasingKind::kCombine &&
4150       (rep == MachineRepresentation::kFloat32 ||
4151        rep == MachineRepresentation::kSimd128)) {
4152     GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
4153   } else if (kFPAliasing == AliasingKind::kIndependent &&
4154              (rep == MachineRepresentation::kSimd128)) {
4155     GetSIMD128RegisterSet(&num_regs, &num_codes, &codes);
4156   }
4157 
4158   DCHECK_GE(free_until_pos.length(), num_codes);
4159 
4160   // Find the register which stays free for the longest time. Check for
4161   // the hinted register first, as we might want to use that one. Only
4162   // count full instructions for free ranges, as an instruction's internal
4163   // positions do not help but might shadow a hinted register. This is
4164   // typically the case for function calls, where all registers are
4165   // clobbered after the call except for the argument registers, which are
4166   // set before the call. Hence, the argument registers always get ignored,
4167   // as their available time is shorter.
4168   int reg = (hint_reg == kUnassignedRegister) ? codes[0] : hint_reg;
4169   int current_free = free_until_pos[reg].ToInstructionIndex();
4170   for (int i = 0; i < num_codes; ++i) {
4171     int code = codes[i];
4172     // Prefer registers that have no fixed uses to avoid blocking later hints.
4173     // We use the first register that has no fixed uses to ensure we use
4174     // byte addressable registers in ia32 first.
4175     int candidate_free = free_until_pos[code].ToInstructionIndex();
4176     TRACE("Register %s in free until %d\n", RegisterName(code), candidate_free);
4177     if ((candidate_free > current_free) ||
4178         (candidate_free == current_free && reg != hint_reg &&
4179          (data()->HasFixedUse(current->representation(), reg) &&
4180           !data()->HasFixedUse(current->representation(), code)))) {
4181       reg = code;
4182       current_free = candidate_free;
4183     }
4184   }
4185 
4186   return reg;
4187 }
4188 
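// Try to place `current` in a currently free register. If the best register
// is only free for a prefix of the range, split the range at the last gap
// position before the register becomes blocked and allocate the head part.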
4189 bool LinearScanAllocator::TryAllocateFreeReg(
4190     LiveRange* current, const base::Vector<LifetimePosition>& free_until_pos) {
4191   // Compute register hint, if such exists.
4192   int hint_reg = kUnassignedRegister;
4193   current->RegisterFromControlFlow(&hint_reg) ||
4194       current->FirstHintPosition(&hint_reg) != nullptr ||
4195       current->RegisterFromBundle(&hint_reg);
4196 
4197   int reg =
4198       PickRegisterThatIsAvailableLongest(current, hint_reg, free_until_pos);
4199 
4200   LifetimePosition pos = free_until_pos[reg];
4201 
4202   if (pos <= current->Start()) {
4203     // All registers are blocked.
4204     return false;
4205   }
4206 
4207   if (pos < current->End()) {
4208     // Register reg is available at the range start but becomes blocked before
4209     // the range end. Split current before the position where it becomes
4210     // blocked. Shift the split position to the last gap position. This is to
4211     // ensure that if a connecting move is needed, that move coincides with the
4212     // start of the range that it defines. See crbug.com/1182985.
4213     LifetimePosition gap_pos =
4214         pos.IsGapPosition() ? pos : pos.FullStart().End();
4215     if (gap_pos <= current->Start()) return false;
4216     LiveRange* tail = SplitRangeAt(current, gap_pos);
4217     AddToUnhandled(tail);
4218 
4219     // Try to allocate preferred register once more.
4220     if (TryAllocatePreferredReg(current, free_until_pos)) return true;
4221   }
4222 
4223   // Register reg is available at the range start and is free until the range
4224   // end.
4225   DCHECK_GE(pos, current->End());
4226   TRACE("Assigning free reg %s to live range %d:%d\n", RegisterName(reg),
4227         current->TopLevel()->vreg(), current->relative_id());
4228   SetLiveRangeAssignedRegister(current, reg);
4229 
4230   return true;
4231 }
4232 
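// No register is free for all of `current`. Either spill `current` (entirely
// if it has no register use, or up to its next register use), or take the
// register that is usable for the longest time and evict the conflicting
// ranges via SplitAndSpillIntersecting.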
4233 void LinearScanAllocator::AllocateBlockedReg(LiveRange* current,
4234                                              SpillMode spill_mode) {
4235   UsePosition* register_use = current->NextRegisterPosition(current->Start());
4236   if (register_use == nullptr) {
4237     // There is no use in the current live range that requires a register.
4238     // We can just spill it.
4239     LiveRange* begin_spill = nullptr;
4240     LifetimePosition spill_pos = FindOptimalSpillingPos(
4241         current, current->Start(), spill_mode, &begin_spill);
4242     MaybeSpillPreviousRanges(begin_spill, spill_pos, current);
4243     Spill(current, spill_mode);
4244     return;
4245   }
4246 
4247   MachineRepresentation rep = current->representation();
4248 
4249   // use_pos keeps track of positions a register/alias is used at.
4250   // block_pos keeps track of positions where a register/alias is blocked
4251   // from.
4252   base::EmbeddedVector<LifetimePosition, RegisterConfiguration::kMaxRegisters>
4253       use_pos(LifetimePosition::MaxPosition());
4254   base::EmbeddedVector<LifetimePosition, RegisterConfiguration::kMaxRegisters>
4255       block_pos(LifetimePosition::MaxPosition());
4256 
4257   for (LiveRange* range : active_live_ranges()) {
4258     int cur_reg = range->assigned_register();
4259     bool is_fixed_or_cant_spill =
4260         range->TopLevel()->IsFixed() || !range->CanBeSpilled(current->Start());
4261     if (kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) {
4262       if (is_fixed_or_cant_spill) {
4263         block_pos[cur_reg] = use_pos[cur_reg] =
4264             LifetimePosition::GapFromInstructionIndex(0);
4265       } else {
4266         DCHECK_NE(LifetimePosition::GapFromInstructionIndex(0),
4267                   block_pos[cur_reg]);
4268         use_pos[cur_reg] =
4269             range->NextLifetimePositionRegisterIsBeneficial(current->Start());
4270       }
4271     } else {
4272       int alias_base_index = -1;
4273       int aliases = data()->config()->GetAliases(
4274           range->representation(), cur_reg, rep, &alias_base_index);
4275       DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
4276       while (aliases--) {
4277         int aliased_reg = alias_base_index + aliases;
4278         if (is_fixed_or_cant_spill) {
4279           block_pos[aliased_reg] = use_pos[aliased_reg] =
4280               LifetimePosition::GapFromInstructionIndex(0);
4281         } else {
4282           use_pos[aliased_reg] =
4283               std::min(block_pos[aliased_reg],
4284                        range->NextLifetimePositionRegisterIsBeneficial(
4285                            current->Start()));
4286         }
4287       }
4288     }
4289   }
4290 
4291   for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) {
4292     for (LiveRange* range : inactive_live_ranges(cur_reg)) {
4293       DCHECK(range->End() > current->Start());
4294       DCHECK_EQ(range->assigned_register(), cur_reg);
4295       bool is_fixed = range->TopLevel()->IsFixed();
4296 
4297       // Don't perform costly intersections if they are guaranteed to not update
4298       // block_pos or use_pos.
4299       // TODO(mtrofin): extend to aliased ranges, too.
4300       if ((kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing())) {
4301         DCHECK_LE(use_pos[cur_reg], block_pos[cur_reg]);
4302         if (block_pos[cur_reg] <= range->NextStart()) break;
4303         if (!is_fixed && use_pos[cur_reg] <= range->NextStart()) continue;
4304       }
4305 
4306       LifetimePosition next_intersection = range->FirstIntersection(current);
4307       if (!next_intersection.IsValid()) continue;
4308 
4309       if (kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) {
4310         if (is_fixed) {
4311           block_pos[cur_reg] = std::min(block_pos[cur_reg], next_intersection);
4312           use_pos[cur_reg] = std::min(block_pos[cur_reg], use_pos[cur_reg]);
4313         } else {
4314           use_pos[cur_reg] = std::min(use_pos[cur_reg], next_intersection);
4315         }
4316       } else {
4317         int alias_base_index = -1;
4318         int aliases = data()->config()->GetAliases(
4319             range->representation(), cur_reg, rep, &alias_base_index);
4320         DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
4321         while (aliases--) {
4322           int aliased_reg = alias_base_index + aliases;
4323           if (is_fixed) {
4324             block_pos[aliased_reg] =
4325                 std::min(block_pos[aliased_reg], next_intersection);
4326             use_pos[aliased_reg] =
4327                 std::min(block_pos[aliased_reg], use_pos[aliased_reg]);
4328           } else {
4329             use_pos[aliased_reg] =
4330                 std::min(use_pos[aliased_reg], next_intersection);
4331           }
4332         }
4333       }
4334     }
4335   }
4336 
4337   // Compute register hint if it exists.
4338   int hint_reg = kUnassignedRegister;
4339   current->RegisterFromControlFlow(&hint_reg) ||
4340       register_use->HintRegister(&hint_reg) ||
4341       current->RegisterFromBundle(&hint_reg);
4342   int reg = PickRegisterThatIsAvailableLongest(current, hint_reg, use_pos);
4343 
4344   if (use_pos[reg] < register_use->pos()) {
4345     // If there is a gap position before the next register use, we can
4346     // spill until there. The gap position will then fit the fill move.
4347     if (LifetimePosition::ExistsGapPositionBetween(current->Start(),
4348                                                    register_use->pos())) {
4349       SpillBetween(current, current->Start(), register_use->pos(), spill_mode);
4350       return;
4351     }
4352   }
4353 
4354   // When in deferred spilling mode avoid stealing registers beyond the current
4355   // deferred region. This is required as we otherwise might spill an inactive
4356   // range with a start outside of deferred code and that would not be reloaded.
4357   LifetimePosition new_end = current->End();
4358   if (spill_mode == SpillMode::kSpillDeferred) {
4359     InstructionBlock* deferred_block =
4360         code()->GetInstructionBlock(current->Start().ToInstructionIndex());
4361     new_end =
4362         std::min(new_end, LifetimePosition::GapFromInstructionIndex(
4363                               LastDeferredInstructionIndex(deferred_block)));
4364   }
4365 
4366   // We couldn't spill until the next register use. Split before the register
4367   // is blocked, if applicable.
4368   if (block_pos[reg] < new_end) {
4369     // Register becomes blocked before the current range end. Split before that
4370     // position.
4371     new_end = block_pos[reg].Start();
4372   }
4373 
4374   // If there is no register available at all, we can only spill this range.
4375   // Happens for instance on entry to deferred code where registers might
4376   // become blocked yet we aim to reload ranges.
4377   if (new_end == current->Start()) {
4378     SpillBetween(current, new_end, register_use->pos(), spill_mode);
4379     return;
4380   }
4381 
4382   // Split at the new end if we found one.
4383   if (new_end != current->End()) {
4384     LiveRange* tail = SplitBetween(current, current->Start(), new_end);
4385     AddToUnhandled(tail);
4386   }
4387 
4388   // Register reg is not blocked for the whole range.
4389   DCHECK(block_pos[reg] >= current->End());
4390   TRACE("Assigning blocked reg %s to live range %d:%d\n", RegisterName(reg),
4391         current->TopLevel()->vreg(), current->relative_id());
4392   SetLiveRangeAssignedRegister(current, reg);
4393 
4394   // This register was not free. Thus we need to find and spill
4395   // parts of active and inactive live regions that use the same register
4396   // at the same lifetime positions as current.
4397   SplitAndSpillIntersecting(current, spill_mode);
4398 }
4399 
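// `current` has just been assigned a register that other active or inactive
// ranges still use; split those ranges and spill the parts that conflict.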
4400 void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current,
4401                                                     SpillMode spill_mode) {
4402   DCHECK(current->HasRegisterAssigned());
4403   int reg = current->assigned_register();
4404   LifetimePosition split_pos = current->Start();
4405   for (auto it = active_live_ranges().begin();
4406        it != active_live_ranges().end();) {
4407     LiveRange* range = *it;
4408     if (kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) {
4409       if (range->assigned_register() != reg) {
4410         ++it;
4411         continue;
4412       }
4413     } else {
4414       if (!data()->config()->AreAliases(current->representation(), reg,
4415                                         range->representation(),
4416                                         range->assigned_register())) {
4417         ++it;
4418         continue;
4419       }
4420     }
4421 
4422     UsePosition* next_pos = range->NextRegisterPosition(current->Start());
4423     LiveRange* begin_spill = nullptr;
4424     LifetimePosition spill_pos =
4425         FindOptimalSpillingPos(range, split_pos, spill_mode, &begin_spill);
4426     MaybeSpillPreviousRanges(begin_spill, spill_pos, range);
4427     if (next_pos == nullptr) {
4428       SpillAfter(range, spill_pos, spill_mode);
4429     } else {
4430       // When spilling between spill_pos and next_pos ensure that the range
4431       // remains spilled at least until the start of the current live range.
4432       // This guarantees that we will not introduce new unhandled ranges that
4433       // start before the current range as this violates allocation invariants
4434       // and will lead to an inconsistent state of active and inactive
4435       // live-ranges: ranges are allocated in order of their start positions,
4436       // ranges are retired from active/inactive when the start of the
4437       // current live-range is larger than their end.
4438       DCHECK(LifetimePosition::ExistsGapPositionBetween(current->Start(),
4439                                                         next_pos->pos()));
4440       SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos(),
4441                         spill_mode);
4442     }
4443     it = ActiveToHandled(it);
4444   }
4445 
4446   for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) {
4447     if (kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) {
4448       if (cur_reg != reg) continue;
4449     }
4450     for (auto it = inactive_live_ranges(cur_reg).begin();
4451          it != inactive_live_ranges(cur_reg).end();) {
4452       LiveRange* range = *it;
4453       if (kFPAliasing == AliasingKind::kCombine && check_fp_aliasing() &&
4454           !data()->config()->AreAliases(current->representation(), reg,
4455                                         range->representation(), cur_reg)) {
4456         ++it;
4457         continue;
4458       }
4459       DCHECK(range->End() > current->Start());
4460       if (range->TopLevel()->IsFixed()) {
4461         ++it;
4462         continue;
4463       }
4464 
4465       LifetimePosition next_intersection = range->FirstIntersection(current);
4466       if (next_intersection.IsValid()) {
4467         UsePosition* next_pos = range->NextRegisterPosition(current->Start());
4468         if (next_pos == nullptr) {
4469           SpillAfter(range, split_pos, spill_mode);
4470         } else {
4471           next_intersection = std::min(next_intersection, next_pos->pos());
4472           SpillBetween(range, split_pos, next_intersection, spill_mode);
4473         }
4474         it = InactiveToHandled(it);
4475       } else {
4476         ++it;
4477       }
4478     }
4479   }
4480 }
4481 
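// If more than half of a phi's inputs are spilled to the same slot (they
// share a bundle with the output), try to spill the phi output to that slot
// as well instead of keeping it in a register.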
4482 bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
4483   if (!range->is_phi()) return false;
4484 
4485   DCHECK(!range->HasSpillOperand());
4486   // Check how many operands belong to the same bundle as the output.
4487   LiveRangeBundle* out_bundle = range->get_bundle();
4488   TopTierRegisterAllocationData::PhiMapValue* phi_map_value =
4489       data()->GetPhiMapValueFor(range);
4490   const PhiInstruction* phi = phi_map_value->phi();
4491   const InstructionBlock* block = phi_map_value->block();
4492   // Count the number of spilled operands.
4493   size_t spilled_count = 0;
4494   for (size_t i = 0; i < phi->operands().size(); i++) {
4495     int op = phi->operands()[i];
4496     LiveRange* op_range = data()->GetOrCreateLiveRangeFor(op);
4497     if (!op_range->TopLevel()->HasSpillRange()) continue;
4498     const InstructionBlock* pred =
4499         code()->InstructionBlockAt(block->predecessors()[i]);
4500     LifetimePosition pred_end =
4501         LifetimePosition::InstructionFromInstructionIndex(
4502             pred->last_instruction_index());
4503     while (op_range != nullptr && !op_range->CanCover(pred_end)) {
4504       op_range = op_range->next();
4505     }
4506     if (op_range != nullptr && op_range->spilled() &&
4507         op_range->get_bundle() == out_bundle) {
4508       spilled_count++;
4509     }
4510   }
4511 
4512   // Only continue if more than half of the operands are spilled to the same
4513   // slot (because part of same bundle).
4514   if (spilled_count * 2 <= phi->operands().size()) {
4515     return false;
4516   }
4517 
4518   // If the range does not need register soon, spill it to the merged
4519   // spill range.
4520   LifetimePosition next_pos = range->Start();
4521   if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
4522   UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
4523   if (pos == nullptr) {
4524     Spill(range, SpillMode::kSpillAtDefinition);
4525     return true;
4526   } else if (pos->pos() > range->Start().NextStart()) {
4527     SpillBetween(range, range->Start(), pos->pos(),
4528                  SpillMode::kSpillAtDefinition);
4529     return true;
4530   }
4531   return false;
4532 }
4533 
4534 void LinearScanAllocator::SpillAfter(LiveRange* range, LifetimePosition pos,
4535                                      SpillMode spill_mode) {
4536   LiveRange* second_part = SplitRangeAt(range, pos);
4537   Spill(second_part, spill_mode);
4538 }
4539 
4540 void LinearScanAllocator::SpillBetween(LiveRange* range, LifetimePosition start,
4541                                        LifetimePosition end,
4542                                        SpillMode spill_mode) {
4543   SpillBetweenUntil(range, start, start, end, spill_mode);
4544 }
4545 
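// Spill the part of `range` that overlaps [start, end[, keeping it spilled at
// least until `until`; the remainder is re-added to unhandled so it can get a
// register for its later uses.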
4546 void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
4547                                             LifetimePosition start,
4548                                             LifetimePosition until,
4549                                             LifetimePosition end,
4550                                             SpillMode spill_mode) {
4551   CHECK(start < end);
4552   LiveRange* second_part = SplitRangeAt(range, start);
4553 
4554   if (second_part->Start() < end) {
4555     // The split result intersects with [start, end[.
4556     // Split it at a position in ]start+1, end[, spill the middle part,
4557     // and add the rest to unhandled.
4558 
4559     // Make sure that the third part always starts after the start of the
4560     // second part, as that likely is the current position of the register
4561     // allocator and we cannot add ranges to unhandled that start before
4562     // the current position.
4563     LifetimePosition split_start = std::max(second_part->Start().End(), until);
4564 
4565     // If end is an actual use (which it typically is) we have to split
4566     // so that there is a gap before so that we have space for moving the
4567     // value into its position.
4568     // However, if we have no choice, split right where asked.
4569     LifetimePosition third_part_end =
4570         std::max(split_start, end.PrevStart().End());
4571     // Instead of splitting right after or even before the block boundary,
4572     // split on the boundary to avoid extra moves.
4573     if (data()->IsBlockBoundary(end.Start())) {
4574       third_part_end = std::max(split_start, end.Start());
4575     }
4576 
4577     LiveRange* third_part =
4578         SplitBetween(second_part, split_start, third_part_end);
4579     if (GetInstructionBlock(data()->code(), second_part->Start())
4580             ->IsDeferred()) {
4581       // Try to use the same register as before.
4582       TRACE("Setting control flow hint for %d:%d to %s\n",
4583             third_part->TopLevel()->vreg(), third_part->relative_id(),
4584             RegisterName(range->controlflow_hint()));
4585       third_part->set_controlflow_hint(range->controlflow_hint());
4586     }
4587 
4588     AddToUnhandled(third_part);
4589     // This can happen, even if we checked for start < end above, as we fiddle
4590     // with the end location. However, we are guaranteed to be after or at
4591     // until, so this is fine.
4592     if (third_part != second_part) {
4593       Spill(second_part, spill_mode);
4594     }
4595   } else {
4596     // The split result does not intersect with [start, end[.
4597     // Nothing to spill. Just put it to unhandled as whole.
4598     AddToUnhandled(second_part);
4599   }
4600 }
4601 
4602 OperandAssigner::OperandAssigner(TopTierRegisterAllocationData* data)
4603     : data_(data) {}
4604 
4605 void OperandAssigner::DecideSpillingMode() {
4606   for (auto range : data()->live_ranges()) {
4607     data()->tick_counter()->TickAndMaybeEnterSafepoint();
4608     int max_blocks = data()->code()->InstructionBlockCount();
4609     if (range != nullptr && range->IsSpilledOnlyInDeferredBlocks(data())) {
4610       // If the range is spilled only in deferred blocks and starts in
4611       // a non-deferred block, we transition its representation here so
4612       // that the LiveRangeConnector processes it correctly. If,
4613       // however, it starts in a deferred block, we upgrade it to
4614       // spill at definition, as that definition is in a deferred block
4615       // anyway. While this is an optimization, the code in LiveRangeConnector
4616       // relies on it!
4617       if (GetInstructionBlock(data()->code(), range->Start())->IsDeferred()) {
4618         TRACE("Live range %d is spilled and alive in deferred code only\n",
4619               range->vreg());
4620         range->TransitionRangeToSpillAtDefinition();
4621       } else {
4622         TRACE("Live range %d is spilled deferred code only but alive outside\n",
4623               range->vreg());
4624         range->TransitionRangeToDeferredSpill(data()->allocation_zone(),
4625                                               max_blocks);
4626       }
4627     }
4628   }
4629 }
4630 
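// Merge spill ranges that can share a stack slot (first within bundles, then
// any non-overlapping pairs) and allocate a frame slot for each remaining
// non-empty spill range.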
4631 void OperandAssigner::AssignSpillSlots() {
4632   for (auto range : data()->live_ranges()) {
4633     data()->tick_counter()->TickAndMaybeEnterSafepoint();
4634     if (range != nullptr && range->get_bundle() != nullptr) {
4635       range->get_bundle()->MergeSpillRangesAndClear();
4636     }
4637   }
4638   ZoneVector<SpillRange*>& spill_ranges = data()->spill_ranges();
4639   // Merge disjoint spill ranges
4640   for (size_t i = 0; i < spill_ranges.size(); ++i) {
4641     data()->tick_counter()->TickAndMaybeEnterSafepoint();
4642     SpillRange* range = spill_ranges[i];
4643     if (range == nullptr) continue;
4644     if (range->IsEmpty()) continue;
4645     for (size_t j = i + 1; j < spill_ranges.size(); ++j) {
4646       SpillRange* other = spill_ranges[j];
4647       if (other != nullptr && !other->IsEmpty()) {
4648         range->TryMerge(other);
4649       }
4650     }
4651   }
4652   // Allocate slots for the merged spill ranges.
4653   for (SpillRange* range : spill_ranges) {
4654     data()->tick_counter()->TickAndMaybeEnterSafepoint();
4655     if (range == nullptr || range->IsEmpty()) continue;
4656     if (!range->HasSlot()) {
4657       // Allocate a new operand referring to the spill slot, aligned to the
4658       // operand size.
4659       int width = range->byte_width();
4660       int index = data()->frame()->AllocateSpillSlot(width, width);
4661       range->set_assigned_slot(index);
4662     }
4663   }
4664 }
4665 
4666 void OperandAssigner::CommitAssignment() {
4667   const size_t live_ranges_size = data()->live_ranges().size();
4668   for (TopLevelLiveRange* top_range : data()->live_ranges()) {
4669     data()->tick_counter()->TickAndMaybeEnterSafepoint();
4670     CHECK_EQ(live_ranges_size,
4671              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
4672     if (top_range == nullptr || top_range->IsEmpty()) continue;
4673     InstructionOperand spill_operand;
4674     if (top_range->HasSpillOperand()) {
4675       auto it = data()->slot_for_const_range().find(top_range);
4676       if (it != data()->slot_for_const_range().end()) {
4677         spill_operand = *it->second;
4678       } else {
4679         spill_operand = *top_range->GetSpillOperand();
4680       }
4681     } else if (top_range->HasSpillRange()) {
4682       spill_operand = top_range->GetSpillRangeOperand();
4683     }
4684     if (top_range->is_phi()) {
4685       data()->GetPhiMapValueFor(top_range)->CommitAssignment(
4686           top_range->GetAssignedOperand());
4687     }
4688     for (LiveRange* range = top_range; range != nullptr;
4689          range = range->next()) {
4690       InstructionOperand assigned = range->GetAssignedOperand();
4691       DCHECK(!assigned.IsUnallocated());
4692       range->ConvertUsesToOperand(assigned, spill_operand);
4693     }
4694 
4695     if (!spill_operand.IsInvalid()) {
4696       // If this top level range has a child spilled in a deferred block, we use
4697       // the range and control flow connection mechanism instead of spilling at
4698       // definition. Refer to the ConnectLiveRanges and ResolveControlFlow
4699       // phases. Normally, when we spill at definition, we do not insert a
4700       // connecting move when a successor child range is spilled - because the
4701       // spilled range picks up its value from the slot which was assigned at
4702       // definition. For ranges that are determined to spill only in deferred
4703       // blocks, we let ConnectLiveRanges and ResolveControlFlow find the blocks
4704       // where a spill operand is expected, and then finalize by inserting the
4705       // spills in the deferred blocks' dominators.
4706       if (!top_range->IsSpilledOnlyInDeferredBlocks(data()) &&
4707           !top_range->HasGeneralSpillRange()) {
4708         // Spill at definition if the range isn't spilled in a way that will be
4709         // handled later.
4710         top_range->FilterSpillMoves(data(), spill_operand);
4711         top_range->CommitSpillMoves(data(), spill_operand);
4712       }
4713     }
4714   }
4715 }
4716 
4717 ReferenceMapPopulator::ReferenceMapPopulator(
4718     TopTierRegisterAllocationData* data)
4719     : data_(data) {}
4720 
4721 bool ReferenceMapPopulator::SafePointsAreInOrder() const {
4722   int safe_point = 0;
4723   for (ReferenceMap* map : *data()->code()->reference_maps()) {
4724     if (safe_point > map->instruction_position()) return false;
4725     safe_point = map->instruction_position();
4726   }
4727   return true;
4728 }
4729 
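// For every safe point, record which stack slots and registers hold tagged
// pointers so the GC can visit them.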
4730 void ReferenceMapPopulator::PopulateReferenceMaps() {
4731   DCHECK(SafePointsAreInOrder());
4732   // Map all delayed references.
4733   for (TopTierRegisterAllocationData::DelayedReference& delayed_reference :
4734        data()->delayed_references()) {
4735     delayed_reference.map->RecordReference(
4736         AllocatedOperand::cast(*delayed_reference.operand));
4737   }
4738   // Iterate over all safe point positions and record a pointer
4739   // for all spilled live ranges at this point.
4740   int last_range_start = 0;
4741   const ReferenceMapDeque* reference_maps = data()->code()->reference_maps();
4742   ReferenceMapDeque::const_iterator first_it = reference_maps->begin();
4743   const size_t live_ranges_size = data()->live_ranges().size();
4744   // We break the invariant that live ranges are indexed by their vregs here.
4745   // This is ok because we don't use that invariant here, and this is the last
4746   // phase.
4747   std::sort(data()->live_ranges().begin(), data()->live_ranges().end(),
4748             [](TopLevelLiveRange* a, TopLevelLiveRange* b) {
4749               if (!a || a->IsEmpty()) return false;
4750               if (!b || b->IsEmpty()) return true;
4751               return a->Start() < b->Start();
4752             });
4753   for (TopLevelLiveRange* range : data()->live_ranges()) {
4754     CHECK_EQ(live_ranges_size,
4755              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
4756     if (range == nullptr) continue;
4757     // Skip non-reference values.
4758     if (!data()->code()->IsReference(range->vreg())) continue;
4759     // Skip empty live ranges.
4760     if (range->IsEmpty()) continue;
4761     if (range->has_preassigned_slot()) continue;
4762 
4763     // Find the extent of the range and its children.
4764     int start = range->Start().ToInstructionIndex();
4765     int end = 0;
4766     for (LiveRange* cur = range; cur != nullptr; cur = cur->next()) {
4767       LifetimePosition this_end = cur->End();
4768       if (this_end.ToInstructionIndex() > end)
4769         end = this_end.ToInstructionIndex();
4770       DCHECK(cur->Start().ToInstructionIndex() >= start);
4771     }
4772 
4773     // Ranges should be sorted, so that the first reference map in the current
4774     // live range has to be after {first_it}.
4775     DCHECK_LE(last_range_start, start);
4776     last_range_start = start;
4777     USE(last_range_start);
4778 
4779     // Step across all the safe points that are before the start of this range,
4780     // recording how far we step in order to save doing this for the next range.
4781     for (; first_it != reference_maps->end(); ++first_it) {
4782       ReferenceMap* map = *first_it;
4783       if (map->instruction_position() >= start) break;
4784     }
4785 
4786     InstructionOperand spill_operand;
4787     if (((range->HasSpillOperand() &&
4788           !range->GetSpillOperand()->IsConstant()) ||
4789          range->HasSpillRange())) {
4790       if (range->HasSpillOperand()) {
4791         spill_operand = *range->GetSpillOperand();
4792       } else {
4793         spill_operand = range->GetSpillRangeOperand();
4794       }
4795       DCHECK(spill_operand.IsStackSlot());
4796       DCHECK(CanBeTaggedOrCompressedPointer(
4797           AllocatedOperand::cast(spill_operand).representation()));
4798     }
4799 
4800     LiveRange* cur = range;
4801     // Step through the safe points to see whether they are in the range.
4802     for (auto it = first_it; it != reference_maps->end(); ++it) {
4803       ReferenceMap* map = *it;
4804       int safe_point = map->instruction_position();
4805 
4806       // The safe points are sorted so we can stop searching here.
4807       if (safe_point - 1 > end) break;
4808 
4809       // Advance to the next active range that covers the current
4810       // safe point position.
4811       LifetimePosition safe_point_pos =
4812           LifetimePosition::InstructionFromInstructionIndex(safe_point);
4813 
4814       // Search for the child range (cur) that covers safe_point_pos. If we
4815       // don't find it before the children pass safe_point_pos, keep cur at
4816       // the last child, because the next safe_point_pos may be covered by cur.
4817       // This may happen if cur has more than one interval, and the current
4818       // safe_point_pos is in between intervals.
4819       // For that reason, cur may be at most the last child.
4820       DCHECK_NOT_NULL(cur);
4821       DCHECK(safe_point_pos >= cur->Start() || range == cur);
4822       bool found = false;
4823       while (!found) {
4824         if (cur->Covers(safe_point_pos)) {
4825           found = true;
4826         } else {
4827           LiveRange* next = cur->next();
4828           if (next == nullptr || next->Start() > safe_point_pos) {
4829             break;
4830           }
4831           cur = next;
4832         }
4833       }
4834 
4835       if (!found) {
4836         continue;
4837       }
4838 
4839       // Check if the live range is spilled and the safe point is after
4840       // the spill position.
4841       int spill_index = range->IsSpilledOnlyInDeferredBlocks(data()) ||
4842                                 range->LateSpillingSelected()
4843                             ? cur->Start().ToInstructionIndex()
4844                             : range->spill_start_index();
4845 
4846       if (!spill_operand.IsInvalid() && safe_point >= spill_index) {
4847         TRACE("Pointer for range %d (spilled at %d) at safe point %d\n",
4848               range->vreg(), spill_index, safe_point);
4849         map->RecordReference(AllocatedOperand::cast(spill_operand));
4850       }
4851 
4852       if (!cur->spilled()) {
4853         TRACE(
4854             "Pointer in register for range %d:%d (start at %d) "
4855             "at safe point %d\n",
4856             range->vreg(), cur->relative_id(), cur->Start().value(),
4857             safe_point);
4858         InstructionOperand operand = cur->GetAssignedOperand();
4859         DCHECK(!operand.IsStackSlot());
4860         DCHECK(CanBeTaggedOrCompressedPointer(
4861             AllocatedOperand::cast(operand).representation()));
4862         map->RecordReference(AllocatedOperand::cast(operand));
4863       }
4864     }
4865   }
4866 }
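
// Editor's note: a minimal, standalone sketch of the two-cursor scan above,
// using only std:: types; none of these names are part of V8. Safe points are
// sorted by instruction position and a range's children are sorted by start,
// so both cursors only ever move forward, and the child cursor is
// deliberately left on the last candidate in case the next safe point falls
// into the same child again.
#include <cstdio>
#include <utility>
#include <vector>

// Each child is a half-open [start, end) interval, sorted by start.
void RecordCoveredSafePoints(const std::vector<std::pair<int, int>>& children,
                             const std::vector<int>& safe_points) {
  size_t child = 0;
  for (int sp : safe_points) {
    // Advance while the current child ends at or before sp and the next one
    // already starts at or before sp; never step past the last child.
    while (child + 1 < children.size() && children[child].second <= sp &&
           children[child + 1].first <= sp) {
      ++child;
    }
    if (!children.empty() && children[child].first <= sp &&
        sp < children[child].second) {
      std::printf("safe point %d covered by child %zu\n", sp, child);
    }
  }
}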
4867 
4868 LiveRangeConnector::LiveRangeConnector(TopTierRegisterAllocationData* data)
4869     : data_(data) {}
4870 
4871 bool LiveRangeConnector::CanEagerlyResolveControlFlow(
4872     const InstructionBlock* block) const {
4873   if (block->PredecessorCount() != 1) return false;
4874   return block->predecessors()[0].IsNext(block->rpo_number());
4875 }
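
// Editor's note: an illustrative one-liner (hypothetical name, not V8 API)
// restating the predicate above. "Eager" resolution applies exactly to
// fall-through edges: a single predecessor whose RPO number immediately
// precedes this block, in which case any connecting move is handled by
// ConnectRanges instead of ResolveControlFlow.
bool IsFallThroughEdge(int pred_rpo, int block_rpo, int predecessor_count) {
  return predecessor_count == 1 && pred_rpo + 1 == block_rpo;
}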
4876 
4877 void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
4878   // Lazily linearize live ranges in memory for fast lookup.
4879   LiveRangeFinder finder(data(), local_zone);
4880   ZoneVector<BitVector*>& live_in_sets = data()->live_in_sets();
4881   for (const InstructionBlock* block : code()->instruction_blocks()) {
4882     if (CanEagerlyResolveControlFlow(block)) continue;
4883     BitVector* live = live_in_sets[block->rpo_number().ToInt()];
4884     auto it = live->begin();
4885     auto end = live->end();
4886     while (it != end) {
4887       data()->tick_counter()->TickAndMaybeEnterSafepoint();
4888       int vreg = *it;
4889       LiveRangeBoundArray* array = finder.ArrayFor(vreg);
4890       for (const RpoNumber& pred : block->predecessors()) {
4891         FindResult result;
4892         const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
4893         if (!array->FindConnectableSubranges(block, pred_block, &result)) {
4894           continue;
4895         }
4896         InstructionOperand pred_op = result.pred_cover_->GetAssignedOperand();
4897         InstructionOperand cur_op = result.cur_cover_->GetAssignedOperand();
4898         if (pred_op.Equals(cur_op)) continue;
4899         if (!pred_op.IsAnyRegister() && cur_op.IsAnyRegister()) {
4900           // We're doing a reload.
4901           // We don't need to, if:
4902           // 1) there's no register use in this block, and
4903           // 2) the range ends before the block does, and
4904           // 3) we don't have a successor, or the successor is spilled.
4905           LifetimePosition block_start =
4906               LifetimePosition::GapFromInstructionIndex(block->code_start());
4907           LifetimePosition block_end =
4908               LifetimePosition::GapFromInstructionIndex(block->code_end());
4909           const LiveRange* current = result.cur_cover_;
4910           // Note that this is not the successor if we have control flow!
4911           // However, in the following condition, we only refer to it if it
4912           // begins in the current block, in which case we can safely declare it
4913           // to be the successor.
4914           const LiveRange* successor = current->next();
4915           if (current->End() < block_end &&
4916               (successor == nullptr || successor->spilled())) {
4917             // Verify point 1: no register use. We can scan to the end of
4918             // the range, since it is entirely within the block.
4919 
4920             bool uses_reg = false;
4921             for (const UsePosition* use = current->NextUsePosition(block_start);
4922                  use != nullptr; use = use->next()) {
4923               if (use->operand()->IsAnyRegister()) {
4924                 uses_reg = true;
4925                 break;
4926               }
4927             }
4928             if (!uses_reg) continue;
4929           }
4930           if (current->TopLevel()->IsSpilledOnlyInDeferredBlocks(data()) &&
4931               pred_block->IsDeferred()) {
4932             // The spill location should be defined in pred_block, so add
4933             // pred_block to the list of blocks requiring a spill operand.
4934             TRACE("Adding B%d to list of spill blocks for %d\n",
4935                   pred_block->rpo_number().ToInt(),
4936                   current->TopLevel()->vreg());
4937             current->TopLevel()
4938                 ->GetListOfBlocksRequiringSpillOperands(data())
4939                 ->Add(pred_block->rpo_number().ToInt());
4940           }
4941         }
4942         int move_loc = ResolveControlFlow(block, cur_op, pred_block, pred_op);
4943         USE(move_loc);
4944         DCHECK_IMPLIES(
4945             result.cur_cover_->TopLevel()->IsSpilledOnlyInDeferredBlocks(
4946                 data()) &&
4947                 !(pred_op.IsAnyRegister() && cur_op.IsAnyRegister()) &&
4948                 move_loc != -1,
4949             code()->GetInstructionBlock(move_loc)->IsDeferred());
4950       }
4951       ++it;
4952     }
4953   }
4954 
4955   // At this stage, we collected blocks needing a spill operand due to reloads
4956   // from ConnectRanges and from ResolveControlFlow. Time to commit the spills
4957   // for deferred blocks. This is a convenient time to commit spills for general
4958   // spill ranges also, because they need to use the LiveRangeFinder.
4959   const size_t live_ranges_size = data()->live_ranges().size();
4960   SpillPlacer spill_placer(&finder, data(), local_zone);
4961   for (TopLevelLiveRange* top : data()->live_ranges()) {
4962     CHECK_EQ(live_ranges_size,
4963              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
4964     if (top == nullptr || top->IsEmpty()) continue;
4965     if (top->IsSpilledOnlyInDeferredBlocks(data())) {
4966       CommitSpillsInDeferredBlocks(top, finder.ArrayFor(top->vreg()),
4967                                    local_zone);
4968     } else if (top->HasGeneralSpillRange()) {
4969       spill_placer.Add(top);
4970     }
4971   }
4972 }
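
// Editor's note: a standalone restatement (hypothetical names, not V8 API) of
// the reload-elision test used above. A reload at the block entry can be
// skipped when the covering range has no register use inside the block, dies
// before the block ends, and its successor piece is absent or spilled anyway.
bool CanSkipReload(bool has_register_use_in_block, int range_end,
                   int block_end, bool successor_spilled_or_absent) {
  return range_end < block_end && successor_spilled_or_absent &&
         !has_register_use_in_block;
}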
4973 
4974 int LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
4975                                            const InstructionOperand& cur_op,
4976                                            const InstructionBlock* pred,
4977                                            const InstructionOperand& pred_op) {
4978   DCHECK(!pred_op.Equals(cur_op));
4979   int gap_index;
4980   Instruction::GapPosition position;
4981   if (block->PredecessorCount() == 1) {
4982     gap_index = block->first_instruction_index();
4983     position = Instruction::START;
4984   } else {
4985     Instruction* last = code()->InstructionAt(pred->last_instruction_index());
4986     // The connecting move might invalidate uses of the destination operand in
4987     // the deoptimization call. See crbug.com/v8/12218. Omitting the move is
4988     // safe since the deopt call exits the current code.
4989     if (last->IsDeoptimizeCall()) {
4990       return -1;
4991     }
4992     // In every other case the last instruction should not participate in
4993     // register allocation, or it could interfere with the connecting move.
4994     for (size_t i = 0; i < last->InputCount(); ++i) {
4995       DCHECK(last->InputAt(i)->IsImmediate());
4996     }
4997     DCHECK_EQ(1, pred->SuccessorCount());
4998     DCHECK(!code()
4999                 ->InstructionAt(pred->last_instruction_index())
5000                 ->HasReferenceMap());
5001     gap_index = pred->last_instruction_index();
5002     position = Instruction::END;
5003   }
5004   data()->AddGapMove(gap_index, position, pred_op, cur_op);
5005   return gap_index;
5006 }
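
// Editor's note: an illustrative sketch of the placement rule above; the
// names are hypothetical and the deopt-call bail-out (returning -1) is
// omitted. With a single predecessor the connecting move goes in the START
// gap of the block's first instruction; otherwise the edge must be
// non-critical (the predecessor has one successor), so it goes in the END gap
// of the predecessor's last instruction.
struct MoveSlotSketch {
  int instruction_index;
  bool at_block_start;
};
MoveSlotSketch PickMoveSlot(int block_first_index, int pred_last_index,
                            int predecessor_count) {
  if (predecessor_count == 1) return {block_first_index, true};
  return {pred_last_index, false};
}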
5007 
5008 void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
5009   DelayedInsertionMap delayed_insertion_map(local_zone);
5010   const size_t live_ranges_size = data()->live_ranges().size();
5011   for (TopLevelLiveRange* top_range : data()->live_ranges()) {
5012     CHECK_EQ(live_ranges_size,
5013              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
5014     if (top_range == nullptr) continue;
5015     bool connect_spilled = top_range->IsSpilledOnlyInDeferredBlocks(data());
5016     LiveRange* first_range = top_range;
5017     for (LiveRange *second_range = first_range->next(); second_range != nullptr;
5018          first_range = second_range, second_range = second_range->next()) {
5019       LifetimePosition pos = second_range->Start();
5020       // Add gap move if the two live ranges touch and there is no block
5021       // boundary.
5022       if (second_range->spilled()) continue;
5023       if (first_range->End() != pos) continue;
5024       if (data()->IsBlockBoundary(pos) &&
5025           !CanEagerlyResolveControlFlow(GetInstructionBlock(code(), pos))) {
5026         continue;
5027       }
5028       InstructionOperand prev_operand = first_range->GetAssignedOperand();
5029       InstructionOperand cur_operand = second_range->GetAssignedOperand();
5030       if (prev_operand.Equals(cur_operand)) continue;
5031       bool delay_insertion = false;
5032       Instruction::GapPosition gap_pos;
5033       int gap_index = pos.ToInstructionIndex();
5034       if (connect_spilled && !prev_operand.IsAnyRegister() &&
5035           cur_operand.IsAnyRegister()) {
5036         const InstructionBlock* block = code()->GetInstructionBlock(gap_index);
5037         DCHECK(block->IsDeferred());
5038         // Performing a reload in this block, meaning the spill operand must
5039         // be defined here.
5040         top_range->GetListOfBlocksRequiringSpillOperands(data())->Add(
5041             block->rpo_number().ToInt());
5042       }
5043 
5044       if (pos.IsGapPosition()) {
5045         gap_pos = pos.IsStart() ? Instruction::START : Instruction::END;
5046       } else {
5047         if (pos.IsStart()) {
5048           delay_insertion = true;
5049         } else {
5050           gap_index++;
5051         }
5052         gap_pos = delay_insertion ? Instruction::END : Instruction::START;
5053       }
5054       // Reloads or spills for ranges that are spilled only in deferred
5055       // blocks must themselves happen only in deferred blocks.
5056       DCHECK_IMPLIES(connect_spilled && !(prev_operand.IsAnyRegister() &&
5057                                           cur_operand.IsAnyRegister()),
5058                      code()->GetInstructionBlock(gap_index)->IsDeferred());
5059 
5060       ParallelMove* move =
5061           code()->InstructionAt(gap_index)->GetOrCreateParallelMove(
5062               gap_pos, code_zone());
5063       if (!delay_insertion) {
5064         move->AddMove(prev_operand, cur_operand);
5065       } else {
5066         delayed_insertion_map.insert(
5067             std::make_pair(std::make_pair(move, prev_operand), cur_operand));
5068       }
5069     }
5070   }
5071   if (delayed_insertion_map.empty()) return;
5072   // Insert all the moves which should occur after the stored move.
5073   ZoneVector<MoveOperands*> to_insert(local_zone);
5074   ZoneVector<MoveOperands*> to_eliminate(local_zone);
5075   to_insert.reserve(4);
5076   to_eliminate.reserve(4);
5077   ParallelMove* moves = delayed_insertion_map.begin()->first.first;
5078   for (auto it = delayed_insertion_map.begin();; ++it) {
5079     bool done = it == delayed_insertion_map.end();
5080     if (done || it->first.first != moves) {
5081       // Commit the MoveOperands for current ParallelMove.
5082       for (MoveOperands* move : to_eliminate) {
5083         move->Eliminate();
5084       }
5085       for (MoveOperands* move : to_insert) {
5086         moves->push_back(move);
5087       }
5088       if (done) break;
5089       // Reset state.
5090       to_eliminate.clear();
5091       to_insert.clear();
5092       moves = it->first.first;
5093     }
5094     // Gather all MoveOperands for a single ParallelMove.
5095     MoveOperands* move =
5096         code_zone()->New<MoveOperands>(it->first.second, it->second);
5097     moves->PrepareInsertAfter(move, &to_eliminate);
5098     to_insert.push_back(move);
5099   }
5100 }
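
// Editor's note: a minimal, standalone sketch (std:: types only, no V8 API)
// of the flush-on-key-change loop that commits the delayed moves above:
// entries are ordered by their ParallelMove, gathered per key, and committed
// whenever the key changes or the container is exhausted.
#include <map>
#include <string>
#include <vector>

void CommitGrouped(const std::multimap<int, std::string>& delayed) {
  if (delayed.empty()) return;
  std::vector<std::string> pending;
  int current_key = delayed.begin()->first;
  for (auto it = delayed.begin();; ++it) {
    bool done = it == delayed.end();
    if (done || it->first != current_key) {
      // Commit everything gathered for current_key here; the real code
      // eliminates conflicting moves and appends the new ones at this point.
      pending.clear();
      if (done) break;
      current_key = it->first;
    }
    pending.push_back(it->second);
  }
}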
5101 
5102 void LiveRangeConnector::CommitSpillsInDeferredBlocks(
5103     TopLevelLiveRange* range, LiveRangeBoundArray* array, Zone* temp_zone) {
5104   DCHECK(range->IsSpilledOnlyInDeferredBlocks(data()));
5105   DCHECK(!range->spilled());
5106 
5107   InstructionSequence* code = data()->code();
5108   InstructionOperand spill_operand = range->GetSpillRangeOperand();
5109 
5110   TRACE("Live Range %d will be spilled only in deferred blocks.\n",
5111         range->vreg());
5112   // If we have ranges that aren't spilled but require the operand on the stack,
5113   // make sure we insert the spill.
5114   for (const LiveRange* child = range; child != nullptr;
5115        child = child->next()) {
5116     for (const UsePosition* pos = child->first_pos(); pos != nullptr;
5117          pos = pos->next()) {
5118       if (pos->type() != UsePositionType::kRequiresSlot && !child->spilled())
5119         continue;
5120       range->AddBlockRequiringSpillOperand(
5121           code->GetInstructionBlock(pos->pos().ToInstructionIndex())
5122               ->rpo_number(),
5123           data());
5124     }
5125   }
5126 
5127   ZoneQueue<int> worklist(temp_zone);
5128 
5129   for (int block_id : *range->GetListOfBlocksRequiringSpillOperands(data())) {
5130     worklist.push(block_id);
5131   }
5132 
5133   ZoneSet<std::pair<RpoNumber, int>> done_moves(temp_zone);
5134   // Seek the deferred blocks that dominate locations requiring spill operands,
5135   // and spill there. We only need to spill at the start of such blocks.
5136   BitVector done_blocks(
5137       range->GetListOfBlocksRequiringSpillOperands(data())->length(),
5138       temp_zone);
5139   while (!worklist.empty()) {
5140     int block_id = worklist.front();
5141     worklist.pop();
5142     if (done_blocks.Contains(block_id)) continue;
5143     done_blocks.Add(block_id);
5144     InstructionBlock* spill_block =
5145         code->InstructionBlockAt(RpoNumber::FromInt(block_id));
5146 
5147     for (const RpoNumber& pred : spill_block->predecessors()) {
5148       const InstructionBlock* pred_block = code->InstructionBlockAt(pred);
5149 
5150       if (pred_block->IsDeferred()) {
5151         worklist.push(pred_block->rpo_number().ToInt());
5152       } else {
5153         LifetimePosition pred_end =
5154             LifetimePosition::InstructionFromInstructionIndex(
5155                 pred_block->last_instruction_index());
5156 
5157         LiveRangeBound* bound = array->Find(pred_end);
5158 
5159         InstructionOperand pred_op = bound->range_->GetAssignedOperand();
5160 
5161         RpoNumber spill_block_number = spill_block->rpo_number();
5162         if (done_moves.find(std::make_pair(
5163                 spill_block_number, range->vreg())) == done_moves.end()) {
5164           TRACE("Spilling deferred spill for range %d at B%d\n", range->vreg(),
5165                 spill_block_number.ToInt());
5166           data()->AddGapMove(spill_block->first_instruction_index(),
5167                              Instruction::GapPosition::START, pred_op,
5168                              spill_operand);
5169           done_moves.insert(std::make_pair(spill_block_number, range->vreg()));
5170           spill_block->mark_needs_frame();
5171         }
5172       }
5173     }
5174   }
5175 }
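
// Editor's note: a standalone sketch (hypothetical names, not V8 API) of the
// worklist walk above. Starting from the deferred blocks that need the value
// on the stack, predecessor edges are followed: deferred predecessors are
// enqueued, while an edge coming from non-deferred code marks the current
// block, and the spill move is emitted once at that block's start.
#include <queue>
#include <set>
#include <vector>

struct BlockSketch {
  bool deferred;
  std::vector<int> predecessors;
};

std::set<int> FindSpillBlocks(const std::vector<BlockSketch>& blocks,
                              const std::vector<int>& needs_operand) {
  std::set<int> spill_at, visited;
  std::queue<int> worklist;
  for (int b : needs_operand) worklist.push(b);
  while (!worklist.empty()) {
    int b = worklist.front();
    worklist.pop();
    if (!visited.insert(b).second) continue;  // already processed
    for (int pred : blocks[b].predecessors) {
      if (blocks[pred].deferred) {
        worklist.push(pred);  // keep walking inside deferred code
      } else {
        spill_at.insert(b);   // spill at the start of this block
      }
    }
  }
  return spill_at;
}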
5176 
5177 #undef TRACE
5178 #undef TRACE_COND
5179 
5180 }  // namespace compiler
5181 }  // namespace internal
5182 }  // namespace v8
5183