1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/compiler/register-allocator.h"
6 
7 #include "src/assembler-inl.h"
8 #include "src/base/adapters.h"
9 #include "src/compiler/linkage.h"
10 #include "src/string-stream.h"
11 
12 namespace v8 {
13 namespace internal {
14 namespace compiler {
15 
16 #define TRACE(...)                             \
17   do {                                         \
18     if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
19   } while (false)
20 
21 
22 namespace {
23 
24 static const int kFloatRepBit =
25     1 << static_cast<int>(MachineRepresentation::kFloat32);
26 static const int kSimd128RepBit =
27     1 << static_cast<int>(MachineRepresentation::kSimd128);
28 
29 void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) {
30   auto it = std::find(v->begin(), v->end(), range);
31   DCHECK(it != v->end());
32   v->erase(it);
33 }
34 
35 int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) {
36   return kind == FP_REGISTERS ? cfg->num_double_registers()
37                               : cfg->num_general_registers();
38 }
39 
40 
41 int GetAllocatableRegisterCount(const RegisterConfiguration* cfg,
42                                 RegisterKind kind) {
43   return kind == FP_REGISTERS ? cfg->num_allocatable_double_registers()
44                               : cfg->num_allocatable_general_registers();
45 }
46 
47 
48 const int* GetAllocatableRegisterCodes(const RegisterConfiguration* cfg,
49                                        RegisterKind kind) {
50   return kind == FP_REGISTERS ? cfg->allocatable_double_codes()
51                               : cfg->allocatable_general_codes();
52 }
53 
54 
55 const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence,
56                                           const InstructionBlock* block) {
57   RpoNumber index = block->loop_header();
58   if (!index.IsValid()) return nullptr;
59   return sequence->InstructionBlockAt(index);
60 }
61 
62 
63 const InstructionBlock* GetInstructionBlock(const InstructionSequence* code,
64                                             LifetimePosition pos) {
65   return code->GetInstructionBlock(pos.ToInstructionIndex());
66 }
67 
68 
69 Instruction* GetLastInstruction(InstructionSequence* code,
70                                 const InstructionBlock* block) {
71   return code->InstructionAt(block->last_instruction_index());
72 }
73 
74 // TODO(dcarney): fix frame to allow frame accesses to half-size locations.
75 int GetByteWidth(MachineRepresentation rep) {
76   switch (rep) {
77     case MachineRepresentation::kBit:
78     case MachineRepresentation::kWord8:
79     case MachineRepresentation::kWord16:
80     case MachineRepresentation::kWord32:
81     case MachineRepresentation::kTaggedSigned:
82     case MachineRepresentation::kTaggedPointer:
83     case MachineRepresentation::kTagged:
84     case MachineRepresentation::kFloat32:
85       return kPointerSize;
86     case MachineRepresentation::kWord64:
87     case MachineRepresentation::kFloat64:
88       return kDoubleSize;
89     case MachineRepresentation::kSimd128:
90       return kSimd128Size;
91     case MachineRepresentation::kNone:
92       break;
93   }
94   UNREACHABLE();
95 }
96 
97 }  // namespace
98 
99 class LiveRangeBound {
100  public:
101   explicit LiveRangeBound(LiveRange* range, bool skip)
102       : range_(range), start_(range->Start()), end_(range->End()), skip_(skip) {
103     DCHECK(!range->IsEmpty());
104   }
105 
106   bool CanCover(LifetimePosition position) {
107     return start_ <= position && position < end_;
108   }
109 
110   LiveRange* const range_;
111   const LifetimePosition start_;
112   const LifetimePosition end_;
113   const bool skip_;
114 
115  private:
116   DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
117 };
118 
119 
120 struct FindResult {
121   LiveRange* cur_cover_;
122   LiveRange* pred_cover_;
123 };
124 
125 
126 class LiveRangeBoundArray {
127  public:
128   LiveRangeBoundArray() : length_(0), start_(nullptr) {}
129 
130   bool ShouldInitialize() { return start_ == nullptr; }
131 
132   void Initialize(Zone* zone, TopLevelLiveRange* range) {
133     length_ = range->GetChildCount();
134 
135     start_ = zone->NewArray<LiveRangeBound>(length_);
136     LiveRangeBound* curr = start_;
137     // Normally, spilled ranges do not need connecting moves, because the spill
138     // location has been assigned at definition. For ranges spilled in deferred
139     // blocks, that is not the case, so we need to connect the spilled children.
140     for (LiveRange *i = range; i != nullptr; i = i->next(), ++curr) {
141       new (curr) LiveRangeBound(i, i->spilled());
142     }
143   }
144 
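  // Binary search the sorted bounds for the one whose [start_, end_) covers
  // |position|.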
145   LiveRangeBound* Find(const LifetimePosition position) const {
146     size_t left_index = 0;
147     size_t right_index = length_;
148     while (true) {
149       size_t current_index = left_index + (right_index - left_index) / 2;
150       DCHECK(right_index > current_index);
151       LiveRangeBound* bound = &start_[current_index];
152       if (bound->start_ <= position) {
153         if (position < bound->end_) return bound;
154         DCHECK(left_index < current_index);
155         left_index = current_index;
156       } else {
157         right_index = current_index;
158       }
159     }
160   }
161 
162   LiveRangeBound* FindPred(const InstructionBlock* pred) {
163     LifetimePosition pred_end =
164         LifetimePosition::InstructionFromInstructionIndex(
165             pred->last_instruction_index());
166     return Find(pred_end);
167   }
168 
169   LiveRangeBound* FindSucc(const InstructionBlock* succ) {
170     LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex(
171         succ->first_instruction_index());
172     return Find(succ_start);
173   }
174 
175   bool FindConnectableSubranges(const InstructionBlock* block,
176                                 const InstructionBlock* pred,
177                                 FindResult* result) const {
178     LifetimePosition pred_end =
179         LifetimePosition::InstructionFromInstructionIndex(
180             pred->last_instruction_index());
181     LiveRangeBound* bound = Find(pred_end);
182     result->pred_cover_ = bound->range_;
183     LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex(
184         block->first_instruction_index());
185 
186     if (bound->CanCover(cur_start)) {
187       // Both blocks are covered by the same range, so there is nothing to
188       // connect.
189       return false;
190     }
191     bound = Find(cur_start);
192     if (bound->skip_) {
193       return false;
194     }
195     result->cur_cover_ = bound->range_;
196     DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
197     return (result->cur_cover_ != result->pred_cover_);
198   }
199 
200  private:
201   size_t length_;
202   LiveRangeBound* start_;
203 
204   DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
205 };
206 
207 
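// Builds LiveRangeBoundArrays on demand, one per virtual register, and caches
// them in bounds_ (see ArrayFor()).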
208 class LiveRangeFinder {
209  public:
210   explicit LiveRangeFinder(const RegisterAllocationData* data, Zone* zone)
211       : data_(data),
212         bounds_length_(static_cast<int>(data_->live_ranges().size())),
213         bounds_(zone->NewArray<LiveRangeBoundArray>(bounds_length_)),
214         zone_(zone) {
215     for (int i = 0; i < bounds_length_; ++i) {
216       new (&bounds_[i]) LiveRangeBoundArray();
217     }
218   }
219 
220   LiveRangeBoundArray* ArrayFor(int operand_index) {
221     DCHECK(operand_index < bounds_length_);
222     TopLevelLiveRange* range = data_->live_ranges()[operand_index];
223     DCHECK(range != nullptr && !range->IsEmpty());
224     LiveRangeBoundArray* array = &bounds_[operand_index];
225     if (array->ShouldInitialize()) {
226       array->Initialize(zone_, range);
227     }
228     return array;
229   }
230 
231  private:
232   const RegisterAllocationData* const data_;
233   const int bounds_length_;
234   LiveRangeBoundArray* const bounds_;
235   Zone* const zone_;
236 
237   DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
238 };
239 
240 
241 typedef std::pair<ParallelMove*, InstructionOperand> DelayedInsertionMapKey;
242 
243 
244 struct DelayedInsertionMapCompare {
245   bool operator()(const DelayedInsertionMapKey& a,
246                   const DelayedInsertionMapKey& b) const {
247     if (a.first == b.first) {
248       return a.second.Compare(b.second);
249     }
250     return a.first < b.first;
251   }
252 };
253 
254 
255 typedef ZoneMap<DelayedInsertionMapKey, InstructionOperand,
256                 DelayedInsertionMapCompare> DelayedInsertionMap;
257 
258 
259 UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
260                          void* hint, UsePositionHintType hint_type)
261     : operand_(operand), hint_(hint), next_(nullptr), pos_(pos), flags_(0) {
262   DCHECK_IMPLIES(hint == nullptr, hint_type == UsePositionHintType::kNone);
263   bool register_beneficial = true;
264   UsePositionType type = UsePositionType::kRegisterOrSlot;
265   if (operand_ != nullptr && operand_->IsUnallocated()) {
266     const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_);
267     if (unalloc->HasRegisterPolicy()) {
268       type = UsePositionType::kRequiresRegister;
269     } else if (unalloc->HasSlotPolicy()) {
270       type = UsePositionType::kRequiresSlot;
271       register_beneficial = false;
272     } else if (unalloc->HasRegisterOrSlotOrConstantPolicy()) {
273       type = UsePositionType::kRegisterOrSlotOrConstant;
274       register_beneficial = false;
275     } else {
276       register_beneficial = !unalloc->HasRegisterOrSlotPolicy();
277     }
278   }
279   flags_ = TypeField::encode(type) | HintTypeField::encode(hint_type) |
280            RegisterBeneficialField::encode(register_beneficial) |
281            AssignedRegisterField::encode(kUnassignedRegister);
282   DCHECK(pos_.IsValid());
283 }
284 
285 
286 bool UsePosition::HasHint() const {
287   int hint_register;
288   return HintRegister(&hint_register);
289 }
290 
291 
292 bool UsePosition::HintRegister(int* register_code) const {
293   if (hint_ == nullptr) return false;
294   switch (HintTypeField::decode(flags_)) {
295     case UsePositionHintType::kNone:
296     case UsePositionHintType::kUnresolved:
297       return false;
298     case UsePositionHintType::kUsePos: {
299       UsePosition* use_pos = reinterpret_cast<UsePosition*>(hint_);
300       int assigned_register = AssignedRegisterField::decode(use_pos->flags_);
301       if (assigned_register == kUnassignedRegister) return false;
302       *register_code = assigned_register;
303       return true;
304     }
305     case UsePositionHintType::kOperand: {
306       InstructionOperand* operand =
307           reinterpret_cast<InstructionOperand*>(hint_);
308       *register_code = LocationOperand::cast(operand)->register_code();
309       return true;
310     }
311     case UsePositionHintType::kPhi: {
312       RegisterAllocationData::PhiMapValue* phi =
313           reinterpret_cast<RegisterAllocationData::PhiMapValue*>(hint_);
314       int assigned_register = phi->assigned_register();
315       if (assigned_register == kUnassignedRegister) return false;
316       *register_code = assigned_register;
317       return true;
318     }
319   }
320   UNREACHABLE();
321 }
322 
323 
324 UsePositionHintType UsePosition::HintTypeForOperand(
325     const InstructionOperand& op) {
326   switch (op.kind()) {
327     case InstructionOperand::CONSTANT:
328     case InstructionOperand::IMMEDIATE:
329     case InstructionOperand::EXPLICIT:
330       return UsePositionHintType::kNone;
331     case InstructionOperand::UNALLOCATED:
332       return UsePositionHintType::kUnresolved;
333     case InstructionOperand::ALLOCATED:
334       if (op.IsRegister() || op.IsFPRegister()) {
335         return UsePositionHintType::kOperand;
336       } else {
337         DCHECK(op.IsStackSlot() || op.IsFPStackSlot());
338         return UsePositionHintType::kNone;
339       }
340     case InstructionOperand::INVALID:
341       break;
342   }
343   UNREACHABLE();
344 }
345 
346 void UsePosition::SetHint(UsePosition* use_pos) {
347   DCHECK_NOT_NULL(use_pos);
348   hint_ = use_pos;
349   flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos);
350 }
351 
352 void UsePosition::ResolveHint(UsePosition* use_pos) {
353   DCHECK_NOT_NULL(use_pos);
354   if (HintTypeField::decode(flags_) != UsePositionHintType::kUnresolved) return;
355   hint_ = use_pos;
356   flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos);
357 }
358 
359 
360 void UsePosition::set_type(UsePositionType type, bool register_beneficial) {
361   DCHECK_IMPLIES(type == UsePositionType::kRequiresSlot, !register_beneficial);
362   DCHECK_EQ(kUnassignedRegister, AssignedRegisterField::decode(flags_));
363   flags_ = TypeField::encode(type) |
364            RegisterBeneficialField::encode(register_beneficial) |
365            HintTypeField::encode(HintTypeField::decode(flags_)) |
366            AssignedRegisterField::encode(kUnassignedRegister);
367 }
368 
369 
370 UseInterval* UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
371   DCHECK(Contains(pos) && pos != start());
372   UseInterval* after = new (zone) UseInterval(pos, end_);
373   after->next_ = next_;
374   next_ = nullptr;
375   end_ = pos;
376   return after;
377 }
378 
379 void LifetimePosition::Print() const { StdoutStream{} << *this << std::endl; }
380 
381 std::ostream& operator<<(std::ostream& os, const LifetimePosition pos) {
382   os << '@' << pos.ToInstructionIndex();
383   if (pos.IsGapPosition()) {
384     os << 'g';
385   } else {
386     os << 'i';
387   }
388   if (pos.IsStart()) {
389     os << 's';
390   } else {
391     os << 'e';
392   }
393   return os;
394 }
395 
396 LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
397                      TopLevelLiveRange* top_level)
398     : relative_id_(relative_id),
399       bits_(0),
400       last_interval_(nullptr),
401       first_interval_(nullptr),
402       first_pos_(nullptr),
403       top_level_(top_level),
404       next_(nullptr),
405       current_interval_(nullptr),
406       last_processed_use_(nullptr),
407       current_hint_position_(nullptr),
408       splitting_pointer_(nullptr) {
409   DCHECK(AllocatedOperand::IsSupportedRepresentation(rep));
410   bits_ = AssignedRegisterField::encode(kUnassignedRegister) |
411           RepresentationField::encode(rep);
412 }
413 
414 
415 void LiveRange::VerifyPositions() const {
416   // Walk the positions, verifying that each is in an interval.
417   UseInterval* interval = first_interval_;
418   for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
419     CHECK(Start() <= pos->pos());
420     CHECK(pos->pos() <= End());
421     CHECK_NOT_NULL(interval);
422     while (!interval->Contains(pos->pos()) && interval->end() != pos->pos()) {
423       interval = interval->next();
424       CHECK_NOT_NULL(interval);
425     }
426   }
427 }
428 
429 
430 void LiveRange::VerifyIntervals() const {
431   DCHECK(first_interval()->start() == Start());
432   LifetimePosition last_end = first_interval()->end();
433   for (UseInterval* interval = first_interval()->next(); interval != nullptr;
434        interval = interval->next()) {
435     DCHECK(last_end <= interval->start());
436     last_end = interval->end();
437   }
438   DCHECK(last_end == End());
439 }
440 
441 
442 void LiveRange::set_assigned_register(int reg) {
443   DCHECK(!HasRegisterAssigned() && !spilled());
444   bits_ = AssignedRegisterField::update(bits_, reg);
445 }
446 
447 
448 void LiveRange::UnsetAssignedRegister() {
449   DCHECK(HasRegisterAssigned() && !spilled());
450   bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
451 }
452 
453 
454 void LiveRange::Spill() {
455   DCHECK(!spilled());
456   DCHECK(!TopLevel()->HasNoSpillType());
457   set_spilled(true);
458   bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
459 }
460 
461 
462 RegisterKind LiveRange::kind() const {
463   return IsFloatingPoint(representation()) ? FP_REGISTERS : GENERAL_REGISTERS;
464 }
465 
466 
467 UsePosition* LiveRange::FirstHintPosition(int* register_index) const {
468   for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
469     if (pos->HintRegister(register_index)) return pos;
470   }
471   return nullptr;
472 }
473 
474 
475 UsePosition* LiveRange::NextUsePosition(LifetimePosition start) const {
476   UsePosition* use_pos = last_processed_use_;
477   if (use_pos == nullptr || use_pos->pos() > start) {
478     use_pos = first_pos();
479   }
480   while (use_pos != nullptr && use_pos->pos() < start) {
481     use_pos = use_pos->next();
482   }
483   last_processed_use_ = use_pos;
484   return use_pos;
485 }
486 
487 
488 UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
489     LifetimePosition start) const {
490   UsePosition* pos = NextUsePosition(start);
491   while (pos != nullptr && !pos->RegisterIsBeneficial()) {
492     pos = pos->next();
493   }
494   return pos;
495 }
496 
497 LifetimePosition LiveRange::NextLifetimePositionRegisterIsBeneficial(
498     const LifetimePosition& start) const {
499   UsePosition* next_use = NextUsePositionRegisterIsBeneficial(start);
500   if (next_use == nullptr) return End();
501   return next_use->pos();
502 }
503 
504 UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
505     LifetimePosition start) const {
506   UsePosition* pos = first_pos();
507   UsePosition* prev = nullptr;
508   while (pos != nullptr && pos->pos() < start) {
509     if (pos->RegisterIsBeneficial()) prev = pos;
510     pos = pos->next();
511   }
512   return prev;
513 }
514 
515 
516 UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const {
517   UsePosition* pos = NextUsePosition(start);
518   while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister) {
519     pos = pos->next();
520   }
521   return pos;
522 }
523 
524 
525 UsePosition* LiveRange::NextSlotPosition(LifetimePosition start) const {
526   for (UsePosition* pos = NextUsePosition(start); pos != nullptr;
527        pos = pos->next()) {
528     if (pos->type() != UsePositionType::kRequiresSlot) continue;
529     return pos;
530   }
531   return nullptr;
532 }
533 
534 
535 bool LiveRange::CanBeSpilled(LifetimePosition pos) const {
536   // We cannot spill a live range that has a use requiring a register
537   // at the current or the immediate next position.
538   UsePosition* use_pos = NextRegisterPosition(pos);
539   if (use_pos == nullptr) return true;
540   return use_pos->pos() > pos.NextStart().End();
541 }
542 
543 
544 bool LiveRange::IsTopLevel() const { return top_level_ == this; }
545 
546 
547 InstructionOperand LiveRange::GetAssignedOperand() const {
548   if (HasRegisterAssigned()) {
549     DCHECK(!spilled());
550     return AllocatedOperand(LocationOperand::REGISTER, representation(),
551                             assigned_register());
552   }
553   DCHECK(spilled());
554   DCHECK(!HasRegisterAssigned());
555   if (TopLevel()->HasSpillOperand()) {
556     InstructionOperand* op = TopLevel()->GetSpillOperand();
557     DCHECK(!op->IsUnallocated());
558     return *op;
559   }
560   return TopLevel()->GetSpillRangeOperand();
561 }
562 
563 
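// Uses current_interval_ as a search cache; falls back to the first interval
// when the cached interval starts after |position|.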
564 UseInterval* LiveRange::FirstSearchIntervalForPosition(
565     LifetimePosition position) const {
566   if (current_interval_ == nullptr) return first_interval_;
567   if (current_interval_->start() > position) {
568     current_interval_ = nullptr;
569     return first_interval_;
570   }
571   return current_interval_;
572 }
573 
574 
575 void LiveRange::AdvanceLastProcessedMarker(
576     UseInterval* to_start_of, LifetimePosition but_not_past) const {
577   if (to_start_of == nullptr) return;
578   if (to_start_of->start() > but_not_past) return;
579   LifetimePosition start = current_interval_ == nullptr
580                                ? LifetimePosition::Invalid()
581                                : current_interval_->start();
582   if (to_start_of->start() > start) {
583     current_interval_ = to_start_of;
584   }
585 }
586 
587 
588 LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
589   int new_id = TopLevel()->GetNextChildId();
590   LiveRange* child = new (zone) LiveRange(new_id, representation(), TopLevel());
591   // If we split, we do so because we're about to switch registers or move
592   // to/from a slot, so there's no value in connecting hints.
593   DetachAt(position, child, zone, DoNotConnectHints);
594 
595   child->top_level_ = TopLevel();
596   child->next_ = next_;
597   next_ = child;
598   return child;
599 }
600 
601 UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
602                                  Zone* zone,
603                                  HintConnectionOption connect_hints) {
604   DCHECK(Start() < position);
605   DCHECK(End() > position);
606   DCHECK(result->IsEmpty());
607   // Find the last interval that ends before the position. If the
608   // position is contained in one of the intervals in the chain, we
609   // split that interval and use the first part.
610   UseInterval* current = FirstSearchIntervalForPosition(position);
611 
612   // If the split position coincides with the beginning of a use interval
613   // we need to split use positions in a special way.
614   bool split_at_start = false;
615 
616   if (current->start() == position) {
617     // When splitting at start we need to locate the previous use interval.
618     current = first_interval_;
619   }
620 
621   UseInterval* after = nullptr;
622   while (current != nullptr) {
623     if (current->Contains(position)) {
624       after = current->SplitAt(position, zone);
625       break;
626     }
627     UseInterval* next = current->next();
628     if (next->start() >= position) {
629       split_at_start = (next->start() == position);
630       after = next;
631       current->set_next(nullptr);
632       break;
633     }
634     current = next;
635   }
636   DCHECK_NOT_NULL(after);
637 
638   // Partition original use intervals to the two live ranges.
639   UseInterval* before = current;
640   result->last_interval_ =
641       (last_interval_ == before)
642           ? after            // Only interval in the range after split.
643           : last_interval_;  // Last interval of the original range.
644   result->first_interval_ = after;
645   last_interval_ = before;
646 
647   // Find the last use position before the split and the first use
648   // position after it.
649   UsePosition* use_after =
650       splitting_pointer_ == nullptr || splitting_pointer_->pos() > position
651           ? first_pos()
652           : splitting_pointer_;
653   UsePosition* use_before = nullptr;
654   if (split_at_start) {
655     // The split position coincides with the beginning of a use interval (the
656     // end of a lifetime hole). A use at this position should be attributed to
657     // the split child, because the split child owns the use interval covering it.
658     while (use_after != nullptr && use_after->pos() < position) {
659       use_before = use_after;
660       use_after = use_after->next();
661     }
662   } else {
663     while (use_after != nullptr && use_after->pos() <= position) {
664       use_before = use_after;
665       use_after = use_after->next();
666     }
667   }
668 
669   // Partition original use positions to the two live ranges.
670   if (use_before != nullptr) {
671     use_before->set_next(nullptr);
672   } else {
673     first_pos_ = nullptr;
674   }
675   result->first_pos_ = use_after;
676 
677   // Discard cached iteration state. It might be pointing
678   // to the use that no longer belongs to this live range.
679   last_processed_use_ = nullptr;
680   current_interval_ = nullptr;
681 
682   if (connect_hints == ConnectHints && use_before != nullptr &&
683       use_after != nullptr) {
684     use_after->SetHint(use_before);
685   }
686 #ifdef DEBUG
687   VerifyChildStructure();
688   result->VerifyChildStructure();
689 #endif
690   return use_before;
691 }
692 
693 
694 void LiveRange::UpdateParentForAllChildren(TopLevelLiveRange* new_top_level) {
695   LiveRange* child = this;
696   for (; child != nullptr; child = child->next()) {
697     child->top_level_ = new_top_level;
698   }
699 }
700 
701 
702 void LiveRange::ConvertUsesToOperand(const InstructionOperand& op,
703                                      const InstructionOperand& spill_op) {
704   for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
705     DCHECK(Start() <= pos->pos() && pos->pos() <= End());
706     if (!pos->HasOperand()) continue;
707     switch (pos->type()) {
708       case UsePositionType::kRequiresSlot:
709         DCHECK(spill_op.IsStackSlot() || spill_op.IsFPStackSlot());
710         InstructionOperand::ReplaceWith(pos->operand(), &spill_op);
711         break;
712       case UsePositionType::kRequiresRegister:
713         DCHECK(op.IsRegister() || op.IsFPRegister());
714         V8_FALLTHROUGH;
715       case UsePositionType::kRegisterOrSlot:
716       case UsePositionType::kRegisterOrSlotOrConstant:
717         InstructionOperand::ReplaceWith(pos->operand(), &op);
718         break;
719     }
720   }
721 }
722 
723 
724 // This implements an ordering on live ranges so that they are ordered by their
725 // start positions.  This is needed for the correctness of the register
726 // allocation algorithm.  If two live ranges start at the same offset then there
727 // is a tie breaker based on where the value is first used.  This part of the
728 // ordering is merely a heuristic.
729 bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
730   LifetimePosition start = Start();
731   LifetimePosition other_start = other->Start();
732   if (start == other_start) {
733     UsePosition* pos = first_pos();
734     if (pos == nullptr) return false;
735     UsePosition* other_pos = other->first_pos();
736     if (other_pos == nullptr) return true;
737     return pos->pos() < other_pos->pos();
738   }
739   return start < other_start;
740 }
741 
742 
743 void LiveRange::SetUseHints(int register_index) {
744   for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
745     if (!pos->HasOperand()) continue;
746     switch (pos->type()) {
747       case UsePositionType::kRequiresSlot:
748         break;
749       case UsePositionType::kRequiresRegister:
750       case UsePositionType::kRegisterOrSlot:
751       case UsePositionType::kRegisterOrSlotOrConstant:
752         pos->set_assigned_register(register_index);
753         break;
754     }
755   }
756 }
757 
758 
759 bool LiveRange::CanCover(LifetimePosition position) const {
760   if (IsEmpty()) return false;
761   return Start() <= position && position < End();
762 }
763 
764 
765 bool LiveRange::Covers(LifetimePosition position) const {
766   if (!CanCover(position)) return false;
767   UseInterval* start_search = FirstSearchIntervalForPosition(position);
768   for (UseInterval* interval = start_search; interval != nullptr;
769        interval = interval->next()) {
770     DCHECK(interval->next() == nullptr ||
771            interval->next()->start() >= interval->start());
772     AdvanceLastProcessedMarker(interval, position);
773     if (interval->Contains(position)) return true;
774     if (interval->start() > position) return false;
775   }
776   return false;
777 }
778 
779 
780 LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const {
781   UseInterval* b = other->first_interval();
782   if (b == nullptr) return LifetimePosition::Invalid();
783   LifetimePosition advance_last_processed_up_to = b->start();
784   UseInterval* a = FirstSearchIntervalForPosition(b->start());
785   while (a != nullptr && b != nullptr) {
786     if (a->start() > other->End()) break;
787     if (b->start() > End()) break;
788     LifetimePosition cur_intersection = a->Intersect(b);
789     if (cur_intersection.IsValid()) {
790       return cur_intersection;
791     }
792     if (a->start() < b->start()) {
793       a = a->next();
794       if (a == nullptr || a->start() > other->End()) break;
795       AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
796     } else {
797       b = b->next();
798     }
799   }
800   return LifetimePosition::Invalid();
801 }
802 
803 void LiveRange::Print(const RegisterConfiguration* config,
804                       bool with_children) const {
805   StdoutStream os;
806   PrintableLiveRange wrapper;
807   wrapper.register_configuration_ = config;
808   for (const LiveRange* i = this; i != nullptr; i = i->next()) {
809     wrapper.range_ = i;
810     os << wrapper << std::endl;
811     if (!with_children) break;
812   }
813 }
814 
815 
816 void LiveRange::Print(bool with_children) const {
817   Print(RegisterConfiguration::Default(), with_children);
818 }
819 
820 
821 struct TopLevelLiveRange::SpillMoveInsertionList : ZoneObject {
822   SpillMoveInsertionList(int gap_index, InstructionOperand* operand,
823                          SpillMoveInsertionList* next)
824       : gap_index(gap_index), operand(operand), next(next) {}
825   const int gap_index;
826   InstructionOperand* const operand;
827   SpillMoveInsertionList* const next;
828 };
829 
830 
831 TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineRepresentation rep)
832     : LiveRange(0, rep, this),
833       vreg_(vreg),
834       last_child_id_(0),
835       splintered_from_(nullptr),
836       spill_operand_(nullptr),
837       spill_move_insertion_locations_(nullptr),
838       spilled_in_deferred_blocks_(false),
839       spill_start_index_(kMaxInt),
840       last_pos_(nullptr),
841       splinter_(nullptr),
842       has_preassigned_slot_(false) {
843   bits_ |= SpillTypeField::encode(SpillType::kNoSpillType);
844 }
845 
846 
847 #if DEBUG
848 int TopLevelLiveRange::debug_virt_reg() const {
849   return IsSplinter() ? splintered_from()->vreg() : vreg();
850 }
851 #endif
852 
853 
854 void TopLevelLiveRange::RecordSpillLocation(Zone* zone, int gap_index,
855                                             InstructionOperand* operand) {
856   DCHECK(HasNoSpillType());
857   spill_move_insertion_locations_ = new (zone) SpillMoveInsertionList(
858       gap_index, operand, spill_move_insertion_locations_);
859 }
860 
861 void TopLevelLiveRange::CommitSpillMoves(InstructionSequence* sequence,
862                                          const InstructionOperand& op,
863                                          bool might_be_duplicated) {
864   DCHECK_IMPLIES(op.IsConstant(), GetSpillMoveInsertionLocations() == nullptr);
865   Zone* zone = sequence->zone();
866 
867   for (SpillMoveInsertionList* to_spill = GetSpillMoveInsertionLocations();
868        to_spill != nullptr; to_spill = to_spill->next) {
869     Instruction* instr = sequence->InstructionAt(to_spill->gap_index);
870     ParallelMove* move =
871         instr->GetOrCreateParallelMove(Instruction::START, zone);
872     // Skip insertion if it's possible that the move exists already as a
873     // constraint move from a fixed output register to a slot.
874     if (might_be_duplicated || has_preassigned_slot()) {
875       bool found = false;
876       for (MoveOperands* move_op : *move) {
877         if (move_op->IsEliminated()) continue;
878         if (move_op->source().Equals(*to_spill->operand) &&
879             move_op->destination().Equals(op)) {
880           found = true;
881           if (has_preassigned_slot()) move_op->Eliminate();
882           break;
883         }
884       }
885       if (found) continue;
886     }
887     if (!has_preassigned_slot()) {
888       move->AddMove(*to_spill->operand, op);
889     }
890   }
891 }
892 
893 
894 void TopLevelLiveRange::SetSpillOperand(InstructionOperand* operand) {
895   DCHECK(HasNoSpillType());
896   DCHECK(!operand->IsUnallocated() && !operand->IsImmediate());
897   set_spill_type(SpillType::kSpillOperand);
898   spill_operand_ = operand;
899 }
900 
901 
902 void TopLevelLiveRange::SetSpillRange(SpillRange* spill_range) {
903   DCHECK(!HasSpillOperand());
904   DCHECK(spill_range);
905   spill_range_ = spill_range;
906 }
907 
908 
909 AllocatedOperand TopLevelLiveRange::GetSpillRangeOperand() const {
910   SpillRange* spill_range = GetSpillRange();
911   int index = spill_range->assigned_slot();
912   return AllocatedOperand(LocationOperand::STACK_SLOT, representation(), index);
913 }
914 
915 
916 void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end,
917                                  Zone* zone) {
918   DCHECK(start != Start() || end != End());
919   DCHECK(start < end);
920 
921   TopLevelLiveRange splinter_temp(-1, representation());
922   UsePosition* last_in_splinter = nullptr;
923   // Live ranges defined in deferred blocks stay in deferred blocks, so we
924   // don't need to splinter them. That means that start should always be
925   // after the beginning of the range.
926   DCHECK(start > Start());
927 
928   if (end >= End()) {
929     DCHECK(start > Start());
930     DetachAt(start, &splinter_temp, zone, ConnectHints);
931     next_ = nullptr;
932   } else {
933     DCHECK(start < End() && Start() < end);
934 
935     const int kInvalidId = std::numeric_limits<int>::max();
936 
937     UsePosition* last = DetachAt(start, &splinter_temp, zone, ConnectHints);
938 
939     LiveRange end_part(kInvalidId, this->representation(), nullptr);
940     // The last chunk exits the deferred region, and we don't want to connect
941     // hints here, because the non-deferred region shouldn't be affected
942     // by allocation decisions on the deferred path.
943     last_in_splinter =
944         splinter_temp.DetachAt(end, &end_part, zone, DoNotConnectHints);
945 
946     next_ = end_part.next_;
947     last_interval_->set_next(end_part.first_interval_);
948     // The next splinter will happen either at or after the current interval.
949     // We can optimize DetachAt by setting current_interval_ accordingly,
950     // which will then be picked up by FirstSearchIntervalForPosition.
951     current_interval_ = last_interval_;
952     last_interval_ = end_part.last_interval_;
953 
954     if (first_pos_ == nullptr) {
955       first_pos_ = end_part.first_pos_;
956     } else {
957       splitting_pointer_ = last;
958       if (last != nullptr) last->set_next(end_part.first_pos_);
959     }
960   }
961 
962   if (splinter()->IsEmpty()) {
963     splinter()->first_interval_ = splinter_temp.first_interval_;
964     splinter()->last_interval_ = splinter_temp.last_interval_;
965   } else {
966     splinter()->last_interval_->set_next(splinter_temp.first_interval_);
967     splinter()->last_interval_ = splinter_temp.last_interval_;
968   }
969   if (splinter()->first_pos() == nullptr) {
970     splinter()->first_pos_ = splinter_temp.first_pos_;
971   } else {
972     splinter()->last_pos_->set_next(splinter_temp.first_pos_);
973   }
974   if (last_in_splinter != nullptr) {
975     splinter()->last_pos_ = last_in_splinter;
976   } else {
977     if (splinter()->first_pos() != nullptr &&
978         splinter()->last_pos_ == nullptr) {
979       splinter()->last_pos_ = splinter()->first_pos();
980       for (UsePosition* pos = splinter()->first_pos(); pos != nullptr;
981            pos = pos->next()) {
982         splinter()->last_pos_ = pos;
983       }
984     }
985   }
986 #if DEBUG
987   Verify();
988   splinter()->Verify();
989 #endif
990 }
991 
992 
993 void TopLevelLiveRange::SetSplinteredFrom(TopLevelLiveRange* splinter_parent) {
994   splintered_from_ = splinter_parent;
995   if (!HasSpillOperand() && splinter_parent->spill_range_ != nullptr) {
996     SetSpillRange(splinter_parent->spill_range_);
997   }
998 }
999 
1000 
1001 void TopLevelLiveRange::UpdateSpillRangePostMerge(TopLevelLiveRange* merged) {
1002   DCHECK(merged->TopLevel() == this);
1003 
1004   if (HasNoSpillType() && merged->HasSpillRange()) {
1005     set_spill_type(merged->spill_type());
1006     DCHECK_LT(0, GetSpillRange()->live_ranges().size());
1007     merged->spill_range_ = nullptr;
1008     merged->bits_ =
1009         SpillTypeField::update(merged->bits_, SpillType::kNoSpillType);
1010   }
1011 }
1012 
1013 
1014 void TopLevelLiveRange::Merge(TopLevelLiveRange* other, Zone* zone) {
1015   DCHECK(Start() < other->Start());
1016   DCHECK(other->splintered_from() == this);
1017 
1018   LiveRange* first = this;
1019   LiveRange* second = other;
1020   DCHECK(first->Start() < second->Start());
1021   while (first != nullptr && second != nullptr) {
1022     DCHECK(first != second);
1023     // Make sure the ranges are in order each time we iterate.
1024     if (second->Start() < first->Start()) {
1025       LiveRange* tmp = second;
1026       second = first;
1027       first = tmp;
1028       continue;
1029     }
1030 
1031     if (first->End() <= second->Start()) {
1032       if (first->next() == nullptr ||
1033           first->next()->Start() > second->Start()) {
1034         // First is in order before second.
1035         LiveRange* temp = first->next();
1036         first->next_ = second;
1037         first = temp;
1038       } else {
1039         // First is in order before its successor (or second), so advance first.
1040         first = first->next();
1041       }
1042       continue;
1043     }
1044 
1045     DCHECK(first->Start() < second->Start());
1046     // If first and second intersect, split first.
1047     if (first->Start() < second->End() && second->Start() < first->End()) {
1048       LiveRange* temp = first->SplitAt(second->Start(), zone);
1049       CHECK(temp != first);
1050       temp->set_spilled(first->spilled());
1051       if (!temp->spilled())
1052         temp->set_assigned_register(first->assigned_register());
1053 
1054       first->next_ = second;
1055       first = temp;
1056       continue;
1057     }
1058     DCHECK(first->End() <= second->Start());
1059   }
1060 
1061   TopLevel()->UpdateParentForAllChildren(TopLevel());
1062   TopLevel()->UpdateSpillRangePostMerge(other);
1063   TopLevel()->set_has_slot_use(TopLevel()->has_slot_use() ||
1064                                other->has_slot_use());
1065 
1066 #if DEBUG
1067   Verify();
1068 #endif
1069 }
1070 
1071 
1072 void TopLevelLiveRange::VerifyChildrenInOrder() const {
1073   LifetimePosition last_end = End();
1074   for (const LiveRange* child = this->next(); child != nullptr;
1075        child = child->next()) {
1076     DCHECK(last_end <= child->Start());
1077     last_end = child->End();
1078   }
1079 }
1080 
1081 
1082 void TopLevelLiveRange::Verify() const {
1083   VerifyChildrenInOrder();
1084   for (const LiveRange* child = this; child != nullptr; child = child->next()) {
1085     VerifyChildStructure();
1086   }
1087 }
1088 
1089 
1090 void TopLevelLiveRange::ShortenTo(LifetimePosition start) {
1091   TRACE("Shorten live range %d to [%d\n", vreg(), start.value());
1092   DCHECK_NOT_NULL(first_interval_);
1093   DCHECK(first_interval_->start() <= start);
1094   DCHECK(start < first_interval_->end());
1095   first_interval_->set_start(start);
1096 }
1097 
1098 
1099 void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
1100                                        LifetimePosition end, Zone* zone) {
1101   TRACE("Ensure live range %d in interval [%d %d[\n", vreg(), start.value(),
1102         end.value());
1103   LifetimePosition new_end = end;
1104   while (first_interval_ != nullptr && first_interval_->start() <= end) {
1105     if (first_interval_->end() > end) {
1106       new_end = first_interval_->end();
1107     }
1108     first_interval_ = first_interval_->next();
1109   }
1110 
1111   UseInterval* new_interval = new (zone) UseInterval(start, new_end);
1112   new_interval->set_next(first_interval_);
1113   first_interval_ = new_interval;
1114   if (new_interval->next() == nullptr) {
1115     last_interval_ = new_interval;
1116   }
1117 }
1118 
1119 
1120 void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
1121                                        LifetimePosition end, Zone* zone) {
1122   TRACE("Add to live range %d interval [%d %d[\n", vreg(), start.value(),
1123         end.value());
1124   if (first_interval_ == nullptr) {
1125     UseInterval* interval = new (zone) UseInterval(start, end);
1126     first_interval_ = interval;
1127     last_interval_ = interval;
1128   } else {
1129     if (end == first_interval_->start()) {
1130       first_interval_->set_start(start);
1131     } else if (end < first_interval_->start()) {
1132       UseInterval* interval = new (zone) UseInterval(start, end);
1133       interval->set_next(first_interval_);
1134       first_interval_ = interval;
1135     } else {
1136       // The order of instruction processing (see ProcessInstructions)
1137       // guarantees that each new use interval either precedes, intersects
1138       // with, or touches the last added interval.
1139       DCHECK(start <= first_interval_->end());
1140       first_interval_->set_start(Min(start, first_interval_->start()));
1141       first_interval_->set_end(Max(end, first_interval_->end()));
1142     }
1143   }
1144 }
1145 
1146 
1147 void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos) {
1148   LifetimePosition pos = use_pos->pos();
1149   TRACE("Add to live range %d use position %d\n", vreg(), pos.value());
1150   UsePosition* prev_hint = nullptr;
1151   UsePosition* prev = nullptr;
1152   UsePosition* current = first_pos_;
1153   while (current != nullptr && current->pos() < pos) {
1154     prev_hint = current->HasHint() ? current : prev_hint;
1155     prev = current;
1156     current = current->next();
1157   }
1158 
1159   if (prev == nullptr) {
1160     use_pos->set_next(first_pos_);
1161     first_pos_ = use_pos;
1162   } else {
1163     use_pos->set_next(prev->next());
1164     prev->set_next(use_pos);
1165   }
1166 
1167   if (prev_hint == nullptr && use_pos->HasHint()) {
1168     current_hint_position_ = use_pos;
1169   }
1170 }
1171 
1172 
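// Walks the two sorted interval lists in lockstep and returns true if any
// pair of intervals overlaps.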
1173 static bool AreUseIntervalsIntersecting(UseInterval* interval1,
1174                                         UseInterval* interval2) {
1175   while (interval1 != nullptr && interval2 != nullptr) {
1176     if (interval1->start() < interval2->start()) {
1177       if (interval1->end() > interval2->start()) {
1178         return true;
1179       }
1180       interval1 = interval1->next();
1181     } else {
1182       if (interval2->end() > interval1->start()) {
1183         return true;
1184       }
1185       interval2 = interval2->next();
1186     }
1187   }
1188   return false;
1189 }
1190 
1191 
1192 std::ostream& operator<<(std::ostream& os,
1193                          const PrintableLiveRange& printable_range) {
1194   const LiveRange* range = printable_range.range_;
1195   os << "Range: " << range->TopLevel()->vreg() << ":" << range->relative_id()
1196      << " ";
1197   if (range->TopLevel()->is_phi()) os << "phi ";
1198   if (range->TopLevel()->is_non_loop_phi()) os << "nlphi ";
1199 
1200   os << "{" << std::endl;
1201   UseInterval* interval = range->first_interval();
1202   UsePosition* use_pos = range->first_pos();
1203   PrintableInstructionOperand pio;
1204   pio.register_configuration_ = printable_range.register_configuration_;
1205   while (use_pos != nullptr) {
1206     if (use_pos->HasOperand()) {
1207       pio.op_ = *use_pos->operand();
1208       os << pio << use_pos->pos() << " ";
1209     }
1210     use_pos = use_pos->next();
1211   }
1212   os << std::endl;
1213 
1214   while (interval != nullptr) {
1215     os << '[' << interval->start() << ", " << interval->end() << ')'
1216        << std::endl;
1217     interval = interval->next();
1218   }
1219   os << "}";
1220   return os;
1221 }
1222 
1223 SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
1224     : live_ranges_(zone),
1225       assigned_slot_(kUnassignedSlot),
1226       byte_width_(GetByteWidth(parent->representation())) {
1227   // Spill ranges are created for top level, non-splintered ranges. This is so
1228   // that, when merging decisions are made, we consider the full extent of the
1229   // virtual register, and avoid clobbering it.
1230   DCHECK(!parent->IsSplinter());
1231   UseInterval* result = nullptr;
1232   UseInterval* node = nullptr;
1233   // Copy the intervals for all ranges.
1234   for (LiveRange* range = parent; range != nullptr; range = range->next()) {
1235     UseInterval* src = range->first_interval();
1236     while (src != nullptr) {
1237       UseInterval* new_node = new (zone) UseInterval(src->start(), src->end());
1238       if (result == nullptr) {
1239         result = new_node;
1240       } else {
1241         node->set_next(new_node);
1242       }
1243       node = new_node;
1244       src = src->next();
1245     }
1246   }
1247   use_interval_ = result;
1248   live_ranges().push_back(parent);
1249   end_position_ = node->end();
1250   parent->SetSpillRange(this);
1251 }
1252 
1253 bool SpillRange::IsIntersectingWith(SpillRange* other) const {
1254   if (this->use_interval_ == nullptr || other->use_interval_ == nullptr ||
1255       this->End() <= other->use_interval_->start() ||
1256       other->End() <= this->use_interval_->start()) {
1257     return false;
1258   }
1259   return AreUseIntervalsIntersecting(use_interval_, other->use_interval_);
1260 }
1261 
1262 
1263 bool SpillRange::TryMerge(SpillRange* other) {
1264   if (HasSlot() || other->HasSlot()) return false;
1265   if (byte_width() != other->byte_width() || IsIntersectingWith(other))
1266     return false;
1267 
1268   LifetimePosition max = LifetimePosition::MaxPosition();
1269   if (End() < other->End() && other->End() != max) {
1270     end_position_ = other->End();
1271   }
1272   other->end_position_ = max;
1273 
1274   MergeDisjointIntervals(other->use_interval_);
1275   other->use_interval_ = nullptr;
1276 
1277   for (TopLevelLiveRange* range : other->live_ranges()) {
1278     DCHECK(range->GetSpillRange() == other);
1279     range->SetSpillRange(this);
1280   }
1281 
1282   live_ranges().insert(live_ranges().end(), other->live_ranges().begin(),
1283                        other->live_ranges().end());
1284   other->live_ranges().clear();
1285 
1286   return true;
1287 }
1288 
1289 
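// Splices the (disjoint) |other| interval list into use_interval_, keeping the
// merged list sorted by start position.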
1290 void SpillRange::MergeDisjointIntervals(UseInterval* other) {
1291   UseInterval* tail = nullptr;
1292   UseInterval* current = use_interval_;
1293   while (other != nullptr) {
1294     // Make sure the 'current' list starts first
1295     if (current == nullptr || current->start() > other->start()) {
1296       std::swap(current, other);
1297     }
1298     // Check disjointness
1299     DCHECK(other == nullptr || current->end() <= other->start());
1300     // Append the 'current' node to the result accumulator and move forward
1301     if (tail == nullptr) {
1302       use_interval_ = current;
1303     } else {
1304       tail->set_next(current);
1305     }
1306     tail = current;
1307     current = current->next();
1308   }
1309   // Other list is empty => we are done
1310 }
1311 
1312 
1313 void SpillRange::Print() const {
1314   StdoutStream os;
1315   os << "{" << std::endl;
1316   for (TopLevelLiveRange* range : live_ranges()) {
1317     os << range->vreg() << " ";
1318   }
1319   os << std::endl;
1320 
1321   for (UseInterval* i = interval(); i != nullptr; i = i->next()) {
1322     os << '[' << i->start() << ", " << i->end() << ')' << std::endl;
1323   }
1324   os << "}" << std::endl;
1325 }
1326 
1327 
1328 RegisterAllocationData::PhiMapValue::PhiMapValue(PhiInstruction* phi,
1329                                                  const InstructionBlock* block,
1330                                                  Zone* zone)
1331     : phi_(phi),
1332       block_(block),
1333       incoming_operands_(zone),
1334       assigned_register_(kUnassignedRegister) {
1335   incoming_operands_.reserve(phi->operands().size());
1336 }
1337 
1338 
1339 void RegisterAllocationData::PhiMapValue::AddOperand(
1340     InstructionOperand* operand) {
1341   incoming_operands_.push_back(operand);
1342 }
1343 
1344 
1345 void RegisterAllocationData::PhiMapValue::CommitAssignment(
1346     const InstructionOperand& assigned) {
1347   for (InstructionOperand* operand : incoming_operands_) {
1348     InstructionOperand::ReplaceWith(operand, &assigned);
1349   }
1350 }
1351 
1352 RegisterAllocationData::RegisterAllocationData(
1353     const RegisterConfiguration* config, Zone* zone, Frame* frame,
1354     InstructionSequence* code, const char* debug_name)
1355     : allocation_zone_(zone),
1356       frame_(frame),
1357       code_(code),
1358       debug_name_(debug_name),
1359       config_(config),
1360       phi_map_(allocation_zone()),
1361       live_in_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
1362       live_out_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
1363       live_ranges_(code->VirtualRegisterCount() * 2, nullptr,
1364                    allocation_zone()),
1365       fixed_live_ranges_(this->config()->num_general_registers(), nullptr,
1366                          allocation_zone()),
1367       fixed_float_live_ranges_(allocation_zone()),
1368       fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr,
1369                                 allocation_zone()),
1370       fixed_simd128_live_ranges_(allocation_zone()),
1371       spill_ranges_(code->VirtualRegisterCount(), nullptr, allocation_zone()),
1372       delayed_references_(allocation_zone()),
1373       assigned_registers_(nullptr),
1374       assigned_double_registers_(nullptr),
1375       virtual_register_count_(code->VirtualRegisterCount()),
1376       preassigned_slot_ranges_(zone) {
1377   if (!kSimpleFPAliasing) {
1378     fixed_float_live_ranges_.resize(this->config()->num_float_registers(),
1379                                     nullptr);
1380     fixed_simd128_live_ranges_.resize(this->config()->num_simd128_registers(),
1381                                       nullptr);
1382   }
1383 
1384   assigned_registers_ = new (code_zone())
1385       BitVector(this->config()->num_general_registers(), code_zone());
1386   assigned_double_registers_ = new (code_zone())
1387       BitVector(this->config()->num_double_registers(), code_zone());
1388   this->frame()->SetAllocatedRegisters(assigned_registers_);
1389   this->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
1390 }
1391 
1392 
1393 MoveOperands* RegisterAllocationData::AddGapMove(
1394     int index, Instruction::GapPosition position,
1395     const InstructionOperand& from, const InstructionOperand& to) {
1396   Instruction* instr = code()->InstructionAt(index);
1397   ParallelMove* moves = instr->GetOrCreateParallelMove(position, code_zone());
1398   return moves->AddMove(from, to);
1399 }
1400 
1401 
1402 MachineRepresentation RegisterAllocationData::RepresentationFor(
1403     int virtual_register) {
1404   DCHECK_LT(virtual_register, code()->VirtualRegisterCount());
1405   return code()->GetRepresentation(virtual_register);
1406 }
1407 
1408 
1409 TopLevelLiveRange* RegisterAllocationData::GetOrCreateLiveRangeFor(int index) {
1410   if (index >= static_cast<int>(live_ranges().size())) {
1411     live_ranges().resize(index + 1, nullptr);
1412   }
1413   TopLevelLiveRange* result = live_ranges()[index];
1414   if (result == nullptr) {
1415     result = NewLiveRange(index, RepresentationFor(index));
1416     live_ranges()[index] = result;
1417   }
1418   return result;
1419 }
1420 
1421 
1422 TopLevelLiveRange* RegisterAllocationData::NewLiveRange(
1423     int index, MachineRepresentation rep) {
1424   return new (allocation_zone()) TopLevelLiveRange(index, rep);
1425 }
1426 
1427 
1428 int RegisterAllocationData::GetNextLiveRangeId() {
1429   int vreg = virtual_register_count_++;
1430   if (vreg >= static_cast<int>(live_ranges().size())) {
1431     live_ranges().resize(vreg + 1, nullptr);
1432   }
1433   return vreg;
1434 }
1435 
1436 
1437 TopLevelLiveRange* RegisterAllocationData::NextLiveRange(
1438     MachineRepresentation rep) {
1439   int vreg = GetNextLiveRangeId();
1440   TopLevelLiveRange* ret = NewLiveRange(vreg, rep);
1441   return ret;
1442 }
1443 
1444 
1445 RegisterAllocationData::PhiMapValue* RegisterAllocationData::InitializePhiMap(
1446     const InstructionBlock* block, PhiInstruction* phi) {
1447   RegisterAllocationData::PhiMapValue* map_value = new (allocation_zone())
1448       RegisterAllocationData::PhiMapValue(phi, block, allocation_zone());
1449   auto res =
1450       phi_map_.insert(std::make_pair(phi->virtual_register(), map_value));
1451   DCHECK(res.second);
1452   USE(res);
1453   return map_value;
1454 }
1455 
1456 
1457 RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor(
1458     int virtual_register) {
1459   auto it = phi_map_.find(virtual_register);
1460   DCHECK(it != phi_map_.end());
1461   return it->second;
1462 }
1463 
1464 
1465 RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor(
1466     TopLevelLiveRange* top_range) {
1467   return GetPhiMapValueFor(top_range->vreg());
1468 }
1469 
1470 
1471 bool RegisterAllocationData::ExistsUseWithoutDefinition() {
1472   bool found = false;
1473   BitVector::Iterator iterator(live_in_sets()[0]);
1474   while (!iterator.Done()) {
1475     found = true;
1476     int operand_index = iterator.Current();
1477     PrintF("Register allocator error: live v%d reached first block.\n",
1478            operand_index);
1479     LiveRange* range = GetOrCreateLiveRangeFor(operand_index);
1480     PrintF("  (first use is at %d)\n", range->first_pos()->pos().value());
1481     if (debug_name() == nullptr) {
1482       PrintF("\n");
1483     } else {
1484       PrintF("  (function: %s)\n", debug_name());
1485     }
1486     iterator.Advance();
1487   }
1488   return found;
1489 }
1490 
1491 
1492 // If a range is defined in a deferred block, we can expect the entire range
1493 // to cover only positions in deferred blocks. Otherwise, a block on the
1494 // hot path would be dominated by a deferred block, meaning it is unreachable
1495 // without passing through the deferred block, which is contradictory.
1496 // In particular, when such a range contributes a result back on the hot
1497 // path, it will be as one of the inputs of a phi. In that case, the value
1498 // will be transferred via a move in the Gap::END's of the last instruction
1499 // of a deferred block.
1500 bool RegisterAllocationData::RangesDefinedInDeferredStayInDeferred() {
1501   const size_t live_ranges_size = live_ranges().size();
1502   for (const TopLevelLiveRange* range : live_ranges()) {
1503     CHECK_EQ(live_ranges_size,
1504              live_ranges().size());  // TODO(neis): crbug.com/831822
1505     if (range == nullptr || range->IsEmpty() ||
1506         !code()
1507              ->GetInstructionBlock(range->Start().ToInstructionIndex())
1508              ->IsDeferred()) {
1509       continue;
1510     }
1511     for (const UseInterval* i = range->first_interval(); i != nullptr;
1512          i = i->next()) {
1513       int first = i->FirstGapIndex();
1514       int last = i->LastGapIndex();
1515       for (int instr = first; instr <= last;) {
1516         const InstructionBlock* block = code()->GetInstructionBlock(instr);
1517         if (!block->IsDeferred()) return false;
1518         instr = block->last_instruction_index() + 1;
1519       }
1520     }
1521   }
1522   return true;
1523 }
1524 
1525 SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
1526     TopLevelLiveRange* range) {
1527   DCHECK(!range->HasSpillOperand());
1528 
1529   SpillRange* spill_range = range->GetAllocatedSpillRange();
1530   if (spill_range == nullptr) {
1531     DCHECK(!range->IsSplinter());
1532     spill_range = new (allocation_zone()) SpillRange(range, allocation_zone());
1533   }
1534   range->set_spill_type(TopLevelLiveRange::SpillType::kSpillRange);
1535 
1536   int spill_range_index =
1537       range->IsSplinter() ? range->splintered_from()->vreg() : range->vreg();
1538 
1539   spill_ranges()[spill_range_index] = spill_range;
1540 
1541   return spill_range;
1542 }
1543 
1544 
1545 SpillRange* RegisterAllocationData::CreateSpillRangeForLiveRange(
1546     TopLevelLiveRange* range) {
1547   DCHECK(!range->HasSpillOperand());
1548   DCHECK(!range->IsSplinter());
1549   SpillRange* spill_range =
1550       new (allocation_zone()) SpillRange(range, allocation_zone());
1551   return spill_range;
1552 }
1553 
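// Records that the register `index` of the given representation has been
// assigned. With non-simple FP aliasing, a float32 or simd128 assignment is
// expanded into every float64 register it aliases, so the frame tracks all
// double registers that are effectively in use.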
1554 void RegisterAllocationData::MarkAllocated(MachineRepresentation rep,
1555                                            int index) {
1556   switch (rep) {
1557     case MachineRepresentation::kFloat32:
1558     case MachineRepresentation::kSimd128:
1559       if (kSimpleFPAliasing) {
1560         assigned_double_registers_->Add(index);
1561       } else {
1562         int alias_base_index = -1;
1563         int aliases = config()->GetAliases(
1564             rep, index, MachineRepresentation::kFloat64, &alias_base_index);
1565         DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
1566         while (aliases--) {
1567           int aliased_reg = alias_base_index + aliases;
1568           assigned_double_registers_->Add(aliased_reg);
1569         }
1570       }
1571       break;
1572     case MachineRepresentation::kFloat64:
1573       assigned_double_registers_->Add(index);
1574       break;
1575     default:
1576       DCHECK(!IsFloatingPoint(rep));
1577       assigned_registers_->Add(index);
1578       break;
1579   }
1580 }
1581 
1582 bool RegisterAllocationData::IsBlockBoundary(LifetimePosition pos) const {
1583   return pos.IsFullStart() &&
1584          code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
1585              pos.ToInstructionIndex();
1586 }
1587 
1588 
1589 ConstraintBuilder::ConstraintBuilder(RegisterAllocationData* data)
1590     : data_(data) {}
1591 
1592 
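// Replaces an operand carrying a fixed policy (fixed stack slot, fixed
// general register, or fixed FP register) with the concrete AllocatedOperand
// it names. If the value is tagged and the instruction has a reference map,
// the allocated location is also recorded there so the GC can find it.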
1593 InstructionOperand* ConstraintBuilder::AllocateFixed(
1594     UnallocatedOperand* operand, int pos, bool is_tagged) {
1595   TRACE("Allocating fixed reg for op %d\n", operand->virtual_register());
1596   DCHECK(operand->HasFixedPolicy());
1597   InstructionOperand allocated;
1598   MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
1599   int virtual_register = operand->virtual_register();
1600   if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
1601     rep = data()->RepresentationFor(virtual_register);
1602   }
1603   if (operand->HasFixedSlotPolicy()) {
1604     allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT, rep,
1605                                  operand->fixed_slot_index());
1606   } else if (operand->HasFixedRegisterPolicy()) {
1607     DCHECK(!IsFloatingPoint(rep));
1608     DCHECK(data()->config()->IsAllocatableGeneralCode(
1609         operand->fixed_register_index()));
1610     allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
1611                                  operand->fixed_register_index());
1612   } else if (operand->HasFixedFPRegisterPolicy()) {
1613     DCHECK(IsFloatingPoint(rep));
1614     DCHECK_NE(InstructionOperand::kInvalidVirtualRegister, virtual_register);
1615     allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
1616                                  operand->fixed_register_index());
1617   } else {
1618     UNREACHABLE();
1619   }
1620   InstructionOperand::ReplaceWith(operand, &allocated);
1621   if (is_tagged) {
1622     TRACE("Fixed reg is tagged at %d\n", pos);
1623     Instruction* instr = code()->InstructionAt(pos);
1624     if (instr->HasReferenceMap()) {
1625       instr->reference_map()->RecordReference(*AllocatedOperand::cast(operand));
1626     }
1627   }
1628   return operand;
1629 }
1630 
1631 
1632 void ConstraintBuilder::MeetRegisterConstraints() {
1633   for (InstructionBlock* block : code()->instruction_blocks()) {
1634     MeetRegisterConstraints(block);
1635   }
1636 }
1637 
1638 
1639 void ConstraintBuilder::MeetRegisterConstraints(const InstructionBlock* block) {
1640   int start = block->first_instruction_index();
1641   int end = block->last_instruction_index();
1642   DCHECK_NE(-1, start);
1643   for (int i = start; i <= end; ++i) {
1644     MeetConstraintsBefore(i);
1645     if (i != end) MeetConstraintsAfter(i);
1646   }
1647   // Meet register constraints for the last instruction in the block.
1648   MeetRegisterConstraintsForLastInstructionInBlock(block);
1649 }
1650 
1651 
1652 void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
1653     const InstructionBlock* block) {
1654   int end = block->last_instruction_index();
1655   Instruction* last_instruction = code()->InstructionAt(end);
1656   for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
1657     InstructionOperand* output_operand = last_instruction->OutputAt(i);
1658     DCHECK(!output_operand->IsConstant());
1659     UnallocatedOperand* output = UnallocatedOperand::cast(output_operand);
1660     int output_vreg = output->virtual_register();
1661     TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
1662     bool assigned = false;
1663     if (output->HasFixedPolicy()) {
1664       AllocateFixed(output, -1, false);
1665       // This value is produced on the stack; we never need to spill it.
1666       if (output->IsStackSlot()) {
1667         DCHECK(LocationOperand::cast(output)->index() <
1668                data()->frame()->GetSpillSlotCount());
1669         range->SetSpillOperand(LocationOperand::cast(output));
1670         range->SetSpillStartIndex(end);
1671         assigned = true;
1672       }
1673 
1674       for (const RpoNumber& succ : block->successors()) {
1675         const InstructionBlock* successor = code()->InstructionBlockAt(succ);
1676         DCHECK_EQ(1, successor->PredecessorCount());
1677         int gap_index = successor->first_instruction_index();
1678         // Create an unconstrained operand for the same virtual register
1679         // and insert a gap move from the fixed output to the operand.
1680         UnallocatedOperand output_copy(UnallocatedOperand::REGISTER_OR_SLOT,
1681                                        output_vreg);
1682         data()->AddGapMove(gap_index, Instruction::START, *output, output_copy);
1683       }
1684     }
1685 
1686     if (!assigned) {
1687       for (const RpoNumber& succ : block->successors()) {
1688         const InstructionBlock* successor = code()->InstructionBlockAt(succ);
1689         DCHECK_EQ(1, successor->PredecessorCount());
1690         int gap_index = successor->first_instruction_index();
1691         range->RecordSpillLocation(allocation_zone(), gap_index, output);
1692         range->SetSpillStartIndex(gap_index);
1693       }
1694     }
1695   }
1696 }
1697 
1698 
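// Applies the constraints that take effect after the instruction at
// instr_index: fixed temporaries are pinned to their locations, constant
// outputs become their own spill operands, and fixed outputs are forwarded
// to an unconstrained copy via a gap move in the START gap of the next
// instruction. A spill location is recorded for any output not already
// produced directly on the stack.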
1699 void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
1700   Instruction* first = code()->InstructionAt(instr_index);
1701   // Handle fixed temporaries.
1702   for (size_t i = 0; i < first->TempCount(); i++) {
1703     UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i));
1704     if (temp->HasFixedPolicy()) AllocateFixed(temp, instr_index, false);
1705   }
1706   // Handle constant/fixed output operands.
1707   for (size_t i = 0; i < first->OutputCount(); i++) {
1708     InstructionOperand* output = first->OutputAt(i);
1709     if (output->IsConstant()) {
1710       int output_vreg = ConstantOperand::cast(output)->virtual_register();
1711       TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
1712       range->SetSpillStartIndex(instr_index + 1);
1713       range->SetSpillOperand(output);
1714       continue;
1715     }
1716     UnallocatedOperand* first_output = UnallocatedOperand::cast(output);
1717     TopLevelLiveRange* range =
1718         data()->GetOrCreateLiveRangeFor(first_output->virtual_register());
1719     bool assigned = false;
1720     if (first_output->HasFixedPolicy()) {
1721       int output_vreg = first_output->virtual_register();
1722       UnallocatedOperand output_copy(UnallocatedOperand::REGISTER_OR_SLOT,
1723                                      output_vreg);
1724       bool is_tagged = code()->IsReference(output_vreg);
1725       if (first_output->HasSecondaryStorage()) {
1726         range->MarkHasPreassignedSlot();
1727         data()->preassigned_slot_ranges().push_back(
1728             std::make_pair(range, first_output->GetSecondaryStorage()));
1729       }
1730       AllocateFixed(first_output, instr_index, is_tagged);
1731 
1732       // This value is produced on the stack; we never need to spill it.
1733       if (first_output->IsStackSlot()) {
1734         DCHECK(LocationOperand::cast(first_output)->index() <
1735                data()->frame()->GetTotalFrameSlotCount());
1736         range->SetSpillOperand(LocationOperand::cast(first_output));
1737         range->SetSpillStartIndex(instr_index + 1);
1738         assigned = true;
1739       }
1740       data()->AddGapMove(instr_index + 1, Instruction::START, *first_output,
1741                          output_copy);
1742     }
1743     // Make sure we add a gap move for spilling (if we have not done
1744     // so already).
1745     if (!assigned) {
1746       range->RecordSpillLocation(allocation_zone(), instr_index + 1,
1747                                  first_output);
1748       range->SetSpillStartIndex(instr_index + 1);
1749     }
1750   }
1751 }
1752 
1753 
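// Applies the constraints that take effect before the instruction at
// instr_index: fixed inputs are satisfied by a gap move in the END gap
// position of the same instruction, and "output same as input" constraints
// are handled by renaming the input to the output's virtual register and
// feeding it through another gap move.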
1754 void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
1755   Instruction* second = code()->InstructionAt(instr_index);
1756   // Handle fixed input operands of second instruction.
1757   for (size_t i = 0; i < second->InputCount(); i++) {
1758     InstructionOperand* input = second->InputAt(i);
1759     if (input->IsImmediate() || input->IsExplicit()) {
1760       continue;  // Ignore immediates and explicitly reserved registers.
1761     }
1762     UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
1763     if (cur_input->HasFixedPolicy()) {
1764       int input_vreg = cur_input->virtual_register();
1765       UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
1766                                     input_vreg);
1767       bool is_tagged = code()->IsReference(input_vreg);
1768       AllocateFixed(cur_input, instr_index, is_tagged);
1769       data()->AddGapMove(instr_index, Instruction::END, input_copy, *cur_input);
1770     }
1771   }
1772   // Handle "output same as input" for second instruction.
1773   for (size_t i = 0; i < second->OutputCount(); i++) {
1774     InstructionOperand* output = second->OutputAt(i);
1775     if (!output->IsUnallocated()) continue;
1776     UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
1777     if (!second_output->HasSameAsInputPolicy()) continue;
1778     DCHECK_EQ(0, i);  // Only valid for first output.
1779     UnallocatedOperand* cur_input =
1780         UnallocatedOperand::cast(second->InputAt(0));
1781     int output_vreg = second_output->virtual_register();
1782     int input_vreg = cur_input->virtual_register();
1783     UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
1784                                   input_vreg);
1785     *cur_input =
1786         UnallocatedOperand(*cur_input, second_output->virtual_register());
1787     MoveOperands* gap_move = data()->AddGapMove(instr_index, Instruction::END,
1788                                                 input_copy, *cur_input);
1789     if (code()->IsReference(input_vreg) && !code()->IsReference(output_vreg)) {
1790       if (second->HasReferenceMap()) {
1791         RegisterAllocationData::DelayedReference delayed_reference = {
1792             second->reference_map(), &gap_move->source()};
1793         data()->delayed_references().push_back(delayed_reference);
1794       }
1795     } else if (!code()->IsReference(input_vreg) &&
1796                code()->IsReference(output_vreg)) {
1797       // The input is assumed to immediately have a tagged representation,
1798       // before the pointer map can be used. I.e. the pointer map at the
1799       // instruction will include the output operand (whose value at the
1800       // beginning of the instruction is equal to the input operand). If
1801       // this is not desired, then the pointer map at this instruction needs
1802       // to be adjusted manually.
1803     }
1804   }
1805 }
1806 
1807 
1808 void ConstraintBuilder::ResolvePhis() {
1809   // Process the blocks in reverse order.
1810   for (InstructionBlock* block : base::Reversed(code()->instruction_blocks())) {
1811     ResolvePhis(block);
1812   }
1813 }
1814 
1815 
1816 void ConstraintBuilder::ResolvePhis(const InstructionBlock* block) {
1817   for (PhiInstruction* phi : block->phis()) {
1818     int phi_vreg = phi->virtual_register();
1819     RegisterAllocationData::PhiMapValue* map_value =
1820         data()->InitializePhiMap(block, phi);
1821     InstructionOperand& output = phi->output();
1822     // Map the destination operands, so the commitment phase can find them.
1823     for (size_t i = 0; i < phi->operands().size(); ++i) {
1824       InstructionBlock* cur_block =
1825           code()->InstructionBlockAt(block->predecessors()[i]);
1826       UnallocatedOperand input(UnallocatedOperand::REGISTER_OR_SLOT,
1827                                phi->operands()[i]);
1828       MoveOperands* move = data()->AddGapMove(
1829           cur_block->last_instruction_index(), Instruction::END, input, output);
1830       map_value->AddOperand(&move->destination());
1831       DCHECK(!code()
1832                   ->InstructionAt(cur_block->last_instruction_index())
1833                   ->HasReferenceMap());
1834     }
1835     TopLevelLiveRange* live_range = data()->GetOrCreateLiveRangeFor(phi_vreg);
1836     int gap_index = block->first_instruction_index();
1837     live_range->RecordSpillLocation(allocation_zone(), gap_index, &output);
1838     live_range->SetSpillStartIndex(gap_index);
1839     // We use the phi-ness of some nodes in some later heuristics.
1840     live_range->set_is_phi(true);
1841     live_range->set_is_non_loop_phi(!block->IsLoopHeader());
1842   }
1843 }
1844 
1845 
1846 LiveRangeBuilder::LiveRangeBuilder(RegisterAllocationData* data,
1847                                    Zone* local_zone)
1848     : data_(data), phi_hints_(local_zone) {}
1849 
1850 
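// Computes (and caches) the live-out set of a block: the union of the
// live-in sets of its forward successors plus the phi inputs flowing to
// those successors along this edge. Backedges are skipped here; loop
// liveness is completed later in ProcessLoopHeader.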
1851 BitVector* LiveRangeBuilder::ComputeLiveOut(const InstructionBlock* block,
1852                                             RegisterAllocationData* data) {
1853   size_t block_index = block->rpo_number().ToSize();
1854   BitVector* live_out = data->live_out_sets()[block_index];
1855   if (live_out == nullptr) {
1856     // Compute live out for the given block, not including values live over
1857     // backward successor edges.
1858     Zone* zone = data->allocation_zone();
1859     const InstructionSequence* code = data->code();
1860 
1861     live_out = new (zone) BitVector(code->VirtualRegisterCount(), zone);
1862 
1863     // Process all successor blocks.
1864     for (const RpoNumber& succ : block->successors()) {
1865       // Add values live on entry to the successor.
1866       if (succ <= block->rpo_number()) continue;
1867       BitVector* live_in = data->live_in_sets()[succ.ToSize()];
1868       if (live_in != nullptr) live_out->Union(*live_in);
1869 
1870       // All phi input operands corresponding to this successor edge are live
1871       // out from this block.
1872       const InstructionBlock* successor = code->InstructionBlockAt(succ);
1873       size_t index = successor->PredecessorIndexOf(block->rpo_number());
1874       DCHECK(index < successor->PredecessorCount());
1875       for (PhiInstruction* phi : successor->phis()) {
1876         live_out->Add(phi->operands()[index]);
1877       }
1878     }
1879     data->live_out_sets()[block_index] = live_out;
1880   }
1881   return live_out;
1882 }
1883 
1884 
1885 void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block,
1886                                            BitVector* live_out) {
1887   // Add an interval that includes the entire block to the live range for
1888   // each live_out value.
1889   LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
1890       block->first_instruction_index());
1891   LifetimePosition end = LifetimePosition::InstructionFromInstructionIndex(
1892                              block->last_instruction_index())
1893                              .NextStart();
1894   BitVector::Iterator iterator(live_out);
1895   while (!iterator.Done()) {
1896     int operand_index = iterator.Current();
1897     TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
1898     range->AddUseInterval(start, end, allocation_zone());
1899     iterator.Advance();
1900   }
1901 }
1902 
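// Fixed FP live ranges are keyed by negative IDs that are offset past the
// general (and, depending on the representation, double and float) register
// counts, so fixed ranges of different representations never collide. For
// example, with 16 general, 16 double and 8 float registers (illustrative
// numbers only), float32 register 3 maps to -3 - 1 - 16 - 16 = -36.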
1903 int LiveRangeBuilder::FixedFPLiveRangeID(int index, MachineRepresentation rep) {
1904   int result = -index - 1;
1905   switch (rep) {
1906     case MachineRepresentation::kSimd128:
1907       result -= config()->num_float_registers();
1908       V8_FALLTHROUGH;
1909     case MachineRepresentation::kFloat32:
1910       result -= config()->num_double_registers();
1911       V8_FALLTHROUGH;
1912     case MachineRepresentation::kFloat64:
1913       result -= config()->num_general_registers();
1914       break;
1915     default:
1916       UNREACHABLE();
1917       break;
1918   }
1919   return result;
1920 }
1921 
1922 TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) {
1923   DCHECK(index < config()->num_general_registers());
1924   TopLevelLiveRange* result = data()->fixed_live_ranges()[index];
1925   if (result == nullptr) {
1926     MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
1927     result = data()->NewLiveRange(FixedLiveRangeID(index), rep);
1928     DCHECK(result->IsFixed());
1929     result->set_assigned_register(index);
1930     data()->MarkAllocated(rep, index);
1931     data()->fixed_live_ranges()[index] = result;
1932   }
1933   return result;
1934 }
1935 
1936 TopLevelLiveRange* LiveRangeBuilder::FixedFPLiveRangeFor(
1937     int index, MachineRepresentation rep) {
1938   int num_regs = config()->num_double_registers();
1939   ZoneVector<TopLevelLiveRange*>* live_ranges =
1940       &data()->fixed_double_live_ranges();
1941   if (!kSimpleFPAliasing) {
1942     switch (rep) {
1943       case MachineRepresentation::kFloat32:
1944         num_regs = config()->num_float_registers();
1945         live_ranges = &data()->fixed_float_live_ranges();
1946         break;
1947       case MachineRepresentation::kSimd128:
1948         num_regs = config()->num_simd128_registers();
1949         live_ranges = &data()->fixed_simd128_live_ranges();
1950         break;
1951       default:
1952         break;
1953     }
1954   }
1955 
1956   DCHECK(index < num_regs);
1957   USE(num_regs);
1958   TopLevelLiveRange* result = (*live_ranges)[index];
1959   if (result == nullptr) {
1960     result = data()->NewLiveRange(FixedFPLiveRangeID(index, rep), rep);
1961     DCHECK(result->IsFixed());
1962     result->set_assigned_register(index);
1963     data()->MarkAllocated(rep, index);
1964     (*live_ranges)[index] = result;
1965   }
1966   return result;
1967 }
1968 
1969 TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand) {
1970   if (operand->IsUnallocated()) {
1971     return data()->GetOrCreateLiveRangeFor(
1972         UnallocatedOperand::cast(operand)->virtual_register());
1973   } else if (operand->IsConstant()) {
1974     return data()->GetOrCreateLiveRangeFor(
1975         ConstantOperand::cast(operand)->virtual_register());
1976   } else if (operand->IsRegister()) {
1977     return FixedLiveRangeFor(
1978         LocationOperand::cast(operand)->GetRegister().code());
1979   } else if (operand->IsFPRegister()) {
1980     LocationOperand* op = LocationOperand::cast(operand);
1981     return FixedFPLiveRangeFor(op->register_code(), op->representation());
1982   } else {
1983     return nullptr;
1984   }
1985 }
1986 
1987 
1988 UsePosition* LiveRangeBuilder::NewUsePosition(LifetimePosition pos,
1989                                               InstructionOperand* operand,
1990                                               void* hint,
1991                                               UsePositionHintType hint_type) {
1992   return new (allocation_zone()) UsePosition(pos, operand, hint, hint_type);
1993 }
1994 
1995 
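// Records a definition of `operand` at `position`. If the range is empty or
// starts later, a minimal one-position interval is created (a definition
// without a use); otherwise the existing interval is shortened to begin at
// the definition. For unallocated operands, a UsePosition carrying the given
// hint is attached and returned.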
1996 UsePosition* LiveRangeBuilder::Define(LifetimePosition position,
1997                                       InstructionOperand* operand, void* hint,
1998                                       UsePositionHintType hint_type) {
1999   TopLevelLiveRange* range = LiveRangeFor(operand);
2000   if (range == nullptr) return nullptr;
2001 
2002   if (range->IsEmpty() || range->Start() > position) {
2003     // Can happen if there is a definition without use.
2004     range->AddUseInterval(position, position.NextStart(), allocation_zone());
2005     range->AddUsePosition(NewUsePosition(position.NextStart()));
2006   } else {
2007     range->ShortenTo(position);
2008   }
2009   if (!operand->IsUnallocated()) return nullptr;
2010   UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
2011   UsePosition* use_pos =
2012       NewUsePosition(position, unalloc_operand, hint, hint_type);
2013   range->AddUsePosition(use_pos);
2014   return use_pos;
2015 }
2016 
2017 
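// Records a use of `operand` at `position` and extends its live range back
// to `block_start`, since until the definition is seen the value must be
// assumed live from the top of the block up to this use. Returns the new
// UsePosition for unallocated operands, nullptr otherwise.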
2018 UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start,
2019                                    LifetimePosition position,
2020                                    InstructionOperand* operand, void* hint,
2021                                    UsePositionHintType hint_type) {
2022   TopLevelLiveRange* range = LiveRangeFor(operand);
2023   if (range == nullptr) return nullptr;
2024   UsePosition* use_pos = nullptr;
2025   if (operand->IsUnallocated()) {
2026     UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
2027     use_pos = NewUsePosition(position, unalloc_operand, hint, hint_type);
2028     range->AddUsePosition(use_pos);
2029   }
2030   range->AddUseInterval(block_start, position, allocation_zone());
2031   return use_pos;
2032 }
2033 
2034 
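// Walks the block's instructions in reverse order, maintaining `live` as the
// set of virtual registers live at the current position: outputs kill their
// vregs, inputs and gap-move sources revive theirs, register-clobbering
// instructions add one-position use intervals to every allocatable fixed
// register, and gap moves additionally seed the use-position hints.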
2035 void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
2036                                            BitVector* live) {
2037   int block_start = block->first_instruction_index();
2038   LifetimePosition block_start_position =
2039       LifetimePosition::GapFromInstructionIndex(block_start);
2040   bool fixed_float_live_ranges = false;
2041   bool fixed_simd128_live_ranges = false;
2042   if (!kSimpleFPAliasing) {
2043     int mask = data()->code()->representation_mask();
2044     fixed_float_live_ranges = (mask & kFloatRepBit) != 0;
2045     fixed_simd128_live_ranges = (mask & kSimd128RepBit) != 0;
2046   }
2047 
2048   for (int index = block->last_instruction_index(); index >= block_start;
2049        index--) {
2050     LifetimePosition curr_position =
2051         LifetimePosition::InstructionFromInstructionIndex(index);
2052     Instruction* instr = code()->InstructionAt(index);
2053     DCHECK_NOT_NULL(instr);
2054     DCHECK(curr_position.IsInstructionPosition());
2055     // Process output, inputs, and temps of this instruction.
2056     for (size_t i = 0; i < instr->OutputCount(); i++) {
2057       InstructionOperand* output = instr->OutputAt(i);
2058       if (output->IsUnallocated()) {
2059         // Unsupported.
2060         DCHECK(!UnallocatedOperand::cast(output)->HasSlotPolicy());
2061         int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
2062         live->Remove(out_vreg);
2063       } else if (output->IsConstant()) {
2064         int out_vreg = ConstantOperand::cast(output)->virtual_register();
2065         live->Remove(out_vreg);
2066       }
2067       if (block->IsHandler() && index == block_start && output->IsAllocated() &&
2068           output->IsRegister() &&
2069           AllocatedOperand::cast(output)->GetRegister() ==
2070               v8::internal::kReturnRegister0) {
2071         // The register defined here is blocked from gap start - it is the
2072         // exception value.
2073         // TODO(mtrofin): should we explore an explicit opcode for
2074         // the first instruction in the handler?
2075         Define(LifetimePosition::GapFromInstructionIndex(index), output);
2076       } else {
2077         Define(curr_position, output);
2078       }
2079     }
2080 
2081     if (instr->ClobbersRegisters()) {
2082       for (int i = 0; i < config()->num_allocatable_general_registers(); ++i) {
2083         // Create a UseInterval at this instruction for all fixed registers,
2084         // (including the instruction outputs). Adding another UseInterval here
2085         // is OK because AddUseInterval will just merge it with the existing
2086         // one at the end of the range.
2087         int code = config()->GetAllocatableGeneralCode(i);
2088         TopLevelLiveRange* range = FixedLiveRangeFor(code);
2089         range->AddUseInterval(curr_position, curr_position.End(),
2090                               allocation_zone());
2091       }
2092     }
2093 
2094     if (instr->ClobbersDoubleRegisters()) {
2095       for (int i = 0; i < config()->num_allocatable_double_registers(); ++i) {
2096         // Add a UseInterval for all DoubleRegisters. See comment above for
2097         // general registers.
2098         int code = config()->GetAllocatableDoubleCode(i);
2099         TopLevelLiveRange* range =
2100             FixedFPLiveRangeFor(code, MachineRepresentation::kFloat64);
2101         range->AddUseInterval(curr_position, curr_position.End(),
2102                               allocation_zone());
2103       }
2104       // Clobber fixed float registers on archs with non-simple aliasing.
2105       if (!kSimpleFPAliasing) {
2106         if (fixed_float_live_ranges) {
2107           for (int i = 0; i < config()->num_allocatable_float_registers();
2108                ++i) {
2109             // Add a UseInterval for all FloatRegisters. See comment above for
2110             // general registers.
2111             int code = config()->GetAllocatableFloatCode(i);
2112             TopLevelLiveRange* range =
2113                 FixedFPLiveRangeFor(code, MachineRepresentation::kFloat32);
2114             range->AddUseInterval(curr_position, curr_position.End(),
2115                                   allocation_zone());
2116           }
2117         }
2118         if (fixed_simd128_live_ranges) {
2119           for (int i = 0; i < config()->num_allocatable_simd128_registers();
2120                ++i) {
2121             int code = config()->GetAllocatableSimd128Code(i);
2122             TopLevelLiveRange* range =
2123                 FixedFPLiveRangeFor(code, MachineRepresentation::kSimd128);
2124             range->AddUseInterval(curr_position, curr_position.End(),
2125                                   allocation_zone());
2126           }
2127         }
2128       }
2129     }
2130 
2131     for (size_t i = 0; i < instr->InputCount(); i++) {
2132       InstructionOperand* input = instr->InputAt(i);
2133       if (input->IsImmediate() || input->IsExplicit()) {
2134         continue;  // Ignore immediates and explicitly reserved registers.
2135       }
2136       LifetimePosition use_pos;
2137       if (input->IsUnallocated() &&
2138           UnallocatedOperand::cast(input)->IsUsedAtStart()) {
2139         use_pos = curr_position;
2140       } else {
2141         use_pos = curr_position.End();
2142       }
2143 
2144       if (input->IsUnallocated()) {
2145         UnallocatedOperand* unalloc = UnallocatedOperand::cast(input);
2146         int vreg = unalloc->virtual_register();
2147         live->Add(vreg);
2148         if (unalloc->HasSlotPolicy()) {
2149           data()->GetOrCreateLiveRangeFor(vreg)->set_has_slot_use(true);
2150         }
2151       }
2152       Use(block_start_position, use_pos, input);
2153     }
2154 
2155     for (size_t i = 0; i < instr->TempCount(); i++) {
2156       InstructionOperand* temp = instr->TempAt(i);
2157       // Unsupported.
2158       DCHECK_IMPLIES(temp->IsUnallocated(),
2159                      !UnallocatedOperand::cast(temp)->HasSlotPolicy());
2160       if (instr->ClobbersTemps()) {
2161         if (temp->IsRegister()) continue;
2162         if (temp->IsUnallocated()) {
2163           UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp);
2164           if (temp_unalloc->HasFixedPolicy()) {
2165             continue;
2166           }
2167         }
2168       }
2169       Use(block_start_position, curr_position.End(), temp);
2170       Define(curr_position, temp);
2171     }
2172 
2173     // Process the moves of the instruction's gaps, making their sources live.
2174     const Instruction::GapPosition kPositions[] = {Instruction::END,
2175                                                    Instruction::START};
2176     curr_position = curr_position.PrevStart();
2177     DCHECK(curr_position.IsGapPosition());
2178     for (const Instruction::GapPosition& position : kPositions) {
2179       ParallelMove* move = instr->GetParallelMove(position);
2180       if (move == nullptr) continue;
2181       if (position == Instruction::END) {
2182         curr_position = curr_position.End();
2183       } else {
2184         curr_position = curr_position.Start();
2185       }
2186       for (MoveOperands* cur : *move) {
2187         InstructionOperand& from = cur->source();
2188         InstructionOperand& to = cur->destination();
2189         void* hint = &to;
2190         UsePositionHintType hint_type = UsePosition::HintTypeForOperand(to);
2191         UsePosition* to_use = nullptr;
2192         int phi_vreg = -1;
2193         if (to.IsUnallocated()) {
2194           int to_vreg = UnallocatedOperand::cast(to).virtual_register();
2195           TopLevelLiveRange* to_range =
2196               data()->GetOrCreateLiveRangeFor(to_vreg);
2197           if (to_range->is_phi()) {
2198             phi_vreg = to_vreg;
2199             if (to_range->is_non_loop_phi()) {
2200               hint = to_range->current_hint_position();
2201               hint_type = hint == nullptr ? UsePositionHintType::kNone
2202                                           : UsePositionHintType::kUsePos;
2203             } else {
2204               hint_type = UsePositionHintType::kPhi;
2205               hint = data()->GetPhiMapValueFor(to_vreg);
2206             }
2207           } else {
2208             if (live->Contains(to_vreg)) {
2209               to_use = Define(curr_position, &to, &from,
2210                               UsePosition::HintTypeForOperand(from));
2211               live->Remove(to_vreg);
2212             } else {
2213               cur->Eliminate();
2214               continue;
2215             }
2216           }
2217         } else {
2218           Define(curr_position, &to);
2219         }
2220         UsePosition* from_use =
2221             Use(block_start_position, curr_position, &from, hint, hint_type);
2222         // Mark range live.
2223         if (from.IsUnallocated()) {
2224           live->Add(UnallocatedOperand::cast(from).virtual_register());
2225         }
2226         // Resolve use position hints just created.
2227         if (to_use != nullptr && from_use != nullptr) {
2228           to_use->ResolveHint(from_use);
2229           from_use->ResolveHint(to_use);
2230         }
2231         DCHECK_IMPLIES(to_use != nullptr, to_use->IsResolved());
2232         DCHECK_IMPLIES(from_use != nullptr, from_use->IsResolved());
2233         // Potentially resolve phi hint.
2234         if (phi_vreg != -1) ResolvePhiHint(&from, from_use);
2235       }
2236     }
2237   }
2238 }
2239 
2240 void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
2241                                    BitVector* live) {
2242   for (PhiInstruction* phi : block->phis()) {
2243     // The live range interval already ends at the first instruction of the
2244     // block.
2245     int phi_vreg = phi->virtual_register();
2246     live->Remove(phi_vreg);
2247     // Select a hint from a predecessor block that precedes this block in the
2248     // rpo order. In order of priority:
2249     // - Avoid hints from deferred blocks.
2250     // - Prefer hints from allocated (or explicit) operands.
2251     // - Prefer hints from empty blocks (containing just parallel moves and a
2252     //   jump). In these cases, if we can elide the moves, the jump threader
2253     //   is likely to be able to elide the jump.
2254     // The enforcement of hinting in rpo order is required because hint
2255     // resolution that happens later in the compiler pipeline visits
2256     // instructions in reverse rpo order, relying on the fact that phis are
2257     // encountered before their hints.
2258     InstructionOperand* hint = nullptr;
2259     int hint_preference = 0;
2260 
2261     // The cost of hinting increases with the number of predecessors. At the
2262     // same time, the typical benefit decreases, since this hinting only
2263     // optimises the execution path through one predecessor. A limit of 2 is
2264     // sufficient to hit the common if/else pattern.
2265     int predecessor_limit = 2;
2266 
2267     for (RpoNumber predecessor : block->predecessors()) {
2268       const InstructionBlock* predecessor_block =
2269           code()->InstructionBlockAt(predecessor);
2270       DCHECK_EQ(predecessor_block->rpo_number(), predecessor);
2271 
2272       // Only take hints from earlier rpo numbers.
2273       if (predecessor >= block->rpo_number()) continue;
2274 
2275       // Look up the predecessor instruction.
2276       const Instruction* predecessor_instr =
2277           GetLastInstruction(code(), predecessor_block);
2278       InstructionOperand* predecessor_hint = nullptr;
2279       // Phis are assigned in the END position of the last instruction in each
2280       // predecessor block.
2281       for (MoveOperands* move :
2282            *predecessor_instr->GetParallelMove(Instruction::END)) {
2283         InstructionOperand& to = move->destination();
2284         if (to.IsUnallocated() &&
2285             UnallocatedOperand::cast(to).virtual_register() == phi_vreg) {
2286           predecessor_hint = &move->source();
2287           break;
2288         }
2289       }
2290       DCHECK_NOT_NULL(predecessor_hint);
2291 
2292       // For each predecessor, generate a score according to the priorities
2293       // described above, and pick the best one. Flags in higher-order bits have
2294       // a higher priority than those in lower-order bits.
2295       int predecessor_hint_preference = 0;
2296       const int kNotDeferredBlockPreference = (1 << 2);
2297       const int kMoveIsAllocatedPreference = (1 << 1);
2298       const int kBlockIsEmptyPreference = (1 << 0);
2299 
2300       // - Avoid hints from deferred blocks.
2301       if (!predecessor_block->IsDeferred()) {
2302         predecessor_hint_preference |= kNotDeferredBlockPreference;
2303       }
2304 
2305       // - Prefer hints from allocated (or explicit) operands.
2306       //
2307       // Already-allocated or explicit operands are typically assigned using
2308       // the parallel moves on the last instruction. For example:
2309       //
2310       //      gap (v101 = [x0|R|w32]) (v100 = v101)
2311       //      ArchJmp
2312       //    ...
2313       //    phi: v100 = v101 v102
2314       //
2315       // We have already found the END move, so look for a matching START move
2316       // from an allocated (or explicit) operand.
2317       //
2318       // Note that we cannot simply look up data()->live_ranges()[vreg] here
2319       // because the live ranges are still being built when this function is
2320       // called.
2321       // TODO(v8): Find a way to separate hinting from live range analysis in
2322       // BuildLiveRanges so that we can use the O(1) live-range look-up.
2323       auto moves = predecessor_instr->GetParallelMove(Instruction::START);
2324       if (moves != nullptr) {
2325         for (MoveOperands* move : *moves) {
2326           InstructionOperand& to = move->destination();
2327           if (predecessor_hint->Equals(to)) {
2328             if (move->source().IsAllocated() || move->source().IsExplicit()) {
2329               predecessor_hint_preference |= kMoveIsAllocatedPreference;
2330             }
2331             break;
2332           }
2333         }
2334       }
2335 
2336       // - Prefer hints from empty blocks.
2337       if (predecessor_block->last_instruction_index() ==
2338           predecessor_block->first_instruction_index()) {
2339         predecessor_hint_preference |= kBlockIsEmptyPreference;
2340       }
2341 
2342       if ((hint == nullptr) ||
2343           (predecessor_hint_preference > hint_preference)) {
2344         // Take the hint from this predecessor.
2345         hint = predecessor_hint;
2346         hint_preference = predecessor_hint_preference;
2347       }
2348 
2349       if (--predecessor_limit <= 0) break;
2350     }
2351     DCHECK_NOT_NULL(hint);
2352 
2353     LifetimePosition block_start = LifetimePosition::GapFromInstructionIndex(
2354         block->first_instruction_index());
2355     UsePosition* use_pos = Define(block_start, &phi->output(), hint,
2356                                   UsePosition::HintTypeForOperand(*hint));
2357     MapPhiHint(hint, use_pos);
2358   }
2359 }
2360 
2361 
2362 void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block,
2363                                          BitVector* live) {
2364   DCHECK(block->IsLoopHeader());
2365   // Add a live range stretching from the first loop instruction to the last
2366   // for each value live on entry to the header.
2367   BitVector::Iterator iterator(live);
2368   LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
2369       block->first_instruction_index());
2370   LifetimePosition end = LifetimePosition::GapFromInstructionIndex(
2371                              code()->LastLoopInstructionIndex(block))
2372                              .NextFullStart();
2373   while (!iterator.Done()) {
2374     int operand_index = iterator.Current();
2375     TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
2376     range->EnsureInterval(start, end, allocation_zone());
2377     iterator.Advance();
2378   }
2379   // Insert all values into the live in sets of all blocks in the loop.
2380   for (int i = block->rpo_number().ToInt() + 1; i < block->loop_end().ToInt();
2381        ++i) {
2382     live_in_sets()[i]->Union(*live);
2383   }
2384 }
2385 
2386 
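// Drives liveness construction: blocks are visited in reverse order, each
// seeded with its live-out set and then refined instruction by instruction,
// with phis and loop headers handled specially. The ranges are then
// post-processed: slot uses get spill ranges, constant-spilled ranges get
// register-requiring use positions, and preassigned slots are applied.
// A caller would run the builder roughly as follows (sketch only, not the
// exact pipeline code):
//   LiveRangeBuilder builder(data, temp_zone);
//   builder.BuildLiveRanges();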
2387 void LiveRangeBuilder::BuildLiveRanges() {
2388   // Process the blocks in reverse order.
2389   for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
2390        --block_id) {
2391     InstructionBlock* block =
2392         code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
2393     BitVector* live = ComputeLiveOut(block, data());
2394     // Initially consider all live_out values live for the entire block. We
2395     // will shorten these intervals if necessary.
2396     AddInitialIntervals(block, live);
2397     // Process the instructions in reverse order, generating and killing
2398     // live values.
2399     ProcessInstructions(block, live);
2400     // All phi output operands are killed by this block.
2401     ProcessPhis(block, live);
2402     // Now live is live_in for this block, except that it does not include
2403     // values live out on backward successor edges.
2404     if (block->IsLoopHeader()) ProcessLoopHeader(block, live);
2405     live_in_sets()[block_id] = live;
2406   }
2407   // Postprocess the ranges.
2408   const size_t live_ranges_size = data()->live_ranges().size();
2409   for (TopLevelLiveRange* range : data()->live_ranges()) {
2410     CHECK_EQ(live_ranges_size,
2411              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
2412     if (range == nullptr) continue;
2413     // Give slots to all ranges with a non fixed slot use.
2414     if (range->has_slot_use() && range->HasNoSpillType()) {
2415       data()->AssignSpillRangeToLiveRange(range);
2416     }
2417     // TODO(bmeurer): This is a horrible hack to make sure that for constant
2418     // live ranges, every use requires the constant to be in a register.
2419     // Without this hack, all uses with "any" policy would get the constant
2420     // operand assigned.
2421     if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
2422       for (UsePosition* pos = range->first_pos(); pos != nullptr;
2423            pos = pos->next()) {
2424         if (pos->type() == UsePositionType::kRequiresSlot ||
2425             pos->type() == UsePositionType::kRegisterOrSlotOrConstant) {
2426           continue;
2427         }
2428         UsePositionType new_type = UsePositionType::kRegisterOrSlot;
2429         // Can't mark phis as needing a register.
2430         if (!pos->pos().IsGapPosition()) {
2431           new_type = UsePositionType::kRequiresRegister;
2432         }
2433         pos->set_type(new_type, true);
2434       }
2435     }
2436   }
2437   for (auto preassigned : data()->preassigned_slot_ranges()) {
2438     TopLevelLiveRange* range = preassigned.first;
2439     int slot_id = preassigned.second;
2440     SpillRange* spill = range->HasSpillRange()
2441                             ? range->GetSpillRange()
2442                             : data()->AssignSpillRangeToLiveRange(range);
2443     spill->set_assigned_slot(slot_id);
2444   }
2445 #ifdef DEBUG
2446   Verify();
2447 #endif
2448 }
2449 
2450 
2451 void LiveRangeBuilder::MapPhiHint(InstructionOperand* operand,
2452                                   UsePosition* use_pos) {
2453   DCHECK(!use_pos->IsResolved());
2454   auto res = phi_hints_.insert(std::make_pair(operand, use_pos));
2455   DCHECK(res.second);
2456   USE(res);
2457 }
2458 
2459 
2460 void LiveRangeBuilder::ResolvePhiHint(InstructionOperand* operand,
2461                                       UsePosition* use_pos) {
2462   auto it = phi_hints_.find(operand);
2463   if (it == phi_hints_.end()) return;
2464   DCHECK(!it->second->IsResolved());
2465   it->second->ResolveHint(use_pos);
2466 }
2467 
2468 
2469 void LiveRangeBuilder::Verify() const {
2470   for (auto& hint : phi_hints_) {
2471     CHECK(hint.second->IsResolved());
2472   }
2473   for (const TopLevelLiveRange* current : data()->live_ranges()) {
2474     if (current != nullptr && !current->IsEmpty()) {
2475       // New LiveRanges should not be split.
2476       CHECK_NULL(current->next());
2477       // General integrity check.
2478       current->Verify();
2479       const UseInterval* first = current->first_interval();
2480       if (first->next() == nullptr) continue;
2481 
2482       // Consecutive intervals should not end and start in the same block,
2483       // otherwise the intervals should have been joined, because the
2484       // variable is live throughout that block.
2485       CHECK(NextIntervalStartsInDifferentBlocks(first));
2486 
2487       for (const UseInterval* i = first->next(); i != nullptr; i = i->next()) {
2488         // Except for the first interval, the other intervals must start at
2489         // a block boundary, otherwise data wouldn't flow to them.
2490         CHECK(IntervalStartsAtBlockBoundary(i));
2491         // The last instruction of the predecessors of the block the interval
2492         // starts must be covered by the range.
2493         CHECK(IntervalPredecessorsCoveredByRange(i, current));
2494         if (i->next() != nullptr) {
2495           // Check the consecutive intervals property, except for the last
2496           // interval, where it doesn't apply.
2497           CHECK(NextIntervalStartsInDifferentBlocks(i));
2498         }
2499       }
2500     }
2501   }
2502 }
2503 
2504 bool LiveRangeBuilder::IntervalStartsAtBlockBoundary(
2505     const UseInterval* interval) const {
2506   LifetimePosition start = interval->start();
2507   if (!start.IsFullStart()) return false;
2508   int instruction_index = start.ToInstructionIndex();
2509   const InstructionBlock* block =
2510       data()->code()->GetInstructionBlock(instruction_index);
2511   return block->first_instruction_index() == instruction_index;
2512 }
2513 
2514 bool LiveRangeBuilder::IntervalPredecessorsCoveredByRange(
2515     const UseInterval* interval, const TopLevelLiveRange* range) const {
2516   LifetimePosition start = interval->start();
2517   int instruction_index = start.ToInstructionIndex();
2518   const InstructionBlock* block =
2519       data()->code()->GetInstructionBlock(instruction_index);
2520   for (RpoNumber pred_index : block->predecessors()) {
2521     const InstructionBlock* predecessor =
2522         data()->code()->InstructionBlockAt(pred_index);
2523     LifetimePosition last_pos = LifetimePosition::GapFromInstructionIndex(
2524         predecessor->last_instruction_index());
2525     last_pos = last_pos.NextStart().End();
2526     if (!range->Covers(last_pos)) return false;
2527   }
2528   return true;
2529 }
2530 
2531 bool LiveRangeBuilder::NextIntervalStartsInDifferentBlocks(
2532     const UseInterval* interval) const {
2533   DCHECK_NOT_NULL(interval->next());
2534   LifetimePosition end = interval->end();
2535   LifetimePosition next_start = interval->next()->start();
2536   // Since end is not covered, but the previous position is, move back a
2537   // position.
2538   end = end.IsStart() ? end.PrevStart().End() : end.Start();
2539   int last_covered_index = end.ToInstructionIndex();
2540   const InstructionBlock* block =
2541       data()->code()->GetInstructionBlock(last_covered_index);
2542   const InstructionBlock* next_block =
2543       data()->code()->GetInstructionBlock(next_start.ToInstructionIndex());
2544   return block->rpo_number() < next_block->rpo_number();
2545 }
2546 
2547 RegisterAllocator::RegisterAllocator(RegisterAllocationData* data,
2548                                      RegisterKind kind)
2549     : data_(data),
2550       mode_(kind),
2551       num_registers_(GetRegisterCount(data->config(), kind)),
2552       num_allocatable_registers_(
2553           GetAllocatableRegisterCount(data->config(), kind)),
2554       allocatable_register_codes_(
2555           GetAllocatableRegisterCodes(data->config(), kind)),
2556       check_fp_aliasing_(false) {
2557   if (!kSimpleFPAliasing && kind == FP_REGISTERS) {
2558     check_fp_aliasing_ = (data->code()->representation_mask() &
2559                           (kFloatRepBit | kSimd128RepBit)) != 0;
2560   }
2561 }
2562 
2563 LifetimePosition RegisterAllocator::GetSplitPositionForInstruction(
2564     const LiveRange* range, int instruction_index) {
2565   LifetimePosition ret = LifetimePosition::Invalid();
2566 
2567   ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
2568   if (range->Start() >= ret || ret >= range->End()) {
2569     return LifetimePosition::Invalid();
2570   }
2571   return ret;
2572 }
2573 
2574 void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
2575   size_t initial_range_count = data()->live_ranges().size();
2576   for (size_t i = 0; i < initial_range_count; ++i) {
2577     CHECK_EQ(initial_range_count,
2578              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
2579     TopLevelLiveRange* range = data()->live_ranges()[i];
2580     if (!CanProcessRange(range)) continue;
2581     if (range->HasNoSpillType() ||
2582         (range->HasSpillRange() && !range->has_slot_use())) {
2583       continue;
2584     }
2585     LifetimePosition start = range->Start();
2586     TRACE("Live range %d:%d is defined by a spill operand.\n",
2587           range->TopLevel()->vreg(), range->relative_id());
2588     LifetimePosition next_pos = start;
2589     if (next_pos.IsGapPosition()) {
2590       next_pos = next_pos.NextStart();
2591     }
2592 
2593     // With splinters, we can be more strict and skip over positions
2594     // not strictly needing registers.
2595     UsePosition* pos =
2596         range->IsSplinter()
2597             ? range->NextRegisterPosition(next_pos)
2598             : range->NextUsePositionRegisterIsBeneficial(next_pos);
2599     // If the range already has a spill operand and it doesn't need a
2600     // register immediately, split it and spill the first part of the range.
2601     if (pos == nullptr) {
2602       Spill(range);
2603     } else if (pos->pos() > range->Start().NextStart()) {
2605       // Do not spill the live range eagerly if the use position that can
2606       // benefit from the register is too close to the start of the live range.
2606       LifetimePosition split_pos = GetSplitPositionForInstruction(
2607           range, pos->pos().ToInstructionIndex());
2608       // There is no place to split, so we can't split and spill.
2609       if (!split_pos.IsValid()) continue;
2610 
2611       split_pos =
2612           FindOptimalSplitPos(range->Start().NextFullStart(), split_pos);
2613 
2614       SplitRangeAt(range, split_pos);
2615       Spill(range);
2616     }
2617   }
2618 }
2619 
2620 
2621 LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
2622                                            LifetimePosition pos) {
2623   DCHECK(!range->TopLevel()->IsFixed());
2624   TRACE("Splitting live range %d:%d at %d\n", range->TopLevel()->vreg(),
2625         range->relative_id(), pos.value());
2626 
2627   if (pos <= range->Start()) return range;
2628 
2629   // We can't properly connect live ranges if splitting occurred at the end
2630   // of a block.
2631   DCHECK(pos.IsStart() || pos.IsGapPosition() ||
2632          (GetInstructionBlock(code(), pos)->last_instruction_index() !=
2633           pos.ToInstructionIndex()));
2634 
2635   LiveRange* result = range->SplitAt(pos, allocation_zone());
2636   return result;
2637 }
2638 
2639 
2640 LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
2641                                            LifetimePosition start,
2642                                            LifetimePosition end) {
2643   DCHECK(!range->TopLevel()->IsFixed());
2644   TRACE("Splitting live range %d:%d in position between [%d, %d]\n",
2645         range->TopLevel()->vreg(), range->relative_id(), start.value(),
2646         end.value());
2647 
2648   LifetimePosition split_pos = FindOptimalSplitPos(start, end);
2649   DCHECK(split_pos >= start);
2650   return SplitRangeAt(range, split_pos);
2651 }
2652 
2653 
2654 LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
2655                                                         LifetimePosition end) {
2656   int start_instr = start.ToInstructionIndex();
2657   int end_instr = end.ToInstructionIndex();
2658   DCHECK_LE(start_instr, end_instr);
2659 
2660   // We have no choice
2661   if (start_instr == end_instr) return end;
2662 
2663   const InstructionBlock* start_block = GetInstructionBlock(code(), start);
2664   const InstructionBlock* end_block = GetInstructionBlock(code(), end);
2665 
2666   if (end_block == start_block) {
2667     // The interval is split in the same basic block. Split at the latest
2668     // possible position.
2669     return end;
2670   }
2671 
2672   const InstructionBlock* block = end_block;
2673   // Find header of outermost loop.
2674   do {
2675     const InstructionBlock* loop = GetContainingLoop(code(), block);
2676     if (loop == nullptr ||
2677         loop->rpo_number().ToInt() <= start_block->rpo_number().ToInt()) {
2678       // No more loops or loop starts before the lifetime start.
2679       break;
2680     }
2681     block = loop;
2682   } while (true);
2683 
2684   // We did not find any suitable outer loop. Split at the latest possible
2685   // position unless end_block is a loop header itself.
2686   if (block == end_block && !end_block->IsLoopHeader()) return end;
2687 
2688   return LifetimePosition::GapFromInstructionIndex(
2689       block->first_instruction_index());
2690 }
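// Example of the hoisting above (block layout hypothetical): if |start| lies
// before a loop and |end| falls inside a nested loop, the returned position
// is the gap before the first instruction of the outermost loop header that
// still comes after |start|. Splitting there keeps the resulting
// register-to-memory move out of the loop body, so it runs once rather than
// on every iteration.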
2691 
2692 
2693 LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
2694     LiveRange* range, LifetimePosition pos) {
2695   const InstructionBlock* block = GetInstructionBlock(code(), pos.Start());
2696   const InstructionBlock* loop_header =
2697       block->IsLoopHeader() ? block : GetContainingLoop(code(), block);
2698 
2699   if (loop_header == nullptr) return pos;
2700 
2701   const UsePosition* prev_use =
2702       range->PreviousUsePositionRegisterIsBeneficial(pos);
2703 
2704   while (loop_header != nullptr) {
2705     // We are going to spill the live range inside the loop.
2706     // If possible, try to move the spilling position backwards to the loop
2707     // header. This will reduce the number of memory moves on the back edge.
2708     LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex(
2709         loop_header->first_instruction_index());
2710 
2711     if (range->Covers(loop_start)) {
2712       if (prev_use == nullptr || prev_use->pos() < loop_start) {
2713         // No register-beneficial use inside the loop before pos.
2714         pos = loop_start;
2715       }
2716     }
2717 
2718     // Try hoisting out to an outer loop.
2719     loop_header = GetContainingLoop(code(), loop_header);
2720   }
2721 
2722   return pos;
2723 }
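// Example: if the requested spill position lies inside a loop and the range
// has no register-beneficial use between the loop header and that position,
// the spill is hoisted to the gap before the loop header, and then
// transitively to enclosing loop headers that the range still covers. The
// store then happens once before entering the loop instead of on every back
// edge.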
2724 
2725 
2726 void RegisterAllocator::Spill(LiveRange* range) {
2727   DCHECK(!range->spilled());
2728   TopLevelLiveRange* first = range->TopLevel();
2729   TRACE("Spilling live range %d:%d\n", first->vreg(), range->relative_id());
2730 
2731   if (first->HasNoSpillType()) {
2732     data()->AssignSpillRangeToLiveRange(first);
2733   }
2734   range->Spill();
2735 }
2736 
2737 const char* RegisterAllocator::RegisterName(int register_code) const {
2738   if (mode() == GENERAL_REGISTERS) {
2739     return data()->config()->GetGeneralRegisterName(register_code);
2740   } else {
2741     return data()->config()->GetDoubleRegisterName(register_code);
2742   }
2743 }
2744 
2745 LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
2746                                          RegisterKind kind, Zone* local_zone)
2747     : RegisterAllocator(data, kind),
2748       unhandled_live_ranges_(local_zone),
2749       active_live_ranges_(local_zone),
2750       inactive_live_ranges_(local_zone) {
2751   active_live_ranges().reserve(8);
2752   inactive_live_ranges().reserve(8);
2753   // TryAllocateFreeReg and AllocateBlockedReg assume this
2754   // when allocating local arrays.
2755   DCHECK_GE(RegisterConfiguration::kMaxFPRegisters,
2756             this->data()->config()->num_general_registers());
2757 }
2758 
2759 
2760 void LinearScanAllocator::AllocateRegisters() {
2761   DCHECK(unhandled_live_ranges().empty());
2762   DCHECK(active_live_ranges().empty());
2763   DCHECK(inactive_live_ranges().empty());
2764 
2765   SplitAndSpillRangesDefinedByMemoryOperand();
2766 
2767   const size_t live_ranges_size = data()->live_ranges().size();
2768   for (TopLevelLiveRange* range : data()->live_ranges()) {
2769     CHECK_EQ(live_ranges_size,
2770              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
2771     if (!CanProcessRange(range)) continue;
2772     for (LiveRange* to_add = range; to_add != nullptr;
2773          to_add = to_add->next()) {
2774       if (!to_add->spilled()) {
2775         AddToUnhandled(to_add);
2776       }
2777     }
2778   }
2779 
2780   if (mode() == GENERAL_REGISTERS) {
2781     for (TopLevelLiveRange* current : data()->fixed_live_ranges()) {
2782       if (current != nullptr) AddToInactive(current);
2783     }
2784   } else {
2785     for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
2786       if (current != nullptr) AddToInactive(current);
2787     }
2788     if (!kSimpleFPAliasing && check_fp_aliasing()) {
2789       for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) {
2790         if (current != nullptr) AddToInactive(current);
2791       }
2792       for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) {
2793         if (current != nullptr) AddToInactive(current);
2794       }
2795     }
2796   }
2797 
2798   while (!unhandled_live_ranges().empty()) {
2799     LiveRange* current = *unhandled_live_ranges().begin();
2800     unhandled_live_ranges().erase(unhandled_live_ranges().begin());
2801     LifetimePosition position = current->Start();
2802 #ifdef DEBUG
2803     allocation_finger_ = position;
2804 #endif
2805     TRACE("Processing interval %d:%d start=%d\n", current->TopLevel()->vreg(),
2806           current->relative_id(), position.value());
2807 
2808     if (current->IsTopLevel() && TryReuseSpillForPhi(current->TopLevel()))
2809       continue;
2810 
2811     for (size_t i = 0; i < active_live_ranges().size(); ++i) {
2812       LiveRange* cur_active = active_live_ranges()[i];
2813       if (cur_active->End() <= position) {
2814         ActiveToHandled(cur_active);
2815         --i;  // The live range was removed from the list of active live ranges.
2816       } else if (!cur_active->Covers(position)) {
2817         ActiveToInactive(cur_active);
2818         --i;  // The live range was removed from the list of active live ranges.
2819       }
2820     }
2821 
2822     for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
2823       LiveRange* cur_inactive = inactive_live_ranges()[i];
2824       if (cur_inactive->End() <= position) {
2825         InactiveToHandled(cur_inactive);
2826         --i;  // Live range was removed from the list of inactive live ranges.
2827       } else if (cur_inactive->Covers(position)) {
2828         InactiveToActive(cur_inactive);
2829         --i;  // Live range was removed from the list of inactive live ranges.
2830       }
2831     }
2832 
2833     DCHECK(!current->HasRegisterAssigned() && !current->spilled());
2834 
2835     ProcessCurrentRange(current);
2836   }
2837 }
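// Invariants of the main loop above: unhandled ranges are processed in
// increasing order of their start position, so |position| only moves forward
// (cf. allocation_finger_ in debug builds). A range is kept in the active set
// while it covers |position|, parked in the inactive set while |position|
// falls into one of its lifetime holes, and dropped to handled once its end
// is at or before |position|. Fixed ranges, which model precolored registers,
// are seeded into the inactive set and migrate into the active set whenever
// they cover the current position.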
2838 
2839 bool LinearScanAllocator::TrySplitAndSpillSplinter(LiveRange* range) {
2840   DCHECK(range->TopLevel()->IsSplinter());
2841   // If we can spill the whole range, great. Otherwise, split above the
2842   // first use needing a register and spill the top part.
2843   const UsePosition* next_reg = range->NextRegisterPosition(range->Start());
2844   if (next_reg == nullptr) {
2845     Spill(range);
2846     return true;
2847   } else if (range->FirstHintPosition() == nullptr) {
2848     // If there was no hint, but we have a use position requiring a
2849     // register, apply the hot path heuristics.
2850     return false;
2851   } else if (next_reg->pos().PrevStart() > range->Start()) {
2852     LiveRange* tail = SplitRangeAt(range, next_reg->pos().PrevStart());
2853     AddToUnhandled(tail);
2854     Spill(range);
2855     return true;
2856   }
2857   return false;
2858 }
2859 
2860 void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
2861                                                        int reg) {
2862   data()->MarkAllocated(range->representation(), reg);
2863   range->set_assigned_register(reg);
2864   range->SetUseHints(reg);
2865   if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
2866     data()->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg);
2867   }
2868 }
2869 
2870 
2871 void LinearScanAllocator::AddToActive(LiveRange* range) {
2872   TRACE("Add live range %d:%d to active\n", range->TopLevel()->vreg(),
2873         range->relative_id());
2874   active_live_ranges().push_back(range);
2875 }
2876 
2877 
2878 void LinearScanAllocator::AddToInactive(LiveRange* range) {
2879   TRACE("Add live range %d:%d to inactive\n", range->TopLevel()->vreg(),
2880         range->relative_id());
2881   inactive_live_ranges().push_back(range);
2882 }
2883 
2884 void LinearScanAllocator::AddToUnhandled(LiveRange* range) {
2885   if (range == nullptr || range->IsEmpty()) return;
2886   DCHECK(!range->HasRegisterAssigned() && !range->spilled());
2887   DCHECK(allocation_finger_ <= range->Start());
2888 
2889   TRACE("Add live range %d:%d to unhandled\n", range->TopLevel()->vreg(),
2890         range->relative_id());
2891   unhandled_live_ranges().insert(range);
2892 }
2893 
2894 
2895 void LinearScanAllocator::ActiveToHandled(LiveRange* range) {
2896   RemoveElement(&active_live_ranges(), range);
2897   TRACE("Moving live range %d:%d from active to handled\n",
2898         range->TopLevel()->vreg(), range->relative_id());
2899 }
2900 
2901 
2902 void LinearScanAllocator::ActiveToInactive(LiveRange* range) {
2903   RemoveElement(&active_live_ranges(), range);
2904   inactive_live_ranges().push_back(range);
2905   TRACE("Moving live range %d:%d from active to inactive\n",
2906         range->TopLevel()->vreg(), range->relative_id());
2907 }
2908 
2909 
2910 void LinearScanAllocator::InactiveToHandled(LiveRange* range) {
2911   RemoveElement(&inactive_live_ranges(), range);
2912   TRACE("Moving live range %d:%d from inactive to handled\n",
2913         range->TopLevel()->vreg(), range->relative_id());
2914 }
2915 
2916 
2917 void LinearScanAllocator::InactiveToActive(LiveRange* range) {
2918   RemoveElement(&inactive_live_ranges(), range);
2919   active_live_ranges().push_back(range);
2920   TRACE("Moving live range %d:%d from inactive to active\n",
2921         range->TopLevel()->vreg(), range->relative_id());
2922 }
2923 
2924 void LinearScanAllocator::GetFPRegisterSet(MachineRepresentation rep,
2925                                            int* num_regs, int* num_codes,
2926                                            const int** codes) const {
2927   DCHECK(!kSimpleFPAliasing);
2928   if (rep == MachineRepresentation::kFloat32) {
2929     *num_regs = data()->config()->num_float_registers();
2930     *num_codes = data()->config()->num_allocatable_float_registers();
2931     *codes = data()->config()->allocatable_float_codes();
2932   } else if (rep == MachineRepresentation::kSimd128) {
2933     *num_regs = data()->config()->num_simd128_registers();
2934     *num_codes = data()->config()->num_allocatable_simd128_registers();
2935     *codes = data()->config()->allocatable_simd128_codes();
2936   } else {
2937     UNREACHABLE();
2938   }
2939 }
2940 
2941 void LinearScanAllocator::FindFreeRegistersForRange(
2942     LiveRange* range, Vector<LifetimePosition> positions) {
2943   int num_regs = num_registers();
2944   int num_codes = num_allocatable_registers();
2945   const int* codes = allocatable_register_codes();
2946   MachineRepresentation rep = range->representation();
2947   if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
2948                              rep == MachineRepresentation::kSimd128))
2949     GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
2950   DCHECK_GE(positions.length(), num_regs);
2951 
2952   for (int i = 0; i < num_regs; ++i) {
2953     positions[i] = LifetimePosition::MaxPosition();
2954   }
2955 
2956   for (LiveRange* cur_active : active_live_ranges()) {
2957     int cur_reg = cur_active->assigned_register();
2958     if (kSimpleFPAliasing || !check_fp_aliasing()) {
2959       positions[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
2960       TRACE("Register %s is free until pos %d (1)\n", RegisterName(cur_reg),
2961             LifetimePosition::GapFromInstructionIndex(0).value());
2962     } else {
2963       int alias_base_index = -1;
2964       int aliases = data()->config()->GetAliases(
2965           cur_active->representation(), cur_reg, rep, &alias_base_index);
2966       DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
2967       while (aliases--) {
2968         int aliased_reg = alias_base_index + aliases;
2969         positions[aliased_reg] = LifetimePosition::GapFromInstructionIndex(0);
2970       }
2971     }
2972   }
2973 
2974   for (LiveRange* cur_inactive : inactive_live_ranges()) {
2975     DCHECK(cur_inactive->End() > range->Start());
2976     int cur_reg = cur_inactive->assigned_register();
2977     // No need to carry out intersections when this register won't be
2978     // interesting to this range anyway.
2979     // TODO(mtrofin): extend to aliased ranges, too.
2980     if ((kSimpleFPAliasing || !check_fp_aliasing()) &&
2981         positions[cur_reg] < range->Start()) {
2982       continue;
2983     }
2984 
2985     LifetimePosition next_intersection = cur_inactive->FirstIntersection(range);
2986     if (!next_intersection.IsValid()) continue;
2987     if (kSimpleFPAliasing || !check_fp_aliasing()) {
2988       positions[cur_reg] = Min(positions[cur_reg], next_intersection);
2989       TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
2990             Min(positions[cur_reg], next_intersection).value());
2991     } else {
2992       int alias_base_index = -1;
2993       int aliases = data()->config()->GetAliases(
2994           cur_inactive->representation(), cur_reg, rep, &alias_base_index);
2995       DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
2996       while (aliases--) {
2997         int aliased_reg = alias_base_index + aliases;
2998         positions[aliased_reg] = Min(positions[aliased_reg], next_intersection);
2999       }
3000     }
3001   }
3002 }
3003 
3004 // High-level register allocation summary:
3005 //
3006 // For regular, or hot (i.e. not splinter) ranges, we first attempt to
3007 // allocate the preferred (hint) register. If that is not possible,
3008 // we find a register that's free, and allocate that. If that's not possible,
3009 // we search for a register to steal from a range that was allocated. The
3010 // goal is to optimize for throughput by avoiding register-to-memory
3011 // moves, which are expensive.
3012 //
3013 // For splinters, the goal is to minimize the number of moves. First we try
3014 // to allocate the preferred register (more discussion follows). Failing that,
3015 // we bail out and spill as far as we can, unless the first use is at the
3016 // start, in which case we apply the same behavior as we do for regular ranges.
3017 // If there is no hint, we apply the hot-path behavior.
3018 //
3019 // For the splinter, the hint register may come from:
3020 //
3021 // - the hot path (we set it at splintering time with SetHint). In this case, if
3022 // we cannot offer the hint register, spilling is better because it's at most
3023 // 1 move, while trying to find and offer another register is at least 1 move.
3024 //
3025 // - a constraint. If we cannot offer that register, it's because there is
3026 // some interference. So offering the hint register up to the interference
3027 // would result in a move at the interference, plus a move to satisfy the
3028 // constraint. This is also the number of moves if we spill, with the
3029 // potential of the range being already spilled and thus saving a move (the
3030 // spill).
3031 // Note that this can only be an input constraint; if it were an output one,
3032 // the range wouldn't be a splinter, because that would mean it is defined in
3033 // a deferred block, and we don't mark those as splinters (they live in
3034 // deferred blocks only).
3036 //
3037 // - a phi. The same analysis as in the case of the input constraint applies.
3038 //
3039 void LinearScanAllocator::ProcessCurrentRange(LiveRange* current) {
3040   LifetimePosition free_until_pos_buff[RegisterConfiguration::kMaxFPRegisters];
3041   Vector<LifetimePosition> free_until_pos(
3042       free_until_pos_buff, RegisterConfiguration::kMaxFPRegisters);
3043   FindFreeRegistersForRange(current, free_until_pos);
3044   if (!TryAllocatePreferredReg(current, free_until_pos)) {
3045     if (current->TopLevel()->IsSplinter()) {
3046       if (TrySplitAndSpillSplinter(current)) return;
3047     }
3048     if (!TryAllocateFreeReg(current, free_until_pos)) {
3049       AllocateBlockedReg(current);
3050     }
3051   }
3052   if (current->HasRegisterAssigned()) {
3053     AddToActive(current);
3054   }
3055 }
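// Decision sequence implemented above for a single range:
//  1. Try the hinted register if it stays free past the range's end
//     (TryAllocatePreferredReg).
//  2. For splinters, prefer bailing out: spill the whole range, or split
//     above the first use needing a register and spill the head
//     (TrySplitAndSpillSplinter).
//  3. Otherwise pick the register that stays free the longest, splitting the
//     range where that register becomes blocked (TryAllocateFreeReg).
//  4. Failing all of that, steal a register and split/spill whatever ranges
//     it intersects (AllocateBlockedReg).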
3056 
3057 bool LinearScanAllocator::TryAllocatePreferredReg(
3058     LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
3059   int hint_register;
3060   if (current->FirstHintPosition(&hint_register) != nullptr) {
3061     TRACE(
3062         "Found reg hint %s (free until [%d) for live range %d:%d (end %d[).\n",
3063         RegisterName(hint_register), free_until_pos[hint_register].value(),
3064         current->TopLevel()->vreg(), current->relative_id(),
3065         current->End().value());
3066 
3067     // The desired register is free until the end of the current live range.
3068     if (free_until_pos[hint_register] >= current->End()) {
3069       TRACE("Assigning preferred reg %s to live range %d:%d\n",
3070             RegisterName(hint_register), current->TopLevel()->vreg(),
3071             current->relative_id());
3072       SetLiveRangeAssignedRegister(current, hint_register);
3073       return true;
3074     }
3075   }
3076   return false;
3077 }
3078 
3079 bool LinearScanAllocator::TryAllocateFreeReg(
3080     LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
3081   int num_regs = 0;  // used only for the call to GetFPRegisterSet.
3082   int num_codes = num_allocatable_registers();
3083   const int* codes = allocatable_register_codes();
3084   MachineRepresentation rep = current->representation();
3085   if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
3086                              rep == MachineRepresentation::kSimd128)) {
3087     GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
3088   }
3089 
3090   DCHECK_GE(free_until_pos.length(), num_codes);
3091 
3092   // Find the register which stays free for the longest time. Check for
3093   // the hinted register first, as we might want to use that one. Only
3094   // count full instructions for free ranges, as an instruction's internal
3095   // positions do not help but might shadow a hinted register. This is
3096   // typically the case for function calls, where all registers are
3097   // clobbered after the call except for the argument registers, which are
3098   // set before the call. Hence, the argument registers always get ignored,
3099   // as their available time is shorter.
3100   int reg;
3101   if (current->FirstHintPosition(&reg) == nullptr) {
3102     reg = codes[0];
3103   }
3104   for (int i = 0; i < num_codes; ++i) {
3105     int code = codes[i];
3106     if (free_until_pos[code].ToInstructionIndex() >
3107         free_until_pos[reg].ToInstructionIndex()) {
3108       reg = code;
3109     }
3110   }
3111 
3112   LifetimePosition pos = free_until_pos[reg];
3113 
3114   if (pos <= current->Start()) {
3115     // All registers are blocked.
3116     return false;
3117   }
3118 
3119   if (pos < current->End()) {
3120     // Register reg is available at the range start but becomes blocked before
3121     // the range end. Split current at position where it becomes blocked.
3122     LiveRange* tail = SplitRangeAt(current, pos);
3123     AddToUnhandled(tail);
3124 
3125     // Try to allocate preferred register once more.
3126     if (TryAllocatePreferredReg(current, free_until_pos)) return true;
3127   }
3128 
3129   // Register reg is available at the range start and is free until the range
3130   // end.
3131   DCHECK(pos >= current->End());
3132   TRACE("Assigning free reg %s to live range %d:%d\n", RegisterName(reg),
3133         current->TopLevel()->vreg(), current->relative_id());
3134   SetLiveRangeAssignedRegister(current, reg);
3135 
3136   return true;
3137 }
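// Example with hypothetical positions: if the chosen register is free until
// position 40 and the current range spans [10, 60), the range is split at 40,
// the tail goes back to the unhandled set, and the (now shorter) head is
// assigned a register; the hint is re-tried first, since it may cover the
// head even though it did not cover the full range. If no register is free
// past the range's start at all, the caller falls back to AllocateBlockedReg.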
3138 
3139 void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
3140   UsePosition* register_use = current->NextRegisterPosition(current->Start());
3141   if (register_use == nullptr) {
3142     // There is no use in the current live range that requires a register.
3143     // We can just spill it.
3144     Spill(current);
3145     return;
3146   }
3147 
3148   int num_regs = num_registers();
3149   int num_codes = num_allocatable_registers();
3150   const int* codes = allocatable_register_codes();
3151   MachineRepresentation rep = current->representation();
3152   if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
3153                              rep == MachineRepresentation::kSimd128))
3154     GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
3155 
3156   // use_pos keeps track of the positions at which a register/alias is used.
3157   // block_pos keeps track of the positions from which a register/alias is
3158   // blocked.
3159   LifetimePosition use_pos[RegisterConfiguration::kMaxFPRegisters];
3160   LifetimePosition block_pos[RegisterConfiguration::kMaxFPRegisters];
3161   for (int i = 0; i < num_regs; i++) {
3162     use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
3163   }
3164 
3165   for (LiveRange* range : active_live_ranges()) {
3166     int cur_reg = range->assigned_register();
3167     bool is_fixed_or_cant_spill =
3168         range->TopLevel()->IsFixed() || !range->CanBeSpilled(current->Start());
3169     if (kSimpleFPAliasing || !check_fp_aliasing()) {
3170       if (is_fixed_or_cant_spill) {
3171         block_pos[cur_reg] = use_pos[cur_reg] =
3172             LifetimePosition::GapFromInstructionIndex(0);
3173       } else {
3174         DCHECK_NE(LifetimePosition::GapFromInstructionIndex(0),
3175                   block_pos[cur_reg]);
3176         use_pos[cur_reg] =
3177             range->NextLifetimePositionRegisterIsBeneficial(current->Start());
3178       }
3179     } else {
3180       int alias_base_index = -1;
3181       int aliases = data()->config()->GetAliases(
3182           range->representation(), cur_reg, rep, &alias_base_index);
3183       DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
3184       while (aliases--) {
3185         int aliased_reg = alias_base_index + aliases;
3186         if (is_fixed_or_cant_spill) {
3187           block_pos[aliased_reg] = use_pos[aliased_reg] =
3188               LifetimePosition::GapFromInstructionIndex(0);
3189         } else {
3190           use_pos[aliased_reg] =
3191               Min(block_pos[aliased_reg],
3192                   range->NextLifetimePositionRegisterIsBeneficial(
3193                       current->Start()));
3194         }
3195       }
3196     }
3197   }
3198 
3199   for (LiveRange* range : inactive_live_ranges()) {
3200     DCHECK(range->End() > current->Start());
3201     int cur_reg = range->assigned_register();
3202     bool is_fixed = range->TopLevel()->IsFixed();
3203 
3204     // Don't perform costly intersections if they are guaranteed to not update
3205     // block_pos or use_pos.
3206     // TODO(mtrofin): extend to aliased ranges, too.
3207     if ((kSimpleFPAliasing || !check_fp_aliasing())) {
3208       if (is_fixed) {
3209         if (block_pos[cur_reg] < range->Start()) continue;
3210       } else {
3211         if (use_pos[cur_reg] < range->Start()) continue;
3212       }
3213     }
3214 
3215     LifetimePosition next_intersection = range->FirstIntersection(current);
3216     if (!next_intersection.IsValid()) continue;
3217 
3218     if (kSimpleFPAliasing || !check_fp_aliasing()) {
3219       if (is_fixed) {
3220         block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
3221         use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
3222       } else {
3223         use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
3224       }
3225     } else {
3226       int alias_base_index = -1;
3227       int aliases = data()->config()->GetAliases(
3228           range->representation(), cur_reg, rep, &alias_base_index);
3229       DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
3230       while (aliases--) {
3231         int aliased_reg = alias_base_index + aliases;
3232         if (is_fixed) {
3233           block_pos[aliased_reg] =
3234               Min(block_pos[aliased_reg], next_intersection);
3235           use_pos[aliased_reg] =
3236               Min(block_pos[aliased_reg], use_pos[aliased_reg]);
3237         } else {
3238           use_pos[aliased_reg] = Min(use_pos[aliased_reg], next_intersection);
3239         }
3240       }
3241     }
3242   }
3243 
3244   int reg = codes[0];
3245   for (int i = 1; i < num_codes; ++i) {
3246     int code = codes[i];
3247     if (use_pos[code] > use_pos[reg]) {
3248       reg = code;
3249     }
3250   }
3251 
3252   if (use_pos[reg] < register_use->pos()) {
3253     // If there is a gap position before the next register use, we can
3254     // spill until there. The gap position will then fit the fill move.
3255     if (LifetimePosition::ExistsGapPositionBetween(current->Start(),
3256                                                    register_use->pos())) {
3257       SpillBetween(current, current->Start(), register_use->pos());
3258       return;
3259     }
3260   }
3261 
3262   // We couldn't spill until the next register use. Split before the register
3263   // is blocked, if applicable.
3264   if (block_pos[reg] < current->End()) {
3265     // Register becomes blocked before the current range end. Split before that
3266     // position.
3267     LiveRange* tail =
3268         SplitBetween(current, current->Start(), block_pos[reg].Start());
3269     AddToUnhandled(tail);
3270   }
3271 
3272   // Register reg is not blocked for the whole range.
3273   DCHECK(block_pos[reg] >= current->End());
3274   TRACE("Assigning blocked reg %s to live range %d:%d\n", RegisterName(reg),
3275         current->TopLevel()->vreg(), current->relative_id());
3276   SetLiveRangeAssignedRegister(current, reg);
3277 
3278   // This register was not free. Thus we need to find and spill
3279   // parts of active and inactive live ranges that use the same register
3280   // at the same lifetime positions as current.
3281   SplitAndSpillIntersecting(current);
3282 }
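// Meaning of the two arrays above: use_pos[r] is the earliest position at
// which some range currently occupying r (or an alias of r) will again
// benefit from having it in a register; block_pos[r] is the position from
// which r is unavailable outright (fixed ranges, or ranges that cannot be
// spilled at the current start). The register with the latest use_pos is the
// candidate: either the current range is spilled up to its own first register
// use (when the candidate is needed sooner than that), or the current range
// is split where the candidate becomes blocked, takes the register, and the
// conflicting ranges are split and spilled around it by
// SplitAndSpillIntersecting.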
3283 
3284 
3285 void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
3286   DCHECK(current->HasRegisterAssigned());
3287   int reg = current->assigned_register();
3288   LifetimePosition split_pos = current->Start();
3289   for (size_t i = 0; i < active_live_ranges().size(); ++i) {
3290     LiveRange* range = active_live_ranges()[i];
3291     if (kSimpleFPAliasing || !check_fp_aliasing()) {
3292       if (range->assigned_register() != reg) continue;
3293     } else {
3294       if (!data()->config()->AreAliases(current->representation(), reg,
3295                                         range->representation(),
3296                                         range->assigned_register())) {
3297         continue;
3298       }
3299     }
3300 
3301     UsePosition* next_pos = range->NextRegisterPosition(current->Start());
3302     LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
3303     if (next_pos == nullptr) {
3304       SpillAfter(range, spill_pos);
3305     } else {
3306       // When spilling between spill_pos and next_pos ensure that the range
3307       // remains spilled at least until the start of the current live range.
3308       // This guarantees that we will not introduce new unhandled ranges that
3309       // start before the current range as this violates allocation invariants
3310       // and will lead to an inconsistent state of active and inactive
3311       // live-ranges: ranges are allocated in order of their start positions,
3312       // ranges are retired from active/inactive when the start of the
3313       // current live-range is larger than their end.
3314       DCHECK(LifetimePosition::ExistsGapPositionBetween(current->Start(),
3315                                                         next_pos->pos()));
3316       SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
3317     }
3318     ActiveToHandled(range);
3319     --i;
3320   }
3321 
3322   for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
3323     LiveRange* range = inactive_live_ranges()[i];
3324     DCHECK(range->End() > current->Start());
3325     if (range->TopLevel()->IsFixed()) continue;
3326     if (kSimpleFPAliasing || !check_fp_aliasing()) {
3327       if (range->assigned_register() != reg) continue;
3328     } else {
3329       if (!data()->config()->AreAliases(current->representation(), reg,
3330                                         range->representation(),
3331                                         range->assigned_register()))
3332         continue;
3333     }
3334 
3335     LifetimePosition next_intersection = range->FirstIntersection(current);
3336     if (next_intersection.IsValid()) {
3337       UsePosition* next_pos = range->NextRegisterPosition(current->Start());
3338       if (next_pos == nullptr) {
3339         SpillAfter(range, split_pos);
3340       } else {
3341         next_intersection = Min(next_intersection, next_pos->pos());
3342         SpillBetween(range, split_pos, next_intersection);
3343       }
3344       InactiveToHandled(range);
3345       --i;
3346     }
3347   }
3348 }
3349 
3350 
3351 bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
3352   if (!range->is_phi()) return false;
3353 
3354   DCHECK(!range->HasSpillOperand());
3355   RegisterAllocationData::PhiMapValue* phi_map_value =
3356       data()->GetPhiMapValueFor(range);
3357   const PhiInstruction* phi = phi_map_value->phi();
3358   const InstructionBlock* block = phi_map_value->block();
3359   // Count the number of spilled operands.
3360   size_t spilled_count = 0;
3361   LiveRange* first_op = nullptr;
3362   for (size_t i = 0; i < phi->operands().size(); i++) {
3363     int op = phi->operands()[i];
3364     LiveRange* op_range = data()->GetOrCreateLiveRangeFor(op);
3365     if (!op_range->TopLevel()->HasSpillRange()) continue;
3366     const InstructionBlock* pred =
3367         code()->InstructionBlockAt(block->predecessors()[i]);
3368     LifetimePosition pred_end =
3369         LifetimePosition::InstructionFromInstructionIndex(
3370             pred->last_instruction_index());
3371     while (op_range != nullptr && !op_range->CanCover(pred_end)) {
3372       op_range = op_range->next();
3373     }
3374     if (op_range != nullptr && op_range->spilled()) {
3375       spilled_count++;
3376       if (first_op == nullptr) {
3377         first_op = op_range->TopLevel();
3378       }
3379     }
3380   }
3381 
3382   // Only continue if more than half of the operands are spilled.
3383   if (spilled_count * 2 <= phi->operands().size()) {
3384     return false;
3385   }
3386 
3387   // Try to merge the spilled operands and count the number of merged spilled
3388   // operands.
3389   DCHECK_NOT_NULL(first_op);
3390   SpillRange* first_op_spill = first_op->TopLevel()->GetSpillRange();
3391   size_t num_merged = 1;
3392   for (size_t i = 1; i < phi->operands().size(); i++) {
3393     int op = phi->operands()[i];
3394     TopLevelLiveRange* op_range = data()->live_ranges()[op];
3395     if (!op_range->HasSpillRange()) continue;
3396     SpillRange* op_spill = op_range->GetSpillRange();
3397     if (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill)) {
3398       num_merged++;
3399     }
3400   }
3401 
3402   // Only continue if enough operands could be merged to the
3403   // same spill slot.
3404   if (num_merged * 2 <= phi->operands().size() ||
3405       AreUseIntervalsIntersecting(first_op_spill->interval(),
3406                                   range->first_interval())) {
3407     return false;
3408   }
3409 
3410   // If the range does not need a register soon, spill it to the merged
3411   // spill range.
3412   LifetimePosition next_pos = range->Start();
3413   if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
3414   UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
3415   if (pos == nullptr) {
3416     SpillRange* spill_range =
3417         range->TopLevel()->HasSpillRange()
3418             ? range->TopLevel()->GetSpillRange()
3419             : data()->AssignSpillRangeToLiveRange(range->TopLevel());
3420     bool merged = first_op_spill->TryMerge(spill_range);
3421     if (!merged) return false;
3422     Spill(range);
3423     return true;
3424   } else if (pos->pos() > range->Start().NextStart()) {
3425     SpillRange* spill_range =
3426         range->TopLevel()->HasSpillRange()
3427             ? range->TopLevel()->GetSpillRange()
3428             : data()->AssignSpillRangeToLiveRange(range->TopLevel());
3429     bool merged = first_op_spill->TryMerge(spill_range);
3430     if (!merged) return false;
3431     SpillBetween(range, range->Start(), pos->pos());
3432     return true;
3433   }
3434   return false;
3435 }
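// Summary of the heuristic above: the phi may reuse its operands' spill slot
// only if more than half of its operands are already spilled at the end of
// their predecessor blocks, more than half of the operand spill ranges can be
// merged into a single slot, and that slot's use intervals do not intersect
// the phi's own range. The phi is then either spilled outright (when it has
// no upcoming register-beneficial use) or spilled up to that use.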
3436 
3437 
3438 void LinearScanAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
3439   LiveRange* second_part = SplitRangeAt(range, pos);
3440   Spill(second_part);
3441 }
3442 
3443 
3444 void LinearScanAllocator::SpillBetween(LiveRange* range, LifetimePosition start,
3445                                        LifetimePosition end) {
3446   SpillBetweenUntil(range, start, start, end);
3447 }
3448 
3449 
3450 void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
3451                                             LifetimePosition start,
3452                                             LifetimePosition until,
3453                                             LifetimePosition end) {
3454   CHECK(start < end);
3455   LiveRange* second_part = SplitRangeAt(range, start);
3456 
3457   if (second_part->Start() < end) {
3458     // The split result intersects with [start, end[.
3459     // Split it at a position between ]start+1, end[, spill the middle part,
3460     // and add the rest to the unhandled set.
3461     LifetimePosition third_part_end = end.PrevStart().End();
3462     if (data()->IsBlockBoundary(end.Start())) {
3463       third_part_end = end.Start();
3464     }
3465     LiveRange* third_part = SplitBetween(
3466         second_part, Max(second_part->Start().End(), until), third_part_end);
3467 
3468     DCHECK(third_part != second_part);
3469 
3470     Spill(second_part);
3471     AddToUnhandled(third_part);
3472   } else {
3473     // The split result does not intersect with [start, end[.
3474     // Nothing to spill. Just add it to the unhandled set as a whole.
3475     AddToUnhandled(second_part);
3476   }
3477 }
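// Resulting shape when the part after |start| still overlaps [start, end[:
// the original range keeps [Start, start), the piece starting at |start| is
// spilled up to a split point chosen between |until| and just before |end|,
// and the remainder re-enters the unhandled set. Spilling at least up to
// |until| is what allows SplitAndSpillIntersecting to guarantee that no newly
// created unhandled range starts before the range currently being allocated.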
3478 
3479 
3480 SpillSlotLocator::SpillSlotLocator(RegisterAllocationData* data)
3481     : data_(data) {}
3482 
3483 
3484 void SpillSlotLocator::LocateSpillSlots() {
3485   const InstructionSequence* code = data()->code();
3486   const size_t live_ranges_size = data()->live_ranges().size();
3487   for (TopLevelLiveRange* range : data()->live_ranges()) {
3488     CHECK_EQ(live_ranges_size,
3489              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
3490     if (range == nullptr || range->IsEmpty()) continue;
3491     // We care only about ranges which spill in the frame.
3492     if (!range->HasSpillRange() || range->IsSpilledOnlyInDeferredBlocks()) {
3493       continue;
3494     }
3495     TopLevelLiveRange::SpillMoveInsertionList* spills =
3496         range->GetSpillMoveInsertionLocations();
3497     DCHECK_NOT_NULL(spills);
3498     for (; spills != nullptr; spills = spills->next) {
3499       code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
3500     }
3501   }
3502 }
3503 
3504 
3505 OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {}
3506 
3507 
3508 void OperandAssigner::AssignSpillSlots() {
3509   ZoneVector<SpillRange*>& spill_ranges = data()->spill_ranges();
3510   // Merge disjoint spill ranges
3511   for (size_t i = 0; i < spill_ranges.size(); ++i) {
3512     SpillRange* range = spill_ranges[i];
3513     if (range == nullptr) continue;
3514     if (range->IsEmpty()) continue;
3515     for (size_t j = i + 1; j < spill_ranges.size(); ++j) {
3516       SpillRange* other = spill_ranges[j];
3517       if (other != nullptr && !other->IsEmpty()) {
3518         range->TryMerge(other);
3519       }
3520     }
3521   }
3522   // Allocate slots for the merged spill ranges.
3523   for (SpillRange* range : spill_ranges) {
3524     if (range == nullptr || range->IsEmpty()) continue;
3525     // Allocate a new operand referring to the spill slot.
3526     if (!range->HasSlot()) {
3527       int index = data()->frame()->AllocateSpillSlot(range->byte_width());
3528       range->set_assigned_slot(index);
3529     }
3530   }
3531 }
3532 
3533 
3534 void OperandAssigner::CommitAssignment() {
3535   const size_t live_ranges_size = data()->live_ranges().size();
3536   for (TopLevelLiveRange* top_range : data()->live_ranges()) {
3537     CHECK_EQ(live_ranges_size,
3538              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
3539     if (top_range == nullptr || top_range->IsEmpty()) continue;
3540     InstructionOperand spill_operand;
3541     if (top_range->HasSpillOperand()) {
3542       spill_operand = *top_range->TopLevel()->GetSpillOperand();
3543     } else if (top_range->TopLevel()->HasSpillRange()) {
3544       spill_operand = top_range->TopLevel()->GetSpillRangeOperand();
3545     }
3546     if (top_range->is_phi()) {
3547       data()->GetPhiMapValueFor(top_range)->CommitAssignment(
3548           top_range->GetAssignedOperand());
3549     }
3550     for (LiveRange* range = top_range; range != nullptr;
3551          range = range->next()) {
3552       InstructionOperand assigned = range->GetAssignedOperand();
3553       DCHECK(!assigned.IsUnallocated());
3554       range->ConvertUsesToOperand(assigned, spill_operand);
3555     }
3556 
3557     if (!spill_operand.IsInvalid()) {
3558       // If this top level range has a child spilled in a deferred block, we use
3559       // the range and control flow connection mechanism instead of spilling at
3560       // definition. Refer to the ConnectLiveRanges and ResolveControlFlow
3561       // phases. Normally, when we spill at definition, we do not insert a
3562       // connecting move when a successor child range is spilled - because the
3563       // spilled range picks up its value from the slot which was assigned at
3564       // definition. For ranges that are determined to spill only in deferred
3565       // blocks, we let ConnectLiveRanges and ResolveControlFlow find the blocks
3566       // where a spill operand is expected, and then finalize by inserting the
3567       // spills in the deferred blocks' dominators.
3568       if (!top_range->IsSpilledOnlyInDeferredBlocks()) {
3569         // Spill at definition if the range isn't spilled only in deferred
3570         // blocks.
3571         top_range->CommitSpillMoves(
3572             data()->code(), spill_operand,
3573             top_range->has_slot_use() || top_range->spilled());
3574       }
3575     }
3576   }
3577 }
3578 
3579 
3580 ReferenceMapPopulator::ReferenceMapPopulator(RegisterAllocationData* data)
3581     : data_(data) {}
3582 
3583 
3584 bool ReferenceMapPopulator::SafePointsAreInOrder() const {
3585   int safe_point = 0;
3586   for (ReferenceMap* map : *data()->code()->reference_maps()) {
3587     if (safe_point > map->instruction_position()) return false;
3588     safe_point = map->instruction_position();
3589   }
3590   return true;
3591 }
3592 
3593 
3594 void ReferenceMapPopulator::PopulateReferenceMaps() {
3595   DCHECK(SafePointsAreInOrder());
3596   // Map all delayed references.
3597   for (RegisterAllocationData::DelayedReference& delayed_reference :
3598        data()->delayed_references()) {
3599     delayed_reference.map->RecordReference(
3600         AllocatedOperand::cast(*delayed_reference.operand));
3601   }
3602   // Iterate over all safe point positions and record a pointer
3603   // for all spilled live ranges at this point.
3604   int last_range_start = 0;
3605   const ReferenceMapDeque* reference_maps = data()->code()->reference_maps();
3606   ReferenceMapDeque::const_iterator first_it = reference_maps->begin();
3607   const size_t live_ranges_size = data()->live_ranges().size();
3608   for (TopLevelLiveRange* range : data()->live_ranges()) {
3609     CHECK_EQ(live_ranges_size,
3610              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
3611     if (range == nullptr) continue;
3612     // Skip non-reference values.
3613     if (!data()->IsReference(range)) continue;
3614     // Skip empty live ranges.
3615     if (range->IsEmpty()) continue;
3616     if (range->has_preassigned_slot()) continue;
3617 
3618     // Find the extent of the range and its children.
3619     int start = range->Start().ToInstructionIndex();
3620     int end = 0;
3621     for (LiveRange* cur = range; cur != nullptr; cur = cur->next()) {
3622       LifetimePosition this_end = cur->End();
3623       if (this_end.ToInstructionIndex() > end)
3624         end = this_end.ToInstructionIndex();
3625       DCHECK(cur->Start().ToInstructionIndex() >= start);
3626     }
3627 
3628     // Most of the ranges are in order, but not all.  Keep an eye on when they
3629     // step backwards and reset the first_it so we don't miss any safe points.
3630     if (start < last_range_start) first_it = reference_maps->begin();
3631     last_range_start = start;
3632 
3633     // Step across all the safe points that are before the start of this range,
3634     // recording how far we step in order to save doing this for the next range.
3635     for (; first_it != reference_maps->end(); ++first_it) {
3636       ReferenceMap* map = *first_it;
3637       if (map->instruction_position() >= start) break;
3638     }
3639 
3640     InstructionOperand spill_operand;
3641     if (((range->HasSpillOperand() &&
3642           !range->GetSpillOperand()->IsConstant()) ||
3643          range->HasSpillRange())) {
3644       if (range->HasSpillOperand()) {
3645         spill_operand = *range->GetSpillOperand();
3646       } else {
3647         spill_operand = range->GetSpillRangeOperand();
3648       }
3649       DCHECK(spill_operand.IsStackSlot());
3650       DCHECK(CanBeTaggedPointer(
3651           AllocatedOperand::cast(spill_operand).representation()));
3652     }
3653 
3654     LiveRange* cur = range;
3655     // Step through the safe points to see whether they are in the range.
3656     for (auto it = first_it; it != reference_maps->end(); ++it) {
3657       ReferenceMap* map = *it;
3658       int safe_point = map->instruction_position();
3659 
3660       // The safe points are sorted so we can stop searching here.
3661       if (safe_point - 1 > end) break;
3662 
3663       // Advance to the next active range that covers the current
3664       // safe point position.
3665       LifetimePosition safe_point_pos =
3666           LifetimePosition::InstructionFromInstructionIndex(safe_point);
3667 
3668       // Search for the child range (cur) that covers safe_point_pos. If we
3669       // don't find it before the children pass safe_point_pos, keep cur at
3670       // the last child, because the next safe_point_pos may be covered by cur.
3671       // This may happen if cur has more than one interval, and the current
3672       // safe_point_pos is in between intervals.
3673       // For that reason, cur may be at most the last child.
3674       DCHECK_NOT_NULL(cur);
3675       DCHECK(safe_point_pos >= cur->Start() || range == cur);
3676       bool found = false;
3677       while (!found) {
3678         if (cur->Covers(safe_point_pos)) {
3679           found = true;
3680         } else {
3681           LiveRange* next = cur->next();
3682           if (next == nullptr || next->Start() > safe_point_pos) {
3683             break;
3684           }
3685           cur = next;
3686         }
3687       }
3688 
3689       if (!found) {
3690         continue;
3691       }
3692 
3693       // Check if the live range is spilled and the safe point is after
3694       // the spill position.
3695       int spill_index = range->IsSpilledOnlyInDeferredBlocks()
3696                             ? cur->Start().ToInstructionIndex()
3697                             : range->spill_start_index();
3698 
3699       if (!spill_operand.IsInvalid() && safe_point >= spill_index) {
3700         TRACE("Pointer for range %d (spilled at %d) at safe point %d\n",
3701               range->vreg(), spill_index, safe_point);
3702         map->RecordReference(AllocatedOperand::cast(spill_operand));
3703       }
3704 
3705       if (!cur->spilled()) {
3706         TRACE(
3707             "Pointer in register for range %d:%d (start at %d) "
3708             "at safe point %d\n",
3709             range->vreg(), cur->relative_id(), cur->Start().value(),
3710             safe_point);
3711         InstructionOperand operand = cur->GetAssignedOperand();
3712         DCHECK(!operand.IsStackSlot());
3713         DCHECK(CanBeTaggedPointer(
3714             AllocatedOperand::cast(operand).representation()));
3715         map->RecordReference(AllocatedOperand::cast(operand));
3716       }
3717     }
3718   }
3719 }
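// Net effect per safe point inside a tagged range: once the safe point is at
// or past the spill start, the spill slot is recorded so the GC can update
// the value in memory, and whenever the child range covering the safe point
// is not spilled, its assigned register is recorded as well. Constant spill
// operands are skipped, since they are not stack slots holding movable
// pointers.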
3720 
3721 
3722 LiveRangeConnector::LiveRangeConnector(RegisterAllocationData* data)
3723     : data_(data) {}
3724 
3725 
3726 bool LiveRangeConnector::CanEagerlyResolveControlFlow(
3727     const InstructionBlock* block) const {
3728   if (block->PredecessorCount() != 1) return false;
3729   return block->predecessors()[0].IsNext(block->rpo_number());
3730 }
3731 
3732 
3733 void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
3734   // Lazily linearize live ranges in memory for fast lookup.
3735   LiveRangeFinder finder(data(), local_zone);
3736   ZoneVector<BitVector*>& live_in_sets = data()->live_in_sets();
3737   for (const InstructionBlock* block : code()->instruction_blocks()) {
3738     if (CanEagerlyResolveControlFlow(block)) continue;
3739     BitVector* live = live_in_sets[block->rpo_number().ToInt()];
3740     BitVector::Iterator iterator(live);
3741     while (!iterator.Done()) {
3742       int vreg = iterator.Current();
3743       LiveRangeBoundArray* array = finder.ArrayFor(vreg);
3744       for (const RpoNumber& pred : block->predecessors()) {
3745         FindResult result;
3746         const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
3747         if (!array->FindConnectableSubranges(block, pred_block, &result)) {
3748           continue;
3749         }
3750         InstructionOperand pred_op = result.pred_cover_->GetAssignedOperand();
3751         InstructionOperand cur_op = result.cur_cover_->GetAssignedOperand();
3752         if (pred_op.Equals(cur_op)) continue;
3753         if (!pred_op.IsAnyRegister() && cur_op.IsAnyRegister()) {
3754           // We're doing a reload.
3755           // We don't need to, if:
3756           // 1) there's no register use in this block, and
3757           // 2) the range ends before the block does, and
3758           // 3) we don't have a successor, or the successor is spilled.
3759           LifetimePosition block_start =
3760               LifetimePosition::GapFromInstructionIndex(block->code_start());
3761           LifetimePosition block_end =
3762               LifetimePosition::GapFromInstructionIndex(block->code_end());
3763           const LiveRange* current = result.cur_cover_;
3764           const LiveRange* successor = current->next();
3765           if (current->End() < block_end &&
3766               (successor == nullptr || successor->spilled())) {
3767             // Verify point 1: no register use. We can go to the end of the
3768             // range, since it's all within the block.
3769 
3770             bool uses_reg = false;
3771             for (const UsePosition* use = current->NextUsePosition(block_start);
3772                  use != nullptr; use = use->next()) {
3773               if (use->operand()->IsAnyRegister()) {
3774                 uses_reg = true;
3775                 break;
3776               }
3777             }
3778             if (!uses_reg) continue;
3779           }
3780           if (current->TopLevel()->IsSpilledOnlyInDeferredBlocks() &&
3781               pred_block->IsDeferred()) {
3782             // The spill location should be defined in pred_block, so add
3783             // pred_block to the list of blocks requiring a spill operand.
3784             current->TopLevel()->GetListOfBlocksRequiringSpillOperands()->Add(
3785                 pred_block->rpo_number().ToInt());
3786           }
3787         }
3788         int move_loc = ResolveControlFlow(block, cur_op, pred_block, pred_op);
3789         USE(move_loc);
3790         DCHECK_IMPLIES(
3791             result.cur_cover_->TopLevel()->IsSpilledOnlyInDeferredBlocks() &&
3792                 !(pred_op.IsAnyRegister() && cur_op.IsAnyRegister()),
3793             code()->GetInstructionBlock(move_loc)->IsDeferred());
3794       }
3795       iterator.Advance();
3796     }
3797   }
3798 
3799   // At this stage, we collected blocks needing a spill operand from
3800   // ConnectRanges and from ResolveControlFlow. Time to commit the spills for
3801   // deferred blocks.
3802   const size_t live_ranges_size = data()->live_ranges().size();
3803   for (TopLevelLiveRange* top : data()->live_ranges()) {
3804     CHECK_EQ(live_ranges_size,
3805              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
3806     if (top == nullptr || top->IsEmpty() ||
3807         !top->IsSpilledOnlyInDeferredBlocks())
3808       continue;
3809     CommitSpillsInDeferredBlocks(top, finder.ArrayFor(top->vreg()), local_zone);
3810   }
3811 }
3812 
3813 
3814 int LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
3815                                            const InstructionOperand& cur_op,
3816                                            const InstructionBlock* pred,
3817                                            const InstructionOperand& pred_op) {
3818   DCHECK(!pred_op.Equals(cur_op));
3819   int gap_index;
3820   Instruction::GapPosition position;
3821   if (block->PredecessorCount() == 1) {
3822     gap_index = block->first_instruction_index();
3823     position = Instruction::START;
3824   } else {
3825     DCHECK_EQ(1, pred->SuccessorCount());
3826     DCHECK(!code()
3827                 ->InstructionAt(pred->last_instruction_index())
3828                 ->HasReferenceMap());
3829     gap_index = pred->last_instruction_index();
3830     position = Instruction::END;
3831   }
3832   data()->AddGapMove(gap_index, position, pred_op, cur_op);
3833   return gap_index;
3834 }
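// Placement rule above: with a single predecessor the fix-up move can live in
// the START gap of the block itself; otherwise it goes in the END gap of the
// predecessor's last instruction, which is safe because that predecessor has
// this block as its only successor (critical edges are assumed to have been
// split earlier in the pipeline) and its last instruction carries no
// reference map.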
3835 
3836 void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
3837   DelayedInsertionMap delayed_insertion_map(local_zone);
3838   const size_t live_ranges_size = data()->live_ranges().size();
3839   for (TopLevelLiveRange* top_range : data()->live_ranges()) {
3840     CHECK_EQ(live_ranges_size,
3841              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
3842     if (top_range == nullptr) continue;
3843     bool connect_spilled = top_range->IsSpilledOnlyInDeferredBlocks();
3844     LiveRange* first_range = top_range;
3845     for (LiveRange *second_range = first_range->next(); second_range != nullptr;
3846          first_range = second_range, second_range = second_range->next()) {
3847       LifetimePosition pos = second_range->Start();
3848       // Add gap move if the two live ranges touch and there is no block
3849       // boundary.
3850       if (second_range->spilled()) continue;
3851       if (first_range->End() != pos) continue;
3852       if (data()->IsBlockBoundary(pos) &&
3853           !CanEagerlyResolveControlFlow(GetInstructionBlock(code(), pos))) {
3854         continue;
3855       }
3856       InstructionOperand prev_operand = first_range->GetAssignedOperand();
3857       InstructionOperand cur_operand = second_range->GetAssignedOperand();
3858       if (prev_operand.Equals(cur_operand)) continue;
3859       bool delay_insertion = false;
3860       Instruction::GapPosition gap_pos;
3861       int gap_index = pos.ToInstructionIndex();
3862       if (connect_spilled && !prev_operand.IsAnyRegister() &&
3863           cur_operand.IsAnyRegister()) {
3864         const InstructionBlock* block = code()->GetInstructionBlock(gap_index);
3865         DCHECK(block->IsDeferred());
3866         // Performing a reload in this block, meaning the spill operand must
3867         // be defined here.
3868         top_range->GetListOfBlocksRequiringSpillOperands()->Add(
3869             block->rpo_number().ToInt());
3870       }
3871 
      if (pos.IsGapPosition()) {
        gap_pos = pos.IsStart() ? Instruction::START : Instruction::END;
      } else {
        if (pos.IsStart()) {
          delay_insertion = true;
        } else {
          gap_index++;
        }
        gap_pos = delay_insertion ? Instruction::END : Instruction::START;
      }
      // Reloads or spills for ranges that are spilled only in deferred blocks
      // must themselves happen only in deferred blocks.
      DCHECK_IMPLIES(
          connect_spilled &&
              !(prev_operand.IsAnyRegister() && cur_operand.IsAnyRegister()),
          code()->GetInstructionBlock(gap_index)->IsDeferred());

      ParallelMove* move =
          code()->InstructionAt(gap_index)->GetOrCreateParallelMove(
              gap_pos, code_zone());
      if (!delay_insertion) {
        move->AddMove(prev_operand, cur_operand);
      } else {
        delayed_insertion_map.insert(
            std::make_pair(std::make_pair(move, prev_operand), cur_operand));
      }
    }
  }
  if (delayed_insertion_map.empty()) return;
  // Insert all the moves that must occur after the moves already stored in
  // their ParallelMove.
  ZoneVector<MoveOperands*> to_insert(local_zone);
  ZoneVector<MoveOperands*> to_eliminate(local_zone);
  to_insert.reserve(4);
  to_eliminate.reserve(4);
  ParallelMove* moves = delayed_insertion_map.begin()->first.first;
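  // Walk the delayed moves; the map is ordered so that entries belonging to
  // the same ParallelMove are adjacent. PrepareInsertAfter() collects the
  // existing moves that a new move supersedes into |to_eliminate|, while the
  // new moves themselves are buffered in |to_insert| and appended once the
  // whole group has been processed.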
  for (auto it = delayed_insertion_map.begin();; ++it) {
    bool done = it == delayed_insertion_map.end();
    if (done || it->first.first != moves) {
      // Commit the MoveOperands for the current ParallelMove.
      for (MoveOperands* move : to_eliminate) {
        move->Eliminate();
      }
      for (MoveOperands* move : to_insert) {
        moves->push_back(move);
      }
      if (done) break;
      // Reset state.
      to_eliminate.clear();
      to_insert.clear();
      moves = it->first.first;
    }
    // Gather all MoveOperands for a single ParallelMove.
    MoveOperands* move =
        new (code_zone()) MoveOperands(it->first.second, it->second);
    moves->PrepareInsertAfter(move, &to_eliminate);
    to_insert.push_back(move);
  }
}
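// For a range that is spilled only in deferred code, emits the spill moves:
// first collect every block containing a use that needs the value on the
// stack, then walk backwards through deferred predecessors until a
// non-deferred predecessor is reached, and spill at the start of the deferred
// block on that boundary.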
void LiveRangeConnector::CommitSpillsInDeferredBlocks(
    TopLevelLiveRange* range, LiveRangeBoundArray* array, Zone* temp_zone) {
  DCHECK(range->IsSpilledOnlyInDeferredBlocks());
  DCHECK(!range->spilled());

  InstructionSequence* code = data()->code();
  InstructionOperand spill_operand = range->GetSpillRangeOperand();

  TRACE("Live Range %d will be spilled only in deferred blocks.\n",
        range->vreg());
  // If a child range is not spilled itself but has a use that requires the
  // value on the stack, make sure the spill is inserted: record the block of
  // every use that requires a slot and of every use in a spilled child.
  for (const LiveRange* child = range; child != nullptr;
       child = child->next()) {
    for (const UsePosition* pos = child->first_pos(); pos != nullptr;
         pos = pos->next()) {
      if (pos->type() != UsePositionType::kRequiresSlot && !child->spilled())
        continue;
      range->AddBlockRequiringSpillOperand(
          code->GetInstructionBlock(pos->pos().ToInstructionIndex())
              ->rpo_number());
    }
  }

  ZoneQueue<int> worklist(temp_zone);

  for (BitVector::Iterator iterator(
           range->GetListOfBlocksRequiringSpillOperands());
       !iterator.Done(); iterator.Advance()) {
    worklist.push(iterator.Current());
  }

  ZoneSet<std::pair<RpoNumber, int>> done_moves(temp_zone);
  // Seek the deferred blocks that dominate locations requiring spill operands,
  // and spill there. We only need to spill at the start of such blocks.
  BitVector done_blocks(
      range->GetListOfBlocksRequiringSpillOperands()->length(), temp_zone);
  while (!worklist.empty()) {
    int block_id = worklist.front();
    worklist.pop();
    if (done_blocks.Contains(block_id)) continue;
    done_blocks.Add(block_id);
    InstructionBlock* spill_block =
        code->InstructionBlockAt(RpoNumber::FromInt(block_id));

    for (const RpoNumber& pred : spill_block->predecessors()) {
      const InstructionBlock* pred_block = code->InstructionBlockAt(pred);
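      // Keep walking up the chain of deferred predecessors; once a
      // non-deferred predecessor is reached, spill at the start of
      // |spill_block| (which is deferred), moving the value from wherever it
      // lives at the end of that predecessor into the spill slot. Emit at
      // most one such move per block and virtual register.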
      if (pred_block->IsDeferred()) {
        worklist.push(pred_block->rpo_number().ToInt());
      } else {
        LifetimePosition pred_end =
            LifetimePosition::InstructionFromInstructionIndex(
                pred_block->last_instruction_index());

        LiveRangeBound* bound = array->Find(pred_end);

        InstructionOperand pred_op = bound->range_->GetAssignedOperand();

        RpoNumber spill_block_number = spill_block->rpo_number();
        if (done_moves.find(std::make_pair(
                spill_block_number, range->vreg())) == done_moves.end()) {
          data()->AddGapMove(spill_block->first_instruction_index(),
                             Instruction::GapPosition::START, pred_op,
                             spill_operand);
          done_moves.insert(std::make_pair(spill_block_number, range->vreg()));
          spill_block->mark_needs_frame();
        }
      }
    }
  }
}

#undef TRACE

}  // namespace compiler
}  // namespace internal
}  // namespace v8