1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/base/adapters.h"
6 #include "src/compiler/linkage.h"
7 #include "src/compiler/register-allocator.h"
8 #include "src/string-stream.h"
9 
10 namespace v8 {
11 namespace internal {
12 namespace compiler {
13 
14 #define TRACE(...)                             \
15   do {                                         \
16     if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
17   } while (false)
18 
19 
20 namespace {
21 
22 static const int kFloatRepBit =
23     1 << static_cast<int>(MachineRepresentation::kFloat32);
24 static const int kSimd128RepBit =
25     1 << static_cast<int>(MachineRepresentation::kSimd128);
26 
27 void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) {
28   auto it = std::find(v->begin(), v->end(), range);
29   DCHECK(it != v->end());
30   v->erase(it);
31 }
32 
33 int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) {
34   return kind == FP_REGISTERS ? cfg->num_double_registers()
35                               : cfg->num_general_registers();
36 }
37 
38 
39 int GetAllocatableRegisterCount(const RegisterConfiguration* cfg,
40                                 RegisterKind kind) {
41   return kind == FP_REGISTERS ? cfg->num_allocatable_double_registers()
42                               : cfg->num_allocatable_general_registers();
43 }
44 
45 
46 const int* GetAllocatableRegisterCodes(const RegisterConfiguration* cfg,
47                                        RegisterKind kind) {
48   return kind == FP_REGISTERS ? cfg->allocatable_double_codes()
49                               : cfg->allocatable_general_codes();
50 }
51 
52 
53 const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence,
54                                           const InstructionBlock* block) {
55   RpoNumber index = block->loop_header();
56   if (!index.IsValid()) return nullptr;
57   return sequence->InstructionBlockAt(index);
58 }
59 
60 
61 const InstructionBlock* GetInstructionBlock(const InstructionSequence* code,
62                                             LifetimePosition pos) {
63   return code->GetInstructionBlock(pos.ToInstructionIndex());
64 }
65 
66 
67 Instruction* GetLastInstruction(InstructionSequence* code,
68                                 const InstructionBlock* block) {
69   return code->InstructionAt(block->last_instruction_index());
70 }
71 
72 // TODO(dcarney): fix frame to allow frame accesses to half size location.
73 int GetByteWidth(MachineRepresentation rep) {
74   switch (rep) {
75     case MachineRepresentation::kBit:
76     case MachineRepresentation::kWord8:
77     case MachineRepresentation::kWord16:
78     case MachineRepresentation::kWord32:
79     case MachineRepresentation::kTaggedSigned:
80     case MachineRepresentation::kTaggedPointer:
81     case MachineRepresentation::kTagged:
82     case MachineRepresentation::kFloat32:
83       return kPointerSize;
84     case MachineRepresentation::kWord64:
85     case MachineRepresentation::kFloat64:
86       return kDoubleSize;
87     case MachineRepresentation::kSimd128:
88       return kSimd128Size;
89     case MachineRepresentation::kSimd1x4:
90     case MachineRepresentation::kSimd1x8:
91     case MachineRepresentation::kSimd1x16:
92       return kSimdMaskRegisters ? kPointerSize : kSimd128Size;
93     case MachineRepresentation::kNone:
94       break;
95   }
96   UNREACHABLE();
97   return 0;
98 }
99 
100 }  // namespace
101 
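// Bounds of one live range child: its [start, end) span, plus a flag telling
// whether connecting moves should be skipped because the child is spilled.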
102 class LiveRangeBound {
103  public:
104   explicit LiveRangeBound(LiveRange* range, bool skip)
105       : range_(range), start_(range->Start()), end_(range->End()), skip_(skip) {
106     DCHECK(!range->IsEmpty());
107   }
108 
109   bool CanCover(LifetimePosition position) {
110     return start_ <= position && position < end_;
111   }
112 
113   LiveRange* const range_;
114   const LifetimePosition start_;
115   const LifetimePosition end_;
116   const bool skip_;
117 
118  private:
119   DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
120 };
121 
122 
123 struct FindResult {
124   LiveRange* cur_cover_;
125   LiveRange* pred_cover_;
126 };
127 
128 
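// Sorted array of LiveRangeBound entries, one per child of a top-level live
// range, used to find the child that covers a given lifetime position.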
129 class LiveRangeBoundArray {
130  public:
131   LiveRangeBoundArray() : length_(0), start_(nullptr) {}
132 
133   bool ShouldInitialize() { return start_ == nullptr; }
134 
135   void Initialize(Zone* zone, TopLevelLiveRange* range) {
136     length_ = range->GetChildCount();
137 
138     start_ = zone->NewArray<LiveRangeBound>(length_);
139     LiveRangeBound* curr = start_;
140     // Normally, spilled ranges do not need connecting moves, because the spill
141     // location has been assigned at definition. For ranges spilled in deferred
142     // blocks, that is not the case, so we need to connect the spilled children.
143     for (LiveRange *i = range; i != nullptr; i = i->next(), ++curr) {
144       new (curr) LiveRangeBound(i, i->spilled());
145     }
146   }
147 
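  // Binary search for the bound whose [start, end) span covers |position|.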
148   LiveRangeBound* Find(const LifetimePosition position) const {
149     size_t left_index = 0;
150     size_t right_index = length_;
151     while (true) {
152       size_t current_index = left_index + (right_index - left_index) / 2;
153       DCHECK(right_index > current_index);
154       LiveRangeBound* bound = &start_[current_index];
155       if (bound->start_ <= position) {
156         if (position < bound->end_) return bound;
157         DCHECK(left_index < current_index);
158         left_index = current_index;
159       } else {
160         right_index = current_index;
161       }
162     }
163   }
164 
165   LiveRangeBound* FindPred(const InstructionBlock* pred) {
166     LifetimePosition pred_end =
167         LifetimePosition::InstructionFromInstructionIndex(
168             pred->last_instruction_index());
169     return Find(pred_end);
170   }
171 
172   LiveRangeBound* FindSucc(const InstructionBlock* succ) {
173     LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex(
174         succ->first_instruction_index());
175     return Find(succ_start);
176   }
177 
178   bool FindConnectableSubranges(const InstructionBlock* block,
179                                 const InstructionBlock* pred,
180                                 FindResult* result) const {
181     LifetimePosition pred_end =
182         LifetimePosition::InstructionFromInstructionIndex(
183             pred->last_instruction_index());
184     LiveRangeBound* bound = Find(pred_end);
185     result->pred_cover_ = bound->range_;
186     LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex(
187         block->first_instruction_index());
188 
189     if (bound->CanCover(cur_start)) {
190       // Both blocks are covered by the same range, so there is nothing to
191       // connect.
192       return false;
193     }
194     bound = Find(cur_start);
195     if (bound->skip_) {
196       return false;
197     }
198     result->cur_cover_ = bound->range_;
199     DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
200     return (result->cur_cover_ != result->pred_cover_);
201   }
202 
203  private:
204   size_t length_;
205   LiveRangeBound* start_;
206 
207   DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
208 };
209 
210 
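// Lazily builds one LiveRangeBoundArray per top-level live range (indexed by
// virtual register), so bound arrays are only constructed when queried.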
211 class LiveRangeFinder {
212  public:
213   explicit LiveRangeFinder(const RegisterAllocationData* data, Zone* zone)
214       : data_(data),
215         bounds_length_(static_cast<int>(data_->live_ranges().size())),
216         bounds_(zone->NewArray<LiveRangeBoundArray>(bounds_length_)),
217         zone_(zone) {
218     for (int i = 0; i < bounds_length_; ++i) {
219       new (&bounds_[i]) LiveRangeBoundArray();
220     }
221   }
222 
223   LiveRangeBoundArray* ArrayFor(int operand_index) {
224     DCHECK(operand_index < bounds_length_);
225     TopLevelLiveRange* range = data_->live_ranges()[operand_index];
226     DCHECK(range != nullptr && !range->IsEmpty());
227     LiveRangeBoundArray* array = &bounds_[operand_index];
228     if (array->ShouldInitialize()) {
229       array->Initialize(zone_, range);
230     }
231     return array;
232   }
233 
234  private:
235   const RegisterAllocationData* const data_;
236   const int bounds_length_;
237   LiveRangeBoundArray* const bounds_;
238   Zone* const zone_;
239 
240   DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
241 };
242 
243 
244 typedef std::pair<ParallelMove*, InstructionOperand> DelayedInsertionMapKey;
245 
246 
247 struct DelayedInsertionMapCompare {
248   bool operator()(const DelayedInsertionMapKey& a,
249                   const DelayedInsertionMapKey& b) const {
250     if (a.first == b.first) {
251       return a.second.Compare(b.second);
252     }
253     return a.first < b.first;
254   }
255 };
256 
257 
258 typedef ZoneMap<DelayedInsertionMapKey, InstructionOperand,
259                 DelayedInsertionMapCompare> DelayedInsertionMap;
260 
261 
262 UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
263                          void* hint, UsePositionHintType hint_type)
264     : operand_(operand), hint_(hint), next_(nullptr), pos_(pos), flags_(0) {
265   DCHECK_IMPLIES(hint == nullptr, hint_type == UsePositionHintType::kNone);
266   bool register_beneficial = true;
267   UsePositionType type = UsePositionType::kAny;
268   if (operand_ != nullptr && operand_->IsUnallocated()) {
269     const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_);
270     if (unalloc->HasRegisterPolicy()) {
271       type = UsePositionType::kRequiresRegister;
272     } else if (unalloc->HasSlotPolicy()) {
273       type = UsePositionType::kRequiresSlot;
274       register_beneficial = false;
275     } else {
276       register_beneficial = !unalloc->HasAnyPolicy();
277     }
278   }
279   flags_ = TypeField::encode(type) | HintTypeField::encode(hint_type) |
280            RegisterBeneficialField::encode(register_beneficial) |
281            AssignedRegisterField::encode(kUnassignedRegister);
282   DCHECK(pos_.IsValid());
283 }
284 
285 
286 bool UsePosition::HasHint() const {
287   int hint_register;
288   return HintRegister(&hint_register);
289 }
290 
291 
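// Decodes the hint into a register code, depending on the kind of hint stored
// (another use position, an allocated operand, or a phi). Returns false if the
// hint is absent, unresolved, or not yet assigned a register.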
292 bool UsePosition::HintRegister(int* register_code) const {
293   if (hint_ == nullptr) return false;
294   switch (HintTypeField::decode(flags_)) {
295     case UsePositionHintType::kNone:
296     case UsePositionHintType::kUnresolved:
297       return false;
298     case UsePositionHintType::kUsePos: {
299       UsePosition* use_pos = reinterpret_cast<UsePosition*>(hint_);
300       int assigned_register = AssignedRegisterField::decode(use_pos->flags_);
301       if (assigned_register == kUnassignedRegister) return false;
302       *register_code = assigned_register;
303       return true;
304     }
305     case UsePositionHintType::kOperand: {
306       InstructionOperand* operand =
307           reinterpret_cast<InstructionOperand*>(hint_);
308       *register_code = LocationOperand::cast(operand)->register_code();
309       return true;
310     }
311     case UsePositionHintType::kPhi: {
312       RegisterAllocationData::PhiMapValue* phi =
313           reinterpret_cast<RegisterAllocationData::PhiMapValue*>(hint_);
314       int assigned_register = phi->assigned_register();
315       if (assigned_register == kUnassignedRegister) return false;
316       *register_code = assigned_register;
317       return true;
318     }
319   }
320   UNREACHABLE();
321   return false;
322 }
323 
324 
325 UsePositionHintType UsePosition::HintTypeForOperand(
326     const InstructionOperand& op) {
327   switch (op.kind()) {
328     case InstructionOperand::CONSTANT:
329     case InstructionOperand::IMMEDIATE:
330     case InstructionOperand::EXPLICIT:
331       return UsePositionHintType::kNone;
332     case InstructionOperand::UNALLOCATED:
333       return UsePositionHintType::kUnresolved;
334     case InstructionOperand::ALLOCATED:
335       if (op.IsRegister() || op.IsFPRegister()) {
336         return UsePositionHintType::kOperand;
337       } else {
338         DCHECK(op.IsStackSlot() || op.IsFPStackSlot());
339         return UsePositionHintType::kNone;
340       }
341     case InstructionOperand::INVALID:
342       break;
343   }
344   UNREACHABLE();
345   return UsePositionHintType::kNone;
346 }
347 
348 void UsePosition::SetHint(UsePosition* use_pos) {
349   DCHECK_NOT_NULL(use_pos);
350   hint_ = use_pos;
351   flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos);
352 }
353 
354 void UsePosition::ResolveHint(UsePosition* use_pos) {
355   DCHECK_NOT_NULL(use_pos);
356   if (HintTypeField::decode(flags_) != UsePositionHintType::kUnresolved) return;
357   hint_ = use_pos;
358   flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos);
359 }
360 
361 
362 void UsePosition::set_type(UsePositionType type, bool register_beneficial) {
363   DCHECK_IMPLIES(type == UsePositionType::kRequiresSlot, !register_beneficial);
364   DCHECK_EQ(kUnassignedRegister, AssignedRegisterField::decode(flags_));
365   flags_ = TypeField::encode(type) |
366            RegisterBeneficialField::encode(register_beneficial) |
367            HintTypeField::encode(HintTypeField::decode(flags_)) |
368            AssignedRegisterField::encode(kUnassignedRegister);
369 }
370 
371 
372 UseInterval* UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
373   DCHECK(Contains(pos) && pos != start());
374   UseInterval* after = new (zone) UseInterval(pos, end_);
375   after->next_ = next_;
376   next_ = nullptr;
377   end_ = pos;
378   return after;
379 }
380 
381 
382 void LifetimePosition::Print() const {
383   OFStream os(stdout);
384   os << *this << std::endl;
385 }
386 
387 
388 std::ostream& operator<<(std::ostream& os, const LifetimePosition pos) {
389   os << '@' << pos.ToInstructionIndex();
390   if (pos.IsGapPosition()) {
391     os << 'g';
392   } else {
393     os << 'i';
394   }
395   if (pos.IsStart()) {
396     os << 's';
397   } else {
398     os << 'e';
399   }
400   return os;
401 }
402 
403 LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
404                      TopLevelLiveRange* top_level)
405     : relative_id_(relative_id),
406       bits_(0),
407       last_interval_(nullptr),
408       first_interval_(nullptr),
409       first_pos_(nullptr),
410       top_level_(top_level),
411       next_(nullptr),
412       current_interval_(nullptr),
413       last_processed_use_(nullptr),
414       current_hint_position_(nullptr),
415       splitting_pointer_(nullptr) {
416   DCHECK(AllocatedOperand::IsSupportedRepresentation(rep));
417   bits_ = AssignedRegisterField::encode(kUnassignedRegister) |
418           RepresentationField::encode(rep);
419 }
420 
421 
422 void LiveRange::VerifyPositions() const {
423   // Walk the positions, verifying that each is in an interval.
424   UseInterval* interval = first_interval_;
425   for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
426     CHECK(Start() <= pos->pos());
427     CHECK(pos->pos() <= End());
428     CHECK_NOT_NULL(interval);
429     while (!interval->Contains(pos->pos()) && interval->end() != pos->pos()) {
430       interval = interval->next();
431       CHECK_NOT_NULL(interval);
432     }
433   }
434 }
435 
436 
437 void LiveRange::VerifyIntervals() const {
438   DCHECK(first_interval()->start() == Start());
439   LifetimePosition last_end = first_interval()->end();
440   for (UseInterval* interval = first_interval()->next(); interval != nullptr;
441        interval = interval->next()) {
442     DCHECK(last_end <= interval->start());
443     last_end = interval->end();
444   }
445   DCHECK(last_end == End());
446 }
447 
448 
449 void LiveRange::set_assigned_register(int reg) {
450   DCHECK(!HasRegisterAssigned() && !spilled());
451   bits_ = AssignedRegisterField::update(bits_, reg);
452 }
453 
454 
455 void LiveRange::UnsetAssignedRegister() {
456   DCHECK(HasRegisterAssigned() && !spilled());
457   bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
458 }
459 
460 
461 void LiveRange::Spill() {
462   DCHECK(!spilled());
463   DCHECK(!TopLevel()->HasNoSpillType());
464   set_spilled(true);
465   bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
466 }
467 
468 
469 RegisterKind LiveRange::kind() const {
470   return IsFloatingPoint(representation()) ? FP_REGISTERS : GENERAL_REGISTERS;
471 }
472 
473 
474 UsePosition* LiveRange::FirstHintPosition(int* register_index) const {
475   for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
476     if (pos->HintRegister(register_index)) return pos;
477   }
478   return nullptr;
479 }
480 
481 
482 UsePosition* LiveRange::NextUsePosition(LifetimePosition start) const {
483   UsePosition* use_pos = last_processed_use_;
484   if (use_pos == nullptr || use_pos->pos() > start) {
485     use_pos = first_pos();
486   }
487   while (use_pos != nullptr && use_pos->pos() < start) {
488     use_pos = use_pos->next();
489   }
490   last_processed_use_ = use_pos;
491   return use_pos;
492 }
493 
494 
495 UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
496     LifetimePosition start) const {
497   UsePosition* pos = NextUsePosition(start);
498   while (pos != nullptr && !pos->RegisterIsBeneficial()) {
499     pos = pos->next();
500   }
501   return pos;
502 }
503 
504 LifetimePosition LiveRange::NextLifetimePositionRegisterIsBeneficial(
505     const LifetimePosition& start) const {
506   UsePosition* next_use = NextUsePositionRegisterIsBeneficial(start);
507   if (next_use == nullptr) return End();
508   return next_use->pos();
509 }
510 
511 UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
512     LifetimePosition start) const {
513   UsePosition* pos = first_pos();
514   UsePosition* prev = nullptr;
515   while (pos != nullptr && pos->pos() < start) {
516     if (pos->RegisterIsBeneficial()) prev = pos;
517     pos = pos->next();
518   }
519   return prev;
520 }
521 
522 
523 UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const {
524   UsePosition* pos = NextUsePosition(start);
525   while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister) {
526     pos = pos->next();
527   }
528   return pos;
529 }
530 
531 
532 UsePosition* LiveRange::NextSlotPosition(LifetimePosition start) const {
533   for (UsePosition* pos = NextUsePosition(start); pos != nullptr;
534        pos = pos->next()) {
535     if (pos->type() != UsePositionType::kRequiresSlot) continue;
536     return pos;
537   }
538   return nullptr;
539 }
540 
541 
542 bool LiveRange::CanBeSpilled(LifetimePosition pos) const {
543   // We cannot spill a live range that has a use requiring a register
544   // at the current or the immediate next position.
545   UsePosition* use_pos = NextRegisterPosition(pos);
546   if (use_pos == nullptr) return true;
547   return use_pos->pos() > pos.NextStart().End();
548 }
549 
550 
551 bool LiveRange::IsTopLevel() const { return top_level_ == this; }
552 
553 
554 InstructionOperand LiveRange::GetAssignedOperand() const {
555   if (HasRegisterAssigned()) {
556     DCHECK(!spilled());
557     return AllocatedOperand(LocationOperand::REGISTER, representation(),
558                             assigned_register());
559   }
560   DCHECK(spilled());
561   DCHECK(!HasRegisterAssigned());
562   if (TopLevel()->HasSpillOperand()) {
563     InstructionOperand* op = TopLevel()->GetSpillOperand();
564     DCHECK(!op->IsUnallocated());
565     return *op;
566   }
567   return TopLevel()->GetSpillRangeOperand();
568 }
569 
570 
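// Returns the interval to start searching from: the cached current interval if
// it does not start after |position|, otherwise the first interval.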
571 UseInterval* LiveRange::FirstSearchIntervalForPosition(
572     LifetimePosition position) const {
573   if (current_interval_ == nullptr) return first_interval_;
574   if (current_interval_->start() > position) {
575     current_interval_ = nullptr;
576     return first_interval_;
577   }
578   return current_interval_;
579 }
580 
581 
582 void LiveRange::AdvanceLastProcessedMarker(
583     UseInterval* to_start_of, LifetimePosition but_not_past) const {
584   if (to_start_of == nullptr) return;
585   if (to_start_of->start() > but_not_past) return;
586   LifetimePosition start = current_interval_ == nullptr
587                                ? LifetimePosition::Invalid()
588                                : current_interval_->start();
589   if (to_start_of->start() > start) {
590     current_interval_ = to_start_of;
591   }
592 }
593 
594 
595 LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
596   int new_id = TopLevel()->GetNextChildId();
597   LiveRange* child = new (zone) LiveRange(new_id, representation(), TopLevel());
598   // If we split, we do so because we're about to switch registers or move
599   // to/from a slot, so there's no value in connecting hints.
600   DetachAt(position, child, zone, DoNotConnectHints);
601 
602   child->top_level_ = TopLevel();
603   child->next_ = next_;
604   next_ = child;
605   return child;
606 }
607 
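// Moves the part of this range at and after |position| into |result| and
// returns the last use position that remains with this range (may be null).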
608 UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
609                                  Zone* zone,
610                                  HintConnectionOption connect_hints) {
611   DCHECK(Start() < position);
612   DCHECK(End() > position);
613   DCHECK(result->IsEmpty());
614   // Find the last interval that ends before the position. If the
615   // position is contained in one of the intervals in the chain, we
616   // split that interval and use the first part.
617   UseInterval* current = FirstSearchIntervalForPosition(position);
618 
619   // If the split position coincides with the beginning of a use interval
620   // we need to split use positions in a special way.
621   bool split_at_start = false;
622 
623   if (current->start() == position) {
624     // When splitting at start we need to locate the previous use interval.
625     current = first_interval_;
626   }
627 
628   UseInterval* after = nullptr;
629   while (current != nullptr) {
630     if (current->Contains(position)) {
631       after = current->SplitAt(position, zone);
632       break;
633     }
634     UseInterval* next = current->next();
635     if (next->start() >= position) {
636       split_at_start = (next->start() == position);
637       after = next;
638       current->set_next(nullptr);
639       break;
640     }
641     current = next;
642   }
643   DCHECK(nullptr != after);
644 
645   // Partition original use intervals to the two live ranges.
646   UseInterval* before = current;
647   result->last_interval_ =
648       (last_interval_ == before)
649           ? after            // Only interval in the range after split.
650           : last_interval_;  // Last interval of the original range.
651   result->first_interval_ = after;
652   last_interval_ = before;
653 
654   // Find the last use position before the split and the first use
655   // position after it.
656   UsePosition* use_after =
657       splitting_pointer_ == nullptr || splitting_pointer_->pos() > position
658           ? first_pos()
659           : splitting_pointer_;
660   UsePosition* use_before = nullptr;
661   if (split_at_start) {
662     // The split position coincides with the beginning of a use interval (the
663     // end of a lifetime hole). A use at this position should be attributed to
664     // the split child, because the split child owns the use interval covering it.
665     while (use_after != nullptr && use_after->pos() < position) {
666       use_before = use_after;
667       use_after = use_after->next();
668     }
669   } else {
670     while (use_after != nullptr && use_after->pos() <= position) {
671       use_before = use_after;
672       use_after = use_after->next();
673     }
674   }
675 
676   // Partition original use positions to the two live ranges.
677   if (use_before != nullptr) {
678     use_before->set_next(nullptr);
679   } else {
680     first_pos_ = nullptr;
681   }
682   result->first_pos_ = use_after;
683 
684   // Discard cached iteration state. It might be pointing
685   // to the use that no longer belongs to this live range.
686   last_processed_use_ = nullptr;
687   current_interval_ = nullptr;
688 
689   if (connect_hints == ConnectHints && use_before != nullptr &&
690       use_after != nullptr) {
691     use_after->SetHint(use_before);
692   }
693 #ifdef DEBUG
694   VerifyChildStructure();
695   result->VerifyChildStructure();
696 #endif
697   return use_before;
698 }
699 
700 
701 void LiveRange::UpdateParentForAllChildren(TopLevelLiveRange* new_top_level) {
702   LiveRange* child = this;
703   for (; child != nullptr; child = child->next()) {
704     child->top_level_ = new_top_level;
705   }
706 }
707 
708 
709 void LiveRange::ConvertUsesToOperand(const InstructionOperand& op,
710                                      const InstructionOperand& spill_op) {
711   for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
712     DCHECK(Start() <= pos->pos() && pos->pos() <= End());
713     if (!pos->HasOperand()) continue;
714     switch (pos->type()) {
715       case UsePositionType::kRequiresSlot:
716         DCHECK(spill_op.IsStackSlot() || spill_op.IsFPStackSlot());
717         InstructionOperand::ReplaceWith(pos->operand(), &spill_op);
718         break;
719       case UsePositionType::kRequiresRegister:
720         DCHECK(op.IsRegister() || op.IsFPRegister());
721       // Fall through.
722       case UsePositionType::kAny:
723         InstructionOperand::ReplaceWith(pos->operand(), &op);
724         break;
725     }
726   }
727 }
728 
729 
730 // This implements an ordering on live ranges so that they are ordered by their
731 // start positions.  This is needed for the correctness of the register
732 // allocation algorithm.  If two live ranges start at the same offset then there
733 // is a tie breaker based on where the value is first used.  This part of the
734 // ordering is merely a heuristic.
735 bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
736   LifetimePosition start = Start();
737   LifetimePosition other_start = other->Start();
738   if (start == other_start) {
739     UsePosition* pos = first_pos();
740     if (pos == nullptr) return false;
741     UsePosition* other_pos = other->first_pos();
742     if (other_pos == nullptr) return true;
743     return pos->pos() < other_pos->pos();
744   }
745   return start < other_start;
746 }
747 
748 
749 void LiveRange::SetUseHints(int register_index) {
750   for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
751     if (!pos->HasOperand()) continue;
752     switch (pos->type()) {
753       case UsePositionType::kRequiresSlot:
754         break;
755       case UsePositionType::kRequiresRegister:
756       case UsePositionType::kAny:
757         pos->set_assigned_register(register_index);
758         break;
759     }
760   }
761 }
762 
763 
764 bool LiveRange::CanCover(LifetimePosition position) const {
765   if (IsEmpty()) return false;
766   return Start() <= position && position < End();
767 }
768 
769 
770 bool LiveRange::Covers(LifetimePosition position) const {
771   if (!CanCover(position)) return false;
772   UseInterval* start_search = FirstSearchIntervalForPosition(position);
773   for (UseInterval* interval = start_search; interval != nullptr;
774        interval = interval->next()) {
775     DCHECK(interval->next() == nullptr ||
776            interval->next()->start() >= interval->start());
777     AdvanceLastProcessedMarker(interval, position);
778     if (interval->Contains(position)) return true;
779     if (interval->start() > position) return false;
780   }
781   return false;
782 }
783 
784 
785 LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const {
786   UseInterval* b = other->first_interval();
787   if (b == nullptr) return LifetimePosition::Invalid();
788   LifetimePosition advance_last_processed_up_to = b->start();
789   UseInterval* a = FirstSearchIntervalForPosition(b->start());
790   while (a != nullptr && b != nullptr) {
791     if (a->start() > other->End()) break;
792     if (b->start() > End()) break;
793     LifetimePosition cur_intersection = a->Intersect(b);
794     if (cur_intersection.IsValid()) {
795       return cur_intersection;
796     }
797     if (a->start() < b->start()) {
798       a = a->next();
799       if (a == nullptr || a->start() > other->End()) break;
800       AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
801     } else {
802       b = b->next();
803     }
804   }
805   return LifetimePosition::Invalid();
806 }
807 
808 void LiveRange::Print(const RegisterConfiguration* config,
809                       bool with_children) const {
810   OFStream os(stdout);
811   PrintableLiveRange wrapper;
812   wrapper.register_configuration_ = config;
813   for (const LiveRange* i = this; i != nullptr; i = i->next()) {
814     wrapper.range_ = i;
815     os << wrapper << std::endl;
816     if (!with_children) break;
817   }
818 }
819 
820 
821 void LiveRange::Print(bool with_children) const {
822   Print(RegisterConfiguration::Turbofan(), with_children);
823 }
824 
825 
826 struct TopLevelLiveRange::SpillMoveInsertionList : ZoneObject {
827   SpillMoveInsertionList(int gap_index, InstructionOperand* operand,
828                          SpillMoveInsertionList* next)
829       : gap_index(gap_index), operand(operand), next(next) {}
830   const int gap_index;
831   InstructionOperand* const operand;
832   SpillMoveInsertionList* const next;
833 };
834 
835 
836 TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineRepresentation rep)
837     : LiveRange(0, rep, this),
838       vreg_(vreg),
839       last_child_id_(0),
840       splintered_from_(nullptr),
841       spill_operand_(nullptr),
842       spill_move_insertion_locations_(nullptr),
843       spilled_in_deferred_blocks_(false),
844       spill_start_index_(kMaxInt),
845       last_pos_(nullptr),
846       splinter_(nullptr),
847       has_preassigned_slot_(false) {
848   bits_ |= SpillTypeField::encode(SpillType::kNoSpillType);
849 }
850 
851 
852 #if DEBUG
853 int TopLevelLiveRange::debug_virt_reg() const {
854   return IsSplinter() ? splintered_from()->vreg() : vreg();
855 }
856 #endif
857 
858 
859 void TopLevelLiveRange::RecordSpillLocation(Zone* zone, int gap_index,
860                                             InstructionOperand* operand) {
861   DCHECK(HasNoSpillType());
862   spill_move_insertion_locations_ = new (zone) SpillMoveInsertionList(
863       gap_index, operand, spill_move_insertion_locations_);
864 }
865 
866 void TopLevelLiveRange::CommitSpillMoves(InstructionSequence* sequence,
867                                          const InstructionOperand& op,
868                                          bool might_be_duplicated) {
869   DCHECK_IMPLIES(op.IsConstant(), GetSpillMoveInsertionLocations() == nullptr);
870   Zone* zone = sequence->zone();
871 
872   for (SpillMoveInsertionList* to_spill = GetSpillMoveInsertionLocations();
873        to_spill != nullptr; to_spill = to_spill->next) {
874     Instruction* instr = sequence->InstructionAt(to_spill->gap_index);
875     ParallelMove* move =
876         instr->GetOrCreateParallelMove(Instruction::START, zone);
877     // Skip insertion if it's possible that the move exists already as a
878     // constraint move from a fixed output register to a slot.
879     if (might_be_duplicated || has_preassigned_slot()) {
880       bool found = false;
881       for (MoveOperands* move_op : *move) {
882         if (move_op->IsEliminated()) continue;
883         if (move_op->source().Equals(*to_spill->operand) &&
884             move_op->destination().Equals(op)) {
885           found = true;
886           if (has_preassigned_slot()) move_op->Eliminate();
887           break;
888         }
889       }
890       if (found) continue;
891     }
892     if (!has_preassigned_slot()) {
893       move->AddMove(*to_spill->operand, op);
894     }
895   }
896 }
897 
898 
899 void TopLevelLiveRange::SetSpillOperand(InstructionOperand* operand) {
900   DCHECK(HasNoSpillType());
901   DCHECK(!operand->IsUnallocated() && !operand->IsImmediate());
902   set_spill_type(SpillType::kSpillOperand);
903   spill_operand_ = operand;
904 }
905 
906 
907 void TopLevelLiveRange::SetSpillRange(SpillRange* spill_range) {
908   DCHECK(!HasSpillOperand());
909   DCHECK(spill_range);
910   spill_range_ = spill_range;
911 }
912 
913 
914 AllocatedOperand TopLevelLiveRange::GetSpillRangeOperand() const {
915   SpillRange* spill_range = GetSpillRange();
916   int index = spill_range->assigned_slot();
917   return AllocatedOperand(LocationOperand::STACK_SLOT, representation(), index);
918 }
919 
920 
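// Carves the [start, end) portion out of this range and appends it to this
// range's splinter, merging the interval and use-position lists accordingly.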
921 void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end,
922                                  Zone* zone) {
923   DCHECK(start != Start() || end != End());
924   DCHECK(start < end);
925 
926   TopLevelLiveRange splinter_temp(-1, representation());
927   UsePosition* last_in_splinter = nullptr;
928   // Live ranges defined in deferred blocks stay in deferred blocks, so we
929   // don't need to splinter them. That means that start should always be
930   // after the beginning of the range.
931   DCHECK(start > Start());
932 
933   if (end >= End()) {
934     DCHECK(start > Start());
935     DetachAt(start, &splinter_temp, zone, ConnectHints);
936     next_ = nullptr;
937   } else {
938     DCHECK(start < End() && Start() < end);
939 
940     const int kInvalidId = std::numeric_limits<int>::max();
941 
942     UsePosition* last = DetachAt(start, &splinter_temp, zone, ConnectHints);
943 
944     LiveRange end_part(kInvalidId, this->representation(), nullptr);
945     // The last chunk exits the deferred region, and we don't want to connect
946     // hints here, because the non-deferred region shouldn't be affected
947     // by allocation decisions on the deferred path.
948     last_in_splinter =
949         splinter_temp.DetachAt(end, &end_part, zone, DoNotConnectHints);
950 
951     next_ = end_part.next_;
952     last_interval_->set_next(end_part.first_interval_);
953     // The next splinter will happen either at or after the current interval.
954     // We can optimize DetachAt by setting current_interval_ accordingly,
955     // which will then be picked up by FirstSearchIntervalForPosition.
956     current_interval_ = last_interval_;
957     last_interval_ = end_part.last_interval_;
958 
959     if (first_pos_ == nullptr) {
960       first_pos_ = end_part.first_pos_;
961     } else {
962       splitting_pointer_ = last;
963       if (last != nullptr) last->set_next(end_part.first_pos_);
964     }
965   }
966 
967   if (splinter()->IsEmpty()) {
968     splinter()->first_interval_ = splinter_temp.first_interval_;
969     splinter()->last_interval_ = splinter_temp.last_interval_;
970   } else {
971     splinter()->last_interval_->set_next(splinter_temp.first_interval_);
972     splinter()->last_interval_ = splinter_temp.last_interval_;
973   }
974   if (splinter()->first_pos() == nullptr) {
975     splinter()->first_pos_ = splinter_temp.first_pos_;
976   } else {
977     splinter()->last_pos_->set_next(splinter_temp.first_pos_);
978   }
979   if (last_in_splinter != nullptr) {
980     splinter()->last_pos_ = last_in_splinter;
981   } else {
982     if (splinter()->first_pos() != nullptr &&
983         splinter()->last_pos_ == nullptr) {
984       splinter()->last_pos_ = splinter()->first_pos();
985       for (UsePosition* pos = splinter()->first_pos(); pos != nullptr;
986            pos = pos->next()) {
987         splinter()->last_pos_ = pos;
988       }
989     }
990   }
991 #if DEBUG
992   Verify();
993   splinter()->Verify();
994 #endif
995 }
996 
997 
998 void TopLevelLiveRange::SetSplinteredFrom(TopLevelLiveRange* splinter_parent) {
999   splintered_from_ = splinter_parent;
1000   if (!HasSpillOperand() && splinter_parent->spill_range_ != nullptr) {
1001     SetSpillRange(splinter_parent->spill_range_);
1002   }
1003 }
1004 
1005 
1006 void TopLevelLiveRange::UpdateSpillRangePostMerge(TopLevelLiveRange* merged) {
1007   DCHECK(merged->TopLevel() == this);
1008 
1009   if (HasNoSpillType() && merged->HasSpillRange()) {
1010     set_spill_type(merged->spill_type());
1011     DCHECK(GetSpillRange()->live_ranges().size() > 0);
1012     merged->spill_range_ = nullptr;
1013     merged->bits_ =
1014         SpillTypeField::update(merged->bits_, SpillType::kNoSpillType);
1015   }
1016 }
1017 
1018 
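// Re-attaches the children of the splintered range |other| into this range's
// child chain, splitting children where the two ranges overlap so that the
// chain stays ordered by start position.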
1019 void TopLevelLiveRange::Merge(TopLevelLiveRange* other, Zone* zone) {
1020   DCHECK(Start() < other->Start());
1021   DCHECK(other->splintered_from() == this);
1022 
1023   LiveRange* first = this;
1024   LiveRange* second = other;
1025   DCHECK(first->Start() < second->Start());
1026   while (first != nullptr && second != nullptr) {
1027     DCHECK(first != second);
1028     // Make sure the ranges are in order each time we iterate.
1029     if (second->Start() < first->Start()) {
1030       LiveRange* tmp = second;
1031       second = first;
1032       first = tmp;
1033       continue;
1034     }
1035 
1036     if (first->End() <= second->Start()) {
1037       if (first->next() == nullptr ||
1038           first->next()->Start() > second->Start()) {
1039         // First is in order before second.
1040         LiveRange* temp = first->next();
1041         first->next_ = second;
1042         first = temp;
1043       } else {
1044         // First is in order before its successor (or second), so advance first.
1045         first = first->next();
1046       }
1047       continue;
1048     }
1049 
1050     DCHECK(first->Start() < second->Start());
1051     // If first and second intersect, split first.
1052     if (first->Start() < second->End() && second->Start() < first->End()) {
1053       LiveRange* temp = first->SplitAt(second->Start(), zone);
1054       CHECK(temp != first);
1055       temp->set_spilled(first->spilled());
1056       if (!temp->spilled())
1057         temp->set_assigned_register(first->assigned_register());
1058 
1059       first->next_ = second;
1060       first = temp;
1061       continue;
1062     }
1063     DCHECK(first->End() <= second->Start());
1064   }
1065 
1066   TopLevel()->UpdateParentForAllChildren(TopLevel());
1067   TopLevel()->UpdateSpillRangePostMerge(other);
1068   TopLevel()->set_has_slot_use(TopLevel()->has_slot_use() ||
1069                                other->has_slot_use());
1070 
1071 #if DEBUG
1072   Verify();
1073 #endif
1074 }
1075 
1076 
1077 void TopLevelLiveRange::VerifyChildrenInOrder() const {
1078   LifetimePosition last_end = End();
1079   for (const LiveRange* child = this->next(); child != nullptr;
1080        child = child->next()) {
1081     DCHECK(last_end <= child->Start());
1082     last_end = child->End();
1083   }
1084 }
1085 
1086 
1087 void TopLevelLiveRange::Verify() const {
1088   VerifyChildrenInOrder();
1089   for (const LiveRange* child = this; child != nullptr; child = child->next()) {
1090     VerifyChildStructure();
1091   }
1092 }
1093 
1094 
1095 void TopLevelLiveRange::ShortenTo(LifetimePosition start) {
1096   TRACE("Shorten live range %d to [%d\n", vreg(), start.value());
1097   DCHECK(first_interval_ != nullptr);
1098   DCHECK(first_interval_->start() <= start);
1099   DCHECK(start < first_interval_->end());
1100   first_interval_->set_start(start);
1101 }
1102 
1103 
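// Prepends an interval covering [start, new_end), absorbing any existing
// intervals that begin at or before |end| and keeping the farthest end seen.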
1104 void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
1105                                        LifetimePosition end, Zone* zone) {
1106   TRACE("Ensure live range %d in interval [%d %d[\n", vreg(), start.value(),
1107         end.value());
1108   LifetimePosition new_end = end;
1109   while (first_interval_ != nullptr && first_interval_->start() <= end) {
1110     if (first_interval_->end() > end) {
1111       new_end = first_interval_->end();
1112     }
1113     first_interval_ = first_interval_->next();
1114   }
1115 
1116   UseInterval* new_interval = new (zone) UseInterval(start, new_end);
1117   new_interval->set_next(first_interval_);
1118   first_interval_ = new_interval;
1119   if (new_interval->next() == nullptr) {
1120     last_interval_ = new_interval;
1121   }
1122 }
1123 
1124 
1125 void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
1126                                        LifetimePosition end, Zone* zone) {
1127   TRACE("Add to live range %d interval [%d %d[\n", vreg(), start.value(),
1128         end.value());
1129   if (first_interval_ == nullptr) {
1130     UseInterval* interval = new (zone) UseInterval(start, end);
1131     first_interval_ = interval;
1132     last_interval_ = interval;
1133   } else {
1134     if (end == first_interval_->start()) {
1135       first_interval_->set_start(start);
1136     } else if (end < first_interval_->start()) {
1137       UseInterval* interval = new (zone) UseInterval(start, end);
1138       interval->set_next(first_interval_);
1139       first_interval_ = interval;
1140     } else {
1141       // The order of instruction processing (see ProcessInstructions)
1142       // guarantees that each new use interval either precedes, intersects
1143       // with, or touches the last added interval.
1144       DCHECK(start <= first_interval_->end());
1145       first_interval_->set_start(Min(start, first_interval_->start()));
1146       first_interval_->set_end(Max(end, first_interval_->end()));
1147     }
1148   }
1149 }
1150 
1151 
1152 void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos) {
1153   LifetimePosition pos = use_pos->pos();
1154   TRACE("Add to live range %d use position %d\n", vreg(), pos.value());
1155   UsePosition* prev_hint = nullptr;
1156   UsePosition* prev = nullptr;
1157   UsePosition* current = first_pos_;
1158   while (current != nullptr && current->pos() < pos) {
1159     prev_hint = current->HasHint() ? current : prev_hint;
1160     prev = current;
1161     current = current->next();
1162   }
1163 
1164   if (prev == nullptr) {
1165     use_pos->set_next(first_pos_);
1166     first_pos_ = use_pos;
1167   } else {
1168     use_pos->set_next(prev->next());
1169     prev->set_next(use_pos);
1170   }
1171 
1172   if (prev_hint == nullptr && use_pos->HasHint()) {
1173     current_hint_position_ = use_pos;
1174   }
1175 }
1176 
1177 
1178 static bool AreUseIntervalsIntersecting(UseInterval* interval1,
1179                                         UseInterval* interval2) {
1180   while (interval1 != nullptr && interval2 != nullptr) {
1181     if (interval1->start() < interval2->start()) {
1182       if (interval1->end() > interval2->start()) {
1183         return true;
1184       }
1185       interval1 = interval1->next();
1186     } else {
1187       if (interval2->end() > interval1->start()) {
1188         return true;
1189       }
1190       interval2 = interval2->next();
1191     }
1192   }
1193   return false;
1194 }
1195 
1196 
1197 std::ostream& operator<<(std::ostream& os,
1198                          const PrintableLiveRange& printable_range) {
1199   const LiveRange* range = printable_range.range_;
1200   os << "Range: " << range->TopLevel()->vreg() << ":" << range->relative_id()
1201      << " ";
1202   if (range->TopLevel()->is_phi()) os << "phi ";
1203   if (range->TopLevel()->is_non_loop_phi()) os << "nlphi ";
1204 
1205   os << "{" << std::endl;
1206   UseInterval* interval = range->first_interval();
1207   UsePosition* use_pos = range->first_pos();
1208   PrintableInstructionOperand pio;
1209   pio.register_configuration_ = printable_range.register_configuration_;
1210   while (use_pos != nullptr) {
1211     if (use_pos->HasOperand()) {
1212       pio.op_ = *use_pos->operand();
1213       os << pio << use_pos->pos() << " ";
1214     }
1215     use_pos = use_pos->next();
1216   }
1217   os << std::endl;
1218 
1219   while (interval != nullptr) {
1220     os << '[' << interval->start() << ", " << interval->end() << ')'
1221        << std::endl;
1222     interval = interval->next();
1223   }
1224   os << "}";
1225   return os;
1226 }
1227 
1228 SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
1229     : live_ranges_(zone),
1230       assigned_slot_(kUnassignedSlot),
1231       byte_width_(GetByteWidth(parent->representation())) {
1232   // Spill ranges are created for top level, non-splintered ranges. This is so
1233   // that, when merging decisions are made, we consider the full extent of the
1234   // virtual register, and avoid clobbering it.
1235   DCHECK(!parent->IsSplinter());
1236   UseInterval* result = nullptr;
1237   UseInterval* node = nullptr;
1238   // Copy the intervals for all ranges.
1239   for (LiveRange* range = parent; range != nullptr; range = range->next()) {
1240     UseInterval* src = range->first_interval();
1241     while (src != nullptr) {
1242       UseInterval* new_node = new (zone) UseInterval(src->start(), src->end());
1243       if (result == nullptr) {
1244         result = new_node;
1245       } else {
1246         node->set_next(new_node);
1247       }
1248       node = new_node;
1249       src = src->next();
1250     }
1251   }
1252   use_interval_ = result;
1253   live_ranges().push_back(parent);
1254   end_position_ = node->end();
1255   parent->SetSpillRange(this);
1256 }
1257 
1258 bool SpillRange::IsIntersectingWith(SpillRange* other) const {
1259   if (this->use_interval_ == nullptr || other->use_interval_ == nullptr ||
1260       this->End() <= other->use_interval_->start() ||
1261       other->End() <= this->use_interval_->start()) {
1262     return false;
1263   }
1264   return AreUseIntervalsIntersecting(use_interval_, other->use_interval_);
1265 }
1266 
1267 
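// Merges |other| into this spill range if neither has been assigned a slot,
// their byte widths match, and their use intervals do not intersect.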
1268 bool SpillRange::TryMerge(SpillRange* other) {
1269   if (HasSlot() || other->HasSlot()) return false;
1270   if (byte_width() != other->byte_width() || IsIntersectingWith(other))
1271     return false;
1272 
1273   LifetimePosition max = LifetimePosition::MaxPosition();
1274   if (End() < other->End() && other->End() != max) {
1275     end_position_ = other->End();
1276   }
1277   other->end_position_ = max;
1278 
1279   MergeDisjointIntervals(other->use_interval_);
1280   other->use_interval_ = nullptr;
1281 
1282   for (TopLevelLiveRange* range : other->live_ranges()) {
1283     DCHECK(range->GetSpillRange() == other);
1284     range->SetSpillRange(this);
1285   }
1286 
1287   live_ranges().insert(live_ranges().end(), other->live_ranges().begin(),
1288                        other->live_ranges().end());
1289   other->live_ranges().clear();
1290 
1291   return true;
1292 }
1293 
1294 
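// Standard merge of two sorted, pairwise-disjoint interval lists into
// use_interval_.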
1295 void SpillRange::MergeDisjointIntervals(UseInterval* other) {
1296   UseInterval* tail = nullptr;
1297   UseInterval* current = use_interval_;
1298   while (other != nullptr) {
1299     // Make sure the 'current' list starts first
1300     if (current == nullptr || current->start() > other->start()) {
1301       std::swap(current, other);
1302     }
1303     // Check disjointness
1304     DCHECK(other == nullptr || current->end() <= other->start());
1305     // Append the 'current' node to the result accumulator and move forward
1306     if (tail == nullptr) {
1307       use_interval_ = current;
1308     } else {
1309       tail->set_next(current);
1310     }
1311     tail = current;
1312     current = current->next();
1313   }
1314   // Other list is empty => we are done
1315 }
1316 
1317 
1318 void SpillRange::Print() const {
1319   OFStream os(stdout);
1320   os << "{" << std::endl;
1321   for (TopLevelLiveRange* range : live_ranges()) {
1322     os << range->vreg() << " ";
1323   }
1324   os << std::endl;
1325 
1326   for (UseInterval* i = interval(); i != nullptr; i = i->next()) {
1327     os << '[' << i->start() << ", " << i->end() << ')' << std::endl;
1328   }
1329   os << "}" << std::endl;
1330 }
1331 
1332 
1333 RegisterAllocationData::PhiMapValue::PhiMapValue(PhiInstruction* phi,
1334                                                  const InstructionBlock* block,
1335                                                  Zone* zone)
1336     : phi_(phi),
1337       block_(block),
1338       incoming_operands_(zone),
1339       assigned_register_(kUnassignedRegister) {
1340   incoming_operands_.reserve(phi->operands().size());
1341 }
1342 
1343 
1344 void RegisterAllocationData::PhiMapValue::AddOperand(
1345     InstructionOperand* operand) {
1346   incoming_operands_.push_back(operand);
1347 }
1348 
1349 
1350 void RegisterAllocationData::PhiMapValue::CommitAssignment(
1351     const InstructionOperand& assigned) {
1352   for (InstructionOperand* operand : incoming_operands_) {
1353     InstructionOperand::ReplaceWith(operand, &assigned);
1354   }
1355 }
1356 
1357 RegisterAllocationData::RegisterAllocationData(
1358     const RegisterConfiguration* config, Zone* zone, Frame* frame,
1359     InstructionSequence* code, const char* debug_name)
1360     : allocation_zone_(zone),
1361       frame_(frame),
1362       code_(code),
1363       debug_name_(debug_name),
1364       config_(config),
1365       phi_map_(allocation_zone()),
1366       live_in_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
1367       live_out_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
1368       live_ranges_(code->VirtualRegisterCount() * 2, nullptr,
1369                    allocation_zone()),
1370       fixed_live_ranges_(this->config()->num_general_registers(), nullptr,
1371                          allocation_zone()),
1372       fixed_float_live_ranges_(allocation_zone()),
1373       fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr,
1374                                 allocation_zone()),
1375       fixed_simd128_live_ranges_(allocation_zone()),
1376       spill_ranges_(code->VirtualRegisterCount(), nullptr, allocation_zone()),
1377       delayed_references_(allocation_zone()),
1378       assigned_registers_(nullptr),
1379       assigned_double_registers_(nullptr),
1380       virtual_register_count_(code->VirtualRegisterCount()),
1381       preassigned_slot_ranges_(zone) {
1382   if (!kSimpleFPAliasing) {
1383     fixed_float_live_ranges_.resize(this->config()->num_float_registers(),
1384                                     nullptr);
1385     fixed_simd128_live_ranges_.resize(this->config()->num_simd128_registers(),
1386                                       nullptr);
1387   }
1388 
1389   assigned_registers_ = new (code_zone())
1390       BitVector(this->config()->num_general_registers(), code_zone());
1391   assigned_double_registers_ = new (code_zone())
1392       BitVector(this->config()->num_double_registers(), code_zone());
1393   this->frame()->SetAllocatedRegisters(assigned_registers_);
1394   this->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
1395 }
1396 
1397 
1398 MoveOperands* RegisterAllocationData::AddGapMove(
1399     int index, Instruction::GapPosition position,
1400     const InstructionOperand& from, const InstructionOperand& to) {
1401   Instruction* instr = code()->InstructionAt(index);
1402   ParallelMove* moves = instr->GetOrCreateParallelMove(position, code_zone());
1403   return moves->AddMove(from, to);
1404 }
1405 
1406 
1407 MachineRepresentation RegisterAllocationData::RepresentationFor(
1408     int virtual_register) {
1409   DCHECK_LT(virtual_register, code()->VirtualRegisterCount());
1410   return code()->GetRepresentation(virtual_register);
1411 }
1412 
1413 
1414 TopLevelLiveRange* RegisterAllocationData::GetOrCreateLiveRangeFor(int index) {
1415   if (index >= static_cast<int>(live_ranges().size())) {
1416     live_ranges().resize(index + 1, nullptr);
1417   }
1418   TopLevelLiveRange* result = live_ranges()[index];
1419   if (result == nullptr) {
1420     result = NewLiveRange(index, RepresentationFor(index));
1421     live_ranges()[index] = result;
1422   }
1423   return result;
1424 }
1425 
1426 
1427 TopLevelLiveRange* RegisterAllocationData::NewLiveRange(
1428     int index, MachineRepresentation rep) {
1429   return new (allocation_zone()) TopLevelLiveRange(index, rep);
1430 }
1431 
1432 
1433 int RegisterAllocationData::GetNextLiveRangeId() {
1434   int vreg = virtual_register_count_++;
1435   if (vreg >= static_cast<int>(live_ranges().size())) {
1436     live_ranges().resize(vreg + 1, nullptr);
1437   }
1438   return vreg;
1439 }
1440 
1441 
1442 TopLevelLiveRange* RegisterAllocationData::NextLiveRange(
1443     MachineRepresentation rep) {
1444   int vreg = GetNextLiveRangeId();
1445   TopLevelLiveRange* ret = NewLiveRange(vreg, rep);
1446   return ret;
1447 }
1448 
1449 
1450 RegisterAllocationData::PhiMapValue* RegisterAllocationData::InitializePhiMap(
1451     const InstructionBlock* block, PhiInstruction* phi) {
1452   RegisterAllocationData::PhiMapValue* map_value = new (allocation_zone())
1453       RegisterAllocationData::PhiMapValue(phi, block, allocation_zone());
1454   auto res =
1455       phi_map_.insert(std::make_pair(phi->virtual_register(), map_value));
1456   DCHECK(res.second);
1457   USE(res);
1458   return map_value;
1459 }
1460 
1461 
1462 RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor(
1463     int virtual_register) {
1464   auto it = phi_map_.find(virtual_register);
1465   DCHECK(it != phi_map_.end());
1466   return it->second;
1467 }
1468 
1469 
1470 RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor(
1471     TopLevelLiveRange* top_range) {
1472   return GetPhiMapValueFor(top_range->vreg());
1473 }
1474 
1475 
1476 bool RegisterAllocationData::ExistsUseWithoutDefinition() {
1477   bool found = false;
1478   BitVector::Iterator iterator(live_in_sets()[0]);
1479   while (!iterator.Done()) {
1480     found = true;
1481     int operand_index = iterator.Current();
1482     PrintF("Register allocator error: live v%d reached first block.\n",
1483            operand_index);
1484     LiveRange* range = GetOrCreateLiveRangeFor(operand_index);
1485     PrintF("  (first use is at %d)\n", range->first_pos()->pos().value());
1486     if (debug_name() == nullptr) {
1487       PrintF("\n");
1488     } else {
1489       PrintF("  (function: %s)\n", debug_name());
1490     }
1491     iterator.Advance();
1492   }
1493   return found;
1494 }
1495 
1496 
1497 // If a range is defined in a deferred block, we can expect the entire range
1498 // to only cover positions in deferred blocks. Otherwise, a block on the
1499 // hot path would be dominated by a deferred block, meaning it is unreachable
1500 // without passing through the deferred block, which is contradictory.
1501 // In particular, when such a range contributes a result back on the hot
1502 // path, it does so as one of the inputs of a phi. In that case, the value
1503 // will be transferred via a move in the Gap::END of the last instruction
1504 // of a deferred block.
1505 bool RegisterAllocationData::RangesDefinedInDeferredStayInDeferred() {
1506   for (const TopLevelLiveRange* range : live_ranges()) {
1507     if (range == nullptr || range->IsEmpty() ||
1508         !code()
1509              ->GetInstructionBlock(range->Start().ToInstructionIndex())
1510              ->IsDeferred()) {
1511       continue;
1512     }
1513     for (const UseInterval* i = range->first_interval(); i != nullptr;
1514          i = i->next()) {
1515       int first = i->FirstGapIndex();
1516       int last = i->LastGapIndex();
1517       for (int instr = first; instr <= last;) {
1518         const InstructionBlock* block = code()->GetInstructionBlock(instr);
1519         if (!block->IsDeferred()) return false;
1520         instr = block->last_instruction_index() + 1;
1521       }
1522     }
1523   }
1524   return true;
1525 }
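// Illustrative sketch (not part of the original file; the block names are
// made up): given a CFG  B0 (hot) -> {B1 (deferred), B2 (hot)},  B1 -> B2,
// a value defined in B1 can only contribute to the hot path as a phi input
// of B2. It is transferred by a gap move at the END position of B1's last
// instruction, so the defining range itself never needs to cover B2.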
1526 
1527 SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
1528     TopLevelLiveRange* range) {
1529   DCHECK(!range->HasSpillOperand());
1530 
1531   SpillRange* spill_range = range->GetAllocatedSpillRange();
1532   if (spill_range == nullptr) {
1533     DCHECK(!range->IsSplinter());
1534     spill_range = new (allocation_zone()) SpillRange(range, allocation_zone());
1535   }
1536   range->set_spill_type(TopLevelLiveRange::SpillType::kSpillRange);
1537 
1538   int spill_range_index =
1539       range->IsSplinter() ? range->splintered_from()->vreg() : range->vreg();
1540 
1541   spill_ranges()[spill_range_index] = spill_range;
1542 
1543   return spill_range;
1544 }
1545 
1546 
1547 SpillRange* RegisterAllocationData::CreateSpillRangeForLiveRange(
1548     TopLevelLiveRange* range) {
1549   DCHECK(!range->HasSpillOperand());
1550   DCHECK(!range->IsSplinter());
1551   SpillRange* spill_range =
1552       new (allocation_zone()) SpillRange(range, allocation_zone());
1553   return spill_range;
1554 }
1555 
1556 void RegisterAllocationData::MarkAllocated(MachineRepresentation rep,
1557                                            int index) {
1558   switch (rep) {
1559     case MachineRepresentation::kFloat32:
1560     case MachineRepresentation::kSimd128:
1561       if (kSimpleFPAliasing) {
1562         assigned_double_registers_->Add(index);
1563       } else {
1564         int alias_base_index = -1;
1565         int aliases = config()->GetAliases(
1566             rep, index, MachineRepresentation::kFloat64, &alias_base_index);
1567         DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
1568         while (aliases--) {
1569           int aliased_reg = alias_base_index + aliases;
1570           assigned_double_registers_->Add(aliased_reg);
1571         }
1572       }
1573       break;
1574     case MachineRepresentation::kFloat64:
1575       assigned_double_registers_->Add(index);
1576       break;
1577     default:
1578       DCHECK(!IsFloatingPoint(rep));
1579       assigned_registers_->Add(index);
1580       break;
1581   }
1582 }
1583 
1584 bool RegisterAllocationData::IsBlockBoundary(LifetimePosition pos) const {
1585   return pos.IsFullStart() &&
1586          code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
1587              pos.ToInstructionIndex();
1588 }
1589 
1590 
1591 ConstraintBuilder::ConstraintBuilder(RegisterAllocationData* data)
1592     : data_(data) {}
1593 
1594 
1595 InstructionOperand* ConstraintBuilder::AllocateFixed(
1596     UnallocatedOperand* operand, int pos, bool is_tagged) {
1597   TRACE("Allocating fixed reg for op %d\n", operand->virtual_register());
1598   DCHECK(operand->HasFixedPolicy());
1599   InstructionOperand allocated;
1600   MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
1601   int virtual_register = operand->virtual_register();
1602   if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
1603     rep = data()->RepresentationFor(virtual_register);
1604   }
1605   if (operand->HasFixedSlotPolicy()) {
1606     allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT, rep,
1607                                  operand->fixed_slot_index());
1608   } else if (operand->HasFixedRegisterPolicy()) {
1609     DCHECK(!IsFloatingPoint(rep));
1610     allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
1611                                  operand->fixed_register_index());
1612   } else if (operand->HasFixedFPRegisterPolicy()) {
1613     DCHECK(IsFloatingPoint(rep));
1614     DCHECK_NE(InstructionOperand::kInvalidVirtualRegister, virtual_register);
1615     allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
1616                                  operand->fixed_register_index());
1617   } else {
1618     UNREACHABLE();
1619   }
1620   InstructionOperand::ReplaceWith(operand, &allocated);
1621   if (is_tagged) {
1622     TRACE("Fixed reg is tagged at %d\n", pos);
1623     Instruction* instr = code()->InstructionAt(pos);
1624     if (instr->HasReferenceMap()) {
1625       instr->reference_map()->RecordReference(*AllocatedOperand::cast(operand));
1626     }
1627   }
1628   return operand;
1629 }
1630 
1631 
1632 void ConstraintBuilder::MeetRegisterConstraints() {
1633   for (InstructionBlock* block : code()->instruction_blocks()) {
1634     MeetRegisterConstraints(block);
1635   }
1636 }
1637 
1638 
1639 void ConstraintBuilder::MeetRegisterConstraints(const InstructionBlock* block) {
1640   int start = block->first_instruction_index();
1641   int end = block->last_instruction_index();
1642   DCHECK_NE(-1, start);
1643   for (int i = start; i <= end; ++i) {
1644     MeetConstraintsBefore(i);
1645     if (i != end) MeetConstraintsAfter(i);
1646   }
1647   // Meet register constraints for the last instruction in the block.
1648   MeetRegisterConstraintsForLastInstructionInBlock(block);
1649 }
1650 
1651 
1652 void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
1653     const InstructionBlock* block) {
1654   int end = block->last_instruction_index();
1655   Instruction* last_instruction = code()->InstructionAt(end);
1656   for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
1657     InstructionOperand* output_operand = last_instruction->OutputAt(i);
1658     DCHECK(!output_operand->IsConstant());
1659     UnallocatedOperand* output = UnallocatedOperand::cast(output_operand);
1660     int output_vreg = output->virtual_register();
1661     TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
1662     bool assigned = false;
1663     if (output->HasFixedPolicy()) {
1664       AllocateFixed(output, -1, false);
1665       // This value is produced on the stack; we never need to spill it.
1666       if (output->IsStackSlot()) {
1667         DCHECK(LocationOperand::cast(output)->index() <
1668                data()->frame()->GetSpillSlotCount());
1669         range->SetSpillOperand(LocationOperand::cast(output));
1670         range->SetSpillStartIndex(end);
1671         assigned = true;
1672       }
1673 
1674       for (const RpoNumber& succ : block->successors()) {
1675         const InstructionBlock* successor = code()->InstructionBlockAt(succ);
1676         DCHECK(successor->PredecessorCount() == 1);
1677         int gap_index = successor->first_instruction_index();
1678         // Create an unconstrained operand for the same virtual register
1679         // and insert a gap move from the fixed output to the operand.
1680         UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg);
1681         data()->AddGapMove(gap_index, Instruction::START, *output, output_copy);
1682       }
1683     }
1684 
1685     if (!assigned) {
1686       for (const RpoNumber& succ : block->successors()) {
1687         const InstructionBlock* successor = code()->InstructionBlockAt(succ);
1688         DCHECK(successor->PredecessorCount() == 1);
1689         int gap_index = successor->first_instruction_index();
1690         range->RecordSpillLocation(allocation_zone(), gap_index, output);
1691         range->SetSpillStartIndex(gap_index);
1692       }
1693     }
1694   }
1695 }
1696 
1697 
1698 void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
1699   Instruction* first = code()->InstructionAt(instr_index);
1700   // Handle fixed temporaries.
1701   for (size_t i = 0; i < first->TempCount(); i++) {
1702     UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i));
1703     if (temp->HasFixedPolicy()) AllocateFixed(temp, instr_index, false);
1704   }
1705   // Handle constant/fixed output operands.
1706   for (size_t i = 0; i < first->OutputCount(); i++) {
1707     InstructionOperand* output = first->OutputAt(i);
1708     if (output->IsConstant()) {
1709       int output_vreg = ConstantOperand::cast(output)->virtual_register();
1710       TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
1711       range->SetSpillStartIndex(instr_index + 1);
1712       range->SetSpillOperand(output);
1713       continue;
1714     }
1715     UnallocatedOperand* first_output = UnallocatedOperand::cast(output);
1716     TopLevelLiveRange* range =
1717         data()->GetOrCreateLiveRangeFor(first_output->virtual_register());
1718     bool assigned = false;
1719     if (first_output->HasFixedPolicy()) {
1720       int output_vreg = first_output->virtual_register();
1721       UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg);
1722       bool is_tagged = code()->IsReference(output_vreg);
1723       if (first_output->HasSecondaryStorage()) {
1724         range->MarkHasPreassignedSlot();
1725         data()->preassigned_slot_ranges().push_back(
1726             std::make_pair(range, first_output->GetSecondaryStorage()));
1727       }
1728       AllocateFixed(first_output, instr_index, is_tagged);
1729 
1730       // This value is produced on the stack; we never need to spill it.
1731       if (first_output->IsStackSlot()) {
1732         DCHECK(LocationOperand::cast(first_output)->index() <
1733                data()->frame()->GetTotalFrameSlotCount());
1734         range->SetSpillOperand(LocationOperand::cast(first_output));
1735         range->SetSpillStartIndex(instr_index + 1);
1736         assigned = true;
1737       }
1738       data()->AddGapMove(instr_index + 1, Instruction::START, *first_output,
1739                          output_copy);
1740     }
1741     // Make sure we add a gap move for spilling (if we have not done
1742     // so already).
1743     if (!assigned) {
1744       range->RecordSpillLocation(allocation_zone(), instr_index + 1,
1745                                  first_output);
1746       range->SetSpillStartIndex(instr_index + 1);
1747     }
1748   }
1749 }
1750 
1751 
1752 void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
1753   Instruction* second = code()->InstructionAt(instr_index);
1754   // Handle fixed input operands of second instruction.
1755   for (size_t i = 0; i < second->InputCount(); i++) {
1756     InstructionOperand* input = second->InputAt(i);
1757     if (input->IsImmediate() || input->IsExplicit()) {
1758       continue;  // Ignore immediates and explicitly reserved registers.
1759     }
1760     UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
1761     if (cur_input->HasFixedPolicy()) {
1762       int input_vreg = cur_input->virtual_register();
1763       UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg);
1764       bool is_tagged = code()->IsReference(input_vreg);
1765       AllocateFixed(cur_input, instr_index, is_tagged);
1766       data()->AddGapMove(instr_index, Instruction::END, input_copy, *cur_input);
1767     }
1768   }
1769   // Handle "output same as input" for second instruction.
1770   for (size_t i = 0; i < second->OutputCount(); i++) {
1771     InstructionOperand* output = second->OutputAt(i);
1772     if (!output->IsUnallocated()) continue;
1773     UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
1774     if (!second_output->HasSameAsInputPolicy()) continue;
1775     DCHECK(i == 0);  // Only valid for first output.
1776     UnallocatedOperand* cur_input =
1777         UnallocatedOperand::cast(second->InputAt(0));
1778     int output_vreg = second_output->virtual_register();
1779     int input_vreg = cur_input->virtual_register();
1780     UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg);
1781     cur_input->set_virtual_register(second_output->virtual_register());
1782     MoveOperands* gap_move = data()->AddGapMove(instr_index, Instruction::END,
1783                                                 input_copy, *cur_input);
1784     if (code()->IsReference(input_vreg) && !code()->IsReference(output_vreg)) {
1785       if (second->HasReferenceMap()) {
1786         RegisterAllocationData::DelayedReference delayed_reference = {
1787             second->reference_map(), &gap_move->source()};
1788         data()->delayed_references().push_back(delayed_reference);
1789       }
1790     } else if (!code()->IsReference(input_vreg) &&
1791                code()->IsReference(output_vreg)) {
1792       // The input is assumed to immediately have a tagged representation,
1793       // before the pointer map can be used. I.e. the pointer map at the
1794       // instruction will include the output operand (whose value at the
1795       // beginning of the instruction is equal to the input operand). If
1796       // this is not desired, then the pointer map at this instruction needs
1797       // to be adjusted manually.
1798     }
1799   }
1800 }
1801 
1802 
1803 void ConstraintBuilder::ResolvePhis() {
1804   // Process the blocks in reverse order.
1805   for (InstructionBlock* block : base::Reversed(code()->instruction_blocks())) {
1806     ResolvePhis(block);
1807   }
1808 }
1809 
1810 
1811 void ConstraintBuilder::ResolvePhis(const InstructionBlock* block) {
1812   for (PhiInstruction* phi : block->phis()) {
1813     int phi_vreg = phi->virtual_register();
1814     RegisterAllocationData::PhiMapValue* map_value =
1815         data()->InitializePhiMap(block, phi);
1816     InstructionOperand& output = phi->output();
1817     // Map the destination operands, so the commitment phase can find them.
1818     for (size_t i = 0; i < phi->operands().size(); ++i) {
1819       InstructionBlock* cur_block =
1820           code()->InstructionBlockAt(block->predecessors()[i]);
1821       UnallocatedOperand input(UnallocatedOperand::ANY, phi->operands()[i]);
1822       MoveOperands* move = data()->AddGapMove(
1823           cur_block->last_instruction_index(), Instruction::END, input, output);
1824       map_value->AddOperand(&move->destination());
1825       DCHECK(!code()
1826                   ->InstructionAt(cur_block->last_instruction_index())
1827                   ->HasReferenceMap());
1828     }
1829     TopLevelLiveRange* live_range = data()->GetOrCreateLiveRangeFor(phi_vreg);
1830     int gap_index = block->first_instruction_index();
1831     live_range->RecordSpillLocation(allocation_zone(), gap_index, &output);
1832     live_range->SetSpillStartIndex(gap_index);
1833     // We use the phi-ness of some nodes in some later heuristics.
1834     live_range->set_is_phi(true);
1835     live_range->set_is_non_loop_phi(!block->IsLoopHeader());
1836   }
1837 }
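// Illustrative sketch (not part of the original file; the virtual register
// and block numbers are made up): for a block B3 with predecessors B1 and B2
// and a phi  v7 = phi(v3, v5),  this pass adds the gap move "v7 = v3" at the
// END position of B1's last instruction and "v7 = v5" at the END position of
// B2's last instruction, and records B3's first gap index as the default
// spill location for v7.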
1838 
1839 
1840 LiveRangeBuilder::LiveRangeBuilder(RegisterAllocationData* data,
1841                                    Zone* local_zone)
1842     : data_(data), phi_hints_(local_zone) {}
1843 
1844 
1845 BitVector* LiveRangeBuilder::ComputeLiveOut(const InstructionBlock* block,
1846                                             RegisterAllocationData* data) {
1847   size_t block_index = block->rpo_number().ToSize();
1848   BitVector* live_out = data->live_out_sets()[block_index];
1849   if (live_out == nullptr) {
1850     // Compute live out for the given block, excluding backward successor
1851     // edges.
1852     Zone* zone = data->allocation_zone();
1853     const InstructionSequence* code = data->code();
1854 
1855     live_out = new (zone) BitVector(code->VirtualRegisterCount(), zone);
1856 
1857     // Process all successor blocks.
1858     for (const RpoNumber& succ : block->successors()) {
1859       // Add values live on entry to the successor.
1860       if (succ <= block->rpo_number()) continue;
1861       BitVector* live_in = data->live_in_sets()[succ.ToSize()];
1862       if (live_in != nullptr) live_out->Union(*live_in);
1863 
1864       // All phi input operands corresponding to this successor edge are live
1865       // out from this block.
1866       const InstructionBlock* successor = code->InstructionBlockAt(succ);
1867       size_t index = successor->PredecessorIndexOf(block->rpo_number());
1868       DCHECK(index < successor->PredecessorCount());
1869       for (PhiInstruction* phi : successor->phis()) {
1870         live_out->Add(phi->operands()[index]);
1871       }
1872     }
1873     data->live_out_sets()[block_index] = live_out;
1874   }
1875   return live_out;
1876 }
1877 
1878 
1879 void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block,
1880                                            BitVector* live_out) {
1881   // Add an interval that includes the entire block to the live range for
1882   // each live_out value.
1883   LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
1884       block->first_instruction_index());
1885   LifetimePosition end = LifetimePosition::InstructionFromInstructionIndex(
1886                              block->last_instruction_index())
1887                              .NextStart();
1888   BitVector::Iterator iterator(live_out);
1889   while (!iterator.Done()) {
1890     int operand_index = iterator.Current();
1891     TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
1892     range->AddUseInterval(start, end, allocation_zone());
1893     iterator.Advance();
1894   }
1895 }
1896 
1897 int LiveRangeBuilder::FixedFPLiveRangeID(int index, MachineRepresentation rep) {
1898   int result = -index - 1;
1899   switch (rep) {
1900     case MachineRepresentation::kSimd128:
1901       result -= config()->num_float_registers();
1902     // Fall through.
1903     case MachineRepresentation::kFloat32:
1904       result -= config()->num_double_registers();
1905     // Fall through.
1906     case MachineRepresentation::kFloat64:
1907       result -= config()->num_general_registers();
1908       break;
1909     default:
1910       UNREACHABLE();
1911       break;
1912   }
1913   return result;
1914 }
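// Worked example (illustrative only; the register counts below are assumed,
// not taken from any particular configuration): with 16 general, 16 double
// and 32 float registers, fixed kFloat64 register 0 gets ID -1 - 16 = -17,
// fixed kFloat32 register 0 gets -1 - 16 - 16 = -33, and fixed kSimd128
// register 0 gets -1 - 32 - 16 - 16 = -65, so the fixed FP ranges of each
// representation occupy disjoint negative ID spaces.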
1915 
1916 TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) {
1917   DCHECK(index < config()->num_general_registers());
1918   TopLevelLiveRange* result = data()->fixed_live_ranges()[index];
1919   if (result == nullptr) {
1920     MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
1921     result = data()->NewLiveRange(FixedLiveRangeID(index), rep);
1922     DCHECK(result->IsFixed());
1923     result->set_assigned_register(index);
1924     data()->MarkAllocated(rep, index);
1925     data()->fixed_live_ranges()[index] = result;
1926   }
1927   return result;
1928 }
1929 
1930 TopLevelLiveRange* LiveRangeBuilder::FixedFPLiveRangeFor(
1931     int index, MachineRepresentation rep) {
1932   int num_regs = config()->num_double_registers();
1933   ZoneVector<TopLevelLiveRange*>* live_ranges =
1934       &data()->fixed_double_live_ranges();
1935   if (!kSimpleFPAliasing) {
1936     switch (rep) {
1937       case MachineRepresentation::kFloat32:
1938         num_regs = config()->num_float_registers();
1939         live_ranges = &data()->fixed_float_live_ranges();
1940         break;
1941       case MachineRepresentation::kSimd128:
1942         num_regs = config()->num_simd128_registers();
1943         live_ranges = &data()->fixed_simd128_live_ranges();
1944         break;
1945       default:
1946         break;
1947     }
1948   }
1949 
1950   DCHECK(index < num_regs);
1951   USE(num_regs);
1952   TopLevelLiveRange* result = (*live_ranges)[index];
1953   if (result == nullptr) {
1954     result = data()->NewLiveRange(FixedFPLiveRangeID(index, rep), rep);
1955     DCHECK(result->IsFixed());
1956     result->set_assigned_register(index);
1957     data()->MarkAllocated(rep, index);
1958     (*live_ranges)[index] = result;
1959   }
1960   return result;
1961 }
1962 
1963 TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand) {
1964   if (operand->IsUnallocated()) {
1965     return data()->GetOrCreateLiveRangeFor(
1966         UnallocatedOperand::cast(operand)->virtual_register());
1967   } else if (operand->IsConstant()) {
1968     return data()->GetOrCreateLiveRangeFor(
1969         ConstantOperand::cast(operand)->virtual_register());
1970   } else if (operand->IsRegister()) {
1971     return FixedLiveRangeFor(
1972         LocationOperand::cast(operand)->GetRegister().code());
1973   } else if (operand->IsFPRegister()) {
1974     LocationOperand* op = LocationOperand::cast(operand);
1975     return FixedFPLiveRangeFor(op->register_code(), op->representation());
1976   } else {
1977     return nullptr;
1978   }
1979 }
1980 
1981 
1982 UsePosition* LiveRangeBuilder::NewUsePosition(LifetimePosition pos,
1983                                               InstructionOperand* operand,
1984                                               void* hint,
1985                                               UsePositionHintType hint_type) {
1986   return new (allocation_zone()) UsePosition(pos, operand, hint, hint_type);
1987 }
1988 
1989 
1990 UsePosition* LiveRangeBuilder::Define(LifetimePosition position,
1991                                       InstructionOperand* operand, void* hint,
1992                                       UsePositionHintType hint_type) {
1993   TopLevelLiveRange* range = LiveRangeFor(operand);
1994   if (range == nullptr) return nullptr;
1995 
1996   if (range->IsEmpty() || range->Start() > position) {
1997     // Can happen if there is a definition without use.
1998     range->AddUseInterval(position, position.NextStart(), allocation_zone());
1999     range->AddUsePosition(NewUsePosition(position.NextStart()));
2000   } else {
2001     range->ShortenTo(position);
2002   }
2003   if (!operand->IsUnallocated()) return nullptr;
2004   UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
2005   UsePosition* use_pos =
2006       NewUsePosition(position, unalloc_operand, hint, hint_type);
2007   range->AddUsePosition(use_pos);
2008   return use_pos;
2009 }
2010 
2011 
2012 UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start,
2013                                    LifetimePosition position,
2014                                    InstructionOperand* operand, void* hint,
2015                                    UsePositionHintType hint_type) {
2016   TopLevelLiveRange* range = LiveRangeFor(operand);
2017   if (range == nullptr) return nullptr;
2018   UsePosition* use_pos = nullptr;
2019   if (operand->IsUnallocated()) {
2020     UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
2021     use_pos = NewUsePosition(position, unalloc_operand, hint, hint_type);
2022     range->AddUsePosition(use_pos);
2023   }
2024   range->AddUseInterval(block_start, position, allocation_zone());
2025   return use_pos;
2026 }
2027 
2028 
2029 void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
2030                                            BitVector* live) {
2031   int block_start = block->first_instruction_index();
2032   LifetimePosition block_start_position =
2033       LifetimePosition::GapFromInstructionIndex(block_start);
2034   bool fixed_float_live_ranges = false;
2035   bool fixed_simd128_live_ranges = false;
2036   if (!kSimpleFPAliasing) {
2037     int mask = data()->code()->representation_mask();
2038     fixed_float_live_ranges = (mask & kFloatRepBit) != 0;
2039     fixed_simd128_live_ranges = (mask & kSimd128RepBit) != 0;
2040   }
2041 
2042   for (int index = block->last_instruction_index(); index >= block_start;
2043        index--) {
2044     LifetimePosition curr_position =
2045         LifetimePosition::InstructionFromInstructionIndex(index);
2046     Instruction* instr = code()->InstructionAt(index);
2047     DCHECK(instr != nullptr);
2048     DCHECK(curr_position.IsInstructionPosition());
2049     // Process output, inputs, and temps of this instruction.
2050     for (size_t i = 0; i < instr->OutputCount(); i++) {
2051       InstructionOperand* output = instr->OutputAt(i);
2052       if (output->IsUnallocated()) {
2053         // Unsupported.
2054         DCHECK(!UnallocatedOperand::cast(output)->HasSlotPolicy());
2055         int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
2056         live->Remove(out_vreg);
2057       } else if (output->IsConstant()) {
2058         int out_vreg = ConstantOperand::cast(output)->virtual_register();
2059         live->Remove(out_vreg);
2060       }
2061       if (block->IsHandler() && index == block_start && output->IsAllocated() &&
2062           output->IsRegister() &&
2063           AllocatedOperand::cast(output)->GetRegister().is(
2064               v8::internal::kReturnRegister0)) {
2065         // The register defined here is blocked from gap start - it is the
2066         // exception value.
2067         // TODO(mtrofin): should we explore an explicit opcode for
2068         // the first instruction in the handler?
2069         Define(LifetimePosition::GapFromInstructionIndex(index), output);
2070       } else {
2071         Define(curr_position, output);
2072       }
2073     }
2074 
2075     if (instr->ClobbersRegisters()) {
2076       for (int i = 0; i < config()->num_allocatable_general_registers(); ++i) {
2077         // Create a UseInterval at this instruction for all fixed registers,
2078         // (including the instruction outputs). Adding another UseInterval here
2079         // is OK because AddUseInterval will just merge it with the existing
2080         // one at the end of the range.
2081         int code = config()->GetAllocatableGeneralCode(i);
2082         TopLevelLiveRange* range = FixedLiveRangeFor(code);
2083         range->AddUseInterval(curr_position, curr_position.End(),
2084                               allocation_zone());
2085       }
2086     }
2087 
2088     if (instr->ClobbersDoubleRegisters()) {
2089       for (int i = 0; i < config()->num_allocatable_double_registers(); ++i) {
2090         // Add a UseInterval for all DoubleRegisters. See comment above for
2091         // general registers.
2092         int code = config()->GetAllocatableDoubleCode(i);
2093         TopLevelLiveRange* range =
2094             FixedFPLiveRangeFor(code, MachineRepresentation::kFloat64);
2095         range->AddUseInterval(curr_position, curr_position.End(),
2096                               allocation_zone());
2097       }
2098       // Clobber fixed float registers on archs with non-simple aliasing.
2099       if (!kSimpleFPAliasing) {
2100         if (fixed_float_live_ranges) {
2101           for (int i = 0; i < config()->num_allocatable_float_registers();
2102                ++i) {
2103             // Add a UseInterval for all FloatRegisters. See comment above for
2104             // general registers.
2105             int code = config()->GetAllocatableFloatCode(i);
2106             TopLevelLiveRange* range =
2107                 FixedFPLiveRangeFor(code, MachineRepresentation::kFloat32);
2108             range->AddUseInterval(curr_position, curr_position.End(),
2109                                   allocation_zone());
2110           }
2111         }
2112         if (fixed_simd128_live_ranges) {
2113           for (int i = 0; i < config()->num_allocatable_simd128_registers();
2114                ++i) {
2115             int code = config()->GetAllocatableSimd128Code(i);
2116             TopLevelLiveRange* range =
2117                 FixedFPLiveRangeFor(code, MachineRepresentation::kSimd128);
2118             range->AddUseInterval(curr_position, curr_position.End(),
2119                                   allocation_zone());
2120           }
2121         }
2122       }
2123     }
2124 
2125     for (size_t i = 0; i < instr->InputCount(); i++) {
2126       InstructionOperand* input = instr->InputAt(i);
2127       if (input->IsImmediate() || input->IsExplicit()) {
2128         continue;  // Ignore immediates and explicitly reserved registers.
2129       }
2130       LifetimePosition use_pos;
2131       if (input->IsUnallocated() &&
2132           UnallocatedOperand::cast(input)->IsUsedAtStart()) {
2133         use_pos = curr_position;
2134       } else {
2135         use_pos = curr_position.End();
2136       }
2137 
2138       if (input->IsUnallocated()) {
2139         UnallocatedOperand* unalloc = UnallocatedOperand::cast(input);
2140         int vreg = unalloc->virtual_register();
2141         live->Add(vreg);
2142         if (unalloc->HasSlotPolicy()) {
2143           data()->GetOrCreateLiveRangeFor(vreg)->set_has_slot_use(true);
2144         }
2145       }
2146       Use(block_start_position, use_pos, input);
2147     }
2148 
2149     for (size_t i = 0; i < instr->TempCount(); i++) {
2150       InstructionOperand* temp = instr->TempAt(i);
2151       // Unsupported.
2152       DCHECK_IMPLIES(temp->IsUnallocated(),
2153                      !UnallocatedOperand::cast(temp)->HasSlotPolicy());
2154       if (instr->ClobbersTemps()) {
2155         if (temp->IsRegister()) continue;
2156         if (temp->IsUnallocated()) {
2157           UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp);
2158           if (temp_unalloc->HasFixedPolicy()) {
2159             continue;
2160           }
2161         }
2162       }
2163       Use(block_start_position, curr_position.End(), temp);
2164       Define(curr_position, temp);
2165     }
2166 
2167     // Process the moves of the instruction's gaps, making their sources live.
2168     const Instruction::GapPosition kPositions[] = {Instruction::END,
2169                                                    Instruction::START};
2170     curr_position = curr_position.PrevStart();
2171     DCHECK(curr_position.IsGapPosition());
2172     for (const Instruction::GapPosition& position : kPositions) {
2173       ParallelMove* move = instr->GetParallelMove(position);
2174       if (move == nullptr) continue;
2175       if (position == Instruction::END) {
2176         curr_position = curr_position.End();
2177       } else {
2178         curr_position = curr_position.Start();
2179       }
2180       for (MoveOperands* cur : *move) {
2181         InstructionOperand& from = cur->source();
2182         InstructionOperand& to = cur->destination();
2183         void* hint = &to;
2184         UsePositionHintType hint_type = UsePosition::HintTypeForOperand(to);
2185         UsePosition* to_use = nullptr;
2186         int phi_vreg = -1;
2187         if (to.IsUnallocated()) {
2188           int to_vreg = UnallocatedOperand::cast(to).virtual_register();
2189           TopLevelLiveRange* to_range =
2190               data()->GetOrCreateLiveRangeFor(to_vreg);
2191           if (to_range->is_phi()) {
2192             phi_vreg = to_vreg;
2193             if (to_range->is_non_loop_phi()) {
2194               hint = to_range->current_hint_position();
2195               hint_type = hint == nullptr ? UsePositionHintType::kNone
2196                                           : UsePositionHintType::kUsePos;
2197             } else {
2198               hint_type = UsePositionHintType::kPhi;
2199               hint = data()->GetPhiMapValueFor(to_vreg);
2200             }
2201           } else {
2202             if (live->Contains(to_vreg)) {
2203               to_use = Define(curr_position, &to, &from,
2204                               UsePosition::HintTypeForOperand(from));
2205               live->Remove(to_vreg);
2206             } else {
2207               cur->Eliminate();
2208               continue;
2209             }
2210           }
2211         } else {
2212           Define(curr_position, &to);
2213         }
2214         UsePosition* from_use =
2215             Use(block_start_position, curr_position, &from, hint, hint_type);
2216         // Mark range live.
2217         if (from.IsUnallocated()) {
2218           live->Add(UnallocatedOperand::cast(from).virtual_register());
2219         }
2220         // Resolve use position hints just created.
2221         if (to_use != nullptr && from_use != nullptr) {
2222           to_use->ResolveHint(from_use);
2223           from_use->ResolveHint(to_use);
2224         }
2225         DCHECK_IMPLIES(to_use != nullptr, to_use->IsResolved());
2226         DCHECK_IMPLIES(from_use != nullptr, from_use->IsResolved());
2227         // Potentially resolve phi hint.
2228         if (phi_vreg != -1) ResolvePhiHint(&from, from_use);
2229       }
2230     }
2231   }
2232 }
2233 
2234 void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
2235                                    BitVector* live) {
2236   for (PhiInstruction* phi : block->phis()) {
2237     // The live range interval already ends at the first instruction of the
2238     // block.
2239     int phi_vreg = phi->virtual_register();
2240     live->Remove(phi_vreg);
2241     // Select a hint from a predecessor block that precedes this block in the
2242     // rpo order. In order of priority:
2243     // - Avoid hints from deferred blocks.
2244     // - Prefer hints from allocated (or explicit) operands.
2245     // - Prefer hints from empty blocks (containing just parallel moves and a
2246     //   jump). In these cases, if we can elide the moves, the jump threader
2247     //   is likely to be able to elide the jump.
2248     // The enforcement of hinting in rpo order is required because hint
2249     // resolution that happens later in the compiler pipeline visits
2250     // instructions in reverse rpo order, relying on the fact that phis are
2251     // encountered before their hints.
2252     InstructionOperand* hint = nullptr;
2253     int hint_preference = 0;
2254 
2255     // The cost of hinting increases with the number of predecessors. At the
2256     // same time, the typical benefit decreases, since this hinting only
2257     // optimises the execution path through one predecessor. A limit of 2 is
2258     // sufficient to hit the common if/else pattern.
2259     int predecessor_limit = 2;
2260 
2261     for (RpoNumber predecessor : block->predecessors()) {
2262       const InstructionBlock* predecessor_block =
2263           code()->InstructionBlockAt(predecessor);
2264       DCHECK_EQ(predecessor_block->rpo_number(), predecessor);
2265 
2266       // Only take hints from earlier rpo numbers.
2267       if (predecessor >= block->rpo_number()) continue;
2268 
2269       // Look up the predecessor instruction.
2270       const Instruction* predecessor_instr =
2271           GetLastInstruction(code(), predecessor_block);
2272       InstructionOperand* predecessor_hint = nullptr;
2273       // Phis are assigned in the END position of the last instruction in each
2274       // predecessor block.
2275       for (MoveOperands* move :
2276            *predecessor_instr->GetParallelMove(Instruction::END)) {
2277         InstructionOperand& to = move->destination();
2278         if (to.IsUnallocated() &&
2279             UnallocatedOperand::cast(to).virtual_register() == phi_vreg) {
2280           predecessor_hint = &move->source();
2281           break;
2282         }
2283       }
2284       DCHECK_NOT_NULL(predecessor_hint);
2285 
2286       // For each predecessor, generate a score according to the priorities
2287       // described above, and pick the best one. Flags in higher-order bits have
2288       // a higher priority than those in lower-order bits.
2289       int predecessor_hint_preference = 0;
2290       const int kNotDeferredBlockPreference = (1 << 2);
2291       const int kMoveIsAllocatedPreference = (1 << 1);
2292       const int kBlockIsEmptyPreference = (1 << 0);
2293 
2294       // - Avoid hints from deferred blocks.
2295       if (!predecessor_block->IsDeferred()) {
2296         predecessor_hint_preference |= kNotDeferredBlockPreference;
2297       }
2298 
2299       // - Prefer hints from allocated (or explicit) operands.
2300       //
2301       // Already-allocated or explicit operands are typically assigned using
2302       // the parallel moves on the last instruction. For example:
2303       //
2304       //      gap (v101 = [x0|R|w32]) (v100 = v101)
2305       //      ArchJmp
2306       //    ...
2307       //    phi: v100 = v101 v102
2308       //
2309       // We have already found the END move, so look for a matching START move
2310       // from an allocated (or explicit) operand.
2311       //
2312       // Note that we cannot simply look up data()->live_ranges()[vreg] here
2313       // because the live ranges are still being built when this function is
2314       // called.
2315       // TODO(v8): Find a way to separate hinting from live range analysis in
2316       // BuildLiveRanges so that we can use the O(1) live-range look-up.
2317       auto moves = predecessor_instr->GetParallelMove(Instruction::START);
2318       if (moves != nullptr) {
2319         for (MoveOperands* move : *moves) {
2320           InstructionOperand& to = move->destination();
2321           if (predecessor_hint->Equals(to)) {
2322             if (move->source().IsAllocated() || move->source().IsExplicit()) {
2323               predecessor_hint_preference |= kMoveIsAllocatedPreference;
2324             }
2325             break;
2326           }
2327         }
2328       }
2329 
2330       // - Prefer hints from empty blocks.
2331       if (predecessor_block->last_instruction_index() ==
2332           predecessor_block->first_instruction_index()) {
2333         predecessor_hint_preference |= kBlockIsEmptyPreference;
2334       }
2335 
2336       if ((hint == nullptr) ||
2337           (predecessor_hint_preference > hint_preference)) {
2338         // Take the hint from this predecessor.
2339         hint = predecessor_hint;
2340         hint_preference = predecessor_hint_preference;
2341       }
2342 
2343       if (--predecessor_limit <= 0) break;
2344     }
2345     DCHECK_NOT_NULL(hint);
2346 
2347     LifetimePosition block_start = LifetimePosition::GapFromInstructionIndex(
2348         block->first_instruction_index());
2349     UsePosition* use_pos = Define(block_start, &phi->output(), hint,
2350                                   UsePosition::HintTypeForOperand(*hint));
2351     MapPhiHint(hint, use_pos);
2352   }
2353 }
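// Worked example of the hint scoring above (illustrative only): with the
// preference bits kNotDeferredBlockPreference = 4,
// kMoveIsAllocatedPreference = 2 and kBlockIsEmptyPreference = 1, a
// non-deferred, empty predecessor whose move source is already allocated
// scores 4 + 2 + 1 = 7 and wins over any deferred predecessor, which can
// score at most 2 + 1 = 3.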
2354 
2355 
2356 void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block,
2357                                          BitVector* live) {
2358   DCHECK(block->IsLoopHeader());
2359   // Add a live range stretching from the first loop instruction to the last
2360   // for each value live on entry to the header.
2361   BitVector::Iterator iterator(live);
2362   LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
2363       block->first_instruction_index());
2364   LifetimePosition end = LifetimePosition::GapFromInstructionIndex(
2365                              code()->LastLoopInstructionIndex(block))
2366                              .NextFullStart();
2367   while (!iterator.Done()) {
2368     int operand_index = iterator.Current();
2369     TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
2370     range->EnsureInterval(start, end, allocation_zone());
2371     iterator.Advance();
2372   }
2373   // Insert all values into the live in sets of all blocks in the loop.
2374   for (int i = block->rpo_number().ToInt() + 1; i < block->loop_end().ToInt();
2375        ++i) {
2376     live_in_sets()[i]->Union(*live);
2377   }
2378 }
2379 
2380 
2381 void LiveRangeBuilder::BuildLiveRanges() {
2382   // Process the blocks in reverse order.
2383   for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
2384        --block_id) {
2385     InstructionBlock* block =
2386         code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
2387     BitVector* live = ComputeLiveOut(block, data());
2388     // Initially consider all live_out values live for the entire block. We
2389     // will shorten these intervals if necessary.
2390     AddInitialIntervals(block, live);
2391     // Process the instructions in reverse order, generating and killing
2392     // live values.
2393     ProcessInstructions(block, live);
2394     // All phi output operands are killed by this block.
2395     ProcessPhis(block, live);
2396     // Now live is live_in for this block, excluding values that are live out
2397     // on backward successor edges.
2398     if (block->IsLoopHeader()) ProcessLoopHeader(block, live);
2399     live_in_sets()[block_id] = live;
2400   }
2401   // Postprocess the ranges.
2402   for (TopLevelLiveRange* range : data()->live_ranges()) {
2403     if (range == nullptr) continue;
2404     // Give slots to all ranges with a non-fixed slot use.
2405     if (range->has_slot_use() && range->HasNoSpillType()) {
2406       data()->AssignSpillRangeToLiveRange(range);
2407     }
2408     // TODO(bmeurer): This is a horrible hack to make sure that for constant
2409     // live ranges, every use requires the constant to be in a register.
2410     // Without this hack, all uses with "any" policy would get the constant
2411     // operand assigned.
2412     if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
2413       for (UsePosition* pos = range->first_pos(); pos != nullptr;
2414            pos = pos->next()) {
2415         if (pos->type() == UsePositionType::kRequiresSlot) continue;
2416         UsePositionType new_type = UsePositionType::kAny;
2417         // Can't mark phis as needing a register.
2418         if (!pos->pos().IsGapPosition()) {
2419           new_type = UsePositionType::kRequiresRegister;
2420         }
2421         pos->set_type(new_type, true);
2422       }
2423     }
2424   }
2425   for (auto preassigned : data()->preassigned_slot_ranges()) {
2426     TopLevelLiveRange* range = preassigned.first;
2427     int slot_id = preassigned.second;
2428     SpillRange* spill = range->HasSpillRange()
2429                             ? range->GetSpillRange()
2430                             : data()->AssignSpillRangeToLiveRange(range);
2431     spill->set_assigned_slot(slot_id);
2432   }
2433 #ifdef DEBUG
2434   Verify();
2435 #endif
2436 }
2437 
2438 
2439 void LiveRangeBuilder::MapPhiHint(InstructionOperand* operand,
2440                                   UsePosition* use_pos) {
2441   DCHECK(!use_pos->IsResolved());
2442   auto res = phi_hints_.insert(std::make_pair(operand, use_pos));
2443   DCHECK(res.second);
2444   USE(res);
2445 }
2446 
2447 
2448 void LiveRangeBuilder::ResolvePhiHint(InstructionOperand* operand,
2449                                       UsePosition* use_pos) {
2450   auto it = phi_hints_.find(operand);
2451   if (it == phi_hints_.end()) return;
2452   DCHECK(!it->second->IsResolved());
2453   it->second->ResolveHint(use_pos);
2454 }
2455 
2456 
2457 void LiveRangeBuilder::Verify() const {
2458   for (auto& hint : phi_hints_) {
2459     CHECK(hint.second->IsResolved());
2460   }
2461   for (const TopLevelLiveRange* current : data()->live_ranges()) {
2462     if (current != nullptr && !current->IsEmpty()) {
2463       // New LiveRanges should not be split.
2464       CHECK_NULL(current->next());
2465       // General integrity check.
2466       current->Verify();
2467       const UseInterval* first = current->first_interval();
2468       if (first->next() == nullptr) continue;
2469 
2470       // Consecutive intervals should not end and start in the same block,
2471       // otherwise the intervals should have been joined, because the
2472       // variable is live throughout that block.
2473       CHECK(NextIntervalStartsInDifferentBlocks(first));
2474 
2475       for (const UseInterval* i = first->next(); i != nullptr; i = i->next()) {
2476         // Except for the first interval, the other intervals must start at
2477         // a block boundary, otherwise data wouldn't flow to them.
2478         CHECK(IntervalStartsAtBlockBoundary(i));
2479         // The last instruction of each predecessor of the block where the
2480         // interval starts must be covered by the range.
2481         CHECK(IntervalPredecessorsCoveredByRange(i, current));
2482         if (i->next() != nullptr) {
2483           // Check the consecutive intervals property, except for the last
2484           // interval, where it doesn't apply.
2485           CHECK(NextIntervalStartsInDifferentBlocks(i));
2486         }
2487       }
2488     }
2489   }
2490 }
2491 
2492 bool LiveRangeBuilder::IntervalStartsAtBlockBoundary(
2493     const UseInterval* interval) const {
2494   LifetimePosition start = interval->start();
2495   if (!start.IsFullStart()) return false;
2496   int instruction_index = start.ToInstructionIndex();
2497   const InstructionBlock* block =
2498       data()->code()->GetInstructionBlock(instruction_index);
2499   return block->first_instruction_index() == instruction_index;
2500 }
2501 
2502 bool LiveRangeBuilder::IntervalPredecessorsCoveredByRange(
2503     const UseInterval* interval, const TopLevelLiveRange* range) const {
2504   LifetimePosition start = interval->start();
2505   int instruction_index = start.ToInstructionIndex();
2506   const InstructionBlock* block =
2507       data()->code()->GetInstructionBlock(instruction_index);
2508   for (RpoNumber pred_index : block->predecessors()) {
2509     const InstructionBlock* predecessor =
2510         data()->code()->InstructionBlockAt(pred_index);
2511     LifetimePosition last_pos = LifetimePosition::GapFromInstructionIndex(
2512         predecessor->last_instruction_index());
2513     last_pos = last_pos.NextStart().End();
2514     if (!range->Covers(last_pos)) return false;
2515   }
2516   return true;
2517 }
2518 
2519 bool LiveRangeBuilder::NextIntervalStartsInDifferentBlocks(
2520     const UseInterval* interval) const {
2521   DCHECK_NOT_NULL(interval->next());
2522   LifetimePosition end = interval->end();
2523   LifetimePosition next_start = interval->next()->start();
2524       // Since end is not covered, but the previous position is, move back a
2525       // position.
2526   end = end.IsStart() ? end.PrevStart().End() : end.Start();
2527   int last_covered_index = end.ToInstructionIndex();
2528   const InstructionBlock* block =
2529       data()->code()->GetInstructionBlock(last_covered_index);
2530   const InstructionBlock* next_block =
2531       data()->code()->GetInstructionBlock(next_start.ToInstructionIndex());
2532   return block->rpo_number() < next_block->rpo_number();
2533 }
2534 
2535 RegisterAllocator::RegisterAllocator(RegisterAllocationData* data,
2536                                      RegisterKind kind)
2537     : data_(data),
2538       mode_(kind),
2539       num_registers_(GetRegisterCount(data->config(), kind)),
2540       num_allocatable_registers_(
2541           GetAllocatableRegisterCount(data->config(), kind)),
2542       allocatable_register_codes_(
2543           GetAllocatableRegisterCodes(data->config(), kind)),
2544       check_fp_aliasing_(false) {
2545   if (!kSimpleFPAliasing && kind == FP_REGISTERS) {
2546     check_fp_aliasing_ = (data->code()->representation_mask() &
2547                           (kFloatRepBit | kSimd128RepBit)) != 0;
2548   }
2549 }
2550 
2551 LifetimePosition RegisterAllocator::GetSplitPositionForInstruction(
2552     const LiveRange* range, int instruction_index) {
2553   LifetimePosition ret = LifetimePosition::Invalid();
2554 
2555   ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
2556   if (range->Start() >= ret || ret >= range->End()) {
2557     return LifetimePosition::Invalid();
2558   }
2559   return ret;
2560 }
2561 
2562 void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
2563   size_t initial_range_count = data()->live_ranges().size();
2564   for (size_t i = 0; i < initial_range_count; ++i) {
2565     TopLevelLiveRange* range = data()->live_ranges()[i];
2566     if (!CanProcessRange(range)) continue;
2567     if (range->HasNoSpillType() ||
2568         (range->HasSpillRange() && !range->has_slot_use())) {
2569       continue;
2570     }
2571     LifetimePosition start = range->Start();
2572     TRACE("Live range %d:%d is defined by a spill operand.\n",
2573           range->TopLevel()->vreg(), range->relative_id());
2574     LifetimePosition next_pos = start;
2575     if (next_pos.IsGapPosition()) {
2576       next_pos = next_pos.NextStart();
2577     }
2578 
2579     // With splinters, we can be more strict and skip over positions
2580     // not strictly needing registers.
2581     UsePosition* pos =
2582         range->IsSplinter()
2583             ? range->NextRegisterPosition(next_pos)
2584             : range->NextUsePositionRegisterIsBeneficial(next_pos);
2585     // If the range already has a spill operand and it doesn't need a
2586     // register immediately, split it and spill the first part of the range.
2587     if (pos == nullptr) {
2588       Spill(range);
2589     } else if (pos->pos() > range->Start().NextStart()) {
2590       // Do not spill the live range eagerly if the use position that can
2591       // benefit from the register is too close to the start of the live range.
2592       LifetimePosition split_pos = GetSplitPositionForInstruction(
2593           range, pos->pos().ToInstructionIndex());
2594       // There is no place to split, so we can't split and spill.
2595       if (!split_pos.IsValid()) continue;
2596 
2597       split_pos =
2598           FindOptimalSplitPos(range->Start().NextFullStart(), split_pos);
2599 
2600       SplitRangeAt(range, split_pos);
2601       Spill(range);
2602     }
2603   }
2604 }
2605 
2606 
2607 LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
2608                                            LifetimePosition pos) {
2609   DCHECK(!range->TopLevel()->IsFixed());
2610   TRACE("Splitting live range %d:%d at %d\n", range->TopLevel()->vreg(),
2611         range->relative_id(), pos.value());
2612 
2613   if (pos <= range->Start()) return range;
2614 
2615   // We can't properly connect live ranges if splitting occurred at the end
2616   // of a block.
2617   DCHECK(pos.IsStart() || pos.IsGapPosition() ||
2618          (GetInstructionBlock(code(), pos)->last_instruction_index() !=
2619           pos.ToInstructionIndex()));
2620 
2621   LiveRange* result = range->SplitAt(pos, allocation_zone());
2622   return result;
2623 }
2624 
2625 
2626 LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
2627                                            LifetimePosition start,
2628                                            LifetimePosition end) {
2629   DCHECK(!range->TopLevel()->IsFixed());
2630   TRACE("Splitting live range %d:%d in position between [%d, %d]\n",
2631         range->TopLevel()->vreg(), range->relative_id(), start.value(),
2632         end.value());
2633 
2634   LifetimePosition split_pos = FindOptimalSplitPos(start, end);
2635   DCHECK(split_pos >= start);
2636   return SplitRangeAt(range, split_pos);
2637 }
2638 
2639 
2640 LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
2641                                                         LifetimePosition end) {
2642   int start_instr = start.ToInstructionIndex();
2643   int end_instr = end.ToInstructionIndex();
2644   DCHECK(start_instr <= end_instr);
2645 
2646   // We have no choice.
2647   if (start_instr == end_instr) return end;
2648 
2649   const InstructionBlock* start_block = GetInstructionBlock(code(), start);
2650   const InstructionBlock* end_block = GetInstructionBlock(code(), end);
2651 
2652   if (end_block == start_block) {
2653     // The interval is split in the same basic block. Split at the latest
2654     // possible position.
2655     return end;
2656   }
2657 
2658   const InstructionBlock* block = end_block;
2659   // Find header of outermost loop.
2660   do {
2661     const InstructionBlock* loop = GetContainingLoop(code(), block);
2662     if (loop == nullptr ||
2663         loop->rpo_number().ToInt() <= start_block->rpo_number().ToInt()) {
2664       // No more loops or loop starts before the lifetime start.
2665       break;
2666     }
2667     block = loop;
2668   } while (true);
2669 
2670   // We did not find any suitable outer loop. Split at the latest possible
2671   // position unless end_block is a loop header itself.
2672   if (block == end_block && !end_block->IsLoopHeader()) return end;
2673 
2674   return LifetimePosition::GapFromInstructionIndex(
2675       block->first_instruction_index());
2676 }
2677 
2678 
2679 LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
2680     LiveRange* range, LifetimePosition pos) {
2681   const InstructionBlock* block = GetInstructionBlock(code(), pos.Start());
2682   const InstructionBlock* loop_header =
2683       block->IsLoopHeader() ? block : GetContainingLoop(code(), block);
2684 
2685   if (loop_header == nullptr) return pos;
2686 
2687   const UsePosition* prev_use =
2688       range->PreviousUsePositionRegisterIsBeneficial(pos);
2689 
2690   while (loop_header != nullptr) {
2691     // We are going to spill the live range inside the loop.
2692     // If possible, try to move the spilling position backwards to the loop
2693     // header. This will reduce the number of memory moves on the back edge.
2694     LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex(
2695         loop_header->first_instruction_index());
2696 
2697     if (range->Covers(loop_start)) {
2698       if (prev_use == nullptr || prev_use->pos() < loop_start) {
2699         // No register-beneficial use inside the loop before this position.
2700         pos = loop_start;
2701       }
2702     }
2703 
2704     // Try hoisting out to an outer loop.
2705     loop_header = GetContainingLoop(code(), loop_header);
2706   }
2707 
2708   return pos;
2709 }
2710 
2711 
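// Marks |range| as spilled. If its top-level range has no spill type yet, a
// spill range is assigned to it first.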
2712 void RegisterAllocator::Spill(LiveRange* range) {
2713   DCHECK(!range->spilled());
2714   TopLevelLiveRange* first = range->TopLevel();
2715   TRACE("Spilling live range %d:%d\n", first->vreg(), range->relative_id());
2716 
2717   if (first->HasNoSpillType()) {
2718     data()->AssignSpillRangeToLiveRange(first);
2719   }
2720   range->Spill();
2721 }
2722 
2723 const char* RegisterAllocator::RegisterName(int register_code) const {
2724   if (mode() == GENERAL_REGISTERS) {
2725     return data()->config()->GetGeneralRegisterName(register_code);
2726   } else {
2727     return data()->config()->GetDoubleRegisterName(register_code);
2728   }
2729 }
2730 
2731 
2732 LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
2733                                          RegisterKind kind, Zone* local_zone)
2734     : RegisterAllocator(data, kind),
2735       unhandled_live_ranges_(local_zone),
2736       active_live_ranges_(local_zone),
2737       inactive_live_ranges_(local_zone) {
2738   unhandled_live_ranges().reserve(
2739       static_cast<size_t>(code()->VirtualRegisterCount() * 2));
2740   active_live_ranges().reserve(8);
2741   inactive_live_ranges().reserve(8);
2742   // TryAllocateFreeReg and AllocateBlockedReg assume this
2743   // when allocating local arrays.
2744   DCHECK(RegisterConfiguration::kMaxFPRegisters >=
2745          this->data()->config()->num_general_registers());
2746 }
2747 
2748 
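// Main linear-scan loop: collects all unspilled ranges into the unhandled
// list (sorted so the next range to process sits at the end), seeds the
// inactive set with the fixed ranges of this register kind, and then
// processes ranges in order of increasing start position, retiring or
// deactivating active and inactive ranges as the allocation position
// advances.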
2749 void LinearScanAllocator::AllocateRegisters() {
2750   DCHECK(unhandled_live_ranges().empty());
2751   DCHECK(active_live_ranges().empty());
2752   DCHECK(inactive_live_ranges().empty());
2753 
2754   SplitAndSpillRangesDefinedByMemoryOperand();
2755 
2756   for (TopLevelLiveRange* range : data()->live_ranges()) {
2757     if (!CanProcessRange(range)) continue;
2758     for (LiveRange* to_add = range; to_add != nullptr;
2759          to_add = to_add->next()) {
2760       if (!to_add->spilled()) {
2761         AddToUnhandledUnsorted(to_add);
2762       }
2763     }
2764   }
2765   SortUnhandled();
2766   DCHECK(UnhandledIsSorted());
2767 
2768   if (mode() == GENERAL_REGISTERS) {
2769     for (TopLevelLiveRange* current : data()->fixed_live_ranges()) {
2770       if (current != nullptr) AddToInactive(current);
2771     }
2772   } else {
2773     for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
2774       if (current != nullptr) AddToInactive(current);
2775     }
2776     if (!kSimpleFPAliasing && check_fp_aliasing()) {
2777       for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) {
2778         if (current != nullptr) AddToInactive(current);
2779       }
2780       for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) {
2781         if (current != nullptr) AddToInactive(current);
2782       }
2783     }
2784   }
2785 
2786   while (!unhandled_live_ranges().empty()) {
2787     DCHECK(UnhandledIsSorted());
2788     LiveRange* current = unhandled_live_ranges().back();
2789     unhandled_live_ranges().pop_back();
2790     DCHECK(UnhandledIsSorted());
2791     LifetimePosition position = current->Start();
2792 #ifdef DEBUG
2793     allocation_finger_ = position;
2794 #endif
2795     TRACE("Processing interval %d:%d start=%d\n", current->TopLevel()->vreg(),
2796           current->relative_id(), position.value());
2797 
2798     if (current->IsTopLevel() && TryReuseSpillForPhi(current->TopLevel()))
2799       continue;
2800 
2801     for (size_t i = 0; i < active_live_ranges().size(); ++i) {
2802       LiveRange* cur_active = active_live_ranges()[i];
2803       if (cur_active->End() <= position) {
2804         ActiveToHandled(cur_active);
2805         --i;  // The live range was removed from the list of active live ranges.
2806       } else if (!cur_active->Covers(position)) {
2807         ActiveToInactive(cur_active);
2808         --i;  // The live range was removed from the list of active live ranges.
2809       }
2810     }
2811 
2812     for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
2813       LiveRange* cur_inactive = inactive_live_ranges()[i];
2814       if (cur_inactive->End() <= position) {
2815         InactiveToHandled(cur_inactive);
2816         --i;  // Live range was removed from the list of inactive live ranges.
2817       } else if (cur_inactive->Covers(position)) {
2818         InactiveToActive(cur_inactive);
2819         --i;  // Live range was removed from the list of inactive live ranges.
2820       }
2821     }
2822 
2823     DCHECK(!current->HasRegisterAssigned() && !current->spilled());
2824 
2825     ProcessCurrentRange(current);
2826   }
2827 }
2828 
2829 bool LinearScanAllocator::TrySplitAndSpillSplinter(LiveRange* range) {
2830   DCHECK(range->TopLevel()->IsSplinter());
2831   // If we can spill the whole range, great. Otherwise, split above the
2832   // first use needing a register and spill the top part.
2833   const UsePosition* next_reg = range->NextRegisterPosition(range->Start());
2834   if (next_reg == nullptr) {
2835     Spill(range);
2836     return true;
2837   } else if (range->FirstHintPosition() == nullptr) {
2838     // If there was no hint, but we have a use position requiring a
2839     // register, apply the hot path heuristics.
2840     return false;
2841   } else if (next_reg->pos().PrevStart() > range->Start()) {
2842     LiveRange* tail = SplitRangeAt(range, next_reg->pos().PrevStart());
2843     AddToUnhandledSorted(tail);
2844     Spill(range);
2845     return true;
2846   }
2847   return false;
2848 }
2849 
2850 void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
2851                                                        int reg) {
2852   data()->MarkAllocated(range->representation(), reg);
2853   range->set_assigned_register(reg);
2854   range->SetUseHints(reg);
2855   if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
2856     data()->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg);
2857   }
2858 }
2859 
2860 
2861 void LinearScanAllocator::AddToActive(LiveRange* range) {
2862   TRACE("Add live range %d:%d to active\n", range->TopLevel()->vreg(),
2863         range->relative_id());
2864   active_live_ranges().push_back(range);
2865 }
2866 
2867 
2868 void LinearScanAllocator::AddToInactive(LiveRange* range) {
2869   TRACE("Add live range %d:%d to inactive\n", range->TopLevel()->vreg(),
2870         range->relative_id());
2871   inactive_live_ranges().push_back(range);
2872 }
2873 
2874 
2875 void LinearScanAllocator::AddToUnhandledSorted(LiveRange* range) {
2876   if (range == nullptr || range->IsEmpty()) return;
2877   DCHECK(!range->HasRegisterAssigned() && !range->spilled());
2878   DCHECK(allocation_finger_ <= range->Start());
2879   for (int i = static_cast<int>(unhandled_live_ranges().size() - 1); i >= 0;
2880        --i) {
2881     LiveRange* cur_range = unhandled_live_ranges().at(i);
2882     if (!range->ShouldBeAllocatedBefore(cur_range)) continue;
2883     TRACE("Add live range %d:%d to unhandled at %d\n",
2884           range->TopLevel()->vreg(), range->relative_id(), i + 1);
2885     auto it = unhandled_live_ranges().begin() + (i + 1);
2886     unhandled_live_ranges().insert(it, range);
2887     DCHECK(UnhandledIsSorted());
2888     return;
2889   }
2890   TRACE("Add live range %d:%d to unhandled at start\n",
2891         range->TopLevel()->vreg(), range->relative_id());
2892   unhandled_live_ranges().insert(unhandled_live_ranges().begin(), range);
2893   DCHECK(UnhandledIsSorted());
2894 }
2895 
2896 
2897 void LinearScanAllocator::AddToUnhandledUnsorted(LiveRange* range) {
2898   if (range == nullptr || range->IsEmpty()) return;
2899   DCHECK(!range->HasRegisterAssigned() && !range->spilled());
2900   TRACE("Add live range %d:%d to unhandled unsorted at end\n",
2901         range->TopLevel()->vreg(), range->relative_id());
2902   unhandled_live_ranges().push_back(range);
2903 }
2904 
2905 
2906 static bool UnhandledSortHelper(LiveRange* a, LiveRange* b) {
2907   DCHECK(!a->ShouldBeAllocatedBefore(b) || !b->ShouldBeAllocatedBefore(a));
2908   if (a->ShouldBeAllocatedBefore(b)) return false;
2909   if (b->ShouldBeAllocatedBefore(a)) return true;
2910   return a->TopLevel()->vreg() < b->TopLevel()->vreg();
2911 }
2912 
2913 
2914 // Sort the unhandled live ranges so that the ranges to be processed first are
2915 // at the end of the vector. This is convenient for the register allocation
2916 // algorithm because it is efficient to remove elements from the end.
2917 void LinearScanAllocator::SortUnhandled() {
2918   TRACE("Sort unhandled\n");
2919   std::sort(unhandled_live_ranges().begin(), unhandled_live_ranges().end(),
2920             &UnhandledSortHelper);
2921 }
2922 
2923 
2924 bool LinearScanAllocator::UnhandledIsSorted() {
2925   size_t len = unhandled_live_ranges().size();
2926   for (size_t i = 1; i < len; i++) {
2927     LiveRange* a = unhandled_live_ranges().at(i - 1);
2928     LiveRange* b = unhandled_live_ranges().at(i);
2929     if (a->Start() < b->Start()) return false;
2930   }
2931   return true;
2932 }
2933 
2934 
2935 void LinearScanAllocator::ActiveToHandled(LiveRange* range) {
2936   RemoveElement(&active_live_ranges(), range);
2937   TRACE("Moving live range %d:%d from active to handled\n",
2938         range->TopLevel()->vreg(), range->relative_id());
2939 }
2940 
2941 
2942 void LinearScanAllocator::ActiveToInactive(LiveRange* range) {
2943   RemoveElement(&active_live_ranges(), range);
2944   inactive_live_ranges().push_back(range);
2945   TRACE("Moving live range %d:%d from active to inactive\n",
2946         range->TopLevel()->vreg(), range->relative_id());
2947 }
2948 
2949 
2950 void LinearScanAllocator::InactiveToHandled(LiveRange* range) {
2951   RemoveElement(&inactive_live_ranges(), range);
2952   TRACE("Moving live range %d:%d from inactive to handled\n",
2953         range->TopLevel()->vreg(), range->relative_id());
2954 }
2955 
2956 
2957 void LinearScanAllocator::InactiveToActive(LiveRange* range) {
2958   RemoveElement(&inactive_live_ranges(), range);
2959   active_live_ranges().push_back(range);
2960   TRACE("Moving live range %d:%d from inactive to active\n",
2961         range->TopLevel()->vreg(), range->relative_id());
2962 }
2963 
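// On targets with complex FP aliasing, float32 and simd128 values draw from
// their own register sets; this returns the register count and the
// allocatable codes for |rep|.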
2964 void LinearScanAllocator::GetFPRegisterSet(MachineRepresentation rep,
2965                                            int* num_regs, int* num_codes,
2966                                            const int** codes) const {
2967   DCHECK(!kSimpleFPAliasing);
2968   if (rep == MachineRepresentation::kFloat32) {
2969     *num_regs = data()->config()->num_float_registers();
2970     *num_codes = data()->config()->num_allocatable_float_registers();
2971     *codes = data()->config()->allocatable_float_codes();
2972   } else if (rep == MachineRepresentation::kSimd128) {
2973     *num_regs = data()->config()->num_simd128_registers();
2974     *num_codes = data()->config()->num_allocatable_simd128_registers();
2975     *codes = data()->config()->allocatable_simd128_codes();
2976   } else {
2977     UNREACHABLE();
2978   }
2979 }
2980 
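// For each allocatable register (or register alias), computes the position up
// to which it stays free for |range|: registers held by active ranges are
// busy immediately, while registers held by inactive ranges stay free until
// their first intersection with |range|.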
2981 void LinearScanAllocator::FindFreeRegistersForRange(
2982     LiveRange* range, Vector<LifetimePosition> positions) {
2983   int num_regs = num_registers();
2984   int num_codes = num_allocatable_registers();
2985   const int* codes = allocatable_register_codes();
2986   MachineRepresentation rep = range->representation();
2987   if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
2988                              rep == MachineRepresentation::kSimd128))
2989     GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
2990   DCHECK_GE(positions.length(), num_regs);
2991 
2992   for (int i = 0; i < num_regs; ++i) {
2993     positions[i] = LifetimePosition::MaxPosition();
2994   }
2995 
2996   for (LiveRange* cur_active : active_live_ranges()) {
2997     int cur_reg = cur_active->assigned_register();
2998     if (kSimpleFPAliasing || !check_fp_aliasing()) {
2999       positions[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
3000       TRACE("Register %s is free until pos %d (1)\n", RegisterName(cur_reg),
3001             LifetimePosition::GapFromInstructionIndex(0).value());
3002     } else {
3003       int alias_base_index = -1;
3004       int aliases = data()->config()->GetAliases(
3005           cur_active->representation(), cur_reg, rep, &alias_base_index);
3006       DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
3007       while (aliases--) {
3008         int aliased_reg = alias_base_index + aliases;
3009         positions[aliased_reg] = LifetimePosition::GapFromInstructionIndex(0);
3010       }
3011     }
3012   }
3013 
3014   for (LiveRange* cur_inactive : inactive_live_ranges()) {
3015     DCHECK(cur_inactive->End() > range->Start());
3016     int cur_reg = cur_inactive->assigned_register();
3017     // No need to carry out intersections when this register won't be
3018     // interesting to this range anyway.
3019     // TODO(mtrofin): extend to aliased ranges, too.
3020     if ((kSimpleFPAliasing || !check_fp_aliasing()) &&
3021         positions[cur_reg] < range->Start()) {
3022       continue;
3023     }
3024 
3025     LifetimePosition next_intersection = cur_inactive->FirstIntersection(range);
3026     if (!next_intersection.IsValid()) continue;
3027     if (kSimpleFPAliasing || !check_fp_aliasing()) {
3028       positions[cur_reg] = Min(positions[cur_reg], next_intersection);
3029       TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
3030             Min(positions[cur_reg], next_intersection).value());
3031     } else {
3032       int alias_base_index = -1;
3033       int aliases = data()->config()->GetAliases(
3034           cur_inactive->representation(), cur_reg, rep, &alias_base_index);
3035       DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
3036       while (aliases--) {
3037         int aliased_reg = alias_base_index + aliases;
3038         positions[aliased_reg] = Min(positions[aliased_reg], next_intersection);
3039       }
3040     }
3041   }
3042 }
3043 
3044 // High-level register allocation summary:
3045 //
3046 // For regular, or hot (i.e. not splinter) ranges, we attempt to first
3047 // allocate the preferred (hint) register. If that is not possible,
3048 // we find a register that's free, and allocate that. If that's not possible,
3049 // we search for a register to steal from a range that was allocated. The
3050 // goal is to optimize for throughput by avoiding register-to-memory
3051 // moves, which are expensive.
3052 //
3053 // For splinters, the goal is to minimize the number of moves. First we try
3054 // to allocate the preferred register (more discussion follows). Failing that,
3055 // we bail out and spill as far as we can, unless the first use is at the
3056 // start, in which case we apply the same behavior as for regular ranges.
3057 // If there is no hint, we apply the hot-path behavior.
3058 //
3059 // For the splinter, the hint register may come from:
3060 //
3061 // - the hot path (we set it at splintering time with SetHint). In this case, if
3062 // we cannot offer the hint register, spilling is better because it's at most
3063 // 1 move, while trying to find and offer another register is at least 1 move.
3064 //
3065 // - a constraint. If we cannot offer that register, it's because there is some
3066 // interference. So offering the hint register up to the interference would
3067 // result in a move at the interference, plus a move to satisfy the constraint.
3068 // This is also the number of moves if we spill, with the potential of the
3069 // range being already spilled and thus saving a move (the spill).
3070 // Note that this can only be an input constraint; if it were an output one,
3071 // the range wouldn't be a splinter, because that would mean it is defined in a
3072 // deferred block, and we don't mark those as splinters (they live in deferred
3073 // blocks only).
3076 //
3077 // - a phi. The same analysis as in the case of the input constraint applies.
3078 //
3079 void LinearScanAllocator::ProcessCurrentRange(LiveRange* current) {
3080   LifetimePosition free_until_pos_buff[RegisterConfiguration::kMaxFPRegisters];
3081   Vector<LifetimePosition> free_until_pos(
3082       free_until_pos_buff, RegisterConfiguration::kMaxFPRegisters);
3083   FindFreeRegistersForRange(current, free_until_pos);
3084   if (!TryAllocatePreferredReg(current, free_until_pos)) {
3085     if (current->TopLevel()->IsSplinter()) {
3086       if (TrySplitAndSpillSplinter(current)) return;
3087     }
3088     if (!TryAllocateFreeReg(current, free_until_pos)) {
3089       AllocateBlockedReg(current);
3090     }
3091   }
3092   if (current->HasRegisterAssigned()) {
3093     AddToActive(current);
3094   }
3095 }
3096 
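// Tries to satisfy the register hint: the hinted register is taken only if it
// stays free until the end of |current|.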
3097 bool LinearScanAllocator::TryAllocatePreferredReg(
3098     LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
3099   int hint_register;
3100   if (current->FirstHintPosition(&hint_register) != nullptr) {
3101     TRACE(
3102         "Found reg hint %s (free until [%d) for live range %d:%d (end %d[).\n",
3103         RegisterName(hint_register), free_until_pos[hint_register].value(),
3104         current->TopLevel()->vreg(), current->relative_id(),
3105         current->End().value());
3106 
3107     // The desired register is free until the end of the current live range.
3108     if (free_until_pos[hint_register] >= current->End()) {
3109       TRACE("Assigning preferred reg %s to live range %d:%d\n",
3110             RegisterName(hint_register), current->TopLevel()->vreg(),
3111             current->relative_id());
3112       SetLiveRangeAssignedRegister(current, hint_register);
3113       return true;
3114     }
3115   }
3116   return false;
3117 }
3118 
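// Picks the register that stays free the longest. If it is free past the end
// of |current|, it is assigned directly; if it is free only for a prefix,
// |current| is split at that point and the tail is re-queued; if no register
// is free at the start, we return false and the caller falls back to
// AllocateBlockedReg.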
3119 bool LinearScanAllocator::TryAllocateFreeReg(
3120     LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
3121   int num_regs = 0;  // used only for the call to GetFPRegisterSet.
3122   int num_codes = num_allocatable_registers();
3123   const int* codes = allocatable_register_codes();
3124   MachineRepresentation rep = current->representation();
3125   if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
3126                              rep == MachineRepresentation::kSimd128)) {
3127     GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
3128   }
3129 
3130   DCHECK_GE(free_until_pos.length(), num_codes);
3131 
3132   // Find the register which stays free for the longest time.
3133   int reg = codes[0];
3134   for (int i = 1; i < num_codes; ++i) {
3135     int code = codes[i];
3136     if (free_until_pos[code] > free_until_pos[reg]) {
3137       reg = code;
3138     }
3139   }
3140 
3141   LifetimePosition pos = free_until_pos[reg];
3142 
3143   if (pos <= current->Start()) {
3144     // All registers are blocked.
3145     return false;
3146   }
3147 
3148   if (pos < current->End()) {
3149     // Register reg is available at the range start but becomes blocked before
3150     // the range end. Split current at position where it becomes blocked.
3151     LiveRange* tail = SplitRangeAt(current, pos);
3152     AddToUnhandledSorted(tail);
3153   }
3154 
3155   // Register reg is available at the range start and is free until the range
3156   // end.
3157   DCHECK(pos >= current->End());
3158   TRACE("Assigning free reg %s to live range %d:%d\n", RegisterName(reg),
3159         current->TopLevel()->vreg(), current->relative_id());
3160   SetLiveRangeAssignedRegister(current, reg);
3161 
3162   return true;
3163 }
3164 
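// All registers are occupied at the start of |current|. Compute, per
// register, the next use position (use_pos) and the position at which it
// becomes blocked by a fixed or unspillable range (block_pos); then either
// spill |current| up to its first register use, or steal the register with
// the furthest next use and split/spill the intersecting ranges.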
3165 void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
3166   UsePosition* register_use = current->NextRegisterPosition(current->Start());
3167   if (register_use == nullptr) {
3168     // There is no use in the current live range that requires a register.
3169     // We can just spill it.
3170     Spill(current);
3171     return;
3172   }
3173 
3174   int num_regs = num_registers();
3175   int num_codes = num_allocatable_registers();
3176   const int* codes = allocatable_register_codes();
3177   MachineRepresentation rep = current->representation();
3178   if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
3179                              rep == MachineRepresentation::kSimd128))
3180     GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
3181 
3182   // use_pos keeps track of the positions at which a register/alias is used.
3183   // block_pos keeps track of the positions from which a register/alias is
3184   // blocked.
3185   LifetimePosition use_pos[RegisterConfiguration::kMaxFPRegisters];
3186   LifetimePosition block_pos[RegisterConfiguration::kMaxFPRegisters];
3187   for (int i = 0; i < num_regs; i++) {
3188     use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
3189   }
3190 
3191   for (LiveRange* range : active_live_ranges()) {
3192     int cur_reg = range->assigned_register();
3193     bool is_fixed_or_cant_spill =
3194         range->TopLevel()->IsFixed() || !range->CanBeSpilled(current->Start());
3195     if (kSimpleFPAliasing || !check_fp_aliasing()) {
3196       if (is_fixed_or_cant_spill) {
3197         block_pos[cur_reg] = use_pos[cur_reg] =
3198             LifetimePosition::GapFromInstructionIndex(0);
3199       } else {
3200         DCHECK_NE(LifetimePosition::GapFromInstructionIndex(0),
3201                   block_pos[cur_reg]);
3202         use_pos[cur_reg] =
3203             range->NextLifetimePositionRegisterIsBeneficial(current->Start());
3204       }
3205     } else {
3206       int alias_base_index = -1;
3207       int aliases = data()->config()->GetAliases(
3208           range->representation(), cur_reg, rep, &alias_base_index);
3209       DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
3210       while (aliases--) {
3211         int aliased_reg = alias_base_index + aliases;
3212         if (is_fixed_or_cant_spill) {
3213           block_pos[aliased_reg] = use_pos[aliased_reg] =
3214               LifetimePosition::GapFromInstructionIndex(0);
3215         } else {
3216           use_pos[aliased_reg] =
3217               Min(block_pos[aliased_reg],
3218                   range->NextLifetimePositionRegisterIsBeneficial(
3219                       current->Start()));
3220         }
3221       }
3222     }
3223   }
3224 
3225   for (LiveRange* range : inactive_live_ranges()) {
3226     DCHECK(range->End() > current->Start());
3227     int cur_reg = range->assigned_register();
3228     bool is_fixed = range->TopLevel()->IsFixed();
3229 
3230     // Don't perform costly intersections if they are guaranteed to not update
3231     // block_pos or use_pos.
3232     // TODO(mtrofin): extend to aliased ranges, too.
3233     if ((kSimpleFPAliasing || !check_fp_aliasing())) {
3234       if (is_fixed) {
3235         if (block_pos[cur_reg] < range->Start()) continue;
3236       } else {
3237         if (use_pos[cur_reg] < range->Start()) continue;
3238       }
3239     }
3240 
3241     LifetimePosition next_intersection = range->FirstIntersection(current);
3242     if (!next_intersection.IsValid()) continue;
3243 
3244     if (kSimpleFPAliasing || !check_fp_aliasing()) {
3245       if (is_fixed) {
3246         block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
3247         use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
3248       } else {
3249         use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
3250       }
3251     } else {
3252       int alias_base_index = -1;
3253       int aliases = data()->config()->GetAliases(
3254           range->representation(), cur_reg, rep, &alias_base_index);
3255       DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
3256       while (aliases--) {
3257         int aliased_reg = alias_base_index + aliases;
3258         if (is_fixed) {
3259           block_pos[aliased_reg] =
3260               Min(block_pos[aliased_reg], next_intersection);
3261           use_pos[aliased_reg] =
3262               Min(block_pos[aliased_reg], use_pos[aliased_reg]);
3263         } else {
3264           use_pos[aliased_reg] = Min(use_pos[aliased_reg], next_intersection);
3265         }
3266       }
3267     }
3268   }
3269 
3270   int reg = codes[0];
3271   for (int i = 1; i < num_codes; ++i) {
3272     int code = codes[i];
3273     if (use_pos[code] > use_pos[reg]) {
3274       reg = code;
3275     }
3276   }
3277 
3278   if (use_pos[reg] < register_use->pos()) {
3279     // If there is a gap position before the next register use, we can
3280     // spill until there. The gap position will then fit the fill move.
3281     if (LifetimePosition::ExistsGapPositionBetween(current->Start(),
3282                                                    register_use->pos())) {
3283       SpillBetween(current, current->Start(), register_use->pos());
3284       return;
3285     }
3286   }
3287 
3288   // We couldn't spill until the next register use. Split before the register
3289   // is blocked, if applicable.
3290   if (block_pos[reg] < current->End()) {
3291     // Register becomes blocked before the current range end. Split before that
3292     // position.
3293     LiveRange* tail =
3294         SplitBetween(current, current->Start(), block_pos[reg].Start());
3295     AddToUnhandledSorted(tail);
3296   }
3297 
3298   // Register reg is not blocked for the whole range.
3299   DCHECK(block_pos[reg] >= current->End());
3300   TRACE("Assigning blocked reg %s to live range %d:%d\n", RegisterName(reg),
3301         current->TopLevel()->vreg(), current->relative_id());
3302   SetLiveRangeAssignedRegister(current, reg);
3303 
3304   // This register was not free. Thus we need to find and spill
3305   // parts of active and inactive live regions that use the same register
3306   // at the same lifetime positions as current.
3307   SplitAndSpillIntersecting(current);
3308 }
3309 
3310 
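// After stealing a register for |current|, splits and spills the parts of
// active and inactive ranges that use the same register (or an aliasing one)
// while |current| is live.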
3311 void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
3312   DCHECK(current->HasRegisterAssigned());
3313   int reg = current->assigned_register();
3314   LifetimePosition split_pos = current->Start();
3315   for (size_t i = 0; i < active_live_ranges().size(); ++i) {
3316     LiveRange* range = active_live_ranges()[i];
3317     if (kSimpleFPAliasing || !check_fp_aliasing()) {
3318       if (range->assigned_register() != reg) continue;
3319     } else {
3320       if (!data()->config()->AreAliases(current->representation(), reg,
3321                                         range->representation(),
3322                                         range->assigned_register())) {
3323         continue;
3324       }
3325     }
3326 
3327     UsePosition* next_pos = range->NextRegisterPosition(current->Start());
3328     LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
3329     if (next_pos == nullptr) {
3330       SpillAfter(range, spill_pos);
3331     } else {
3332       // When spilling between spill_pos and next_pos ensure that the range
3333       // remains spilled at least until the start of the current live range.
3334       // This guarantees that we will not introduce new unhandled ranges that
3335       // start before the current range as this violates allocation invariants
3336       // and will lead to an inconsistent state of active and inactive
3337       // live-ranges: ranges are allocated in order of their start positions,
3338       // ranges are retired from active/inactive when the start of the
3339       // current live-range is larger than their end.
3340       DCHECK(LifetimePosition::ExistsGapPositionBetween(current->Start(),
3341                                                         next_pos->pos()));
3342       SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
3343     }
3344     ActiveToHandled(range);
3345     --i;
3346   }
3347 
3348   for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
3349     LiveRange* range = inactive_live_ranges()[i];
3350     DCHECK(range->End() > current->Start());
3351     if (range->TopLevel()->IsFixed()) continue;
3352     if (kSimpleFPAliasing || !check_fp_aliasing()) {
3353       if (range->assigned_register() != reg) continue;
3354     } else {
3355       if (!data()->config()->AreAliases(current->representation(), reg,
3356                                         range->representation(),
3357                                         range->assigned_register()))
3358         continue;
3359     }
3360 
3361     LifetimePosition next_intersection = range->FirstIntersection(current);
3362     if (next_intersection.IsValid()) {
3363       UsePosition* next_pos = range->NextRegisterPosition(current->Start());
3364       if (next_pos == nullptr) {
3365         SpillAfter(range, split_pos);
3366       } else {
3367         next_intersection = Min(next_intersection, next_pos->pos());
3368         SpillBetween(range, split_pos, next_intersection);
3369       }
3370       InactiveToHandled(range);
3371       --i;
3372     }
3373   }
3374 }
3375 
3376 
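// For a phi whose inputs are mostly spilled, tries to merge the inputs' spill
// ranges into a single slot and spill the phi's own range there as well,
// provided the phi does not need a register right away.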
3377 bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
3378   if (!range->is_phi()) return false;
3379 
3380   DCHECK(!range->HasSpillOperand());
3381   RegisterAllocationData::PhiMapValue* phi_map_value =
3382       data()->GetPhiMapValueFor(range);
3383   const PhiInstruction* phi = phi_map_value->phi();
3384   const InstructionBlock* block = phi_map_value->block();
3385   // Count the number of spilled operands.
3386   size_t spilled_count = 0;
3387   LiveRange* first_op = nullptr;
3388   for (size_t i = 0; i < phi->operands().size(); i++) {
3389     int op = phi->operands()[i];
3390     LiveRange* op_range = data()->GetOrCreateLiveRangeFor(op);
3391     if (!op_range->TopLevel()->HasSpillRange()) continue;
3392     const InstructionBlock* pred =
3393         code()->InstructionBlockAt(block->predecessors()[i]);
3394     LifetimePosition pred_end =
3395         LifetimePosition::InstructionFromInstructionIndex(
3396             pred->last_instruction_index());
3397     while (op_range != nullptr && !op_range->CanCover(pred_end)) {
3398       op_range = op_range->next();
3399     }
3400     if (op_range != nullptr && op_range->spilled()) {
3401       spilled_count++;
3402       if (first_op == nullptr) {
3403         first_op = op_range->TopLevel();
3404       }
3405     }
3406   }
3407 
3408   // Only continue if more than half of the operands are spilled.
3409   if (spilled_count * 2 <= phi->operands().size()) {
3410     return false;
3411   }
3412 
3413   // Try to merge the spilled operands and count the number of merged spilled
3414   // operands.
3415   DCHECK(first_op != nullptr);
3416   SpillRange* first_op_spill = first_op->TopLevel()->GetSpillRange();
3417   size_t num_merged = 1;
3418   for (size_t i = 1; i < phi->operands().size(); i++) {
3419     int op = phi->operands()[i];
3420     TopLevelLiveRange* op_range = data()->live_ranges()[op];
3421     if (!op_range->HasSpillRange()) continue;
3422     SpillRange* op_spill = op_range->GetSpillRange();
3423     if (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill)) {
3424       num_merged++;
3425     }
3426   }
3427 
3428   // Only continue if enough operands could be merged to the
3429   // same spill slot.
3430   if (num_merged * 2 <= phi->operands().size() ||
3431       AreUseIntervalsIntersecting(first_op_spill->interval(),
3432                                   range->first_interval())) {
3433     return false;
3434   }
3435 
3436   // If the range does not need a register soon, spill it to the merged
3437   // spill range.
3438   LifetimePosition next_pos = range->Start();
3439   if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
3440   UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
3441   if (pos == nullptr) {
3442     SpillRange* spill_range =
3443         range->TopLevel()->HasSpillRange()
3444             ? range->TopLevel()->GetSpillRange()
3445             : data()->AssignSpillRangeToLiveRange(range->TopLevel());
3446     bool merged = first_op_spill->TryMerge(spill_range);
3447     if (!merged) return false;
3448     Spill(range);
3449     return true;
3450   } else if (pos->pos() > range->Start().NextStart()) {
3451     SpillRange* spill_range =
3452         range->TopLevel()->HasSpillRange()
3453             ? range->TopLevel()->GetSpillRange()
3454             : data()->AssignSpillRangeToLiveRange(range->TopLevel());
3455     bool merged = first_op_spill->TryMerge(spill_range);
3456     if (!merged) return false;
3457     SpillBetween(range, range->Start(), pos->pos());
3458     DCHECK(UnhandledIsSorted());
3459     return true;
3460   }
3461   return false;
3462 }
3463 
3464 
3465 void LinearScanAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
3466   LiveRange* second_part = SplitRangeAt(range, pos);
3467   Spill(second_part);
3468 }
3469 
3470 
3471 void LinearScanAllocator::SpillBetween(LiveRange* range, LifetimePosition start,
3472                                        LifetimePosition end) {
3473   SpillBetweenUntil(range, start, start, end);
3474 }
3475 
3476 
3477 void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
3478                                             LifetimePosition start,
3479                                             LifetimePosition until,
3480                                             LifetimePosition end) {
3481   CHECK(start < end);
3482   LiveRange* second_part = SplitRangeAt(range, start);
3483 
3484   if (second_part->Start() < end) {
3485     // The split result intersects with [start, end[.
3486     // Split it at position between ]start+1, end[, spill the middle part
3487     // and put the rest to unhandled.
3488     LifetimePosition third_part_end = end.PrevStart().End();
3489     if (data()->IsBlockBoundary(end.Start())) {
3490       third_part_end = end.Start();
3491     }
3492     LiveRange* third_part = SplitBetween(
3493         second_part, Max(second_part->Start().End(), until), third_part_end);
3494 
3495     DCHECK(third_part != second_part);
3496 
3497     Spill(second_part);
3498     AddToUnhandledSorted(third_part);
3499   } else {
3500     // The split result does not intersect with [start, end[.
3501     // Nothing to spill. Just put it to unhandled as whole.
3502     AddToUnhandledSorted(second_part);
3503   }
3504 }
3505 
3506 
3507 SpillSlotLocator::SpillSlotLocator(RegisterAllocationData* data)
3508     : data_(data) {}
3509 
3510 
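// Flags each block that receives a spill move as requiring a frame.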
3511 void SpillSlotLocator::LocateSpillSlots() {
3512   const InstructionSequence* code = data()->code();
3513   for (TopLevelLiveRange* range : data()->live_ranges()) {
3514     if (range == nullptr || range->IsEmpty()) continue;
3515     // We care only about ranges which spill in the frame.
3516     if (!range->HasSpillRange() || range->IsSpilledOnlyInDeferredBlocks()) {
3517       continue;
3518     }
3519     TopLevelLiveRange::SpillMoveInsertionList* spills =
3520         range->GetSpillMoveInsertionLocations();
3521     DCHECK_NOT_NULL(spills);
3522     for (; spills != nullptr; spills = spills->next) {
3523       code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
3524     }
3525   }
3526 }
3527 
3528 
3529 OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {}
3530 
3531 
3532 void OperandAssigner::AssignSpillSlots() {
3533   ZoneVector<SpillRange*>& spill_ranges = data()->spill_ranges();
3534   // Merge disjoint spill ranges
3535   for (size_t i = 0; i < spill_ranges.size(); ++i) {
3536     SpillRange* range = spill_ranges[i];
3537     if (range == nullptr) continue;
3538     if (range->IsEmpty()) continue;
3539     for (size_t j = i + 1; j < spill_ranges.size(); ++j) {
3540       SpillRange* other = spill_ranges[j];
3541       if (other != nullptr && !other->IsEmpty()) {
3542         range->TryMerge(other);
3543       }
3544     }
3545   }
3546   // Allocate slots for the merged spill ranges.
3547   for (SpillRange* range : spill_ranges) {
3548     if (range == nullptr || range->IsEmpty()) continue;
3549     // Allocate a new operand referring to the spill slot.
3550     if (!range->HasSlot()) {
3551       int index = data()->frame()->AllocateSpillSlot(range->byte_width());
3552       range->set_assigned_slot(index);
3553     }
3554   }
3555 }
3556 
3557 
3558 void OperandAssigner::CommitAssignment() {
3559   for (TopLevelLiveRange* top_range : data()->live_ranges()) {
3560     if (top_range == nullptr || top_range->IsEmpty()) continue;
3561     InstructionOperand spill_operand;
3562     if (top_range->HasSpillOperand()) {
3563       spill_operand = *top_range->TopLevel()->GetSpillOperand();
3564     } else if (top_range->TopLevel()->HasSpillRange()) {
3565       spill_operand = top_range->TopLevel()->GetSpillRangeOperand();
3566     }
3567     if (top_range->is_phi()) {
3568       data()->GetPhiMapValueFor(top_range)->CommitAssignment(
3569           top_range->GetAssignedOperand());
3570     }
3571     for (LiveRange* range = top_range; range != nullptr;
3572          range = range->next()) {
3573       InstructionOperand assigned = range->GetAssignedOperand();
3574       range->ConvertUsesToOperand(assigned, spill_operand);
3575     }
3576 
3577     if (!spill_operand.IsInvalid()) {
3578       // If this top level range has a child spilled in a deferred block, we use
3579       // the range and control flow connection mechanism instead of spilling at
3580       // definition. Refer to the ConnectLiveRanges and ResolveControlFlow
3581       // phases. Normally, when we spill at definition, we do not insert a
3582       // connecting move when a successor child range is spilled - because the
3583       // spilled range picks up its value from the slot which was assigned at
3584       // definition. For ranges that are determined to spill only in deferred
3585       // blocks, we let ConnectLiveRanges and ResolveControlFlow find the blocks
3586       // where a spill operand is expected, and then finalize by inserting the
3587       // spills in the deferred blocks dominators.
3588       if (!top_range->IsSpilledOnlyInDeferredBlocks()) {
3589         // Spill at definition if the range isn't spilled only in deferred
3590         // blocks.
3591         top_range->CommitSpillMoves(
3592             data()->code(), spill_operand,
3593             top_range->has_slot_use() || top_range->spilled());
3594       }
3595     }
3596   }
3597 }
3598 
3599 
3600 ReferenceMapPopulator::ReferenceMapPopulator(RegisterAllocationData* data)
3601     : data_(data) {}
3602 
3603 
3604 bool ReferenceMapPopulator::SafePointsAreInOrder() const {
3605   int safe_point = 0;
3606   for (ReferenceMap* map : *data()->code()->reference_maps()) {
3607     if (safe_point > map->instruction_position()) return false;
3608     safe_point = map->instruction_position();
3609   }
3610   return true;
3611 }
3612 
3613 
3614 void ReferenceMapPopulator::PopulateReferenceMaps() {
3615   DCHECK(SafePointsAreInOrder());
3616   // Map all delayed references.
3617   for (RegisterAllocationData::DelayedReference& delayed_reference :
3618        data()->delayed_references()) {
3619     delayed_reference.map->RecordReference(
3620         AllocatedOperand::cast(*delayed_reference.operand));
3621   }
3622   // Iterate over all safe point positions and record a pointer
3623   // for all spilled live ranges at this point.
3624   int last_range_start = 0;
3625   const ReferenceMapDeque* reference_maps = data()->code()->reference_maps();
3626   ReferenceMapDeque::const_iterator first_it = reference_maps->begin();
3627   for (TopLevelLiveRange* range : data()->live_ranges()) {
3628     if (range == nullptr) continue;
3629     // Skip non-reference values.
3630     if (!data()->IsReference(range)) continue;
3631     // Skip empty live ranges.
3632     if (range->IsEmpty()) continue;
3633     if (range->has_preassigned_slot()) continue;
3634 
3635     // Find the extent of the range and its children.
3636     int start = range->Start().ToInstructionIndex();
3637     int end = 0;
3638     for (LiveRange* cur = range; cur != nullptr; cur = cur->next()) {
3639       LifetimePosition this_end = cur->End();
3640       if (this_end.ToInstructionIndex() > end)
3641         end = this_end.ToInstructionIndex();
3642       DCHECK(cur->Start().ToInstructionIndex() >= start);
3643     }
3644 
3645     // Most of the ranges are in order, but not all.  Keep an eye on when they
3646     // step backwards and reset the first_it so we don't miss any safe points.
3647     if (start < last_range_start) first_it = reference_maps->begin();
3648     last_range_start = start;
3649 
3650     // Step across all the safe points that are before the start of this range,
3651     // recording how far we step in order to save doing this for the next range.
3652     for (; first_it != reference_maps->end(); ++first_it) {
3653       ReferenceMap* map = *first_it;
3654       if (map->instruction_position() >= start) break;
3655     }
3656 
3657     InstructionOperand spill_operand;
3658     if (((range->HasSpillOperand() &&
3659           !range->GetSpillOperand()->IsConstant()) ||
3660          range->HasSpillRange())) {
3661       if (range->HasSpillOperand()) {
3662         spill_operand = *range->GetSpillOperand();
3663       } else {
3664         spill_operand = range->GetSpillRangeOperand();
3665       }
3666       DCHECK(spill_operand.IsStackSlot());
3667       DCHECK(CanBeTaggedPointer(
3668           AllocatedOperand::cast(spill_operand).representation()));
3669     }
3670 
3671     LiveRange* cur = range;
3672     // Step through the safe points to see whether they are in the range.
3673     for (auto it = first_it; it != reference_maps->end(); ++it) {
3674       ReferenceMap* map = *it;
3675       int safe_point = map->instruction_position();
3676 
3677       // The safe points are sorted so we can stop searching here.
3678       if (safe_point - 1 > end) break;
3679 
3680       // Advance to the next active range that covers the current
3681       // safe point position.
3682       LifetimePosition safe_point_pos =
3683           LifetimePosition::InstructionFromInstructionIndex(safe_point);
3684 
3685       // Search for the child range (cur) that covers safe_point_pos. If we
3686       // don't find it before the children pass safe_point_pos, keep cur at
3687       // the last child, because the next safe_point_pos may be covered by cur.
3688       // This may happen if cur has more than one interval, and the current
3689       // safe_point_pos is in between intervals.
3690       // For that reason, cur may be at most the last child.
3691       DCHECK_NOT_NULL(cur);
3692       DCHECK(safe_point_pos >= cur->Start() || range == cur);
3693       bool found = false;
3694       while (!found) {
3695         if (cur->Covers(safe_point_pos)) {
3696           found = true;
3697         } else {
3698           LiveRange* next = cur->next();
3699           if (next == nullptr || next->Start() > safe_point_pos) {
3700             break;
3701           }
3702           cur = next;
3703         }
3704       }
3705 
3706       if (!found) {
3707         continue;
3708       }
3709 
3710       // Check if the live range is spilled and the safe point is after
3711       // the spill position.
3712       int spill_index = range->IsSpilledOnlyInDeferredBlocks()
3713                             ? cur->Start().ToInstructionIndex()
3714                             : range->spill_start_index();
3715 
3716       if (!spill_operand.IsInvalid() && safe_point >= spill_index) {
3717         TRACE("Pointer for range %d (spilled at %d) at safe point %d\n",
3718               range->vreg(), spill_index, safe_point);
3719         map->RecordReference(AllocatedOperand::cast(spill_operand));
3720       }
3721 
3722       if (!cur->spilled()) {
3723         TRACE(
3724             "Pointer in register for range %d:%d (start at %d) "
3725             "at safe point %d\n",
3726             range->vreg(), cur->relative_id(), cur->Start().value(),
3727             safe_point);
3728         InstructionOperand operand = cur->GetAssignedOperand();
3729         DCHECK(!operand.IsStackSlot());
3730         DCHECK(CanBeTaggedPointer(
3731             AllocatedOperand::cast(operand).representation()));
3732         map->RecordReference(AllocatedOperand::cast(operand));
3733       }
3734     }
3735   }
3736 }
3737 
3738 
3739 LiveRangeConnector::LiveRangeConnector(RegisterAllocationData* data)
3740     : data_(data) {}
3741 
3742 
3743 bool LiveRangeConnector::CanEagerlyResolveControlFlow(
3744     const InstructionBlock* block) const {
3745   if (block->PredecessorCount() != 1) return false;
3746   return block->predecessors()[0].IsNext(block->rpo_number());
3747 }
3748 
3749 
3750 void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
3751   // Lazily linearize live ranges in memory for fast lookup.
3752   LiveRangeFinder finder(data(), local_zone);
3753   ZoneVector<BitVector*>& live_in_sets = data()->live_in_sets();
3754   for (const InstructionBlock* block : code()->instruction_blocks()) {
3755     if (CanEagerlyResolveControlFlow(block)) continue;
3756     BitVector* live = live_in_sets[block->rpo_number().ToInt()];
3757     BitVector::Iterator iterator(live);
3758     while (!iterator.Done()) {
3759       int vreg = iterator.Current();
3760       LiveRangeBoundArray* array = finder.ArrayFor(vreg);
3761       for (const RpoNumber& pred : block->predecessors()) {
3762         FindResult result;
3763         const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
3764         if (!array->FindConnectableSubranges(block, pred_block, &result)) {
3765           continue;
3766         }
3767         InstructionOperand pred_op = result.pred_cover_->GetAssignedOperand();
3768         InstructionOperand cur_op = result.cur_cover_->GetAssignedOperand();
3769         if (pred_op.Equals(cur_op)) continue;
3770         if (!pred_op.IsAnyRegister() && cur_op.IsAnyRegister()) {
3771           // We're doing a reload.
3772           // We don't need to, if:
3773           // 1) there's no register use in this block, and
3774           // 2) the range ends before the block does, and
3775           // 3) we don't have a successor, or the successor is spilled.
3776           LifetimePosition block_start =
3777               LifetimePosition::GapFromInstructionIndex(block->code_start());
3778           LifetimePosition block_end =
3779               LifetimePosition::GapFromInstructionIndex(block->code_end());
3780           const LiveRange* current = result.cur_cover_;
3781           const LiveRange* successor = current->next();
3782           if (current->End() < block_end &&
3783               (successor == nullptr || successor->spilled())) {
3784             // verify point 1: no register use. We can go to the end of the
3785             // range, since it's all within the block.
3786 
3787             bool uses_reg = false;
3788             for (const UsePosition* use = current->NextUsePosition(block_start);
3789                  use != nullptr; use = use->next()) {
3790               if (use->operand()->IsAnyRegister()) {
3791                 uses_reg = true;
3792                 break;
3793               }
3794             }
3795             if (!uses_reg) continue;
3796           }
3797           if (current->TopLevel()->IsSpilledOnlyInDeferredBlocks() &&
3798               pred_block->IsDeferred()) {
3799             // The spill location should be defined in pred_block, so add
3800             // pred_block to the list of blocks requiring a spill operand.
3801             current->TopLevel()->GetListOfBlocksRequiringSpillOperands()->Add(
3802                 pred_block->rpo_number().ToInt());
3803           }
3804         }
3805         int move_loc = ResolveControlFlow(block, cur_op, pred_block, pred_op);
3806         USE(move_loc);
3807         DCHECK_IMPLIES(
3808             result.cur_cover_->TopLevel()->IsSpilledOnlyInDeferredBlocks() &&
3809                 !(pred_op.IsAnyRegister() && cur_op.IsAnyRegister()),
3810             code()->GetInstructionBlock(move_loc)->IsDeferred());
3811       }
3812       iterator.Advance();
3813     }
3814   }
3815 
3816   // At this stage, we collected blocks needing a spill operand from
3817   // ConnectRanges and from ResolveControlFlow. Time to commit the spills for
3818   // deferred blocks.
3819   for (TopLevelLiveRange* top : data()->live_ranges()) {
3820     if (top == nullptr || top->IsEmpty() ||
3821         !top->IsSpilledOnlyInDeferredBlocks())
3822       continue;
3823     CommitSpillsInDeferredBlocks(top, finder.ArrayFor(top->vreg()), local_zone);
3824   }
3825 }
3826 
3827 
3828 int LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
3829                                            const InstructionOperand& cur_op,
3830                                            const InstructionBlock* pred,
3831                                            const InstructionOperand& pred_op) {
3832   DCHECK(!pred_op.Equals(cur_op));
3833   int gap_index;
3834   Instruction::GapPosition position;
3835   if (block->PredecessorCount() == 1) {
3836     gap_index = block->first_instruction_index();
3837     position = Instruction::START;
3838   } else {
3839     DCHECK(pred->SuccessorCount() == 1);
3840     DCHECK(!code()
3841                 ->InstructionAt(pred->last_instruction_index())
3842                 ->HasReferenceMap());
3843     gap_index = pred->last_instruction_index();
3844     position = Instruction::END;
3845   }
3846   data()->AddGapMove(gap_index, position, pred_op, cur_op);
3847   return gap_index;
3848 }
3849 
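// Inserts gap moves that connect adjacent child ranges of the same top-level
// range when they were assigned different operands and no block boundary
// separates them. Moves that must be inserted after an existing move at the
// same position are collected first and committed at the end.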
3850 void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
3851   DelayedInsertionMap delayed_insertion_map(local_zone);
3852   for (TopLevelLiveRange* top_range : data()->live_ranges()) {
3853     if (top_range == nullptr) continue;
3854     bool connect_spilled = top_range->IsSpilledOnlyInDeferredBlocks();
3855     LiveRange* first_range = top_range;
3856     for (LiveRange *second_range = first_range->next(); second_range != nullptr;
3857          first_range = second_range, second_range = second_range->next()) {
3858       LifetimePosition pos = second_range->Start();
3859       // Add gap move if the two live ranges touch and there is no block
3860       // boundary.
3861       if (second_range->spilled()) continue;
3862       if (first_range->End() != pos) continue;
3863       if (data()->IsBlockBoundary(pos) &&
3864           !CanEagerlyResolveControlFlow(GetInstructionBlock(code(), pos))) {
3865         continue;
3866       }
3867       InstructionOperand prev_operand = first_range->GetAssignedOperand();
3868       InstructionOperand cur_operand = second_range->GetAssignedOperand();
3869       if (prev_operand.Equals(cur_operand)) continue;
3870       bool delay_insertion = false;
3871       Instruction::GapPosition gap_pos;
3872       int gap_index = pos.ToInstructionIndex();
3873       if (connect_spilled && !prev_operand.IsAnyRegister() &&
3874           cur_operand.IsAnyRegister()) {
3875         const InstructionBlock* block = code()->GetInstructionBlock(gap_index);
3876         DCHECK(block->IsDeferred());
3877         // Performing a reload in this block, meaning the spill operand must
3878         // be defined here.
3879         top_range->GetListOfBlocksRequiringSpillOperands()->Add(
3880             block->rpo_number().ToInt());
3881       }
3882 
3883       if (pos.IsGapPosition()) {
3884         gap_pos = pos.IsStart() ? Instruction::START : Instruction::END;
3885       } else {
3886         if (pos.IsStart()) {
3887           delay_insertion = true;
3888         } else {
3889           gap_index++;
3890         }
3891         gap_pos = delay_insertion ? Instruction::END : Instruction::START;
3892       }
3893       // Reloads or spills for ranges that are spilled only in deferred blocks
3894       // must happen only in deferred blocks.
3895       DCHECK_IMPLIES(
3896           connect_spilled &&
3897               !(prev_operand.IsAnyRegister() && cur_operand.IsAnyRegister()),
3898           code()->GetInstructionBlock(gap_index)->IsDeferred());
3899 
3900       ParallelMove* move =
3901           code()->InstructionAt(gap_index)->GetOrCreateParallelMove(
3902               gap_pos, code_zone());
3903       if (!delay_insertion) {
3904         move->AddMove(prev_operand, cur_operand);
3905       } else {
3906         delayed_insertion_map.insert(
3907             std::make_pair(std::make_pair(move, prev_operand), cur_operand));
3908       }
3909     }
3910   }
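  // The moves that were delayed above must behave as if they executed after
  // the moves already present in their ParallelMove. PrepareInsertAfter below
  // rewrites their sources where needed and flags the existing moves they
  // supersede for elimination.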
  if (delayed_insertion_map.empty()) return;
  // Insert all the moves which should occur after the stored move.
  ZoneVector<MoveOperands*> to_insert(local_zone);
  ZoneVector<MoveOperands*> to_eliminate(local_zone);
  to_insert.reserve(4);
  to_eliminate.reserve(4);
  ParallelMove* moves = delayed_insertion_map.begin()->first.first;
  for (auto it = delayed_insertion_map.begin();; ++it) {
    bool done = it == delayed_insertion_map.end();
    if (done || it->first.first != moves) {
      // Commit the MoveOperands for the current ParallelMove.
      for (MoveOperands* move : to_eliminate) {
        move->Eliminate();
      }
      for (MoveOperands* move : to_insert) {
        moves->push_back(move);
      }
      if (done) break;
      // Reset state.
      to_eliminate.clear();
      to_insert.clear();
      moves = it->first.first;
    }
    // Gather all MoveOperands for a single ParallelMove.
    MoveOperands* move =
        new (code_zone()) MoveOperands(it->first.second, it->second);
    moves->PrepareInsertAfter(move, &to_eliminate);
    to_insert.push_back(move);
  }
}


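// For a live range that is spilled only in deferred code, emit the actual
// store to the spill slot. The blocks whose uses need the value on the stack
// are collected first; the worklist walk below then backs up through deferred
// predecessors so that the store lands at the START gap of a deferred block
// entered directly from non-deferred code, using the operand assigned at the
// end of the non-deferred predecessor as the source.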
void LiveRangeConnector::CommitSpillsInDeferredBlocks(
    TopLevelLiveRange* range, LiveRangeBoundArray* array, Zone* temp_zone) {
  DCHECK(range->IsSpilledOnlyInDeferredBlocks());
  DCHECK(!range->spilled());

  InstructionSequence* code = data()->code();
  InstructionOperand spill_operand = range->GetSpillRangeOperand();

  TRACE("Live Range %d will be spilled only in deferred blocks.\n",
        range->vreg());
  // If we have ranges that aren't spilled but require the operand on the stack,
  // make sure we insert the spill.
  for (const LiveRange* child = range; child != nullptr;
       child = child->next()) {
    for (const UsePosition* pos = child->first_pos(); pos != nullptr;
         pos = pos->next()) {
      if (pos->type() != UsePositionType::kRequiresSlot && !child->spilled())
        continue;
      range->AddBlockRequiringSpillOperand(
          code->GetInstructionBlock(pos->pos().ToInstructionIndex())
              ->rpo_number());
    }
  }

  ZoneQueue<int> worklist(temp_zone);

  for (BitVector::Iterator iterator(
           range->GetListOfBlocksRequiringSpillOperands());
       !iterator.Done(); iterator.Advance()) {
    worklist.push(iterator.Current());
  }

  ZoneSet<std::pair<RpoNumber, int>> done_moves(temp_zone);
  // Seek the deferred blocks that dominate locations requiring spill operands,
  // and spill there. We only need to spill at the start of such blocks.
  BitVector done_blocks(
      range->GetListOfBlocksRequiringSpillOperands()->length(), temp_zone);
  while (!worklist.empty()) {
    int block_id = worklist.front();
    worklist.pop();
    if (done_blocks.Contains(block_id)) continue;
    done_blocks.Add(block_id);
    InstructionBlock* spill_block =
        code->InstructionBlockAt(RpoNumber::FromInt(block_id));

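    // Walk the incoming edges: deferred predecessors are queued so the spill
    // migrates towards the entry of the deferred region, while each
    // non-deferred predecessor causes the value to be stored at the start of
    // this block, read from whatever operand the range holds at the end of
    // that predecessor (at most one such move per block and vreg).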
    for (const RpoNumber& pred : spill_block->predecessors()) {
      const InstructionBlock* pred_block = code->InstructionBlockAt(pred);

      if (pred_block->IsDeferred()) {
        worklist.push(pred_block->rpo_number().ToInt());
      } else {
        LifetimePosition pred_end =
            LifetimePosition::InstructionFromInstructionIndex(
                pred_block->last_instruction_index());

        LiveRangeBound* bound = array->Find(pred_end);

        InstructionOperand pred_op = bound->range_->GetAssignedOperand();

        RpoNumber spill_block_number = spill_block->rpo_number();
        if (done_moves.find(std::make_pair(
                spill_block_number, range->vreg())) == done_moves.end()) {
          data()->AddGapMove(spill_block->first_instruction_index(),
                             Instruction::GapPosition::START, pred_op,
                             spill_operand);
          done_moves.insert(std::make_pair(spill_block_number, range->vreg()));
          spill_block->mark_needs_frame();
        }
      }
    }
  }
}


}  // namespace compiler
}  // namespace internal
}  // namespace v8