1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/base/adapters.h"
6 #include "src/compiler/linkage.h"
7 #include "src/compiler/register-allocator.h"
8 #include "src/string-stream.h"
9 
10 namespace v8 {
11 namespace internal {
12 namespace compiler {
13 
14 #define TRACE(...)                             \
15   do {                                         \
16     if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
17   } while (false)
18 
19 
20 namespace {
21 
22 void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) {
23   auto it = std::find(v->begin(), v->end(), range);
24   DCHECK(it != v->end());
25   v->erase(it);
26 }
27 
28 int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) {
29   return kind == FP_REGISTERS ? cfg->num_double_registers()
30                               : cfg->num_general_registers();
31 }
32 
33 
34 int GetAllocatableRegisterCount(const RegisterConfiguration* cfg,
35                                 RegisterKind kind) {
36   return kind == FP_REGISTERS ? cfg->num_allocatable_double_registers()
37                               : cfg->num_allocatable_general_registers();
38 }
39 
40 
41 const int* GetAllocatableRegisterCodes(const RegisterConfiguration* cfg,
42                                        RegisterKind kind) {
43   return kind == FP_REGISTERS ? cfg->allocatable_double_codes()
44                               : cfg->allocatable_general_codes();
45 }
46 
47 
48 const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence,
49                                           const InstructionBlock* block) {
50   RpoNumber index = block->loop_header();
51   if (!index.IsValid()) return nullptr;
52   return sequence->InstructionBlockAt(index);
53 }
54 
55 
56 const InstructionBlock* GetInstructionBlock(const InstructionSequence* code,
57                                             LifetimePosition pos) {
58   return code->GetInstructionBlock(pos.ToInstructionIndex());
59 }
60 
61 
62 Instruction* GetLastInstruction(InstructionSequence* code,
63                                 const InstructionBlock* block) {
64   return code->InstructionAt(block->last_instruction_index());
65 }
66 
67 bool IsOutputRegisterOf(Instruction* instr, int code) {
68   for (size_t i = 0; i < instr->OutputCount(); i++) {
69     InstructionOperand* output = instr->OutputAt(i);
70     if (output->IsRegister() &&
71         LocationOperand::cast(output)->register_code() == code) {
72       return true;
73     }
74   }
75   return false;
76 }
77 
78 bool IsOutputFPRegisterOf(Instruction* instr, MachineRepresentation rep,
79                           int code) {
80   for (size_t i = 0; i < instr->OutputCount(); i++) {
81     InstructionOperand* output = instr->OutputAt(i);
82     if (output->IsFPRegister()) {
83       const LocationOperand* op = LocationOperand::cast(output);
84       if (kSimpleFPAliasing) {
85         if (op->register_code() == code) return true;
86       } else {
87         if (RegisterConfiguration::Turbofan()->AreAliases(
88                 op->representation(), op->register_code(), rep, code)) {
89           return true;
90         }
91       }
92     }
93   }
94   return false;
95 }
96 
97 
98 // TODO(dcarney): fix frame to allow frame accesses to half size location.
99 int GetByteWidth(MachineRepresentation rep) {
100   switch (rep) {
101     case MachineRepresentation::kBit:
102     case MachineRepresentation::kWord8:
103     case MachineRepresentation::kWord16:
104     case MachineRepresentation::kWord32:
105     case MachineRepresentation::kTagged:
106       return kPointerSize;
107     case MachineRepresentation::kFloat32:
108     case MachineRepresentation::kWord64:
109     case MachineRepresentation::kFloat64:
110       return 8;
111     case MachineRepresentation::kSimd128:
112       return 16;
113     case MachineRepresentation::kNone:
114       break;
115   }
116   UNREACHABLE();
117   return 0;
118 }
119 
120 }  // namespace
121 
122 class LiveRangeBound {
123  public:
124   explicit LiveRangeBound(LiveRange* range, bool skip)
125       : range_(range), start_(range->Start()), end_(range->End()), skip_(skip) {
126     DCHECK(!range->IsEmpty());
127   }
128 
129   bool CanCover(LifetimePosition position) {
130     return start_ <= position && position < end_;
131   }
132 
133   LiveRange* const range_;
134   const LifetimePosition start_;
135   const LifetimePosition end_;
136   const bool skip_;
137 
138  private:
139   DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
140 };
141 
142 
143 struct FindResult {
144   LiveRange* cur_cover_;
145   LiveRange* pred_cover_;
146 };
147 
148 
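// Records, for one top-level live range, the [start, end) bound of each of its
// children in order, so that the child covering a given position can be
// located by binary search (see Find below).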
149 class LiveRangeBoundArray {
150  public:
151   LiveRangeBoundArray() : length_(0), start_(nullptr) {}
152 
153   bool ShouldInitialize() { return start_ == nullptr; }
154 
155   void Initialize(Zone* zone, TopLevelLiveRange* range) {
156     length_ = range->GetChildCount();
157 
158     start_ = zone->NewArray<LiveRangeBound>(length_);
159     LiveRangeBound* curr = start_;
160     // Normally, spilled ranges do not need connecting moves, because the spill
161     // location has been assigned at definition. For ranges spilled in deferred
162     // blocks, that is not the case, so we need to connect the spilled children.
163     for (LiveRange *i = range; i != nullptr; i = i->next(), ++curr) {
164       new (curr) LiveRangeBound(i, i->spilled());
165     }
166   }
167 
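  // Binary search over the children's bounds, which are sorted by start
  // position. Assumes |position| is covered by one of the children.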
168   LiveRangeBound* Find(const LifetimePosition position) const {
169     size_t left_index = 0;
170     size_t right_index = length_;
171     while (true) {
172       size_t current_index = left_index + (right_index - left_index) / 2;
173       DCHECK(right_index > current_index);
174       LiveRangeBound* bound = &start_[current_index];
175       if (bound->start_ <= position) {
176         if (position < bound->end_) return bound;
177         DCHECK(left_index < current_index);
178         left_index = current_index;
179       } else {
180         right_index = current_index;
181       }
182     }
183   }
184 
185   LiveRangeBound* FindPred(const InstructionBlock* pred) {
186     LifetimePosition pred_end =
187         LifetimePosition::InstructionFromInstructionIndex(
188             pred->last_instruction_index());
189     return Find(pred_end);
190   }
191 
192   LiveRangeBound* FindSucc(const InstructionBlock* succ) {
193     LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex(
194         succ->first_instruction_index());
195     return Find(succ_start);
196   }
197 
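  // Finds the child covering the end of |pred| and the child covering the
  // start of |block|; returns true only when they are different ranges, i.e.
  // when a connecting move between the two blocks may be needed.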
198   bool FindConnectableSubranges(const InstructionBlock* block,
199                                 const InstructionBlock* pred,
200                                 FindResult* result) const {
201     LifetimePosition pred_end =
202         LifetimePosition::InstructionFromInstructionIndex(
203             pred->last_instruction_index());
204     LiveRangeBound* bound = Find(pred_end);
205     result->pred_cover_ = bound->range_;
206     LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex(
207         block->first_instruction_index());
208 
209     if (bound->CanCover(cur_start)) {
210       // Both blocks are covered by the same range, so there is nothing to
211       // connect.
212       return false;
213     }
214     bound = Find(cur_start);
215     if (bound->skip_) {
216       return false;
217     }
218     result->cur_cover_ = bound->range_;
219     DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
220     return (result->cur_cover_ != result->pred_cover_);
221   }
222 
223  private:
224   size_t length_;
225   LiveRangeBound* start_;
226 
227   DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
228 };
229 
230 
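// Lazily builds and caches one LiveRangeBoundArray per top-level live range,
// indexed by virtual register number.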
231 class LiveRangeFinder {
232  public:
233   explicit LiveRangeFinder(const RegisterAllocationData* data, Zone* zone)
234       : data_(data),
235         bounds_length_(static_cast<int>(data_->live_ranges().size())),
236         bounds_(zone->NewArray<LiveRangeBoundArray>(bounds_length_)),
237         zone_(zone) {
238     for (int i = 0; i < bounds_length_; ++i) {
239       new (&bounds_[i]) LiveRangeBoundArray();
240     }
241   }
242 
243   LiveRangeBoundArray* ArrayFor(int operand_index) {
244     DCHECK(operand_index < bounds_length_);
245     TopLevelLiveRange* range = data_->live_ranges()[operand_index];
246     DCHECK(range != nullptr && !range->IsEmpty());
247     LiveRangeBoundArray* array = &bounds_[operand_index];
248     if (array->ShouldInitialize()) {
249       array->Initialize(zone_, range);
250     }
251     return array;
252   }
253 
254  private:
255   const RegisterAllocationData* const data_;
256   const int bounds_length_;
257   LiveRangeBoundArray* const bounds_;
258   Zone* const zone_;
259 
260   DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
261 };
262 
263 
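// Key for buffering moves whose insertion into a ParallelMove has to be
// delayed: the target ParallelMove paired with the destination operand of the
// move to be inserted.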
264 typedef std::pair<ParallelMove*, InstructionOperand> DelayedInsertionMapKey;
265 
266 
267 struct DelayedInsertionMapCompare {
268   bool operator()(const DelayedInsertionMapKey& a,
269                   const DelayedInsertionMapKey& b) const {
270     if (a.first == b.first) {
271       return a.second.Compare(b.second);
272     }
273     return a.first < b.first;
274   }
275 };
276 
277 
278 typedef ZoneMap<DelayedInsertionMapKey, InstructionOperand,
279                 DelayedInsertionMapCompare> DelayedInsertionMap;
280 
281 
282 UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
283                          void* hint, UsePositionHintType hint_type)
284     : operand_(operand), hint_(hint), next_(nullptr), pos_(pos), flags_(0) {
285   DCHECK_IMPLIES(hint == nullptr, hint_type == UsePositionHintType::kNone);
286   bool register_beneficial = true;
287   UsePositionType type = UsePositionType::kAny;
288   if (operand_ != nullptr && operand_->IsUnallocated()) {
289     const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_);
290     if (unalloc->HasRegisterPolicy()) {
291       type = UsePositionType::kRequiresRegister;
292     } else if (unalloc->HasSlotPolicy()) {
293       type = UsePositionType::kRequiresSlot;
294       register_beneficial = false;
295     } else {
296       register_beneficial = !unalloc->HasAnyPolicy();
297     }
298   }
299   flags_ = TypeField::encode(type) | HintTypeField::encode(hint_type) |
300            RegisterBeneficialField::encode(register_beneficial) |
301            AssignedRegisterField::encode(kUnassignedRegister);
302   DCHECK(pos_.IsValid());
303 }
304 
305 
306 bool UsePosition::HasHint() const {
307   int hint_register;
308   return HintRegister(&hint_register);
309 }
310 
311 
312 bool UsePosition::HintRegister(int* register_code) const {
313   if (hint_ == nullptr) return false;
314   switch (HintTypeField::decode(flags_)) {
315     case UsePositionHintType::kNone:
316     case UsePositionHintType::kUnresolved:
317       return false;
318     case UsePositionHintType::kUsePos: {
319       UsePosition* use_pos = reinterpret_cast<UsePosition*>(hint_);
320       int assigned_register = AssignedRegisterField::decode(use_pos->flags_);
321       if (assigned_register == kUnassignedRegister) return false;
322       *register_code = assigned_register;
323       return true;
324     }
325     case UsePositionHintType::kOperand: {
326       InstructionOperand* operand =
327           reinterpret_cast<InstructionOperand*>(hint_);
328       *register_code = LocationOperand::cast(operand)->register_code();
329       return true;
330     }
331     case UsePositionHintType::kPhi: {
332       RegisterAllocationData::PhiMapValue* phi =
333           reinterpret_cast<RegisterAllocationData::PhiMapValue*>(hint_);
334       int assigned_register = phi->assigned_register();
335       if (assigned_register == kUnassignedRegister) return false;
336       *register_code = assigned_register;
337       return true;
338     }
339   }
340   UNREACHABLE();
341   return false;
342 }
343 
344 
345 UsePositionHintType UsePosition::HintTypeForOperand(
346     const InstructionOperand& op) {
347   switch (op.kind()) {
348     case InstructionOperand::CONSTANT:
349     case InstructionOperand::IMMEDIATE:
350     case InstructionOperand::EXPLICIT:
351       return UsePositionHintType::kNone;
352     case InstructionOperand::UNALLOCATED:
353       return UsePositionHintType::kUnresolved;
354     case InstructionOperand::ALLOCATED:
355       if (op.IsRegister() || op.IsFPRegister()) {
356         return UsePositionHintType::kOperand;
357       } else {
358         DCHECK(op.IsStackSlot() || op.IsFPStackSlot());
359         return UsePositionHintType::kNone;
360       }
361     case InstructionOperand::INVALID:
362       break;
363   }
364   UNREACHABLE();
365   return UsePositionHintType::kNone;
366 }
367 
368 
369 void UsePosition::ResolveHint(UsePosition* use_pos) {
370   DCHECK_NOT_NULL(use_pos);
371   if (HintTypeField::decode(flags_) != UsePositionHintType::kUnresolved) return;
372   hint_ = use_pos;
373   flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos);
374 }
375 
376 
377 void UsePosition::set_type(UsePositionType type, bool register_beneficial) {
378   DCHECK_IMPLIES(type == UsePositionType::kRequiresSlot, !register_beneficial);
379   DCHECK_EQ(kUnassignedRegister, AssignedRegisterField::decode(flags_));
380   flags_ = TypeField::encode(type) |
381            RegisterBeneficialField::encode(register_beneficial) |
382            HintTypeField::encode(HintTypeField::decode(flags_)) |
383            AssignedRegisterField::encode(kUnassignedRegister);
384 }
385 
386 
387 UseInterval* UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
388   DCHECK(Contains(pos) && pos != start());
389   UseInterval* after = new (zone) UseInterval(pos, end_);
390   after->next_ = next_;
391   next_ = nullptr;
392   end_ = pos;
393   return after;
394 }
395 
396 
397 void LifetimePosition::Print() const {
398   OFStream os(stdout);
399   os << *this << std::endl;
400 }
401 
402 
403 std::ostream& operator<<(std::ostream& os, const LifetimePosition pos) {
404   os << '@' << pos.ToInstructionIndex();
405   if (pos.IsGapPosition()) {
406     os << 'g';
407   } else {
408     os << 'i';
409   }
410   if (pos.IsStart()) {
411     os << 's';
412   } else {
413     os << 'e';
414   }
415   return os;
416 }
417 
418 LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
419                      TopLevelLiveRange* top_level)
420     : relative_id_(relative_id),
421       bits_(0),
422       last_interval_(nullptr),
423       first_interval_(nullptr),
424       first_pos_(nullptr),
425       top_level_(top_level),
426       next_(nullptr),
427       current_interval_(nullptr),
428       last_processed_use_(nullptr),
429       current_hint_position_(nullptr),
430       splitting_pointer_(nullptr) {
431   DCHECK(AllocatedOperand::IsSupportedRepresentation(rep));
432   bits_ = AssignedRegisterField::encode(kUnassignedRegister) |
433           RepresentationField::encode(rep);
434 }
435 
436 
437 void LiveRange::VerifyPositions() const {
438   // Walk the positions, verifying that each is in an interval.
439   UseInterval* interval = first_interval_;
440   for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
441     CHECK(Start() <= pos->pos());
442     CHECK(pos->pos() <= End());
443     CHECK_NOT_NULL(interval);
444     while (!interval->Contains(pos->pos()) && interval->end() != pos->pos()) {
445       interval = interval->next();
446       CHECK_NOT_NULL(interval);
447     }
448   }
449 }
450 
451 
452 void LiveRange::VerifyIntervals() const {
453   DCHECK(first_interval()->start() == Start());
454   LifetimePosition last_end = first_interval()->end();
455   for (UseInterval* interval = first_interval()->next(); interval != nullptr;
456        interval = interval->next()) {
457     DCHECK(last_end <= interval->start());
458     last_end = interval->end();
459   }
460   DCHECK(last_end == End());
461 }
462 
463 
464 void LiveRange::set_assigned_register(int reg) {
465   DCHECK(!HasRegisterAssigned() && !spilled());
466   bits_ = AssignedRegisterField::update(bits_, reg);
467 }
468 
469 
470 void LiveRange::UnsetAssignedRegister() {
471   DCHECK(HasRegisterAssigned() && !spilled());
472   bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
473 }
474 
475 
476 void LiveRange::Spill() {
477   DCHECK(!spilled());
478   DCHECK(!TopLevel()->HasNoSpillType());
479   set_spilled(true);
480   bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
481 }
482 
483 
484 RegisterKind LiveRange::kind() const {
485   return IsFloatingPoint(representation()) ? FP_REGISTERS : GENERAL_REGISTERS;
486 }
487 
488 
489 UsePosition* LiveRange::FirstHintPosition(int* register_index) const {
490   for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
491     if (pos->HintRegister(register_index)) return pos;
492   }
493   return nullptr;
494 }
495 
496 
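// Note that last_processed_use_ caches the previous result, so repeated
// queries with non-decreasing |start| do not rescan the list from the front.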
497 UsePosition* LiveRange::NextUsePosition(LifetimePosition start) const {
498   UsePosition* use_pos = last_processed_use_;
499   if (use_pos == nullptr || use_pos->pos() > start) {
500     use_pos = first_pos();
501   }
502   while (use_pos != nullptr && use_pos->pos() < start) {
503     use_pos = use_pos->next();
504   }
505   last_processed_use_ = use_pos;
506   return use_pos;
507 }
508 
509 
510 UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
511     LifetimePosition start) const {
512   UsePosition* pos = NextUsePosition(start);
513   while (pos != nullptr && !pos->RegisterIsBeneficial()) {
514     pos = pos->next();
515   }
516   return pos;
517 }
518 
519 
520 UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
521     LifetimePosition start) const {
522   UsePosition* pos = first_pos();
523   UsePosition* prev = nullptr;
524   while (pos != nullptr && pos->pos() < start) {
525     if (pos->RegisterIsBeneficial()) prev = pos;
526     pos = pos->next();
527   }
528   return prev;
529 }
530 
531 
532 UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const {
533   UsePosition* pos = NextUsePosition(start);
534   while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister) {
535     pos = pos->next();
536   }
537   return pos;
538 }
539 
540 
541 UsePosition* LiveRange::NextSlotPosition(LifetimePosition start) const {
542   for (UsePosition* pos = NextUsePosition(start); pos != nullptr;
543        pos = pos->next()) {
544     if (pos->type() != UsePositionType::kRequiresSlot) continue;
545     return pos;
546   }
547   return nullptr;
548 }
549 
550 
551 bool LiveRange::CanBeSpilled(LifetimePosition pos) const {
552   // We cannot spill a live range that has a use requiring a register
553   // at the current or the immediate next position.
554   UsePosition* use_pos = NextRegisterPosition(pos);
555   if (use_pos == nullptr) return true;
556   return use_pos->pos() > pos.NextStart().End();
557 }
558 
559 
560 bool LiveRange::IsTopLevel() const { return top_level_ == this; }
561 
562 
563 InstructionOperand LiveRange::GetAssignedOperand() const {
564   if (HasRegisterAssigned()) {
565     DCHECK(!spilled());
566     return AllocatedOperand(LocationOperand::REGISTER, representation(),
567                             assigned_register());
568   }
569   DCHECK(spilled());
570   DCHECK(!HasRegisterAssigned());
571   if (TopLevel()->HasSpillOperand()) {
572     InstructionOperand* op = TopLevel()->GetSpillOperand();
573     DCHECK(!op->IsUnallocated());
574     return *op;
575   }
576   return TopLevel()->GetSpillRangeOperand();
577 }
578 
579 
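// current_interval_ caches the interval reached by the previous search; fall
// back to the first interval whenever the cached interval starts after
// |position|.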
580 UseInterval* LiveRange::FirstSearchIntervalForPosition(
581     LifetimePosition position) const {
582   if (current_interval_ == nullptr) return first_interval_;
583   if (current_interval_->start() > position) {
584     current_interval_ = nullptr;
585     return first_interval_;
586   }
587   return current_interval_;
588 }
589 
590 
591 void LiveRange::AdvanceLastProcessedMarker(
592     UseInterval* to_start_of, LifetimePosition but_not_past) const {
593   if (to_start_of == nullptr) return;
594   if (to_start_of->start() > but_not_past) return;
595   LifetimePosition start = current_interval_ == nullptr
596                                ? LifetimePosition::Invalid()
597                                : current_interval_->start();
598   if (to_start_of->start() > start) {
599     current_interval_ = to_start_of;
600   }
601 }
602 
603 
604 LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
605   int new_id = TopLevel()->GetNextChildId();
606   LiveRange* child = new (zone) LiveRange(new_id, representation(), TopLevel());
607   DetachAt(position, child, zone);
608 
609   child->top_level_ = TopLevel();
610   child->next_ = next_;
611   next_ = child;
612   return child;
613 }
614 
615 
616 UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
617                                  Zone* zone) {
618   DCHECK(Start() < position);
619   DCHECK(End() > position);
620   DCHECK(result->IsEmpty());
621   // Find the last interval that ends before the position. If the
622   // position is contained in one of the intervals in the chain, we
623   // split that interval and use the first part.
624   UseInterval* current = FirstSearchIntervalForPosition(position);
625 
626   // If the split position coincides with the beginning of a use interval
627   // we need to split use positions in a special way.
628   bool split_at_start = false;
629 
630   if (current->start() == position) {
631     // When splitting at start we need to locate the previous use interval.
632     current = first_interval_;
633   }
634 
635   UseInterval* after = nullptr;
636   while (current != nullptr) {
637     if (current->Contains(position)) {
638       after = current->SplitAt(position, zone);
639       break;
640     }
641     UseInterval* next = current->next();
642     if (next->start() >= position) {
643       split_at_start = (next->start() == position);
644       after = next;
645       current->set_next(nullptr);
646       break;
647     }
648     current = next;
649   }
650   DCHECK(nullptr != after);
651 
652   // Partition original use intervals to the two live ranges.
653   UseInterval* before = current;
654   result->last_interval_ =
655       (last_interval_ == before)
656           ? after            // Only interval in the range after split.
657           : last_interval_;  // Last interval of the original range.
658   result->first_interval_ = after;
659   last_interval_ = before;
660 
661   // Find the last use position before the split and the first use
662   // position after it.
663   UsePosition* use_after =
664       splitting_pointer_ == nullptr || splitting_pointer_->pos() > position
665           ? first_pos()
666           : splitting_pointer_;
667   UsePosition* use_before = nullptr;
668   if (split_at_start) {
669     // The split position coincides with the beginning of a use interval (the
670     // end of a lifetime hole). Use at this position should be attributed to
671     // the split child because split child owns use interval covering it.
672     while (use_after != nullptr && use_after->pos() < position) {
673       use_before = use_after;
674       use_after = use_after->next();
675     }
676   } else {
677     while (use_after != nullptr && use_after->pos() <= position) {
678       use_before = use_after;
679       use_after = use_after->next();
680     }
681   }
682 
683   // Partition original use positions to the two live ranges.
684   if (use_before != nullptr) {
685     use_before->set_next(nullptr);
686   } else {
687     first_pos_ = nullptr;
688   }
689   result->first_pos_ = use_after;
690 
691   // Discard cached iteration state. It might be pointing
692   // to the use that no longer belongs to this live range.
693   last_processed_use_ = nullptr;
694   current_interval_ = nullptr;
695 
696 #ifdef DEBUG
697   VerifyChildStructure();
698   result->VerifyChildStructure();
699 #endif
700   return use_before;
701 }
702 
703 
704 void LiveRange::UpdateParentForAllChildren(TopLevelLiveRange* new_top_level) {
705   LiveRange* child = this;
706   for (; child != nullptr; child = child->next()) {
707     child->top_level_ = new_top_level;
708   }
709 }
710 
711 
712 void LiveRange::ConvertUsesToOperand(const InstructionOperand& op,
713                                      const InstructionOperand& spill_op) {
714   for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
715     DCHECK(Start() <= pos->pos() && pos->pos() <= End());
716     if (!pos->HasOperand()) continue;
717     switch (pos->type()) {
718       case UsePositionType::kRequiresSlot:
719         DCHECK(spill_op.IsStackSlot() || spill_op.IsFPStackSlot());
720         InstructionOperand::ReplaceWith(pos->operand(), &spill_op);
721         break;
722       case UsePositionType::kRequiresRegister:
723         DCHECK(op.IsRegister() || op.IsFPRegister());
724       // Fall through.
725       case UsePositionType::kAny:
726         InstructionOperand::ReplaceWith(pos->operand(), &op);
727         break;
728     }
729   }
730 }
731 
732 
733 // This implements an ordering on live ranges so that they are ordered by their
734 // start positions.  This is needed for the correctness of the register
735 // allocation algorithm.  If two live ranges start at the same offset then there
736 // is a tie breaker based on where the value is first used.  This part of the
737 // ordering is merely a heuristic.
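// For example, a range starting at @8 is allocated before one starting at @10;
// if both start at @8, the range whose first use position comes earlier is
// allocated first.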
738 bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
739   LifetimePosition start = Start();
740   LifetimePosition other_start = other->Start();
741   if (start == other_start) {
742     UsePosition* pos = first_pos();
743     if (pos == nullptr) return false;
744     UsePosition* other_pos = other->first_pos();
745     if (other_pos == nullptr) return true;
746     return pos->pos() < other_pos->pos();
747   }
748   return start < other_start;
749 }
750 
751 
752 void LiveRange::SetUseHints(int register_index) {
753   for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
754     if (!pos->HasOperand()) continue;
755     switch (pos->type()) {
756       case UsePositionType::kRequiresSlot:
757         break;
758       case UsePositionType::kRequiresRegister:
759       case UsePositionType::kAny:
760         pos->set_assigned_register(register_index);
761         break;
762     }
763   }
764 }
765 
766 
767 bool LiveRange::CanCover(LifetimePosition position) const {
768   if (IsEmpty()) return false;
769   return Start() <= position && position < End();
770 }
771 
772 
773 bool LiveRange::Covers(LifetimePosition position) const {
774   if (!CanCover(position)) return false;
775   UseInterval* start_search = FirstSearchIntervalForPosition(position);
776   for (UseInterval* interval = start_search; interval != nullptr;
777        interval = interval->next()) {
778     DCHECK(interval->next() == nullptr ||
779            interval->next()->start() >= interval->start());
780     AdvanceLastProcessedMarker(interval, position);
781     if (interval->Contains(position)) return true;
782     if (interval->start() > position) return false;
783   }
784   return false;
785 }
786 
787 
788 LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const {
789   UseInterval* b = other->first_interval();
790   if (b == nullptr) return LifetimePosition::Invalid();
791   LifetimePosition advance_last_processed_up_to = b->start();
792   UseInterval* a = FirstSearchIntervalForPosition(b->start());
793   while (a != nullptr && b != nullptr) {
794     if (a->start() > other->End()) break;
795     if (b->start() > End()) break;
796     LifetimePosition cur_intersection = a->Intersect(b);
797     if (cur_intersection.IsValid()) {
798       return cur_intersection;
799     }
800     if (a->start() < b->start()) {
801       a = a->next();
802       if (a == nullptr || a->start() > other->End()) break;
803       AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
804     } else {
805       b = b->next();
806     }
807   }
808   return LifetimePosition::Invalid();
809 }
810 
811 void LiveRange::Print(const RegisterConfiguration* config,
812                       bool with_children) const {
813   OFStream os(stdout);
814   PrintableLiveRange wrapper;
815   wrapper.register_configuration_ = config;
816   for (const LiveRange* i = this; i != nullptr; i = i->next()) {
817     wrapper.range_ = i;
818     os << wrapper << std::endl;
819     if (!with_children) break;
820   }
821 }
822 
823 
824 void LiveRange::Print(bool with_children) const {
825   Print(RegisterConfiguration::Turbofan(), with_children);
826 }
827 
828 
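// Singly-linked list node recording a gap index and operand at which a spill
// move still has to be inserted; see RecordSpillLocation and CommitSpillMoves.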
829 struct TopLevelLiveRange::SpillMoveInsertionList : ZoneObject {
830   SpillMoveInsertionList(int gap_index, InstructionOperand* operand,
831                          SpillMoveInsertionList* next)
832       : gap_index(gap_index), operand(operand), next(next) {}
833   const int gap_index;
834   InstructionOperand* const operand;
835   SpillMoveInsertionList* const next;
836 };
837 
838 
839 TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineRepresentation rep)
840     : LiveRange(0, rep, this),
841       vreg_(vreg),
842       last_child_id_(0),
843       splintered_from_(nullptr),
844       spill_operand_(nullptr),
845       spill_move_insertion_locations_(nullptr),
846       spilled_in_deferred_blocks_(false),
847       spill_start_index_(kMaxInt),
848       last_pos_(nullptr),
849       splinter_(nullptr),
850       has_preassigned_slot_(false) {
851   bits_ |= SpillTypeField::encode(SpillType::kNoSpillType);
852 }
853 
854 
855 #if DEBUG
856 int TopLevelLiveRange::debug_virt_reg() const {
857   return IsSplinter() ? splintered_from()->vreg() : vreg();
858 }
859 #endif
860 
861 
862 void TopLevelLiveRange::RecordSpillLocation(Zone* zone, int gap_index,
863                                             InstructionOperand* operand) {
864   DCHECK(HasNoSpillType());
865   spill_move_insertion_locations_ = new (zone) SpillMoveInsertionList(
866       gap_index, operand, spill_move_insertion_locations_);
867 }
868 
869 void TopLevelLiveRange::CommitSpillMoves(InstructionSequence* sequence,
870                                          const InstructionOperand& op,
871                                          bool might_be_duplicated) {
872   DCHECK_IMPLIES(op.IsConstant(), GetSpillMoveInsertionLocations() == nullptr);
873   Zone* zone = sequence->zone();
874 
875   for (SpillMoveInsertionList* to_spill = GetSpillMoveInsertionLocations();
876        to_spill != nullptr; to_spill = to_spill->next) {
877     Instruction* instr = sequence->InstructionAt(to_spill->gap_index);
878     ParallelMove* move =
879         instr->GetOrCreateParallelMove(Instruction::START, zone);
880     // Skip insertion if it's possible that the move exists already as a
881     // constraint move from a fixed output register to a slot.
882     if (might_be_duplicated || has_preassigned_slot()) {
883       bool found = false;
884       for (MoveOperands* move_op : *move) {
885         if (move_op->IsEliminated()) continue;
886         if (move_op->source().Equals(*to_spill->operand) &&
887             move_op->destination().Equals(op)) {
888           found = true;
889           if (has_preassigned_slot()) move_op->Eliminate();
890           break;
891         }
892       }
893       if (found) continue;
894     }
895     if (!has_preassigned_slot()) {
896       move->AddMove(*to_spill->operand, op);
897     }
898   }
899 }
900 
901 
902 void TopLevelLiveRange::SetSpillOperand(InstructionOperand* operand) {
903   DCHECK(HasNoSpillType());
904   DCHECK(!operand->IsUnallocated() && !operand->IsImmediate());
905   set_spill_type(SpillType::kSpillOperand);
906   spill_operand_ = operand;
907 }
908 
909 
910 void TopLevelLiveRange::SetSpillRange(SpillRange* spill_range) {
911   DCHECK(!HasSpillOperand());
912   DCHECK(spill_range);
913   spill_range_ = spill_range;
914 }
915 
916 
917 AllocatedOperand TopLevelLiveRange::GetSpillRangeOperand() const {
918   SpillRange* spill_range = GetSpillRange();
919   int index = spill_range->assigned_slot();
920   return AllocatedOperand(LocationOperand::STACK_SLOT, representation(), index);
921 }
922 
923 
924 void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end,
925                                  Zone* zone) {
926   DCHECK(start != Start() || end != End());
927   DCHECK(start < end);
928 
929   TopLevelLiveRange splinter_temp(-1, representation());
930   UsePosition* last_in_splinter = nullptr;
931   // Live ranges defined in deferred blocks stay in deferred blocks, so we
932   // don't need to splinter them. That means that start should always be
933   // after the beginning of the range.
934   DCHECK(start > Start());
935 
936   if (end >= End()) {
937     DCHECK(start > Start());
938     DetachAt(start, &splinter_temp, zone);
939     next_ = nullptr;
940   } else {
941     DCHECK(start < End() && Start() < end);
942 
943     const int kInvalidId = std::numeric_limits<int>::max();
944 
945     UsePosition* last = DetachAt(start, &splinter_temp, zone);
946 
947     LiveRange end_part(kInvalidId, this->representation(), nullptr);
948     last_in_splinter = splinter_temp.DetachAt(end, &end_part, zone);
949 
950     next_ = end_part.next_;
951     last_interval_->set_next(end_part.first_interval_);
952     // The next splinter will happen either at or after the current interval.
953     // We can optimize DetachAt by setting current_interval_ accordingly,
954     // which will then be picked up by FirstSearchIntervalForPosition.
955     current_interval_ = last_interval_;
956     last_interval_ = end_part.last_interval_;
957 
958     if (first_pos_ == nullptr) {
959       first_pos_ = end_part.first_pos_;
960     } else {
961       splitting_pointer_ = last;
962       if (last != nullptr) last->set_next(end_part.first_pos_);
963     }
964   }
965 
966   if (splinter()->IsEmpty()) {
967     splinter()->first_interval_ = splinter_temp.first_interval_;
968     splinter()->last_interval_ = splinter_temp.last_interval_;
969   } else {
970     splinter()->last_interval_->set_next(splinter_temp.first_interval_);
971     splinter()->last_interval_ = splinter_temp.last_interval_;
972   }
973   if (splinter()->first_pos() == nullptr) {
974     splinter()->first_pos_ = splinter_temp.first_pos_;
975   } else {
976     splinter()->last_pos_->set_next(splinter_temp.first_pos_);
977   }
978   if (last_in_splinter != nullptr) {
979     splinter()->last_pos_ = last_in_splinter;
980   } else {
981     if (splinter()->first_pos() != nullptr &&
982         splinter()->last_pos_ == nullptr) {
983       splinter()->last_pos_ = splinter()->first_pos();
984       for (UsePosition* pos = splinter()->first_pos(); pos != nullptr;
985            pos = pos->next()) {
986         splinter()->last_pos_ = pos;
987       }
988     }
989   }
990 #if DEBUG
991   Verify();
992   splinter()->Verify();
993 #endif
994 }
995 
996 
997 void TopLevelLiveRange::SetSplinteredFrom(TopLevelLiveRange* splinter_parent) {
998   splintered_from_ = splinter_parent;
999   if (!HasSpillOperand() && splinter_parent->spill_range_ != nullptr) {
1000     SetSpillRange(splinter_parent->spill_range_);
1001   }
1002 }
1003 
1004 
1005 void TopLevelLiveRange::UpdateSpillRangePostMerge(TopLevelLiveRange* merged) {
1006   DCHECK(merged->TopLevel() == this);
1007 
1008   if (HasNoSpillType() && merged->HasSpillRange()) {
1009     set_spill_type(merged->spill_type());
1010     DCHECK(GetSpillRange()->live_ranges().size() > 0);
1011     merged->spill_range_ = nullptr;
1012     merged->bits_ =
1013         SpillTypeField::update(merged->bits_, SpillType::kNoSpillType);
1014   }
1015 }
1016 
1017 
1018 void TopLevelLiveRange::Merge(TopLevelLiveRange* other, Zone* zone) {
1019   DCHECK(Start() < other->Start());
1020   DCHECK(other->splintered_from() == this);
1021 
1022   LiveRange* first = this;
1023   LiveRange* second = other;
1024   DCHECK(first->Start() < second->Start());
1025   while (first != nullptr && second != nullptr) {
1026     DCHECK(first != second);
1027     // Make sure the ranges are in order each time we iterate.
1028     if (second->Start() < first->Start()) {
1029       LiveRange* tmp = second;
1030       second = first;
1031       first = tmp;
1032       continue;
1033     }
1034 
1035     if (first->End() <= second->Start()) {
1036       if (first->next() == nullptr ||
1037           first->next()->Start() > second->Start()) {
1038         // First is in order before second.
1039         LiveRange* temp = first->next();
1040         first->next_ = second;
1041         first = temp;
1042       } else {
1043         // First is in order before its successor (or second), so advance first.
1044         first = first->next();
1045       }
1046       continue;
1047     }
1048 
1049     DCHECK(first->Start() < second->Start());
1050     // If first and second intersect, split first.
1051     if (first->Start() < second->End() && second->Start() < first->End()) {
1052       LiveRange* temp = first->SplitAt(second->Start(), zone);
1053       CHECK(temp != first);
1054       temp->set_spilled(first->spilled());
1055       if (!temp->spilled())
1056         temp->set_assigned_register(first->assigned_register());
1057 
1058       first->next_ = second;
1059       first = temp;
1060       continue;
1061     }
1062     DCHECK(first->End() <= second->Start());
1063   }
1064 
1065   TopLevel()->UpdateParentForAllChildren(TopLevel());
1066   TopLevel()->UpdateSpillRangePostMerge(other);
1067 
1068 #if DEBUG
1069   Verify();
1070 #endif
1071 }
1072 
1073 
1074 void TopLevelLiveRange::VerifyChildrenInOrder() const {
1075   LifetimePosition last_end = End();
1076   for (const LiveRange* child = this->next(); child != nullptr;
1077        child = child->next()) {
1078     DCHECK(last_end <= child->Start());
1079     last_end = child->End();
1080   }
1081 }
1082 
1083 
1084 void TopLevelLiveRange::Verify() const {
1085   VerifyChildrenInOrder();
1086   for (const LiveRange* child = this; child != nullptr; child = child->next()) {
1087     VerifyChildStructure();
1088   }
1089 }
1090 
1091 
1092 void TopLevelLiveRange::ShortenTo(LifetimePosition start) {
1093   TRACE("Shorten live range %d to [%d\n", vreg(), start.value());
1094   DCHECK(first_interval_ != nullptr);
1095   DCHECK(first_interval_->start() <= start);
1096   DCHECK(start < first_interval_->end());
1097   first_interval_->set_start(start);
1098 }
1099 
1100 
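// Coalesces all leading intervals that start no later than |end| into a single
// interval [start, new_end[ which is prepended to the interval list.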
1101 void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
1102                                        LifetimePosition end, Zone* zone) {
1103   TRACE("Ensure live range %d in interval [%d %d[\n", vreg(), start.value(),
1104         end.value());
1105   LifetimePosition new_end = end;
1106   while (first_interval_ != nullptr && first_interval_->start() <= end) {
1107     if (first_interval_->end() > end) {
1108       new_end = first_interval_->end();
1109     }
1110     first_interval_ = first_interval_->next();
1111   }
1112 
1113   UseInterval* new_interval = new (zone) UseInterval(start, new_end);
1114   new_interval->set_next(first_interval_);
1115   first_interval_ = new_interval;
1116   if (new_interval->next() == nullptr) {
1117     last_interval_ = new_interval;
1118   }
1119 }
1120 
1121 
1122 void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
1123                                        LifetimePosition end, Zone* zone) {
1124   TRACE("Add to live range %d interval [%d %d[\n", vreg(), start.value(),
1125         end.value());
1126   if (first_interval_ == nullptr) {
1127     UseInterval* interval = new (zone) UseInterval(start, end);
1128     first_interval_ = interval;
1129     last_interval_ = interval;
1130   } else {
1131     if (end == first_interval_->start()) {
1132       first_interval_->set_start(start);
1133     } else if (end < first_interval_->start()) {
1134       UseInterval* interval = new (zone) UseInterval(start, end);
1135       interval->set_next(first_interval_);
1136       first_interval_ = interval;
1137     } else {
1138       // The order in which instructions are processed (see
1139       // ProcessInstructions) guarantees that each new use interval either
1140       // precedes or intersects with the last added interval.
1141       DCHECK(start < first_interval_->end());
1142       first_interval_->set_start(Min(start, first_interval_->start()));
1143       first_interval_->set_end(Max(end, first_interval_->end()));
1144     }
1145   }
1146 }
1147 
1148 
1149 void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos) {
1150   LifetimePosition pos = use_pos->pos();
1151   TRACE("Add to live range %d use position %d\n", vreg(), pos.value());
1152   UsePosition* prev_hint = nullptr;
1153   UsePosition* prev = nullptr;
1154   UsePosition* current = first_pos_;
1155   while (current != nullptr && current->pos() < pos) {
1156     prev_hint = current->HasHint() ? current : prev_hint;
1157     prev = current;
1158     current = current->next();
1159   }
1160 
1161   if (prev == nullptr) {
1162     use_pos->set_next(first_pos_);
1163     first_pos_ = use_pos;
1164   } else {
1165     use_pos->set_next(prev->next());
1166     prev->set_next(use_pos);
1167   }
1168 
1169   if (prev_hint == nullptr && use_pos->HasHint()) {
1170     current_hint_position_ = use_pos;
1171   }
1172 }
1173 
1174 
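// Walks the two interval lists (each sorted by start position) in lockstep and
// returns true as soon as an overlapping pair is found.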
1175 static bool AreUseIntervalsIntersecting(UseInterval* interval1,
1176                                         UseInterval* interval2) {
1177   while (interval1 != nullptr && interval2 != nullptr) {
1178     if (interval1->start() < interval2->start()) {
1179       if (interval1->end() > interval2->start()) {
1180         return true;
1181       }
1182       interval1 = interval1->next();
1183     } else {
1184       if (interval2->end() > interval1->start()) {
1185         return true;
1186       }
1187       interval2 = interval2->next();
1188     }
1189   }
1190   return false;
1191 }
1192 
1193 
1194 std::ostream& operator<<(std::ostream& os,
1195                          const PrintableLiveRange& printable_range) {
1196   const LiveRange* range = printable_range.range_;
1197   os << "Range: " << range->TopLevel()->vreg() << ":" << range->relative_id()
1198      << " ";
1199   if (range->TopLevel()->is_phi()) os << "phi ";
1200   if (range->TopLevel()->is_non_loop_phi()) os << "nlphi ";
1201 
1202   os << "{" << std::endl;
1203   UseInterval* interval = range->first_interval();
1204   UsePosition* use_pos = range->first_pos();
1205   PrintableInstructionOperand pio;
1206   pio.register_configuration_ = printable_range.register_configuration_;
1207   while (use_pos != nullptr) {
1208     if (use_pos->HasOperand()) {
1209       pio.op_ = *use_pos->operand();
1210       os << pio << use_pos->pos() << " ";
1211     }
1212     use_pos = use_pos->next();
1213   }
1214   os << std::endl;
1215 
1216   while (interval != nullptr) {
1217     os << '[' << interval->start() << ", " << interval->end() << ')'
1218        << std::endl;
1219     interval = interval->next();
1220   }
1221   os << "}";
1222   return os;
1223 }
1224 
1225 
1226 SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
1227     : live_ranges_(zone),
1228       assigned_slot_(kUnassignedSlot),
1229       byte_width_(GetByteWidth(parent->representation())),
1230       kind_(parent->kind()) {
1231   // Spill ranges are created for top level, non-splintered ranges. This is so
1232   // that, when merging decisions are made, we consider the full extent of the
1233   // virtual register, and avoid clobbering it.
1234   DCHECK(!parent->IsSplinter());
1235   UseInterval* result = nullptr;
1236   UseInterval* node = nullptr;
1237   // Copy the intervals for all ranges.
1238   for (LiveRange* range = parent; range != nullptr; range = range->next()) {
1239     UseInterval* src = range->first_interval();
1240     while (src != nullptr) {
1241       UseInterval* new_node = new (zone) UseInterval(src->start(), src->end());
1242       if (result == nullptr) {
1243         result = new_node;
1244       } else {
1245         node->set_next(new_node);
1246       }
1247       node = new_node;
1248       src = src->next();
1249     }
1250   }
1251   use_interval_ = result;
1252   live_ranges().push_back(parent);
1253   end_position_ = node->end();
1254   parent->SetSpillRange(this);
1255 }
1256 
1257 bool SpillRange::IsIntersectingWith(SpillRange* other) const {
1258   if (this->use_interval_ == nullptr || other->use_interval_ == nullptr ||
1259       this->End() <= other->use_interval_->start() ||
1260       other->End() <= this->use_interval_->start()) {
1261     return false;
1262   }
1263   return AreUseIntervalsIntersecting(use_interval_, other->use_interval_);
1264 }
1265 
1266 
1267 bool SpillRange::TryMerge(SpillRange* other) {
1268   if (HasSlot() || other->HasSlot()) return false;
1269   // TODO(dcarney): byte widths should be compared here not kinds.
1270   if (live_ranges_[0]->kind() != other->live_ranges_[0]->kind() ||
1271       IsIntersectingWith(other)) {
1272     return false;
1273   }
1274 
1275   LifetimePosition max = LifetimePosition::MaxPosition();
1276   if (End() < other->End() && other->End() != max) {
1277     end_position_ = other->End();
1278   }
1279   other->end_position_ = max;
1280 
1281   MergeDisjointIntervals(other->use_interval_);
1282   other->use_interval_ = nullptr;
1283 
1284   for (TopLevelLiveRange* range : other->live_ranges()) {
1285     DCHECK(range->GetSpillRange() == other);
1286     range->SetSpillRange(this);
1287   }
1288 
1289   live_ranges().insert(live_ranges().end(), other->live_ranges().begin(),
1290                        other->live_ranges().end());
1291   other->live_ranges().clear();
1292 
1293   return true;
1294 }
1295 
1296 
1297 void SpillRange::MergeDisjointIntervals(UseInterval* other) {
1298   UseInterval* tail = nullptr;
1299   UseInterval* current = use_interval_;
1300   while (other != nullptr) {
1301     // Make sure the 'current' list starts first
1302     if (current == nullptr || current->start() > other->start()) {
1303       std::swap(current, other);
1304     }
1305     // Check disjointness
1306     DCHECK(other == nullptr || current->end() <= other->start());
1307     // Append the 'current' node to the result accumulator and move forward
1308     if (tail == nullptr) {
1309       use_interval_ = current;
1310     } else {
1311       tail->set_next(current);
1312     }
1313     tail = current;
1314     current = current->next();
1315   }
1316   // Other list is empty => we are done
1317 }
1318 
1319 
1320 void SpillRange::Print() const {
1321   OFStream os(stdout);
1322   os << "{" << std::endl;
1323   for (TopLevelLiveRange* range : live_ranges()) {
1324     os << range->vreg() << " ";
1325   }
1326   os << std::endl;
1327 
1328   for (UseInterval* i = interval(); i != nullptr; i = i->next()) {
1329     os << '[' << i->start() << ", " << i->end() << ')' << std::endl;
1330   }
1331   os << "}" << std::endl;
1332 }
1333 
1334 
1335 RegisterAllocationData::PhiMapValue::PhiMapValue(PhiInstruction* phi,
1336                                                  const InstructionBlock* block,
1337                                                  Zone* zone)
1338     : phi_(phi),
1339       block_(block),
1340       incoming_operands_(zone),
1341       assigned_register_(kUnassignedRegister) {
1342   incoming_operands_.reserve(phi->operands().size());
1343 }
1344 
1345 
1346 void RegisterAllocationData::PhiMapValue::AddOperand(
1347     InstructionOperand* operand) {
1348   incoming_operands_.push_back(operand);
1349 }
1350 
1351 
1352 void RegisterAllocationData::PhiMapValue::CommitAssignment(
1353     const InstructionOperand& assigned) {
1354   for (InstructionOperand* operand : incoming_operands_) {
1355     InstructionOperand::ReplaceWith(operand, &assigned);
1356   }
1357 }
1358 
1359 RegisterAllocationData::RegisterAllocationData(
1360     const RegisterConfiguration* config, Zone* zone, Frame* frame,
1361     InstructionSequence* code, const char* debug_name)
1362     : allocation_zone_(zone),
1363       frame_(frame),
1364       code_(code),
1365       debug_name_(debug_name),
1366       config_(config),
1367       phi_map_(allocation_zone()),
1368       live_in_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
1369       live_out_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
1370       live_ranges_(code->VirtualRegisterCount() * 2, nullptr,
1371                    allocation_zone()),
1372       fixed_live_ranges_(this->config()->num_general_registers(), nullptr,
1373                          allocation_zone()),
1374       fixed_float_live_ranges_(this->config()->num_float_registers(), nullptr,
1375                                allocation_zone()),
1376       fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr,
1377                                 allocation_zone()),
1378       spill_ranges_(code->VirtualRegisterCount(), nullptr, allocation_zone()),
1379       delayed_references_(allocation_zone()),
1380       assigned_registers_(nullptr),
1381       assigned_double_registers_(nullptr),
1382       virtual_register_count_(code->VirtualRegisterCount()),
1383       preassigned_slot_ranges_(zone) {
1384   assigned_registers_ = new (code_zone())
1385       BitVector(this->config()->num_general_registers(), code_zone());
1386   assigned_double_registers_ = new (code_zone())
1387       BitVector(this->config()->num_double_registers(), code_zone());
1388   this->frame()->SetAllocatedRegisters(assigned_registers_);
1389   this->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
1390 }
1391 
1392 
1393 MoveOperands* RegisterAllocationData::AddGapMove(
1394     int index, Instruction::GapPosition position,
1395     const InstructionOperand& from, const InstructionOperand& to) {
1396   Instruction* instr = code()->InstructionAt(index);
1397   ParallelMove* moves = instr->GetOrCreateParallelMove(position, code_zone());
1398   return moves->AddMove(from, to);
1399 }
1400 
1401 
1402 MachineRepresentation RegisterAllocationData::RepresentationFor(
1403     int virtual_register) {
1404   DCHECK_LT(virtual_register, code()->VirtualRegisterCount());
1405   return code()->GetRepresentation(virtual_register);
1406 }
1407 
1408 
1409 TopLevelLiveRange* RegisterAllocationData::GetOrCreateLiveRangeFor(int index) {
1410   if (index >= static_cast<int>(live_ranges().size())) {
1411     live_ranges().resize(index + 1, nullptr);
1412   }
1413   TopLevelLiveRange* result = live_ranges()[index];
1414   if (result == nullptr) {
1415     result = NewLiveRange(index, RepresentationFor(index));
1416     live_ranges()[index] = result;
1417   }
1418   return result;
1419 }
1420 
1421 
1422 TopLevelLiveRange* RegisterAllocationData::NewLiveRange(
1423     int index, MachineRepresentation rep) {
1424   return new (allocation_zone()) TopLevelLiveRange(index, rep);
1425 }
1426 
1427 
1428 int RegisterAllocationData::GetNextLiveRangeId() {
1429   int vreg = virtual_register_count_++;
1430   if (vreg >= static_cast<int>(live_ranges().size())) {
1431     live_ranges().resize(vreg + 1, nullptr);
1432   }
1433   return vreg;
1434 }
1435 
1436 
1437 TopLevelLiveRange* RegisterAllocationData::NextLiveRange(
1438     MachineRepresentation rep) {
1439   int vreg = GetNextLiveRangeId();
1440   TopLevelLiveRange* ret = NewLiveRange(vreg, rep);
1441   return ret;
1442 }
1443 
1444 
1445 RegisterAllocationData::PhiMapValue* RegisterAllocationData::InitializePhiMap(
1446     const InstructionBlock* block, PhiInstruction* phi) {
1447   RegisterAllocationData::PhiMapValue* map_value = new (allocation_zone())
1448       RegisterAllocationData::PhiMapValue(phi, block, allocation_zone());
1449   auto res =
1450       phi_map_.insert(std::make_pair(phi->virtual_register(), map_value));
1451   DCHECK(res.second);
1452   USE(res);
1453   return map_value;
1454 }
1455 
1456 
1457 RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor(
1458     int virtual_register) {
1459   auto it = phi_map_.find(virtual_register);
1460   DCHECK(it != phi_map_.end());
1461   return it->second;
1462 }
1463 
1464 
1465 RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor(
1466     TopLevelLiveRange* top_range) {
1467   return GetPhiMapValueFor(top_range->vreg());
1468 }
1469 
1470 
1471 bool RegisterAllocationData::ExistsUseWithoutDefinition() {
1472   bool found = false;
1473   BitVector::Iterator iterator(live_in_sets()[0]);
1474   while (!iterator.Done()) {
1475     found = true;
1476     int operand_index = iterator.Current();
1477     PrintF("Register allocator error: live v%d reached first block.\n",
1478            operand_index);
1479     LiveRange* range = GetOrCreateLiveRangeFor(operand_index);
1480     PrintF("  (first use is at %d)\n", range->first_pos()->pos().value());
1481     if (debug_name() == nullptr) {
1482       PrintF("\n");
1483     } else {
1484       PrintF("  (function: %s)\n", debug_name());
1485     }
1486     iterator.Advance();
1487   }
1488   return found;
1489 }
1490 
1491 
1492 // If a range is defined in a deferred block, we can expect the entire range
1493 // to cover only positions in deferred blocks. Otherwise, a block on the
1494 // hot path would be dominated by a deferred block, meaning it is unreachable
1495 // without passing through the deferred block, which is contradictory.
1496 // In particular, when such a range contributes a result back on the hot
1497 // path, it will be as one of the inputs to a phi. In that case, the value
1498 // will be transferred via a move in the END gap of the last instruction
1499 // of a deferred block.
1500 bool RegisterAllocationData::RangesDefinedInDeferredStayInDeferred() {
1501   for (const TopLevelLiveRange* range : live_ranges()) {
1502     if (range == nullptr || range->IsEmpty() ||
1503         !code()
1504              ->GetInstructionBlock(range->Start().ToInstructionIndex())
1505              ->IsDeferred()) {
1506       continue;
1507     }
1508     for (const UseInterval* i = range->first_interval(); i != nullptr;
1509          i = i->next()) {
1510       int first = i->FirstGapIndex();
1511       int last = i->LastGapIndex();
1512       for (int instr = first; instr <= last;) {
1513         const InstructionBlock* block = code()->GetInstructionBlock(instr);
1514         if (!block->IsDeferred()) return false;
1515         instr = block->last_instruction_index() + 1;
1516       }
1517     }
1518   }
1519   return true;
1520 }
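
// A minimal standalone sketch (not V8 code) of the block-by-block walk used
// above: given an inclusive range of instruction indices and a way to map an
// index to its block, every block is visited at most once by jumping to the
// index just past its last instruction. All names below are hypothetical.
//
//   bool AllBlocksDeferred(int first, int last,
//                          const std::function<const Block*(int)>& block_at) {
//     for (int instr = first; instr <= last;) {
//       const Block* b = block_at(instr);
//       if (!b->deferred) return false;   // the range leaks onto the hot path
//       instr = b->last_instruction + 1;  // skip the rest of this block
//     }
//     return true;
//   }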
1521 
1522 SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
1523     TopLevelLiveRange* range) {
1524   DCHECK(!range->HasSpillOperand());
1525 
1526   SpillRange* spill_range = range->GetAllocatedSpillRange();
1527   if (spill_range == nullptr) {
1528     DCHECK(!range->IsSplinter());
1529     spill_range = new (allocation_zone()) SpillRange(range, allocation_zone());
1530   }
1531   range->set_spill_type(TopLevelLiveRange::SpillType::kSpillRange);
1532 
1533   int spill_range_index =
1534       range->IsSplinter() ? range->splintered_from()->vreg() : range->vreg();
1535 
1536   spill_ranges()[spill_range_index] = spill_range;
1537 
1538   return spill_range;
1539 }
1540 
1541 
1542 SpillRange* RegisterAllocationData::CreateSpillRangeForLiveRange(
1543     TopLevelLiveRange* range) {
1544   DCHECK(!range->HasSpillOperand());
1545   DCHECK(!range->IsSplinter());
1546   SpillRange* spill_range =
1547       new (allocation_zone()) SpillRange(range, allocation_zone());
1548   return spill_range;
1549 }
1550 
1551 void RegisterAllocationData::MarkAllocated(MachineRepresentation rep,
1552                                            int index) {
1553   switch (rep) {
1554     case MachineRepresentation::kFloat32:
1555       if (kSimpleFPAliasing) {
1556         assigned_double_registers_->Add(index);
1557       } else {
1558         int alias_base_index = -1;
1559         int aliases = config()->GetAliases(
1560             rep, index, MachineRepresentation::kFloat64, &alias_base_index);
1561         while (aliases--) {
1562           int aliased_reg = alias_base_index + aliases;
1563           assigned_double_registers_->Add(aliased_reg);
1564         }
1565       }
1566       break;
1567     case MachineRepresentation::kFloat64:
1568       assigned_double_registers_->Add(index);
1569       break;
1570     default:
1571       DCHECK(!IsFloatingPoint(rep));
1572       assigned_registers_->Add(index);
1573       break;
1574   }
1575 }
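
// On targets without simple FP aliasing (kSimpleFPAliasing == false), a
// float32 register shares storage with part of a float64 register, so marking
// one as allocated must also mark every register it aliases. As an
// illustration (ARM-style aliasing is assumed here purely for the example):
// s4 and s5 both overlap d2, so GetAliases(kFloat32, 5, kFloat64, &base)
// reports a single alias with base == 2, and MarkAllocated(kFloat32, 5)
// therefore also adds register 2 to assigned_double_registers_.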
1576 
1577 bool RegisterAllocationData::IsBlockBoundary(LifetimePosition pos) const {
1578   return pos.IsFullStart() &&
1579          code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
1580              pos.ToInstructionIndex();
1581 }
1582 
1583 
1584 ConstraintBuilder::ConstraintBuilder(RegisterAllocationData* data)
1585     : data_(data) {}
1586 
1587 
1588 InstructionOperand* ConstraintBuilder::AllocateFixed(
1589     UnallocatedOperand* operand, int pos, bool is_tagged) {
1590   TRACE("Allocating fixed reg for op %d\n", operand->virtual_register());
1591   DCHECK(operand->HasFixedPolicy());
1592   InstructionOperand allocated;
1593   MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
1594   int virtual_register = operand->virtual_register();
1595   if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
1596     rep = data()->RepresentationFor(virtual_register);
1597   }
1598   if (operand->HasFixedSlotPolicy()) {
1599     allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT, rep,
1600                                  operand->fixed_slot_index());
1601   } else if (operand->HasFixedRegisterPolicy()) {
1602     DCHECK(!IsFloatingPoint(rep));
1603     allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
1604                                  operand->fixed_register_index());
1605   } else if (operand->HasFixedFPRegisterPolicy()) {
1606     DCHECK(IsFloatingPoint(rep));
1607     DCHECK_NE(InstructionOperand::kInvalidVirtualRegister, virtual_register);
1608     allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
1609                                  operand->fixed_register_index());
1610   } else {
1611     UNREACHABLE();
1612   }
1613   InstructionOperand::ReplaceWith(operand, &allocated);
1614   if (is_tagged) {
1615     TRACE("Fixed reg is tagged at %d\n", pos);
1616     Instruction* instr = code()->InstructionAt(pos);
1617     if (instr->HasReferenceMap()) {
1618       instr->reference_map()->RecordReference(*AllocatedOperand::cast(operand));
1619     }
1620   }
1621   return operand;
1622 }
1623 
1624 
1625 void ConstraintBuilder::MeetRegisterConstraints() {
1626   for (InstructionBlock* block : code()->instruction_blocks()) {
1627     MeetRegisterConstraints(block);
1628   }
1629 }
1630 
1631 
1632 void ConstraintBuilder::MeetRegisterConstraints(const InstructionBlock* block) {
1633   int start = block->first_instruction_index();
1634   int end = block->last_instruction_index();
1635   DCHECK_NE(-1, start);
1636   for (int i = start; i <= end; ++i) {
1637     MeetConstraintsBefore(i);
1638     if (i != end) MeetConstraintsAfter(i);
1639   }
1640   // Meet register constraints for the instruction at the end of the block.
1641   MeetRegisterConstraintsForLastInstructionInBlock(block);
1642 }
1643 
1644 
1645 void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
1646     const InstructionBlock* block) {
1647   int end = block->last_instruction_index();
1648   Instruction* last_instruction = code()->InstructionAt(end);
1649   for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
1650     InstructionOperand* output_operand = last_instruction->OutputAt(i);
1651     DCHECK(!output_operand->IsConstant());
1652     UnallocatedOperand* output = UnallocatedOperand::cast(output_operand);
1653     int output_vreg = output->virtual_register();
1654     TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
1655     bool assigned = false;
1656     if (output->HasFixedPolicy()) {
1657       AllocateFixed(output, -1, false);
1658       // This value is produced on the stack; we never need to spill it.
1659       if (output->IsStackSlot()) {
1660         DCHECK(LocationOperand::cast(output)->index() <
1661                data()->frame()->GetSpillSlotCount());
1662         range->SetSpillOperand(LocationOperand::cast(output));
1663         range->SetSpillStartIndex(end);
1664         assigned = true;
1665       }
1666 
1667       for (const RpoNumber& succ : block->successors()) {
1668         const InstructionBlock* successor = code()->InstructionBlockAt(succ);
1669         DCHECK(successor->PredecessorCount() == 1);
1670         int gap_index = successor->first_instruction_index();
1671         // Create an unconstrained operand for the same virtual register
1672         // and insert a gap move from the fixed output to the operand.
1673         UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg);
1674         data()->AddGapMove(gap_index, Instruction::START, *output, output_copy);
1675       }
1676     }
1677 
1678     if (!assigned) {
1679       for (const RpoNumber& succ : block->successors()) {
1680         const InstructionBlock* successor = code()->InstructionBlockAt(succ);
1681         DCHECK(successor->PredecessorCount() == 1);
1682         int gap_index = successor->first_instruction_index();
1683         range->RecordSpillLocation(allocation_zone(), gap_index, output);
1684         range->SetSpillStartIndex(gap_index);
1685       }
1686     }
1687   }
1688 }
1689 
1690 
1691 void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
1692   Instruction* first = code()->InstructionAt(instr_index);
1693   // Handle fixed temporaries.
1694   for (size_t i = 0; i < first->TempCount(); i++) {
1695     UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i));
1696     if (temp->HasFixedPolicy()) AllocateFixed(temp, instr_index, false);
1697   }
1698   // Handle constant/fixed output operands.
1699   for (size_t i = 0; i < first->OutputCount(); i++) {
1700     InstructionOperand* output = first->OutputAt(i);
1701     if (output->IsConstant()) {
1702       int output_vreg = ConstantOperand::cast(output)->virtual_register();
1703       TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
1704       range->SetSpillStartIndex(instr_index + 1);
1705       range->SetSpillOperand(output);
1706       continue;
1707     }
1708     UnallocatedOperand* first_output = UnallocatedOperand::cast(output);
1709     TopLevelLiveRange* range =
1710         data()->GetOrCreateLiveRangeFor(first_output->virtual_register());
1711     bool assigned = false;
1712     if (first_output->HasFixedPolicy()) {
1713       int output_vreg = first_output->virtual_register();
1714       UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg);
1715       bool is_tagged = code()->IsReference(output_vreg);
1716       if (first_output->HasSecondaryStorage()) {
1717         range->MarkHasPreassignedSlot();
1718         data()->preassigned_slot_ranges().push_back(
1719             std::make_pair(range, first_output->GetSecondaryStorage()));
1720       }
1721       AllocateFixed(first_output, instr_index, is_tagged);
1722 
1723       // This value is produced on the stack; we never need to spill it.
1724       if (first_output->IsStackSlot()) {
1725         DCHECK(LocationOperand::cast(first_output)->index() <
1726                data()->frame()->GetTotalFrameSlotCount());
1727         range->SetSpillOperand(LocationOperand::cast(first_output));
1728         range->SetSpillStartIndex(instr_index + 1);
1729         assigned = true;
1730       }
1731       data()->AddGapMove(instr_index + 1, Instruction::START, *first_output,
1732                          output_copy);
1733     }
1734     // Make sure we add a gap move for spilling (if we have not done
1735     // so already).
1736     if (!assigned) {
1737       range->RecordSpillLocation(allocation_zone(), instr_index + 1,
1738                                  first_output);
1739       range->SetSpillStartIndex(instr_index + 1);
1740     }
1741   }
1742 }
1743 
1744 
1745 void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
1746   Instruction* second = code()->InstructionAt(instr_index);
1747   // Handle fixed input operands of second instruction.
1748   for (size_t i = 0; i < second->InputCount(); i++) {
1749     InstructionOperand* input = second->InputAt(i);
1750     if (input->IsImmediate() || input->IsExplicit()) {
1751       continue;  // Ignore immediates and explicitly reserved registers.
1752     }
1753     UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
1754     if (cur_input->HasFixedPolicy()) {
1755       int input_vreg = cur_input->virtual_register();
1756       UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg);
1757       bool is_tagged = code()->IsReference(input_vreg);
1758       AllocateFixed(cur_input, instr_index, is_tagged);
1759       data()->AddGapMove(instr_index, Instruction::END, input_copy, *cur_input);
1760     }
1761   }
1762   // Handle "output same as input" for second instruction.
1763   for (size_t i = 0; i < second->OutputCount(); i++) {
1764     InstructionOperand* output = second->OutputAt(i);
1765     if (!output->IsUnallocated()) continue;
1766     UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
1767     if (!second_output->HasSameAsInputPolicy()) continue;
1768     DCHECK(i == 0);  // Only valid for first output.
1769     UnallocatedOperand* cur_input =
1770         UnallocatedOperand::cast(second->InputAt(0));
1771     int output_vreg = second_output->virtual_register();
1772     int input_vreg = cur_input->virtual_register();
1773     UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg);
1774     cur_input->set_virtual_register(second_output->virtual_register());
1775     MoveOperands* gap_move = data()->AddGapMove(instr_index, Instruction::END,
1776                                                 input_copy, *cur_input);
1777     if (code()->IsReference(input_vreg) && !code()->IsReference(output_vreg)) {
1778       if (second->HasReferenceMap()) {
1779         RegisterAllocationData::DelayedReference delayed_reference = {
1780             second->reference_map(), &gap_move->source()};
1781         data()->delayed_references().push_back(delayed_reference);
1782       }
1783     } else if (!code()->IsReference(input_vreg) &&
1784                code()->IsReference(output_vreg)) {
1785       // The input is assumed to immediately have a tagged representation,
1786       // before the pointer map can be used. I.e. the pointer map at the
1787       // instruction will include the output operand (whose value at the
1788       // beginning of the instruction is equal to the input operand). If
1789       // this is not desired, then the pointer map at this instruction needs
1790       // to be adjusted manually.
1791     }
1792   }
1793 }
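
// To illustrate the "output same as input" handling above with a hypothetical
// two-address instruction v7 = add(v3, v5): input 0 is rewritten to carry the
// output's virtual register (v7), and a gap move v3 -> v7 is added to the
// instruction's END gap, so the location chosen for v7 already holds v3's
// value when the add executes. The reference-map bookkeeping only matters
// when exactly one of v3 and v7 is a tagged pointer.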
1794 
1795 
1796 void ConstraintBuilder::ResolvePhis() {
1797   // Process the blocks in reverse order.
1798   for (InstructionBlock* block : base::Reversed(code()->instruction_blocks())) {
1799     ResolvePhis(block);
1800   }
1801 }
1802 
1803 
1804 void ConstraintBuilder::ResolvePhis(const InstructionBlock* block) {
1805   for (PhiInstruction* phi : block->phis()) {
1806     int phi_vreg = phi->virtual_register();
1807     RegisterAllocationData::PhiMapValue* map_value =
1808         data()->InitializePhiMap(block, phi);
1809     InstructionOperand& output = phi->output();
1810     // Map the destination operands, so the commitment phase can find them.
1811     for (size_t i = 0; i < phi->operands().size(); ++i) {
1812       InstructionBlock* cur_block =
1813           code()->InstructionBlockAt(block->predecessors()[i]);
1814       UnallocatedOperand input(UnallocatedOperand::ANY, phi->operands()[i]);
1815       MoveOperands* move = data()->AddGapMove(
1816           cur_block->last_instruction_index(), Instruction::END, input, output);
1817       map_value->AddOperand(&move->destination());
1818       DCHECK(!code()
1819                   ->InstructionAt(cur_block->last_instruction_index())
1820                   ->HasReferenceMap());
1821     }
1822     TopLevelLiveRange* live_range = data()->GetOrCreateLiveRangeFor(phi_vreg);
1823     int gap_index = block->first_instruction_index();
1824     live_range->RecordSpillLocation(allocation_zone(), gap_index, &output);
1825     live_range->SetSpillStartIndex(gap_index);
1826     // We use the phi-ness of some nodes in some later heuristics.
1827     live_range->set_is_phi(true);
1828     live_range->set_is_non_loop_phi(!block->IsLoopHeader());
1829   }
1830 }
1831 
1832 
1833 LiveRangeBuilder::LiveRangeBuilder(RegisterAllocationData* data,
1834                                    Zone* local_zone)
1835     : data_(data), phi_hints_(local_zone) {}
1836 
1837 
1838 BitVector* LiveRangeBuilder::ComputeLiveOut(const InstructionBlock* block,
1839                                             RegisterAllocationData* data) {
1840   size_t block_index = block->rpo_number().ToSize();
1841   BitVector* live_out = data->live_out_sets()[block_index];
1842   if (live_out == nullptr) {
1843     // Compute live out for the given block, not including values live on
1844     // backward successor edges.
1845     Zone* zone = data->allocation_zone();
1846     const InstructionSequence* code = data->code();
1847 
1848     live_out = new (zone) BitVector(code->VirtualRegisterCount(), zone);
1849 
1850     // Process all successor blocks.
1851     for (const RpoNumber& succ : block->successors()) {
1852       // Add values live on entry to the successor.
1853       if (succ <= block->rpo_number()) continue;
1854       BitVector* live_in = data->live_in_sets()[succ.ToSize()];
1855       if (live_in != nullptr) live_out->Union(*live_in);
1856 
1857       // All phi input operands corresponding to this successor edge are live
1858       // out from this block.
1859       const InstructionBlock* successor = code->InstructionBlockAt(succ);
1860       size_t index = successor->PredecessorIndexOf(block->rpo_number());
1861       DCHECK(index < successor->PredecessorCount());
1862       for (PhiInstruction* phi : successor->phis()) {
1863         live_out->Add(phi->operands()[index]);
1864       }
1865     }
1866     data->live_out_sets()[block_index] = live_out;
1867   }
1868   return live_out;
1869 }
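
// In dataflow terms, the computation above is (informal sketch):
//
//   live_out(B) = union over forward successors S of B (rpo(S) > rpo(B)) of
//                   live_in(S)  +  the phi inputs contributed on the B->S edge
//
// Backward (loop) edges are deliberately skipped here; values carried around
// a loop are accounted for later by ProcessLoopHeader(), which extends every
// range live into the header across the whole loop body.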
1870 
1871 
1872 void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block,
1873                                            BitVector* live_out) {
1874   // Add an interval that includes the entire block to the live range for
1875   // each live_out value.
1876   LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
1877       block->first_instruction_index());
1878   LifetimePosition end = LifetimePosition::InstructionFromInstructionIndex(
1879                              block->last_instruction_index())
1880                              .NextStart();
1881   BitVector::Iterator iterator(live_out);
1882   while (!iterator.Done()) {
1883     int operand_index = iterator.Current();
1884     TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
1885     range->AddUseInterval(start, end, allocation_zone());
1886     iterator.Advance();
1887   }
1888 }
1889 
1890 int LiveRangeBuilder::FixedFPLiveRangeID(int index, MachineRepresentation rep) {
1891   switch (rep) {
1892     case MachineRepresentation::kFloat32:
1893       return -index - 1 - config()->num_general_registers();
1894     case MachineRepresentation::kFloat64:
1895       return -index - 1 - config()->num_general_registers() -
1896              config()->num_float_registers();
1897     default:
1898       break;
1899   }
1900   UNREACHABLE();
1901   return 0;
1902 }
1903 
1904 TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) {
1905   DCHECK(index < config()->num_general_registers());
1906   TopLevelLiveRange* result = data()->fixed_live_ranges()[index];
1907   if (result == nullptr) {
1908     MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
1909     result = data()->NewLiveRange(FixedLiveRangeID(index), rep);
1910     DCHECK(result->IsFixed());
1911     result->set_assigned_register(index);
1912     data()->MarkAllocated(rep, index);
1913     data()->fixed_live_ranges()[index] = result;
1914   }
1915   return result;
1916 }
1917 
1918 TopLevelLiveRange* LiveRangeBuilder::FixedFPLiveRangeFor(
1919     int index, MachineRepresentation rep) {
1920   TopLevelLiveRange* result = nullptr;
1921   if (rep == MachineRepresentation::kFloat64) {
1922     DCHECK(index < config()->num_double_registers());
1923     result = data()->fixed_double_live_ranges()[index];
1924     if (result == nullptr) {
1925       result = data()->NewLiveRange(FixedFPLiveRangeID(index, rep), rep);
1926       DCHECK(result->IsFixed());
1927       result->set_assigned_register(index);
1928       data()->MarkAllocated(rep, index);
1929       data()->fixed_double_live_ranges()[index] = result;
1930     }
1931   } else {
1932     DCHECK(rep == MachineRepresentation::kFloat32);
1933     DCHECK(index < config()->num_float_registers());
1934     result = data()->fixed_float_live_ranges()[index];
1935     if (result == nullptr) {
1936       result = data()->NewLiveRange(FixedFPLiveRangeID(index, rep), rep);
1937       DCHECK(result->IsFixed());
1938       result->set_assigned_register(index);
1939       data()->MarkAllocated(rep, index);
1940       data()->fixed_float_live_ranges()[index] = result;
1941     }
1942   }
1943   return result;
1944 }
1945 
1946 TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand) {
1947   if (operand->IsUnallocated()) {
1948     return data()->GetOrCreateLiveRangeFor(
1949         UnallocatedOperand::cast(operand)->virtual_register());
1950   } else if (operand->IsConstant()) {
1951     return data()->GetOrCreateLiveRangeFor(
1952         ConstantOperand::cast(operand)->virtual_register());
1953   } else if (operand->IsRegister()) {
1954     return FixedLiveRangeFor(
1955         LocationOperand::cast(operand)->GetRegister().code());
1956   } else if (operand->IsFPRegister()) {
1957     LocationOperand* op = LocationOperand::cast(operand);
1958     return FixedFPLiveRangeFor(op->register_code(), op->representation());
1959   } else {
1960     return nullptr;
1961   }
1962 }
1963 
1964 
1965 UsePosition* LiveRangeBuilder::NewUsePosition(LifetimePosition pos,
1966                                               InstructionOperand* operand,
1967                                               void* hint,
1968                                               UsePositionHintType hint_type) {
1969   return new (allocation_zone()) UsePosition(pos, operand, hint, hint_type);
1970 }
1971 
1972 
1973 UsePosition* LiveRangeBuilder::Define(LifetimePosition position,
1974                                       InstructionOperand* operand, void* hint,
1975                                       UsePositionHintType hint_type) {
1976   TopLevelLiveRange* range = LiveRangeFor(operand);
1977   if (range == nullptr) return nullptr;
1978 
1979   if (range->IsEmpty() || range->Start() > position) {
1980     // Can happen if there is a definition without use.
1981     range->AddUseInterval(position, position.NextStart(), allocation_zone());
1982     range->AddUsePosition(NewUsePosition(position.NextStart()));
1983   } else {
1984     range->ShortenTo(position);
1985   }
1986   if (!operand->IsUnallocated()) return nullptr;
1987   UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
1988   UsePosition* use_pos =
1989       NewUsePosition(position, unalloc_operand, hint, hint_type);
1990   range->AddUsePosition(use_pos);
1991   return use_pos;
1992 }
1993 
1994 
1995 UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start,
1996                                    LifetimePosition position,
1997                                    InstructionOperand* operand, void* hint,
1998                                    UsePositionHintType hint_type) {
1999   TopLevelLiveRange* range = LiveRangeFor(operand);
2000   if (range == nullptr) return nullptr;
2001   UsePosition* use_pos = nullptr;
2002   if (operand->IsUnallocated()) {
2003     UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
2004     use_pos = NewUsePosition(position, unalloc_operand, hint, hint_type);
2005     range->AddUsePosition(use_pos);
2006   }
2007   range->AddUseInterval(block_start, position, allocation_zone());
2008   return use_pos;
2009 }
2010 
2011 
2012 void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
2013                                            BitVector* live) {
2014   int block_start = block->first_instruction_index();
2015   LifetimePosition block_start_position =
2016       LifetimePosition::GapFromInstructionIndex(block_start);
2017 
2018   for (int index = block->last_instruction_index(); index >= block_start;
2019        index--) {
2020     LifetimePosition curr_position =
2021         LifetimePosition::InstructionFromInstructionIndex(index);
2022     Instruction* instr = code()->InstructionAt(index);
2023     DCHECK(instr != nullptr);
2024     DCHECK(curr_position.IsInstructionPosition());
2025     // Process output, inputs, and temps of this instruction.
2026     for (size_t i = 0; i < instr->OutputCount(); i++) {
2027       InstructionOperand* output = instr->OutputAt(i);
2028       if (output->IsUnallocated()) {
2029         // Unsupported.
2030         DCHECK(!UnallocatedOperand::cast(output)->HasSlotPolicy());
2031         int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
2032         live->Remove(out_vreg);
2033       } else if (output->IsConstant()) {
2034         int out_vreg = ConstantOperand::cast(output)->virtual_register();
2035         live->Remove(out_vreg);
2036       }
2037       if (block->IsHandler() && index == block_start && output->IsAllocated() &&
2038           output->IsRegister() &&
2039           AllocatedOperand::cast(output)->GetRegister().is(
2040               v8::internal::kReturnRegister0)) {
2041         // The register defined here is blocked from gap start - it is the
2042         // exception value.
2043         // TODO(mtrofin): should we explore an explicit opcode for
2044         // the first instruction in the handler?
2045         Define(LifetimePosition::GapFromInstructionIndex(index), output);
2046       } else {
2047         Define(curr_position, output);
2048       }
2049     }
2050 
2051     if (instr->ClobbersRegisters()) {
2052       for (int i = 0; i < config()->num_allocatable_general_registers(); ++i) {
2053         int code = config()->GetAllocatableGeneralCode(i);
2054         if (!IsOutputRegisterOf(instr, code)) {
2055           TopLevelLiveRange* range = FixedLiveRangeFor(code);
2056           range->AddUseInterval(curr_position, curr_position.End(),
2057                                 allocation_zone());
2058         }
2059       }
2060     }
2061 
2062     if (instr->ClobbersDoubleRegisters()) {
2063       for (int i = 0; i < config()->num_allocatable_double_registers(); ++i) {
2064         int code = config()->GetAllocatableDoubleCode(i);
2065         if (!IsOutputFPRegisterOf(instr, MachineRepresentation::kFloat64,
2066                                   code)) {
2067           TopLevelLiveRange* range =
2068               FixedFPLiveRangeFor(code, MachineRepresentation::kFloat64);
2069           range->AddUseInterval(curr_position, curr_position.End(),
2070                                 allocation_zone());
2071         }
2072       }
2073       // Preserve fixed float registers on archs with non-simple aliasing.
2074       if (!kSimpleFPAliasing) {
2075         for (int i = 0; i < config()->num_allocatable_float_registers(); ++i) {
2076           int code = config()->GetAllocatableFloatCode(i);
2077           if (!IsOutputFPRegisterOf(instr, MachineRepresentation::kFloat32,
2078                                     code)) {
2079             TopLevelLiveRange* range =
2080                 FixedFPLiveRangeFor(code, MachineRepresentation::kFloat32);
2081             range->AddUseInterval(curr_position, curr_position.End(),
2082                                   allocation_zone());
2083           }
2084         }
2085       }
2086     }
2087 
2088     for (size_t i = 0; i < instr->InputCount(); i++) {
2089       InstructionOperand* input = instr->InputAt(i);
2090       if (input->IsImmediate() || input->IsExplicit()) {
2091         continue;  // Ignore immediates and explicitly reserved registers.
2092       }
2093       LifetimePosition use_pos;
2094       if (input->IsUnallocated() &&
2095           UnallocatedOperand::cast(input)->IsUsedAtStart()) {
2096         use_pos = curr_position;
2097       } else {
2098         use_pos = curr_position.End();
2099       }
2100 
2101       if (input->IsUnallocated()) {
2102         UnallocatedOperand* unalloc = UnallocatedOperand::cast(input);
2103         int vreg = unalloc->virtual_register();
2104         live->Add(vreg);
2105         if (unalloc->HasSlotPolicy()) {
2106           data()->GetOrCreateLiveRangeFor(vreg)->set_has_slot_use(true);
2107         }
2108       }
2109       Use(block_start_position, use_pos, input);
2110     }
2111 
2112     for (size_t i = 0; i < instr->TempCount(); i++) {
2113       InstructionOperand* temp = instr->TempAt(i);
2114       // Unsupported.
2115       DCHECK_IMPLIES(temp->IsUnallocated(),
2116                      !UnallocatedOperand::cast(temp)->HasSlotPolicy());
2117       if (instr->ClobbersTemps()) {
2118         if (temp->IsRegister()) continue;
2119         if (temp->IsUnallocated()) {
2120           UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp);
2121           if (temp_unalloc->HasFixedPolicy()) {
2122             continue;
2123           }
2124         }
2125       }
2126       Use(block_start_position, curr_position.End(), temp);
2127       Define(curr_position, temp);
2128     }
2129 
2130     // Process the moves of the instruction's gaps, making their sources live.
2131     const Instruction::GapPosition kPositions[] = {Instruction::END,
2132                                                    Instruction::START};
2133     curr_position = curr_position.PrevStart();
2134     DCHECK(curr_position.IsGapPosition());
2135     for (const Instruction::GapPosition& position : kPositions) {
2136       ParallelMove* move = instr->GetParallelMove(position);
2137       if (move == nullptr) continue;
2138       if (position == Instruction::END) {
2139         curr_position = curr_position.End();
2140       } else {
2141         curr_position = curr_position.Start();
2142       }
2143       for (MoveOperands* cur : *move) {
2144         InstructionOperand& from = cur->source();
2145         InstructionOperand& to = cur->destination();
2146         void* hint = &to;
2147         UsePositionHintType hint_type = UsePosition::HintTypeForOperand(to);
2148         UsePosition* to_use = nullptr;
2149         int phi_vreg = -1;
2150         if (to.IsUnallocated()) {
2151           int to_vreg = UnallocatedOperand::cast(to).virtual_register();
2152           TopLevelLiveRange* to_range =
2153               data()->GetOrCreateLiveRangeFor(to_vreg);
2154           if (to_range->is_phi()) {
2155             phi_vreg = to_vreg;
2156             if (to_range->is_non_loop_phi()) {
2157               hint = to_range->current_hint_position();
2158               hint_type = hint == nullptr ? UsePositionHintType::kNone
2159                                           : UsePositionHintType::kUsePos;
2160             } else {
2161               hint_type = UsePositionHintType::kPhi;
2162               hint = data()->GetPhiMapValueFor(to_vreg);
2163             }
2164           } else {
2165             if (live->Contains(to_vreg)) {
2166               to_use = Define(curr_position, &to, &from,
2167                               UsePosition::HintTypeForOperand(from));
2168               live->Remove(to_vreg);
2169             } else {
2170               cur->Eliminate();
2171               continue;
2172             }
2173           }
2174         } else {
2175           Define(curr_position, &to);
2176         }
2177         UsePosition* from_use =
2178             Use(block_start_position, curr_position, &from, hint, hint_type);
2179         // Mark range live.
2180         if (from.IsUnallocated()) {
2181           live->Add(UnallocatedOperand::cast(from).virtual_register());
2182         }
2183         // Resolve use position hints just created.
2184         if (to_use != nullptr && from_use != nullptr) {
2185           to_use->ResolveHint(from_use);
2186           from_use->ResolveHint(to_use);
2187         }
2188         DCHECK_IMPLIES(to_use != nullptr, to_use->IsResolved());
2189         DCHECK_IMPLIES(from_use != nullptr, from_use->IsResolved());
2190         // Potentially resolve phi hint.
2191         if (phi_vreg != -1) ResolvePhiHint(&from, from_use);
2192       }
2193     }
2194   }
2195 }
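
// Roughly, the backward scan above is the classic per-instruction gen/kill
// update on the running `live` set, interleaved with interval construction
// (informal sketch, not literal code):
//
//   for each instruction I, from block end to block start:
//     for each output o of I:  live -= {o};  Define(o)   // kill, shorten range
//     for each input  u of I:  live += {u};  Use(u)      // gen, extend range
//     gap-move destinations are treated as defs, gap-move sources as uses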
2196 
2197 
2198 void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
2199                                    BitVector* live) {
2200   for (PhiInstruction* phi : block->phis()) {
2201     // The live range interval already ends at the first instruction of the
2202     // block.
2203     int phi_vreg = phi->virtual_register();
2204     live->Remove(phi_vreg);
2205     // Select the hint from the first predecessor block that precedes this block
2206     // in the rpo ordering. Prefer non-deferred blocks. The enforcement of
2207     // hinting in rpo order is required because hint resolution that happens
2208     // later in the compiler pipeline visits instructions in reverse rpo,
2209     // relying on the fact that phis are encountered before their hints.
2210     const Instruction* instr = nullptr;
2211     const InstructionBlock::Predecessors& predecessors = block->predecessors();
2212     for (size_t i = 0; i < predecessors.size(); ++i) {
2213       const InstructionBlock* predecessor_block =
2214           code()->InstructionBlockAt(predecessors[i]);
2215       if (predecessor_block->rpo_number() < block->rpo_number()) {
2216         instr = GetLastInstruction(code(), predecessor_block);
2217         if (!predecessor_block->IsDeferred()) break;
2218       }
2219     }
2220     DCHECK_NOT_NULL(instr);
2221 
2222     InstructionOperand* hint = nullptr;
2223     for (MoveOperands* move : *instr->GetParallelMove(Instruction::END)) {
2224       InstructionOperand& to = move->destination();
2225       if (to.IsUnallocated() &&
2226           UnallocatedOperand::cast(to).virtual_register() == phi_vreg) {
2227         hint = &move->source();
2228         break;
2229       }
2230     }
2231     DCHECK(hint != nullptr);
2232     LifetimePosition block_start = LifetimePosition::GapFromInstructionIndex(
2233         block->first_instruction_index());
2234     UsePosition* use_pos = Define(block_start, &phi->output(), hint,
2235                                   UsePosition::HintTypeForOperand(*hint));
2236     MapPhiHint(hint, use_pos);
2237   }
2238 }
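
// As an illustration of the hint selection above: for a phi such as
//   v10 = phi(v4 from B1, v7 from B2)
// where B1 precedes this block in RPO and is not deferred, the hint becomes
// the source of the gap move "v10 <- v4" found among the END moves of B1's
// last instruction, steering v10 towards whatever location v4 ends up in.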
2239 
2240 
2241 void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block,
2242                                          BitVector* live) {
2243   DCHECK(block->IsLoopHeader());
2244   // Add a live range stretching from the first loop instruction to the last
2245   // for each value live on entry to the header.
2246   BitVector::Iterator iterator(live);
2247   LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
2248       block->first_instruction_index());
2249   LifetimePosition end = LifetimePosition::GapFromInstructionIndex(
2250                              code()->LastLoopInstructionIndex(block))
2251                              .NextFullStart();
2252   while (!iterator.Done()) {
2253     int operand_index = iterator.Current();
2254     TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
2255     range->EnsureInterval(start, end, allocation_zone());
2256     iterator.Advance();
2257   }
2258   // Insert all values into the live in sets of all blocks in the loop.
2259   for (int i = block->rpo_number().ToInt() + 1; i < block->loop_end().ToInt();
2260        ++i) {
2261     live_in_sets()[i]->Union(*live);
2262   }
2263 }
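
// For instance (hypothetical numbers): if v12 is live on entry to a loop
// spanning instructions [20, 48), its range is widened to cover the whole
// loop even if its last textual use is at instruction 25, because a value
// live at the header is also live along the back edge at the end of the loop
// body. The live_in sets of all blocks inside the loop are updated to match.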
2264 
2265 
2266 void LiveRangeBuilder::BuildLiveRanges() {
2267   // Process the blocks in reverse order.
2268   for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
2269        --block_id) {
2270     InstructionBlock* block =
2271         code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
2272     BitVector* live = ComputeLiveOut(block, data());
2273     // Initially consider all live_out values live for the entire block. We
2274     // will shorten these intervals if necessary.
2275     AddInitialIntervals(block, live);
2276     // Process the instructions in reverse order, generating and killing
2277     // live values.
2278     ProcessInstructions(block, live);
2279     // All phi output operands are killed by this block.
2280     ProcessPhis(block, live);
2281     // Now live is live_in for this block, not including values live out
2282     // on backward successor edges.
2283     if (block->IsLoopHeader()) ProcessLoopHeader(block, live);
2284     live_in_sets()[block_id] = live;
2285   }
2286   // Postprocess the ranges.
2287   for (TopLevelLiveRange* range : data()->live_ranges()) {
2288     if (range == nullptr) continue;
2289     // Give slots to all ranges with a non fixed slot use.
2290     if (range->has_slot_use() && range->HasNoSpillType()) {
2291       data()->AssignSpillRangeToLiveRange(range);
2292     }
2293     // TODO(bmeurer): This is a horrible hack to make sure that for constant
2294     // live ranges, every use requires the constant to be in a register.
2295     // Without this hack, all uses with "any" policy would get the constant
2296     // operand assigned.
2297     if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
2298       for (UsePosition* pos = range->first_pos(); pos != nullptr;
2299            pos = pos->next()) {
2300         if (pos->type() == UsePositionType::kRequiresSlot) continue;
2301         UsePositionType new_type = UsePositionType::kAny;
2302         // Can't mark phis as needing a register.
2303         if (!pos->pos().IsGapPosition()) {
2304           new_type = UsePositionType::kRequiresRegister;
2305         }
2306         pos->set_type(new_type, true);
2307       }
2308     }
2309   }
2310   for (auto preassigned : data()->preassigned_slot_ranges()) {
2311     TopLevelLiveRange* range = preassigned.first;
2312     int slot_id = preassigned.second;
2313     SpillRange* spill = range->HasSpillRange()
2314                             ? range->GetSpillRange()
2315                             : data()->AssignSpillRangeToLiveRange(range);
2316     spill->set_assigned_slot(slot_id);
2317   }
2318 #ifdef DEBUG
2319   Verify();
2320 #endif
2321 }
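
// The overall shape of BuildLiveRanges(), as an informal sketch:
//
//   for each block B in reverse RPO:
//     live = ComputeLiveOut(B)        // successors' live_in plus phi inputs
//     AddInitialIntervals(B, live)    // assume everything lives all of B
//     ProcessInstructions(B, live)    // shorten at defs, extend at uses
//     ProcessPhis(B, live)            // phi outputs are defined here
//     if B is a loop header: ProcessLoopHeader(B, live)
//     live_in(B) = live
//   then post-process: give spill ranges to ranges with slot uses, force
//   register uses for constant-backed ranges, and pin preassigned slots.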
2322 
2323 
2324 void LiveRangeBuilder::MapPhiHint(InstructionOperand* operand,
2325                                   UsePosition* use_pos) {
2326   DCHECK(!use_pos->IsResolved());
2327   auto res = phi_hints_.insert(std::make_pair(operand, use_pos));
2328   DCHECK(res.second);
2329   USE(res);
2330 }
2331 
2332 
2333 void LiveRangeBuilder::ResolvePhiHint(InstructionOperand* operand,
2334                                       UsePosition* use_pos) {
2335   auto it = phi_hints_.find(operand);
2336   if (it == phi_hints_.end()) return;
2337   DCHECK(!it->second->IsResolved());
2338   it->second->ResolveHint(use_pos);
2339 }
2340 
2341 
2342 void LiveRangeBuilder::Verify() const {
2343   for (auto& hint : phi_hints_) {
2344     CHECK(hint.second->IsResolved());
2345   }
2346   for (const TopLevelLiveRange* current : data()->live_ranges()) {
2347     if (current != nullptr && !current->IsEmpty()) {
2348       // New LiveRanges should not be split.
2349       CHECK_NULL(current->next());
2350       // General integrity check.
2351       current->Verify();
2352       const UseInterval* first = current->first_interval();
2353       if (first->next() == nullptr) continue;
2354 
2355       // Consecutive intervals should not end and start in the same block,
2356       // otherwise the intervals should have been joined, because the
2357       // variable is live throughout that block.
2358       CHECK(NextIntervalStartsInDifferentBlocks(first));
2359 
2360       for (const UseInterval* i = first->next(); i != nullptr; i = i->next()) {
2361         // Except for the first interval, the other intervals must start at
2362         // a block boundary, otherwise data wouldn't flow to them.
2363         CHECK(IntervalStartsAtBlockBoundary(i));
2364         // The last instruction of the predecessors of the block the interval
2365         // starts must be covered by the range.
2366         CHECK(IntervalPredecessorsCoveredByRange(i, current));
2367         if (i->next() != nullptr) {
2368           // Check the consecutive intervals property, except for the last
2369           // interval, where it doesn't apply.
2370           CHECK(NextIntervalStartsInDifferentBlocks(i));
2371         }
2372       }
2373     }
2374   }
2375 }
2376 
2377 bool LiveRangeBuilder::IntervalStartsAtBlockBoundary(
2378     const UseInterval* interval) const {
2379   LifetimePosition start = interval->start();
2380   if (!start.IsFullStart()) return false;
2381   int instruction_index = start.ToInstructionIndex();
2382   const InstructionBlock* block =
2383       data()->code()->GetInstructionBlock(instruction_index);
2384   return block->first_instruction_index() == instruction_index;
2385 }
2386 
2387 bool LiveRangeBuilder::IntervalPredecessorsCoveredByRange(
2388     const UseInterval* interval, const TopLevelLiveRange* range) const {
2389   LifetimePosition start = interval->start();
2390   int instruction_index = start.ToInstructionIndex();
2391   const InstructionBlock* block =
2392       data()->code()->GetInstructionBlock(instruction_index);
2393   for (RpoNumber pred_index : block->predecessors()) {
2394     const InstructionBlock* predecessor =
2395         data()->code()->InstructionBlockAt(pred_index);
2396     LifetimePosition last_pos = LifetimePosition::GapFromInstructionIndex(
2397         predecessor->last_instruction_index());
2398     last_pos = last_pos.NextStart().End();
2399     if (!range->Covers(last_pos)) return false;
2400   }
2401   return true;
2402 }
2403 
2404 bool LiveRangeBuilder::NextIntervalStartsInDifferentBlocks(
2405     const UseInterval* interval) const {
2406   DCHECK_NOT_NULL(interval->next());
2407   LifetimePosition end = interval->end();
2408   LifetimePosition next_start = interval->next()->start();
2409   // Since end is not covered, but the previous position is, move back a
2410   // position.
2411   end = end.IsStart() ? end.PrevStart().End() : end.Start();
2412   int last_covered_index = end.ToInstructionIndex();
2413   const InstructionBlock* block =
2414       data()->code()->GetInstructionBlock(last_covered_index);
2415   const InstructionBlock* next_block =
2416       data()->code()->GetInstructionBlock(next_start.ToInstructionIndex());
2417   return block->rpo_number() < next_block->rpo_number();
2418 }
2419 
2420 RegisterAllocator::RegisterAllocator(RegisterAllocationData* data,
2421                                      RegisterKind kind)
2422     : data_(data),
2423       mode_(kind),
2424       num_registers_(GetRegisterCount(data->config(), kind)),
2425       num_allocatable_registers_(
2426           GetAllocatableRegisterCount(data->config(), kind)),
2427       allocatable_register_codes_(
2428           GetAllocatableRegisterCodes(data->config(), kind)) {}
2429 
2430 LifetimePosition RegisterAllocator::GetSplitPositionForInstruction(
2431     const LiveRange* range, int instruction_index) {
2432   LifetimePosition ret = LifetimePosition::Invalid();
2433 
2434   ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
2435   if (range->Start() >= ret || ret >= range->End()) {
2436     return LifetimePosition::Invalid();
2437   }
2438   return ret;
2439 }
2440 
2441 
2442 void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand(
2443     bool operands_only) {
2444   size_t initial_range_count = data()->live_ranges().size();
2445   for (size_t i = 0; i < initial_range_count; ++i) {
2446     TopLevelLiveRange* range = data()->live_ranges()[i];
2447     if (!CanProcessRange(range)) continue;
2448     if (range->HasNoSpillType() || (operands_only && range->HasSpillRange())) {
2449       continue;
2450     }
2451 
2452     LifetimePosition start = range->Start();
2453     TRACE("Live range %d:%d is defined by a spill operand.\n",
2454           range->TopLevel()->vreg(), range->relative_id());
2455     LifetimePosition next_pos = start;
2456     if (next_pos.IsGapPosition()) {
2457       next_pos = next_pos.NextStart();
2458     }
2459     UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
2460     // If the range already has a spill operand and it doesn't need a
2461     // register immediately, split it and spill the first part of the range.
2462     if (pos == nullptr) {
2463       Spill(range);
2464     } else if (pos->pos() > range->Start().NextStart()) {
2465       // Do not spill the live range eagerly if the use position that can
2466       // benefit from the register is too close to the start of the live range.
2467       LifetimePosition split_pos = GetSplitPositionForInstruction(
2468           range, pos->pos().ToInstructionIndex());
2469       // There is no place to split, so we can't split and spill.
2470       if (!split_pos.IsValid()) continue;
2471 
2472       split_pos =
2473           FindOptimalSplitPos(range->Start().NextFullStart(), split_pos);
2474 
2475       SplitRangeAt(range, split_pos);
2476       Spill(range);
2477     }
2478   }
2479 }
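
// A typical case for the splitting above: a value defined directly by a spill
// operand (say, a parameter passed on the stack) whose first
// register-beneficial use comes much later. Rather than holding a register
// across that whole distance, the range is split just before that use and the
// leading part is spilled, since its value already lives in memory.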
2480 
2481 
2482 LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
2483                                            LifetimePosition pos) {
2484   DCHECK(!range->TopLevel()->IsFixed());
2485   TRACE("Splitting live range %d:%d at %d\n", range->TopLevel()->vreg(),
2486         range->relative_id(), pos.value());
2487 
2488   if (pos <= range->Start()) return range;
2489 
2490   // We can't properly connect live ranges if splitting occurred at the end
2491   // of a block.
2492   DCHECK(pos.IsStart() || pos.IsGapPosition() ||
2493          (GetInstructionBlock(code(), pos)->last_instruction_index() !=
2494           pos.ToInstructionIndex()));
2495 
2496   LiveRange* result = range->SplitAt(pos, allocation_zone());
2497   return result;
2498 }
2499 
2500 
2501 LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
2502                                            LifetimePosition start,
2503                                            LifetimePosition end) {
2504   DCHECK(!range->TopLevel()->IsFixed());
2505   TRACE("Splitting live range %d:%d in position between [%d, %d]\n",
2506         range->TopLevel()->vreg(), range->relative_id(), start.value(),
2507         end.value());
2508 
2509   LifetimePosition split_pos = FindOptimalSplitPos(start, end);
2510   DCHECK(split_pos >= start);
2511   return SplitRangeAt(range, split_pos);
2512 }
2513 
2514 
2515 LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
2516                                                         LifetimePosition end) {
2517   int start_instr = start.ToInstructionIndex();
2518   int end_instr = end.ToInstructionIndex();
2519   DCHECK(start_instr <= end_instr);
2520 
2521   // We have no choice.
2522   if (start_instr == end_instr) return end;
2523 
2524   const InstructionBlock* start_block = GetInstructionBlock(code(), start);
2525   const InstructionBlock* end_block = GetInstructionBlock(code(), end);
2526 
2527   if (end_block == start_block) {
2528     // The interval is split in the same basic block. Split at the latest
2529     // possible position.
2530     return end;
2531   }
2532 
2533   const InstructionBlock* block = end_block;
2534   // Find header of outermost loop.
2535   do {
2536     const InstructionBlock* loop = GetContainingLoop(code(), block);
2537     if (loop == nullptr ||
2538         loop->rpo_number().ToInt() <= start_block->rpo_number().ToInt()) {
2539       // No more loops or loop starts before the lifetime start.
2540       break;
2541     }
2542     block = loop;
2543   } while (true);
2544 
2545   // We did not find any suitable outer loop. Split at the latest possible
2546   // position unless end_block is a loop header itself.
2547   if (block == end_block && !end_block->IsLoopHeader()) return end;
2548 
2549   return LifetimePosition::GapFromInstructionIndex(
2550       block->first_instruction_index());
2551 }
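
// For instance (hypothetical layout): if `start` lies before a loop and `end`
// falls inside it, the walk above moves from end_block out through enclosing
// loop headers that begin after `start`, and the split position returned is
// the gap at the first instruction of the outermost such header. Splitting
// there keeps the split, and any spill or reload moves it later implies, out
// of the loop body.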
2552 
2553 
2554 LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
2555     LiveRange* range, LifetimePosition pos) {
2556   const InstructionBlock* block = GetInstructionBlock(code(), pos.Start());
2557   const InstructionBlock* loop_header =
2558       block->IsLoopHeader() ? block : GetContainingLoop(code(), block);
2559 
2560   if (loop_header == nullptr) return pos;
2561 
2562   const UsePosition* prev_use =
2563       range->PreviousUsePositionRegisterIsBeneficial(pos);
2564 
2565   while (loop_header != nullptr) {
2566     // We are going to spill the live range inside the loop.
2567     // If possible, try to move the spilling position backwards to the loop header.
2568     // This will reduce the number of memory moves on the back edge.
2569     LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex(
2570         loop_header->first_instruction_index());
2571 
2572     if (range->Covers(loop_start)) {
2573       if (prev_use == nullptr || prev_use->pos() < loop_start) {
2574         // No register beneficial use inside the loop before the pos.
2575         pos = loop_start;
2576       }
2577     }
2578 
2579     // Try hoisting out to an outer loop.
2580     loop_header = GetContainingLoop(code(), loop_header);
2581   }
2582 
2583   return pos;
2584 }
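
// Concretely: when asked to spill at a position inside a loop, the code above
// hoists the spill up to the start of the outermost eligible loop header,
// provided the range covers that loop start and has no register-beneficial
// use between the header and the requested position. Spilling once before
// the loop is cheaper than repeating the memory move on every back edge.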
2585 
2586 
2587 void RegisterAllocator::Spill(LiveRange* range) {
2588   DCHECK(!range->spilled());
2589   TopLevelLiveRange* first = range->TopLevel();
2590   TRACE("Spilling live range %d:%d\n", first->vreg(), range->relative_id());
2591 
2592   if (first->HasNoSpillType()) {
2593     data()->AssignSpillRangeToLiveRange(first);
2594   }
2595   range->Spill();
2596 }
2597 
2598 const char* RegisterAllocator::RegisterName(int register_code) const {
2599   if (mode() == GENERAL_REGISTERS) {
2600     return data()->config()->GetGeneralRegisterName(register_code);
2601   } else {
2602     return data()->config()->GetDoubleRegisterName(register_code);
2603   }
2604 }
2605 
2606 
2607 LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
2608                                          RegisterKind kind, Zone* local_zone)
2609     : RegisterAllocator(data, kind),
2610       unhandled_live_ranges_(local_zone),
2611       active_live_ranges_(local_zone),
2612       inactive_live_ranges_(local_zone) {
2613   unhandled_live_ranges().reserve(
2614       static_cast<size_t>(code()->VirtualRegisterCount() * 2));
2615   active_live_ranges().reserve(8);
2616   inactive_live_ranges().reserve(8);
2617   // TryAllocateFreeReg and AllocateBlockedReg assume this
2618   // when allocating local arrays.
2619   DCHECK(RegisterConfiguration::kMaxFPRegisters >=
2620          this->data()->config()->num_general_registers());
2621 }
2622 
2623 
2624 void LinearScanAllocator::AllocateRegisters() {
2625   DCHECK(unhandled_live_ranges().empty());
2626   DCHECK(active_live_ranges().empty());
2627   DCHECK(inactive_live_ranges().empty());
2628 
2629   SplitAndSpillRangesDefinedByMemoryOperand(code()->VirtualRegisterCount() <=
2630                                             num_allocatable_registers());
2631 
2632   for (TopLevelLiveRange* range : data()->live_ranges()) {
2633     if (!CanProcessRange(range)) continue;
2634     for (LiveRange* to_add = range; to_add != nullptr;
2635          to_add = to_add->next()) {
2636       if (!to_add->spilled()) {
2637         AddToUnhandledUnsorted(to_add);
2638       }
2639     }
2640   }
2641   SortUnhandled();
2642   DCHECK(UnhandledIsSorted());
2643 
2644   if (mode() == GENERAL_REGISTERS) {
2645     for (TopLevelLiveRange* current : data()->fixed_live_ranges()) {
2646       if (current != nullptr) AddToInactive(current);
2647     }
2648   } else {
2649     for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) {
2650       if (current != nullptr) AddToInactive(current);
2651     }
2652     for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
2653       if (current != nullptr) AddToInactive(current);
2654     }
2655   }
2656 
2657   while (!unhandled_live_ranges().empty()) {
2658     DCHECK(UnhandledIsSorted());
2659     LiveRange* current = unhandled_live_ranges().back();
2660     unhandled_live_ranges().pop_back();
2661     DCHECK(UnhandledIsSorted());
2662     LifetimePosition position = current->Start();
2663 #ifdef DEBUG
2664     allocation_finger_ = position;
2665 #endif
2666     TRACE("Processing interval %d:%d start=%d\n", current->TopLevel()->vreg(),
2667           current->relative_id(), position.value());
2668 
2669     if (current->IsTopLevel() && TryReuseSpillForPhi(current->TopLevel()))
2670       continue;
2671 
2672     for (size_t i = 0; i < active_live_ranges().size(); ++i) {
2673       LiveRange* cur_active = active_live_ranges()[i];
2674       if (cur_active->End() <= position) {
2675         ActiveToHandled(cur_active);
2676         --i;  // The live range was removed from the list of active live ranges.
2677       } else if (!cur_active->Covers(position)) {
2678         ActiveToInactive(cur_active);
2679         --i;  // The live range was removed from the list of active live ranges.
2680       }
2681     }
2682 
2683     for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
2684       LiveRange* cur_inactive = inactive_live_ranges()[i];
2685       if (cur_inactive->End() <= position) {
2686         InactiveToHandled(cur_inactive);
2687         --i;  // Live range was removed from the list of inactive live ranges.
2688       } else if (cur_inactive->Covers(position)) {
2689         InactiveToActive(cur_inactive);
2690         --i;  // Live range was removed from the list of inactive live ranges.
2691       }
2692     }
2693 
2694     DCHECK(!current->HasRegisterAssigned() && !current->spilled());
2695 
2696     bool result = TryAllocateFreeReg(current);
2697     if (!result) AllocateBlockedReg(current);
2698     if (current->HasRegisterAssigned()) {
2699       AddToActive(current);
2700     }
2701   }
2702 }
2703 
2704 
2705 void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
2706                                                        int reg) {
2707   data()->MarkAllocated(range->representation(), reg);
2708   range->set_assigned_register(reg);
2709   range->SetUseHints(reg);
2710   if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
2711     data()->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg);
2712   }
2713 }
2714 
2715 
2716 void LinearScanAllocator::AddToActive(LiveRange* range) {
2717   TRACE("Add live range %d:%d to active\n", range->TopLevel()->vreg(),
2718         range->relative_id());
2719   active_live_ranges().push_back(range);
2720 }
2721 
2722 
2723 void LinearScanAllocator::AddToInactive(LiveRange* range) {
2724   TRACE("Add live range %d:%d to inactive\n", range->TopLevel()->vreg(),
2725         range->relative_id());
2726   inactive_live_ranges().push_back(range);
2727 }
2728 
2729 
2730 void LinearScanAllocator::AddToUnhandledSorted(LiveRange* range) {
2731   if (range == nullptr || range->IsEmpty()) return;
2732   DCHECK(!range->HasRegisterAssigned() && !range->spilled());
2733   DCHECK(allocation_finger_ <= range->Start());
2734   for (int i = static_cast<int>(unhandled_live_ranges().size() - 1); i >= 0;
2735        --i) {
2736     LiveRange* cur_range = unhandled_live_ranges().at(i);
2737     if (!range->ShouldBeAllocatedBefore(cur_range)) continue;
2738     TRACE("Add live range %d:%d to unhandled at %d\n",
2739           range->TopLevel()->vreg(), range->relative_id(), i + 1);
2740     auto it = unhandled_live_ranges().begin() + (i + 1);
2741     unhandled_live_ranges().insert(it, range);
2742     DCHECK(UnhandledIsSorted());
2743     return;
2744   }
2745   TRACE("Add live range %d:%d to unhandled at start\n",
2746         range->TopLevel()->vreg(), range->relative_id());
2747   unhandled_live_ranges().insert(unhandled_live_ranges().begin(), range);
2748   DCHECK(UnhandledIsSorted());
2749 }
2750 
2751 
2752 void LinearScanAllocator::AddToUnhandledUnsorted(LiveRange* range) {
2753   if (range == nullptr || range->IsEmpty()) return;
2754   DCHECK(!range->HasRegisterAssigned() && !range->spilled());
2755   TRACE("Add live range %d:%d to unhandled unsorted at end\n",
2756         range->TopLevel()->vreg(), range->relative_id());
2757   unhandled_live_ranges().push_back(range);
2758 }
2759 
2760 
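// std::sort comparator: |a| is placed before |b| when |b| should be allocated
// first; ties are broken by virtual register number for determinism. The
// result is a vector ordered by decreasing start position, so the earliest
// starting ranges sit at the back, ready to be popped first.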
2761 static bool UnhandledSortHelper(LiveRange* a, LiveRange* b) {
2762   DCHECK(!a->ShouldBeAllocatedBefore(b) || !b->ShouldBeAllocatedBefore(a));
2763   if (a->ShouldBeAllocatedBefore(b)) return false;
2764   if (b->ShouldBeAllocatedBefore(a)) return true;
2765   return a->TopLevel()->vreg() < b->TopLevel()->vreg();
2766 }
2767 
2768 
2769 // Sort the unhandled live ranges so that the ranges to be processed first are
2770 // at the end of the array list.  This is convenient for the register allocation
2771 // algorithm because it is efficient to remove elements from the end.
2772 void LinearScanAllocator::SortUnhandled() {
2773   TRACE("Sort unhandled\n");
2774   std::sort(unhandled_live_ranges().begin(), unhandled_live_ranges().end(),
2775             &UnhandledSortHelper);
2776 }
2777 
2778 
2779 bool LinearScanAllocator::UnhandledIsSorted() {
2780   size_t len = unhandled_live_ranges().size();
2781   for (size_t i = 1; i < len; i++) {
2782     LiveRange* a = unhandled_live_ranges().at(i - 1);
2783     LiveRange* b = unhandled_live_ranges().at(i);
2784     if (a->Start() < b->Start()) return false;
2785   }
2786   return true;
2787 }
2788 
2789 
2790 void LinearScanAllocator::ActiveToHandled(LiveRange* range) {
2791   RemoveElement(&active_live_ranges(), range);
2792   TRACE("Moving live range %d:%d from active to handled\n",
2793         range->TopLevel()->vreg(), range->relative_id());
2794 }
2795 
2796 
2797 void LinearScanAllocator::ActiveToInactive(LiveRange* range) {
2798   RemoveElement(&active_live_ranges(), range);
2799   inactive_live_ranges().push_back(range);
2800   TRACE("Moving live range %d:%d from active to inactive\n",
2801         range->TopLevel()->vreg(), range->relative_id());
2802 }
2803 
2804 
2805 void LinearScanAllocator::InactiveToHandled(LiveRange* range) {
2806   RemoveElement(&inactive_live_ranges(), range);
2807   TRACE("Moving live range %d:%d from inactive to handled\n",
2808         range->TopLevel()->vreg(), range->relative_id());
2809 }
2810 
2811 
2812 void LinearScanAllocator::InactiveToActive(LiveRange* range) {
2813   RemoveElement(&inactive_live_ranges(), range);
2814   active_live_ranges().push_back(range);
2815   TRACE("Moving live range %d:%d from inactive to active\n",
2816         range->TopLevel()->vreg(), range->relative_id());
2817 }
2818 
2819 
2820 bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
2821   int num_regs = num_registers();
2822   int num_codes = num_allocatable_registers();
2823   const int* codes = allocatable_register_codes();
2824   if (!kSimpleFPAliasing &&
2825       (current->representation() == MachineRepresentation::kFloat32)) {
2826     num_regs = data()->config()->num_float_registers();
2827     num_codes = data()->config()->num_allocatable_float_registers();
2828     codes = data()->config()->allocatable_float_codes();
2829   }
2830   LifetimePosition free_until_pos[RegisterConfiguration::kMaxFPRegisters];
2831   for (int i = 0; i < num_regs; i++) {
2832     free_until_pos[i] = LifetimePosition::MaxPosition();
2833   }
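  // free_until_pos[r] records how long register r stays usable for |current|:
  // a register held by an active range is unusable from position 0 on, while
  // a register held by an inactive range only becomes unusable at the first
  // position where that inactive range intersects |current| (if it does at
  // all).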
2834 
2835   for (LiveRange* cur_active : active_live_ranges()) {
2836     int cur_reg = cur_active->assigned_register();
2837     if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
2838       free_until_pos[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
2839       TRACE("Register %s is free until pos %d (1)\n", RegisterName(cur_reg),
2840             LifetimePosition::GapFromInstructionIndex(0).value());
2841     } else {
2842       int alias_base_index = -1;
2843       int aliases = data()->config()->GetAliases(
2844           cur_active->representation(), cur_reg, current->representation(),
2845           &alias_base_index);
2846       while (aliases--) {
2847         int aliased_reg = alias_base_index + aliases;
2848         free_until_pos[aliased_reg] =
2849             LifetimePosition::GapFromInstructionIndex(0);
2850       }
2851     }
2852   }
2853 
2854   for (LiveRange* cur_inactive : inactive_live_ranges()) {
2855     DCHECK(cur_inactive->End() > current->Start());
2856     LifetimePosition next_intersection =
2857         cur_inactive->FirstIntersection(current);
2858     if (!next_intersection.IsValid()) continue;
2859     int cur_reg = cur_inactive->assigned_register();
2860     if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
2861       free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
2862       TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
2863             Min(free_until_pos[cur_reg], next_intersection).value());
2864     } else {
2865       int alias_base_index = -1;
2866       int aliases = data()->config()->GetAliases(
2867           cur_inactive->representation(), cur_reg, current->representation(),
2868           &alias_base_index);
2869       while (aliases--) {
2870         int aliased_reg = alias_base_index + aliases;
2871         free_until_pos[aliased_reg] =
2872             Min(free_until_pos[aliased_reg], next_intersection);
2873       }
2874     }
2875   }
2876 
2877   int hint_register;
2878   if (current->FirstHintPosition(&hint_register) != nullptr) {
2879     TRACE(
2880         "Found reg hint %s (free until [%d) for live range %d:%d (end %d[).\n",
2881         RegisterName(hint_register), free_until_pos[hint_register].value(),
2882         current->TopLevel()->vreg(), current->relative_id(),
2883         current->End().value());
2884 
2885     // The desired register is free until the end of the current live range.
2886     if (free_until_pos[hint_register] >= current->End()) {
2887       TRACE("Assigning preferred reg %s to live range %d:%d\n",
2888             RegisterName(hint_register), current->TopLevel()->vreg(),
2889             current->relative_id());
2890       SetLiveRangeAssignedRegister(current, hint_register);
2891       return true;
2892     }
2893   }
2894 
2895   // Find the register which stays free for the longest time.
2896   int reg = codes[0];
2897   for (int i = 1; i < num_codes; ++i) {
2898     int code = codes[i];
2899     if (free_until_pos[code] > free_until_pos[reg]) {
2900       reg = code;
2901     }
2902   }
2903 
2904   LifetimePosition pos = free_until_pos[reg];
2905 
2906   if (pos <= current->Start()) {
2907     // All registers are blocked.
2908     return false;
2909   }
2910 
2911   if (pos < current->End()) {
2912     // Register reg is available at the range start but becomes blocked before
2913     // the range end. Split current at position where it becomes blocked.
2914     LiveRange* tail = SplitRangeAt(current, pos);
2915     AddToUnhandledSorted(tail);
2916   }
2917 
2918   // Register reg is available at the range start and is free until the range
2919   // end.
2920   DCHECK(pos >= current->End());
2921   TRACE("Assigning free reg %s to live range %d:%d\n", RegisterName(reg),
2922         current->TopLevel()->vreg(), current->relative_id());
2923   SetLiveRangeAssignedRegister(current, reg);
2924 
2925   return true;
2926 }
2927 
2928 
2929 void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
2930   UsePosition* register_use = current->NextRegisterPosition(current->Start());
2931   if (register_use == nullptr) {
2932     // There is no use in the current live range that requires a register.
2933     // We can just spill it.
2934     Spill(current);
2935     return;
2936   }
2937 
2938   int num_regs = num_registers();
2939   int num_codes = num_allocatable_registers();
2940   const int* codes = allocatable_register_codes();
2941   if (!kSimpleFPAliasing &&
2942       (current->representation() == MachineRepresentation::kFloat32)) {
2943     num_regs = data()->config()->num_float_registers();
2944     num_codes = data()->config()->num_allocatable_float_registers();
2945     codes = data()->config()->allocatable_float_codes();
2946   }
2947 
2948   LifetimePosition use_pos[RegisterConfiguration::kMaxFPRegisters];
2949   LifetimePosition block_pos[RegisterConfiguration::kMaxFPRegisters];
2950   for (int i = 0; i < num_regs; i++) {
2951     use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
2952   }
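  // For every candidate register r:
  //  - use_pos[r] is the next position at which r is actually needed by the
  //    range currently occupying it; up to that point the occupant could be
  //    spilled to free r.
  //  - block_pos[r] is the position at which r becomes unconditionally
  //    unavailable because it is held by a fixed range or by a range that
  //    cannot be spilled.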
2953 
2954   for (LiveRange* range : active_live_ranges()) {
2955     int cur_reg = range->assigned_register();
2956     bool is_fixed_or_cant_spill =
2957         range->TopLevel()->IsFixed() || !range->CanBeSpilled(current->Start());
2958     if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
2959       if (is_fixed_or_cant_spill) {
2960         block_pos[cur_reg] = use_pos[cur_reg] =
2961             LifetimePosition::GapFromInstructionIndex(0);
2962       } else {
2963         UsePosition* next_use =
2964             range->NextUsePositionRegisterIsBeneficial(current->Start());
2965         if (next_use == nullptr) {
2966           use_pos[cur_reg] = range->End();
2967         } else {
2968           use_pos[cur_reg] = next_use->pos();
2969         }
2970       }
2971     } else {
2972       int alias_base_index = -1;
2973       int aliases = data()->config()->GetAliases(
2974           range->representation(), cur_reg, current->representation(),
2975           &alias_base_index);
2976       while (aliases--) {
2977         int aliased_reg = alias_base_index + aliases;
2978         if (is_fixed_or_cant_spill) {
2979           block_pos[aliased_reg] = use_pos[aliased_reg] =
2980               LifetimePosition::GapFromInstructionIndex(0);
2981         } else {
2982           UsePosition* next_use =
2983               range->NextUsePositionRegisterIsBeneficial(current->Start());
2984           if (next_use == nullptr) {
2985             use_pos[aliased_reg] = range->End();
2986           } else {
2987             use_pos[aliased_reg] = next_use->pos();
2988           }
2989         }
2990       }
2991     }
2992   }
2993 
2994   for (LiveRange* range : inactive_live_ranges()) {
2995     DCHECK(range->End() > current->Start());
2996     LifetimePosition next_intersection = range->FirstIntersection(current);
2997     if (!next_intersection.IsValid()) continue;
2998     int cur_reg = range->assigned_register();
2999     bool is_fixed = range->TopLevel()->IsFixed();
3000     if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
3001       if (is_fixed) {
3002         block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
3003         use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
3004       } else {
3005         use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
3006       }
3007     } else {
3008       int alias_base_index = -1;
3009       int aliases = data()->config()->GetAliases(
3010           range->representation(), cur_reg, current->representation(),
3011           &alias_base_index);
3012       while (aliases--) {
3013         int aliased_reg = alias_base_index + aliases;
3014         if (is_fixed) {
3015           block_pos[aliased_reg] =
3016               Min(block_pos[aliased_reg], next_intersection);
3017           use_pos[aliased_reg] =
3018               Min(block_pos[aliased_reg], use_pos[aliased_reg]);
3019         } else {
3020           use_pos[aliased_reg] = Min(use_pos[aliased_reg], next_intersection);
3021         }
3022       }
3023     }
3024   }
3025 
3026   int reg = codes[0];
3027   for (int i = 1; i < num_codes; ++i) {
3028     int code = codes[i];
3029     if (use_pos[code] > use_pos[reg]) {
3030       reg = code;
3031     }
3032   }
3033 
3034   LifetimePosition pos = use_pos[reg];
3035 
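  // If every candidate register is needed again before |current|'s first use
  // that requires a register, assigning one now would not help: spill
  // |current| up to that first register use instead, provided a gap position
  // exists in between where the spill move can be inserted (otherwise take
  // the register anyway and evict the intersecting ranges).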
3036   if (pos < register_use->pos()) {
3037     if (LifetimePosition::ExistsGapPositionBetween(current->Start(),
3038                                                    register_use->pos())) {
3039       SpillBetween(current, current->Start(), register_use->pos());
3040     } else {
3041       SetLiveRangeAssignedRegister(current, reg);
3042       SplitAndSpillIntersecting(current);
3043     }
3044     return;
3045   }
3046 
3047   if (block_pos[reg] < current->End()) {
3048     // Register becomes blocked before the current range end. Split before that
3049     // position.
3050     LiveRange* tail =
3051         SplitBetween(current, current->Start(), block_pos[reg].Start());
3052     AddToUnhandledSorted(tail);
3053   }
3054 
3055   // Register reg is not blocked for the whole range.
3056   DCHECK(block_pos[reg] >= current->End());
3057   TRACE("Assigning blocked reg %s to live range %d:%d\n", RegisterName(reg),
3058         current->TopLevel()->vreg(), current->relative_id());
3059   SetLiveRangeAssignedRegister(current, reg);
3060 
3061   // This register was not free. Thus we need to find and spill
3062   // parts of active and inactive live regions that use the same register
3063   // at the same lifetime positions as current.
3064   SplitAndSpillIntersecting(current);
3065 }
3066 
3067 
3068 void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
3069   DCHECK(current->HasRegisterAssigned());
3070   int reg = current->assigned_register();
3071   LifetimePosition split_pos = current->Start();
3072   for (size_t i = 0; i < active_live_ranges().size(); ++i) {
3073     LiveRange* range = active_live_ranges()[i];
3074     if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
3075       if (range->assigned_register() != reg) continue;
3076     } else {
3077       if (!data()->config()->AreAliases(current->representation(), reg,
3078                                         range->representation(),
3079                                         range->assigned_register())) {
3080         continue;
3081       }
3082     }
3083 
3084     UsePosition* next_pos = range->NextRegisterPosition(current->Start());
3085     LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
3086     if (next_pos == nullptr) {
3087       SpillAfter(range, spill_pos);
3088     } else {
3089       // When spilling between spill_pos and next_pos ensure that the range
3090       // remains spilled at least until the start of the current live range.
3091       // This guarantees that we will not introduce new unhandled ranges that
3092       // start before the current range as this violates allocation invariants
3093       // and will lead to an inconsistent state of active and inactive
3094       // live-ranges: ranges are allocated in order of their start positions,
3095       // ranges are retired from active/inactive when the start of the
3096       // current live-range is larger than their end.
3097       DCHECK(LifetimePosition::ExistsGapPositionBetween(current->Start(),
3098                                                         next_pos->pos()));
3099       SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
3100     }
3101     ActiveToHandled(range);
3102     --i;
3103   }
3104 
3105   for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
3106     LiveRange* range = inactive_live_ranges()[i];
3107     DCHECK(range->End() > current->Start());
3108     if (range->TopLevel()->IsFixed()) continue;
3109     if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
3110       if (range->assigned_register() != reg) continue;
3111     } else {
3112       if (!data()->config()->AreAliases(current->representation(), reg,
3113                                         range->representation(),
3114                                         range->assigned_register()))
3115         continue;
3116     }
3117 
3118     LifetimePosition next_intersection = range->FirstIntersection(current);
3119     if (next_intersection.IsValid()) {
3120       UsePosition* next_pos = range->NextRegisterPosition(current->Start());
3121       if (next_pos == nullptr) {
3122         SpillAfter(range, split_pos);
3123       } else {
3124         next_intersection = Min(next_intersection, next_pos->pos());
3125         SpillBetween(range, split_pos, next_intersection);
3126       }
3127       InactiveToHandled(range);
3128       --i;
3129     }
3130   }
3131 }
3132 
3133 
3134 bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
3135   if (!range->is_phi()) return false;
3136 
3137   DCHECK(!range->HasSpillOperand());
3138   RegisterAllocationData::PhiMapValue* phi_map_value =
3139       data()->GetPhiMapValueFor(range);
3140   const PhiInstruction* phi = phi_map_value->phi();
3141   const InstructionBlock* block = phi_map_value->block();
3142   // Count the number of spilled operands.
3143   size_t spilled_count = 0;
3144   LiveRange* first_op = nullptr;
3145   for (size_t i = 0; i < phi->operands().size(); i++) {
3146     int op = phi->operands()[i];
3147     LiveRange* op_range = data()->GetOrCreateLiveRangeFor(op);
3148     if (!op_range->TopLevel()->HasSpillRange()) continue;
3149     const InstructionBlock* pred =
3150         code()->InstructionBlockAt(block->predecessors()[i]);
3151     LifetimePosition pred_end =
3152         LifetimePosition::InstructionFromInstructionIndex(
3153             pred->last_instruction_index());
3154     while (op_range != nullptr && !op_range->CanCover(pred_end)) {
3155       op_range = op_range->next();
3156     }
3157     if (op_range != nullptr && op_range->spilled()) {
3158       spilled_count++;
3159       if (first_op == nullptr) {
3160         first_op = op_range->TopLevel();
3161       }
3162     }
3163   }
3164 
3165   // Only continue if more than half of the operands are spilled.
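  // (For example, a phi with four operands qualifies only when at least three
  // of them are spilled: 3 * 2 > 4, whereas 2 * 2 <= 4.)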
3166   if (spilled_count * 2 <= phi->operands().size()) {
3167     return false;
3168   }
3169 
3170   // Try to merge the spilled operands and count the number of merged spilled
3171   // operands.
3172   DCHECK(first_op != nullptr);
3173   SpillRange* first_op_spill = first_op->TopLevel()->GetSpillRange();
3174   size_t num_merged = 1;
3175   for (size_t i = 1; i < phi->operands().size(); i++) {
3176     int op = phi->operands()[i];
3177     TopLevelLiveRange* op_range = data()->live_ranges()[op];
3178     if (!op_range->HasSpillRange()) continue;
3179     SpillRange* op_spill = op_range->GetSpillRange();
3180     if (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill)) {
3181       num_merged++;
3182     }
3183   }
3184 
3185   // Only continue if enough operands could be merged to the
3186   // same spill slot.
3187   if (num_merged * 2 <= phi->operands().size() ||
3188       AreUseIntervalsIntersecting(first_op_spill->interval(),
3189                                   range->first_interval())) {
3190     return false;
3191   }
3192 
3193   // If the range does not need register soon, spill it to the merged
3194   // spill range.
3195   LifetimePosition next_pos = range->Start();
3196   if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
3197   UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
3198   if (pos == nullptr) {
3199     SpillRange* spill_range =
3200         range->TopLevel()->HasSpillRange()
3201             ? range->TopLevel()->GetSpillRange()
3202             : data()->AssignSpillRangeToLiveRange(range->TopLevel());
3203     bool merged = first_op_spill->TryMerge(spill_range);
3204     if (!merged) return false;
3205     Spill(range);
3206     return true;
3207   } else if (pos->pos() > range->Start().NextStart()) {
3208     SpillRange* spill_range =
3209         range->TopLevel()->HasSpillRange()
3210             ? range->TopLevel()->GetSpillRange()
3211             : data()->AssignSpillRangeToLiveRange(range->TopLevel());
3212     bool merged = first_op_spill->TryMerge(spill_range);
3213     if (!merged) return false;
3214     SpillBetween(range, range->Start(), pos->pos());
3215     DCHECK(UnhandledIsSorted());
3216     return true;
3217   }
3218   return false;
3219 }
3220 
3221 
3222 void LinearScanAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
3223   LiveRange* second_part = SplitRangeAt(range, pos);
3224   Spill(second_part);
3225 }
3226 
3227 
3228 void LinearScanAllocator::SpillBetween(LiveRange* range, LifetimePosition start,
3229                                        LifetimePosition end) {
3230   SpillBetweenUntil(range, start, start, end);
3231 }
3232 
3233 
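// Splits |range| at |start| and spills the part that lies inside [start; end[.
// The spilled part stays on the stack at least until |until|; a tail that
// starts no earlier than |until| is split off again and returned to the
// unhandled set so it can be given a register later.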
3234 void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
3235                                             LifetimePosition start,
3236                                             LifetimePosition until,
3237                                             LifetimePosition end) {
3238   CHECK(start < end);
3239   LiveRange* second_part = SplitRangeAt(range, start);
3240 
3241   if (second_part->Start() < end) {
3242     // The split result intersects with [start, end[.
3243     // Split it at position between ]start+1, end[, spill the middle part
3244     // and put the rest to unhandled.
3245     LifetimePosition third_part_end = end.PrevStart().End();
3246     if (data()->IsBlockBoundary(end.Start())) {
3247       third_part_end = end.Start();
3248     }
3249     LiveRange* third_part = SplitBetween(
3250         second_part, Max(second_part->Start().End(), until), third_part_end);
3251 
3252     DCHECK(third_part != second_part);
3253 
3254     Spill(second_part);
3255     AddToUnhandledSorted(third_part);
3256   } else {
3257     // The split result does not intersect with [start, end[.
3258     // Nothing to spill. Just put it to unhandled as whole.
3259     AddToUnhandledSorted(second_part);
3260   }
3261 }
3262 
3263 
3264 SpillSlotLocator::SpillSlotLocator(RegisterAllocationData* data)
3265     : data_(data) {}
3266 
3267 
3268 void SpillSlotLocator::LocateSpillSlots() {
3269   const InstructionSequence* code = data()->code();
3270   for (TopLevelLiveRange* range : data()->live_ranges()) {
3271     if (range == nullptr || range->IsEmpty()) continue;
3272     // We care only about ranges which spill in the frame.
3273     if (!range->HasSpillRange() || range->IsSpilledOnlyInDeferredBlocks()) {
3274       continue;
3275     }
3276     TopLevelLiveRange::SpillMoveInsertionList* spills =
3277         range->GetSpillMoveInsertionLocations();
3278     DCHECK_NOT_NULL(spills);
3279     for (; spills != nullptr; spills = spills->next) {
3280       code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
3281     }
3282   }
3283 }
3284 
3285 
3286 OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {}
3287 
3288 
3289 void OperandAssigner::AssignSpillSlots() {
3290   ZoneVector<SpillRange*>& spill_ranges = data()->spill_ranges();
3291   // Merge disjoint spill ranges
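  // (TryMerge succeeds only when the two ranges' use intervals do not
  //  intersect, so all ranges folded together can safely share one slot.)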
3292   for (size_t i = 0; i < spill_ranges.size(); ++i) {
3293     SpillRange* range = spill_ranges[i];
3294     if (range == nullptr) continue;
3295     if (range->IsEmpty()) continue;
3296     for (size_t j = i + 1; j < spill_ranges.size(); ++j) {
3297       SpillRange* other = spill_ranges[j];
3298       if (other != nullptr && !other->IsEmpty()) {
3299         range->TryMerge(other);
3300       }
3301     }
3302   }
3303   // Allocate slots for the merged spill ranges.
3304   for (SpillRange* range : spill_ranges) {
3305     if (range == nullptr || range->IsEmpty()) continue;
3306     // Allocate a new operand referring to the spill slot.
3307     if (!range->HasSlot()) {
3308       int index = data()->frame()->AllocateSpillSlot(range->byte_width());
3309       range->set_assigned_slot(index);
3310     }
3311   }
3312 }
3313 
3314 
3315 void OperandAssigner::CommitAssignment() {
3316   for (TopLevelLiveRange* top_range : data()->live_ranges()) {
3317     if (top_range == nullptr || top_range->IsEmpty()) continue;
3318     InstructionOperand spill_operand;
3319     if (top_range->HasSpillOperand()) {
3320       spill_operand = *top_range->TopLevel()->GetSpillOperand();
3321     } else if (top_range->TopLevel()->HasSpillRange()) {
3322       spill_operand = top_range->TopLevel()->GetSpillRangeOperand();
3323     }
3324     if (top_range->is_phi()) {
3325       data()->GetPhiMapValueFor(top_range)->CommitAssignment(
3326           top_range->GetAssignedOperand());
3327     }
3328     for (LiveRange* range = top_range; range != nullptr;
3329          range = range->next()) {
3330       InstructionOperand assigned = range->GetAssignedOperand();
3331       range->ConvertUsesToOperand(assigned, spill_operand);
3332     }
3333 
3334     if (!spill_operand.IsInvalid()) {
3335       // If this top level range has a child spilled in a deferred block, we use
3336       // the range and control flow connection mechanism instead of spilling at
3337       // definition. Refer to the ConnectLiveRanges and ResolveControlFlow
3338       // phases. Normally, when we spill at definition, we do not insert a
3339       // connecting move when a successor child range is spilled - because the
3340       // spilled range picks up its value from the slot which was assigned at
3341       // definition. For ranges that are determined to spill only in deferred
3342       // blocks, we let ConnectLiveRanges and ResolveControlFlow find the blocks
3343       // where a spill operand is expected, and then finalize by inserting the
3344       // spills in the deferred blocks dominators.
3345       if (!top_range->IsSpilledOnlyInDeferredBlocks()) {
3346         // Spill at definition if the range isn't spilled only in deferred
3347         // blocks.
3348         top_range->CommitSpillMoves(
3349             data()->code(), spill_operand,
3350             top_range->has_slot_use() || top_range->spilled());
3351       }
3352     }
3353   }
3354 }
3355 
3356 
3357 ReferenceMapPopulator::ReferenceMapPopulator(RegisterAllocationData* data)
3358     : data_(data) {}
3359 
3360 
3361 bool ReferenceMapPopulator::SafePointsAreInOrder() const {
3362   int safe_point = 0;
3363   for (ReferenceMap* map : *data()->code()->reference_maps()) {
3364     if (safe_point > map->instruction_position()) return false;
3365     safe_point = map->instruction_position();
3366   }
3367   return true;
3368 }
3369 
3370 
3371 void ReferenceMapPopulator::PopulateReferenceMaps() {
3372   DCHECK(SafePointsAreInOrder());
3373   // Map all delayed references.
3374   for (RegisterAllocationData::DelayedReference& delayed_reference :
3375        data()->delayed_references()) {
3376     delayed_reference.map->RecordReference(
3377         AllocatedOperand::cast(*delayed_reference.operand));
3378   }
3379   // Iterate over all safe point positions and record a pointer
3380   // for all spilled live ranges at this point.
3381   int last_range_start = 0;
3382   const ReferenceMapDeque* reference_maps = data()->code()->reference_maps();
3383   ReferenceMapDeque::const_iterator first_it = reference_maps->begin();
3384   for (TopLevelLiveRange* range : data()->live_ranges()) {
3385     if (range == nullptr) continue;
3386     // Skip non-reference values.
3387     if (!data()->IsReference(range)) continue;
3388     // Skip empty live ranges.
3389     if (range->IsEmpty()) continue;
3390     if (range->has_preassigned_slot()) continue;
3391 
3392     // Find the extent of the range and its children.
3393     int start = range->Start().ToInstructionIndex();
3394     int end = 0;
3395     for (LiveRange* cur = range; cur != nullptr; cur = cur->next()) {
3396       LifetimePosition this_end = cur->End();
3397       if (this_end.ToInstructionIndex() > end)
3398         end = this_end.ToInstructionIndex();
3399       DCHECK(cur->Start().ToInstructionIndex() >= start);
3400     }
3401 
3402     // Most of the ranges are in order, but not all.  Keep an eye on when they
3403     // step backwards and reset the first_it so we don't miss any safe points.
3404     if (start < last_range_start) first_it = reference_maps->begin();
3405     last_range_start = start;
3406 
3407     // Step across all the safe points that are before the start of this range,
3408     // recording how far we step in order to save doing this for the next range.
3409     for (; first_it != reference_maps->end(); ++first_it) {
3410       ReferenceMap* map = *first_it;
3411       if (map->instruction_position() >= start) break;
3412     }
3413 
3414     InstructionOperand spill_operand;
3415     if (((range->HasSpillOperand() &&
3416           !range->GetSpillOperand()->IsConstant()) ||
3417          range->HasSpillRange())) {
3418       if (range->HasSpillOperand()) {
3419         spill_operand = *range->GetSpillOperand();
3420       } else {
3421         spill_operand = range->GetSpillRangeOperand();
3422       }
3423       DCHECK(spill_operand.IsStackSlot());
3424       DCHECK_EQ(MachineRepresentation::kTagged,
3425                 AllocatedOperand::cast(spill_operand).representation());
3426     }
3427 
3428     LiveRange* cur = range;
3429     // Step through the safe points to see whether they are in the range.
3430     for (auto it = first_it; it != reference_maps->end(); ++it) {
3431       ReferenceMap* map = *it;
3432       int safe_point = map->instruction_position();
3433 
3434       // The safe points are sorted so we can stop searching here.
3435       if (safe_point - 1 > end) break;
3436 
3437       // Advance to the next active range that covers the current
3438       // safe point position.
3439       LifetimePosition safe_point_pos =
3440           LifetimePosition::InstructionFromInstructionIndex(safe_point);
3441 
3442       // Search for the child range (cur) that covers safe_point_pos. If we
3443       // don't find it before the children pass safe_point_pos, keep cur at
3444       // the last child, because the next safe_point_pos may be covered by cur.
3445       // This may happen if cur has more than one interval, and the current
3446       // safe_point_pos is in between intervals.
3447       // For that reason, cur may be at most the last child.
3448       DCHECK_NOT_NULL(cur);
3449       DCHECK(safe_point_pos >= cur->Start() || range == cur);
3450       bool found = false;
3451       while (!found) {
3452         if (cur->Covers(safe_point_pos)) {
3453           found = true;
3454         } else {
3455           LiveRange* next = cur->next();
3456           if (next == nullptr || next->Start() > safe_point_pos) {
3457             break;
3458           }
3459           cur = next;
3460         }
3461       }
3462 
3463       if (!found) {
3464         continue;
3465       }
3466 
3467       // Check if the live range is spilled and the safe point is after
3468       // the spill position.
3469       int spill_index = range->IsSpilledOnlyInDeferredBlocks()
3470                             ? cur->Start().ToInstructionIndex()
3471                             : range->spill_start_index();
3472 
3473       if (!spill_operand.IsInvalid() && safe_point >= spill_index) {
3474         TRACE("Pointer for range %d (spilled at %d) at safe point %d\n",
3475               range->vreg(), spill_index, safe_point);
3476         map->RecordReference(AllocatedOperand::cast(spill_operand));
3477       }
3478 
3479       if (!cur->spilled()) {
3480         TRACE(
3481             "Pointer in register for range %d:%d (start at %d) "
3482             "at safe point %d\n",
3483             range->vreg(), cur->relative_id(), cur->Start().value(),
3484             safe_point);
3485         InstructionOperand operand = cur->GetAssignedOperand();
3486         DCHECK(!operand.IsStackSlot());
3487         DCHECK_EQ(MachineRepresentation::kTagged,
3488                   AllocatedOperand::cast(operand).representation());
3489         map->RecordReference(AllocatedOperand::cast(operand));
3490       }
3491     }
3492   }
3493 }
3494 
3495 
3496 LiveRangeConnector::LiveRangeConnector(RegisterAllocationData* data)
3497     : data_(data) {}
3498 
3499 
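// Control flow into |block| needs no explicit resolution when the block has a
// single predecessor that immediately precedes it in the linear (RPO) order:
// the two blocks are contiguous, so any needed moves are already inserted when
// adjacent live-range pieces are connected.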
3500 bool LiveRangeConnector::CanEagerlyResolveControlFlow(
3501     const InstructionBlock* block) const {
3502   if (block->PredecessorCount() != 1) return false;
3503   return block->predecessors()[0].IsNext(block->rpo_number());
3504 }
3505 
3506 
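// For every block whose incoming control flow cannot be resolved eagerly, walk
// the virtual registers live at its entry and, for each predecessor edge where
// the value's location at the end of the predecessor differs from its location
// at the start of the block, insert a connecting gap move (and, for values
// spilled only in deferred code, record where a spill operand must be made
// available).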
3507 void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
3508   // Lazily linearize live ranges in memory for fast lookup.
3509   LiveRangeFinder finder(data(), local_zone);
3510   ZoneVector<BitVector*>& live_in_sets = data()->live_in_sets();
3511   for (const InstructionBlock* block : code()->instruction_blocks()) {
3512     if (CanEagerlyResolveControlFlow(block)) continue;
3513     BitVector* live = live_in_sets[block->rpo_number().ToInt()];
3514     BitVector::Iterator iterator(live);
3515     while (!iterator.Done()) {
3516       int vreg = iterator.Current();
3517       LiveRangeBoundArray* array = finder.ArrayFor(vreg);
3518       for (const RpoNumber& pred : block->predecessors()) {
3519         FindResult result;
3520         const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
3521         if (!array->FindConnectableSubranges(block, pred_block, &result)) {
3522           continue;
3523         }
3524         InstructionOperand pred_op = result.pred_cover_->GetAssignedOperand();
3525         InstructionOperand cur_op = result.cur_cover_->GetAssignedOperand();
3526         if (pred_op.Equals(cur_op)) continue;
3527         if (!pred_op.IsAnyRegister() && cur_op.IsAnyRegister()) {
3528           // We're doing a reload.
3529           // We don't need to, if:
3530           // 1) there's no register use in this block, and
3531           // 2) the range ends before the block does, and
3532           // 3) we don't have a successor, or the successor is spilled.
3533           LifetimePosition block_start =
3534               LifetimePosition::GapFromInstructionIndex(block->code_start());
3535           LifetimePosition block_end =
3536               LifetimePosition::GapFromInstructionIndex(block->code_end());
3537           const LiveRange* current = result.cur_cover_;
3538           const LiveRange* successor = current->next();
3539           if (current->End() < block_end &&
3540               (successor == nullptr || successor->spilled())) {
3541             // verify point 1: no register use. We can go to the end of the
3542             // range, since it's all within the block.
3543 
3544             bool uses_reg = false;
3545             for (const UsePosition* use = current->NextUsePosition(block_start);
3546                  use != nullptr; use = use->next()) {
3547               if (use->operand()->IsAnyRegister()) {
3548                 uses_reg = true;
3549                 break;
3550               }
3551             }
3552             if (!uses_reg) continue;
3553           }
3554           if (current->TopLevel()->IsSpilledOnlyInDeferredBlocks() &&
3555               pred_block->IsDeferred()) {
3556             // The spill location should be defined in pred_block, so add
3557             // pred_block to the list of blocks requiring a spill operand.
3558             current->TopLevel()->GetListOfBlocksRequiringSpillOperands()->Add(
3559                 pred_block->rpo_number().ToInt());
3560           }
3561         }
3562         int move_loc = ResolveControlFlow(block, cur_op, pred_block, pred_op);
3563         USE(move_loc);
3564         DCHECK_IMPLIES(
3565             result.cur_cover_->TopLevel()->IsSpilledOnlyInDeferredBlocks() &&
3566                 !(pred_op.IsAnyRegister() && cur_op.IsAnyRegister()),
3567             code()->GetInstructionBlock(move_loc)->IsDeferred());
3568       }
3569       iterator.Advance();
3570     }
3571   }
3572 
3573   // At this stage, we collected blocks needing a spill operand from
3574   // ConnectRanges and from ResolveControlFlow. Time to commit the spills for
3575   // deferred blocks.
3576   for (TopLevelLiveRange* top : data()->live_ranges()) {
3577     if (top == nullptr || top->IsEmpty() ||
3578         !top->IsSpilledOnlyInDeferredBlocks())
3579       continue;
3580     CommitSpillsInDeferredBlocks(top, finder.ArrayFor(top->vreg()), local_zone);
3581   }
3582 }
3583 
3584 
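// Inserts the connecting move for a single control-flow edge. When |block| has
// only one predecessor the move goes into the START gap of |block|; otherwise
// the predecessor must have a single successor (the edge is not critical), and
// the move goes into the END gap of that predecessor's last instruction.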
3585 int LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
3586                                            const InstructionOperand& cur_op,
3587                                            const InstructionBlock* pred,
3588                                            const InstructionOperand& pred_op) {
3589   DCHECK(!pred_op.Equals(cur_op));
3590   int gap_index;
3591   Instruction::GapPosition position;
3592   if (block->PredecessorCount() == 1) {
3593     gap_index = block->first_instruction_index();
3594     position = Instruction::START;
3595   } else {
3596     DCHECK(pred->SuccessorCount() == 1);
3597     DCHECK(!code()
3598                 ->InstructionAt(pred->last_instruction_index())
3599                 ->HasReferenceMap());
3600     gap_index = pred->last_instruction_index();
3601     position = Instruction::END;
3602   }
3603   data()->AddGapMove(gap_index, position, pred_op, cur_op);
3604   return gap_index;
3605 }
3606 
3607 
3608 void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
3609   DelayedInsertionMap delayed_insertion_map(local_zone);
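  // Connecting moves that should occur after the moves already present in a
  // gap's ParallelMove cannot simply be appended while iterating; they are
  // collected here and committed per ParallelMove at the end, where
  // PrepareInsertAfter eliminates any existing move they would supersede.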
3610   for (TopLevelLiveRange* top_range : data()->live_ranges()) {
3611     if (top_range == nullptr) continue;
3612     bool connect_spilled = top_range->IsSpilledOnlyInDeferredBlocks();
3613     LiveRange* first_range = top_range;
3614     for (LiveRange *second_range = first_range->next(); second_range != nullptr;
3615          first_range = second_range, second_range = second_range->next()) {
3616       LifetimePosition pos = second_range->Start();
3617       // Add gap move if the two live ranges touch and there is no block
3618       // boundary.
3619       if (second_range->spilled()) continue;
3620       if (first_range->End() != pos) continue;
3621       if (data()->IsBlockBoundary(pos) &&
3622           !CanEagerlyResolveControlFlow(GetInstructionBlock(code(), pos))) {
3623         continue;
3624       }
3625       InstructionOperand prev_operand = first_range->GetAssignedOperand();
3626       InstructionOperand cur_operand = second_range->GetAssignedOperand();
3627       if (prev_operand.Equals(cur_operand)) continue;
3628       bool delay_insertion = false;
3629       Instruction::GapPosition gap_pos;
3630       int gap_index = pos.ToInstructionIndex();
3631       if (connect_spilled && !prev_operand.IsAnyRegister() &&
3632           cur_operand.IsAnyRegister()) {
3633         const InstructionBlock* block = code()->GetInstructionBlock(gap_index);
3634         DCHECK(block->IsDeferred());
3635         // Performing a reload in this block, meaning the spill operand must
3636         // be defined here.
3637         top_range->GetListOfBlocksRequiringSpillOperands()->Add(
3638             block->rpo_number().ToInt());
3639       }
3640 
3641       if (pos.IsGapPosition()) {
3642         gap_pos = pos.IsStart() ? Instruction::START : Instruction::END;
3643       } else {
3644         if (pos.IsStart()) {
3645           delay_insertion = true;
3646         } else {
3647           gap_index++;
3648         }
3649         gap_pos = delay_insertion ? Instruction::END : Instruction::START;
3650       }
3651       // Reloads or spills for spilled in deferred blocks ranges must happen
3652       // only in deferred blocks.
3653       DCHECK_IMPLIES(
3654           connect_spilled &&
3655               !(prev_operand.IsAnyRegister() && cur_operand.IsAnyRegister()),
3656           code()->GetInstructionBlock(gap_index)->IsDeferred());
3657 
3658       ParallelMove* move =
3659           code()->InstructionAt(gap_index)->GetOrCreateParallelMove(
3660               gap_pos, code_zone());
3661       if (!delay_insertion) {
3662         move->AddMove(prev_operand, cur_operand);
3663       } else {
3664         delayed_insertion_map.insert(
3665             std::make_pair(std::make_pair(move, prev_operand), cur_operand));
3666       }
3667     }
3668   }
3669   if (delayed_insertion_map.empty()) return;
3670   // Insert all the moves which should occur after the stored move.
3671   ZoneVector<MoveOperands*> to_insert(local_zone);
3672   ZoneVector<MoveOperands*> to_eliminate(local_zone);
3673   to_insert.reserve(4);
3674   to_eliminate.reserve(4);
3675   ParallelMove* moves = delayed_insertion_map.begin()->first.first;
3676   for (auto it = delayed_insertion_map.begin();; ++it) {
3677     bool done = it == delayed_insertion_map.end();
3678     if (done || it->first.first != moves) {
3679       // Commit the MoveOperands for current ParallelMove.
3680       for (MoveOperands* move : to_eliminate) {
3681         move->Eliminate();
3682       }
3683       for (MoveOperands* move : to_insert) {
3684         moves->push_back(move);
3685       }
3686       if (done) break;
3687       // Reset state.
3688       to_eliminate.clear();
3689       to_insert.clear();
3690       moves = it->first.first;
3691     }
3692     // Gather all MoveOperands for a single ParallelMove.
3693     MoveOperands* move =
3694         new (code_zone()) MoveOperands(it->first.second, it->second);
3695     MoveOperands* eliminate = moves->PrepareInsertAfter(move);
3696     to_insert.push_back(move);
3697     if (eliminate != nullptr) to_eliminate.push_back(eliminate);
3698   }
3699 }
3700 
3701 
3702 void LiveRangeConnector::CommitSpillsInDeferredBlocks(
3703     TopLevelLiveRange* range, LiveRangeBoundArray* array, Zone* temp_zone) {
3704   DCHECK(range->IsSpilledOnlyInDeferredBlocks());
3705   DCHECK(!range->spilled());
3706 
3707   InstructionSequence* code = data()->code();
3708   InstructionOperand spill_operand = range->GetSpillRangeOperand();
3709 
3710   TRACE("Live Range %d will be spilled only in deferred blocks.\n",
3711         range->vreg());
3712   // If we have ranges that aren't spilled but require the operand on the stack,
3713   // make sure we insert the spill.
3714   for (const LiveRange* child = range; child != nullptr;
3715        child = child->next()) {
3716     for (const UsePosition* pos = child->first_pos(); pos != nullptr;
3717          pos = pos->next()) {
3718       if (pos->type() != UsePositionType::kRequiresSlot && !child->spilled())
3719         continue;
3720       range->AddBlockRequiringSpillOperand(
3721           code->GetInstructionBlock(pos->pos().ToInstructionIndex())
3722               ->rpo_number());
3723     }
3724   }
3725 
3726   ZoneQueue<int> worklist(temp_zone);
3727 
3728   for (BitVector::Iterator iterator(
3729            range->GetListOfBlocksRequiringSpillOperands());
3730        !iterator.Done(); iterator.Advance()) {
3731     worklist.push(iterator.Current());
3732   }
3733 
3734   ZoneSet<std::pair<RpoNumber, int>> done_moves(temp_zone);
3735   // Seek the deferred blocks that dominate locations requiring spill operands,
3736   // and spill there. We only need to spill at the start of such blocks.
3737   BitVector done_blocks(
3738       range->GetListOfBlocksRequiringSpillOperands()->length(), temp_zone);
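  // Walk backwards from every block that needs the value on the stack: keep
  // climbing through deferred predecessors (the spill belongs at the top of
  // the deferred region), and for each non-deferred predecessor emit one gap
  // move per block from the value's location at the end of that predecessor
  // into the spill slot, placed at the start of the current deferred block.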
3739   while (!worklist.empty()) {
3740     int block_id = worklist.front();
3741     worklist.pop();
3742     if (done_blocks.Contains(block_id)) continue;
3743     done_blocks.Add(block_id);
3744     InstructionBlock* spill_block =
3745         code->InstructionBlockAt(RpoNumber::FromInt(block_id));
3746 
3747     for (const RpoNumber& pred : spill_block->predecessors()) {
3748       const InstructionBlock* pred_block = code->InstructionBlockAt(pred);
3749 
3750       if (pred_block->IsDeferred()) {
3751         worklist.push(pred_block->rpo_number().ToInt());
3752       } else {
3753         LifetimePosition pred_end =
3754             LifetimePosition::InstructionFromInstructionIndex(
3755                 pred_block->last_instruction_index());
3756 
3757         LiveRangeBound* bound = array->Find(pred_end);
3758 
3759         InstructionOperand pred_op = bound->range_->GetAssignedOperand();
3760 
3761         RpoNumber spill_block_number = spill_block->rpo_number();
3762         if (done_moves.find(std::make_pair(
3763                 spill_block_number, range->vreg())) == done_moves.end()) {
3764           data()->AddGapMove(spill_block->first_instruction_index(),
3765                              Instruction::GapPosition::START, pred_op,
3766                              spill_operand);
3767           done_moves.insert(std::make_pair(spill_block_number, range->vreg()));
3768           spill_block->mark_needs_frame();
3769         }
3770       }
3771     }
3772   }
3773 }
3774 
3775 
3776 }  // namespace compiler
3777 }  // namespace internal
3778 }  // namespace v8
3779