/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "register_allocator_linear_scan.h"

#include <iostream>
#include <sstream>

#include "base/bit_vector-inl.h"
#include "base/enums.h"
#include "code_generator.h"
#include "linear_order.h"
#include "register_allocation_resolver.h"
#include "ssa_liveness_analysis.h"

namespace art HIDDEN {

static constexpr size_t kMaxLifetimePosition = -1;
static constexpr size_t kDefaultNumberOfSpillSlots = 4;

// For simplicity, we implement register pairs as (reg, reg + 1).
// Note that this is a requirement for double registers on ARM, since we
// allocate SRegister.
static int GetHighForLowRegister(int reg) { return reg + 1; }
static bool IsLowRegister(int reg) { return (reg & 1) == 0; }
static bool IsLowOfUnalignedPairInterval(LiveInterval* low) {
  return GetHighForLowRegister(low->GetRegister()) != low->GetHighInterval()->GetRegister();
}

RegisterAllocatorLinearScan::RegisterAllocatorLinearScan(ScopedArenaAllocator* allocator,
                                                         CodeGenerator* codegen,
                                                         const SsaLivenessAnalysis& liveness)
      : RegisterAllocator(allocator, codegen, liveness),
        unhandled_core_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        unhandled_fp_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        unhandled_(nullptr),
        handled_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        active_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        inactive_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        physical_core_register_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        physical_fp_register_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        temp_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        int_spill_slots_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        long_spill_slots_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        float_spill_slots_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        double_spill_slots_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        catch_phi_spill_slots_(0),
        safepoints_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        processing_core_registers_(false),
        number_of_registers_(-1),
        registers_array_(nullptr),
        blocked_core_registers_(codegen->GetBlockedCoreRegisters()),
        blocked_fp_registers_(codegen->GetBlockedFloatingPointRegisters()),
        reserved_out_slots_(0) {
  temp_intervals_.reserve(4);
  int_spill_slots_.reserve(kDefaultNumberOfSpillSlots);
  long_spill_slots_.reserve(kDefaultNumberOfSpillSlots);
  float_spill_slots_.reserve(kDefaultNumberOfSpillSlots);
  double_spill_slots_.reserve(kDefaultNumberOfSpillSlots);

  codegen->SetupBlockedRegisters();
  physical_core_register_intervals_.resize(codegen->GetNumberOfCoreRegisters(), nullptr);
  physical_fp_register_intervals_.resize(codegen->GetNumberOfFloatingPointRegisters(), nullptr);
  // Always reserve for the current method and the graph's max out registers.
  // TODO: compute it instead.
  // ArtMethod* takes 2 vregs for 64 bits.
  size_t ptr_size = static_cast<size_t>(InstructionSetPointerSize(codegen->GetInstructionSet()));
  reserved_out_slots_ = ptr_size / kVRegSize + codegen->GetGraph()->GetMaximumNumberOfOutVRegs();
}

RegisterAllocatorLinearScan::~RegisterAllocatorLinearScan() {}

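// Returns whether `interval` belongs to the current allocation pass:
// core-register intervals when `processing_core_registers` is true,
// floating-point intervals otherwise.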
static bool ShouldProcess(bool processing_core_registers, LiveInterval* interval) {
  if (interval == nullptr) return false;
  bool is_core_register = (interval->GetType() != DataType::Type::kFloat64)
      && (interval->GetType() != DataType::Type::kFloat32);
  return processing_core_registers == is_core_register;
}

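// Entry point: runs the linear scan for core and floating-point registers,
// then lets RegisterAllocationResolver connect sibling intervals and insert
// the required parallel moves. Debug builds re-validate the allocation.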
void RegisterAllocatorLinearScan::AllocateRegisters() {
  AllocateRegistersInternal();
  RegisterAllocationResolver(codegen_, liveness_)
      .Resolve(ArrayRef<HInstruction* const>(safepoints_),
               reserved_out_slots_,
               int_spill_slots_.size(),
               long_spill_slots_.size(),
               float_spill_slots_.size(),
               double_spill_slots_.size(),
               catch_phi_spill_slots_,
               ArrayRef<LiveInterval* const>(temp_intervals_));

  if (kIsDebugBuild) {
    processing_core_registers_ = true;
    ValidateInternal(true);
    processing_core_registers_ = false;
    ValidateInternal(true);
    // Check that the linear order is still correct with regard to lifetime positions.
    // Since only parallel moves have been inserted during the register allocation,
    // these checks are mostly for making sure these moves have been added correctly.
    size_t current_liveness = 0;
    for (HBasicBlock* block : codegen_->GetGraph()->GetLinearOrder()) {
      for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
        HInstruction* instruction = inst_it.Current();
        DCHECK_LE(current_liveness, instruction->GetLifetimePosition());
        current_liveness = instruction->GetLifetimePosition();
      }
      for (HInstructionIterator inst_it(block->GetInstructions());
           !inst_it.Done();
           inst_it.Advance()) {
        HInstruction* instruction = inst_it.Current();
        DCHECK_LE(current_liveness, instruction->GetLifetimePosition()) << instruction->DebugName();
        current_liveness = instruction->GetLifetimePosition();
      }
    }
  }
}

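// Makes the physical register denoted by `location` unavailable between
// `start` and `end` by adding that range to the register's fixed interval,
// which is created lazily on first use.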
void RegisterAllocatorLinearScan::BlockRegister(Location location, size_t start, size_t end) {
  int reg = location.reg();
  DCHECK(location.IsRegister() || location.IsFpuRegister());
  LiveInterval* interval = location.IsRegister()
      ? physical_core_register_intervals_[reg]
      : physical_fp_register_intervals_[reg];
  DataType::Type type = location.IsRegister()
      ? DataType::Type::kInt32
      : DataType::Type::kFloat32;
  if (interval == nullptr) {
    interval = LiveInterval::MakeFixedInterval(allocator_, reg, type);
    if (location.IsRegister()) {
      physical_core_register_intervals_[reg] = interval;
    } else {
      physical_fp_register_intervals_[reg] = interval;
    }
  }
  DCHECK(interval->GetRegister() == reg);
  interval->AddRange(start, end);
}

void RegisterAllocatorLinearScan::BlockRegisters(size_t start, size_t end, bool caller_save_only) {
  for (size_t i = 0; i < codegen_->GetNumberOfCoreRegisters(); ++i) {
    if (!caller_save_only || !codegen_->IsCoreCalleeSaveRegister(i)) {
      BlockRegister(Location::RegisterLocation(i), start, end);
    }
  }
  for (size_t i = 0; i < codegen_->GetNumberOfFloatingPointRegisters(); ++i) {
    if (!caller_save_only || !codegen_->IsFloatingPointCalleeSaveRegister(i)) {
      BlockRegister(Location::FpuRegisterLocation(i), start, end);
    }
  }
}

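// Builds the unhandled interval lists by walking blocks in linear post order,
// then runs the scan twice: once for core registers and once for floating-point
// registers, seeding the fixed physical-register intervals into `inactive_`.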
void RegisterAllocatorLinearScan::AllocateRegistersInternal() {
  // Iterate in post order, to ensure the list is sorted, and that the last added
  // interval is the one with the lowest start position.
  for (HBasicBlock* block : codegen_->GetGraph()->GetLinearPostOrder()) {
    for (HBackwardInstructionIterator back_it(block->GetInstructions()); !back_it.Done();
         back_it.Advance()) {
      ProcessInstruction(back_it.Current());
    }
    for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
      ProcessInstruction(inst_it.Current());
    }

    if (block->IsCatchBlock() ||
        (block->IsLoopHeader() && block->GetLoopInformation()->IsIrreducible())) {
      // By blocking all registers at the top of each catch block or irreducible loop, we force
      // intervals belonging to the live-in set of the catch/header block to be spilled.
      // TODO(ngeoffray): Phis in this block could be allocated in registers.
      size_t position = block->GetLifetimeStart();
      BlockRegisters(position, position + 1);
    }
  }

  number_of_registers_ = codegen_->GetNumberOfCoreRegisters();
  registers_array_ = allocator_->AllocArray<size_t>(number_of_registers_,
                                                    kArenaAllocRegisterAllocator);
  processing_core_registers_ = true;
  unhandled_ = &unhandled_core_intervals_;
  for (LiveInterval* fixed : physical_core_register_intervals_) {
    if (fixed != nullptr) {
      // Fixed intervals are added to inactive_ instead of unhandled_.
      // They are also the only type of inactive interval whose start position
      // can be after the current interval during linear scan.
      // Fixed intervals are never split and never move to unhandled_.
      inactive_.push_back(fixed);
    }
  }
  LinearScan();

  inactive_.clear();
  active_.clear();
  handled_.clear();

  number_of_registers_ = codegen_->GetNumberOfFloatingPointRegisters();
  registers_array_ = allocator_->AllocArray<size_t>(number_of_registers_,
                                                    kArenaAllocRegisterAllocator);
  processing_core_registers_ = false;
  unhandled_ = &unhandled_fp_intervals_;
  for (LiveInterval* fixed : physical_fp_register_intervals_) {
    if (fixed != nullptr) {
      // Fixed intervals are added to inactive_ instead of unhandled_.
      // They are also the only type of inactive interval whose start position
      // can be after the current interval during linear scan.
      // Fixed intervals are never split and never move to unhandled_.
      inactive_.push_back(fixed);
    }
  }
  LinearScan();
}

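// Gathers everything an instruction contributes to allocation: temp intervals,
// safepoints, fixed input/output constraints, and the instruction's own live
// interval, which is queued on the matching unhandled list.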
void RegisterAllocatorLinearScan::ProcessInstruction(HInstruction* instruction) {
  LocationSummary* locations = instruction->GetLocations();

  // Check for early returns.
  if (locations == nullptr) {
    return;
  }
  if (TryRemoveSuspendCheckEntry(instruction)) {
    return;
  }

  CheckForTempLiveIntervals(instruction);
  CheckForSafepoint(instruction);
  if (locations->WillCall()) {
    // If a call will happen, create fixed intervals for caller-save registers.
    const size_t position = instruction->GetLifetimePosition();
    BlockRegisters(position, position + 1, /* caller_save_only= */ true);
  }
  CheckForFixedInputs(instruction);

  LiveInterval* current = instruction->GetLiveInterval();
  if (current == nullptr)
    return;

  const bool core_register = !DataType::IsFloatingPointType(instruction->GetType());
  ScopedArenaVector<LiveInterval*>& unhandled =
      core_register ? unhandled_core_intervals_ : unhandled_fp_intervals_;

  DCHECK(unhandled.empty() || current->StartsBeforeOrAt(unhandled.back()));

  if (codegen_->NeedsTwoRegisters(current->GetType())) {
    current->AddHighInterval();
  }

  AddSafepointsFor(instruction);
  current->ResetSearchCache();
  CheckForFixedOutput(instruction);

  if (instruction->IsPhi() && instruction->AsPhi()->IsCatchPhi()) {
    AllocateSpillSlotForCatchPhi(instruction->AsPhi());
  }

  // If needed, add interval to the list of unhandled intervals.
  if (current->HasSpillSlot() || instruction->IsConstant()) {
    // Split just before first register use.
    size_t first_register_use = current->FirstRegisterUse();
    if (first_register_use != kNoLifetime) {
      LiveInterval* split = SplitBetween(current, current->GetStart(), first_register_use - 1);
      // Don't add directly to `unhandled`, it needs to be sorted and the start
      // of this new interval might be after intervals already in the list.
      AddSorted(&unhandled, split);
    } else {
      // Nothing to do, we won't allocate a register for this value.
    }
  } else {
    // Don't add directly to `unhandled`, temp or safepoint intervals
    // for this instruction may have been added, and those can be
    // processed first.
    AddSorted(&unhandled, current);
  }
}

bool RegisterAllocatorLinearScan::TryRemoveSuspendCheckEntry(HInstruction* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  if (instruction->IsSuspendCheckEntry() && !codegen_->NeedsSuspendCheckEntry()) {
    // TODO: We do this here because we do not want the suspend check to artificially
    // create live registers. We should find another place, but this is currently the
    // simplest.
    DCHECK_EQ(locations->GetTempCount(), 0u);
    instruction->GetBlock()->RemoveInstruction(instruction);
    return true;
  }
  return false;
}

void RegisterAllocatorLinearScan::CheckForTempLiveIntervals(HInstruction* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  size_t position = instruction->GetLifetimePosition();

  // Create synthesized intervals for temporaries.
  for (size_t i = 0; i < locations->GetTempCount(); ++i) {
    Location temp = locations->GetTemp(i);
    if (temp.IsRegister() || temp.IsFpuRegister()) {
      BlockRegister(temp, position, position + 1);
      // Ensure that an explicit temporary register is marked as being allocated.
      codegen_->AddAllocatedRegister(temp);
    } else {
      DCHECK(temp.IsUnallocated());
      switch (temp.GetPolicy()) {
        case Location::kRequiresRegister: {
          LiveInterval* interval =
              LiveInterval::MakeTempInterval(allocator_, DataType::Type::kInt32);
          temp_intervals_.push_back(interval);
          interval->AddTempUse(instruction, i);
          unhandled_core_intervals_.push_back(interval);
          break;
        }

        case Location::kRequiresFpuRegister: {
          LiveInterval* interval =
              LiveInterval::MakeTempInterval(allocator_, DataType::Type::kFloat64);
          temp_intervals_.push_back(interval);
          interval->AddTempUse(instruction, i);
          if (codegen_->NeedsTwoRegisters(DataType::Type::kFloat64)) {
            interval->AddHighInterval(/* is_temp= */ true);
            LiveInterval* high = interval->GetHighInterval();
            temp_intervals_.push_back(high);
            unhandled_fp_intervals_.push_back(high);
          }
          unhandled_fp_intervals_.push_back(interval);
          break;
        }

        default:
          LOG(FATAL) << "Unexpected policy for temporary location " << temp.GetPolicy();
      }
    }
  }
}

void RegisterAllocatorLinearScan::CheckForSafepoint(HInstruction* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  if (locations->NeedsSafepoint()) {
    safepoints_.push_back(instruction);
  }
}

void RegisterAllocatorLinearScan::CheckForFixedInputs(HInstruction* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  size_t position = instruction->GetLifetimePosition();
  for (size_t i = 0; i < locations->GetInputCount(); ++i) {
    Location input = locations->InAt(i);
    if (input.IsRegister() || input.IsFpuRegister()) {
      BlockRegister(input, position, position + 1);
    } else if (input.IsPair()) {
      BlockRegister(input.ToLow(), position, position + 1);
      BlockRegister(input.ToHigh(), position, position + 1);
    }
  }
}

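// Records on `instruction`'s live interval each already-collected safepoint
// the interval is live at, scanning safepoints from lowest to highest lifetime
// position and stopping once the interval is dead.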
void RegisterAllocatorLinearScan::AddSafepointsFor(HInstruction* instruction) {
  LiveInterval* current = instruction->GetLiveInterval();
  for (size_t safepoint_index = safepoints_.size(); safepoint_index > 0; --safepoint_index) {
    HInstruction* safepoint = safepoints_[safepoint_index - 1u];
    size_t safepoint_position = SafepointPosition::ComputePosition(safepoint);

    // Test that safepoints are ordered in the optimal way.
    DCHECK(safepoint_index == safepoints_.size() ||
           safepoints_[safepoint_index]->GetLifetimePosition() < safepoint_position);

    if (safepoint_position == current->GetStart()) {
      // The safepoint is for this instruction, so the location of the instruction
      // does not need to be saved.
      DCHECK_EQ(safepoint_index, safepoints_.size());
      DCHECK_EQ(safepoint, instruction);
      continue;
    } else if (current->IsDeadAt(safepoint_position)) {
      break;
    } else if (!current->Covers(safepoint_position)) {
      // Hole in the interval.
      continue;
    }
    current->AddSafepoint(safepoint);
  }
}

void RegisterAllocatorLinearScan::CheckForFixedOutput(HInstruction* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  size_t position = instruction->GetLifetimePosition();
  LiveInterval* current = instruction->GetLiveInterval();
  // Some instructions define their output in a fixed register/stack slot. We need
  // to ensure we know these locations before doing register allocation. For a
  // given register, we create an interval that covers these locations. The register
  // will be unavailable at these locations when trying to allocate one for an
  // interval.
  //
  // The backwards walking ensures the ranges are ordered on increasing start positions.
  Location output = locations->Out();
  if (output.IsUnallocated() && output.GetPolicy() == Location::kSameAsFirstInput) {
    Location first = locations->InAt(0);
    if (first.IsRegister() || first.IsFpuRegister()) {
      current->SetFrom(position + 1);
      current->SetRegister(first.reg());
    } else if (first.IsPair()) {
      current->SetFrom(position + 1);
      current->SetRegister(first.low());
      LiveInterval* high = current->GetHighInterval();
      high->SetRegister(first.high());
      high->SetFrom(position + 1);
    }
  } else if (output.IsRegister() || output.IsFpuRegister()) {
    // Shift the interval's start by one to account for the blocked register.
    current->SetFrom(position + 1);
    current->SetRegister(output.reg());
    BlockRegister(output, position, position + 1);
  } else if (output.IsPair()) {
    current->SetFrom(position + 1);
    current->SetRegister(output.low());
    LiveInterval* high = current->GetHighInterval();
    high->SetRegister(output.high());
    high->SetFrom(position + 1);
    BlockRegister(output.ToLow(), position, position + 1);
    BlockRegister(output.ToHigh(), position, position + 1);
  } else if (output.IsStackSlot() || output.IsDoubleStackSlot()) {
    current->SetSpillSlot(output.GetStackIndex());
  } else {
    DCHECK(output.IsUnallocated() || output.IsConstant());
  }
}

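// Iterates over every live range of an interval and of all its split siblings.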
class AllRangesIterator : public ValueObject {
 public:
  explicit AllRangesIterator(LiveInterval* interval)
      : current_interval_(interval),
        current_range_(interval->GetFirstRange()) {}

  bool Done() const { return current_interval_ == nullptr; }
  LiveRange* CurrentRange() const { return current_range_; }
  LiveInterval* CurrentInterval() const { return current_interval_; }

  void Advance() {
    current_range_ = current_range_->GetNext();
    if (current_range_ == nullptr) {
      current_interval_ = current_interval_->GetNextSibling();
      if (current_interval_ != nullptr) {
        current_range_ = current_interval_->GetFirstRange();
      }
    }
  }

 private:
  LiveInterval* current_interval_;
  LiveRange* current_range_;

  DISALLOW_COPY_AND_ASSIGN(AllRangesIterator);
};

bool RegisterAllocatorLinearScan::ValidateInternal(bool log_fatal_on_failure) const {
  // To simplify unit testing, we eagerly create the array of intervals, and
  // call the helper method.
  ScopedArenaAllocator allocator(allocator_->GetArenaStack());
  ScopedArenaVector<LiveInterval*> intervals(
      allocator.Adapter(kArenaAllocRegisterAllocatorValidate));
  for (size_t i = 0; i < liveness_.GetNumberOfSsaValues(); ++i) {
    HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
    if (ShouldProcess(processing_core_registers_, instruction->GetLiveInterval())) {
      intervals.push_back(instruction->GetLiveInterval());
    }
  }

  const ScopedArenaVector<LiveInterval*>* physical_register_intervals = processing_core_registers_
      ? &physical_core_register_intervals_
      : &physical_fp_register_intervals_;
  for (LiveInterval* fixed : *physical_register_intervals) {
    if (fixed != nullptr) {
      intervals.push_back(fixed);
    }
  }

  for (LiveInterval* temp : temp_intervals_) {
    if (ShouldProcess(processing_core_registers_, temp)) {
      intervals.push_back(temp);
    }
  }

  return ValidateIntervals(ArrayRef<LiveInterval* const>(intervals),
                           GetNumberOfSpillSlots(),
                           reserved_out_slots_,
                           *codegen_,
                           processing_core_registers_,
                           log_fatal_on_failure);
}

void RegisterAllocatorLinearScan::DumpInterval(std::ostream& stream, LiveInterval* interval) const {
  interval->Dump(stream);
  stream << ": ";
  if (interval->HasRegister()) {
    if (interval->IsFloatingPoint()) {
      codegen_->DumpFloatingPointRegister(stream, interval->GetRegister());
    } else {
      codegen_->DumpCoreRegister(stream, interval->GetRegister());
    }
  } else {
    stream << "spilled";
  }
  stream << std::endl;
}

void RegisterAllocatorLinearScan::DumpAllIntervals(std::ostream& stream) const {
  stream << "inactive: " << std::endl;
  for (LiveInterval* inactive_interval : inactive_) {
    DumpInterval(stream, inactive_interval);
  }
  stream << "active: " << std::endl;
  for (LiveInterval* active_interval : active_) {
    DumpInterval(stream, active_interval);
  }
  stream << "unhandled: " << std::endl;
  auto unhandled = (unhandled_ != nullptr) ?
      unhandled_ : &unhandled_core_intervals_;
  for (LiveInterval* unhandled_interval : *unhandled) {
    DumpInterval(stream, unhandled_interval);
  }
  stream << "handled: " << std::endl;
  for (LiveInterval* handled_interval : handled_) {
    DumpInterval(stream, handled_interval);
  }
}

// By the book implementation of a linear scan register allocator.
void RegisterAllocatorLinearScan::LinearScan() {
  while (!unhandled_->empty()) {
    // (1) Remove interval with the lowest start position from unhandled.
    LiveInterval* current = unhandled_->back();
    unhandled_->pop_back();

    // Make sure the interval is in an expected state.
    DCHECK(!current->IsFixed() && !current->HasSpillSlot());
    // Make sure we are going in the right order.
    DCHECK(unhandled_->empty() || unhandled_->back()->GetStart() >= current->GetStart());
    // Make sure a low interval is always with a high.
    DCHECK_IMPLIES(current->IsLowInterval(), unhandled_->back()->IsHighInterval());
    // Make sure a high interval is always with a low.
    DCHECK(current->IsLowInterval() ||
           unhandled_->empty() ||
           !unhandled_->back()->IsHighInterval());

    size_t position = current->GetStart();

    // Remember the inactive_ size here since the ones moved to inactive_ from
    // active_ below shouldn't need to be re-checked.
    size_t inactive_intervals_to_handle = inactive_.size();

    // (2) Remove currently active intervals that are dead at this position.
    //     Move active intervals that have a lifetime hole at this position
    //     to inactive.
    auto active_kept_end = std::remove_if(
        active_.begin(),
        active_.end(),
        [this, position](LiveInterval* interval) {
          if (interval->IsDeadAt(position)) {
            handled_.push_back(interval);
            return true;
          } else if (!interval->Covers(position)) {
            inactive_.push_back(interval);
            return true;
          } else {
            return false;  // Keep this interval.
          }
        });
    active_.erase(active_kept_end, active_.end());

    // (3) Remove currently inactive intervals that are dead at this position.
    //     Move inactive intervals that cover this position to active.
    auto inactive_to_handle_end = inactive_.begin() + inactive_intervals_to_handle;
    auto inactive_kept_end = std::remove_if(
        inactive_.begin(),
        inactive_to_handle_end,
        [this, position](LiveInterval* interval) {
          DCHECK(interval->GetStart() < position || interval->IsFixed());
          if (interval->IsDeadAt(position)) {
            handled_.push_back(interval);
            return true;
          } else if (interval->Covers(position)) {
            active_.push_back(interval);
            return true;
          } else {
            return false;  // Keep this interval.
          }
        });
    inactive_.erase(inactive_kept_end, inactive_to_handle_end);

    if (current->IsHighInterval() && !current->GetLowInterval()->HasRegister()) {
      DCHECK(!current->HasRegister());
      // Allocating the low part was unsuccessful. The split interval for the high part
      // will be handled next (it is in the `unhandled_` list).
      continue;
    }

    // (4) Try to find an available register.
    bool success = TryAllocateFreeReg(current);

    // (5) If no register could be found, we need to spill.
    if (!success) {
      success = AllocateBlockedReg(current);
    }

    // (6) If the interval had a register allocated, add it to the list of active
    //     intervals.
    if (success) {
      codegen_->AddAllocatedRegister(processing_core_registers_
          ? Location::RegisterLocation(current->GetRegister())
          : Location::FpuRegisterLocation(current->GetRegister()));
      active_.push_back(current);
      if (current->HasHighInterval() && !current->GetHighInterval()->HasRegister()) {
        current->GetHighInterval()->SetRegister(GetHighForLowRegister(current->GetRegister()));
      }
    }
  }
}

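// Helper for TryAllocateFreeReg(): if `interval` is dead at `position`, its
// register (and the high half's, if any) becomes fully free in `free_until`;
// if `position` falls in a lifetime hole, the register is free until the
// interval's next use.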
static void FreeIfNotCoverAt(LiveInterval* interval, size_t position, size_t* free_until) {
  DCHECK(!interval->IsHighInterval());
  // Note that the same instruction may occur multiple times in the input list,
  // so `free_until` may have changed already.
  // Since `position` is not the current scan position, we need to use CoversSlow.
  if (interval->IsDeadAt(position)) {
    // Set the register to be free. Note that inactive intervals might later
    // update this.
    free_until[interval->GetRegister()] = kMaxLifetimePosition;
    if (interval->HasHighInterval()) {
      DCHECK(interval->GetHighInterval()->IsDeadAt(position));
      free_until[interval->GetHighInterval()->GetRegister()] = kMaxLifetimePosition;
    }
  } else if (!interval->CoversSlow(position)) {
    // The interval becomes inactive at `defined_by`. We make its register
    // available only until the next use strictly after `defined_by`.
    free_until[interval->GetRegister()] = interval->FirstUseAfter(position);
    if (interval->HasHighInterval()) {
      DCHECK(!interval->GetHighInterval()->CoversSlow(position));
      free_until[interval->GetHighInterval()->GetRegister()] = free_until[interval->GetRegister()];
    }
  }
}

// Find a free register. If multiple are found, pick the register that
// is free the longest.
bool RegisterAllocatorLinearScan::TryAllocateFreeReg(LiveInterval* current) {
  size_t* free_until = registers_array_;

  // First set all registers to be free.
  for (size_t i = 0; i < number_of_registers_; ++i) {
    free_until[i] = kMaxLifetimePosition;
  }

  // For each active interval, set its register to not free.
  for (LiveInterval* interval : active_) {
    DCHECK(interval->HasRegister());
    free_until[interval->GetRegister()] = 0;
  }

  // An interval that starts at an instruction (that is, one that is not split) may
  // reuse the registers used by the inputs of that instruction, based on the
  // location summary.
  HInstruction* defined_by = current->GetDefinedBy();
  if (defined_by != nullptr && !current->IsSplit()) {
    LocationSummary* locations = defined_by->GetLocations();
    if (!locations->OutputCanOverlapWithInputs() && locations->Out().IsUnallocated()) {
      HInputsRef inputs = defined_by->GetInputs();
      for (size_t i = 0; i < inputs.size(); ++i) {
        if (locations->InAt(i).IsValid()) {
          // Take the last interval of the input. It is the location of that interval
          // that will be used at `defined_by`.
          LiveInterval* interval = inputs[i]->GetLiveInterval()->GetLastSibling();
          // Note that the interval may not have been processed yet.
          // TODO: Handle non-split intervals last in the work list.
          if (interval->HasRegister() && interval->SameRegisterKind(*current)) {
            // The input must be live until the end of `defined_by`, to comply with
            // the linear scan algorithm. So we use `defined_by`'s end lifetime
            // position to check whether the input is dead or is inactive after
            // `defined_by`.
            DCHECK(interval->CoversSlow(defined_by->GetLifetimePosition()));
            size_t position = defined_by->GetLifetimePosition() + 1;
            FreeIfNotCoverAt(interval, position, free_until);
          }
        }
      }
    }
  }

  // For each inactive interval, set its register to be free until
  // the next intersection with `current`.
  for (LiveInterval* inactive : inactive_) {
    // Temp/Slow-path-safepoint intervals have no holes.
    DCHECK(!inactive->IsTemp());
    if (!current->IsSplit() && !inactive->IsFixed()) {
      // Neither current nor inactive are fixed.
      // Thanks to SSA, a non-split interval starting in a hole of an
      // inactive interval should never intersect with that inactive interval.
      // Only if it's not fixed though, because fixed intervals don't come from SSA.
      DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
      continue;
    }

    DCHECK(inactive->HasRegister());
    if (free_until[inactive->GetRegister()] == 0) {
      // Already used by some active interval. No need to intersect.
      continue;
    }
    size_t next_intersection = inactive->FirstIntersectionWith(current);
    if (next_intersection != kNoLifetime) {
      free_until[inactive->GetRegister()] =
          std::min(free_until[inactive->GetRegister()], next_intersection);
    }
  }

  int reg = kNoRegister;
  if (current->HasRegister()) {
    // Some instructions have a fixed register output.
    reg = current->GetRegister();
    if (free_until[reg] == 0) {
      DCHECK(current->IsHighInterval());
      // AllocateBlockedReg will spill the holder of the register.
      return false;
    }
  } else {
    DCHECK(!current->IsHighInterval());
    int hint = current->FindFirstRegisterHint(free_until, liveness_);
    if ((hint != kNoRegister)
        // For simplicity, if the hint we are getting for a pair cannot be used,
        // we are just going to allocate a new pair.
        && !(current->IsLowInterval() && IsBlocked(GetHighForLowRegister(hint)))) {
      DCHECK(!IsBlocked(hint));
      reg = hint;
    } else if (current->IsLowInterval()) {
      reg = FindAvailableRegisterPair(free_until, current->GetStart());
    } else {
      reg = FindAvailableRegister(free_until, current);
    }
  }

  DCHECK_NE(reg, kNoRegister);
  // If we could not find a register, we need to spill.
  if (free_until[reg] == 0) {
    return false;
  }

  if (current->IsLowInterval()) {
    // If the high register of this interval is not available, we need to spill.
    int high_reg = current->GetHighInterval()->GetRegister();
    if (high_reg == kNoRegister) {
      high_reg = GetHighForLowRegister(reg);
    }
    if (free_until[high_reg] == 0) {
      return false;
    }
  }

  current->SetRegister(reg);
  if (!current->IsDeadAt(free_until[reg])) {
    // If the register is only available for a subset of live ranges
    // covered by `current`, split `current` before the position where
    // the register is not available anymore.
    LiveInterval* split = SplitBetween(current, current->GetStart(), free_until[reg]);
    DCHECK(split != nullptr);
    AddSorted(unhandled_, split);
  }
  return true;
}

bool RegisterAllocatorLinearScan::IsBlocked(int reg) const {
  return processing_core_registers_
      ? blocked_core_registers_[reg]
      : blocked_fp_registers_[reg];
}

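// Picks an unblocked, aligned register pair (low, low + 1) whose next uses are
// the furthest away, returning early if a pair that is free for the whole
// method is found.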
int RegisterAllocatorLinearScan::FindAvailableRegisterPair(size_t* next_use, size_t starting_at) const {
  int reg = kNoRegister;
  // Pick the register pair whose next use is the furthest away.
  for (size_t i = 0; i < number_of_registers_; ++i) {
    if (IsBlocked(i)) continue;
    if (!IsLowRegister(i)) continue;
    int high_register = GetHighForLowRegister(i);
    if (IsBlocked(high_register)) continue;
    int existing_high_register = GetHighForLowRegister(reg);
    if ((reg == kNoRegister) || (next_use[i] >= next_use[reg]
                        && next_use[high_register] >= next_use[existing_high_register])) {
      reg = i;
      if (next_use[i] == kMaxLifetimePosition
          && next_use[high_register] == kMaxLifetimePosition) {
        break;
      }
    } else if (next_use[reg] <= starting_at || next_use[existing_high_register] <= starting_at) {
      // If one of the current registers is known to be unavailable, just unconditionally
      // try a new one.
      reg = i;
    }
  }
  return reg;
}

bool RegisterAllocatorLinearScan::IsCallerSaveRegister(int reg) const {
  return processing_core_registers_
      ? !codegen_->IsCoreCalleeSaveRegister(reg)
      : !codegen_->IsFloatingPointCalleeSaveRegister(reg);
}

int RegisterAllocatorLinearScan::FindAvailableRegister(size_t* next_use, LiveInterval* current) const {
  // We special case intervals that do not span a safepoint to try to find a caller-save
  // register if one is available. We iterate from 0 to the number of registers,
  // so if there are caller-save registers available at the end, we continue the iteration.
  bool prefers_caller_save = !current->HasWillCallSafepoint();
  int reg = kNoRegister;
  for (size_t i = 0; i < number_of_registers_; ++i) {
    if (IsBlocked(i)) {
      // Register cannot be used. Continue.
      continue;
    }

    // Best case: we found a register fully available.
    if (next_use[i] == kMaxLifetimePosition) {
      if (prefers_caller_save && !IsCallerSaveRegister(i)) {
        // We can get shorter encodings on some platforms by using
        // small register numbers. So only update the candidate if the previous
        // one was not available for the whole method.
        if (reg == kNoRegister || next_use[reg] != kMaxLifetimePosition) {
          reg = i;
        }
        // Continue the iteration in the hope of finding a caller-save register.
        continue;
      } else {
        reg = i;
        // We know the register is good enough. Return it.
        break;
      }
    }

    // If we had no register before, take this one as a reference.
    if (reg == kNoRegister) {
      reg = i;
      continue;
    }

    // Pick the register whose next use is the furthest away.
    if (next_use[i] > next_use[reg]) {
      reg = i;
      continue;
    }
  }
  return reg;
}

// Remove interval and its other half if any. Return iterator to the following element.
static ScopedArenaVector<LiveInterval*>::iterator RemoveIntervalAndPotentialOtherHalf(
    ScopedArenaVector<LiveInterval*>* intervals, ScopedArenaVector<LiveInterval*>::iterator pos) {
  DCHECK(intervals->begin() <= pos && pos < intervals->end());
  LiveInterval* interval = *pos;
  if (interval->IsLowInterval()) {
    DCHECK(pos + 1 < intervals->end());
    DCHECK_EQ(*(pos + 1), interval->GetHighInterval());
    return intervals->erase(pos, pos + 2);
  } else if (interval->IsHighInterval()) {
    DCHECK(intervals->begin() < pos);
    DCHECK_EQ(*(pos - 1), interval->GetLowInterval());
    return intervals->erase(pos - 1, pos + 1);
  } else {
    return intervals->erase(pos);
  }
}

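// Tries to free a register usable by a pair at `position` by splitting the
// first active non-pair or unaligned-pair interval whose next register use is
// not before `first_register_use`. Returns whether such an interval was split.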
bool RegisterAllocatorLinearScan::TrySplitNonPairOrUnalignedPairIntervalAt(size_t position,
                                                                           size_t first_register_use,
                                                                           size_t* next_use) {
  for (auto it = active_.begin(), end = active_.end(); it != end; ++it) {
    LiveInterval* active = *it;
    DCHECK(active->HasRegister());
    if (active->IsFixed()) continue;
    if (active->IsHighInterval()) continue;
    if (first_register_use > next_use[active->GetRegister()]) continue;

    // Split the first interval found that is either:
    // 1) A non-pair interval.
    // 2) A pair interval whose high is not low + 1.
    // 3) A pair interval whose low is not even.
    if (!active->IsLowInterval() ||
        IsLowOfUnalignedPairInterval(active) ||
        !IsLowRegister(active->GetRegister())) {
      LiveInterval* split = Split(active, position);
      if (split != active) {
        handled_.push_back(active);
      }
      RemoveIntervalAndPotentialOtherHalf(&active_, it);
      AddSorted(unhandled_, split);
      return true;
    }
  }
  return false;
}

// Find the register whose next use is the furthest away, and spill the interval
// that holds it. If the first use of `current` comes after that register's next
// use, we spill `current` instead.
bool RegisterAllocatorLinearScan::AllocateBlockedReg(LiveInterval* current) {
  size_t first_register_use = current->FirstRegisterUse();
  if (current->HasRegister()) {
    DCHECK(current->IsHighInterval());
    // The low interval has allocated the register for the high interval. In
    // case the low interval had to split both intervals, we may end up in a
    // situation where the high interval does not have a register use anymore.
    // We must still proceed in order to split currently active and inactive
    // uses of the high interval's register, and put the high interval in the
    // active set.
    DCHECK_IMPLIES(first_register_use == kNoLifetime, current->GetNextSibling() != nullptr);
  } else if (first_register_use == kNoLifetime) {
    AllocateSpillSlotFor(current);
    return false;
  }

  // First set all registers as not being used.
  size_t* next_use = registers_array_;
  for (size_t i = 0; i < number_of_registers_; ++i) {
    next_use[i] = kMaxLifetimePosition;
  }

  // For each active interval, find the next use of its register after the
  // start of current.
  for (LiveInterval* active : active_) {
    DCHECK(active->HasRegister());
    if (active->IsFixed()) {
      next_use[active->GetRegister()] = current->GetStart();
    } else {
      size_t use = active->FirstRegisterUseAfter(current->GetStart());
      if (use != kNoLifetime) {
        next_use[active->GetRegister()] = use;
      }
    }
  }

  // For each inactive interval, find the next use of its register after the
  // start of current.
  for (LiveInterval* inactive : inactive_) {
    // Temp/Slow-path-safepoint intervals have no holes.
    DCHECK(!inactive->IsTemp());
    if (!current->IsSplit() && !inactive->IsFixed()) {
      // Neither current nor inactive are fixed.
      // Thanks to SSA, a non-split interval starting in a hole of an
      // inactive interval should never intersect with that inactive interval.
      // Only if it's not fixed though, because fixed intervals don't come from SSA.
      DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
      continue;
    }
    DCHECK(inactive->HasRegister());
    size_t next_intersection = inactive->FirstIntersectionWith(current);
    if (next_intersection != kNoLifetime) {
      if (inactive->IsFixed()) {
        next_use[inactive->GetRegister()] =
            std::min(next_intersection, next_use[inactive->GetRegister()]);
      } else {
        size_t use = inactive->FirstUseAfter(current->GetStart());
        if (use != kNoLifetime) {
          next_use[inactive->GetRegister()] = std::min(use, next_use[inactive->GetRegister()]);
        }
      }
    }
  }

  int reg = kNoRegister;
  bool should_spill = false;
  if (current->HasRegister()) {
    DCHECK(current->IsHighInterval());
    reg = current->GetRegister();
    // When allocating the low part, we made sure the high register was available.
    DCHECK_LT(first_register_use, next_use[reg]);
  } else if (current->IsLowInterval()) {
    reg = FindAvailableRegisterPair(next_use, first_register_use);
    // We should spill if both registers are not available.
    should_spill = (first_register_use >= next_use[reg])
      || (first_register_use >= next_use[GetHighForLowRegister(reg)]);
  } else {
    DCHECK(!current->IsHighInterval());
    reg = FindAvailableRegister(next_use, current);
    should_spill = (first_register_use >= next_use[reg]);
  }

  DCHECK_NE(reg, kNoRegister);
  if (should_spill) {
    DCHECK(!current->IsHighInterval());
    bool is_allocation_at_use_site = (current->GetStart() >= (first_register_use - 1));
    if (is_allocation_at_use_site) {
      if (!current->IsLowInterval()) {
        DumpInterval(std::cerr, current);
        DumpAllIntervals(std::cerr);
        // This situation has the potential to infinite loop, so we make it a non-debug CHECK.
        HInstruction* at = liveness_.GetInstructionFromPosition(first_register_use / 2);
        CHECK(false) << "There are not enough registers available for "
          << current->GetParent()->GetDefinedBy()->DebugName() << " "
          << current->GetParent()->GetDefinedBy()->GetId()
          << " at " << first_register_use - 1 << " "
          << (at == nullptr ? "" : at->DebugName());
      }

      // If we're allocating a register for `current` because the instruction at
      // that position requires it, but we think we should spill, then there are
      // non-pair intervals or unaligned pair intervals blocking the allocation.
      // We split the first interval found, and put ourselves first in the
      // `unhandled_` list.
      bool success = TrySplitNonPairOrUnalignedPairIntervalAt(current->GetStart(),
                                                              first_register_use,
                                                              next_use);
      DCHECK(success);
      LiveInterval* existing = unhandled_->back();
      DCHECK(existing->IsHighInterval());
      DCHECK_EQ(existing->GetLowInterval(), current);
      unhandled_->push_back(current);
    } else {
      // If the first use of that instruction is after the last use of the found
      // register, we split this interval just before its first register use.
      AllocateSpillSlotFor(current);
      LiveInterval* split = SplitBetween(current, current->GetStart(), first_register_use - 1);
      DCHECK(current != split);
      AddSorted(unhandled_, split);
    }
    return false;
  } else {
    // Use this register and spill the active and inactive intervals that
    // hold that register.
    current->SetRegister(reg);

    for (auto it = active_.begin(), end = active_.end(); it != end; ++it) {
      LiveInterval* active = *it;
      if (active->GetRegister() == reg) {
        DCHECK(!active->IsFixed());
        LiveInterval* split = Split(active, current->GetStart());
        if (split != active) {
          handled_.push_back(active);
        }
        RemoveIntervalAndPotentialOtherHalf(&active_, it);
        AddSorted(unhandled_, split);
        break;
      }
    }

    // NOTE: Retrieve end() on each iteration because we're removing elements in the loop body.
    for (auto it = inactive_.begin(); it != inactive_.end(); ) {
      LiveInterval* inactive = *it;
      bool erased = false;
      if (inactive->GetRegister() == reg) {
        if (!current->IsSplit() && !inactive->IsFixed()) {
          // Neither current nor inactive are fixed.
          // Thanks to SSA, a non-split interval starting in a hole of an
          // inactive interval should never intersect with that inactive interval.
          // Only if it's not fixed though, because fixed intervals don't come from SSA.
          DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
        } else {
          size_t next_intersection = inactive->FirstIntersectionWith(current);
          if (next_intersection != kNoLifetime) {
            if (inactive->IsFixed()) {
              LiveInterval* split = Split(current, next_intersection);
              DCHECK_NE(split, current);
              AddSorted(unhandled_, split);
            } else {
              // Split at the start of `current`, which will lead to splitting
              // at the end of the lifetime hole of `inactive`.
              LiveInterval* split = Split(inactive, current->GetStart());
              // If it's inactive, it must start before the current interval.
              DCHECK_NE(split, inactive);
              it = RemoveIntervalAndPotentialOtherHalf(&inactive_, it);
              erased = true;
              handled_.push_back(inactive);
              AddSorted(unhandled_, split);
            }
          }
        }
      }
      // If we have erased the element, `it` already points to the next element.
      // Otherwise we need to move to the next element.
      if (!erased) {
        ++it;
      }
    }

    return true;
  }
}

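// Inserts `interval` into `array`, which is kept sorted so that the interval
// with the lowest start position sits at the back and is processed next.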
void RegisterAllocatorLinearScan::AddSorted(ScopedArenaVector<LiveInterval*>* array,
                                            LiveInterval* interval) {
  DCHECK(!interval->IsFixed() && !interval->HasSpillSlot());
  size_t insert_at = 0;
  for (size_t i = array->size(); i > 0; --i) {
    LiveInterval* current = (*array)[i - 1u];
    // High intervals must be processed right after their low equivalent.
    if (current->StartsAfter(interval) && !current->IsHighInterval()) {
      insert_at = i;
      break;
    }
  }

  // Insert the high interval before the low, to ensure the low is processed first.
  auto insert_pos = array->begin() + insert_at;
  if (interval->HasHighInterval()) {
    array->insert(insert_pos, { interval->GetHighInterval(), interval });
  } else if (interval->HasLowInterval()) {
    array->insert(insert_pos, { interval, interval->GetLowInterval() });
  } else {
    array->insert(insert_pos, interval);
  }
}

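// Assigns a spill slot to `interval`'s parent from the pool matching its type,
// reusing the first run of slots that is free at the parent's start. Parameters,
// constants and the current method use dedicated locations instead.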
void RegisterAllocatorLinearScan::AllocateSpillSlotFor(LiveInterval* interval) {
  if (interval->IsHighInterval()) {
    // The low interval already took care of allocating the spill slot.
    DCHECK(!interval->GetLowInterval()->HasRegister());
    DCHECK(interval->GetLowInterval()->GetParent()->HasSpillSlot());
    return;
  }

  LiveInterval* parent = interval->GetParent();

  // An instruction gets a spill slot for its entire lifetime. If the parent
  // of this interval already has a spill slot, there is nothing to do.
  if (parent->HasSpillSlot()) {
    return;
  }

  HInstruction* defined_by = parent->GetDefinedBy();
  DCHECK_IMPLIES(defined_by->IsPhi(), !defined_by->AsPhi()->IsCatchPhi());

  if (defined_by->IsParameterValue()) {
    // Parameters have their own stack slot.
    parent->SetSpillSlot(codegen_->GetStackSlotOfParameter(defined_by->AsParameterValue()));
    return;
  }

  if (defined_by->IsCurrentMethod()) {
    parent->SetSpillSlot(0);
    return;
  }

  if (defined_by->IsConstant()) {
    // Constants don't need a spill slot.
    return;
  }

  ScopedArenaVector<size_t>* spill_slots = nullptr;
  switch (interval->GetType()) {
    case DataType::Type::kFloat64:
      spill_slots = &double_spill_slots_;
      break;
    case DataType::Type::kInt64:
      spill_slots = &long_spill_slots_;
      break;
    case DataType::Type::kFloat32:
      spill_slots = &float_spill_slots_;
      break;
    case DataType::Type::kReference:
    case DataType::Type::kInt32:
    case DataType::Type::kUint16:
    case DataType::Type::kUint8:
    case DataType::Type::kInt8:
    case DataType::Type::kBool:
    case DataType::Type::kInt16:
      spill_slots = &int_spill_slots_;
      break;
    case DataType::Type::kUint32:
    case DataType::Type::kUint64:
    case DataType::Type::kVoid:
      LOG(FATAL) << "Unexpected type for interval " << interval->GetType();
  }

  // Find the first contiguous run of free spill slots that is large enough.
  size_t number_of_spill_slots_needed = parent->NumberOfSpillSlotsNeeded();
  size_t slot = 0;
  for (size_t e = spill_slots->size(); slot < e; ++slot) {
    bool found = true;
    for (size_t s = slot, u = std::min(slot + number_of_spill_slots_needed, e); s < u; s++) {
      if ((*spill_slots)[s] > parent->GetStart()) {
        found = false;  // failure
        break;
      }
    }
    if (found) {
      break;  // success
    }
  }

  // Need new spill slots?
  size_t upper = slot + number_of_spill_slots_needed;
  if (upper > spill_slots->size()) {
    spill_slots->resize(upper);
  }
  // Mark the slots as used until the end of the interval's last sibling.
  size_t end = interval->GetLastSibling()->GetEnd();
  for (size_t s = slot; s < upper; s++) {
    (*spill_slots)[s] = end;
  }

  // Note that the exact spill slot location will be computed when we resolve,
  // that is when we know the number of spill slots for each type.
  parent->SetSpillSlot(slot);
}

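// Catch phi values are always kept in spill slots: a phi that is the vreg
// equivalent of the previous phi shares its slot, otherwise new slots are
// carved out of the catch-phi spill area.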
void RegisterAllocatorLinearScan::AllocateSpillSlotForCatchPhi(HPhi* phi) {
  LiveInterval* interval = phi->GetLiveInterval();

  HInstruction* previous_phi = phi->GetPrevious();
  DCHECK(previous_phi == nullptr ||
         previous_phi->AsPhi()->GetRegNumber() <= phi->GetRegNumber())
      << "Phis expected to be sorted by vreg number, so that equivalent phis are adjacent.";

  if (phi->IsVRegEquivalentOf(previous_phi)) {
    // This is an equivalent of the previous phi. We need to assign the same
    // catch phi slot.
    DCHECK(previous_phi->GetLiveInterval()->HasSpillSlot());
    interval->SetSpillSlot(previous_phi->GetLiveInterval()->GetSpillSlot());
  } else {
    // Allocate a new spill slot for this catch phi.
    // TODO: Reuse spill slots when intervals of phis from different catch
    //       blocks do not overlap.
    interval->SetSpillSlot(catch_phi_spill_slots_);
    catch_phi_spill_slots_ += interval->NumberOfSpillSlotsNeeded();
  }
}

}  // namespace art