/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "register_allocator_linear_scan.h"

#include <iostream>
#include <sstream>

#include "base/bit_vector-inl.h"
#include "base/enums.h"
#include "code_generator.h"
#include "linear_order.h"
#include "register_allocation_resolver.h"
#include "ssa_liveness_analysis.h"

namespace art {

static constexpr size_t kMaxLifetimePosition = -1;
static constexpr size_t kDefaultNumberOfSpillSlots = 4;

// For simplicity, we implement register pairs as (reg, reg + 1).
// Note that this is a requirement for double registers on ARM, since we
// allocate SRegister.
static int GetHighForLowRegister(int reg) { return reg + 1; }
static bool IsLowRegister(int reg) { return (reg & 1) == 0; }
static bool IsLowOfUnalignedPairInterval(LiveInterval* low) {
  return GetHighForLowRegister(low->GetRegister()) != low->GetHighInterval()->GetRegister();
}
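
// Example (illustrative, not part of the original code): with ARM SRegisters,
// a double held in (S4, S5) forms an aligned pair. A pair allocated as
// (S4, S6) is what IsLowOfUnalignedPairInterval detects (high != low + 1),
// while a pair such as (S5, S6) is ruled out by IsLowRegister (odd low).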

RegisterAllocatorLinearScan::RegisterAllocatorLinearScan(ArenaAllocator* allocator,
                                                         CodeGenerator* codegen,
                                                         const SsaLivenessAnalysis& liveness)
      : RegisterAllocator(allocator, codegen, liveness),
        unhandled_core_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        unhandled_fp_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        unhandled_(nullptr),
        handled_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        active_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        inactive_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        physical_core_register_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        physical_fp_register_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        temp_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        int_spill_slots_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        long_spill_slots_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        float_spill_slots_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        double_spill_slots_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        catch_phi_spill_slots_(0),
        safepoints_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        processing_core_registers_(false),
        number_of_registers_(-1),
        registers_array_(nullptr),
        blocked_core_registers_(codegen->GetBlockedCoreRegisters()),
        blocked_fp_registers_(codegen->GetBlockedFloatingPointRegisters()),
        reserved_out_slots_(0) {
  temp_intervals_.reserve(4);
  int_spill_slots_.reserve(kDefaultNumberOfSpillSlots);
  long_spill_slots_.reserve(kDefaultNumberOfSpillSlots);
  float_spill_slots_.reserve(kDefaultNumberOfSpillSlots);
  double_spill_slots_.reserve(kDefaultNumberOfSpillSlots);

  codegen->SetupBlockedRegisters();
  physical_core_register_intervals_.resize(codegen->GetNumberOfCoreRegisters(), nullptr);
  physical_fp_register_intervals_.resize(codegen->GetNumberOfFloatingPointRegisters(), nullptr);
  // Always reserve for the current method and the graph's max out registers.
  // TODO: compute it instead.
  // ArtMethod* takes 2 vregs for 64 bits.
  size_t ptr_size = static_cast<size_t>(InstructionSetPointerSize(codegen->GetInstructionSet()));
  reserved_out_slots_ = ptr_size / kVRegSize + codegen->GetGraph()->GetMaximumNumberOfOutVRegs();
}

static bool ShouldProcess(bool processing_core_registers, LiveInterval* interval) {
  if (interval == nullptr) return false;
  bool is_core_register = (interval->GetType() != Primitive::kPrimDouble)
      && (interval->GetType() != Primitive::kPrimFloat);
  return processing_core_registers == is_core_register;
}
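
// Allocation runs in two passes, first over core registers and then over
// floating-point registers (see AllocateRegistersInternal); ShouldProcess
// selects the intervals that belong to the current pass. Note that everything
// that is not a float or a double, including references and longs, is handled
// in the core pass.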

void RegisterAllocatorLinearScan::AllocateRegisters() {
  AllocateRegistersInternal();
  RegisterAllocationResolver(allocator_, codegen_, liveness_)
      .Resolve(ArrayRef<HInstruction* const>(safepoints_),
               reserved_out_slots_,
               int_spill_slots_.size(),
               long_spill_slots_.size(),
               float_spill_slots_.size(),
               double_spill_slots_.size(),
               catch_phi_spill_slots_,
               temp_intervals_);

  if (kIsDebugBuild) {
    processing_core_registers_ = true;
    ValidateInternal(true);
    processing_core_registers_ = false;
    ValidateInternal(true);
    // Check that the linear order is still correct with regard to lifetime positions.
    // Since only parallel moves have been inserted during the register allocation,
    // these checks are mostly for making sure these moves have been added correctly.
    size_t current_liveness = 0;
    for (HBasicBlock* block : codegen_->GetGraph()->GetLinearOrder()) {
      for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
        HInstruction* instruction = inst_it.Current();
        DCHECK_LE(current_liveness, instruction->GetLifetimePosition());
        current_liveness = instruction->GetLifetimePosition();
      }
      for (HInstructionIterator inst_it(block->GetInstructions());
           !inst_it.Done();
           inst_it.Advance()) {
        HInstruction* instruction = inst_it.Current();
        DCHECK_LE(current_liveness, instruction->GetLifetimePosition()) << instruction->DebugName();
        current_liveness = instruction->GetLifetimePosition();
      }
    }
  }
}

void RegisterAllocatorLinearScan::BlockRegister(Location location, size_t start, size_t end) {
  int reg = location.reg();
  DCHECK(location.IsRegister() || location.IsFpuRegister());
  LiveInterval* interval = location.IsRegister()
      ? physical_core_register_intervals_[reg]
      : physical_fp_register_intervals_[reg];
  Primitive::Type type = location.IsRegister()
      ? Primitive::kPrimInt
      : Primitive::kPrimFloat;
  if (interval == nullptr) {
    interval = LiveInterval::MakeFixedInterval(allocator_, reg, type);
    if (location.IsRegister()) {
      physical_core_register_intervals_[reg] = interval;
    } else {
      physical_fp_register_intervals_[reg] = interval;
    }
  }
  DCHECK(interval->GetRegister() == reg);
  interval->AddRange(start, end);
}

void RegisterAllocatorLinearScan::BlockRegisters(size_t start, size_t end, bool caller_save_only) {
  for (size_t i = 0; i < codegen_->GetNumberOfCoreRegisters(); ++i) {
    if (!caller_save_only || !codegen_->IsCoreCalleeSaveRegister(i)) {
      BlockRegister(Location::RegisterLocation(i), start, end);
    }
  }
  for (size_t i = 0; i < codegen_->GetNumberOfFloatingPointRegisters(); ++i) {
    if (!caller_save_only || !codegen_->IsFloatingPointCalleeSaveRegister(i)) {
      BlockRegister(Location::FpuRegisterLocation(i), start, end);
    }
  }
}
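
// Note: blocking with `caller_save_only` set is how calls are modeled (see
// ProcessInstruction below): each caller-save register gets a fixed range at
// the call position, so no live value can be kept in one across the call.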

void RegisterAllocatorLinearScan::AllocateRegistersInternal() {
  // Iterate post-order, to ensure the list is sorted, and the last added interval
  // is the one with the lowest start position.
  for (HBasicBlock* block : codegen_->GetGraph()->GetLinearPostOrder()) {
    for (HBackwardInstructionIterator back_it(block->GetInstructions()); !back_it.Done();
         back_it.Advance()) {
      ProcessInstruction(back_it.Current());
    }
    for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
      ProcessInstruction(inst_it.Current());
    }

    if (block->IsCatchBlock() ||
        (block->IsLoopHeader() && block->GetLoopInformation()->IsIrreducible())) {
      // By blocking all registers at the top of each catch block or irreducible loop, we force
      // intervals belonging to the live-in set of the catch/header block to be spilled.
      // TODO(ngeoffray): Phis in this block could be allocated in registers.
      size_t position = block->GetLifetimeStart();
      BlockRegisters(position, position + 1);
    }
  }

  number_of_registers_ = codegen_->GetNumberOfCoreRegisters();
  registers_array_ = allocator_->AllocArray<size_t>(number_of_registers_,
                                                    kArenaAllocRegisterAllocator);
  processing_core_registers_ = true;
  unhandled_ = &unhandled_core_intervals_;
  for (LiveInterval* fixed : physical_core_register_intervals_) {
    if (fixed != nullptr) {
      // Fixed interval is added to inactive_ instead of unhandled_.
      // It's also the only type of inactive interval whose start position
      // can be after the current interval during linear scan.
      // Fixed interval is never split and never moves to unhandled_.
      inactive_.push_back(fixed);
    }
  }
  LinearScan();

  inactive_.clear();
  active_.clear();
  handled_.clear();

  number_of_registers_ = codegen_->GetNumberOfFloatingPointRegisters();
  registers_array_ = allocator_->AllocArray<size_t>(number_of_registers_,
                                                    kArenaAllocRegisterAllocator);
  processing_core_registers_ = false;
  unhandled_ = &unhandled_fp_intervals_;
  for (LiveInterval* fixed : physical_fp_register_intervals_) {
    if (fixed != nullptr) {
      // Fixed interval is added to inactive_ instead of unhandled_.
      // It's also the only type of inactive interval whose start position
      // can be after the current interval during linear scan.
      // Fixed interval is never split and never moves to unhandled_.
      inactive_.push_back(fixed);
    }
  }
  LinearScan();
}
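
// Because blocks are visited in linear post-order and instructions within a
// block are visited backward, intervals are pushed onto the unhandled lists
// in order of decreasing start position. LinearScan relies on this: the
// interval with the lowest start position is always at the back of the list.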

void RegisterAllocatorLinearScan::ProcessInstruction(HInstruction* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  size_t position = instruction->GetLifetimePosition();

  if (locations == nullptr) return;

  // Create synthesized intervals for temporaries.
  for (size_t i = 0; i < locations->GetTempCount(); ++i) {
    Location temp = locations->GetTemp(i);
    if (temp.IsRegister() || temp.IsFpuRegister()) {
      BlockRegister(temp, position, position + 1);
      // Ensure that an explicit temporary register is marked as being allocated.
      codegen_->AddAllocatedRegister(temp);
    } else {
      DCHECK(temp.IsUnallocated());
      switch (temp.GetPolicy()) {
        case Location::kRequiresRegister: {
          LiveInterval* interval =
              LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimInt);
          temp_intervals_.push_back(interval);
          interval->AddTempUse(instruction, i);
          unhandled_core_intervals_.push_back(interval);
          break;
        }

        case Location::kRequiresFpuRegister: {
          LiveInterval* interval =
              LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimDouble);
          temp_intervals_.push_back(interval);
          interval->AddTempUse(instruction, i);
          if (codegen_->NeedsTwoRegisters(Primitive::kPrimDouble)) {
            interval->AddHighInterval(/* is_temp */ true);
            LiveInterval* high = interval->GetHighInterval();
            temp_intervals_.push_back(high);
            unhandled_fp_intervals_.push_back(high);
          }
          unhandled_fp_intervals_.push_back(interval);
          break;
        }

        default:
          LOG(FATAL) << "Unexpected policy for temporary location "
                     << temp.GetPolicy();
      }
    }
  }

  bool core_register = (instruction->GetType() != Primitive::kPrimDouble)
      && (instruction->GetType() != Primitive::kPrimFloat);

  if (locations->NeedsSafepoint()) {
    if (codegen_->IsLeafMethod()) {
      // TODO: We do this here because we do not want the suspend check to artificially
      // create live registers. We should find another place, but this is currently the
      // simplest.
      DCHECK(instruction->IsSuspendCheckEntry());
      instruction->GetBlock()->RemoveInstruction(instruction);
      return;
    }
    safepoints_.push_back(instruction);
  }

  if (locations->WillCall()) {
    BlockRegisters(position, position + 1, /* caller_save_only */ true);
  }

  for (size_t i = 0; i < locations->GetInputCount(); ++i) {
    Location input = locations->InAt(i);
    if (input.IsRegister() || input.IsFpuRegister()) {
      BlockRegister(input, position, position + 1);
    } else if (input.IsPair()) {
      BlockRegister(input.ToLow(), position, position + 1);
      BlockRegister(input.ToHigh(), position, position + 1);
    }
  }

  LiveInterval* current = instruction->GetLiveInterval();
  if (current == nullptr) return;

  ArenaVector<LiveInterval*>& unhandled = core_register
      ? unhandled_core_intervals_
      : unhandled_fp_intervals_;

  DCHECK(unhandled.empty() || current->StartsBeforeOrAt(unhandled.back()));

  if (codegen_->NeedsTwoRegisters(current->GetType())) {
    current->AddHighInterval();
  }

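  // Instructions are processed backward, so `safepoints_` is ordered by
  // decreasing lifetime position and the loop below visits safepoints in
  // increasing position order. This is what makes the early `break` valid
  // once `current` is dead at a safepoint.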
  for (size_t safepoint_index = safepoints_.size(); safepoint_index > 0; --safepoint_index) {
    HInstruction* safepoint = safepoints_[safepoint_index - 1u];
    size_t safepoint_position = safepoint->GetLifetimePosition();

    // Test that safepoints are ordered in the optimal way.
    DCHECK(safepoint_index == safepoints_.size() ||
           safepoints_[safepoint_index]->GetLifetimePosition() < safepoint_position);

    if (safepoint_position == current->GetStart()) {
      // The safepoint is for this instruction, so the location of the instruction
      // does not need to be saved.
      DCHECK_EQ(safepoint_index, safepoints_.size());
      DCHECK_EQ(safepoint, instruction);
      continue;
    } else if (current->IsDeadAt(safepoint_position)) {
      break;
    } else if (!current->Covers(safepoint_position)) {
      // Hole in the interval.
      continue;
    }
    current->AddSafepoint(safepoint);
  }
  current->ResetSearchCache();

  // Some instructions define their output in a fixed register or stack slot. We need
  // to ensure we know these locations before doing register allocation. For a
  // given register, we create an interval that covers these locations. The register
  // will be unavailable at these locations when trying to allocate one for an
  // interval.
  //
  // The backwards walking ensures the ranges are ordered on increasing start positions.
  Location output = locations->Out();
  if (output.IsUnallocated() && output.GetPolicy() == Location::kSameAsFirstInput) {
    Location first = locations->InAt(0);
    if (first.IsRegister() || first.IsFpuRegister()) {
      current->SetFrom(position + 1);
      current->SetRegister(first.reg());
    } else if (first.IsPair()) {
      current->SetFrom(position + 1);
      current->SetRegister(first.low());
      LiveInterval* high = current->GetHighInterval();
      high->SetRegister(first.high());
      high->SetFrom(position + 1);
    }
  } else if (output.IsRegister() || output.IsFpuRegister()) {
    // Shift the interval's start by one to account for the blocked register.
    current->SetFrom(position + 1);
    current->SetRegister(output.reg());
    BlockRegister(output, position, position + 1);
  } else if (output.IsPair()) {
    current->SetFrom(position + 1);
    current->SetRegister(output.low());
    LiveInterval* high = current->GetHighInterval();
    high->SetRegister(output.high());
    high->SetFrom(position + 1);
    BlockRegister(output.ToLow(), position, position + 1);
    BlockRegister(output.ToHigh(), position, position + 1);
  } else if (output.IsStackSlot() || output.IsDoubleStackSlot()) {
    current->SetSpillSlot(output.GetStackIndex());
  } else {
    DCHECK(output.IsUnallocated() || output.IsConstant());
  }

  if (instruction->IsPhi() && instruction->AsPhi()->IsCatchPhi()) {
    AllocateSpillSlotForCatchPhi(instruction->AsPhi());
  }

  // If needed, add the interval to the list of unhandled intervals.
  if (current->HasSpillSlot() || instruction->IsConstant()) {
    // Split just before the first register use.
    size_t first_register_use = current->FirstRegisterUse();
    if (first_register_use != kNoLifetime) {
      LiveInterval* split = SplitBetween(current, current->GetStart(), first_register_use - 1);
      // Don't add directly to `unhandled`; it needs to be sorted and the start
      // of this new interval might be after intervals already in the list.
      AddSorted(&unhandled, split);
    } else {
      // Nothing to do, we won't allocate a register for this value.
    }
  } else {
    // Don't add directly to `unhandled`: temp or safepoint intervals
    // for this instruction may have been added, and those can be
    // processed first.
    AddSorted(&unhandled, current);
  }
}

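// Iterates over all live ranges of an interval and of all its siblings
// (the pieces an interval gets split into), in order.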
class AllRangesIterator : public ValueObject {
 public:
  explicit AllRangesIterator(LiveInterval* interval)
      : current_interval_(interval),
        current_range_(interval->GetFirstRange()) {}

  bool Done() const { return current_interval_ == nullptr; }
  LiveRange* CurrentRange() const { return current_range_; }
  LiveInterval* CurrentInterval() const { return current_interval_; }

  void Advance() {
    current_range_ = current_range_->GetNext();
    if (current_range_ == nullptr) {
      current_interval_ = current_interval_->GetNextSibling();
      if (current_interval_ != nullptr) {
        current_range_ = current_interval_->GetFirstRange();
      }
    }
  }

 private:
  LiveInterval* current_interval_;
  LiveRange* current_range_;

  DISALLOW_COPY_AND_ASSIGN(AllRangesIterator);
};

bool RegisterAllocatorLinearScan::ValidateInternal(bool log_fatal_on_failure) const {
  // To simplify unit testing, we eagerly create the array of intervals, and
  // call the helper method.
  ArenaVector<LiveInterval*> intervals(allocator_->Adapter(kArenaAllocRegisterAllocatorValidate));
  for (size_t i = 0; i < liveness_.GetNumberOfSsaValues(); ++i) {
    HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
    if (ShouldProcess(processing_core_registers_, instruction->GetLiveInterval())) {
      intervals.push_back(instruction->GetLiveInterval());
    }
  }

  const ArenaVector<LiveInterval*>* physical_register_intervals = processing_core_registers_
      ? &physical_core_register_intervals_
      : &physical_fp_register_intervals_;
  for (LiveInterval* fixed : *physical_register_intervals) {
    if (fixed != nullptr) {
      intervals.push_back(fixed);
    }
  }

  for (LiveInterval* temp : temp_intervals_) {
    if (ShouldProcess(processing_core_registers_, temp)) {
      intervals.push_back(temp);
    }
  }

  return ValidateIntervals(intervals, GetNumberOfSpillSlots(), reserved_out_slots_, *codegen_,
                           allocator_, processing_core_registers_, log_fatal_on_failure);
}

void RegisterAllocatorLinearScan::DumpInterval(std::ostream& stream, LiveInterval* interval) const {
  interval->Dump(stream);
  stream << ": ";
  if (interval->HasRegister()) {
    if (interval->IsFloatingPoint()) {
      codegen_->DumpFloatingPointRegister(stream, interval->GetRegister());
    } else {
      codegen_->DumpCoreRegister(stream, interval->GetRegister());
    }
  } else {
    stream << "spilled";
  }
  stream << std::endl;
}

void RegisterAllocatorLinearScan::DumpAllIntervals(std::ostream& stream) const {
  stream << "inactive: " << std::endl;
  for (LiveInterval* inactive_interval : inactive_) {
    DumpInterval(stream, inactive_interval);
  }
  stream << "active: " << std::endl;
  for (LiveInterval* active_interval : active_) {
    DumpInterval(stream, active_interval);
  }
  stream << "unhandled: " << std::endl;
  auto unhandled = (unhandled_ != nullptr) ?
      unhandled_ : &unhandled_core_intervals_;
  for (LiveInterval* unhandled_interval : *unhandled) {
    DumpInterval(stream, unhandled_interval);
  }
  stream << "handled: " << std::endl;
  for (LiveInterval* handled_interval : handled_) {
    DumpInterval(stream, handled_interval);
  }
}
// By-the-book implementation of a linear scan register allocator.
void RegisterAllocatorLinearScan::LinearScan() {
  while (!unhandled_->empty()) {
    // (1) Remove the interval with the lowest start position from unhandled.
    LiveInterval* current = unhandled_->back();
    unhandled_->pop_back();

    // Make sure the interval is in an expected state.
    DCHECK(!current->IsFixed() && !current->HasSpillSlot());
    // Make sure we are going in the right order.
    DCHECK(unhandled_->empty() || unhandled_->back()->GetStart() >= current->GetStart());
    // Make sure a low interval is always with a high.
    DCHECK(!current->IsLowInterval() || unhandled_->back()->IsHighInterval());
    // Make sure a high interval is always with a low.
    DCHECK(current->IsLowInterval() ||
           unhandled_->empty() ||
           !unhandled_->back()->IsHighInterval());

    size_t position = current->GetStart();

    // Remember the inactive_ size here since the ones moved to inactive_ from
    // active_ below shouldn't need to be re-checked.
    size_t inactive_intervals_to_handle = inactive_.size();

    // (2) Remove currently active intervals that are dead at this position.
    //     Move active intervals that have a lifetime hole at this position
    //     to inactive.
    auto active_kept_end = std::remove_if(
        active_.begin(),
        active_.end(),
        [this, position](LiveInterval* interval) {
          if (interval->IsDeadAt(position)) {
            handled_.push_back(interval);
            return true;
          } else if (!interval->Covers(position)) {
            inactive_.push_back(interval);
            return true;
          } else {
            return false;  // Keep this interval.
          }
        });
    active_.erase(active_kept_end, active_.end());

    // (3) Remove currently inactive intervals that are dead at this position.
    //     Move inactive intervals that cover this position to active.
    auto inactive_to_handle_end = inactive_.begin() + inactive_intervals_to_handle;
    auto inactive_kept_end = std::remove_if(
        inactive_.begin(),
        inactive_to_handle_end,
        [this, position](LiveInterval* interval) {
          DCHECK(interval->GetStart() < position || interval->IsFixed());
          if (interval->IsDeadAt(position)) {
            handled_.push_back(interval);
            return true;
          } else if (interval->Covers(position)) {
            active_.push_back(interval);
            return true;
          } else {
            return false;  // Keep this interval.
          }
        });
    inactive_.erase(inactive_kept_end, inactive_to_handle_end);

    if (current->IsHighInterval() && !current->GetLowInterval()->HasRegister()) {
      DCHECK(!current->HasRegister());
      // Allocating the low part was unsuccessful. The split interval for the high part
      // will be handled next (it is in the `unhandled_` list).
      continue;
    }

    // (4) Try to find an available register.
    bool success = TryAllocateFreeReg(current);

    // (5) If no register could be found, we need to spill.
    if (!success) {
      success = AllocateBlockedReg(current);
    }

    // (6) If the interval had a register allocated, add it to the list of active
    //     intervals.
    if (success) {
      codegen_->AddAllocatedRegister(processing_core_registers_
          ? Location::RegisterLocation(current->GetRegister())
          : Location::FpuRegisterLocation(current->GetRegister()));
      active_.push_back(current);
      if (current->HasHighInterval() && !current->GetHighInterval()->HasRegister()) {
        current->GetHighInterval()->SetRegister(GetHighForLowRegister(current->GetRegister()));
      }
    }
  }
}

static void FreeIfNotCoverAt(LiveInterval* interval, size_t position, size_t* free_until) {
  DCHECK(!interval->IsHighInterval());
  // Note that the same instruction may occur multiple times in the input list,
  // so `free_until` may have changed already.
  // Since `position` is not the current scan position, we need to use CoversSlow.
  if (interval->IsDeadAt(position)) {
    // Set the register to be free. Note that inactive intervals might later
    // update this.
    free_until[interval->GetRegister()] = kMaxLifetimePosition;
    if (interval->HasHighInterval()) {
      DCHECK(interval->GetHighInterval()->IsDeadAt(position));
      free_until[interval->GetHighInterval()->GetRegister()] = kMaxLifetimePosition;
    }
  } else if (!interval->CoversSlow(position)) {
    // The interval becomes inactive at `defined_by`. We make its register
    // available only until the next use strictly after `defined_by`.
    free_until[interval->GetRegister()] = interval->FirstUseAfter(position);
    if (interval->HasHighInterval()) {
      DCHECK(!interval->GetHighInterval()->CoversSlow(position));
      free_until[interval->GetHighInterval()->GetRegister()] = free_until[interval->GetRegister()];
    }
  }
}

// Find a free register. If multiple are found, pick the register that
// is free the longest.
bool RegisterAllocatorLinearScan::TryAllocateFreeReg(LiveInterval* current) {
  size_t* free_until = registers_array_;

  // First set all registers to be free.
  for (size_t i = 0; i < number_of_registers_; ++i) {
    free_until[i] = kMaxLifetimePosition;
  }

  // For each active interval, set its register to not free.
  for (LiveInterval* interval : active_) {
    DCHECK(interval->HasRegister());
    free_until[interval->GetRegister()] = 0;
  }
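
  // At this point, free_until[r] is 0 for registers held by active intervals
  // and kMaxLifetimePosition (free forever) for all others. The code below
  // refines this: a register whose active holder dies at the definition of
  // `current` may be re-freed, and registers held by inactive intervals are
  // marked free only until their next intersection with `current`.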

  // An interval that starts at an instruction (that is, it is not split) may
  // reuse the registers used by the inputs of that instruction, based on the
  // location summary.
  HInstruction* defined_by = current->GetDefinedBy();
  if (defined_by != nullptr && !current->IsSplit()) {
    LocationSummary* locations = defined_by->GetLocations();
    if (!locations->OutputCanOverlapWithInputs() && locations->Out().IsUnallocated()) {
      HInputsRef inputs = defined_by->GetInputs();
      for (size_t i = 0; i < inputs.size(); ++i) {
        if (locations->InAt(i).IsValid()) {
          // Take the last interval of the input. It is the location of that interval
          // that will be used at `defined_by`.
          LiveInterval* interval = inputs[i]->GetLiveInterval()->GetLastSibling();
          // Note that the interval may not have been processed yet.
          // TODO: Handle non-split intervals last in the work list.
          if (interval->HasRegister() && interval->SameRegisterKind(*current)) {
            // The input must be live until the end of `defined_by`, to comply with
            // the linear scan algorithm. So we use `defined_by`'s end lifetime
            // position to check whether the input is dead or is inactive after
            // `defined_by`.
            DCHECK(interval->CoversSlow(defined_by->GetLifetimePosition()));
            size_t position = defined_by->GetLifetimePosition() + 1;
            FreeIfNotCoverAt(interval, position, free_until);
          }
        }
      }
    }
  }

  // For each inactive interval, set its register to be free until
  // the next intersection with `current`.
  for (LiveInterval* inactive : inactive_) {
    // Temp/Slow-path-safepoint interval has no holes.
    DCHECK(!inactive->IsTemp());
    if (!current->IsSplit() && !inactive->IsFixed()) {
      // Neither current nor inactive are fixed.
      // Thanks to SSA, a non-split interval starting in a hole of an
      // inactive interval should never intersect with that inactive interval.
      // Only if it's not fixed though, because fixed intervals don't come from SSA.
      DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
      continue;
    }

    DCHECK(inactive->HasRegister());
    if (free_until[inactive->GetRegister()] == 0) {
      // Already used by some active interval. No need to intersect.
      continue;
    }
    size_t next_intersection = inactive->FirstIntersectionWith(current);
    if (next_intersection != kNoLifetime) {
      free_until[inactive->GetRegister()] =
          std::min(free_until[inactive->GetRegister()], next_intersection);
    }
  }

  int reg = kNoRegister;
  if (current->HasRegister()) {
    // Some instructions have a fixed register output.
    reg = current->GetRegister();
    if (free_until[reg] == 0) {
      DCHECK(current->IsHighInterval());
      // AllocateBlockedReg will spill the holder of the register.
      return false;
    }
  } else {
    DCHECK(!current->IsHighInterval());
    int hint = current->FindFirstRegisterHint(free_until, liveness_);
    if ((hint != kNoRegister)
        // For simplicity, if the hint we are getting for a pair cannot be used,
        // we are just going to allocate a new pair.
        && !(current->IsLowInterval() && IsBlocked(GetHighForLowRegister(hint)))) {
      DCHECK(!IsBlocked(hint));
      reg = hint;
    } else if (current->IsLowInterval()) {
      reg = FindAvailableRegisterPair(free_until, current->GetStart());
    } else {
      reg = FindAvailableRegister(free_until, current);
    }
  }

  DCHECK_NE(reg, kNoRegister);
  // If we could not find a register, we need to spill.
  if (free_until[reg] == 0) {
    return false;
  }

  if (current->IsLowInterval()) {
    // If the high register of this interval is not available, we need to spill.
    int high_reg = current->GetHighInterval()->GetRegister();
    if (high_reg == kNoRegister) {
      high_reg = GetHighForLowRegister(reg);
    }
    if (free_until[high_reg] == 0) {
      return false;
    }
  }

  current->SetRegister(reg);
  if (!current->IsDeadAt(free_until[reg])) {
    // If the register is only available for a subset of live ranges
    // covered by `current`, split `current` before the position where
    // the register is not available anymore.
    LiveInterval* split = SplitBetween(current, current->GetStart(), free_until[reg]);
    DCHECK(split != nullptr);
    AddSorted(unhandled_, split);
  }
  return true;
}

bool RegisterAllocatorLinearScan::IsBlocked(int reg) const {
  return processing_core_registers_
      ? blocked_core_registers_[reg]
      : blocked_fp_registers_[reg];
}

int RegisterAllocatorLinearScan::FindAvailableRegisterPair(size_t* next_use, size_t starting_at) const {
  int reg = kNoRegister;
  // Pick the register pair that is used the last.
  for (size_t i = 0; i < number_of_registers_; ++i) {
    if (IsBlocked(i)) continue;
    if (!IsLowRegister(i)) continue;
    int high_register = GetHighForLowRegister(i);
    if (IsBlocked(high_register)) continue;
    int existing_high_register = GetHighForLowRegister(reg);
    if ((reg == kNoRegister) || (next_use[i] >= next_use[reg]
                        && next_use[high_register] >= next_use[existing_high_register])) {
      reg = i;
      if (next_use[i] == kMaxLifetimePosition
          && next_use[high_register] == kMaxLifetimePosition) {
        break;
      }
    } else if (next_use[reg] <= starting_at || next_use[existing_high_register] <= starting_at) {
      // If one of the current registers is known to be unavailable, just unconditionally
      // try a new one.
      reg = i;
    }
  }
  return reg;
}

bool RegisterAllocatorLinearScan::IsCallerSaveRegister(int reg) const {
  return processing_core_registers_
      ? !codegen_->IsCoreCalleeSaveRegister(reg)
      : !codegen_->IsFloatingPointCalleeSaveRegister(reg);
}

int RegisterAllocatorLinearScan::FindAvailableRegister(size_t* next_use, LiveInterval* current) const {
  // We special case intervals that do not span a safepoint to try to find a caller-save
  // register if one is available. We iterate from 0 to the number of registers,
  // so if there are caller-save registers available at the end, we continue the iteration.
  bool prefers_caller_save = !current->HasWillCallSafepoint();
  int reg = kNoRegister;
  for (size_t i = 0; i < number_of_registers_; ++i) {
    if (IsBlocked(i)) {
      // Register cannot be used. Continue.
      continue;
    }

    // Best case: we found a register fully available.
    if (next_use[i] == kMaxLifetimePosition) {
      if (prefers_caller_save && !IsCallerSaveRegister(i)) {
        // We can get shorter encodings on some platforms by using
        // small register numbers. So only update the candidate if the previous
        // one was not available for the whole method.
        if (reg == kNoRegister || next_use[reg] != kMaxLifetimePosition) {
          reg = i;
        }
        // Continue the iteration in the hope of finding a caller-save register.
        continue;
      } else {
        reg = i;
        // We know the register is good enough. Return it.
        break;
      }
    }

    // If we had no register before, take this one as a reference.
    if (reg == kNoRegister) {
      reg = i;
      continue;
    }

    // Pick the register that is used the last.
    if (next_use[i] > next_use[reg]) {
      reg = i;
      continue;
    }
  }
  return reg;
}
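
// Note on an invariant used below: the low and high halves of a pair are kept
// adjacent in `active_` and `inactive_`, with the high interval immediately
// following its low counterpart, because the low half is always popped from
// `unhandled_` and processed right before its high half.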

// Remove interval and its other half if any. Return iterator to the following element.
static ArenaVector<LiveInterval*>::iterator RemoveIntervalAndPotentialOtherHalf(
    ArenaVector<LiveInterval*>* intervals, ArenaVector<LiveInterval*>::iterator pos) {
  DCHECK(intervals->begin() <= pos && pos < intervals->end());
  LiveInterval* interval = *pos;
  if (interval->IsLowInterval()) {
    DCHECK(pos + 1 < intervals->end());
    DCHECK_EQ(*(pos + 1), interval->GetHighInterval());
    return intervals->erase(pos, pos + 2);
  } else if (interval->IsHighInterval()) {
    DCHECK(intervals->begin() < pos);
    DCHECK_EQ(*(pos - 1), interval->GetLowInterval());
    return intervals->erase(pos - 1, pos + 1);
  } else {
    return intervals->erase(pos);
  }
}

bool RegisterAllocatorLinearScan::TrySplitNonPairOrUnalignedPairIntervalAt(size_t position,
                                                                           size_t first_register_use,
                                                                           size_t* next_use) {
  for (auto it = active_.begin(), end = active_.end(); it != end; ++it) {
    LiveInterval* active = *it;
    DCHECK(active->HasRegister());
    if (active->IsFixed()) continue;
    if (active->IsHighInterval()) continue;
    if (first_register_use > next_use[active->GetRegister()]) continue;

    // Split the first interval found that is either:
    // 1) A non-pair interval.
    // 2) A pair interval whose high is not low + 1.
    // 3) A pair interval whose low is not even.
    if (!active->IsLowInterval() ||
        IsLowOfUnalignedPairInterval(active) ||
        !IsLowRegister(active->GetRegister())) {
      LiveInterval* split = Split(active, position);
      if (split != active) {
        handled_.push_back(active);
      }
      RemoveIntervalAndPotentialOtherHalf(&active_, it);
      AddSorted(unhandled_, split);
      return true;
    }
  }
  return false;
}

// Find the register that is used the last, and spill the interval
// that holds it. If the first use of `current` is after that register's
// next use, we spill `current` instead.
bool RegisterAllocatorLinearScan::AllocateBlockedReg(LiveInterval* current) {
  size_t first_register_use = current->FirstRegisterUse();
  if (current->HasRegister()) {
    DCHECK(current->IsHighInterval());
    // The low interval has allocated the register for the high interval. In
    // case the low interval had to split both intervals, we may end up in a
    // situation where the high interval does not have a register use anymore.
    // We must still proceed in order to split currently active and inactive
    // uses of the high interval's register, and put the high interval in the
    // active set.
    DCHECK(first_register_use != kNoLifetime || (current->GetNextSibling() != nullptr));
  } else if (first_register_use == kNoLifetime) {
    AllocateSpillSlotFor(current);
    return false;
  }

  // First set all registers as not being used.
  size_t* next_use = registers_array_;
  for (size_t i = 0; i < number_of_registers_; ++i) {
    next_use[i] = kMaxLifetimePosition;
  }

  // For each active interval, find the next use of its register after the
  // start of current.
  for (LiveInterval* active : active_) {
    DCHECK(active->HasRegister());
    if (active->IsFixed()) {
      next_use[active->GetRegister()] = current->GetStart();
    } else {
      size_t use = active->FirstRegisterUseAfter(current->GetStart());
      if (use != kNoLifetime) {
        next_use[active->GetRegister()] = use;
      }
    }
  }

  // For each inactive interval, find the next use of its register after the
  // start of current.
  for (LiveInterval* inactive : inactive_) {
    // Temp/Slow-path-safepoint interval has no holes.
    DCHECK(!inactive->IsTemp());
    if (!current->IsSplit() && !inactive->IsFixed()) {
      // Neither current nor inactive are fixed.
      // Thanks to SSA, a non-split interval starting in a hole of an
      // inactive interval should never intersect with that inactive interval.
      // Only if it's not fixed though, because fixed intervals don't come from SSA.
      DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
      continue;
    }
    DCHECK(inactive->HasRegister());
    size_t next_intersection = inactive->FirstIntersectionWith(current);
    if (next_intersection != kNoLifetime) {
      if (inactive->IsFixed()) {
        next_use[inactive->GetRegister()] =
            std::min(next_intersection, next_use[inactive->GetRegister()]);
      } else {
        size_t use = inactive->FirstUseAfter(current->GetStart());
        if (use != kNoLifetime) {
          next_use[inactive->GetRegister()] = std::min(use, next_use[inactive->GetRegister()]);
        }
      }
    }
  }

  int reg = kNoRegister;
  bool should_spill = false;
  if (current->HasRegister()) {
    DCHECK(current->IsHighInterval());
    reg = current->GetRegister();
    // When allocating the low part, we made sure the high register was available.
    DCHECK_LT(first_register_use, next_use[reg]);
  } else if (current->IsLowInterval()) {
    reg = FindAvailableRegisterPair(next_use, first_register_use);
    // We should spill if either register of the pair is not available.
    should_spill = (first_register_use >= next_use[reg])
      || (first_register_use >= next_use[GetHighForLowRegister(reg)]);
  } else {
    DCHECK(!current->IsHighInterval());
    reg = FindAvailableRegister(next_use, current);
    should_spill = (first_register_use >= next_use[reg]);
  }
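
  // `should_spill` means that even the best candidate register is next needed
  // at or before `current`'s own first register use, so spilling `current` is
  // at least as good as evicting the interval that holds the register.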
  DCHECK_NE(reg, kNoRegister);
  if (should_spill) {
    DCHECK(!current->IsHighInterval());
    bool is_allocation_at_use_site = (current->GetStart() >= (first_register_use - 1));
    if (is_allocation_at_use_site) {
      if (!current->IsLowInterval()) {
        DumpInterval(std::cerr, current);
        DumpAllIntervals(std::cerr);
        // This situation has the potential to infinite loop, so we make it a non-debug CHECK.
        HInstruction* at = liveness_.GetInstructionFromPosition(first_register_use / 2);
        CHECK(false) << "There are not enough registers available for "
          << current->GetParent()->GetDefinedBy()->DebugName() << " "
          << current->GetParent()->GetDefinedBy()->GetId()
          << " at " << first_register_use - 1 << " "
          << (at == nullptr ? "" : at->DebugName());
      }

      // If we're allocating a register for `current` because the instruction at
      // that position requires it, but we think we should spill, then there are
      // non-pair intervals or unaligned pair intervals blocking the allocation.
      // We split the first interval found, and put ourselves first in the
      // `unhandled_` list.
      bool success = TrySplitNonPairOrUnalignedPairIntervalAt(current->GetStart(),
                                                              first_register_use,
                                                              next_use);
      DCHECK(success);
      LiveInterval* existing = unhandled_->back();
      DCHECK(existing->IsHighInterval());
      DCHECK_EQ(existing->GetLowInterval(), current);
      unhandled_->push_back(current);
    } else {
      // If the first use of that instruction is after the last use of the found
      // register, we split this interval just before its first register use.
      AllocateSpillSlotFor(current);
      LiveInterval* split = SplitBetween(current, current->GetStart(), first_register_use - 1);
      DCHECK(current != split);
      AddSorted(unhandled_, split);
    }
    return false;
  } else {
    // Use this register and spill the active and inactive intervals that
    // hold that register.
    current->SetRegister(reg);

    for (auto it = active_.begin(), end = active_.end(); it != end; ++it) {
      LiveInterval* active = *it;
      if (active->GetRegister() == reg) {
        DCHECK(!active->IsFixed());
        LiveInterval* split = Split(active, current->GetStart());
        if (split != active) {
          handled_.push_back(active);
        }
        RemoveIntervalAndPotentialOtherHalf(&active_, it);
        AddSorted(unhandled_, split);
        break;
      }
    }

    // NOTE: Retrieve end() on each iteration because we're removing elements in the loop body.
    for (auto it = inactive_.begin(); it != inactive_.end(); ) {
      LiveInterval* inactive = *it;
      bool erased = false;
      if (inactive->GetRegister() == reg) {
        if (!current->IsSplit() && !inactive->IsFixed()) {
          // Neither current nor inactive are fixed.
          // Thanks to SSA, a non-split interval starting in a hole of an
          // inactive interval should never intersect with that inactive interval.
          // Only if it's not fixed though, because fixed intervals don't come from SSA.
          DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
        } else {
          size_t next_intersection = inactive->FirstIntersectionWith(current);
          if (next_intersection != kNoLifetime) {
            if (inactive->IsFixed()) {
              LiveInterval* split = Split(current, next_intersection);
              DCHECK_NE(split, current);
              AddSorted(unhandled_, split);
            } else {
              // Split at the start of `current`, which will lead to splitting
              // at the end of the lifetime hole of `inactive`.
              LiveInterval* split = Split(inactive, current->GetStart());
              // If it's inactive, it must start before the current interval.
              DCHECK_NE(split, inactive);
              it = RemoveIntervalAndPotentialOtherHalf(&inactive_, it);
              erased = true;
              handled_.push_back(inactive);
              AddSorted(unhandled_, split);
            }
          }
        }
      }
      // If we have erased the element, `it` already points to the next element.
      // Otherwise we need to move to the next element.
      if (!erased) {
        ++it;
      }
    }

    return true;
  }
}
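
// The unhandled lists are sorted by decreasing start position, so the next
// interval to process is always at the back. For a pair, AddSorted inserts
// {high, low} in that order, leaving the vector as [..., high, low]: the low
// half is popped first and the high half is then expected at the back (see
// the DCHECKs in LinearScan).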

void RegisterAllocatorLinearScan::AddSorted(ArenaVector<LiveInterval*>* array, LiveInterval* interval) {
  DCHECK(!interval->IsFixed() && !interval->HasSpillSlot());
  size_t insert_at = 0;
  for (size_t i = array->size(); i > 0; --i) {
    LiveInterval* current = (*array)[i - 1u];
    // High intervals must be processed right after their low equivalent.
    if (current->StartsAfter(interval) && !current->IsHighInterval()) {
      insert_at = i;
      break;
    }
  }

  // Insert the high interval before the low, to ensure the low is processed first.
  auto insert_pos = array->begin() + insert_at;
  if (interval->HasHighInterval()) {
    array->insert(insert_pos, { interval->GetHighInterval(), interval });
  } else if (interval->HasLowInterval()) {
    array->insert(insert_pos, { interval, interval->GetLowInterval() });
  } else {
    array->insert(insert_pos, interval);
  }
}
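
// Each entry of a spill slot list below records the end position of the last
// interval assigned to that slot; a slot is reusable once that end is at or
// before the new parent interval's start. For example (illustrative): with
// int_spill_slots_ = {10, 30} and a new interval starting at 20, slot 0 is
// free and reused, while slot 1 is still occupied.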

void RegisterAllocatorLinearScan::AllocateSpillSlotFor(LiveInterval* interval) {
  if (interval->IsHighInterval()) {
    // The low interval already took care of allocating the spill slot.
    DCHECK(!interval->GetLowInterval()->HasRegister());
    DCHECK(interval->GetLowInterval()->GetParent()->HasSpillSlot());
    return;
  }

  LiveInterval* parent = interval->GetParent();

  // An instruction gets a spill slot for its entire lifetime. If the parent
  // of this interval already has a spill slot, there is nothing to do.
  if (parent->HasSpillSlot()) {
    return;
  }

  HInstruction* defined_by = parent->GetDefinedBy();
  DCHECK(!defined_by->IsPhi() || !defined_by->AsPhi()->IsCatchPhi());

  if (defined_by->IsParameterValue()) {
    // Parameters have their own stack slot.
    parent->SetSpillSlot(codegen_->GetStackSlotOfParameter(defined_by->AsParameterValue()));
    return;
  }

  if (defined_by->IsCurrentMethod()) {
    parent->SetSpillSlot(0);
    return;
  }

  if (defined_by->IsConstant()) {
    // Constants don't need a spill slot.
    return;
  }

  ArenaVector<size_t>* spill_slots = nullptr;
  switch (interval->GetType()) {
    case Primitive::kPrimDouble:
      spill_slots = &double_spill_slots_;
      break;
    case Primitive::kPrimLong:
      spill_slots = &long_spill_slots_;
      break;
    case Primitive::kPrimFloat:
      spill_slots = &float_spill_slots_;
      break;
    case Primitive::kPrimNot:
    case Primitive::kPrimInt:
    case Primitive::kPrimChar:
    case Primitive::kPrimByte:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimShort:
      spill_slots = &int_spill_slots_;
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type for interval " << interval->GetType();
  }

  // Find the first available run of spill slots.
  size_t number_of_spill_slots_needed = parent->NumberOfSpillSlotsNeeded();
  size_t slot = 0;
  for (size_t e = spill_slots->size(); slot < e; ++slot) {
    bool found = true;
    for (size_t s = slot, u = std::min(slot + number_of_spill_slots_needed, e); s < u; s++) {
      if ((*spill_slots)[s] > parent->GetStart()) {
        found = false;  // failure
        break;
      }
    }
    if (found) {
      break;  // success
    }
  }

  // Need new spill slots?
  size_t upper = slot + number_of_spill_slots_needed;
  if (upper > spill_slots->size()) {
    spill_slots->resize(upper);
  }
  // Set slots to end.
  size_t end = interval->GetLastSibling()->GetEnd();
  for (size_t s = slot; s < upper; s++) {
    (*spill_slots)[s] = end;
  }

  // Note that the exact spill slot location will be computed when we resolve,
  // that is when we know the number of spill slots for each type.
  parent->SetSpillSlot(slot);
}

void RegisterAllocatorLinearScan::AllocateSpillSlotForCatchPhi(HPhi* phi) {
  LiveInterval* interval = phi->GetLiveInterval();

  HInstruction* previous_phi = phi->GetPrevious();
  DCHECK(previous_phi == nullptr ||
         previous_phi->AsPhi()->GetRegNumber() <= phi->GetRegNumber())
      << "Phis expected to be sorted by vreg number, so that equivalent phis are adjacent.";

  if (phi->IsVRegEquivalentOf(previous_phi)) {
    // This is an equivalent of the previous phi. We need to assign the same
    // catch phi slot.
    DCHECK(previous_phi->GetLiveInterval()->HasSpillSlot());
    interval->SetSpillSlot(previous_phi->GetLiveInterval()->GetSpillSlot());
  } else {
    // Allocate a new spill slot for this catch phi.
    // TODO: Reuse spill slots when intervals of phis from different catch
    //       blocks do not overlap.
    interval->SetSpillSlot(catch_phi_spill_slots_);
    catch_phi_spill_slots_ += interval->NumberOfSpillSlotsNeeded();
  }
}

}  // namespace art