• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 
17 #ifndef ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_LINEAR_SCAN_H_
18 #define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_LINEAR_SCAN_H_
19 
20 #include "arch/instruction_set.h"
21 #include "base/arena_containers.h"
22 #include "base/macros.h"
23 #include "primitive.h"
24 #include "register_allocator.h"
25 
26 namespace art {
27 
28 class CodeGenerator;
29 class HBasicBlock;
30 class HGraph;
31 class HInstruction;
32 class HParallelMove;
33 class HPhi;
34 class LiveInterval;
35 class Location;
36 class SsaLivenessAnalysis;
37 
38 /**
39  * An implementation of a linear scan register allocator on an `HGraph` with SSA form.
40  */
41 class RegisterAllocatorLinearScan : public RegisterAllocator {
42  public:
43   RegisterAllocatorLinearScan(ArenaAllocator* allocator,
44                               CodeGenerator* codegen,
45                               const SsaLivenessAnalysis& analysis);
~RegisterAllocatorLinearScan()46   ~RegisterAllocatorLinearScan() OVERRIDE {}
47 
48   void AllocateRegisters() OVERRIDE;
49 
Validate(bool log_fatal_on_failure)50   bool Validate(bool log_fatal_on_failure) OVERRIDE {
51     processing_core_registers_ = true;
52     if (!ValidateInternal(log_fatal_on_failure)) {
53       return false;
54     }
55     processing_core_registers_ = false;
56     return ValidateInternal(log_fatal_on_failure);
57   }
58 
GetNumberOfSpillSlots()59   size_t GetNumberOfSpillSlots() const {
60     return int_spill_slots_.size()
61         + long_spill_slots_.size()
62         + float_spill_slots_.size()
63         + double_spill_slots_.size()
64         + catch_phi_spill_slots_;
65   }
66 
67  private:
68   // Main methods of the allocator.
69   void LinearScan();
70   bool TryAllocateFreeReg(LiveInterval* interval);
71   bool AllocateBlockedReg(LiveInterval* interval);
72 
73   // Add `interval` in the given sorted list.
74   static void AddSorted(ArenaVector<LiveInterval*>* array, LiveInterval* interval);
75 
76   // Returns whether `reg` is blocked by the code generator.
77   bool IsBlocked(int reg) const;
78 
79   // Update the interval for the register in `location` to cover [start, end).
80   void BlockRegister(Location location, size_t start, size_t end);
81   void BlockRegisters(size_t start, size_t end, bool caller_save_only = false);
82 
83   // Allocate a spill slot for the given interval. Should be called in linear
84   // order of interval starting positions.
85   void AllocateSpillSlotFor(LiveInterval* interval);
86 
87   // Allocate a spill slot for the given catch phi. Will allocate the same slot
88   // for phis which share the same vreg. Must be called in reverse linear order
89   // of lifetime positions and ascending vreg numbers for correctness.
90   void AllocateSpillSlotForCatchPhi(HPhi* phi);
91 
92   // Helper methods.
93   void AllocateRegistersInternal();
94   void ProcessInstruction(HInstruction* instruction);
95   bool ValidateInternal(bool log_fatal_on_failure) const;
96   void DumpInterval(std::ostream& stream, LiveInterval* interval) const;
97   void DumpAllIntervals(std::ostream& stream) const;
98   int FindAvailableRegisterPair(size_t* next_use, size_t starting_at) const;
99   int FindAvailableRegister(size_t* next_use, LiveInterval* current) const;
100   bool IsCallerSaveRegister(int reg) const;
101 
102   // Try splitting an active non-pair or unaligned pair interval at the given `position`.
103   // Returns whether it was successful at finding such an interval.
104   bool TrySplitNonPairOrUnalignedPairIntervalAt(size_t position,
105                                                 size_t first_register_use,
106                                                 size_t* next_use);
107 
108   // List of intervals for core registers that must be processed, ordered by start
109   // position. Last entry is the interval that has the lowest start position.
110   // This list is initially populated before doing the linear scan.
111   ArenaVector<LiveInterval*> unhandled_core_intervals_;
112 
113   // List of intervals for floating-point registers. Same comments as above.
114   ArenaVector<LiveInterval*> unhandled_fp_intervals_;
115 
116   // Currently processed list of unhandled intervals. Either `unhandled_core_intervals_`
117   // or `unhandled_fp_intervals_`.
118   ArenaVector<LiveInterval*>* unhandled_;
119 
120   // List of intervals that have been processed.
121   ArenaVector<LiveInterval*> handled_;
122 
123   // List of intervals that are currently active when processing a new live interval.
124   // That is, they have a live range that spans the start of the new interval.
125   ArenaVector<LiveInterval*> active_;
126 
127   // List of intervals that are currently inactive when processing a new live interval.
128   // That is, they have a lifetime hole that spans the start of the new interval.
129   ArenaVector<LiveInterval*> inactive_;
130 
131   // Fixed intervals for physical registers. Such intervals cover the positions
132   // where an instruction requires a specific register.
133   ArenaVector<LiveInterval*> physical_core_register_intervals_;
134   ArenaVector<LiveInterval*> physical_fp_register_intervals_;
135 
136   // Intervals for temporaries. Such intervals cover the positions
137   // where an instruction requires a temporary.
138   ArenaVector<LiveInterval*> temp_intervals_;
139 
140   // The spill slots allocated for live intervals. We ensure spill slots
141   // are typed to avoid (1) doing moves and swaps between two different kinds
142   // of registers, and (2) swapping between a single stack slot and a double
143   // stack slot. This simplifies the parallel move resolver.
144   ArenaVector<size_t> int_spill_slots_;
145   ArenaVector<size_t> long_spill_slots_;
146   ArenaVector<size_t> float_spill_slots_;
147   ArenaVector<size_t> double_spill_slots_;
148 
149   // Spill slots allocated to catch phis. This category is special-cased because
150   // (1) slots are allocated prior to linear scan and in reverse linear order,
151   // (2) equivalent phis need to share slots despite having different types.
152   size_t catch_phi_spill_slots_;
153 
154   // Instructions that need a safepoint.
155   ArenaVector<HInstruction*> safepoints_;
156 
157   // True if processing core registers. False if processing floating
158   // point registers.
159   bool processing_core_registers_;
160 
161   // Number of registers for the current register kind (core or floating point).
162   size_t number_of_registers_;
163 
164   // Temporary array, allocated ahead of time for simplicity.
165   size_t* registers_array_;
166 
167   // Blocked registers, as decided by the code generator.
168   bool* const blocked_core_registers_;
169   bool* const blocked_fp_registers_;
170 
171   // Slots reserved for out arguments.
172   size_t reserved_out_slots_;
173 
174   ART_FRIEND_TEST(RegisterAllocatorTest, FreeUntil);
175   ART_FRIEND_TEST(RegisterAllocatorTest, SpillInactive);
176 
177   DISALLOW_COPY_AND_ASSIGN(RegisterAllocatorLinearScan);
178 };
179 
180 }  // namespace art
181 
182 #endif  // ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_LINEAR_SCAN_H_
183