// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_GREEDY_ALLOCATOR_H_
#define V8_GREEDY_ALLOCATOR_H_

#include "src/compiler/coalesced-live-ranges.h"
#include "src/compiler/register-allocator.h"
#include "src/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {


// The unit of allocation scheduling. At minimum this is a LiveRange, but it
// can also be a group of LiveRanges. It has to be comparable.
class AllocationCandidate {
 public:
  explicit AllocationCandidate(LiveRange* range)
      : is_group_(false), size_(range->GetSize()) {
    candidate_.range_ = range;
  }

  explicit AllocationCandidate(LiveRangeGroup* ranges)
      : is_group_(true), size_(CalculateGroupSize(ranges)) {
    candidate_.group_ = ranges;
  }

  // Strict ordering operators.
  bool operator<(const AllocationCandidate& other) const {
    return size() < other.size();
  }

  bool operator>(const AllocationCandidate& other) const {
    return size() > other.size();
  }

  bool is_group() const { return is_group_; }
  LiveRange* live_range() const { return candidate_.range_; }
  LiveRangeGroup* group() const { return candidate_.group_; }

 private:
  unsigned CalculateGroupSize(LiveRangeGroup* group) {
    unsigned ret = 0;
    for (LiveRange* range : group->ranges()) {
      ret += range->GetSize();
    }
    return ret;
  }

  unsigned size() const { return size_; }
  bool is_group_;
  unsigned size_;
  union {
    LiveRange* range_;
    LiveRangeGroup* group_;
  } candidate_;
};
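
// Example (hypothetical ranges): candidates compare by total size, so a
// priority queue of AllocationCandidate pops the largest candidate first.
//
//   AllocationCandidate small(range_of_size_2);  // size() == 2
//   AllocationCandidate large(range_of_size_7);  // size() == 7
//   DCHECK(small < large);  // `large` is scheduled ahead of `small`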


// Schedule processing (allocating) of AllocationCandidates.
class AllocationScheduler final : ZoneObject {
 public:
  explicit AllocationScheduler(Zone* zone) : queue_(zone) {}
  void Schedule(LiveRange* range);
  void Schedule(LiveRangeGroup* group);
  AllocationCandidate GetNext();
  bool empty() const { return queue_.empty(); }

 private:
  typedef ZonePriorityQueue<AllocationCandidate> ScheduleQueue;
  ScheduleQueue queue_;

  DISALLOW_COPY_AND_ASSIGN(AllocationScheduler);
};
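
// Usage sketch (hypothetical caller; assumes GetNext() pops the largest
// candidate off the underlying priority queue):
//
//   AllocationScheduler scheduler(zone);
//   scheduler.Schedule(range);
//   while (!scheduler.empty()) {
//     AllocationCandidate candidate = scheduler.GetNext();
//     // ... try to allocate `candidate` ...
//   }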


// A variant of the LLVM Greedy Register Allocator. See
// http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
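//
// The phases, as suggested by the private helpers below: fixed ranges are
// preallocated, live ranges are grouped, candidates are scheduled
// largest-first, and each candidate is then allocated, evicting lower-weight
// conflicts or splitting/spilling when no register can be found.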
class GreedyAllocator final : public RegisterAllocator {
 public:
  explicit GreedyAllocator(RegisterAllocationData* data, RegisterKind kind,
                           Zone* local_zone);

  void AllocateRegisters();

 private:
  static const float kAllocatedRangeMultiplier;

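  // Allocation scales a range's weight up by kAllocatedRangeMultiplier, and
  // eviction scales it back down, so (assuming the multiplier is greater
  // than 1) already-allocated ranges become harder to displace.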
  static void UpdateWeightAtAllocation(LiveRange* range) {
    DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
    range->set_weight(range->weight() * kAllocatedRangeMultiplier);
  }

  static void UpdateWeightAtEviction(LiveRange* range) {
    DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
    range->set_weight(range->weight() / kAllocatedRangeMultiplier);
  }

  AllocationScheduler& scheduler() { return scheduler_; }
  CoalescedLiveRanges* current_allocations(unsigned i) {
    return allocations_[i];
  }

  CoalescedLiveRanges* current_allocations(unsigned i) const {
    return allocations_[i];
  }

  Zone* local_zone() const { return local_zone_; }
  ZoneVector<LiveRangeGroup*>& groups() { return groups_; }
  const ZoneVector<LiveRangeGroup*>& groups() const { return groups_; }

  // Insert fixed ranges.
  void PreallocateFixedRanges();

  void GroupLiveRanges();

  // Schedule unassigned live ranges for allocation.
  void ScheduleAllocationCandidates();

  void AllocateRegisterToRange(unsigned reg_id, LiveRange* range) {
    UpdateWeightAtAllocation(range);
    current_allocations(reg_id)->AllocateRange(range);
  }
  // Evict and reschedule conflicts of a given range, at a given register.
  void EvictAndRescheduleConflicts(unsigned reg_id, const LiveRange* range);

  void TryAllocateCandidate(const AllocationCandidate& candidate);
  void TryAllocateLiveRange(LiveRange* range);
  void TryAllocateGroup(LiveRangeGroup* group);

  // Calculate the weight of a candidate for allocation, if it is not already
  // set.
  void EnsureValidRangeWeight(LiveRange* range);

  // Calculate the new weight of a range that is about to be allocated.
  float GetAllocatedRangeWeight(float candidate_weight);

  // Returns kInvalidWeight if there are no conflicts, or the largest weight of
  // a range conflicting with the given range, at the given register.
  float GetMaximumConflictingWeight(unsigned reg_id, const LiveRange* range,
                                    float competing_weight) const;

  // Returns kInvalidWeight if there are no conflicts, or the largest weight of
  // a range conflicting with the given group, at the given register.
  float GetMaximumConflictingWeight(unsigned reg_id,
                                    const LiveRangeGroup* group,
                                    float group_weight) const;
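
  // Illustrative eviction decision (hypothetical weights): a candidate of
  // weight 5.0f whose maximum conflicting weight at some reg_id is 3.0f can
  // evict those conflicts; were the maximum conflicting weight 8.0f instead,
  // the candidate would have to be split or spilled.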

  // This is the extension point for splitting heuristics.
  void SplitOrSpillBlockedRange(LiveRange* range);

  // Find a good position at which to fill, after a range was spilled at a
  // call.
  LifetimePosition FindSplitPositionAfterCall(const LiveRange* range,
                                              int call_index);
  // Split a range around all calls it passes over. Returns true if any changes
  // were made, or false if no calls were found.
  bool TrySplitAroundCalls(LiveRange* range);

  // Find a split position at the outermost loop.
  LifetimePosition FindSplitPositionBeforeLoops(LiveRange* range);

  // Finds the first call instruction in the path of this range. Splits before
  // and requeues that segment (if any), spills the section over the call, and
  // returns the section after the call. The return is:
  // - the same range, if no call was found;
  // - nullptr, if the range finished at the call and there is no "after the
  //   call" portion;
  // - the portion after the call.
  LiveRange* GetRemainderAfterSplittingAroundFirstCall(LiveRange* range);

  // While we attempt to merge spill ranges later on in the allocation
  // pipeline, we want to ensure group elements get merged. Waiting until later
  // may hinder mergeability, since the pipeline merger (being naive) may
  // create conflicts between the spill ranges of group members.
  void TryReuseSpillRangesForGroups();

  LifetimePosition GetLastResortSplitPosition(const LiveRange* range);

  bool IsProgressPossible(const LiveRange* range);

  // Necessary heuristic: spill when all else fails.
  void SpillRangeAsLastResort(LiveRange* range);

  void AssignRangeToRegister(int reg_id, LiveRange* range);

  Zone* local_zone_;
  ZoneVector<CoalescedLiveRanges*> allocations_;
  AllocationScheduler scheduler_;
  ZoneVector<LiveRangeGroup*> groups_;

  DISALLOW_COPY_AND_ASSIGN(GreedyAllocator);
};
}  // namespace compiler
}  // namespace internal
}  // namespace v8
#endif  // V8_GREEDY_ALLOCATOR_H_