// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/greedy-allocator.h"
#include "src/compiler/register-allocator.h"

namespace v8 {
namespace internal {
namespace compiler {


#define TRACE(...)                             \
  do {                                         \
    if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
  } while (false)


const float GreedyAllocator::kAllocatedRangeMultiplier = 10.0;


namespace {

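// Records a newly assigned register as the use hint on all of the range's use
// positions and, for phi ranges, in the corresponding phi map entry.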
void UpdateOperands(LiveRange* range, RegisterAllocationData* data) {
  int reg_id = range->assigned_register();
  range->SetUseHints(reg_id);
  if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
    data->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg_id);
  }
}

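// Reverts UpdateOperands: clears the use hints and, for phi ranges, the
// register recorded in the phi map.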
void UnsetOperands(LiveRange* range, RegisterAllocationData* data) {
  range->UnsetUseHints();
  if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
    data->GetPhiMapValueFor(range->TopLevel())->UnsetAssignedRegister();
  }
}


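// Splits |range| at |pos|, which must lie strictly inside the range and be a
// legal split point: a start or gap position, or a position that is not the
// last instruction of its block.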
LiveRange* Split(LiveRange* range, RegisterAllocationData* data,
                 LifetimePosition pos) {
  DCHECK(range->Start() < pos && pos < range->End());
  DCHECK(pos.IsStart() || pos.IsGapPosition() ||
         (data->code()
              ->GetInstructionBlock(pos.ToInstructionIndex())
              ->last_instruction_index() != pos.ToInstructionIndex()));
  LiveRange* result = range->SplitAt(pos, data->allocation_zone());
  return result;
}


}  // namespace


AllocationCandidate AllocationScheduler::GetNext() {
  DCHECK(!queue_.empty());
  AllocationCandidate ret = queue_.top();
  queue_.pop();
  return ret;
}


void AllocationScheduler::Schedule(LiveRange* range) {
  TRACE("Scheduling live range %d:%d.\n", range->TopLevel()->vreg(),
        range->relative_id());
  queue_.push(AllocationCandidate(range));
}


void AllocationScheduler::Schedule(LiveRangeGroup* group) {
  queue_.push(AllocationCandidate(group));
}

GreedyAllocator::GreedyAllocator(RegisterAllocationData* data,
                                 RegisterKind kind, Zone* local_zone)
    : RegisterAllocator(data, kind),
      local_zone_(local_zone),
      allocations_(local_zone),
      scheduler_(local_zone),
      groups_(local_zone) {}


void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
  TRACE("Assigning register %s to live range %d:%d\n", RegisterName(reg_id),
        range->TopLevel()->vreg(), range->relative_id());

  DCHECK(!range->HasRegisterAssigned());

  AllocateRegisterToRange(reg_id, range);

  TRACE("Assigning %s to range %d:%d.\n", RegisterName(reg_id),
        range->TopLevel()->vreg(), range->relative_id());
  range->set_assigned_register(reg_id);
  UpdateOperands(range, data());
}


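// Sets up one CoalescedLiveRanges tracker per register and claims, for each
// fixed live range, the register it is pinned to, so that later candidates
// see those registers as occupied.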
void GreedyAllocator::PreallocateFixedRanges() {
  allocations_.resize(num_registers());
  for (int i = 0; i < num_registers(); i++) {
    allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone());
  }

  for (LiveRange* fixed_range : GetFixedRegisters()) {
    if (fixed_range != nullptr) {
      DCHECK_EQ(mode(), fixed_range->kind());
      DCHECK(fixed_range->TopLevel()->IsFixed());

      int reg_nr = fixed_range->assigned_register();
      EnsureValidRangeWeight(fixed_range);
      AllocateRegisterToRange(reg_nr, fixed_range);
    }
  }
}


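// Groups each phi live range with its non-conflicting inputs so that the whole
// group can later be allocated to a single register, avoiding moves at the
// phi. Existing groups are merged when none of their members conflict.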
void GreedyAllocator::GroupLiveRanges() {
  CoalescedLiveRanges grouper(local_zone());
  for (TopLevelLiveRange* range : data()->live_ranges()) {
    grouper.clear();
    // Skip splinters, because we do not want to optimize for them, and moves
    // due to assigning them to different registers occur in deferred blocks.
    if (!CanProcessRange(range) || range->IsSplinter() || !range->is_phi()) {
      continue;
    }

    // A phi can't be a memory operand, so it couldn't have been split.
    DCHECK(!range->spilled());

    // Maybe this phi range is itself an input to another phi which was already
    // processed.
    LiveRangeGroup* latest_grp = range->group() != nullptr
                                     ? range->group()
                                     : new (local_zone())
                                           LiveRangeGroup(local_zone());

    // Populate the grouper.
    if (range->group() == nullptr) {
      grouper.AllocateRange(range);
    } else {
      for (LiveRange* member : range->group()->ranges()) {
        grouper.AllocateRange(member);
      }
    }
    for (int j : data()->GetPhiMapValueFor(range)->phi()->operands()) {
      // Skip the output if it also appears among the inputs, which may happen
      // for loops.
      if (j == range->vreg()) continue;

      TopLevelLiveRange* other_top = data()->live_ranges()[j];

      if (other_top->IsSplinter()) continue;
      // If the other was a memory operand, it might have been split.
      // So get the unsplit part.
      LiveRange* other =
          other_top->next() == nullptr ? other_top : other_top->next();

      if (other->spilled()) continue;

      LiveRangeGroup* other_group = other->group();
      if (other_group != nullptr) {
        bool can_merge = true;
        for (LiveRange* member : other_group->ranges()) {
          if (grouper.GetConflicts(member).Current() != nullptr) {
            can_merge = false;
            break;
          }
        }
        // If each member doesn't conflict with the current group, then, since
        // the members don't conflict with each other either, we can merge them.
        if (can_merge) {
          latest_grp->ranges().insert(latest_grp->ranges().end(),
                                      other_group->ranges().begin(),
                                      other_group->ranges().end());
          for (LiveRange* member : other_group->ranges()) {
            grouper.AllocateRange(member);
            member->set_group(latest_grp);
          }
          // Clear the other group, so we avoid scheduling it.
          other_group->ranges().clear();
        }
      } else if (grouper.GetConflicts(other).Current() == nullptr) {
        grouper.AllocateRange(other);
        latest_grp->ranges().push_back(other);
        other->set_group(latest_grp);
      }
    }

    if (latest_grp->ranges().size() > 0 && range->group() == nullptr) {
      latest_grp->ranges().push_back(range);
      DCHECK(latest_grp->ranges().size() > 1);
      groups().push_back(latest_grp);
      range->set_group(latest_grp);
    }
  }
}


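// Enqueues all allocation candidates: the live range groups built by
// GroupLiveRanges, plus every live range child that is neither spilled nor
// part of a group.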
void GreedyAllocator::ScheduleAllocationCandidates() {
  for (LiveRangeGroup* group : groups()) {
    if (group->ranges().size() > 0) {
      // We shouldn't have added single-range groups.
      DCHECK(group->ranges().size() != 1);
      scheduler().Schedule(group);
    }
  }
  for (LiveRange* range : data()->live_ranges()) {
    if (CanProcessRange(range)) {
      for (LiveRange* child = range; child != nullptr; child = child->next()) {
        if (!child->spilled() && child->group() == nullptr) {
          scheduler().Schedule(child);
        }
      }
    }
  }
}


void GreedyAllocator::TryAllocateCandidate(
    const AllocationCandidate& candidate) {
  if (candidate.is_group()) {
    TryAllocateGroup(candidate.group());
  } else {
    TryAllocateLiveRange(candidate.live_range());
  }
}


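// Tries to place all members of the group in the same register: the first
// register with no conflicts, or else the register whose heaviest conflict is
// lightest and still below the group's weight, in which case the conflicts are
// evicted first. If neither exists, the members are rescheduled individually.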
void GreedyAllocator::TryAllocateGroup(LiveRangeGroup* group) {
  float group_weight = 0.0;
  for (LiveRange* member : group->ranges()) {
    EnsureValidRangeWeight(member);
    group_weight = Max(group_weight, member->weight());
  }

  float eviction_weight = group_weight;
  int eviction_reg = -1;
  int free_reg = -1;
  for (int i = 0; i < num_allocatable_registers(); ++i) {
    int reg = allocatable_register_code(i);
    float weight = GetMaximumConflictingWeight(reg, group, group_weight);
    if (weight == LiveRange::kInvalidWeight) {
      free_reg = reg;
      break;
    }
    if (weight < eviction_weight) {
      eviction_weight = weight;
      eviction_reg = reg;
    }
  }
  if (eviction_reg < 0 && free_reg < 0) {
    for (LiveRange* member : group->ranges()) {
      scheduler().Schedule(member);
    }
    return;
  }
  if (free_reg < 0) {
    DCHECK(eviction_reg >= 0);
    for (LiveRange* member : group->ranges()) {
      EvictAndRescheduleConflicts(eviction_reg, member);
    }
    free_reg = eviction_reg;
  }

  DCHECK(free_reg >= 0);
  for (LiveRange* member : group->ranges()) {
    AssignRangeToRegister(free_reg, member);
  }
}


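// Allocation strategy for a single range: try the hinted register first, then
// the first conflict-free register, then the register with the cheapest
// evictable conflicts, and finally fall back to splitting or spilling.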
void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
  // TODO(mtrofin): once we introduce groups, we'll want to first try and
  // allocate at the preferred register.
  TRACE("Attempting to allocate live range %d:%d.\n", range->TopLevel()->vreg(),
        range->relative_id());
  int free_reg = -1;
  int evictable_reg = -1;
  int hinted_reg = -1;

  EnsureValidRangeWeight(range);
  float competing_weight = range->weight();
  DCHECK(competing_weight != LiveRange::kInvalidWeight);

  // Can we allocate at the hinted register?
  if (range->FirstHintPosition(&hinted_reg) != nullptr) {
    DCHECK(hinted_reg >= 0);
    float max_conflict_weight =
        GetMaximumConflictingWeight(hinted_reg, range, competing_weight);
    if (max_conflict_weight == LiveRange::kInvalidWeight) {
      free_reg = hinted_reg;
    } else if (max_conflict_weight < range->weight()) {
      evictable_reg = hinted_reg;
    }
  }

  if (free_reg < 0 && evictable_reg < 0) {
    // There was no hinted reg, or we cannot allocate there.
    float smallest_weight = LiveRange::kMaxWeight;

    // Seek either the first free register, or, from the set of registers
    // where the maximum conflict is lower than the candidate's weight, the one
    // with the smallest such weight.
    for (int i = 0; i < num_allocatable_registers(); i++) {
      int reg = allocatable_register_code(i);
      // Skip unnecessarily re-visiting the hinted register, if any.
      if (reg == hinted_reg) continue;
      float max_conflict_weight =
          GetMaximumConflictingWeight(reg, range, competing_weight);
      if (max_conflict_weight == LiveRange::kInvalidWeight) {
        free_reg = reg;
        break;
      }
      if (max_conflict_weight < range->weight() &&
          max_conflict_weight < smallest_weight) {
        smallest_weight = max_conflict_weight;
        evictable_reg = reg;
      }
    }
  }

  // We have a free register, so we use it.
  if (free_reg >= 0) {
    TRACE("Found free register %s for live range %d:%d.\n",
          RegisterName(free_reg), range->TopLevel()->vreg(),
          range->relative_id());
    AssignRangeToRegister(free_reg, range);
    return;
  }

  // We found a register to perform evictions, so we evict and allocate our
  // candidate.
  if (evictable_reg >= 0) {
    TRACE("Found evictable register %s for live range %d:%d.\n",
          RegisterName(evictable_reg), range->TopLevel()->vreg(),
          range->relative_id());
    EvictAndRescheduleConflicts(evictable_reg, range);
    AssignRangeToRegister(evictable_reg, range);
    return;
  }

  // The range needs to be split or spilled.
  SplitOrSpillBlockedRange(range);
}


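// Evicts every range currently occupying |reg_id| that conflicts with |range|
// and puts the evicted ranges back on the scheduler. Fixed ranges must never
// be evicted.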
void GreedyAllocator::EvictAndRescheduleConflicts(unsigned reg_id,
                                                  const LiveRange* range) {
  auto conflicts = current_allocations(reg_id)->GetConflicts(range);
  for (LiveRange* conflict = conflicts.Current(); conflict != nullptr;
       conflict = conflicts.RemoveCurrentAndGetNext()) {
    DCHECK(conflict->HasRegisterAssigned());
    CHECK(!conflict->TopLevel()->IsFixed());
    conflict->UnsetAssignedRegister();
    UnsetOperands(conflict, data());
    UpdateWeightAtEviction(conflict);
    scheduler().Schedule(conflict);
    TRACE("Evicted range %d:%d.\n", conflict->TopLevel()->vreg(),
          conflict->relative_id());
  }
}


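// Top-level driver: split and spill ranges defined by memory operands, group
// phis, schedule the candidates, pre-claim fixed registers, then drain the
// scheduler, allocating (or splitting/spilling) one candidate at a time.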
void GreedyAllocator::AllocateRegisters() {
  CHECK(scheduler().empty());
  CHECK(allocations_.empty());

  TRACE("Begin allocating function %s with the Greedy Allocator\n",
        data()->debug_name());

  SplitAndSpillRangesDefinedByMemoryOperand(true);
  GroupLiveRanges();
  ScheduleAllocationCandidates();
  PreallocateFixedRanges();
  while (!scheduler().empty()) {
    AllocationCandidate candidate = scheduler().GetNext();
    TryAllocateCandidate(candidate);
  }

  for (size_t i = 0; i < allocations_.size(); ++i) {
    if (!allocations_[i]->empty()) {
      data()->MarkAllocated(mode(), static_cast<int>(i));
    }
  }
  allocations_.clear();

  TryReuseSpillRangesForGroups();

  TRACE("End allocating function %s with the Greedy Allocator\n",
        data()->debug_name());
}


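// For each phi group, tries to merge the members' spill ranges so that the
// whole group shares a single spill slot where possible.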
void GreedyAllocator::TryReuseSpillRangesForGroups() {
  for (TopLevelLiveRange* top : data()->live_ranges()) {
    if (!CanProcessRange(top) || !top->is_phi() || top->group() == nullptr) {
      continue;
    }

    SpillRange* spill_range = nullptr;
    for (LiveRange* member : top->group()->ranges()) {
      if (!member->TopLevel()->HasSpillRange()) continue;
      SpillRange* member_range = member->TopLevel()->GetSpillRange();
      if (spill_range == nullptr) {
        spill_range = member_range;
      } else {
        // This may not always succeed, because we group non-conflicting ranges
        // that may have been splintered, and the splinters may cause conflicts
        // in the spill ranges.
        // TODO(mtrofin): should the splinters own their own spill ranges?
        spill_range->TryMerge(member_range);
      }
    }
  }
}


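// Returns the largest weight among the ranges already allocated to |reg_id|
// that conflict with |range|: kInvalidWeight if there is no conflict, or
// kMaxWeight as soon as a conflict at least as heavy as |competing_weight| is
// found.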
float GreedyAllocator::GetMaximumConflictingWeight(
    unsigned reg_id, const LiveRange* range, float competing_weight) const {
  float ret = LiveRange::kInvalidWeight;

  auto conflicts = current_allocations(reg_id)->GetConflicts(range);
  for (LiveRange* conflict = conflicts.Current(); conflict != nullptr;
       conflict = conflicts.GetNext()) {
    DCHECK_NE(conflict->weight(), LiveRange::kInvalidWeight);
    if (competing_weight <= conflict->weight()) return LiveRange::kMaxWeight;
    ret = Max(ret, conflict->weight());
    DCHECK(ret < LiveRange::kMaxWeight);
  }

  return ret;
}


float GreedyAllocator::GetMaximumConflictingWeight(unsigned reg_id,
                                                   const LiveRangeGroup* group,
                                                   float group_weight) const {
  float ret = LiveRange::kInvalidWeight;

  for (LiveRange* member : group->ranges()) {
    float member_conflict_weight =
        GetMaximumConflictingWeight(reg_id, member, group_weight);
    if (member_conflict_weight == LiveRange::kMaxWeight) {
      return LiveRange::kMaxWeight;
    }
    if (member_conflict_weight > group_weight) return LiveRange::kMaxWeight;
    ret = Max(member_conflict_weight, ret);
  }

  return ret;
}


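// Computes the range's weight if it is currently invalid: fixed ranges and
// ranges on which no further progress is possible get kMaxWeight, so they are
// never evicted; all others get their use count divided by their size.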
void GreedyAllocator::EnsureValidRangeWeight(LiveRange* range) {
  // The live range weight will be invalidated when ranges are created or split.
  // Otherwise, it is consistently updated when the range is allocated or
  // unallocated.
  if (range->weight() != LiveRange::kInvalidWeight) return;

  if (range->TopLevel()->IsFixed()) {
    range->set_weight(LiveRange::kMaxWeight);
    return;
  }
  if (!IsProgressPossible(range)) {
    range->set_weight(LiveRange::kMaxWeight);
    return;
  }

  float use_count = 0.0;
  for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) {
    ++use_count;
  }
  range->set_weight(use_count / static_cast<float>(range->GetSize()));
}


void GreedyAllocator::SpillRangeAsLastResort(LiveRange* range) {
  LifetimePosition start = range->Start();
  CHECK(range->CanBeSpilled(start));

  DCHECK(range->NextRegisterPosition(start) == nullptr);
  Spill(range);
}


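// Walks the range's intervals looking for the first call instruction it fully
// covers. If one is found, the range is split before and (when possible) after
// the call, the part covering the call is spilled, and the remainder after the
// call is returned (nullptr if there is none). If no call is found, the range
// is returned unchanged.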
LiveRange* GreedyAllocator::GetRemainderAfterSplittingAroundFirstCall(
    LiveRange* range) {
  LiveRange* ret = range;
  for (UseInterval* interval = range->first_interval(); interval != nullptr;
       interval = interval->next()) {
    LifetimePosition start = interval->start();
    LifetimePosition end = interval->end();
    // If the interval starts at instruction end, then the first instruction
    // in the interval is the next one.
    int first_full_instruction = (start.IsGapPosition() || start.IsStart())
                                     ? start.ToInstructionIndex()
                                     : start.ToInstructionIndex() + 1;
    // If the interval ends in a gap or at instruction start, then the last
    // instruction is the previous one.
    int last_full_instruction = (end.IsGapPosition() || end.IsStart())
                                    ? end.ToInstructionIndex() - 1
                                    : end.ToInstructionIndex();

    for (int instruction_index = first_full_instruction;
         instruction_index <= last_full_instruction; ++instruction_index) {
      if (!code()->InstructionAt(instruction_index)->IsCall()) continue;

      LifetimePosition before =
          GetSplitPositionForInstruction(range, instruction_index);
      LiveRange* second_part =
          before.IsValid() ? Split(range, data(), before) : range;

      if (range != second_part) scheduler().Schedule(range);

      LifetimePosition after =
          FindSplitPositionAfterCall(second_part, instruction_index);

      if (after.IsValid()) {
        ret = Split(second_part, data(), after);
      } else {
        ret = nullptr;
      }
      Spill(second_part);
      return ret;
    }
  }
  return ret;
}


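// Repeatedly splits |range| around calls until the remainder contains no call;
// the call-free remainder is rescheduled. Returns true if any split was
// performed.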
bool GreedyAllocator::TrySplitAroundCalls(LiveRange* range) {
  bool modified = false;

  while (range != nullptr) {
    LiveRange* remainder = GetRemainderAfterSplittingAroundFirstCall(range);
    // If we performed no modification, we're done.
    if (remainder == range) {
      break;
    }
    // We performed a modification.
    modified = true;
    range = remainder;
  }
  // If we have a remainder and we made modifications, it means the remainder
  // has no calls and we should schedule it for further processing. If we made
  // no modifications, we will just return false, because we want the algorithm
  // to make progress by trying some other heuristic.
  if (modified && range != nullptr) {
    DCHECK(!range->spilled());
    DCHECK(!range->HasRegisterAssigned());
    scheduler().Schedule(range);
  }
  return modified;
}


LifetimePosition GreedyAllocator::FindSplitPositionAfterCall(
    const LiveRange* range, int call_index) {
  LifetimePosition after_call =
      Max(range->Start(),
          LifetimePosition::GapFromInstructionIndex(call_index + 1));
  UsePosition* next_use = range->NextRegisterPosition(after_call);
  if (!next_use) return LifetimePosition::Invalid();

  LifetimePosition split_pos = FindOptimalSplitPos(after_call, next_use->pos());
  split_pos =
      GetSplitPositionForInstruction(range, split_pos.ToInstructionIndex());
  return split_pos;
}


LifetimePosition GreedyAllocator::FindSplitPositionBeforeLoops(
    LiveRange* range) {
  LifetimePosition end = range->End();
  if (end.ToInstructionIndex() >= code()->LastInstructionIndex()) {
    end =
        LifetimePosition::GapFromInstructionIndex(end.ToInstructionIndex() - 1);
  }
  LifetimePosition pos = FindOptimalSplitPos(range->Start(), end);
  pos = GetSplitPositionForInstruction(range, pos.ToInstructionIndex());
  return pos;
}


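// Handles a range that could not be allocated: split around calls if possible,
// otherwise split at a position hoisted before loops (or at the last-resort
// position), rescheduling both halves; if no split position exists, spill the
// whole range.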
void GreedyAllocator::SplitOrSpillBlockedRange(LiveRange* range) {
  if (TrySplitAroundCalls(range)) return;

  LifetimePosition pos = FindSplitPositionBeforeLoops(range);

  if (!pos.IsValid()) pos = GetLastResortSplitPosition(range);
  if (pos.IsValid()) {
    LiveRange* tail = Split(range, data(), pos);
    DCHECK(tail != range);
    scheduler().Schedule(tail);
    scheduler().Schedule(range);
    return;
  }
  SpillRangeAsLastResort(range);
}


// Basic heuristic for advancing the algorithm, if any other splitting heuristic
// failed.
LifetimePosition GreedyAllocator::GetLastResortSplitPosition(
    const LiveRange* range) {
  LifetimePosition previous = range->Start();
  for (UsePosition *pos = range->NextRegisterPosition(previous); pos != nullptr;
       previous = previous.NextFullStart(),
                   pos = range->NextRegisterPosition(previous)) {
    LifetimePosition optimal = FindOptimalSplitPos(previous, pos->pos());
    LifetimePosition before =
        GetSplitPositionForInstruction(range, optimal.ToInstructionIndex());
    if (before.IsValid()) return before;
    LifetimePosition after = GetSplitPositionForInstruction(
        range, pos->pos().ToInstructionIndex() + 1);
    if (after.IsValid()) return after;
  }
  return LifetimePosition::Invalid();
}


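// Returns whether the allocator can still make progress on this range, either
// by spilling it at its start or by splitting it at a last-resort position.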
bool GreedyAllocator::IsProgressPossible(const LiveRange* range) {
  return range->CanBeSpilled(range->Start()) ||
         GetLastResortSplitPosition(range).IsValid();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8
