1 //===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines the RAGreedy function pass for register allocation in
11 // optimized builds.
12 //
13 //===----------------------------------------------------------------------===//
14
15 #define DEBUG_TYPE "regalloc"
16 #include "AllocationOrder.h"
17 #include "InterferenceCache.h"
18 #include "LiveDebugVariables.h"
19 #include "RegAllocBase.h"
20 #include "Spiller.h"
21 #include "SpillPlacement.h"
22 #include "SplitKit.h"
23 #include "VirtRegMap.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/Analysis/AliasAnalysis.h"
26 #include "llvm/Function.h"
27 #include "llvm/PassAnalysisSupport.h"
28 #include "llvm/CodeGen/CalcSpillWeights.h"
29 #include "llvm/CodeGen/EdgeBundles.h"
30 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
31 #include "llvm/CodeGen/LiveRangeEdit.h"
32 #include "llvm/CodeGen/LiveStackAnalysis.h"
33 #include "llvm/CodeGen/MachineDominators.h"
34 #include "llvm/CodeGen/MachineFunctionPass.h"
35 #include "llvm/CodeGen/MachineLoopInfo.h"
36 #include "llvm/CodeGen/MachineRegisterInfo.h"
37 #include "llvm/CodeGen/Passes.h"
38 #include "llvm/CodeGen/RegAllocRegistry.h"
39 #include "llvm/Target/TargetOptions.h"
40 #include "llvm/Support/CommandLine.h"
41 #include "llvm/Support/Debug.h"
42 #include "llvm/Support/ErrorHandling.h"
43 #include "llvm/Support/raw_ostream.h"
44 #include "llvm/Support/Timer.h"
45
46 #include <queue>
47
48 using namespace llvm;
49
50 STATISTIC(NumGlobalSplits, "Number of split global live ranges");
51 STATISTIC(NumLocalSplits, "Number of split local live ranges");
52 STATISTIC(NumEvicted, "Number of interferences evicted");
53
54 static cl::opt<SplitEditor::ComplementSpillMode>
55 SplitSpillMode("split-spill-mode", cl::Hidden,
56 cl::desc("Spill mode for splitting live ranges"),
57 cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
58 clEnumValN(SplitEditor::SM_Size, "size", "Optimize for size"),
59 clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed"),
60 clEnumValEnd),
61 cl::init(SplitEditor::SM_Partition));
62
63 static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
64 createGreedyRegisterAllocator);
65
66 namespace {
67 class RAGreedy : public MachineFunctionPass,
68 public RegAllocBase,
69 private LiveRangeEdit::Delegate {
70
71 // context
72 MachineFunction *MF;
73
74 // analyses
75 SlotIndexes *Indexes;
76 LiveStacks *LS;
77 MachineDominatorTree *DomTree;
78 MachineLoopInfo *Loops;
79 EdgeBundles *Bundles;
80 SpillPlacement *SpillPlacer;
81 LiveDebugVariables *DebugVars;
82
83 // state
84 std::auto_ptr<Spiller> SpillerInstance;
85 std::priority_queue<std::pair<unsigned, unsigned> > Queue;
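// The next cascade number to hand out. Used to order evictions and prevent eviction loops; see canEvictInterference().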
86 unsigned NextCascade;
87
88 // Live ranges pass through a number of stages as we try to allocate them.
89 // Some of the stages may also create new live ranges:
90 //
91 // - Region splitting.
92 // - Per-block splitting.
93 // - Local splitting.
94 // - Spilling.
95 //
96 // Ranges produced by one of the stages skip the previous stages when they are
97 // dequeued. This improves performance because we can skip interference checks
98 // that are unlikely to give any results. It also guarantees that the live
99 // range splitting algorithm terminates, something that is otherwise hard to
100 // ensure.
101 enum LiveRangeStage {
102 /// Newly created live range that has never been queued.
103 RS_New,
104
105 /// Only attempt assignment and eviction. Then requeue as RS_Split.
106 RS_Assign,
107
108 /// Attempt live range splitting if assignment is impossible.
109 RS_Split,
110
111 /// Attempt more aggressive live range splitting that is guaranteed to make
112 /// progress. This is used for split products that may not be making
113 /// progress.
114 RS_Split2,
115
116 /// Live range will be spilled. No more splitting will be attempted.
117 RS_Spill,
118
119 /// There is nothing more we can do to this live range. Abort compilation
120 /// if it can't be assigned.
121 RS_Done
122 };
123
124 static const char *const StageName[];
125
126 // RegInfo - Keep additional information about each live range.
127 struct RegInfo {
128 LiveRangeStage Stage;
129
130 // Cascade - Eviction loop prevention. See canEvictInterference().
131 unsigned Cascade;
132
133 RegInfo() : Stage(RS_New), Cascade(0) {}
134 };
135
136 IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;
137
138 LiveRangeStage getStage(const LiveInterval &VirtReg) const {
139 return ExtraRegInfo[VirtReg.reg].Stage;
140 }
141
142 void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
143 ExtraRegInfo.resize(MRI->getNumVirtRegs());
144 ExtraRegInfo[VirtReg.reg].Stage = Stage;
145 }
146
147 template<typename Iterator>
148 void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
149 ExtraRegInfo.resize(MRI->getNumVirtRegs());
150 for (;Begin != End; ++Begin) {
151 unsigned Reg = (*Begin)->reg;
152 if (ExtraRegInfo[Reg].Stage == RS_New)
153 ExtraRegInfo[Reg].Stage = NewStage;
154 }
155 }
156
157 /// Cost of evicting interference.
158 struct EvictionCost {
159 unsigned BrokenHints; ///< Total number of broken hints.
160 float MaxWeight; ///< Maximum spill weight evicted.
161
162 EvictionCost(unsigned B = 0) : BrokenHints(B), MaxWeight(0) {}
163
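// Compare lexicographically: fewer broken hints first, then lower maximum spill weight.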
164 bool operator<(const EvictionCost &O) const {
165 if (BrokenHints != O.BrokenHints)
166 return BrokenHints < O.BrokenHints;
167 return MaxWeight < O.MaxWeight;
168 }
169 };
170
171 // Register mask interference. The current VirtReg is checked for register
172 // mask interference on entry to selectOrSplit(). If there is no
173 // interference, UsableRegs is left empty. If there is interference,
174 // UsableRegs has a bit mask of registers that can be used without register
175 // mask interference.
176 BitVector UsableRegs;
177
178 /// clobberedByRegMask - Returns true if PhysReg is not directly usable
179 /// because of register mask clobbers.
180 bool clobberedByRegMask(unsigned PhysReg) const {
181 return !UsableRegs.empty() && !UsableRegs.test(PhysReg);
182 }
183
184 // splitting state.
185 std::auto_ptr<SplitAnalysis> SA;
186 std::auto_ptr<SplitEditor> SE;
187
188 /// Cached per-block interference maps
189 InterferenceCache IntfCache;
190
191 /// All basic blocks where the current register has uses.
192 SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;
193
194 /// Global live range splitting candidate info.
195 struct GlobalSplitCandidate {
196 // Register intended for assignment, or 0.
197 unsigned PhysReg;
198
199 // SplitKit interval index for this candidate.
200 unsigned IntvIdx;
201
202 // Interference for PhysReg.
203 InterferenceCache::Cursor Intf;
204
205 // Bundles where this candidate should be live.
206 BitVector LiveBundles;
207 SmallVector<unsigned, 8> ActiveBlocks;
208
209 void reset(InterferenceCache &Cache, unsigned Reg) {
210 PhysReg = Reg;
211 IntvIdx = 0;
212 Intf.setPhysReg(Cache, Reg);
213 LiveBundles.clear();
214 ActiveBlocks.clear();
215 }
216
217 // Set B[i] = C for every live bundle where B[i] was NoCand.
218 unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
219 unsigned Count = 0;
220 for (int i = LiveBundles.find_first(); i >= 0;
221 i = LiveBundles.find_next(i))
222 if (B[i] == NoCand) {
223 B[i] = C;
224 Count++;
225 }
226 return Count;
227 }
228 };
229
230 /// Candidate info for each PhysReg in AllocationOrder.
231 /// This vector never shrinks, but grows to the size of the largest register
232 /// class.
233 SmallVector<GlobalSplitCandidate, 32> GlobalCand;
234
235 enum { NoCand = ~0u };
236
237 /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
238 /// NoCand which indicates the stack interval.
239 SmallVector<unsigned, 32> BundleCand;
240
241 public:
242 RAGreedy();
243
244 /// Return the pass name.
245 virtual const char* getPassName() const {
246 return "Greedy Register Allocator";
247 }
248
249 /// RAGreedy analysis usage.
250 virtual void getAnalysisUsage(AnalysisUsage &AU) const;
251 virtual void releaseMemory();
252 virtual Spiller &spiller() { return *SpillerInstance; }
253 virtual void enqueue(LiveInterval *LI);
254 virtual LiveInterval *dequeue();
255 virtual unsigned selectOrSplit(LiveInterval&,
256 SmallVectorImpl<LiveInterval*>&);
257
258 /// Perform register allocation.
259 virtual bool runOnMachineFunction(MachineFunction &mf);
260
261 static char ID;
262
263 private:
264 bool LRE_CanEraseVirtReg(unsigned);
265 void LRE_WillShrinkVirtReg(unsigned);
266 void LRE_DidCloneVirtReg(unsigned, unsigned);
267
268 float calcSpillCost();
269 bool addSplitConstraints(InterferenceCache::Cursor, float&);
270 void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
271 void growRegion(GlobalSplitCandidate &Cand);
272 float calcGlobalSplitCost(GlobalSplitCandidate&);
273 bool calcCompactRegion(GlobalSplitCandidate&);
274 void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>);
275 void calcGapWeights(unsigned, SmallVectorImpl<float>&);
276 bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
277 bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
278 void evictInterference(LiveInterval&, unsigned,
279 SmallVectorImpl<LiveInterval*>&);
280
281 unsigned tryAssign(LiveInterval&, AllocationOrder&,
282 SmallVectorImpl<LiveInterval*>&);
283 unsigned tryEvict(LiveInterval&, AllocationOrder&,
284 SmallVectorImpl<LiveInterval*>&, unsigned = ~0u);
285 unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
286 SmallVectorImpl<LiveInterval*>&);
287 unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
288 SmallVectorImpl<LiveInterval*>&);
289 unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
290 SmallVectorImpl<LiveInterval*>&);
291 unsigned trySplit(LiveInterval&, AllocationOrder&,
292 SmallVectorImpl<LiveInterval*>&);
293 };
294 } // end anonymous namespace
295
296 char RAGreedy::ID = 0;
297
298 #ifndef NDEBUG
299 const char *const RAGreedy::StageName[] = {
300 "RS_New",
301 "RS_Assign",
302 "RS_Split",
303 "RS_Split2",
304 "RS_Spill",
305 "RS_Done"
306 };
307 #endif
308
309 // Hysteresis to use when comparing floats.
310 // This helps stabilize decisions based on float comparisons.
311 const float Hysteresis = 0.98f;
312
313
314 FunctionPass* llvm::createGreedyRegisterAllocator() {
315 return new RAGreedy();
316 }
317
318 RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
319 initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
320 initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
321 initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
322 initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
323 initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
324 initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
325 initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
326 initializeLiveStacksPass(*PassRegistry::getPassRegistry());
327 initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
328 initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
329 initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
330 initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
331 initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
332 }
333
334 void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
335 AU.setPreservesCFG();
336 AU.addRequired<AliasAnalysis>();
337 AU.addPreserved<AliasAnalysis>();
338 AU.addRequired<LiveIntervals>();
339 AU.addRequired<SlotIndexes>();
340 AU.addPreserved<SlotIndexes>();
341 AU.addRequired<LiveDebugVariables>();
342 AU.addPreserved<LiveDebugVariables>();
343 AU.addRequired<CalculateSpillWeights>();
344 AU.addRequired<LiveStacks>();
345 AU.addPreserved<LiveStacks>();
346 AU.addRequired<MachineDominatorTree>();
347 AU.addPreserved<MachineDominatorTree>();
348 AU.addRequired<MachineLoopInfo>();
349 AU.addPreserved<MachineLoopInfo>();
350 AU.addRequired<VirtRegMap>();
351 AU.addPreserved<VirtRegMap>();
352 AU.addRequired<EdgeBundles>();
353 AU.addRequired<SpillPlacement>();
354 MachineFunctionPass::getAnalysisUsage(AU);
355 }
356
357
358 //===----------------------------------------------------------------------===//
359 // LiveRangeEdit delegate methods
360 //===----------------------------------------------------------------------===//
361
362 bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
363 if (unsigned PhysReg = VRM->getPhys(VirtReg)) {
364 unassign(LIS->getInterval(VirtReg), PhysReg);
365 return true;
366 }
367 // Unassigned virtreg is probably in the priority queue.
368 // RegAllocBase will erase it after dequeueing.
369 return false;
370 }
371
372 void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
373 unsigned PhysReg = VRM->getPhys(VirtReg);
374 if (!PhysReg)
375 return;
376
377 // Register is assigned, put it back on the queue for reassignment.
378 LiveInterval &LI = LIS->getInterval(VirtReg);
379 unassign(LI, PhysReg);
380 enqueue(&LI);
381 }
382
383 void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
384 // Cloning a register we haven't even heard about yet? Just ignore it.
385 if (!ExtraRegInfo.inBounds(Old))
386 return;
387
388 // LRE may clone a virtual register because dead code elimination causes it to
389 // be split into connected components. The new components are much smaller
390 // than the original, so they should get a new chance at being assigned.
391 // Reset the original to RS_Assign and give the clone the same stage as the parent.
392 ExtraRegInfo[Old].Stage = RS_Assign;
393 ExtraRegInfo.grow(New);
394 ExtraRegInfo[New] = ExtraRegInfo[Old];
395 }
396
397 void RAGreedy::releaseMemory() {
398 SpillerInstance.reset(0);
399 ExtraRegInfo.clear();
400 GlobalCand.clear();
401 RegAllocBase::releaseMemory();
402 }
403
404 void RAGreedy::enqueue(LiveInterval *LI) {
405 // Prioritize live ranges by size, assigning larger ranges first.
406 // The queue holds (priority, ~reg) pairs.
407 const unsigned Size = LI->getSize();
408 const unsigned Reg = LI->reg;
409 assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
410 "Can only enqueue virtual registers");
411 unsigned Prio;
412
413 ExtraRegInfo.grow(Reg);
414 if (ExtraRegInfo[Reg].Stage == RS_New)
415 ExtraRegInfo[Reg].Stage = RS_Assign;
416
417 if (ExtraRegInfo[Reg].Stage == RS_Split) {
418 // Unsplit ranges that couldn't be allocated immediately are deferred until
419 // everything else has been allocated.
420 Prio = Size;
421 } else {
422 // Everything is allocated in long->short order. Long ranges that don't fit
423 // should be spilled (or split) ASAP so they don't create interference.
424 Prio = (1u << 31) + Size;
425
426 // Boost ranges that have a physical register hint.
427 if (TargetRegisterInfo::isPhysicalRegister(VRM->getRegAllocPref(Reg)))
428 Prio |= (1u << 30);
429 }
430
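// The vreg number is stored complemented so that, among equal priorities, the max-heap pops lower-numbered virtual registers first; dequeue() restores it with another complement.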
431 Queue.push(std::make_pair(Prio, ~Reg));
432 }
433
434 LiveInterval *RAGreedy::dequeue() {
435 if (Queue.empty())
436 return 0;
437 LiveInterval *LI = &LIS->getInterval(~Queue.top().second);
438 Queue.pop();
439 return LI;
440 }
441
442
443 //===----------------------------------------------------------------------===//
444 // Direct Assignment
445 //===----------------------------------------------------------------------===//
446
447 /// tryAssign - Try to assign VirtReg to an available register.
448 unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
449 AllocationOrder &Order,
450 SmallVectorImpl<LiveInterval*> &NewVRegs) {
451 Order.rewind();
452 unsigned PhysReg;
453 while ((PhysReg = Order.next())) {
454 if (clobberedByRegMask(PhysReg))
455 continue;
456 if (!checkPhysRegInterference(VirtReg, PhysReg))
457 break;
458 }
459 if (!PhysReg || Order.isHint(PhysReg))
460 return PhysReg;
461
462 // PhysReg is available, but there may be a better choice.
463
464 // If we missed a simple hint, try to cheaply evict interference from the
465 // preferred register.
466 if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
467 if (Order.isHint(Hint) && !clobberedByRegMask(Hint)) {
468 DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
469 EvictionCost MaxCost(1);
470 if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
471 evictInterference(VirtReg, Hint, NewVRegs);
472 return Hint;
473 }
474 }
475
476 // Try to evict interference from a cheaper alternative.
477 unsigned Cost = TRI->getCostPerUse(PhysReg);
478
479 // Most registers have 0 additional cost.
480 if (!Cost)
481 return PhysReg;
482
483 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
484 << '\n');
485 unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
486 return CheapReg ? CheapReg : PhysReg;
487 }
488
489
490 //===----------------------------------------------------------------------===//
491 // Interference eviction
492 //===----------------------------------------------------------------------===//
493
494 /// shouldEvict - determine if A should evict the assigned live range B. The
495 /// eviction policy defined by this function together with the allocation order
496 /// defined by enqueue() decides which registers ultimately end up being split
497 /// and spilled.
498 ///
499 /// Cascade numbers are used to prevent infinite loops if this function is a
500 /// cyclic relation.
501 ///
502 /// @param A The live range to be assigned.
503 /// @param IsHint True when A is about to be assigned to its preferred
504 /// register.
505 /// @param B The live range to be evicted.
506 /// @param BreaksHint True when B is already assigned to its preferred register.
507 bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
508 LiveInterval &B, bool BreaksHint) {
509 bool CanSplit = getStage(B) < RS_Spill;
510
511 // Be fairly aggressive about following hints as long as the evictee can be
512 // split.
513 if (CanSplit && IsHint && !BreaksHint)
514 return true;
515
516 return A.weight > B.weight;
517 }
518
519 /// canEvictInterference - Return true if all interferences between VirtReg and
520 /// PhysReg can be evicted.
521 ///
522 /// @param VirtReg Live range that is about to be assigned.
523 /// @param PhysReg Desired register for assignment.
524 /// @param IsHint  True when PhysReg is VirtReg's preferred register.
525 /// @param MaxCost Only look for cheaper candidates and update with new cost
526 /// when returning true.
527 /// @returns True when interference can be evicted cheaper than MaxCost.
528 bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
529 bool IsHint, EvictionCost &MaxCost) {
530 // Find VirtReg's cascade number. This will be unassigned if VirtReg was never
531 // involved in an eviction before. If a cascade number was assigned, deny
532 // evicting anything with the same or a newer cascade number. This prevents
533 // infinite eviction loops.
534 //
535 // This works out so a register without a cascade number is allowed to evict
536 // anything, and it can be evicted by anything.
537 unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
538 if (!Cascade)
539 Cascade = NextCascade;
540
541 EvictionCost Cost;
542 for (const uint16_t *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
543 LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
544 // If there are 10 or more interferences, chances are one is heavier.
545 if (Q.collectInterferingVRegs(10) >= 10)
546 return false;
547
548 // Check if any interfering live range is heavier than MaxWeight.
549 for (unsigned i = Q.interferingVRegs().size(); i; --i) {
550 LiveInterval *Intf = Q.interferingVRegs()[i - 1];
551 if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
552 return false;
553 // Never evict spill products. They cannot split or spill.
554 if (getStage(*Intf) == RS_Done)
555 return false;
556 // Once a live range becomes small enough, it is urgent that we find a
557 // register for it. This is indicated by an infinite spill weight. These
558 // urgent live ranges get to evict almost anything.
559 bool Urgent = !VirtReg.isSpillable() && Intf->isSpillable();
560 // Only evict older cascades or live ranges without a cascade.
561 unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
562 if (Cascade <= IntfCascade) {
563 if (!Urgent)
564 return false;
565 // We permit breaking cascades for urgent evictions. It should be the
566 // last resort, though, so make it really expensive.
567 Cost.BrokenHints += 10;
568 }
569 // Would this break a satisfied hint?
570 bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
571 // Update eviction cost.
572 Cost.BrokenHints += BreaksHint;
573 Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
574 // Abort if this would be too expensive.
575 if (!(Cost < MaxCost))
576 return false;
577 // Finally, apply the eviction policy for non-urgent evictions.
578 if (!Urgent && !shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
579 return false;
580 }
581 }
582 MaxCost = Cost;
583 return true;
584 }
585
586 /// evictInterference - Evict any interfering registers that prevent VirtReg
587 /// from being assigned to PhysReg. This assumes that canEvictInterference
588 /// returned true.
589 void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
590 SmallVectorImpl<LiveInterval*> &NewVRegs) {
591 // Make sure that VirtReg has a cascade number, and assign that cascade
592 // number to every evicted register. These live ranges can then only be
593 // evicted by a newer cascade, preventing infinite loops.
594 unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
595 if (!Cascade)
596 Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;
597
598 DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
599 << " interference: Cascade " << Cascade << '\n');
600 for (const uint16_t *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
601 LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
602 assert(Q.seenAllInterferences() && "Didn't check all interferences.");
603 for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
604 LiveInterval *Intf = Q.interferingVRegs()[i];
605 unassign(*Intf, VRM->getPhys(Intf->reg));
606 assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
607 VirtReg.isSpillable() < Intf->isSpillable()) &&
608 "Cannot decrease cascade number, illegal eviction");
609 ExtraRegInfo[Intf->reg].Cascade = Cascade;
610 ++NumEvicted;
611 NewVRegs.push_back(Intf);
612 }
613 }
614 }
615
616 /// tryEvict - Try to evict all interferences for a physreg.
617 /// @param VirtReg Currently unassigned virtual register.
618 /// @param Order Physregs to try.
619 /// @return Physreg to assign VirtReg, or 0.
620 unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
621 AllocationOrder &Order,
622 SmallVectorImpl<LiveInterval*> &NewVRegs,
623 unsigned CostPerUseLimit) {
624 NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);
625
626 // Keep track of the cheapest interference seen so far.
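// BrokenHints starts at ~0u, an effectively infinite cost, so any feasible eviction found below is an improvement.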
627 EvictionCost BestCost(~0u);
628 unsigned BestPhys = 0;
629
630 // When we are just looking for a reduced cost per use, don't break any
631 // hints, and only evict smaller spill weights.
632 if (CostPerUseLimit < ~0u) {
633 BestCost.BrokenHints = 0;
634 BestCost.MaxWeight = VirtReg.weight;
635 }
636
637 Order.rewind();
638 while (unsigned PhysReg = Order.next()) {
639 if (clobberedByRegMask(PhysReg))
640 continue;
641 if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
642 continue;
643 // The first use of a callee-saved register in a function has cost 1.
644 // Don't start using a CSR when the CostPerUseLimit is low.
645 if (CostPerUseLimit == 1)
646 if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
647 if (!MRI->isPhysRegUsed(CSR)) {
648 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
649 << PrintReg(CSR, TRI) << '\n');
650 continue;
651 }
652
653 if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
654 continue;
655
656 // Best so far.
657 BestPhys = PhysReg;
658
659 // Stop if the hint can be used.
660 if (Order.isHint(PhysReg))
661 break;
662 }
663
664 if (!BestPhys)
665 return 0;
666
667 evictInterference(VirtReg, BestPhys, NewVRegs);
668 return BestPhys;
669 }
670
671
672 //===----------------------------------------------------------------------===//
673 // Region Splitting
674 //===----------------------------------------------------------------------===//
675
676 /// addSplitConstraints - Fill out the SplitConstraints vector based on the
677 /// interference pattern in PhysReg and its aliases. Add the constraints to
678 /// SpillPlacement and return the static cost of this split in Cost, assuming
679 /// that all preferences in SplitConstraints are met.
680 /// Return false if there are no bundles with positive bias.
681 bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
682 float &Cost) {
683 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
684
685 // Reset interference dependent info.
686 SplitConstraints.resize(UseBlocks.size());
687 float StaticCost = 0;
688 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
689 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
690 SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
691
692 BC.Number = BI.MBB->getNumber();
693 Intf.moveToBlock(BC.Number);
694 BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
695 BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
696 BC.ChangesValue = BI.FirstDef;
697
698 if (!Intf.hasInterference())
699 continue;
700
701 // Number of spill code instructions to insert.
702 unsigned Ins = 0;
703
704 // Interference for the live-in value.
705 if (BI.LiveIn) {
706 if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
707 BC.Entry = SpillPlacement::MustSpill, ++Ins;
708 else if (Intf.first() < BI.FirstInstr)
709 BC.Entry = SpillPlacement::PrefSpill, ++Ins;
710 else if (Intf.first() < BI.LastInstr)
711 ++Ins;
712 }
713
714 // Interference for the live-out value.
715 if (BI.LiveOut) {
716 if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
717 BC.Exit = SpillPlacement::MustSpill, ++Ins;
718 else if (Intf.last() > BI.LastInstr)
719 BC.Exit = SpillPlacement::PrefSpill, ++Ins;
720 else if (Intf.last() > BI.FirstInstr)
721 ++Ins;
722 }
723
724 // Accumulate the total frequency of inserted spill code.
725 if (Ins)
726 StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
727 }
728 Cost = StaticCost;
729
730 // Add constraints for use-blocks. Note that these are the only constraints
731 // that may add a positive bias; it is downhill from here.
732 SpillPlacer->addConstraints(SplitConstraints);
733 return SpillPlacer->scanActiveBundles();
734 }
735
736
737 /// addThroughConstraints - Add constraints and links to SpillPlacer from the
738 /// live-through blocks in Blocks.
739 void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
740 ArrayRef<unsigned> Blocks) {
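// Constraints and links are buffered in small fixed-size arrays and flushed to SpillPlacer in batches of GroupSize.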
741 const unsigned GroupSize = 8;
742 SpillPlacement::BlockConstraint BCS[GroupSize];
743 unsigned TBS[GroupSize];
744 unsigned B = 0, T = 0;
745
746 for (unsigned i = 0; i != Blocks.size(); ++i) {
747 unsigned Number = Blocks[i];
748 Intf.moveToBlock(Number);
749
750 if (!Intf.hasInterference()) {
751 assert(T < GroupSize && "Array overflow");
752 TBS[T] = Number;
753 if (++T == GroupSize) {
754 SpillPlacer->addLinks(makeArrayRef(TBS, T));
755 T = 0;
756 }
757 continue;
758 }
759
760 assert(B < GroupSize && "Array overflow");
761 BCS[B].Number = Number;
762
763 // Interference for the live-in value.
764 if (Intf.first() <= Indexes->getMBBStartIdx(Number))
765 BCS[B].Entry = SpillPlacement::MustSpill;
766 else
767 BCS[B].Entry = SpillPlacement::PrefSpill;
768
769 // Interference for the live-out value.
770 if (Intf.last() >= SA->getLastSplitPoint(Number))
771 BCS[B].Exit = SpillPlacement::MustSpill;
772 else
773 BCS[B].Exit = SpillPlacement::PrefSpill;
774
775 if (++B == GroupSize) {
776 ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
777 SpillPlacer->addConstraints(Array);
778 B = 0;
779 }
780 }
781
782 ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
783 SpillPlacer->addConstraints(Array);
784 SpillPlacer->addLinks(makeArrayRef(TBS, T));
785 }
786
787 void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
788 // Keep track of through blocks that have not been added to SpillPlacer.
789 BitVector Todo = SA->getThroughBlocks();
790 SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
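// Number of ActiveBlocks entries whose constraints have already been passed to SpillPlacer.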
791 unsigned AddedTo = 0;
792 #ifndef NDEBUG
793 unsigned Visited = 0;
794 #endif
795
796 for (;;) {
797 ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
798 // Find new through blocks in the periphery of PrefRegBundles.
799 for (int i = 0, e = NewBundles.size(); i != e; ++i) {
800 unsigned Bundle = NewBundles[i];
801 // Look at all blocks connected to Bundle in the full graph.
802 ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
803 for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
804 I != E; ++I) {
805 unsigned Block = *I;
806 if (!Todo.test(Block))
807 continue;
808 Todo.reset(Block);
809 // This is a new through block. Add it to SpillPlacer later.
810 ActiveBlocks.push_back(Block);
811 #ifndef NDEBUG
812 ++Visited;
813 #endif
814 }
815 }
816 // Any new blocks to add?
817 if (ActiveBlocks.size() == AddedTo)
818 break;
819
820 // Compute through constraints from the interference, or assume that all
821 // through blocks prefer spilling when forming compact regions.
822 ArrayRef<unsigned> NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
823 if (Cand.PhysReg)
824 addThroughConstraints(Cand.Intf, NewBlocks);
825 else
826 // Provide a strong negative bias on through blocks to prevent unwanted
827 // liveness on loop backedges.
828 SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
829 AddedTo = ActiveBlocks.size();
830
831 // Perhaps iterating can enable more bundles?
832 SpillPlacer->iterate();
833 }
834 DEBUG(dbgs() << ", v=" << Visited);
835 }
836
837 /// calcCompactRegion - Compute the set of edge bundles that should be live
838 /// when splitting the current live range into compact regions. Compact
839 /// regions can be computed without looking at interference. They are the
840 /// regions formed by removing all the live-through blocks from the live range.
841 ///
842 /// Returns false if the current live range is already compact, or if the
843 /// compact regions would form single block regions anyway.
844 bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
845 // Without any through blocks, the live range is already compact.
846 if (!SA->getNumThroughBlocks())
847 return false;
848
849 // Compact regions don't correspond to any physreg.
850 Cand.reset(IntfCache, 0);
851
852 DEBUG(dbgs() << "Compact region bundles");
853
854 // Use the spill placer to determine the live bundles. GrowRegion pretends
855 // that all the through blocks have interference when PhysReg is unset.
856 SpillPlacer->prepare(Cand.LiveBundles);
857
858 // The static split cost will be zero since Cand.Intf reports no interference.
859 float Cost;
860 if (!addSplitConstraints(Cand.Intf, Cost)) {
861 DEBUG(dbgs() << ", none.\n");
862 return false;
863 }
864
865 growRegion(Cand);
866 SpillPlacer->finish();
867
868 if (!Cand.LiveBundles.any()) {
869 DEBUG(dbgs() << ", none.\n");
870 return false;
871 }
872
873 DEBUG({
874 for (int i = Cand.LiveBundles.find_first(); i>=0;
875 i = Cand.LiveBundles.find_next(i))
876 dbgs() << " EB#" << i;
877 dbgs() << ".\n";
878 });
879 return true;
880 }
881
882 /// calcSpillCost - Compute how expensive it would be to split the live range in
883 /// SA around all use blocks instead of forming bundle regions.
884 float RAGreedy::calcSpillCost() {
885 float Cost = 0;
886 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
887 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
888 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
889 unsigned Number = BI.MBB->getNumber();
890 // We normally only need one spill instruction - a load or a store.
891 Cost += SpillPlacer->getBlockFrequency(Number);
892
893 // Unless the value is redefined in the block.
894 if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
895 Cost += SpillPlacer->getBlockFrequency(Number);
896 }
897 return Cost;
898 }
899
900 /// calcGlobalSplitCost - Return the global split cost of following the split
901 /// pattern in LiveBundles. This cost should be added to the local cost of the
902 /// interference pattern in SplitConstraints.
903 ///
904 float RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) {
905 float GlobalCost = 0;
906 const BitVector &LiveBundles = Cand.LiveBundles;
907 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
908 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
909 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
910 SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
911 bool RegIn = LiveBundles[Bundles->getBundle(BC.Number, 0)];
912 bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
913 unsigned Ins = 0;
914
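// Spill code is needed wherever the bundle solution (RegIn/RegOut) disagrees with the block's preferred entry/exit state from SplitConstraints.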
915 if (BI.LiveIn)
916 Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
917 if (BI.LiveOut)
918 Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
919 if (Ins)
920 GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
921 }
922
923 for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
924 unsigned Number = Cand.ActiveBlocks[i];
925 bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)];
926 bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
927 if (!RegIn && !RegOut)
928 continue;
929 if (RegIn && RegOut) {
930 // We need double spill code if this block has interference.
931 Cand.Intf.moveToBlock(Number);
932 if (Cand.Intf.hasInterference())
933 GlobalCost += 2*SpillPlacer->getBlockFrequency(Number);
934 continue;
935 }
936 // live-in / stack-out or stack-in live-out.
937 GlobalCost += SpillPlacer->getBlockFrequency(Number);
938 }
939 return GlobalCost;
940 }
941
942 /// splitAroundRegion - Split the current live range around the regions
943 /// determined by BundleCand and GlobalCand.
944 ///
945 /// Before calling this function, GlobalCand and BundleCand must be initialized
946 /// so each bundle is assigned to a valid candidate, or NoCand for the
947 /// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor
948 /// objects must be initialized for the current live range, and intervals
949 /// created for the used candidates.
950 ///
951 /// @param LREdit The LiveRangeEdit object handling the current split.
952 /// @param UsedCands List of used GlobalCand entries. Every BundleCand value
953 /// must appear in this list.
954 void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
955 ArrayRef<unsigned> UsedCands) {
956 // These are the intervals created for new global ranges. We may create more
957 // intervals for local ranges.
958 const unsigned NumGlobalIntvs = LREdit.size();
959 DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs << " globals.\n");
960 assert(NumGlobalIntvs && "No global intervals configured");
961
962 // Isolate even single instructions when dealing with a proper sub-class.
963 // That guarantees register class inflation for the stack interval because it
964 // is all copies.
965 unsigned Reg = SA->getParent().reg;
966 bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
967
968 // First handle all the blocks with uses.
969 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
970 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
971 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
972 unsigned Number = BI.MBB->getNumber();
973 unsigned IntvIn = 0, IntvOut = 0;
974 SlotIndex IntfIn, IntfOut;
975 if (BI.LiveIn) {
976 unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
977 if (CandIn != NoCand) {
978 GlobalSplitCandidate &Cand = GlobalCand[CandIn];
979 IntvIn = Cand.IntvIdx;
980 Cand.Intf.moveToBlock(Number);
981 IntfIn = Cand.Intf.first();
982 }
983 }
984 if (BI.LiveOut) {
985 unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
986 if (CandOut != NoCand) {
987 GlobalSplitCandidate &Cand = GlobalCand[CandOut];
988 IntvOut = Cand.IntvIdx;
989 Cand.Intf.moveToBlock(Number);
990 IntfOut = Cand.Intf.last();
991 }
992 }
993
994 // Create separate intervals for isolated blocks with multiple uses.
995 if (!IntvIn && !IntvOut) {
996 DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
997 if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
998 SE->splitSingleBlock(BI);
999 continue;
1000 }
1001
1002 if (IntvIn && IntvOut)
1003 SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
1004 else if (IntvIn)
1005 SE->splitRegInBlock(BI, IntvIn, IntfIn);
1006 else
1007 SE->splitRegOutBlock(BI, IntvOut, IntfOut);
1008 }
1009
1010 // Handle live-through blocks. The relevant live-through blocks are stored in
1011 // the ActiveBlocks list with each candidate. We need to filter out
1012 // duplicates.
1013 BitVector Todo = SA->getThroughBlocks();
1014 for (unsigned c = 0; c != UsedCands.size(); ++c) {
1015 ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks;
1016 for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
1017 unsigned Number = Blocks[i];
1018 if (!Todo.test(Number))
1019 continue;
1020 Todo.reset(Number);
1021
1022 unsigned IntvIn = 0, IntvOut = 0;
1023 SlotIndex IntfIn, IntfOut;
1024
1025 unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
1026 if (CandIn != NoCand) {
1027 GlobalSplitCandidate &Cand = GlobalCand[CandIn];
1028 IntvIn = Cand.IntvIdx;
1029 Cand.Intf.moveToBlock(Number);
1030 IntfIn = Cand.Intf.first();
1031 }
1032
1033 unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
1034 if (CandOut != NoCand) {
1035 GlobalSplitCandidate &Cand = GlobalCand[CandOut];
1036 IntvOut = Cand.IntvIdx;
1037 Cand.Intf.moveToBlock(Number);
1038 IntfOut = Cand.Intf.last();
1039 }
1040 if (!IntvIn && !IntvOut)
1041 continue;
1042 SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
1043 }
1044 }
1045
1046 ++NumGlobalSplits;
1047
1048 SmallVector<unsigned, 8> IntvMap;
1049 SE->finish(&IntvMap);
1050 DebugVars->splitRegister(Reg, LREdit.regs());
1051
1052 ExtraRegInfo.resize(MRI->getNumVirtRegs());
1053 unsigned OrigBlocks = SA->getNumLiveBlocks();
1054
1055 // Sort out the new intervals created by splitting. We get four kinds:
1056 // - Remainder intervals should not be split again.
1057 // - Candidate intervals can be assigned to Cand.PhysReg.
1058 // - Block-local splits are candidates for local splitting.
1059 // - DCE leftovers should go back on the queue.
1060 for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
1061 LiveInterval &Reg = *LREdit.get(i);
1062
1063 // Ignore old intervals from DCE.
1064 if (getStage(Reg) != RS_New)
1065 continue;
1066
1067 // Remainder interval. Don't try splitting again, spill if it doesn't
1068 // allocate.
1069 if (IntvMap[i] == 0) {
1070 setStage(Reg, RS_Spill);
1071 continue;
1072 }
1073
1074 // Global intervals. Allow repeated splitting as long as the number of live
1075 // blocks is strictly decreasing.
1076 if (IntvMap[i] < NumGlobalIntvs) {
1077 if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
1078 DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
1079 << " blocks as original.\n");
1080 // Don't allow repeated splitting as a safeguard against looping.
1081 setStage(Reg, RS_Split2);
1082 }
1083 continue;
1084 }
1085
1086 // Other intervals are treated as new. This includes local intervals created
1087 // for blocks with multiple uses, and anything created by DCE.
1088 }
1089
1090 if (VerifyEnabled)
1091 MF->verify(this, "After splitting live range around region");
1092 }
1093
1094 unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
1095 SmallVectorImpl<LiveInterval*> &NewVRegs) {
1096 unsigned NumCands = 0;
1097 unsigned BestCand = NoCand;
1098 float BestCost;
1099 SmallVector<unsigned, 8> UsedCands;
1100
1101 // Check if we can split this live range around a compact region.
1102 bool HasCompact = calcCompactRegion(GlobalCand.front());
1103 if (HasCompact) {
1104 // Yes, keep GlobalCand[0] as the compact region candidate.
1105 NumCands = 1;
1106 BestCost = HUGE_VALF;
1107 } else {
1108 // No benefit from the compact region, our fallback will be per-block
1109 // splitting. Make sure we find a solution that is cheaper than spilling.
1110 BestCost = Hysteresis * calcSpillCost();
1111 DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n');
1112 }
1113
1114 Order.rewind();
1115 while (unsigned PhysReg = Order.next()) {
1116 // Discard bad candidates before we run out of interference cache cursors.
1117 // This will only affect register classes with a lot of registers (>32).
1118 if (NumCands == IntfCache.getMaxCursors()) {
1119 unsigned WorstCount = ~0u;
1120 unsigned Worst = 0;
1121 for (unsigned i = 0; i != NumCands; ++i) {
1122 if (i == BestCand || !GlobalCand[i].PhysReg)
1123 continue;
1124 unsigned Count = GlobalCand[i].LiveBundles.count();
1125 if (Count < WorstCount)
1126 Worst = i, WorstCount = Count;
1127 }
1128 --NumCands;
1129 GlobalCand[Worst] = GlobalCand[NumCands];
1130 if (BestCand == NumCands)
1131 BestCand = Worst;
1132 }
1133
1134 if (GlobalCand.size() <= NumCands)
1135 GlobalCand.resize(NumCands+1);
1136 GlobalSplitCandidate &Cand = GlobalCand[NumCands];
1137 Cand.reset(IntfCache, PhysReg);
1138
1139 SpillPlacer->prepare(Cand.LiveBundles);
1140 float Cost;
1141 if (!addSplitConstraints(Cand.Intf, Cost)) {
1142 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
1143 continue;
1144 }
1145 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
1146 if (Cost >= BestCost) {
1147 DEBUG({
1148 if (BestCand == NoCand)
1149 dbgs() << " worse than no bundles\n";
1150 else
1151 dbgs() << " worse than "
1152 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
1153 });
1154 continue;
1155 }
1156 growRegion(Cand);
1157
1158 SpillPlacer->finish();
1159
1160 // No live bundles, defer to splitSingleBlocks().
1161 if (!Cand.LiveBundles.any()) {
1162 DEBUG(dbgs() << " no bundles.\n");
1163 continue;
1164 }
1165
1166 Cost += calcGlobalSplitCost(Cand);
1167 DEBUG({
1168 dbgs() << ", total = " << Cost << " with bundles";
1169 for (int i = Cand.LiveBundles.find_first(); i>=0;
1170 i = Cand.LiveBundles.find_next(i))
1171 dbgs() << " EB#" << i;
1172 dbgs() << ".\n";
1173 });
1174 if (Cost < BestCost) {
1175 BestCand = NumCands;
1176 BestCost = Hysteresis * Cost; // Prevent rounding effects.
1177 }
1178 ++NumCands;
1179 }
1180
1181 // No solutions found, fall back to single block splitting.
1182 if (!HasCompact && BestCand == NoCand)
1183 return 0;
1184
1185 // Prepare split editor.
1186 LiveRangeEdit LREdit(VirtReg, NewVRegs, *MF, *LIS, VRM, this);
1187 SE->reset(LREdit, SplitSpillMode);
1188
1189 // Assign all edge bundles to the preferred candidate, or NoCand.
1190 BundleCand.assign(Bundles->getNumBundles(), NoCand);
1191
1192 // Assign bundles for the best candidate region.
1193 if (BestCand != NoCand) {
1194 GlobalSplitCandidate &Cand = GlobalCand[BestCand];
1195 if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
1196 UsedCands.push_back(BestCand);
1197 Cand.IntvIdx = SE->openIntv();
1198 DEBUG(dbgs() << "Split for " << PrintReg(Cand.PhysReg, TRI) << " in "
1199 << B << " bundles, intv " << Cand.IntvIdx << ".\n");
1200 (void)B;
1201 }
1202 }
1203
1204 // Assign bundles for the compact region.
1205 if (HasCompact) {
1206 GlobalSplitCandidate &Cand = GlobalCand.front();
1207 assert(!Cand.PhysReg && "Compact region has no physreg");
1208 if (unsigned B = Cand.getBundles(BundleCand, 0)) {
1209 UsedCands.push_back(0);
1210 Cand.IntvIdx = SE->openIntv();
1211 DEBUG(dbgs() << "Split for compact region in " << B << " bundles, intv "
1212 << Cand.IntvIdx << ".\n");
1213 (void)B;
1214 }
1215 }
1216
1217 splitAroundRegion(LREdit, UsedCands);
1218 return 0;
1219 }
1220
1221
1222 //===----------------------------------------------------------------------===//
1223 // Per-Block Splitting
1224 //===----------------------------------------------------------------------===//
1225
1226 /// tryBlockSplit - Split a global live range around every block with uses. This
1227 /// creates a lot of local live ranges that will be split by tryLocalSplit if
1228 /// they don't allocate.
1229 unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
1230 SmallVectorImpl<LiveInterval*> &NewVRegs) {
1231 assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
1232 unsigned Reg = VirtReg.reg;
1233 bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
1234 LiveRangeEdit LREdit(VirtReg, NewVRegs, *MF, *LIS, VRM, this);
1235 SE->reset(LREdit, SplitSpillMode);
1236 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
1237 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
1238 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
1239 if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
1240 SE->splitSingleBlock(BI);
1241 }
1242 // No blocks were split.
1243 if (LREdit.empty())
1244 return 0;
1245
1246 // We did split for some blocks.
1247 SmallVector<unsigned, 8> IntvMap;
1248 SE->finish(&IntvMap);
1249
1250 // Tell LiveDebugVariables about the new ranges.
1251 DebugVars->splitRegister(Reg, LREdit.regs());
1252
1253 ExtraRegInfo.resize(MRI->getNumVirtRegs());
1254
1255 // Sort out the new intervals created by splitting. The remainder interval
1256 // goes straight to spilling, the new local ranges get to stay RS_New.
1257 for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
1258 LiveInterval &LI = *LREdit.get(i);
1259 if (getStage(LI) == RS_New && IntvMap[i] == 0)
1260 setStage(LI, RS_Spill);
1261 }
1262
1263 if (VerifyEnabled)
1264 MF->verify(this, "After splitting live range around basic blocks");
1265 return 0;
1266 }
1267
1268 //===----------------------------------------------------------------------===//
1269 // Local Splitting
1270 //===----------------------------------------------------------------------===//
1271
1272
1273 /// calcGapWeights - Compute the maximum spill weight that needs to be evicted
1274 /// in order to use PhysReg between two entries in SA->UseSlots.
1275 ///
1276 /// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
1277 ///
1278 void RAGreedy::calcGapWeights(unsigned PhysReg,
1279 SmallVectorImpl<float> &GapWeight) {
1280 assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
1281 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
1282 ArrayRef<SlotIndex> Uses = SA->getUseSlots();
1283 const unsigned NumGaps = Uses.size()-1;
1284
1285 // Start and end points for the interference check.
1286 SlotIndex StartIdx =
1287 BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
1288 SlotIndex StopIdx =
1289 BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;
1290
1291 GapWeight.assign(NumGaps, 0.0f);
1292
1293 // Add interference from each overlapping register.
1294 for (const uint16_t *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
1295 if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
1296 .checkInterference())
1297 continue;
1298
1299 // We know that VirtReg is a continuous interval from FirstInstr to
1300 // LastInstr, so we don't need InterferenceQuery.
1301 //
1302 // Interference that overlaps an instruction is counted in both gaps
1303 // surrounding the instruction. The exception is interference before
1304 // StartIdx and after StopIdx.
1305 //
1306 LiveIntervalUnion::SegmentIter IntI = getLiveUnion(*AI).find(StartIdx);
1307 for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
1308 // Skip the gaps before IntI.
1309 while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
1310 if (++Gap == NumGaps)
1311 break;
1312 if (Gap == NumGaps)
1313 break;
1314
1315 // Update the gaps covered by IntI.
1316 const float weight = IntI.value()->weight;
1317 for (; Gap != NumGaps; ++Gap) {
1318 GapWeight[Gap] = std::max(GapWeight[Gap], weight);
1319 if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
1320 break;
1321 }
1322 if (Gap == NumGaps)
1323 break;
1324 }
1325 }
1326 }
1327
1328 /// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
1329 /// basic block.
1330 ///
1331 unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
1332 SmallVectorImpl<LiveInterval*> &NewVRegs) {
1333 assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
1334 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
1335
1336 // Note that it is possible to have an interval that is live-in or live-out
1337 // while only covering a single block - A phi-def can use undef values from
1338 // predecessors, and the block could be a single-block loop.
1339 // We don't bother doing anything clever about such a case, we simply assume
1340 // that the interval is continuous from FirstInstr to LastInstr. We should
1341 // make sure that we don't do anything illegal to such an interval, though.
1342
1343 ArrayRef<SlotIndex> Uses = SA->getUseSlots();
1344 if (Uses.size() <= 2)
1345 return 0;
1346 const unsigned NumGaps = Uses.size()-1;
1347
1348 DEBUG({
1349 dbgs() << "tryLocalSplit: ";
1350 for (unsigned i = 0, e = Uses.size(); i != e; ++i)
1351 dbgs() << ' ' << Uses[i];
1352 dbgs() << '\n';
1353 });
1354
1355 // If VirtReg is live across any register mask operands, compute a list of
1356 // gaps with register masks.
1357 SmallVector<unsigned, 8> RegMaskGaps;
1358 if (!UsableRegs.empty()) {
1359 // Get regmask slots for the whole block.
1360 ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
1361 DEBUG(dbgs() << RMS.size() << " regmasks in block:");
1362 // Constrain to VirtReg's live range.
1363 unsigned ri = std::lower_bound(RMS.begin(), RMS.end(),
1364 Uses.front().getRegSlot()) - RMS.begin();
1365 unsigned re = RMS.size();
1366 for (unsigned i = 0; i != NumGaps && ri != re; ++i) {
1367 // Look for Uses[i] <= RMS <= Uses[i+1].
1368 assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i]));
1369 if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri]))
1370 continue;
1371 // Skip a regmask on the same instruction as the last use. It doesn't
1372 // overlap the live range.
1373 if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps)
1374 break;
1375 DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-' << Uses[i+1]);
1376 RegMaskGaps.push_back(i);
1377 // Advance ri to the next gap. A regmask on one of the uses counts in
1378 // both gaps.
1379 while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
1380 ++ri;
1381 }
1382 DEBUG(dbgs() << '\n');
1383 }
1384
1385 // Since we allow local split results to be split again, there is a risk of
1386 // creating infinite loops. It is tempting to require that the new live
1387 // ranges have fewer instructions than the original. That would guarantee
1388 // convergence, but it is too strict. A live range with 3 instructions can be
1389 // split 2+3 (including the COPY), and we want to allow that.
1390 //
1391 // Instead we use these rules:
1392 //
1393 // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
1394 // noop split, of course).
1395 // 2. Require progress be made for ranges with getStage() == RS_Split2. All
1396 // the new ranges must have fewer instructions than before the split.
1397 // 3. New ranges with the same number of instructions are marked RS_Split2,
1398 // smaller ranges are marked RS_New.
1399 //
1400 // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
1401 // excessive splitting and infinite loops.
1402 //
1403 bool ProgressRequired = getStage(VirtReg) >= RS_Split2;
1404
1405 // Best split candidate.
1406 unsigned BestBefore = NumGaps;
1407 unsigned BestAfter = 0;
1408 float BestDiff = 0;
1409
1410 const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB->getNumber());
1411 SmallVector<float, 8> GapWeight;
1412
1413 Order.rewind();
1414 while (unsigned PhysReg = Order.next()) {
1415 // Keep track of the largest spill weight that would need to be evicted in
1416 // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
1417 calcGapWeights(PhysReg, GapWeight);
1418
1419 // Remove any gaps with regmask clobbers.
1420 if (clobberedByRegMask(PhysReg))
1421 for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
1422 GapWeight[RegMaskGaps[i]] = HUGE_VALF;
1423
1424 // Try to find the best sequence of gaps to close.
1425 // The new spill weight must be larger than any gap interference.
1426
1427 // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
1428 unsigned SplitBefore = 0, SplitAfter = 1;
1429
1430 // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
1431 // It is the spill weight that needs to be evicted.
1432 float MaxGap = GapWeight[0];
1433
1434 for (;;) {
1435 // Live before/after split?
1436 const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
1437 const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;
1438
1439 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
1440 << Uses[SplitBefore] << '-' << Uses[SplitAfter]
1441 << " i=" << MaxGap);
1442
1443 // Stop before the interval gets so big we wouldn't be making progress.
1444 if (!LiveBefore && !LiveAfter) {
1445 DEBUG(dbgs() << " all\n");
1446 break;
1447 }
1448 // Should the interval be extended or shrunk?
1449 bool Shrink = true;
1450
1451 // How many gaps would the new range have?
1452 unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;
1453
1454 // Legally, without causing looping?
1455 bool Legal = !ProgressRequired || NewGaps < NumGaps;
1456
1457 if (Legal && MaxGap < HUGE_VALF) {
1458 // Estimate the new spill weight. Each instruction reads or writes the
1459 // register. Conservatively assume there are no read-modify-write
1460 // instructions.
1461 //
1462 // Try to guess the size of the new interval.
1463 const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1),
1464 Uses[SplitBefore].distance(Uses[SplitAfter]) +
1465 (LiveBefore + LiveAfter)*SlotIndex::InstrDist);
1466 // Would this split be possible to allocate?
1467 // Never allocate all gaps; we wouldn't be making progress.
1468 DEBUG(dbgs() << " w=" << EstWeight);
1469 if (EstWeight * Hysteresis >= MaxGap) {
1470 Shrink = false;
1471 float Diff = EstWeight - MaxGap;
1472 if (Diff > BestDiff) {
1473 DEBUG(dbgs() << " (best)");
1474 BestDiff = Hysteresis * Diff;
1475 BestBefore = SplitBefore;
1476 BestAfter = SplitAfter;
1477 }
1478 }
1479 }
1480
1481 // Try to shrink.
1482 if (Shrink) {
1483 if (++SplitBefore < SplitAfter) {
1484 DEBUG(dbgs() << " shrink\n");
1485 // Recompute the max when necessary.
1486 if (GapWeight[SplitBefore - 1] >= MaxGap) {
1487 MaxGap = GapWeight[SplitBefore];
1488 for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
1489 MaxGap = std::max(MaxGap, GapWeight[i]);
1490 }
1491 continue;
1492 }
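// The window is now empty (SplitBefore == SplitAfter), so there is no gap
// interference left inside it. Reset MaxGap and fall through to try
// extending the window instead.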
1493 MaxGap = 0;
1494 }
1495
1496 // Try to extend the interval.
1497 if (SplitAfter >= NumGaps) {
1498 DEBUG(dbgs() << " end\n");
1499 break;
1500 }
1501
1502 DEBUG(dbgs() << " extend\n");
1503 MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
1504 }
1505 }
1506
1507 // Didn't find any candidates?
1508 if (BestBefore == NumGaps)
1509 return 0;
1510
1511 DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
1512 << '-' << Uses[BestAfter] << ", " << BestDiff
1513 << ", " << (BestAfter - BestBefore + 1) << " instrs\n");
1514
1515 LiveRangeEdit LREdit(VirtReg, NewVRegs, *MF, *LIS, VRM, this);
1516 SE->reset(LREdit);
1517
1518 SE->openIntv();
1519 SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
1520 SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
1521 SE->useIntv(SegStart, SegStop);
1522 SmallVector<unsigned, 8> IntvMap;
1523 SE->finish(&IntvMap);
1524 DebugVars->splitRegister(VirtReg.reg, LREdit.regs());
1525
1526 // If the new range has the same number of instructions as before, mark it as
1527 // RS_Split2 so the next split will be forced to make progress. Otherwise,
1528 // leave the new intervals as RS_New so they can compete.
1529 bool LiveBefore = BestBefore != 0 || BI.LiveIn;
1530 bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
1531 unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
1532 if (NewGaps >= NumGaps) {
1533 DEBUG(dbgs() << "Tagging non-progress ranges: ");
1534 assert(!ProgressRequired && "Didn't make progress when it was required.");
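// IntvMap, filled in by SE->finish() above, records the SplitEditor interval
// index each new register came from: index 0 is the complement, index 1 is
// the interval opened with openIntv() above. Only the latter may still cover
// as many instructions as the original range, so only those registers are
// tagged RS_Split2 here.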
1535 for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
1536 if (IntvMap[i] == 1) {
1537 setStage(*LREdit.get(i), RS_Split2);
1538 DEBUG(dbgs() << PrintReg(LREdit.get(i)->reg));
1539 }
1540 DEBUG(dbgs() << '\n');
1541 }
1542 ++NumLocalSplits;
1543
1544 return 0;
1545 }
1546
1547 //===----------------------------------------------------------------------===//
1548 // Live Range Splitting
1549 //===----------------------------------------------------------------------===//
1550
1551 /// trySplit - Try to split VirtReg or one of its interferences, making it
1552 /// assignable.
1553 /// @return A physreg if VirtReg may be assigned, and/or new ranges in NewVRegs.
1554 unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
1555 SmallVectorImpl<LiveInterval*>&NewVRegs) {
1556 // Ranges must be Split2 or less.
1557 if (getStage(VirtReg) >= RS_Spill)
1558 return 0;
1559
1560 // Local intervals are handled separately.
1561 if (LIS->intervalIsInOneMBB(VirtReg)) {
1562 NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
1563 SA->analyze(&VirtReg);
1564 return tryLocalSplit(VirtReg, Order, NewVRegs);
1565 }
1566
1567 NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);
1568
1569 SA->analyze(&VirtReg);
1570
1571 // FIXME: SplitAnalysis may repair broken live ranges coming from the
1572 // coalescer. That may cause the range to become allocatable which means that
1573 // tryRegionSplit won't be making progress. This check should be replaced with
1574 // an assertion when the coalescer is fixed.
1575 if (SA->didRepairRange()) {
1576 // VirtReg has changed, so all cached queries are invalid.
1577 invalidateVirtRegs();
1578 if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
1579 return PhysReg;
1580 }
1581
1582 // First try to split around a region spanning multiple blocks. RS_Split2
1583 // ranges already made dubious progress with region splitting, so they go
1584 // straight to single block splitting.
1585 if (getStage(VirtReg) < RS_Split2) {
1586 unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
1587 if (PhysReg || !NewVRegs.empty())
1588 return PhysReg;
1589 }
1590
1591 // Then isolate blocks.
1592 return tryBlockSplit(VirtReg, Order, NewVRegs);
1593 }
1594
1595
1596 //===----------------------------------------------------------------------===//
1597 // Main Entry Point
1598 //===----------------------------------------------------------------------===//
1599
1600 unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
1601 SmallVectorImpl<LiveInterval*> &NewVRegs) {
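// The attempts below are ordered roughly by cost: first try a free register,
// then eviction, then (after deferring brand-new ranges to a second round)
// splitting, and finally spilling. Each stage either returns a physreg,
// pushes new live ranges onto NewVRegs, or falls through to the next stage.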
1602 // Check if VirtReg is live across any calls.
1603 UsableRegs.clear();
1604 if (LIS->checkRegMaskInterference(VirtReg, UsableRegs))
1605 DEBUG(dbgs() << "Live across regmasks.\n");
1606
1607 // First try assigning a free register.
1608 AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
1609 if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
1610 return PhysReg;
1611
1612 LiveRangeStage Stage = getStage(VirtReg);
1613 DEBUG(dbgs() << StageName[Stage]
1614 << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');
1615
1616 // Try to evict a less worthy live range, but only for ranges from the primary
1617 // queue. The RS_Split ranges already failed to do this, and they should not
1618 // get a second chance until they have been split.
1619 if (Stage != RS_Split)
1620 if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
1621 return PhysReg;
1622
1623 assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");
1624
1625 // The first time we see a live range, don't try to split or spill.
1626 // Wait until the second time, when all smaller ranges have been allocated.
1627 // This gives a better picture of the interference to split around.
1628 if (Stage < RS_Split) {
1629 setStage(VirtReg, RS_Split);
1630 DEBUG(dbgs() << "wait for second round\n");
1631 NewVRegs.push_back(&VirtReg);
1632 return 0;
1633 }
1634
1635 // If we couldn't allocate a register from spilling, there is probably some
1636   // invalid inline assembly. The base class will report it.
1637 if (Stage >= RS_Done || !VirtReg.isSpillable())
1638 return ~0u;
1639
1640 // Try splitting VirtReg or interferences.
1641 unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
1642 if (PhysReg || !NewVRegs.empty())
1643 return PhysReg;
1644
1645 // Finally spill VirtReg itself.
1646 NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
1647 LiveRangeEdit LRE(VirtReg, NewVRegs, *MF, *LIS, VRM, this);
1648 spiller().spill(LRE);
1649 setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);
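// Everything the spiller just created (spill code and reload ranges) is now
// RS_Done: those registers may still be assigned or evicted, but they will
// never be split or spilled again.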
1650
1651 if (VerifyEnabled)
1652 MF->verify(this, "After spilling");
1653
1654 // The live virtual register requesting allocation was spilled, so tell
1655 // the caller not to allocate anything during this round.
1656 return 0;
1657 }
1658
1659 bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
1660 DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
1661 << "********** Function: "
1662 << ((Value*)mf.getFunction())->getName() << '\n');
1663
1664 MF = &mf;
1665 if (VerifyEnabled)
1666 MF->verify(this, "Before greedy register allocator");
1667
1668 RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
1669 Indexes = &getAnalysis<SlotIndexes>();
1670 DomTree = &getAnalysis<MachineDominatorTree>();
1671 SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
1672 Loops = &getAnalysis<MachineLoopInfo>();
1673 Bundles = &getAnalysis<EdgeBundles>();
1674 SpillPlacer = &getAnalysis<SpillPlacement>();
1675 DebugVars = &getAnalysis<LiveDebugVariables>();
1676
1677 SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
1678 SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree));
1679 ExtraRegInfo.clear();
1680 ExtraRegInfo.resize(MRI->getNumVirtRegs());
1681 NextCascade = 1;
1682 IntfCache.init(MF, &getLiveUnion(0), Indexes, LIS, TRI);
1683 GlobalCand.resize(32); // This will grow as needed.
1684
1685 allocatePhysRegs();
1686 addMBBLiveIns(MF);
1687 LIS->addKillFlags();
1688
1689 // Run rewriter
1690 {
1691 NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
1692 VRM->rewrite(Indexes);
1693 }
1694
1695 // Write out new DBG_VALUE instructions.
1696 {
1697 NamedRegionTimer T("Emit Debug Info", TimerGroupName, TimePassesIsEnabled);
1698 DebugVars->emitDebugValues(VRM);
1699 }
1700
1701 // All machine operands and other references to virtual registers have been
1702 // replaced. Remove the virtual registers and release all the transient data.
1703 VRM->clearAllVirt();
1704 MRI->clearVirtRegs();
1705 releaseMemory();
1706
1707 return true;
1708 }
1709