//===-- SpillPlacement.cpp - Optimal Spill Code Placement -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the spill code placement analysis.
//
// Each edge bundle corresponds to a node in a Hopfield network. Constraints on
// basic blocks are weighted by the block frequency and added to become the node
// bias.
//
// Transparent basic blocks have the variable live through, but don't care if it
// is spilled or in a register. These blocks become connections in the Hopfield
// network, again weighted by block frequency.
//
// The Hopfield network minimizes (possibly locally) its energy function:
//
//   E = -sum_n V_n * ( B_n + sum_{n, m linked by b} V_m * F_b )
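//
// Here V_n is the output Value of node n, B_n is its Bias, and F_b is the
// block frequency of the transparent block b linking nodes n and m.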
//
// The energy function represents the expected spill code execution frequency,
// or the cost of spilling. This is a Lyapunov function which never increases
// when a node is updated. It is guaranteed to converge to a local minimum.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spillplacement"
#include "SpillPlacement.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include <cmath> // HUGE_VALF

using namespace llvm;

char SpillPlacement::ID = 0;
INITIALIZE_PASS_BEGIN(SpillPlacement, "spill-code-placement",
                      "Spill Code Placement Analysis", true, true)
INITIALIZE_PASS_DEPENDENCY(EdgeBundles)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_END(SpillPlacement, "spill-code-placement",
                    "Spill Code Placement Analysis", true, true)

char &llvm::SpillPlacementID = SpillPlacement::ID;

void SpillPlacement::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<EdgeBundles>();
  AU.addRequiredTransitive<MachineLoopInfo>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

/// Node - Each edge bundle corresponds to a Hopfield node.
///
/// The node contains precomputed frequency data that only depends on the CFG,
/// but Bias and Links are recomputed for each spill placement query.
///
/// The node Value is positive when the variable should be in a register. The
/// value can change when linked nodes change, but convergence is very fast
/// because all weights are positive.
///
struct SpillPlacement::Node {
  /// Scale - Inverse block frequency feeding into[0] or out of[1] the bundle.
  /// Ideally, these two numbers should be identical, but inaccuracies in the
  /// block frequency estimates mean that we need to normalize ingoing and
  /// outgoing frequencies separately so they are commensurate.
  float Scale[2];

  /// Bias - Normalized contributions from non-transparent blocks.
  /// A bundle connected to a MustSpill block has a huge negative bias,
  /// otherwise it is a number in the range [-2;2].
  float Bias;

  /// Value - Output value of this node computed from the Bias and links.
  /// This is always in the range [-1;1]. A positive number means the variable
  /// should go in a register through this bundle.
  float Value;

  typedef SmallVector<std::pair<float, unsigned>, 4> LinkVector;

  /// Links - (Weight, BundleNo) for all transparent blocks connecting to other
  /// bundles. The weights are all positive and add up to at most 2; weights
  /// from ingoing and outgoing nodes separately add up to at most 1. The weight
  /// sum can be less than 2 when the variable is not live into / out of some
  /// connected basic blocks.
  LinkVector Links;

  /// preferReg - Return true when this node prefers to be in a register.
  bool preferReg() const {
    // Undecided nodes (Value==0) go on the stack.
    return Value > 0;
  }

  /// mustSpill - Return True if this node is so biased that it must spill.
  bool mustSpill() const {
    // Actually, we must spill if Bias < -sum(weights).
    // It may be worth it to compute the weight sum here?
    return Bias < -2.0f;
  }

  /// Node - Create a blank Node.
  Node() {
    Scale[0] = Scale[1] = 0;
  }

  /// clear - Reset per-query data, but preserve frequencies that only depend on
  /// the CFG.
  void clear() {
    Bias = Value = 0;
    Links.clear();
  }

  /// addLink - Add a link to bundle b with weight w.
  /// out=0 for an ingoing link, and 1 for an outgoing link.
  void addLink(unsigned b, float w, bool out) {
    // Normalize w relative to all connected blocks from that direction.
    w *= Scale[out];

    // There can be multiple links to the same bundle, add them up.
    for (LinkVector::iterator I = Links.begin(), E = Links.end(); I != E; ++I)
      if (I->second == b) {
        I->first += w;
        return;
      }
    // This must be the first link to b.
    Links.push_back(std::make_pair(w, b));
  }

  /// addBias - Bias this node from an ingoing[0] or outgoing[1] link.
  /// The weight is normalized by the same Scale factor as the links.
  void addBias(float w, bool out) {
    // Normalize w relative to all connected blocks from that direction.
    w *= Scale[out];
    Bias += w;
  }

  /// update - Recompute Value from Bias and Links. Return true when node
  /// preference changes.
  bool update(const Node nodes[]) {
    // Compute the weighted sum of inputs.
    float Sum = Bias;
    for (LinkVector::iterator I = Links.begin(), E = Links.end(); I != E; ++I)
      Sum += I->first * nodes[I->second].Value;

    // The weighted sum is going to be in the range [-2;2]. Ideally, we should
    // simply set Value = sign(Sum), but we will add a dead zone around 0 for
    // two reasons:
    //  1. It avoids arbitrary bias when all links are 0 as is possible during
    //     initial iterations.
    //  2. It helps tame rounding errors when the links nominally sum to 0.
    const float Thres = 1e-4f;
    bool Before = preferReg();
    if (Sum < -Thres)
      Value = -1;
    else if (Sum > Thres)
      Value = 1;
    else
      Value = 0;
    return Before != preferReg();
  }
};
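
// A typical client drives the analysis roughly like this (a sketch; the exact
// sequence depends on the register allocator using this pass, and LiveBlocks
// and TransparentBlocks stand in for the caller's own data):
//
//   SpillPlacement &SpillPlacer = getAnalysis<SpillPlacement>();
//   BitVector RegBundles;
//   SpillPlacer.prepare(RegBundles);           // Reset per-query state.
//   SpillPlacer.addConstraints(LiveBlocks);    // Biases from constrained blocks.
//   SpillPlacer.addLinks(TransparentBlocks);   // Links from transparent blocks.
//   SpillPlacer.scanActiveBundles();
//   SpillPlacer.iterate();                     // Run the Hopfield network.
//   bool Perfect = SpillPlacer.finish();       // RegBundles now holds the
//                                              // register preferences.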

bool SpillPlacement::runOnMachineFunction(MachineFunction &mf) {
  MF = &mf;
  bundles = &getAnalysis<EdgeBundles>();
  loops = &getAnalysis<MachineLoopInfo>();

  assert(!nodes && "Leaking node array");
  nodes = new Node[bundles->getNumBundles()];

  // Compute total ingoing and outgoing block frequencies for all bundles.
  BlockFrequency.resize(mf.getNumBlockIDs());
  for (MachineFunction::iterator I = mf.begin(), E = mf.end(); I != E; ++I) {
    float Freq = LiveIntervals::getSpillWeight(true, false,
                                               loops->getLoopDepth(I));
    unsigned Num = I->getNumber();
    BlockFrequency[Num] = Freq;
    nodes[bundles->getBundle(Num, 1)].Scale[0] += Freq;
    nodes[bundles->getBundle(Num, 0)].Scale[1] += Freq;
  }

  // Scales are reciprocal frequencies.
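  // Multiplying by Scale[d] in addLink() and addBias() then normalizes each
  // direction's contributions so they sum to at most 1, as described for
  // Node::Links above.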
  for (unsigned i = 0, e = bundles->getNumBundles(); i != e; ++i)
    for (unsigned d = 0; d != 2; ++d)
      if (nodes[i].Scale[d] > 0)
        nodes[i].Scale[d] = 1 / nodes[i].Scale[d];

  // We never change the function.
  return false;
}

void SpillPlacement::releaseMemory() {
  delete[] nodes;
  nodes = 0;
}

/// activate - mark node n as active if it wasn't already.
void SpillPlacement::activate(unsigned n) {
  if (ActiveNodes->test(n))
    return;
  ActiveNodes->set(n);
  nodes[n].clear();
}


/// addConstraints - Compute node biases and weights from a set of constraints.
/// Set a bit in ActiveNodes for each active node.
void SpillPlacement::addConstraints(ArrayRef<BlockConstraint> LiveBlocks) {
  for (ArrayRef<BlockConstraint>::iterator I = LiveBlocks.begin(),
       E = LiveBlocks.end(); I != E; ++I) {
    float Freq = getBlockFrequency(I->Number);
    const float Bias[] = {
      0,           // DontCare,
      1,           // PrefReg,
      -1,          // PrefSpill
      -HUGE_VALF   // MustSpill
    };

    // Live-in to block?
    if (I->Entry != DontCare) {
      unsigned ib = bundles->getBundle(I->Number, 0);
      activate(ib);
      nodes[ib].addBias(Freq * Bias[I->Entry], 1);
    }

    // Live-out from block?
    if (I->Exit != DontCare) {
      unsigned ob = bundles->getBundle(I->Number, 1);
      activate(ob);
      nodes[ob].addBias(Freq * Bias[I->Exit], 0);
    }
  }
}

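/// addLinks - Add the transparent blocks in Links to the network, connecting
/// the bundles at each block's entry and exit with links weighted by the
/// block frequency.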
void SpillPlacement::addLinks(ArrayRef<unsigned> Links) {
  for (ArrayRef<unsigned>::iterator I = Links.begin(), E = Links.end(); I != E;
       ++I) {
    unsigned Number = *I;
    unsigned ib = bundles->getBundle(Number, 0);
    unsigned ob = bundles->getBundle(Number, 1);

    // Ignore self-loops.
    if (ib == ob)
      continue;
    activate(ib);
    activate(ob);
    if (nodes[ib].Links.empty() && !nodes[ib].mustSpill())
      Linked.push_back(ib);
    if (nodes[ob].Links.empty() && !nodes[ob].mustSpill())
      Linked.push_back(ob);
    float Freq = getBlockFrequency(Number);
    nodes[ib].addLink(ob, Freq, 1);
    nodes[ob].addLink(ib, Freq, 0);
  }
}

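/// scanActiveBundles - Compute an initial Value for every active node and seed
/// the Linked and RecentPositive lists. Return true if any active node
/// currently prefers a register.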
bool SpillPlacement::scanActiveBundles() {
  Linked.clear();
  RecentPositive.clear();
  for (int n = ActiveNodes->find_first(); n>=0; n = ActiveNodes->find_next(n)) {
    nodes[n].update(nodes);
    // A node that must spill, or a node without any links is not going to
    // change its value ever again, so exclude it from iterations.
    if (nodes[n].mustSpill())
      continue;
    if (!nodes[n].Links.empty())
      Linked.push_back(n);
    if (nodes[n].preferReg())
      RecentPositive.push_back(n);
  }
  return !RecentPositive.empty();
}

/// iterate - Repeatedly update the Hopfield nodes until stability or the
/// maximum number of iterations is reached. The member list Linked holds the
/// numbers of the linked nodes that need updating.
void SpillPlacement::iterate() {
  // First update the recently positive nodes. They have likely received new
  // negative bias that will turn them off.
  while (!RecentPositive.empty())
    nodes[RecentPositive.pop_back_val()].update(nodes);

  if (Linked.empty())
    return;

  // Run up to 10 iterations. The edge bundle numbering is closely related to
  // basic block numbering, so there is a strong tendency towards chains of
  // linked nodes with sequential numbers. By scanning the linked nodes
  // backwards and forwards, we make it very likely that a single node can
  // affect the entire network in a single iteration. That means very fast
  // convergence, usually in a single iteration.
  for (unsigned iteration = 0; iteration != 10; ++iteration) {
    // Scan backwards, skipping the last node which was just updated.
    bool Changed = false;
    for (SmallVectorImpl<unsigned>::const_reverse_iterator I =
           llvm::next(Linked.rbegin()), E = Linked.rend(); I != E; ++I) {
      unsigned n = *I;
      if (nodes[n].update(nodes)) {
        Changed = true;
        if (nodes[n].preferReg())
          RecentPositive.push_back(n);
      }
    }
    if (!Changed || !RecentPositive.empty())
      return;

    // Scan forwards, skipping the first node which was just updated.
    Changed = false;
    for (SmallVectorImpl<unsigned>::const_iterator I =
           llvm::next(Linked.begin()), E = Linked.end(); I != E; ++I) {
      unsigned n = *I;
      if (nodes[n].update(nodes)) {
        Changed = true;
        if (nodes[n].preferReg())
          RecentPositive.push_back(n);
      }
    }
    if (!Changed || !RecentPositive.empty())
      return;
  }
}

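/// prepare - Reset the network and remember RegBundles as the bit vector that
/// finish() will write the final register preferences to.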
void SpillPlacement::prepare(BitVector &RegBundles) {
  Linked.clear();
  RecentPositive.clear();
  // Reuse RegBundles as our ActiveNodes vector.
  ActiveNodes = &RegBundles;
  ActiveNodes->clear();
  ActiveNodes->resize(bundles->getNumBundles());
}

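/// finish - Write the register preferences back to the bit vector given to
/// prepare(): a set bit means the bundle prefers a register. Return true if
/// every active bundle prefers a register.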
bool
SpillPlacement::finish() {
  assert(ActiveNodes && "Call prepare() first");

  // Write preferences back to ActiveNodes.
  bool Perfect = true;
  for (int n = ActiveNodes->find_first(); n>=0; n = ActiveNodes->find_next(n))
    if (!nodes[n].preferReg()) {
      ActiveNodes->reset(n);
      Perfect = false;
    }
  ActiveNodes = 0;
  return Perfect;
}