//===-- SpillPlacement.cpp - Optimal Spill Code Placement -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the spill code placement analysis.
//
// Each edge bundle corresponds to a node in a Hopfield network. Constraints on
// basic blocks are weighted by the block frequency and added to become the node
// bias.
//
// Transparent basic blocks have the variable live through, but don't care if it
// is spilled or in a register. These blocks become connections in the Hopfield
// network, again weighted by block frequency.
//
// The Hopfield network minimizes (possibly locally) its energy function:
//
//   E = -sum_n V_n * ( B_n + sum_{n, m linked by b} V_m * F_b )
//
// The energy function represents the expected spill code execution frequency,
// or the cost of spilling. This is a Lyapunov function which never increases
// when a node is updated. It is guaranteed to converge to a local minimum.
//
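// An illustrative example with made-up frequencies: a node n with a single
// constrained block of frequency 10 preferring a register (B_n = +10) and one
// transparent block of frequency 4 linking it to a neighbour m contributes
// -V_n * (10 + 4 * V_m) to E. Whichever value m takes, the bias dominates
// (10 - 4 > 0), so E is minimized by V_n = +1: keep the variable in a register
// across this bundle. Only a link with F_b > B_n could pull V_n the other
// way.
//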
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spillplacement"
#include "SpillPlacement.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"

using namespace llvm;

char SpillPlacement::ID = 0;
INITIALIZE_PASS_BEGIN(SpillPlacement, "spill-code-placement",
                      "Spill Code Placement Analysis", true, true)
INITIALIZE_PASS_DEPENDENCY(EdgeBundles)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_END(SpillPlacement, "spill-code-placement",
                    "Spill Code Placement Analysis", true, true)

char &llvm::SpillPlacementID = SpillPlacement::ID;

void SpillPlacement::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<MachineBlockFrequencyInfo>();
  AU.addRequiredTransitive<EdgeBundles>();
  AU.addRequiredTransitive<MachineLoopInfo>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

/// Decision threshold. A node gets the output value 0 if the weighted sum of
/// its inputs falls in the open interval (-Threshold;Threshold).
static const BlockFrequency Threshold = 2;

/// Node - Each edge bundle corresponds to a Hopfield node.
///
/// The node contains precomputed frequency data that only depends on the CFG,
/// but Bias and Links are computed each time placeSpills is called.
///
/// The node Value is positive when the variable should be in a register. The
/// value can change when linked nodes change, but convergence is very fast
/// because all weights are positive.
///
struct SpillPlacement::Node {
  /// BiasN - Sum of blocks that prefer a spill.
  BlockFrequency BiasN;
  /// BiasP - Sum of blocks that prefer a register.
  BlockFrequency BiasP;

  /// Value - Output value of this node computed from the Bias and links.
  /// This is always one of the values {-1, 0, 1}. A positive number means the
  /// variable should go in a register through this bundle.
  int Value;

  typedef SmallVector<std::pair<BlockFrequency, unsigned>, 4> LinkVector;

  /// Links - (Weight, BundleNo) for all transparent blocks connecting to other
  /// bundles. The weights are all positive block frequencies.
  LinkVector Links;

  /// SumLinkWeights - Cached sum of the weights of all links + Threshold.
  BlockFrequency SumLinkWeights;

  /// preferReg - Return true when this node prefers to be in a register.
  bool preferReg() const {
    // Undecided nodes (Value==0) go on the stack.
    return Value > 0;
  }

  /// mustSpill - Return True if this node is so biased that it must spill.
  bool mustSpill() const {
    // We must spill if Bias < -sum(weights) or the MustSpill flag was set.
    // BiasN is saturated when MustSpill is set, so make sure this still
    // returns true when the RHS saturates. Note that SumLinkWeights includes
    // Threshold.
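    // For example, with Threshold == 2, two links of weight 3 and 5
    // (SumLinkWeights == 10) and BiasP == 4, spilling becomes mandatory once
    // BiasN reaches 14: even if both neighbours vote for a register, SumP can
    // reach at most 4 + 3 + 5 == 12 and never beats SumN by Threshold.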
    return BiasN >= BiasP + SumLinkWeights;
  }

  /// clear - Reset per-query data, but preserve frequencies that only depend
  /// on the CFG.
  void clear() {
    BiasN = BiasP = Value = 0;
    SumLinkWeights = Threshold;
    Links.clear();
  }

  /// addLink - Add a link to bundle b with weight w.
  void addLink(unsigned b, BlockFrequency w) {
    // Update cached sum.
    SumLinkWeights += w;

    // There can be multiple links to the same bundle, add them up.
    for (LinkVector::iterator I = Links.begin(), E = Links.end(); I != E; ++I)
      if (I->second == b) {
        I->first += w;
        return;
      }
    // This must be the first link to b.
    Links.push_back(std::make_pair(w, b));
  }

  /// addBias - Bias this node.
  void addBias(BlockFrequency freq, BorderConstraint direction) {
    switch (direction) {
    default:
      break;
    case PrefReg:
      BiasP += freq;
      break;
    case PrefSpill:
      BiasN += freq;
      break;
    case MustSpill:
      BiasN = BlockFrequency::getMaxFrequency();
      break;
    }
  }

  /// update - Recompute Value from Bias and Links. Return true when node
  /// preference changes.
  bool update(const Node nodes[]) {
    // Compute the weighted sum of inputs.
    BlockFrequency SumN = BiasN;
    BlockFrequency SumP = BiasP;
    for (LinkVector::iterator I = Links.begin(), E = Links.end(); I != E; ++I) {
      if (nodes[I->second].Value == -1)
        SumN += I->first;
      else if (nodes[I->second].Value == 1)
        SumP += I->first;
    }

    // Each weighted sum is going to be less than the total frequency of the
    // bundle. Ideally, we should simply set Value = sign(SumP - SumN), but we
    // will add a dead zone around 0 for two reasons:
    //
    //  1. It avoids arbitrary bias when all links are 0 as is possible during
    //     initial iterations.
    //  2. It helps tame rounding errors when the links nominally sum to 0.
    //
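    // For example, with Threshold == 2, inputs summing to SumP == 7 and
    // SumN == 6 leave the node undecided (Value == 0) since neither side
    // leads by Threshold, while SumP == 9 against SumN == 6 settles on a
    // register (Value == 1).
    //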
    bool Before = preferReg();
    if (SumN >= SumP + Threshold)
      Value = -1;
    else if (SumP >= SumN + Threshold)
      Value = 1;
    else
      Value = 0;
    return Before != preferReg();
  }
};

bool SpillPlacement::runOnMachineFunction(MachineFunction &mf) {
  MF = &mf;
  bundles = &getAnalysis<EdgeBundles>();
  loops = &getAnalysis<MachineLoopInfo>();

  assert(!nodes && "Leaking node array");
  nodes = new Node[bundles->getNumBundles()];

  // Compute total ingoing and outgoing block frequencies for all bundles.
  BlockFrequencies.resize(mf.getNumBlockIDs());
  MachineBlockFrequencyInfo &MBFI = getAnalysis<MachineBlockFrequencyInfo>();
  for (MachineFunction::iterator I = mf.begin(), E = mf.end(); I != E; ++I) {
    unsigned Num = I->getNumber();
    BlockFrequencies[Num] = MBFI.getBlockFreq(I);
  }

  // We never change the function.
  return false;
}

void SpillPlacement::releaseMemory() {
  delete[] nodes;
  nodes = 0;
}

/// activate - mark node n as active if it wasn't already.
void SpillPlacement::activate(unsigned n) {
  if (ActiveNodes->test(n))
    return;
  ActiveNodes->set(n);
  nodes[n].clear();

  // Very large bundles usually come from big switches, indirect branches,
  // landing pads, or loops with many 'continue' statements. It is difficult to
  // allocate registers when so many different blocks are involved.
  //
  // Give a small negative bias to large bundles such that a substantial
  // fraction of the connected blocks need to be interested before we consider
  // expanding the region through the bundle. This helps compile time by
  // limiting the number of blocks visited and the number of links in the
  // Hopfield network.
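  //
  // The bias below is equivalent to a PrefSpill block running at one
  // sixteenth of the entry block's frequency: blocks much colder than that
  // have to gang up to overcome it, while a single hot block can still do so
  // on its own.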
  if (bundles->getBlocks(n).size() > 100) {
    nodes[n].BiasP = 0;
    nodes[n].BiasN = (BlockFrequency::getEntryFrequency() / 16);
  }
}


/// addConstraints - Compute node biases and weights from a set of constraints.
/// Set a bit in NodeMask for each active node.
void SpillPlacement::addConstraints(ArrayRef<BlockConstraint> LiveBlocks) {
  for (ArrayRef<BlockConstraint>::iterator I = LiveBlocks.begin(),
       E = LiveBlocks.end(); I != E; ++I) {
    BlockFrequency Freq = BlockFrequencies[I->Number];

    // Live-in to block?
    if (I->Entry != DontCare) {
      unsigned ib = bundles->getBundle(I->Number, 0);
      activate(ib);
      nodes[ib].addBias(Freq, I->Entry);
    }

    // Live-out from block?
    if (I->Exit != DontCare) {
      unsigned ob = bundles->getBundle(I->Number, 1);
      activate(ob);
      nodes[ob].addBias(Freq, I->Exit);
    }
  }
}

/// addPrefSpill - Same as addConstraints(PrefSpill)
void SpillPlacement::addPrefSpill(ArrayRef<unsigned> Blocks, bool Strong) {
  for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
       I != E; ++I) {
    BlockFrequency Freq = BlockFrequencies[*I];
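    // A strong preference counts double: weight the block twice.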
    if (Strong)
      Freq += Freq;
    unsigned ib = bundles->getBundle(*I, 0);
    unsigned ob = bundles->getBundle(*I, 1);
    activate(ib);
    activate(ob);
    nodes[ib].addBias(Freq, PrefSpill);
    nodes[ob].addBias(Freq, PrefSpill);
  }
}

void SpillPlacement::addLinks(ArrayRef<unsigned> Links) {
  for (ArrayRef<unsigned>::iterator I = Links.begin(), E = Links.end(); I != E;
       ++I) {
    unsigned Number = *I;
    unsigned ib = bundles->getBundle(Number, 0);
    unsigned ob = bundles->getBundle(Number, 1);

    // Ignore self-loops.
    if (ib == ob)
      continue;
    activate(ib);
    activate(ob);
    if (nodes[ib].Links.empty() && !nodes[ib].mustSpill())
      Linked.push_back(ib);
    if (nodes[ob].Links.empty() && !nodes[ob].mustSpill())
      Linked.push_back(ob);
    BlockFrequency Freq = BlockFrequencies[Number];
    nodes[ib].addLink(ob, Freq);
    nodes[ob].addLink(ib, Freq);
  }
}

bool SpillPlacement::scanActiveBundles() {
  Linked.clear();
  RecentPositive.clear();
  for (int n = ActiveNodes->find_first(); n>=0; n = ActiveNodes->find_next(n)) {
    nodes[n].update(nodes);
    // A node that must spill, or a node without any links is not going to
    // change its value ever again, so exclude it from iterations.
    if (nodes[n].mustSpill())
      continue;
    if (!nodes[n].Links.empty())
      Linked.push_back(n);
    if (nodes[n].preferReg())
      RecentPositive.push_back(n);
  }
  return !RecentPositive.empty();
}

/// iterate - Repeatedly update the Hopfield nodes until stability or the
/// maximum number of iterations is reached.
/// @param Linked - Numbers of linked nodes that need updating.
void SpillPlacement::iterate() {
  // First update the recently positive nodes. They have likely received new
  // negative bias that will turn them off.
  while (!RecentPositive.empty())
    nodes[RecentPositive.pop_back_val()].update(nodes);

  if (Linked.empty())
    return;

  // Run up to 10 iterations. The edge bundle numbering is closely related to
  // basic block numbering, so there is a strong tendency towards chains of
  // linked nodes with sequential numbers. By scanning the linked nodes
  // backwards and forwards, we make it very likely that a single node can
  // affect the entire network in a single iteration. That means very fast
  // convergence, usually in a single iteration.
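  //
  // For example, if the linked nodes form a chain n1-n2-...-nk, a change at
  // nk reaches n1 during the backwards scan and a change at n1 reaches nk
  // during the forwards scan, so a chain like this typically stabilizes
  // within a couple of sweeps.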
  for (unsigned iteration = 0; iteration != 10; ++iteration) {
    // Scan backwards, skipping the last node which was just updated.
    bool Changed = false;
    for (SmallVectorImpl<unsigned>::const_reverse_iterator I =
           llvm::next(Linked.rbegin()), E = Linked.rend(); I != E; ++I) {
      unsigned n = *I;
      if (nodes[n].update(nodes)) {
        Changed = true;
        if (nodes[n].preferReg())
          RecentPositive.push_back(n);
      }
    }
    if (!Changed || !RecentPositive.empty())
      return;

    // Scan forwards, skipping the first node which was just updated.
    Changed = false;
    for (SmallVectorImpl<unsigned>::const_iterator I =
           llvm::next(Linked.begin()), E = Linked.end(); I != E; ++I) {
      unsigned n = *I;
      if (nodes[n].update(nodes)) {
        Changed = true;
        if (nodes[n].preferReg())
          RecentPositive.push_back(n);
      }
    }
    if (!Changed || !RecentPositive.empty())
      return;
  }
}

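// prepare() and finish() bracket a single placement query. As a rough usage
// sketch (argument names are illustrative), a caller such as the greedy
// register allocator does roughly:
//
//   BitVector RegBundles;
//   SpillPlacer->prepare(RegBundles);
//   SpillPlacer->addConstraints(LiveBlocks);  // per-block register/spill
//   SpillPlacer->addLinks(TransparentBlocks); // blocks with the value live
//                                             // through
//   SpillPlacer->scanActiveBundles();
//   SpillPlacer->iterate();
//   bool Perfect = SpillPlacer->finish();     // RegBundles now marks the
//                                             // bundles that want a register
//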
void SpillPlacement::prepare(BitVector &RegBundles) {
  Linked.clear();
  RecentPositive.clear();
  // Reuse RegBundles as our ActiveNodes vector.
  ActiveNodes = &RegBundles;
  ActiveNodes->clear();
  ActiveNodes->resize(bundles->getNumBundles());
}

bool
SpillPlacement::finish() {
  assert(ActiveNodes && "Call prepare() first");

  // Write preferences back to ActiveNodes.
  bool Perfect = true;
  for (int n = ActiveNodes->find_first(); n>=0; n = ActiveNodes->find_next(n))
    if (!nodes[n].preferReg()) {
      ActiveNodes->reset(n);
      Perfect = false;
    }
  ActiveNodes = 0;
  return Perfect;
}