1 //===-- SpillPlacement.cpp - Optimal Spill Code Placement -----------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the spill code placement analysis.
11 //
12 // Each edge bundle corresponds to a node in a Hopfield network. Constraints on
13 // basic blocks are weighted by the block frequency and added to become the node
14 // bias.
15 //
16 // Transparent basic blocks have the variable live through, but don't care if it
17 // is spilled or in a register. These blocks become connections in the Hopfield
18 // network, again weighted by block frequency.
19 //
20 // The Hopfield network minimizes (possibly locally) its energy function:
21 //
22 // E = -sum_n V_n * ( B_n + sum_{n, m linked by b} V_m * F_b )
23 //
24 // The energy function represents the expected spill code execution frequency,
25 // or the cost of spilling. This is a Lyapunov function which never increases
26 // when a node is updated. It is guaranteed to converge to a local minimum.
27 //
28 //===----------------------------------------------------------------------===//
29
30 #include "SpillPlacement.h"
31 #include "llvm/ADT/BitVector.h"
32 #include "llvm/CodeGen/EdgeBundles.h"
33 #include "llvm/CodeGen/MachineBasicBlock.h"
34 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
35 #include "llvm/CodeGen/MachineFunction.h"
36 #include "llvm/CodeGen/MachineLoopInfo.h"
37 #include "llvm/CodeGen/Passes.h"
38 #include "llvm/Support/Debug.h"
39 #include "llvm/Support/Format.h"
40
41 using namespace llvm;
42
43 #define DEBUG_TYPE "spillplacement"
44
// Pass identification, replacement for typeid.
char SpillPlacement::ID = 0;

// Register the analysis pass and its transitive dependencies with the pass
// manager. The trailing (true, true) flags mark it as a CFG-only analysis.
INITIALIZE_PASS_BEGIN(SpillPlacement, "spill-code-placement",
                      "Spill Code Placement Analysis", true, true)
INITIALIZE_PASS_DEPENDENCY(EdgeBundles)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_END(SpillPlacement, "spill-code-placement",
                    "Spill Code Placement Analysis", true, true)

// Pass ID usable by clients that address the pass by identity.
char &llvm::SpillPlacementID = SpillPlacement::ID;
54
void SpillPlacement::getAnalysisUsage(AnalysisUsage &AU) const {
  // Pure analysis: nothing in the function is modified.
  AU.setPreservesAll();
  AU.addRequired<MachineBlockFrequencyInfo>();
  // Transitive requirements: passes that use SpillPlacement may also query
  // these analyses, so they must stay alive as long as this one does.
  AU.addRequiredTransitive<EdgeBundles>();
  AU.addRequiredTransitive<MachineLoopInfo>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
62
namespace {
// Hopfield decision threshold, scaled from the entry block frequency by
// setThreshold() each time runOnMachineFunction() processes a function.
static BlockFrequency Threshold;
}

/// Decision threshold. A node gets the output value 0 if the weighted sum of
/// its inputs falls in the open interval (-Threshold;Threshold).
static BlockFrequency getThreshold() { return Threshold; }
70
71 /// \brief Set the threshold for a given entry frequency.
72 ///
73 /// Set the threshold relative to \c Entry. Since the threshold is used as a
74 /// bound on the open interval (-Threshold;Threshold), 1 is the minimum
75 /// threshold.
setThreshold(const BlockFrequency & Entry)76 static void setThreshold(const BlockFrequency &Entry) {
77 // Apparently 2 is a good threshold when Entry==2^14, but we need to scale
78 // it. Divide by 2^13, rounding as appropriate.
79 uint64_t Freq = Entry.getFrequency();
80 uint64_t Scaled = (Freq >> 13) + bool(Freq & (1 << 12));
81 Threshold = std::max(UINT64_C(1), Scaled);
82 }
83
84 /// Node - Each edge bundle corresponds to a Hopfield node.
85 ///
86 /// The node contains precomputed frequency data that only depends on the CFG,
87 /// but Bias and Links are computed each time placeSpills is called.
88 ///
89 /// The node Value is positive when the variable should be in a register. The
90 /// value can change when linked nodes change, but convergence is very fast
91 /// because all weights are positive.
92 ///
93 struct SpillPlacement::Node {
94 /// BiasN - Sum of blocks that prefer a spill.
95 BlockFrequency BiasN;
96 /// BiasP - Sum of blocks that prefer a register.
97 BlockFrequency BiasP;
98
99 /// Value - Output value of this node computed from the Bias and links.
100 /// This is always on of the values {-1, 0, 1}. A positive number means the
101 /// variable should go in a register through this bundle.
102 int Value;
103
104 typedef SmallVector<std::pair<BlockFrequency, unsigned>, 4> LinkVector;
105
106 /// Links - (Weight, BundleNo) for all transparent blocks connecting to other
107 /// bundles. The weights are all positive block frequencies.
108 LinkVector Links;
109
110 /// SumLinkWeights - Cached sum of the weights of all links + ThresHold.
111 BlockFrequency SumLinkWeights;
112
113 /// preferReg - Return true when this node prefers to be in a register.
preferRegSpillPlacement::Node114 bool preferReg() const {
115 // Undecided nodes (Value==0) go on the stack.
116 return Value > 0;
117 }
118
119 /// mustSpill - Return True if this node is so biased that it must spill.
mustSpillSpillPlacement::Node120 bool mustSpill() const {
121 // We must spill if Bias < -sum(weights) or the MustSpill flag was set.
122 // BiasN is saturated when MustSpill is set, make sure this still returns
123 // true when the RHS saturates. Note that SumLinkWeights includes Threshold.
124 return BiasN >= BiasP + SumLinkWeights;
125 }
126
127 /// clear - Reset per-query data, but preserve frequencies that only depend on
128 // the CFG.
clearSpillPlacement::Node129 void clear() {
130 BiasN = BiasP = Value = 0;
131 SumLinkWeights = getThreshold();
132 Links.clear();
133 }
134
135 /// addLink - Add a link to bundle b with weight w.
addLinkSpillPlacement::Node136 void addLink(unsigned b, BlockFrequency w) {
137 // Update cached sum.
138 SumLinkWeights += w;
139
140 // There can be multiple links to the same bundle, add them up.
141 for (LinkVector::iterator I = Links.begin(), E = Links.end(); I != E; ++I)
142 if (I->second == b) {
143 I->first += w;
144 return;
145 }
146 // This must be the first link to b.
147 Links.push_back(std::make_pair(w, b));
148 }
149
150 /// addBias - Bias this node.
addBiasSpillPlacement::Node151 void addBias(BlockFrequency freq, BorderConstraint direction) {
152 switch (direction) {
153 default:
154 break;
155 case PrefReg:
156 BiasP += freq;
157 break;
158 case PrefSpill:
159 BiasN += freq;
160 break;
161 case MustSpill:
162 BiasN = BlockFrequency::getMaxFrequency();
163 break;
164 }
165 }
166
167 /// update - Recompute Value from Bias and Links. Return true when node
168 /// preference changes.
updateSpillPlacement::Node169 bool update(const Node nodes[]) {
170 // Compute the weighted sum of inputs.
171 BlockFrequency SumN = BiasN;
172 BlockFrequency SumP = BiasP;
173 for (LinkVector::iterator I = Links.begin(), E = Links.end(); I != E; ++I) {
174 if (nodes[I->second].Value == -1)
175 SumN += I->first;
176 else if (nodes[I->second].Value == 1)
177 SumP += I->first;
178 }
179
180 // Each weighted sum is going to be less than the total frequency of the
181 // bundle. Ideally, we should simply set Value = sign(SumP - SumN), but we
182 // will add a dead zone around 0 for two reasons:
183 //
184 // 1. It avoids arbitrary bias when all links are 0 as is possible during
185 // initial iterations.
186 // 2. It helps tame rounding errors when the links nominally sum to 0.
187 //
188 bool Before = preferReg();
189 if (SumN >= SumP + getThreshold())
190 Value = -1;
191 else if (SumP >= SumN + getThreshold())
192 Value = 1;
193 else
194 Value = 0;
195 return Before != preferReg();
196 }
197 };
198
runOnMachineFunction(MachineFunction & mf)199 bool SpillPlacement::runOnMachineFunction(MachineFunction &mf) {
200 MF = &mf;
201 bundles = &getAnalysis<EdgeBundles>();
202 loops = &getAnalysis<MachineLoopInfo>();
203
204 assert(!nodes && "Leaking node array");
205 nodes = new Node[bundles->getNumBundles()];
206
207 // Compute total ingoing and outgoing block frequencies for all bundles.
208 BlockFrequencies.resize(mf.getNumBlockIDs());
209 MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
210 setThreshold(MBFI->getEntryFreq());
211 for (MachineFunction::iterator I = mf.begin(), E = mf.end(); I != E; ++I) {
212 unsigned Num = I->getNumber();
213 BlockFrequencies[Num] = MBFI->getBlockFreq(I);
214 }
215
216 // We never change the function.
217 return false;
218 }
219
void SpillPlacement::releaseMemory() {
  // Free the per-function node array allocated in runOnMachineFunction().
  delete[] nodes;
  nodes = nullptr;
}
224
225 /// activate - mark node n as active if it wasn't already.
activate(unsigned n)226 void SpillPlacement::activate(unsigned n) {
227 if (ActiveNodes->test(n))
228 return;
229 ActiveNodes->set(n);
230 nodes[n].clear();
231
232 // Very large bundles usually come from big switches, indirect branches,
233 // landing pads, or loops with many 'continue' statements. It is difficult to
234 // allocate registers when so many different blocks are involved.
235 //
236 // Give a small negative bias to large bundles such that a substantial
237 // fraction of the connected blocks need to be interested before we consider
238 // expanding the region through the bundle. This helps compile time by
239 // limiting the number of blocks visited and the number of links in the
240 // Hopfield network.
241 if (bundles->getBlocks(n).size() > 100) {
242 nodes[n].BiasP = 0;
243 nodes[n].BiasN = (MBFI->getEntryFreq() / 16);
244 }
245 }
246
247
248 /// addConstraints - Compute node biases and weights from a set of constraints.
249 /// Set a bit in NodeMask for each active node.
addConstraints(ArrayRef<BlockConstraint> LiveBlocks)250 void SpillPlacement::addConstraints(ArrayRef<BlockConstraint> LiveBlocks) {
251 for (ArrayRef<BlockConstraint>::iterator I = LiveBlocks.begin(),
252 E = LiveBlocks.end(); I != E; ++I) {
253 BlockFrequency Freq = BlockFrequencies[I->Number];
254
255 // Live-in to block?
256 if (I->Entry != DontCare) {
257 unsigned ib = bundles->getBundle(I->Number, 0);
258 activate(ib);
259 nodes[ib].addBias(Freq, I->Entry);
260 }
261
262 // Live-out from block?
263 if (I->Exit != DontCare) {
264 unsigned ob = bundles->getBundle(I->Number, 1);
265 activate(ob);
266 nodes[ob].addBias(Freq, I->Exit);
267 }
268 }
269 }
270
271 /// addPrefSpill - Same as addConstraints(PrefSpill)
addPrefSpill(ArrayRef<unsigned> Blocks,bool Strong)272 void SpillPlacement::addPrefSpill(ArrayRef<unsigned> Blocks, bool Strong) {
273 for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
274 I != E; ++I) {
275 BlockFrequency Freq = BlockFrequencies[*I];
276 if (Strong)
277 Freq += Freq;
278 unsigned ib = bundles->getBundle(*I, 0);
279 unsigned ob = bundles->getBundle(*I, 1);
280 activate(ib);
281 activate(ob);
282 nodes[ib].addBias(Freq, PrefSpill);
283 nodes[ob].addBias(Freq, PrefSpill);
284 }
285 }
286
addLinks(ArrayRef<unsigned> Links)287 void SpillPlacement::addLinks(ArrayRef<unsigned> Links) {
288 for (ArrayRef<unsigned>::iterator I = Links.begin(), E = Links.end(); I != E;
289 ++I) {
290 unsigned Number = *I;
291 unsigned ib = bundles->getBundle(Number, 0);
292 unsigned ob = bundles->getBundle(Number, 1);
293
294 // Ignore self-loops.
295 if (ib == ob)
296 continue;
297 activate(ib);
298 activate(ob);
299 if (nodes[ib].Links.empty() && !nodes[ib].mustSpill())
300 Linked.push_back(ib);
301 if (nodes[ob].Links.empty() && !nodes[ob].mustSpill())
302 Linked.push_back(ob);
303 BlockFrequency Freq = BlockFrequencies[Number];
304 nodes[ib].addLink(ob, Freq);
305 nodes[ob].addLink(ib, Freq);
306 }
307 }
308
scanActiveBundles()309 bool SpillPlacement::scanActiveBundles() {
310 Linked.clear();
311 RecentPositive.clear();
312 for (int n = ActiveNodes->find_first(); n>=0; n = ActiveNodes->find_next(n)) {
313 nodes[n].update(nodes);
314 // A node that must spill, or a node without any links is not going to
315 // change its value ever again, so exclude it from iterations.
316 if (nodes[n].mustSpill())
317 continue;
318 if (!nodes[n].Links.empty())
319 Linked.push_back(n);
320 if (nodes[n].preferReg())
321 RecentPositive.push_back(n);
322 }
323 return !RecentPositive.empty();
324 }
325
/// iterate - Repeatedly update the Hopfield nodes until stability or the
/// maximum number of iterations is reached.
/// @param Linked - Numbers of linked nodes that need updating.
void SpillPlacement::iterate() {
  // First update the recently positive nodes. They have likely received new
  // negative bias that will turn them off.
  while (!RecentPositive.empty())
    nodes[RecentPositive.pop_back_val()].update(nodes);

  // Without links, no node can influence another; nothing left to propagate.
  if (Linked.empty())
    return;

  // Run up to 10 iterations. The edge bundle numbering is closely related to
  // basic block numbering, so there is a strong tendency towards chains of
  // linked nodes with sequential numbers. By scanning the linked nodes
  // backwards and forwards, we make it very likely that a single node can
  // affect the entire network in a single iteration. That means very fast
  // convergence, usually in a single iteration.
  for (unsigned iteration = 0; iteration != 10; ++iteration) {
    // Scan backwards, skipping the last node when iteration is not zero. When
    // iteration is not zero, the last node was just updated.
    bool Changed = false;
    for (SmallVectorImpl<unsigned>::const_reverse_iterator I =
           iteration == 0 ? Linked.rbegin() : std::next(Linked.rbegin()),
           E = Linked.rend(); I != E; ++I) {
      unsigned n = *I;
      if (nodes[n].update(nodes)) {
        Changed = true;
        if (nodes[n].preferReg())
          RecentPositive.push_back(n);
      }
    }
    // Stop when the network is stable, or as soon as a node turned positive
    // (NOTE(review): presumably so the caller can react to new positive
    // nodes before iterating further — confirm against callers).
    if (!Changed || !RecentPositive.empty())
      return;

    // Scan forwards, skipping the first node which was just updated.
    Changed = false;
    for (SmallVectorImpl<unsigned>::const_iterator I =
           std::next(Linked.begin()), E = Linked.end(); I != E; ++I) {
      unsigned n = *I;
      if (nodes[n].update(nodes)) {
        Changed = true;
        if (nodes[n].preferReg())
          RecentPositive.push_back(n);
      }
    }
    if (!Changed || !RecentPositive.empty())
      return;
  }
}
376
prepare(BitVector & RegBundles)377 void SpillPlacement::prepare(BitVector &RegBundles) {
378 Linked.clear();
379 RecentPositive.clear();
380 // Reuse RegBundles as our ActiveNodes vector.
381 ActiveNodes = &RegBundles;
382 ActiveNodes->clear();
383 ActiveNodes->resize(bundles->getNumBundles());
384 }
385
386 bool
finish()387 SpillPlacement::finish() {
388 assert(ActiveNodes && "Call prepare() first");
389
390 // Write preferences back to ActiveNodes.
391 bool Perfect = true;
392 for (int n = ActiveNodes->find_first(); n>=0; n = ActiveNodes->find_next(n))
393 if (!nodes[n].preferReg()) {
394 ActiveNodes->reset(n);
395 Perfect = false;
396 }
397 ActiveNodes = nullptr;
398 return Perfect;
399 }
400