//===- SimplifyCFGPass.cpp - CFG Simplification Pass ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements dead code elimination and basic block merging, along
// with a collection of other peephole control flow optimizations. For example:
//
//   * Removes basic blocks with no predecessors.
//   * Merges a basic block into its predecessor if there is only one and the
//     predecessor only has one successor.
//   * Eliminates PHI nodes for basic blocks with a single predecessor.
//   * Eliminates a basic block that only contains an unconditional branch.
//   * Changes invoke instructions to nounwind functions to be calls.
//   * Changes things like "if (x) if (y)" into "if (x&y)".
//   * etc..
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "simplifycfg"
25 #include "llvm/Transforms/Scalar.h"
26 #include "llvm/Transforms/Utils/Local.h"
27 #include "llvm/Constants.h"
28 #include "llvm/Instructions.h"
29 #include "llvm/IntrinsicInst.h"
30 #include "llvm/Module.h"
31 #include "llvm/Attributes.h"
32 #include "llvm/Support/CFG.h"
33 #include "llvm/Pass.h"
34 #include "llvm/Target/TargetData.h"
35 #include "llvm/ADT/SmallVector.h"
36 #include "llvm/ADT/SmallPtrSet.h"
37 #include "llvm/ADT/Statistic.h"
38 using namespace llvm;
39
40 STATISTIC(NumSimpl, "Number of blocks simplified");
41
42 namespace {
43 struct CFGSimplifyPass : public FunctionPass {
44 static char ID; // Pass identification, replacement for typeid
CFGSimplifyPass__anon81e7bc3e0111::CFGSimplifyPass45 CFGSimplifyPass() : FunctionPass(ID) {
46 initializeCFGSimplifyPassPass(*PassRegistry::getPassRegistry());
47 }
48
49 virtual bool runOnFunction(Function &F);
50 };
51 }
52
53 char CFGSimplifyPass::ID = 0;
54 INITIALIZE_PASS(CFGSimplifyPass, "simplifycfg",
55 "Simplify the CFG", false, false)
56
57 // Public interface to the CFGSimplification pass
createCFGSimplificationPass()58 FunctionPass *llvm::createCFGSimplificationPass() {
59 return new CFGSimplifyPass();
60 }

/// ChangeToUnreachable - Insert an unreachable instruction before the specified
/// instruction, making it and the rest of the code in the block dead.
static void ChangeToUnreachable(Instruction *I, bool UseLLVMTrap) {
  BasicBlock *BB = I->getParent();
  // Loop over all of the successors, removing BB's entry from any PHI
  // nodes.
  for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
    (*SI)->removePredecessor(BB);

  // Insert a call to llvm.trap right before this. This turns the undefined
  // behavior into a hard fail instead of falling through into random code.
  if (UseLLVMTrap) {
    Function *TrapFn =
      Intrinsic::getDeclaration(BB->getParent()->getParent(), Intrinsic::trap);
    CallInst *CallTrap = CallInst::Create(TrapFn, "", I);
    CallTrap->setDebugLoc(I->getDebugLoc());
  }
  new UnreachableInst(I->getContext(), I);

  // All instructions after this are dead.
  BasicBlock::iterator BBI = I, BBE = BB->end();
  while (BBI != BBE) {
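    // A dead instruction may still have uses (e.g. in blocks that are about
    // to become unreachable), so replace those uses with undef before erasing.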
    if (!BBI->use_empty())
      BBI->replaceAllUsesWith(UndefValue::get(BBI->getType()));
    BB->getInstList().erase(BBI++);
  }
}

/// ChangeToCall - Convert the specified invoke into a normal call.
static void ChangeToCall(InvokeInst *II) {
  BasicBlock *BB = II->getParent();
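  // The last three operands of an invoke are the callee and its two successor
  // blocks; everything before them is the argument list.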
  SmallVector<Value*, 8> Args(II->op_begin(), II->op_end() - 3);
  CallInst *NewCall = CallInst::Create(II->getCalledValue(), Args, "", II);
  NewCall->takeName(II);
  NewCall->setCallingConv(II->getCallingConv());
  NewCall->setAttributes(II->getAttributes());
  NewCall->setDebugLoc(II->getDebugLoc());
  II->replaceAllUsesWith(NewCall);

  // Follow the call by a branch to the normal destination.
  BranchInst::Create(II->getNormalDest(), II);

  // Update PHI nodes in the unwind destination
  II->getUnwindDest()->removePredecessor(BB);
  BB->getInstList().erase(II);
}

static bool MarkAliveBlocks(BasicBlock *BB,
                            SmallPtrSet<BasicBlock*, 128> &Reachable) {

  SmallVector<BasicBlock*, 128> Worklist;
  Worklist.push_back(BB);
  bool Changed = false;
  do {
    BB = Worklist.pop_back_val();

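    // insert() returns false if BB is already in the set, so each block is
    // scanned only once even when the CFG contains cycles.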
    if (!Reachable.insert(BB))
      continue;

    // Do a quick scan of the basic block, turning any obviously unreachable
    // instructions into LLVM unreachable insts. The instruction combining pass
    // canonicalizes unreachable insts into stores to null or undef.
    for (BasicBlock::iterator BBI = BB->begin(), E = BB->end();
         BBI != E; ++BBI) {
      if (CallInst *CI = dyn_cast<CallInst>(BBI)) {
        if (CI->doesNotReturn()) {
          // If we found a call to a no-return function, insert an unreachable
          // instruction after it. Make sure there isn't *already* one there
          // though.
          ++BBI;
          if (!isa<UnreachableInst>(BBI)) {
            // Don't insert a call to llvm.trap right before the unreachable.
            ChangeToUnreachable(BBI, false);
            Changed = true;
          }
          break;
        }
      }

      // Store to undef and store to null are undefined and used to signal that
      // they should be changed to unreachable by passes that can't modify the
      // CFG.
      if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
        // Don't touch volatile stores.
        if (SI->isVolatile()) continue;

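        // Operand 1 of a store is its pointer operand.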
        Value *Ptr = SI->getOperand(1);

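        // Only address space 0 guarantees that a null pointer is invalid, so
        // stores to null in other address spaces are left alone.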
        if (isa<UndefValue>(Ptr) ||
            (isa<ConstantPointerNull>(Ptr) &&
             SI->getPointerAddressSpace() == 0)) {
          ChangeToUnreachable(SI, true);
          Changed = true;
          break;
        }
      }
    }

    // Turn invokes that call 'nounwind' functions into ordinary calls.
    if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator()))
      if (II->doesNotThrow()) {
        ChangeToCall(II);
        Changed = true;
      }

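    // If this block's terminator branches or switches on a constant, fold it
    // down to an unconditional branch.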
    Changed |= ConstantFoldTerminator(BB, true);
    for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
      Worklist.push_back(*SI);
  } while (!Worklist.empty());
  return Changed;
}

/// RemoveUnreachableBlocksFromFn - Remove blocks that are not reachable, even
/// if they are in a dead cycle. Return true if a change was made, false
/// otherwise.
static bool RemoveUnreachableBlocksFromFn(Function &F) {
  SmallPtrSet<BasicBlock*, 128> Reachable;
  bool Changed = MarkAliveBlocks(F.begin(), Reachable);

  // If every block is reachable, there are no unreachable blocks to remove.
  if (Reachable.size() == F.size())
    return Changed;

  assert(Reachable.size() < F.size());
  NumSimpl += F.size()-Reachable.size();

  // Loop over all of the basic blocks that are not reachable, dropping all of
  // their internal references...
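  // The entry block (F.begin()) is where MarkAliveBlocks started, so it is
  // always reachable and can be skipped.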
  for (Function::iterator BB = ++F.begin(), E = F.end(); BB != E; ++BB) {
    if (Reachable.count(BB))
      continue;

    for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
      if (Reachable.count(*SI))
        (*SI)->removePredecessor(BB);
    BB->dropAllReferences();
  }

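  // With all cross-references dropped, the dead blocks can now be erased even
  // if they formed a cycle.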
  for (Function::iterator I = ++F.begin(); I != F.end();)
    if (!Reachable.count(I))
      I = F.getBasicBlockList().erase(I);
    else
      ++I;

  return true;
}

/// MergeEmptyReturnBlocks - If we have more than one empty (other than phi
/// node) return block, merge them together to promote recursive block merging.
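/// For example, a block that just does "ret i32 %a" and another that just does
/// "ret i32 %b" end up as a single return block fed by a phi over %a and %b.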
static bool MergeEmptyReturnBlocks(Function &F) {
  bool Changed = false;

  BasicBlock *RetBlock = 0;

  // Scan all the blocks in the function, looking for empty return blocks.
  for (Function::iterator BBI = F.begin(), E = F.end(); BBI != E; ) {
    BasicBlock &BB = *BBI++;

    // Only look at return blocks.
    ReturnInst *Ret = dyn_cast<ReturnInst>(BB.getTerminator());
    if (Ret == 0) continue;

    // Only look at the block if it is empty or the only other thing in it is a
    // single PHI node that is the operand to the return.
    if (Ret != &BB.front()) {
      // Check for something else in the block.
      BasicBlock::iterator I = Ret;
      --I;
      // Skip over debug info.
      while (isa<DbgInfoIntrinsic>(I) && I != BB.begin())
        --I;
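      // Give up on this block unless everything before the return is debug
      // info, or the only other instruction is a PHI at the top of the block
      // whose value is what the return returns.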
      if (!isa<DbgInfoIntrinsic>(I) &&
          (!isa<PHINode>(I) || I != BB.begin() ||
           Ret->getNumOperands() == 0 ||
           Ret->getOperand(0) != I))
        continue;
    }

    // If this is the first returning block, remember it and keep going.
    if (RetBlock == 0) {
      RetBlock = &BB;
      continue;
    }

    // Otherwise, we found a duplicate return block. Merge the two.
    Changed = true;

    // The case where the return has no operand, or where both returns produce
    // the same value, is trivial. Note that the values can't agree if there
    // are phis in the blocks.
    if (Ret->getNumOperands() == 0 ||
        Ret->getOperand(0) ==
          cast<ReturnInst>(RetBlock->getTerminator())->getOperand(0)) {
      BB.replaceAllUsesWith(RetBlock);
      BB.eraseFromParent();
      continue;
    }

    // If the canonical return block has no PHI node, create one now.
    PHINode *RetBlockPHI = dyn_cast<PHINode>(RetBlock->begin());
    if (RetBlockPHI == 0) {
      Value *InVal = cast<ReturnInst>(RetBlock->getTerminator())->getOperand(0);
      pred_iterator PB = pred_begin(RetBlock), PE = pred_end(RetBlock);
      RetBlockPHI = PHINode::Create(Ret->getOperand(0)->getType(),
                                    std::distance(PB, PE), "merge",
                                    &RetBlock->front());

      for (pred_iterator PI = PB; PI != PE; ++PI)
        RetBlockPHI->addIncoming(InVal, *PI);
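      // From here on, the canonical block returns the PHI's value.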
      RetBlock->getTerminator()->setOperand(0, RetBlockPHI);
    }

    // Turn BB into a block that just unconditionally branches to the return
    // block. This handles the case when the two return blocks have a common
    // predecessor but return different values.
    RetBlockPHI->addIncoming(Ret->getOperand(0), &BB);
    BB.getTerminator()->eraseFromParent();
    BranchInst::Create(RetBlock, &BB);
  }

  return Changed;
}

/// IterativeSimplifyCFG - Call SimplifyCFG on all the blocks in the function,
/// iterating until no more changes are made.
static bool IterativeSimplifyCFG(Function &F, const TargetData *TD) {
  bool Changed = false;
  bool LocalChange = true;
  while (LocalChange) {
    LocalChange = false;

    // Loop over all of the basic blocks and remove them if they are unneeded...
    //
    for (Function::iterator BBIt = F.begin(); BBIt != F.end(); ) {
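      // Advance the iterator before the call: SimplifyCFG may erase the block
      // it is handed.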
      if (SimplifyCFG(BBIt++, TD)) {
        LocalChange = true;
        ++NumSimpl;
      }
    }
    Changed |= LocalChange;
  }
  return Changed;
}

// It is possible that we may require multiple passes over the code to fully
// simplify the CFG.
//
bool CFGSimplifyPass::runOnFunction(Function &F) {
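  // TargetData is optional here; when it is unavailable, SimplifyCFG simply
  // does less target-aware folding.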
  const TargetData *TD = getAnalysisIfAvailable<TargetData>();
  bool EverChanged = RemoveUnreachableBlocksFromFn(F);
  EverChanged |= MergeEmptyReturnBlocks(F);
  EverChanged |= IterativeSimplifyCFG(F, TD);

  // If none of the passes changed anything, we're done.
  if (!EverChanged) return false;

  // IterativeSimplifyCFG can (rarely) make some loops dead. If this happens,
  // RemoveUnreachableBlocksFromFn is needed to nuke them, which means we should
  // iterate between the two optimizations. We structure the code like this to
  // avoid rerunning IterativeSimplifyCFG if the second pass of
  // RemoveUnreachableBlocksFromFn doesn't do anything.
  if (!RemoveUnreachableBlocksFromFn(F))
    return true;

  do {
    EverChanged = IterativeSimplifyCFG(F, TD);
    EverChanged |= RemoveUnreachableBlocksFromFn(F);
  } while (EverChanged);

  return true;
}