• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 //===-- Local.h - Functions to perform local transformations ----*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This family of functions perform various local transformations to the
11 // program.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #ifndef LLVM_TRANSFORMS_UTILS_LOCAL_H
16 #define LLVM_TRANSFORMS_UTILS_LOCAL_H
17 
18 #include "llvm/Analysis/AliasAnalysis.h"
19 #include "llvm/IR/DataLayout.h"
20 #include "llvm/IR/Dominators.h"
21 #include "llvm/IR/GetElementPtrTypeIterator.h"
22 #include "llvm/IR/IRBuilder.h"
23 #include "llvm/IR/Operator.h"
24 
25 namespace llvm {
26 
27 class User;
28 class BasicBlock;
29 class Function;
30 class BranchInst;
31 class Instruction;
32 class DbgDeclareInst;
33 class StoreInst;
34 class LoadInst;
35 class Value;
36 class PHINode;
37 class AllocaInst;
38 class AssumptionCache;
39 class ConstantExpr;
40 class DataLayout;
41 class TargetLibraryInfo;
42 class TargetTransformInfo;
43 class DIBuilder;
44 class DominatorTree;
45 
46 template<typename T> class SmallVectorImpl;
47 
48 //===----------------------------------------------------------------------===//
49 //  Local constant propagation.
50 //
51 
52 /// ConstantFoldTerminator - If a terminator instruction is predicated on a
53 /// constant value, convert it into an unconditional branch to the constant
54 /// destination.  This is a nontrivial operation because the successors of this
55 /// basic block must have their PHI nodes updated.
56 /// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
57 /// conditions and indirectbr addresses this might make dead if
58 /// DeleteDeadConditions is true.
59 bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false,
60                             const TargetLibraryInfo *TLI = nullptr);
61 
62 //===----------------------------------------------------------------------===//
63 //  Local dead code elimination.
64 //
65 
66 /// isInstructionTriviallyDead - Return true if the result produced by the
67 /// instruction is not used, and the instruction has no side effects.
68 ///
69 bool isInstructionTriviallyDead(Instruction *I,
70                                 const TargetLibraryInfo *TLI = nullptr);
71 
72 /// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
73 /// trivially dead instruction, delete it.  If that makes any of its operands
74 /// trivially dead, delete them too, recursively.  Return true if any
75 /// instructions were deleted.
76 bool RecursivelyDeleteTriviallyDeadInstructions(Value *V,
77                                         const TargetLibraryInfo *TLI = nullptr);
78 
79 /// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
80 /// dead PHI node, due to being a def-use chain of single-use nodes that
81 /// either forms a cycle or is terminated by a trivially dead instruction,
82 /// delete it.  If that makes any of its operands trivially dead, delete them
83 /// too, recursively.  Return true if a change was made.
84 bool RecursivelyDeleteDeadPHINode(PHINode *PN,
85                                   const TargetLibraryInfo *TLI = nullptr);
86 
87 /// SimplifyInstructionsInBlock - Scan the specified basic block and try to
88 /// simplify any instructions in it and recursively delete dead instructions.
89 ///
/// This returns true if it changed the code.  Note that it can delete
/// instructions in other blocks as well as in this block.
92 bool SimplifyInstructionsInBlock(BasicBlock *BB,
93                                  const TargetLibraryInfo *TLI = nullptr);
94 
95 //===----------------------------------------------------------------------===//
96 //  Control Flow Graph Restructuring.
97 //
98 
99 /// RemovePredecessorAndSimplify - Like BasicBlock::removePredecessor, this
100 /// method is called when we're about to delete Pred as a predecessor of BB.  If
101 /// BB contains any PHI nodes, this drops the entries in the PHI nodes for Pred.
102 ///
103 /// Unlike the removePredecessor method, this attempts to simplify uses of PHI
104 /// nodes that collapse into identity values.  For example, if we have:
105 ///   x = phi(1, 0, 0, 0)
106 ///   y = and x, z
107 ///
108 /// .. and delete the predecessor corresponding to the '1', this will attempt to
109 /// recursively fold the 'and' to 0.
110 void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred);
111 
112 /// MergeBasicBlockIntoOnlyPred - BB is a block with one predecessor and its
113 /// predecessor is known to have one successor (BB!).  Eliminate the edge
114 /// between them, moving the instructions in the predecessor into BB.  This
115 /// deletes the predecessor block.
116 ///
117 void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, DominatorTree *DT = nullptr);
118 
119 /// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an
120 /// unconditional branch, and contains no instructions other than PHI nodes,
121 /// potential debug intrinsics and the branch.  If possible, eliminate BB by
122 /// rewriting all the predecessors to branch to the successor block and return
123 /// true.  If we can't transform, return false.
124 bool TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB);
125 
126 /// EliminateDuplicatePHINodes - Check for and eliminate duplicate PHI
127 /// nodes in this block. This doesn't try to be clever about PHI nodes
128 /// which differ only in the order of the incoming values, but instcombine
129 /// orders them so it usually won't matter.
130 ///
131 bool EliminateDuplicatePHINodes(BasicBlock *BB);
132 
133 /// SimplifyCFG - This function is used to do simplification of a CFG.  For
134 /// example, it adjusts branches to branches to eliminate the extra hop, it
135 /// eliminates unreachable basic blocks, and does other "peephole" optimization
136 /// of the CFG.  It returns true if a modification was made, possibly deleting
137 /// the basic block that was pointed to.
138 ///
139 bool SimplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI,
140                  unsigned BonusInstThreshold, AssumptionCache *AC = nullptr);
141 
/// FlattenCFG - This function is used to flatten a CFG.  For
/// example, it uses parallel-and and parallel-or mode to collapse
/// if-conditions and merge if-regions with identical statements.
///
146 bool FlattenCFG(BasicBlock *BB, AliasAnalysis *AA = nullptr);
147 
148 /// FoldBranchToCommonDest - If this basic block is ONLY a setcc and a branch,
149 /// and if a predecessor branches to us and one of our successors, fold the
150 /// setcc into the predecessor and use logical operations to pick the right
151 /// destination.
152 bool FoldBranchToCommonDest(BranchInst *BI, unsigned BonusInstThreshold = 1);
153 
154 /// DemoteRegToStack - This function takes a virtual register computed by an
155 /// Instruction and replaces it with a slot in the stack frame, allocated via
156 /// alloca.  This allows the CFG to be changed around without fear of
157 /// invalidating the SSA information for the value.  It returns the pointer to
158 /// the alloca inserted to create a stack slot for X.
159 ///
160 AllocaInst *DemoteRegToStack(Instruction &X,
161                              bool VolatileLoads = false,
162                              Instruction *AllocaPoint = nullptr);
163 
164 /// DemotePHIToStack - This function takes a virtual register computed by a phi
165 /// node and replaces it with a slot in the stack frame, allocated via alloca.
166 /// The phi node is deleted and it returns the pointer to the alloca inserted.
167 AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = nullptr);
168 
169 /// getOrEnforceKnownAlignment - If the specified pointer has an alignment that
170 /// we can determine, return it, otherwise return 0.  If PrefAlign is specified,
171 /// and it is more than the alignment of the ultimate object, see if we can
172 /// increase the alignment of the ultimate object, making this check succeed.
173 unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
174                                     const DataLayout &DL,
175                                     const Instruction *CxtI = nullptr,
176                                     AssumptionCache *AC = nullptr,
177                                     const DominatorTree *DT = nullptr);
178 
179 /// getKnownAlignment - Try to infer an alignment for the specified pointer.
180 static inline unsigned getKnownAlignment(Value *V, const DataLayout &DL,
181                                          const Instruction *CxtI = nullptr,
182                                          AssumptionCache *AC = nullptr,
183                                          const DominatorTree *DT = nullptr) {
184   return getOrEnforceKnownAlignment(V, 0, DL, CxtI, AC, DT);
185 }
186 
/// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the
/// code necessary to compute the offset from the base pointer (without adding
/// in the base pointer).  Return the result as a signed integer of intptr size.
/// When NoAssumptions is true, no assumptions about index computation not
/// overflowing are made.
template <typename IRBuilderTy>
Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
                     bool NoAssumptions = false) {
  GEPOperator *GEPOp = cast<GEPOperator>(GEP);
  // Accumulate the offset in the pointer-sized integer type for the GEP's
  // pointer type, starting from zero.
  Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
  Value *Result = Constant::getNullValue(IntPtrTy);

  // If the GEP is inbounds, we know that none of the addressing operations will
  // overflow in an unsigned sense.
  bool isInBounds = GEPOp->isInBounds() && !NoAssumptions;

  // Build a mask for high order bits, so element sizes are truncated to the
  // pointer width before being folded into the offset.
  unsigned IntPtrWidth = IntPtrTy->getScalarType()->getIntegerBitWidth();
  uint64_t PtrSizeMask = ~0ULL >> (64 - IntPtrWidth);

  // Walk every index operand (operand 0 is the base pointer and is skipped),
  // adding each index's byte contribution into Result.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
       ++i, ++GTI) {
    Value *Op = *i;
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
    if (Constant *OpC = dyn_cast<Constant>(Op)) {
      // A constant-zero index contributes nothing to the offset.
      if (OpC->isZeroValue())
        continue;

      // Handle a struct index, which adds its field offset to the pointer.
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        // A vector of struct indices is assumed to be a splat here; use its
        // scalar value (the cast below asserts it is a ConstantInt).
        if (OpC->getType()->isVectorTy())
          OpC = OpC->getSplatValue();

        uint64_t OpValue = cast<ConstantInt>(OpC)->getZExtValue();
        Size = DL.getStructLayout(STy)->getElementOffset(OpValue);

        if (Size)
          Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
                                      GEP->getName()+".offs");
        continue;
      }

      // Constant sequential (array/pointer) index: fold index * element-size
      // into a single constant.  The index is sign-extended to intptr width.
      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
      Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
      Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
      // Emit an add instruction.
      Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
      continue;
    }
    // Non-constant index: convert to correct type (sign-extending), then
    // scale by the element size.
    if (Op->getType() != IntPtrTy)
      Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
    if (Size != 1) {
      // We'll let instcombine(mul) convert this to a shl if possible.
      Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
                              GEP->getName()+".idx", isInBounds /*NUW*/);
    }

    // Emit an add instruction.
    Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
  }
  return Result;
}
251 
//===----------------------------------------------------------------------===//
//  Dbg Intrinsic utilities
//
255 
/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
/// that has an associated llvm.dbg.declare intrinsic.
258 bool ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
259                                      StoreInst *SI, DIBuilder &Builder);
260 
/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
/// that has an associated llvm.dbg.declare intrinsic.
263 bool ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
264                                      LoadInst *LI, DIBuilder &Builder);
265 
266 /// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
267 /// of llvm.dbg.value intrinsics.
268 bool LowerDbgDeclare(Function &F);
269 
270 /// FindAllocaDbgDeclare - Finds the llvm.dbg.declare intrinsic corresponding to
271 /// an alloca, if any.
272 DbgDeclareInst *FindAllocaDbgDeclare(Value *V);
273 
274 /// \brief Replaces llvm.dbg.declare instruction when the address it describes
275 /// is replaced with a new value. If Deref is true, an additional DW_OP_deref is
276 /// prepended to the expression. If Offset is non-zero, a constant displacement
277 /// is added to the expression (after the optional Deref). Offset can be
278 /// negative.
279 bool replaceDbgDeclare(Value *Address, Value *NewAddress,
280                        Instruction *InsertBefore, DIBuilder &Builder,
281                        bool Deref, int Offset);
282 
283 /// \brief Replaces llvm.dbg.declare instruction when the alloca it describes
284 /// is replaced with a new value. If Deref is true, an additional DW_OP_deref is
285 /// prepended to the expression. If Offset is non-zero, a constant displacement
286 /// is added to the expression (after the optional Deref). Offset can be
287 /// negative. New llvm.dbg.declare is inserted immediately before AI.
288 bool replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
289                                 DIBuilder &Builder, bool Deref, int Offset = 0);
290 
291 /// \brief Insert an unreachable instruction before the specified
292 /// instruction, making it and the rest of the code in the block dead.
293 void changeToUnreachable(Instruction *I, bool UseLLVMTrap);
294 
295 /// Replace 'BB's terminator with one that does not have an unwind successor
296 /// block.  Rewrites `invoke` to `call`, etc.  Updates any PHIs in unwind
297 /// successor.
298 ///
299 /// \param BB  Block whose terminator will be replaced.  Its terminator must
300 ///            have an unwind successor.
301 void removeUnwindEdge(BasicBlock *BB);
302 
303 /// \brief Remove all blocks that can not be reached from the function's entry.
304 ///
305 /// Returns true if any basic block was removed.
306 bool removeUnreachableBlocks(Function &F);
307 
308 /// \brief Combine the metadata of two instructions so that K can replace J
309 ///
310 /// Metadata not listed as known via KnownIDs is removed
311 void combineMetadata(Instruction *K, const Instruction *J, ArrayRef<unsigned> KnownIDs);
312 
313 /// \brief Replace each use of 'From' with 'To' if that use is dominated by
314 /// the given edge.  Returns the number of replacements made.
315 unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT,
316                                   const BasicBlockEdge &Edge);
317 /// \brief Replace each use of 'From' with 'To' if that use is dominated by
318 /// the given BasicBlock. Returns the number of replacements made.
319 unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT,
320                                   const BasicBlock *BB);
321 
322 
323 /// \brief Return true if the CallSite CS calls a gc leaf function.
324 ///
325 /// A leaf function is a function that does not safepoint the thread during its
/// execution.  During a call or invoke to such a function, the caller's stack
327 /// does not have to be made parseable.
328 ///
329 /// Most passes can and should ignore this information, and it is only used
330 /// during lowering by the GC infrastructure.
331 bool callsGCLeafFunction(ImmutableCallSite CS);
332 
333 } // End llvm namespace
334 
335 #endif
336