//===-- Local.h - Functions to perform local transformations ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This family of functions performs various local transformations to the
// program.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_LOCAL_H
#define LLVM_TRANSFORMS_UTILS_LOCAL_H

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Operator.h"

namespace llvm {

class User;
class BasicBlock;
class Function;
class BranchInst;
class Instruction;
class DbgDeclareInst;
class StoreInst;
class LoadInst;
class Value;
class PHINode;
class AllocaInst;
class AssumptionCache;
class ConstantExpr;
class DataLayout;
class TargetLibraryInfo;
class TargetTransformInfo;
class DIBuilder;
class AliasAnalysis;
class DominatorTree;

template<typename T> class SmallVectorImpl;

//===----------------------------------------------------------------------===//
//  Local constant propagation.
//

/// ConstantFoldTerminator - If a terminator instruction is predicated on a
/// constant value, convert it into an unconditional branch to the constant
/// destination. This is a nontrivial operation because the successors of this
/// basic block must have their PHI nodes updated.
/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
/// conditions and indirectbr addresses that this might make dead if
/// DeleteDeadConditions is true.
bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false,
                            const TargetLibraryInfo *TLI = nullptr);

//===----------------------------------------------------------------------===//
//  Local dead code elimination.
//

/// isInstructionTriviallyDead - Return true if the result produced by the
/// instruction is not used, and the instruction has no side effects.
///
bool isInstructionTriviallyDead(Instruction *I,
                                const TargetLibraryInfo *TLI = nullptr);

/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
/// trivially dead instruction, delete it. If that makes any of its operands
/// trivially dead, delete them too, recursively. Return true if any
/// instructions were deleted.
bool RecursivelyDeleteTriviallyDeadInstructions(Value *V,
                                        const TargetLibraryInfo *TLI = nullptr);

/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
/// dead PHI node, due to being a def-use chain of single-use nodes that
/// either forms a cycle or is terminated by a trivially dead instruction,
/// delete it. If that makes any of its operands trivially dead, delete them
/// too, recursively. Return true if a change was made.
bool RecursivelyDeleteDeadPHINode(PHINode *PN,
                                  const TargetLibraryInfo *TLI = nullptr);

/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
/// simplify any instructions in it and recursively delete dead instructions.
///
/// This returns true if it changed the code; note that it can delete
/// instructions in other blocks as well as in this block.
bool SimplifyInstructionsInBlock(BasicBlock *BB,
                                 const TargetLibraryInfo *TLI = nullptr);
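
// Example (illustrative sketch, not part of this header): the usual
// replace-then-clean-up pattern built from the helpers above. `I`, `NewV` and
// `TLI` are assumed to be an existing Instruction*, a replacement Value* and
// a TargetLibraryInfo* available in the caller's context.
//
//   I->replaceAllUsesWith(NewV);
//   // Deletes I if it is now trivially dead, then recursively deletes any of
//   // its operands that become trivially dead as a result.
//   RecursivelyDeleteTriviallyDeadInstructions(I, TLI);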

//===----------------------------------------------------------------------===//
//  Control Flow Graph Restructuring.
//

/// RemovePredecessorAndSimplify - Like BasicBlock::removePredecessor, this
/// method is called when we're about to delete Pred as a predecessor of BB. If
/// BB contains any PHI nodes, this drops the entries in the PHI nodes for Pred.
///
/// Unlike the removePredecessor method, this attempts to simplify uses of PHI
/// nodes that collapse into identity values. For example, if we have:
///   x = phi(1, 0, 0, 0)
///   y = and x, z
///
/// ... and delete the predecessor corresponding to the '1', this will attempt
/// to recursively fold the 'and' to 0.
void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred);

/// MergeBasicBlockIntoOnlyPred - BB is a block with one predecessor and its
/// predecessor is known to have one successor (BB!). Eliminate the edge
/// between them, moving the instructions in the predecessor into BB. This
/// deletes the predecessor block.
///
void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, DominatorTree *DT = nullptr);

/// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an
/// unconditional branch, and contains no instructions other than PHI nodes,
/// potential debug intrinsics and the branch. If possible, eliminate BB by
/// rewriting all the predecessors to branch to the successor block and return
/// true. If we can't transform, return false.
bool TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB);

/// EliminateDuplicatePHINodes - Check for and eliminate duplicate PHI
/// nodes in this block. This doesn't try to be clever about PHI nodes
/// which differ only in the order of the incoming values, but instcombine
/// orders them so it usually won't matter.
///
bool EliminateDuplicatePHINodes(BasicBlock *BB);

/// SimplifyCFG - This function is used to do simplification of a CFG. For
/// example, it adjusts branches to branches to eliminate the extra hop, it
/// eliminates unreachable basic blocks, and does other "peephole" optimization
/// of the CFG. It returns true if a modification was made, possibly deleting
/// the basic block that was pointed to.
///
bool SimplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI,
                 unsigned BonusInstThreshold, AssumptionCache *AC = nullptr);

/// FlattenCFG - This function is used to flatten a CFG. For example, it uses
/// parallel-and and parallel-or mode to collapse if-conditions and merge
/// if-regions with identical statements.
///
bool FlattenCFG(BasicBlock *BB, AliasAnalysis *AA = nullptr);

/// FoldBranchToCommonDest - If this basic block is ONLY a setcc and a branch,
/// and if a predecessor branches to us and one of our successors, fold the
/// setcc into the predecessor and use logical operations to pick the right
/// destination.
bool FoldBranchToCommonDest(BranchInst *BI, unsigned BonusInstThreshold = 1);
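
// Illustrative sketch (not part of this header) of how SimplifyCFG is
// typically driven: visit every block and repeat until nothing changes, since
// simplifying one block often exposes opportunities in its neighbours. `F`,
// `TTI` and `AC` are assumed to be supplied by the caller (e.g. by the
// corresponding analysis passes), and the bonus-instruction threshold of 1 is
// just the same default FoldBranchToCommonDest uses. The iterator is advanced
// before the call because SimplifyCFG may erase the block it is given.
//
//   bool Changed = false, LocalChange = true;
//   while (LocalChange) {
//     LocalChange = false;
//     for (Function::iterator BBIt = F.begin(); BBIt != F.end();)
//       LocalChange |= SimplifyCFG(&*BBIt++, TTI,
//                                  /*BonusInstThreshold=*/1, AC);
//     Changed |= LocalChange;
//   }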

/// DemoteRegToStack - This function takes a virtual register computed by an
/// Instruction and replaces it with a slot in the stack frame, allocated via
/// alloca. This allows the CFG to be changed around without fear of
/// invalidating the SSA information for the value. It returns the pointer to
/// the alloca inserted to create a stack slot for X.
///
AllocaInst *DemoteRegToStack(Instruction &X,
                             bool VolatileLoads = false,
                             Instruction *AllocaPoint = nullptr);

/// DemotePHIToStack - This function takes a virtual register computed by a phi
/// node and replaces it with a slot in the stack frame, allocated via alloca.
/// The phi node is deleted and it returns the pointer to the alloca inserted.
AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = nullptr);

/// getOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it, otherwise return 0. If PrefAlign is specified,
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
                                    const DataLayout &DL,
                                    const Instruction *CxtI = nullptr,
                                    AssumptionCache *AC = nullptr,
                                    const DominatorTree *DT = nullptr);

/// getKnownAlignment - Try to infer an alignment for the specified pointer.
static inline unsigned getKnownAlignment(Value *V, const DataLayout &DL,
                                         const Instruction *CxtI = nullptr,
                                         AssumptionCache *AC = nullptr,
                                         const DominatorTree *DT = nullptr) {
  return getOrEnforceKnownAlignment(V, 0, DL, CxtI, AC, DT);
}

/// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the
/// code necessary to compute the offset from the base pointer (without adding
/// in the base pointer). Return the result as a signed integer of intptr size.
/// When NoAssumptions is true, no assumptions about the index computations not
/// overflowing are made.
template <typename IRBuilderTy>
Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
                     bool NoAssumptions = false) {
  GEPOperator *GEPOp = cast<GEPOperator>(GEP);
  Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
  Value *Result = Constant::getNullValue(IntPtrTy);

  // If the GEP is inbounds, we know that none of the addressing operations will
  // overflow in an unsigned sense.
  bool isInBounds = GEPOp->isInBounds() && !NoAssumptions;

  // Build a mask for high order bits.
  unsigned IntPtrWidth = IntPtrTy->getScalarType()->getIntegerBitWidth();
  uint64_t PtrSizeMask = ~0ULL >> (64 - IntPtrWidth);

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
       ++i, ++GTI) {
    Value *Op = *i;
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
    if (Constant *OpC = dyn_cast<Constant>(Op)) {
      if (OpC->isZeroValue())
        continue;

      // Handle a struct index, which adds its field offset to the pointer.
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        if (OpC->getType()->isVectorTy())
          OpC = OpC->getSplatValue();

        uint64_t OpValue = cast<ConstantInt>(OpC)->getZExtValue();
        Size = DL.getStructLayout(STy)->getElementOffset(OpValue);

        if (Size)
          Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
                                      GEP->getName()+".offs");
        continue;
      }

      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
      Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
      Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
      // Emit an add instruction.
      Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
      continue;
    }
    // Convert to correct type.
    if (Op->getType() != IntPtrTy)
      Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
    if (Size != 1) {
      // We'll let instcombine(mul) convert this to a shl if possible.
      Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
                              GEP->getName()+".idx", isInBounds /*NUW*/);
    }

    // Emit an add instruction.
    Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
  }
  return Result;
}
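
// Illustrative sketch (not part of this header): EmitGEPOffset is typically
// used to turn a pointer computation into plain integer arithmetic, e.g. when
// comparing two pointers derived from the same base. `GEP` is assumed to be
// an existing GetElementPtrInst and `DL` the module's DataLayout; the builder
// is positioned at the GEP so the emitted arithmetic is inserted right before
// it.
//
//   IRBuilder<> Builder(GEP);
//   Value *Offset = EmitGEPOffset(&Builder, DL, GEP);
//   // Offset is a signed integer of pointer width holding the byte offset
//   // that GEP adds to its base pointer.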

///===---------------------------------------------------------------------===//
///  Dbg Intrinsic utilities
///

/// Inserts an llvm.dbg.value intrinsic before a store to an alloca'd value
/// that has an associated llvm.dbg.declare intrinsic.
bool ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
                                     StoreInst *SI, DIBuilder &Builder);

/// Inserts an llvm.dbg.value intrinsic before a load of an alloca'd value
/// that has an associated llvm.dbg.declare intrinsic.
bool ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
                                     LoadInst *LI, DIBuilder &Builder);

/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into an appropriate
/// set of llvm.dbg.value intrinsics.
bool LowerDbgDeclare(Function &F);

/// FindAllocaDbgDeclare - Finds the llvm.dbg.declare intrinsic corresponding to
/// an alloca, if any.
DbgDeclareInst *FindAllocaDbgDeclare(Value *V);

/// \brief Replaces llvm.dbg.declare instruction when an alloca is replaced with
/// a new value. If Deref is true, an additional DW_OP_deref is prepended to the
/// expression.
bool replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
                                DIBuilder &Builder, bool Deref);

/// \brief Remove all blocks that cannot be reached from the function's entry.
///
/// Returns true if any basic block was removed.
bool removeUnreachableBlocks(Function &F);

/// \brief Combine the metadata of two instructions so that K can replace J.
///
/// Metadata not listed as known via KnownIDs is removed.
void combineMetadata(Instruction *K, const Instruction *J,
                     ArrayRef<unsigned> KnownIDs);

} // End llvm namespace

#endif