//===- InstCombineInternal.h - InstCombine pass internals -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file provides internal interfaces used to implement the InstCombine
/// pass.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
#define LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"

#define DEBUG_TYPE "instcombine"

namespace llvm {
class CallSite;
class DataLayout;
class DominatorTree;
class TargetLibraryInfo;
class DbgDeclareInst;
class MemIntrinsic;
class MemSetInst;

/// \brief Assign a complexity or rank value to LLVM Values.
///
/// This routine maps IR values to various complexity ranks:
///   0 -> undef
///   1 -> Constants
///   2 -> Other non-instructions
///   3 -> Arguments
///   3 -> Unary operations
///   4 -> Other instructions
static inline unsigned getComplexity(Value *V) {
  if (isa<Instruction>(V)) {
    if (BinaryOperator::isNeg(V) || BinaryOperator::isFNeg(V) ||
        BinaryOperator::isNot(V))
      return 3;
    return 4;
  }
  if (isa<Argument>(V))
    return 3;
  return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;
}
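// Illustrative sketch (an assumption about typical use, not defined in this
// header): a commutative canonicalization can compare these ranks to keep the
// simpler operand on the right-hand side of a BinaryOperator I, e.g.
//
//   if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1)))
//     I.swapOperands(); // turns `add 1, %x` into the canonical `add %x, 1`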

/// \brief Add one to a Constant
static inline Constant *AddOne(Constant *C) {
  return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
}
/// \brief Subtract one from a Constant
static inline Constant *SubOne(Constant *C) {
  return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
}

/// \brief Return true if the specified value is free to invert (apply ~ to).
/// This happens in cases where the ~ can be eliminated.  If WillInvertAllUses
/// is true, work under the assumption that the caller intends to remove all
/// uses of V and only keep uses of ~V.
///
static inline bool IsFreeToInvert(Value *V, bool WillInvertAllUses) {
  // ~(~(X)) -> X.
  if (BinaryOperator::isNot(V))
    return true;

  // Constants can be considered to be not'ed values.
  if (isa<ConstantInt>(V))
    return true;

  // Compares can be inverted if all of their uses are being modified to use the
  // ~V.
  if (isa<CmpInst>(V))
    return WillInvertAllUses;

  // If `V` is of the form `A + Constant` then `-1 - V` can be folded into `(-1
  // - Constant) - A` if we are willing to invert all of the uses.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V))
    if (BO->getOpcode() == Instruction::Add ||
        BO->getOpcode() == Instruction::Sub)
      if (isa<Constant>(BO->getOperand(0)) || isa<Constant>(BO->getOperand(1)))
        return WillInvertAllUses;

  return false;
}
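// Illustrative example (assumed usage, not taken from this header): for
//   %v = add i32 %a, 7
// IsFreeToInvert(%v, /*WillInvertAllUses=*/true) returns true, because a
// caller that rewrites every use of %v can fold the inversion away:
//   %not = xor i32 %v, -1   ; i.e. -1 - (%a + 7)
// becomes
//   %not = sub i32 -8, %a   ; (-1 - 7) - %a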


/// \brief Specific patterns of overflow check idioms that we match.
enum OverflowCheckFlavor {
  OCF_UNSIGNED_ADD,
  OCF_SIGNED_ADD,
  OCF_UNSIGNED_SUB,
  OCF_SIGNED_SUB,
  OCF_UNSIGNED_MUL,
  OCF_SIGNED_MUL,

  OCF_INVALID
};

/// \brief Returns the OverflowCheckFlavor corresponding to an overflow_with_op
/// intrinsic.
static inline OverflowCheckFlavor
IntrinsicIDToOverflowCheckFlavor(unsigned ID) {
  switch (ID) {
  default:
    return OCF_INVALID;
  case Intrinsic::uadd_with_overflow:
    return OCF_UNSIGNED_ADD;
  case Intrinsic::sadd_with_overflow:
    return OCF_SIGNED_ADD;
  case Intrinsic::usub_with_overflow:
    return OCF_UNSIGNED_SUB;
  case Intrinsic::ssub_with_overflow:
    return OCF_SIGNED_SUB;
  case Intrinsic::umul_with_overflow:
    return OCF_UNSIGNED_MUL;
  case Intrinsic::smul_with_overflow:
    return OCF_SIGNED_MUL;
  }
}

/// \brief An IRBuilder inserter that adds new instructions to the instcombine
/// worklist.
class LLVM_LIBRARY_VISIBILITY InstCombineIRInserter
    : public IRBuilderDefaultInserter {
  InstCombineWorklist &Worklist;
  AssumptionCache *AC;

public:
  InstCombineIRInserter(InstCombineWorklist &WL, AssumptionCache *AC)
      : Worklist(WL), AC(AC) {}

  void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB,
                    BasicBlock::iterator InsertPt) const {
    IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
    Worklist.Add(I);

    using namespace llvm::PatternMatch;
    if (match(I, m_Intrinsic<Intrinsic::assume>()))
      AC->registerAssumption(cast<CallInst>(I));
  }
};
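// Illustrative note (assumed usage, not part of the original header): any
// instruction created through a builder that uses this inserter, e.g.
//
//   Value *Sum = Builder->CreateAdd(A, B);
//
// goes through InsertHelper above, so it is queued on the InstCombine
// worklist automatically (and registered with the AssumptionCache when it is
// an @llvm.assume call).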

/// \brief The core instruction combiner logic.
///
/// This class provides both the logic to recursively visit instructions and
/// combine them, as well as the pass infrastructure for running this as part
/// of the LLVM pass pipeline.
class LLVM_LIBRARY_VISIBILITY InstCombiner
    : public InstVisitor<InstCombiner, Instruction *> {
  // FIXME: These members shouldn't be public.
public:
  /// \brief A worklist of the instructions that need to be simplified.
  InstCombineWorklist &Worklist;

  /// \brief An IRBuilder that automatically inserts new instructions into the
  /// worklist.
  typedef IRBuilder<TargetFolder, InstCombineIRInserter> BuilderTy;
  BuilderTy *Builder;

private:
  // Mode in which we are running the combiner.
  const bool MinimizeSize;
  /// Enable combines that trigger rarely but are costly in compile time.
  const bool ExpensiveCombines;

  AliasAnalysis *AA;

  // Required analyses.
  // FIXME: These can never be null and should be references.
  AssumptionCache *AC;
  TargetLibraryInfo *TLI;
  DominatorTree *DT;
  const DataLayout &DL;

  // Optional analyses. When non-null, these can both be used to do better
  // combining and will be updated to reflect any changes.
  LoopInfo *LI;

  bool MadeIRChange;

public:
  InstCombiner(InstCombineWorklist &Worklist, BuilderTy *Builder,
               bool MinimizeSize, bool ExpensiveCombines, AliasAnalysis *AA,
               AssumptionCache *AC, TargetLibraryInfo *TLI,
               DominatorTree *DT, const DataLayout &DL, LoopInfo *LI)
      : Worklist(Worklist), Builder(Builder), MinimizeSize(MinimizeSize),
        ExpensiveCombines(ExpensiveCombines), AA(AA), AC(AC), TLI(TLI), DT(DT),
        DL(DL), LI(LI), MadeIRChange(false) {}

  /// \brief Run the combiner over the entire worklist until it is empty.
  ///
  /// \returns true if the IR is changed.
  bool run();

  AssumptionCache *getAssumptionCache() const { return AC; }

  const DataLayout &getDataLayout() const { return DL; }

  DominatorTree *getDominatorTree() const { return DT; }

  LoopInfo *getLoopInfo() const { return LI; }

  TargetLibraryInfo *getTargetLibraryInfo() const { return TLI; }

  // Visitation implementation - Implement instruction combining for different
  // instruction types.  The semantics are as follows:
  // Return Value:
  //    null        - No change was made
  //     I          - Change was made, I is still valid, I may be dead though
  //   otherwise    - Change was made, replace I with returned instruction
  //
  Instruction *visitAdd(BinaryOperator &I);
  Instruction *visitFAdd(BinaryOperator &I);
  Value *OptimizePointerDifference(Value *LHS, Value *RHS, Type *Ty);
  Instruction *visitSub(BinaryOperator &I);
  Instruction *visitFSub(BinaryOperator &I);
  Instruction *visitMul(BinaryOperator &I);
  Value *foldFMulConst(Instruction *FMulOrDiv, Constant *C,
                       Instruction *InsertBefore);
  Instruction *visitFMul(BinaryOperator &I);
  Instruction *visitURem(BinaryOperator &I);
  Instruction *visitSRem(BinaryOperator &I);
  Instruction *visitFRem(BinaryOperator &I);
  bool SimplifyDivRemOfSelect(BinaryOperator &I);
  Instruction *commonRemTransforms(BinaryOperator &I);
  Instruction *commonIRemTransforms(BinaryOperator &I);
  Instruction *commonDivTransforms(BinaryOperator &I);
  Instruction *commonIDivTransforms(BinaryOperator &I);
  Instruction *visitUDiv(BinaryOperator &I);
  Instruction *visitSDiv(BinaryOperator &I);
  Instruction *visitFDiv(BinaryOperator &I);
  Value *simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1, bool Inverted);
  Value *FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS);
  Value *FoldAndOfFCmps(FCmpInst *LHS, FCmpInst *RHS);
  Instruction *visitAnd(BinaryOperator &I);
  Value *FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS, Instruction *CxtI);
  Value *FoldOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS);
  Instruction *FoldOrWithConstants(BinaryOperator &I, Value *Op, Value *A,
                                   Value *B, Value *C);
  Instruction *FoldXorWithConstants(BinaryOperator &I, Value *Op, Value *A,
                                    Value *B, Value *C);
  Instruction *visitOr(BinaryOperator &I);
  Instruction *visitXor(BinaryOperator &I);
  Instruction *visitShl(BinaryOperator &I);
  Instruction *visitAShr(BinaryOperator &I);
  Instruction *visitLShr(BinaryOperator &I);
  Instruction *commonShiftTransforms(BinaryOperator &I);
  Instruction *FoldFCmp_IntToFP_Cst(FCmpInst &I, Instruction *LHSI,
                                    Constant *RHSC);
  Instruction *FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
                                            GlobalVariable *GV, CmpInst &ICI,
                                            ConstantInt *AndCst = nullptr);
  Instruction *visitFCmpInst(FCmpInst &I);
  Instruction *visitICmpInst(ICmpInst &I);
  Instruction *visitICmpInstWithCastAndCast(ICmpInst &ICI);
  Instruction *visitICmpInstWithInstAndIntCst(ICmpInst &ICI, Instruction *LHS,
                                              ConstantInt *RHS);
  Instruction *FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
                              ConstantInt *DivRHS);
  Instruction *FoldICmpShrCst(ICmpInst &ICI, BinaryOperator *DivI,
                              ConstantInt *DivRHS);
  Instruction *FoldICmpCstShrCst(ICmpInst &I, Value *Op, Value *A,
                                 ConstantInt *CI1, ConstantInt *CI2);
  Instruction *FoldICmpCstShlCst(ICmpInst &I, Value *Op, Value *A,
                                 ConstantInt *CI1, ConstantInt *CI2);
  Instruction *FoldICmpAddOpCst(Instruction &ICI, Value *X, ConstantInt *CI,
                                ICmpInst::Predicate Pred);
  Instruction *FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                           ICmpInst::Predicate Cond, Instruction &I);
  Instruction *FoldAllocaCmp(ICmpInst &ICI, AllocaInst *Alloca, Value *Other);
  Instruction *FoldShiftByConstant(Value *Op0, Constant *Op1,
                                   BinaryOperator &I);
  Instruction *commonCastTransforms(CastInst &CI);
  Instruction *commonPointerCastTransforms(CastInst &CI);
  Instruction *visitTrunc(TruncInst &CI);
  Instruction *visitZExt(ZExtInst &CI);
  Instruction *visitSExt(SExtInst &CI);
  Instruction *visitFPTrunc(FPTruncInst &CI);
  Instruction *visitFPExt(CastInst &CI);
  Instruction *visitFPToUI(FPToUIInst &FI);
  Instruction *visitFPToSI(FPToSIInst &FI);
  Instruction *visitUIToFP(CastInst &CI);
  Instruction *visitSIToFP(CastInst &CI);
  Instruction *visitPtrToInt(PtrToIntInst &CI);
  Instruction *visitIntToPtr(IntToPtrInst &CI);
  Instruction *visitBitCast(BitCastInst &CI);
  Instruction *visitAddrSpaceCast(AddrSpaceCastInst &CI);
  Instruction *FoldSelectOpOp(SelectInst &SI, Instruction *TI, Instruction *FI);
  Instruction *FoldSelectIntoOp(SelectInst &SI, Value *, Value *);
  Instruction *FoldSPFofSPF(Instruction *Inner, SelectPatternFlavor SPF1,
                            Value *A, Value *B, Instruction &Outer,
                            SelectPatternFlavor SPF2, Value *C);
  Instruction *FoldItoFPtoI(Instruction &FI);
  Instruction *visitSelectInst(SelectInst &SI);
  Instruction *visitSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI);
  Instruction *visitCallInst(CallInst &CI);
  Instruction *visitInvokeInst(InvokeInst &II);

  Instruction *SliceUpIllegalIntegerPHI(PHINode &PN);
  Instruction *visitPHINode(PHINode &PN);
  Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
  Instruction *visitAllocaInst(AllocaInst &AI);
  Instruction *visitAllocSite(Instruction &FI);
  Instruction *visitFree(CallInst &FI);
  Instruction *visitLoadInst(LoadInst &LI);
  Instruction *visitStoreInst(StoreInst &SI);
  Instruction *visitBranchInst(BranchInst &BI);
  Instruction *visitSwitchInst(SwitchInst &SI);
  Instruction *visitReturnInst(ReturnInst &RI);
  Instruction *visitInsertValueInst(InsertValueInst &IV);
  Instruction *visitInsertElementInst(InsertElementInst &IE);
  Instruction *visitExtractElementInst(ExtractElementInst &EI);
  Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
  Instruction *visitExtractValueInst(ExtractValueInst &EV);
  Instruction *visitLandingPadInst(LandingPadInst &LI);
  Instruction *visitVAStartInst(VAStartInst &I);
  Instruction *visitVACopyInst(VACopyInst &I);

  // visitInstruction - Specify what to return for unhandled instructions...
  Instruction *visitInstruction(Instruction &I) { return nullptr; }

  // True when DB dominates all uses of DI except UI.
  // UI must be in the same block as DI.
  // The routine checks that the DI parent and DB are different.
  bool dominatesAllUses(const Instruction *DI, const Instruction *UI,
                        const BasicBlock *DB) const;

  // Replace select with select operand SIOpd in SI-ICmp sequence when possible
  bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp,
                                 const unsigned SIOpd);

private:
  bool ShouldChangeType(unsigned FromBitWidth, unsigned ToBitWidth) const;
  bool ShouldChangeType(Type *From, Type *To) const;
  Value *dyn_castNegVal(Value *V) const;
  Value *dyn_castFNegVal(Value *V, bool NoSignedZero = false) const;
  Type *FindElementAtOffset(PointerType *PtrTy, int64_t Offset,
                            SmallVectorImpl<Value *> &NewIndices);
  Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI);

  /// \brief Classify whether a cast is worth optimizing.
  ///
  /// Returns true if the cast from "V to Ty" actually results in any code
  /// being generated and is interesting to optimize out. If the cast can be
  /// eliminated by some other simple transformation, we prefer to do the
  /// simplification first.
  bool ShouldOptimizeCast(Instruction::CastOps opcode, const Value *V,
                          Type *Ty);

  /// \brief Try to optimize a sequence of instructions checking if an operation
  /// on LHS and RHS overflows.
  ///
  /// If this overflow check is done via one of the overflow check intrinsics,
  /// then CtxI has to be the call instruction calling that intrinsic.  If this
  /// overflow check is done by arithmetic followed by a compare, then CtxI has
  /// to be the arithmetic instruction.
  ///
  /// If a simplification is possible, stores the simplified result of the
  /// operation in OperationResult and the result of the overflow check in
  /// OverflowResult, and returns true.  If no simplification is possible,
  /// returns false.
  bool OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS, Value *RHS,
                             Instruction &CtxI, Value *&OperationResult,
                             Constant *&OverflowResult);
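  // Illustrative example (assumed IR shapes, not taken from this header) of
  // the two forms the documentation above refers to. Intrinsic form (CtxI is
  // the call):
  //
  //   %res = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  //   %ov  = extractvalue { i32, i1 } %res, 1
  //
  // Arithmetic-plus-compare form (CtxI is the add):
  //
  //   %sum = add i32 %a, %b
  //   %ov  = icmp ult i32 %sum, %a   ; unsigned add overflowed iff sum < a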

  Instruction *visitCallSite(CallSite CS);
  Instruction *tryOptimizeCall(CallInst *CI);
  bool transformConstExprCastCall(CallSite CS);
  Instruction *transformCallThroughTrampoline(CallSite CS,
                                              IntrinsicInst *Tramp);
  Instruction *transformZExtICmp(ICmpInst *ICI, Instruction &CI,
                                 bool DoXform = true);
  Instruction *transformSExtICmp(ICmpInst *ICI, Instruction &CI);
  bool WillNotOverflowSignedAdd(Value *LHS, Value *RHS, Instruction &CxtI);
  bool WillNotOverflowSignedSub(Value *LHS, Value *RHS, Instruction &CxtI);
  bool WillNotOverflowUnsignedSub(Value *LHS, Value *RHS, Instruction &CxtI);
  bool WillNotOverflowSignedMul(Value *LHS, Value *RHS, Instruction &CxtI);
  Value *EmitGEPOffset(User *GEP);
  Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
  Value *EvaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask);
  Instruction *foldCastedBitwiseLogic(BinaryOperator &I);

public:
  /// \brief Inserts an instruction \p New before instruction \p Old
  ///
  /// Also adds the new instruction to the worklist and returns \p New so that
  /// it is suitable for use as the return from the visitation patterns.
  Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) {
    assert(New && !New->getParent() &&
           "New instruction already inserted into a basic block!");
    BasicBlock *BB = Old.getParent();
    BB->getInstList().insert(Old.getIterator(), New); // Insert inst
    Worklist.Add(New);
    return New;
  }

  /// \brief Same as InsertNewInstBefore, but also sets the debug loc.
  Instruction *InsertNewInstWith(Instruction *New, Instruction &Old) {
    New->setDebugLoc(Old.getDebugLoc());
    return InsertNewInstBefore(New, Old);
  }

  /// \brief A combiner-aware RAUW-like routine.
  ///
  /// This method is to be used when an instruction is found to be dead,
  /// replaceable with another preexisting expression. Here we add all uses of
  /// I to the worklist, replace all uses of I with the new value, then return
  /// I, so that the inst combiner will know that I was modified.
  Instruction *replaceInstUsesWith(Instruction &I, Value *V) {
    // If there are no uses to replace, then we return nullptr to indicate that
    // no changes were made to the program.
    if (I.use_empty()) return nullptr;

    Worklist.AddUsersToWorkList(I); // Add all modified instrs to worklist.

    // If we are replacing the instruction with itself, this must be in a
    // segment of unreachable code, so just clobber the instruction.
    if (&I == V)
      V = UndefValue::get(I.getType());

    DEBUG(dbgs() << "IC: Replacing " << I << "\n"
                 << "    with " << *V << '\n');

    I.replaceAllUsesWith(V);
    return &I;
  }
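  // Illustrative usage sketch (an assumption about typical callers, not part
  // of the original header): a visit method that proves `I` computes the same
  // value as an existing `V` would typically end with
  //
  //   return replaceInstUsesWith(I, V);
  //
  // so the users of I are requeued and the combiner records the change.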

  /// Creates a result tuple for an overflow intrinsic \p II with a given
  /// \p Result and a constant \p Overflow value.
  Instruction *CreateOverflowTuple(IntrinsicInst *II, Value *Result,
                                   Constant *Overflow) {
    Constant *V[] = {UndefValue::get(Result->getType()), Overflow};
    StructType *ST = cast<StructType>(II->getType());
    Constant *Struct = ConstantStruct::get(ST, V);
    return InsertValueInst::Create(Struct, Result, 0);
  }

  /// \brief Combiner aware instruction erasure.
  ///
  /// When dealing with an instruction that has side effects or produces a void
  /// value, we can't rely on DCE to delete the instruction. Instead, visit
  /// methods should return the value returned by this function.
  Instruction *eraseInstFromFunction(Instruction &I) {
    DEBUG(dbgs() << "IC: ERASE " << I << '\n');

    assert(I.use_empty() && "Cannot erase instruction that is used!");
    // Make sure that we reprocess all operands now that we reduced their
    // use counts.
    if (I.getNumOperands() < 8) {
      for (Use &Operand : I.operands())
        if (auto *Inst = dyn_cast<Instruction>(Operand))
          Worklist.Add(Inst);
    }
    Worklist.Remove(&I);
    I.eraseFromParent();
    MadeIRChange = true;
    return nullptr; // Don't do anything with FI
  }

  void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
                        unsigned Depth, Instruction *CxtI) const {
    return llvm::computeKnownBits(V, KnownZero, KnownOne, DL, Depth, AC, CxtI,
                                  DT);
  }

  bool MaskedValueIsZero(Value *V, const APInt &Mask, unsigned Depth = 0,
                         Instruction *CxtI = nullptr) const {
    return llvm::MaskedValueIsZero(V, Mask, DL, Depth, AC, CxtI, DT);
  }
  unsigned ComputeNumSignBits(Value *Op, unsigned Depth = 0,
                              Instruction *CxtI = nullptr) const {
    return llvm::ComputeNumSignBits(Op, DL, Depth, AC, CxtI, DT);
  }
  void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
                      unsigned Depth = 0, Instruction *CxtI = nullptr) const {
    return llvm::ComputeSignBit(V, KnownZero, KnownOne, DL, Depth, AC, CxtI,
                                DT);
  }
  OverflowResult computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
                                               const Instruction *CxtI) {
    return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, AC, CxtI, DT);
  }
  OverflowResult computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
                                               const Instruction *CxtI) {
    return llvm::computeOverflowForUnsignedAdd(LHS, RHS, DL, AC, CxtI, DT);
  }

private:
  /// \brief Performs a few simplifications for operators which are associative
  /// or commutative.
  bool SimplifyAssociativeOrCommutative(BinaryOperator &I);

  /// \brief Tries to simplify binary operations which some other binary
  /// operation distributes over.
  ///
  /// It does this either by factorizing out common terms (e.g. "(A*B)+(A*C)"
  /// -> "A*(B+C)") or by expanding out if this results in simplifications
  /// (e.g. "A & (B | C) -> (A&B) | (A&C)" if this is a win).  Returns the
  /// simplified value, or null if it didn't simplify.
  Value *SimplifyUsingDistributiveLaws(BinaryOperator &I);
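  // Illustrative example (assumed IR, not taken from this header) of the
  // factorization case described above:
  //
  //   %t0 = mul i32 %a, %b
  //   %t1 = mul i32 %a, %c
  //   %r  = add i32 %t0, %t1
  //
  // can be rewritten as
  //
  //   %s = add i32 %b, %c
  //   %r = mul i32 %a, %s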

  /// \brief Attempts to replace V with a simpler value based on the demanded
  /// bits.
  Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask, APInt &KnownZero,
                                 APInt &KnownOne, unsigned Depth,
                                 Instruction *CxtI);
  bool SimplifyDemandedBits(Use &U, const APInt &DemandedMask, APInt &KnownZero,
                            APInt &KnownOne, unsigned Depth = 0);
  /// Helper routine of SimplifyDemandedUseBits. It tries to simplify the
  /// demanded bits for the "r1 = shr x, c1; r2 = shl r1, c2" instruction
  /// sequence.
  Value *SimplifyShrShlDemandedBits(Instruction *Lsr, Instruction *Sftl,
                                    const APInt &DemandedMask, APInt &KnownZero,
                                    APInt &KnownOne);

  /// \brief Tries to simplify operands to an integer instruction based on its
  /// demanded bits.
  bool SimplifyDemandedInstructionBits(Instruction &Inst);

  Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                    APInt &UndefElts, unsigned Depth = 0);

  Value *SimplifyVectorOp(BinaryOperator &Inst);
  Value *SimplifyBSwap(BinaryOperator &Inst);

  // FoldOpIntoPhi - Given a binary operator, cast instruction, or select
  // which has a PHI node as operand #0, see if we can fold the instruction
  // into the PHI (which is only possible if all operands to the PHI are
  // constants).
  //
  Instruction *FoldOpIntoPhi(Instruction &I);

  /// \brief Try to rotate an operation below a PHI node, using PHI nodes for
  /// its operands.
  Instruction *FoldPHIArgOpIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgBinOpIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgGEPIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgLoadIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgZextsIntoPHI(PHINode &PN);

  Instruction *OptAndOp(Instruction *Op, ConstantInt *OpRHS,
                        ConstantInt *AndRHS, BinaryOperator &TheAnd);

  Value *FoldLogicalPlusAnd(Value *LHS, Value *RHS, ConstantInt *Mask,
                            bool isSub, Instruction &I);
  Value *InsertRangeTest(Value *V, Constant *Lo, Constant *Hi, bool isSigned,
                         bool Inside);
  Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocaInst &AI);
  Instruction *MatchBSwap(BinaryOperator &I);
  bool SimplifyStoreAtEndOfBlock(StoreInst &SI);
  Instruction *SimplifyMemTransfer(MemIntrinsic *MI);
  Instruction *SimplifyMemSet(MemSetInst *MI);

  Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned);

  /// \brief Returns a value X such that Val = X * Scale, or null if none.
  ///
  /// If the multiplication is known not to overflow then NoSignedWrap is set.
  Value *Descale(Value *Val, APInt Scale, bool &NoSignedWrap);
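  // Illustrative example (a hypothetical constant case, assumed to be
  // consistent with the contract documented above rather than taken from this
  // header): with Scale == 4 and a Val defined as
  //
  //   %v = mul i32 %y, 12
  //
  // a valid result X would be `mul i32 %y, 3`, since (%y * 3) * 4 == %y * 12.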
};

} // end namespace llvm.

#undef DEBUG_TYPE

#endif