//===- InstCombiner.h - InstCombine implementation --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file provides the interface for the instcombine pass implementation.
/// The interface is used for generic transformations in this folder and
/// target specific combinations in the targets.
/// The visitor implementation is in \c InstCombinerImpl in
/// \c InstCombineInternal.h.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINER_H
#define LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINER_H

#include "llvm/Analysis/DomConditionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>

// NOTE(review): DEBUG_TYPE is deliberately defined *before* including
// InstructionWorklist.h — the worklist header appears to rely on it for its
// inline LLVM_DEBUG output. Keep this ordering.
#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"

34 namespace llvm {
35 
36 class AAResults;
37 class AssumptionCache;
38 class OptimizationRemarkEmitter;
39 class ProfileSummaryInfo;
40 class TargetLibraryInfo;
41 class TargetTransformInfo;
42 
43 /// The core instruction combiner logic.
44 ///
45 /// This class provides both the logic to recursively visit instructions and
46 /// combine them.
47 class LLVM_LIBRARY_VISIBILITY InstCombiner {
48   /// Only used to call target specific intrinsic combining.
49   /// It must **NOT** be used for any other purpose, as InstCombine is a
50   /// target-independent canonicalization transform.
51   TargetTransformInfo &TTI;
52 
53 public:
54   /// Maximum size of array considered when transforming.
55   uint64_t MaxArraySizeForCombine = 0;
56 
57   /// An IRBuilder that automatically inserts new instructions into the
58   /// worklist.
59   using BuilderTy = IRBuilder<TargetFolder, IRBuilderCallbackInserter>;
60   BuilderTy &Builder;
61 
62 protected:
63   /// A worklist of the instructions that need to be simplified.
64   InstructionWorklist &Worklist;
65 
66   // Mode in which we are running the combiner.
67   const bool MinimizeSize;
68 
69   AAResults *AA;
70 
71   // Required analyses.
72   AssumptionCache &AC;
73   TargetLibraryInfo &TLI;
74   DominatorTree &DT;
75   const DataLayout &DL;
76   SimplifyQuery SQ;
77   OptimizationRemarkEmitter &ORE;
78   BlockFrequencyInfo *BFI;
79   ProfileSummaryInfo *PSI;
80   DomConditionCache DC;
81 
82   // Optional analyses. When non-null, these can both be used to do better
83   // combining and will be updated to reflect any changes.
84   LoopInfo *LI;
85 
86   bool MadeIRChange = false;
87 
88   /// Edges that are known to never be taken.
89   SmallDenseSet<std::pair<BasicBlock *, BasicBlock *>, 8> DeadEdges;
90 
91   /// Order of predecessors to canonicalize phi nodes towards.
92   SmallDenseMap<BasicBlock *, SmallVector<BasicBlock *>, 8> PredOrder;
93 
94 public:
InstCombiner(InstructionWorklist & Worklist,BuilderTy & Builder,bool MinimizeSize,AAResults * AA,AssumptionCache & AC,TargetLibraryInfo & TLI,TargetTransformInfo & TTI,DominatorTree & DT,OptimizationRemarkEmitter & ORE,BlockFrequencyInfo * BFI,ProfileSummaryInfo * PSI,const DataLayout & DL,LoopInfo * LI)95   InstCombiner(InstructionWorklist &Worklist, BuilderTy &Builder,
96                bool MinimizeSize, AAResults *AA, AssumptionCache &AC,
97                TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
98                DominatorTree &DT, OptimizationRemarkEmitter &ORE,
99                BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
100                const DataLayout &DL, LoopInfo *LI)
101       : TTI(TTI), Builder(Builder), Worklist(Worklist),
102         MinimizeSize(MinimizeSize), AA(AA), AC(AC), TLI(TLI), DT(DT), DL(DL),
103         SQ(DL, &TLI, &DT, &AC, nullptr, /*UseInstrInfo*/ true,
104            /*CanUseUndef*/ true, &DC),
105         ORE(ORE), BFI(BFI), PSI(PSI), LI(LI) {}
106 
107   virtual ~InstCombiner() = default;
108 
109   /// Return the source operand of a potentially bitcasted value while
110   /// optionally checking if it has one use. If there is no bitcast or the one
111   /// use check is not met, return the input value itself.
112   static Value *peekThroughBitcast(Value *V, bool OneUseOnly = false) {
113     if (auto *BitCast = dyn_cast<BitCastInst>(V))
114       if (!OneUseOnly || BitCast->hasOneUse())
115         return BitCast->getOperand(0);
116 
117     // V is not a bitcast or V has more than one use and OneUseOnly is true.
118     return V;
119   }
120 
121   /// Assign a complexity or rank value to LLVM Values. This is used to reduce
122   /// the amount of pattern matching needed for compares and commutative
123   /// instructions. For example, if we have:
124   ///   icmp ugt X, Constant
125   /// or
126   ///   xor (add X, Constant), cast Z
127   ///
128   /// We do not have to consider the commuted variants of these patterns because
129   /// canonicalization based on complexity guarantees the above ordering.
130   ///
131   /// This routine maps IR values to various complexity ranks:
132   ///   0 -> undef
133   ///   1 -> Constants
134   ///   2 -> Other non-instructions
135   ///   3 -> Arguments
136   ///   4 -> Cast and (f)neg/not instructions
137   ///   5 -> Other instructions
getComplexity(Value * V)138   static unsigned getComplexity(Value *V) {
139     if (isa<Instruction>(V)) {
140       if (isa<CastInst>(V) || match(V, m_Neg(PatternMatch::m_Value())) ||
141           match(V, m_Not(PatternMatch::m_Value())) ||
142           match(V, m_FNeg(PatternMatch::m_Value())))
143         return 4;
144       return 5;
145     }
146     if (isa<Argument>(V))
147       return 3;
148     return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;
149   }
150 
151   /// Predicate canonicalization reduces the number of patterns that need to be
152   /// matched by other transforms. For example, we may swap the operands of a
153   /// conditional branch or select to create a compare with a canonical
154   /// (inverted) predicate which is then more likely to be matched with other
155   /// values.
isCanonicalPredicate(CmpInst::Predicate Pred)156   static bool isCanonicalPredicate(CmpInst::Predicate Pred) {
157     switch (Pred) {
158     case CmpInst::ICMP_NE:
159     case CmpInst::ICMP_ULE:
160     case CmpInst::ICMP_SLE:
161     case CmpInst::ICMP_UGE:
162     case CmpInst::ICMP_SGE:
163     // TODO: There are 16 FCMP predicates. Should others be (not) canonical?
164     case CmpInst::FCMP_ONE:
165     case CmpInst::FCMP_OLE:
166     case CmpInst::FCMP_OGE:
167       return false;
168     default:
169       return true;
170     }
171   }
172 
173   /// Given an exploded icmp instruction, return true if the comparison only
174   /// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if
175   /// the result of the comparison is true when the input value is signed.
isSignBitCheck(ICmpInst::Predicate Pred,const APInt & RHS,bool & TrueIfSigned)176   static bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
177                              bool &TrueIfSigned) {
178     switch (Pred) {
179     case ICmpInst::ICMP_SLT: // True if LHS s< 0
180       TrueIfSigned = true;
181       return RHS.isZero();
182     case ICmpInst::ICMP_SLE: // True if LHS s<= -1
183       TrueIfSigned = true;
184       return RHS.isAllOnes();
185     case ICmpInst::ICMP_SGT: // True if LHS s> -1
186       TrueIfSigned = false;
187       return RHS.isAllOnes();
188     case ICmpInst::ICMP_SGE: // True if LHS s>= 0
189       TrueIfSigned = false;
190       return RHS.isZero();
191     case ICmpInst::ICMP_UGT:
192       // True if LHS u> RHS and RHS == sign-bit-mask - 1
193       TrueIfSigned = true;
194       return RHS.isMaxSignedValue();
195     case ICmpInst::ICMP_UGE:
196       // True if LHS u>= RHS and RHS == sign-bit-mask (2^7, 2^15, 2^31, etc)
197       TrueIfSigned = true;
198       return RHS.isMinSignedValue();
199     case ICmpInst::ICMP_ULT:
200       // True if LHS u< RHS and RHS == sign-bit-mask (2^7, 2^15, 2^31, etc)
201       TrueIfSigned = false;
202       return RHS.isMinSignedValue();
203     case ICmpInst::ICMP_ULE:
204       // True if LHS u<= RHS and RHS == sign-bit-mask - 1
205       TrueIfSigned = false;
206       return RHS.isMaxSignedValue();
207     default:
208       return false;
209     }
210   }
211 
212   /// Add one to a Constant
AddOne(Constant * C)213   static Constant *AddOne(Constant *C) {
214     return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
215   }
216 
217   /// Subtract one from a Constant
SubOne(Constant * C)218   static Constant *SubOne(Constant *C) {
219     return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
220   }
221 
222   std::optional<std::pair<
223       CmpInst::Predicate,
224       Constant *>> static getFlippedStrictnessPredicateAndConstant(CmpInst::
225                                                                        Predicate
226                                                                            Pred,
227                                                                    Constant *C);
228 
shouldAvoidAbsorbingNotIntoSelect(const SelectInst & SI)229   static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI) {
230     // a ? b : false and a ? true : b are the canonical form of logical and/or.
231     // This includes !a ? b : false and !a ? true : b. Absorbing the not into
232     // the select by swapping operands would break recognition of this pattern
233     // in other analyses, so don't do that.
234     return match(&SI, PatternMatch::m_LogicalAnd(PatternMatch::m_Value(),
235                                                  PatternMatch::m_Value())) ||
236            match(&SI, PatternMatch::m_LogicalOr(PatternMatch::m_Value(),
237                                                 PatternMatch::m_Value()));
238   }
239 
240   /// Return nonnull value if V is free to invert under the condition of
241   /// WillInvertAllUses.
242   /// If Builder is nonnull, it will return a simplified ~V.
243   /// If Builder is null, it will return an arbitrary nonnull value (not
244   /// dereferenceable).
245   /// If the inversion will consume instructions, `DoesConsume` will be set to
246   /// true. Otherwise it will be false.
247   Value *getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
248                                       BuilderTy *Builder, bool &DoesConsume,
249                                       unsigned Depth);
250 
getFreelyInverted(Value * V,bool WillInvertAllUses,BuilderTy * Builder,bool & DoesConsume)251   Value *getFreelyInverted(Value *V, bool WillInvertAllUses,
252                                   BuilderTy *Builder, bool &DoesConsume) {
253     DoesConsume = false;
254     return getFreelyInvertedImpl(V, WillInvertAllUses, Builder, DoesConsume,
255                                  /*Depth*/ 0);
256   }
257 
getFreelyInverted(Value * V,bool WillInvertAllUses,BuilderTy * Builder)258   Value *getFreelyInverted(Value *V, bool WillInvertAllUses,
259                                   BuilderTy *Builder) {
260     bool Unused;
261     return getFreelyInverted(V, WillInvertAllUses, Builder, Unused);
262   }
263 
264   /// Return true if the specified value is free to invert (apply ~ to).
265   /// This happens in cases where the ~ can be eliminated.  If WillInvertAllUses
266   /// is true, work under the assumption that the caller intends to remove all
267   /// uses of V and only keep uses of ~V.
268   ///
269   /// See also: canFreelyInvertAllUsersOf()
isFreeToInvert(Value * V,bool WillInvertAllUses,bool & DoesConsume)270   bool isFreeToInvert(Value *V, bool WillInvertAllUses,
271                              bool &DoesConsume) {
272     return getFreelyInverted(V, WillInvertAllUses, /*Builder*/ nullptr,
273                              DoesConsume) != nullptr;
274   }
275 
isFreeToInvert(Value * V,bool WillInvertAllUses)276   bool isFreeToInvert(Value *V, bool WillInvertAllUses) {
277     bool Unused;
278     return isFreeToInvert(V, WillInvertAllUses, Unused);
279   }
280 
281   /// Given i1 V, can every user of V be freely adapted if V is changed to !V ?
282   /// InstCombine's freelyInvertAllUsersOf() must be kept in sync with this fn.
283   /// NOTE: for Instructions only!
284   ///
285   /// See also: isFreeToInvert()
canFreelyInvertAllUsersOf(Instruction * V,Value * IgnoredUser)286   bool canFreelyInvertAllUsersOf(Instruction *V, Value *IgnoredUser) {
287     // Look at every user of V.
288     for (Use &U : V->uses()) {
289       if (U.getUser() == IgnoredUser)
290         continue; // Don't consider this user.
291 
292       auto *I = cast<Instruction>(U.getUser());
293       switch (I->getOpcode()) {
294       case Instruction::Select:
295         if (U.getOperandNo() != 0) // Only if the value is used as select cond.
296           return false;
297         if (shouldAvoidAbsorbingNotIntoSelect(*cast<SelectInst>(I)))
298           return false;
299         break;
300       case Instruction::Br:
301         assert(U.getOperandNo() == 0 && "Must be branching on that value.");
302         break; // Free to invert by swapping true/false values/destinations.
303       case Instruction::Xor: // Can invert 'xor' if it's a 'not', by ignoring
304                              // it.
305         if (!match(I, m_Not(PatternMatch::m_Value())))
306           return false; // Not a 'not'.
307         break;
308       default:
309         return false; // Don't know, likely not freely invertible.
310       }
311       // So far all users were free to invert...
312     }
313     return true; // Can freely invert all users!
314   }
315 
316   /// Some binary operators require special handling to avoid poison and
317   /// undefined behavior. If a constant vector has undef elements, replace those
318   /// undefs with identity constants if possible because those are always safe
319   /// to execute. If no identity constant exists, replace undef with some other
320   /// safe constant.
321   static Constant *
getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode,Constant * In,bool IsRHSConstant)322   getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In,
323                                 bool IsRHSConstant) {
324     auto *InVTy = cast<FixedVectorType>(In->getType());
325 
326     Type *EltTy = InVTy->getElementType();
327     auto *SafeC = ConstantExpr::getBinOpIdentity(Opcode, EltTy, IsRHSConstant);
328     if (!SafeC) {
329       // TODO: Should this be available as a constant utility function? It is
330       // similar to getBinOpAbsorber().
331       if (IsRHSConstant) {
332         switch (Opcode) {
333         case Instruction::SRem: // X % 1 = 0
334         case Instruction::URem: // X %u 1 = 0
335           SafeC = ConstantInt::get(EltTy, 1);
336           break;
337         case Instruction::FRem: // X % 1.0 (doesn't simplify, but it is safe)
338           SafeC = ConstantFP::get(EltTy, 1.0);
339           break;
340         default:
341           llvm_unreachable(
342               "Only rem opcodes have no identity constant for RHS");
343         }
344       } else {
345         switch (Opcode) {
346         case Instruction::Shl:  // 0 << X = 0
347         case Instruction::LShr: // 0 >>u X = 0
348         case Instruction::AShr: // 0 >> X = 0
349         case Instruction::SDiv: // 0 / X = 0
350         case Instruction::UDiv: // 0 /u X = 0
351         case Instruction::SRem: // 0 % X = 0
352         case Instruction::URem: // 0 %u X = 0
353         case Instruction::Sub:  // 0 - X (doesn't simplify, but it is safe)
354         case Instruction::FSub: // 0.0 - X (doesn't simplify, but it is safe)
355         case Instruction::FDiv: // 0.0 / X (doesn't simplify, but it is safe)
356         case Instruction::FRem: // 0.0 % X = 0
357           SafeC = Constant::getNullValue(EltTy);
358           break;
359         default:
360           llvm_unreachable("Expected to find identity constant for opcode");
361         }
362       }
363     }
364     assert(SafeC && "Must have safe constant for binop");
365     unsigned NumElts = InVTy->getNumElements();
366     SmallVector<Constant *, 16> Out(NumElts);
367     for (unsigned i = 0; i != NumElts; ++i) {
368       Constant *C = In->getAggregateElement(i);
369       Out[i] = isa<UndefValue>(C) ? SafeC : C;
370     }
371     return ConstantVector::get(Out);
372   }
373 
addToWorklist(Instruction * I)374   void addToWorklist(Instruction *I) { Worklist.push(I); }
375 
getAssumptionCache()376   AssumptionCache &getAssumptionCache() const { return AC; }
getTargetLibraryInfo()377   TargetLibraryInfo &getTargetLibraryInfo() const { return TLI; }
getDominatorTree()378   DominatorTree &getDominatorTree() const { return DT; }
getDataLayout()379   const DataLayout &getDataLayout() const { return DL; }
getSimplifyQuery()380   const SimplifyQuery &getSimplifyQuery() const { return SQ; }
getOptimizationRemarkEmitter()381   OptimizationRemarkEmitter &getOptimizationRemarkEmitter() const {
382     return ORE;
383   }
getBlockFrequencyInfo()384   BlockFrequencyInfo *getBlockFrequencyInfo() const { return BFI; }
getProfileSummaryInfo()385   ProfileSummaryInfo *getProfileSummaryInfo() const { return PSI; }
getLoopInfo()386   LoopInfo *getLoopInfo() const { return LI; }
387 
388   // Call target specific combiners
389   std::optional<Instruction *> targetInstCombineIntrinsic(IntrinsicInst &II);
390   std::optional<Value *>
391   targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask,
392                                          KnownBits &Known,
393                                          bool &KnownBitsComputed);
394   std::optional<Value *> targetSimplifyDemandedVectorEltsIntrinsic(
395       IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
396       APInt &UndefElts2, APInt &UndefElts3,
397       std::function<void(Instruction *, unsigned, APInt, APInt &)>
398           SimplifyAndSetOp);
399 
400   /// Inserts an instruction \p New before instruction \p Old
401   ///
402   /// Also adds the new instruction to the worklist and returns \p New so that
403   /// it is suitable for use as the return from the visitation patterns.
InsertNewInstBefore(Instruction * New,BasicBlock::iterator Old)404   Instruction *InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old) {
405     assert(New && !New->getParent() &&
406            "New instruction already inserted into a basic block!");
407     New->insertBefore(Old); // Insert inst
408     Worklist.add(New);
409     return New;
410   }
411 
412   /// Same as InsertNewInstBefore, but also sets the debug loc.
InsertNewInstWith(Instruction * New,BasicBlock::iterator Old)413   Instruction *InsertNewInstWith(Instruction *New, BasicBlock::iterator Old) {
414     New->setDebugLoc(Old->getDebugLoc());
415     return InsertNewInstBefore(New, Old);
416   }
417 
418   /// A combiner-aware RAUW-like routine.
419   ///
420   /// This method is to be used when an instruction is found to be dead,
421   /// replaceable with another preexisting expression. Here we add all uses of
422   /// I to the worklist, replace all uses of I with the new value, then return
423   /// I, so that the inst combiner will know that I was modified.
replaceInstUsesWith(Instruction & I,Value * V)424   Instruction *replaceInstUsesWith(Instruction &I, Value *V) {
425     // If there are no uses to replace, then we return nullptr to indicate that
426     // no changes were made to the program.
427     if (I.use_empty()) return nullptr;
428 
429     Worklist.pushUsersToWorkList(I); // Add all modified instrs to worklist.
430 
431     // If we are replacing the instruction with itself, this must be in a
432     // segment of unreachable code, so just clobber the instruction.
433     if (&I == V)
434       V = PoisonValue::get(I.getType());
435 
436     LLVM_DEBUG(dbgs() << "IC: Replacing " << I << "\n"
437                       << "    with " << *V << '\n');
438 
439     // If V is a new unnamed instruction, take the name from the old one.
440     if (V->use_empty() && isa<Instruction>(V) && !V->hasName() && I.hasName())
441       V->takeName(&I);
442 
443     I.replaceAllUsesWith(V);
444     return &I;
445   }
446 
447   /// Replace operand of instruction and add old operand to the worklist.
replaceOperand(Instruction & I,unsigned OpNum,Value * V)448   Instruction *replaceOperand(Instruction &I, unsigned OpNum, Value *V) {
449     Value *OldOp = I.getOperand(OpNum);
450     I.setOperand(OpNum, V);
451     Worklist.handleUseCountDecrement(OldOp);
452     return &I;
453   }
454 
455   /// Replace use and add the previously used value to the worklist.
replaceUse(Use & U,Value * NewValue)456   void replaceUse(Use &U, Value *NewValue) {
457     Value *OldOp = U;
458     U = NewValue;
459     Worklist.handleUseCountDecrement(OldOp);
460   }
461 
462   /// Combiner aware instruction erasure.
463   ///
464   /// When dealing with an instruction that has side effects or produces a void
465   /// value, we can't rely on DCE to delete the instruction. Instead, visit
466   /// methods should return the value returned by this function.
467   virtual Instruction *eraseInstFromFunction(Instruction &I) = 0;
468 
computeKnownBits(const Value * V,KnownBits & Known,unsigned Depth,const Instruction * CxtI)469   void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
470                         const Instruction *CxtI) const {
471     llvm::computeKnownBits(V, Known, Depth, SQ.getWithInstruction(CxtI));
472   }
473 
computeKnownBits(const Value * V,unsigned Depth,const Instruction * CxtI)474   KnownBits computeKnownBits(const Value *V, unsigned Depth,
475                              const Instruction *CxtI) const {
476     return llvm::computeKnownBits(V, Depth, SQ.getWithInstruction(CxtI));
477   }
478 
479   bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero = false,
480                               unsigned Depth = 0,
481                               const Instruction *CxtI = nullptr) {
482     return llvm::isKnownToBeAPowerOfTwo(V, DL, OrZero, Depth, &AC, CxtI, &DT);
483   }
484 
485   bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth = 0,
486                          const Instruction *CxtI = nullptr) const {
487     return llvm::MaskedValueIsZero(V, Mask, SQ.getWithInstruction(CxtI), Depth);
488   }
489 
490   unsigned ComputeNumSignBits(const Value *Op, unsigned Depth = 0,
491                               const Instruction *CxtI = nullptr) const {
492     return llvm::ComputeNumSignBits(Op, DL, Depth, &AC, CxtI, &DT);
493   }
494 
495   unsigned ComputeMaxSignificantBits(const Value *Op, unsigned Depth = 0,
496                                      const Instruction *CxtI = nullptr) const {
497     return llvm::ComputeMaxSignificantBits(Op, DL, Depth, &AC, CxtI, &DT);
498   }
499 
computeOverflowForUnsignedMul(const Value * LHS,const Value * RHS,const Instruction * CxtI)500   OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
501                                                const Value *RHS,
502                                                const Instruction *CxtI) const {
503     return llvm::computeOverflowForUnsignedMul(LHS, RHS,
504                                                SQ.getWithInstruction(CxtI));
505   }
506 
computeOverflowForSignedMul(const Value * LHS,const Value * RHS,const Instruction * CxtI)507   OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
508                                              const Instruction *CxtI) const {
509     return llvm::computeOverflowForSignedMul(LHS, RHS,
510                                              SQ.getWithInstruction(CxtI));
511   }
512 
513   OverflowResult
computeOverflowForUnsignedAdd(const WithCache<const Value * > & LHS,const WithCache<const Value * > & RHS,const Instruction * CxtI)514   computeOverflowForUnsignedAdd(const WithCache<const Value *> &LHS,
515                                 const WithCache<const Value *> &RHS,
516                                 const Instruction *CxtI) const {
517     return llvm::computeOverflowForUnsignedAdd(LHS, RHS,
518                                                SQ.getWithInstruction(CxtI));
519   }
520 
521   OverflowResult
computeOverflowForSignedAdd(const WithCache<const Value * > & LHS,const WithCache<const Value * > & RHS,const Instruction * CxtI)522   computeOverflowForSignedAdd(const WithCache<const Value *> &LHS,
523                               const WithCache<const Value *> &RHS,
524                               const Instruction *CxtI) const {
525     return llvm::computeOverflowForSignedAdd(LHS, RHS,
526                                              SQ.getWithInstruction(CxtI));
527   }
528 
computeOverflowForUnsignedSub(const Value * LHS,const Value * RHS,const Instruction * CxtI)529   OverflowResult computeOverflowForUnsignedSub(const Value *LHS,
530                                                const Value *RHS,
531                                                const Instruction *CxtI) const {
532     return llvm::computeOverflowForUnsignedSub(LHS, RHS,
533                                                SQ.getWithInstruction(CxtI));
534   }
535 
computeOverflowForSignedSub(const Value * LHS,const Value * RHS,const Instruction * CxtI)536   OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
537                                              const Instruction *CxtI) const {
538     return llvm::computeOverflowForSignedSub(LHS, RHS,
539                                              SQ.getWithInstruction(CxtI));
540   }
541 
542   virtual bool SimplifyDemandedBits(Instruction *I, unsigned OpNo,
543                                     const APInt &DemandedMask, KnownBits &Known,
544                                     unsigned Depth = 0) = 0;
545   virtual Value *
546   SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &UndefElts,
547                              unsigned Depth = 0,
548                              bool AllowMultipleUsers = false) = 0;
549 
550   bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const;
551 };
552 
553 } // namespace llvm
554 
555 #undef DEBUG_TYPE
556 
557 #endif
558