1 //===- InstCombineSimplifyDemanded.cpp ------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains logic for simplifying instructions based on information
10 // about how they are used.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "InstCombineInternal.h"
15 #include "llvm/Analysis/ValueTracking.h"
16 #include "llvm/IR/IntrinsicInst.h"
17 #include "llvm/IR/IntrinsicsAMDGPU.h"
18 #include "llvm/IR/IntrinsicsX86.h"
19 #include "llvm/IR/PatternMatch.h"
20 #include "llvm/Support/KnownBits.h"
21 
22 using namespace llvm;
23 using namespace llvm::PatternMatch;
24 
25 #define DEBUG_TYPE "instcombine"
26 
27 namespace {
28 
29 struct AMDGPUImageDMaskIntrinsic {
30   unsigned Intr;
31 };
32 
33 #define GET_AMDGPUImageDMaskIntrinsicTable_IMPL
34 #include "InstCombineTables.inc"
35 
36 } // end anonymous namespace
37 
38 /// Check to see if the specified operand of the specified instruction is a
39 /// constant integer. If so, check to see if there are any bits set in the
40 /// constant that are not demanded. If so, shrink the constant and return true.
41 static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
42                                    const APInt &Demanded) {
43   assert(I && "No instruction?");
44   assert(OpNo < I->getNumOperands() && "Operand index too large");
45 
46   // The operand must be a constant integer or splat integer.
47   Value *Op = I->getOperand(OpNo);
48   const APInt *C;
49   if (!match(Op, m_APInt(C)))
50     return false;
51 
52   // If there are no bits set that aren't demanded, nothing to do.
53   if (C->isSubsetOf(Demanded))
54     return false;
55 
56   // This instruction is producing bits that are not demanded. Shrink the RHS.
57   I->setOperand(OpNo, ConstantInt::get(Op->getType(), *C & Demanded));
58 
59   return true;
60 }
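
// For illustration (an added example, not from upstream): with
// DemandedMask == 0x000000FF, the and-constant below has bits set outside
// the demanded mask, so ShrinkDemandedConstant rewrites it in place:
//   %a = and i32 %x, 305419896   ; 0x12345678
// becomes
//   %a = and i32 %x, 120         ; 0x12345678 & 0xFF == 0x78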
61 
62 
63 
64 /// Inst is an integer instruction that SimplifyDemandedBits knows about. See if
65 /// the instruction has any properties that allow us to simplify its operands.
66 bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
67   unsigned BitWidth = Inst.getType()->getScalarSizeInBits();
68   KnownBits Known(BitWidth);
69   APInt DemandedMask(APInt::getAllOnesValue(BitWidth));
70 
71   Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask, Known,
72                                      0, &Inst);
73   if (!V) return false;
74   if (V == &Inst) return true;
75   replaceInstUsesWith(Inst, V);
76   return true;
77 }
78 
79 /// This form of SimplifyDemandedBits simplifies the specified instruction
80 /// operand if possible, updating it in place. It returns true if it made any
81 /// change and false otherwise.
82 bool InstCombiner::SimplifyDemandedBits(Instruction *I, unsigned OpNo,
83                                         const APInt &DemandedMask,
84                                         KnownBits &Known,
85                                         unsigned Depth) {
86   Use &U = I->getOperandUse(OpNo);
87   Value *NewVal = SimplifyDemandedUseBits(U.get(), DemandedMask, Known,
88                                           Depth, I);
89   if (!NewVal) return false;
90   U = NewVal;
91   return true;
92 }
93 
94 
95 /// This function attempts to replace V with a simpler value based on the
96 /// demanded bits. When this function is called, it is known that only the bits
97 /// set in DemandedMask of the result of V are ever used downstream.
98 /// Consequently, depending on the mask and V, it may be possible to replace V
99 /// with a constant or one of its operands. In such cases, this function does
100 /// the replacement and returns a simplified value. In all other cases, it
101 /// analyzes the expression and sets Known.One to all the bits that are known
102 /// to be one in the expression, while Known.Zero contains all the bits known to be zero in
103 /// the expression. These are provided to potentially allow the caller (which
104 /// might recursively be SimplifyDemandedBits itself) to simplify the
105 /// expression.
106 /// Known.One and Known.Zero always follow the invariant that:
107 ///   Known.One & Known.Zero == 0.
108 /// That is, a bit can't be both 1 and 0. Note that the bits in Known.One and
109 /// Known.Zero may only be accurate for those bits set in DemandedMask. Note
110 /// also that the bitwidth of V, DemandedMask, Known.Zero and Known.One must all
111 /// be the same.
112 ///
113 /// This returns null if it did not change anything and it permits no
114 /// simplification.  This returns V itself if it did some simplification of V's
115 /// operands based on the information about what bits are demanded. This returns
116 /// some other non-null value if it found out that V is equal to another value
117 /// in the context where the specified bits are demanded, but not for all users.
118 Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
119                                              KnownBits &Known, unsigned Depth,
120                                              Instruction *CxtI) {
121   assert(V != nullptr && "Null pointer of Value???");
122   assert(Depth <= 6 && "Limit Search Depth");
123   uint32_t BitWidth = DemandedMask.getBitWidth();
124   Type *VTy = V->getType();
125   assert(
126       (!VTy->isIntOrIntVectorTy() || VTy->getScalarSizeInBits() == BitWidth) &&
127       Known.getBitWidth() == BitWidth &&
128       "Value *V, DemandedMask and Known must have same BitWidth");
129 
130   if (isa<Constant>(V)) {
131     computeKnownBits(V, Known, Depth, CxtI);
132     return nullptr;
133   }
134 
135   Known.resetAll();
136   if (DemandedMask.isNullValue())     // Not demanding any bits from V.
137     return UndefValue::get(VTy);
138 
139   if (Depth == 6)        // Limit search depth.
140     return nullptr;
141 
142   Instruction *I = dyn_cast<Instruction>(V);
143   if (!I) {
144     computeKnownBits(V, Known, Depth, CxtI);
145     return nullptr;        // Only analyze instructions.
146   }
147 
148   // If there are multiple uses of this value and we aren't at the root, then
149   // we can't do any simplifications of the operands, because DemandedMask
150   // only reflects the bits demanded by *one* of the users.
151   if (Depth != 0 && !I->hasOneUse())
152     return SimplifyMultipleUseDemandedBits(I, DemandedMask, Known, Depth, CxtI);
153 
154   KnownBits LHSKnown(BitWidth), RHSKnown(BitWidth);
155 
156   // If this is the root being simplified, allow it to have multiple uses,
157   // just set the DemandedMask to all bits so that we can try to simplify the
158   // operands.  This allows visitTruncInst (for example) to simplify the
159   // operand of a trunc without duplicating all the logic below.
160   if (Depth == 0 && !V->hasOneUse())
161     DemandedMask.setAllBits();
162 
163   switch (I->getOpcode()) {
164   default:
165     computeKnownBits(I, Known, Depth, CxtI);
166     break;
167   case Instruction::And: {
168     // If either the LHS or the RHS are Zero, the result is zero.
169     if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
170         SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.Zero, LHSKnown,
171                              Depth + 1))
172       return I;
173     assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
174     assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");
175 
176     // Output known-0 are known to be clear if zero in either the LHS | RHS.
177     APInt IKnownZero = RHSKnown.Zero | LHSKnown.Zero;
178     // Output known-1 bits are only known if set in both the LHS & RHS.
179     APInt IKnownOne = RHSKnown.One & LHSKnown.One;
180 
181     // If the client is only demanding bits that we know, return the known
182     // constant.
183     if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
184       return Constant::getIntegerValue(VTy, IKnownOne);
185 
186     // If all of the demanded bits are known 1 on one side, return the other.
187     // These bits cannot contribute to the result of the 'and'.
188     if (DemandedMask.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
189       return I->getOperand(0);
190     if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
191       return I->getOperand(1);
192 
193     // If the RHS is a constant, see if we can simplify it.
194     if (ShrinkDemandedConstant(I, 1, DemandedMask & ~LHSKnown.Zero))
195       return I;
196 
197     Known.Zero = std::move(IKnownZero);
198     Known.One  = std::move(IKnownOne);
199     break;
200   }
201   case Instruction::Or: {
202     // If either the LHS or the RHS are One, the result is One.
203     if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
204         SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.One, LHSKnown,
205                              Depth + 1))
206       return I;
207     assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
208     assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");
209 
210     // Output known-0 bits are only known if clear in both the LHS & RHS.
211     APInt IKnownZero = RHSKnown.Zero & LHSKnown.Zero;
212     // Output known-1 are known to be set if set in either the LHS | RHS.
213     APInt IKnownOne = RHSKnown.One | LHSKnown.One;
214 
215     // If the client is only demanding bits that we know, return the known
216     // constant.
217     if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
218       return Constant::getIntegerValue(VTy, IKnownOne);
219 
220     // If all of the demanded bits are known zero on one side, return the other.
221     // These bits cannot contribute to the result of the 'or'.
222     if (DemandedMask.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
223       return I->getOperand(0);
224     if (DemandedMask.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
225       return I->getOperand(1);
226 
227     // If the RHS is a constant, see if we can simplify it.
228     if (ShrinkDemandedConstant(I, 1, DemandedMask))
229       return I;
230 
231     Known.Zero = std::move(IKnownZero);
232     Known.One  = std::move(IKnownOne);
233     break;
234   }
235   case Instruction::Xor: {
236     if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
237         SimplifyDemandedBits(I, 0, DemandedMask, LHSKnown, Depth + 1))
238       return I;
239     assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
240     assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");
241 
242     // Output known-0 bits are known if clear or set in both the LHS & RHS.
243     APInt IKnownZero = (RHSKnown.Zero & LHSKnown.Zero) |
244                        (RHSKnown.One & LHSKnown.One);
245     // Output known-1 are known to be set if set in only one of the LHS, RHS.
246     APInt IKnownOne =  (RHSKnown.Zero & LHSKnown.One) |
247                        (RHSKnown.One & LHSKnown.Zero);
248 
249     // If the client is only demanding bits that we know, return the known
250     // constant.
251     if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
252       return Constant::getIntegerValue(VTy, IKnownOne);
253 
254     // If all of the demanded bits are known zero on one side, return the other.
255     // These bits cannot contribute to the result of the 'xor'.
256     if (DemandedMask.isSubsetOf(RHSKnown.Zero))
257       return I->getOperand(0);
258     if (DemandedMask.isSubsetOf(LHSKnown.Zero))
259       return I->getOperand(1);
260 
261     // If all of the demanded bits are known to be zero on one side or the
262     // other, turn this into an *inclusive* or.
263     //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
264     if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.Zero)) {
265       Instruction *Or =
266         BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
267                                  I->getName());
268       return InsertNewInstWith(Or, *I);
269     }
270 
271     // If all of the demanded bits on one side are known, and all of the set
272     // bits on that side are also known to be set on the other side, turn this
273     // into an AND, as we know the bits will be cleared.
274     //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
275     if (DemandedMask.isSubsetOf(RHSKnown.Zero|RHSKnown.One) &&
276         RHSKnown.One.isSubsetOf(LHSKnown.One)) {
277       Constant *AndC = Constant::getIntegerValue(VTy,
278                                                  ~RHSKnown.One & DemandedMask);
279       Instruction *And = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
280       return InsertNewInstWith(And, *I);
281     }
282 
283     // If the RHS is a constant, see if we can simplify it.
284     // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
285     if (ShrinkDemandedConstant(I, 1, DemandedMask))
286       return I;
287 
288     // If our LHS is an 'and' and if it has one use, and if any of the bits we
289     // are flipping are known to be set, then the xor is just resetting those
290     // bits to zero.  We can just knock out bits from the 'and' and the 'xor',
291     // simplifying both of them.
292     if (Instruction *LHSInst = dyn_cast<Instruction>(I->getOperand(0)))
293       if (LHSInst->getOpcode() == Instruction::And && LHSInst->hasOneUse() &&
294           isa<ConstantInt>(I->getOperand(1)) &&
295           isa<ConstantInt>(LHSInst->getOperand(1)) &&
296           (LHSKnown.One & RHSKnown.One & DemandedMask) != 0) {
297         ConstantInt *AndRHS = cast<ConstantInt>(LHSInst->getOperand(1));
298         ConstantInt *XorRHS = cast<ConstantInt>(I->getOperand(1));
299         APInt NewMask = ~(LHSKnown.One & RHSKnown.One & DemandedMask);
300 
301         Constant *AndC =
302           ConstantInt::get(I->getType(), NewMask & AndRHS->getValue());
303         Instruction *NewAnd = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
304         InsertNewInstWith(NewAnd, *I);
305 
306         Constant *XorC =
307           ConstantInt::get(I->getType(), NewMask & XorRHS->getValue());
308         Instruction *NewXor = BinaryOperator::CreateXor(NewAnd, XorC);
309         return InsertNewInstWith(NewXor, *I);
310       }
311 
312     // Output known-0 bits are known if clear or set in both the LHS & RHS.
313     Known.Zero = std::move(IKnownZero);
314     // Output known-1 are known to be set if set in only one of the LHS, RHS.
315     Known.One  = std::move(IKnownOne);
316     break;
317   }
318   case Instruction::Select: {
319     Value *LHS, *RHS;
320     SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
321     if (SPF == SPF_UMAX) {
322       // UMax(A, C) == A if ...
323       // The lowest non-zero bit of DemandedMask is higher than the highest
324       // non-zero bit of C.
325       const APInt *C;
326       unsigned CTZ = DemandedMask.countTrailingZeros();
327       if (match(RHS, m_APInt(C)) && CTZ >= C->getActiveBits())
328         return LHS;
329     } else if (SPF == SPF_UMIN) {
330       // UMin(A, C) == A if ...
331       // The lowest non-zero bit of DemandedMask is higher than the highest
332       // non-one bit of C.
333       // This comes from applying DeMorgan's law to the above umax example.
334       const APInt *C;
335       unsigned CTZ = DemandedMask.countTrailingZeros();
336       if (match(RHS, m_APInt(C)) &&
337           CTZ >= C->getBitWidth() - C->countLeadingOnes())
338         return LHS;
339     }
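    // Illustrative numbers for the umax case above (added example): with
    // DemandedMask == 0xFFF0 (CTZ == 4) and C == 7 (getActiveBits() == 3),
    // umax(A, 7) can differ from A only in bits 0..2, none of which are
    // demanded, so A is returned.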
340 
341     // If this is a select as part of any other min/max pattern, don't simplify
342     // any further in case we break the structure.
343     if (SPF != SPF_UNKNOWN)
344       return nullptr;
345 
346     if (SimplifyDemandedBits(I, 2, DemandedMask, RHSKnown, Depth + 1) ||
347         SimplifyDemandedBits(I, 1, DemandedMask, LHSKnown, Depth + 1))
348       return I;
349     assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
350     assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");
351 
352     // If the operands are constants, see if we can simplify them.
353     // This is similar to ShrinkDemandedConstant, but for a select we want to
354     // try to keep the selected constants the same as icmp value constants, if
355     // we can. This helps not break apart (or helps put back together)
356     // canonical patterns like min and max.
357     auto CanonicalizeSelectConstant = [](Instruction *I, unsigned OpNo,
358                                          APInt DemandedMask) {
359       const APInt *SelC;
360       if (!match(I->getOperand(OpNo), m_APInt(SelC)))
361         return false;
362 
363       // Get the constant out of the ICmp, if there is one.
364       const APInt *CmpC;
365       ICmpInst::Predicate Pred;
366       if (!match(I->getOperand(0), m_c_ICmp(Pred, m_APInt(CmpC), m_Value())) ||
367           CmpC->getBitWidth() != SelC->getBitWidth())
368         return ShrinkDemandedConstant(I, OpNo, DemandedMask);
369 
370       // If the constant is already the same as the ICmp, leave it as-is.
371       if (*CmpC == *SelC)
372         return false;
373       // If the constants are not already the same, but can be with the demand
374       // mask, use the constant value from the ICmp.
375       if ((*CmpC & DemandedMask) == (*SelC & DemandedMask)) {
376         I->setOperand(OpNo, ConstantInt::get(I->getType(), *CmpC));
377         return true;
378       }
379       return ShrinkDemandedConstant(I, OpNo, DemandedMask);
380     };
381     if (CanonicalizeSelectConstant(I, 1, DemandedMask) ||
382         CanonicalizeSelectConstant(I, 2, DemandedMask))
383       return I;
384 
385     // Only known if known in both the LHS and RHS.
386     Known.One = RHSKnown.One & LHSKnown.One;
387     Known.Zero = RHSKnown.Zero & LHSKnown.Zero;
388     break;
389   }
390   case Instruction::ZExt:
391   case Instruction::Trunc: {
392     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
393 
394     APInt InputDemandedMask = DemandedMask.zextOrTrunc(SrcBitWidth);
395     KnownBits InputKnown(SrcBitWidth);
396     if (SimplifyDemandedBits(I, 0, InputDemandedMask, InputKnown, Depth + 1))
397       return I;
398     assert(InputKnown.getBitWidth() == SrcBitWidth && "Src width changed?");
399     Known = InputKnown.zextOrTrunc(BitWidth,
400                                    true /* ExtendedBitsAreKnownZero */);
401     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
402     break;
403   }
404   case Instruction::BitCast:
405     if (!I->getOperand(0)->getType()->isIntOrIntVectorTy())
406       return nullptr;  // vector->int or fp->int?
407 
408     if (VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
409       if (VectorType *SrcVTy =
410             dyn_cast<VectorType>(I->getOperand(0)->getType())) {
411         if (DstVTy->getNumElements() != SrcVTy->getNumElements())
412           // Don't touch a bitcast between vectors of different element counts.
413           return nullptr;
414       } else
415         // Don't touch a scalar-to-vector bitcast.
416         return nullptr;
417     } else if (I->getOperand(0)->getType()->isVectorTy())
418       // Don't touch a vector-to-scalar bitcast.
419       return nullptr;
420 
421     if (SimplifyDemandedBits(I, 0, DemandedMask, Known, Depth + 1))
422       return I;
423     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
424     break;
425   case Instruction::SExt: {
426     // Compute the bits in the result that are not present in the input.
427     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
428 
429     APInt InputDemandedBits = DemandedMask.trunc(SrcBitWidth);
430 
431     // If any of the sign extended bits are demanded, we know that the sign
432     // bit is demanded.
433     if (DemandedMask.getActiveBits() > SrcBitWidth)
434       InputDemandedBits.setBit(SrcBitWidth-1);
435 
436     KnownBits InputKnown(SrcBitWidth);
437     if (SimplifyDemandedBits(I, 0, InputDemandedBits, InputKnown, Depth + 1))
438       return I;
439 
440     // If the input sign bit is known zero, or if the NewBits are not demanded
441     // convert this into a zero extension.
442     if (InputKnown.isNonNegative() ||
443         DemandedMask.getActiveBits() <= SrcBitWidth) {
444       // Convert to ZExt cast.
445       CastInst *NewCast = new ZExtInst(I->getOperand(0), VTy, I->getName());
446       return InsertNewInstWith(NewCast, *I);
447     }
448 
449     // If the sign bit of the input is known set or clear, then we know the
450     // top bits of the result.
451     Known = InputKnown.sext(BitWidth);
452     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
453     break;
454   }
455   case Instruction::Add:
456   case Instruction::Sub: {
457     // If the high bits of an ADD/SUB are not demanded, then we do not care
458     // about the high bits of the operands.
459     unsigned NLZ = DemandedMask.countLeadingZeros();
460     // Right fill the mask of bits for this ADD/SUB to demand the most
461     // significant bit and all those below it.
462     APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
463     if (ShrinkDemandedConstant(I, 0, DemandedFromOps) ||
464         SimplifyDemandedBits(I, 0, DemandedFromOps, LHSKnown, Depth + 1) ||
465         ShrinkDemandedConstant(I, 1, DemandedFromOps) ||
466         SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnown, Depth + 1)) {
467       if (NLZ > 0) {
468         // Disable the nsw and nuw flags here: We can no longer guarantee that
469         // we won't wrap after simplification. Removing the nsw/nuw flags is
470         // legal here because the top bit is not demanded.
471         BinaryOperator &BinOP = *cast<BinaryOperator>(I);
472         BinOP.setHasNoSignedWrap(false);
473         BinOP.setHasNoUnsignedWrap(false);
474       }
475       return I;
476     }
477 
478     // If we are known to be adding/subtracting zeros to every bit below
479     // the highest demanded bit, we just return the other side.
480     if (DemandedFromOps.isSubsetOf(RHSKnown.Zero))
481       return I->getOperand(0);
482     // We can't do this with the LHS for subtraction, unless we are only
483     // demanding the LSB.
484     if ((I->getOpcode() == Instruction::Add ||
485          DemandedFromOps.isOneValue()) &&
486         DemandedFromOps.isSubsetOf(LHSKnown.Zero))
487       return I->getOperand(1);
488 
489     // Otherwise just compute the known bits of the result.
490     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
491     Known = KnownBits::computeForAddSub(I->getOpcode() == Instruction::Add,
492                                         NSW, LHSKnown, RHSKnown);
493     break;
494   }
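  // Worked example for the add/sub case above (added, illustrative): with
  // DemandedMask == 0x0000FFFF on 'add nsw i32 %x, 65536', DemandedFromOps is
  // the low 16 bits, 65536 (0x10000) has no demanded bits set, so the
  // constant is shrunk to 0 and the nsw flag is dropped (NLZ > 0).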
495   case Instruction::Shl: {
496     const APInt *SA;
497     if (match(I->getOperand(1), m_APInt(SA))) {
498       const APInt *ShrAmt;
499       if (match(I->getOperand(0), m_Shr(m_Value(), m_APInt(ShrAmt))))
500         if (Instruction *Shr = dyn_cast<Instruction>(I->getOperand(0)))
501           if (Value *R = simplifyShrShlDemandedBits(Shr, *ShrAmt, I, *SA,
502                                                     DemandedMask, Known))
503             return R;
504 
505       uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
506       APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));
507 
508       // If the shift is NUW/NSW, then it does demand the high bits.
509       ShlOperator *IOp = cast<ShlOperator>(I);
510       if (IOp->hasNoSignedWrap())
511         DemandedMaskIn.setHighBits(ShiftAmt+1);
512       else if (IOp->hasNoUnsignedWrap())
513         DemandedMaskIn.setHighBits(ShiftAmt);
514 
515       if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
516         return I;
517       assert(!Known.hasConflict() && "Bits known to be one AND zero?");
518       Known.Zero <<= ShiftAmt;
519       Known.One  <<= ShiftAmt;
520       // low bits known zero.
521       if (ShiftAmt)
522         Known.Zero.setLowBits(ShiftAmt);
523     }
524     break;
525   }
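  // Illustrative numbers for the shl case above (added example): for
  // 'shl i32 %x, 8' with DemandedMask == 0xFFFF0000, DemandedMaskIn becomes
  // 0x00FFFF00 (the mask shifted right by 8), and the low 8 bits of the
  // result are added to Known.Zero.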
526   case Instruction::LShr: {
527     const APInt *SA;
528     if (match(I->getOperand(1), m_APInt(SA))) {
529       uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
530 
531       // Unsigned shift right.
532       APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
533 
534       // If the shift is exact, then it does demand the low bits (and knows that
535       // they are zero).
536       if (cast<LShrOperator>(I)->isExact())
537         DemandedMaskIn.setLowBits(ShiftAmt);
538 
539       if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
540         return I;
541       assert(!Known.hasConflict() && "Bits known to be one AND zero?");
542       Known.Zero.lshrInPlace(ShiftAmt);
543       Known.One.lshrInPlace(ShiftAmt);
544       if (ShiftAmt)
545         Known.Zero.setHighBits(ShiftAmt);  // high bits known zero.
546     }
547     break;
548   }
549   case Instruction::AShr: {
550     // If this is an arithmetic shift right and only the low-bit is set, we can
551     // always convert this into a logical shr, even if the shift amount is
552     // variable.  The low bit of the shift cannot be an input sign bit unless
553     // the shift amount is >= the size of the datatype, which is undefined.
554     if (DemandedMask.isOneValue()) {
555       // Perform the logical shift right.
556       Instruction *NewVal = BinaryOperator::CreateLShr(
557                         I->getOperand(0), I->getOperand(1), I->getName());
558       return InsertNewInstWith(NewVal, *I);
559     }
560 
561     // If the sign bit is the only bit demanded by this ashr, then there is no
562     // need to do it, the shift doesn't change the high bit.
563     if (DemandedMask.isSignMask())
564       return I->getOperand(0);
565 
566     const APInt *SA;
567     if (match(I->getOperand(1), m_APInt(SA))) {
568       uint32_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
569 
570       // Signed shift right.
571       APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
572       // If any of the high bits are demanded, we should set the sign bit as
573       // demanded.
574       if (DemandedMask.countLeadingZeros() <= ShiftAmt)
575         DemandedMaskIn.setSignBit();
576 
577       // If the shift is exact, then it does demand the low bits (and knows that
578       // they are zero).
579       if (cast<AShrOperator>(I)->isExact())
580         DemandedMaskIn.setLowBits(ShiftAmt);
581 
582       if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
583         return I;
584 
585       unsigned SignBits = ComputeNumSignBits(I->getOperand(0), Depth + 1, CxtI);
586 
587       assert(!Known.hasConflict() && "Bits known to be one AND zero?");
588       // Compute the new bits that are at the top now plus sign bits.
589       APInt HighBits(APInt::getHighBitsSet(
590           BitWidth, std::min(SignBits + ShiftAmt - 1, BitWidth)));
591       Known.Zero.lshrInPlace(ShiftAmt);
592       Known.One.lshrInPlace(ShiftAmt);
593 
594       // If the input sign bit is known to be zero, or if none of the top bits
595       // are demanded, turn this into an unsigned shift right.
596       assert(BitWidth > ShiftAmt && "Shift amount not saturated?");
597       if (Known.Zero[BitWidth-ShiftAmt-1] ||
598           !DemandedMask.intersects(HighBits)) {
599         BinaryOperator *LShr = BinaryOperator::CreateLShr(I->getOperand(0),
600                                                           I->getOperand(1));
601         LShr->setIsExact(cast<BinaryOperator>(I)->isExact());
602         return InsertNewInstWith(LShr, *I);
603       } else if (Known.One[BitWidth-ShiftAmt-1]) { // New bits are known one.
604         Known.One |= HighBits;
605       }
606     }
607     break;
608   }
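  // Illustrative case for the ashr handling above (added example): if only
  // bit 0 of 'ashr i32 %x, %s' is demanded, the isOneValue() branch rewrites
  // it to 'lshr i32 %x, %s', since the low bit can never be a sign-extended
  // bit for an in-range shift amount.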
609   case Instruction::UDiv: {
610     // UDiv doesn't demand low bits that are zero in the divisor.
611     const APInt *SA;
612     if (match(I->getOperand(1), m_APInt(SA))) {
613       // If the divide is exact, then it does demand the low bits.
614       if (cast<UDivOperator>(I)->isExact())
615         break;
616 
617       // FIXME: Take the demanded mask of the result into account.
618       unsigned RHSTrailingZeros = SA->countTrailingZeros();
619       APInt DemandedMaskIn =
620           APInt::getHighBitsSet(BitWidth, BitWidth - RHSTrailingZeros);
621       if (SimplifyDemandedBits(I, 0, DemandedMaskIn, LHSKnown, Depth + 1))
622         return I;
623 
624       // Propagate zero bits from the input.
625       Known.Zero.setHighBits(std::min(
626           BitWidth, LHSKnown.Zero.countLeadingOnes() + RHSTrailingZeros));
627     }
628     break;
629   }
630   case Instruction::SRem:
631     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
632       // X % -1 demands all the bits because we don't want to introduce
633       // INT_MIN % -1 (== undef) by accident.
634       if (Rem->isMinusOne())
635         break;
636       APInt RA = Rem->getValue().abs();
637       if (RA.isPowerOf2()) {
638         if (DemandedMask.ult(RA))    // srem won't affect demanded bits
639           return I->getOperand(0);
640 
641         APInt LowBits = RA - 1;
642         APInt Mask2 = LowBits | APInt::getSignMask(BitWidth);
643         if (SimplifyDemandedBits(I, 0, Mask2, LHSKnown, Depth + 1))
644           return I;
645 
646         // The low bits of LHS are unchanged by the srem.
647         Known.Zero = LHSKnown.Zero & LowBits;
648         Known.One = LHSKnown.One & LowBits;
649 
650         // If LHS is non-negative or has all low bits zero, then the upper bits
651         // are all zero.
652         if (LHSKnown.isNonNegative() || LowBits.isSubsetOf(LHSKnown.Zero))
653           Known.Zero |= ~LowBits;
654 
655         // If LHS is negative and not all low bits are zero, then the upper bits
656         // are all one.
657         if (LHSKnown.isNegative() && LowBits.intersects(LHSKnown.One))
658           Known.One |= ~LowBits;
659 
660         assert(!Known.hasConflict() && "Bits known to be one AND zero?");
661         break;
662       }
663     }
664 
665     // The sign bit is the LHS's sign bit, except when the result of the
666     // remainder is zero.
667     if (DemandedMask.isSignBitSet()) {
668       computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);
669       // If it's known zero, our sign bit is also zero.
670       if (LHSKnown.isNonNegative())
671         Known.makeNonNegative();
672     }
673     break;
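  // Illustrative numbers for the srem case above (added example): for
  // 'srem i32 %x, 8' with DemandedMask == 7, RA == 8 is a power of two and
  // DemandedMask.ult(RA) holds, so %x is returned unchanged.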
674   case Instruction::URem: {
675     KnownBits Known2(BitWidth);
676     APInt AllOnes = APInt::getAllOnesValue(BitWidth);
677     if (SimplifyDemandedBits(I, 0, AllOnes, Known2, Depth + 1) ||
678         SimplifyDemandedBits(I, 1, AllOnes, Known2, Depth + 1))
679       return I;
680 
681     unsigned Leaders = Known2.countMinLeadingZeros();
682     Known.Zero = APInt::getHighBitsSet(BitWidth, Leaders) & DemandedMask;
683     break;
684   }
685   case Instruction::Call:
686     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
687       switch (II->getIntrinsicID()) {
688       default: break;
689       case Intrinsic::bswap: {
690         // If the only bits demanded come from one byte of the bswap result,
691         // just shift the input byte into position to eliminate the bswap.
692         unsigned NLZ = DemandedMask.countLeadingZeros();
693         unsigned NTZ = DemandedMask.countTrailingZeros();
694 
695         // Round NTZ down to the next byte.  If we have 11 trailing zeros, then
696         // we need all the bits down to bit 8.  Likewise, round NLZ.  If we
697         // have 14 leading zeros, round to 8.
698         NLZ &= ~7;
699         NTZ &= ~7;
700         // If we need exactly one byte, we can do this transformation.
701         if (BitWidth-NLZ-NTZ == 8) {
702           unsigned ResultBit = NTZ;
703           unsigned InputBit = BitWidth-NTZ-8;
704 
705           // Replace this with either a left or right shift to get the byte into
706           // the right place.
707           Instruction *NewVal;
708           if (InputBit > ResultBit)
709             NewVal = BinaryOperator::CreateLShr(II->getArgOperand(0),
710                     ConstantInt::get(I->getType(), InputBit-ResultBit));
711           else
712             NewVal = BinaryOperator::CreateShl(II->getArgOperand(0),
713                     ConstantInt::get(I->getType(), ResultBit-InputBit));
714           NewVal->takeName(I);
715           return InsertNewInstWith(NewVal, *I);
716         }
717 
718         // TODO: Could compute known zero/one bits based on the input.
719         break;
720       }
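      // Illustrative numbers for the bswap case above (added example): if
      // only bits 0..7 of 'call i32 @llvm.bswap.i32(i32 %x)' are demanded,
      // NLZ == 24 and NTZ == 0, so the demanded byte comes from bits 24..31
      // of %x and the call is replaced by 'lshr i32 %x, 24'.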
721       case Intrinsic::fshr:
722       case Intrinsic::fshl: {
723         const APInt *SA;
724         if (!match(I->getOperand(2), m_APInt(SA)))
725           break;
726 
727         // Normalize to funnel shift left. APInt shifts of BitWidth are well-
728         // defined, so no need to special-case zero shifts here.
729         uint64_t ShiftAmt = SA->urem(BitWidth);
730         if (II->getIntrinsicID() == Intrinsic::fshr)
731           ShiftAmt = BitWidth - ShiftAmt;
732 
733         APInt DemandedMaskLHS(DemandedMask.lshr(ShiftAmt));
734         APInt DemandedMaskRHS(DemandedMask.shl(BitWidth - ShiftAmt));
735         if (SimplifyDemandedBits(I, 0, DemandedMaskLHS, LHSKnown, Depth + 1) ||
736             SimplifyDemandedBits(I, 1, DemandedMaskRHS, RHSKnown, Depth + 1))
737           return I;
738 
739         Known.Zero = LHSKnown.Zero.shl(ShiftAmt) |
740                      RHSKnown.Zero.lshr(BitWidth - ShiftAmt);
741         Known.One = LHSKnown.One.shl(ShiftAmt) |
742                     RHSKnown.One.lshr(BitWidth - ShiftAmt);
743         break;
744       }
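      // Illustrative numbers for the funnel-shift case above (added example):
      // 'fshr i32 %a, %b, 8' is normalized to a left funnel shift of
      // ShiftAmt == 24, so the result is (%a << 24) | (%b lshr 8) and the
      // demanded mask is split across the operands accordingly.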
745       case Intrinsic::x86_mmx_pmovmskb:
746       case Intrinsic::x86_sse_movmsk_ps:
747       case Intrinsic::x86_sse2_movmsk_pd:
748       case Intrinsic::x86_sse2_pmovmskb_128:
749       case Intrinsic::x86_avx_movmsk_ps_256:
750       case Intrinsic::x86_avx_movmsk_pd_256:
751       case Intrinsic::x86_avx2_pmovmskb: {
752         // MOVMSK copies the vector elements' sign bits to the low bits
753         // and zeros the high bits.
754         unsigned ArgWidth;
755         if (II->getIntrinsicID() == Intrinsic::x86_mmx_pmovmskb) {
756           ArgWidth = 8; // Arg is x86_mmx, but treated as <8 x i8>.
757         } else {
758           auto Arg = II->getArgOperand(0);
759           auto ArgType = cast<VectorType>(Arg->getType());
760           ArgWidth = ArgType->getNumElements();
761         }
762 
763         // If we don't need any of low bits then return zero,
764         // we know that DemandedMask is non-zero already.
765         APInt DemandedElts = DemandedMask.zextOrTrunc(ArgWidth);
766         if (DemandedElts.isNullValue())
767           return ConstantInt::getNullValue(VTy);
768 
769         // We know that the upper bits are set to zero.
770         Known.Zero.setBitsFrom(ArgWidth);
771         return nullptr;
772       }
773       case Intrinsic::x86_sse42_crc32_64_64:
774         Known.Zero.setBitsFrom(32);
775         return nullptr;
776       }
777     }
778     computeKnownBits(V, Known, Depth, CxtI);
779     break;
780   }
781 
782   // If the client is only demanding bits that we know, return the known
783   // constant.
784   if (DemandedMask.isSubsetOf(Known.Zero|Known.One))
785     return Constant::getIntegerValue(VTy, Known.One);
786   return nullptr;
787 }
788 
789 /// Helper routine of SimplifyDemandedUseBits. It computes Known
790 /// bits. It also tries to handle simplifications that can be done based on
791 /// DemandedMask, but without modifying the Instruction.
792 Value *InstCombiner::SimplifyMultipleUseDemandedBits(Instruction *I,
793                                                      const APInt &DemandedMask,
794                                                      KnownBits &Known,
795                                                      unsigned Depth,
796                                                      Instruction *CxtI) {
797   unsigned BitWidth = DemandedMask.getBitWidth();
798   Type *ITy = I->getType();
799 
800   KnownBits LHSKnown(BitWidth);
801   KnownBits RHSKnown(BitWidth);
802 
803   // Despite the fact that we can't simplify this instruction in every user's
804   // context, we can at least compute the known bits, and we can
805   // do simplifications that apply to *just* the one user if we know that
806   // this instruction has a simpler value in that context.
807   switch (I->getOpcode()) {
808   case Instruction::And: {
809     // If either the LHS or the RHS are Zero, the result is zero.
810     computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
811     computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1,
812                      CxtI);
813 
814     // Output known-0 are known to be clear if zero in either the LHS | RHS.
815     APInt IKnownZero = RHSKnown.Zero | LHSKnown.Zero;
816     // Output known-1 bits are only known if set in both the LHS & RHS.
817     APInt IKnownOne = RHSKnown.One & LHSKnown.One;
818 
819     // If the client is only demanding bits that we know, return the known
820     // constant.
821     if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
822       return Constant::getIntegerValue(ITy, IKnownOne);
823 
824     // If all of the demanded bits are known 1 on one side, return the other.
825     // These bits cannot contribute to the result of the 'and' in this
826     // context.
827     if (DemandedMask.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
828       return I->getOperand(0);
829     if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
830       return I->getOperand(1);
831 
832     Known.Zero = std::move(IKnownZero);
833     Known.One  = std::move(IKnownOne);
834     break;
835   }
836   case Instruction::Or: {
837     // We can simplify (X|Y) -> X or Y in the user's context if we know that
838     // only bits from X or Y are demanded.
839 
840     // If either the LHS or the RHS are One, the result is One.
841     computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
842     computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1,
843                      CxtI);
844 
845     // Output known-0 bits are only known if clear in both the LHS & RHS.
846     APInt IKnownZero = RHSKnown.Zero & LHSKnown.Zero;
847     // Output known-1 are known to be set if set in either the LHS | RHS.
848     APInt IKnownOne = RHSKnown.One | LHSKnown.One;
849 
850     // If the client is only demanding bits that we know, return the known
851     // constant.
852     if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
853       return Constant::getIntegerValue(ITy, IKnownOne);
854 
855     // If all of the demanded bits are known zero on one side, return the
856     // other.  These bits cannot contribute to the result of the 'or' in this
857     // context.
858     if (DemandedMask.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
859       return I->getOperand(0);
860     if (DemandedMask.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
861       return I->getOperand(1);
862 
863     Known.Zero = std::move(IKnownZero);
864     Known.One  = std::move(IKnownOne);
865     break;
866   }
867   case Instruction::Xor: {
868     // We can simplify (X^Y) -> X or Y in the user's context if we know that
869     // only bits from X or Y are demanded.
870 
871     computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
872     computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1,
873                      CxtI);
874 
875     // Output known-0 bits are known if clear or set in both the LHS & RHS.
876     APInt IKnownZero = (RHSKnown.Zero & LHSKnown.Zero) |
877                        (RHSKnown.One & LHSKnown.One);
878     // Output known-1 are known to be set if set in only one of the LHS, RHS.
879     APInt IKnownOne =  (RHSKnown.Zero & LHSKnown.One) |
880                        (RHSKnown.One & LHSKnown.Zero);
881 
882     // If the client is only demanding bits that we know, return the known
883     // constant.
884     if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
885       return Constant::getIntegerValue(ITy, IKnownOne);
886 
887     // If all of the demanded bits are known zero on one side, return the
888     // other.
889     if (DemandedMask.isSubsetOf(RHSKnown.Zero))
890       return I->getOperand(0);
891     if (DemandedMask.isSubsetOf(LHSKnown.Zero))
892       return I->getOperand(1);
893 
894     // Output known-0 bits are known if clear or set in both the LHS & RHS.
895     Known.Zero = std::move(IKnownZero);
896     // Output known-1 are known to be set if set in only one of the LHS, RHS.
897     Known.One  = std::move(IKnownOne);
898     break;
899   }
900   default:
901     // Compute the Known bits to simplify things downstream.
902     computeKnownBits(I, Known, Depth, CxtI);
903 
904     // If this user is only demanding bits that we know, return the known
905     // constant.
906     if (DemandedMask.isSubsetOf(Known.Zero|Known.One))
907       return Constant::getIntegerValue(ITy, Known.One);
908 
909     break;
910   }
911 
912   return nullptr;
913 }
914 
915 
916 /// Helper routine of SimplifyDemandedUseBits. It tries to simplify
917 /// "E1 = (X lsr C1) << C2", where the C1 and C2 are constant, into
918 /// "E2 = X << (C2 - C1)" or "E2 = X >> (C1 - C2)", depending on the sign
919 /// of "C2-C1".
920 ///
921 /// Suppose E1 and E2 are generally different in bits S={bm, bm+1,
922 /// ..., bn}, without considering the specific value X is holding.
923 /// This transformation is legal iff one of the following conditions holds:
924 ///  1) All the bits in S are 0, in which case E1 == E2.
925 ///  2) We don't care about those bits in S, per the input DemandedMask.
926 ///  3) Combination of 1) and 2). Some bits in S are 0, and we don't care about
927 ///     the rest of the bits.
928 ///
929 /// Currently we only test condition 2).
930 ///
931 /// As with SimplifyDemandedUseBits, it returns NULL if the simplification was
932 /// not successful.
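///
/// Hedged illustration (added example): for i8 X with C1 == 2 and C2 == 4,
///   E1 = (X lshr 2) shl 4   and   E2 = X shl 2
/// differ only in bits S = {2, 3}; with DemandedMask == 0xF0, condition 2)
/// holds and E1 is rewritten to the single shift E2 (a shl by C2 - C1 == 2).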
933 Value *
934 InstCombiner::simplifyShrShlDemandedBits(Instruction *Shr, const APInt &ShrOp1,
935                                          Instruction *Shl, const APInt &ShlOp1,
936                                          const APInt &DemandedMask,
937                                          KnownBits &Known) {
938   if (!ShlOp1 || !ShrOp1)
939     return nullptr; // No-op.
940 
941   Value *VarX = Shr->getOperand(0);
942   Type *Ty = VarX->getType();
943   unsigned BitWidth = Ty->getScalarSizeInBits();
944   if (ShlOp1.uge(BitWidth) || ShrOp1.uge(BitWidth))
945     return nullptr; // Undef.
946 
947   unsigned ShlAmt = ShlOp1.getZExtValue();
948   unsigned ShrAmt = ShrOp1.getZExtValue();
949 
950   Known.One.clearAllBits();
951   Known.Zero.setLowBits(ShlAmt - 1);
952   Known.Zero &= DemandedMask;
953 
954   APInt BitMask1(APInt::getAllOnesValue(BitWidth));
955   APInt BitMask2(APInt::getAllOnesValue(BitWidth));
956 
957   bool isLshr = (Shr->getOpcode() == Instruction::LShr);
958   BitMask1 = isLshr ? (BitMask1.lshr(ShrAmt) << ShlAmt) :
959                       (BitMask1.ashr(ShrAmt) << ShlAmt);
960 
961   if (ShrAmt <= ShlAmt) {
962     BitMask2 <<= (ShlAmt - ShrAmt);
963   } else {
964     BitMask2 = isLshr ? BitMask2.lshr(ShrAmt - ShlAmt):
965                         BitMask2.ashr(ShrAmt - ShlAmt);
966   }
967 
968   // Check if condition-2 (see the comment to this function) is satisfied.
969   if ((BitMask1 & DemandedMask) == (BitMask2 & DemandedMask)) {
970     if (ShrAmt == ShlAmt)
971       return VarX;
972 
973     if (!Shr->hasOneUse())
974       return nullptr;
975 
976     BinaryOperator *New;
977     if (ShrAmt < ShlAmt) {
978       Constant *Amt = ConstantInt::get(VarX->getType(), ShlAmt - ShrAmt);
979       New = BinaryOperator::CreateShl(VarX, Amt);
980       BinaryOperator *Orig = cast<BinaryOperator>(Shl);
981       New->setHasNoSignedWrap(Orig->hasNoSignedWrap());
982       New->setHasNoUnsignedWrap(Orig->hasNoUnsignedWrap());
983     } else {
984       Constant *Amt = ConstantInt::get(VarX->getType(), ShrAmt - ShlAmt);
985       New = isLshr ? BinaryOperator::CreateLShr(VarX, Amt) :
986                      BinaryOperator::CreateAShr(VarX, Amt);
987       if (cast<BinaryOperator>(Shr)->isExact())
988         New->setIsExact(true);
989     }
990 
991     return InsertNewInstWith(New, *Shl);
992   }
993 
994   return nullptr;
995 }
996 
997 /// Implement SimplifyDemandedVectorElts for amdgcn buffer and image intrinsics.
998 ///
999 /// Note: This only supports non-TFE/LWE image intrinsic calls; those have
1000 ///       struct returns.
1001 Value *InstCombiner::simplifyAMDGCNMemoryIntrinsicDemanded(IntrinsicInst *II,
1002                                                            APInt DemandedElts,
1003                                                            int DMaskIdx) {
1004 
1005   // FIXME: Allow v3i16/v3f16 in buffer intrinsics when the types are fully supported.
1006   if (DMaskIdx < 0 &&
1007       II->getType()->getScalarSizeInBits() != 32 &&
1008       DemandedElts.getActiveBits() == 3)
1009     return nullptr;
1010 
1011   unsigned VWidth = II->getType()->getVectorNumElements();
1012   if (VWidth == 1)
1013     return nullptr;
1014 
1015   ConstantInt *NewDMask = nullptr;
1016 
1017   if (DMaskIdx < 0) {
1018     // Pretend that a prefix of elements is demanded to simplify the code
1019     // below.
1020     DemandedElts = (1 << DemandedElts.getActiveBits()) - 1;
1021   } else {
1022     ConstantInt *DMask = cast<ConstantInt>(II->getArgOperand(DMaskIdx));
1023     unsigned DMaskVal = DMask->getZExtValue() & 0xf;
1024 
1025     // Mask off values that are undefined because the dmask doesn't cover them
1026     DemandedElts &= (1 << countPopulation(DMaskVal)) - 1;
1027 
1028     unsigned NewDMaskVal = 0;
1029     unsigned OrigLoadIdx = 0;
1030     for (unsigned SrcIdx = 0; SrcIdx < 4; ++SrcIdx) {
1031       const unsigned Bit = 1 << SrcIdx;
1032       if (!!(DMaskVal & Bit)) {
1033         if (!!DemandedElts[OrigLoadIdx])
1034           NewDMaskVal |= Bit;
1035         OrigLoadIdx++;
1036       }
1037     }
1038 
1039     if (DMaskVal != NewDMaskVal)
1040       NewDMask = ConstantInt::get(DMask->getType(), NewDMaskVal);
1041   }
1042 
1043   unsigned NewNumElts = DemandedElts.countPopulation();
1044   if (!NewNumElts)
1045     return UndefValue::get(II->getType());
1046 
1047   if (NewNumElts >= VWidth && DemandedElts.isMask()) {
1048     if (NewDMask)
1049       II->setArgOperand(DMaskIdx, NewDMask);
1050     return nullptr;
1051   }
1052 
1053   // Determine the overload types of the original intrinsic.
1054   auto IID = II->getIntrinsicID();
1055   SmallVector<Intrinsic::IITDescriptor, 16> Table;
1056   getIntrinsicInfoTableEntries(IID, Table);
1057   ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
1058 
1059   // Validate function argument and return types, extracting overloaded types
1060   // along the way.
1061   FunctionType *FTy = II->getCalledFunction()->getFunctionType();
1062   SmallVector<Type *, 6> OverloadTys;
1063   Intrinsic::matchIntrinsicSignature(FTy, TableRef, OverloadTys);
1064 
1065   Module *M = II->getParent()->getParent()->getParent();
1066   Type *EltTy = II->getType()->getVectorElementType();
1067   Type *NewTy = (NewNumElts == 1) ? EltTy : VectorType::get(EltTy, NewNumElts);
1068 
1069   OverloadTys[0] = NewTy;
1070   Function *NewIntrin = Intrinsic::getDeclaration(M, IID, OverloadTys);
1071 
1072   SmallVector<Value *, 16> Args;
1073   for (unsigned I = 0, E = II->getNumArgOperands(); I != E; ++I)
1074     Args.push_back(II->getArgOperand(I));
1075 
1076   if (NewDMask)
1077     Args[DMaskIdx] = NewDMask;
1078 
1079   IRBuilderBase::InsertPointGuard Guard(Builder);
1080   Builder.SetInsertPoint(II);
1081 
1082   CallInst *NewCall = Builder.CreateCall(NewIntrin, Args);
1083   NewCall->takeName(II);
1084   NewCall->copyMetadata(*II);
1085 
1086   if (NewNumElts == 1) {
1087     return Builder.CreateInsertElement(UndefValue::get(II->getType()), NewCall,
1088                                        DemandedElts.countTrailingZeros());
1089   }
1090 
1091   SmallVector<uint32_t, 8> EltMask;
1092   unsigned NewLoadIdx = 0;
1093   for (unsigned OrigLoadIdx = 0; OrigLoadIdx < VWidth; ++OrigLoadIdx) {
1094     if (!!DemandedElts[OrigLoadIdx])
1095       EltMask.push_back(NewLoadIdx++);
1096     else
1097       EltMask.push_back(NewNumElts);
1098   }
1099 
1100   Value *Shuffle =
1101       Builder.CreateShuffleVector(NewCall, UndefValue::get(NewTy), EltMask);
1102 
1103   return Shuffle;
1104 }
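
// Hedged walk-through of the function above (added example): for an image
// sample intrinsic returning <4 x float> with dmask == 0xF where only
// element 0 is demanded, the dmask operand is narrowed to 0x1, the call is
// recreated with a float return type, and the scalar result is inserted
// into an undef <4 x float> at index 0 to preserve the original type.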
1105 
1106 /// The specified value produces a vector with any number of elements.
1107 /// This method analyzes which elements of the operand are undef and returns
1108 /// that information in UndefElts.
1109 ///
1110 /// DemandedElts contains the set of elements that are actually used by the
1111 /// caller, and by default (AllowMultipleUsers equals false) the value is
1112 /// simplified only if it has a single caller. If AllowMultipleUsers is set
1113 /// to true, DemandedElts refers to the union of sets of elements that are
1114 /// used by all callers.
1115 ///
1116 /// If the information about demanded elements can be used to simplify the
1117 /// operation, the operation is simplified, then the resultant value is
1118 /// returned.  This returns null if no change was made.
1119 Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
1120                                                 APInt &UndefElts,
1121                                                 unsigned Depth,
1122                                                 bool AllowMultipleUsers) {
1123   unsigned VWidth = V->getType()->getVectorNumElements();
1124   APInt EltMask(APInt::getAllOnesValue(VWidth));
1125   assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!");
1126 
1127   if (isa<UndefValue>(V)) {
1128     // If the entire vector is undefined, just return this info.
1129     UndefElts = EltMask;
1130     return nullptr;
1131   }
1132 
1133   if (DemandedElts.isNullValue()) { // If nothing is demanded, provide undef.
1134     UndefElts = EltMask;
1135     return UndefValue::get(V->getType());
1136   }
1137 
1138   UndefElts = 0;
1139 
1140   if (auto *C = dyn_cast<Constant>(V)) {
1141     // Check if this is identity. If so, return 0 since we are not simplifying
1142     // anything.
1143     if (DemandedElts.isAllOnesValue())
1144       return nullptr;
1145 
1146     Type *EltTy = cast<VectorType>(V->getType())->getElementType();
1147     Constant *Undef = UndefValue::get(EltTy);
1148     SmallVector<Constant*, 16> Elts;
1149     for (unsigned i = 0; i != VWidth; ++i) {
1150       if (!DemandedElts[i]) {   // If not demanded, set to undef.
1151         Elts.push_back(Undef);
1152         UndefElts.setBit(i);
1153         continue;
1154       }
1155 
1156       Constant *Elt = C->getAggregateElement(i);
1157       if (!Elt) return nullptr;
1158 
1159       if (isa<UndefValue>(Elt)) {   // Already undef.
1160         Elts.push_back(Undef);
1161         UndefElts.setBit(i);
1162       } else {                               // Otherwise, defined.
1163         Elts.push_back(Elt);
1164       }
1165     }
1166 
1167     // If we changed the constant, return it.
1168     Constant *NewCV = ConstantVector::get(Elts);
1169     return NewCV != C ? NewCV : nullptr;
1170   }
1171 
1172   // Limit search depth.
1173   if (Depth == 10)
1174     return nullptr;
1175 
1176   if (!AllowMultipleUsers) {
1177     // If multiple users are using the root value, proceed with
1178     // simplification conservatively assuming that all elements
1179     // are needed.
1180     if (!V->hasOneUse()) {
1181       // Quit if we find multiple users of a non-root value though.
1182       // They'll be handled when it's their turn to be visited by
1183       // the main instcombine process.
1184       if (Depth != 0)
1185         // TODO: Just compute the UndefElts information recursively.
1186         return nullptr;
1187 
1188       // Conservatively assume that all elements are needed.
1189       DemandedElts = EltMask;
1190     }
1191   }
1192 
1193   Instruction *I = dyn_cast<Instruction>(V);
1194   if (!I) return nullptr;        // Only analyze instructions.
1195 
1196   bool MadeChange = false;
1197   auto simplifyAndSetOp = [&](Instruction *Inst, unsigned OpNum,
1198                               APInt Demanded, APInt &Undef) {
1199     auto *II = dyn_cast<IntrinsicInst>(Inst);
1200     Value *Op = II ? II->getArgOperand(OpNum) : Inst->getOperand(OpNum);
1201     if (Value *V = SimplifyDemandedVectorElts(Op, Demanded, Undef, Depth + 1)) {
1202       if (II)
1203         II->setArgOperand(OpNum, V);
1204       else
1205         Inst->setOperand(OpNum, V);
1206       MadeChange = true;
1207     }
1208   };
1209 
1210   APInt UndefElts2(VWidth, 0);
1211   APInt UndefElts3(VWidth, 0);
1212   switch (I->getOpcode()) {
1213   default: break;
1214 
1215   case Instruction::GetElementPtr: {
1216     // The LangRef requires that struct geps have all constant indices.  As
1217     // such, we can't convert any operand to partial undef.
1218     auto mayIndexStructType = [](GetElementPtrInst &GEP) {
1219       for (auto I = gep_type_begin(GEP), E = gep_type_end(GEP);
1220            I != E; I++)
1221         if (I.isStruct())
1222           return true;
1223       return false;
1224     };
1225     if (mayIndexStructType(cast<GetElementPtrInst>(*I)))
1226       break;
1227 
1228     // Conservatively track the demanded elements back through any vector
1229     // operands we may have.  We know there must be at least one, or we
1230     // wouldn't have a vector result to get here. Note that we intentionally
1231     // merge the undef bits here since gepping with either an undef base or
1232     // index results in undef.
1233     for (unsigned i = 0; i < I->getNumOperands(); i++) {
1234       if (isa<UndefValue>(I->getOperand(i))) {
1235         // If the entire vector is undefined, just return this info.
1236         UndefElts = EltMask;
1237         return nullptr;
1238       }
1239       if (I->getOperand(i)->getType()->isVectorTy()) {
1240         APInt UndefEltsOp(VWidth, 0);
1241         simplifyAndSetOp(I, i, DemandedElts, UndefEltsOp);
1242         UndefElts |= UndefEltsOp;
1243       }
1244     }
1245 
1246     break;
1247   }
1248   case Instruction::InsertElement: {
1249     // If this is a variable index, we don't know which element it overwrites,
1250     // so demand exactly the same input as we produce.
1251     ConstantInt *Idx = dyn_cast<ConstantInt>(I->getOperand(2));
1252     if (!Idx) {
1253       // Note that we can't propagate undef elt info, because we don't know
1254       // which elt is getting updated.
1255       simplifyAndSetOp(I, 0, DemandedElts, UndefElts2);
1256       break;
1257     }
1258 
1259     // The element inserted overwrites whatever was there, so the input demanded
1260     // set is simpler than the output set.
1261     unsigned IdxNo = Idx->getZExtValue();
1262     APInt PreInsertDemandedElts = DemandedElts;
1263     if (IdxNo < VWidth)
1264       PreInsertDemandedElts.clearBit(IdxNo);
1265 
1266     simplifyAndSetOp(I, 0, PreInsertDemandedElts, UndefElts);
1267 
1268     // If this is inserting an element that isn't demanded, remove this
1269     // insertelement.
1270     if (IdxNo >= VWidth || !DemandedElts[IdxNo]) {
1271       Worklist.Add(I);
1272       return I->getOperand(0);
1273     }
1274 
1275     // The inserted element is defined.
1276     UndefElts.clearBit(IdxNo);
1277     break;
1278   }
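  // Illustrative case for the insertelement handling above (added example):
  // if element 3 of the result is never demanded,
  //   %v = insertelement <4 x i32> %x, i32 %y, i32 3
  // simplifies to %x.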
1279   case Instruction::ShuffleVector: {
1280     auto *Shuffle = cast<ShuffleVectorInst>(I);
1281     assert(Shuffle->getOperand(0)->getType() ==
1282            Shuffle->getOperand(1)->getType() &&
1283            "Expected shuffle operands to have same type");
1284     unsigned OpWidth =
1285         Shuffle->getOperand(0)->getType()->getVectorNumElements();
1286     APInt LeftDemanded(OpWidth, 0), RightDemanded(OpWidth, 0);
1287     for (unsigned i = 0; i < VWidth; i++) {
1288       if (DemandedElts[i]) {
1289         unsigned MaskVal = Shuffle->getMaskValue(i);
1290         if (MaskVal != -1u) {
1291           assert(MaskVal < OpWidth * 2 &&
1292                  "shufflevector mask index out of range!");
1293           if (MaskVal < OpWidth)
1294             LeftDemanded.setBit(MaskVal);
1295           else
1296             RightDemanded.setBit(MaskVal - OpWidth);
1297         }
1298       }
1299     }
1300 
1301     APInt LHSUndefElts(OpWidth, 0);
1302     simplifyAndSetOp(I, 0, LeftDemanded, LHSUndefElts);
1303 
1304     APInt RHSUndefElts(OpWidth, 0);
1305     simplifyAndSetOp(I, 1, RightDemanded, RHSUndefElts);
1306 
1307     // If this shuffle does not change the vector length and the elements
1308     // demanded by this shuffle are an identity mask, then this shuffle is
1309     // unnecessary.
1310     //
1311     // We are assuming canonical form for the mask, so the source vector is
1312     // operand 0 and operand 1 is not used.
1313     //
1314     // Note that if an element is demanded and this shuffle mask is undefined
1315     // for that element, then the shuffle is not considered an identity
1316     // operation. The shuffle prevents poison from the operand vector from
1317     // leaking to the result by replacing poison with an undefined value.
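    // For example, if only lanes 0 and 2 are demanded, then
    //   %s = shufflevector <4 x i32> %v, <4 x i32> undef,
    //                      <4 x i32> <i32 0, i32 3, i32 2, i32 1>
    // is an identity on every demanded lane and folds to %v.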
    if (VWidth == OpWidth) {
      bool IsIdentityShuffle = true;
      for (unsigned i = 0; i < VWidth; i++) {
        unsigned MaskVal = Shuffle->getMaskValue(i);
        if (DemandedElts[i] && i != MaskVal) {
          IsIdentityShuffle = false;
          break;
        }
      }
      if (IsIdentityShuffle)
        return Shuffle->getOperand(0);
    }

    bool NewUndefElts = false;
    unsigned LHSIdx = -1u, LHSValIdx = -1u;
    unsigned RHSIdx = -1u, RHSValIdx = -1u;
    bool LHSUniform = true;
    bool RHSUniform = true;
    for (unsigned i = 0; i < VWidth; i++) {
      unsigned MaskVal = Shuffle->getMaskValue(i);
      if (MaskVal == -1u) {
        UndefElts.setBit(i);
      } else if (!DemandedElts[i]) {
        NewUndefElts = true;
        UndefElts.setBit(i);
      } else if (MaskVal < OpWidth) {
        if (LHSUndefElts[MaskVal]) {
          NewUndefElts = true;
          UndefElts.setBit(i);
        } else {
          LHSIdx = LHSIdx == -1u ? i : OpWidth;
          LHSValIdx = LHSValIdx == -1u ? MaskVal : OpWidth;
          LHSUniform = LHSUniform && (MaskVal == i);
        }
      } else {
        if (RHSUndefElts[MaskVal - OpWidth]) {
          NewUndefElts = true;
          UndefElts.setBit(i);
        } else {
          RHSIdx = RHSIdx == -1u ? i : OpWidth;
          RHSValIdx = RHSValIdx == -1u ? MaskVal - OpWidth : OpWidth;
          RHSUniform = RHSUniform && (MaskVal - OpWidth == i);
        }
      }
    }

    // Try to transform a shuffle that uses a single element of a constant
    // vector into an insertelement instruction:
    // shufflevector V, C, <v1, v2, .., ci, .., vm> ->
    // insertelement V, C[ci], ci-n
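    // For example:
    //   shufflevector <4 x i32> %V, <4 x i32> <i32 11, i32 22, i32 33, i32 44>,
    //                 <4 x i32> <i32 0, i32 1, i32 6, i32 3>
    //   --> insertelement <4 x i32> %V, i32 33, i32 2
    // since only element 2 (mask value 6 - 4 = 2) comes from the constant.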
    if (OpWidth == Shuffle->getType()->getNumElements()) {
      Value *Op = nullptr;
      Constant *Value = nullptr;
      unsigned Idx = -1u;

      // Find a constant vector from which only a single element is used in
      // the shuffle (LHS or RHS).
      if (LHSIdx < OpWidth && RHSUniform) {
        if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(0))) {
          Op = Shuffle->getOperand(1);
          Value = CV->getOperand(LHSValIdx);
          Idx = LHSIdx;
        }
      }
      if (RHSIdx < OpWidth && LHSUniform) {
        if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(1))) {
          Op = Shuffle->getOperand(0);
          Value = CV->getOperand(RHSValIdx);
          Idx = RHSIdx;
        }
      }
      // Found a constant vector with a single used element - convert to
      // insertelement.
      if (Op && Value) {
        Instruction *New = InsertElementInst::Create(
            Op, Value, ConstantInt::get(Type::getInt32Ty(I->getContext()), Idx),
            Shuffle->getName());
        InsertNewInstWith(New, *Shuffle);
        return New;
      }
    }
    if (NewUndefElts) {
      // Add additional discovered undefs.
      SmallVector<Constant*, 16> Elts;
      for (unsigned i = 0; i < VWidth; ++i) {
        if (UndefElts[i])
          Elts.push_back(UndefValue::get(Type::getInt32Ty(I->getContext())));
        else
          Elts.push_back(ConstantInt::get(Type::getInt32Ty(I->getContext()),
                                          Shuffle->getMaskValue(i)));
      }
      I->setOperand(2, ConstantVector::get(Elts));
      MadeChange = true;
    }
    break;
  }
  case Instruction::Select: {
    // If this is a vector select, try to transform the select condition based
    // on the current demanded elements.
    SelectInst *Sel = cast<SelectInst>(I);
    if (Sel->getCondition()->getType()->isVectorTy()) {
      // TODO: We are not doing anything with UndefElts based on this call.
      // It is overwritten below based on the other select operands. If an
      // element of the select condition is known undef, then we are free to
      // choose the output value from either arm of the select. If we know that
      // one of those values is undef, then the output can be undef.
      simplifyAndSetOp(I, 0, DemandedElts, UndefElts);
    }

    // Next, see if we can transform the arms of the select.
    APInt DemandedLHS(DemandedElts), DemandedRHS(DemandedElts);
    if (auto *CV = dyn_cast<ConstantVector>(Sel->getCondition())) {
      for (unsigned i = 0; i < VWidth; i++) {
        // isNullValue() always returns false when called on a ConstantExpr.
        // Skip constant expressions to avoid propagating incorrect information.
        Constant *CElt = CV->getAggregateElement(i);
        if (isa<ConstantExpr>(CElt))
          continue;
        // TODO: If a select condition element is undef, we can demand from
        // either side. If one side is known undef, choosing that side would
        // propagate undef.
        if (CElt->isNullValue())
          DemandedLHS.clearBit(i);
        else
          DemandedRHS.clearBit(i);
      }
    }
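    // For example, with a condition of <2 x i1> <i1 true, i1 false>, lane 0
    // of the result can only come from the true arm and lane 1 only from the
    // false arm, so each arm is asked for just the lanes it can supply.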

    simplifyAndSetOp(I, 1, DemandedLHS, UndefElts2);
    simplifyAndSetOp(I, 2, DemandedRHS, UndefElts3);

    // Output elements are undefined if the element from each arm is undefined.
    // TODO: This can be improved. See comment in select condition handling.
    UndefElts = UndefElts2 & UndefElts3;
    break;
  }
  case Instruction::BitCast: {
    // Vector->vector casts only.
    VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
    if (!VTy) break;
    unsigned InVWidth = VTy->getNumElements();
    APInt InputDemandedElts(InVWidth, 0);
    UndefElts2 = APInt(InVWidth, 0);
    unsigned Ratio;

    if (VWidth == InVWidth) {
      // If we are converting from <4 x i32> -> <4 x float>, we demand the same
      // elements as are demanded of us.
      Ratio = 1;
      InputDemandedElts = DemandedElts;
    } else if ((VWidth % InVWidth) == 0) {
      // If the number of elements in the output is a multiple of the number of
      // elements in the input then an input element is live if any of the
      // corresponding output elements are live.
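      // For example, when demanding element 3 of the <4 x i32> result of
      //   %c = bitcast <2 x i64> %x to <4 x i32>
      // we demand element 3 / 2 = 1 of the input.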
      Ratio = VWidth / InVWidth;
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
        if (DemandedElts[OutIdx])
          InputDemandedElts.setBit(OutIdx / Ratio);
    } else if ((InVWidth % VWidth) == 0) {
      // If the number of elements in the input is a multiple of the number of
      // elements in the output then an input element is live if the
      // corresponding output element is live.
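      // For example, if element 0 of the <2 x i64> result of
      //   %c = bitcast <4 x i32> %x to <2 x i64>
      // is demanded, then input elements 0 and 1 are both demanded.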
      Ratio = InVWidth / VWidth;
      for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
        if (DemandedElts[InIdx / Ratio])
          InputDemandedElts.setBit(InIdx);
    } else {
      // Unsupported so far.
      break;
    }

    simplifyAndSetOp(I, 0, InputDemandedElts, UndefElts2);

    if (VWidth == InVWidth) {
      UndefElts = UndefElts2;
    } else if ((VWidth % InVWidth) == 0) {
      // If the number of elements in the output is a multiple of the number of
      // elements in the input then an output element is undef if the
      // corresponding input element is undef.
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
        if (UndefElts2[OutIdx / Ratio])
          UndefElts.setBit(OutIdx);
    } else if ((InVWidth % VWidth) == 0) {
      // If the number of elements in the input is a multiple of the number of
      // elements in the output then an output element is undef if all of the
      // corresponding input elements are undef.
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) {
        APInt SubUndef = UndefElts2.lshr(OutIdx * Ratio).zextOrTrunc(Ratio);
        if (SubUndef.countPopulation() == Ratio)
          UndefElts.setBit(OutIdx);
      }
    } else {
      llvm_unreachable("Unimplemented bitcast element ratio");
    }
    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
    simplifyAndSetOp(I, 0, DemandedElts, UndefElts);
    break;

  case Instruction::Call: {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
    if (!II) break;
    switch (II->getIntrinsicID()) {
    case Intrinsic::masked_gather: // fallthrough
    case Intrinsic::masked_load: {
      // Subtlety: If we load from a pointer, the pointer must be valid
      // regardless of whether the element is demanded.  Doing otherwise risks
      // segfaults that didn't exist in the original program.
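      // For example, with a constant mask of <i1 0, i1 1>, lane 0 of the
      // result comes from the passthrough and lane 1 from memory, so lane 0
      // of the pointer vector (for gathers) and lane 1 of the passthrough
      // are not demanded.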
      APInt DemandedPtrs(APInt::getAllOnesValue(VWidth)),
        DemandedPassThrough(DemandedElts);
      if (auto *CV = dyn_cast<ConstantVector>(II->getOperand(2)))
        for (unsigned i = 0; i < VWidth; i++) {
          Constant *CElt = CV->getAggregateElement(i);
          if (CElt->isNullValue())
            DemandedPtrs.clearBit(i);
          else if (CElt->isAllOnesValue())
            DemandedPassThrough.clearBit(i);
        }
      if (II->getIntrinsicID() == Intrinsic::masked_gather)
        simplifyAndSetOp(II, 0, DemandedPtrs, UndefElts2);
      simplifyAndSetOp(II, 3, DemandedPassThrough, UndefElts3);

      // Output elements are undefined if the elements from both sources are.
      // TODO: can strengthen via mask as well.
      UndefElts = UndefElts2 & UndefElts3;
      break;
    }
    case Intrinsic::x86_xop_vfrcz_ss:
    case Intrinsic::x86_xop_vfrcz_sd:
      // The instructions for these intrinsics are specified to zero the upper
      // bits, not pass them through like other scalar intrinsics. So we
      // shouldn't just use Arg0 if DemandedElts[0] is clear, like we do for
      // other intrinsics. Instead we should return a zero vector.
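      // For example, if lane 0 of the result is not read, every lane that is
      // read is known to be zero, so the whole call folds to a zero vector.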
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return ConstantAggregateZero::get(II->getType());
      }

      // Only the lower element is used.
      DemandedElts = 1;
      simplifyAndSetOp(II, 0, DemandedElts, UndefElts);

      // Only the lower element may be undefined. The high elements are zero.
      UndefElts = UndefElts[0];
      break;

    // Unary scalar-as-vector operations that work column-wise.
    case Intrinsic::x86_sse_rcp_ss:
    case Intrinsic::x86_sse_rsqrt_ss:
      simplifyAndSetOp(II, 0, DemandedElts, UndefElts);

      // If the lowest element of a scalar op isn't used then use Arg0.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }
      // TODO: If only the low element is used, lower SQRT to FSQRT (with
      // rounding/exception checks).
      break;

    // Binary scalar-as-vector operations that work column-wise. The high
    // elements come from operand 0. The low element is a function of both
    // operands.
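    // For example, min_ss(%a, %b) computes
    //   <min(a0, b0), a1, a2, a3>
    // (modulo NaN ordering details), so when lane 0 is not demanded the
    // whole call simplifies to %a.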
    case Intrinsic::x86_sse_min_ss:
    case Intrinsic::x86_sse_max_ss:
    case Intrinsic::x86_sse_cmp_ss:
    case Intrinsic::x86_sse2_min_sd:
    case Intrinsic::x86_sse2_max_sd:
    case Intrinsic::x86_sse2_cmp_sd: {
      simplifyAndSetOp(II, 0, DemandedElts, UndefElts);

      // If the lowest element of a scalar op isn't used then use Arg0.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }

      // Only the lower element is used for operand 1.
      DemandedElts = 1;
      simplifyAndSetOp(II, 1, DemandedElts, UndefElts2);

      // The lower element is undefined only if both lower elements are
      // undefined. Consider things like undef & 0. The result is known zero,
      // not undef.
      if (!UndefElts2[0])
        UndefElts.clearBit(0);

      break;
    }

    // Binary scalar-as-vector operations that work column-wise. The high
    // elements come from operand 0 and the low element comes from operand 1.
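    // For example, round_ss(%a, %b) computes <round(b0), a1, a2, a3>: lane 0
    // of operand 0 is never read, and only lane 0 of operand 1 is.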
    case Intrinsic::x86_sse41_round_ss:
    case Intrinsic::x86_sse41_round_sd: {
      // Don't use the low element of operand 0.
      APInt DemandedElts2 = DemandedElts;
      DemandedElts2.clearBit(0);
      simplifyAndSetOp(II, 0, DemandedElts2, UndefElts);

      // If the lowest element of a scalar op isn't used then use Arg0.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }

      // Only the lower element is used for operand 1.
      DemandedElts = 1;
      simplifyAndSetOp(II, 1, DemandedElts, UndefElts2);

      // Take the high undef elements from operand 0 and take the lower element
      // from operand 1.
      UndefElts.clearBit(0);
      UndefElts |= UndefElts2[0];
      break;
    }

    // Three input scalar-as-vector operations that work column-wise. The high
    // elements come from operand 0 and the low element is a function of all
    // three inputs.
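    // For example, mask_add_ss_round(%a, %b, %src, %k, %rc) computes roughly
    // <%k[0] ? a0 + b0 : src0, a1, a2, a3>, so the high lanes depend only on
    // operand 0 while lane 0 reads operands 0, 1 and 2.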
    case Intrinsic::x86_avx512_mask_add_ss_round:
    case Intrinsic::x86_avx512_mask_div_ss_round:
    case Intrinsic::x86_avx512_mask_mul_ss_round:
    case Intrinsic::x86_avx512_mask_sub_ss_round:
    case Intrinsic::x86_avx512_mask_max_ss_round:
    case Intrinsic::x86_avx512_mask_min_ss_round:
    case Intrinsic::x86_avx512_mask_add_sd_round:
    case Intrinsic::x86_avx512_mask_div_sd_round:
    case Intrinsic::x86_avx512_mask_mul_sd_round:
    case Intrinsic::x86_avx512_mask_sub_sd_round:
    case Intrinsic::x86_avx512_mask_max_sd_round:
    case Intrinsic::x86_avx512_mask_min_sd_round:
      simplifyAndSetOp(II, 0, DemandedElts, UndefElts);

      // If the lowest element of a scalar op isn't used then use Arg0.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }

      // Only the lower element is used for operands 1 and 2.
      DemandedElts = 1;
      simplifyAndSetOp(II, 1, DemandedElts, UndefElts2);
      simplifyAndSetOp(II, 2, DemandedElts, UndefElts3);

      // The lower element is undefined only if all three lower elements are
      // undefined. Consider things like undef & 0. The result is known zero,
      // not undef.
      if (!UndefElts2[0] || !UndefElts3[0])
        UndefElts.clearBit(0);

      break;

    case Intrinsic::x86_sse2_packssdw_128:
    case Intrinsic::x86_sse2_packsswb_128:
    case Intrinsic::x86_sse2_packuswb_128:
    case Intrinsic::x86_sse41_packusdw:
    case Intrinsic::x86_avx2_packssdw:
    case Intrinsic::x86_avx2_packsswb:
    case Intrinsic::x86_avx2_packusdw:
    case Intrinsic::x86_avx2_packuswb:
    case Intrinsic::x86_avx512_packssdw_512:
    case Intrinsic::x86_avx512_packsswb_512:
    case Intrinsic::x86_avx512_packusdw_512:
    case Intrinsic::x86_avx512_packuswb_512: {
      auto *Ty0 = II->getArgOperand(0)->getType();
      unsigned InnerVWidth = Ty0->getVectorNumElements();
      assert(VWidth == (InnerVWidth * 2) && "Unexpected input size");

      unsigned NumLanes = Ty0->getPrimitiveSizeInBits() / 128;
      unsigned VWidthPerLane = VWidth / NumLanes;
      unsigned InnerVWidthPerLane = InnerVWidth / NumLanes;

      // Per lane, pack the elements of the first input and then the second.
      // e.g.
      // v8i16 PACK(v4i32 X, v4i32 Y) - (X[0..3],Y[0..3])
      // v32i8 PACK(v16i16 X, v16i16 Y) - (X[0..7],Y[0..7]),(X[8..15],Y[8..15])
      for (int OpNum = 0; OpNum != 2; ++OpNum) {
        APInt OpDemandedElts(InnerVWidth, 0);
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          unsigned LaneIdx = Lane * VWidthPerLane;
          for (unsigned Elt = 0; Elt != InnerVWidthPerLane; ++Elt) {
            unsigned Idx = LaneIdx + Elt + InnerVWidthPerLane * OpNum;
            if (DemandedElts[Idx])
              OpDemandedElts.setBit((Lane * InnerVWidthPerLane) + Elt);
          }
        }

        // Demand elements from the operand.
        APInt OpUndefElts(InnerVWidth, 0);
        simplifyAndSetOp(II, OpNum, OpDemandedElts, OpUndefElts);

        // Pack the operand's UNDEF elements, one lane at a time.
        OpUndefElts = OpUndefElts.zext(VWidth);
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          APInt LaneElts = OpUndefElts.lshr(InnerVWidthPerLane * Lane);
          LaneElts = LaneElts.getLoBits(InnerVWidthPerLane);
          LaneElts <<= InnerVWidthPerLane * (2 * Lane + OpNum);
          UndefElts |= LaneElts;
        }
      }
      break;
    }

    // PSHUFB
    case Intrinsic::x86_ssse3_pshuf_b_128:
    case Intrinsic::x86_avx2_pshuf_b:
    case Intrinsic::x86_avx512_pshuf_b_512:
    // PERMILVAR
    case Intrinsic::x86_avx_vpermilvar_ps:
    case Intrinsic::x86_avx_vpermilvar_ps_256:
    case Intrinsic::x86_avx512_vpermilvar_ps_512:
    case Intrinsic::x86_avx_vpermilvar_pd:
    case Intrinsic::x86_avx_vpermilvar_pd_256:
    case Intrinsic::x86_avx512_vpermilvar_pd_512:
    // PERMV
    case Intrinsic::x86_avx2_permd:
    case Intrinsic::x86_avx2_permps: {
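      // Result lane i is selected by lane i of the control vector (operand
      // 1), so the demanded result lanes map one-to-one onto demanded
      // control lanes. Any lane of the data operand may be read, so it is
      // left untouched.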
      simplifyAndSetOp(II, 1, DemandedElts, UndefElts);
      break;
    }

    // SSE4A instructions leave the upper 64 bits of the 128-bit result
    // in an undefined state.
    case Intrinsic::x86_sse4a_extrq:
    case Intrinsic::x86_sse4a_extrqi:
    case Intrinsic::x86_sse4a_insertq:
    case Intrinsic::x86_sse4a_insertqi:
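      // These intrinsics all return <2 x i64>, so VWidth / 2 == 1 and only
      // the single upper lane is marked undef.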
      UndefElts.setHighBits(VWidth / 2);
      break;
    case Intrinsic::amdgcn_buffer_load:
    case Intrinsic::amdgcn_buffer_load_format:
    case Intrinsic::amdgcn_raw_buffer_load:
    case Intrinsic::amdgcn_raw_buffer_load_format:
    case Intrinsic::amdgcn_raw_tbuffer_load:
    case Intrinsic::amdgcn_struct_buffer_load:
    case Intrinsic::amdgcn_struct_buffer_load_format:
    case Intrinsic::amdgcn_struct_tbuffer_load:
    case Intrinsic::amdgcn_tbuffer_load:
      return simplifyAMDGCNMemoryIntrinsicDemanded(II, DemandedElts);
    default: {
      if (getAMDGPUImageDMaskIntrinsic(II->getIntrinsicID()))
        return simplifyAMDGCNMemoryIntrinsicDemanded(II, DemandedElts, 0);

      break;
    }
    } // switch on IntrinsicID
    break;
  } // case Call
  } // switch on Opcode

  // TODO: We bail completely on integer div/rem and shifts because they have
  // UB/poison potential, but that should be refined.
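  // For example, if only lane 0 of a "udiv <2 x i32> %x, %y" is demanded,
  // replacing the unused lane of %y with undef could introduce a division
  // by zero (undef may be zero) that the original program never executed.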
  BinaryOperator *BO;
  if (match(I, m_BinOp(BO)) && !BO->isIntDivRem() && !BO->isShift()) {
    simplifyAndSetOp(I, 0, DemandedElts, UndefElts);
    simplifyAndSetOp(I, 1, DemandedElts, UndefElts2);

    // Any change to an instruction with potential poison must clear those
    // flags because we cannot guarantee those constraints now. Other analyses
    // may determine that it is safe to re-apply the flags.
    if (MadeChange)
      BO->dropPoisonGeneratingFlags();

    // Output elements are undefined if both are undefined. Consider things
    // like undef & 0. The result is known zero, not undef.
    UndefElts &= UndefElts2;
  }

  // If we've proven all of the lanes undef, return an undef value.
  // TODO: Intersect w/demanded lanes
  if (UndefElts.isAllOnesValue())
    return UndefValue::get(I->getType());

  return MadeChange ? I : nullptr;
}