1 //===- InstCombineMulDivRem.cpp -------------------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the visit functions for mul, fmul, sdiv, udiv, fdiv,
11 // srem, urem, frem.
12 //
13 //===----------------------------------------------------------------------===//
14
15 #include "InstCombineInternal.h"
16 #include "llvm/ADT/APFloat.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/Analysis/InstructionSimplify.h"
20 #include "llvm/IR/BasicBlock.h"
21 #include "llvm/IR/Constant.h"
22 #include "llvm/IR/Constants.h"
23 #include "llvm/IR/InstrTypes.h"
24 #include "llvm/IR/Instruction.h"
25 #include "llvm/IR/Instructions.h"
26 #include "llvm/IR/IntrinsicInst.h"
27 #include "llvm/IR/Intrinsics.h"
28 #include "llvm/IR/Operator.h"
29 #include "llvm/IR/PatternMatch.h"
30 #include "llvm/IR/Type.h"
31 #include "llvm/IR/Value.h"
32 #include "llvm/Support/Casting.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Support/KnownBits.h"
35 #include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
36 #include "llvm/Transforms/Utils/BuildLibCalls.h"
37 #include <cassert>
38 #include <cstddef>
39 #include <cstdint>
40 #include <utility>
41
42 using namespace llvm;
43 using namespace PatternMatch;
44
45 #define DEBUG_TYPE "instcombine"
46
47 /// The specific integer value is used in a context where it is known to be
48 /// non-zero. If this allows us to simplify the computation, do so and return
49 /// the new operand, otherwise return null.
50 static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC,
51 Instruction &CxtI) {
52 // If V has multiple uses, then we would have to do more analysis to determine
53 // if this is safe. For example, the use could be in dynamically unreached
54 // code.
55 if (!V->hasOneUse()) return nullptr;
56
57 bool MadeChange = false;
58
59 // ((1 << A) >>u B) --> (1 << (A-B))
60 // Because V cannot be zero, we know that B is less than A.
61 Value *A = nullptr, *B = nullptr, *One = nullptr;
62 if (match(V, m_LShr(m_OneUse(m_Shl(m_Value(One), m_Value(A))), m_Value(B))) &&
63 match(One, m_One())) {
64 A = IC.Builder.CreateSub(A, B);
65 return IC.Builder.CreateShl(One, A);
66 }
67
68 // (PowerOfTwo >>u B) --> isExact since shifting out the result would make it
69 // inexact. Similarly for <<.
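// e.g. if the power-of-two input is 16 and the lshr result is known non-zero,
// its single set bit was not shifted out, so the shift lost no bits and is
// exact; the analogous argument gives nuw for the shl case.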
70 BinaryOperator *I = dyn_cast<BinaryOperator>(V);
71 if (I && I->isLogicalShift() &&
72 IC.isKnownToBeAPowerOfTwo(I->getOperand(0), false, 0, &CxtI)) {
73 // We know that this is an exact/nuw shift and that the input is a
74 // non-zero context as well.
75 if (Value *V2 = simplifyValueKnownNonZero(I->getOperand(0), IC, CxtI)) {
76 I->setOperand(0, V2);
77 MadeChange = true;
78 }
79
80 if (I->getOpcode() == Instruction::LShr && !I->isExact()) {
81 I->setIsExact();
82 MadeChange = true;
83 }
84
85 if (I->getOpcode() == Instruction::Shl && !I->hasNoUnsignedWrap()) {
86 I->setHasNoUnsignedWrap();
87 MadeChange = true;
88 }
89 }
90
91 // TODO: Lots more we could do here:
92 // If V is a phi node, we can call this on each of its operands.
93 // "select cond, X, 0" can simplify to "X".
94
95 return MadeChange ? V : nullptr;
96 }
97
98 /// A helper routine of InstCombiner::visitMul().
99 ///
100 /// If C is a scalar/vector of known powers of 2, then this function returns
101 /// a new scalar/vector obtained from logBase2 of C.
102 /// Return a null pointer otherwise.
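/// For example, C = <4 x i32> <i32 2, i32 8, i32 undef, i32 16> yields
/// <4 x i32> <i32 1, i32 3, i32 undef, i32 4>.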
103 static Constant *getLogBase2(Type *Ty, Constant *C) {
104 const APInt *IVal;
105 if (match(C, m_APInt(IVal)) && IVal->isPowerOf2())
106 return ConstantInt::get(Ty, IVal->logBase2());
107
108 if (!Ty->isVectorTy())
109 return nullptr;
110
111 SmallVector<Constant *, 4> Elts;
112 for (unsigned I = 0, E = Ty->getVectorNumElements(); I != E; ++I) {
113 Constant *Elt = C->getAggregateElement(I);
114 if (!Elt)
115 return nullptr;
116 if (isa<UndefValue>(Elt)) {
117 Elts.push_back(UndefValue::get(Ty->getScalarType()));
118 continue;
119 }
120 if (!match(Elt, m_APInt(IVal)) || !IVal->isPowerOf2())
121 return nullptr;
122 Elts.push_back(ConstantInt::get(Ty->getScalarType(), IVal->logBase2()));
123 }
124
125 return ConstantVector::get(Elts);
126 }
127
128 Instruction *InstCombiner::visitMul(BinaryOperator &I) {
129 if (Value *V = SimplifyMulInst(I.getOperand(0), I.getOperand(1),
130 SQ.getWithInstruction(&I)))
131 return replaceInstUsesWith(I, V);
132
133 if (SimplifyAssociativeOrCommutative(I))
134 return &I;
135
136 if (Instruction *X = foldShuffledBinop(I))
137 return X;
138
139 if (Value *V = SimplifyUsingDistributiveLaws(I))
140 return replaceInstUsesWith(I, V);
141
142 // X * -1 == 0 - X
143 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
144 if (match(Op1, m_AllOnes())) {
145 BinaryOperator *BO = BinaryOperator::CreateNeg(Op0, I.getName());
146 if (I.hasNoSignedWrap())
147 BO->setHasNoSignedWrap();
148 return BO;
149 }
150
151 // Also allow combining multiply instructions on vectors.
152 {
153 Value *NewOp;
154 Constant *C1, *C2;
155 const APInt *IVal;
156 if (match(&I, m_Mul(m_Shl(m_Value(NewOp), m_Constant(C2)),
157 m_Constant(C1))) &&
158 match(C1, m_APInt(IVal))) {
159 // ((X << C2)*C1) == (X * (C1 << C2))
160 Constant *Shl = ConstantExpr::getShl(C1, C2);
161 BinaryOperator *Mul = cast<BinaryOperator>(I.getOperand(0));
162 BinaryOperator *BO = BinaryOperator::CreateMul(NewOp, Shl);
163 if (I.hasNoUnsignedWrap() && Mul->hasNoUnsignedWrap())
164 BO->setHasNoUnsignedWrap();
165 if (I.hasNoSignedWrap() && Mul->hasNoSignedWrap() &&
166 Shl->isNotMinSignedValue())
167 BO->setHasNoSignedWrap();
168 return BO;
169 }
170
171 if (match(&I, m_Mul(m_Value(NewOp), m_Constant(C1)))) {
172 // Replace X*(2^C) with X << C, where C is either a scalar or a vector.
173 if (Constant *NewCst = getLogBase2(NewOp->getType(), C1)) {
174 unsigned Width = NewCst->getType()->getPrimitiveSizeInBits();
175 BinaryOperator *Shl = BinaryOperator::CreateShl(NewOp, NewCst);
176
177 if (I.hasNoUnsignedWrap())
178 Shl->setHasNoUnsignedWrap();
179 if (I.hasNoSignedWrap()) {
180 const APInt *V;
181 if (match(NewCst, m_APInt(V)) && *V != Width - 1)
182 Shl->setHasNoSignedWrap();
183 }
184
185 return Shl;
186 }
187 }
188 }
189
190 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
191 // (Y - X) * (-(2**n)) -> (X - Y) * (2**n), for positive nonzero n
192 // (Y + const) * (-(2**n)) -> (-const - Y) * (2**n), for positive nonzero n
193 // The "* (2**n)" thus becomes a potential shifting opportunity.
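// e.g. (Y - X) * -8 --> (X - Y) * 8, and the multiply by 8 can then be
// lowered to a left shift by the power-of-2 fold above.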
194 {
195 const APInt & Val = CI->getValue();
196 const APInt &PosVal = Val.abs();
197 if (Val.isNegative() && PosVal.isPowerOf2()) {
198 Value *X = nullptr, *Y = nullptr;
199 if (Op0->hasOneUse()) {
200 ConstantInt *C1;
201 Value *Sub = nullptr;
202 if (match(Op0, m_Sub(m_Value(Y), m_Value(X))))
203 Sub = Builder.CreateSub(X, Y, "suba");
204 else if (match(Op0, m_Add(m_Value(Y), m_ConstantInt(C1))))
205 Sub = Builder.CreateSub(Builder.CreateNeg(C1), Y, "subc");
206 if (Sub)
207 return
208 BinaryOperator::CreateMul(Sub,
209 ConstantInt::get(Y->getType(), PosVal));
210 }
211 }
212 }
213 }
214
215 if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
216 return FoldedMul;
217
218 // Simplify mul instructions with a constant RHS.
219 if (isa<Constant>(Op1)) {
220 // Canonicalize (X+C1)*CI -> X*CI+C1*CI.
221 Value *X;
222 Constant *C1;
223 if (match(Op0, m_OneUse(m_Add(m_Value(X), m_Constant(C1))))) {
224 Value *Mul = Builder.CreateMul(C1, Op1);
225 // Only go forward with the transform if C1*CI simplifies to a tidier
226 // constant.
227 if (!match(Mul, m_Mul(m_Value(), m_Value())))
228 return BinaryOperator::CreateAdd(Builder.CreateMul(X, Op1), Mul);
229 }
230 }
231
232 // -X * C --> X * -C
233 Value *X, *Y;
234 Constant *Op1C;
235 if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Constant(Op1C)))
236 return BinaryOperator::CreateMul(X, ConstantExpr::getNeg(Op1C));
237
238 // -X * -Y --> X * Y
239 if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Neg(m_Value(Y)))) {
240 auto *NewMul = BinaryOperator::CreateMul(X, Y);
241 if (I.hasNoSignedWrap() &&
242 cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap() &&
243 cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap())
244 NewMul->setHasNoSignedWrap();
245 return NewMul;
246 }
247
248 // (X / Y) * Y = X - (X % Y)
249 // (X / Y) * -Y = (X % Y) - X
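// Both follow from the division identity X == (X / Y) * Y + (X % Y), which
// holds for the udiv/urem and sdiv/srem pairs.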
250 {
251 Value *Y = Op1;
252 BinaryOperator *Div = dyn_cast<BinaryOperator>(Op0);
253 if (!Div || (Div->getOpcode() != Instruction::UDiv &&
254 Div->getOpcode() != Instruction::SDiv)) {
255 Y = Op0;
256 Div = dyn_cast<BinaryOperator>(Op1);
257 }
258 Value *Neg = dyn_castNegVal(Y);
259 if (Div && Div->hasOneUse() &&
260 (Div->getOperand(1) == Y || Div->getOperand(1) == Neg) &&
261 (Div->getOpcode() == Instruction::UDiv ||
262 Div->getOpcode() == Instruction::SDiv)) {
263 Value *X = Div->getOperand(0), *DivOp1 = Div->getOperand(1);
264
265 // If the division is exact, X % Y is zero, so we end up with X or -X.
266 if (Div->isExact()) {
267 if (DivOp1 == Y)
268 return replaceInstUsesWith(I, X);
269 return BinaryOperator::CreateNeg(X);
270 }
271
272 auto RemOpc = Div->getOpcode() == Instruction::UDiv ? Instruction::URem
273 : Instruction::SRem;
274 Value *Rem = Builder.CreateBinOp(RemOpc, X, DivOp1);
275 if (DivOp1 == Y)
276 return BinaryOperator::CreateSub(X, Rem);
277 return BinaryOperator::CreateSub(Rem, X);
278 }
279 }
280
281 /// i1 mul -> i1 and.
282 if (I.getType()->isIntOrIntVectorTy(1))
283 return BinaryOperator::CreateAnd(Op0, Op1);
284
285 // X*(1 << Y) --> X << Y
286 // (1 << Y)*X --> X << Y
287 {
288 Value *Y;
289 BinaryOperator *BO = nullptr;
290 bool ShlNSW = false;
291 if (match(Op0, m_Shl(m_One(), m_Value(Y)))) {
292 BO = BinaryOperator::CreateShl(Op1, Y);
293 ShlNSW = cast<ShlOperator>(Op0)->hasNoSignedWrap();
294 } else if (match(Op1, m_Shl(m_One(), m_Value(Y)))) {
295 BO = BinaryOperator::CreateShl(Op0, Y);
296 ShlNSW = cast<ShlOperator>(Op1)->hasNoSignedWrap();
297 }
298 if (BO) {
299 if (I.hasNoUnsignedWrap())
300 BO->setHasNoUnsignedWrap();
301 if (I.hasNoSignedWrap() && ShlNSW)
302 BO->setHasNoSignedWrap();
303 return BO;
304 }
305 }
306
307 // (bool X) * Y --> X ? Y : 0
308 // Y * (bool X) --> X ? Y : 0
309 if (match(Op0, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
310 return SelectInst::Create(X, Op1, ConstantInt::get(I.getType(), 0));
311 if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
312 return SelectInst::Create(X, Op0, ConstantInt::get(I.getType(), 0));
313
314 // (lshr X, 31) * Y --> (ashr X, 31) & Y
315 // Y * (lshr X, 31) --> (ashr X, 31) & Y
316 // TODO: We are not checking one-use because the elimination of the multiply
317 // is better for analysis?
318 // TODO: Should we canonicalize to '(X < 0) ? Y : 0' instead? That would be
319 // more similar to what we're doing above.
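// e.g. for i32: (lshr X, 31) is 0 or 1 depending on the sign of X, and
// (ashr X, 31) is correspondingly 0 or -1, so masking Y with it yields the
// same 0 or Y as the multiply.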
320 const APInt *C;
321 if (match(Op0, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
322 return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op1);
323 if (match(Op1, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
324 return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op0);
325
326 // Check for (mul (sext x), y), see if we can merge this into an
327 // integer mul followed by a sext.
328 if (SExtInst *Op0Conv = dyn_cast<SExtInst>(Op0)) {
329 // (mul (sext x), cst) --> (sext (mul x, cst'))
330 if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
331 if (Op0Conv->hasOneUse()) {
332 Constant *CI =
333 ConstantExpr::getTrunc(Op1C, Op0Conv->getOperand(0)->getType());
334 if (ConstantExpr::getSExt(CI, I.getType()) == Op1C &&
335 willNotOverflowSignedMul(Op0Conv->getOperand(0), CI, I)) {
336 // Insert the new, smaller mul.
337 Value *NewMul =
338 Builder.CreateNSWMul(Op0Conv->getOperand(0), CI, "mulconv");
339 return new SExtInst(NewMul, I.getType());
340 }
341 }
342 }
343
344 // (mul (sext x), (sext y)) --> (sext (mul int x, y))
345 if (SExtInst *Op1Conv = dyn_cast<SExtInst>(Op1)) {
346 // Only do this if x/y have the same type, if at least one of them has a
347 // single use (so we don't increase the number of sexts), and if the
348 // integer mul will not overflow.
349 if (Op0Conv->getOperand(0)->getType() ==
350 Op1Conv->getOperand(0)->getType() &&
351 (Op0Conv->hasOneUse() || Op1Conv->hasOneUse()) &&
352 willNotOverflowSignedMul(Op0Conv->getOperand(0),
353 Op1Conv->getOperand(0), I)) {
354 // Insert the new integer mul.
355 Value *NewMul = Builder.CreateNSWMul(
356 Op0Conv->getOperand(0), Op1Conv->getOperand(0), "mulconv");
357 return new SExtInst(NewMul, I.getType());
358 }
359 }
360 }
361
362 // Check for (mul (zext x), y), see if we can merge this into an
363 // integer mul followed by a zext.
364 if (auto *Op0Conv = dyn_cast<ZExtInst>(Op0)) {
365 // (mul (zext x), cst) --> (zext (mul x, cst'))
366 if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
367 if (Op0Conv->hasOneUse()) {
368 Constant *CI =
369 ConstantExpr::getTrunc(Op1C, Op0Conv->getOperand(0)->getType());
370 if (ConstantExpr::getZExt(CI, I.getType()) == Op1C &&
371 willNotOverflowUnsignedMul(Op0Conv->getOperand(0), CI, I)) {
372 // Insert the new, smaller mul.
373 Value *NewMul =
374 Builder.CreateNUWMul(Op0Conv->getOperand(0), CI, "mulconv");
375 return new ZExtInst(NewMul, I.getType());
376 }
377 }
378 }
379
380 // (mul (zext x), (zext y)) --> (zext (mul int x, y))
381 if (auto *Op1Conv = dyn_cast<ZExtInst>(Op1)) {
382 // Only do this if x/y have the same type, if at least one of them has a
383 // single use (so we don't increase the number of zexts), and if the
384 // integer mul will not overflow.
385 if (Op0Conv->getOperand(0)->getType() ==
386 Op1Conv->getOperand(0)->getType() &&
387 (Op0Conv->hasOneUse() || Op1Conv->hasOneUse()) &&
388 willNotOverflowUnsignedMul(Op0Conv->getOperand(0),
389 Op1Conv->getOperand(0), I)) {
390 // Insert the new integer mul.
391 Value *NewMul = Builder.CreateNUWMul(
392 Op0Conv->getOperand(0), Op1Conv->getOperand(0), "mulconv");
393 return new ZExtInst(NewMul, I.getType());
394 }
395 }
396 }
397
398 bool Changed = false;
399 if (!I.hasNoSignedWrap() && willNotOverflowSignedMul(Op0, Op1, I)) {
400 Changed = true;
401 I.setHasNoSignedWrap(true);
402 }
403
404 if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedMul(Op0, Op1, I)) {
405 Changed = true;
406 I.setHasNoUnsignedWrap(true);
407 }
408
409 return Changed ? &I : nullptr;
410 }
411
412 Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
413 if (Value *V = SimplifyFMulInst(I.getOperand(0), I.getOperand(1),
414 I.getFastMathFlags(),
415 SQ.getWithInstruction(&I)))
416 return replaceInstUsesWith(I, V);
417
418 if (SimplifyAssociativeOrCommutative(I))
419 return &I;
420
421 if (Instruction *X = foldShuffledBinop(I))
422 return X;
423
424 if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
425 return FoldedMul;
426
427 // X * -1.0 --> -X
428 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
429 if (match(Op1, m_SpecificFP(-1.0)))
430 return BinaryOperator::CreateFNegFMF(Op0, &I);
431
432 // -X * -Y --> X * Y
433 Value *X, *Y;
434 if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
435 return BinaryOperator::CreateFMulFMF(X, Y, &I);
436
437 // -X * C --> X * -C
438 Constant *C;
439 if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_Constant(C)))
440 return BinaryOperator::CreateFMulFMF(X, ConstantExpr::getFNeg(C), &I);
441
442 // Sink negation: -X * Y --> -(X * Y)
443 if (match(Op0, m_OneUse(m_FNeg(m_Value(X)))))
444 return BinaryOperator::CreateFNegFMF(Builder.CreateFMulFMF(X, Op1, &I), &I);
445
446 // Sink negation: Y * -X --> -(X * Y)
447 if (match(Op1, m_OneUse(m_FNeg(m_Value(X)))))
448 return BinaryOperator::CreateFNegFMF(Builder.CreateFMulFMF(X, Op0, &I), &I);
449
450 // fabs(X) * fabs(X) -> X * X
451 if (Op0 == Op1 && match(Op0, m_Intrinsic<Intrinsic::fabs>(m_Value(X))))
452 return BinaryOperator::CreateFMulFMF(X, X, &I);
453
454 // (select A, B, C) * (select A, D, E) --> select A, (B*D), (C*E)
455 if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
456 return replaceInstUsesWith(I, V);
457
458 if (I.hasAllowReassoc()) {
459 // Reassociate constant RHS with another constant to form constant
460 // expression.
461 if (match(Op1, m_Constant(C)) && C->isFiniteNonZeroFP()) {
462 Constant *C1;
463 if (match(Op0, m_OneUse(m_FDiv(m_Constant(C1), m_Value(X))))) {
464 // (C1 / X) * C --> (C * C1) / X
465 Constant *CC1 = ConstantExpr::getFMul(C, C1);
466 if (CC1->isNormalFP())
467 return BinaryOperator::CreateFDivFMF(CC1, X, &I);
468 }
469 if (match(Op0, m_FDiv(m_Value(X), m_Constant(C1)))) {
470 // (X / C1) * C --> X * (C / C1)
471 Constant *CDivC1 = ConstantExpr::getFDiv(C, C1);
472 if (CDivC1->isNormalFP())
473 return BinaryOperator::CreateFMulFMF(X, CDivC1, &I);
474
475 // If the constant was a denormal, try reassociating differently.
476 // (X / C1) * C --> X / (C1 / C)
477 Constant *C1DivC = ConstantExpr::getFDiv(C1, C);
478 if (Op0->hasOneUse() && C1DivC->isNormalFP())
479 return BinaryOperator::CreateFDivFMF(X, C1DivC, &I);
480 }
481
482 // We do not need to match 'fadd C, X' and 'fsub X, C' because they are
483 // canonicalized to 'fadd X, C'. Distributing the multiply may allow
484 // further folds and (X * C) + C2 is 'fma'.
485 if (match(Op0, m_OneUse(m_FAdd(m_Value(X), m_Constant(C1))))) {
486 // (X + C1) * C --> (X * C) + (C * C1)
487 Constant *CC1 = ConstantExpr::getFMul(C, C1);
488 Value *XC = Builder.CreateFMulFMF(X, C, &I);
489 return BinaryOperator::CreateFAddFMF(XC, CC1, &I);
490 }
491 if (match(Op0, m_OneUse(m_FSub(m_Constant(C1), m_Value(X))))) {
492 // (C1 - X) * C --> (C * C1) - (X * C)
493 Constant *CC1 = ConstantExpr::getFMul(C, C1);
494 Value *XC = Builder.CreateFMulFMF(X, C, &I);
495 return BinaryOperator::CreateFSubFMF(CC1, XC, &I);
496 }
497 }
498
499 // sqrt(X) * sqrt(Y) -> sqrt(X * Y)
500 // nnan disallows the possibility of returning a number if both operands are
501 // negative (in that case, we should return NaN).
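// e.g. with X = Y = -1.0: sqrt(-1.0) * sqrt(-1.0) is NaN, but
// sqrt(-1.0 * -1.0) is 1.0, so the fold is only valid under nnan.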
502 if (I.hasNoNaNs() &&
503 match(Op0, m_OneUse(m_Intrinsic<Intrinsic::sqrt>(m_Value(X)))) &&
504 match(Op1, m_OneUse(m_Intrinsic<Intrinsic::sqrt>(m_Value(Y))))) {
505 Value *XY = Builder.CreateFMulFMF(X, Y, &I);
506 Value *Sqrt = Builder.CreateIntrinsic(Intrinsic::sqrt, { XY }, &I);
507 return replaceInstUsesWith(I, Sqrt);
508 }
509
510 // (X*Y) * X => (X*X) * Y where Y != X
511 // The purpose is two-fold:
512 // 1) to form a power expression (of X).
513 // 2) potentially shorten the critical path: After transformation, the
514 // latency of the instruction Y is amortized by the expression of X*X,
515 // and therefore Y is in a "less critical" position compared to what it
516 // was before the transformation.
517 if (match(Op0, m_OneUse(m_c_FMul(m_Specific(Op1), m_Value(Y)))) &&
518 Op1 != Y) {
519 Value *XX = Builder.CreateFMulFMF(Op1, Op1, &I);
520 return BinaryOperator::CreateFMulFMF(XX, Y, &I);
521 }
522 if (match(Op1, m_OneUse(m_c_FMul(m_Specific(Op0), m_Value(Y)))) &&
523 Op0 != Y) {
524 Value *XX = Builder.CreateFMulFMF(Op0, Op0, &I);
525 return BinaryOperator::CreateFMulFMF(XX, Y, &I);
526 }
527 }
528
529 // log2(X * 0.5) * Y = log2(X) * Y - Y
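// This follows from log2(X * 0.5) == log2(X) + log2(0.5) == log2(X) - 1, so
// (log2(X) - 1) * Y == log2(X) * Y - Y.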
530 if (I.isFast()) {
531 IntrinsicInst *Log2 = nullptr;
532 if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::log2>(
533 m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
534 Log2 = cast<IntrinsicInst>(Op0);
535 Y = Op1;
536 }
537 if (match(Op1, m_OneUse(m_Intrinsic<Intrinsic::log2>(
538 m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
539 Log2 = cast<IntrinsicInst>(Op1);
540 Y = Op0;
541 }
542 if (Log2) {
543 Log2->setArgOperand(0, X);
544 Log2->copyFastMathFlags(&I);
545 Value *LogXTimesY = Builder.CreateFMulFMF(Log2, Y, &I);
546 return BinaryOperator::CreateFSubFMF(LogXTimesY, Y, &I);
547 }
548 }
549
550 return nullptr;
551 }
552
553 /// Fold a divide or remainder with a select instruction divisor when one of the
554 /// select operands is zero. In that case, we can use the other select operand
555 /// because div/rem by zero is undefined.
556 bool InstCombiner::simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I) {
557 SelectInst *SI = dyn_cast<SelectInst>(I.getOperand(1));
558 if (!SI)
559 return false;
560
561 int NonNullOperand;
562 if (match(SI->getTrueValue(), m_Zero()))
563 // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
564 NonNullOperand = 2;
565 else if (match(SI->getFalseValue(), m_Zero()))
566 // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
567 NonNullOperand = 1;
568 else
569 return false;
570
571 // Change the div/rem to use 'Y' instead of the select.
572 I.setOperand(1, SI->getOperand(NonNullOperand));
573
574 // Okay, we know we can replace the operand of the div/rem with 'Y' with no
575 // problem. However, the select, or the condition of the select may have
576 // multiple uses. Based on our knowledge that the operand must be non-zero,
577 // propagate the known value for the select into other uses of it, and
578 // propagate a known value of the condition into its other users.
579
580 // If the select and condition only have a single use, don't bother with this;
581 // exit early.
582 Value *SelectCond = SI->getCondition();
583 if (SI->use_empty() && SelectCond->hasOneUse())
584 return true;
585
586 // Scan the current block backward, looking for other uses of SI.
587 BasicBlock::iterator BBI = I.getIterator(), BBFront = I.getParent()->begin();
588 Type *CondTy = SelectCond->getType();
589 while (BBI != BBFront) {
590 --BBI;
591 // If we find an instruction that we can't assume will return, then
592 // information from below it cannot be propagated above it.
593 if (!isGuaranteedToTransferExecutionToSuccessor(&*BBI))
594 break;
595
596 // Replace uses of the select or its condition with the known values.
597 for (Instruction::op_iterator I = BBI->op_begin(), E = BBI->op_end();
598 I != E; ++I) {
599 if (*I == SI) {
600 *I = SI->getOperand(NonNullOperand);
601 Worklist.Add(&*BBI);
602 } else if (*I == SelectCond) {
603 *I = NonNullOperand == 1 ? ConstantInt::getTrue(CondTy)
604 : ConstantInt::getFalse(CondTy);
605 Worklist.Add(&*BBI);
606 }
607 }
608
609 // If we're past the instruction, quit looking for it.
610 if (&*BBI == SI)
611 SI = nullptr;
612 if (&*BBI == SelectCond)
613 SelectCond = nullptr;
614
615 // If we ran out of things to eliminate, break out of the loop.
616 if (!SelectCond && !SI)
617 break;
618
619 }
620 return true;
621 }
622
623 /// True if the multiply cannot be expressed in an integer of this size.
624 static bool multiplyOverflows(const APInt &C1, const APInt &C2, APInt &Product,
625 bool IsSigned) {
626 bool Overflow;
627 Product = IsSigned ? C1.smul_ov(C2, Overflow) : C1.umul_ov(C2, Overflow);
628 return Overflow;
629 }
630
631 /// True if C1 is a multiple of C2. Quotient contains C1/C2.
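/// For example, C1 = 12 and C2 = 4 gives true with Quotient = 3, while
/// C1 = 12 and C2 = 5 gives false.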
632 static bool isMultiple(const APInt &C1, const APInt &C2, APInt &Quotient,
633 bool IsSigned) {
634 assert(C1.getBitWidth() == C2.getBitWidth() && "Constant widths not equal");
635
636 // Bail if we will divide by zero.
637 if (C2.isNullValue())
638 return false;
639
640 // Bail if we would divide INT_MIN by -1.
641 if (IsSigned && C1.isMinSignedValue() && C2.isAllOnesValue())
642 return false;
643
644 APInt Remainder(C1.getBitWidth(), /*Val=*/0ULL, IsSigned);
645 if (IsSigned)
646 APInt::sdivrem(C1, C2, Quotient, Remainder);
647 else
648 APInt::udivrem(C1, C2, Quotient, Remainder);
649
650 return Remainder.isMinValue();
651 }
652
653 /// This function implements the transforms common to both integer division
654 /// instructions (udiv and sdiv). It is called by the visitors to those integer
655 /// division instructions.
656 /// Common integer divide transforms
657 Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
658 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
659 bool IsSigned = I.getOpcode() == Instruction::SDiv;
660 Type *Ty = I.getType();
661
662 // The RHS is known non-zero.
663 if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I)) {
664 I.setOperand(1, V);
665 return &I;
666 }
667
668 // Handle cases involving: [su]div X, (select Cond, Y, Z)
669 // This does not apply for fdiv.
670 if (simplifyDivRemOfSelectWithZeroOp(I))
671 return &I;
672
673 const APInt *C2;
674 if (match(Op1, m_APInt(C2))) {
675 Value *X;
676 const APInt *C1;
677
678 // (X / C1) / C2 -> X / (C1*C2)
679 if ((IsSigned && match(Op0, m_SDiv(m_Value(X), m_APInt(C1)))) ||
680 (!IsSigned && match(Op0, m_UDiv(m_Value(X), m_APInt(C1))))) {
681 APInt Product(C1->getBitWidth(), /*Val=*/0ULL, IsSigned);
682 if (!multiplyOverflows(*C1, *C2, Product, IsSigned))
683 return BinaryOperator::Create(I.getOpcode(), X,
684 ConstantInt::get(Ty, Product));
685 }
686
687 if ((IsSigned && match(Op0, m_NSWMul(m_Value(X), m_APInt(C1)))) ||
688 (!IsSigned && match(Op0, m_NUWMul(m_Value(X), m_APInt(C1))))) {
689 APInt Quotient(C1->getBitWidth(), /*Val=*/0ULL, IsSigned);
690
691 // (X * C1) / C2 -> X / (C2 / C1) if C2 is a multiple of C1.
692 if (isMultiple(*C2, *C1, Quotient, IsSigned)) {
693 auto *NewDiv = BinaryOperator::Create(I.getOpcode(), X,
694 ConstantInt::get(Ty, Quotient));
695 NewDiv->setIsExact(I.isExact());
696 return NewDiv;
697 }
698
699 // (X * C1) / C2 -> X * (C1 / C2) if C1 is a multiple of C2.
700 if (isMultiple(*C1, *C2, Quotient, IsSigned)) {
701 auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
702 ConstantInt::get(Ty, Quotient));
703 auto *OBO = cast<OverflowingBinaryOperator>(Op0);
704 Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
705 Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
706 return Mul;
707 }
708 }
709
710 if ((IsSigned && match(Op0, m_NSWShl(m_Value(X), m_APInt(C1))) &&
711 *C1 != C1->getBitWidth() - 1) ||
712 (!IsSigned && match(Op0, m_NUWShl(m_Value(X), m_APInt(C1))))) {
713 APInt Quotient(C1->getBitWidth(), /*Val=*/0ULL, IsSigned);
714 APInt C1Shifted = APInt::getOneBitSet(
715 C1->getBitWidth(), static_cast<unsigned>(C1->getLimitedValue()));
716
717 // (X << C1) / C2 -> X / (C2 >> C1) if C2 is a multiple of 1 << C1.
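// e.g. (X << 2) /u 12 --> X /u 3, because 12 is a multiple of 1 << 2 == 4.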
718 if (isMultiple(*C2, C1Shifted, Quotient, IsSigned)) {
719 auto *BO = BinaryOperator::Create(I.getOpcode(), X,
720 ConstantInt::get(Ty, Quotient));
721 BO->setIsExact(I.isExact());
722 return BO;
723 }
724
725 // (X << C1) / C2 -> X * ((1 << C1) / C2) if 1 << C1 is a multiple of C2.
726 if (isMultiple(C1Shifted, *C2, Quotient, IsSigned)) {
727 auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
728 ConstantInt::get(Ty, Quotient));
729 auto *OBO = cast<OverflowingBinaryOperator>(Op0);
730 Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
731 Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
732 return Mul;
733 }
734 }
735
736 if (!C2->isNullValue()) // avoid X udiv 0
737 if (Instruction *FoldedDiv = foldBinOpIntoSelectOrPhi(I))
738 return FoldedDiv;
739 }
740
741 if (match(Op0, m_One())) {
742 assert(!Ty->isIntOrIntVectorTy(1) && "i1 divide not removed?");
743 if (IsSigned) {
744 // If Op1 is 0 then it's undefined behaviour; if Op1 is 1 then the
745 // result is one; if Op1 is -1 then the result is minus one; otherwise
746 // it's zero.
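// The code below selects on (Op1 + 1) u< 3, which holds exactly when Op1 is
// -1, 0, or 1; for those values the result is Op1 itself (the Op1 == 0 case
// is UB anyway), and otherwise the result is 0.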
747 Value *Inc = Builder.CreateAdd(Op1, Op0);
748 Value *Cmp = Builder.CreateICmpULT(Inc, ConstantInt::get(Ty, 3));
749 return SelectInst::Create(Cmp, Op1, ConstantInt::get(Ty, 0));
750 } else {
751 // If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the
752 // result is one, otherwise it's zero.
753 return new ZExtInst(Builder.CreateICmpEQ(Op1, Op0), Ty);
754 }
755 }
756
757 // See if we can fold away this div instruction.
758 if (SimplifyDemandedInstructionBits(I))
759 return &I;
760
761 // (X - (X rem Y)) / Y -> X / Y; usually originates as ((X / Y) * Y) / Y
762 Value *X, *Z;
763 if (match(Op0, m_Sub(m_Value(X), m_Value(Z)))) // (X - Z) / Y; Y = Op1
764 if ((IsSigned && match(Z, m_SRem(m_Specific(X), m_Specific(Op1)))) ||
765 (!IsSigned && match(Z, m_URem(m_Specific(X), m_Specific(Op1)))))
766 return BinaryOperator::Create(I.getOpcode(), X, Op1);
767
768 // (X << Y) / X -> 1 << Y
769 Value *Y;
770 if (IsSigned && match(Op0, m_NSWShl(m_Specific(Op1), m_Value(Y))))
771 return BinaryOperator::CreateNSWShl(ConstantInt::get(Ty, 1), Y);
772 if (!IsSigned && match(Op0, m_NUWShl(m_Specific(Op1), m_Value(Y))))
773 return BinaryOperator::CreateNUWShl(ConstantInt::get(Ty, 1), Y);
774
775 // X / (X * Y) -> 1 / Y if the multiplication does not overflow.
776 if (match(Op1, m_c_Mul(m_Specific(Op0), m_Value(Y)))) {
777 bool HasNSW = cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap();
778 bool HasNUW = cast<OverflowingBinaryOperator>(Op1)->hasNoUnsignedWrap();
779 if ((IsSigned && HasNSW) || (!IsSigned && HasNUW)) {
780 I.setOperand(0, ConstantInt::get(Ty, 1));
781 I.setOperand(1, Y);
782 return &I;
783 }
784 }
785
786 return nullptr;
787 }
788
789 static const unsigned MaxDepth = 6;
790
791 namespace {
792
793 using FoldUDivOperandCb = Instruction *(*)(Value *Op0, Value *Op1,
794 const BinaryOperator &I,
795 InstCombiner &IC);
796
797 /// Used to maintain state for visitUDivOperand().
798 struct UDivFoldAction {
799 /// Informs visitUDiv() how to fold this operand. This can be zero if this
800 /// action joins two actions together.
801 FoldUDivOperandCb FoldAction;
802
803 /// Which operand to fold.
804 Value *OperandToFold;
805
806 union {
807 /// The instruction returned when FoldAction is invoked.
808 Instruction *FoldResult;
809
810 /// Stores the LHS action index if this action joins two actions together.
811 size_t SelectLHSIdx;
812 };
813
814 UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand)
815 : FoldAction(FA), OperandToFold(InputOperand), FoldResult(nullptr) {}
816 UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand, size_t SLHS)
817 : FoldAction(FA), OperandToFold(InputOperand), SelectLHSIdx(SLHS) {}
818 };
819
820 } // end anonymous namespace
821
822 // X udiv 2^C -> X >> C
823 static Instruction *foldUDivPow2Cst(Value *Op0, Value *Op1,
824 const BinaryOperator &I, InstCombiner &IC) {
825 Constant *C1 = getLogBase2(Op0->getType(), cast<Constant>(Op1));
826 if (!C1)
827 llvm_unreachable("Failed to constant fold udiv -> logbase2");
828 BinaryOperator *LShr = BinaryOperator::CreateLShr(Op0, C1);
829 if (I.isExact())
830 LShr->setIsExact();
831 return LShr;
832 }
833
834 // X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2)
835 // X udiv (zext (C1 << N)), where C1 is "1<<C2" --> X >> (N+C2)
836 static Instruction *foldUDivShl(Value *Op0, Value *Op1, const BinaryOperator &I,
837 InstCombiner &IC) {
838 Value *ShiftLeft;
839 if (!match(Op1, m_ZExt(m_Value(ShiftLeft))))
840 ShiftLeft = Op1;
841
842 Constant *CI;
843 Value *N;
844 if (!match(ShiftLeft, m_Shl(m_Constant(CI), m_Value(N))))
845 llvm_unreachable("match should never fail here!");
846 Constant *Log2Base = getLogBase2(N->getType(), CI);
847 if (!Log2Base)
848 llvm_unreachable("getLogBase2 should never fail here!");
849 N = IC.Builder.CreateAdd(N, Log2Base);
850 if (Op1 != ShiftLeft)
851 N = IC.Builder.CreateZExt(N, Op1->getType());
852 BinaryOperator *LShr = BinaryOperator::CreateLShr(Op0, N);
853 if (I.isExact())
854 LShr->setIsExact();
855 return LShr;
856 }
857
858 // Recursively visits the possible right hand operands of a udiv
859 // instruction, seeing through select instructions, to determine if we can
860 // replace the udiv with something simpler. If we find that an operand is not
861 // able to simplify the udiv, we abort the entire transformation.
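// e.g. for (udiv X, (select %c, 16, (select %d, 4, 2))), every leaf is a
// power of two, so the udiv can be rewritten as selects over right shifts
// of X.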
862 static size_t visitUDivOperand(Value *Op0, Value *Op1, const BinaryOperator &I,
863 SmallVectorImpl<UDivFoldAction> &Actions,
864 unsigned Depth = 0) {
865 // Check to see if this is an unsigned division with an exact power of 2,
866 // if so, convert to a right shift.
867 if (match(Op1, m_Power2())) {
868 Actions.push_back(UDivFoldAction(foldUDivPow2Cst, Op1));
869 return Actions.size();
870 }
871
872 // X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2)
873 if (match(Op1, m_Shl(m_Power2(), m_Value())) ||
874 match(Op1, m_ZExt(m_Shl(m_Power2(), m_Value())))) {
875 Actions.push_back(UDivFoldAction(foldUDivShl, Op1));
876 return Actions.size();
877 }
878
879 // The remaining tests are all recursive, so bail out if we hit the limit.
880 if (Depth++ == MaxDepth)
881 return 0;
882
883 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
884 if (size_t LHSIdx =
885 visitUDivOperand(Op0, SI->getOperand(1), I, Actions, Depth))
886 if (visitUDivOperand(Op0, SI->getOperand(2), I, Actions, Depth)) {
887 Actions.push_back(UDivFoldAction(nullptr, Op1, LHSIdx - 1));
888 return Actions.size();
889 }
890
891 return 0;
892 }
893
894 /// If we have zero-extended operands of an unsigned div or rem, we may be able
895 /// to narrow the operation (sink the zext below the math).
896 static Instruction *narrowUDivURem(BinaryOperator &I,
897 InstCombiner::BuilderTy &Builder) {
898 Instruction::BinaryOps Opcode = I.getOpcode();
899 Value *N = I.getOperand(0);
900 Value *D = I.getOperand(1);
901 Type *Ty = I.getType();
902 Value *X, *Y;
903 if (match(N, m_ZExt(m_Value(X))) && match(D, m_ZExt(m_Value(Y))) &&
904 X->getType() == Y->getType() && (N->hasOneUse() || D->hasOneUse())) {
905 // udiv (zext X), (zext Y) --> zext (udiv X, Y)
906 // urem (zext X), (zext Y) --> zext (urem X, Y)
907 Value *NarrowOp = Builder.CreateBinOp(Opcode, X, Y);
908 return new ZExtInst(NarrowOp, Ty);
909 }
910
911 Constant *C;
912 if ((match(N, m_OneUse(m_ZExt(m_Value(X)))) && match(D, m_Constant(C))) ||
913 (match(D, m_OneUse(m_ZExt(m_Value(X)))) && match(N, m_Constant(C)))) {
914 // If the constant is the same in the smaller type, use the narrow version.
915 Constant *TruncC = ConstantExpr::getTrunc(C, X->getType());
916 if (ConstantExpr::getZExt(TruncC, Ty) != C)
917 return nullptr;
918
919 // udiv (zext X), C --> zext (udiv X, C')
920 // urem (zext X), C --> zext (urem X, C')
921 // udiv C, (zext X) --> zext (udiv C', X)
922 // urem C, (zext X) --> zext (urem C', X)
923 Value *NarrowOp = isa<Constant>(D) ? Builder.CreateBinOp(Opcode, X, TruncC)
924 : Builder.CreateBinOp(Opcode, TruncC, X);
925 return new ZExtInst(NarrowOp, Ty);
926 }
927
928 return nullptr;
929 }
930
931 Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
932 if (Value *V = SimplifyUDivInst(I.getOperand(0), I.getOperand(1),
933 SQ.getWithInstruction(&I)))
934 return replaceInstUsesWith(I, V);
935
936 if (Instruction *X = foldShuffledBinop(I))
937 return X;
938
939 // Handle the integer div common cases
940 if (Instruction *Common = commonIDivTransforms(I))
941 return Common;
942
943 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
944 Value *X;
945 const APInt *C1, *C2;
946 if (match(Op0, m_LShr(m_Value(X), m_APInt(C1))) && match(Op1, m_APInt(C2))) {
947 // (X lshr C1) udiv C2 --> X udiv (C2 << C1)
948 bool Overflow;
949 APInt C2ShlC1 = C2->ushl_ov(*C1, Overflow);
950 if (!Overflow) {
951 bool IsExact = I.isExact() && match(Op0, m_Exact(m_Value()));
952 BinaryOperator *BO = BinaryOperator::CreateUDiv(
953 X, ConstantInt::get(X->getType(), C2ShlC1));
954 if (IsExact)
955 BO->setIsExact();
956 return BO;
957 }
958 }
959
960 // Op0 / C where C is large (negative) --> zext (Op0 >= C)
961 // TODO: Could use isKnownNegative() to handle non-constant values.
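// e.g. for i8: X /u 200 is 1 exactly when X u>= 200 (since 2 * 200 exceeds
// the i8 range), and 0 otherwise.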
962 Type *Ty = I.getType();
963 if (match(Op1, m_Negative())) {
964 Value *Cmp = Builder.CreateICmpUGE(Op0, Op1);
965 return CastInst::CreateZExtOrBitCast(Cmp, Ty);
966 }
967 // Op0 / (sext i1 X) --> zext (Op0 == -1) (if X is 0, the div is undefined)
968 if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
969 Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
970 return CastInst::CreateZExtOrBitCast(Cmp, Ty);
971 }
972
973 if (Instruction *NarrowDiv = narrowUDivURem(I, Builder))
974 return NarrowDiv;
975
976 // If the udiv operands are non-overflowing multiplies with a common operand,
977 // then eliminate the common factor:
978 // (A * B) / (A * X) --> B / X (and commuted variants)
979 // TODO: The code would be reduced if we had m_c_NUWMul pattern matching.
980 // TODO: If -reassociation handled this generally, we could remove this.
981 Value *A, *B;
982 if (match(Op0, m_NUWMul(m_Value(A), m_Value(B)))) {
983 if (match(Op1, m_NUWMul(m_Specific(A), m_Value(X))) ||
984 match(Op1, m_NUWMul(m_Value(X), m_Specific(A))))
985 return BinaryOperator::CreateUDiv(B, X);
986 if (match(Op1, m_NUWMul(m_Specific(B), m_Value(X))) ||
987 match(Op1, m_NUWMul(m_Value(X), m_Specific(B))))
988 return BinaryOperator::CreateUDiv(A, X);
989 }
990
991 // (LHS udiv (select (select (...)))) -> (LHS >> (select (select (...))))
992 SmallVector<UDivFoldAction, 6> UDivActions;
993 if (visitUDivOperand(Op0, Op1, I, UDivActions))
994 for (unsigned i = 0, e = UDivActions.size(); i != e; ++i) {
995 FoldUDivOperandCb Action = UDivActions[i].FoldAction;
996 Value *ActionOp1 = UDivActions[i].OperandToFold;
997 Instruction *Inst;
998 if (Action)
999 Inst = Action(Op0, ActionOp1, I, *this);
1000 else {
1001 // This action joins two actions together. The RHS of this action is
1002 // simply the last action we processed; we saved the LHS action index in
1003 // the joining action.
1004 size_t SelectRHSIdx = i - 1;
1005 Value *SelectRHS = UDivActions[SelectRHSIdx].FoldResult;
1006 size_t SelectLHSIdx = UDivActions[i].SelectLHSIdx;
1007 Value *SelectLHS = UDivActions[SelectLHSIdx].FoldResult;
1008 Inst = SelectInst::Create(cast<SelectInst>(ActionOp1)->getCondition(),
1009 SelectLHS, SelectRHS);
1010 }
1011
1012 // If this is the last action to process, return it to the InstCombiner.
1013 // Otherwise, we insert it before the UDiv and record it so that we may
1014 // use it as part of a joining action (i.e., a SelectInst).
1015 if (e - i != 1) {
1016 Inst->insertBefore(&I);
1017 UDivActions[i].FoldResult = Inst;
1018 } else
1019 return Inst;
1020 }
1021
1022 return nullptr;
1023 }
1024
1025 Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
1026 if (Value *V = SimplifySDivInst(I.getOperand(0), I.getOperand(1),
1027 SQ.getWithInstruction(&I)))
1028 return replaceInstUsesWith(I, V);
1029
1030 if (Instruction *X = foldShuffledBinop(I))
1031 return X;
1032
1033 // Handle the integer div common cases
1034 if (Instruction *Common = commonIDivTransforms(I))
1035 return Common;
1036
1037 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1038 Value *X;
1039 // sdiv Op0, -1 --> -Op0
1040 // sdiv Op0, (sext i1 X) --> -Op0 (because if X is 0, the op is undefined)
1041 if (match(Op1, m_AllOnes()) ||
1042 (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
1043 return BinaryOperator::CreateNeg(Op0);
1044
1045 const APInt *Op1C;
1046 if (match(Op1, m_APInt(Op1C))) {
1047 // sdiv exact X, C --> ashr exact X, log2(C)
1048 if (I.isExact() && Op1C->isNonNegative() && Op1C->isPowerOf2()) {
1049 Value *ShAmt = ConstantInt::get(Op1->getType(), Op1C->exactLogBase2());
1050 return BinaryOperator::CreateExactAShr(Op0, ShAmt, I.getName());
1051 }
1052
1053 // If the dividend is sign-extended and the constant divisor is small enough
1054 // to fit in the source type, shrink the division to the narrower type:
1055 // (sext X) sdiv C --> sext (X sdiv C)
1056 Value *Op0Src;
1057 if (match(Op0, m_OneUse(m_SExt(m_Value(Op0Src)))) &&
1058 Op0Src->getType()->getScalarSizeInBits() >= Op1C->getMinSignedBits()) {
1059
1060 // In the general case, we need to make sure that the dividend is not the
1061 // minimum signed value because dividing that by -1 is UB. But here, we
1062 // know that the -1 divisor case is already handled above.
1063
1064 Constant *NarrowDivisor =
1065 ConstantExpr::getTrunc(cast<Constant>(Op1), Op0Src->getType());
1066 Value *NarrowOp = Builder.CreateSDiv(Op0Src, NarrowDivisor);
1067 return new SExtInst(NarrowOp, Op0->getType());
1068 }
1069 }
1070
1071 if (Constant *RHS = dyn_cast<Constant>(Op1)) {
1072 // X/INT_MIN -> X == INT_MIN
1073 if (RHS->isMinSignedValue())
1074 return new ZExtInst(Builder.CreateICmpEQ(Op0, Op1), I.getType());
1075
1076 // -X/C --> X/-C provided the negation doesn't overflow.
1077 Value *X;
1078 if (match(Op0, m_NSWSub(m_Zero(), m_Value(X)))) {
1079 auto *BO = BinaryOperator::CreateSDiv(X, ConstantExpr::getNeg(RHS));
1080 BO->setIsExact(I.isExact());
1081 return BO;
1082 }
1083 }
1084
1085 // If the sign bits of both operands are zero (i.e. we can prove they are
1086 // unsigned inputs), turn this into a udiv.
1087 APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
1088 if (MaskedValueIsZero(Op0, Mask, 0, &I)) {
1089 if (MaskedValueIsZero(Op1, Mask, 0, &I)) {
1090 // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
1091 auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
1092 BO->setIsExact(I.isExact());
1093 return BO;
1094 }
1095
1096 if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
1097 // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
1098 // Safe because the only negative value (1 << Y) can take on is
1099 // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
1100 // the sign bit set.
1101 auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
1102 BO->setIsExact(I.isExact());
1103 return BO;
1104 }
1105 }
1106
1107 return nullptr;
1108 }
1109
1110 /// Remove negation and try to convert division into multiplication.
1111 static Instruction *foldFDivConstantDivisor(BinaryOperator &I) {
1112 Constant *C;
1113 if (!match(I.getOperand(1), m_Constant(C)))
1114 return nullptr;
1115
1116 // -X / C --> X / -C
1117 Value *X;
1118 if (match(I.getOperand(0), m_FNeg(m_Value(X))))
1119 return BinaryOperator::CreateFDivFMF(X, ConstantExpr::getFNeg(C), &I);
1120
1121 // If the constant divisor has an exact inverse, this is always safe. If not,
1122 // then we can still create a reciprocal if fast-math-flags allow it and the
1123 // constant is a regular number (not zero, infinite, or denormal).
1124 if (!(C->hasExactInverseFP() || (I.hasAllowReciprocal() && C->isNormalFP())))
1125 return nullptr;
1126
1127 // Disallow denormal constants because we don't know what would happen
1128 // on all targets.
1129 // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
1130 // denorms are flushed?
1131 auto *RecipC = ConstantExpr::getFDiv(ConstantFP::get(I.getType(), 1.0), C);
1132 if (!RecipC->isNormalFP())
1133 return nullptr;
1134
1135 // X / C --> X * (1 / C)
1136 return BinaryOperator::CreateFMulFMF(I.getOperand(0), RecipC, &I);
1137 }
1138
1139 /// Remove negation and try to reassociate constant math.
1140 static Instruction *foldFDivConstantDividend(BinaryOperator &I) {
1141 Constant *C;
1142 if (!match(I.getOperand(0), m_Constant(C)))
1143 return nullptr;
1144
1145 // C / -X --> -C / X
1146 Value *X;
1147 if (match(I.getOperand(1), m_FNeg(m_Value(X))))
1148 return BinaryOperator::CreateFDivFMF(ConstantExpr::getFNeg(C), X, &I);
1149
1150 if (!I.hasAllowReassoc() || !I.hasAllowReciprocal())
1151 return nullptr;
1152
1153 // Try to reassociate C / X expressions where X includes another constant.
1154 Constant *C2, *NewC = nullptr;
1155 if (match(I.getOperand(1), m_FMul(m_Value(X), m_Constant(C2)))) {
1156 // C / (X * C2) --> (C / C2) / X
1157 NewC = ConstantExpr::getFDiv(C, C2);
1158 } else if (match(I.getOperand(1), m_FDiv(m_Value(X), m_Constant(C2)))) {
1159 // C / (X / C2) --> (C * C2) / X
1160 NewC = ConstantExpr::getFMul(C, C2);
1161 }
1162 // Disallow denormal constants because we don't know what would happen
1163 // on all targets.
1164 // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
1165 // denorms are flushed?
1166 if (!NewC || !NewC->isNormalFP())
1167 return nullptr;
1168
1169 return BinaryOperator::CreateFDivFMF(NewC, X, &I);
1170 }
1171
1172 Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
1173 if (Value *V = SimplifyFDivInst(I.getOperand(0), I.getOperand(1),
1174 I.getFastMathFlags(),
1175 SQ.getWithInstruction(&I)))
1176 return replaceInstUsesWith(I, V);
1177
1178 if (Instruction *X = foldShuffledBinop(I))
1179 return X;
1180
1181 if (Instruction *R = foldFDivConstantDivisor(I))
1182 return R;
1183
1184 if (Instruction *R = foldFDivConstantDividend(I))
1185 return R;
1186
1187 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1188 if (isa<Constant>(Op0))
1189 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
1190 if (Instruction *R = FoldOpIntoSelect(I, SI))
1191 return R;
1192
1193 if (isa<Constant>(Op1))
1194 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
1195 if (Instruction *R = FoldOpIntoSelect(I, SI))
1196 return R;
1197
1198 if (I.hasAllowReassoc() && I.hasAllowReciprocal()) {
1199 Value *X, *Y;
1200 if (match(Op0, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
1201 (!isa<Constant>(Y) || !isa<Constant>(Op1))) {
1202 // (X / Y) / Z => X / (Y * Z)
1203 Value *YZ = Builder.CreateFMulFMF(Y, Op1, &I);
1204 return BinaryOperator::CreateFDivFMF(X, YZ, &I);
1205 }
1206 if (match(Op1, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
1207 (!isa<Constant>(Y) || !isa<Constant>(Op0))) {
1208 // Z / (X / Y) => (Y * Z) / X
1209 Value *YZ = Builder.CreateFMulFMF(Y, Op0, &I);
1210 return BinaryOperator::CreateFDivFMF(YZ, X, &I);
1211 }
1212 }
1213
1214 if (I.hasAllowReassoc() && Op0->hasOneUse() && Op1->hasOneUse()) {
1215 // sin(X) / cos(X) -> tan(X)
1216 // cos(X) / sin(X) -> 1/tan(X) (cotangent)
1217 Value *X;
1218 bool IsTan = match(Op0, m_Intrinsic<Intrinsic::sin>(m_Value(X))) &&
1219 match(Op1, m_Intrinsic<Intrinsic::cos>(m_Specific(X)));
1220 bool IsCot =
1221 !IsTan && match(Op0, m_Intrinsic<Intrinsic::cos>(m_Value(X))) &&
1222 match(Op1, m_Intrinsic<Intrinsic::sin>(m_Specific(X)));
1223
1224 if ((IsTan || IsCot) && hasUnaryFloatFn(&TLI, I.getType(), LibFunc_tan,
1225 LibFunc_tanf, LibFunc_tanl)) {
1226 IRBuilder<> B(&I);
1227 IRBuilder<>::FastMathFlagGuard FMFGuard(B);
1228 B.setFastMathFlags(I.getFastMathFlags());
1229 AttributeList Attrs = CallSite(Op0).getCalledFunction()->getAttributes();
1230 Value *Res = emitUnaryFloatFnCall(X, TLI.getName(LibFunc_tan), B, Attrs);
1231 if (IsCot)
1232 Res = B.CreateFDiv(ConstantFP::get(I.getType(), 1.0), Res);
1233 return replaceInstUsesWith(I, Res);
1234 }
1235 }
1236
1237 // -X / -Y -> X / Y
1238 Value *X, *Y;
1239 if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y)))) {
1240 I.setOperand(0, X);
1241 I.setOperand(1, Y);
1242 return &I;
1243 }
1244
1245 // X / (X * Y) --> 1.0 / Y
1246 // Reassociating to (X / X -> 1.0) is legal when NaNs are not allowed.
1247 // We can ignore the possibility that X is infinity because INF/INF is NaN.
1248 if (I.hasNoNaNs() && I.hasAllowReassoc() &&
1249 match(Op1, m_c_FMul(m_Specific(Op0), m_Value(Y)))) {
1250 I.setOperand(0, ConstantFP::get(I.getType(), 1.0));
1251 I.setOperand(1, Y);
1252 return &I;
1253 }
1254
1255 return nullptr;
1256 }
1257
1258 /// This function implements the transforms common to both integer remainder
1259 /// instructions (urem and srem). It is called by the visitors to those integer
1260 /// remainder instructions.
1261 /// Common integer remainder transforms
1262 Instruction *InstCombiner::commonIRemTransforms(BinaryOperator &I) {
1263 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1264
1265 // The RHS is known non-zero.
1266 if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I)) {
1267 I.setOperand(1, V);
1268 return &I;
1269 }
1270
1271 // Handle cases involving: rem X, (select Cond, Y, Z)
1272 if (simplifyDivRemOfSelectWithZeroOp(I))
1273 return &I;
1274
1275 if (isa<Constant>(Op1)) {
1276 if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
1277 if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
1278 if (Instruction *R = FoldOpIntoSelect(I, SI))
1279 return R;
1280 } else if (auto *PN = dyn_cast<PHINode>(Op0I)) {
1281 const APInt *Op1Int;
1282 if (match(Op1, m_APInt(Op1Int)) && !Op1Int->isMinValue() &&
1283 (I.getOpcode() == Instruction::URem ||
1284 !Op1Int->isMinSignedValue())) {
1285 // foldOpIntoPhi will speculate instructions to the end of the PHI's
1286 // predecessor blocks, so do this only if we know the srem or urem
1287 // will not fault.
1288 if (Instruction *NV = foldOpIntoPhi(I, PN))
1289 return NV;
1290 }
1291 }
1292
1293 // See if we can fold away this rem instruction.
1294 if (SimplifyDemandedInstructionBits(I))
1295 return &I;
1296 }
1297 }
1298
1299 return nullptr;
1300 }
1301
1302 Instruction *InstCombiner::visitURem(BinaryOperator &I) {
1303 if (Value *V = SimplifyURemInst(I.getOperand(0), I.getOperand(1),
1304 SQ.getWithInstruction(&I)))
1305 return replaceInstUsesWith(I, V);
1306
1307 if (Instruction *X = foldShuffledBinop(I))
1308 return X;
1309
1310 if (Instruction *common = commonIRemTransforms(I))
1311 return common;
1312
1313 if (Instruction *NarrowRem = narrowUDivURem(I, Builder))
1314 return NarrowRem;
1315
1316 // X urem Y -> X and Y-1, where Y is a power of 2,
1317 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1318 Type *Ty = I.getType();
1319 if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
1320 Constant *N1 = Constant::getAllOnesValue(Ty);
1321 Value *Add = Builder.CreateAdd(Op1, N1);
1322 return BinaryOperator::CreateAnd(Op0, Add);
1323 }
1324
1325 // 1 urem X -> zext(X != 1)
1326 if (match(Op0, m_One()))
1327 return CastInst::CreateZExtOrBitCast(Builder.CreateICmpNE(Op1, Op0), Ty);
1328
1329 // X urem C -> X < C ? X : X - C, where C >= signbit.
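// When C has the sign bit set, Op0 u< 2 * C, so the quotient is 0 or 1 and
// the remainder is either Op0 itself or Op0 - C.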
1330 if (match(Op1, m_Negative())) {
1331 Value *Cmp = Builder.CreateICmpULT(Op0, Op1);
1332 Value *Sub = Builder.CreateSub(Op0, Op1);
1333 return SelectInst::Create(Cmp, Op0, Sub);
1334 }
1335
1336 // If the divisor is a sext of a boolean, then the divisor must be max
1337 // unsigned value (-1). Therefore, the remainder is Op0 unless Op0 is also
1338 // max unsigned value. In that case, the remainder is 0:
1339 // urem Op0, (sext i1 X) --> (Op0 == -1) ? 0 : Op0
1340 Value *X;
1341 if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
1342 Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
1343 return SelectInst::Create(Cmp, ConstantInt::getNullValue(Ty), Op0);
1344 }
1345
1346 return nullptr;
1347 }
1348
1349 Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
1350 if (Value *V = SimplifySRemInst(I.getOperand(0), I.getOperand(1),
1351 SQ.getWithInstruction(&I)))
1352 return replaceInstUsesWith(I, V);
1353
1354 if (Instruction *X = foldShuffledBinop(I))
1355 return X;
1356
1357 // Handle the integer rem common cases
1358 if (Instruction *Common = commonIRemTransforms(I))
1359 return Common;
1360
1361 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1362 {
1363 const APInt *Y;
1364 // X % -Y -> X % Y
1365 if (match(Op1, m_Negative(Y)) && !Y->isMinSignedValue()) {
1366 Worklist.AddValue(I.getOperand(1));
1367 I.setOperand(1, ConstantInt::get(I.getType(), -*Y));
1368 return &I;
1369 }
1370 }
1371
1372 // If the sign bits of both operands are zero (i.e. we can prove they are
1373 // unsigned inputs), turn this into a urem.
1374 APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
1375 if (MaskedValueIsZero(Op1, Mask, 0, &I) &&
1376 MaskedValueIsZero(Op0, Mask, 0, &I)) {
1377 // X srem Y -> X urem Y, iff X and Y don't have sign bit set
1378 return BinaryOperator::CreateURem(Op0, Op1, I.getName());
1379 }
1380
1381 // If it's a constant vector, flip any negative values positive.
1382 if (isa<ConstantVector>(Op1) || isa<ConstantDataVector>(Op1)) {
1383 Constant *C = cast<Constant>(Op1);
1384 unsigned VWidth = C->getType()->getVectorNumElements();
1385
1386 bool hasNegative = false;
1387 bool hasMissing = false;
1388 for (unsigned i = 0; i != VWidth; ++i) {
1389 Constant *Elt = C->getAggregateElement(i);
1390 if (!Elt) {
1391 hasMissing = true;
1392 break;
1393 }
1394
1395 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elt))
1396 if (RHS->isNegative())
1397 hasNegative = true;
1398 }
1399
1400 if (hasNegative && !hasMissing) {
1401 SmallVector<Constant *, 16> Elts(VWidth);
1402 for (unsigned i = 0; i != VWidth; ++i) {
1403 Elts[i] = C->getAggregateElement(i); // Handle undef, etc.
1404 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elts[i])) {
1405 if (RHS->isNegative())
1406 Elts[i] = cast<ConstantInt>(ConstantExpr::getNeg(RHS));
1407 }
1408 }
1409
1410 Constant *NewRHSV = ConstantVector::get(Elts);
1411 if (NewRHSV != C) { // Don't loop on -MININT
1412 Worklist.AddValue(I.getOperand(1));
1413 I.setOperand(1, NewRHSV);
1414 return &I;
1415 }
1416 }
1417 }
1418
1419 return nullptr;
1420 }
1421
1422 Instruction *InstCombiner::visitFRem(BinaryOperator &I) {
1423 if (Value *V = SimplifyFRemInst(I.getOperand(0), I.getOperand(1),
1424 I.getFastMathFlags(),
1425 SQ.getWithInstruction(&I)))
1426 return replaceInstUsesWith(I, V);
1427
1428 if (Instruction *X = foldShuffledBinop(I))
1429 return X;
1430
1431 return nullptr;
1432 }
1433