1 //===- InstCombineAddSub.cpp ----------------------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the visit functions for add, fadd, sub, and fsub.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "InstCombine.h"
15 #include "llvm/Analysis/InstructionSimplify.h"
16 #include "llvm/Target/TargetData.h"
17 #include "llvm/Support/GetElementPtrTypeIterator.h"
18 #include "llvm/Support/PatternMatch.h"
19 using namespace llvm;
20 using namespace PatternMatch;
21
22 /// AddOne - Add one to a ConstantInt.
AddOne(Constant * C)23 static Constant *AddOne(Constant *C) {
24 return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
25 }
26 /// SubOne - Subtract one from a ConstantInt.
SubOne(ConstantInt * C)27 static Constant *SubOne(ConstantInt *C) {
28 return ConstantInt::get(C->getContext(), C->getValue()-1);
29 }
30
31
32 // dyn_castFoldableMul - If this value is a multiply that can be folded into
33 // other computations (because it has a constant operand), return the
34 // non-constant operand of the multiply, and set CST to point to the multiplier.
35 // Otherwise, return null.
36 //
dyn_castFoldableMul(Value * V,ConstantInt * & CST)37 static inline Value *dyn_castFoldableMul(Value *V, ConstantInt *&CST) {
38 if (!V->hasOneUse() || !V->getType()->isIntegerTy())
39 return 0;
40
41 Instruction *I = dyn_cast<Instruction>(V);
42 if (I == 0) return 0;
43
44 if (I->getOpcode() == Instruction::Mul)
45 if ((CST = dyn_cast<ConstantInt>(I->getOperand(1))))
46 return I->getOperand(0);
47 if (I->getOpcode() == Instruction::Shl)
48 if ((CST = dyn_cast<ConstantInt>(I->getOperand(1)))) {
49 // The multiplier is really 1 << CST.
50 uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
51 uint32_t CSTVal = CST->getLimitedValue(BitWidth);
52 CST = ConstantInt::get(V->getType()->getContext(),
53 APInt(BitWidth, 1).shl(CSTVal));
54 return I->getOperand(0);
55 }
56 return 0;
57 }
58
59
60 /// WillNotOverflowSignedAdd - Return true if we can prove that:
61 /// (sext (add LHS, RHS)) === (add (sext LHS), (sext RHS))
62 /// This basically requires proving that the add in the original type would not
63 /// overflow to change the sign bit or have a carry out.
WillNotOverflowSignedAdd(Value * LHS,Value * RHS)64 bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS) {
65 // There are different heuristics we can use for this. Here are some simple
66 // ones.
67
68 // Add has the property that adding any two 2's complement numbers can only
69 // have one carry bit which can change a sign. As such, if LHS and RHS each
70 // have at least two sign bits, we know that the addition of the two values
71 // will sign extend fine.
72 if (ComputeNumSignBits(LHS) > 1 && ComputeNumSignBits(RHS) > 1)
73 return true;
74
75
76 // If one of the operands only has one non-zero bit, and if the other operand
77 // has a known-zero bit in a more significant place than it (not including the
78 // sign bit) the ripple may go up to and fill the zero, but won't change the
79 // sign. For example, (X & ~4) + 1.
80
81 // TODO: Implement.
82
83 return false;
84 }
85
/// visitAdd - Combine an integer 'add'.  Tries a chain of transforms in
/// priority order and returns the replacement instruction, the (modified)
/// original instruction, or null if nothing applied.  NOTE(review): the
/// ordering of the folds below is deliberate; later folds assume earlier
/// canonicalizations did not fire.
Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
  bool Changed = SimplifyAssociativeOrCommutative(I);
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

  // First see if the whole add simplifies away (constant folding, identities).
  if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(),
                                 I.hasNoUnsignedWrap(), TD))
    return ReplaceInstUsesWith(I, V);

  // (A*B)+(A*C) -> A*(B+C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return ReplaceInstUsesWith(I, V);

  if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
    // X + (signbit) --> X ^ signbit
    const APInt &Val = CI->getValue();
    if (Val.isSignBit())
      return BinaryOperator::CreateXor(LHS, RHS);

    // See if SimplifyDemandedBits can simplify this.  This handles stuff like
    // (X & 254)+1 -> (X&254)|1
    if (SimplifyDemandedInstructionBits(I))
      return &I;

    // zext(bool) + C -> bool ? C + 1 : C
    if (ZExtInst *ZI = dyn_cast<ZExtInst>(LHS))
      if (ZI->getSrcTy()->isIntegerTy(1))
        return SelectInst::Create(ZI->getOperand(0), AddOne(CI), CI);

    Value *XorLHS = 0; ConstantInt *XorRHS = 0;
    if (match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)))) {
      uint32_t TySizeBits = I.getType()->getScalarSizeInBits();
      const APInt &RHSVal = CI->getValue();
      unsigned ExtendAmt = 0;
      // If we have ADD(XOR(AND(X, 0xFF), 0x80), 0xF..F80), it's a sext.
      // If we have ADD(XOR(AND(X, 0xFF), 0xF..F80), 0x80), it's a sext.
      if (XorRHS->getValue() == -RHSVal) {
        if (RHSVal.isPowerOf2())
          ExtendAmt = TySizeBits - RHSVal.logBase2() - 1;
        else if (XorRHS->getValue().isPowerOf2())
          ExtendAmt = TySizeBits - XorRHS->getValue().logBase2() - 1;
      }

      // The shl/ashr pair below is only a sign extension if the bits being
      // "extended" are known to be zero beforehand.
      if (ExtendAmt) {
        APInt Mask = APInt::getHighBitsSet(TySizeBits, ExtendAmt);
        if (!MaskedValueIsZero(XorLHS, Mask))
          ExtendAmt = 0;
      }

      if (ExtendAmt) {
        Constant *ShAmt = ConstantInt::get(I.getType(), ExtendAmt);
        Value *NewShl = Builder->CreateShl(XorLHS, ShAmt, "sext");
        return BinaryOperator::CreateAShr(NewShl, ShAmt);
      }

      // If this is a xor that was canonicalized from a sub, turn it back into
      // a sub and fuse this add with it.
      if (LHS->hasOneUse() && (XorRHS->getValue()+1).isPowerOf2()) {
        IntegerType *IT = cast<IntegerType>(I.getType());
        APInt LHSKnownOne(IT->getBitWidth(), 0);
        APInt LHSKnownZero(IT->getBitWidth(), 0);
        ComputeMaskedBits(XorLHS, LHSKnownZero, LHSKnownOne);
        if ((XorRHS->getValue() | LHSKnownZero).isAllOnesValue())
          return BinaryOperator::CreateSub(ConstantExpr::getAdd(XorRHS, CI),
                                           XorLHS);
      }
    }
  }

  // phi + C: push the constant into the phi's incoming values when profitable.
  if (isa<Constant>(RHS) && isa<PHINode>(LHS))
    if (Instruction *NV = FoldOpIntoPhi(I))
      return NV;

  // In i1, add is the same operation as xor (1+1 wraps to 0).
  if (I.getType()->isIntegerTy(1))
    return BinaryOperator::CreateXor(LHS, RHS);

  // X + X --> X << 1
  if (LHS == RHS) {
    BinaryOperator *New =
      BinaryOperator::CreateShl(LHS, ConstantInt::get(I.getType(), 1));
    // The shift-by-one wraps exactly when the add does, so NSW/NUW transfer.
    New->setHasNoSignedWrap(I.hasNoSignedWrap());
    New->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
    return New;
  }

  // -A + B --> B - A
  // -A + -B --> -(A + B)
  if (Value *LHSV = dyn_castNegVal(LHS)) {
    if (Value *RHSV = dyn_castNegVal(RHS)) {
      Value *NewAdd = Builder->CreateAdd(LHSV, RHSV, "sum");
      return BinaryOperator::CreateNeg(NewAdd);
    }

    return BinaryOperator::CreateSub(RHS, LHSV);
  }

  // A + -B --> A - B
  if (!isa<Constant>(RHS))
    if (Value *V = dyn_castNegVal(RHS))
      return BinaryOperator::CreateSub(LHS, V);


  ConstantInt *C2;
  if (Value *X = dyn_castFoldableMul(LHS, C2)) {
    if (X == RHS) // X*C + X --> X * (C+1)
      return BinaryOperator::CreateMul(RHS, AddOne(C2));

    // X*C1 + X*C2 --> X * (C1+C2)
    ConstantInt *C1;
    if (X == dyn_castFoldableMul(RHS, C1))
      return BinaryOperator::CreateMul(X, ConstantExpr::getAdd(C1, C2));
  }

  // X + X*C --> X * (C+1)
  if (dyn_castFoldableMul(RHS, C2) == LHS)
    return BinaryOperator::CreateMul(LHS, AddOne(C2));

  // A+B --> A|B iff A and B have no bits set in common.
  if (IntegerType *IT = dyn_cast<IntegerType>(I.getType())) {
    APInt LHSKnownOne(IT->getBitWidth(), 0);
    APInt LHSKnownZero(IT->getBitWidth(), 0);
    ComputeMaskedBits(LHS, LHSKnownZero, LHSKnownOne);
    // Only bother analyzing RHS if LHS has at least one known-zero bit.
    if (LHSKnownZero != 0) {
      APInt RHSKnownOne(IT->getBitWidth(), 0);
      APInt RHSKnownZero(IT->getBitWidth(), 0);
      ComputeMaskedBits(RHS, RHSKnownZero, RHSKnownOne);

      // No bits in common -> bitwise or.
      if ((LHSKnownZero|RHSKnownZero).isAllOnesValue())
        return BinaryOperator::CreateOr(LHS, RHS);
    }
  }

  // W*X + Y*Z --> W * (X+Z) iff W == Y
  {
    Value *W, *X, *Y, *Z;
    if (match(LHS, m_Mul(m_Value(W), m_Value(X))) &&
        match(RHS, m_Mul(m_Value(Y), m_Value(Z)))) {
      // Canonicalize so any shared multiplicand lands in both W and Y.
      if (W != Y) {
        if (W == Z) {
          std::swap(Y, Z);
        } else if (Y == X) {
          std::swap(W, X);
        } else if (X == Z) {
          std::swap(Y, Z);
          std::swap(W, X);
        }
      }

      if (W == Y) {
        Value *NewAdd = Builder->CreateAdd(X, Z, LHS->getName());
        return BinaryOperator::CreateMul(W, NewAdd);
      }
    }
  }

  if (ConstantInt *CRHS = dyn_cast<ConstantInt>(RHS)) {
    Value *X = 0;
    if (match(LHS, m_Not(m_Value(X)))) // ~X + C --> (C-1) - X
      return BinaryOperator::CreateSub(SubOne(CRHS), X);

    // (X & FF00) + xx00 -> (X+xx00) & FF00
    if (LHS->hasOneUse() &&
        match(LHS, m_And(m_Value(X), m_ConstantInt(C2))) &&
        CRHS->getValue() == (CRHS->getValue() & C2->getValue())) {
      // See if all bits from the first bit set in the Add RHS up are included
      // in the mask.  First, get the rightmost bit.
      const APInt &AddRHSV = CRHS->getValue();

      // Form a mask of all bits from the lowest bit added through the top.
      APInt AddRHSHighBits(~((AddRHSV & -AddRHSV)-1));

      // See if the and mask includes all of these bits.
      APInt AddRHSHighBitsAnd(AddRHSHighBits & C2->getValue());

      if (AddRHSHighBits == AddRHSHighBitsAnd) {
        // Okay, the xform is safe.  Insert the new add pronto.
        Value *NewAdd = Builder->CreateAdd(X, CRHS, LHS->getName());
        return BinaryOperator::CreateAnd(NewAdd, C2);
      }
    }

    // Try to fold constant add into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(LHS))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;
  }

  // add (select X 0 (sub n A)) A  -->  select X A n
  {
    // The select may be on either side of the add; A names the other operand.
    SelectInst *SI = dyn_cast<SelectInst>(LHS);
    Value *A = RHS;
    if (!SI) {
      SI = dyn_cast<SelectInst>(RHS);
      A = LHS;
    }
    if (SI && SI->hasOneUse()) {
      Value *TV = SI->getTrueValue();
      Value *FV = SI->getFalseValue();
      Value *N;

      // Can we fold the add into the argument of the select?
      // We check both true and false select arguments for a matching subtract.
      if (match(FV, m_Zero()) && match(TV, m_Sub(m_Value(N), m_Specific(A))))
        // Fold the add into the true select value.
        return SelectInst::Create(SI->getCondition(), N, A);

      if (match(TV, m_Zero()) && match(FV, m_Sub(m_Value(N), m_Specific(A))))
        // Fold the add into the false select value.
        return SelectInst::Create(SI->getCondition(), A, N);
    }
  }

  // Check for (add (sext x), y), see if we can merge this into an
  // integer add followed by a sext.
  if (SExtInst *LHSConv = dyn_cast<SExtInst>(LHS)) {
    // (add (sext x), cst) --> (sext (add x, cst'))
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
      Constant *CI =
        ConstantExpr::getTrunc(RHSC, LHSConv->getOperand(0)->getType());
      // The trunc/sext round trip check verifies the constant fits in the
      // narrow type.
      if (LHSConv->hasOneUse() &&
          ConstantExpr::getSExt(CI, I.getType()) == RHSC &&
          WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI)) {
        // Insert the new, smaller add.
        Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
                                              CI, "addconv");
        return new SExtInst(NewAdd, I.getType());
      }
    }

    // (add (sext x), (sext y)) --> (sext (add int x, y))
    if (SExtInst *RHSConv = dyn_cast<SExtInst>(RHS)) {
      // Only do this if x/y have the same type, if at least one of them has a
      // single use (so we don't increase the number of sexts), and if the
      // integer add will not overflow.
      if (LHSConv->getOperand(0)->getType()==RHSConv->getOperand(0)->getType()&&
          (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
          WillNotOverflowSignedAdd(LHSConv->getOperand(0),
                                   RHSConv->getOperand(0))) {
        // Insert the new integer add.
        Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
                                              RHSConv->getOperand(0),
                                              "addconv");
        return new SExtInst(NewAdd, I.getType());
      }
    }
  }

  return Changed ? &I : 0;
}
334
/// visitFAdd - Combine a floating point 'fadd'.  Returns a replacement
/// instruction, the (modified) original, or null if no transform applied.
Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
  bool Changed = SimplifyAssociativeOrCommutative(I);
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

  if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
    // X + -0.0 --> X.  Note we match -0.0 (not +0.0): adding -0.0 is the
    // identity for every input, whereas -0.0 + +0.0 == +0.0.
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHSC)) {
      if (CFP->isExactlyValue(ConstantFP::getNegativeZero
                              (I.getType())->getValueAPF()))
        return ReplaceInstUsesWith(I, LHS);
    }

    // phi + C: push the constant into the phi's incoming values.
    if (isa<PHINode>(LHS))
      if (Instruction *NV = FoldOpIntoPhi(I))
        return NV;
  }

  // -A + B --> B - A
  // -A + -B --> -(A + B)   (falls out of the same rewrite: B is itself -B')
  if (Value *LHSV = dyn_castFNegVal(LHS))
    return BinaryOperator::CreateFSub(RHS, LHSV);

  // A + -B --> A - B
  if (!isa<Constant>(RHS))
    if (Value *V = dyn_castFNegVal(RHS))
      return BinaryOperator::CreateFSub(LHS, V);

  // Check for X+0.0.  Simplify it to X if we know X is not -0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS))
    if (CFP->getValueAPF().isPosZero() && CannotBeNegativeZero(LHS))
      return ReplaceInstUsesWith(I, LHS);

  // Check for (fadd double (sitofp x), y), see if we can merge this into an
  // integer add followed by a promotion.
  if (SIToFPInst *LHSConv = dyn_cast<SIToFPInst>(LHS)) {
    // (fadd double (sitofp x), fpcst) --> (sitofp (add int x, intcst))
    // ... if the constant fits in the integer value.  This is useful for things
    // like (double)(x & 1234) + 4.0 -> (double)((X & 1234)+4) which no longer
    // requires a constant pool load, and generally allows the add to be better
    // instcombined.
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS)) {
      Constant *CI =
      ConstantExpr::getFPToSI(CFP, LHSConv->getOperand(0)->getType());
      // The fptosi/sitofp round trip check verifies the FP constant is
      // exactly representable as an integer of the source type.
      if (LHSConv->hasOneUse() &&
          ConstantExpr::getSIToFP(CI, I.getType()) == CFP &&
          WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI)) {
        // Insert the new integer add.
        Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
                                              CI, "addconv");
        return new SIToFPInst(NewAdd, I.getType());
      }
    }

    // (fadd double (sitofp x), (sitofp y)) --> (sitofp (add int x, y))
    if (SIToFPInst *RHSConv = dyn_cast<SIToFPInst>(RHS)) {
      // Only do this if x/y have the same type, if at least one of them has a
      // single use (so we don't increase the number of int->fp conversions),
      // and if the integer add will not overflow.
      if (LHSConv->getOperand(0)->getType()==RHSConv->getOperand(0)->getType()&&
          (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
          WillNotOverflowSignedAdd(LHSConv->getOperand(0),
                                   RHSConv->getOperand(0))) {
        // Insert the new integer add.
        Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
                                              RHSConv->getOperand(0),"addconv");
        return new SIToFPInst(NewAdd, I.getType());
      }
    }
  }

  return Changed ? &I : 0;
}
407
408
/// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the
/// code necessary to compute the offset from the base pointer (without adding
/// in the base pointer).  Return the result as a signed integer of intptr size.
Value *InstCombiner::EmitGEPOffset(User *GEP) {
  TargetData &TD = *getTargetData();
  gep_type_iterator GTI = gep_type_begin(GEP);
  Type *IntPtrTy = TD.getIntPtrType(GEP->getContext());
  // Running total; starts at zero and accumulates one term per GEP index.
  Value *Result = Constant::getNullValue(IntPtrTy);

  // If the GEP is inbounds, we know that none of the addressing operations will
  // overflow in an unsigned sense.
  bool isInBounds = cast<GEPOperator>(GEP)->isInBounds();

  // Build a mask for high order bits.
  unsigned IntPtrWidth = TD.getPointerSizeInBits();
  uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);

  // Walk the index operands (operand 0 is the base pointer, so skip it),
  // stepping the gep type iterator in lockstep.
  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
       ++i, ++GTI) {
    Value *Op = *i;
    // Element size for this level, truncated to pointer width.
    uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
    if (ConstantInt *OpC = dyn_cast<ConstantInt>(Op)) {
      if (OpC->isZero()) continue;

      // Handle a struct index, which adds its field offset to the pointer.
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        Size = TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());

        if (Size)
          Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
                                      GEP->getName()+".offs");
        continue;
      }

      // Constant array/vector index: fold index*size entirely at compile time.
      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
      Constant *OC =
        ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
      Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
      // Emit an add instruction.
      Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
      continue;
    }
    // Variable index: convert to correct type.
    if (Op->getType() != IntPtrTy)
      Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
    if (Size != 1) {
      // We'll let instcombine(mul) convert this to a shl if possible.
      Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
                              GEP->getName()+".idx", isInBounds /*NUW*/);
    }

    // Emit an add instruction.
    Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
  }
  return Result;
}
465
466
467
468
/// Optimize pointer differences into the same array into a size.  Consider:
///  &A[10] - &A[0]: we should compile this to "10".  LHS/RHS are the pointer
/// operands to the ptrtoint instructions for the LHS/RHS of the subtract.
///
/// Returns the difference as an integer of type Ty, or null if the pattern
/// does not match.
Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
                                               Type *Ty) {
  assert(TD && "Must have target data info for this");

  // If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
  // this.
  // Swapped records that the GEP was on the RHS, i.e. we computed
  // (gep offset) for "X - gep(X, ...)" and must negate at the end.
  bool Swapped = false;
  GEPOperator *GEP1 = 0, *GEP2 = 0;

  // For now we require one side to be the base pointer "A" or a constant
  // GEP derived from it.
  if (GEPOperator *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
    // (gep X, ...) - X
    if (LHSGEP->getOperand(0) == RHS) {
      GEP1 = LHSGEP;
      Swapped = false;
    } else if (GEPOperator *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
      // (gep X, ...) - (gep X, ...)
      if (LHSGEP->getOperand(0)->stripPointerCasts() ==
            RHSGEP->getOperand(0)->stripPointerCasts()) {
        GEP2 = RHSGEP;
        GEP1 = LHSGEP;
        Swapped = false;
      }
    }
  }

  if (GEPOperator *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
    // X - (gep X, ...)
    if (RHSGEP->getOperand(0) == LHS) {
      GEP1 = RHSGEP;
      Swapped = true;
    } else if (GEPOperator *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
      // (gep X, ...) - (gep X, ...)
      if (RHSGEP->getOperand(0)->stripPointerCasts() ==
            LHSGEP->getOperand(0)->stripPointerCasts()) {
        GEP2 = LHSGEP;
        GEP1 = RHSGEP;
        Swapped = true;
      }
    }
  }

  // Avoid duplicating the arithmetic if GEP2 has non-constant indices and
  // multiple users.
  if (GEP1 == 0 ||
      (GEP2 != 0 && !GEP2->hasAllConstantIndices() && !GEP2->hasOneUse()))
    return 0;

  // Emit the offset of the GEP and an intptr_t.
  Value *Result = EmitGEPOffset(GEP1);

  // If we had a constant expression GEP on the other side offsetting the
  // pointer, subtract it from the offset we have.
  if (GEP2) {
    Value *Offset = EmitGEPOffset(GEP2);
    Result = Builder->CreateSub(Result, Offset);
  }

  // If we have p - gep(p, ...) then we have to negate the result.
  if (Swapped)
    Result = Builder->CreateNeg(Result, "diff.neg");

  // The caller's subtract may be narrower or wider than intptr_t.
  return Builder->CreateIntCast(Result, Ty, true);
}
538
539
/// visitSub - Combine an integer 'sub'.  Tries a chain of transforms in
/// priority order and returns the replacement instruction, the (modified)
/// original instruction, or null if nothing applied.
Instruction *InstCombiner::visitSub(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // First see if the whole sub simplifies away.
  if (Value *V = SimplifySubInst(Op0, Op1, I.hasNoSignedWrap(),
                                 I.hasNoUnsignedWrap(), TD))
    return ReplaceInstUsesWith(I, V);

  // (A*B)-(A*C) -> A*(B-C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return ReplaceInstUsesWith(I, V);

  // If this is a 'B = x-(-A)', change to B = x+A.  This preserves NSW/NUW.
  if (Value *V = dyn_castNegVal(Op1)) {
    BinaryOperator *Res = BinaryOperator::CreateAdd(Op0, V);
    Res->setHasNoSignedWrap(I.hasNoSignedWrap());
    Res->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
    return Res;
  }

  // In i1, sub is the same operation as xor.
  if (I.getType()->isIntegerTy(1))
    return BinaryOperator::CreateXor(Op0, Op1);

  // Replace (-1 - A) with (~A).
  if (match(Op0, m_AllOnes()))
    return BinaryOperator::CreateNot(Op1);

  if (ConstantInt *C = dyn_cast<ConstantInt>(Op0)) {
    // C - ~X == X + (1+C)
    Value *X = 0;
    if (match(Op1, m_Not(m_Value(X))))
      return BinaryOperator::CreateAdd(X, AddOne(C));

    // -(X >>u 31) -> (X >>s 31)
    // -(X >>s 31) -> (X >>u 31)
    if (C->isZero()) {
      Value *X; ConstantInt *CI;
      if (match(Op1, m_LShr(m_Value(X), m_ConstantInt(CI))) &&
          // Verify we are shifting out everything but the sign bit.
          CI->getValue() == I.getType()->getPrimitiveSizeInBits()-1)
        return BinaryOperator::CreateAShr(X, CI);

      if (match(Op1, m_AShr(m_Value(X), m_ConstantInt(CI))) &&
          // Verify we are shifting out everything but the sign bit.
          CI->getValue() == I.getType()->getPrimitiveSizeInBits()-1)
        return BinaryOperator::CreateLShr(X, CI);
    }

    // Try to fold constant sub into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

    // C - zext(bool) -> bool ? C - 1 : C
    if (ZExtInst *ZI = dyn_cast<ZExtInst>(Op1))
      if (ZI->getSrcTy()->isIntegerTy(1))
        return SelectInst::Create(ZI->getOperand(0), SubOne(C), C);

    // C-(X+C2) --> (C-C2)-X
    ConstantInt *C2;
    if (match(Op1, m_Add(m_Value(X), m_ConstantInt(C2))))
      return BinaryOperator::CreateSub(ConstantExpr::getSub(C, C2), X);

    if (SimplifyDemandedInstructionBits(I))
      return &I;
  }


  {
    Value *Y;
    // X-(X+Y) == -Y    X-(Y+X) == -Y
    if (match(Op1, m_Add(m_Specific(Op0), m_Value(Y))) ||
        match(Op1, m_Add(m_Value(Y), m_Specific(Op0))))
      return BinaryOperator::CreateNeg(Y);

    // (X-Y)-X == -Y
    if (match(Op0, m_Sub(m_Specific(Op1), m_Value(Y))))
      return BinaryOperator::CreateNeg(Y);
  }

  // The following folds all create a new instruction for Op1's computation,
  // so only do them when Op1 dies here (single use).
  if (Op1->hasOneUse()) {
    Value *X = 0, *Y = 0, *Z = 0;
    Constant *C = 0;
    ConstantInt *CI = 0;

    // (X - (Y - Z))  -->  (X + (Z - Y)).
    if (match(Op1, m_Sub(m_Value(Y), m_Value(Z))))
      return BinaryOperator::CreateAdd(Op0,
                                      Builder->CreateSub(Z, Y, Op1->getName()));

    // (X - (X & Y))   -->   (X & ~Y)
    //
    if (match(Op1, m_And(m_Value(Y), m_Specific(Op0))) ||
        match(Op1, m_And(m_Specific(Op0), m_Value(Y))))
      return BinaryOperator::CreateAnd(Op0,
                                  Builder->CreateNot(Y, Y->getName() + ".not"));

    // 0 - (X sdiv C)  -> (X sdiv -C)
    if (match(Op1, m_SDiv(m_Value(X), m_Constant(C))) &&
        match(Op0, m_Zero()))
      return BinaryOperator::CreateSDiv(X, ConstantExpr::getNeg(C));

    // 0 - (X << Y)  -> (-X << Y)   when X is freely negatable.
    if (match(Op1, m_Shl(m_Value(X), m_Value(Y))) && match(Op0, m_Zero()))
      if (Value *XNeg = dyn_castNegVal(X))
        return BinaryOperator::CreateShl(XNeg, Y);

    // X - X*C --> X * (1-C)
    if (match(Op1, m_Mul(m_Specific(Op0), m_ConstantInt(CI)))) {
      Constant *CP1 = ConstantExpr::getSub(ConstantInt::get(I.getType(),1), CI);
      return BinaryOperator::CreateMul(Op0, CP1);
    }

    // X - X<<C --> X * (1-(1<<C))
    if (match(Op1, m_Shl(m_Specific(Op0), m_ConstantInt(CI)))) {
      Constant *One = ConstantInt::get(I.getType(), 1);
      C = ConstantExpr::getSub(One, ConstantExpr::getShl(One, CI));
      return BinaryOperator::CreateMul(Op0, C);
    }

    // X - A*-B -> X + A*B
    // X - -A*B -> X + A*B
    Value *A, *B;
    if (match(Op1, m_Mul(m_Value(A), m_Neg(m_Value(B)))) ||
        match(Op1, m_Mul(m_Neg(m_Value(A)), m_Value(B))))
      return BinaryOperator::CreateAdd(Op0, Builder->CreateMul(A, B));

    // X - A*CI -> X + A*-CI
    // X - CI*A -> X + A*-CI
    if (match(Op1, m_Mul(m_Value(A), m_ConstantInt(CI))) ||
        match(Op1, m_Mul(m_ConstantInt(CI), m_Value(A)))) {
      Value *NewMul = Builder->CreateMul(A, ConstantExpr::getNeg(CI));
      return BinaryOperator::CreateAdd(Op0, NewMul);
    }
  }

  ConstantInt *C1;
  if (Value *X = dyn_castFoldableMul(Op0, C1)) {
    if (X == Op1)  // X*C - X --> X * (C-1)
      return BinaryOperator::CreateMul(Op1, SubOne(C1));

    ConstantInt *C2;   // X*C1 - X*C2 -> X * (C1-C2)
    if (X == dyn_castFoldableMul(Op1, C2))
      return BinaryOperator::CreateMul(X, ConstantExpr::getSub(C1, C2));
  }

  // Optimize pointer differences into the same array into a size.  Consider:
  //  &A[10] - &A[0]: we should compile this to "10".
  if (TD) {
    Value *LHSOp, *RHSOp;
    if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
        match(Op1, m_PtrToInt(m_Value(RHSOp))))
      if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
        return ReplaceInstUsesWith(I, Res);

    // trunc(p)-trunc(q) -> trunc(p-q)
    if (match(Op0, m_Trunc(m_PtrToInt(m_Value(LHSOp)))) &&
        match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp)))))
      if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
        return ReplaceInstUsesWith(I, Res);
  }

  return 0;
}
702
visitFSub(BinaryOperator & I)703 Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
704 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
705
706 // If this is a 'B = x-(-A)', change to B = x+A...
707 if (Value *V = dyn_castFNegVal(Op1))
708 return BinaryOperator::CreateFAdd(Op0, V);
709
710 return 0;
711 }
712