//===- InstCombineNegator.cpp -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements sinking of negation into expression trees,
// as long as that can be done without increasing instruction count.
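//
// For example (illustrative IR, not taken from the LLVM test suite), given
//   %t = add i32 %x, 5
//   %r = sub i32 0, %t
// the negation can be sunk into the `add`, producing
//   %t.neg = sub i32 -5, %x
// and %r is then replaced by %t.neg, with no net increase in instructions.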
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <cassert>
#include <cstdint>
#include <functional>
#include <tuple>
#include <type_traits>
#include <utility>

namespace llvm {
class AssumptionCache;
class DataLayout;
class DominatorTree;
class LLVMContext;
} // namespace llvm

using namespace llvm;

#define DEBUG_TYPE "instcombine"

STATISTIC(NegatorTotalNegationsAttempted,
          "Negator: Number of negations attempted to be sunk");
STATISTIC(NegatorNumTreesNegated,
          "Negator: Number of negations successfully sunk");
STATISTIC(NegatorMaxDepthVisited, "Negator: Maximal traversal depth ever "
                                  "reached while attempting to sink negation");
STATISTIC(NegatorTimesDepthLimitReached,
          "Negator: How many times the traversal depth limit was reached "
          "during sinking");
STATISTIC(
    NegatorNumValuesVisited,
    "Negator: Total number of values visited during attempts to sink negation");
STATISTIC(NegatorNumNegationsFoundInCache,
          "Negator: How many negations did we retrieve/reuse from cache");
STATISTIC(NegatorMaxTotalValuesVisited,
          "Negator: Maximal number of values ever visited while attempting to "
          "sink negation");
STATISTIC(NegatorNumInstructionsCreatedTotal,
          "Negator: Number of new negated instructions created, total");
STATISTIC(NegatorMaxInstructionsCreated,
          "Negator: Maximal number of new instructions created during negation "
          "attempt");
STATISTIC(NegatorNumInstructionsNegatedSuccess,
          "Negator: Number of new negated instructions created in successful "
          "negation sinking attempts");

DEBUG_COUNTER(NegatorCounter, "instcombine-negator",
              "Controls Negator transformations in InstCombine pass");

static cl::opt<bool>
    NegatorEnabled("instcombine-negator-enabled", cl::init(true),
                   cl::desc("Should we attempt to sink negations?"));

static cl::opt<unsigned>
    NegatorMaxDepth("instcombine-negator-max-depth",
                    cl::init(NegatorDefaultMaxDepth),
                    cl::desc("Maximal lookup depth when checking the "
                             "viability of negation sinking."));

Negator::Negator(LLVMContext &C, const DataLayout &DL_, AssumptionCache &AC_,
                 const DominatorTree &DT_, bool IsTrulyNegation_)
    : Builder(C, TargetFolder(DL_),
              IRBuilderCallbackInserter([&](Instruction *I) {
                ++NegatorNumInstructionsCreatedTotal;
                NewInstructions.push_back(I);
              })),
      DL(DL_), AC(AC_), DT(DT_), IsTrulyNegation(IsTrulyNegation_) {}

#if LLVM_ENABLE_STATS
Negator::~Negator() {
  NegatorMaxTotalValuesVisited.updateMax(NumValuesVisitedInThisNegator);
}
#endif

// Due to InstCombine's worklist management, there are no guarantees that
// each instruction we'll encounter has been visited by InstCombine already.
// In particular, and most importantly for us, that means we have to
// canonicalize constants to the RHS ourselves, since that is sometimes helpful.
std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
  assert(I->getNumOperands() == 2 && "Only for binops!");
  std::array<Value *, 2> Ops{I->getOperand(0), I->getOperand(1)};
  if (I->isCommutative() && InstCombiner::getComplexity(I->getOperand(0)) <
                                InstCombiner::getComplexity(I->getOperand(1)))
    std::swap(Ops[0], Ops[1]);
  return Ops;
}

// FIXME: can this be reworked into a worklist-based algorithm while preserving
// the depth-first, early bailout traversal?
LLVM_NODISCARD Value *Negator::visitImpl(Value *V, unsigned Depth) {
  // -(undef) -> undef.
  if (match(V, m_Undef()))
    return V;

  // In i1, negation can simply be ignored.
  if (V->getType()->isIntOrIntVectorTy(1))
    return V;

  Value *X;

  // -(-(X)) -> X.
  if (match(V, m_Neg(m_Value(X))))
    return X;

  // Integral constants can be freely negated.
  if (match(V, m_AnyIntegralConstant()))
    return ConstantExpr::getNeg(cast<Constant>(V), /*HasNUW=*/false,
                                /*HasNSW=*/false);

  // If we have a non-instruction, then give up.
  if (!isa<Instruction>(V))
    return nullptr;

  // If we have started with a true negation (i.e. `sub 0, %y`), then, if we've
  // got an instruction that does not require recursive reasoning, we can still
  // negate it even if it has other uses, without increasing instruction count.
  if (!V->hasOneUse() && !IsTrulyNegation)
    return nullptr;

  auto *I = cast<Instruction>(V);
  unsigned BitWidth = I->getType()->getScalarSizeInBits();

  // We must preserve the insertion point and debug info that is set in the
  // builder at the time this function is called.
  InstCombiner::BuilderTy::InsertPointGuard Guard(Builder);
  // And since we are trying to negate instruction I, that tells us about the
  // insertion point and the debug info that we need to keep.
  Builder.SetInsertPoint(I);

  // In some cases we can give the answer without further recursion.
  switch (I->getOpcode()) {
  case Instruction::Add: {
    std::array<Value *, 2> Ops = getSortedOperandsOfBinOp(I);
    // `inc` is always negatible.
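    // For instance (illustrative): -(%x + 1) == ~%x in two's complement,
    // since ~%x == -%x - 1.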
    if (match(Ops[1], m_One()))
      return Builder.CreateNot(Ops[0], I->getName() + ".neg");
    break;
  }
  case Instruction::Xor:
    // `not` is always negatible.
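    // For instance (illustrative): -(~%x) == %x + 1 in two's complement.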
    if (match(I, m_Not(m_Value(X))))
      return Builder.CreateAdd(X, ConstantInt::get(X->getType(), 1),
                               I->getName() + ".neg");
    break;
  case Instruction::AShr:
  case Instruction::LShr: {
    // Right-shift sign bit smear is negatible.
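    // For instance (illustrative, i32): `ashr %x, 31` yields 0 or -1, and
    // `lshr %x, 31` yields 0 or 1, so each is the negation of the other.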
    const APInt *Op1Val;
    if (match(I->getOperand(1), m_APInt(Op1Val)) && *Op1Val == BitWidth - 1) {
      Value *BO = I->getOpcode() == Instruction::AShr
                      ? Builder.CreateLShr(I->getOperand(0), I->getOperand(1))
                      : Builder.CreateAShr(I->getOperand(0), I->getOperand(1));
      if (auto *NewInstr = dyn_cast<Instruction>(BO)) {
        NewInstr->copyIRFlags(I);
        NewInstr->setName(I->getName() + ".neg");
      }
      return BO;
    }
    // While we could negate exact arithmetic shift:
    //   ashr exact %x, C  -->   sdiv exact i8 %x, -1<<C
    // iff C != 0 and C u< bitwidth(%x), we don't want to,
    // because division is *THAT* much worse than a shift.
    break;
  }
  case Instruction::SExt:
  case Instruction::ZExt:
    // `*ext` of i1 is always negatible.
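    // For instance (illustrative): -(sext i1 %b to i32) == zext i1 %b to i32,
    // since sext yields 0 or -1 while zext yields 0 or 1.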
    if (I->getOperand(0)->getType()->isIntOrIntVectorTy(1))
      return I->getOpcode() == Instruction::SExt
                 ? Builder.CreateZExt(I->getOperand(0), I->getType(),
                                      I->getName() + ".neg")
                 : Builder.CreateSExt(I->getOperand(0), I->getType(),
                                      I->getName() + ".neg");
    break;
  default:
    break; // Other instructions require recursive reasoning.
  }

  if (I->getOpcode() == Instruction::Sub &&
      (I->hasOneUse() || (isa<Constant>(I->getOperand(0)) &&
                          !isa<ConstantExpr>(I->getOperand(0))))) {
    // `sub` is always negatible.
    // However, only do this either if the old `sub` doesn't stick around, or
    // it was subtracting from a constant. Otherwise, this isn't profitable.
    return Builder.CreateSub(I->getOperand(1), I->getOperand(0),
                             I->getName() + ".neg");
  }

  // Some other cases, while they still don't require recursion,
  // are restricted to the one-use case.
  if (!V->hasOneUse())
    return nullptr;

  switch (I->getOpcode()) {
  case Instruction::SDiv:
    // `sdiv` is negatible if divisor is not undef/INT_MIN/1.
    // While this is normally not behind a use-check,
    // let's consider division to be special since it's costly.
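    // For instance (illustrative): -(%x /s 3) == %x /s -3, i.e. it is the
    // divisor that gets negated.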
    if (auto *Op1C = dyn_cast<Constant>(I->getOperand(1))) {
      if (!Op1C->containsUndefElement() && Op1C->isNotMinSignedValue() &&
          Op1C->isNotOneValue()) {
        Value *BO =
            Builder.CreateSDiv(I->getOperand(0), ConstantExpr::getNeg(Op1C),
                               I->getName() + ".neg");
        if (auto *NewInstr = dyn_cast<Instruction>(BO))
          NewInstr->setIsExact(I->isExact());
        return BO;
      }
    }
    break;
  }

  // Rest of the logic is recursive, so if it's time to give up then it's time.
  if (Depth > NegatorMaxDepth) {
    LLVM_DEBUG(dbgs() << "Negator: reached maximal allowed traversal depth in "
                      << *V << ". Giving up.\n");
    ++NegatorTimesDepthLimitReached;
    return nullptr;
  }

  switch (I->getOpcode()) {
  case Instruction::Freeze: {
    // `freeze` is negatible if its operand is negatible.
    Value *NegOp = negate(I->getOperand(0), Depth + 1);
    if (!NegOp) // Early return.
      return nullptr;
    return Builder.CreateFreeze(NegOp, I->getName() + ".neg");
  }
  case Instruction::PHI: {
    // `phi` is negatible if all the incoming values are negatible.
    auto *PHI = cast<PHINode>(I);
    SmallVector<Value *, 4> NegatedIncomingValues(PHI->getNumOperands());
    for (auto I : zip(PHI->incoming_values(), NegatedIncomingValues)) {
      if (!(std::get<1>(I) =
                negate(std::get<0>(I), Depth + 1))) // Early return.
        return nullptr;
    }
    // All incoming values are indeed negatible. Create negated PHI node.
    PHINode *NegatedPHI = Builder.CreatePHI(
        PHI->getType(), PHI->getNumOperands(), PHI->getName() + ".neg");
    for (auto I : zip(NegatedIncomingValues, PHI->blocks()))
      NegatedPHI->addIncoming(std::get<0>(I), std::get<1>(I));
    return NegatedPHI;
  }
  case Instruction::Select: {
    if (isKnownNegation(I->getOperand(1), I->getOperand(2))) {
      // If one hand of the select is known to be the negation of the other
      // hand, just swap the hands around.
      auto *NewSelect = cast<SelectInst>(I->clone());
      // Just swap the operands of the select.
      NewSelect->swapValues();
      // Don't swap prof metadata, we didn't change the branch behavior.
      NewSelect->setName(I->getName() + ".neg");
      Builder.Insert(NewSelect);
      return NewSelect;
    }
    // `select` is negatible if both hands of `select` are negatible.
    Value *NegOp1 = negate(I->getOperand(1), Depth + 1);
    if (!NegOp1) // Early return.
      return nullptr;
    Value *NegOp2 = negate(I->getOperand(2), Depth + 1);
    if (!NegOp2)
      return nullptr;
    // Do preserve the metadata!
    return Builder.CreateSelect(I->getOperand(0), NegOp1, NegOp2,
                                I->getName() + ".neg", /*MDFrom=*/I);
  }
  case Instruction::ShuffleVector: {
    // `shufflevector` is negatible if both operands are negatible.
    auto *Shuf = cast<ShuffleVectorInst>(I);
    Value *NegOp0 = negate(I->getOperand(0), Depth + 1);
    if (!NegOp0) // Early return.
      return nullptr;
    Value *NegOp1 = negate(I->getOperand(1), Depth + 1);
    if (!NegOp1)
      return nullptr;
    return Builder.CreateShuffleVector(NegOp0, NegOp1, Shuf->getShuffleMask(),
                                       I->getName() + ".neg");
  }
  case Instruction::ExtractElement: {
    // `extractelement` is negatible if source operand is negatible.
    auto *EEI = cast<ExtractElementInst>(I);
    Value *NegVector = negate(EEI->getVectorOperand(), Depth + 1);
    if (!NegVector) // Early return.
      return nullptr;
    return Builder.CreateExtractElement(NegVector, EEI->getIndexOperand(),
                                        I->getName() + ".neg");
  }
  case Instruction::InsertElement: {
    // `insertelement` is negatible if both the source vector and
    // element-to-be-inserted are negatible.
    auto *IEI = cast<InsertElementInst>(I);
    Value *NegVector = negate(IEI->getOperand(0), Depth + 1);
    if (!NegVector) // Early return.
      return nullptr;
    Value *NegNewElt = negate(IEI->getOperand(1), Depth + 1);
    if (!NegNewElt) // Early return.
      return nullptr;
    return Builder.CreateInsertElement(NegVector, NegNewElt, IEI->getOperand(2),
                                       I->getName() + ".neg");
  }
  case Instruction::Trunc: {
    // `trunc` is negatible if its operand is negatible.
    Value *NegOp = negate(I->getOperand(0), Depth + 1);
    if (!NegOp) // Early return.
      return nullptr;
    return Builder.CreateTrunc(NegOp, I->getType(), I->getName() + ".neg");
  }
  case Instruction::Shl: {
    // `shl` is negatible if the first operand is negatible.
    if (Value *NegOp0 = negate(I->getOperand(0), Depth + 1))
      return Builder.CreateShl(NegOp0, I->getOperand(1), I->getName() + ".neg");
    // Otherwise, `shl %x, C` can be interpreted as `mul %x, 1<<C`.
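    // For instance (illustrative): -(%x << 3) == %x * -8 == mul %x, (-1 << 3).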
    auto *Op1C = dyn_cast<Constant>(I->getOperand(1));
    if (!Op1C) // Early return.
      return nullptr;
    return Builder.CreateMul(
        I->getOperand(0),
        ConstantExpr::getShl(Constant::getAllOnesValue(Op1C->getType()), Op1C),
        I->getName() + ".neg");
  }
  case Instruction::Or: {
    if (!haveNoCommonBitsSet(I->getOperand(0), I->getOperand(1), DL, &AC, I,
                             &DT))
      return nullptr; // Don't know how to handle `or` in general.
    std::array<Value *, 2> Ops = getSortedOperandsOfBinOp(I);
    // `or`/`add` are interchangeable when operands have no common bits set.
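    // For instance (illustrative): if %a & %b == 0, then %a | %b == %a + %b.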
    // `inc` is always negatible.
    if (match(Ops[1], m_One()))
      return Builder.CreateNot(Ops[0], I->getName() + ".neg");
    // Else, just defer to Instruction::Add handling.
    LLVM_FALLTHROUGH;
  }
  case Instruction::Add: {
    // `add` is negatible if both of its operands are negatible.
    SmallVector<Value *, 2> NegatedOps, NonNegatedOps;
    for (Value *Op : I->operands()) {
      // Can we sink the negation into this operand?
      if (Value *NegOp = negate(Op, Depth + 1)) {
        NegatedOps.emplace_back(NegOp); // Successfully negated operand!
        continue;
      }
      // Failed to sink negation into this operand. IFF we started from a
      // negation and manage to sink the negation into one operand, we can
      // still do this.
      if (!IsTrulyNegation)
        return nullptr;
      NonNegatedOps.emplace_back(Op); // Just record which operand that was.
    }
    assert((NegatedOps.size() + NonNegatedOps.size()) == 2 &&
           "Internal consistency sanity check.");
    // Did we manage to sink negation into both of the operands?
    if (NegatedOps.size() == 2) // Then we get to keep the `add`!
      return Builder.CreateAdd(NegatedOps[0], NegatedOps[1],
                               I->getName() + ".neg");
    assert(IsTrulyNegation && "We should have early-exited then.");
    // Completely failed to sink negation?
    if (NonNegatedOps.size() == 2)
      return nullptr;
    // 0-(a+b) --> (-a)-b
    return Builder.CreateSub(NegatedOps[0], NonNegatedOps[0],
                             I->getName() + ".neg");
  }
  case Instruction::Xor: {
    std::array<Value *, 2> Ops = getSortedOperandsOfBinOp(I);
    // `xor` is negatible if one of its operands is invertible.
    // FIXME: InstCombineInverter? But how to connect Inverter and Negator?
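    // For instance (illustrative): -(%x ^ C) == (%x ^ ~C) + 1, since
    // ~(%x ^ C) == %x ^ ~C and -%v == ~%v + 1.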
    if (auto *C = dyn_cast<Constant>(Ops[1])) {
      Value *Xor = Builder.CreateXor(Ops[0], ConstantExpr::getNot(C));
      return Builder.CreateAdd(Xor, ConstantInt::get(Xor->getType(), 1),
                               I->getName() + ".neg");
    }
    return nullptr;
  }
  case Instruction::Mul: {
    std::array<Value *, 2> Ops = getSortedOperandsOfBinOp(I);
    // `mul` is negatible if one of its operands is negatible.
    Value *NegatedOp, *OtherOp;
    // First try the second operand: in case it's a constant, it will be best
    // to just invert it instead of sinking the `neg` deeper.
    if (Value *NegOp1 = negate(Ops[1], Depth + 1)) {
      NegatedOp = NegOp1;
      OtherOp = Ops[0];
    } else if (Value *NegOp0 = negate(Ops[0], Depth + 1)) {
      NegatedOp = NegOp0;
      OtherOp = Ops[1];
    } else
      // Can't negate either of them.
      return nullptr;
    return Builder.CreateMul(NegatedOp, OtherOp, I->getName() + ".neg");
  }
  default:
    return nullptr; // Don't know, likely not negatible for free.
  }

  llvm_unreachable("Can't get here. We always return from switch.");
}

LLVM_NODISCARD Value *Negator::negate(Value *V, unsigned Depth) {
  NegatorMaxDepthVisited.updateMax(Depth);
  ++NegatorNumValuesVisited;

#if LLVM_ENABLE_STATS
  ++NumValuesVisitedInThisNegator;
#endif

#ifndef NDEBUG
  // We can't ever have a Value with such an address.
  Value *Placeholder = reinterpret_cast<Value *>(static_cast<uintptr_t>(-1));
#endif

  // Did we already try to negate this value?
  auto NegationsCacheIterator = NegationsCache.find(V);
  if (NegationsCacheIterator != NegationsCache.end()) {
    ++NegatorNumNegationsFoundInCache;
    Value *NegatedV = NegationsCacheIterator->second;
    assert(NegatedV != Placeholder && "Encountered a cycle during negation.");
    return NegatedV;
  }

#ifndef NDEBUG
  // We did not find a cached result for negation of V. While we are here,
  // let's temporarily cache a placeholder value, with the idea that if we
  // later fetch it from the cache during negation, we'll know we're in a cycle.
  NegationsCache[V] = Placeholder;
#endif

  // No luck. Try negating it for real.
  Value *NegatedV = visitImpl(V, Depth);
  // And cache the (real) result for the future.
  NegationsCache[V] = NegatedV;

  return NegatedV;
}

LLVM_NODISCARD Optional<Negator::Result> Negator::run(Value *Root) {
  Value *Negated = negate(Root, /*Depth=*/0);
  if (!Negated) {
    // We must clean up newly-inserted instructions, to avoid any potential
    // endless combine looping.
    llvm::for_each(llvm::reverse(NewInstructions),
                   [&](Instruction *I) { I->eraseFromParent(); });
    return llvm::None;
  }
  return std::make_pair(ArrayRef<Instruction *>(NewInstructions), Negated);
}

LLVM_NODISCARD Value *Negator::Negate(bool LHSIsZero, Value *Root,
                                      InstCombinerImpl &IC) {
  ++NegatorTotalNegationsAttempted;
  LLVM_DEBUG(dbgs() << "Negator: attempting to sink negation into " << *Root
                    << "\n");

  if (!NegatorEnabled || !DebugCounter::shouldExecute(NegatorCounter))
    return nullptr;

  Negator N(Root->getContext(), IC.getDataLayout(), IC.getAssumptionCache(),
            IC.getDominatorTree(), LHSIsZero);
  Optional<Result> Res = N.run(Root);
  if (!Res) { // Negation failed.
    LLVM_DEBUG(dbgs() << "Negator: failed to sink negation into " << *Root
                      << "\n");
    return nullptr;
  }

  LLVM_DEBUG(dbgs() << "Negator: successfully sunk negation into " << *Root
                    << "\n         NEW: " << *Res->second << "\n");
  ++NegatorNumTreesNegated;

  // We must temporarily unset the 'current' insertion point and DebugLoc of
  // InstCombine's IRBuilder so that it won't interfere with the ones we have
  // already specified when producing negated instructions.
  InstCombiner::BuilderTy::InsertPointGuard Guard(IC.Builder);
  IC.Builder.ClearInsertionPoint();
  IC.Builder.SetCurrentDebugLocation(DebugLoc());

  // And finally, we must add the newly-created instructions into InstCombine's
  // worklist (in a proper order!) so it can attempt to combine them.
  LLVM_DEBUG(dbgs() << "Negator: Propagating " << Res->first.size()
                    << " instrs to InstCombine\n");
  NegatorMaxInstructionsCreated.updateMax(Res->first.size());
  NegatorNumInstructionsNegatedSuccess += Res->first.size();

  // They are in def-use order, so nothing fancy, just insert them in order.
  llvm::for_each(Res->first,
                 [&](Instruction *I) { IC.Builder.Insert(I, I->getName()); });

  // And return the new root.
  return Res->second;
}