1 //===- DeadStoreElimination.cpp - Fast Dead Store Elimination -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements a trivial dead store elimination that only considers
10 // basic-block local redundant stores.
11 //
12 // FIXME: This should eventually be extended to be a post-dominator tree
13 // traversal.  Doing so would be pretty trivial.
14 //
15 //===----------------------------------------------------------------------===//
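//
// For example, in IR like:
//
//   store i32 1, i32* %p   ; dead: %p is not read before the next store
//   store i32 2, i32* %p
//
// the first store can be deleted because the second one completely
// overwrites the same location within the same basic block.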
16 
17 #include "llvm/Transforms/Scalar/DeadStoreElimination.h"
18 #include "llvm/ADT/APInt.h"
19 #include "llvm/ADT/DenseMap.h"
20 #include "llvm/ADT/MapVector.h"
21 #include "llvm/ADT/PostOrderIterator.h"
22 #include "llvm/ADT/SetVector.h"
23 #include "llvm/ADT/SmallPtrSet.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/Statistic.h"
26 #include "llvm/ADT/StringRef.h"
27 #include "llvm/Analysis/AliasAnalysis.h"
28 #include "llvm/Analysis/CaptureTracking.h"
29 #include "llvm/Analysis/GlobalsModRef.h"
30 #include "llvm/Analysis/MemoryBuiltins.h"
31 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
32 #include "llvm/Analysis/MemoryLocation.h"
33 #include "llvm/Analysis/MemorySSA.h"
34 #include "llvm/Analysis/MemorySSAUpdater.h"
35 #include "llvm/Analysis/PostDominators.h"
36 #include "llvm/Analysis/TargetLibraryInfo.h"
37 #include "llvm/Analysis/ValueTracking.h"
38 #include "llvm/IR/Argument.h"
39 #include "llvm/IR/BasicBlock.h"
40 #include "llvm/IR/Constant.h"
41 #include "llvm/IR/Constants.h"
42 #include "llvm/IR/DataLayout.h"
43 #include "llvm/IR/Dominators.h"
44 #include "llvm/IR/Function.h"
45 #include "llvm/IR/InstIterator.h"
46 #include "llvm/IR/InstrTypes.h"
47 #include "llvm/IR/Instruction.h"
48 #include "llvm/IR/Instructions.h"
49 #include "llvm/IR/IntrinsicInst.h"
50 #include "llvm/IR/Intrinsics.h"
51 #include "llvm/IR/LLVMContext.h"
52 #include "llvm/IR/Module.h"
53 #include "llvm/IR/PassManager.h"
54 #include "llvm/IR/PatternMatch.h"
55 #include "llvm/IR/Value.h"
56 #include "llvm/InitializePasses.h"
57 #include "llvm/Pass.h"
58 #include "llvm/Support/Casting.h"
59 #include "llvm/Support/CommandLine.h"
60 #include "llvm/Support/Debug.h"
61 #include "llvm/Support/DebugCounter.h"
62 #include "llvm/Support/ErrorHandling.h"
63 #include "llvm/Support/MathExtras.h"
64 #include "llvm/Support/raw_ostream.h"
65 #include "llvm/Transforms/Scalar.h"
66 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
67 #include "llvm/Transforms/Utils/Local.h"
68 #include <algorithm>
69 #include <cassert>
70 #include <cstddef>
71 #include <cstdint>
72 #include <iterator>
73 #include <map>
74 #include <utility>
75 
76 using namespace llvm;
77 using namespace PatternMatch;
78 
79 #define DEBUG_TYPE "dse"
80 
81 STATISTIC(NumRemainingStores, "Number of stores remaining after DSE");
82 STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
83 STATISTIC(NumFastStores, "Number of stores deleted");
84 STATISTIC(NumFastOther, "Number of other instrs removed");
85 STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
86 STATISTIC(NumModifiedStores, "Number of stores modified");
87 STATISTIC(NumCFGChecks, "Number of CFG checks performed");
88 STATISTIC(NumCFGTries, "Number of CFG checks attempted");
89 STATISTIC(NumCFGSuccess, "Number of successful CFG checks");
90 STATISTIC(NumGetDomMemoryDefPassed,
91           "Number of times a valid candidate is returned from getDomMemoryDef");
92 STATISTIC(NumDomMemDefChecks,
93           "Number of iterations checking for reads in getDomMemoryDef");
94 
95 DEBUG_COUNTER(MemorySSACounter, "dse-memoryssa",
96               "Controls which MemoryDefs are eliminated.");
97 
98 static cl::opt<bool>
99 EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
100   cl::init(true), cl::Hidden,
101   cl::desc("Enable partial-overwrite tracking in DSE"));
102 
103 static cl::opt<bool>
104 EnablePartialStoreMerging("enable-dse-partial-store-merging",
105   cl::init(true), cl::Hidden,
106   cl::desc("Enable partial store merging in DSE"));
107 
108 static cl::opt<bool>
109     EnableMemorySSA("enable-dse-memoryssa", cl::init(true), cl::Hidden,
110                     cl::desc("Use the new MemorySSA-backed DSE."));
111 
112 static cl::opt<unsigned>
113     MemorySSAScanLimit("dse-memoryssa-scanlimit", cl::init(150), cl::Hidden,
114                        cl::desc("The number of memory instructions to scan for "
115                                 "dead store elimination (default = 150)"));
116 static cl::opt<unsigned> MemorySSAUpwardsStepLimit(
117     "dse-memoryssa-walklimit", cl::init(90), cl::Hidden,
118     cl::desc("The maximum number of steps while walking upwards to find "
119              "MemoryDefs that may be killed (default = 90)"));
120 
121 static cl::opt<unsigned> MemorySSAPartialStoreLimit(
122     "dse-memoryssa-partial-store-limit", cl::init(5), cl::Hidden,
123     cl::desc("The maximum number of candidates that only partially overwrite the "
124              "killing MemoryDef to consider"
125              " (default = 5)"));
126 
127 static cl::opt<unsigned> MemorySSADefsPerBlockLimit(
128     "dse-memoryssa-defs-per-block-limit", cl::init(5000), cl::Hidden,
129     cl::desc("The number of MemoryDefs we consider as candidates to eliminate "
130              "other stores per basic block (default = 5000)"));
131 
132 static cl::opt<unsigned> MemorySSASameBBStepCost(
133     "dse-memoryssa-samebb-cost", cl::init(1), cl::Hidden,
134     cl::desc(
135         "The cost of a step in the same basic block as the killing MemoryDef "
136         "(default = 1)"));
137 
138 static cl::opt<unsigned>
139     MemorySSAOtherBBStepCost("dse-memoryssa-otherbb-cost", cl::init(5),
140                              cl::Hidden,
141                              cl::desc("The cost of a step in a different basic "
142                                       "block than the killing MemoryDef "
143                                       "(default = 5)"));
144 
145 static cl::opt<unsigned> MemorySSAPathCheckLimit(
146     "dse-memoryssa-path-check-limit", cl::init(50), cl::Hidden,
147     cl::desc("The maximum number of blocks to check when trying to prove that "
148              "all paths to an exit go through a killing block (default = 50)"));
149 
150 //===----------------------------------------------------------------------===//
151 // Helper functions
152 //===----------------------------------------------------------------------===//
153 using OverlapIntervalsTy = std::map<int64_t, int64_t>;
154 using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;
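// The intervals in OverlapIntervalsTy are half-open byte ranges keyed by their
// end offset, with the start offset as the mapped value. For example, a later
// write covering bytes [4, 12) of an earlier store is recorded as IM[12] = 4
// (see isPartialOverwrite below).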
155 
156 /// Delete this instruction.  Before we do, go through and zero out all the
157 /// operands of this instruction.  If any of them become dead, delete them and
158 /// the computation tree that feeds them.
159 /// If ValueSet is non-null, remove any deleted instructions from it as well.
160 static void
161 deleteDeadInstruction(Instruction *I, BasicBlock::iterator *BBI,
162                       MemoryDependenceResults &MD, const TargetLibraryInfo &TLI,
163                       InstOverlapIntervalsTy &IOL,
164                       MapVector<Instruction *, bool> &ThrowableInst,
165                       SmallSetVector<const Value *, 16> *ValueSet = nullptr) {
166   SmallVector<Instruction*, 32> NowDeadInsts;
167 
168   NowDeadInsts.push_back(I);
169   --NumFastOther;
170 
171   // Keeping the iterator straight is a pain, so we let this routine tell the
172   // caller what the next instruction is after we're done mucking about.
173   BasicBlock::iterator NewIter = *BBI;
174 
175   // Before we touch this instruction, remove it from memdep!
176   do {
177     Instruction *DeadInst = NowDeadInsts.pop_back_val();
178     // Mark the DeadInst as dead in the list of throwable instructions.
179     auto It = ThrowableInst.find(DeadInst);
180     if (It != ThrowableInst.end())
181       ThrowableInst[It->first] = false;
182     ++NumFastOther;
183 
184     // Try to preserve debug information attached to the dead instruction.
185     salvageDebugInfo(*DeadInst);
186     salvageKnowledge(DeadInst);
187 
188     // This instruction is dead, zap it, in stages.  Start by removing it from
189     // MemDep, which needs to know the operands and needs it to be in the
190     // function.
191     MD.removeInstruction(DeadInst);
192 
193     for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
194       Value *Op = DeadInst->getOperand(op);
195       DeadInst->setOperand(op, nullptr);
196 
197       // If this operand just became dead, add it to the NowDeadInsts list.
198       if (!Op->use_empty()) continue;
199 
200       if (Instruction *OpI = dyn_cast<Instruction>(Op))
201         if (isInstructionTriviallyDead(OpI, &TLI))
202           NowDeadInsts.push_back(OpI);
203     }
204 
205     if (ValueSet) ValueSet->remove(DeadInst);
206     IOL.erase(DeadInst);
207 
208     if (NewIter == DeadInst->getIterator())
209       NewIter = DeadInst->eraseFromParent();
210     else
211       DeadInst->eraseFromParent();
212   } while (!NowDeadInsts.empty());
213   *BBI = NewIter;
214   // Pop dead entries from back of ThrowableInst till we find an alive entry.
215   while (!ThrowableInst.empty() && !ThrowableInst.back().second)
216     ThrowableInst.pop_back();
217 }
218 
219 /// Does this instruction write some memory?  This only returns true for things
220 /// that we can analyze with other helpers below.
221 static bool hasAnalyzableMemoryWrite(Instruction *I,
222                                      const TargetLibraryInfo &TLI) {
223   if (isa<StoreInst>(I))
224     return true;
225   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
226     switch (II->getIntrinsicID()) {
227     default:
228       return false;
229     case Intrinsic::memset:
230     case Intrinsic::memmove:
231     case Intrinsic::memcpy:
232     case Intrinsic::memcpy_inline:
233     case Intrinsic::memcpy_element_unordered_atomic:
234     case Intrinsic::memmove_element_unordered_atomic:
235     case Intrinsic::memset_element_unordered_atomic:
236     case Intrinsic::init_trampoline:
237     case Intrinsic::lifetime_end:
238     case Intrinsic::masked_store:
239       return true;
240     }
241   }
242   if (auto *CB = dyn_cast<CallBase>(I)) {
243     LibFunc LF;
244     if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) {
245       switch (LF) {
246       case LibFunc_strcpy:
247       case LibFunc_strncpy:
248       case LibFunc_strcat:
249       case LibFunc_strncat:
250         return true;
251       default:
252         return false;
253       }
254     }
255   }
256   return false;
257 }
258 
259 /// Return a Location stored to by the specified instruction. If isRemovable
260 /// returns true, this function and getLocForRead completely describe the memory
261 /// operations for this instruction.
262 static MemoryLocation getLocForWrite(Instruction *Inst,
263                                      const TargetLibraryInfo &TLI) {
264   if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
265     return MemoryLocation::get(SI);
266 
267   if (auto *MI = dyn_cast<AnyMemIntrinsic>(Inst)) {
268     // memcpy/memmove/memset.
269     MemoryLocation Loc = MemoryLocation::getForDest(MI);
270     return Loc;
271   }
272 
273   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
274     switch (II->getIntrinsicID()) {
275     default:
276       return MemoryLocation(); // Unhandled intrinsic.
277     case Intrinsic::init_trampoline:
278       return MemoryLocation::getAfter(II->getArgOperand(0));
279     case Intrinsic::masked_store:
280       return MemoryLocation::getForArgument(II, 1, TLI);
281     case Intrinsic::lifetime_end: {
282       uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
283       return MemoryLocation(II->getArgOperand(1), Len);
284     }
285     }
286   }
287   if (auto *CB = dyn_cast<CallBase>(Inst))
288     // All the supported TLI functions so far happen to have dest as their
289     // first argument.
290     return MemoryLocation::getAfter(CB->getArgOperand(0));
291   return MemoryLocation();
292 }
293 
294 /// Return the location read by the specified "hasAnalyzableMemoryWrite"
295 /// instruction if any.
296 static MemoryLocation getLocForRead(Instruction *Inst,
297                                     const TargetLibraryInfo &TLI) {
298   assert(hasAnalyzableMemoryWrite(Inst, TLI) && "Unknown instruction case");
299 
300   // The only instructions that both read and write are the mem transfer
301   // instructions (memcpy/memmove).
302   if (auto *MTI = dyn_cast<AnyMemTransferInst>(Inst))
303     return MemoryLocation::getForSource(MTI);
304   return MemoryLocation();
305 }
306 
307 /// If the value of this instruction and the memory it writes to is unused, may
308 /// we delete this instruction?
309 static bool isRemovable(Instruction *I) {
310   // Don't remove volatile/atomic stores.
311   if (StoreInst *SI = dyn_cast<StoreInst>(I))
312     return SI->isUnordered();
313 
314   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
315     switch (II->getIntrinsicID()) {
316     default: llvm_unreachable("doesn't pass 'hasAnalyzableMemoryWrite' predicate");
317     case Intrinsic::lifetime_end:
318       // Never remove dead lifetime_end's, e.g. because it is followed by a
319       // free.
320       return false;
321     case Intrinsic::init_trampoline:
322       // Always safe to remove init_trampoline.
323       return true;
324     case Intrinsic::memset:
325     case Intrinsic::memmove:
326     case Intrinsic::memcpy:
327     case Intrinsic::memcpy_inline:
328       // Don't remove volatile memory intrinsics.
329       return !cast<MemIntrinsic>(II)->isVolatile();
330     case Intrinsic::memcpy_element_unordered_atomic:
331     case Intrinsic::memmove_element_unordered_atomic:
332     case Intrinsic::memset_element_unordered_atomic:
333     case Intrinsic::masked_store:
334       return true;
335     }
336   }
337 
338   // note: only get here for calls with analyzable writes - i.e. libcalls
339   if (auto *CB = dyn_cast<CallBase>(I))
340     return CB->use_empty();
341 
342   return false;
343 }
344 
345 /// Returns true if the end of this instruction can be safely shortened in
346 /// length.
347 static bool isShortenableAtTheEnd(Instruction *I) {
348   // Don't shorten stores for now
349   if (isa<StoreInst>(I))
350     return false;
351 
352   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
353     switch (II->getIntrinsicID()) {
354       default: return false;
355       case Intrinsic::memset:
356       case Intrinsic::memcpy:
357       case Intrinsic::memcpy_element_unordered_atomic:
358       case Intrinsic::memset_element_unordered_atomic:
359         // Do shorten memory intrinsics.
360         // FIXME: Add memmove if it's also safe to transform.
361         return true;
362     }
363   }
364 
365   // Don't shorten libcalls for now.
366 
367   return false;
368 }
369 
370 /// Returns true if the beginning of this instruction can be safely shortened
371 /// in length.
372 static bool isShortenableAtTheBeginning(Instruction *I) {
373   // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
374   // easily done by offsetting the source address.
375   return isa<AnyMemSetInst>(I);
376 }
377 
378 /// Return the pointer that is being written to.
379 static Value *getStoredPointerOperand(Instruction *I,
380                                       const TargetLibraryInfo &TLI) {
381   //TODO: factor this to reuse getLocForWrite
382   MemoryLocation Loc = getLocForWrite(I, TLI);
383   assert(Loc.Ptr &&
384          "unable to find pointer written for analyzable instruction?");
385   // TODO: most APIs don't expect const Value *
386   return const_cast<Value*>(Loc.Ptr);
387 }
388 
389 static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
390                                const TargetLibraryInfo &TLI,
391                                const Function *F) {
392   uint64_t Size;
393   ObjectSizeOpts Opts;
394   Opts.NullIsUnknownSize = NullPointerIsDefined(F);
395 
396   if (getObjectSize(V, Size, DL, &TLI, Opts))
397     return Size;
398   return MemoryLocation::UnknownSize;
399 }
400 
401 namespace {
402 
403 enum OverwriteResult {
404   OW_Begin,
405   OW_Complete,
406   OW_End,
407   OW_PartialEarlierWithFullLater,
408   OW_MaybePartial,
409   OW_Unknown
410 };
411 
412 } // end anonymous namespace
413 
414 /// Check if two instructions are masked stores that completely
415 /// overwrite one another. More specifically, \p Later has to
416 /// overwrite \p Earlier.
417 template <typename AATy>
418 static OverwriteResult isMaskedStoreOverwrite(const Instruction *Later,
419                                               const Instruction *Earlier,
420                                               AATy &AA) {
421   const auto *IIL = dyn_cast<IntrinsicInst>(Later);
422   const auto *IIE = dyn_cast<IntrinsicInst>(Earlier);
423   if (IIL == nullptr || IIE == nullptr)
424     return OW_Unknown;
425   if (IIL->getIntrinsicID() != Intrinsic::masked_store ||
426       IIE->getIntrinsicID() != Intrinsic::masked_store)
427     return OW_Unknown;
428   // Pointers.
429   Value *LP = IIL->getArgOperand(1)->stripPointerCasts();
430   Value *EP = IIE->getArgOperand(1)->stripPointerCasts();
431   if (LP != EP && !AA.isMustAlias(LP, EP))
432     return OW_Unknown;
433   // Masks.
434   // TODO: check that Later's mask is a superset of the Earlier's mask.
435   if (IIL->getArgOperand(3) != IIE->getArgOperand(3))
436     return OW_Unknown;
437   return OW_Complete;
438 }
439 
440 /// Return 'OW_Complete' if a store to the 'Later' location (by \p LaterI
441 /// instruction) completely overwrites a store to the 'Earlier' location
442 /// (by \p EarlierI instruction).
443 /// Return OW_MaybePartial if \p Later does not completely overwrite
444 /// \p Earlier, but they both write to the same underlying object. In that
445 /// case, use isPartialOverwrite to check if \p Later partially overwrites
446 /// \p Earlier. Returns 'OW_Unknown' if nothing can be determined.
447 template <typename AATy>
448 static OverwriteResult
449 isOverwrite(const Instruction *LaterI, const Instruction *EarlierI,
450             const MemoryLocation &Later, const MemoryLocation &Earlier,
451             const DataLayout &DL, const TargetLibraryInfo &TLI,
452             int64_t &EarlierOff, int64_t &LaterOff, AATy &AA,
453             const Function *F) {
454   // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
455   // get imprecise values here, though (except for unknown sizes).
456   if (!Later.Size.isPrecise() || !Earlier.Size.isPrecise()) {
457     // Masked stores have imprecise locations, but we can reason about them
458     // to some extent.
459     return isMaskedStoreOverwrite(LaterI, EarlierI, AA);
460   }
461 
462   const uint64_t LaterSize = Later.Size.getValue();
463   const uint64_t EarlierSize = Earlier.Size.getValue();
464 
465   const Value *P1 = Earlier.Ptr->stripPointerCasts();
466   const Value *P2 = Later.Ptr->stripPointerCasts();
467 
468   // If the start pointers are the same, we just have to compare sizes to see if
469   // the later store was larger than the earlier store.
470   if (P1 == P2 || AA.isMustAlias(P1, P2)) {
471     // Make sure that the Later size is >= the Earlier size.
472     if (LaterSize >= EarlierSize)
473       return OW_Complete;
474   }
475 
476   // Check to see if the later store is to the entire object (either a global,
477   // an alloca, or a byval/inalloca argument).  If so, then it clearly
478   // overwrites any other store to the same object.
479   const Value *UO1 = getUnderlyingObject(P1), *UO2 = getUnderlyingObject(P2);
480 
481   // If we can't resolve the same pointers to the same object, then we can't
482   // analyze them at all.
483   if (UO1 != UO2)
484     return OW_Unknown;
485 
486   // If the "Later" store is to a recognizable object, get its size.
487   uint64_t ObjectSize = getPointerSize(UO2, DL, TLI, F);
488   if (ObjectSize != MemoryLocation::UnknownSize)
489     if (ObjectSize == LaterSize && ObjectSize >= EarlierSize)
490       return OW_Complete;
491 
492   // Okay, we have stores to two completely different pointers.  Try to
493   // decompose the pointer into a "base + constant_offset" form.  If the base
494   // pointers are equal, then we can reason about the two stores.
495   EarlierOff = 0;
496   LaterOff = 0;
497   const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
498   const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);
499 
500   // If the base pointers still differ, we have two completely different stores.
501   if (BP1 != BP2)
502     return OW_Unknown;
503 
504   // The later access completely overlaps the earlier store if and only if
505   // both the start and the end of the earlier one are "inside" the later one:
506   //    |<->|--earlier--|<->|
507   //    |-------later-------|
508   // Accesses may overlap if and only if start of one of them is "inside"
509   // another one:
510   //    |<->|--earlier--|<----->|
511   //    |-------later-------|
512   //           OR
513   //    |----- earlier -----|
514   //    |<->|---later---|<----->|
515   //
516   // We have to be careful here as *Off is signed while *.Size is unsigned.
517 
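  // For illustration: with EarlierOff = 4, EarlierSize = 4 (earlier bytes
  // [4, 8)) and LaterOff = 0, LaterSize = 8 (later bytes [0, 8)), the earlier
  // access lies entirely inside the later one and the check below returns
  // OW_Complete.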
518   // Check if the earlier access starts "not before" the later one.
519   if (EarlierOff >= LaterOff) {
520     // If the earlier access ends "not after" the later access then the earlier
521     // one is completely overwritten by the later one.
522     if (uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize)
523       return OW_Complete;
524     // If start of the earlier access is "before" end of the later access then
525     // accesses overlap.
526     else if ((uint64_t)(EarlierOff - LaterOff) < LaterSize)
527       return OW_MaybePartial;
528   }
529   // If start of the later access is "before" end of the earlier access then
530   // accesses overlap.
531   else if ((uint64_t)(LaterOff - EarlierOff) < EarlierSize) {
532     return OW_MaybePartial;
533   }
534 
535   // Can reach here only if accesses are known not to overlap. There is no
536   // dedicated code to indicate no overlap so signal "unknown".
537   return OW_Unknown;
538 }
539 
540 /// Return 'OW_Complete' if a store to the 'Later' location completely
541 /// overwrites a store to the 'Earlier' location, 'OW_End' if the end of the
542 /// 'Earlier' location is completely overwritten by 'Later', 'OW_Begin' if the
543 /// beginning of the 'Earlier' location is overwritten by 'Later'.
544 /// 'OW_PartialEarlierWithFullLater' means that an earlier (big) store was
545 /// overwritten by a later (smaller) store which doesn't write outside the big
546 /// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
547 /// NOTE: This function must only be called if both \p Later and \p Earlier
548 /// write to the same underlying object with valid \p EarlierOff and \p
549 /// LaterOff.
550 static OverwriteResult isPartialOverwrite(const MemoryLocation &Later,
551                                           const MemoryLocation &Earlier,
552                                           int64_t EarlierOff, int64_t LaterOff,
553                                           Instruction *DepWrite,
554                                           InstOverlapIntervalsTy &IOL) {
555   const uint64_t LaterSize = Later.Size.getValue();
556   const uint64_t EarlierSize = Earlier.Size.getValue();
557   // The two accesses may now overlap, although the overlap is not complete.
558   // There might also be other incomplete overlaps, and together, they might
559   // cover the complete earlier write.
560   // Note: The correctness of this logic depends on the fact that this function
561   // is not even called providing DepWrite when there are any intervening reads.
562   if (EnablePartialOverwriteTracking &&
563       LaterOff < int64_t(EarlierOff + EarlierSize) &&
564       int64_t(LaterOff + LaterSize) >= EarlierOff) {
565 
566     // Insert our part of the overlap into the map.
567     auto &IM = IOL[DepWrite];
568     LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff
569                       << ", " << int64_t(EarlierOff + EarlierSize)
570                       << ") Later [" << LaterOff << ", "
571                       << int64_t(LaterOff + LaterSize) << ")\n");
572 
573     // Make sure that we only insert non-overlapping intervals and combine
574     // adjacent intervals. The intervals are stored in the map with the ending
575     // offset as the key (in the half-open sense) and the starting offset as
576     // the value.
577     int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + LaterSize;
578 
579     // Find any intervals ending at, or after, LaterIntStart which start
580     // before LaterIntEnd.
581     auto ILI = IM.lower_bound(LaterIntStart);
582     if (ILI != IM.end() && ILI->second <= LaterIntEnd) {
583       // This existing interval is overlapped with the current store somewhere
584       // in [LaterIntStart, LaterIntEnd]. Merge them by erasing the existing
585       // intervals and adjusting our start and end.
586       LaterIntStart = std::min(LaterIntStart, ILI->second);
587       LaterIntEnd = std::max(LaterIntEnd, ILI->first);
588       ILI = IM.erase(ILI);
589 
590       // Continue erasing and adjusting our end in case other previous
591       // intervals are also overlapped with the current store.
592       //
593       //     |--- earlier 1 ---|  |--- earlier 2 ---|
594       //     |------- later---------|
595       //
596       while (ILI != IM.end() && ILI->second <= LaterIntEnd) {
597         assert(ILI->second > LaterIntStart && "Unexpected interval");
598         LaterIntEnd = std::max(LaterIntEnd, ILI->first);
599         ILI = IM.erase(ILI);
600       }
601     }
602 
603     IM[LaterIntEnd] = LaterIntStart;
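    // For illustration: if IM previously held {8: 0} (an overlap covering
    // bytes [0, 8)) and the current store covers bytes [6, 14), the loop above
    // erases the old entry and the merged interval is recorded as IM[14] = 0.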
604 
605     ILI = IM.begin();
606     if (ILI->second <= EarlierOff &&
607         ILI->first >= int64_t(EarlierOff + EarlierSize)) {
608       LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier ["
609                         << EarlierOff << ", "
610                         << int64_t(EarlierOff + EarlierSize)
611                         << ") Composite Later [" << ILI->second << ", "
612                         << ILI->first << ")\n");
613       ++NumCompletePartials;
614       return OW_Complete;
615     }
616   }
617 
618   // Check for an earlier store which writes to all the memory locations that
619   // the later store writes to.
620   if (EnablePartialStoreMerging && LaterOff >= EarlierOff &&
621       int64_t(EarlierOff + EarlierSize) > LaterOff &&
622       uint64_t(LaterOff - EarlierOff) + LaterSize <= EarlierSize) {
623     LLVM_DEBUG(dbgs() << "DSE: Partial overwrite of an earlier store ["
624                       << EarlierOff << ", "
625                       << int64_t(EarlierOff + EarlierSize)
626                       << ") by a later store [" << LaterOff << ", "
627                       << int64_t(LaterOff + LaterSize) << ")\n");
628     // TODO: Maybe come up with a better name?
629     return OW_PartialEarlierWithFullLater;
630   }
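  // For illustration: an earlier 4-byte store covering [0, 4) and a later
  // 1-byte store covering [1, 2) take the branch above; the caller may then
  // fold the later value into the earlier store (see
  // tryToMergePartialOverlappingStores below).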
631 
632   // Another interesting case is if the later store overwrites the end of the
633   // earlier store.
634   //
635   //      |--earlier--|
636   //                |--   later   --|
637   //
638   // In this case we may want to trim the size of earlier to avoid generating
639   // writes to addresses which will definitely be overwritten later
640   if (!EnablePartialOverwriteTracking &&
641       (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + EarlierSize) &&
642        int64_t(LaterOff + LaterSize) >= int64_t(EarlierOff + EarlierSize)))
643     return OW_End;
644 
645   // Finally, we also need to check if the later store overwrites the beginning
646   // of the earlier store.
647   //
648   //                |--earlier--|
649   //      |--   later   --|
650   //
651   // In this case we may want to move the destination address and trim the size
652   // of earlier to avoid generating writes to addresses which will definitely
653   // be overwritten later.
654   if (!EnablePartialOverwriteTracking &&
655       (LaterOff <= EarlierOff && int64_t(LaterOff + LaterSize) > EarlierOff)) {
656     assert(int64_t(LaterOff + LaterSize) < int64_t(EarlierOff + EarlierSize) &&
657            "Expect to be handled as OW_Complete");
658     return OW_Begin;
659   }
660   // Otherwise, they don't completely overlap.
661   return OW_Unknown;
662 }
663 
664 /// If 'Inst' might be a self read (i.e. a noop copy of a
665 /// memory region into an identical pointer) then it doesn't actually make its
666 /// input dead in the traditional sense.  Consider this case:
667 ///
668 ///   memmove(A <- B)
669 ///   memmove(A <- A)
670 ///
671 /// In this case, the second store to A does not make the first store to A dead.
672 /// The usual situation isn't an explicit A<-A store like this (which can be
673 /// trivially removed) but a case where two pointers may alias.
674 ///
675 /// This function detects when it is unsafe to remove a dependent instruction
676 /// because the DSE inducing instruction may be a self-read.
677 static bool isPossibleSelfRead(Instruction *Inst,
678                                const MemoryLocation &InstStoreLoc,
679                                Instruction *DepWrite,
680                                const TargetLibraryInfo &TLI,
681                                AliasAnalysis &AA) {
682   // Self reads can only happen for instructions that read memory.  Get the
683   // location read.
684   MemoryLocation InstReadLoc = getLocForRead(Inst, TLI);
685   if (!InstReadLoc.Ptr)
686     return false; // Not a reading instruction.
687 
688   // If the read and written loc obviously don't alias, it isn't a read.
689   if (AA.isNoAlias(InstReadLoc, InstStoreLoc))
690     return false;
691 
692   if (isa<AnyMemCpyInst>(Inst)) {
693     // LLVM's memcpy overlap semantics are not fully fleshed out (see PR11763)
694     // but in practice memcpy(A <- B) either means that A and B are disjoint or
695     //     are equal (i.e. there are no partial overlaps).  Given that, if we have:
696     //
697     //   memcpy/memmove(A <- B)  // DepWrite
698     //   memcpy(A <- B)  // Inst
699     //
700     // with Inst reading/writing a size >= that of DepWrite, we can reason as
701     // follows:
702     //
703     //   - If A == B then both the copies are no-ops, so the DepWrite can be
704     //     removed.
705     //   - If A != B then A and B are disjoint locations in Inst.  Since
706     //     Inst.size >= DepWrite.size A and B are disjoint in DepWrite too.
707     //     Therefore DepWrite can be removed.
708     MemoryLocation DepReadLoc = getLocForRead(DepWrite, TLI);
709 
710     if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
711       return false;
712   }
713 
714   // If DepWrite doesn't read memory or if we can't prove it is a must alias,
715   // then it can't be considered dead.
716   return true;
717 }
718 
719 /// Returns true if the memory which is accessed by the second instruction is not
720 /// modified between the first and the second instruction.
721 /// Precondition: Second instruction must be dominated by the first
722 /// instruction.
723 template <typename AATy>
724 static bool
725 memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI, AATy &AA,
726                            const DataLayout &DL, DominatorTree *DT) {
727   // Do a backwards scan through the CFG from SecondI to FirstI. Look for
728   // instructions which can modify the memory location accessed by SecondI.
729   //
730   // While doing the walk keep track of the address to check. It might be
731   // different in different basic blocks due to PHI translation.
732   using BlockAddressPair = std::pair<BasicBlock *, PHITransAddr>;
733   SmallVector<BlockAddressPair, 16> WorkList;
734   // Keep track of the address we visited each block with. Bail out if we
735   // visit a block with different addresses.
736   DenseMap<BasicBlock *, Value *> Visited;
737 
738   BasicBlock::iterator FirstBBI(FirstI);
739   ++FirstBBI;
740   BasicBlock::iterator SecondBBI(SecondI);
741   BasicBlock *FirstBB = FirstI->getParent();
742   BasicBlock *SecondBB = SecondI->getParent();
743   MemoryLocation MemLoc = MemoryLocation::get(SecondI);
744   auto *MemLocPtr = const_cast<Value *>(MemLoc.Ptr);
745 
746   // Start checking the SecondBB.
747   WorkList.push_back(
748       std::make_pair(SecondBB, PHITransAddr(MemLocPtr, DL, nullptr)));
749   bool isFirstBlock = true;
750 
751   // Check all blocks going backward until we reach the FirstBB.
752   while (!WorkList.empty()) {
753     BlockAddressPair Current = WorkList.pop_back_val();
754     BasicBlock *B = Current.first;
755     PHITransAddr &Addr = Current.second;
756     Value *Ptr = Addr.getAddr();
757 
758     // Ignore instructions before FirstI if this is the FirstBB.
759     BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());
760 
761     BasicBlock::iterator EI;
762     if (isFirstBlock) {
763       // Ignore instructions after SecondI if this is the first visit of SecondBB.
764       assert(B == SecondBB && "first block is not the store block");
765       EI = SecondBBI;
766       isFirstBlock = false;
767     } else {
768       // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
769       // In this case we also have to look at instructions after SecondI.
770       EI = B->end();
771     }
772     for (; BI != EI; ++BI) {
773       Instruction *I = &*BI;
774       if (I->mayWriteToMemory() && I != SecondI)
775         if (isModSet(AA.getModRefInfo(I, MemLoc.getWithNewPtr(Ptr))))
776           return false;
777     }
778     if (B != FirstBB) {
779       assert(B != &FirstBB->getParent()->getEntryBlock() &&
780           "Should not hit the entry block because SI must be dominated by LI");
781       for (auto PredI = pred_begin(B), PE = pred_end(B); PredI != PE; ++PredI) {
782         PHITransAddr PredAddr = Addr;
783         if (PredAddr.NeedsPHITranslationFromBlock(B)) {
784           if (!PredAddr.IsPotentiallyPHITranslatable())
785             return false;
786           if (PredAddr.PHITranslateValue(B, *PredI, DT, false))
787             return false;
788         }
789         Value *TranslatedPtr = PredAddr.getAddr();
790         auto Inserted = Visited.insert(std::make_pair(*PredI, TranslatedPtr));
791         if (!Inserted.second) {
792           // We already visited this block before. If it was with a different
793           // address - bail out!
794           if (TranslatedPtr != Inserted.first->second)
795             return false;
796           // ... otherwise just skip it.
797           continue;
798         }
799         WorkList.push_back(std::make_pair(*PredI, PredAddr));
800       }
801     }
802   }
803   return true;
804 }
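// Note on the walk above: if the address accessed by SecondI is defined by a
// PHI node, the PHI translation rewrites the queried address to the
// corresponding incoming value as the walk crosses into each predecessor, so
// the mod/ref checks use the address that is actually live in that block.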
805 
806 /// Find all blocks that will unconditionally lead to the block BB and append
807 /// them to F.
808 static void findUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
809                                    BasicBlock *BB, DominatorTree *DT) {
810   for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
811     BasicBlock *Pred = *I;
812     if (Pred == BB) continue;
813     Instruction *PredTI = Pred->getTerminator();
814     if (PredTI->getNumSuccessors() != 1)
815       continue;
816 
817     if (DT->isReachableFromEntry(Pred))
818       Blocks.push_back(Pred);
819   }
820 }
821 
822 /// Handle frees of entire structures whose dependency is a store
823 /// to a field of that structure.
824 static bool handleFree(CallInst *F, AliasAnalysis *AA,
825                        MemoryDependenceResults *MD, DominatorTree *DT,
826                        const TargetLibraryInfo *TLI,
827                        InstOverlapIntervalsTy &IOL,
828                        MapVector<Instruction *, bool> &ThrowableInst) {
829   bool MadeChange = false;
830 
831   MemoryLocation Loc = MemoryLocation::getAfter(F->getOperand(0));
832   SmallVector<BasicBlock *, 16> Blocks;
833   Blocks.push_back(F->getParent());
834 
835   while (!Blocks.empty()) {
836     BasicBlock *BB = Blocks.pop_back_val();
837     Instruction *InstPt = BB->getTerminator();
838     if (BB == F->getParent()) InstPt = F;
839 
840     MemDepResult Dep =
841         MD->getPointerDependencyFrom(Loc, false, InstPt->getIterator(), BB);
842     while (Dep.isDef() || Dep.isClobber()) {
843       Instruction *Dependency = Dep.getInst();
844       if (!hasAnalyzableMemoryWrite(Dependency, *TLI) ||
845           !isRemovable(Dependency))
846         break;
847 
848       Value *DepPointer =
849           getUnderlyingObject(getStoredPointerOperand(Dependency, *TLI));
850 
851       // Check for aliasing.
852       if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
853         break;
854 
855       LLVM_DEBUG(
856           dbgs() << "DSE: Dead Store to soon to be freed memory:\n  DEAD: "
857                  << *Dependency << '\n');
858 
859       // DCE instructions only used to calculate that store.
860       BasicBlock::iterator BBI(Dependency);
861       deleteDeadInstruction(Dependency, &BBI, *MD, *TLI, IOL,
862                             ThrowableInst);
863       ++NumFastStores;
864       MadeChange = true;
865 
866       // Inst's old Dependency is now deleted. Compute the next dependency,
867       // which may also be dead, as in
868       //    s[0] = 0;
869       //    s[1] = 0; // This has just been deleted.
870       //    free(s);
871       Dep = MD->getPointerDependencyFrom(Loc, false, BBI, BB);
872     }
873 
874     if (Dep.isNonLocal())
875       findUnconditionalPreds(Blocks, BB, DT);
876   }
877 
878   return MadeChange;
879 }
880 
881 /// Check to see if the specified location may alias any of the stack objects in
882 /// the DeadStackObjects set. If so, they become live because the location is
883 /// being loaded.
884 static void removeAccessedObjects(const MemoryLocation &LoadedLoc,
885                                   SmallSetVector<const Value *, 16> &DeadStackObjects,
886                                   const DataLayout &DL, AliasAnalysis *AA,
887                                   const TargetLibraryInfo *TLI,
888                                   const Function *F) {
889   const Value *UnderlyingPointer = getUnderlyingObject(LoadedLoc.Ptr);
890 
891   // A constant can't be in the dead pointer set.
892   if (isa<Constant>(UnderlyingPointer))
893     return;
894 
895   // If the kill pointer can be easily reduced to an alloca, don't bother doing
896   // extraneous AA queries.
897   if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
898     DeadStackObjects.remove(UnderlyingPointer);
899     return;
900   }
901 
902   // Remove objects that could alias LoadedLoc.
903   DeadStackObjects.remove_if([&](const Value *I) {
904     // See if the loaded location could alias the stack location.
905     MemoryLocation StackLoc(I, getPointerSize(I, DL, *TLI, F));
906     return !AA->isNoAlias(StackLoc, LoadedLoc);
907   });
908 }
909 
910 /// Remove dead stores to stack-allocated locations in the function end block.
911 /// Ex:
912 /// %A = alloca i32
913 /// ...
914 /// store i32 1, i32* %A
915 /// ret void
916 static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
917                            MemoryDependenceResults *MD,
918                            const TargetLibraryInfo *TLI,
919                            InstOverlapIntervalsTy &IOL,
920                            MapVector<Instruction *, bool> &ThrowableInst) {
921   bool MadeChange = false;
922 
923   // Keep track of all of the stack objects that are dead at the end of the
924   // function.
925   SmallSetVector<const Value*, 16> DeadStackObjects;
926 
927   // Find all of the alloca'd pointers in the entry block.
928   BasicBlock &Entry = BB.getParent()->front();
929   for (Instruction &I : Entry) {
930     if (isa<AllocaInst>(&I))
931       DeadStackObjects.insert(&I);
932 
933     // Okay, so these are dead heap objects, but if the pointer never escapes
934     // then it's leaked by this function anyway.
935     else if (isAllocLikeFn(&I, TLI) && !PointerMayBeCaptured(&I, true, true))
936       DeadStackObjects.insert(&I);
937   }
938 
939   // Treat byval or inalloca arguments the same; stores to them are dead at the
940   // end of the function.
941   for (Argument &AI : BB.getParent()->args())
942     if (AI.hasPassPointeeByValueCopyAttr())
943       DeadStackObjects.insert(&AI);
944 
945   const DataLayout &DL = BB.getModule()->getDataLayout();
946 
947   // Scan the basic block backwards
948   for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
949     --BBI;
950 
951     // If we find a store, check to see if it points into a dead stack value.
952     if (hasAnalyzableMemoryWrite(&*BBI, *TLI) && isRemovable(&*BBI)) {
953       // See through pointer-to-pointer bitcasts
954       SmallVector<const Value *, 4> Pointers;
955       getUnderlyingObjects(getStoredPointerOperand(&*BBI, *TLI), Pointers);
956 
957       // Stores to stack values are valid candidates for removal.
958       bool AllDead = true;
959       for (const Value *Pointer : Pointers)
960         if (!DeadStackObjects.count(Pointer)) {
961           AllDead = false;
962           break;
963         }
964 
965       if (AllDead) {
966         Instruction *Dead = &*BBI;
967 
968         LLVM_DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
969                           << *Dead << "\n  Objects: ";
970                    for (SmallVectorImpl<const Value *>::iterator I =
971                             Pointers.begin(),
972                         E = Pointers.end();
973                         I != E; ++I) {
974                      dbgs() << **I;
975                      if (std::next(I) != E)
976                        dbgs() << ", ";
977                    } dbgs()
978                    << '\n');
979 
980         // DCE instructions only used to calculate that store.
981         deleteDeadInstruction(Dead, &BBI, *MD, *TLI, IOL, ThrowableInst,
982                               &DeadStackObjects);
983         ++NumFastStores;
984         MadeChange = true;
985         continue;
986       }
987     }
988 
989     // Remove any dead non-memory-mutating instructions.
990     if (isInstructionTriviallyDead(&*BBI, TLI)) {
991       LLVM_DEBUG(dbgs() << "DSE: Removing trivially dead instruction:\n  DEAD: "
992                         << *&*BBI << '\n');
993       deleteDeadInstruction(&*BBI, &BBI, *MD, *TLI, IOL, ThrowableInst,
994                             &DeadStackObjects);
995       ++NumFastOther;
996       MadeChange = true;
997       continue;
998     }
999 
1000     if (isa<AllocaInst>(BBI)) {
1001       // Remove allocas from the list of dead stack objects; there can't be
1002       // any references before the definition.
1003       DeadStackObjects.remove(&*BBI);
1004       continue;
1005     }
1006 
1007     if (auto *Call = dyn_cast<CallBase>(&*BBI)) {
1008       // Remove allocation function calls from the list of dead stack objects;
1009       // there can't be any references before the definition.
1010       if (isAllocLikeFn(&*BBI, TLI))
1011         DeadStackObjects.remove(&*BBI);
1012 
1013       // If this call does not access memory, it can't be loading any of our
1014       // pointers.
1015       if (AA->doesNotAccessMemory(Call))
1016         continue;
1017 
1018       // If the call might load from any of our allocas, then any store above
1019       // the call is live.
1020       DeadStackObjects.remove_if([&](const Value *I) {
1021         // See if the call site touches the value.
1022         return isRefSet(AA->getModRefInfo(
1023             Call, I, getPointerSize(I, DL, *TLI, BB.getParent())));
1024       });
1025 
1026       // If all of the allocas were clobbered by the call then we're not going
1027       // to find anything else to process.
1028       if (DeadStackObjects.empty())
1029         break;
1030 
1031       continue;
1032     }
1033 
1034     // We can remove the dead stores, irrespective of the fence and its ordering
1035     // (release/acquire/seq_cst). Fences only constrain the ordering of
1036     // already visible stores; they do not make a store visible to other
1037     // threads. So, skipping over a fence does not change a store from being
1038     // dead.
1039     if (isa<FenceInst>(*BBI))
1040       continue;
1041 
1042     MemoryLocation LoadedLoc;
1043 
1044     // If we encounter a use of the pointer, it is no longer considered dead
1045     if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
1046       if (!L->isUnordered()) // Be conservative with atomic/volatile load
1047         break;
1048       LoadedLoc = MemoryLocation::get(L);
1049     } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
1050       LoadedLoc = MemoryLocation::get(V);
1051     } else if (!BBI->mayReadFromMemory()) {
1052       // Instruction doesn't read memory.  Note that stores that weren't removed
1053       // above will hit this case.
1054       continue;
1055     } else {
1056       // Unknown inst; assume it clobbers everything.
1057       break;
1058     }
1059 
1060     // Remove any allocas from the DeadPointer set that are loaded, as this
1061     // makes any stores above the access live.
1062     removeAccessedObjects(LoadedLoc, DeadStackObjects, DL, AA, TLI, BB.getParent());
1063 
1064     // If all of the allocas were clobbered by the access then we're not going
1065     // to find anything else to process.
1066     if (DeadStackObjects.empty())
1067       break;
1068   }
1069 
1070   return MadeChange;
1071 }
1072 
1073 static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierOffset,
1074                          uint64_t &EarlierSize, int64_t LaterOffset,
1075                          uint64_t LaterSize, bool IsOverwriteEnd) {
1076   // TODO: base this on the target vector size so that if the earlier
1077   // store was too small to get vector writes anyway then it's likely
1078   // a good idea to shorten it.
1079   // Power-of-2 vector writes are probably always a bad idea to optimize,
1080   // as any store/memset/memcpy is likely using vector instructions, so
1081   // shortening it to a non-vector size is likely to be slower.
1082   auto *EarlierIntrinsic = cast<AnyMemIntrinsic>(EarlierWrite);
1083   unsigned EarlierWriteAlign = EarlierIntrinsic->getDestAlignment();
1084   if (!IsOverwriteEnd)
1085     LaterOffset = int64_t(LaterOffset + LaterSize);
1086 
1087   if (!(isPowerOf2_64(LaterOffset) && EarlierWriteAlign <= LaterOffset) &&
1088       !((EarlierWriteAlign != 0) && LaterOffset % EarlierWriteAlign == 0))
1089     return false;
1090 
1091   int64_t NewLength = IsOverwriteEnd
1092                           ? LaterOffset - EarlierOffset
1093                           : EarlierSize - (LaterOffset - EarlierOffset);
1094 
1095   if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(EarlierWrite)) {
1096     // When shortening an atomic memory intrinsic, the newly shortened
1097     // length must remain an integer multiple of the element size.
1098     const uint32_t ElementSize = AMI->getElementSizeInBytes();
1099     if (0 != NewLength % ElementSize)
1100       return false;
1101   }
1102 
1103   LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW "
1104                     << (IsOverwriteEnd ? "END" : "BEGIN") << ": "
1105                     << *EarlierWrite << "\n  KILLER (offset " << LaterOffset
1106                     << ", " << EarlierSize << ")\n");
1107 
1108   Value *EarlierWriteLength = EarlierIntrinsic->getLength();
1109   Value *TrimmedLength =
1110       ConstantInt::get(EarlierWriteLength->getType(), NewLength);
1111   EarlierIntrinsic->setLength(TrimmedLength);
1112 
1113   EarlierSize = NewLength;
1114   if (!IsOverwriteEnd) {
1115     int64_t OffsetMoved = (LaterOffset - EarlierOffset);
1116     Value *Indices[1] = {
1117         ConstantInt::get(EarlierWriteLength->getType(), OffsetMoved)};
1118     GetElementPtrInst *NewDestGEP = GetElementPtrInst::CreateInBounds(
1119         EarlierIntrinsic->getRawDest()->getType()->getPointerElementType(),
1120         EarlierIntrinsic->getRawDest(), Indices, "", EarlierWrite);
1121     NewDestGEP->setDebugLoc(EarlierIntrinsic->getDebugLoc());
1122     EarlierIntrinsic->setDest(NewDestGEP);
1123     EarlierOffset = EarlierOffset + OffsetMoved;
1124   }
1125   return true;
1126 }
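// For illustration: if an earlier memset covers bytes [0, 32) and a later
// store completely overwrites its tail [16, 32), the OW_End path trims the
// memset's length to 16 (subject to the alignment heuristic above). On the
// OW_Begin path the length is reduced and the destination pointer is advanced
// past the overwritten prefix instead.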
1127 
1128 static bool tryToShortenEnd(Instruction *EarlierWrite,
1129                             OverlapIntervalsTy &IntervalMap,
1130                             int64_t &EarlierStart, uint64_t &EarlierSize) {
1131   if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite))
1132     return false;
1133 
1134   OverlapIntervalsTy::iterator OII = --IntervalMap.end();
1135   int64_t LaterStart = OII->second;
1136   uint64_t LaterSize = OII->first - LaterStart;
1137 
1138   assert(OII->first - LaterStart >= 0 && "Size expected to be positive");
1139 
1140   if (LaterStart > EarlierStart &&
1141       // Note: "LaterStart - EarlierStart" is known to be positive due to
1142       // preceding check.
1143       (uint64_t)(LaterStart - EarlierStart) < EarlierSize &&
1144       // Note: "EarlierSize - (uint64_t)(LaterStart - EarlierStart)" is known to
1145       // be non negative due to preceding checks.
1146       LaterSize >= EarlierSize - (uint64_t)(LaterStart - EarlierStart)) {
1147     if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
1148                      LaterSize, true)) {
1149       IntervalMap.erase(OII);
1150       return true;
1151     }
1152   }
1153   return false;
1154 }
1155 
1156 static bool tryToShortenBegin(Instruction *EarlierWrite,
1157                               OverlapIntervalsTy &IntervalMap,
1158                               int64_t &EarlierStart, uint64_t &EarlierSize) {
1159   if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite))
1160     return false;
1161 
1162   OverlapIntervalsTy::iterator OII = IntervalMap.begin();
1163   int64_t LaterStart = OII->second;
1164   uint64_t LaterSize = OII->first - LaterStart;
1165 
1166   assert(OII->first - LaterStart >= 0 && "Size expected to be positive");
1167 
1168   if (LaterStart <= EarlierStart &&
1169       // Note: "EarlierStart - LaterStart" is known to be non negative due to
1170       // preceding check.
1171       LaterSize > (uint64_t)(EarlierStart - LaterStart)) {
1172     // Note: "LaterSize - (uint64_t)(EarlierStart - LaterStart)" is known to be
1173     // positive due to preceding checks.
1174     assert(LaterSize - (uint64_t)(EarlierStart - LaterStart) < EarlierSize &&
1175            "Should have been handled as OW_Complete");
1176     if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
1177                      LaterSize, false)) {
1178       IntervalMap.erase(OII);
1179       return true;
1180     }
1181   }
1182   return false;
1183 }
1184 
1185 static bool removePartiallyOverlappedStores(const DataLayout &DL,
1186                                             InstOverlapIntervalsTy &IOL,
1187                                             const TargetLibraryInfo &TLI) {
1188   bool Changed = false;
1189   for (auto OI : IOL) {
1190     Instruction *EarlierWrite = OI.first;
1191     MemoryLocation Loc = getLocForWrite(EarlierWrite, TLI);
1192     assert(isRemovable(EarlierWrite) && "Expect only removable instruction");
1193 
1194     const Value *Ptr = Loc.Ptr->stripPointerCasts();
1195     int64_t EarlierStart = 0;
1196     uint64_t EarlierSize = Loc.Size.getValue();
1197     GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
1198     OverlapIntervalsTy &IntervalMap = OI.second;
1199     Changed |=
1200         tryToShortenEnd(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
1201     if (IntervalMap.empty())
1202       continue;
1203     Changed |=
1204         tryToShortenBegin(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
1205   }
1206   return Changed;
1207 }
1208 
1209 static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI,
1210                                AliasAnalysis *AA, MemoryDependenceResults *MD,
1211                                const DataLayout &DL,
1212                                const TargetLibraryInfo *TLI,
1213                                InstOverlapIntervalsTy &IOL,
1214                                MapVector<Instruction *, bool> &ThrowableInst,
1215                                DominatorTree *DT) {
1216   // Must be a store instruction.
1217   StoreInst *SI = dyn_cast<StoreInst>(Inst);
1218   if (!SI)
1219     return false;
1220 
1221   // If we're storing the same value back to a pointer that we just loaded from,
1222   // then the store can be removed.
1223   if (LoadInst *DepLoad = dyn_cast<LoadInst>(SI->getValueOperand())) {
1224     if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
1225         isRemovable(SI) &&
1226         memoryIsNotModifiedBetween(DepLoad, SI, *AA, DL, DT)) {
1227 
1228       LLVM_DEBUG(
1229           dbgs() << "DSE: Remove Store Of Load from same pointer:\n  LOAD: "
1230                  << *DepLoad << "\n  STORE: " << *SI << '\n');
1231 
1232       deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, ThrowableInst);
1233       ++NumRedundantStores;
1234       return true;
1235     }
1236   }
1237 
1238   // Remove null stores into the calloc'ed objects
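  // Illustrative IR sketch (hypothetical): calloc already zeroes the
  // allocation, so a later null store into it is redundant.
  //   %m = call i8* @calloc(i64 1, i64 4)
  //   store i8 0, i8* %m      ; removable, the memory is already zero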
1239   Constant *StoredConstant = dyn_cast<Constant>(SI->getValueOperand());
1240   if (StoredConstant && StoredConstant->isNullValue() && isRemovable(SI)) {
1241     Instruction *UnderlyingPointer =
1242         dyn_cast<Instruction>(getUnderlyingObject(SI->getPointerOperand()));
1243 
1244     if (UnderlyingPointer && isCallocLikeFn(UnderlyingPointer, TLI) &&
1245         memoryIsNotModifiedBetween(UnderlyingPointer, SI, *AA, DL, DT)) {
1246       LLVM_DEBUG(
1247           dbgs() << "DSE: Remove null store to the calloc'ed object:\n  DEAD: "
1248                  << *Inst << "\n  OBJECT: " << *UnderlyingPointer << '\n');
1249 
1250       deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, ThrowableInst);
1251       ++NumRedundantStores;
1252       return true;
1253     }
1254   }
1255   return false;
1256 }
1257 
1258 template <typename AATy>
1259 static Constant *tryToMergePartialOverlappingStores(
1260     StoreInst *Earlier, StoreInst *Later, int64_t InstWriteOffset,
1261     int64_t DepWriteOffset, const DataLayout &DL, AATy &AA, DominatorTree *DT) {
1262 
1263   if (Earlier && isa<ConstantInt>(Earlier->getValueOperand()) &&
1264       DL.typeSizeEqualsStoreSize(Earlier->getValueOperand()->getType()) &&
1265       Later && isa<ConstantInt>(Later->getValueOperand()) &&
1266       DL.typeSizeEqualsStoreSize(Later->getValueOperand()->getType()) &&
1267       memoryIsNotModifiedBetween(Earlier, Later, AA, DL, DT)) {
1268     // If the store we find is:
1269     //   a) partially overwritten by the store to 'Loc'
1270     //   b) the later store is fully contained in the earlier one and
1271     //   c) they both have a constant value
1272     //   d) none of the two stores need padding
1273     // Merge the two stores, replacing the earlier store's value with a
1274     // merge of both values.
1275     // TODO: Deal with other constant types (vectors, etc), and probably
1276     // some mem intrinsics (if needed)
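    // Worked example (hypothetical little-endian values): an earlier i32 store
    // of 0x11223344 and a later i16 store of 0xAABB at byte offset 2 give
    // BitOffsetDiff = 16, LShiftAmount = 16, Mask = 0xFFFF0000 and a merged
    // value of 0xAABB3344.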
1277 
1278     APInt EarlierValue =
1279         cast<ConstantInt>(Earlier->getValueOperand())->getValue();
1280     APInt LaterValue = cast<ConstantInt>(Later->getValueOperand())->getValue();
1281     unsigned LaterBits = LaterValue.getBitWidth();
1282     assert(EarlierValue.getBitWidth() > LaterValue.getBitWidth());
1283     LaterValue = LaterValue.zext(EarlierValue.getBitWidth());
1284 
1285     // Offset of the smaller store inside the larger store
1286     unsigned BitOffsetDiff = (InstWriteOffset - DepWriteOffset) * 8;
1287     unsigned LShiftAmount = DL.isBigEndian() ? EarlierValue.getBitWidth() -
1288                                                    BitOffsetDiff - LaterBits
1289                                              : BitOffsetDiff;
1290     APInt Mask = APInt::getBitsSet(EarlierValue.getBitWidth(), LShiftAmount,
1291                                    LShiftAmount + LaterBits);
1292     // Clear the bits we'll be replacing, then OR with the smaller
1293     // store, shifted appropriately.
1294     APInt Merged = (EarlierValue & ~Mask) | (LaterValue << LShiftAmount);
1295     LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n  Earlier: " << *Earlier
1296                       << "\n  Later: " << *Later
1297                       << "\n  Merged Value: " << Merged << '\n');
1298     return ConstantInt::get(Earlier->getValueOperand()->getType(), Merged);
1299   }
1300   return nullptr;
1301 }
1302 
1303 static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
1304                                 MemoryDependenceResults *MD, DominatorTree *DT,
1305                                 const TargetLibraryInfo *TLI) {
1306   const DataLayout &DL = BB.getModule()->getDataLayout();
1307   bool MadeChange = false;
1308 
1309   MapVector<Instruction *, bool> ThrowableInst;
1310 
1311   // A map of interval maps representing partially-overwritten value parts.
1312   InstOverlapIntervalsTy IOL;
1313 
1314   // Do a top-down walk on the BB.
1315   for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
1316     // Handle 'free' calls specially.
1317     if (CallInst *F = isFreeCall(&*BBI, TLI)) {
1318       MadeChange |= handleFree(F, AA, MD, DT, TLI, IOL, ThrowableInst);
1319       // Increment BBI after handleFree has potentially deleted instructions.
1320       // This ensures we maintain a valid iterator.
1321       ++BBI;
1322       continue;
1323     }
1324 
1325     Instruction *Inst = &*BBI++;
1326 
1327     if (Inst->mayThrow()) {
1328       ThrowableInst[Inst] = true;
1329       continue;
1330     }
1331 
1332     // Check to see if Inst writes to memory.  If not, continue.
1333     if (!hasAnalyzableMemoryWrite(Inst, *TLI))
1334       continue;
1335 
1336     // eliminateNoopStore will update the iterator, if necessary.
1337     if (eliminateNoopStore(Inst, BBI, AA, MD, DL, TLI, IOL,
1338                            ThrowableInst, DT)) {
1339       MadeChange = true;
1340       continue;
1341     }
1342 
1343     // If we find something that writes memory, get its memory dependence.
1344     MemDepResult InstDep = MD->getDependency(Inst);
1345 
1346     // Ignore any store where we can't find a local dependence.
1347     // FIXME: cross-block DSE would be fun. :)
1348     if (!InstDep.isDef() && !InstDep.isClobber())
1349       continue;
1350 
1351     // Figure out what location is being stored to.
1352     MemoryLocation Loc = getLocForWrite(Inst, *TLI);
1353 
1354     // If we didn't get a useful location, fail.
1355     if (!Loc.Ptr)
1356       continue;
1357 
1358     // Loop until we find a store we can eliminate or a load that
1359     // invalidates the analysis. Without an upper bound on the number of
1360     // instructions examined, this analysis can become very time-consuming.
1361     // However, the potential gain diminishes as we process more instructions
1362     // without eliminating any of them. Therefore, we limit the number of
1363     // instructions we look at.
1364     auto Limit = MD->getDefaultBlockScanLimit();
1365     while (InstDep.isDef() || InstDep.isClobber()) {
1366       // Get the memory clobbered by the instruction we depend on.  MemDep will
1367       // skip any instructions that 'Loc' clearly doesn't interact with.  If we
1368       // end up depending on a may- or must-aliased load, then we can't optimize
1369       // away the store and we bail out.  However, if we depend on something
1370       // that overwrites the memory location we *can* potentially optimize it.
1371       //
1372       // Find out what memory location the dependent instruction stores.
1373       Instruction *DepWrite = InstDep.getInst();
1374       if (!hasAnalyzableMemoryWrite(DepWrite, *TLI))
1375         break;
1376       MemoryLocation DepLoc = getLocForWrite(DepWrite, *TLI);
1377       // If we didn't get a useful location or a known size, bail out.
1378       if (!DepLoc.Ptr)
1379         break;
1380 
1381       // Find the last throwable instruction not removed by call to
1382       // deleteDeadInstruction.
1383       Instruction *LastThrowing = nullptr;
1384       if (!ThrowableInst.empty())
1385         LastThrowing = ThrowableInst.back().first;
1386 
1387       // Make sure we don't look past a call which might throw. This is an
1388       // issue because MemoryDependenceAnalysis works in the wrong direction:
1389       // it finds instructions which dominate the current instruction, rather
1390       // than instructions which are post-dominated by the current instruction.
1391       //
1392       // If the underlying object is a non-escaping memory allocation, any store
1393       // to it is dead along the unwind edge. Otherwise, we need to preserve
1394       // the store.
1395       if (LastThrowing && DepWrite->comesBefore(LastThrowing)) {
1396         const Value *Underlying = getUnderlyingObject(DepLoc.Ptr);
1397         bool IsStoreDeadOnUnwind = isa<AllocaInst>(Underlying);
1398         if (!IsStoreDeadOnUnwind) {
1399             // We're looking for a call to an allocation function
1400             // where the allocation doesn't escape before the last
1401             // throwing instruction; PointerMayBeCaptured is a
1402             // reasonably fast approximation.
1403             IsStoreDeadOnUnwind = isAllocLikeFn(Underlying, TLI) &&
1404                 !PointerMayBeCaptured(Underlying, false, true);
1405         }
1406         if (!IsStoreDeadOnUnwind)
1407           break;
1408       }
1409 
1410       // If we find a write that is a) removable (i.e., non-volatile), b) is
1411       // completely obliterated by the store to 'Loc', and c) which we know that
1412       // 'Inst' doesn't load from, then we can remove it.
1413       // Also try to merge two stores if a later one only touches memory written
1414       // to by the earlier one.
1415       if (isRemovable(DepWrite) &&
1416           !isPossibleSelfRead(Inst, Loc, DepWrite, *TLI, *AA)) {
1417         int64_t InstWriteOffset, DepWriteOffset;
1418         OverwriteResult OR = isOverwrite(Inst, DepWrite, Loc, DepLoc, DL, *TLI,
1419                                          DepWriteOffset, InstWriteOffset, *AA,
1420                                          BB.getParent());
1421         if (OR == OW_MaybePartial)
1422           OR = isPartialOverwrite(Loc, DepLoc, DepWriteOffset, InstWriteOffset,
1423                                   DepWrite, IOL);
1424 
1425         if (OR == OW_Complete) {
1426           LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *DepWrite
1427                             << "\n  KILLER: " << *Inst << '\n');
1428 
1429           // Delete the store and now-dead instructions that feed it.
1430           deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL,
1431                                 ThrowableInst);
1432           ++NumFastStores;
1433           MadeChange = true;
1434 
1435           // We erased DepWrite; start over.
1436           InstDep = MD->getDependency(Inst);
1437           continue;
1438         } else if ((OR == OW_End && isShortenableAtTheEnd(DepWrite)) ||
1439                    ((OR == OW_Begin &&
1440                      isShortenableAtTheBeginning(DepWrite)))) {
1441           assert(!EnablePartialOverwriteTracking && "Do not expect to perform "
1442                                                     "when partial-overwrite "
1443                                                     "tracking is enabled");
1444           // The overwrite result is known, so these must be known, too.
1445           uint64_t EarlierSize = DepLoc.Size.getValue();
1446           uint64_t LaterSize = Loc.Size.getValue();
1447           bool IsOverwriteEnd = (OR == OW_End);
1448           MadeChange |= tryToShorten(DepWrite, DepWriteOffset, EarlierSize,
1449                                     InstWriteOffset, LaterSize, IsOverwriteEnd);
1450         } else if (EnablePartialStoreMerging &&
1451                    OR == OW_PartialEarlierWithFullLater) {
1452           auto *Earlier = dyn_cast<StoreInst>(DepWrite);
1453           auto *Later = dyn_cast<StoreInst>(Inst);
1454           if (Constant *C = tryToMergePartialOverlappingStores(
1455                   Earlier, Later, InstWriteOffset, DepWriteOffset, DL, *AA,
1456                   DT)) {
1457             auto *SI = new StoreInst(
1458                 C, Earlier->getPointerOperand(), false, Earlier->getAlign(),
1459                 Earlier->getOrdering(), Earlier->getSyncScopeID(), DepWrite);
1460 
1461             unsigned MDToKeep[] = {LLVMContext::MD_dbg, LLVMContext::MD_tbaa,
1462                                    LLVMContext::MD_alias_scope,
1463                                    LLVMContext::MD_noalias,
1464                                    LLVMContext::MD_nontemporal};
1465             SI->copyMetadata(*DepWrite, MDToKeep);
1466             ++NumModifiedStores;
1467 
1468             // Delete the old stores and now-dead instructions that feed them.
1469             deleteDeadInstruction(Inst, &BBI, *MD, *TLI, IOL,
1470                                   ThrowableInst);
1471             deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL,
1472                                   ThrowableInst);
1473             MadeChange = true;
1474 
1475             // We erased DepWrite and Inst (Loc); start over.
1476             break;
1477           }
1478         }
1479       }
1480 
1481       // If this is a may-aliased store that is clobbering the store value, we
1482       // can keep searching past it for another must-aliased pointer that stores
1483       // to the same location.  For example, in:
1484       //   store -> P
1485       //   store -> Q
1486       //   store -> P
1487       // we can remove the first store to P even though we don't know if P and Q
1488       // alias.
1489       if (DepWrite == &BB.front()) break;
1490 
1491       // Can't look past this instruction if it might read 'Loc'.
1492       if (isRefSet(AA->getModRefInfo(DepWrite, Loc)))
1493         break;
1494 
1495       InstDep = MD->getPointerDependencyFrom(Loc, /*isLoad=*/ false,
1496                                              DepWrite->getIterator(), &BB,
1497                                              /*QueryInst=*/ nullptr, &Limit);
1498     }
1499   }
1500 
1501   if (EnablePartialOverwriteTracking)
1502     MadeChange |= removePartiallyOverlappedStores(DL, IOL, *TLI);
1503 
1504   // If this block ends in a return, unwind, or unreachable, all allocas are
1505   // dead at its end, which means stores to them are also dead.
1506   if (BB.getTerminator()->getNumSuccessors() == 0)
1507     MadeChange |= handleEndBlock(BB, AA, MD, TLI, IOL, ThrowableInst);
1508 
1509   return MadeChange;
1510 }
1511 
1512 static bool eliminateDeadStores(Function &F, AliasAnalysis *AA,
1513                                 MemoryDependenceResults *MD, DominatorTree *DT,
1514                                 const TargetLibraryInfo *TLI) {
1515   bool MadeChange = false;
1516   for (BasicBlock &BB : F)
1517     // Only check non-dead blocks.  Dead blocks may have strange pointer
1518     // cycles that will confuse alias analysis.
1519     if (DT->isReachableFromEntry(&BB))
1520       MadeChange |= eliminateDeadStores(BB, AA, MD, DT, TLI);
1521 
1522   return MadeChange;
1523 }
1524 
1525 namespace {
1526 //=============================================================================
1527 // MemorySSA backed dead store elimination.
1528 //
1529 // The code below implements dead store elimination using MemorySSA. It uses
1530 // the following general approach: given a MemoryDef, walk upwards to find
1531 // clobbering MemoryDefs that may be killed by the starting def. Then check
1532 // that there are no uses that may read the location of the original MemoryDef
1533 // in between both MemoryDefs. A bit more concretely:
1534 //
1535 // For all MemoryDefs StartDef:
1536 // 1. Get the next dominating clobbering MemoryDef (EarlierAccess) by walking
1537 //    upwards.
1538 // 2. Check that there are no reads between EarlierAccess and the StartDef by
1539 //    checking all uses starting at EarlierAccess and walking until we see
1540 //    StartDef.
1541 // 3. For each found CurrentDef, check that:
1542 //   1. There are no barrier instructions between CurrentDef and StartDef (like
1543 //       throws or stores with ordering constraints).
1544 //   2. StartDef is executed whenever CurrentDef is executed.
1545 //   3. StartDef completely overwrites CurrentDef.
1546 // 4. Erase CurrentDef from the function and MemorySSA.
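//
// Illustrative IR sketch (hypothetical):
//   store i32 0, i32* %p   ; CurrentDef, completely overwritten below
//   store i32 1, i32* %p   ; StartDef
// With no reads of %p in between, the first store is erased.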
1547 
1548 // Returns true if \p I is an intrinsic that does not read or write memory.
1549 bool isNoopIntrinsic(Instruction *I) {
1550   if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1551     switch (II->getIntrinsicID()) {
1552     case Intrinsic::lifetime_start:
1553     case Intrinsic::lifetime_end:
1554     case Intrinsic::invariant_end:
1555     case Intrinsic::launder_invariant_group:
1556     case Intrinsic::assume:
1557       return true;
1558     case Intrinsic::dbg_addr:
1559     case Intrinsic::dbg_declare:
1560     case Intrinsic::dbg_label:
1561     case Intrinsic::dbg_value:
1562       llvm_unreachable("Intrinsic should not be modeled in MemorySSA");
1563     default:
1564       return false;
1565     }
1566   }
1567   return false;
1568 }
1569 
1570 // Check if we can ignore \p D for DSE.
1571 bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller) {
1572   Instruction *DI = D->getMemoryInst();
1573   // Calls that only access inaccessible memory cannot read or write any memory
1574   // locations we consider for elimination.
1575   if (auto *CB = dyn_cast<CallBase>(DI))
1576     if (CB->onlyAccessesInaccessibleMemory())
1577       return true;
1578 
1579   // We can eliminate stores to locations not visible to the caller across
1580   // throwing instructions.
1581   if (DI->mayThrow() && !DefVisibleToCaller)
1582     return true;
1583 
1584   // We can remove the dead stores, irrespective of the fence and its
1585   // ordering (release/acquire/seq_cst). Fences only constrain the ordering
1586   // of already visible stores; they do not make a store visible to other
1587   // threads. So, skipping over a fence does not change a store from being
1588   // dead.
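  // Illustrative IR sketch (hypothetical):
  //   store i32 0, i32* %p   ; dead
  //   fence release          ; skipped, does not publish the dead store
  //   store i32 1, i32* %p   ; killing store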
1589   if (isa<FenceInst>(DI))
1590     return true;
1591 
1592   // Skip intrinsics that do not really read or modify memory.
1593   if (isNoopIntrinsic(D->getMemoryInst()))
1594     return true;
1595 
1596   return false;
1597 }
1598 
1599 struct DSEState {
1600   Function &F;
1601   AliasAnalysis &AA;
1602 
1603   /// The single BatchAA instance that is used to cache AA queries. It will
1604   /// not be invalidated over the whole run. This is safe, because:
1605   /// 1. Only memory writes are removed, so the alias cache for memory
1606   ///    locations remains valid.
1607   /// 2. No new instructions are added (only instructions removed), so cached
1608   ///    information for a deleted value cannot be accessed by a re-used new
1609   ///    value pointer.
1610   BatchAAResults BatchAA;
1611 
1612   MemorySSA &MSSA;
1613   DominatorTree &DT;
1614   PostDominatorTree &PDT;
1615   const TargetLibraryInfo &TLI;
1616   const DataLayout &DL;
1617 
1618   // All MemoryDefs that potentially could kill other MemDefs.
1619   SmallVector<MemoryDef *, 64> MemDefs;
1620   // Any that should be skipped as they are already deleted
1621   SmallPtrSet<MemoryAccess *, 4> SkipStores;
1622   // Keep track of all of the objects that are invisible to the caller before
1623   // the function returns.
1624   // SmallPtrSet<const Value *, 16> InvisibleToCallerBeforeRet;
1625   DenseMap<const Value *, bool> InvisibleToCallerBeforeRet;
1626   // Keep track of all of the objects that are invisible to the caller after
1627   // the function returns.
1628   DenseMap<const Value *, bool> InvisibleToCallerAfterRet;
1629   // Keep track of blocks with throwing instructions not modeled in MemorySSA.
1630   SmallPtrSet<BasicBlock *, 16> ThrowingBlocks;
1631   // Post-order numbers for each basic block. Used to figure out if memory
1632   // accesses are executed before another access.
1633   DenseMap<BasicBlock *, unsigned> PostOrderNumbers;
1634 
1635   /// Keep track of instructions (partly) overlapping with killing MemoryDefs per
1636   /// basic block.
1637   DenseMap<BasicBlock *, InstOverlapIntervalsTy> IOLs;
1638 
1639   struct CheckCache {
1640     SmallPtrSet<MemoryAccess *, 16> KnownNoReads;
1641     SmallPtrSet<MemoryAccess *, 16> KnownReads;
1642 
1643     bool isKnownNoRead(MemoryAccess *A) const {
1644       return KnownNoReads.find(A) != KnownNoReads.end();
1645     }
1646     bool isKnownRead(MemoryAccess *A) const {
1647       return KnownReads.find(A) != KnownReads.end();
1648     }
1649   };
1650 
1651   DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT,
1652            PostDominatorTree &PDT, const TargetLibraryInfo &TLI)
1653       : F(F), AA(AA), BatchAA(AA), MSSA(MSSA), DT(DT), PDT(PDT), TLI(TLI),
1654         DL(F.getParent()->getDataLayout()) {}
1655 
1656   static DSEState get(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
1657                       DominatorTree &DT, PostDominatorTree &PDT,
1658                       const TargetLibraryInfo &TLI) {
1659     DSEState State(F, AA, MSSA, DT, PDT, TLI);
1660     // Collect blocks with throwing instructions not modeled in MemorySSA and
1661     // alloc-like objects.
1662     unsigned PO = 0;
1663     for (BasicBlock *BB : post_order(&F)) {
1664       State.PostOrderNumbers[BB] = PO++;
1665       for (Instruction &I : *BB) {
1666         MemoryAccess *MA = MSSA.getMemoryAccess(&I);
1667         if (I.mayThrow() && !MA)
1668           State.ThrowingBlocks.insert(I.getParent());
1669 
1670         auto *MD = dyn_cast_or_null<MemoryDef>(MA);
1671         if (MD && State.MemDefs.size() < MemorySSADefsPerBlockLimit &&
1672             (State.getLocForWriteEx(&I) || State.isMemTerminatorInst(&I)))
1673           State.MemDefs.push_back(MD);
1674       }
1675     }
1676 
1677     // Treat byval or inalloca arguments the same as allocas: stores to them
1678     // are dead at the end of the function.
1679     for (Argument &AI : F.args())
1680       if (AI.hasPassPointeeByValueCopyAttr()) {
1681         // For byval, the caller doesn't know the address of the allocation.
1682         if (AI.hasByValAttr())
1683           State.InvisibleToCallerBeforeRet.insert({&AI, true});
1684         State.InvisibleToCallerAfterRet.insert({&AI, true});
1685       }
1686 
1687     return State;
1688   }
1689 
1690   bool isInvisibleToCallerAfterRet(const Value *V) {
1691     if (isa<AllocaInst>(V))
1692       return true;
1693     auto I = InvisibleToCallerAfterRet.insert({V, false});
1694     if (I.second) {
1695       if (!isInvisibleToCallerBeforeRet(V)) {
1696         I.first->second = false;
1697       } else {
1698         auto *Inst = dyn_cast<Instruction>(V);
1699         if (Inst && isAllocLikeFn(Inst, &TLI))
1700           I.first->second = !PointerMayBeCaptured(V, true, false);
1701       }
1702     }
1703     return I.first->second;
1704   }
1705 
1706   bool isInvisibleToCallerBeforeRet(const Value *V) {
1707     if (isa<AllocaInst>(V))
1708       return true;
1709     auto I = InvisibleToCallerBeforeRet.insert({V, false});
1710     if (I.second) {
1711       auto *Inst = dyn_cast<Instruction>(V);
1712       if (Inst && isAllocLikeFn(Inst, &TLI))
1713         // NOTE: This could be made more precise by PointerMayBeCapturedBefore
1714         // with the killing MemoryDef. But we refrain from doing so for now to
1715         // limit compile-time and this does not cause any changes to the number
1716         // of stores removed on a large test set in practice.
1717         I.first->second = !PointerMayBeCaptured(V, false, true);
1718     }
1719     return I.first->second;
1720   }
1721 
1722   Optional<MemoryLocation> getLocForWriteEx(Instruction *I) const {
1723     if (!I->mayWriteToMemory())
1724       return None;
1725 
1726     if (auto *MTI = dyn_cast<AnyMemIntrinsic>(I))
1727       return {MemoryLocation::getForDest(MTI)};
1728 
1729     if (auto *CB = dyn_cast<CallBase>(I)) {
1730       // If the functions may write to memory we do not know about, bail out.
1731       if (!CB->onlyAccessesArgMemory() &&
1732           !CB->onlyAccessesInaccessibleMemOrArgMem())
1733         return None;
1734 
1735       LibFunc LF;
1736       if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) {
1737         switch (LF) {
1738         case LibFunc_strcpy:
1739         case LibFunc_strncpy:
1740         case LibFunc_strcat:
1741         case LibFunc_strncat:
1742           return {MemoryLocation::getAfter(CB->getArgOperand(0))};
1743         default:
1744           break;
1745         }
1746       }
1747       switch (CB->getIntrinsicID()) {
1748       case Intrinsic::init_trampoline:
1749         return {MemoryLocation::getAfter(CB->getArgOperand(0))};
1750       case Intrinsic::masked_store:
1751         return {MemoryLocation::getForArgument(CB, 1, TLI)};
1752       default:
1753         break;
1754       }
1755       return None;
1756     }
1757 
1758     return MemoryLocation::getOrNone(I);
1759   }
1760 
1761   /// Returns true if \p UseInst completely overwrites \p DefLoc
1762   /// (stored by \p DefInst).
1763   bool isCompleteOverwrite(MemoryLocation DefLoc, Instruction *DefInst,
1764                            Instruction *UseInst) {
1765     // UseInst has a MemoryDef associated in MemorySSA. It's possible for a
1766     // MemoryDef to not write to memory, e.g. a volatile load is modeled as a
1767     // MemoryDef.
1768     if (!UseInst->mayWriteToMemory())
1769       return false;
1770 
1771     if (auto *CB = dyn_cast<CallBase>(UseInst))
1772       if (CB->onlyAccessesInaccessibleMemory())
1773         return false;
1774 
1775     int64_t InstWriteOffset, DepWriteOffset;
1776     if (auto CC = getLocForWriteEx(UseInst))
1777       return isOverwrite(UseInst, DefInst, *CC, DefLoc, DL, TLI, DepWriteOffset,
1778                          InstWriteOffset, BatchAA, &F) == OW_Complete;
1779     return false;
1780   }
1781 
1782   /// Returns true if \p Def is not read before returning from the function.
1783   bool isWriteAtEndOfFunction(MemoryDef *Def) {
1784     LLVM_DEBUG(dbgs() << "  Check if def " << *Def << " ("
1785                       << *Def->getMemoryInst()
1786                       << ") is at the end of the function\n");
1787 
1788     auto MaybeLoc = getLocForWriteEx(Def->getMemoryInst());
1789     if (!MaybeLoc) {
1790       LLVM_DEBUG(dbgs() << "  ... could not get location for write.\n");
1791       return false;
1792     }
1793 
1794     SmallVector<MemoryAccess *, 4> WorkList;
1795     SmallPtrSet<MemoryAccess *, 8> Visited;
1796     auto PushMemUses = [&WorkList, &Visited](MemoryAccess *Acc) {
1797       if (!Visited.insert(Acc).second)
1798         return;
1799       for (Use &U : Acc->uses())
1800         WorkList.push_back(cast<MemoryAccess>(U.getUser()));
1801     };
1802     PushMemUses(Def);
1803     for (unsigned I = 0; I < WorkList.size(); I++) {
1804       if (WorkList.size() >= MemorySSAScanLimit) {
1805         LLVM_DEBUG(dbgs() << "  ... hit exploration limit.\n");
1806         return false;
1807       }
1808 
1809       MemoryAccess *UseAccess = WorkList[I];
1810       // Simply adding the users of MemoryPhi to the worklist is not enough,
1811       // because we might miss read clobbers in different iterations of a loop,
1812       // for example.
1813       // TODO: Add support for phi translation to handle the loop case.
1814       if (isa<MemoryPhi>(UseAccess))
1815         return false;
1816 
1817       // TODO: Checking for aliasing is expensive. Consider reducing the amount
1818       // of times this is called and/or caching it.
1819       Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1820       if (isReadClobber(*MaybeLoc, UseInst)) {
1821         LLVM_DEBUG(dbgs() << "  ... hit read clobber " << *UseInst << ".\n");
1822         return false;
1823       }
1824 
1825       if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess))
1826         PushMemUses(UseDef);
1827     }
1828     return true;
1829   }
1830 
1831   /// If \p I is a memory terminator like llvm.lifetime.end or free, return a
1832   /// pair with the MemoryLocation terminated by \p I and a boolean flag
1833   /// indicating whether \p I is a free-like call.
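  ///
  /// Illustrative examples (hypothetical IR): for
  ///   call void @llvm.lifetime.end.p0i8(i64 4, i8* %p)
  /// this returns the 4-byte location at %p with the flag set to false; for
  ///   call void @free(i8* %p)
  /// it returns the location after %p with the flag set to true.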
1834   Optional<std::pair<MemoryLocation, bool>>
1835   getLocForTerminator(Instruction *I) const {
1836     uint64_t Len;
1837     Value *Ptr;
1838     if (match(I, m_Intrinsic<Intrinsic::lifetime_end>(m_ConstantInt(Len),
1839                                                       m_Value(Ptr))))
1840       return {std::make_pair(MemoryLocation(Ptr, Len), false)};
1841 
1842     if (auto *CB = dyn_cast<CallBase>(I)) {
1843       if (isFreeCall(I, &TLI))
1844         return {std::make_pair(MemoryLocation::getAfter(CB->getArgOperand(0)),
1845                                true)};
1846     }
1847 
1848     return None;
1849   }
1850 
1851   /// Returns true if \p I is a memory terminator instruction like
1852   /// llvm.lifetime.end or free.
1853   bool isMemTerminatorInst(Instruction *I) const {
1854     IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
1855     return (II && II->getIntrinsicID() == Intrinsic::lifetime_end) ||
1856            isFreeCall(I, &TLI);
1857   }
1858 
1859   /// Returns true if \p MaybeTerm is a memory terminator for \p Loc from
1860   /// instruction \p AccessI.
1861   bool isMemTerminator(MemoryLocation Loc, Instruction *AccessI,
1862                        Instruction *MaybeTerm) {
1863     Optional<std::pair<MemoryLocation, bool>> MaybeTermLoc =
1864         getLocForTerminator(MaybeTerm);
1865 
1866     if (!MaybeTermLoc)
1867       return false;
1868 
1869     // If the terminator is a free-like call, all accesses to the underlying
1870     // object can be considered terminated.
1871     if (getUnderlyingObject(Loc.Ptr) !=
1872         getUnderlyingObject(MaybeTermLoc->first.Ptr))
1873       return false;
1874 
1875     auto TermLoc = MaybeTermLoc->first;
1876     if (MaybeTermLoc->second) {
1877       const Value *LocUO = getUnderlyingObject(Loc.Ptr);
1878       return BatchAA.isMustAlias(TermLoc.Ptr, LocUO);
1879     }
1880     int64_t InstWriteOffset, DepWriteOffset;
1881     return isOverwrite(MaybeTerm, AccessI, TermLoc, Loc, DL, TLI,
1882                        DepWriteOffset, InstWriteOffset, BatchAA,
1883                        &F) == OW_Complete;
1884   }
1885 
1886   // Returns true if \p UseInst may read from \p DefLoc.
1887   bool isReadClobber(MemoryLocation DefLoc, Instruction *UseInst) {
1888     if (isNoopIntrinsic(UseInst))
1889       return false;
1890 
1891     // Monotonic or weaker atomic stores can be re-ordered and do not need to be
1892     // treated as read clobber.
1893     if (auto SI = dyn_cast<StoreInst>(UseInst))
1894       return isStrongerThan(SI->getOrdering(), AtomicOrdering::Monotonic);
1895 
1896     if (!UseInst->mayReadFromMemory())
1897       return false;
1898 
1899     if (auto *CB = dyn_cast<CallBase>(UseInst))
1900       if (CB->onlyAccessesInaccessibleMemory())
1901         return false;
1902 
1903     // NOTE: For calls, the number of stores removed could be slightly improved
1904     // by using AA.callCapturesBefore(UseInst, DefLoc, &DT), but that proved
1905     // too expensive compared to the benefits in practice. For now, avoid more
1906     // expensive analysis to limit compile-time.
1907     return isRefSet(BatchAA.getModRefInfo(UseInst, DefLoc));
1908   }
1909 
1910   /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
1911   /// loop. In particular, this guarantees that it only references a single
1912   /// MemoryLocation during execution of the containing function.
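  ///
  /// For example (illustrative IR), a GEP with an alloca base and all-constant
  /// indices, such as
  ///   %g = getelementptr inbounds [4 x i32], [4 x i32]* %a, i64 0, i64 1
  /// always names the same location, whereas a GEP whose index varies per loop
  /// iteration does not.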
1913   bool IsGuaranteedLoopInvariant(Value *Ptr) {
1914     auto IsGuaranteedLoopInvariantBase = [this](Value *Ptr) {
1915       Ptr = Ptr->stripPointerCasts();
1916       if (auto *I = dyn_cast<Instruction>(Ptr)) {
1917         if (isa<AllocaInst>(Ptr))
1918           return true;
1919 
1920         if (isAllocLikeFn(I, &TLI))
1921           return true;
1922 
1923         return false;
1924       }
1925       return true;
1926     };
1927 
1928     Ptr = Ptr->stripPointerCasts();
1929     if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
1930       return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
1931              GEP->hasAllConstantIndices();
1932     }
1933     return IsGuaranteedLoopInvariantBase(Ptr);
1934   }
1935 
1936   // Find a MemoryDef writing to \p DefLoc and dominating \p StartAccess, with
1937   // no read access between them or on any other path to a function exit block
1938   // if \p DefLoc is not accessible after the function returns. If there is no
1939   // such MemoryDef, return None. The returned value may not (completely)
1940   // overwrite \p DefLoc. Currently we bail out when we encounter an aliasing
1941   // MemoryUse (read).
1942   Optional<MemoryAccess *>
1943   getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *StartAccess,
1944                   MemoryLocation DefLoc, const Value *DefUO, CheckCache &Cache,
1945                   unsigned &ScanLimit, unsigned &WalkerStepLimit,
1946                   bool IsMemTerm, unsigned &PartialLimit) {
1947     if (ScanLimit == 0 || WalkerStepLimit == 0) {
1948       LLVM_DEBUG(dbgs() << "\n    ...  hit scan limit\n");
1949       return None;
1950     }
1951 
1952     MemoryAccess *Current = StartAccess;
1953     Instruction *KillingI = KillingDef->getMemoryInst();
1954     bool StepAgain;
1955     LLVM_DEBUG(dbgs() << "  trying to get dominating access\n");
1956 
1957     // Find the next clobbering Mod access for DefLoc, starting at StartAccess.
1958     do {
1959       StepAgain = false;
1960       LLVM_DEBUG({
1961         dbgs() << "   visiting " << *Current;
1962         if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef>(Current))
1963           dbgs() << " (" << *cast<MemoryUseOrDef>(Current)->getMemoryInst()
1964                  << ")";
1965         dbgs() << "\n";
1966       });
1967 
1968       // Reached TOP.
1969       if (MSSA.isLiveOnEntryDef(Current)) {
1970         LLVM_DEBUG(dbgs() << "   ...  found LiveOnEntryDef\n");
1971         return None;
1972       }
1973 
1974       // Cost of a step. Accesses in the same block are more likely to be valid
1975       // candidates for elimination, hence consider them cheaper.
1976       unsigned StepCost = KillingDef->getBlock() == Current->getBlock()
1977                               ? MemorySSASameBBStepCost
1978                               : MemorySSAOtherBBStepCost;
1979       if (WalkerStepLimit <= StepCost) {
1980         LLVM_DEBUG(dbgs() << "   ...  hit walker step limit\n");
1981         return None;
1982       }
1983       WalkerStepLimit -= StepCost;
1984 
1985       // Return for MemoryPhis. They cannot be eliminated directly and the
1986       // caller is responsible for traversing them.
1987       if (isa<MemoryPhi>(Current)) {
1988         LLVM_DEBUG(dbgs() << "   ...  found MemoryPhi\n");
1989         return Current;
1990       }
1991 
1992       // Below, check if CurrentDef is a valid candidate to be eliminated by
1993       // KillingDef. If it is not, check the next candidate.
1994       MemoryDef *CurrentDef = cast<MemoryDef>(Current);
1995       Instruction *CurrentI = CurrentDef->getMemoryInst();
1996 
1997       if (canSkipDef(CurrentDef, !isInvisibleToCallerBeforeRet(DefUO))) {
1998         StepAgain = true;
1999         Current = CurrentDef->getDefiningAccess();
2000         continue;
2001       }
2002 
2003       // Before we try to remove anything, check for any extra throwing
2004       // instructions that block us from DSEing
2005       if (mayThrowBetween(KillingI, CurrentI, DefUO)) {
2006         LLVM_DEBUG(dbgs() << "  ... skip, may throw!\n");
2007         return None;
2008       }
2009 
2010       // Check for anything that looks like it will be a barrier to further
2011       // removal
2012       if (isDSEBarrier(DefUO, CurrentI)) {
2013         LLVM_DEBUG(dbgs() << "  ... skip, barrier\n");
2014         return None;
2015       }
2016 
2017       // If Current is known to be on path that reads DefLoc or is a read
2018       // clobber, bail out, as the path is not profitable. We skip this check
2019       // for intrinsic calls, because the code knows how to handle memcpy
2020       // intrinsics.
2021       if (!isa<IntrinsicInst>(CurrentI) &&
2022           (Cache.KnownReads.contains(Current) ||
2023            isReadClobber(DefLoc, CurrentI))) {
2024         Cache.KnownReads.insert(Current);
2025         return None;
2026       }
2027 
2028       // Quick check if there are direct uses that are read-clobbers.
2029       if (any_of(Current->uses(), [this, &DefLoc, StartAccess](Use &U) {
2030             if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(U.getUser()))
2031               return !MSSA.dominates(StartAccess, UseOrDef) &&
2032                      isReadClobber(DefLoc, UseOrDef->getMemoryInst());
2033             return false;
2034           })) {
2035         Cache.KnownReads.insert(Current);
2036         LLVM_DEBUG(dbgs() << "   ...  found a read clobber\n");
2037         return None;
2038       }
2039 
2040       // If Current cannot be analyzed or is not removable, check the next
2041       // candidate.
2042       if (!hasAnalyzableMemoryWrite(CurrentI, TLI) || !isRemovable(CurrentI)) {
2043         StepAgain = true;
2044         Current = CurrentDef->getDefiningAccess();
2045         continue;
2046       }
2047 
2048       // If Current does not have an analyzable write location, skip it
2049       auto CurrentLoc = getLocForWriteEx(CurrentI);
2050       if (!CurrentLoc) {
2051         StepAgain = true;
2052         Current = CurrentDef->getDefiningAccess();
2053         continue;
2054       }
2055 
2056       if (IsMemTerm) {
2057         // If the killing def is a memory terminator (e.g. lifetime.end), check
2058         // the next candidate if the current Current does not write the same
2059         // underlying object as the terminator.
2060         if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI)) {
2061           StepAgain = true;
2062           Current = CurrentDef->getDefiningAccess();
2063         }
2064         continue;
2065       } else {
2066         // AliasAnalysis does not account for loops. Limit elimination to
2067         // candidates for which we can guarantee they always store to the same
2068         // memory location and not multiple locations in a loop.
2069         if (Current->getBlock() != KillingDef->getBlock() &&
2070             !IsGuaranteedLoopInvariant(const_cast<Value *>(CurrentLoc->Ptr))) {
2071           StepAgain = true;
2072           Current = CurrentDef->getDefiningAccess();
2073           WalkerStepLimit -= 1;
2074           continue;
2075         }
2076 
2077         int64_t InstWriteOffset, DepWriteOffset;
2078         auto OR = isOverwrite(KillingI, CurrentI, DefLoc, *CurrentLoc, DL, TLI,
2079                               DepWriteOffset, InstWriteOffset, BatchAA, &F);
2080         // If Current does not write to the same object as KillingDef, check
2081         // the next candidate.
2082         if (OR == OW_Unknown) {
2083           StepAgain = true;
2084           Current = CurrentDef->getDefiningAccess();
2085         } else if (OR == OW_MaybePartial) {
2086           // If KillingDef only partially overwrites Current, check the next
2087           // candidate if the partial step limit is exceeded. This aggressively
2088           // limits the number of candidates for partial store elimination,
2089           // which are less likely to be removable in the end.
2090           if (PartialLimit <= 1) {
2091             StepAgain = true;
2092             Current = CurrentDef->getDefiningAccess();
2093             WalkerStepLimit -= 1;
2094             continue;
2095           }
2096           PartialLimit -= 1;
2097         }
2098       }
2099     } while (StepAgain);
2100 
2101     // Accesses to objects accessible after the function returns can only be
2102     // eliminated if the access is killed along all paths to the exit. Collect
2103     // the blocks with killing (i.e. completely overwriting) MemoryDefs and
2104     // check if they cover all paths from EarlierAccess to any function exit.
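    // Illustrative sketch (hypothetical IR): with @g visible after the return,
    //   entry: store i32 0, i32* @g          ; EarlierAccess
    //          br i1 %c, label %then, label %exit
    //   then:  store i32 1, i32* @g          ; killing def
    //          br label %exit
    //   exit:  ret void
    // the first store is not dead, because the path entry->exit bypasses the
    // killing block.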
2105     SmallPtrSet<Instruction *, 16> KillingDefs;
2106     KillingDefs.insert(KillingDef->getMemoryInst());
2107     MemoryAccess *EarlierAccess = Current;
2108     Instruction *EarlierMemInst =
2109         cast<MemoryDef>(EarlierAccess)->getMemoryInst();
2110     LLVM_DEBUG(dbgs() << "  Checking for reads of " << *EarlierAccess << " ("
2111                       << *EarlierMemInst << ")\n");
2112 
2113     SmallSetVector<MemoryAccess *, 32> WorkList;
2114     auto PushMemUses = [&WorkList](MemoryAccess *Acc) {
2115       for (Use &U : Acc->uses())
2116         WorkList.insert(cast<MemoryAccess>(U.getUser()));
2117     };
2118     PushMemUses(EarlierAccess);
2119 
2120     // Optimistically collect all accesses for reads. If we do not find any
2121     // read clobbers, add them to the cache.
2122     SmallPtrSet<MemoryAccess *, 16> KnownNoReads;
2123     if (!EarlierMemInst->mayReadFromMemory())
2124       KnownNoReads.insert(EarlierAccess);
2125     // Check if EarlierDef may be read.
2126     for (unsigned I = 0; I < WorkList.size(); I++) {
2127       MemoryAccess *UseAccess = WorkList[I];
2128 
2129       LLVM_DEBUG(dbgs() << "   " << *UseAccess);
2130       // Bail out if the number of accesses to check exceeds the scan limit.
2131       if (ScanLimit < (WorkList.size() - I)) {
2132         LLVM_DEBUG(dbgs() << "\n    ...  hit scan limit\n");
2133         return None;
2134       }
2135       --ScanLimit;
2136       NumDomMemDefChecks++;
2137 
2138       // Check if we already visited this access.
2139       if (Cache.isKnownNoRead(UseAccess)) {
2140         LLVM_DEBUG(dbgs() << " ... skip, discovered that " << *UseAccess
2141                           << " is safe earlier.\n");
2142         continue;
2143       }
2144       if (Cache.isKnownRead(UseAccess)) {
2145         LLVM_DEBUG(dbgs() << " ... bail out, discovered that " << *UseAccess
2146                           << " has a read-clobber earlier.\n");
2147         return None;
2148       }
2149       KnownNoReads.insert(UseAccess);
2150 
2151       if (isa<MemoryPhi>(UseAccess)) {
2152         if (any_of(KillingDefs, [this, UseAccess](Instruction *KI) {
2153               return DT.properlyDominates(KI->getParent(),
2154                                           UseAccess->getBlock());
2155             })) {
2156           LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing block\n");
2157           continue;
2158         }
2159         LLVM_DEBUG(dbgs() << "\n    ... adding PHI uses\n");
2160         PushMemUses(UseAccess);
2161         continue;
2162       }
2163 
2164       Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
2165       LLVM_DEBUG(dbgs() << " (" << *UseInst << ")\n");
2166 
2167       if (any_of(KillingDefs, [this, UseInst](Instruction *KI) {
2168             return DT.dominates(KI, UseInst);
2169           })) {
2170         LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing def\n");
2171         continue;
2172       }
2173 
2174       // A memory terminator kills all preceding MemoryDefs and all succeeding
2175       // MemoryAccesses. We do not have to check its users.
2176       if (isMemTerminator(DefLoc, KillingI, UseInst)) {
2177         LLVM_DEBUG(
2178             dbgs()
2179             << " ... skipping, memterminator invalidates following accesses\n");
2180         continue;
2181       }
2182 
2183       if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess)->getMemoryInst())) {
2184         LLVM_DEBUG(dbgs() << "    ... adding uses of intrinsic\n");
2185         PushMemUses(UseAccess);
2186         continue;
2187       }
2188 
2189       if (UseInst->mayThrow() && !isInvisibleToCallerBeforeRet(DefUO)) {
2190         LLVM_DEBUG(dbgs() << "  ... found throwing instruction\n");
2191         Cache.KnownReads.insert(UseAccess);
2192         Cache.KnownReads.insert(StartAccess);
2193         Cache.KnownReads.insert(EarlierAccess);
2194         return None;
2195       }
2196 
2197       // Uses which may read the original MemoryDef mean we cannot eliminate the
2198       // original MD. Stop walk.
2199       if (isReadClobber(DefLoc, UseInst)) {
2200         LLVM_DEBUG(dbgs() << "    ... found read clobber\n");
2201         Cache.KnownReads.insert(UseAccess);
2202         Cache.KnownReads.insert(StartAccess);
2203         Cache.KnownReads.insert(EarlierAccess);
2204         return None;
2205       }
2206 
2207       // For the KillingDef and EarlierAccess we only have to check if it reads
2208       // the memory location.
2209       // TODO: It would probably be better to check for self-reads before
2210       // calling the function.
2211       if (KillingDef == UseAccess || EarlierAccess == UseAccess) {
2212         LLVM_DEBUG(dbgs() << "    ... skipping killing def/dom access\n");
2213         continue;
2214       }
2215 
2216       // Check all uses for MemoryDefs, except for defs completely overwriting
2217       // the original location. Otherwise we have to check uses of *all*
2218       // MemoryDefs we discover, including non-aliasing ones. Otherwise we might
2219       // miss cases like the following
2220       //   1 = Def(LoE) ; <----- EarlierDef stores [0,1]
2221       //   2 = Def(1)   ; (2, 1) = NoAlias,   stores [2,3]
2222       //   Use(2)       ; MayAlias 2 *and* 1, loads [0, 3].
2223       //                  (The Use points to the *first* Def it may alias)
2224       //   3 = Def(1)   ; <---- Current  (3, 2) = NoAlias, (3,1) = MayAlias,
2225       //                  stores [0,1]
2226       if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) {
2227         if (isCompleteOverwrite(DefLoc, KillingI, UseInst)) {
2228           if (!isInvisibleToCallerAfterRet(DefUO) &&
2229               UseAccess != EarlierAccess) {
2230             BasicBlock *MaybeKillingBlock = UseInst->getParent();
2231             if (PostOrderNumbers.find(MaybeKillingBlock)->second <
2232                 PostOrderNumbers.find(EarlierAccess->getBlock())->second) {
2233 
2234               LLVM_DEBUG(dbgs()
2235                          << "    ... found killing def " << *UseInst << "\n");
2236               KillingDefs.insert(UseInst);
2237             }
2238           }
2239         } else
2240           PushMemUses(UseDef);
2241       }
2242     }
2243 
2244     // For accesses to locations visible after the function returns, make sure
2245     // that the location is killed (=overwritten) along all paths from
2246     // EarlierAccess to the exit.
2247     if (!isInvisibleToCallerAfterRet(DefUO)) {
2248       SmallPtrSet<BasicBlock *, 16> KillingBlocks;
2249       for (Instruction *KD : KillingDefs)
2250         KillingBlocks.insert(KD->getParent());
2251       assert(!KillingBlocks.empty() &&
2252              "Expected at least a single killing block");
2253 
2254       // Find the common post-dominator of all killing blocks.
2255       BasicBlock *CommonPred = *KillingBlocks.begin();
2256       for (auto I = std::next(KillingBlocks.begin()), E = KillingBlocks.end();
2257            I != E; I++) {
2258         if (!CommonPred)
2259           break;
2260         CommonPred = PDT.findNearestCommonDominator(CommonPred, *I);
2261       }
2262 
2263       // If CommonPred is in the set of killing blocks, just check if it
2264       // post-dominates EarlierAccess.
2265       if (KillingBlocks.count(CommonPred)) {
2266         if (PDT.dominates(CommonPred, EarlierAccess->getBlock()))
2267           return {EarlierAccess};
2268         return None;
2269       }
2270 
2271       // If the common post-dominator does not post-dominate EarlierAccess,
2272       // there is a path from EarlierAccess to an exit not going through a
2273       // killing block.
2274       if (PDT.dominates(CommonPred, EarlierAccess->getBlock())) {
2275         SetVector<BasicBlock *> WorkList;
2276 
2277         // If CommonPred is null, there are multiple exits from the function.
2278         // They all have to be added to the worklist.
2279         if (CommonPred)
2280           WorkList.insert(CommonPred);
2281         else
2282           for (BasicBlock *R : PDT.roots())
2283             WorkList.insert(R);
2284 
2285         NumCFGTries++;
2286         // Check if all paths starting from an exit node go through one of the
2287         // killing blocks before reaching EarlierAccess.
2288         for (unsigned I = 0; I < WorkList.size(); I++) {
2289           NumCFGChecks++;
2290           BasicBlock *Current = WorkList[I];
2291           if (KillingBlocks.count(Current))
2292             continue;
2293           if (Current == EarlierAccess->getBlock())
2294             return None;
2295 
2296           // EarlierAccess is reachable from the entry, so we don't have to
2297           // explore unreachable blocks further.
2298           if (!DT.isReachableFromEntry(Current))
2299             continue;
2300 
2301           for (BasicBlock *Pred : predecessors(Current))
2302             WorkList.insert(Pred);
2303 
2304           if (WorkList.size() >= MemorySSAPathCheckLimit)
2305             return None;
2306         }
2307         NumCFGSuccess++;
2308         return {EarlierAccess};
2309       }
2310       return None;
2311     }
2312 
2313     // No aliasing MemoryUses of EarlierAccess found, EarlierAccess is
2314     // potentially dead.
2315     Cache.KnownNoReads.insert(KnownNoReads.begin(), KnownNoReads.end());
2316     return {EarlierAccess};
2317   }
2318 
2319   // Delete dead memory defs
2320   void deleteDeadInstruction(Instruction *SI) {
2321     MemorySSAUpdater Updater(&MSSA);
2322     SmallVector<Instruction *, 32> NowDeadInsts;
2323     NowDeadInsts.push_back(SI);
2324     --NumFastOther;
2325 
2326     while (!NowDeadInsts.empty()) {
2327       Instruction *DeadInst = NowDeadInsts.pop_back_val();
2328       ++NumFastOther;
2329 
2330       // Try to preserve debug information attached to the dead instruction.
2331       salvageDebugInfo(*DeadInst);
2332       salvageKnowledge(DeadInst);
2333 
2334       // Remove the Instruction from MSSA.
2335       if (MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst)) {
2336         if (MemoryDef *MD = dyn_cast<MemoryDef>(MA)) {
2337           SkipStores.insert(MD);
2338         }
2339         Updater.removeMemoryAccess(MA);
2340       }
2341 
2342       auto I = IOLs.find(DeadInst->getParent());
2343       if (I != IOLs.end())
2344         I->second.erase(DeadInst);
2345       // Remove its operands
2346       for (Use &O : DeadInst->operands())
2347         if (Instruction *OpI = dyn_cast<Instruction>(O)) {
2348           O = nullptr;
2349           if (isInstructionTriviallyDead(OpI, &TLI))
2350             NowDeadInsts.push_back(OpI);
2351         }
2352 
2353       DeadInst->eraseFromParent();
2354     }
2355   }
2356 
2357   // Check for any extra throws between SI and NI that block DSE.  This only
2358   // checks extra maythrows (those that aren't MemoryDefs). MemoryDefs that may
2359   // throw are handled during the walk from one def to the next.
2360   bool mayThrowBetween(Instruction *SI, Instruction *NI,
2361                        const Value *SILocUnd) {
2362     // First see if we can ignore it by using the fact that SI stores to an
2363     // alloca/alloca-like object that is not visible to the caller during
2364     // execution of the function.
2365     if (SILocUnd && isInvisibleToCallerBeforeRet(SILocUnd))
2366       return false;
2367 
2368     if (SI->getParent() == NI->getParent())
2369       return ThrowingBlocks.count(SI->getParent());
2370     return !ThrowingBlocks.empty();
2371   }
2372 
2373   // Check if \p NI acts as a DSE barrier for \p SI. The following instructions
2374   // act as barriers:
2375   //  * A memory instruction that may throw, if \p SI accesses a non-stack
2376   //    object.
2377   //  * Atomic stores stronger than monotonic (illustrated below).
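  //
  // Illustrative IR sketch (hypothetical): the seq_cst store below acts as a
  // barrier, so the first store to %p is not eliminated here:
  //   store i32 0, i32* %p
  //   store atomic i32 1, i32* %q seq_cst, align 4
  //   store i32 2, i32* %p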
2378   bool isDSEBarrier(const Value *SILocUnd, Instruction *NI) {
2379     // If NI may throw it acts as a barrier, unless the underlying object is an
2380     // alloca or alloca-like object that does not escape.
2381     if (NI->mayThrow() && !isInvisibleToCallerBeforeRet(SILocUnd))
2382       return true;
2383 
2384     // If NI is an atomic load/store stronger than monotonic, do not try to
2385     // eliminate/reorder it.
2386     if (NI->isAtomic()) {
2387       if (auto *LI = dyn_cast<LoadInst>(NI))
2388         return isStrongerThanMonotonic(LI->getOrdering());
2389       if (auto *SI = dyn_cast<StoreInst>(NI))
2390         return isStrongerThanMonotonic(SI->getOrdering());
2391       if (auto *ARMW = dyn_cast<AtomicRMWInst>(NI))
2392         return isStrongerThanMonotonic(ARMW->getOrdering());
2393       if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(NI))
2394         return isStrongerThanMonotonic(CmpXchg->getSuccessOrdering()) ||
2395                isStrongerThanMonotonic(CmpXchg->getFailureOrdering());
2396       llvm_unreachable("other instructions should be skipped in MemorySSA");
2397     }
2398     return false;
2399   }
2400 
2401   /// Eliminate writes to objects that are not visible in the caller and are not
2402   /// accessed before returning from the function.
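  ///
  /// For example (illustrative IR), a store to a local alloca that is not read
  /// again before returning can be deleted:
  ///   %a = alloca i32
  ///   store i32 1, i32* %a   ; dead
  ///   ret void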
2403   bool eliminateDeadWritesAtEndOfFunction() {
2404     bool MadeChange = false;
2405     LLVM_DEBUG(
2406         dbgs()
2407         << "Trying to eliminate MemoryDefs at the end of the function\n");
2408     for (int I = MemDefs.size() - 1; I >= 0; I--) {
2409       MemoryDef *Def = MemDefs[I];
2410       if (SkipStores.find(Def) != SkipStores.end() ||
2411           !isRemovable(Def->getMemoryInst()))
2412         continue;
2413 
2414       Instruction *DefI = Def->getMemoryInst();
2415       SmallVector<const Value *, 4> Pointers;
2416       auto DefLoc = getLocForWriteEx(DefI);
2417       if (!DefLoc)
2418         continue;
2419 
2420       // NOTE: Currently eliminating writes at the end of a function is limited
2421       // to MemoryDefs with a single underlying object, to save compile-time. In
2422       // practice it appears the case with multiple underlying objects is very
2423       // uncommon. If it turns out to be important, we can use
2424       // getUnderlyingObjects here instead.
2425       const Value *UO = getUnderlyingObject(DefLoc->Ptr);
2426       if (!UO || !isInvisibleToCallerAfterRet(UO))
2427         continue;
2428 
2429       if (isWriteAtEndOfFunction(Def)) {
2430         // See through pointer-to-pointer bitcasts
2431         LLVM_DEBUG(dbgs() << "   ... MemoryDef is not accessed until the end "
2432                              "of the function\n");
2433         deleteDeadInstruction(DefI);
2434         ++NumFastStores;
2435         MadeChange = true;
2436       }
2437     }
2438     return MadeChange;
2439   }
2440 
2441   /// \returns true if \p Def is a no-op store, either because it
2442   /// directly stores back a loaded value or stores zero to a calloc'ed object.
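  ///
  /// Illustrative IR sketches (hypothetical):
  ///   %v = load i32, i32* %p
  ///   store i32 %v, i32* %p              ; no-op, stores back the loaded value
  /// or, with a calloc'ed object:
  ///   %m = call i8* @calloc(i64 1, i64 4)
  ///   store i8 0, i8* %m                 ; no-op, the memory is already zero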
2443   bool storeIsNoop(MemoryDef *Def, MemoryLocation DefLoc, const Value *DefUO) {
2444     StoreInst *Store = dyn_cast<StoreInst>(Def->getMemoryInst());
2445     if (!Store)
2446       return false;
2447 
2448     if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) {
2449       if (LoadI->getPointerOperand() == Store->getOperand(1)) {
2450         // Get the defining access for the load.
2451         auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess();
2452         // Fast path: the defining accesses are the same.
2453         if (LoadAccess == Def->getDefiningAccess())
2454           return true;
2455 
2456         // Look through phi accesses. Recursively scan all phi accesses by
2457         // adding them to a worklist. Bail when we run into a memory def that
2458         // does not match LoadAccess.
2459         SetVector<MemoryAccess *> ToCheck;
2460         MemoryAccess *Current =
2461             MSSA.getWalker()->getClobberingMemoryAccess(Def);
2462         // We don't want to bail when we run into the store memory def. But,
2463         // the phi access may point to it. So, pretend like we've already
2464         // checked it.
2465         ToCheck.insert(Def);
2466         ToCheck.insert(Current);
2467         // Start at current (1) to simulate already having checked Def.
2468         for (unsigned I = 1; I < ToCheck.size(); ++I) {
2469           Current = ToCheck[I];
2470           if (auto PhiAccess = dyn_cast<MemoryPhi>(Current)) {
2471             // Check all the operands.
2472             for (auto &Use : PhiAccess->incoming_values())
2473               ToCheck.insert(cast<MemoryAccess>(&Use));
2474             continue;
2475           }
2476 
          // If we find a MemoryDef other than LoadAccess, bail: an unrelated
          // write sits between the load and the otherwise no-op store.
          assert(isa<MemoryDef>(Current) &&
                 "Only MemoryDefs should reach here.");
          // TODO: Skip no alias MemoryDefs that have no aliasing reads.
          // We are searching for the definition of the store's destination.
          // So, if that is the same definition as the load, then this is a
          // noop. Otherwise, fail.
          if (LoadAccess != Current)
            return false;
        }
        return true;
      }
    }

    Constant *StoredConstant = dyn_cast<Constant>(Store->getOperand(0));
    if (StoredConstant && StoredConstant->isNullValue()) {
      auto *DefUOInst = dyn_cast<Instruction>(DefUO);
      if (DefUOInst && isCallocLikeFn(DefUOInst, &TLI)) {
        auto *UnderlyingDef = cast<MemoryDef>(MSSA.getMemoryAccess(DefUOInst));
        // If UnderlyingDef is the clobbering access of Def, no instructions
        // between them can modify the memory location.
        auto *ClobberDef =
            MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def);
        return UnderlyingDef == ClobberDef;
      }
    }
    return false;
  }
};

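// Top-level driver for MemorySSA-backed DSE: treat each MemoryDef as a
// potential killing write and walk up its MemorySSA chain for earlier writes
// to the same location that it completely overwrites, e.g.
//   store i32 0, i32* %p   ; dead: completely overwritten by the store below
//   store i32 1, i32* %p
// It also merges partially overlapping constant stores, removes no-op stores,
// and drops writes that are dead at the end of the function.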
bool eliminateDeadStoresMemorySSA(Function &F, AliasAnalysis &AA,
                                  MemorySSA &MSSA, DominatorTree &DT,
                                  PostDominatorTree &PDT,
                                  const TargetLibraryInfo &TLI) {
  bool MadeChange = false;

  DSEState State = DSEState::get(F, AA, MSSA, DT, PDT, TLI);
  // For each store:
  for (unsigned I = 0; I < State.MemDefs.size(); I++) {
    MemoryDef *KillingDef = State.MemDefs[I];
    if (State.SkipStores.count(KillingDef))
      continue;
    Instruction *SI = KillingDef->getMemoryInst();

    Optional<MemoryLocation> MaybeSILoc;
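    // Memory terminators (lifetime.end and frees) get the location of the
    // object they end; all other writes use the location they write to.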
    if (State.isMemTerminatorInst(SI))
      MaybeSILoc = State.getLocForTerminator(SI).map(
          [](const std::pair<MemoryLocation, bool> &P) { return P.first; });
    else
      MaybeSILoc = State.getLocForWriteEx(SI);

    if (!MaybeSILoc) {
      LLVM_DEBUG(dbgs() << "Failed to find analyzable write location for "
                        << *SI << "\n");
      continue;
    }
    MemoryLocation SILoc = *MaybeSILoc;
    assert(SILoc.Ptr && "SILoc should not be null");
    const Value *SILocUnd = getUnderlyingObject(SILoc.Ptr);

    MemoryAccess *Current = KillingDef;
    LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs killed by "
                      << *KillingDef << " (" << *SI << ")\n");

    unsigned ScanLimit = MemorySSAScanLimit;
    unsigned WalkerStepLimit = MemorySSAUpwardsStepLimit;
    unsigned PartialLimit = MemorySSAPartialStoreLimit;
    // Worklist of MemoryAccesses that may be killed by KillingDef.
    SetVector<MemoryAccess *> ToCheck;

    if (SILocUnd)
      ToCheck.insert(KillingDef->getDefiningAccess());

    bool Shortened = false;
    bool IsMemTerm = State.isMemTerminatorInst(SI);
    DSEState::CheckCache Cache;
    // Check if MemoryAccesses in the worklist are killed by KillingDef.
    for (unsigned I = 0; I < ToCheck.size(); I++) {
      Current = ToCheck[I];
      if (State.SkipStores.count(Current))
        continue;

      Optional<MemoryAccess *> Next = State.getDomMemoryDef(
          KillingDef, Current, SILoc, SILocUnd, Cache, ScanLimit,
          WalkerStepLimit, IsMemTerm, PartialLimit);

      if (!Next) {
        LLVM_DEBUG(dbgs() << "  finished walk\n");
        continue;
      }

      MemoryAccess *EarlierAccess = *Next;
      LLVM_DEBUG(dbgs() << " Checking if we can kill " << *EarlierAccess);
      if (isa<MemoryPhi>(EarlierAccess)) {
        LLVM_DEBUG(dbgs() << "\n  ... adding incoming values to worklist\n");
        for (Value *V : cast<MemoryPhi>(EarlierAccess)->incoming_values()) {
          MemoryAccess *IncomingAccess = cast<MemoryAccess>(V);
          BasicBlock *IncomingBlock = IncomingAccess->getBlock();
          BasicBlock *PhiBlock = EarlierAccess->getBlock();

          // We only consider incoming MemoryAccesses that come before the
          // MemoryPhi. Otherwise we could discover candidates that do not
          // strictly dominate our starting def.
          if (State.PostOrderNumbers[IncomingBlock] >
              State.PostOrderNumbers[PhiBlock])
            ToCheck.insert(IncomingAccess);
        }
        continue;
      }
      auto *NextDef = cast<MemoryDef>(EarlierAccess);
      Instruction *NI = NextDef->getMemoryInst();
      LLVM_DEBUG(dbgs() << " (" << *NI << ")\n");
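      // Keep walking: queue the def above NextDef so even earlier writes on
      // this path are also considered against KillingDef.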
      ToCheck.insert(NextDef->getDefiningAccess());
      NumGetDomMemoryDefPassed++;

      if (!DebugCounter::shouldExecute(MemorySSACounter))
        continue;

      MemoryLocation NILoc = *State.getLocForWriteEx(NI);

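      // A memory terminator (lifetime.end or a free of the underlying object)
      // kills every earlier write to that same object.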
      if (IsMemTerm) {
        const Value *NIUnd = getUnderlyingObject(NILoc.Ptr);
        if (SILocUnd != NIUnd)
          continue;
        LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *NI
                          << "\n  KILLER: " << *SI << '\n');
        State.deleteDeadInstruction(NI);
        ++NumFastStores;
        MadeChange = true;
      } else {
        // Check if SI (the later write) overwrites NI (the earlier one).
        int64_t InstWriteOffset, DepWriteOffset;
        OverwriteResult OR =
            isOverwrite(SI, NI, SILoc, NILoc, State.DL, TLI, DepWriteOffset,
                        InstWriteOffset, State.BatchAA, &F);
        if (OR == OW_MaybePartial) {
          auto Iter = State.IOLs.insert(
              std::make_pair<BasicBlock *, InstOverlapIntervalsTy>(
                  NI->getParent(), InstOverlapIntervalsTy()));
          auto &IOL = Iter.first->second;
          OR = isPartialOverwrite(SILoc, NILoc, DepWriteOffset, InstWriteOffset,
                                  NI, IOL);
        }

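        // If the later (smaller) constant store lies entirely within an
        // earlier (larger) constant store, try folding the later bytes into
        // the earlier store's value and dropping the later store.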
        if (EnablePartialStoreMerging && OR == OW_PartialEarlierWithFullLater) {
          auto *Earlier = dyn_cast<StoreInst>(NI);
          auto *Later = dyn_cast<StoreInst>(SI);
          // We are re-using tryToMergePartialOverlappingStores, which requires
          // Earlier to dominate Later.
          // TODO: implement tryToMergePartialOverlappingStores using MemorySSA.
          if (Earlier && Later && DT.dominates(Earlier, Later)) {
            if (Constant *Merged = tryToMergePartialOverlappingStores(
                    Earlier, Later, InstWriteOffset, DepWriteOffset, State.DL,
                    State.BatchAA, &DT)) {

              // Update stored value of earlier store to merged constant.
              Earlier->setOperand(0, Merged);
              ++NumModifiedStores;
              MadeChange = true;

              Shortened = true;
              // Remove later store and remove any outstanding overlap intervals
              // for the updated store.
              State.deleteDeadInstruction(Later);
              auto I = State.IOLs.find(Earlier->getParent());
              if (I != State.IOLs.end())
                I->second.erase(Earlier);
              break;
            }
          }
        }

        if (OR == OW_Complete) {
          LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *NI
                            << "\n  KILLER: " << *SI << '\n');
          State.deleteDeadInstruction(NI);
          ++NumFastStores;
          MadeChange = true;
        }
      }
    }

    // Check if the store is a no-op.
    if (!Shortened && isRemovable(SI) &&
        State.storeIsNoop(KillingDef, SILoc, SILocUnd)) {
      LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n  DEAD: " << *SI << '\n');
      State.deleteDeadInstruction(SI);
      NumRedundantStores++;
      MadeChange = true;
      continue;
    }
  }

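  // Shorten writes that were found to be partially overwritten, using the
  // per-block overlap intervals recorded above.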
  if (EnablePartialOverwriteTracking)
    for (auto &KV : State.IOLs)
      MadeChange |= removePartiallyOverlappedStores(State.DL, KV.second, TLI);

  MadeChange |= State.eliminateDeadWritesAtEndOfFunction();
  return MadeChange;
}
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//
PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
  AliasAnalysis &AA = AM.getResult<AAManager>(F);
  const TargetLibraryInfo &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);

  bool Changed = false;
  if (EnableMemorySSA) {
    MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
    PostDominatorTree &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);

    Changed = eliminateDeadStoresMemorySSA(F, AA, MSSA, DT, PDT, TLI);
  } else {
    MemoryDependenceResults &MD = AM.getResult<MemoryDependenceAnalysis>(F);

    Changed = eliminateDeadStores(F, &AA, &MD, &DT, &TLI);
  }

#ifdef LLVM_ENABLE_STATS
  if (AreStatisticsEnabled())
    for (auto &I : instructions(F))
      NumRemainingStores += isa<StoreInst>(&I);
#endif

  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  if (EnableMemorySSA)
    PA.preserve<MemorySSAAnalysis>();
  else
    PA.preserve<MemoryDependenceAnalysis>();
  return PA;
}

namespace {

/// A legacy pass for the legacy pass manager that wraps \c DSEPass.
class DSELegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  DSELegacyPass() : FunctionPass(ID) {
    initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
    DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    const TargetLibraryInfo &TLI =
        getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);

    bool Changed = false;
    if (EnableMemorySSA) {
      MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
      PostDominatorTree &PDT =
          getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();

      Changed = eliminateDeadStoresMemorySSA(F, AA, MSSA, DT, PDT, TLI);
    } else {
      MemoryDependenceResults &MD =
          getAnalysis<MemoryDependenceWrapperPass>().getMemDep();

      Changed = eliminateDeadStores(F, &AA, &MD, &DT, &TLI);
    }

#ifdef LLVM_ENABLE_STATS
    if (AreStatisticsEnabled())
      for (auto &I : instructions(F))
        NumRemainingStores += isa<StoreInst>(&I);
#endif

    return Changed;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();

    if (EnableMemorySSA) {
      AU.addRequired<PostDominatorTreeWrapperPass>();
      AU.addRequired<MemorySSAWrapperPass>();
      AU.addPreserved<PostDominatorTreeWrapperPass>();
      AU.addPreserved<MemorySSAWrapperPass>();
    } else {
      AU.addRequired<MemoryDependenceWrapperPass>();
      AU.addPreserved<MemoryDependenceWrapperPass>();
    }
  }
};

} // end anonymous namespace

char DSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
                    false)

FunctionPass *llvm::createDeadStoreEliminationPass() {
  return new DSELegacyPass();
}