//===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This transformation implements the well-known scalar replacement of
/// aggregates transformation. It tries to identify promotable elements of an
/// aggregate alloca, and promote them to registers. It will also try to
/// convert uses of an element (or set of elements) of an alloca into a vector
/// or bitfield-style integer scalar if appropriate.
///
/// It works to do this with minimal slicing of the alloca so that regions
/// which are merely transferred in and out of external memory remain unchanged
/// and are not decomposed to scalar code.
///
/// Because this also performs alloca promotion, it can be thought of as also
/// serving the purpose of SSA formation. The algorithm iterates on the
/// function until all opportunities for promotion have been realized.
///
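/// For example (an illustrative sketch; value names are invented), a
/// per-field access pattern such as:
///
///   %agg = alloca { i32, i32 }
///   %f0 = getelementptr inbounds { i32, i32 }* %agg, i32 0, i32 0
///   store i32 %x, i32* %f0
///   %v = load i32* %f0
///
/// can be rewritten so that %v becomes a direct use of %x, and the alloca,
/// once all of its slices are promoted, is deleted entirely.
///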
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TimeValue.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"

#if __cplusplus >= 201103L && !defined(NDEBUG)
// We only use this for a debug check in C++11
#include <random>
#endif

using namespace llvm;

#define DEBUG_TYPE "sroa"

STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed");
STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca");
STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten");
STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition");
STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced");
STATISTIC(NumPromoted, "Number of allocas promoted to SSA values");
STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion");
STATISTIC(NumDeleted, "Number of instructions deleted");
STATISTIC(NumVectorized, "Number of vectorized aggregates");

/// Hidden option to force the pass to not use DomTree and mem2reg, instead
/// forming SSA values through the SSAUpdater infrastructure.
static cl::opt<bool>
ForceSSAUpdater("force-ssa-updater", cl::init(false), cl::Hidden);

/// Hidden option to enable randomly shuffling the slices to help uncover
/// instability in their order.
static cl::opt<bool> SROARandomShuffleSlices("sroa-random-shuffle-slices",
                                             cl::init(false), cl::Hidden);

/// Hidden option to experiment with completely strict handling of inbounds
/// GEPs.
static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds",
                                        cl::init(false), cl::Hidden);

namespace {
/// \brief A custom IRBuilder inserter which prefixes all names if they are
/// preserved.
template <bool preserveNames = true>
class IRBuilderPrefixedInserter :
    public IRBuilderDefaultInserter<preserveNames> {
  std::string Prefix;

public:
  void SetNamePrefix(const Twine &P) { Prefix = P.str(); }

protected:
  void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB,
                    BasicBlock::iterator InsertPt) const {
    IRBuilderDefaultInserter<preserveNames>::InsertHelper(
        I, Name.isTriviallyEmpty() ? Name : Prefix + Name, BB, InsertPt);
  }
};

// Specialization for not preserving the name is trivial.
template <>
class IRBuilderPrefixedInserter<false> :
    public IRBuilderDefaultInserter<false> {
public:
  void SetNamePrefix(const Twine &P) {}
};

/// \brief Provide a typedef for IRBuilder that drops names in release builds.
#ifndef NDEBUG
typedef llvm::IRBuilder<true, ConstantFolder,
                        IRBuilderPrefixedInserter<true> > IRBuilderTy;
#else
typedef llvm::IRBuilder<false, ConstantFolder,
                        IRBuilderPrefixedInserter<false> > IRBuilderTy;
#endif
}

namespace {
/// \brief A used slice of an alloca.
///
/// This structure represents a slice of an alloca used by some instruction. It
/// stores both the begin and end offsets of this use, a pointer to the use
/// itself, and a flag indicating whether we can classify the use as splittable
/// or not when forming partitions of the alloca.
class Slice {
  /// \brief The beginning offset of the range.
  uint64_t BeginOffset;

  /// \brief The ending offset, not included in the range.
  uint64_t EndOffset;

  /// \brief Storage for both the use of this slice and whether it can be
  /// split.
  PointerIntPair<Use *, 1, bool> UseAndIsSplittable;

public:
  Slice() : BeginOffset(), EndOffset() {}
  Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable)
      : BeginOffset(BeginOffset), EndOffset(EndOffset),
        UseAndIsSplittable(U, IsSplittable) {}

  uint64_t beginOffset() const { return BeginOffset; }
  uint64_t endOffset() const { return EndOffset; }

  bool isSplittable() const { return UseAndIsSplittable.getInt(); }
  void makeUnsplittable() { UseAndIsSplittable.setInt(false); }

  Use *getUse() const { return UseAndIsSplittable.getPointer(); }

  bool isDead() const { return getUse() == nullptr; }
  void kill() { UseAndIsSplittable.setPointer(nullptr); }

  /// \brief Support for ordering ranges.
  ///
  /// This provides an ordering over ranges such that start offsets are
  /// always increasing, and within equal start offsets, the end offsets are
  /// decreasing. Thus the spanning range comes first in a cluster with the
  /// same start position.
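  ///
  /// For example (an illustrative ordering, assuming equal splittability),
  /// the slices [0,8), [0,16), and [4,12) sort as [0,16), [0,8), [4,12):
  /// the spanning slice leads its cluster.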
  bool operator<(const Slice &RHS) const {
    if (beginOffset() < RHS.beginOffset()) return true;
    if (beginOffset() > RHS.beginOffset()) return false;
    if (isSplittable() != RHS.isSplittable()) return !isSplittable();
    if (endOffset() > RHS.endOffset()) return true;
    return false;
  }

  /// \brief Support comparison with a single offset to allow binary searches.
  friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS,
                                              uint64_t RHSOffset) {
    return LHS.beginOffset() < RHSOffset;
  }
  friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset,
                                              const Slice &RHS) {
    return LHSOffset < RHS.beginOffset();
  }

  bool operator==(const Slice &RHS) const {
    return isSplittable() == RHS.isSplittable() &&
           beginOffset() == RHS.beginOffset() && endOffset() == RHS.endOffset();
  }
  bool operator!=(const Slice &RHS) const { return !operator==(RHS); }
};
} // end anonymous namespace

namespace llvm {
template <typename T> struct isPodLike;
template <> struct isPodLike<Slice> {
  static const bool value = true;
};
}

namespace {
/// \brief Representation of the alloca slices.
///
/// This class represents the slices of an alloca which are formed by its
/// various uses. If a pointer escapes, we can't fully build a representation
/// for the slices used and we reflect that in this structure. The uses are
/// stored, sorted by increasing beginning offset and with unsplittable slices
/// starting at a particular offset before splittable slices.
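///
/// For example (an illustrative sketch), an alloca with an i64 store covering
/// [0,8) and an i32 load covering [4,8) would be represented by the sorted
/// slices:
///
///   [0,8) slice #0 (splittable)
///   [4,8) slice #1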
class AllocaSlices {
public:
  /// \brief Construct the slices of a particular alloca.
  AllocaSlices(const DataLayout &DL, AllocaInst &AI);

  /// \brief Test whether a pointer to the allocation escapes our analysis.
  ///
  /// If this is true, the slices are never fully built and should be
  /// ignored.
  bool isEscaped() const { return PointerEscapingInstr; }

  /// \brief Support for iterating over the slices.
  /// @{
  typedef SmallVectorImpl<Slice>::iterator iterator;
  iterator begin() { return Slices.begin(); }
  iterator end() { return Slices.end(); }

  typedef SmallVectorImpl<Slice>::const_iterator const_iterator;
  const_iterator begin() const { return Slices.begin(); }
  const_iterator end() const { return Slices.end(); }
  /// @}

  /// \brief Allow iterating the dead users for this alloca.
  ///
  /// These are instructions which will never actually use the alloca as they
  /// are outside the allocated range. They are safe to replace with undef and
  /// delete.
  /// @{
  typedef SmallVectorImpl<Instruction *>::const_iterator dead_user_iterator;
  dead_user_iterator dead_user_begin() const { return DeadUsers.begin(); }
  dead_user_iterator dead_user_end() const { return DeadUsers.end(); }
  /// @}

  /// \brief Allow iterating the dead expressions referring to this alloca.
  ///
  /// These are operands which cannot actually be used to refer to the
  /// alloca as they are outside its range and the user doesn't correct for
  /// that. These mostly consist of PHI node inputs and the like which we just
  /// need to replace with undef.
  /// @{
  typedef SmallVectorImpl<Use *>::const_iterator dead_op_iterator;
  dead_op_iterator dead_op_begin() const { return DeadOperands.begin(); }
  dead_op_iterator dead_op_end() const { return DeadOperands.end(); }
  /// @}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream &OS, const_iterator I, StringRef Indent = "  ") const;
  void printSlice(raw_ostream &OS, const_iterator I,
                  StringRef Indent = "  ") const;
  void printUse(raw_ostream &OS, const_iterator I,
                StringRef Indent = "  ") const;
  void print(raw_ostream &OS) const;
  void dump(const_iterator I) const;
  void dump() const;
#endif

private:
  template <typename DerivedT, typename RetT = void> class BuilderBase;
  class SliceBuilder;
  friend class AllocaSlices::SliceBuilder;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// \brief Handle to alloca instruction to simplify method interfaces.
  AllocaInst &AI;
#endif

  /// \brief The instruction responsible for this alloca not having a known set
  /// of slices.
  ///
  /// When an instruction (potentially) escapes the pointer to the alloca, we
  /// store a pointer to that here and abort trying to form slices of the
  /// alloca. This will be null if the alloca slices are analyzed successfully.
  Instruction *PointerEscapingInstr;

  /// \brief The slices of the alloca.
  ///
  /// We store a vector of the slices formed by uses of the alloca here. This
  /// vector is sorted by increasing begin offset, and then the unsplittable
  /// slices before the splittable ones. See the Slice inner class for more
  /// details.
  SmallVector<Slice, 8> Slices;

  /// \brief Instructions which will become dead if we rewrite the alloca.
  ///
  /// Note that these are not separated by slice. This is because we expect an
  /// alloca to be completely rewritten or not rewritten at all. If rewritten,
  /// all these instructions can simply be removed and replaced with undef as
  /// they come from outside of the allocated space.
  SmallVector<Instruction *, 8> DeadUsers;

  /// \brief Operands which will become dead if we rewrite the alloca.
  ///
  /// These are operands that in their particular use can be replaced with
  /// undef when we rewrite the alloca. These show up in out-of-bounds inputs
  /// to PHI nodes and the like. They aren't entirely dead (there might be
  /// a GEP back into the bounds using it elsewhere) and nor is the PHI, but we
  /// want to swap this particular input for undef to simplify the use lists of
  /// the alloca.
  SmallVector<Use *, 8> DeadOperands;
};
}
static Value *foldSelectInst(SelectInst &SI) {
  // If the condition being selected on is a constant or the same value is
  // being selected between, fold the select. Yes this does (rarely) happen
  // early on.
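  // For example (illustrative): "select i1 true, i32* %a, i32* %b" folds to
  // %a, and "select i1 %c, i32* %p, i32* %p" folds to %p.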
  if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
    return SI.getOperand(1 + CI->isZero());
  if (SI.getOperand(1) == SI.getOperand(2))
    return SI.getOperand(1);

  return nullptr;
}

/// \brief Builder for the alloca slices.
///
/// This class builds a set of alloca slices by recursively visiting the uses
/// of an alloca and making a slice for each load and store at each offset.
class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> {
  friend class PtrUseVisitor<SliceBuilder>;
  friend class InstVisitor<SliceBuilder>;
  typedef PtrUseVisitor<SliceBuilder> Base;

  const uint64_t AllocSize;
  AllocaSlices &S;

  SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap;
  SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes;

  /// \brief Set to de-duplicate dead instructions found in the use walk.
  SmallPtrSet<Instruction *, 4> VisitedDeadInsts;

public:
  SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &S)
      : PtrUseVisitor<SliceBuilder>(DL),
        AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())), S(S) {}

private:
  void markAsDead(Instruction &I) {
    if (VisitedDeadInsts.insert(&I))
      S.DeadUsers.push_back(&I);
  }

  void insertUse(Instruction &I, const APInt &Offset, uint64_t Size,
                 bool IsSplittable = false) {
    // Completely skip uses which have a zero size or start either before or
    // past the end of the allocation.
    if (Size == 0 || Offset.uge(AllocSize)) {
      DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset
                   << " which has zero size or starts outside of the "
                   << AllocSize << " byte alloca:\n"
                   << "    alloca: " << S.AI << "\n"
                   << "       use: " << I << "\n");
      return markAsDead(I);
    }

    uint64_t BeginOffset = Offset.getZExtValue();
    uint64_t EndOffset = BeginOffset + Size;

    // Clamp the end offset to the end of the allocation. Note that this is
    // formulated to handle even the case where "BeginOffset + Size" overflows.
    // This may appear superficially to be something we could ignore entirely,
    // but that is not so! There may be widened loads or PHI-node uses where
    // some instructions are dead but not others. We can't completely ignore
    // them, and so have to record at least the information here.
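    // For example (illustrative): a 16 byte store at offset 8 within a 16
    // byte alloca is recorded as the clamped slice [8,16) rather than being
    // dropped outright.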
    assert(AllocSize >= BeginOffset); // Established above.
    if (Size > AllocSize - BeginOffset) {
      DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset
                   << " to remain within the " << AllocSize << " byte alloca:\n"
                   << "    alloca: " << S.AI << "\n"
                   << "       use: " << I << "\n");
      EndOffset = AllocSize;
    }

    S.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable));
  }

  void visitBitCastInst(BitCastInst &BC) {
    if (BC.use_empty())
      return markAsDead(BC);

    return Base::visitBitCastInst(BC);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    if (GEPI.use_empty())
      return markAsDead(GEPI);

    if (SROAStrictInbounds && GEPI.isInBounds()) {
      // FIXME: This is a manually un-factored variant of the basic code inside
      // of GEPs with checking of the inbounds invariant specified in the
      // langref in a very strict sense. If we ever want to enable
      // SROAStrictInbounds, this code should be factored cleanly into
      // PtrUseVisitor, but it is easier to experiment with SROAStrictInbounds
      // by writing out the code here where we have the underlying allocation
      // size readily available.
      APInt GEPOffset = Offset;
      for (gep_type_iterator GTI = gep_type_begin(GEPI),
                             GTE = gep_type_end(GEPI);
           GTI != GTE; ++GTI) {
        ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
        if (!OpC)
          break;

        // Handle a struct index, which adds its field offset to the pointer.
        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
          unsigned ElementIdx = OpC->getZExtValue();
          const StructLayout *SL = DL.getStructLayout(STy);
          GEPOffset +=
              APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
        } else {
          // For array or vector indices, scale the index by the size of the
          // type.
          APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth());
          GEPOffset += Index * APInt(Offset.getBitWidth(),
                                     DL.getTypeAllocSize(GTI.getIndexedType()));
        }

        // If this index has computed an intermediate pointer which is not
        // inbounds, then the result of the GEP is a poison value and we can
        // delete it and all uses.
        if (GEPOffset.ugt(AllocSize))
          return markAsDead(GEPI);
      }
    }

    return Base::visitGetElementPtrInst(GEPI);
  }

  void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset,
                         uint64_t Size, bool IsVolatile) {
    // We allow splitting of loads and stores where the type is an integer type
    // and covers the entire alloca. This prevents us from splitting
    // over-eagerly.
    // FIXME: In the great blue eventually, we should eagerly split all integer
    // loads and stores, and then have a separate step that merges adjacent
    // alloca partitions into a single partition suitable for integer widening.
    // Or we should skip the merge step and rely on GVN and other passes to
    // merge adjacent loads and stores that survive mem2reg.
    bool IsSplittable =
        Ty->isIntegerTy() && !IsVolatile && Offset == 0 && Size >= AllocSize;

    insertUse(I, Offset, Size, IsSplittable);
  }

  void visitLoadInst(LoadInst &LI) {
    assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
           "All simple FCA loads should have been pre-split");

    if (!IsOffsetKnown)
      return PI.setAborted(&LI);

    uint64_t Size = DL.getTypeStoreSize(LI.getType());
    return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile());
  }

  void visitStoreInst(StoreInst &SI) {
    Value *ValOp = SI.getValueOperand();
    if (ValOp == *U)
      return PI.setEscapedAndAborted(&SI);
    if (!IsOffsetKnown)
      return PI.setAborted(&SI);

    uint64_t Size = DL.getTypeStoreSize(ValOp->getType());

    // If this memory access can be shown to *statically* extend outside the
    // bounds of the allocation, its behavior is undefined, so simply
    // ignore it. Note that this is more strict than the generic clamping
    // behavior of insertUse. We also try to handle cases which might run the
    // risk of overflow.
    // FIXME: We should instead consider the pointer to have escaped if this
    // function is being instrumented for addressing bugs or race conditions.
    if (Size > AllocSize || Offset.ugt(AllocSize - Size)) {
      DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @" << Offset
                   << " which extends past the end of the " << AllocSize
                   << " byte alloca:\n"
                   << "    alloca: " << S.AI << "\n"
                   << "       use: " << SI << "\n");
      return markAsDead(SI);
    }

    assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
           "All simple FCA stores should have been pre-split");
    handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile());
  }

  void visitMemSetInst(MemSetInst &II) {
    assert(II.getRawDest() == *U && "Pointer use is not the destination?");
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    if ((Length && Length->getValue() == 0) ||
        (IsOffsetKnown && Offset.uge(AllocSize)))
      // Zero-length mem transfer intrinsics can be ignored entirely.
      return markAsDead(II);

    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    insertUse(II, Offset,
              Length ? Length->getLimitedValue()
                     : AllocSize - Offset.getLimitedValue(),
              (bool)Length);
  }

  void visitMemTransferInst(MemTransferInst &II) {
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    if (Length && Length->getValue() == 0)
      // Zero-length mem transfer intrinsics can be ignored entirely.
      return markAsDead(II);

    // Because we can visit these intrinsics twice, also check to see if the
    // first time marked this instruction as dead. If so, skip it.
    if (VisitedDeadInsts.count(&II))
      return;

    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    // This side of the transfer is completely out-of-bounds, and so we can
    // nuke the entire transfer. However, we also need to nuke the other side
    // if already added to our partitions.
    // FIXME: Yet another place we really should bypass this when
    // instrumenting for ASan.
    if (Offset.uge(AllocSize)) {
      SmallDenseMap<Instruction *, unsigned>::iterator MTPI =
          MemTransferSliceMap.find(&II);
      if (MTPI != MemTransferSliceMap.end())
        S.Slices[MTPI->second].kill();
      return markAsDead(II);
    }

    uint64_t RawOffset = Offset.getLimitedValue();
    uint64_t Size = Length ? Length->getLimitedValue()
                           : AllocSize - RawOffset;

    // Check for the special case where the same exact value is used for both
    // source and dest.
    if (*U == II.getRawDest() && *U == II.getRawSource()) {
      // For non-volatile transfers this is a no-op.
      if (!II.isVolatile())
        return markAsDead(II);

      return insertUse(II, Offset, Size, /*IsSplittable=*/false);
    }

    // If we have seen both source and destination for a mem transfer, then
    // they both point to the same alloca.
    bool Inserted;
    SmallDenseMap<Instruction *, unsigned>::iterator MTPI;
    std::tie(MTPI, Inserted) =
        MemTransferSliceMap.insert(std::make_pair(&II, S.Slices.size()));
    unsigned PrevIdx = MTPI->second;
    if (!Inserted) {
      Slice &PrevP = S.Slices[PrevIdx];

      // Check if the begin offsets match and this is a non-volatile transfer.
      // In that case, we can completely elide the transfer.
      if (!II.isVolatile() && PrevP.beginOffset() == RawOffset) {
        PrevP.kill();
        return markAsDead(II);
      }

      // Otherwise we have an offset transfer within the same alloca. We can't
      // split those.
      PrevP.makeUnsplittable();
    }

    // Insert the use now that we've fixed up the splittable nature.
    insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length);

    // Check that we ended up with a valid index in the map.
    assert(S.Slices[PrevIdx].getUse()->getUser() == &II &&
           "Map index doesn't point back to a slice with this user.");
  }

  // Disable SRoA for any intrinsics except for lifetime invariants.
  // FIXME: What about debug intrinsics? This matches old behavior, but
  // doesn't make sense.
  void visitIntrinsicInst(IntrinsicInst &II) {
    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    if (II.getIntrinsicID() == Intrinsic::lifetime_start ||
        II.getIntrinsicID() == Intrinsic::lifetime_end) {
      ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
      uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(),
                               Length->getLimitedValue());
      insertUse(II, Offset, Size, true);
      return;
    }

    Base::visitIntrinsicInst(II);
  }

  Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) {
    // We consider any PHI or select that results in a direct load or store of
    // the same offset to be a viable use for slicing purposes. These uses
    // are considered unsplittable and the size is the maximum loaded or stored
    // size.
    SmallPtrSet<Instruction *, 4> Visited;
    SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses;
    Visited.insert(Root);
    Uses.push_back(std::make_pair(cast<Instruction>(*U), Root));
    // If there are no loads or stores, the access is dead. We mark that as
    // a size zero access.
    Size = 0;
    do {
      Instruction *I, *UsedI;
      std::tie(UsedI, I) = Uses.pop_back_val();

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        Size = std::max(Size, DL.getTypeStoreSize(LI->getType()));
        continue;
      }
      if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
        Value *Op = SI->getOperand(0);
        if (Op == UsedI)
          return SI;
        Size = std::max(Size, DL.getTypeStoreSize(Op->getType()));
        continue;
      }

      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
        if (!GEP->hasAllZeroIndices())
          return GEP;
      } else if (!isa<BitCastInst>(I) && !isa<PHINode>(I) &&
                 !isa<SelectInst>(I)) {
        return I;
      }

      for (User *U : I->users())
        if (Visited.insert(cast<Instruction>(U)))
          Uses.push_back(std::make_pair(I, cast<Instruction>(U)));
    } while (!Uses.empty());

    return nullptr;
  }

  void visitPHINode(PHINode &PN) {
    if (PN.use_empty())
      return markAsDead(PN);
    if (!IsOffsetKnown)
      return PI.setAborted(&PN);

    // See if we already have computed info on this node.
    uint64_t &PHISize = PHIOrSelectSizes[&PN];
    if (!PHISize) {
      // This is a new PHI node, check for an unsafe use of the PHI node.
      if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&PN, PHISize))
        return PI.setAborted(UnsafeI);
    }

    // For PHI and select operands outside the alloca, we can't nuke the entire
    // phi or select -- the other side might still be relevant, so we special
    // case them here and use a separate structure to track the operands
    // themselves which should be replaced with undef.
    // FIXME: This should instead be escaped in the event we're instrumenting
    // for address sanitization.
    if (Offset.uge(AllocSize)) {
      S.DeadOperands.push_back(U);
      return;
    }

    insertUse(PN, Offset, PHISize);
  }

  void visitSelectInst(SelectInst &SI) {
    if (SI.use_empty())
      return markAsDead(SI);
    if (Value *Result = foldSelectInst(SI)) {
      if (Result == *U)
        // If the result of the constant fold will be the pointer, recurse
        // through the select as if we had RAUW'ed it.
        enqueueUsers(SI);
      else
        // Otherwise the operand to the select is dead, and we can replace it
        // with undef.
        S.DeadOperands.push_back(U);

      return;
    }
    if (!IsOffsetKnown)
      return PI.setAborted(&SI);

    // See if we already have computed info on this node.
    uint64_t &SelectSize = PHIOrSelectSizes[&SI];
    if (!SelectSize) {
      // This is a new Select, check for an unsafe use of it.
      if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&SI, SelectSize))
        return PI.setAborted(UnsafeI);
    }

    // For PHI and select operands outside the alloca, we can't nuke the entire
    // phi or select -- the other side might still be relevant, so we special
    // case them here and use a separate structure to track the operands
    // themselves which should be replaced with undef.
    // FIXME: This should instead be escaped in the event we're instrumenting
    // for address sanitization.
    if (Offset.uge(AllocSize)) {
      S.DeadOperands.push_back(U);
      return;
    }

    insertUse(SI, Offset, SelectSize);
  }

  /// \brief Disable SROA entirely if there are unhandled users of the alloca.
  void visitInstruction(Instruction &I) {
    PI.setAborted(&I);
  }
};

AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
    :
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
      AI(AI),
#endif
      PointerEscapingInstr(nullptr) {
  SliceBuilder PB(DL, AI, *this);
  SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI);
  if (PtrI.isEscaped() || PtrI.isAborted()) {
    // FIXME: We should sink the escape vs. abort info into the caller nicely,
    // possibly by just storing the PtrInfo in the AllocaSlices.
    PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst()
                                                  : PtrI.getAbortingInst();
    assert(PointerEscapingInstr && "Did not track a bad instruction");
    return;
  }

  Slices.erase(std::remove_if(Slices.begin(), Slices.end(),
                              std::mem_fun_ref(&Slice::isDead)),
               Slices.end());

#if __cplusplus >= 201103L && !defined(NDEBUG)
  if (SROARandomShuffleSlices) {
    std::mt19937 MT(static_cast<unsigned>(sys::TimeValue::now().msec()));
    std::shuffle(Slices.begin(), Slices.end(), MT);
  }
#endif

  // Sort the uses. This arranges for the offsets to be in ascending order,
  // and the sizes to be in descending order.
  std::sort(Slices.begin(), Slices.end());
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

void AllocaSlices::print(raw_ostream &OS, const_iterator I,
                         StringRef Indent) const {
  printSlice(OS, I, Indent);
  printUse(OS, I, Indent);
}

void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I,
                              StringRef Indent) const {
  OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")"
     << " slice #" << (I - begin())
     << (I->isSplittable() ? " (splittable)" : "") << "\n";
}

void AllocaSlices::printUse(raw_ostream &OS, const_iterator I,
                            StringRef Indent) const {
  OS << Indent << "  used by: " << *I->getUse()->getUser() << "\n";
}

void AllocaSlices::print(raw_ostream &OS) const {
  if (PointerEscapingInstr) {
    OS << "Can't analyze slices for alloca: " << AI << "\n"
       << "  A pointer to this alloca escaped by:\n"
       << "  " << *PointerEscapingInstr << "\n";
    return;
  }

  OS << "Slices of alloca: " << AI << "\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I)
    print(OS, I);
}

LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const {
  print(dbgs(), I);
}
LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); }

#endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

namespace {
/// \brief Implementation of LoadAndStorePromoter for promoting allocas.
///
/// This subclass of LoadAndStorePromoter adds overrides to handle promoting
/// the loads and stores of an alloca instruction, as well as updating its
/// debug information. This is used when a domtree is unavailable and thus
/// mem2reg in its full form can't be used to handle promotion of allocas to
/// scalar values.
class AllocaPromoter : public LoadAndStorePromoter {
  AllocaInst &AI;
  DIBuilder &DIB;

  SmallVector<DbgDeclareInst *, 4> DDIs;
  SmallVector<DbgValueInst *, 4> DVIs;

public:
  AllocaPromoter(const SmallVectorImpl<Instruction *> &Insts, SSAUpdater &S,
                 AllocaInst &AI, DIBuilder &DIB)
      : LoadAndStorePromoter(Insts, S), AI(AI), DIB(DIB) {}

  void run(const SmallVectorImpl<Instruction *> &Insts) {
    // Retain the debug information attached to the alloca for use when
    // rewriting loads and stores.
    if (MDNode *DebugNode = MDNode::getIfExists(AI.getContext(), &AI)) {
      for (User *U : DebugNode->users())
        if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
          DDIs.push_back(DDI);
        else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U))
          DVIs.push_back(DVI);
    }

    LoadAndStorePromoter::run(Insts);

    // While we have the debug information, clear it off of the alloca. The
    // caller takes care of deleting the alloca.
    while (!DDIs.empty())
      DDIs.pop_back_val()->eraseFromParent();
    while (!DVIs.empty())
      DVIs.pop_back_val()->eraseFromParent();
  }

  bool isInstInList(Instruction *I,
                    const SmallVectorImpl<Instruction *> &Insts) const override {
    Value *Ptr;
    if (LoadInst *LI = dyn_cast<LoadInst>(I))
      Ptr = LI->getOperand(0);
    else
      Ptr = cast<StoreInst>(I)->getPointerOperand();

    // Only used to detect cycles, which will be rare and quickly found as
    // we're walking up a chain of defs rather than down through uses.
    SmallPtrSet<Value *, 4> Visited;

    do {
      if (Ptr == &AI)
        return true;

      if (BitCastInst *BCI = dyn_cast<BitCastInst>(Ptr))
        Ptr = BCI->getOperand(0);
      else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
        Ptr = GEPI->getPointerOperand();
      else
        return false;

    } while (Visited.insert(Ptr));

    return false;
  }

  void updateDebugInfo(Instruction *Inst) const override {
    for (SmallVectorImpl<DbgDeclareInst *>::const_iterator I = DDIs.begin(),
           E = DDIs.end(); I != E; ++I) {
      DbgDeclareInst *DDI = *I;
      if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
        ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
      else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
        ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
    }
    for (SmallVectorImpl<DbgValueInst *>::const_iterator I = DVIs.begin(),
           E = DVIs.end(); I != E; ++I) {
      DbgValueInst *DVI = *I;
      Value *Arg = nullptr;
      if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        // If an argument is zero-extended then use the argument directly. The
        // ZExt may be zapped by an optimization pass in the future.
        if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
          Arg = dyn_cast<Argument>(ZExt->getOperand(0));
        else if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
          Arg = dyn_cast<Argument>(SExt->getOperand(0));
        if (!Arg)
          Arg = SI->getValueOperand();
      } else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        Arg = LI->getPointerOperand();
      } else {
        continue;
      }
      Instruction *DbgVal =
          DIB.insertDbgValueIntrinsic(Arg, 0, DIVariable(DVI->getVariable()),
                                      Inst);
      DbgVal->setDebugLoc(DVI->getDebugLoc());
    }
  }
};
} // end anon namespace


namespace {
/// \brief An optimization pass providing Scalar Replacement of Aggregates.
///
/// This pass takes allocations which can be completely analyzed (that is, they
/// don't escape) and tries to turn them into scalar SSA values. There are
/// a few steps to this process.
///
/// 1) It takes allocations of aggregates and analyzes the ways in which they
///    are used to try to split them into smaller allocations, ideally of
///    a single scalar data type. It will split up memcpy and memset accesses
///    as necessary and try to isolate individual scalar accesses.
/// 2) It will transform accesses into forms which are suitable for SSA value
///    promotion (see the example below). This can be replacing a memset with
///    a scalar store of an integer value, or it can involve speculating
///    operations on a PHI or select to be a PHI or select of the results.
/// 3) Finally, this will try to detect a pattern of accesses which map cleanly
///    onto insert and extract operations on a vector value, and convert them to
///    this form. By doing so, it will enable promotion of vector aggregates to
///    SSA vector values.
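///
/// As an illustration of step 2 (a sketch with invented value names), the
/// memset:
///
///   %a = alloca i32
///   %c = bitcast i32* %a to i8*
///   call void @llvm.memset.p0i8.i64(i8* %c, i8 0, i64 4, i32 4, i1 false)
///
/// can be rewritten as a plain scalar store, "store i32 0, i32* %a", which
/// mem2reg can then promote.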
class SROA : public FunctionPass {
  const bool RequiresDomTree;

  LLVMContext *C;
  const DataLayout *DL;
  DominatorTree *DT;

  /// \brief Worklist of alloca instructions to simplify.
  ///
  /// Each alloca in the function is added to this. Each new alloca formed gets
  /// added to it as well to recursively simplify unless that alloca can be
  /// directly promoted. Finally, each time we rewrite a use of an alloca other
  /// than the one being actively rewritten, we add it back onto the list if
  /// not already present to ensure it is re-visited.
  SetVector<AllocaInst *, SmallVector<AllocaInst *, 16> > Worklist;

  /// \brief A collection of instructions to delete.
  /// We try to batch deletions to simplify code and make things a bit more
  /// efficient.
  SetVector<Instruction *, SmallVector<Instruction *, 8> > DeadInsts;

  /// \brief Post-promotion worklist.
  ///
  /// Sometimes we discover an alloca which has a high probability of becoming
  /// viable for SROA after a round of promotion takes place. In those cases,
  /// the alloca is enqueued here for re-processing.
  ///
  /// Note that we have to be very careful to clear allocas out of this list in
  /// the event they are deleted.
  SetVector<AllocaInst *, SmallVector<AllocaInst *, 16> > PostPromotionWorklist;

  /// \brief A collection of alloca instructions we can directly promote.
  std::vector<AllocaInst *> PromotableAllocas;

  /// \brief A worklist of PHIs to speculate prior to promoting allocas.
  ///
  /// All of these PHIs have been checked for the safety of speculation and by
  /// being speculated will allow promoting allocas currently in the promotable
  /// queue.
  SetVector<PHINode *, SmallVector<PHINode *, 2> > SpeculatablePHIs;

  /// \brief A worklist of select instructions to speculate prior to promoting
  /// allocas.
  ///
  /// All of these select instructions have been checked for the safety of
  /// speculation and by being speculated will allow promoting allocas
  /// currently in the promotable queue.
  SetVector<SelectInst *, SmallVector<SelectInst *, 2> > SpeculatableSelects;

public:
  SROA(bool RequiresDomTree = true)
      : FunctionPass(ID), RequiresDomTree(RequiresDomTree),
        C(nullptr), DL(nullptr), DT(nullptr) {
    initializeSROAPass(*PassRegistry::getPassRegistry());
  }
  bool runOnFunction(Function &F) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  const char *getPassName() const override { return "SROA"; }
  static char ID;

private:
  friend class PHIOrSelectSpeculator;
  friend class AllocaSliceRewriter;

  bool rewritePartition(AllocaInst &AI, AllocaSlices &S,
                        AllocaSlices::iterator B, AllocaSlices::iterator E,
                        int64_t BeginOffset, int64_t EndOffset,
                        ArrayRef<AllocaSlices::iterator> SplitUses);
  bool splitAlloca(AllocaInst &AI, AllocaSlices &S);
  bool runOnAlloca(AllocaInst &AI);
  void clobberUse(Use &U);
  void deleteDeadInstructions(SmallPtrSet<AllocaInst *, 4> &DeletedAllocas);
  bool promoteAllocas(Function &F);
};
}

char SROA::ID = 0;

FunctionPass *llvm::createSROAPass(bool RequiresDomTree) {
  return new SROA(RequiresDomTree);
}

INITIALIZE_PASS_BEGIN(SROA, "sroa", "Scalar Replacement Of Aggregates",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(SROA, "sroa", "Scalar Replacement Of Aggregates",
                    false, false)

/// Walk the range of a partitioning looking for a common type to cover this
/// sequence of slices.
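///
/// For example (an illustrative sketch): if every slice spanning the range is
/// loaded or stored as i32, the common type is i32; if one use is instead a
/// float load, no single common type exists and we fall back to the largest
/// byte-width-multiple integer type seen (here, i32).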
static Type *findCommonType(AllocaSlices::const_iterator B,
                            AllocaSlices::const_iterator E,
                            uint64_t EndOffset) {
  Type *Ty = nullptr;
  bool TyIsCommon = true;
  IntegerType *ITy = nullptr;

  // Note that we need to look at *every* alloca slice's Use to ensure we
  // always get consistent results regardless of the order of slices.
  for (AllocaSlices::const_iterator I = B; I != E; ++I) {
    Use *U = I->getUse();
    if (isa<IntrinsicInst>(*U->getUser()))
      continue;
    if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset)
      continue;

    Type *UserTy = nullptr;
    if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
      UserTy = LI->getType();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
      UserTy = SI->getValueOperand()->getType();
    }

    if (IntegerType *UserITy = dyn_cast_or_null<IntegerType>(UserTy)) {
      // If the type is larger than the partition, skip it. We only encounter
      // this for split integer operations where we want to use the type of the
      // entity causing the split. Also skip if the type is not a byte width
      // multiple.
      if (UserITy->getBitWidth() % 8 != 0 ||
          UserITy->getBitWidth() / 8 > (EndOffset - B->beginOffset()))
        continue;

      // Track the largest bitwidth integer type used in this way in case there
      // is no common type.
      if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth())
        ITy = UserITy;
    }

    // To avoid depending on the order of slices, Ty and TyIsCommon must not
    // depend on types skipped above.
    if (!UserTy || (Ty && Ty != UserTy))
      TyIsCommon = false; // Give up on anything but an iN type.
    else
      Ty = UserTy;
  }

  return TyIsCommon ? Ty : ITy;
}

/// PHI instructions that use an alloca and are subsequently loaded can be
/// rewritten to load both input pointers in the pred blocks and then PHI the
/// results, allowing the load of the alloca to be promoted.
/// From this:
///   %P2 = phi [i32* %Alloca, i32* %Other]
///   %V = load i32* %P2
/// to:
///   %V1 = load i32* %Alloca      -> will be mem2reg'd
///   ...
///   %V2 = load i32* %Other
///   ...
///   %V = phi [i32 %V1, i32 %V2]
///
/// We can do this to a select if its only uses are loads and if the operands
/// to the select can be loaded unconditionally.
///
/// FIXME: This should be hoisted into a generic utility, likely in
/// Transforms/Util/Local.h
static bool isSafePHIToSpeculate(PHINode &PN,
                                 const DataLayout *DL = nullptr) {
  // For now, we can only do this promotion if the load is in the same block
  // as the PHI, and if there are no stores between the phi and load.
  // TODO: Allow recursive phi users.
  // TODO: Allow stores.
  BasicBlock *BB = PN.getParent();
  unsigned MaxAlign = 0;
  bool HaveLoad = false;
  for (User *U : PN.users()) {
    LoadInst *LI = dyn_cast<LoadInst>(U);
    if (!LI || !LI->isSimple())
      return false;

    // For now we only allow loads in the same block as the PHI.  This is
    // a common case that happens when instcombine merges two loads through
    // a PHI.
    if (LI->getParent() != BB)
      return false;

    // Ensure that there are no instructions between the PHI and the load that
    // could store.
    for (BasicBlock::iterator BBI = &PN; &*BBI != LI; ++BBI)
      if (BBI->mayWriteToMemory())
        return false;

    MaxAlign = std::max(MaxAlign, LI->getAlignment());
    HaveLoad = true;
  }

  if (!HaveLoad)
    return false;

  // We can only transform this if it is safe to push the loads into the
  // predecessor blocks. The only thing to watch out for is that we can't put
  // a possibly trapping load in the predecessor if it is a critical edge.
  for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
    TerminatorInst *TI = PN.getIncomingBlock(Idx)->getTerminator();
    Value *InVal = PN.getIncomingValue(Idx);

    // If the value is produced by the terminator of the predecessor (an
    // invoke) or it has side-effects, there is no valid place to put a load
    // in the predecessor.
    if (TI == InVal || TI->mayHaveSideEffects())
      return false;

    // If the predecessor has a single successor, then the edge isn't
    // critical.
    if (TI->getNumSuccessors() == 1)
      continue;

    // If this pointer is always safe to load, or if we can prove that there
    // is already a load in the block, then we can move the load to the pred
    // block.
    if (InVal->isDereferenceablePointer(DL) ||
        isSafeToLoadUnconditionally(InVal, TI, MaxAlign, DL))
      continue;

    return false;
  }

  return true;
}

static void speculatePHINodeLoads(PHINode &PN) {
  DEBUG(dbgs() << "    original: " << PN << "\n");

  Type *LoadTy = cast<PointerType>(PN.getType())->getElementType();
  IRBuilderTy PHIBuilder(&PN);
  PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(),
                                        PN.getName() + ".sroa.speculated");

  // Get the TBAA tag and alignment to use from one of the loads. It doesn't
  // matter which one we get or whether any differ.
  LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());
  MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa);
  unsigned Align = SomeLoad->getAlignment();

  // Rewrite all loads of the PN to use the new PHI.
  while (!PN.use_empty()) {
    LoadInst *LI = cast<LoadInst>(PN.user_back());
    LI->replaceAllUsesWith(NewPN);
    LI->eraseFromParent();
  }

  // Inject loads into all of the pred blocks.
  for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
    BasicBlock *Pred = PN.getIncomingBlock(Idx);
    TerminatorInst *TI = Pred->getTerminator();
    Value *InVal = PN.getIncomingValue(Idx);
    IRBuilderTy PredBuilder(TI);

    LoadInst *Load = PredBuilder.CreateLoad(
        InVal, (PN.getName() + ".sroa.speculate.load." + Pred->getName()));
    ++NumLoadsSpeculated;
    Load->setAlignment(Align);
    if (TBAATag)
      Load->setMetadata(LLVMContext::MD_tbaa, TBAATag);
    NewPN->addIncoming(Load, Pred);
  }

  DEBUG(dbgs() << "          speculated to: " << *NewPN << "\n");
  PN.eraseFromParent();
}

/// Select instructions that use an alloca and are subsequently loaded can be
/// rewritten to load both input pointers and then select between the result,
/// allowing the load of the alloca to be promoted.
/// From this:
///   %P2 = select i1 %cond, i32* %Alloca, i32* %Other
///   %V = load i32* %P2
/// to:
///   %V1 = load i32* %Alloca      -> will be mem2reg'd
///   %V2 = load i32* %Other
///   %V = select i1 %cond, i32 %V1, i32 %V2
///
/// We can do this to a select if its only uses are loads and if the operands
/// to the select can be loaded unconditionally.
static bool isSafeSelectToSpeculate(SelectInst &SI,
                                    const DataLayout *DL = nullptr) {
  Value *TValue = SI.getTrueValue();
  Value *FValue = SI.getFalseValue();
  bool TDerefable = TValue->isDereferenceablePointer(DL);
  bool FDerefable = FValue->isDereferenceablePointer(DL);

  for (User *U : SI.users()) {
    LoadInst *LI = dyn_cast<LoadInst>(U);
    if (!LI || !LI->isSimple())
      return false;

    // Both operands to the select need to be dereferenceable, either
    // absolutely (e.g. allocas) or at this point because we can see other
    // accesses to it.
    if (!TDerefable &&
        !isSafeToLoadUnconditionally(TValue, LI, LI->getAlignment(), DL))
      return false;
    if (!FDerefable &&
        !isSafeToLoadUnconditionally(FValue, LI, LI->getAlignment(), DL))
      return false;
  }

  return true;
}

static void speculateSelectInstLoads(SelectInst &SI) {
  DEBUG(dbgs() << "    original: " << SI << "\n");

  IRBuilderTy IRB(&SI);
  Value *TV = SI.getTrueValue();
  Value *FV = SI.getFalseValue();
  // Replace the loads of the select with a select of two loads.
  while (!SI.use_empty()) {
    LoadInst *LI = cast<LoadInst>(SI.user_back());
    assert(LI->isSimple() && "We only speculate simple loads");

    IRB.SetInsertPoint(LI);
    LoadInst *TL =
        IRB.CreateLoad(TV, LI->getName() + ".sroa.speculate.load.true");
    LoadInst *FL =
        IRB.CreateLoad(FV, LI->getName() + ".sroa.speculate.load.false");
    NumLoadsSpeculated += 2;

    // Transfer alignment and TBAA info if present.
    TL->setAlignment(LI->getAlignment());
    FL->setAlignment(LI->getAlignment());
    if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa)) {
      TL->setMetadata(LLVMContext::MD_tbaa, Tag);
      FL->setMetadata(LLVMContext::MD_tbaa, Tag);
    }

    Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL,
                                LI->getName() + ".sroa.speculated");

    DEBUG(dbgs() << "          speculated to: " << *V << "\n");
    LI->replaceAllUsesWith(V);
    LI->eraseFromParent();
  }
  SI.eraseFromParent();
}

/// \brief Build a GEP out of a base pointer and indices.
///
/// This will return the BasePtr if that is valid, or build a new GEP
/// instruction using the IRBuilder if GEP-ing is needed.
static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
                       SmallVectorImpl<Value *> &Indices, Twine NamePrefix) {
  if (Indices.empty())
    return BasePtr;

  // A single zero index is a no-op, so check for this and avoid building a GEP
  // in that case.
  if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
    return BasePtr;

  return IRB.CreateInBoundsGEP(BasePtr, Indices, NamePrefix + "sroa_idx");
}

/// \brief Get a natural GEP off of the BasePtr walking through Ty toward
/// TargetTy without changing the offset of the pointer.
///
/// This routine assumes we've already established a properly offset GEP with
/// Indices, and arrived at the Ty type. The goal is to continue to GEP with
/// zero-indices down through type layers until we find one the same as
/// TargetTy. If we can't find one with the same type, we at least try to use
/// one with the same size. If none of that works, we just produce the GEP as
/// indicated by Indices to have the correct offset.
1285 static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
1286                                     Value *BasePtr, Type *Ty, Type *TargetTy,
1287                                     SmallVectorImpl<Value *> &Indices,
1288                                     Twine NamePrefix) {
1289   if (Ty == TargetTy)
1290     return buildGEP(IRB, BasePtr, Indices, NamePrefix);
1291 
1292   // Pointer size to use for the indices.
1293   unsigned PtrSize = DL.getPointerTypeSizeInBits(BasePtr->getType());
1294 
1295   // See if we can descend into a struct and locate a field with the correct
1296   // type.
1297   unsigned NumLayers = 0;
1298   Type *ElementTy = Ty;
1299   do {
1300     if (ElementTy->isPointerTy())
1301       break;
1302 
1303     if (ArrayType *ArrayTy = dyn_cast<ArrayType>(ElementTy)) {
1304       ElementTy = ArrayTy->getElementType();
1305       Indices.push_back(IRB.getIntN(PtrSize, 0));
1306     } else if (VectorType *VectorTy = dyn_cast<VectorType>(ElementTy)) {
1307       ElementTy = VectorTy->getElementType();
1308       Indices.push_back(IRB.getInt32(0));
1309     } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
1310       if (STy->element_begin() == STy->element_end())
1311         break; // Nothing left to descend into.
1312       ElementTy = *STy->element_begin();
1313       Indices.push_back(IRB.getInt32(0));
1314     } else {
1315       break;
1316     }
1317     ++NumLayers;
1318   } while (ElementTy != TargetTy);
1319   if (ElementTy != TargetTy)
1320     Indices.erase(Indices.end() - NumLayers, Indices.end());
1321 
1322   return buildGEP(IRB, BasePtr, Indices, NamePrefix);
1323 }
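
// Illustrative walk: with Ty == { [4 x i32] }, TargetTy == i32, and Indices
// already holding [i64 0], the loop appends i32 0 (struct field) and then
// i64 0 (array element), so buildGEP emits
//   getelementptr inbounds { [4 x i32] }* %base, i64 0, i32 0, i64 0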
1324 
1325 /// \brief Recursively compute indices for a natural GEP.
1326 ///
1327 /// This is the recursive step for getNaturalGEPWithOffset that walks down the
1328 /// element types adding appropriate indices for the GEP.
1329 static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
1330                                        Value *Ptr, Type *Ty, APInt &Offset,
1331                                        Type *TargetTy,
1332                                        SmallVectorImpl<Value *> &Indices,
1333                                        Twine NamePrefix) {
1334   if (Offset == 0)
1335     return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices, NamePrefix);
1336 
1337   // We can't recurse through pointer types.
1338   if (Ty->isPointerTy())
1339     return nullptr;
1340 
1341   // We try to analyze GEPs over vectors here, but note that these GEPs are
1342   // extremely poorly defined currently. The long-term goal is to remove GEPing
1343   // over a vector from the IR completely.
1344   if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
1345     unsigned ElementSizeInBits = DL.getTypeSizeInBits(VecTy->getScalarType());
1346     if (ElementSizeInBits % 8 != 0) {
1347       // GEPs over vector elements whose size isn't a multiple of 8 bits are invalid.
1348       return nullptr;
1349     }
1350     APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
1351     APInt NumSkippedElements = Offset.sdiv(ElementSize);
1352     if (NumSkippedElements.ugt(VecTy->getNumElements()))
1353       return nullptr;
1354     Offset -= NumSkippedElements * ElementSize;
1355     Indices.push_back(IRB.getInt(NumSkippedElements));
1356     return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(),
1357                                     Offset, TargetTy, Indices, NamePrefix);
1358   }
1359 
1360   if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
1361     Type *ElementTy = ArrTy->getElementType();
1362     APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
1363     APInt NumSkippedElements = Offset.sdiv(ElementSize);
1364     if (NumSkippedElements.ugt(ArrTy->getNumElements()))
1365       return nullptr;
1366 
1367     Offset -= NumSkippedElements * ElementSize;
1368     Indices.push_back(IRB.getInt(NumSkippedElements));
1369     return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
1370                                     Indices, NamePrefix);
1371   }
1372 
1373   StructType *STy = dyn_cast<StructType>(Ty);
1374   if (!STy)
1375     return nullptr;
1376 
1377   const StructLayout *SL = DL.getStructLayout(STy);
1378   uint64_t StructOffset = Offset.getZExtValue();
1379   if (StructOffset >= SL->getSizeInBytes())
1380     return nullptr;
1381   unsigned Index = SL->getElementContainingOffset(StructOffset);
1382   Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index));
1383   Type *ElementTy = STy->getElementType(Index);
1384   if (Offset.uge(DL.getTypeAllocSize(ElementTy)))
1385     return nullptr; // The offset points into alignment padding.
1386 
1387   Indices.push_back(IRB.getInt32(Index));
1388   return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
1389                                   Indices, NamePrefix);
1390 }
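
// Worked example (illustrative): for Ty == [10 x i32], Offset == 20, and
// TargetTy == i32, ElementSize is 4 bytes, NumSkippedElements is 5, and
// Offset drops to 0, so the recursion bottoms out in getNaturalGEPWithType
// with an index of 5 appended.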
1391 
1392 /// \brief Get a natural GEP from a base pointer to a particular offset and
1393 /// resulting in a particular type.
1394 ///
1395 /// The goal is to produce a "natural" looking GEP that works with the existing
1396 /// composite types to arrive at the appropriate offset and element type for
1397 /// a pointer. TargetTy is the element type the returned GEP should point-to if
1398 /// possible. We recurse by decreasing Offset, adding the appropriate index to
1399 /// Indices, and setting Ty to the result subtype.
1400 ///
1401 /// If no natural GEP can be constructed, this function returns null.
1402 static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
1403                                       Value *Ptr, APInt Offset, Type *TargetTy,
1404                                       SmallVectorImpl<Value *> &Indices,
1405                                       Twine NamePrefix) {
1406   PointerType *Ty = cast<PointerType>(Ptr->getType());
1407 
1408   // Don't consider any GEPs through an i8* as natural unless the TargetTy is
1409   // an i8.
1410   if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) && TargetTy->isIntegerTy(8))
1411     return nullptr;
1412 
1413   Type *ElementTy = Ty->getElementType();
1414   if (!ElementTy->isSized())
1415     return nullptr; // We can't GEP through an unsized element.
1416   APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
1417   if (ElementSize == 0)
1418     return nullptr; // Zero-length arrays can't help us build a natural GEP.
1419   APInt NumSkippedElements = Offset.sdiv(ElementSize);
1420 
1421   Offset -= NumSkippedElements * ElementSize;
1422   Indices.push_back(IRB.getInt(NumSkippedElements));
1423   return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
1424                                   Indices, NamePrefix);
1425 }
1426 
1427 /// \brief Compute an adjusted pointer from Ptr by Offset bytes where the
1428 /// resulting pointer has PointerTy.
1429 ///
1430 /// This tries very hard to compute a "natural" GEP which arrives at the offset
1431 /// and produces the pointer type desired. Where it cannot, it will try to use
1432 /// the natural GEP to arrive at the offset and bitcast to the type. Where that
1433 /// fails, it will try to use an existing i8* and GEP to the byte offset and
1434 /// bitcast to the type.
1435 ///
1436 /// The strategy for finding the more natural GEPs is to peel off layers of the
1437 /// pointer, walking back through bit casts and GEPs, searching for a base
1438 /// pointer from which we can compute a natural GEP with the desired
1439 /// properties. The algorithm tries to fold as many constant indices into
1440 /// a single GEP as possible, thus making each GEP more independent of the
1441 /// surrounding code.
1442 static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
1443                              APInt Offset, Type *PointerTy,
1444                              Twine NamePrefix) {
1445   // Even though we don't look through PHI nodes, we could be called on an
1446   // instruction in an unreachable block, which may be on a cycle.
1447   SmallPtrSet<Value *, 4> Visited;
1448   Visited.insert(Ptr);
1449   SmallVector<Value *, 4> Indices;
1450 
1451   // We may end up computing an offset pointer that has the wrong type. If we
1452   // never are able to compute one directly that has the correct type, we'll
1453   // fall back to it, so keep it around here.
1454   Value *OffsetPtr = nullptr;
1455 
1456   // Remember any i8 pointer we come across to re-use if we need to do a raw
1457   // byte offset.
1458   Value *Int8Ptr = nullptr;
1459   APInt Int8PtrOffset(Offset.getBitWidth(), 0);
1460 
1461   Type *TargetTy = PointerTy->getPointerElementType();
1462 
1463   do {
1464     // First fold any existing GEPs into the offset.
1465     while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
1466       APInt GEPOffset(Offset.getBitWidth(), 0);
1467       if (!GEP->accumulateConstantOffset(DL, GEPOffset))
1468         break;
1469       Offset += GEPOffset;
1470       Ptr = GEP->getPointerOperand();
1471       if (!Visited.insert(Ptr))
1472         break;
1473     }
1474 
1475     // See if we can perform a natural GEP here.
1476     Indices.clear();
1477     if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy,
1478                                            Indices, NamePrefix)) {
1479       if (P->getType() == PointerTy) {
1480         // Zap any offset pointer that we ended up computing in previous rounds.
1481         if (OffsetPtr && OffsetPtr->use_empty())
1482           if (Instruction *I = dyn_cast<Instruction>(OffsetPtr))
1483             I->eraseFromParent();
1484         return P;
1485       }
1486       if (!OffsetPtr) {
1487         OffsetPtr = P;
1488       }
1489     }
1490 
1491     // Stash this pointer if we've found an i8*.
1492     if (Ptr->getType()->isIntegerTy(8)) {
1493       Int8Ptr = Ptr;
1494       Int8PtrOffset = Offset;
1495     }
1496 
1497     // Peel off a layer of the pointer and update the offset appropriately.
1498     if (Operator::getOpcode(Ptr) == Instruction::BitCast) {
1499       Ptr = cast<Operator>(Ptr)->getOperand(0);
1500     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
1501       if (GA->mayBeOverridden())
1502         break;
1503       Ptr = GA->getAliasee();
1504     } else {
1505       break;
1506     }
1507     assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!");
1508   } while (Visited.insert(Ptr));
1509 
1510   if (!OffsetPtr) {
1511     if (!Int8Ptr) {
1512       Int8Ptr = IRB.CreateBitCast(
1513           Ptr, IRB.getInt8PtrTy(PointerTy->getPointerAddressSpace()),
1514           NamePrefix + "sroa_raw_cast");
1515       Int8PtrOffset = Offset;
1516     }
1517 
1518     OffsetPtr = Int8PtrOffset == 0 ? Int8Ptr :
1519       IRB.CreateInBoundsGEP(Int8Ptr, IRB.getInt(Int8PtrOffset),
1520                             NamePrefix + "sroa_raw_idx");
1521   }
1522   Ptr = OffsetPtr;
1523 
1524   // On the off chance we were targeting i8*, guard the bitcast here.
1525   if (Ptr->getType() != PointerTy)
1526     Ptr = IRB.CreateBitCast(Ptr, PointerTy, NamePrefix + "sroa_cast");
1527 
1528   return Ptr;
1529 }
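
// End-to-end sketch (hypothetical values): asking for Offset == 8 as i32*
// off of
//   %p = getelementptr inbounds [8 x i32]* %a, i64 0, i64 2
// folds the GEP's constant 8 bytes into the running offset (16 in total),
// retries from %a, and emits the natural GEP
//   getelementptr inbounds [8 x i32]* %a, i64 0, i64 4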
1530 
1531 /// \brief Test whether we can convert a value from the old to the new type.
1532 ///
1533 /// This predicate should be used to guard calls to convertValue in order to
1534 /// ensure that we only try to convert viable values. The strategy is that we
1535 /// will peel off single element struct and array wrappings to get to an
1536 /// underlying value, and convert that value.
1537 static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) {
1538   if (OldTy == NewTy)
1539     return true;
1540   if (IntegerType *OldITy = dyn_cast<IntegerType>(OldTy))
1541     if (IntegerType *NewITy = dyn_cast<IntegerType>(NewTy))
1542       if (NewITy->getBitWidth() >= OldITy->getBitWidth())
1543         return true;
1544   if (DL.getTypeSizeInBits(NewTy) != DL.getTypeSizeInBits(OldTy))
1545     return false;
1546   if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType())
1547     return false;
1548 
1549   // We can convert pointers to integers and vice-versa. Same for vectors
1550   // of pointers and integers.
1551   OldTy = OldTy->getScalarType();
1552   NewTy = NewTy->getScalarType();
1553   if (NewTy->isPointerTy() || OldTy->isPointerTy()) {
1554     if (NewTy->isPointerTy() && OldTy->isPointerTy())
1555       return true;
1556     if (NewTy->isIntegerTy() || OldTy->isIntegerTy())
1557       return true;
1558     return false;
1559   }
1560 
1561   return true;
1562 }
1563 
1564 /// \brief Generic routine to convert an SSA value to a value of a different
1565 /// type.
1566 ///
1567 /// This will try various different casting techniques, such as bitcasts,
1568 /// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test
1569 /// two types for viability with this routine.
1570 static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
1571                            Type *NewTy) {
1572   Type *OldTy = V->getType();
1573   assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertable to type");
1574 
1575   if (OldTy == NewTy)
1576     return V;
1577 
1578   if (IntegerType *OldITy = dyn_cast<IntegerType>(OldTy))
1579     if (IntegerType *NewITy = dyn_cast<IntegerType>(NewTy))
1580       if (NewITy->getBitWidth() > OldITy->getBitWidth())
1581         return IRB.CreateZExt(V, NewITy);
1582 
1583   // See if we need inttoptr for this type pair. A cast involving both scalars
1584   // and vectors requires an additional bitcast.
1585   if (OldTy->getScalarType()->isIntegerTy() &&
1586       NewTy->getScalarType()->isPointerTy()) {
1587     // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8*
1588     if (OldTy->isVectorTy() && !NewTy->isVectorTy())
1589       return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
1590                                 NewTy);
1591 
1592     // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*>
1593     if (!OldTy->isVectorTy() && NewTy->isVectorTy())
1594       return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
1595                                 NewTy);
1596 
1597     return IRB.CreateIntToPtr(V, NewTy);
1598   }
1599 
1600   // See if we need ptrtoint for this type pair. A cast involving both scalars
1601   // and vectors requires an additional bitcast.
1602   if (OldTy->getScalarType()->isPointerTy() &&
1603       NewTy->getScalarType()->isIntegerTy()) {
1604     // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128
1605     if (OldTy->isVectorTy() && !NewTy->isVectorTy())
1606       return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
1607                                NewTy);
1608 
1609     // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32>
1610     if (!OldTy->isVectorTy() && NewTy->isVectorTy())
1611       return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
1612                                NewTy);
1613 
1614     return IRB.CreatePtrToInt(V, NewTy);
1615   }
1616 
1617   return IRB.CreateBitCast(V, NewTy);
1618 }
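
// Hedged example (assumes a target with 64-bit pointers): converting
// <2 x i32> to i8* takes the vector-to-scalar inttoptr path above:
//   %0 = bitcast <2 x i32> %v to i64
//   %1 = inttoptr i64 %0 to i8*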
1619 
1620 /// \brief Test whether the given slice use can be promoted to a vector.
1621 ///
1622 /// This function is called to test each entry in a partitioning which is slated
1623 /// for a single slice.
1624 static bool isVectorPromotionViableForSlice(
1625     const DataLayout &DL, AllocaSlices &S, uint64_t SliceBeginOffset,
1626     uint64_t SliceEndOffset, VectorType *Ty, uint64_t ElementSize,
1627     AllocaSlices::const_iterator I) {
1628   // First validate the slice offsets.
1629   uint64_t BeginOffset =
1630       std::max(I->beginOffset(), SliceBeginOffset) - SliceBeginOffset;
1631   uint64_t BeginIndex = BeginOffset / ElementSize;
1632   if (BeginIndex * ElementSize != BeginOffset ||
1633       BeginIndex >= Ty->getNumElements())
1634     return false;
1635   uint64_t EndOffset =
1636       std::min(I->endOffset(), SliceEndOffset) - SliceBeginOffset;
1637   uint64_t EndIndex = EndOffset / ElementSize;
1638   if (EndIndex * ElementSize != EndOffset || EndIndex > Ty->getNumElements())
1639     return false;
1640 
1641   assert(EndIndex > BeginIndex && "Empty vector!");
1642   uint64_t NumElements = EndIndex - BeginIndex;
1643   Type *SliceTy =
1644       (NumElements == 1) ? Ty->getElementType()
1645                          : VectorType::get(Ty->getElementType(), NumElements);
1646 
1647   Type *SplitIntTy =
1648       Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8);
1649 
1650   Use *U = I->getUse();
1651 
1652   if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
1653     if (MI->isVolatile())
1654       return false;
1655     if (!I->isSplittable())
1656       return false; // Skip any unsplittable intrinsics.
1657   } else if (U->get()->getType()->getPointerElementType()->isStructTy()) {
1658     // Disable vector promotion when there are loads or stores of an FCA.
1659     return false;
1660   } else if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
1661     if (LI->isVolatile())
1662       return false;
1663     Type *LTy = LI->getType();
1664     if (SliceBeginOffset > I->beginOffset() ||
1665         SliceEndOffset < I->endOffset()) {
1666       assert(LTy->isIntegerTy());
1667       LTy = SplitIntTy;
1668     }
1669     if (!canConvertValue(DL, SliceTy, LTy))
1670       return false;
1671   } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
1672     if (SI->isVolatile())
1673       return false;
1674     Type *STy = SI->getValueOperand()->getType();
1675     if (SliceBeginOffset > I->beginOffset() ||
1676         SliceEndOffset < I->endOffset()) {
1677       assert(STy->isIntegerTy());
1678       STy = SplitIntTy;
1679     }
1680     if (!canConvertValue(DL, STy, SliceTy))
1681       return false;
1682   } else {
1683     return false;
1684   }
1685 
1686   return true;
1687 }
1688 
1689 /// \brief Test whether the given alloca partitioning and range of slices can be
1690 /// promoted to a vector.
1691 ///
1692 /// This is a quick test to check whether we can rewrite a particular alloca
1693 /// partition (and its newly formed alloca) into a vector alloca with only
1694 /// whole-vector loads and stores such that it could be promoted to a vector
1695 /// SSA value. We only can ensure this for a limited set of operations, and we
1696 /// don't want to do the rewrites unless we are confident that the result will
1697 /// be promotable, so we have an early test here.
1698 static bool
1699 isVectorPromotionViable(const DataLayout &DL, Type *AllocaTy, AllocaSlices &S,
1700                         uint64_t SliceBeginOffset, uint64_t SliceEndOffset,
1701                         AllocaSlices::const_iterator I,
1702                         AllocaSlices::const_iterator E,
1703                         ArrayRef<AllocaSlices::iterator> SplitUses) {
1704   VectorType *Ty = dyn_cast<VectorType>(AllocaTy);
1705   if (!Ty)
1706     return false;
1707 
1708   uint64_t ElementSize = DL.getTypeSizeInBits(Ty->getScalarType());
1709 
1710   // While the definition of LLVM vectors is bitpacked, we don't support sizes
1711   // that aren't byte sized.
1712   if (ElementSize % 8)
1713     return false;
1714   assert((DL.getTypeSizeInBits(Ty) % 8) == 0 &&
1715          "vector size not a multiple of element size?");
1716   ElementSize /= 8;
1717 
1718   for (; I != E; ++I)
1719     if (!isVectorPromotionViableForSlice(DL, S, SliceBeginOffset,
1720                                          SliceEndOffset, Ty, ElementSize, I))
1721       return false;
1722 
1723   for (ArrayRef<AllocaSlices::iterator>::const_iterator SUI = SplitUses.begin(),
1724                                                         SUE = SplitUses.end();
1725        SUI != SUE; ++SUI)
1726     if (!isVectorPromotionViableForSlice(DL, S, SliceBeginOffset,
1727                                          SliceEndOffset, Ty, ElementSize, *SUI))
1728       return false;
1729 
1730   return true;
1731 }
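
// Illustrative check: for AllocaTy == <4 x float>, ElementSize starts as 32
// bits and becomes 4 bytes; a slice covering bytes [4, 12) then maps to
// elements [1, 3) and is vetted by the helper above as a <2 x float> access.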
1732 
1733 /// \brief Test whether a slice of an alloca is valid for integer widening.
1734 ///
1735 /// This implements the necessary checking for the \c isIntegerWideningViable
1736 /// test below on a single slice of the alloca.
1737 static bool isIntegerWideningViableForSlice(const DataLayout &DL,
1738                                             Type *AllocaTy,
1739                                             uint64_t AllocBeginOffset,
1740                                             uint64_t Size, AllocaSlices &S,
1741                                             AllocaSlices::const_iterator I,
1742                                             bool &WholeAllocaOp) {
1743   uint64_t RelBegin = I->beginOffset() - AllocBeginOffset;
1744   uint64_t RelEnd = I->endOffset() - AllocBeginOffset;
1745 
1746   // We can't reasonably handle cases where the load or store extends past
1747   // the end of the alloca's type and into its padding.
1748   if (RelEnd > Size)
1749     return false;
1750 
1751   Use *U = I->getUse();
1752 
1753   if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
1754     if (LI->isVolatile())
1755       return false;
1756     if (RelBegin == 0 && RelEnd == Size)
1757       WholeAllocaOp = true;
1758     if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
1759       if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy))
1760         return false;
1761     } else if (RelBegin != 0 || RelEnd != Size ||
1762                !canConvertValue(DL, AllocaTy, LI->getType())) {
1763       // Non-integer loads need to be convertible from the alloca type so that
1764       // they are promotable.
1765       return false;
1766     }
1767   } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
1768     Type *ValueTy = SI->getValueOperand()->getType();
1769     if (SI->isVolatile())
1770       return false;
1771     if (RelBegin == 0 && RelEnd == Size)
1772       WholeAllocaOp = true;
1773     if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) {
1774       if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy))
1775         return false;
1776     } else if (RelBegin != 0 || RelEnd != Size ||
1777                !canConvertValue(DL, ValueTy, AllocaTy)) {
1778       // Non-integer stores need to be convertible to the alloca type so that
1779       // they are promotable.
1780       return false;
1781     }
1782   } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
1783     if (MI->isVolatile() || !isa<Constant>(MI->getLength()))
1784       return false;
1785     if (!I->isSplittable())
1786       return false; // Skip any unsplittable intrinsics.
1787   } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
1788     if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
1789         II->getIntrinsicID() != Intrinsic::lifetime_end)
1790       return false;
1791   } else {
1792     return false;
1793   }
1794 
1795   return true;
1796 }
1797 
1798 /// \brief Test whether the given alloca partition's integer operations can be
1799 /// widened to promotable ones.
1800 ///
1801 /// This is a quick test to check whether we can rewrite the integer loads and
1802 /// stores to a particular alloca into wider loads and stores and be able to
1803 /// promote the resulting alloca.
1804 static bool
1805 isIntegerWideningViable(const DataLayout &DL, Type *AllocaTy,
1806                         uint64_t AllocBeginOffset, AllocaSlices &S,
1807                         AllocaSlices::const_iterator I,
1808                         AllocaSlices::const_iterator E,
1809                         ArrayRef<AllocaSlices::iterator> SplitUses) {
1810   uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy);
1811   // Don't create integer types larger than the maximum bitwidth.
1812   if (SizeInBits > IntegerType::MAX_INT_BITS)
1813     return false;
1814 
1815   // Don't try to handle allocas with bit-padding.
1816   if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy))
1817     return false;
1818 
1819   // We need to ensure that an integer type with the appropriate bitwidth can
1820   // be converted to the alloca type, whatever that is. We don't want to force
1821   // the alloca itself to have an integer type if there is a more suitable one.
1822   Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits);
1823   if (!canConvertValue(DL, AllocaTy, IntTy) ||
1824       !canConvertValue(DL, IntTy, AllocaTy))
1825     return false;
1826 
1827   uint64_t Size = DL.getTypeStoreSize(AllocaTy);
1828 
1829   // While examining uses, we ensure that the alloca has a covering load or
1830   // store. We don't want to widen the integer operations only to fail to
1831   // promote due to some other unsplittable entry (which we may make splittable
1832   // later). However, if there are only splittable uses, go ahead and assume
1833   // that we cover the alloca.
1834   bool WholeAllocaOp = (I != E) ? false : DL.isLegalInteger(SizeInBits);
1835 
1836   for (; I != E; ++I)
1837     if (!isIntegerWideningViableForSlice(DL, AllocaTy, AllocBeginOffset, Size,
1838                                          S, I, WholeAllocaOp))
1839       return false;
1840 
1841   for (ArrayRef<AllocaSlices::iterator>::const_iterator SUI = SplitUses.begin(),
1842                                                         SUE = SplitUses.end();
1843        SUI != SUE; ++SUI)
1844     if (!isIntegerWideningViableForSlice(DL, AllocaTy, AllocBeginOffset, Size,
1845                                          S, *SUI, WholeAllocaOp))
1846       return false;
1847 
1848   return WholeAllocaOp;
1849 }
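
// Illustrative outcome: an i64 alloca (no bit padding) reached only by two
// i32 loads at offsets 0 and 4 never sets WholeAllocaOp, so widening is
// rejected; adding a covering i64 load at offset 0 makes the test succeed.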
1850 
1851 static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
1852                              IntegerType *Ty, uint64_t Offset,
1853                              const Twine &Name) {
1854   DEBUG(dbgs() << "       start: " << *V << "\n");
1855   IntegerType *IntTy = cast<IntegerType>(V->getType());
1856   assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
1857          "Element extends past full value");
1858   uint64_t ShAmt = 8*Offset;
1859   if (DL.isBigEndian())
1860     ShAmt = 8*(DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
1861   if (ShAmt) {
1862     V = IRB.CreateLShr(V, ShAmt, Name + ".shift");
1863     DEBUG(dbgs() << "     shifted: " << *V << "\n");
1864   }
1865   assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
1866          "Cannot extract to a larger integer!");
1867   if (Ty != IntTy) {
1868     V = IRB.CreateTrunc(V, Ty, Name + ".trunc");
1869     DEBUG(dbgs() << "     trunced: " << *V << "\n");
1870   }
1871   return V;
1872 }
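
// Worked example (little-endian): extracting an i16 at Offset == 2 from an
// i64 shifts right by 16 bits and truncates:
//   %sh = lshr i64 %v, 16
//   %t  = trunc i64 %sh to i16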
1873 
1874 static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old,
1875                             Value *V, uint64_t Offset, const Twine &Name) {
1876   IntegerType *IntTy = cast<IntegerType>(Old->getType());
1877   IntegerType *Ty = cast<IntegerType>(V->getType());
1878   assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
1879          "Cannot insert a larger integer!");
1880   DEBUG(dbgs() << "       start: " << *V << "\n");
1881   if (Ty != IntTy) {
1882     V = IRB.CreateZExt(V, IntTy, Name + ".ext");
1883     DEBUG(dbgs() << "    extended: " << *V << "\n");
1884   }
1885   assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
1886          "Element store outside of alloca store");
1887   uint64_t ShAmt = 8*Offset;
1888   if (DL.isBigEndian())
1889     ShAmt = 8*(DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
1890   if (ShAmt) {
1891     V = IRB.CreateShl(V, ShAmt, Name + ".shift");
1892     DEBUG(dbgs() << "     shifted: " << *V << "\n");
1893   }
1894 
1895   if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) {
1896     APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt);
1897     Old = IRB.CreateAnd(Old, Mask, Name + ".mask");
1898     DEBUG(dbgs() << "      masked: " << *Old << "\n");
1899     V = IRB.CreateOr(Old, V, Name + ".insert");
1900     DEBUG(dbgs() << "    inserted: " << *V << "\n");
1901   }
1902   return V;
1903 }
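
// Worked example (little-endian): inserting an i8 at Offset == 2 into an i32
// zero-extends, shifts left by 16, clears the target byte of the old value,
// and ORs the two halves together:
//   %ext  = zext i8 %b to i32
//   %sh   = shl i32 %ext, 16
//   %mask = and i32 %old, -16711681    ; ~0x00FF0000
//   %ins  = or i32 %mask, %sh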
1904 
1905 static Value *extractVector(IRBuilderTy &IRB, Value *V,
1906                             unsigned BeginIndex, unsigned EndIndex,
1907                             const Twine &Name) {
1908   VectorType *VecTy = cast<VectorType>(V->getType());
1909   unsigned NumElements = EndIndex - BeginIndex;
1910   assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
1911 
1912   if (NumElements == VecTy->getNumElements())
1913     return V;
1914 
1915   if (NumElements == 1) {
1916     V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex),
1917                                  Name + ".extract");
1918     DEBUG(dbgs() << "     extract: " << *V << "\n");
1919     return V;
1920   }
1921 
1922   SmallVector<Constant*, 8> Mask;
1923   Mask.reserve(NumElements);
1924   for (unsigned i = BeginIndex; i != EndIndex; ++i)
1925     Mask.push_back(IRB.getInt32(i));
1926   V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
1927                               ConstantVector::get(Mask),
1928                               Name + ".extract");
1929   DEBUG(dbgs() << "     shuffle: " << *V << "\n");
1930   return V;
1931 }
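
// Illustrative shuffle: extracting elements [1, 3) of a <4 x i32> becomes
//   %ext = shufflevector <4 x i32> %v, <4 x i32> undef,
//                        <2 x i32> <i32 1, i32 2>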
1932 
1933 static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
1934                            unsigned BeginIndex, const Twine &Name) {
1935   VectorType *VecTy = cast<VectorType>(Old->getType());
1936   assert(VecTy && "Can only insert a vector into a vector");
1937 
1938   VectorType *Ty = dyn_cast<VectorType>(V->getType());
1939   if (!Ty) {
1940     // Single element to insert.
1941     V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex),
1942                                 Name + ".insert");
1943     DEBUG(dbgs() <<  "     insert: " << *V << "\n");
1944     return V;
1945   }
1946 
1947   assert(Ty->getNumElements() <= VecTy->getNumElements() &&
1948          "Too many elements!");
1949   if (Ty->getNumElements() == VecTy->getNumElements()) {
1950     assert(V->getType() == VecTy && "Vector type mismatch");
1951     return V;
1952   }
1953   unsigned EndIndex = BeginIndex + Ty->getNumElements();
1954 
1955   // When inserting a smaller vector into the larger to store, we first
1956   // use a shuffle vector to widen it with undef elements, and then
1957   // a second shuffle vector to select between the loaded vector and the
1958   // incoming vector.
1959   SmallVector<Constant*, 8> Mask;
1960   Mask.reserve(VecTy->getNumElements());
1961   for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
1962     if (i >= BeginIndex && i < EndIndex)
1963       Mask.push_back(IRB.getInt32(i - BeginIndex));
1964     else
1965       Mask.push_back(UndefValue::get(IRB.getInt32Ty()));
1966   V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
1967                               ConstantVector::get(Mask),
1968                               Name + ".expand");
1969   DEBUG(dbgs() << "    shuffle: " << *V << "\n");
1970 
1971   Mask.clear();
1972   for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
1973     Mask.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex));
1974 
1975   V = IRB.CreateSelect(ConstantVector::get(Mask), V, Old, Name + "blend");
1976 
1977   DEBUG(dbgs() << "    blend: " << *V << "\n");
1978   return V;
1979 }
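
// Illustrative blend: inserting a <2 x i32> %v into a <4 x i32> %old at
// BeginIndex == 1 first widens %v with undef lanes,
//   %exp = shufflevector <2 x i32> %v, <2 x i32> undef,
//                        <4 x i32> <i32 undef, i32 0, i32 1, i32 undef>
// and then selects lane-wise between the widened value and %old:
//   %res = select <4 x i1> <i1 false, i1 true, i1 true, i1 false>,
//                 <4 x i32> %exp, <4 x i32> %old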
1980 
1981 namespace {
1982 /// \brief Visitor to rewrite instructions using a particular slice of an alloca
1983 /// to use a new alloca.
1984 ///
1985 /// Also implements the rewriting to vector-based accesses when the partition
1986 /// passes the isVectorPromotionViable predicate. Most of the rewriting logic
1987 /// lives here.
1988 class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> {
1989   // Befriend the base class so it can delegate to private visit methods.
1990   friend class llvm::InstVisitor<AllocaSliceRewriter, bool>;
1991   typedef llvm::InstVisitor<AllocaSliceRewriter, bool> Base;
1992 
1993   const DataLayout &DL;
1994   AllocaSlices &S;
1995   SROA &Pass;
1996   AllocaInst &OldAI, &NewAI;
1997   const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset;
1998   Type *NewAllocaTy;
1999 
2000   // If we are rewriting an alloca partition which can be written as pure
2001   // vector operations, we stash extra information here. When VecTy is
2002   // non-null, we have some strict guarantees about the rewritten alloca:
2003   //   - The new alloca is exactly the size of the vector type here.
2004   //   - The accesses all either map to the entire vector or to a single
2005   //     element.
2006   //   - The set of accessing instructions is only one of those handled above
2007   //     in isVectorPromotionViable. Generally these are the same access kinds
2008   //     which are promotable via mem2reg.
2009   VectorType *VecTy;
2010   Type *ElementTy;
2011   uint64_t ElementSize;
2012 
2013   // This is a convenience and flag variable that will be null unless the new
2014   // alloca's integer operations should be widened to this integer type due to
2015   // passing isIntegerWideningViable above. If it is non-null, the desired
2016   // integer type will be stored here for easy access during rewriting.
2017   IntegerType *IntTy;
2018 
2019   // The original offset of the slice currently being rewritten relative to
2020   // the original alloca.
2021   uint64_t BeginOffset, EndOffset;
2022   // The new offsets of the slice currently being rewritten relative to the
2023   // original alloca.
2024   uint64_t NewBeginOffset, NewEndOffset;
2025 
2026   uint64_t SliceSize;
2027   bool IsSplittable;
2028   bool IsSplit;
2029   Use *OldUse;
2030   Instruction *OldPtr;
2031 
2032   // Track post-rewrite users which are PHI nodes and Selects.
2033   SmallPtrSetImpl<PHINode *> &PHIUsers;
2034   SmallPtrSetImpl<SelectInst *> &SelectUsers;
2035 
2036   // Utility IR builder, whose name prefix is set up for each visited use, and
2037   // the insertion point is set to point to the user.
2038   IRBuilderTy IRB;
2039 
2040 public:
2041   AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &S, SROA &Pass,
2042                       AllocaInst &OldAI, AllocaInst &NewAI,
2043                       uint64_t NewAllocaBeginOffset,
2044                       uint64_t NewAllocaEndOffset, bool IsVectorPromotable,
2045                       bool IsIntegerPromotable,
2046                       SmallPtrSetImpl<PHINode *> &PHIUsers,
2047                       SmallPtrSetImpl<SelectInst *> &SelectUsers)
2048       : DL(DL), S(S), Pass(Pass), OldAI(OldAI), NewAI(NewAI),
2049         NewAllocaBeginOffset(NewAllocaBeginOffset),
2050         NewAllocaEndOffset(NewAllocaEndOffset),
2051         NewAllocaTy(NewAI.getAllocatedType()),
2052         VecTy(IsVectorPromotable ? cast<VectorType>(NewAllocaTy) : nullptr),
2053         ElementTy(VecTy ? VecTy->getElementType() : nullptr),
2054         ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy) / 8 : 0),
2055         IntTy(IsIntegerPromotable
2056                   ? Type::getIntNTy(
2057                         NewAI.getContext(),
2058                         DL.getTypeSizeInBits(NewAI.getAllocatedType()))
2059                   : nullptr),
2060         BeginOffset(), EndOffset(), IsSplittable(), IsSplit(), OldUse(),
2061         OldPtr(), PHIUsers(PHIUsers), SelectUsers(SelectUsers),
2062         IRB(NewAI.getContext(), ConstantFolder()) {
2063     if (VecTy) {
2064       assert((DL.getTypeSizeInBits(ElementTy) % 8) == 0 &&
2065              "Only multiple-of-8 sized vector elements are viable");
2066       ++NumVectorized;
2067     }
2068     assert((!IsVectorPromotable && !IsIntegerPromotable) ||
2069            IsVectorPromotable != IsIntegerPromotable);
2070   }
2071 
2072   bool visit(AllocaSlices::const_iterator I) {
2073     bool CanSROA = true;
2074     BeginOffset = I->beginOffset();
2075     EndOffset = I->endOffset();
2076     IsSplittable = I->isSplittable();
2077     IsSplit =
2078         BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset;
2079 
2080     // Compute the intersecting offset range.
2081     assert(BeginOffset < NewAllocaEndOffset);
2082     assert(EndOffset > NewAllocaBeginOffset);
2083     NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
2084     NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
2085 
2086     SliceSize = NewEndOffset - NewBeginOffset;
2087 
2088     OldUse = I->getUse();
2089     OldPtr = cast<Instruction>(OldUse->get());
2090 
2091     Instruction *OldUserI = cast<Instruction>(OldUse->getUser());
2092     IRB.SetInsertPoint(OldUserI);
2093     IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc());
2094     IRB.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) + ".");
2095 
2096     CanSROA &= visit(cast<Instruction>(OldUse->getUser()));
2097     if (VecTy || IntTy)
2098       assert(CanSROA);
2099     return CanSROA;
2100   }
2101 
2102 private:
2103   // Make sure the other visit overloads are visible.
2104   using Base::visit;
2105 
2106   // Every instruction which can end up as a user must have a rewrite rule.
2107   bool visitInstruction(Instruction &I) {
2108     DEBUG(dbgs() << "    !!!! Cannot rewrite: " << I << "\n");
2109     llvm_unreachable("No rewrite rule for this instruction!");
2110   }
2111 
2112   Value *getNewAllocaSlicePtr(IRBuilderTy &IRB, Type *PointerTy) {
2113     // Note that the offset computation can use BeginOffset or NewBeginOffset
2114     // interchangeably for unsplit slices.
2115     assert(IsSplit || BeginOffset == NewBeginOffset);
2116     uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2117 
2118 #ifndef NDEBUG
2119     StringRef OldName = OldPtr->getName();
2120     // Skip through the last '.sroa.' component of the name.
2121     size_t LastSROAPrefix = OldName.rfind(".sroa.");
2122     if (LastSROAPrefix != StringRef::npos) {
2123       OldName = OldName.substr(LastSROAPrefix + strlen(".sroa."));
2124       // Look for an SROA slice index.
2125       size_t IndexEnd = OldName.find_first_not_of("0123456789");
2126       if (IndexEnd != StringRef::npos && OldName[IndexEnd] == '.') {
2127         // Strip the index and look for the offset.
2128         OldName = OldName.substr(IndexEnd + 1);
2129         size_t OffsetEnd = OldName.find_first_not_of("0123456789");
2130         if (OffsetEnd != StringRef::npos && OldName[OffsetEnd] == '.')
2131           // Strip the offset.
2132           OldName = OldName.substr(OffsetEnd + 1);
2133       }
2134     }
2135     // Strip any SROA suffixes as well.
2136     OldName = OldName.substr(0, OldName.find(".sroa_"));
2137 #endif
2138 
2139     return getAdjustedPtr(IRB, DL, &NewAI,
2140                           APInt(DL.getPointerSizeInBits(), Offset), PointerTy,
2141 #ifndef NDEBUG
2142                           Twine(OldName) + "."
2143 #else
2144                           Twine()
2145 #endif
2146                           );
2147   }
2148 
2149   /// \brief Compute suitable alignment to access this slice of the *new* alloca.
2150   ///
2151   /// You can optionally pass a type to this routine and if that type's ABI
2152   /// alignment is itself suitable, this will return zero.
2153   unsigned getSliceAlign(Type *Ty = nullptr) {
2154     unsigned NewAIAlign = NewAI.getAlignment();
2155     if (!NewAIAlign)
2156       NewAIAlign = DL.getABITypeAlignment(NewAI.getAllocatedType());
2157     unsigned Align = MinAlign(NewAIAlign, NewBeginOffset - NewAllocaBeginOffset);
2158     return (Ty && Align == DL.getABITypeAlignment(Ty)) ? 0 : Align;
2159   }
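
  // Example (illustrative): a new alloca aligned to 8 with a slice starting
  // 4 bytes in yields MinAlign(8, 4) == 4; if the queried type's ABI
  // alignment is also 4, zero is returned so callers can rely on the type's
  // natural alignment.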
2160 
2161   unsigned getIndex(uint64_t Offset) {
2162     assert(VecTy && "Can only call getIndex when rewriting a vector");
2163     uint64_t RelOffset = Offset - NewAllocaBeginOffset;
2164     assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds");
2165     uint32_t Index = RelOffset / ElementSize;
2166     assert(Index * ElementSize == RelOffset);
2167     return Index;
2168   }
2169 
2170   void deleteIfTriviallyDead(Value *V) {
2171     Instruction *I = cast<Instruction>(V);
2172     if (isInstructionTriviallyDead(I))
2173       Pass.DeadInsts.insert(I);
2174   }
2175 
2176   Value *rewriteVectorizedLoadInst() {
2177     unsigned BeginIndex = getIndex(NewBeginOffset);
2178     unsigned EndIndex = getIndex(NewEndOffset);
2179     assert(EndIndex > BeginIndex && "Empty vector!");
2180 
2181     Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2182                                      "load");
2183     return extractVector(IRB, V, BeginIndex, EndIndex, "vec");
2184   }
2185 
2186   Value *rewriteIntegerLoad(LoadInst &LI) {
2187     assert(IntTy && "We cannot insert an integer to the alloca");
2188     assert(!LI.isVolatile());
2189     Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2190                                      "load");
2191     V = convertValue(DL, IRB, V, IntTy);
2192     assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
2193     uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2194     if (Offset > 0 || NewEndOffset < NewAllocaEndOffset)
2195       V = extractInteger(DL, IRB, V, cast<IntegerType>(LI.getType()), Offset,
2196                          "extract");
2197     return V;
2198   }
2199 
2200   bool visitLoadInst(LoadInst &LI) {
2201     DEBUG(dbgs() << "    original: " << LI << "\n");
2202     Value *OldOp = LI.getOperand(0);
2203     assert(OldOp == OldPtr);
2204 
2205     Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), SliceSize * 8)
2206                              : LI.getType();
2207     bool IsPtrAdjusted = false;
2208     Value *V;
2209     if (VecTy) {
2210       V = rewriteVectorizedLoadInst();
2211     } else if (IntTy && LI.getType()->isIntegerTy()) {
2212       V = rewriteIntegerLoad(LI);
2213     } else if (NewBeginOffset == NewAllocaBeginOffset &&
2214                canConvertValue(DL, NewAllocaTy, LI.getType())) {
2215       V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2216                                 LI.isVolatile(), LI.getName());
2217     } else {
2218       Type *LTy = TargetTy->getPointerTo();
2219       V = IRB.CreateAlignedLoad(getNewAllocaSlicePtr(IRB, LTy),
2220                                 getSliceAlign(TargetTy), LI.isVolatile(),
2221                                 LI.getName());
2222       IsPtrAdjusted = true;
2223     }
2224     V = convertValue(DL, IRB, V, TargetTy);
2225 
2226     if (IsSplit) {
2227       assert(!LI.isVolatile());
2228       assert(LI.getType()->isIntegerTy() &&
2229              "Only integer type loads and stores are split");
2230       assert(SliceSize < DL.getTypeStoreSize(LI.getType()) &&
2231              "Split load isn't smaller than original load");
2232       assert(LI.getType()->getIntegerBitWidth() ==
2233              DL.getTypeStoreSizeInBits(LI.getType()) &&
2234              "Non-byte-multiple bit width");
2235       // Move the insertion point just past the load so that we can refer to it.
2236       IRB.SetInsertPoint(std::next(BasicBlock::iterator(&LI)));
2237       // Create a placeholder value with the same type as LI to use as the
2238       // basis for the new value. This allows us to replace the uses of LI with
2239       // the computed value, and then replace the placeholder with LI, leaving
2240       // LI only used for this computation.
2241       Value *Placeholder
2242         = new LoadInst(UndefValue::get(LI.getType()->getPointerTo()));
2243       V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset,
2244                         "insert");
2245       LI.replaceAllUsesWith(V);
2246       Placeholder->replaceAllUsesWith(&LI);
2247       delete Placeholder;
2248     } else {
2249       LI.replaceAllUsesWith(V);
2250     }
2251 
2252     Pass.DeadInsts.insert(&LI);
2253     deleteIfTriviallyDead(OldOp);
2254     DEBUG(dbgs() << "          to: " << *V << "\n");
2255     return !LI.isVolatile() && !IsPtrAdjusted;
2256   }
2257 
2258   bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp) {
2259     if (V->getType() != VecTy) {
2260       unsigned BeginIndex = getIndex(NewBeginOffset);
2261       unsigned EndIndex = getIndex(NewEndOffset);
2262       assert(EndIndex > BeginIndex && "Empty vector!");
2263       unsigned NumElements = EndIndex - BeginIndex;
2264       assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
2265       Type *SliceTy =
2266           (NumElements == 1) ? ElementTy
2267                              : VectorType::get(ElementTy, NumElements);
2268       if (V->getType() != SliceTy)
2269         V = convertValue(DL, IRB, V, SliceTy);
2270 
2271       // Mix in the existing elements.
2272       Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2273                                          "load");
2274       V = insertVector(IRB, Old, V, BeginIndex, "vec");
2275     }
2276     StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
2277     Pass.DeadInsts.insert(&SI);
2278 
2279     (void)Store;
2280     DEBUG(dbgs() << "          to: " << *Store << "\n");
2281     return true;
2282   }
2283 
2284   bool rewriteIntegerStore(Value *V, StoreInst &SI) {
2285     assert(IntTy && "We cannot extract an integer from the alloca");
2286     assert(!SI.isVolatile());
2287     if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) {
2288       Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2289                                          "oldload");
2290       Old = convertValue(DL, IRB, Old, IntTy);
2291       assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
2292       uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
2293       V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset,
2294                         "insert");
2295     }
2296     V = convertValue(DL, IRB, V, NewAllocaTy);
2297     StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
2298     Pass.DeadInsts.insert(&SI);
2299     (void)Store;
2300     DEBUG(dbgs() << "          to: " << *Store << "\n");
2301     return true;
2302   }
2303 
2304   bool visitStoreInst(StoreInst &SI) {
2305     DEBUG(dbgs() << "    original: " << SI << "\n");
2306     Value *OldOp = SI.getOperand(1);
2307     assert(OldOp == OldPtr);
2308 
2309     Value *V = SI.getValueOperand();
2310 
2311     // Strip all inbounds GEPs and pointer casts to try to dig out any root
2312     // alloca that should be re-examined after promoting this alloca.
2313     if (V->getType()->isPointerTy())
2314       if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets()))
2315         Pass.PostPromotionWorklist.insert(AI);
2316 
2317     if (SliceSize < DL.getTypeStoreSize(V->getType())) {
2318       assert(!SI.isVolatile());
2319       assert(V->getType()->isIntegerTy() &&
2320              "Only integer type loads and stores are split");
2321       assert(V->getType()->getIntegerBitWidth() ==
2322              DL.getTypeStoreSizeInBits(V->getType()) &&
2323              "Non-byte-multiple bit width");
2324       IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), SliceSize * 8);
2325       V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset,
2326                          "extract");
2327     }
2328 
2329     if (VecTy)
2330       return rewriteVectorizedStoreInst(V, SI, OldOp);
2331     if (IntTy && V->getType()->isIntegerTy())
2332       return rewriteIntegerStore(V, SI);
2333 
2334     StoreInst *NewSI;
2335     if (NewBeginOffset == NewAllocaBeginOffset &&
2336         NewEndOffset == NewAllocaEndOffset &&
2337         canConvertValue(DL, V->getType(), NewAllocaTy)) {
2338       V = convertValue(DL, IRB, V, NewAllocaTy);
2339       NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
2340                                      SI.isVolatile());
2341     } else {
2342       Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo());
2343       NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(V->getType()),
2344                                      SI.isVolatile());
2345     }
2346     (void)NewSI;
2347     Pass.DeadInsts.insert(&SI);
2348     deleteIfTriviallyDead(OldOp);
2349 
2350     DEBUG(dbgs() << "          to: " << *NewSI << "\n");
2351     return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile();
2352   }
2353 
2354   /// \brief Compute an integer value from splatting an i8 across the given
2355   /// number of bytes.
2356   ///
2357   /// Note that this routine assumes an i8 is a byte. If that isn't true, don't
2358   /// call this routine.
2359   /// FIXME: Heed the advice above.
2360   ///
2361   /// \param V The i8 value to splat.
2362   /// \param Size The number of bytes in the output (assuming i8 is one byte)
2363   Value *getIntegerSplat(Value *V, unsigned Size) {
2364     assert(Size > 0 && "Expected a positive number of bytes.");
2365     IntegerType *VTy = cast<IntegerType>(V->getType());
2366     assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte");
2367     if (Size == 1)
2368       return V;
2369 
2370     Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size*8);
2371     V = IRB.CreateMul(IRB.CreateZExt(V, SplatIntTy, "zext"),
2372                       ConstantExpr::getUDiv(
2373                         Constant::getAllOnesValue(SplatIntTy),
2374                         ConstantExpr::getZExt(
2375                           Constant::getAllOnesValue(V->getType()),
2376                           SplatIntTy)),
2377                       "isplat");
2378     return V;
2379   }
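
  // Worked example: splatting an i8 across Size == 4 multiplies the
  // zero-extended byte by 0x01010101 (i.e. 0xFFFFFFFF udiv 0xFF):
  //   %z = zext i8 %b to i32
  //   %s = mul i32 %z, 16843009    ; 0x01010101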
2380 
2381   /// \brief Compute a vector splat for a given element value.
2382   Value *getVectorSplat(Value *V, unsigned NumElements) {
2383     V = IRB.CreateVectorSplat(NumElements, V, "vsplat");
2384     DEBUG(dbgs() << "       splat: " << *V << "\n");
2385     return V;
2386   }
2387 
2388   bool visitMemSetInst(MemSetInst &II) {
2389     DEBUG(dbgs() << "    original: " << II << "\n");
2390     assert(II.getRawDest() == OldPtr);
2391 
2392     // If the memset has a variable size, it cannot be split; just adjust the
2393     // pointer to the new alloca.
2394     if (!isa<Constant>(II.getLength())) {
2395       assert(!IsSplit);
2396       assert(NewBeginOffset == BeginOffset);
2397       II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType()));
2398       Type *CstTy = II.getAlignmentCst()->getType();
2399       II.setAlignment(ConstantInt::get(CstTy, getSliceAlign()));
2400 
2401       deleteIfTriviallyDead(OldPtr);
2402       return false;
2403     }
2404 
2405     // Record this instruction for deletion.
2406     Pass.DeadInsts.insert(&II);
2407 
2408     Type *AllocaTy = NewAI.getAllocatedType();
2409     Type *ScalarTy = AllocaTy->getScalarType();
2410 
2411     // If this doesn't map cleanly onto the alloca type, and that type isn't
2412     // a single value type, just emit a memset.
2413     if (!VecTy && !IntTy &&
2414         (BeginOffset > NewAllocaBeginOffset ||
2415          EndOffset < NewAllocaEndOffset ||
2416          !AllocaTy->isSingleValueType() ||
2417          !DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy)) ||
2418          DL.getTypeSizeInBits(ScalarTy)%8 != 0)) {
2419       Type *SizeTy = II.getLength()->getType();
2420       Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
2421       CallInst *New = IRB.CreateMemSet(
2422           getNewAllocaSlicePtr(IRB, OldPtr->getType()), II.getValue(), Size,
2423           getSliceAlign(), II.isVolatile());
2424       (void)New;
2425       DEBUG(dbgs() << "          to: " << *New << "\n");
2426       return false;
2427     }
2428 
2429     // If we can represent this as a simple value, we have to build the actual
2430     // value to store, which requires expanding the byte present in memset to
2431     // a sensible representation for the alloca type. This is essentially
2432     // splatting the byte to a sufficiently wide integer, splatting it across
2433     // any desired vector width, and bitcasting to the final type.
2434     Value *V;
2435 
2436     if (VecTy) {
2437       // If this is a memset of a vectorized alloca, insert it.
2438       assert(ElementTy == ScalarTy);
2439 
2440       unsigned BeginIndex = getIndex(NewBeginOffset);
2441       unsigned EndIndex = getIndex(NewEndOffset);
2442       assert(EndIndex > BeginIndex && "Empty vector!");
2443       unsigned NumElements = EndIndex - BeginIndex;
2444       assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
2445 
2446       Value *Splat =
2447           getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ElementTy) / 8);
2448       Splat = convertValue(DL, IRB, Splat, ElementTy);
2449       if (NumElements > 1)
2450         Splat = getVectorSplat(Splat, NumElements);
2451 
2452       Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2453                                          "oldload");
2454       V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
2455     } else if (IntTy) {
2456       // If this is a memset on an alloca where we can widen stores, insert the
2457       // set integer.
2458       assert(!II.isVolatile());
2459 
2460       uint64_t Size = NewEndOffset - NewBeginOffset;
2461       V = getIntegerSplat(II.getValue(), Size);
2462 
2463       if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
2464                     EndOffset != NewAllocaEndOffset)) {
2465         Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2466                                            "oldload");
2467         Old = convertValue(DL, IRB, Old, IntTy);
2468         uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2469         V = insertInteger(DL, IRB, Old, V, Offset, "insert");
2470       } else {
2471         assert(V->getType() == IntTy &&
2472                "Wrong type for an alloca wide integer!");
2473       }
2474       V = convertValue(DL, IRB, V, AllocaTy);
2475     } else {
2476       // Established these invariants above.
2477       assert(NewBeginOffset == NewAllocaBeginOffset);
2478       assert(NewEndOffset == NewAllocaEndOffset);
2479 
2480       V = getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ScalarTy) / 8);
2481       if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy))
2482         V = getVectorSplat(V, AllocaVecTy->getNumElements());
2483 
2484       V = convertValue(DL, IRB, V, AllocaTy);
2485     }
2486 
2487     Value *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
2488                                         II.isVolatile());
2489     (void)New;
2490     DEBUG(dbgs() << "          to: " << *New << "\n");
2491     return !II.isVolatile();
2492   }
2493 
2494   bool visitMemTransferInst(MemTransferInst &II) {
2495     // Rewriting of memory transfer instructions can be a bit tricky. We break
2496     // them into two categories: split intrinsics and unsplit intrinsics.
2497 
2498     DEBUG(dbgs() << "    original: " << II << "\n");
2499 
2500     bool IsDest = &II.getRawDestUse() == OldUse;
2501     assert((IsDest && II.getRawDest() == OldPtr) ||
2502            (!IsDest && II.getRawSource() == OldPtr));
2503 
2504     unsigned SliceAlign = getSliceAlign();
2505 
2506     // For unsplit intrinsics, we simply modify the source and destination
2507     // pointers in place. This isn't just an optimization, it is a matter of
2508     // correctness. With unsplit intrinsics we may be dealing with transfers
2509     // within a single alloca before SROA ran, or with transfers that have
2510     // a variable length. We may also be dealing with memmove instead of
2511     // memcpy, and so simply updating the pointers is all that is necessary to
2512     // update both the source and dest of a single call.
2513     if (!IsSplittable) {
2514       Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
2515       if (IsDest)
2516         II.setDest(AdjustedPtr);
2517       else
2518         II.setSource(AdjustedPtr);
2519 
2520       if (II.getAlignment() > SliceAlign) {
2521         Type *CstTy = II.getAlignmentCst()->getType();
2522         II.setAlignment(
2523             ConstantInt::get(CstTy, MinAlign(II.getAlignment(), SliceAlign)));
2524       }
2525 
2526       DEBUG(dbgs() << "          to: " << II << "\n");
2527       deleteIfTriviallyDead(OldPtr);
2528       return false;
2529     }
2530     // For split transfer intrinsics we have an incredibly useful assurance:
2531     // the source and destination do not reside within the same alloca, and at
2532     // least one of them does not escape. This means that we can replace
2533     // memmove with memcpy, and we don't need to worry about all manner of
2534     // downsides to splitting and transforming the operations.
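    //
    // For instance, a memmove whose two ends land in different allocas after
    // splitting cannot describe overlapping regions, so it can be lowered to
    // the plain load/store pair or memcpy emitted below.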
2535 
2536     // If this doesn't map cleanly onto the alloca type, and that type isn't
2537     // a single value type, just emit a memcpy.
2538     bool EmitMemCpy
2539       = !VecTy && !IntTy && (BeginOffset > NewAllocaBeginOffset ||
2540                              EndOffset < NewAllocaEndOffset ||
2541                              !NewAI.getAllocatedType()->isSingleValueType());
2542 
2543     // If we're just going to emit a memcpy, the alloca hasn't changed, and the
2544     // size hasn't been shrunk based on analysis of the viable range, this is
2545     // a no-op.
2546     if (EmitMemCpy && &OldAI == &NewAI) {
2547       // Ensure the start lines up.
2548       assert(NewBeginOffset == BeginOffset);
2549 
2550       // Rewrite the size as needed.
2551       if (NewEndOffset != EndOffset)
2552         II.setLength(ConstantInt::get(II.getLength()->getType(),
2553                                       NewEndOffset - NewBeginOffset));
2554       return false;
2555     }
2556     // Record this instruction for deletion.
2557     Pass.DeadInsts.insert(&II);
2558 
2559     // Strip all inbounds GEPs and pointer casts to try to dig out any root
2560     // alloca that should be re-examined after rewriting this instruction.
2561     Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest();
2562     if (AllocaInst *AI
2563           = dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets())) {
2564       assert(AI != &OldAI && AI != &NewAI &&
2565              "Splittable transfers cannot reach the same alloca on both ends.");
2566       Pass.Worklist.insert(AI);
2567     }
2568 
2569     Type *OtherPtrTy = OtherPtr->getType();
2570     unsigned OtherAS = OtherPtrTy->getPointerAddressSpace();
2571 
2572     // Compute the relative offset for the other pointer within the transfer.
2573     unsigned IntPtrWidth = DL.getPointerSizeInBits(OtherAS);
2574     APInt OtherOffset(IntPtrWidth, NewBeginOffset - BeginOffset);
2575     unsigned OtherAlign = MinAlign(II.getAlignment() ? II.getAlignment() : 1,
2576                                    OtherOffset.zextOrTrunc(64).getZExtValue());
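    // For instance, if the original transfer was 4-byte aligned and this
    // slice begins 2 bytes into it, MinAlign(4, 2) == 2, so only 2-byte
    // alignment may be assumed for the other pointer.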
2577 
2578     if (EmitMemCpy) {
2579       // Compute the other pointer, folding as much as possible to produce
2580       // a single, simple GEP in most cases.
2581       OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
2582                                 OtherPtr->getName() + ".");
2583 
2584       Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
2585       Type *SizeTy = II.getLength()->getType();
2586       Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
2587 
2588       CallInst *New = IRB.CreateMemCpy(
2589           IsDest ? OurPtr : OtherPtr, IsDest ? OtherPtr : OurPtr, Size,
2590           MinAlign(SliceAlign, OtherAlign), II.isVolatile());
2591       (void)New;
2592       DEBUG(dbgs() << "          to: " << *New << "\n");
2593       return false;
2594     }
2595 
2596     bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset &&
2597                          NewEndOffset == NewAllocaEndOffset;
2598     uint64_t Size = NewEndOffset - NewBeginOffset;
2599     unsigned BeginIndex = VecTy ? getIndex(NewBeginOffset) : 0;
2600     unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0;
2601     unsigned NumElements = EndIndex - BeginIndex;
2602     IntegerType *SubIntTy
2603       = IntTy ? Type::getIntNTy(IntTy->getContext(), Size*8) : nullptr;
2604 
2605     // Reset the other pointer type to match the register type we're going to
2606     // use, but using the address space of the original other pointer.
2607     if (VecTy && !IsWholeAlloca) {
2608       if (NumElements == 1)
2609         OtherPtrTy = VecTy->getElementType();
2610       else
2611         OtherPtrTy = VectorType::get(VecTy->getElementType(), NumElements);
2612 
2613       OtherPtrTy = OtherPtrTy->getPointerTo(OtherAS);
2614     } else if (IntTy && !IsWholeAlloca) {
2615       OtherPtrTy = SubIntTy->getPointerTo(OtherAS);
2616     } else {
2617       OtherPtrTy = NewAllocaTy->getPointerTo(OtherAS);
2618     }
2619 
2620     Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
2621                                    OtherPtr->getName() + ".");
2622     unsigned SrcAlign = OtherAlign;
2623     Value *DstPtr = &NewAI;
2624     unsigned DstAlign = SliceAlign;
2625     if (!IsDest) {
2626       std::swap(SrcPtr, DstPtr);
2627       std::swap(SrcAlign, DstAlign);
2628     }
2629 
2630     Value *Src;
2631     if (VecTy && !IsWholeAlloca && !IsDest) {
2632       Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2633                                   "load");
2634       Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec");
2635     } else if (IntTy && !IsWholeAlloca && !IsDest) {
2636       Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2637                                   "load");
2638       Src = convertValue(DL, IRB, Src, IntTy);
2639       uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2640       Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract");
2641     } else {
2642       Src = IRB.CreateAlignedLoad(SrcPtr, SrcAlign, II.isVolatile(),
2643                                   "copyload");
2644     }
2645 
2646     if (VecTy && !IsWholeAlloca && IsDest) {
2647       Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2648                                          "oldload");
2649       Src = insertVector(IRB, Old, Src, BeginIndex, "vec");
2650     } else if (IntTy && !IsWholeAlloca && IsDest) {
2651       Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2652                                          "oldload");
2653       Old = convertValue(DL, IRB, Old, IntTy);
2654       uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2655       Src = insertInteger(DL, IRB, Old, Src, Offset, "insert");
2656       Src = convertValue(DL, IRB, Src, NewAllocaTy);
2657     }
2658 
2659     StoreInst *Store = cast<StoreInst>(
2660         IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile()));
2661     (void)Store;
2662     DEBUG(dbgs() << "          to: " << *Store << "\n");
2663     return !II.isVolatile();
2664   }
2665 
2666   bool visitIntrinsicInst(IntrinsicInst &II) {
2667     assert(II.getIntrinsicID() == Intrinsic::lifetime_start ||
2668            II.getIntrinsicID() == Intrinsic::lifetime_end);
2669     DEBUG(dbgs() << "    original: " << II << "\n");
2670     assert(II.getArgOperand(1) == OldPtr);
2671 
2672     // Record this instruction for deletion.
2673     Pass.DeadInsts.insert(&II);
2674 
2675     ConstantInt *Size
2676       = ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()),
2677                          NewEndOffset - NewBeginOffset);
2678     Value *Ptr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
2679     Value *New;
2680     if (II.getIntrinsicID() == Intrinsic::lifetime_start)
2681       New = IRB.CreateLifetimeStart(Ptr, Size);
2682     else
2683       New = IRB.CreateLifetimeEnd(Ptr, Size);
2684 
2685     (void)New;
2686     DEBUG(dbgs() << "          to: " << *New << "\n");
2687     return true;
2688   }
2689 
2690   bool visitPHINode(PHINode &PN) {
2691     DEBUG(dbgs() << "    original: " << PN << "\n");
2692     assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable");
2693     assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable");
2694 
2695     // We would like to compute a new pointer in only one place, but have it be
2696     // as local as possible to the PHI. To do that, we re-use the location of
2697     // the old pointer, which necessarily must be in the right position to
2698     // dominate the PHI.
2699     IRBuilderTy PtrBuilder(IRB);
2700     PtrBuilder.SetInsertPoint(OldPtr);
2701     PtrBuilder.SetCurrentDebugLocation(OldPtr->getDebugLoc());
2702 
2703     Value *NewPtr = getNewAllocaSlicePtr(PtrBuilder, OldPtr->getType());
2704     // Replace the operands which were using the old pointer.
2705     std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr);
2706 
2707     DEBUG(dbgs() << "          to: " << PN << "\n");
2708     deleteIfTriviallyDead(OldPtr);
2709 
2710     // PHIs can't be promoted on their own, but often can be speculated. We
2711     // check the speculation outside of the rewriter so that we see the
2712     // fully-rewritten alloca.
2713     PHIUsers.insert(&PN);
2714     return true;
2715   }
2716 
2717   bool visitSelectInst(SelectInst &SI) {
2718     DEBUG(dbgs() << "    original: " << SI << "\n");
2719     assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) &&
2720            "Pointer isn't an operand!");
2721     assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable");
2722     assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable");
2723 
2724     Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
2725     // Replace the operands which were using the old pointer.
2726     if (SI.getOperand(1) == OldPtr)
2727       SI.setOperand(1, NewPtr);
2728     if (SI.getOperand(2) == OldPtr)
2729       SI.setOperand(2, NewPtr);
2730 
2731     DEBUG(dbgs() << "          to: " << SI << "\n");
2732     deleteIfTriviallyDead(OldPtr);
2733 
2734     // Selects can't be promoted on their own, but often can be speculated. We
2735     // check the speculation outside of the rewriter so that we see the
2736     // fully-rewritten alloca.
2737     SelectUsers.insert(&SI);
2738     return true;
2739   }
2740 
2741 };
2742 }
2743 
2744 namespace {
2745 /// \brief Visitor to rewrite aggregate loads and stores as scalar.
2746 ///
2747 /// This pass aggressively rewrites all aggregate loads and stores on
2748 /// a particular pointer (or any pointer derived from it which we can identify)
2749 /// with scalar loads and stores.
2750 class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> {
2751   // Befriend the base class so it can delegate to private visit methods.
2752   friend class llvm::InstVisitor<AggLoadStoreRewriter, bool>;
2753 
2754   const DataLayout &DL;
2755 
2756   /// Queue of pointer uses to analyze and potentially rewrite.
2757   SmallVector<Use *, 8> Queue;
2758 
2759   /// Set to prevent us from cycling with phi nodes and loops.
2760   SmallPtrSet<User *, 8> Visited;
2761 
2762   /// The current pointer use being rewritten. This is used to dig up the used
2763   /// value (as opposed to the user).
2764   Use *U;
2765 
2766 public:
2767   AggLoadStoreRewriter(const DataLayout &DL) : DL(DL) {}
2768 
2769   /// Rewrite loads and stores through a pointer and all pointers derived from
2770   /// it.
2771   bool rewrite(Instruction &I) {
2772     DEBUG(dbgs() << "  Rewriting FCA loads and stores...\n");
2773     enqueueUsers(I);
2774     bool Changed = false;
2775     while (!Queue.empty()) {
2776       U = Queue.pop_back_val();
2777       Changed |= visit(cast<Instruction>(U->getUser()));
2778     }
2779     return Changed;
2780   }
2781 
2782 private:
2783   /// Enqueue all the users of the given instruction for further processing.
2784   /// This uses a set to de-duplicate users.
2785   void enqueueUsers(Instruction &I) {
2786     for (Use &U : I.uses())
2787       if (Visited.insert(U.getUser()))
2788         Queue.push_back(&U);
2789   }
2790 
2791   // Conservative default is to not rewrite anything.
2792   bool visitInstruction(Instruction &I) { return false; }
2793 
2794   /// \brief Generic recursive split emission class.
2795   template <typename Derived>
2796   class OpSplitter {
2797   protected:
2798     /// The builder used to form new instructions.
2799     IRBuilderTy IRB;
2800     /// The indices to be used with insertvalue or extractvalue to select the
2801     /// appropriate value within the aggregate.
2802     SmallVector<unsigned, 4> Indices;
2803     /// The indices to a GEP instruction which will move Ptr to the correct slot
2804     /// within the aggregate.
2805     SmallVector<Value *, 4> GEPIndices;
2806     /// The base pointer of the original op, used as a base for GEPing the
2807     /// split operations.
2808     Value *Ptr;
2809 
2810     /// Initialize the splitter with an insertion point and Ptr, starting with
2811     /// a single zero GEP index.
2812     OpSplitter(Instruction *InsertionPoint, Value *Ptr)
2813       : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr) {}
2814 
2815   public:
2816     /// \brief Generic recursive split emission routine.
2817     ///
2818     /// This method recursively splits an aggregate op (load or store) into
2819     /// scalar or vector ops. It splits recursively until it hits a single value
2820     /// and emits that single value operation via the template argument.
2821     ///
2822     /// The logic of this routine relies on GEPs and insertvalue and
2823     /// extractvalue all operating with the same fundamental index list, merely
2824     /// formatted differently (GEPs need actual values).
2825     ///
2826     /// \param Ty  The type being split recursively into smaller ops.
2827     /// \param Agg The aggregate value being built up or stored, depending on
2828     /// whether this is splitting a load or a store respectively.
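    ///
    /// For example, splitting an op on { i32, [2 x float] } reaches leaves
    /// with Indices {0}, {1,0} and {1,1}, while GEPIndices holds the matching
    /// constants (0,0), (0,1,0) and (0,1,1); the extra leading zero is the
    /// initial pointer index of each GEP.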
2829     void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) {
2830       if (Ty->isSingleValueType())
2831         return static_cast<Derived *>(this)->emitFunc(Ty, Agg, Name);
2832 
2833       if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
2834         unsigned OldSize = Indices.size();
2835         (void)OldSize;
2836         for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size;
2837              ++Idx) {
2838           assert(Indices.size() == OldSize && "Did not return to the old size");
2839           Indices.push_back(Idx);
2840           GEPIndices.push_back(IRB.getInt32(Idx));
2841           emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx));
2842           GEPIndices.pop_back();
2843           Indices.pop_back();
2844         }
2845         return;
2846       }
2847 
2848       if (StructType *STy = dyn_cast<StructType>(Ty)) {
2849         unsigned OldSize = Indices.size();
2850         (void)OldSize;
2851         for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size;
2852              ++Idx) {
2853           assert(Indices.size() == OldSize && "Did not return to the old size");
2854           Indices.push_back(Idx);
2855           GEPIndices.push_back(IRB.getInt32(Idx));
2856           emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx));
2857           GEPIndices.pop_back();
2858           Indices.pop_back();
2859         }
2860         return;
2861       }
2862 
2863       llvm_unreachable("Only arrays and structs are aggregate loadable types");
2864     }
2865   };
2866 
2867   struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> {
2868     LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr)
2869       : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr) {}
2870 
2871     /// Emit a leaf load of a single value. This is called at the leaves of the
2872     /// recursive emission to actually load values.
2873     void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) {
2874       assert(Ty->isSingleValueType());
2875       // Load the single value and insert it using the indices.
2876       Value *GEP = IRB.CreateInBoundsGEP(Ptr, GEPIndices, Name + ".gep");
2877       Value *Load = IRB.CreateLoad(GEP, Name + ".load");
2878       Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
2879       DEBUG(dbgs() << "          to: " << *Load << "\n");
2880     }
2881   };
2882 
2883   bool visitLoadInst(LoadInst &LI) {
2884     assert(LI.getPointerOperand() == *U);
2885     if (!LI.isSimple() || LI.getType()->isSingleValueType())
2886       return false;
2887 
2888     // We have an aggregate being loaded, split it apart.
2889     DEBUG(dbgs() << "    original: " << LI << "\n");
2890     LoadOpSplitter Splitter(&LI, *U);
2891     Value *V = UndefValue::get(LI.getType());
2892     Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca");
2893     LI.replaceAllUsesWith(V);
2894     LI.eraseFromParent();
2895     return true;
2896   }
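
  // As an illustration, the splitting above rewrites a first-class aggregate
  // load such as:
  //   %v = load { i32, float }* %p
  // into per-element operations:
  //   %v.fca.0.gep = getelementptr inbounds { i32, float }* %p, i32 0, i32 0
  //   %v.fca.0.load = load i32* %v.fca.0.gep
  //   %v.fca.0.insert = insertvalue { i32, float } undef, i32 %v.fca.0.load, 0
  // and similarly for the float element at index 1.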
2897 
2898   struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> {
2899     StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr)
2900       : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr) {}
2901 
2902     /// Emit a leaf store of a single value. This is called at the leaves of the
2903     /// recursive emission to actually produce stores.
2904     void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) {
2905       assert(Ty->isSingleValueType());
2906       // Extract the single value and store it using the indices.
2907       Value *Store = IRB.CreateStore(
2908         IRB.CreateExtractValue(Agg, Indices, Name + ".extract"),
2909         IRB.CreateInBoundsGEP(Ptr, GEPIndices, Name + ".gep"));
2910       (void)Store;
2911       DEBUG(dbgs() << "          to: " << *Store << "\n");
2912     }
2913   };
2914 
2915   bool visitStoreInst(StoreInst &SI) {
2916     if (!SI.isSimple() || SI.getPointerOperand() != *U)
2917       return false;
2918     Value *V = SI.getValueOperand();
2919     if (V->getType()->isSingleValueType())
2920       return false;
2921 
2922     // We have an aggregate being stored, split it apart.
2923     DEBUG(dbgs() << "    original: " << SI << "\n");
2924     StoreOpSplitter Splitter(&SI, *U);
2925     Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca");
2926     SI.eraseFromParent();
2927     return true;
2928   }
2929 
2930   bool visitBitCastInst(BitCastInst &BC) {
2931     enqueueUsers(BC);
2932     return false;
2933   }
2934 
2935   bool visitGetElementPtrInst(GetElementPtrInst &GEPI) {
2936     enqueueUsers(GEPI);
2937     return false;
2938   }
2939 
2940   bool visitPHINode(PHINode &PN) {
2941     enqueueUsers(PN);
2942     return false;
2943   }
2944 
2945   bool visitSelectInst(SelectInst &SI) {
2946     enqueueUsers(SI);
2947     return false;
2948   }
2949 };
2950 }
2951 
2952 /// \brief Strip aggregate type wrapping.
2953 ///
2954 /// This removes no-op aggregate types wrapping an underlying type. It will
2955 /// strip as many layers of types as it can without changing either the type
2956 /// size or the allocated size.
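///
/// For example, { { i32 } } strips to i32 and [1 x i64] strips to i64, but
/// { i32, i32 } is returned unchanged since peeling a layer would change the
/// size.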
2957 static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) {
2958   if (Ty->isSingleValueType())
2959     return Ty;
2960 
2961   uint64_t AllocSize = DL.getTypeAllocSize(Ty);
2962   uint64_t TypeSize = DL.getTypeSizeInBits(Ty);
2963 
2964   Type *InnerTy;
2965   if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
2966     InnerTy = ArrTy->getElementType();
2967   } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
2968     const StructLayout *SL = DL.getStructLayout(STy);
2969     unsigned Index = SL->getElementContainingOffset(0);
2970     InnerTy = STy->getElementType(Index);
2971   } else {
2972     return Ty;
2973   }
2974 
2975   if (AllocSize > DL.getTypeAllocSize(InnerTy) ||
2976       TypeSize > DL.getTypeSizeInBits(InnerTy))
2977     return Ty;
2978 
2979   return stripAggregateTypeWrapping(DL, InnerTy);
2980 }
2981 
2982 /// \brief Try to find a partition of the aggregate type passed in for a given
2983 /// offset and size.
2984 ///
2985 /// This recurses through the aggregate type and tries to compute a subtype
2986 /// based on the offset and size. When the offset and size span a sub-section
2987 /// of an array, it will even compute a new array type for that sub-section,
2988 /// and the same for structs.
2989 ///
2990 /// Note that this routine is very strict and tries to find a partition of the
2991 /// type which produces the *exact* right offset and size. It is not forgiving
2992 /// when the size or offset causes either end of the type-based partition to be off.
2993 /// Also, this is a best-effort routine. It is reasonable to give up and not
2994 /// return a type if necessary.
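///
/// For example, given { i32, i32, i64 }, an (Offset, Size) of (0, 8) yields
/// the sub-struct { i32, i32 }, (4, 4) yields i32, and (2, 4) yields nullptr
/// because it straddles two elements.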
2995 static Type *getTypePartition(const DataLayout &DL, Type *Ty,
2996                               uint64_t Offset, uint64_t Size) {
2997   if (Offset == 0 && DL.getTypeAllocSize(Ty) == Size)
2998     return stripAggregateTypeWrapping(DL, Ty);
2999   if (Offset > DL.getTypeAllocSize(Ty) ||
3000       (DL.getTypeAllocSize(Ty) - Offset) < Size)
3001     return nullptr;
3002 
3003   if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) {
3004     // We can't partition pointers...
3005     if (SeqTy->isPointerTy())
3006       return nullptr;
3007 
3008     Type *ElementTy = SeqTy->getElementType();
3009     uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
3010     uint64_t NumSkippedElements = Offset / ElementSize;
3011     if (ArrayType *ArrTy = dyn_cast<ArrayType>(SeqTy)) {
3012       if (NumSkippedElements >= ArrTy->getNumElements())
3013         return nullptr;
3014     } else if (VectorType *VecTy = dyn_cast<VectorType>(SeqTy)) {
3015       if (NumSkippedElements >= VecTy->getNumElements())
3016         return nullptr;
3017     }
3018     Offset -= NumSkippedElements * ElementSize;
3019 
3020     // First check if we need to recurse.
3021     if (Offset > 0 || Size < ElementSize) {
3022       // Bail if the partition ends in a different array element.
3023       if ((Offset + Size) > ElementSize)
3024         return nullptr;
3025       // Recurse through the element type trying to peel off offset bytes.
3026       return getTypePartition(DL, ElementTy, Offset, Size);
3027     }
3028     assert(Offset == 0);
3029 
3030     if (Size == ElementSize)
3031       return stripAggregateTypeWrapping(DL, ElementTy);
3032     assert(Size > ElementSize);
3033     uint64_t NumElements = Size / ElementSize;
3034     if (NumElements * ElementSize != Size)
3035       return nullptr;
3036     return ArrayType::get(ElementTy, NumElements);
3037   }
3038 
3039   StructType *STy = dyn_cast<StructType>(Ty);
3040   if (!STy)
3041     return nullptr;
3042 
3043   const StructLayout *SL = DL.getStructLayout(STy);
3044   if (Offset >= SL->getSizeInBytes())
3045     return nullptr;
3046   uint64_t EndOffset = Offset + Size;
3047   if (EndOffset > SL->getSizeInBytes())
3048     return nullptr;
3049 
3050   unsigned Index = SL->getElementContainingOffset(Offset);
3051   Offset -= SL->getElementOffset(Index);
3052 
3053   Type *ElementTy = STy->getElementType(Index);
3054   uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
3055   if (Offset >= ElementSize)
3056     return nullptr; // The offset points into alignment padding.
3057 
3058   // See if any partition must be contained by the element.
3059   if (Offset > 0 || Size < ElementSize) {
3060     if ((Offset + Size) > ElementSize)
3061       return nullptr;
3062     return getTypePartition(DL, ElementTy, Offset, Size);
3063   }
3064   assert(Offset == 0);
3065 
3066   if (Size == ElementSize)
3067     return stripAggregateTypeWrapping(DL, ElementTy);
3068 
3069   StructType::element_iterator EI = STy->element_begin() + Index,
3070                                EE = STy->element_end();
3071   if (EndOffset < SL->getSizeInBytes()) {
3072     unsigned EndIndex = SL->getElementContainingOffset(EndOffset);
3073     if (Index == EndIndex)
3074       return nullptr; // Within a single element and its padding.
3075 
3076     // Don't try to form "natural" types if the elements don't line up with the
3077     // expected size.
3078     // FIXME: We could potentially recurse down through the last element in the
3079     // sub-struct to find a natural end point.
3080     if (SL->getElementOffset(EndIndex) != EndOffset)
3081       return nullptr;
3082 
3083     assert(Index < EndIndex);
3084     EE = STy->element_begin() + EndIndex;
3085   }
3086 
3087   // Try to build up a sub-structure.
3088   StructType *SubTy = StructType::get(STy->getContext(), makeArrayRef(EI, EE),
3089                                       STy->isPacked());
3090   const StructLayout *SubSL = DL.getStructLayout(SubTy);
3091   if (Size != SubSL->getSizeInBytes())
3092     return nullptr; // The sub-struct doesn't have quite the size needed.
3093 
3094   return SubTy;
3095 }
3096 
3097 /// \brief Rewrite an alloca partition's users.
3098 ///
3099 /// This routine drives both of the rewriting goals of the SROA pass. It tries
3100 /// to rewrite uses of an alloca partition to be conducive for SSA value
3101 /// promotion. If the partition needs a new, more refined alloca, this will
3102 /// build that new alloca, preserving as much type information as possible, and
3103 /// rewrite the uses of the old alloca to point at the new one and have the
3104 /// appropriate new offsets. It also evaluates how successful the rewrite was
3105 /// at enabling promotion and if it was successful queues the alloca to be
3106 /// promoted.
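///
/// As a sketch, a 16-byte alloca used only as two disjoint 8-byte regions is
/// typically rewritten into two new i64-typed allocas (on targets where i64
/// is legal), each of which can then be promoted independently.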
3107 bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &S,
3108                             AllocaSlices::iterator B, AllocaSlices::iterator E,
3109                             int64_t BeginOffset, int64_t EndOffset,
3110                             ArrayRef<AllocaSlices::iterator> SplitUses) {
3111   assert(BeginOffset < EndOffset);
3112   uint64_t SliceSize = EndOffset - BeginOffset;
3113 
3114   // Try to compute a friendly type for this partition of the alloca. This
3115   // won't always succeed, in which case we fall back to a legal integer type
3116   // or an i8 array of an appropriate size.
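  // For instance, an 8-byte partition with no usable common type becomes an
  // i64 on targets where i64 is a legal integer type, while a 12-byte
  // partition falls back to [12 x i8].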
3117   Type *SliceTy = nullptr;
3118   if (Type *CommonUseTy = findCommonType(B, E, EndOffset))
3119     if (DL->getTypeAllocSize(CommonUseTy) >= SliceSize)
3120       SliceTy = CommonUseTy;
3121   if (!SliceTy)
3122     if (Type *TypePartitionTy = getTypePartition(*DL, AI.getAllocatedType(),
3123                                                  BeginOffset, SliceSize))
3124       SliceTy = TypePartitionTy;
3125   if ((!SliceTy || (SliceTy->isArrayTy() &&
3126                     SliceTy->getArrayElementType()->isIntegerTy())) &&
3127       DL->isLegalInteger(SliceSize * 8))
3128     SliceTy = Type::getIntNTy(*C, SliceSize * 8);
3129   if (!SliceTy)
3130     SliceTy = ArrayType::get(Type::getInt8Ty(*C), SliceSize);
3131   assert(DL->getTypeAllocSize(SliceTy) >= SliceSize);
3132 
3133   bool IsVectorPromotable = isVectorPromotionViable(
3134       *DL, SliceTy, S, BeginOffset, EndOffset, B, E, SplitUses);
3135 
3136   bool IsIntegerPromotable =
3137       !IsVectorPromotable &&
3138       isIntegerWideningViable(*DL, SliceTy, BeginOffset, S, B, E, SplitUses);
3139 
3140   // Check for the case where we're going to rewrite to a new alloca of the
3141   // exact same type as the original, and with the same access offsets. In that
3142   // case, re-use the existing alloca, but still run through the rewriter to
3143   // perform phi and select speculation.
3144   AllocaInst *NewAI;
3145   if (SliceTy == AI.getAllocatedType()) {
3146     assert(BeginOffset == 0 &&
3147            "Non-zero begin offset but same alloca type");
3148     NewAI = &AI;
3149     // FIXME: We should be able to bail at this point with "nothing changed".
3150     // FIXME: We might want to defer PHI speculation until after here.
3151   } else {
3152     unsigned Alignment = AI.getAlignment();
3153     if (!Alignment) {
3154       // The minimum alignment which users can rely on when the explicit
3155       // alignment is omitted or zero is that required by the ABI for this
3156       // type.
3157       Alignment = DL->getABITypeAlignment(AI.getAllocatedType());
3158     }
3159     Alignment = MinAlign(Alignment, BeginOffset);
3160     // If we will get at least this much alignment from the type alone, leave
3161     // the alloca's alignment unconstrained.
3162     if (Alignment <= DL->getABITypeAlignment(SliceTy))
3163       Alignment = 0;
3164     NewAI = new AllocaInst(SliceTy, nullptr, Alignment,
3165                            AI.getName() + ".sroa." + Twine(B - S.begin()), &AI);
3166     ++NumNewAllocas;
3167   }
3168 
3169   DEBUG(dbgs() << "Rewriting alloca partition "
3170                << "[" << BeginOffset << "," << EndOffset << ") to: " << *NewAI
3171                << "\n");
3172 
3173   // Track the high watermark on the worklist as it is only relevant for
3174   // promoted allocas. We will reset it to this point if the alloca is not in
3175   // fact scheduled for promotion.
3176   unsigned PPWOldSize = PostPromotionWorklist.size();
3177   unsigned NumUses = 0;
3178   SmallPtrSet<PHINode *, 8> PHIUsers;
3179   SmallPtrSet<SelectInst *, 8> SelectUsers;
3180 
3181   AllocaSliceRewriter Rewriter(*DL, S, *this, AI, *NewAI, BeginOffset,
3182                                EndOffset, IsVectorPromotable,
3183                                IsIntegerPromotable, PHIUsers, SelectUsers);
3184   bool Promotable = true;
3185   for (ArrayRef<AllocaSlices::iterator>::const_iterator SUI = SplitUses.begin(),
3186                                                         SUE = SplitUses.end();
3187        SUI != SUE; ++SUI) {
3188     DEBUG(dbgs() << "  rewriting split ");
3189     DEBUG(S.printSlice(dbgs(), *SUI, ""));
3190     Promotable &= Rewriter.visit(*SUI);
3191     ++NumUses;
3192   }
3193   for (AllocaSlices::iterator I = B; I != E; ++I) {
3194     DEBUG(dbgs() << "  rewriting ");
3195     DEBUG(S.printSlice(dbgs(), I, ""));
3196     Promotable &= Rewriter.visit(I);
3197     ++NumUses;
3198   }
3199 
3200   NumAllocaPartitionUses += NumUses;
3201   MaxUsesPerAllocaPartition =
3202       std::max<unsigned>(NumUses, MaxUsesPerAllocaPartition);
3203 
3204   // Now that we've processed all the slices in the new partition, check if any
3205   // PHIs or Selects would block promotion.
3206   for (SmallPtrSetImpl<PHINode *>::iterator I = PHIUsers.begin(),
3207                                             E = PHIUsers.end();
3208        I != E; ++I)
3209     if (!isSafePHIToSpeculate(**I, DL)) {
3210       Promotable = false;
3211       PHIUsers.clear();
3212       SelectUsers.clear();
3213       break;
3214     }
3215   for (SmallPtrSetImpl<SelectInst *>::iterator I = SelectUsers.begin(),
3216                                                E = SelectUsers.end();
3217        I != E; ++I)
3218     if (!isSafeSelectToSpeculate(**I, DL)) {
3219       Promotable = false;
3220       PHIUsers.clear();
3221       SelectUsers.clear();
3222       break;
3223     }
3224 
3225   if (Promotable) {
3226     if (PHIUsers.empty() && SelectUsers.empty()) {
3227       // Promote the alloca.
3228       PromotableAllocas.push_back(NewAI);
3229     } else {
3230       // If we have either PHIs or Selects to speculate, add them to those
3231       // worklists and re-queue the new alloca so that we promote it on the
3232       // next iteration.
3233       for (SmallPtrSetImpl<PHINode *>::iterator I = PHIUsers.begin(),
3234                                                 E = PHIUsers.end();
3235            I != E; ++I)
3236         SpeculatablePHIs.insert(*I);
3237       for (SmallPtrSetImpl<SelectInst *>::iterator I = SelectUsers.begin(),
3238                                                    E = SelectUsers.end();
3239            I != E; ++I)
3240         SpeculatableSelects.insert(*I);
3241       Worklist.insert(NewAI);
3242     }
3243   } else {
3244     // If we can't promote the alloca, iterate on it to check for new
3245     // refinements exposed by splitting the current alloca. Don't iterate on an
3246     // alloca which didn't actually change and didn't get promoted.
3247     if (NewAI != &AI)
3248       Worklist.insert(NewAI);
3249 
3250     // Drop any post-promotion work items if promotion didn't happen.
3251     while (PostPromotionWorklist.size() > PPWOldSize)
3252       PostPromotionWorklist.pop_back();
3253   }
3254 
3255   return true;
3256 }
3257 
3258 static void
3259 removeFinishedSplitUses(SmallVectorImpl<AllocaSlices::iterator> &SplitUses,
3260                         uint64_t &MaxSplitUseEndOffset, uint64_t Offset) {
3261   if (Offset >= MaxSplitUseEndOffset) {
3262     SplitUses.clear();
3263     MaxSplitUseEndOffset = 0;
3264     return;
3265   }
3266 
3267   size_t SplitUsesOldSize = SplitUses.size();
3268   SplitUses.erase(std::remove_if(SplitUses.begin(), SplitUses.end(),
3269                                  [Offset](const AllocaSlices::iterator &I) {
3270                     return I->endOffset() <= Offset;
3271                   }),
3272                   SplitUses.end());
3273   if (SplitUsesOldSize == SplitUses.size())
3274     return;
3275 
3276   // Recompute the max. While this is linear, so is remove_if.
3277   MaxSplitUseEndOffset = 0;
3278   for (SmallVectorImpl<AllocaSlices::iterator>::iterator
3279            SUI = SplitUses.begin(),
3280            SUE = SplitUses.end();
3281        SUI != SUE; ++SUI)
3282     MaxSplitUseEndOffset = std::max((*SUI)->endOffset(), MaxSplitUseEndOffset);
3283 }
3284 
3285 /// \brief Walks the slices of an alloca and forms partitions based on them,
3286 /// rewriting each of their uses.
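///
/// For example, given an unsplittable slice [0,8) overlapping a splittable
/// slice [0,16), this forms the partition [0,8) first and then rewrites the
/// splittable tail as a separate [8,16) partition.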
3287 bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &S) {
3288   if (S.begin() == S.end())
3289     return false;
3290 
3291   unsigned NumPartitions = 0;
3292   bool Changed = false;
3293   SmallVector<AllocaSlices::iterator, 4> SplitUses;
3294   uint64_t MaxSplitUseEndOffset = 0;
3295 
3296   uint64_t BeginOffset = S.begin()->beginOffset();
3297 
3298   for (AllocaSlices::iterator SI = S.begin(), SJ = std::next(SI), SE = S.end();
3299        SI != SE; SI = SJ) {
3300     uint64_t MaxEndOffset = SI->endOffset();
3301 
3302     if (!SI->isSplittable()) {
3303       // When we're forming an unsplittable region, it must always start at the
3304       // first slice and will extend through its end.
3305       assert(BeginOffset == SI->beginOffset());
3306 
3307       // Form a partition including all of the overlapping slices with this
3308       // unsplittable slice.
3309       while (SJ != SE && SJ->beginOffset() < MaxEndOffset) {
3310         if (!SJ->isSplittable())
3311           MaxEndOffset = std::max(MaxEndOffset, SJ->endOffset());
3312         ++SJ;
3313       }
3314     } else {
3315       assert(SI->isSplittable()); // Established above.
3316 
3317       // Collect all of the overlapping splittable slices.
3318       while (SJ != SE && SJ->beginOffset() < MaxEndOffset &&
3319              SJ->isSplittable()) {
3320         MaxEndOffset = std::max(MaxEndOffset, SJ->endOffset());
3321         ++SJ;
3322       }
3323 
3324       // Back up MaxEndOffset and SJ if we ended the span early when
3325       // encountering an unsplittable slice.
3326       if (SJ != SE && SJ->beginOffset() < MaxEndOffset) {
3327         assert(!SJ->isSplittable());
3328         MaxEndOffset = SJ->beginOffset();
3329       }
3330     }
3331 
3332     // Check if we have managed to move the end offset forward yet. If so,
3333     // we'll have to rewrite uses and erase old split uses.
3334     if (BeginOffset < MaxEndOffset) {
3335       // Rewrite a sequence of overlapping slices.
3336       Changed |=
3337           rewritePartition(AI, S, SI, SJ, BeginOffset, MaxEndOffset, SplitUses);
3338       ++NumPartitions;
3339 
3340       removeFinishedSplitUses(SplitUses, MaxSplitUseEndOffset, MaxEndOffset);
3341     }
3342 
3343     // Accumulate all the splittable slices from the [SI,SJ) region which
3344     // overlap going forward.
3345     for (AllocaSlices::iterator SK = SI; SK != SJ; ++SK)
3346       if (SK->isSplittable() && SK->endOffset() > MaxEndOffset) {
3347         SplitUses.push_back(SK);
3348         MaxSplitUseEndOffset = std::max(SK->endOffset(), MaxSplitUseEndOffset);
3349       }
3350 
3351     // If we're already at the end and we have no split uses, we're done.
3352     if (SJ == SE && SplitUses.empty())
3353       break;
3354 
3355     // If we have no split uses or no gap in offsets, we're ready to move to
3356     // the next slice.
3357     if (SplitUses.empty() || (SJ != SE && MaxEndOffset == SJ->beginOffset())) {
3358       BeginOffset = SJ->beginOffset();
3359       continue;
3360     }
3361 
3362     // Even if we have split slices, if the next slice is splittable and the
3363     // split slices reach it, we can simply set up the beginning offset of the
3364     // next iteration to bridge between them.
3365     if (SJ != SE && SJ->isSplittable() &&
3366         MaxSplitUseEndOffset > SJ->beginOffset()) {
3367       BeginOffset = MaxEndOffset;
3368       continue;
3369     }
3370 
3371     // Otherwise, we have a tail of split slices. Rewrite them with an empty
3372     // range of slices.
3373     uint64_t PostSplitEndOffset =
3374         SJ == SE ? MaxSplitUseEndOffset : SJ->beginOffset();
3375 
3376     Changed |= rewritePartition(AI, S, SJ, SJ, MaxEndOffset, PostSplitEndOffset,
3377                                 SplitUses);
3378     ++NumPartitions;
3379 
3380     if (SJ == SE)
3381       break; // Skip the rest, we don't need to do any cleanup.
3382 
3383     removeFinishedSplitUses(SplitUses, MaxSplitUseEndOffset,
3384                             PostSplitEndOffset);
3385 
3386     // Now just reset the begin offset for the next iteration.
3387     BeginOffset = SJ->beginOffset();
3388   }
3389 
3390   NumAllocaPartitions += NumPartitions;
3391   MaxPartitionsPerAlloca =
3392       std::max<unsigned>(NumPartitions, MaxPartitionsPerAlloca);
3393 
3394   return Changed;
3395 }
3396 
3397 /// \brief Clobber a use with undef, deleting the used value if it becomes dead.
3398 void SROA::clobberUse(Use &U) {
3399   Value *OldV = U;
3400   // Replace the use with an undef value.
3401   U = UndefValue::get(OldV->getType());
3402 
3403   // Check whether this made an instruction dead. We have to garbage collect
3404   // all the dead instructions to ensure the uses of any alloca end up being
3405   // minimal.
3406   if (Instruction *OldI = dyn_cast<Instruction>(OldV))
3407     if (isInstructionTriviallyDead(OldI)) {
3408       DeadInsts.insert(OldI);
3409     }
3410 }
3411 
3412 /// \brief Analyze an alloca for SROA.
3413 ///
3414 /// This analyzes the alloca to ensure we can reason about it, builds
3415 /// the slices of the alloca, and then hands it off to be split and
3416 /// rewritten as needed.
3417 bool SROA::runOnAlloca(AllocaInst &AI) {
3418   DEBUG(dbgs() << "SROA alloca: " << AI << "\n");
3419   ++NumAllocasAnalyzed;
3420 
3421   // Special case dead allocas, as they're trivial.
3422   if (AI.use_empty()) {
3423     AI.eraseFromParent();
3424     return true;
3425   }
3426 
3427   // Skip alloca forms that this analysis can't handle.
3428   if (AI.isArrayAllocation() || !AI.getAllocatedType()->isSized() ||
3429       DL->getTypeAllocSize(AI.getAllocatedType()) == 0)
3430     return false;
3431 
3432   bool Changed = false;
3433 
3434   // First, split any FCA loads and stores touching this alloca to expose
3435   // better splitting and promotion opportunities.
3436   AggLoadStoreRewriter AggRewriter(*DL);
3437   Changed |= AggRewriter.rewrite(AI);
3438 
3439   // Build the slices using a recursive instruction-visiting builder.
3440   AllocaSlices S(*DL, AI);
3441   DEBUG(S.print(dbgs()));
3442   if (S.isEscaped())
3443     return Changed;
3444 
3445   // Delete all the dead users of this alloca before splitting and rewriting it.
3446   for (AllocaSlices::dead_user_iterator DI = S.dead_user_begin(),
3447                                         DE = S.dead_user_end();
3448        DI != DE; ++DI) {
3449     // Free up everything used by this instruction.
3450     for (Use &DeadOp : (*DI)->operands())
3451       clobberUse(DeadOp);
3452 
3453     // Now replace the uses of this instruction.
3454     (*DI)->replaceAllUsesWith(UndefValue::get((*DI)->getType()));
3455 
3456     // And mark it for deletion.
3457     DeadInsts.insert(*DI);
3458     Changed = true;
3459   }
3460   for (AllocaSlices::dead_op_iterator DO = S.dead_op_begin(),
3461                                       DE = S.dead_op_end();
3462        DO != DE; ++DO) {
3463     clobberUse(**DO);
3464     Changed = true;
3465   }
3466 
3467   // No slices to split. Leave the dead alloca for a later pass to clean up.
3468   if (S.begin() == S.end())
3469     return Changed;
3470 
3471   Changed |= splitAlloca(AI, S);
3472 
3473   DEBUG(dbgs() << "  Speculating PHIs\n");
3474   while (!SpeculatablePHIs.empty())
3475     speculatePHINodeLoads(*SpeculatablePHIs.pop_back_val());
3476 
3477   DEBUG(dbgs() << "  Speculating Selects\n");
3478   while (!SpeculatableSelects.empty())
3479     speculateSelectInstLoads(*SpeculatableSelects.pop_back_val());
3480 
3481   return Changed;
3482 }
3483 
3484 /// \brief Delete the dead instructions accumulated in this run.
3485 ///
3486 /// Recursively deletes the dead instructions we've accumulated. This is done
3487 /// at the very end to maximize locality of the recursive delete and to
3488 /// minimize the problems of invalidated instruction pointers as such pointers
3489 /// are used heavily in the intermediate stages of the algorithm.
3490 ///
3491 /// We also record the alloca instructions deleted here so that they aren't
3492 /// subsequently handed to mem2reg to promote.
3493 void SROA::deleteDeadInstructions(SmallPtrSet<AllocaInst*, 4> &DeletedAllocas) {
3494   while (!DeadInsts.empty()) {
3495     Instruction *I = DeadInsts.pop_back_val();
3496     DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");
3497 
3498     I->replaceAllUsesWith(UndefValue::get(I->getType()));
3499 
3500     for (Use &Operand : I->operands())
3501       if (Instruction *U = dyn_cast<Instruction>(Operand)) {
3502         // Zero out the operand and see if it becomes trivially dead.
3503         Operand = nullptr;
3504         if (isInstructionTriviallyDead(U))
3505           DeadInsts.insert(U);
3506       }
3507 
3508     if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
3509       DeletedAllocas.insert(AI);
3510 
3511     ++NumDeleted;
3512     I->eraseFromParent();
3513   }
3514 }
3515 
3516 static void enqueueUsersInWorklist(Instruction &I,
3517                                    SmallVectorImpl<Instruction *> &Worklist,
3518                                    SmallPtrSet<Instruction *, 8> &Visited) {
3519   for (User *U : I.users())
3520     if (Visited.insert(cast<Instruction>(U)))
3521       Worklist.push_back(cast<Instruction>(U));
3522 }
3523 
3524 /// \brief Promote the allocas, using the best available technique.
3525 ///
3526 /// This attempts to promote whatever allocas have been identified as viable in
3527 /// the PromotableAllocas list. If that list is empty, there is nothing to do.
3528 /// If there is a domtree available, we attempt to promote using the full power
3529 /// of mem2reg. Otherwise, we build and use the AllocaPromoter above which is
3530 /// based on the SSAUpdater utilities. This function returns whether any
3531 /// promotion occurred.
3532 bool SROA::promoteAllocas(Function &F) {
3533   if (PromotableAllocas.empty())
3534     return false;
3535 
3536   NumPromoted += PromotableAllocas.size();
3537 
3538   if (DT && !ForceSSAUpdater) {
3539     DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
3540     PromoteMemToReg(PromotableAllocas, *DT);
3541     PromotableAllocas.clear();
3542     return true;
3543   }
3544 
3545   DEBUG(dbgs() << "Promoting allocas with SSAUpdater...\n");
3546   SSAUpdater SSA;
3547   DIBuilder DIB(*F.getParent());
3548   SmallVector<Instruction *, 64> Insts;
3549 
3550   // We need a worklist to walk the uses of each alloca.
3551   SmallVector<Instruction *, 8> Worklist;
3552   SmallPtrSet<Instruction *, 8> Visited;
3553   SmallVector<Instruction *, 32> DeadInsts;
3554 
3555   for (unsigned Idx = 0, Size = PromotableAllocas.size(); Idx != Size; ++Idx) {
3556     AllocaInst *AI = PromotableAllocas[Idx];
3557     Insts.clear();
3558     Worklist.clear();
3559     Visited.clear();
3560 
3561     enqueueUsersInWorklist(*AI, Worklist, Visited);
3562 
3563     while (!Worklist.empty()) {
3564       Instruction *I = Worklist.pop_back_val();
3565 
3566       // FIXME: Currently the SSAUpdater infrastructure doesn't reason about
3567       // lifetime intrinsics and so we strip them (and the bitcasts+GEPs
3568       // leading to them) here. Eventually it should use them to optimize the
3569       // scalar values produced.
3570       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3571         assert(II->getIntrinsicID() == Intrinsic::lifetime_start ||
3572                II->getIntrinsicID() == Intrinsic::lifetime_end);
3573         II->eraseFromParent();
3574         continue;
3575       }
3576 
3577       // Push the loads and stores we find onto the list. SROA will already
3578       // have validated that all loads and stores are viable candidates for
3579       // promotion.
3580       if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
3581         assert(LI->getType() == AI->getAllocatedType());
3582         Insts.push_back(LI);
3583         continue;
3584       }
3585       if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
3586         assert(SI->getValueOperand()->getType() == AI->getAllocatedType());
3587         Insts.push_back(SI);
3588         continue;
3589       }
3590 
3591       // For everything else, we know that only no-op bitcasts and GEPs will
3592       // make it this far, just recurse through them and recall them for later
3593       // removal.
3594       DeadInsts.push_back(I);
3595       enqueueUsersInWorklist(*I, Worklist, Visited);
3596     }
3597     AllocaPromoter(Insts, SSA, *AI, DIB).run(Insts);
3598     while (!DeadInsts.empty())
3599       DeadInsts.pop_back_val()->eraseFromParent();
3600     AI->eraseFromParent();
3601   }
3602 
3603   PromotableAllocas.clear();
3604   return true;
3605 }
3606 
3607 bool SROA::runOnFunction(Function &F) {
3608   if (skipOptnoneFunction(F))
3609     return false;
3610 
3611   DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
3612   C = &F.getContext();
3613   DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
3614   if (!DLP) {
3615     DEBUG(dbgs() << "  Skipping SROA -- no target data!\n");
3616     return false;
3617   }
3618   DL = &DLP->getDataLayout();
3619   DominatorTreeWrapperPass *DTWP =
3620       getAnalysisIfAvailable<DominatorTreeWrapperPass>();
3621   DT = DTWP ? &DTWP->getDomTree() : nullptr;
3622 
3623   BasicBlock &EntryBB = F.getEntryBlock();
3624   for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end());
3625        I != E; ++I)
3626     if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
3627       Worklist.insert(AI);
3628 
3629   bool Changed = false;
3630   // A set of deleted alloca instruction pointers which should be removed from
3631   // the list of promotable allocas.
3632   SmallPtrSet<AllocaInst *, 4> DeletedAllocas;
3633 
3634   do {
3635     while (!Worklist.empty()) {
3636       Changed |= runOnAlloca(*Worklist.pop_back_val());
3637       deleteDeadInstructions(DeletedAllocas);
3638 
3639       // Remove the deleted allocas from various lists so that we don't try to
3640       // continue processing them.
3641       if (!DeletedAllocas.empty()) {
3642         auto IsInSet = [&](AllocaInst *AI) {
3643           return DeletedAllocas.count(AI);
3644         };
3645         Worklist.remove_if(IsInSet);
3646         PostPromotionWorklist.remove_if(IsInSet);
3647         PromotableAllocas.erase(std::remove_if(PromotableAllocas.begin(),
3648                                                PromotableAllocas.end(),
3649                                                IsInSet),
3650                                 PromotableAllocas.end());
3651         DeletedAllocas.clear();
3652       }
3653     }
3654 
3655     Changed |= promoteAllocas(F);
3656 
3657     Worklist = PostPromotionWorklist;
3658     PostPromotionWorklist.clear();
3659   } while (!Worklist.empty());
3660 
3661   return Changed;
3662 }
3663 
3664 void SROA::getAnalysisUsage(AnalysisUsage &AU) const {
3665   if (RequiresDomTree)
3666     AU.addRequired<DominatorTreeWrapperPass>();
3667   AU.setPreservesCFG();
3668 }
3669