1 //===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 // This pass implements the Bottom Up SLP vectorizer. It detects consecutive
10 // stores that can be put together into vector-stores. Next, it attempts to
11 // construct a vectorizable tree using the use-def chains. If a profitable tree
12 // was found, the SLP vectorizer performs vectorization on the tree.
13 //
14 // The pass is inspired by the work described in the paper:
15 // "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
16 //
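// For illustration only (whether a given pattern is actually vectorized
// depends on the target's cost model), two consecutive scalar stores such as
//
//   store i32 %a, i32* %p
//   store i32 %b, i32* %p1      ; %p1 = getelementptr i32, i32* %p, i64 1
//
// can be rewritten as a single store of a <2 x i32> value that is assembled
// from %a and %b with insertelement instructions.
//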
17 //===----------------------------------------------------------------------===//
18 #include "llvm/Transforms/Vectorize/SLPVectorizer.h"
19 #include "llvm/ADT/Optional.h"
20 #include "llvm/ADT/PostOrderIterator.h"
21 #include "llvm/ADT/SetVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/CodeMetrics.h"
24 #include "llvm/Analysis/GlobalsModRef.h"
25 #include "llvm/Analysis/LoopAccessAnalysis.h"
26 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/Analysis/VectorUtils.h"
29 #include "llvm/IR/DataLayout.h"
30 #include "llvm/IR/Dominators.h"
31 #include "llvm/IR/IRBuilder.h"
32 #include "llvm/IR/Instructions.h"
33 #include "llvm/IR/IntrinsicInst.h"
34 #include "llvm/IR/Module.h"
35 #include "llvm/IR/NoFolder.h"
36 #include "llvm/IR/Type.h"
37 #include "llvm/IR/Value.h"
38 #include "llvm/IR/Verifier.h"
39 #include "llvm/Pass.h"
40 #include "llvm/Support/CommandLine.h"
41 #include "llvm/Support/Debug.h"
42 #include "llvm/Support/raw_ostream.h"
43 #include "llvm/Transforms/Vectorize.h"
44 #include <algorithm>
45 #include <memory>
46
47 using namespace llvm;
48 using namespace slpvectorizer;
49
50 #define SV_NAME "slp-vectorizer"
51 #define DEBUG_TYPE "SLP"
52
53 STATISTIC(NumVectorInstructions, "Number of vector instructions generated");
54
55 static cl::opt<int>
56 SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
57 cl::desc("Only vectorize if you gain more than this "
58 "number "));
59
60 static cl::opt<bool>
61 ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
62 cl::desc("Attempt to vectorize horizontal reductions"));
63
64 static cl::opt<bool> ShouldStartVectorizeHorAtStore(
65 "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
66 cl::desc(
67 "Attempt to vectorize horizontal reductions feeding into a store"));
68
69 static cl::opt<int>
70 MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
71 cl::desc("Attempt to vectorize for this register size in bits"));
72
73 /// Limits the size of scheduling regions in a block.
74 /// It avoids long compile times for _very_ large blocks where vector
75 /// instructions are spread over a wide range.
76 /// This limit is way higher than needed by real-world functions.
77 static cl::opt<int>
78 ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
79 cl::desc("Limit the size of the SLP scheduling region per block"));
80
81 static cl::opt<int> MinVectorRegSizeOption(
82 "slp-min-reg-size", cl::init(128), cl::Hidden,
83 cl::desc("Attempt to vectorize for this register size in bits"));
84
85 // FIXME: Set this via cl::opt to allow overriding.
86 static const unsigned RecursionMaxDepth = 12;
87
88 // Limit the number of alias checks. The limit is chosen so that
89 // it has no negative effect on the llvm benchmarks.
90 static const unsigned AliasedCheckLimit = 10;
91
92 // Another limit for the alias checks: The maximum distance between load/store
93 // instructions where alias checks are done.
94 // This limit is useful for very large basic blocks.
95 static const unsigned MaxMemDepDistance = 160;
96
97 /// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
98 /// regions to be handled.
99 static const int MinScheduleRegionSize = 16;
100
101 /// \brief Predicate for the element types that the SLP vectorizer supports.
102 ///
103 /// The most important things to filter here are types which are invalid in
104 /// LLVM vectors. We also filter target specific types which have absolutely no
105 /// meaningful vectorization path such as x86_fp80 and ppc_fp128. This just
106 /// avoids spending time checking the cost model and realizing that they will
107 /// be inevitably scalarized.
108 static bool isValidElementType(Type *Ty) {
109 return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
110 !Ty->isPPC_FP128Ty();
111 }
112
113 /// \returns the parent basic block if all of the instructions in \p VL
114 /// are in the same block or null otherwise.
115 static BasicBlock *getSameBlock(ArrayRef<Value *> VL) {
116 Instruction *I0 = dyn_cast<Instruction>(VL[0]);
117 if (!I0)
118 return nullptr;
119 BasicBlock *BB = I0->getParent();
120 for (int i = 1, e = VL.size(); i < e; i++) {
121 Instruction *I = dyn_cast<Instruction>(VL[i]);
122 if (!I)
123 return nullptr;
124
125 if (BB != I->getParent())
126 return nullptr;
127 }
128 return BB;
129 }
130
131 /// \returns True if all of the values in \p VL are constants.
132 static bool allConstant(ArrayRef<Value *> VL) {
133 for (Value *i : VL)
134 if (!isa<Constant>(i))
135 return false;
136 return true;
137 }
138
139 /// \returns True if all of the values in \p VL are identical.
140 static bool isSplat(ArrayRef<Value *> VL) {
141 for (unsigned i = 1, e = VL.size(); i < e; ++i)
142 if (VL[i] != VL[0])
143 return false;
144 return true;
145 }
146
147 ///\returns Opcode that can be clubbed with \p Op to create an alternate
148 /// sequence which can later be merged as a ShuffleVector instruction.
149 static unsigned getAltOpcode(unsigned Op) {
150 switch (Op) {
151 case Instruction::FAdd:
152 return Instruction::FSub;
153 case Instruction::FSub:
154 return Instruction::FAdd;
155 case Instruction::Add:
156 return Instruction::Sub;
157 case Instruction::Sub:
158 return Instruction::Add;
159 default:
160 return 0;
161 }
162 }
163
164 ///\returns bool representing if Opcode \p Op can be part
165 /// of an alternate sequence which can later be merged as
166 /// a ShuffleVector instruction.
167 static bool canCombineAsAltInst(unsigned Op) {
168 return Op == Instruction::FAdd || Op == Instruction::FSub ||
169 Op == Instruction::Sub || Op == Instruction::Add;
170 }
171
172 /// \returns ShuffleVector instruction if instructions in \p VL have
173 /// alternate fadd,fsub / fsub,fadd/add,sub/sub,add sequence.
174 /// (i.e. e.g. opcodes of fadd,fsub,fadd,fsub...)
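/// For example (an illustrative sketch): a bundle whose scalars have opcodes
/// {fadd, fsub, fadd, fsub} is accepted here; it is later emitted as one
/// vector fadd, one vector fsub, and a shufflevector that picks the even
/// lanes from the fadd result and the odd lanes from the fsub result.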
175 static unsigned isAltInst(ArrayRef<Value *> VL) {
176 Instruction *I0 = dyn_cast<Instruction>(VL[0]);
177 unsigned Opcode = I0->getOpcode();
178 unsigned AltOpcode = getAltOpcode(Opcode);
179 for (int i = 1, e = VL.size(); i < e; i++) {
180 Instruction *I = dyn_cast<Instruction>(VL[i]);
181 if (!I || I->getOpcode() != ((i & 1) ? AltOpcode : Opcode))
182 return 0;
183 }
184 return Instruction::ShuffleVector;
185 }
186
187 /// \returns The opcode if all of the Instructions in \p VL have the same
188 /// opcode, or zero.
189 static unsigned getSameOpcode(ArrayRef<Value *> VL) {
190 Instruction *I0 = dyn_cast<Instruction>(VL[0]);
191 if (!I0)
192 return 0;
193 unsigned Opcode = I0->getOpcode();
194 for (int i = 1, e = VL.size(); i < e; i++) {
195 Instruction *I = dyn_cast<Instruction>(VL[i]);
196 if (!I || Opcode != I->getOpcode()) {
197 if (canCombineAsAltInst(Opcode) && i == 1)
198 return isAltInst(VL);
199 return 0;
200 }
201 }
202 return Opcode;
203 }
204
205 /// Get the intersection (logical and) of all of the potential IR flags
206 /// of each scalar operation (VL) that will be converted into a vector (I).
207 /// Flag set: NSW, NUW, exact, and all of fast-math.
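/// For example (a sketch of the intended behavior): if the scalars are
/// {add nsw, add nsw, add, add nsw}, the resulting vector add carries no
/// 'nsw', since only flags present on every scalar survive the intersection.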
208 static void propagateIRFlags(Value *I, ArrayRef<Value *> VL) {
209 if (auto *VecOp = dyn_cast<BinaryOperator>(I)) {
210 if (auto *Intersection = dyn_cast<BinaryOperator>(VL[0])) {
211 // Intersection is initialized to the 0th scalar,
212 // so start counting from index '1'.
213 for (int i = 1, e = VL.size(); i < e; ++i) {
214 if (auto *Scalar = dyn_cast<BinaryOperator>(VL[i]))
215 Intersection->andIRFlags(Scalar);
216 }
217 VecOp->copyIRFlags(Intersection);
218 }
219 }
220 }
221
222 /// \returns The type that all of the values in \p VL have or null if there
223 /// are different types.
224 static Type* getSameType(ArrayRef<Value *> VL) {
225 Type *Ty = VL[0]->getType();
226 for (int i = 1, e = VL.size(); i < e; i++)
227 if (VL[i]->getType() != Ty)
228 return nullptr;
229
230 return Ty;
231 }
232
233 /// \returns True if Extract{Value,Element} instruction extracts element Idx.
234 static bool matchExtractIndex(Instruction *E, unsigned Idx, unsigned Opcode) {
235 assert(Opcode == Instruction::ExtractElement ||
236 Opcode == Instruction::ExtractValue);
237 if (Opcode == Instruction::ExtractElement) {
238 ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));
239 return CI && CI->getZExtValue() == Idx;
240 } else {
241 ExtractValueInst *EI = cast<ExtractValueInst>(E);
242 return EI->getNumIndices() == 1 && *EI->idx_begin() == Idx;
243 }
244 }
245
246 /// \returns True if an in-tree use also needs an extract. This refers to a
247 /// possible scalar operand in a vectorized instruction.
248 static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
249 TargetLibraryInfo *TLI) {
250
251 unsigned Opcode = UserInst->getOpcode();
252 switch (Opcode) {
253 case Instruction::Load: {
254 LoadInst *LI = cast<LoadInst>(UserInst);
255 return (LI->getPointerOperand() == Scalar);
256 }
257 case Instruction::Store: {
258 StoreInst *SI = cast<StoreInst>(UserInst);
259 return (SI->getPointerOperand() == Scalar);
260 }
261 case Instruction::Call: {
262 CallInst *CI = cast<CallInst>(UserInst);
263 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
264 if (hasVectorInstrinsicScalarOpd(ID, 1)) {
265 return (CI->getArgOperand(1) == Scalar);
266 }
267 }
268 default:
269 return false;
270 }
271 }
272
273 /// \returns the AA location that is being accessed by the instruction.
274 static MemoryLocation getLocation(Instruction *I, AliasAnalysis *AA) {
275 if (StoreInst *SI = dyn_cast<StoreInst>(I))
276 return MemoryLocation::get(SI);
277 if (LoadInst *LI = dyn_cast<LoadInst>(I))
278 return MemoryLocation::get(LI);
279 return MemoryLocation();
280 }
281
282 /// \returns True if the instruction is not a volatile or atomic load/store.
283 static bool isSimple(Instruction *I) {
284 if (LoadInst *LI = dyn_cast<LoadInst>(I))
285 return LI->isSimple();
286 if (StoreInst *SI = dyn_cast<StoreInst>(I))
287 return SI->isSimple();
288 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
289 return !MI->isVolatile();
290 return true;
291 }
292
293 namespace llvm {
294 namespace slpvectorizer {
295 /// Bottom Up SLP Vectorizer.
296 class BoUpSLP {
297 public:
298 typedef SmallVector<Value *, 8> ValueList;
299 typedef SmallVector<Instruction *, 16> InstrList;
300 typedef SmallPtrSet<Value *, 16> ValueSet;
301 typedef SmallVector<StoreInst *, 8> StoreList;
302
303 BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
304 TargetLibraryInfo *TLi, AliasAnalysis *Aa, LoopInfo *Li,
305 DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
306 const DataLayout *DL)
307 : NumLoadsWantToKeepOrder(0), NumLoadsWantToChangeOrder(0), F(Func),
308 SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC), DB(DB),
309 DL(DL), Builder(Se->getContext()) {
310 CodeMetrics::collectEphemeralValues(F, AC, EphValues);
311 // Use the vector register size specified by the target unless overridden
312 // by a command-line option.
313 // TODO: It would be better to limit the vectorization factor based on
314 // data type rather than just register size. For example, x86 AVX has
315 // 256-bit registers, but it does not support integer operations
316 // at that width (that requires AVX2).
317 if (MaxVectorRegSizeOption.getNumOccurrences())
318 MaxVecRegSize = MaxVectorRegSizeOption;
319 else
320 MaxVecRegSize = TTI->getRegisterBitWidth(true);
321
322 MinVecRegSize = MinVectorRegSizeOption;
323 }
324
325 /// \brief Vectorize the tree that starts with the elements in \p VL.
326 /// Returns the vectorized root.
327 Value *vectorizeTree();
328
329 /// \returns the cost incurred by unwanted spills and fills, caused by
330 /// holding live values over call sites.
331 int getSpillCost();
332
333 /// \returns the vectorization cost of the subtree that starts at \p VL.
334 /// A negative number means that this is profitable.
335 int getTreeCost();
336
337 /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
338 /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
339 void buildTree(ArrayRef<Value *> Roots,
340 ArrayRef<Value *> UserIgnoreLst = None);
341
342 /// Clear the internal data structures that are created by 'buildTree'.
343 void deleteTree() {
344 VectorizableTree.clear();
345 ScalarToTreeEntry.clear();
346 MustGather.clear();
347 ExternalUses.clear();
348 NumLoadsWantToKeepOrder = 0;
349 NumLoadsWantToChangeOrder = 0;
350 for (auto &Iter : BlocksSchedules) {
351 BlockScheduling *BS = Iter.second.get();
352 BS->clear();
353 }
354 MinBWs.clear();
355 }
356
357 /// \brief Perform LICM and CSE on the newly generated gather sequences.
358 void optimizeGatherSequence();
359
360 /// \returns true if it is beneficial to reverse the vector order.
361 bool shouldReorder() const {
362 return NumLoadsWantToChangeOrder > NumLoadsWantToKeepOrder;
363 }
364
365 /// \return The vector element size in bits to use when vectorizing the
366 /// expression tree ending at \p V. If V is a store, the size is the width of
367 /// the stored value. Otherwise, the size is the width of the largest loaded
368 /// value reaching V. This method is used by the vectorizer to calculate
369 /// vectorization factors.
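/// For example (a rough sketch assuming the default 128-bit register size):
/// if the tree ends in a store of an i32, this returns 32, which yields a
/// vectorization factor of 128 / 32 = 4.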
370 unsigned getVectorElementSize(Value *V);
371
372 /// Compute the minimum type sizes required to represent the entries in a
373 /// vectorizable tree.
374 void computeMinimumValueSizes();
375
376 // \returns maximum vector register size as set by TTI or overridden by cl::opt.
377 unsigned getMaxVecRegSize() const {
378 return MaxVecRegSize;
379 }
380
381 // \returns minimum vector register size as set by cl::opt.
382 unsigned getMinVecRegSize() const {
383 return MinVecRegSize;
384 }
385
386 /// \brief Check if ArrayType or StructType is isomorphic to some VectorType.
387 ///
388 /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
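/// For example (illustrative, assuming the default 128-bit register limits):
/// a homogeneous struct { float, float, float, float } maps to <4 x float>
/// and returns 4, whereas a mixed struct { float, i32 } returns 0.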
389 unsigned canMapToVector(Type *T, const DataLayout &DL) const;
390
391 private:
392 struct TreeEntry;
393
394 /// \returns the cost of the vectorizable entry.
395 int getEntryCost(TreeEntry *E);
396
397 /// This is the recursive part of buildTree.
398 void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth);
399
400 /// \returns True if the ExtractElement/ExtractValue instructions in VL can
401 /// be vectorized to use the original vector (or aggregate "bitcast" to a vector).
402 bool canReuseExtract(ArrayRef<Value *> VL, unsigned Opcode) const;
403
404 /// Vectorize a single entry in the tree.
405 Value *vectorizeTree(TreeEntry *E);
406
407 /// Vectorize a single entry in the tree, starting in \p VL.
408 Value *vectorizeTree(ArrayRef<Value *> VL);
409
410 /// \returns the pointer to the vectorized value if \p VL is already
411 /// vectorized, or NULL. This may happen in cycles.
412 Value *alreadyVectorized(ArrayRef<Value *> VL) const;
413
414 /// \returns the scalarization cost for this type. Scalarization in this
415 /// context means the creation of vectors from a group of scalars.
416 int getGatherCost(Type *Ty);
417
418 /// \returns the scalarization cost for this list of values. Assuming that
419 /// this subtree gets vectorized, we may need to extract the values from the
420 /// roots. This method calculates the cost of extracting the values.
421 int getGatherCost(ArrayRef<Value *> VL);
422
423 /// \brief Set the Builder insert point to one after the last instruction in
424 /// the bundle
425 void setInsertPointAfterBundle(ArrayRef<Value *> VL);
426
427 /// \returns a vector from a collection of scalars in \p VL.
428 Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);
429
430 /// \returns whether the VectorizableTree is fully vectorizable and will
431 /// be beneficial even if the tree height is tiny.
432 bool isFullyVectorizableTinyTree();
433
434 /// \brief Reorder commutative operands in alt shuffle if they result in
435 /// vectorized code.
436 void reorderAltShuffleOperands(ArrayRef<Value *> VL,
437 SmallVectorImpl<Value *> &Left,
438 SmallVectorImpl<Value *> &Right);
439 /// \brief Reorder commutative operands to get better probability of
440 /// generating vectorized code.
441 void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
442 SmallVectorImpl<Value *> &Left,
443 SmallVectorImpl<Value *> &Right);
444 struct TreeEntry {
445 TreeEntry() : Scalars(), VectorizedValue(nullptr),
446 NeedToGather(0) {}
447
448 /// \returns true if the scalars in VL are equal to this entry.
449 bool isSame(ArrayRef<Value *> VL) const {
450 assert(VL.size() == Scalars.size() && "Invalid size");
451 return std::equal(VL.begin(), VL.end(), Scalars.begin());
452 }
453
454 /// A vector of scalars.
455 ValueList Scalars;
456
457 /// The Scalars are vectorized into this value. It is initialized to Null.
458 Value *VectorizedValue;
459
460 /// Do we need to gather this sequence?
461 bool NeedToGather;
462 };
463
464 /// Create a new VectorizableTree entry.
465 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized) {
466 VectorizableTree.emplace_back();
467 int idx = VectorizableTree.size() - 1;
468 TreeEntry *Last = &VectorizableTree[idx];
469 Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
470 Last->NeedToGather = !Vectorized;
471 if (Vectorized) {
472 for (int i = 0, e = VL.size(); i != e; ++i) {
473 assert(!ScalarToTreeEntry.count(VL[i]) && "Scalar already in tree!");
474 ScalarToTreeEntry[VL[i]] = idx;
475 }
476 } else {
477 MustGather.insert(VL.begin(), VL.end());
478 }
479 return Last;
480 }
481
482 /// -- Vectorization State --
483 /// Holds all of the tree entries.
484 std::vector<TreeEntry> VectorizableTree;
485
486 /// Maps a specific scalar to its tree entry.
487 SmallDenseMap<Value*, int> ScalarToTreeEntry;
488
489 /// A list of scalars that we found that we need to keep as scalars.
490 ValueSet MustGather;
491
492 /// This POD struct describes one external user in the vectorized tree.
493 struct ExternalUser {
494 ExternalUser (Value *S, llvm::User *U, int L) :
495 Scalar(S), User(U), Lane(L){}
496 // Which scalar in our function.
497 Value *Scalar;
498 // Which user that uses the scalar.
499 llvm::User *User;
500 // Which lane does the scalar belong to.
501 int Lane;
502 };
503 typedef SmallVector<ExternalUser, 16> UserList;
504
505 /// Checks if two instructions may access the same memory.
506 ///
507 /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
508 /// is invariant in the calling loop.
509 bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
510 Instruction *Inst2) {
511
512 // First check if the result is already in the cache.
513 AliasCacheKey key = std::make_pair(Inst1, Inst2);
514 Optional<bool> &result = AliasCache[key];
515 if (result.hasValue()) {
516 return result.getValue();
517 }
518 MemoryLocation Loc2 = getLocation(Inst2, AA);
519 bool aliased = true;
520 if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
521 // Do the alias check.
522 aliased = AA->alias(Loc1, Loc2);
523 }
524 // Store the result in the cache.
525 result = aliased;
526 return aliased;
527 }
528
529 typedef std::pair<Instruction *, Instruction *> AliasCacheKey;
530
531 /// Cache for alias results.
532 /// TODO: consider moving this to the AliasAnalysis itself.
533 DenseMap<AliasCacheKey, Optional<bool>> AliasCache;
534
535 /// Removes an instruction from its block and eventually deletes it.
536 /// It's like Instruction::eraseFromParent() except that the actual deletion
537 /// is delayed until BoUpSLP is destructed.
538 /// This is required to ensure that there are no incorrect collisions in the
539 /// AliasCache, which can happen if a new instruction is allocated at the
540 /// same address as a previously deleted instruction.
541 void eraseInstruction(Instruction *I) {
542 I->removeFromParent();
543 I->dropAllReferences();
544 DeletedInstructions.push_back(std::unique_ptr<Instruction>(I));
545 }
546
547 /// Temporary store for deleted instructions. Instructions will be deleted
548 /// eventually when the BoUpSLP is destructed.
549 SmallVector<std::unique_ptr<Instruction>, 8> DeletedInstructions;
550
551 /// A list of values that need to extracted out of the tree.
552 /// This list holds pairs of (Internal Scalar : External User).
553 UserList ExternalUses;
554
555 /// Values used only by @llvm.assume calls.
556 SmallPtrSet<const Value *, 32> EphValues;
557
558 /// Holds all of the instructions that we gathered.
559 SetVector<Instruction *> GatherSeq;
560 /// A list of blocks that we are going to CSE.
561 SetVector<BasicBlock *> CSEBlocks;
562
563 /// Contains all scheduling relevant data for an instruction.
564 /// A ScheduleData either represents a single instruction or a member of an
565 /// instruction bundle (= a group of instructions which is combined into a
566 /// vector instruction).
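/// For example (a sketch): for a bundle of four loads, the four ScheduleData
/// objects are linked through NextInBundle, they all point to the first one
/// via FirstInBundle, and only that head acts as the scheduling entity for
/// the whole group.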
567 struct ScheduleData {
568
569 // The initial value for the dependency counters. It means that the
570 // dependencies are not calculated yet.
571 enum { InvalidDeps = -1 };
572
573 ScheduleData()
574 : Inst(nullptr), FirstInBundle(nullptr), NextInBundle(nullptr),
575 NextLoadStore(nullptr), SchedulingRegionID(0), SchedulingPriority(0),
576 Dependencies(InvalidDeps), UnscheduledDeps(InvalidDeps),
577 UnscheduledDepsInBundle(InvalidDeps), IsScheduled(false) {}
578
579 void init(int BlockSchedulingRegionID) {
580 FirstInBundle = this;
581 NextInBundle = nullptr;
582 NextLoadStore = nullptr;
583 IsScheduled = false;
584 SchedulingRegionID = BlockSchedulingRegionID;
585 UnscheduledDepsInBundle = UnscheduledDeps;
586 clearDependencies();
587 }
588
589 /// Returns true if the dependency information has been calculated.
590 bool hasValidDependencies() const { return Dependencies != InvalidDeps; }
591
592 /// Returns true for single instructions and for bundle representatives
593 /// (= the head of a bundle).
594 bool isSchedulingEntity() const { return FirstInBundle == this; }
595
596 /// Returns true if it represents an instruction bundle and not only a
597 /// single instruction.
598 bool isPartOfBundle() const {
599 return NextInBundle != nullptr || FirstInBundle != this;
600 }
601
602 /// Returns true if it is ready for scheduling, i.e. it has no more
603 /// unscheduled depending instructions/bundles.
604 bool isReady() const {
605 assert(isSchedulingEntity() &&
606 "can't consider non-scheduling entity for ready list");
607 return UnscheduledDepsInBundle == 0 && !IsScheduled;
608 }
609
610 /// Modifies the number of unscheduled dependencies, also updating it for
611 /// the whole bundle.
612 int incrementUnscheduledDeps(int Incr) {
613 UnscheduledDeps += Incr;
614 return FirstInBundle->UnscheduledDepsInBundle += Incr;
615 }
616
617 /// Sets the number of unscheduled dependencies to the number of
618 /// dependencies.
619 void resetUnscheduledDeps() {
620 incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
621 }
622
623 /// Clears all dependency information.
624 void clearDependencies() {
625 Dependencies = InvalidDeps;
626 resetUnscheduledDeps();
627 MemoryDependencies.clear();
628 }
629
630 void dump(raw_ostream &os) const {
631 if (!isSchedulingEntity()) {
632 os << "/ " << *Inst;
633 } else if (NextInBundle) {
634 os << '[' << *Inst;
635 ScheduleData *SD = NextInBundle;
636 while (SD) {
637 os << ';' << *SD->Inst;
638 SD = SD->NextInBundle;
639 }
640 os << ']';
641 } else {
642 os << *Inst;
643 }
644 }
645
646 Instruction *Inst;
647
648 /// Points to the head in an instruction bundle (and always to this for
649 /// single instructions).
650 ScheduleData *FirstInBundle;
651
652 /// Single linked list of all instructions in a bundle. Null if it is a
653 /// single instruction.
654 ScheduleData *NextInBundle;
655
656 /// Single linked list of all memory instructions (e.g. load, store, call)
657 /// in the block - until the end of the scheduling region.
658 ScheduleData *NextLoadStore;
659
660 /// The dependent memory instructions.
661 /// This list is derived on demand in calculateDependencies().
662 SmallVector<ScheduleData *, 4> MemoryDependencies;
663
664 /// This ScheduleData is in the current scheduling region if this matches
665 /// the current SchedulingRegionID of BlockScheduling.
666 int SchedulingRegionID;
667
668 /// Used for getting a "good" final ordering of instructions.
669 int SchedulingPriority;
670
671 /// The number of dependencies. This is the number of users of the
672 /// instruction plus the number of dependent memory instructions (if any).
673 /// This value is calculated on demand.
674 /// If InvalidDeps, the number of dependencies is not calculated yet.
675 ///
676 int Dependencies;
677
678 /// The number of dependencies minus the number of dependencies of scheduled
679 /// instructions. As soon as this is zero, the instruction/bundle gets ready
680 /// for scheduling.
681 /// Note that this is negative as long as Dependencies is not calculated.
682 int UnscheduledDeps;
683
684 /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for
685 /// single instructions.
686 int UnscheduledDepsInBundle;
687
688 /// True if this instruction is scheduled (or considered as scheduled in the
689 /// dry-run).
690 bool IsScheduled;
691 };
692
693 #ifndef NDEBUG
694 friend inline raw_ostream &operator<<(raw_ostream &os,
695 const BoUpSLP::ScheduleData &SD) {
696 SD.dump(os);
697 return os;
698 }
699 #endif
700
701 /// Contains all scheduling data for a basic block.
702 ///
703 struct BlockScheduling {
704
705 BlockScheduling(BasicBlock *BB)
706 : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize),
707 ScheduleStart(nullptr), ScheduleEnd(nullptr),
708 FirstLoadStoreInRegion(nullptr), LastLoadStoreInRegion(nullptr),
709 ScheduleRegionSize(0),
710 ScheduleRegionSizeLimit(ScheduleRegionSizeBudget),
711 // Make sure that the initial SchedulingRegionID is greater than the
712 // initial SchedulingRegionID in ScheduleData (which is 0).
713 SchedulingRegionID(1) {}
714
715 void clear() {
716 ReadyInsts.clear();
717 ScheduleStart = nullptr;
718 ScheduleEnd = nullptr;
719 FirstLoadStoreInRegion = nullptr;
720 LastLoadStoreInRegion = nullptr;
721
722 // Reduce the maximum schedule region size by the size of the
723 // previous scheduling run.
724 ScheduleRegionSizeLimit -= ScheduleRegionSize;
725 if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
726 ScheduleRegionSizeLimit = MinScheduleRegionSize;
727 ScheduleRegionSize = 0;
728
729 // Make a new scheduling region, i.e. all existing ScheduleData is not
730 // in the new region yet.
731 ++SchedulingRegionID;
732 }
733
734 ScheduleData *getScheduleData(Value *V) {
735 ScheduleData *SD = ScheduleDataMap[V];
736 if (SD && SD->SchedulingRegionID == SchedulingRegionID)
737 return SD;
738 return nullptr;
739 }
740
741 bool isInSchedulingRegion(ScheduleData *SD) {
742 return SD->SchedulingRegionID == SchedulingRegionID;
743 }
744
745 /// Marks an instruction as scheduled and puts all dependent ready
746 /// instructions into the ready-list.
747 template <typename ReadyListType>
748 void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
749 SD->IsScheduled = true;
750 DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");
751
752 ScheduleData *BundleMember = SD;
753 while (BundleMember) {
754 // Handle the def-use chain dependencies.
755 for (Use &U : BundleMember->Inst->operands()) {
756 ScheduleData *OpDef = getScheduleData(U.get());
757 if (OpDef && OpDef->hasValidDependencies() &&
758 OpDef->incrementUnscheduledDeps(-1) == 0) {
759 // There are no more unscheduled dependencies after decrementing,
760 // so we can put the dependent instruction into the ready list.
761 ScheduleData *DepBundle = OpDef->FirstInBundle;
762 assert(!DepBundle->IsScheduled &&
763 "already scheduled bundle gets ready");
764 ReadyList.insert(DepBundle);
765 DEBUG(dbgs() << "SLP: gets ready (def): " << *DepBundle << "\n");
766 }
767 }
768 // Handle the memory dependencies.
769 for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
770 if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
771 // There are no more unscheduled dependencies after decrementing,
772 // so we can put the dependent instruction into the ready list.
773 ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
774 assert(!DepBundle->IsScheduled &&
775 "already scheduled bundle gets ready");
776 ReadyList.insert(DepBundle);
777 DEBUG(dbgs() << "SLP: gets ready (mem): " << *DepBundle << "\n");
778 }
779 }
780 BundleMember = BundleMember->NextInBundle;
781 }
782 }
783
784 /// Put all instructions into the ReadyList which are ready for scheduling.
785 template <typename ReadyListType>
786 void initialFillReadyList(ReadyListType &ReadyList) {
787 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
788 ScheduleData *SD = getScheduleData(I);
789 if (SD->isSchedulingEntity() && SD->isReady()) {
790 ReadyList.insert(SD);
791 DEBUG(dbgs() << "SLP: initially in ready list: " << *I << "\n");
792 }
793 }
794 }
795
796 /// Checks if a bundle of instructions can be scheduled, i.e. has no
797 /// cyclic dependencies. This is only a dry-run, no instructions are
798 /// actually moved at this stage.
799 bool tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP);
800
801 /// Un-bundles a group of instructions.
802 void cancelScheduling(ArrayRef<Value *> VL);
803
804 /// Extends the scheduling region so that V is inside the region.
805 /// \returns true if the region size is within the limit.
806 bool extendSchedulingRegion(Value *V);
807
808 /// Initialize the ScheduleData structures for new instructions in the
809 /// scheduling region.
810 void initScheduleData(Instruction *FromI, Instruction *ToI,
811 ScheduleData *PrevLoadStore,
812 ScheduleData *NextLoadStore);
813
814 /// Updates the dependency information of a bundle and of all instructions/
815 /// bundles which depend on the original bundle.
816 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
817 BoUpSLP *SLP);
818
819 /// Sets all instructions in the scheduling region to un-scheduled.
820 void resetSchedule();
821
822 BasicBlock *BB;
823
824 /// Simple memory allocation for ScheduleData.
825 std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;
826
827 /// The size of a ScheduleData array in ScheduleDataChunks.
828 int ChunkSize;
829
830 /// The allocator position in the current chunk, which is the last entry
831 /// of ScheduleDataChunks.
832 int ChunkPos;
833
834 /// Attaches ScheduleData to Instruction.
835 /// Note that the mapping survives during all vectorization iterations, i.e.
836 /// ScheduleData structures are recycled.
837 DenseMap<Value *, ScheduleData *> ScheduleDataMap;
838
839 struct ReadyList : SmallVector<ScheduleData *, 8> {
840 void insert(ScheduleData *SD) { push_back(SD); }
841 };
842
843 /// The ready-list for scheduling (only used for the dry-run).
844 ReadyList ReadyInsts;
845
846 /// The first instruction of the scheduling region.
847 Instruction *ScheduleStart;
848
849 /// The first instruction _after_ the scheduling region.
850 Instruction *ScheduleEnd;
851
852 /// The first memory accessing instruction in the scheduling region
853 /// (can be null).
854 ScheduleData *FirstLoadStoreInRegion;
855
856 /// The last memory accessing instruction in the scheduling region
857 /// (can be null).
858 ScheduleData *LastLoadStoreInRegion;
859
860 /// The current size of the scheduling region.
861 int ScheduleRegionSize;
862
863 /// The maximum size allowed for the scheduling region.
864 int ScheduleRegionSizeLimit;
865
866 /// The ID of the scheduling region. For a new vectorization iteration this
867 /// is incremented which "removes" all ScheduleData from the region.
868 int SchedulingRegionID;
869 };
870
871 /// Attaches the BlockScheduling structures to basic blocks.
872 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;
873
874 /// Performs the "real" scheduling. Done before vectorization is actually
875 /// performed in a basic block.
876 void scheduleBlock(BlockScheduling *BS);
877
878 /// List of users to ignore during scheduling and that don't need extracting.
879 ArrayRef<Value *> UserIgnoreList;
880
881 // Number of load-bundles, which contain consecutive loads.
882 int NumLoadsWantToKeepOrder;
883
884 // Number of load-bundles of size 2, which are consecutive loads if reversed.
885 int NumLoadsWantToChangeOrder;
886
887 // Analysis and block reference.
888 Function *F;
889 ScalarEvolution *SE;
890 TargetTransformInfo *TTI;
891 TargetLibraryInfo *TLI;
892 AliasAnalysis *AA;
893 LoopInfo *LI;
894 DominatorTree *DT;
895 AssumptionCache *AC;
896 DemandedBits *DB;
897 const DataLayout *DL;
898 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
899 unsigned MinVecRegSize; // Set by cl::opt (default: 128).
900 /// Instruction builder to construct the vectorized tree.
901 IRBuilder<> Builder;
902
903 /// A map of scalar integer values to the smallest bit width with which they
904 /// can legally be represented.
905 MapVector<Value *, uint64_t> MinBWs;
906 };
907
908 } // end namespace slpvectorizer
909 } // end namespace llvm
910
911 void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
912 ArrayRef<Value *> UserIgnoreLst) {
913 deleteTree();
914 UserIgnoreList = UserIgnoreLst;
915 if (!getSameType(Roots))
916 return;
917 buildTree_rec(Roots, 0);
918
919 // Collect the values that we need to extract from the tree.
920 for (TreeEntry &EIdx : VectorizableTree) {
921 TreeEntry *Entry = &EIdx;
922
923 // For each lane:
924 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
925 Value *Scalar = Entry->Scalars[Lane];
926
927 // No need to handle users of gathered values.
928 if (Entry->NeedToGather)
929 continue;
930
931 for (User *U : Scalar->users()) {
932 DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");
933
934 Instruction *UserInst = dyn_cast<Instruction>(U);
935 if (!UserInst)
936 continue;
937
938 // Skip in-tree scalars that become vectors
939 if (ScalarToTreeEntry.count(U)) {
940 int Idx = ScalarToTreeEntry[U];
941 TreeEntry *UseEntry = &VectorizableTree[Idx];
942 Value *UseScalar = UseEntry->Scalars[0];
943 // Some in-tree scalars will remain as scalar in vectorized
944 // instructions. If that is the case, the one in Lane 0 will
945 // be used.
946 if (UseScalar != U ||
947 !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
948 DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
949 << ".\n");
950 assert(!VectorizableTree[Idx].NeedToGather && "Bad state");
951 continue;
952 }
953 }
954
955 // Ignore users in the user ignore list.
956 if (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), UserInst) !=
957 UserIgnoreList.end())
958 continue;
959
960 DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " <<
961 Lane << " from " << *Scalar << ".\n");
962 ExternalUses.push_back(ExternalUser(Scalar, U, Lane));
963 }
964 }
965 }
966 }
967
968
969 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
970 bool SameTy = allConstant(VL) || getSameType(VL); (void)SameTy;
971 bool isAltShuffle = false;
972 assert(SameTy && "Invalid types!");
973
974 if (Depth == RecursionMaxDepth) {
975 DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
976 newTreeEntry(VL, false);
977 return;
978 }
979
980 // Don't handle vectors.
981 if (VL[0]->getType()->isVectorTy()) {
982 DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
983 newTreeEntry(VL, false);
984 return;
985 }
986
987 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
988 if (SI->getValueOperand()->getType()->isVectorTy()) {
989 DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
990 newTreeEntry(VL, false);
991 return;
992 }
993 unsigned Opcode = getSameOpcode(VL);
994
995 // Check that this shuffle vector refers to the alternate
996 // sequence of opcodes.
997 if (Opcode == Instruction::ShuffleVector) {
998 Instruction *I0 = dyn_cast<Instruction>(VL[0]);
999 unsigned Op = I0->getOpcode();
1000 if (Op != Instruction::ShuffleVector)
1001 isAltShuffle = true;
1002 }
1003
1004 // If all of the operands are identical or constant we have a simple solution.
1005 if (allConstant(VL) || isSplat(VL) || !getSameBlock(VL) || !Opcode) {
1006 DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
1007 newTreeEntry(VL, false);
1008 return;
1009 }
1010
1011 // We now know that this is a vector of instructions of the same type from
1012 // the same block.
1013
1014 // Don't vectorize ephemeral values.
1015 for (unsigned i = 0, e = VL.size(); i != e; ++i) {
1016 if (EphValues.count(VL[i])) {
1017 DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
1018 ") is ephemeral.\n");
1019 newTreeEntry(VL, false);
1020 return;
1021 }
1022 }
1023
1024 // Check if this is a duplicate of another entry.
1025 if (ScalarToTreeEntry.count(VL[0])) {
1026 int Idx = ScalarToTreeEntry[VL[0]];
1027 TreeEntry *E = &VectorizableTree[Idx];
1028 for (unsigned i = 0, e = VL.size(); i != e; ++i) {
1029 DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
1030 if (E->Scalars[i] != VL[i]) {
1031 DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
1032 newTreeEntry(VL, false);
1033 return;
1034 }
1035 }
1036 DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n");
1037 return;
1038 }
1039
1040 // Check that none of the instructions in the bundle are already in the tree.
1041 for (unsigned i = 0, e = VL.size(); i != e; ++i) {
1042 if (ScalarToTreeEntry.count(VL[i])) {
1043 DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
1044 ") is already in tree.\n");
1045 newTreeEntry(VL, false);
1046 return;
1047 }
1048 }
1049
1050 // If any of the scalars is marked as a value that needs to stay scalar then
1051 // we need to gather the scalars.
1052 for (unsigned i = 0, e = VL.size(); i != e; ++i) {
1053 if (MustGather.count(VL[i])) {
1054 DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
1055 newTreeEntry(VL, false);
1056 return;
1057 }
1058 }
1059
1060 // Check that all of the users of the scalars that we want to vectorize are
1061 // schedulable.
1062 Instruction *VL0 = cast<Instruction>(VL[0]);
1063 BasicBlock *BB = cast<Instruction>(VL0)->getParent();
1064
1065 if (!DT->isReachableFromEntry(BB)) {
1066 // Don't go into unreachable blocks. They may contain instructions with
1067 // dependency cycles which confuse the final scheduling.
1068 DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
1069 newTreeEntry(VL, false);
1070 return;
1071 }
1072
1073 // Check that every instruction appears only once in this bundle.
1074 for (unsigned i = 0, e = VL.size(); i < e; ++i)
1075 for (unsigned j = i+1; j < e; ++j)
1076 if (VL[i] == VL[j]) {
1077 DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
1078 newTreeEntry(VL, false);
1079 return;
1080 }
1081
1082 auto &BSRef = BlocksSchedules[BB];
1083 if (!BSRef) {
1084 BSRef = llvm::make_unique<BlockScheduling>(BB);
1085 }
1086 BlockScheduling &BS = *BSRef.get();
1087
1088 if (!BS.tryScheduleBundle(VL, this)) {
1089 DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
1090 assert((!BS.getScheduleData(VL[0]) ||
1091 !BS.getScheduleData(VL[0])->isPartOfBundle()) &&
1092 "tryScheduleBundle should cancelScheduling on failure");
1093 newTreeEntry(VL, false);
1094 return;
1095 }
1096 DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");
1097
1098 switch (Opcode) {
1099 case Instruction::PHI: {
1100 PHINode *PH = dyn_cast<PHINode>(VL0);
1101
1102 // Check for terminator values (e.g. invoke).
1103 for (unsigned j = 0; j < VL.size(); ++j)
1104 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
1105 TerminatorInst *Term = dyn_cast<TerminatorInst>(
1106 cast<PHINode>(VL[j])->getIncomingValueForBlock(PH->getIncomingBlock(i)));
1107 if (Term) {
1108 DEBUG(dbgs() << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
1109 BS.cancelScheduling(VL);
1110 newTreeEntry(VL, false);
1111 return;
1112 }
1113 }
1114
1115 newTreeEntry(VL, true);
1116 DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");
1117
1118 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
1119 ValueList Operands;
1120 // Prepare the operand vector.
1121 for (Value *j : VL)
1122 Operands.push_back(cast<PHINode>(j)->getIncomingValueForBlock(
1123 PH->getIncomingBlock(i)));
1124
1125 buildTree_rec(Operands, Depth + 1);
1126 }
1127 return;
1128 }
1129 case Instruction::ExtractValue:
1130 case Instruction::ExtractElement: {
1131 bool Reuse = canReuseExtract(VL, Opcode);
1132 if (Reuse) {
1133 DEBUG(dbgs() << "SLP: Reusing extract sequence.\n");
1134 } else {
1135 BS.cancelScheduling(VL);
1136 }
1137 newTreeEntry(VL, Reuse);
1138 return;
1139 }
1140 case Instruction::Load: {
1141 // Check that a vectorized load would load the same memory as a scalar
1142 // load.
1143 // For example, we don't want to vectorize loads that are smaller than 8 bits.
1144 // Even though we have a packed struct {<i2, i2, i2, i2>} LLVM treats
1145 // loading/storing it as an i8 struct. If we vectorize loads/stores from
1146 // such a struct we read/write packed bits disagreeing with the
1147 // unvectorized version.
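// As a concrete illustration of the check below: for an i2 element,
// getTypeSizeInBits is 2 while getTypeAllocSizeInBits is 8, so the mismatch
// makes us gather such loads instead of vectorizing them.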
1148 Type *ScalarTy = VL[0]->getType();
1149
1150 if (DL->getTypeSizeInBits(ScalarTy) !=
1151 DL->getTypeAllocSizeInBits(ScalarTy)) {
1152 BS.cancelScheduling(VL);
1153 newTreeEntry(VL, false);
1154 DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
1155 return;
1156 }
1157 // Check if the loads are consecutive or if we need to swizzle them.
1158 for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
1159 LoadInst *L = cast<LoadInst>(VL[i]);
1160 if (!L->isSimple()) {
1161 BS.cancelScheduling(VL);
1162 newTreeEntry(VL, false);
1163 DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
1164 return;
1165 }
1166
1167 if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
1168 if (VL.size() == 2 && isConsecutiveAccess(VL[1], VL[0], *DL, *SE)) {
1169 ++NumLoadsWantToChangeOrder;
1170 }
1171 BS.cancelScheduling(VL);
1172 newTreeEntry(VL, false);
1173 DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
1174 return;
1175 }
1176 }
1177 ++NumLoadsWantToKeepOrder;
1178 newTreeEntry(VL, true);
1179 DEBUG(dbgs() << "SLP: added a vector of loads.\n");
1180 return;
1181 }
1182 case Instruction::ZExt:
1183 case Instruction::SExt:
1184 case Instruction::FPToUI:
1185 case Instruction::FPToSI:
1186 case Instruction::FPExt:
1187 case Instruction::PtrToInt:
1188 case Instruction::IntToPtr:
1189 case Instruction::SIToFP:
1190 case Instruction::UIToFP:
1191 case Instruction::Trunc:
1192 case Instruction::FPTrunc:
1193 case Instruction::BitCast: {
1194 Type *SrcTy = VL0->getOperand(0)->getType();
1195 for (unsigned i = 0; i < VL.size(); ++i) {
1196 Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
1197 if (Ty != SrcTy || !isValidElementType(Ty)) {
1198 BS.cancelScheduling(VL);
1199 newTreeEntry(VL, false);
1200 DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
1201 return;
1202 }
1203 }
1204 newTreeEntry(VL, true);
1205 DEBUG(dbgs() << "SLP: added a vector of casts.\n");
1206
1207 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
1208 ValueList Operands;
1209 // Prepare the operand vector.
1210 for (Value *j : VL)
1211 Operands.push_back(cast<Instruction>(j)->getOperand(i));
1212
1213 buildTree_rec(Operands, Depth+1);
1214 }
1215 return;
1216 }
1217 case Instruction::ICmp:
1218 case Instruction::FCmp: {
1219 // Check that all of the compares have the same predicate.
1220 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
1221 Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
1222 for (unsigned i = 1, e = VL.size(); i < e; ++i) {
1223 CmpInst *Cmp = cast<CmpInst>(VL[i]);
1224 if (Cmp->getPredicate() != P0 ||
1225 Cmp->getOperand(0)->getType() != ComparedTy) {
1226 BS.cancelScheduling(VL);
1227 newTreeEntry(VL, false);
1228 DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
1229 return;
1230 }
1231 }
1232
1233 newTreeEntry(VL, true);
1234 DEBUG(dbgs() << "SLP: added a vector of compares.\n");
1235
1236 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
1237 ValueList Operands;
1238 // Prepare the operand vector.
1239 for (Value *j : VL)
1240 Operands.push_back(cast<Instruction>(j)->getOperand(i));
1241
1242 buildTree_rec(Operands, Depth+1);
1243 }
1244 return;
1245 }
1246 case Instruction::Select:
1247 case Instruction::Add:
1248 case Instruction::FAdd:
1249 case Instruction::Sub:
1250 case Instruction::FSub:
1251 case Instruction::Mul:
1252 case Instruction::FMul:
1253 case Instruction::UDiv:
1254 case Instruction::SDiv:
1255 case Instruction::FDiv:
1256 case Instruction::URem:
1257 case Instruction::SRem:
1258 case Instruction::FRem:
1259 case Instruction::Shl:
1260 case Instruction::LShr:
1261 case Instruction::AShr:
1262 case Instruction::And:
1263 case Instruction::Or:
1264 case Instruction::Xor: {
1265 newTreeEntry(VL, true);
1266 DEBUG(dbgs() << "SLP: added a vector of bin op.\n");
1267
1268 // Sort operands of the instructions so that each side is more likely to
1269 // have the same opcode.
1270 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
1271 ValueList Left, Right;
1272 reorderInputsAccordingToOpcode(VL, Left, Right);
1273 buildTree_rec(Left, Depth + 1);
1274 buildTree_rec(Right, Depth + 1);
1275 return;
1276 }
1277
1278 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
1279 ValueList Operands;
1280 // Prepare the operand vector.
1281 for (Value *j : VL)
1282 Operands.push_back(cast<Instruction>(j)->getOperand(i));
1283
1284 buildTree_rec(Operands, Depth+1);
1285 }
1286 return;
1287 }
1288 case Instruction::GetElementPtr: {
1289 // We don't combine GEPs with complicated (nested) indexing.
1290 for (unsigned j = 0; j < VL.size(); ++j) {
1291 if (cast<Instruction>(VL[j])->getNumOperands() != 2) {
1292 DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
1293 BS.cancelScheduling(VL);
1294 newTreeEntry(VL, false);
1295 return;
1296 }
1297 }
1298
1299 // We can't combine several GEPs into one vector if they operate on
1300 // different types.
1301 Type *Ty0 = cast<Instruction>(VL0)->getOperand(0)->getType();
1302 for (unsigned j = 0; j < VL.size(); ++j) {
1303 Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType();
1304 if (Ty0 != CurTy) {
1305 DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
1306 BS.cancelScheduling(VL);
1307 newTreeEntry(VL, false);
1308 return;
1309 }
1310 }
1311
1312 // We don't combine GEPs with non-constant indexes.
1313 for (unsigned j = 0; j < VL.size(); ++j) {
1314 auto Op = cast<Instruction>(VL[j])->getOperand(1);
1315 if (!isa<ConstantInt>(Op)) {
1316 DEBUG(
1317 dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
1318 BS.cancelScheduling(VL);
1319 newTreeEntry(VL, false);
1320 return;
1321 }
1322 }
1323
1324 newTreeEntry(VL, true);
1325 DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
1326 for (unsigned i = 0, e = 2; i < e; ++i) {
1327 ValueList Operands;
1328 // Prepare the operand vector.
1329 for (Value *j : VL)
1330 Operands.push_back(cast<Instruction>(j)->getOperand(i));
1331
1332 buildTree_rec(Operands, Depth + 1);
1333 }
1334 return;
1335 }
1336 case Instruction::Store: {
1337 // Check if the stores are consecutive or if we need to swizzle them.
1338 for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
1339 if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
1340 BS.cancelScheduling(VL);
1341 newTreeEntry(VL, false);
1342 DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
1343 return;
1344 }
1345
1346 newTreeEntry(VL, true);
1347 DEBUG(dbgs() << "SLP: added a vector of stores.\n");
1348
1349 ValueList Operands;
1350 for (Value *j : VL)
1351 Operands.push_back(cast<Instruction>(j)->getOperand(0));
1352
1353 buildTree_rec(Operands, Depth + 1);
1354 return;
1355 }
1356 case Instruction::Call: {
1357 // Check if the calls are all to the same vectorizable intrinsic.
1358 CallInst *CI = cast<CallInst>(VL[0]);
1359 // Check if this is an Intrinsic call or something that can be
1360 // represented by an intrinsic call
1361 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
1362 if (!isTriviallyVectorizable(ID)) {
1363 BS.cancelScheduling(VL);
1364 newTreeEntry(VL, false);
1365 DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
1366 return;
1367 }
1368 Function *Int = CI->getCalledFunction();
1369 Value *A1I = nullptr;
1370 if (hasVectorInstrinsicScalarOpd(ID, 1))
1371 A1I = CI->getArgOperand(1);
1372 for (unsigned i = 1, e = VL.size(); i != e; ++i) {
1373 CallInst *CI2 = dyn_cast<CallInst>(VL[i]);
1374 if (!CI2 || CI2->getCalledFunction() != Int ||
1375 getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
1376 !CI->hasIdenticalOperandBundleSchema(*CI2)) {
1377 BS.cancelScheduling(VL);
1378 newTreeEntry(VL, false);
1379 DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
1380 << "\n");
1381 return;
1382 }
1383 // ctlz, cttz and powi are special intrinsics whose second argument
1384 // must be the same for them to be vectorized.
1385 if (hasVectorInstrinsicScalarOpd(ID, 1)) {
1386 Value *A1J = CI2->getArgOperand(1);
1387 if (A1I != A1J) {
1388 BS.cancelScheduling(VL);
1389 newTreeEntry(VL, false);
1390 DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
1391 << " argument "<< A1I<<"!=" << A1J
1392 << "\n");
1393 return;
1394 }
1395 }
1396 // Verify that the bundle operands are identical between the two calls.
1397 if (CI->hasOperandBundles() &&
1398 !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
1399 CI->op_begin() + CI->getBundleOperandsEndIndex(),
1400 CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
1401 BS.cancelScheduling(VL);
1402 newTreeEntry(VL, false);
1403 DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI << "!="
1404 << *VL[i] << '\n');
1405 return;
1406 }
1407 }
1408
1409 newTreeEntry(VL, true);
1410 for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
1411 ValueList Operands;
1412 // Prepare the operand vector.
1413 for (Value *j : VL) {
1414 CallInst *CI2 = dyn_cast<CallInst>(j);
1415 Operands.push_back(CI2->getArgOperand(i));
1416 }
1417 buildTree_rec(Operands, Depth + 1);
1418 }
1419 return;
1420 }
1421 case Instruction::ShuffleVector: {
1422 // If this is not an alternate sequence of opcodes like add-sub
1423 // then do not vectorize this instruction.
1424 if (!isAltShuffle) {
1425 BS.cancelScheduling(VL);
1426 newTreeEntry(VL, false);
1427 DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
1428 return;
1429 }
1430 newTreeEntry(VL, true);
1431 DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
1432
1433 // Reorder operands if reordering would enable vectorization.
1434 if (isa<BinaryOperator>(VL0)) {
1435 ValueList Left, Right;
1436 reorderAltShuffleOperands(VL, Left, Right);
1437 buildTree_rec(Left, Depth + 1);
1438 buildTree_rec(Right, Depth + 1);
1439 return;
1440 }
1441
1442 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
1443 ValueList Operands;
1444 // Prepare the operand vector.
1445 for (Value *j : VL)
1446 Operands.push_back(cast<Instruction>(j)->getOperand(i));
1447
1448 buildTree_rec(Operands, Depth + 1);
1449 }
1450 return;
1451 }
1452 default:
1453 BS.cancelScheduling(VL);
1454 newTreeEntry(VL, false);
1455 DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
1456 return;
1457 }
1458 }
1459
1460 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
1461 unsigned N;
1462 Type *EltTy;
1463 auto *ST = dyn_cast<StructType>(T);
1464 if (ST) {
1465 N = ST->getNumElements();
1466 EltTy = *ST->element_begin();
1467 } else {
1468 N = cast<ArrayType>(T)->getNumElements();
1469 EltTy = cast<ArrayType>(T)->getElementType();
1470 }
1471 if (!isValidElementType(EltTy))
1472 return 0;
1473 uint64_t VTSize = DL.getTypeStoreSizeInBits(VectorType::get(EltTy, N));
1474 if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T))
1475 return 0;
1476 if (ST) {
1477 // Check that struct is homogeneous.
1478 for (const auto *Ty : ST->elements())
1479 if (Ty != EltTy)
1480 return 0;
1481 }
1482 return N;
1483 }
1484
1485 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, unsigned Opcode) const {
1486 assert(Opcode == Instruction::ExtractElement ||
1487 Opcode == Instruction::ExtractValue);
1488 assert(Opcode == getSameOpcode(VL) && "Invalid opcode");
1489 // Check if all of the extracts come from the same vector and from the
1490 // correct offset.
1491 Value *VL0 = VL[0];
1492 Instruction *E0 = cast<Instruction>(VL0);
1493 Value *Vec = E0->getOperand(0);
1494
1495 // We have to extract from a vector/aggregate with the same number of elements.
1496 unsigned NElts;
1497 if (Opcode == Instruction::ExtractValue) {
1498 const DataLayout &DL = E0->getModule()->getDataLayout();
1499 NElts = canMapToVector(Vec->getType(), DL);
1500 if (!NElts)
1501 return false;
1502 // Check if load can be rewritten as load of vector.
1503 LoadInst *LI = dyn_cast<LoadInst>(Vec);
1504 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size()))
1505 return false;
1506 } else {
1507 NElts = Vec->getType()->getVectorNumElements();
1508 }
1509
1510 if (NElts != VL.size())
1511 return false;
1512
1513 // Check that all of the indices extract from the correct offset.
1514 if (!matchExtractIndex(E0, 0, Opcode))
1515 return false;
1516
1517 for (unsigned i = 1, e = VL.size(); i < e; ++i) {
1518 Instruction *E = cast<Instruction>(VL[i]);
1519 if (!matchExtractIndex(E, i, Opcode))
1520 return false;
1521 if (E->getOperand(0) != Vec)
1522 return false;
1523 }
1524
1525 return true;
1526 }
1527
1528 int BoUpSLP::getEntryCost(TreeEntry *E) {
1529 ArrayRef<Value*> VL = E->Scalars;
1530
1531 Type *ScalarTy = VL[0]->getType();
1532 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
1533 ScalarTy = SI->getValueOperand()->getType();
1534 VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
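// For vectorizable bundles the cost computed below follows the convention of
// (vector cost - scalar cost), so a negative value means vectorizing this
// bundle is expected to be profitable; gathered bundles simply pay the cost
// of inserting their scalars into a vector.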
1535
1536 // If we have computed a smaller type for the expression, update VecTy so
1537 // that the costs will be accurate.
1538 if (MinBWs.count(VL[0]))
1539 VecTy = VectorType::get(IntegerType::get(F->getContext(), MinBWs[VL[0]]),
1540 VL.size());
1541
1542 if (E->NeedToGather) {
1543 if (allConstant(VL))
1544 return 0;
1545 if (isSplat(VL)) {
1546 return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
1547 }
1548 return getGatherCost(E->Scalars);
1549 }
1550 unsigned Opcode = getSameOpcode(VL);
1551 assert(Opcode && getSameType(VL) && getSameBlock(VL) && "Invalid VL");
1552 Instruction *VL0 = cast<Instruction>(VL[0]);
1553 switch (Opcode) {
1554 case Instruction::PHI: {
1555 return 0;
1556 }
1557 case Instruction::ExtractValue:
1558 case Instruction::ExtractElement: {
1559 if (canReuseExtract(VL, Opcode)) {
1560 int DeadCost = 0;
1561 for (unsigned i = 0, e = VL.size(); i < e; ++i) {
1562 Instruction *E = cast<Instruction>(VL[i]);
1563 if (E->hasOneUse())
1564 // Take credit for instruction that will become dead.
1565 DeadCost +=
1566 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
1567 }
1568 return -DeadCost;
1569 }
1570 return getGatherCost(VecTy);
1571 }
1572 case Instruction::ZExt:
1573 case Instruction::SExt:
1574 case Instruction::FPToUI:
1575 case Instruction::FPToSI:
1576 case Instruction::FPExt:
1577 case Instruction::PtrToInt:
1578 case Instruction::IntToPtr:
1579 case Instruction::SIToFP:
1580 case Instruction::UIToFP:
1581 case Instruction::Trunc:
1582 case Instruction::FPTrunc:
1583 case Instruction::BitCast: {
1584 Type *SrcTy = VL0->getOperand(0)->getType();
1585
1586 // Calculate the cost of this instruction.
1587 int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
1588 VL0->getType(), SrcTy);
1589
1590 VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
1591 int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy);
1592 return VecCost - ScalarCost;
1593 }
1594 case Instruction::FCmp:
1595 case Instruction::ICmp:
1596 case Instruction::Select: {
1597 // Calculate the cost of this instruction.
1598 VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
1599 int ScalarCost = VecTy->getNumElements() *
1600 TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty());
1601 int VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy);
1602 return VecCost - ScalarCost;
1603 }
1604 case Instruction::Add:
1605 case Instruction::FAdd:
1606 case Instruction::Sub:
1607 case Instruction::FSub:
1608 case Instruction::Mul:
1609 case Instruction::FMul:
1610 case Instruction::UDiv:
1611 case Instruction::SDiv:
1612 case Instruction::FDiv:
1613 case Instruction::URem:
1614 case Instruction::SRem:
1615 case Instruction::FRem:
1616 case Instruction::Shl:
1617 case Instruction::LShr:
1618 case Instruction::AShr:
1619 case Instruction::And:
1620 case Instruction::Or:
1621 case Instruction::Xor: {
1622 // Certain instructions can be cheaper to vectorize if they have a
1623 // constant second vector operand.
1624 TargetTransformInfo::OperandValueKind Op1VK =
1625 TargetTransformInfo::OK_AnyValue;
1626 TargetTransformInfo::OperandValueKind Op2VK =
1627 TargetTransformInfo::OK_UniformConstantValue;
1628 TargetTransformInfo::OperandValueProperties Op1VP =
1629 TargetTransformInfo::OP_None;
1630 TargetTransformInfo::OperandValueProperties Op2VP =
1631 TargetTransformInfo::OP_None;
1632
1633 // If all operands are exactly the same ConstantInt then set the
1634 // operand kind to OK_UniformConstantValue.
1635 // If instead not all operands are constants, then set the operand kind
1636 // to OK_AnyValue. If all operands are constants but not the same,
1637 // then set the operand kind to OK_NonUniformConstantValue.
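// For example (illustrative): {a >> 2, b >> 2, c >> 2, d >> 2} keeps
// OK_UniformConstantValue, {a >> 1, b >> 2, c >> 3, d >> 4} becomes
// OK_NonUniformConstantValue, and {a >> x, b >> 2, ...} becomes OK_AnyValue.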
1638 ConstantInt *CInt = nullptr;
1639 for (unsigned i = 0; i < VL.size(); ++i) {
1640 const Instruction *I = cast<Instruction>(VL[i]);
1641 if (!isa<ConstantInt>(I->getOperand(1))) {
1642 Op2VK = TargetTransformInfo::OK_AnyValue;
1643 break;
1644 }
1645 if (i == 0) {
1646 CInt = cast<ConstantInt>(I->getOperand(1));
1647 continue;
1648 }
1649 if (Op2VK == TargetTransformInfo::OK_UniformConstantValue &&
1650 CInt != cast<ConstantInt>(I->getOperand(1)))
1651 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
1652 }
1653 // FIXME: Currently the cost model modification for division by a power of
1654 // 2 is only handled for X86 and AArch64. Add support for other targets.
1655 if (Op2VK == TargetTransformInfo::OK_UniformConstantValue && CInt &&
1656 CInt->getValue().isPowerOf2())
1657 Op2VP = TargetTransformInfo::OP_PowerOf2;
1658
1659 int ScalarCost = VecTy->getNumElements() *
1660 TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK,
1661 Op2VK, Op1VP, Op2VP);
1662 int VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK,
1663 Op1VP, Op2VP);
1664 return VecCost - ScalarCost;
1665 }
1666 case Instruction::GetElementPtr: {
1667 TargetTransformInfo::OperandValueKind Op1VK =
1668 TargetTransformInfo::OK_AnyValue;
1669 TargetTransformInfo::OperandValueKind Op2VK =
1670 TargetTransformInfo::OK_UniformConstantValue;
1671
1672 int ScalarCost =
1673 VecTy->getNumElements() *
1674 TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK);
1675 int VecCost =
1676 TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK);
1677
1678 return VecCost - ScalarCost;
1679 }
1680 case Instruction::Load: {
1681 // Cost of wide load - cost of scalar loads.
1682 unsigned alignment = cast<LoadInst>(VL0)->getAlignment();
1683 int ScalarLdCost = VecTy->getNumElements() *
1684 TTI->getMemoryOpCost(Instruction::Load, ScalarTy, alignment, 0);
1685 int VecLdCost = TTI->getMemoryOpCost(Instruction::Load,
1686 VecTy, alignment, 0);
1687 return VecLdCost - ScalarLdCost;
1688 }
1689 case Instruction::Store: {
1690 // We know that we can merge the stores. Calculate the cost.
1691 unsigned alignment = cast<StoreInst>(VL0)->getAlignment();
1692 int ScalarStCost = VecTy->getNumElements() *
1693 TTI->getMemoryOpCost(Instruction::Store, ScalarTy, alignment, 0);
1694 int VecStCost = TTI->getMemoryOpCost(Instruction::Store,
1695 VecTy, alignment, 0);
1696 return VecStCost - ScalarStCost;
1697 }
1698 case Instruction::Call: {
1699 CallInst *CI = cast<CallInst>(VL0);
1700 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
1701
1702 // Calculate the cost of the scalar and vector calls.
1703 SmallVector<Type*, 4> ScalarTys, VecTys;
1704 for (unsigned op = 0, opc = CI->getNumArgOperands(); op != opc; ++op) {
1705 ScalarTys.push_back(CI->getArgOperand(op)->getType());
1706 VecTys.push_back(VectorType::get(CI->getArgOperand(op)->getType(),
1707 VecTy->getNumElements()));
1708 }
1709
1710 FastMathFlags FMF;
1711 if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
1712 FMF = FPMO->getFastMathFlags();
1713
1714 int ScalarCallCost = VecTy->getNumElements() *
1715 TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys, FMF);
1716
1717 int VecCallCost = TTI->getIntrinsicInstrCost(ID, VecTy, VecTys, FMF);
1718
1719 DEBUG(dbgs() << "SLP: Call cost "<< VecCallCost - ScalarCallCost
1720 << " (" << VecCallCost << "-" << ScalarCallCost << ")"
1721 << " for " << *CI << "\n");
1722
1723 return VecCallCost - ScalarCallCost;
1724 }
1725 case Instruction::ShuffleVector: {
1726 TargetTransformInfo::OperandValueKind Op1VK =
1727 TargetTransformInfo::OK_AnyValue;
1728 TargetTransformInfo::OperandValueKind Op2VK =
1729 TargetTransformInfo::OK_AnyValue;
1730 int ScalarCost = 0;
1731 int VecCost = 0;
1732 for (Value *i : VL) {
1733 Instruction *I = dyn_cast<Instruction>(i);
1734 if (!I)
1735 break;
1736 ScalarCost +=
1737 TTI->getArithmeticInstrCost(I->getOpcode(), ScalarTy, Op1VK, Op2VK);
1738 }
1739 // VecCost is the sum of the cost of creating the two vectors
1740 // and the cost of creating the shuffle.
1741 Instruction *I0 = cast<Instruction>(VL[0]);
1742 VecCost =
1743 TTI->getArithmeticInstrCost(I0->getOpcode(), VecTy, Op1VK, Op2VK);
1744 Instruction *I1 = cast<Instruction>(VL[1]);
1745 VecCost +=
1746 TTI->getArithmeticInstrCost(I1->getOpcode(), VecTy, Op1VK, Op2VK);
1747 VecCost +=
1748 TTI->getShuffleCost(TargetTransformInfo::SK_Alternate, VecTy, 0);
1749 return VecCost - ScalarCost;
1750 }
1751 default:
1752 llvm_unreachable("Unknown instruction");
1753 }
1754 }
1755
1756 bool BoUpSLP::isFullyVectorizableTinyTree() {
1757 DEBUG(dbgs() << "SLP: Check whether the tree with height " <<
1758 VectorizableTree.size() << " is fully vectorizable.\n");
1759
1760 // We only handle trees of height 2.
1761 if (VectorizableTree.size() != 2)
1762 return false;
1763
1764 // Handle splat and all-constants stores.
1765 if (!VectorizableTree[0].NeedToGather &&
1766 (allConstant(VectorizableTree[1].Scalars) ||
1767 isSplat(VectorizableTree[1].Scalars)))
1768 return true;
1769
1770 // Gathering cost would be too much for tiny trees.
1771 if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
1772 return false;
1773
1774 return true;
1775 }
1776
1777 int BoUpSLP::getSpillCost() {
1778 // Walk from the bottom of the tree to the top, tracking which values are
1779 // live. When we see a call instruction that is not part of our tree,
1780 // query TTI to see if there is a cost to keeping values live over it
1781 // (for example, if spills and fills are required).
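// For example (illustrative), if a call that is not part of the tree sits
// between two bundles, every in-tree value that is live across that call may
// need to be spilled and reloaded around it; TTI reports that cost via
// getCostOfKeepingLiveOverCall.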
1782 unsigned BundleWidth = VectorizableTree.front().Scalars.size();
1783 int Cost = 0;
1784
1785 SmallPtrSet<Instruction*, 4> LiveValues;
1786 Instruction *PrevInst = nullptr;
1787
1788 for (const auto &N : VectorizableTree) {
1789 Instruction *Inst = dyn_cast<Instruction>(N.Scalars[0]);
1790 if (!Inst)
1791 continue;
1792
1793 if (!PrevInst) {
1794 PrevInst = Inst;
1795 continue;
1796 }
1797
1798 // Update LiveValues.
1799 LiveValues.erase(PrevInst);
1800 for (auto &J : PrevInst->operands()) {
1801 if (isa<Instruction>(&*J) && ScalarToTreeEntry.count(&*J))
1802 LiveValues.insert(cast<Instruction>(&*J));
1803 }
1804
1805 DEBUG(
1806 dbgs() << "SLP: #LV: " << LiveValues.size();
1807 for (auto *X : LiveValues)
1808 dbgs() << " " << X->getName();
1809 dbgs() << ", Looking at ";
1810 Inst->dump();
1811 );
1812
1813 // Now find the sequence of instructions between PrevInst and Inst.
1814 BasicBlock::reverse_iterator InstIt(Inst->getIterator()),
1815 PrevInstIt(PrevInst->getIterator());
1816 --PrevInstIt;
1817 while (InstIt != PrevInstIt) {
1818 if (PrevInstIt == PrevInst->getParent()->rend()) {
1819 PrevInstIt = Inst->getParent()->rbegin();
1820 continue;
1821 }
1822
1823 if (isa<CallInst>(&*PrevInstIt) && &*PrevInstIt != PrevInst) {
1824 SmallVector<Type*, 4> V;
1825 for (auto *II : LiveValues)
1826 V.push_back(VectorType::get(II->getType(), BundleWidth));
1827 Cost += TTI->getCostOfKeepingLiveOverCall(V);
1828 }
1829
1830 ++PrevInstIt;
1831 }
1832
1833 PrevInst = Inst;
1834 }
1835
1836 return Cost;
1837 }
1838
1839 int BoUpSLP::getTreeCost() {
1840 int Cost = 0;
1841 DEBUG(dbgs() << "SLP: Calculating cost for tree of size " <<
1842 VectorizableTree.size() << ".\n");
1843
1844 // We only vectorize tiny trees if they are fully vectorizable.
1845 if (VectorizableTree.size() < 3 && !isFullyVectorizableTinyTree()) {
1846 if (VectorizableTree.empty()) {
1847 assert(!ExternalUses.size() && "We should not have any external users");
1848 }
1849 return INT_MAX;
1850 }
1851
1852 unsigned BundleWidth = VectorizableTree[0].Scalars.size();
1853
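// The total cost is the sum of the per-bundle entry costs, the cost of
// extracting scalars that have users outside the tree, and the spill cost
// computed below.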
1854 for (TreeEntry &TE : VectorizableTree) {
1855 int C = getEntryCost(&TE);
1856 DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with "
1857 << *TE.Scalars[0] << ".\n");
1858 Cost += C;
1859 }
1860
1861 SmallSet<Value *, 16> ExtractCostCalculated;
1862 int ExtractCost = 0;
1863 for (ExternalUser &EU : ExternalUses) {
1864 // We only add extract cost once for the same scalar.
1865 if (!ExtractCostCalculated.insert(EU.Scalar).second)
1866 continue;
1867
1868 // Uses by ephemeral values are free (because the ephemeral value will be
1869 // removed prior to code generation, and so the extraction will be
1870 // removed as well).
1871 if (EphValues.count(EU.User))
1872 continue;
1873
1874 // If we plan to rewrite the tree in a smaller type, we will need to sign
1875 // extend the extracted value back to the original type. Here, we account
1876 // for the extract and the added cost of the sign extend if needed.
1877 auto *VecTy = VectorType::get(EU.Scalar->getType(), BundleWidth);
1878 auto *ScalarRoot = VectorizableTree[0].Scalars[0];
1879 if (MinBWs.count(ScalarRoot)) {
1880 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot]);
1881 VecTy = VectorType::get(MinTy, BundleWidth);
1882 ExtractCost += TTI->getExtractWithExtendCost(
1883 Instruction::SExt, EU.Scalar->getType(), VecTy, EU.Lane);
1884 } else {
1885 ExtractCost +=
1886 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);
1887 }
1888 }
1889
1890 int SpillCost = getSpillCost();
1891 Cost += SpillCost + ExtractCost;
1892
1893 DEBUG(dbgs() << "SLP: Spill Cost = " << SpillCost << ".\n"
1894 << "SLP: Extract Cost = " << ExtractCost << ".\n"
1895 << "SLP: Total Cost = " << Cost << ".\n");
1896 return Cost;
1897 }
1898
1899 int BoUpSLP::getGatherCost(Type *Ty) {
1900 int Cost = 0;
1901 for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i)
1902 Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
1903 return Cost;
1904 }
1905
1906 int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) {
1907 // Find the type of the operands in VL.
1908 Type *ScalarTy = VL[0]->getType();
1909 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
1910 ScalarTy = SI->getValueOperand()->getType();
1911 VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
1912 // Find the cost of inserting/extracting values from the vector.
1913 return getGatherCost(VecTy);
1914 }
1915
1916 // Reorder commutative operations in an alternate shuffle if the resulting
1917 // vectors are consecutive loads. This would allow us to vectorize the tree.
1918 // If we have something like:
1919 // load a[0] - load b[0]
1920 // load b[1] + load a[1]
1921 // load a[2] - load b[2]
1922 // load a[3] + load b[3]
1923 // reordering the operands of the second lane (load b[1], load a[1]) would
1924 // allow us to vectorize this code.
1925 void BoUpSLP::reorderAltShuffleOperands(ArrayRef<Value *> VL,
1926 SmallVectorImpl<Value *> &Left,
1927 SmallVectorImpl<Value *> &Right) {
1928 // Push left and right operands of binary operation into Left and Right
1929 for (Value *i : VL) {
1930 Left.push_back(cast<Instruction>(i)->getOperand(0));
1931 Right.push_back(cast<Instruction>(i)->getOperand(1));
1932 }
1933
1934 // Reorder if we have a commutative operation and consecutive accesses
1935 // are on either side of the alternate instructions.
1936 for (unsigned j = 0; j < VL.size() - 1; ++j) {
1937 if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
1938 if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
1939 Instruction *VL1 = cast<Instruction>(VL[j]);
1940 Instruction *VL2 = cast<Instruction>(VL[j + 1]);
1941 if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) {
1942 std::swap(Left[j], Right[j]);
1943 continue;
1944 } else if (VL2->isCommutative() &&
1945 isConsecutiveAccess(L, L1, *DL, *SE)) {
1946 std::swap(Left[j + 1], Right[j + 1]);
1947 continue;
1948 }
1949 // else unchanged
1950 }
1951 }
1952 if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) {
1953 if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) {
1954 Instruction *VL1 = cast<Instruction>(VL[j]);
1955 Instruction *VL2 = cast<Instruction>(VL[j + 1]);
1956 if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) {
1957 std::swap(Left[j], Right[j]);
1958 continue;
1959 } else if (VL2->isCommutative() &&
1960 isConsecutiveAccess(L, L1, *DL, *SE)) {
1961 std::swap(Left[j + 1], Right[j + 1]);
1962 continue;
1963 }
1964 // else unchanged
1965 }
1966 }
1967 }
1968 }
1969
1970 // Return true if I should be commuted before adding its left and right
1971 // operands to the arrays Left and Right.
1972 //
1973 // The vectorizer is trying either to keep all elements on one side being
1974 // instructions with the same opcode, to enable further vectorization, or to
1975 // form a splat on one side, to lower the vectorization cost.
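// For example (illustrative), for the bundle {a + x, b + x, c + x, d + x}
// the repeated operand x is kept on the right-hand side of every lane, so
// the right-hand vector operand becomes a splat of x.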
1976 static bool shouldReorderOperands(int i, Instruction &I,
1977 SmallVectorImpl<Value *> &Left,
1978 SmallVectorImpl<Value *> &Right,
1979 bool AllSameOpcodeLeft,
1980 bool AllSameOpcodeRight, bool SplatLeft,
1981 bool SplatRight) {
1982 Value *VLeft = I.getOperand(0);
1983 Value *VRight = I.getOperand(1);
1984 // If we have "SplatRight", try to see if commuting is needed to preserve it.
1985 if (SplatRight) {
1986 if (VRight == Right[i - 1])
1987 // Preserve SplatRight
1988 return false;
1989 if (VLeft == Right[i - 1]) {
1990 // Commuting would preserve SplatRight, but we don't want to break
1991 // SplatLeft either, i.e. preserve the original order if possible.
1992 // (FIXME: why do we care?)
1993 if (SplatLeft && VLeft == Left[i - 1])
1994 return false;
1995 return true;
1996 }
1997 }
1998 // Symmetrically handle Right side.
1999 if (SplatLeft) {
2000 if (VLeft == Left[i - 1])
2001 // Preserve SplatLeft
2002 return false;
2003 if (VRight == Left[i - 1])
2004 return true;
2005 }
2006
2007 Instruction *ILeft = dyn_cast<Instruction>(VLeft);
2008 Instruction *IRight = dyn_cast<Instruction>(VRight);
2009
2010 // If we have "AllSameOpcodeRight", check whether the left operand preserves
2011 // it while the right one does not; in that case we want to commute.
2012 if (AllSameOpcodeRight) {
2013 unsigned RightPrevOpcode = cast<Instruction>(Right[i - 1])->getOpcode();
2014 if (IRight && RightPrevOpcode == IRight->getOpcode())
2015 // Do not commute, a match on the right preserves AllSameOpcodeRight
2016 return false;
2017 if (ILeft && RightPrevOpcode == ILeft->getOpcode()) {
2018 // We have a match and may want to commute, but first check if there is
2019 // not also a match on the existing operands on the Left to preserve
2020 // AllSameOpcodeLeft, i.e. preserve the original order if possible.
2021 // (FIXME: why do we care?)
2022 if (AllSameOpcodeLeft && ILeft &&
2023 cast<Instruction>(Left[i - 1])->getOpcode() == ILeft->getOpcode())
2024 return false;
2025 return true;
2026 }
2027 }
2028 // Symmetrically handle Left side.
2029 if (AllSameOpcodeLeft) {
2030 unsigned LeftPrevOpcode = cast<Instruction>(Left[i - 1])->getOpcode();
2031 if (ILeft && LeftPrevOpcode == ILeft->getOpcode())
2032 return false;
2033 if (IRight && LeftPrevOpcode == IRight->getOpcode())
2034 return true;
2035 }
2036 return false;
2037 }
2038
2039 void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
2040 SmallVectorImpl<Value *> &Left,
2041 SmallVectorImpl<Value *> &Right) {
2042
2043 if (VL.size()) {
2044 // Peel the first iteration out of the loop since there's nothing
2045 // interesting to do anyway and it simplifies the checks in the loop.
2046 auto VLeft = cast<Instruction>(VL[0])->getOperand(0);
2047 auto VRight = cast<Instruction>(VL[0])->getOperand(1);
2048 if (!isa<Instruction>(VRight) && isa<Instruction>(VLeft))
2049 // Favor having instruction to the right. FIXME: why?
2050 std::swap(VLeft, VRight);
2051 Left.push_back(VLeft);
2052 Right.push_back(VRight);
2053 }
2054
2055 // Keep track if we have instructions with all the same opcode on one side.
2056 bool AllSameOpcodeLeft = isa<Instruction>(Left[0]);
2057 bool AllSameOpcodeRight = isa<Instruction>(Right[0]);
2058 // Keep track if we have one side with all the same value (broadcast).
2059 bool SplatLeft = true;
2060 bool SplatRight = true;
2061
2062 for (unsigned i = 1, e = VL.size(); i != e; ++i) {
2063 Instruction *I = cast<Instruction>(VL[i]);
2064 assert(I->isCommutative() && "Can only process commutative instruction");
2065 // Commute to favor either a splat or maximizing having the same opcodes on
2066 // one side.
2067 if (shouldReorderOperands(i, *I, Left, Right, AllSameOpcodeLeft,
2068 AllSameOpcodeRight, SplatLeft, SplatRight)) {
2069 Left.push_back(I->getOperand(1));
2070 Right.push_back(I->getOperand(0));
2071 } else {
2072 Left.push_back(I->getOperand(0));
2073 Right.push_back(I->getOperand(1));
2074 }
2075 // Update Splat* and AllSameOpcode* after the insertion.
2076 SplatRight = SplatRight && (Right[i - 1] == Right[i]);
2077 SplatLeft = SplatLeft && (Left[i - 1] == Left[i]);
2078 AllSameOpcodeLeft = AllSameOpcodeLeft && isa<Instruction>(Left[i]) &&
2079 (cast<Instruction>(Left[i - 1])->getOpcode() ==
2080 cast<Instruction>(Left[i])->getOpcode());
2081 AllSameOpcodeRight = AllSameOpcodeRight && isa<Instruction>(Right[i]) &&
2082 (cast<Instruction>(Right[i - 1])->getOpcode() ==
2083 cast<Instruction>(Right[i])->getOpcode());
2084 }
2085
2086 // If one operand ends up being a broadcast, return this operand order.
2087 if (SplatRight || SplatLeft)
2088 return;
2089
2090 // Finally check if we can get longer vectorizable chain by reordering
2091 // without breaking the good operand order detected above.
2092 // E.g. if we have something like:
2093 // load a[0] load b[0]
2094 // load b[1] load a[1]
2095 // load a[2] load b[2]
2096 // load a[3] load b[3]
2097 // reordering the second lane (load b[1], load a[1]) would allow us to
2098 // vectorize this code while still retaining the AllSameOpcode property.
2099 // FIXME: This load reordering might break AllSameOpcode in some rare cases
2100 // such as:
2101 // add a[0],c[0] load b[0]
2102 // add a[1],c[2] load b[1]
2103 // b[2] load b[2]
2104 // add a[3],c[3] load b[3]
2105 for (unsigned j = 0; j < VL.size() - 1; ++j) {
2106 if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
2107 if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
2108 if (isConsecutiveAccess(L, L1, *DL, *SE)) {
2109 std::swap(Left[j + 1], Right[j + 1]);
2110 continue;
2111 }
2112 }
2113 }
2114 if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) {
2115 if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) {
2116 if (isConsecutiveAccess(L, L1, *DL, *SE)) {
2117 std::swap(Left[j + 1], Right[j + 1]);
2118 continue;
2119 }
2120 }
2121 }
2122 // else unchanged
2123 }
2124 }
2125
2126 void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) {
2127 Instruction *VL0 = cast<Instruction>(VL[0]);
2128 BasicBlock::iterator NextInst(VL0);
2129 ++NextInst;
2130 Builder.SetInsertPoint(VL0->getParent(), NextInst);
2131 Builder.SetCurrentDebugLocation(VL0->getDebugLoc());
2132 }
2133
2134 Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
2135 Value *Vec = UndefValue::get(Ty);
2136 // Generate the 'InsertElement' instruction.
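// For a four-element bundle this emits a chain like (illustrative):
//   %v0 = insertelement <4 x i32> undef, i32 %a, i32 0
//   %v1 = insertelement <4 x i32> %v0, i32 %b, i32 1
//   ... and so on for the remaining lanes.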
2137 for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
2138 Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
2139 if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) {
2140 GatherSeq.insert(Insrt);
2141 CSEBlocks.insert(Insrt->getParent());
2142
2143 // Add to our 'need-to-extract' list.
2144 if (ScalarToTreeEntry.count(VL[i])) {
2145 int Idx = ScalarToTreeEntry[VL[i]];
2146 TreeEntry *E = &VectorizableTree[Idx];
2147 // Find which lane we need to extract.
2148 int FoundLane = -1;
2149 for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
2150 // Is this the lane of the scalar that we are looking for?
2151 if (E->Scalars[Lane] == VL[i]) {
2152 FoundLane = Lane;
2153 break;
2154 }
2155 }
2156 assert(FoundLane >= 0 && "Could not find the correct lane");
2157 ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane));
2158 }
2159 }
2160 }
2161
2162 return Vec;
2163 }
2164
2165 Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL) const {
2166 SmallDenseMap<Value*, int>::const_iterator Entry
2167 = ScalarToTreeEntry.find(VL[0]);
2168 if (Entry != ScalarToTreeEntry.end()) {
2169 int Idx = Entry->second;
2170 const TreeEntry *En = &VectorizableTree[Idx];
2171 if (En->isSame(VL) && En->VectorizedValue)
2172 return En->VectorizedValue;
2173 }
2174 return nullptr;
2175 }
2176
2177 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
2178 if (ScalarToTreeEntry.count(VL[0])) {
2179 int Idx = ScalarToTreeEntry[VL[0]];
2180 TreeEntry *E = &VectorizableTree[Idx];
2181 if (E->isSame(VL))
2182 return vectorizeTree(E);
2183 }
2184
2185 Type *ScalarTy = VL[0]->getType();
2186 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
2187 ScalarTy = SI->getValueOperand()->getType();
2188 VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
2189
2190 return Gather(VL, VecTy);
2191 }
2192
2193 Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
2194 IRBuilder<>::InsertPointGuard Guard(Builder);
2195
2196 if (E->VectorizedValue) {
2197 DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
2198 return E->VectorizedValue;
2199 }
2200
2201 Instruction *VL0 = cast<Instruction>(E->Scalars[0]);
2202 Type *ScalarTy = VL0->getType();
2203 if (StoreInst *SI = dyn_cast<StoreInst>(VL0))
2204 ScalarTy = SI->getValueOperand()->getType();
2205 VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size());
2206
2207 if (E->NeedToGather) {
2208 setInsertPointAfterBundle(E->Scalars);
2209 return Gather(E->Scalars, VecTy);
2210 }
2211
2212 unsigned Opcode = getSameOpcode(E->Scalars);
2213
2214 switch (Opcode) {
2215 case Instruction::PHI: {
2216 PHINode *PH = cast<PHINode>(VL0);
2217 Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
2218 Builder.SetCurrentDebugLocation(PH->getDebugLoc());
2219 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
2220 E->VectorizedValue = NewPhi;
2221
2222 // PHINodes may have multiple entries from the same block. We want to
2223 // visit every block once.
2224 SmallSet<BasicBlock*, 4> VisitedBBs;
2225
2226 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
2227 ValueList Operands;
2228 BasicBlock *IBB = PH->getIncomingBlock(i);
2229
2230 if (!VisitedBBs.insert(IBB).second) {
2231 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
2232 continue;
2233 }
2234
2235 // Prepare the operand vector.
2236 for (Value *V : E->Scalars)
2237 Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(IBB));
2238
2239 Builder.SetInsertPoint(IBB->getTerminator());
2240 Builder.SetCurrentDebugLocation(PH->getDebugLoc());
2241 Value *Vec = vectorizeTree(Operands);
2242 NewPhi->addIncoming(Vec, IBB);
2243 }
2244
2245 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
2246 "Invalid number of incoming values");
2247 return NewPhi;
2248 }
2249
2250 case Instruction::ExtractElement: {
2251 if (canReuseExtract(E->Scalars, Instruction::ExtractElement)) {
2252 Value *V = VL0->getOperand(0);
2253 E->VectorizedValue = V;
2254 return V;
2255 }
2256 return Gather(E->Scalars, VecTy);
2257 }
2258 case Instruction::ExtractValue: {
2259 if (canReuseExtract(E->Scalars, Instruction::ExtractValue)) {
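// All elements of the aggregate load feeding these extractvalues are used,
// so the aggregate load itself is rewritten as a single vector load, e.g.
// (illustrative) a load of { float, float } becomes a load of <2 x float>.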
2260 LoadInst *LI = cast<LoadInst>(VL0->getOperand(0));
2261 Builder.SetInsertPoint(LI);
2262 PointerType *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace());
2263 Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
2264 LoadInst *V = Builder.CreateAlignedLoad(Ptr, LI->getAlignment());
2265 E->VectorizedValue = V;
2266 return propagateMetadata(V, E->Scalars);
2267 }
2268 return Gather(E->Scalars, VecTy);
2269 }
2270 case Instruction::ZExt:
2271 case Instruction::SExt:
2272 case Instruction::FPToUI:
2273 case Instruction::FPToSI:
2274 case Instruction::FPExt:
2275 case Instruction::PtrToInt:
2276 case Instruction::IntToPtr:
2277 case Instruction::SIToFP:
2278 case Instruction::UIToFP:
2279 case Instruction::Trunc:
2280 case Instruction::FPTrunc:
2281 case Instruction::BitCast: {
2282 ValueList INVL;
2283 for (Value *V : E->Scalars)
2284 INVL.push_back(cast<Instruction>(V)->getOperand(0));
2285
2286 setInsertPointAfterBundle(E->Scalars);
2287
2288 Value *InVec = vectorizeTree(INVL);
2289
2290 if (Value *V = alreadyVectorized(E->Scalars))
2291 return V;
2292
2293 CastInst *CI = cast<CastInst>(VL0);
2294 Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
2295 E->VectorizedValue = V;
2296 ++NumVectorInstructions;
2297 return V;
2298 }
2299 case Instruction::FCmp:
2300 case Instruction::ICmp: {
2301 ValueList LHSV, RHSV;
2302 for (Value *V : E->Scalars) {
2303 LHSV.push_back(cast<Instruction>(V)->getOperand(0));
2304 RHSV.push_back(cast<Instruction>(V)->getOperand(1));
2305 }
2306
2307 setInsertPointAfterBundle(E->Scalars);
2308
2309 Value *L = vectorizeTree(LHSV);
2310 Value *R = vectorizeTree(RHSV);
2311
2312 if (Value *V = alreadyVectorized(E->Scalars))
2313 return V;
2314
2315 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
2316 Value *V;
2317 if (Opcode == Instruction::FCmp)
2318 V = Builder.CreateFCmp(P0, L, R);
2319 else
2320 V = Builder.CreateICmp(P0, L, R);
2321
2322 E->VectorizedValue = V;
2323 ++NumVectorInstructions;
2324 return V;
2325 }
2326 case Instruction::Select: {
2327 ValueList TrueVec, FalseVec, CondVec;
2328 for (Value *V : E->Scalars) {
2329 CondVec.push_back(cast<Instruction>(V)->getOperand(0));
2330 TrueVec.push_back(cast<Instruction>(V)->getOperand(1));
2331 FalseVec.push_back(cast<Instruction>(V)->getOperand(2));
2332 }
2333
2334 setInsertPointAfterBundle(E->Scalars);
2335
2336 Value *Cond = vectorizeTree(CondVec);
2337 Value *True = vectorizeTree(TrueVec);
2338 Value *False = vectorizeTree(FalseVec);
2339
2340 if (Value *V = alreadyVectorized(E->Scalars))
2341 return V;
2342
2343 Value *V = Builder.CreateSelect(Cond, True, False);
2344 E->VectorizedValue = V;
2345 ++NumVectorInstructions;
2346 return V;
2347 }
2348 case Instruction::Add:
2349 case Instruction::FAdd:
2350 case Instruction::Sub:
2351 case Instruction::FSub:
2352 case Instruction::Mul:
2353 case Instruction::FMul:
2354 case Instruction::UDiv:
2355 case Instruction::SDiv:
2356 case Instruction::FDiv:
2357 case Instruction::URem:
2358 case Instruction::SRem:
2359 case Instruction::FRem:
2360 case Instruction::Shl:
2361 case Instruction::LShr:
2362 case Instruction::AShr:
2363 case Instruction::And:
2364 case Instruction::Or:
2365 case Instruction::Xor: {
2366 ValueList LHSVL, RHSVL;
2367 if (isa<BinaryOperator>(VL0) && VL0->isCommutative())
2368 reorderInputsAccordingToOpcode(E->Scalars, LHSVL, RHSVL);
2369 else
2370 for (Value *V : E->Scalars) {
2371 LHSVL.push_back(cast<Instruction>(V)->getOperand(0));
2372 RHSVL.push_back(cast<Instruction>(V)->getOperand(1));
2373 }
2374
2375 setInsertPointAfterBundle(E->Scalars);
2376
2377 Value *LHS = vectorizeTree(LHSVL);
2378 Value *RHS = vectorizeTree(RHSVL);
2379
2380 if (LHS == RHS && isa<Instruction>(LHS)) {
2381 assert((VL0->getOperand(0) == VL0->getOperand(1)) && "Invalid order");
2382 }
2383
2384 if (Value *V = alreadyVectorized(E->Scalars))
2385 return V;
2386
2387 BinaryOperator *BinOp = cast<BinaryOperator>(VL0);
2388 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS);
2389 E->VectorizedValue = V;
2390 propagateIRFlags(E->VectorizedValue, E->Scalars);
2391 ++NumVectorInstructions;
2392
2393 if (Instruction *I = dyn_cast<Instruction>(V))
2394 return propagateMetadata(I, E->Scalars);
2395
2396 return V;
2397 }
2398 case Instruction::Load: {
2399 // Loads are inserted at the head of the tree because we don't want to
2400 // sink them all the way down past store instructions.
2401 setInsertPointAfterBundle(E->Scalars);
2402
2403 LoadInst *LI = cast<LoadInst>(VL0);
2404 Type *ScalarLoadTy = LI->getType();
2405 unsigned AS = LI->getPointerAddressSpace();
2406
2407 Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(),
2408 VecTy->getPointerTo(AS));
2409
2410 // The pointer operand uses an in-tree scalar so we add the new BitCast to
2411 // ExternalUses list to make sure that an extract will be generated in the
2412 // future.
2413 if (ScalarToTreeEntry.count(LI->getPointerOperand()))
2414 ExternalUses.push_back(
2415 ExternalUser(LI->getPointerOperand(), cast<User>(VecPtr), 0));
2416
2417 unsigned Alignment = LI->getAlignment();
2418 LI = Builder.CreateLoad(VecPtr);
2419 if (!Alignment) {
2420 Alignment = DL->getABITypeAlignment(ScalarLoadTy);
2421 }
2422 LI->setAlignment(Alignment);
2423 E->VectorizedValue = LI;
2424 ++NumVectorInstructions;
2425 return propagateMetadata(LI, E->Scalars);
2426 }
2427 case Instruction::Store: {
2428 StoreInst *SI = cast<StoreInst>(VL0);
2429 unsigned Alignment = SI->getAlignment();
2430 unsigned AS = SI->getPointerAddressSpace();
2431
2432 ValueList ValueOp;
2433 for (Value *V : E->Scalars)
2434 ValueOp.push_back(cast<StoreInst>(V)->getValueOperand());
2435
2436 setInsertPointAfterBundle(E->Scalars);
2437
2438 Value *VecValue = vectorizeTree(ValueOp);
2439 Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(),
2440 VecTy->getPointerTo(AS));
2441 StoreInst *S = Builder.CreateStore(VecValue, VecPtr);
2442
2443 // The pointer operand uses an in-tree scalar so we add the new BitCast to
2444 // ExternalUses list to make sure that an extract will be generated in the
2445 // future.
2446 if (ScalarToTreeEntry.count(SI->getPointerOperand()))
2447 ExternalUses.push_back(
2448 ExternalUser(SI->getPointerOperand(), cast<User>(VecPtr), 0));
2449
2450 if (!Alignment) {
2451 Alignment = DL->getABITypeAlignment(SI->getValueOperand()->getType());
2452 }
2453 S->setAlignment(Alignment);
2454 E->VectorizedValue = S;
2455 ++NumVectorInstructions;
2456 return propagateMetadata(S, E->Scalars);
2457 }
2458 case Instruction::GetElementPtr: {
2459 setInsertPointAfterBundle(E->Scalars);
2460
2461 ValueList Op0VL;
2462 for (Value *V : E->Scalars)
2463 Op0VL.push_back(cast<GetElementPtrInst>(V)->getOperand(0));
2464
2465 Value *Op0 = vectorizeTree(Op0VL);
2466
2467 std::vector<Value *> OpVecs;
2468 for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e;
2469 ++j) {
2470 ValueList OpVL;
2471 for (Value *V : E->Scalars)
2472 OpVL.push_back(cast<GetElementPtrInst>(V)->getOperand(j));
2473
2474 Value *OpVec = vectorizeTree(OpVL);
2475 OpVecs.push_back(OpVec);
2476 }
2477
2478 Value *V = Builder.CreateGEP(
2479 cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs);
2480 E->VectorizedValue = V;
2481 ++NumVectorInstructions;
2482
2483 if (Instruction *I = dyn_cast<Instruction>(V))
2484 return propagateMetadata(I, E->Scalars);
2485
2486 return V;
2487 }
2488 case Instruction::Call: {
2489 CallInst *CI = cast<CallInst>(VL0);
2490 setInsertPointAfterBundle(E->Scalars);
2491 Function *FI;
2492 Intrinsic::ID IID = Intrinsic::not_intrinsic;
2493 Value *ScalarArg = nullptr;
2494 if (CI && (FI = CI->getCalledFunction())) {
2495 IID = FI->getIntrinsicID();
2496 }
2497 std::vector<Value *> OpVecs;
2498 for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) {
2499 ValueList OpVL;
2500 // ctlz, cttz and powi are special intrinsics whose second argument is
2501 // a scalar. This argument should not be vectorized.
2502 if (hasVectorInstrinsicScalarOpd(IID, 1) && j == 1) {
2503 CallInst *CEI = cast<CallInst>(E->Scalars[0]);
2504 ScalarArg = CEI->getArgOperand(j);
2505 OpVecs.push_back(CEI->getArgOperand(j));
2506 continue;
2507 }
2508 for (Value *V : E->Scalars) {
2509 CallInst *CEI = cast<CallInst>(V);
2510 OpVL.push_back(CEI->getArgOperand(j));
2511 }
2512
2513 Value *OpVec = vectorizeTree(OpVL);
2514 DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
2515 OpVecs.push_back(OpVec);
2516 }
2517
2518 Module *M = F->getParent();
2519 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
2520 Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) };
2521 Function *CF = Intrinsic::getDeclaration(M, ID, Tys);
2522 SmallVector<OperandBundleDef, 1> OpBundles;
2523 CI->getOperandBundlesAsDefs(OpBundles);
2524 Value *V = Builder.CreateCall(CF, OpVecs, OpBundles);
2525
2526 // The scalar argument uses an in-tree scalar so we add the new vectorized
2527 // call to ExternalUses list to make sure that an extract will be
2528 // generated in the future.
2529 if (ScalarArg && ScalarToTreeEntry.count(ScalarArg))
2530 ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0));
2531
2532 E->VectorizedValue = V;
2533 ++NumVectorInstructions;
2534 return V;
2535 }
2536 case Instruction::ShuffleVector: {
2537 ValueList LHSVL, RHSVL;
2538 assert(isa<BinaryOperator>(VL0) && "Invalid Shuffle Vector Operand");
2539 reorderAltShuffleOperands(E->Scalars, LHSVL, RHSVL);
2540 setInsertPointAfterBundle(E->Scalars);
2541
2542 Value *LHS = vectorizeTree(LHSVL);
2543 Value *RHS = vectorizeTree(RHSVL);
2544
2545 if (Value *V = alreadyVectorized(E->Scalars))
2546 return V;
2547
2548 // Create a vector of LHS op1 RHS
2549 BinaryOperator *BinOp0 = cast<BinaryOperator>(VL0);
2550 Value *V0 = Builder.CreateBinOp(BinOp0->getOpcode(), LHS, RHS);
2551
2552 // Create a vector of LHS op2 RHS
2553 Instruction *VL1 = cast<Instruction>(E->Scalars[1]);
2554 BinaryOperator *BinOp1 = cast<BinaryOperator>(VL1);
2555 Value *V1 = Builder.CreateBinOp(BinOp1->getOpcode(), LHS, RHS);
2556
2557 // Create shuffle to take alternate operations from the vector.
2558 // Also, gather up odd and even scalar ops to propagate IR flags to
2559 // each vector operation.
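// For example (illustrative), with four scalars the mask is <0, 5, 2, 7>:
// even lanes are taken from V0 and odd lanes from V1.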
2560 ValueList OddScalars, EvenScalars;
2561 unsigned e = E->Scalars.size();
2562 SmallVector<Constant *, 8> Mask(e);
2563 for (unsigned i = 0; i < e; ++i) {
2564 if (i & 1) {
2565 Mask[i] = Builder.getInt32(e + i);
2566 OddScalars.push_back(E->Scalars[i]);
2567 } else {
2568 Mask[i] = Builder.getInt32(i);
2569 EvenScalars.push_back(E->Scalars[i]);
2570 }
2571 }
2572
2573 Value *ShuffleMask = ConstantVector::get(Mask);
2574 propagateIRFlags(V0, EvenScalars);
2575 propagateIRFlags(V1, OddScalars);
2576
2577 Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
2578 E->VectorizedValue = V;
2579 ++NumVectorInstructions;
2580 if (Instruction *I = dyn_cast<Instruction>(V))
2581 return propagateMetadata(I, E->Scalars);
2582
2583 return V;
2584 }
2585 default:
2586 llvm_unreachable("unknown inst");
2587 }
2588 return nullptr;
2589 }
2590
2591 Value *BoUpSLP::vectorizeTree() {
2592
2593 // All blocks must be scheduled before any instructions are inserted.
2594 for (auto &BSIter : BlocksSchedules) {
2595 scheduleBlock(BSIter.second.get());
2596 }
2597
2598 Builder.SetInsertPoint(&F->getEntryBlock().front());
2599 auto *VectorRoot = vectorizeTree(&VectorizableTree[0]);
2600
2601 // If the vectorized tree can be rewritten in a smaller type, we truncate the
2602 // vectorized root. InstCombine will then rewrite the entire expression. We
2603 // sign extend the extracted values below.
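// For example (illustrative), if MinBWs says the root expression only needs
// 8 bits, a <4 x i32> root is truncated to <4 x i8> here, and each value
// extracted for an external user is sign-extended back to i32 below.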
2604 auto *ScalarRoot = VectorizableTree[0].Scalars[0];
2605 if (MinBWs.count(ScalarRoot)) {
2606 if (auto *I = dyn_cast<Instruction>(VectorRoot))
2607 Builder.SetInsertPoint(&*++BasicBlock::iterator(I));
2608 auto BundleWidth = VectorizableTree[0].Scalars.size();
2609 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot]);
2610 auto *VecTy = VectorType::get(MinTy, BundleWidth);
2611 auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy);
2612 VectorizableTree[0].VectorizedValue = Trunc;
2613 }
2614
2615 DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values.\n");
2616
2617 // Extract all of the elements with the external uses.
2618 for (const auto &ExternalUse : ExternalUses) {
2619 Value *Scalar = ExternalUse.Scalar;
2620 llvm::User *User = ExternalUse.User;
2621
2622 // Skip users that we have already RAUWed. This happens when one instruction
2623 // has multiple uses of the same value.
2624 if (std::find(Scalar->user_begin(), Scalar->user_end(), User) ==
2625 Scalar->user_end())
2626 continue;
2627 assert(ScalarToTreeEntry.count(Scalar) && "Invalid scalar");
2628
2629 int Idx = ScalarToTreeEntry[Scalar];
2630 TreeEntry *E = &VectorizableTree[Idx];
2631 assert(!E->NeedToGather && "Extracting from a gather list");
2632
2633 Value *Vec = E->VectorizedValue;
2634 assert(Vec && "Can't find vectorizable value");
2635
2636 Value *Lane = Builder.getInt32(ExternalUse.Lane);
2637 // Generate extracts for out-of-tree users.
2638 // Find the insertion point for the extractelement lane.
2639 if (auto *VecI = dyn_cast<Instruction>(Vec)) {
2640 if (PHINode *PH = dyn_cast<PHINode>(User)) {
2641 for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
2642 if (PH->getIncomingValue(i) == Scalar) {
2643 TerminatorInst *IncomingTerminator =
2644 PH->getIncomingBlock(i)->getTerminator();
2645 if (isa<CatchSwitchInst>(IncomingTerminator)) {
2646 Builder.SetInsertPoint(VecI->getParent(),
2647 std::next(VecI->getIterator()));
2648 } else {
2649 Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
2650 }
2651 Value *Ex = Builder.CreateExtractElement(Vec, Lane);
2652 if (MinBWs.count(ScalarRoot))
2653 Ex = Builder.CreateSExt(Ex, Scalar->getType());
2654 CSEBlocks.insert(PH->getIncomingBlock(i));
2655 PH->setOperand(i, Ex);
2656 }
2657 }
2658 } else {
2659 Builder.SetInsertPoint(cast<Instruction>(User));
2660 Value *Ex = Builder.CreateExtractElement(Vec, Lane);
2661 if (MinBWs.count(ScalarRoot))
2662 Ex = Builder.CreateSExt(Ex, Scalar->getType());
2663 CSEBlocks.insert(cast<Instruction>(User)->getParent());
2664 User->replaceUsesOfWith(Scalar, Ex);
2665 }
2666 } else {
2667 Builder.SetInsertPoint(&F->getEntryBlock().front());
2668 Value *Ex = Builder.CreateExtractElement(Vec, Lane);
2669 if (MinBWs.count(ScalarRoot))
2670 Ex = Builder.CreateSExt(Ex, Scalar->getType());
2671 CSEBlocks.insert(&F->getEntryBlock());
2672 User->replaceUsesOfWith(Scalar, Ex);
2673 }
2674
2675 DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
2676 }
2677
2678 // For each vectorized value:
2679 for (TreeEntry &EIdx : VectorizableTree) {
2680 TreeEntry *Entry = &EIdx;
2681
2682 // For each lane:
2683 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
2684 Value *Scalar = Entry->Scalars[Lane];
2685 // No need to handle users of gathered values.
2686 if (Entry->NeedToGather)
2687 continue;
2688
2689 assert(Entry->VectorizedValue && "Can't find vectorizable value");
2690
2691 Type *Ty = Scalar->getType();
2692 if (!Ty->isVoidTy()) {
2693 #ifndef NDEBUG
2694 for (User *U : Scalar->users()) {
2695 DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
2696
2697 assert((ScalarToTreeEntry.count(U) ||
2698 // It is legal to replace users in the ignorelist by undef.
2699 (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), U) !=
2700 UserIgnoreList.end())) &&
2701 "Replacing out-of-tree value with undef");
2702 }
2703 #endif
2704 Value *Undef = UndefValue::get(Ty);
2705 Scalar->replaceAllUsesWith(Undef);
2706 }
2707 DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
2708 eraseInstruction(cast<Instruction>(Scalar));
2709 }
2710 }
2711
2712 Builder.ClearInsertionPoint();
2713
2714 return VectorizableTree[0].VectorizedValue;
2715 }
2716
2717 void BoUpSLP::optimizeGatherSequence() {
2718 DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
2719 << " gather sequence instructions.\n");
2720 // LICM InsertElementInst sequences.
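// For example (illustrative), a gather of loop-invariant values emitted
// inside a loop body is hoisted to the loop preheader so that it executes
// only once.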
2721 for (Instruction *it : GatherSeq) {
2722 InsertElementInst *Insert = dyn_cast<InsertElementInst>(it);
2723
2724 if (!Insert)
2725 continue;
2726
2727 // Check if this block is inside a loop.
2728 Loop *L = LI->getLoopFor(Insert->getParent());
2729 if (!L)
2730 continue;
2731
2732 // Check if it has a preheader.
2733 BasicBlock *PreHeader = L->getLoopPreheader();
2734 if (!PreHeader)
2735 continue;
2736
2737 // If the vector or the element that we insert into it are
2738 // instructions that are defined in this basic block then we can't
2739 // hoist this instruction.
2740 Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0));
2741 Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1));
2742 if (CurrVec && L->contains(CurrVec))
2743 continue;
2744 if (NewElem && L->contains(NewElem))
2745 continue;
2746
2747 // We can hoist this instruction. Move it to the pre-header.
2748 Insert->moveBefore(PreHeader->getTerminator());
2749 }
2750
2751 // Make a list of all reachable blocks in our CSE queue.
2752 SmallVector<const DomTreeNode *, 8> CSEWorkList;
2753 CSEWorkList.reserve(CSEBlocks.size());
2754 for (BasicBlock *BB : CSEBlocks)
2755 if (DomTreeNode *N = DT->getNode(BB)) {
2756 assert(DT->isReachableFromEntry(N));
2757 CSEWorkList.push_back(N);
2758 }
2759
2760 // Sort blocks by domination. This ensures we visit a block after all blocks
2761 // dominating it are visited.
2762 std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(),
2763 [this](const DomTreeNode *A, const DomTreeNode *B) {
2764 return DT->properlyDominates(A, B);
2765 });
2766
2767 // Perform O(N^2) search over the gather sequences and merge identical
2768 // instructions. TODO: We can further optimize this scan if we split the
2769 // instructions into different buckets based on the insert lane.
2770 SmallVector<Instruction *, 16> Visited;
2771 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
2772 assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
2773 "Worklist not sorted properly!");
2774 BasicBlock *BB = (*I)->getBlock();
2775 // For all instructions in blocks containing gather sequences:
2776 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
2777 Instruction *In = &*it++;
2778 if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In))
2779 continue;
2780
2781 // Check if we can replace this instruction with any of the
2782 // visited instructions.
2783 for (Instruction *v : Visited) {
2784 if (In->isIdenticalTo(v) &&
2785 DT->dominates(v->getParent(), In->getParent())) {
2786 In->replaceAllUsesWith(v);
2787 eraseInstruction(In);
2788 In = nullptr;
2789 break;
2790 }
2791 }
2792 if (In) {
2793 assert(std::find(Visited.begin(), Visited.end(), In) == Visited.end());
2794 Visited.push_back(In);
2795 }
2796 }
2797 }
2798 CSEBlocks.clear();
2799 GatherSeq.clear();
2800 }
2801
2802 // Groups the instructions into a bundle (which is then a single scheduling
2803 // entity) and schedules instructions until the bundle gets ready.
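// Returns false if the bundle cannot be scheduled, e.g. because the
// scheduling region would exceed its size budget or because cyclic
// dependencies prevent the bundle from becoming ready; the caller then
// typically gathers the values instead of vectorizing them.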
2804 bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL,
2805 BoUpSLP *SLP) {
2806 if (isa<PHINode>(VL[0]))
2807 return true;
2808
2809 // Initialize the instruction bundle.
2810 Instruction *OldScheduleEnd = ScheduleEnd;
2811 ScheduleData *PrevInBundle = nullptr;
2812 ScheduleData *Bundle = nullptr;
2813 bool ReSchedule = false;
2814 DEBUG(dbgs() << "SLP: bundle: " << *VL[0] << "\n");
2815
2816 // Make sure that the scheduling region contains all
2817 // instructions of the bundle.
2818 for (Value *V : VL) {
2819 if (!extendSchedulingRegion(V))
2820 return false;
2821 }
2822
2823 for (Value *V : VL) {
2824 ScheduleData *BundleMember = getScheduleData(V);
2825 assert(BundleMember &&
2826 "no ScheduleData for bundle member (maybe not in same basic block)");
2827 if (BundleMember->IsScheduled) {
2828 // A bundle member was scheduled as single instruction before and now
2829 // needs to be scheduled as part of the bundle. We just get rid of the
2830 // existing schedule.
2831 DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
2832 << " was already scheduled\n");
2833 ReSchedule = true;
2834 }
2835 assert(BundleMember->isSchedulingEntity() &&
2836 "bundle member already part of other bundle");
2837 if (PrevInBundle) {
2838 PrevInBundle->NextInBundle = BundleMember;
2839 } else {
2840 Bundle = BundleMember;
2841 }
2842 BundleMember->UnscheduledDepsInBundle = 0;
2843 Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;
2844
2845 // Group the instructions to a bundle.
2846 BundleMember->FirstInBundle = Bundle;
2847 PrevInBundle = BundleMember;
2848 }
2849 if (ScheduleEnd != OldScheduleEnd) {
2850 // The scheduling region got new instructions at the lower end (or it is a
2851 // new region for the first bundle). This makes it necessary to
2852 // recalculate all dependencies.
2853 // It is seldom that this needs to be done a second time after adding the
2854 // initial bundle to the region.
2855 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
2856 ScheduleData *SD = getScheduleData(I);
2857 SD->clearDependencies();
2858 }
2859 ReSchedule = true;
2860 }
2861 if (ReSchedule) {
2862 resetSchedule();
2863 initialFillReadyList(ReadyInsts);
2864 }
2865
2866 DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block "
2867 << BB->getName() << "\n");
2868
2869 calculateDependencies(Bundle, true, SLP);
2870
2871 // Now try to schedule the new bundle. As soon as the bundle is "ready" it
2872 // means that there are no cyclic dependencies and we can schedule it.
2873 // Note that it's important that we don't "schedule" the bundle yet (see
2874 // cancelScheduling).
2875 while (!Bundle->isReady() && !ReadyInsts.empty()) {
2876
2877 ScheduleData *pickedSD = ReadyInsts.back();
2878 ReadyInsts.pop_back();
2879
2880 if (pickedSD->isSchedulingEntity() && pickedSD->isReady()) {
2881 schedule(pickedSD, ReadyInsts);
2882 }
2883 }
2884 if (!Bundle->isReady()) {
2885 cancelScheduling(VL);
2886 return false;
2887 }
2888 return true;
2889 }
2890
2891 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL) {
2892 if (isa<PHINode>(VL[0]))
2893 return;
2894
2895 ScheduleData *Bundle = getScheduleData(VL[0]);
2896 DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
2897 assert(!Bundle->IsScheduled &&
2898 "Can't cancel bundle which is already scheduled");
2899 assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
2900 "tried to unbundle something which is not a bundle");
2901
2902 // Un-bundle: make single instructions out of the bundle.
2903 ScheduleData *BundleMember = Bundle;
2904 while (BundleMember) {
2905 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
2906 BundleMember->FirstInBundle = BundleMember;
2907 ScheduleData *Next = BundleMember->NextInBundle;
2908 BundleMember->NextInBundle = nullptr;
2909 BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps;
2910 if (BundleMember->UnscheduledDepsInBundle == 0) {
2911 ReadyInsts.insert(BundleMember);
2912 }
2913 BundleMember = Next;
2914 }
2915 }
2916
2917 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V) {
2918 if (getScheduleData(V))
2919 return true;
2920 Instruction *I = dyn_cast<Instruction>(V);
2921 assert(I && "bundle member must be an instruction");
2922 assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled");
2923 if (!ScheduleStart) {
2924 // It's the first instruction in the new region.
2925 initScheduleData(I, I->getNextNode(), nullptr, nullptr);
2926 ScheduleStart = I;
2927 ScheduleEnd = I->getNextNode();
2928 assert(ScheduleEnd && "tried to vectorize a TerminatorInst?");
2929 DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n");
2930 return true;
2931 }
2932 // Search up and down at the same time, because we don't know if the new
2933 // instruction is above or below the existing scheduling region.
2934 BasicBlock::reverse_iterator UpIter(ScheduleStart->getIterator());
2935 BasicBlock::reverse_iterator UpperEnd = BB->rend();
2936 BasicBlock::iterator DownIter(ScheduleEnd);
2937 BasicBlock::iterator LowerEnd = BB->end();
2938 for (;;) {
2939 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) {
2940 DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n");
2941 return false;
2942 }
2943
2944 if (UpIter != UpperEnd) {
2945 if (&*UpIter == I) {
2946 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
2947 ScheduleStart = I;
2948 DEBUG(dbgs() << "SLP: extend schedule region start to " << *I << "\n");
2949 return true;
2950 }
2951 UpIter++;
2952 }
2953 if (DownIter != LowerEnd) {
2954 if (&*DownIter == I) {
2955 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
2956 nullptr);
2957 ScheduleEnd = I->getNextNode();
2958 assert(ScheduleEnd && "tried to vectorize a TerminatorInst?");
2959 DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n");
2960 return true;
2961 }
2962 DownIter++;
2963 }
2964 assert((UpIter != UpperEnd || DownIter != LowerEnd) &&
2965 "instruction not found in block");
2966 }
2967 return true;
2968 }
2969
2970 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
2971 Instruction *ToI,
2972 ScheduleData *PrevLoadStore,
2973 ScheduleData *NextLoadStore) {
2974 ScheduleData *CurrentLoadStore = PrevLoadStore;
2975 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
2976 ScheduleData *SD = ScheduleDataMap[I];
2977 if (!SD) {
2978 // Allocate a new ScheduleData for the instruction.
2979 if (ChunkPos >= ChunkSize) {
2980 ScheduleDataChunks.push_back(
2981 llvm::make_unique<ScheduleData[]>(ChunkSize));
2982 ChunkPos = 0;
2983 }
2984 SD = &(ScheduleDataChunks.back()[ChunkPos++]);
2985 ScheduleDataMap[I] = SD;
2986 SD->Inst = I;
2987 }
2988 assert(!isInSchedulingRegion(SD) &&
2989 "new ScheduleData already in scheduling region");
2990 SD->init(SchedulingRegionID);
2991
2992 if (I->mayReadOrWriteMemory()) {
2993 // Update the linked list of memory accessing instructions.
2994 if (CurrentLoadStore) {
2995 CurrentLoadStore->NextLoadStore = SD;
2996 } else {
2997 FirstLoadStoreInRegion = SD;
2998 }
2999 CurrentLoadStore = SD;
3000 }
3001 }
3002 if (NextLoadStore) {
3003 if (CurrentLoadStore)
3004 CurrentLoadStore->NextLoadStore = NextLoadStore;
3005 } else {
3006 LastLoadStoreInRegion = CurrentLoadStore;
3007 }
3008 }
3009
3010 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
3011 bool InsertInReadyList,
3012 BoUpSLP *SLP) {
3013 assert(SD->isSchedulingEntity());
3014
3015 SmallVector<ScheduleData *, 10> WorkList;
3016 WorkList.push_back(SD);
3017
3018 while (!WorkList.empty()) {
3019 ScheduleData *SD = WorkList.back();
3020 WorkList.pop_back();
3021
3022 ScheduleData *BundleMember = SD;
3023 while (BundleMember) {
3024 assert(isInSchedulingRegion(BundleMember));
3025 if (!BundleMember->hasValidDependencies()) {
3026
3027 DEBUG(dbgs() << "SLP: update deps of " << *BundleMember << "\n");
3028 BundleMember->Dependencies = 0;
3029 BundleMember->resetUnscheduledDeps();
3030
3031 // Handle def-use chain dependencies.
3032 for (User *U : BundleMember->Inst->users()) {
3033 if (isa<Instruction>(U)) {
3034 ScheduleData *UseSD = getScheduleData(U);
3035 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
3036 BundleMember->Dependencies++;
3037 ScheduleData *DestBundle = UseSD->FirstInBundle;
3038 if (!DestBundle->IsScheduled) {
3039 BundleMember->incrementUnscheduledDeps(1);
3040 }
3041 if (!DestBundle->hasValidDependencies()) {
3042 WorkList.push_back(DestBundle);
3043 }
3044 }
3045 } else {
3046 // I'm not sure if this can ever happen. But we need to be safe.
3047 // This lets the instruction/bundle never be scheduled and eventually
3048 // disables vectorization.
3049 BundleMember->Dependencies++;
3050 BundleMember->incrementUnscheduledDeps(1);
3051 }
3052 }
3053
3054 // Handle the memory dependencies.
3055 ScheduleData *DepDest = BundleMember->NextLoadStore;
3056 if (DepDest) {
3057 Instruction *SrcInst = BundleMember->Inst;
3058 MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA);
3059 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
3060 unsigned numAliased = 0;
3061 unsigned DistToSrc = 1;
3062
3063 while (DepDest) {
3064 assert(isInSchedulingRegion(DepDest));
3065
3066 // We have two limits to reduce the complexity:
3067 // 1) AliasedCheckLimit: It's a small limit to reduce calls to
3068 // SLP->isAliased (which is the expensive part in this loop).
3069 // 2) MaxMemDepDistance: It's for very large blocks and it aborts
3070 // the whole loop (even if the loop is fast, it's quadratic).
3071 // It's important for the loop break condition (see below) to
3072 // check this limit even between two read-only instructions.
3073 if (DistToSrc >= MaxMemDepDistance ||
3074 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
3075 (numAliased >= AliasedCheckLimit ||
3076 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {
3077
3078 // We increment the counter only if the locations are aliased
3079 // (instead of counting all alias checks). This gives a better
3080 // balance between reduced runtime and accurate dependencies.
3081 numAliased++;
3082
3083 DepDest->MemoryDependencies.push_back(BundleMember);
3084 BundleMember->Dependencies++;
3085 ScheduleData *DestBundle = DepDest->FirstInBundle;
3086 if (!DestBundle->IsScheduled) {
3087 BundleMember->incrementUnscheduledDeps(1);
3088 }
3089 if (!DestBundle->hasValidDependencies()) {
3090 WorkList.push_back(DestBundle);
3091 }
3092 }
3093 DepDest = DepDest->NextLoadStore;
3094
3095 // Example, explaining the loop break condition: Let's assume our
3096 // starting instruction is i0 and MaxMemDepDistance = 3.
3097 //
3098 // +--------v--v--v
3099 // i0,i1,i2,i3,i4,i5,i6,i7,i8
3100 // +--------^--^--^
3101 //
3102 // MaxMemDepDistance lets us stop alias-checking at i3 and we add
3103 // dependencies from i0 to i3,i4,.. (even if they are not aliased).
3104 // Previously we already added dependencies from i3 to i6,i7,i8
3105 // (because of MaxMemDepDistance). As we added a dependency from
3106 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8
3107 // and we can abort this loop at i6.
3108 if (DistToSrc >= 2 * MaxMemDepDistance)
3109 break;
3110 DistToSrc++;
3111 }
3112 }
3113 }
3114 BundleMember = BundleMember->NextInBundle;
3115 }
3116 if (InsertInReadyList && SD->isReady()) {
3117 ReadyInsts.push_back(SD);
3118 DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst << "\n");
3119 }
3120 }
3121 }
3122
3123 void BoUpSLP::BlockScheduling::resetSchedule() {
3124 assert(ScheduleStart &&
3125 "tried to reset schedule on block which has not been scheduled");
3126 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
3127 ScheduleData *SD = getScheduleData(I);
3128 assert(isInSchedulingRegion(SD));
3129 SD->IsScheduled = false;
3130 SD->resetUnscheduledDeps();
3131 }
3132 ReadyInsts.clear();
3133 }
3134
3135 void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
3136
3137 if (!BS->ScheduleStart)
3138 return;
3139
3140 DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");
3141
3142 BS->resetSchedule();
3143
3144 // For the real scheduling we use a more sophisticated ready-list: it is
3145 // sorted by the original instruction location. This lets the final schedule
3146 // be as close as possible to the original instruction order.
3147 struct ScheduleDataCompare {
3148 bool operator()(ScheduleData *SD1, ScheduleData *SD2) {
3149 return SD2->SchedulingPriority < SD1->SchedulingPriority;
3150 }
3151 };
3152 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;
3153
3154 // Ensure that all dependency data is updated and fill the ready-list with
3155 // initial instructions.
3156 int Idx = 0;
3157 int NumToSchedule = 0;
3158 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
3159 I = I->getNextNode()) {
3160 ScheduleData *SD = BS->getScheduleData(I);
3161 assert(
3162 SD->isPartOfBundle() == (ScalarToTreeEntry.count(SD->Inst) != 0) &&
3163 "scheduler and vectorizer have different opinion on what is a bundle");
3164 SD->FirstInBundle->SchedulingPriority = Idx++;
3165 if (SD->isSchedulingEntity()) {
3166 BS->calculateDependencies(SD, false, this);
3167 NumToSchedule++;
3168 }
3169 }
3170 BS->initialFillReadyList(ReadyInsts);
3171
3172 Instruction *LastScheduledInst = BS->ScheduleEnd;
3173
3174 // Do the "real" scheduling.
3175 while (!ReadyInsts.empty()) {
3176 ScheduleData *picked = *ReadyInsts.begin();
3177 ReadyInsts.erase(ReadyInsts.begin());
3178
3179 // Move the scheduled instruction(s) to their dedicated places, if not
3180 // there yet.
3181 ScheduleData *BundleMember = picked;
3182 while (BundleMember) {
3183 Instruction *pickedInst = BundleMember->Inst;
3184 if (LastScheduledInst->getNextNode() != pickedInst) {
3185 BS->BB->getInstList().remove(pickedInst);
3186 BS->BB->getInstList().insert(LastScheduledInst->getIterator(),
3187 pickedInst);
3188 }
3189 LastScheduledInst = pickedInst;
3190 BundleMember = BundleMember->NextInBundle;
3191 }
3192
3193 BS->schedule(picked, ReadyInsts);
3194 NumToSchedule--;
3195 }
3196 assert(NumToSchedule == 0 && "could not schedule all instructions");
3197
3198 // Avoid duplicate scheduling of the block.
3199 BS->ScheduleStart = nullptr;
3200 }
3201
3202 unsigned BoUpSLP::getVectorElementSize(Value *V) {
3203 // If V is a store, just return the width of the stored value without
3204 // traversing the expression tree. This is the common case.
3205 if (auto *Store = dyn_cast<StoreInst>(V))
3206 return DL->getTypeSizeInBits(Store->getValueOperand()->getType());
3207
3208 // If V is not a store, we can traverse the expression tree to find loads
3209 // that feed it. The type of the loaded value may indicate a more suitable
3210 // width than V's type. We want to base the vector element size on the width
3211 // of memory operations where possible.
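// For example (illustrative IR, not taken from a test case): given
//   %x = load i16, i16* %p
//   %e = sext i16 %x to i32
//   %a = add i32 %e, %c
// querying %a would return 16, the width of the feeding load, rather than 32,
// assuming %c also traces back to a 16-bit load or is not an instruction.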
3212 SmallVector<Instruction *, 16> Worklist;
3213 SmallPtrSet<Instruction *, 16> Visited;
3214 if (auto *I = dyn_cast<Instruction>(V))
3215 Worklist.push_back(I);
3216
3217 // Traverse the expression tree in bottom-up order looking for loads. If we
3218 // encounter an instruction we don't yet handle, we give up.
3219 auto MaxWidth = 0u;
3220 auto FoundUnknownInst = false;
3221 while (!Worklist.empty() && !FoundUnknownInst) {
3222 auto *I = Worklist.pop_back_val();
3223 Visited.insert(I);
3224
3225 // We should only be looking at scalar instructions here. If the current
3226 // instruction has a vector type, give up.
3227 auto *Ty = I->getType();
3228 if (isa<VectorType>(Ty))
3229 FoundUnknownInst = true;
3230
3231 // If the current instruction is a load, update MaxWidth to reflect the
3232 // width of the loaded value.
3233 else if (isa<LoadInst>(I))
3234 MaxWidth = std::max<unsigned>(MaxWidth, DL->getTypeSizeInBits(Ty));
3235
3236 // Otherwise, we need to visit the operands of the instruction. We only
3237 // handle the interesting cases from buildTree here. If an operand is an
3238 // instruction we haven't yet visited, we add it to the worklist.
3239 else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
3240 isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I)) {
3241 for (Use &U : I->operands())
3242 if (auto *J = dyn_cast<Instruction>(U.get()))
3243 if (!Visited.count(J))
3244 Worklist.push_back(J);
3245 }
3246
3247 // If we don't yet handle the instruction, give up.
3248 else
3249 FoundUnknownInst = true;
3250 }
3251
3252 // If we didn't encounter a memory access in the expression tree, or if we
3253 // gave up for some reason, just return the width of V.
3254 if (!MaxWidth || FoundUnknownInst)
3255 return DL->getTypeSizeInBits(V->getType());
3256
3257 // Otherwise, return the maximum width we found.
3258 return MaxWidth;
3259 }
3260
3261 // Determine if a value V in a vectorizable expression Expr can be demoted to a
3262 // smaller type with a truncation. We collect the values that will be demoted
3263 // in ToDemote and additional roots that require investigating in Roots.
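// As an illustration (hypothetical values, assuming they are all part of the
// vectorizable expression and each has a single use):
//   %z = zext i8 %x to i32
//   %s = add i32 %z, 1
// Here %z, the constant 1, and finally %s itself are all collected into
// ToDemote, so %s could later be computed in i8 and truncated.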
3264 static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr,
3265 SmallVectorImpl<Value *> &ToDemote,
3266 SmallVectorImpl<Value *> &Roots) {
3267
3268 // We can always demote constants.
3269 if (isa<Constant>(V)) {
3270 ToDemote.push_back(V);
3271 return true;
3272 }
3273
3274 // If the value is not an instruction in the expression with only one use, it
3275 // cannot be demoted.
3276 auto *I = dyn_cast<Instruction>(V);
3277 if (!I || !I->hasOneUse() || !Expr.count(I))
3278 return false;
3279
3280 switch (I->getOpcode()) {
3281
3282 // We can always demote truncations and extensions. Since truncations can
3283 // seed additional demotion, we save the truncated value.
3284 case Instruction::Trunc:
3285 Roots.push_back(I->getOperand(0));
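// Note: intentional fall-through; the cast itself is recorded below.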
3286 case Instruction::ZExt:
3287 case Instruction::SExt:
3288 break;
3289
3290 // We can demote certain binary operations if we can demote both of their
3291 // operands.
3292 case Instruction::Add:
3293 case Instruction::Sub:
3294 case Instruction::Mul:
3295 case Instruction::And:
3296 case Instruction::Or:
3297 case Instruction::Xor:
3298 if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) ||
3299 !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots))
3300 return false;
3301 break;
3302
3303 // We can demote selects if we can demote their true and false values.
3304 case Instruction::Select: {
3305 SelectInst *SI = cast<SelectInst>(I);
3306 if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) ||
3307 !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots))
3308 return false;
3309 break;
3310 }
3311
3312 // We can demote phis if we can demote all their incoming operands. Note that
3313 // we don't need to worry about cycles since we ensure single use above.
3314 case Instruction::PHI: {
3315 PHINode *PN = cast<PHINode>(I);
3316 for (Value *IncValue : PN->incoming_values())
3317 if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots))
3318 return false;
3319 break;
3320 }
3321
3322 // Otherwise, conservatively give up.
3323 default:
3324 return false;
3325 }
3326
3327 // Record the value that we can demote.
3328 ToDemote.push_back(V);
3329 return true;
3330 }
3331
3332 void BoUpSLP::computeMinimumValueSizes() {
3333 // If there are no external uses, the expression tree must be rooted by a
3334 // store. We can't demote in-memory values, so there is nothing to do here.
3335 if (ExternalUses.empty())
3336 return;
3337
3338 // We only attempt to truncate integer expressions.
3339 auto &TreeRoot = VectorizableTree[0].Scalars;
3340 auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType());
3341 if (!TreeRootIT)
3342 return;
3343
3344 // If the expression is not rooted by a store, these roots should have
3345 // external uses. We will rely on InstCombine to rewrite the expression in
3346 // the narrower type. However, InstCombine only rewrites single-use values.
3347 // This means that if a tree entry other than a root is used externally, it
3348 // must have multiple uses and InstCombine will not rewrite it. The code
3349 // below ensures that only the roots are used externally.
3350 SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end());
3351 for (auto &EU : ExternalUses)
3352 if (!Expr.erase(EU.Scalar))
3353 return;
3354 if (!Expr.empty())
3355 return;
3356
3357 // Collect the scalar values of the vectorizable expression. We will use this
3358 // context to determine which values can be demoted. If we see a truncation,
3359 // we mark it as seeding another demotion.
3360 for (auto &Entry : VectorizableTree)
3361 Expr.insert(Entry.Scalars.begin(), Entry.Scalars.end());
3362
3363 // Ensure the roots of the vectorizable tree don't form a cycle. They must
3364 // have a single external user that is not in the vectorizable tree.
3365 for (auto *Root : TreeRoot)
3366 if (!Root->hasOneUse() || Expr.count(*Root->user_begin()))
3367 return;
3368
3369 // Conservatively determine if we can actually truncate the roots of the
3370 // expression. Collect the values that can be demoted in ToDemote and
3371 // additional roots that require investigating in Roots.
3372 SmallVector<Value *, 32> ToDemote;
3373 SmallVector<Value *, 4> Roots;
3374 for (auto *Root : TreeRoot)
3375 if (!collectValuesToDemote(Root, Expr, ToDemote, Roots))
3376 return;
3377
3378 // The maximum bit width required to represent all the values that can be
3379 // demoted without loss of precision. It would be safe to truncate the roots
3380 // of the expression to this width.
3381 auto MaxBitWidth = 8u;
3382
3383 // We first check if all the bits of the roots are demanded. If they're not,
3384 // we can truncate the roots to this narrower type.
3385 for (auto *Root : TreeRoot) {
3386 auto Mask = DB->getDemandedBits(cast<Instruction>(Root));
3387 MaxBitWidth = std::max<unsigned>(
3388 Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth);
3389 }
3390
3391 // If all the bits of the roots are demanded, we can try a little harder to
3392 // compute a narrower type. This can happen, for example, if the roots are
3393 // getelementptr indices. InstCombine promotes these indices to the pointer
3394 // width. Thus, all their bits are technically demanded even though the
3395 // address computation might be vectorized in a smaller type.
3396 //
3397 // We start by looking at each entry that can be demoted. We compute the
3398 // maximum bit width required to store the scalar by using ValueTracking to
3399 // compute the number of high-order bits we can truncate.
3400 if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType())) {
3401 MaxBitWidth = 8u;
3402 for (auto *Scalar : ToDemote) {
3403 auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, 0, DT);
3404 auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
3405 MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
3406 }
3407 }
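// As a hypothetical example: if every scalar in ToDemote is an i64 value with
// at least 56 known sign bits, then NumTypeBits - NumSignBits == 8 for each of
// them, MaxBitWidth stays at 8, and the roots can be truncated to i8.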
3408
3409 // Round MaxBitWidth up to the next power-of-two.
3410 if (!isPowerOf2_64(MaxBitWidth))
3411 MaxBitWidth = NextPowerOf2(MaxBitWidth);
3412
3413 // If the maximum bit width we compute is less than the width of the roots'
3414 // type, we can proceed with the narrowing. Otherwise, do nothing.
3415 if (MaxBitWidth >= TreeRootIT->getBitWidth())
3416 return;
3417
3418 // If we can truncate the root, we must collect additional values that might
3419 // be demoted as a result. That is, those seeded by truncations we will
3420 // modify.
3421 while (!Roots.empty())
3422 collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);
3423
3424 // Finally, map the values we can demote to the maximum bit width we computed.
3425 for (auto *Scalar : ToDemote)
3426 MinBWs[Scalar] = MaxBitWidth;
3427 }
3428
3429 namespace {
3430 /// The SLPVectorizer Pass.
3431 struct SLPVectorizer : public FunctionPass {
3432 SLPVectorizerPass Impl;
3433
3434 /// Pass identification, replacement for typeid
3435 static char ID;
3436
3437 explicit SLPVectorizer() : FunctionPass(ID) {
3438 initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
3439 }
3440
3441
3442 bool doInitialization(Module &M) override {
3443 return false;
3444 }
3445
3446 bool runOnFunction(Function &F) override {
3447 if (skipFunction(F))
3448 return false;
3449
3450 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
3451 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
3452 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
3453 auto *TLI = TLIP ? &TLIP->getTLI() : nullptr;
3454 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
3455 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
3456 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
3457 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
3458 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
3459
3460 return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB);
3461 }
3462
3463 void getAnalysisUsage(AnalysisUsage &AU) const override {
3464 FunctionPass::getAnalysisUsage(AU);
3465 AU.addRequired<AssumptionCacheTracker>();
3466 AU.addRequired<ScalarEvolutionWrapperPass>();
3467 AU.addRequired<AAResultsWrapperPass>();
3468 AU.addRequired<TargetTransformInfoWrapperPass>();
3469 AU.addRequired<LoopInfoWrapperPass>();
3470 AU.addRequired<DominatorTreeWrapperPass>();
3471 AU.addRequired<DemandedBitsWrapperPass>();
3472 AU.addPreserved<LoopInfoWrapperPass>();
3473 AU.addPreserved<DominatorTreeWrapperPass>();
3474 AU.addPreserved<AAResultsWrapperPass>();
3475 AU.addPreserved<GlobalsAAWrapperPass>();
3476 AU.setPreservesCFG();
3477 }
3478 };
3479 } // end anonymous namespace
3480
3481 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) {
3482 auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
3483 auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
3484 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
3485 auto *AA = &AM.getResult<AAManager>(F);
3486 auto *LI = &AM.getResult<LoopAnalysis>(F);
3487 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
3488 auto *AC = &AM.getResult<AssumptionAnalysis>(F);
3489 auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
3490
3491 bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB);
3492 if (!Changed)
3493 return PreservedAnalyses::all();
3494 PreservedAnalyses PA;
3495 PA.preserve<LoopAnalysis>();
3496 PA.preserve<DominatorTreeAnalysis>();
3497 PA.preserve<AAManager>();
3498 PA.preserve<GlobalsAA>();
3499 return PA;
3500 }
3501
3502 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
3503 TargetTransformInfo *TTI_,
3504 TargetLibraryInfo *TLI_, AliasAnalysis *AA_,
3505 LoopInfo *LI_, DominatorTree *DT_,
3506 AssumptionCache *AC_, DemandedBits *DB_) {
3507 SE = SE_;
3508 TTI = TTI_;
3509 TLI = TLI_;
3510 AA = AA_;
3511 LI = LI_;
3512 DT = DT_;
3513 AC = AC_;
3514 DB = DB_;
3515 DL = &F.getParent()->getDataLayout();
3516
3517 Stores.clear();
3518 GEPs.clear();
3519 bool Changed = false;
3520
3521 // If the target claims to have no vector registers don't attempt
3522 // vectorization.
3523 if (!TTI->getNumberOfRegisters(true))
3524 return false;
3525
3526 // Don't vectorize when the attribute NoImplicitFloat is used.
3527 if (F.hasFnAttribute(Attribute::NoImplicitFloat))
3528 return false;
3529
3530 DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
3531
3532 // Use the bottom-up SLP vectorizer to construct chains that start with
3533 // store instructions.
3534 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL);
3535
3536 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
3537 // delete instructions.
3538
3539 // Scan the blocks in the function in post order.
3540 for (auto BB : post_order(&F.getEntryBlock())) {
3541 collectSeedInstructions(BB);
3542
3543 // Vectorize trees that end at stores.
3544 if (!Stores.empty()) {
3545 DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
3546 << " underlying objects.\n");
3547 Changed |= vectorizeStoreChains(R);
3548 }
3549
3550 // Vectorize trees that end at reductions.
3551 Changed |= vectorizeChainsInBlock(BB, R);
3552
3553 // Vectorize the index computations of getelementptr instructions. This
3554 // is primarily intended to catch gather-like idioms ending at
3555 // non-consecutive loads.
3556 if (!GEPs.empty()) {
3557 DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
3558 << " underlying objects.\n");
3559 Changed |= vectorizeGEPIndices(BB, R);
3560 }
3561 }
3562
3563 if (Changed) {
3564 R.optimizeGatherSequence();
3565 DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
3566 DEBUG(verifyFunction(F));
3567 }
3568 return Changed;
3569 }
3570
3571 /// \brief Check that the values in the slice of the VL array still exist in
3572 /// the WeakVH array.
3573 /// Vectorization of part of the VL array may cause later values in the VL array
3574 /// to become invalid. We track when this has happened in the WeakVH array.
3575 static bool hasValueBeenRAUWed(ArrayRef<Value *> VL, ArrayRef<WeakVH> VH,
3576 unsigned SliceBegin, unsigned SliceSize) {
3577 VL = VL.slice(SliceBegin, SliceSize);
3578 VH = VH.slice(SliceBegin, SliceSize);
3579 return !std::equal(VL.begin(), VL.end(), VH.begin());
3580 }
3581
3582 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain,
3583 int CostThreshold, BoUpSLP &R,
3584 unsigned VecRegSize) {
3585 unsigned ChainLen = Chain.size();
3586 DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
3587 << "\n");
3588 unsigned Sz = R.getVectorElementSize(Chain[0]);
3589 unsigned VF = VecRegSize / Sz;
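// For example (hypothetical numbers): with a 128-bit register and 32-bit
// stored values, Sz == 32 and VF == 4, so we look for groups of four
// consecutive stores below.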
3590
3591 if (!isPowerOf2_32(Sz) || VF < 2)
3592 return false;
3593
3594 // Keep track of values that were deleted by vectorizing in the loop below.
3595 SmallVector<WeakVH, 8> TrackValues(Chain.begin(), Chain.end());
3596
3597 bool Changed = false;
3598 // Look for profitable vectorizable trees at all offsets, starting at zero.
3599 for (unsigned i = 0, e = ChainLen; i < e; ++i) {
3600 if (i + VF > e)
3601 break;
3602
3603 // Check that a previous iteration of this loop did not delete the Value.
3604 if (hasValueBeenRAUWed(Chain, TrackValues, i, VF))
3605 continue;
3606
3607 DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
3608 << "\n");
3609 ArrayRef<Value *> Operands = Chain.slice(i, VF);
3610
3611 R.buildTree(Operands);
3612 R.computeMinimumValueSizes();
3613
3614 int Cost = R.getTreeCost();
3615
3616 DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n");
3617 if (Cost < CostThreshold) {
3618 DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");
3619 R.vectorizeTree();
3620
3621 // Move to the next bundle.
3622 i += VF - 1;
3623 Changed = true;
3624 }
3625 }
3626
3627 return Changed;
3628 }
3629
3630 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
3631 int costThreshold, BoUpSLP &R) {
3632 SetVector<StoreInst *> Heads, Tails;
3633 SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;
3634
3635 // We may run into multiple chains that merge into a single chain. We mark the
3636 // stores that we vectorized so that we don't visit the same store twice.
3637 BoUpSLP::ValueSet VectorizedStores;
3638 bool Changed = false;
3639
3640 // Do a quadratic search on all of the given stores and find
3641 // all of the pairs of stores that follow each other.
3642 SmallVector<unsigned, 16> IndexQueue;
3643 for (unsigned i = 0, e = Stores.size(); i < e; ++i) {
3644 IndexQueue.clear();
3645 // If a store has multiple consecutive store candidates, search the Stores
3646 // array in the order i+1 to e, then i-1 down to 0. Pairing with the
3647 // immediately succeeding or preceding candidate usually gives the best
3648 // chance of finding an SLP vectorization opportunity.
3649 unsigned j = 0;
3650 for (j = i + 1; j < e; ++j)
3651 IndexQueue.push_back(j);
3652 for (j = i; j > 0; --j)
3653 IndexQueue.push_back(j - 1);
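// For instance, with i == 2 and e == 5 the candidates are visited in the
// order 3, 4, 1, 0: the immediately succeeding stores first, then the
// preceding ones.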
3654
3655 for (auto &k : IndexQueue) {
3656 if (isConsecutiveAccess(Stores[i], Stores[k], *DL, *SE)) {
3657 Tails.insert(Stores[k]);
3658 Heads.insert(Stores[i]);
3659 ConsecutiveChain[Stores[i]] = Stores[k];
3660 break;
3661 }
3662 }
3663 }
3664
3665 // For stores that start but don't end a link in the chain:
3666 for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
3667 it != e; ++it) {
3668 if (Tails.count(*it))
3669 continue;
3670
3671 // We found a store instr that starts a chain. Now follow the chain and try
3672 // to vectorize it.
3673 BoUpSLP::ValueList Operands;
3674 StoreInst *I = *it;
3675 // Collect the chain into a list.
3676 while (Tails.count(I) || Heads.count(I)) {
3677 if (VectorizedStores.count(I))
3678 break;
3679 Operands.push_back(I);
3680 // Move to the next value in the chain.
3681 I = ConsecutiveChain[I];
3682 }
3683
3684 // FIXME: Is division-by-2 the correct step? Should we assert that the
3685 // register size is a power-of-2?
3686 for (unsigned Size = R.getMaxVecRegSize(); Size >= R.getMinVecRegSize(); Size /= 2) {
3687 if (vectorizeStoreChain(Operands, costThreshold, R, Size)) {
3688 // Mark the vectorized stores so that we don't vectorize them again.
3689 VectorizedStores.insert(Operands.begin(), Operands.end());
3690 Changed = true;
3691 break;
3692 }
3693 }
3694 }
3695
3696 return Changed;
3697 }
3698
3699 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
3700
3701 // Initialize the collections. We will make a single pass over the block.
3702 Stores.clear();
3703 GEPs.clear();
3704
3705 // Visit the store and getelementptr instructions in BB and organize them in
3706 // Stores and GEPs according to the underlying objects of their pointer
3707 // operands.
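// For example, stores to a[0] and a[1] (a hypothetical array) share the
// underlying object "a" and therefore land in the same Stores bucket, which
// later forms a single candidate chain for vectorizeStores.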
3708 for (Instruction &I : *BB) {
3709
3710 // Ignore store instructions that are volatile or have a pointer operand
3711 // that doesn't point to a scalar type.
3712 if (auto *SI = dyn_cast<StoreInst>(&I)) {
3713 if (!SI->isSimple())
3714 continue;
3715 if (!isValidElementType(SI->getValueOperand()->getType()))
3716 continue;
3717 Stores[GetUnderlyingObject(SI->getPointerOperand(), *DL)].push_back(SI);
3718 }
3719
3720 // Ignore getelementptr instructions that have more than one index, a
3721 // constant index, or a pointer operand that doesn't point to a scalar
3722 // type.
3723 else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
3724 auto Idx = GEP->idx_begin()->get();
3725 if (GEP->getNumIndices() > 1 || isa<Constant>(Idx))
3726 continue;
3727 if (!isValidElementType(Idx->getType()))
3728 continue;
3729 if (GEP->getType()->isVectorTy())
3730 continue;
3731 GEPs[GetUnderlyingObject(GEP->getPointerOperand(), *DL)].push_back(GEP);
3732 }
3733 }
3734 }
3735
3736 bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
3737 if (!A || !B)
3738 return false;
3739 Value *VL[] = { A, B };
3740 return tryToVectorizeList(VL, R, None, true);
3741 }
3742
3743 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
3744 ArrayRef<Value *> BuildVector,
3745 bool allowReorder) {
3746 if (VL.size() < 2)
3747 return false;
3748
3749 DEBUG(dbgs() << "SLP: Vectorizing a list of length = " << VL.size() << ".\n");
3750
3751 // Check that all of the parts are scalar instructions of the same type.
3752 Instruction *I0 = dyn_cast<Instruction>(VL[0]);
3753 if (!I0)
3754 return false;
3755
3756 unsigned Opcode0 = I0->getOpcode();
3757
3758 // FIXME: Register size should be a parameter to this function, so we can
3759 // try different vectorization factors.
3760 unsigned Sz = R.getVectorElementSize(I0);
3761 unsigned VF = R.getMinVecRegSize() / Sz;
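// E.g. (illustrative): with a minimum register size of 128 bits and 32-bit
// scalars, VF == 4, so the list is examined in slices of at most four values.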
3762
3763 for (Value *V : VL) {
3764 Type *Ty = V->getType();
3765 if (!isValidElementType(Ty))
3766 return false;
3767 Instruction *Inst = dyn_cast<Instruction>(V);
3768 if (!Inst || Inst->getOpcode() != Opcode0)
3769 return false;
3770 }
3771
3772 bool Changed = false;
3773
3774 // Keep track of values that were deleted by vectorizing in the loop below.
3775 SmallVector<WeakVH, 8> TrackValues(VL.begin(), VL.end());
3776
3777 for (unsigned i = 0, e = VL.size(); i < e; ++i) {
3778 unsigned OpsWidth = 0;
3779
3780 if (i + VF > e)
3781 OpsWidth = e - i;
3782 else
3783 OpsWidth = VF;
3784
3785 if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
3786 break;
3787
3788 // Check that a previous iteration of this loop did not delete the Value.
3789 if (hasValueBeenRAUWed(VL, TrackValues, i, OpsWidth))
3790 continue;
3791
3792 DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
3793 << "\n");
3794 ArrayRef<Value *> Ops = VL.slice(i, OpsWidth);
3795
3796 ArrayRef<Value *> BuildVectorSlice;
3797 if (!BuildVector.empty())
3798 BuildVectorSlice = BuildVector.slice(i, OpsWidth);
3799
3800 R.buildTree(Ops, BuildVectorSlice);
3801 // TODO: check if we can allow reordering also for other cases than
3802 // tryToVectorizePair()
3803 if (allowReorder && R.shouldReorder()) {
3804 assert(Ops.size() == 2);
3805 assert(BuildVectorSlice.empty());
3806 Value *ReorderedOps[] = { Ops[1], Ops[0] };
3807 R.buildTree(ReorderedOps, None);
3808 }
3809 R.computeMinimumValueSizes();
3810 int Cost = R.getTreeCost();
3811
3812 if (Cost < -SLPCostThreshold) {
3813 DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
3814 Value *VectorizedRoot = R.vectorizeTree();
3815
3816 // Reconstruct the build vector by extracting the vectorized root. This
3817 // way we handle the case where some elements of the vector are undefined.
3818 // (return (insertelement <4 x i32> (insertelement undef (opd0) 0) (opd1) 2))
3819 if (!BuildVectorSlice.empty()) {
3820 // The insert point is the last build vector instruction. The vectorized
3821 // root will precede it. This guarantees that we get an instruction. The
3822 // vectorized tree could have been constant folded.
3823 Instruction *InsertAfter = cast<Instruction>(BuildVectorSlice.back());
3824 unsigned VecIdx = 0;
3825 for (auto &V : BuildVectorSlice) {
3826 IRBuilder<NoFolder> Builder(InsertAfter->getParent(),
3827 ++BasicBlock::iterator(InsertAfter));
3828 Instruction *I = cast<Instruction>(V);
3829 assert(isa<InsertElementInst>(I) || isa<InsertValueInst>(I));
3830 Instruction *Extract = cast<Instruction>(Builder.CreateExtractElement(
3831 VectorizedRoot, Builder.getInt32(VecIdx++)));
3832 I->setOperand(1, Extract);
3833 I->removeFromParent();
3834 I->insertAfter(Extract);
3835 InsertAfter = I;
3836 }
3837 }
3838 // Move to the next bundle.
3839 i += VF - 1;
3840 Changed = true;
3841 }
3842 }
3843
3844 return Changed;
3845 }
3846
3847 bool SLPVectorizerPass::tryToVectorize(BinaryOperator *V, BoUpSLP &R) {
3848 if (!V)
3849 return false;
3850
3851 // Try to vectorize V.
3852 if (tryToVectorizePair(V->getOperand(0), V->getOperand(1), R))
3853 return true;
3854
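// If the pair itself does not vectorize, look through one operand at a time:
// for a root like (a + b) + (c * d) we also try the pairs ((a + b), c) and
// ((a + b), d), and symmetrically (a, (c * d)) and (b, (c * d)). The operand
// names here are purely illustrative.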
3855 BinaryOperator *A = dyn_cast<BinaryOperator>(V->getOperand(0));
3856 BinaryOperator *B = dyn_cast<BinaryOperator>(V->getOperand(1));
3857 // Try to skip B.
3858 if (B && B->hasOneUse()) {
3859 BinaryOperator *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
3860 BinaryOperator *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
3861 if (tryToVectorizePair(A, B0, R)) {
3862 return true;
3863 }
3864 if (tryToVectorizePair(A, B1, R)) {
3865 return true;
3866 }
3867 }
3868
3869 // Try to skip A.
3870 if (A && A->hasOneUse()) {
3871 BinaryOperator *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
3872 BinaryOperator *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
3873 if (tryToVectorizePair(A0, B, R)) {
3874 return true;
3875 }
3876 if (tryToVectorizePair(A1, B, R)) {
3877 return true;
3878 }
3879 }
3880 return false;
3881 }
3882
3883 /// \brief Generate a shuffle mask to be used in a reduction tree.
3884 ///
3885 /// \param VecLen The length of the vector to be reduced.
3886 /// \param NumEltsToRdx The number of elements that should be reduced in the
3887 /// vector.
3888 /// \param IsPairwise Whether the reduction is a pairwise or splitting
3889 /// reduction. A pairwise reduction will generate a mask of
3890 /// <0,2,...> or <1,3,..> while a splitting reduction will generate
3891 /// <2,3, undef,undef> for a vector of 4 and NumElts = 2.
3892 /// \param IsLeft True will generate a mask of even elements, odd otherwise.
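/// For example, with VecLen == 4 and NumEltsToRdx == 2 the pairwise masks are
/// <0, 2, undef, undef> (left) and <1, 3, undef, undef> (right).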
3893 static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
3894 bool IsPairwise, bool IsLeft,
3895 IRBuilder<> &Builder) {
3896 assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");
3897
3898 SmallVector<Constant *, 32> ShuffleMask(
3899 VecLen, UndefValue::get(Builder.getInt32Ty()));
3900
3901 if (IsPairwise)
3902 // Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
3903 for (unsigned i = 0; i != NumEltsToRdx; ++i)
3904 ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft);
3905 else
3906 // Move the upper half of the vector to the lower half.
3907 for (unsigned i = 0; i != NumEltsToRdx; ++i)
3908 ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i);
3909
3910 return ConstantVector::get(ShuffleMask);
3911 }
3912
3913
3914 /// Model horizontal reductions.
3915 ///
3916 /// A horizontal reduction is a tree of reduction operations (currently add and
3917 /// fadd) that has operations that can be put into a vector as its leaf.
3918 /// For example, this tree:
3919 ///
3920 ///  mul mul mul mul
3921 ///   \  /    \  /
3922 ///    +       +
3923 ///     \     /
3924 ///        +
3925 /// This tree has "mul" as its reduced values and "+" as its reduction
3926 /// operations. A reduction might be feeding into a store or a binary operation
3927 /// feeding a phi.
3928 ///   ...
3929 ///   \  /
3930 ///    +
3931 ///    |
3932 ///  phi +=
3933 ///
3934 /// Or:
3935 ///   ...
3936 ///   \  /
3937 ///    +
3938 ///    |
3939 ///   *p =
3940 ///
3941 class HorizontalReduction {
3942 SmallVector<Value *, 16> ReductionOps;
3943 SmallVector<Value *, 32> ReducedVals;
3944
3945 BinaryOperator *ReductionRoot;
3946 PHINode *ReductionPHI;
3947
3948 /// The opcode of the reduction.
3949 unsigned ReductionOpcode;
3950 /// The opcode of the values we perform a reduction on.
3951 unsigned ReducedValueOpcode;
3952 /// Should we model this reduction as a pairwise reduction tree or a tree that
3953 /// splits the vector in halves and adds those halves.
3954 bool IsPairwiseReduction;
3955
3956 public:
3957 /// The width of one full horizontal reduction operation.
3958 unsigned ReduxWidth;
3959
3960 /// Minimal width of available vector registers. It's used to determine
3961 /// ReduxWidth.
3962 unsigned MinVecRegSize;
3963
3964 HorizontalReduction(unsigned MinVecRegSize)
3965 : ReductionRoot(nullptr), ReductionPHI(nullptr), ReductionOpcode(0),
3966 ReducedValueOpcode(0), IsPairwiseReduction(false), ReduxWidth(0),
3967 MinVecRegSize(MinVecRegSize) {}
3968
3969 /// \brief Try to find a reduction tree.
3970 bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B) {
3971 assert((!Phi ||
3972 std::find(Phi->op_begin(), Phi->op_end(), B) != Phi->op_end()) &&
3973 "This phi needs to use the binary operator");
3974
3975 // We could have an initial reduction that is not an add.
3976 // r *= v1 + v2 + v3 + v4
3977 // In such a case start looking for a tree rooted in the first '+'.
3978 if (Phi) {
3979 if (B->getOperand(0) == Phi) {
3980 Phi = nullptr;
3981 B = dyn_cast<BinaryOperator>(B->getOperand(1));
3982 } else if (B->getOperand(1) == Phi) {
3983 Phi = nullptr;
3984 B = dyn_cast<BinaryOperator>(B->getOperand(0));
3985 }
3986 }
3987
3988 if (!B)
3989 return false;
3990
3991 Type *Ty = B->getType();
3992 if (!isValidElementType(Ty))
3993 return false;
3994
3995 const DataLayout &DL = B->getModule()->getDataLayout();
3996 ReductionOpcode = B->getOpcode();
3997 ReducedValueOpcode = 0;
3998 // FIXME: Register size should be a parameter to this function, so we can
3999 // try different vectorization factors.
4000 ReduxWidth = MinVecRegSize / DL.getTypeSizeInBits(Ty);
4001 ReductionRoot = B;
4002 ReductionPHI = Phi;
4003
4004 if (ReduxWidth < 4)
4005 return false;
4006
4007 // We currently only support adds.
4008 if (ReductionOpcode != Instruction::Add &&
4009 ReductionOpcode != Instruction::FAdd)
4010 return false;
4011
4012 // Post-order traverse the reduction tree starting at B. We only handle true
4013 // trees containing only binary operators or selects.
4014 SmallVector<std::pair<Instruction *, unsigned>, 32> Stack;
4015 Stack.push_back(std::make_pair(B, 0));
4016 while (!Stack.empty()) {
4017 Instruction *TreeN = Stack.back().first;
4018 unsigned EdgeToVisit = Stack.back().second++;
4019 bool IsReducedValue = TreeN->getOpcode() != ReductionOpcode;
4020
4021 // Only handle trees in the current basic block.
4022 if (TreeN->getParent() != B->getParent())
4023 return false;
4024
4025 // Each tree node needs to have one user except for the ultimate
4026 // reduction.
4027 if (!TreeN->hasOneUse() && TreeN != B)
4028 return false;
4029
4030 // Postorder visit.
4031 if (EdgeToVisit == 2 || IsReducedValue) {
4032 if (IsReducedValue) {
4033 // Make sure that the opcodes of the operations that we are going to
4034 // reduce match.
4035 if (!ReducedValueOpcode)
4036 ReducedValueOpcode = TreeN->getOpcode();
4037 else if (ReducedValueOpcode != TreeN->getOpcode())
4038 return false;
4039 ReducedVals.push_back(TreeN);
4040 } else {
4041 // We need to be able to reassociate the adds.
4042 if (!TreeN->isAssociative())
4043 return false;
4044 ReductionOps.push_back(TreeN);
4045 }
4046 // Retract.
4047 Stack.pop_back();
4048 continue;
4049 }
4050
4051 // Visit left or right.
4052 Value *NextV = TreeN->getOperand(EdgeToVisit);
4053 // We currently only allow BinaryOperator's and SelectInst's as reduction
4054 // values in our tree.
4055 if (isa<BinaryOperator>(NextV) || isa<SelectInst>(NextV))
4056 Stack.push_back(std::make_pair(cast<Instruction>(NextV), 0));
4057 else if (NextV != Phi)
4058 return false;
4059 }
4060 return true;
4061 }
4062
4063 /// \brief Attempt to vectorize the tree found by
4064 /// matchAssociativeReduction.
4065 bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
4066 if (ReducedVals.empty())
4067 return false;
4068
4069 unsigned NumReducedVals = ReducedVals.size();
4070 if (NumReducedVals < ReduxWidth)
4071 return false;
4072
4073 Value *VectorizedTree = nullptr;
4074 IRBuilder<> Builder(ReductionRoot);
4075 FastMathFlags Unsafe;
4076 Unsafe.setUnsafeAlgebra();
4077 Builder.setFastMathFlags(Unsafe);
4078 unsigned i = 0;
4079
4080 for (; i < NumReducedVals - ReduxWidth + 1; i += ReduxWidth) {
4081 V.buildTree(makeArrayRef(&ReducedVals[i], ReduxWidth), ReductionOps);
4082 V.computeMinimumValueSizes();
4083
4084 // Estimate cost.
4085 int Cost = V.getTreeCost() + getReductionCost(TTI, ReducedVals[i]);
4086 if (Cost >= -SLPCostThreshold)
4087 break;
4088
4089 DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost
4090 << ". (HorRdx)\n");
4091
4092 // Vectorize a tree.
4093 DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
4094 Value *VectorizedRoot = V.vectorizeTree();
4095
4096 // Emit a reduction.
4097 Value *ReducedSubTree = emitReduction(VectorizedRoot, Builder);
4098 if (VectorizedTree) {
4099 Builder.SetCurrentDebugLocation(Loc);
4100 VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
4101 ReducedSubTree, "bin.rdx");
4102 } else
4103 VectorizedTree = ReducedSubTree;
4104 }
4105
4106 if (VectorizedTree) {
4107 // Finish the reduction.
4108 for (; i < NumReducedVals; ++i) {
4109 Builder.SetCurrentDebugLocation(
4110 cast<Instruction>(ReducedVals[i])->getDebugLoc());
4111 VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
4112 ReducedVals[i]);
4113 }
4114 // Update users.
4115 if (ReductionPHI) {
4116 assert(ReductionRoot && "Need a reduction operation");
4117 ReductionRoot->setOperand(0, VectorizedTree);
4118 ReductionRoot->setOperand(1, ReductionPHI);
4119 } else
4120 ReductionRoot->replaceAllUsesWith(VectorizedTree);
4121 }
4122 return VectorizedTree != nullptr;
4123 }
4124
4125 unsigned numReductionValues() const {
4126 return ReducedVals.size();
4127 }
4128
4129 private:
4130 /// \brief Calculate the cost of a reduction.
4131 int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal) {
4132 Type *ScalarTy = FirstReducedVal->getType();
4133 Type *VecTy = VectorType::get(ScalarTy, ReduxWidth);
4134
4135 int PairwiseRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, true);
4136 int SplittingRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, false);
4137
4138 IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost;
4139 int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost;
4140
4141 int ScalarReduxCost =
4142 ReduxWidth * TTI->getArithmeticInstrCost(ReductionOpcode, VecTy);
4143
4144 DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost
4145 << " for reduction that starts with " << *FirstReducedVal
4146 << " (It is a "
4147 << (IsPairwiseReduction ? "pairwise" : "splitting")
4148 << " reduction)\n");
4149
4150 return VecReduxCost - ScalarReduxCost;
4151 }
4152
4153 static Value *createBinOp(IRBuilder<> &Builder, unsigned Opcode, Value *L,
4154 Value *R, const Twine &Name = "") {
4155 if (Opcode == Instruction::FAdd)
4156 return Builder.CreateFAdd(L, R, Name);
4157 return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, L, R, Name);
4158 }
4159
4160 /// \brief Emit a horizontal reduction of the vectorized value.
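/// As a rough illustration, a splitting add reduction with ReduxWidth == 4
/// emits approximately:
///   %rdx.shuf  = shufflevector <4 x i32> %v, <4 x i32> undef,
///                              <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
///   %bin.rdx   = add <4 x i32> %v, %rdx.shuf
///   %rdx.shuf1 = shufflevector <4 x i32> %bin.rdx, <4 x i32> undef,
///                              <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
///   %bin.rdx1  = add <4 x i32> %bin.rdx, %rdx.shuf1
///   %res       = extractelement <4 x i32> %bin.rdx1, i32 0
/// The value names are illustrative; the actual names depend on the IRBuilder.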
4161 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder) {
4162 assert(VectorizedValue && "Need to have a vectorized tree node");
4163 assert(isPowerOf2_32(ReduxWidth) &&
4164 "We only handle power-of-two reductions for now");
4165
4166 Value *TmpVec = VectorizedValue;
4167 for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) {
4168 if (IsPairwiseReduction) {
4169 Value *LeftMask =
4170 createRdxShuffleMask(ReduxWidth, i, true, true, Builder);
4171 Value *RightMask =
4172 createRdxShuffleMask(ReduxWidth, i, true, false, Builder);
4173
4174 Value *LeftShuf = Builder.CreateShuffleVector(
4175 TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l");
4176 Value *RightShuf = Builder.CreateShuffleVector(
4177 TmpVec, UndefValue::get(TmpVec->getType()), (RightMask),
4178 "rdx.shuf.r");
4179 TmpVec = createBinOp(Builder, ReductionOpcode, LeftShuf, RightShuf,
4180 "bin.rdx");
4181 } else {
4182 Value *UpperHalf =
4183 createRdxShuffleMask(ReduxWidth, i, false, false, Builder);
4184 Value *Shuf = Builder.CreateShuffleVector(
4185 TmpVec, UndefValue::get(TmpVec->getType()), UpperHalf, "rdx.shuf");
4186 TmpVec = createBinOp(Builder, ReductionOpcode, TmpVec, Shuf, "bin.rdx");
4187 }
4188 }
4189
4190 // The result is in the first element of the vector.
4191 return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
4192 }
4193 };
4194
4195 /// \brief Recognize construction of vectors like
4196 /// %ra = insertelement <4 x float> undef, float %s0, i32 0
4197 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1
4198 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2
4199 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3
4200 ///
4201 /// Returns true if it matches
4202 ///
4203 static bool findBuildVector(InsertElementInst *FirstInsertElem,
4204 SmallVectorImpl<Value *> &BuildVector,
4205 SmallVectorImpl<Value *> &BuildVectorOpds) {
4206 if (!isa<UndefValue>(FirstInsertElem->getOperand(0)))
4207 return false;
4208
4209 InsertElementInst *IE = FirstInsertElem;
4210 while (true) {
4211 BuildVector.push_back(IE);
4212 BuildVectorOpds.push_back(IE->getOperand(1));
4213
4214 if (IE->use_empty())
4215 return false;
4216
4217 InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->user_back());
4218 if (!NextUse)
4219 return true;
4220
4221 // If this isn't the final use, make sure the next insertelement is the only
4222 // use. It's OK if the final constructed vector is used multiple times.
4223 if (!IE->hasOneUse())
4224 return false;
4225
4226 IE = NextUse;
4227 }
4228
4229 return false;
4230 }
4231
4232 /// \brief Like findBuildVector, but looks backwards for construction of aggregate.
4233 ///
4234 /// \return true if it matches.
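/// An example of the pattern being matched (illustrative):
///   %a0 = insertvalue [2 x float] undef, float %s0, 0
///   %a1 = insertvalue [2 x float] %a0, float %s1, 1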
4235 static bool findBuildAggregate(InsertValueInst *IV,
4236 SmallVectorImpl<Value *> &BuildVector,
4237 SmallVectorImpl<Value *> &BuildVectorOpds) {
4238 if (!IV->hasOneUse())
4239 return false;
4240 Value *V = IV->getAggregateOperand();
4241 if (!isa<UndefValue>(V)) {
4242 InsertValueInst *I = dyn_cast<InsertValueInst>(V);
4243 if (!I || !findBuildAggregate(I, BuildVector, BuildVectorOpds))
4244 return false;
4245 }
4246 BuildVector.push_back(IV);
4247 BuildVectorOpds.push_back(IV->getInsertedValueOperand());
4248 return true;
4249 }
4250
4251 static bool PhiTypeSorterFunc(Value *V, Value *V2) {
4252 return V->getType() < V2->getType();
4253 }
4254
4255 /// \brief Try and get a reduction value from a phi node.
4256 ///
4257 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions
4258 /// if they come from either \p ParentBB or a containing loop latch.
4259 ///
4260 /// \returns A candidate reduction value if possible, or \code nullptr \endcode
4261 /// if not possible.
4262 static Value *getReductionValue(const DominatorTree *DT, PHINode *P,
4263 BasicBlock *ParentBB, LoopInfo *LI) {
4264 // There are situations where the reduction value is not dominated by the
4265 // reduction phi. Vectorizing such cases has been reported to cause
4266 // miscompiles. See PR25787.
4267 auto DominatedReduxValue = [&](Value *R) {
4268 return (
4269 dyn_cast<Instruction>(R) &&
4270 DT->dominates(P->getParent(), dyn_cast<Instruction>(R)->getParent()));
4271 };
4272
4273 Value *Rdx = nullptr;
4274
4275 // Return the incoming value if it comes from the same BB as the phi node.
4276 if (P->getIncomingBlock(0) == ParentBB) {
4277 Rdx = P->getIncomingValue(0);
4278 } else if (P->getIncomingBlock(1) == ParentBB) {
4279 Rdx = P->getIncomingValue(1);
4280 }
4281
4282 if (Rdx && DominatedReduxValue(Rdx))
4283 return Rdx;
4284
4285 // Otherwise, check whether we have a loop latch to look at.
4286 Loop *BBL = LI->getLoopFor(ParentBB);
4287 if (!BBL)
4288 return nullptr;
4289 BasicBlock *BBLatch = BBL->getLoopLatch();
4290 if (!BBLatch)
4291 return nullptr;
4292
4293 // There is a loop latch, return the incoming value if it comes from
4294 // that. This reduction pattern occasionally turns up.
4295 if (P->getIncomingBlock(0) == BBLatch) {
4296 Rdx = P->getIncomingValue(0);
4297 } else if (P->getIncomingBlock(1) == BBLatch) {
4298 Rdx = P->getIncomingValue(1);
4299 }
4300
4301 if (Rdx && DominatedReduxValue(Rdx))
4302 return Rdx;
4303
4304 return nullptr;
4305 }
4306
4307 /// \brief Attempt to reduce a horizontal reduction.
4308 /// If it is legal to match a horizontal reduction feeding
4309 /// the phi node P with reduction operators BI, then check if it
4310 /// can be done.
4311 /// \returns true if a horizontal reduction was matched and reduced.
4312 /// \returns false if a horizontal reduction was not matched.
4313 static bool canMatchHorizontalReduction(PHINode *P, BinaryOperator *BI,
4314 BoUpSLP &R, TargetTransformInfo *TTI,
4315 unsigned MinRegSize) {
4316 if (!ShouldVectorizeHor)
4317 return false;
4318
4319 HorizontalReduction HorRdx(MinRegSize);
4320 if (!HorRdx.matchAssociativeReduction(P, BI))
4321 return false;
4322
4323 // If there is a sufficient number of reduction values, reduce
4324 // to a nearby power-of-2. Can safely generate oversized
4325 // vectors and rely on the backend to split them to legal sizes.
4326 HorRdx.ReduxWidth =
4327 std::max((uint64_t)4, PowerOf2Floor(HorRdx.numReductionValues()));
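// For example, 9 reduction values give PowerOf2Floor(9) == 8, so ReduxWidth
// becomes 8; with only 3 values it is clamped up to the minimum of 4.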
4328
4329 return HorRdx.tryToReduce(R, TTI);
4330 }
4331
4332 bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
4333 bool Changed = false;
4334 SmallVector<Value *, 4> Incoming;
4335 SmallSet<Value *, 16> VisitedInstrs;
4336
4337 bool HaveVectorizedPhiNodes = true;
4338 while (HaveVectorizedPhiNodes) {
4339 HaveVectorizedPhiNodes = false;
4340
4341 // Collect the incoming values from the PHIs.
4342 Incoming.clear();
4343 for (Instruction &I : *BB) {
4344 PHINode *P = dyn_cast<PHINode>(&I);
4345 if (!P)
4346 break;
4347
4348 if (!VisitedInstrs.count(P))
4349 Incoming.push_back(P);
4350 }
4351
4352 // Sort by type.
4353 std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc);
4354
4355 // Try to vectorize elements based on their type.
4356 for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
4357 E = Incoming.end();
4358 IncIt != E;) {
4359
4360 // Look for the next elements with the same type.
4361 SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
4362 while (SameTypeIt != E &&
4363 (*SameTypeIt)->getType() == (*IncIt)->getType()) {
4364 VisitedInstrs.insert(*SameTypeIt);
4365 ++SameTypeIt;
4366 }
4367
4368 // Try to vectorize them.
4369 unsigned NumElts = (SameTypeIt - IncIt);
4370 DEBUG(errs() << "SLP: Trying to vectorize starting at PHIs (" << NumElts << ")\n");
4371 if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R)) {
4372 // Success. Start over because instructions might have been changed.
4373 HaveVectorizedPhiNodes = true;
4374 Changed = true;
4375 break;
4376 }
4377
4378 // Start over at the next instruction of a different type (or the end).
4379 IncIt = SameTypeIt;
4380 }
4381 }
4382
4383 VisitedInstrs.clear();
4384
4385 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; it++) {
4386 // We may go through BB multiple times so skip the one we have checked.
4387 if (!VisitedInstrs.insert(&*it).second)
4388 continue;
4389
4390 if (isa<DbgInfoIntrinsic>(it))
4391 continue;
4392
4393 // Try to vectorize reductions that use PHINodes.
4394 if (PHINode *P = dyn_cast<PHINode>(it)) {
4395 // Check that the PHI is a reduction PHI.
4396 if (P->getNumIncomingValues() != 2)
4397 return Changed;
4398
4399 Value *Rdx = getReductionValue(DT, P, BB, LI);
4400
4401 // Check if this is a Binary Operator.
4402 BinaryOperator *BI = dyn_cast_or_null<BinaryOperator>(Rdx);
4403 if (!BI)
4404 continue;
4405
4406 // Try to match and vectorize a horizontal reduction.
4407 if (canMatchHorizontalReduction(P, BI, R, TTI, R.getMinVecRegSize())) {
4408 Changed = true;
4409 it = BB->begin();
4410 e = BB->end();
4411 continue;
4412 }
4413
4414 Value *Inst = BI->getOperand(0);
4415 if (Inst == P)
4416 Inst = BI->getOperand(1);
4417
4418 if (tryToVectorize(dyn_cast<BinaryOperator>(Inst), R)) {
4419 // We would like to start over since some instructions are deleted
4420 // and the iterator may become invalid.
4421 Changed = true;
4422 it = BB->begin();
4423 e = BB->end();
4424 continue;
4425 }
4426
4427 continue;
4428 }
4429
4430 if (ShouldStartVectorizeHorAtStore)
4431 if (StoreInst *SI = dyn_cast<StoreInst>(it))
4432 if (BinaryOperator *BinOp =
4433 dyn_cast<BinaryOperator>(SI->getValueOperand())) {
4434 if (canMatchHorizontalReduction(nullptr, BinOp, R, TTI,
4435 R.getMinVecRegSize()) ||
4436 tryToVectorize(BinOp, R)) {
4437 Changed = true;
4438 it = BB->begin();
4439 e = BB->end();
4440 continue;
4441 }
4442 }
4443
4444 // Try to vectorize horizontal reductions feeding into a return.
4445 if (ReturnInst *RI = dyn_cast<ReturnInst>(it))
4446 if (RI->getNumOperands() != 0)
4447 if (BinaryOperator *BinOp =
4448 dyn_cast<BinaryOperator>(RI->getOperand(0))) {
4449 DEBUG(dbgs() << "SLP: Found a return to vectorize.\n");
4450 if (tryToVectorizePair(BinOp->getOperand(0),
4451 BinOp->getOperand(1), R)) {
4452 Changed = true;
4453 it = BB->begin();
4454 e = BB->end();
4455 continue;
4456 }
4457 }
4458
4459 // Try to vectorize trees that start at compare instructions.
4460 if (CmpInst *CI = dyn_cast<CmpInst>(it)) {
4461 if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) {
4462 Changed = true;
4463 // We would like to start over since some instructions are deleted
4464 // and the iterator may become invalid.
4465 it = BB->begin();
4466 e = BB->end();
4467 continue;
4468 }
4469
4470 for (int i = 0; i < 2; ++i) {
4471 if (BinaryOperator *BI = dyn_cast<BinaryOperator>(CI->getOperand(i))) {
4472 if (tryToVectorizePair(BI->getOperand(0), BI->getOperand(1), R)) {
4473 Changed = true;
4474 // We would like to start over since some instructions are deleted
4475 // and the iterator may become invalid.
4476 it = BB->begin();
4477 e = BB->end();
4478 break;
4479 }
4480 }
4481 }
4482 continue;
4483 }
4484
4485 // Try to vectorize trees that start at insertelement instructions.
4486 if (InsertElementInst *FirstInsertElem = dyn_cast<InsertElementInst>(it)) {
4487 SmallVector<Value *, 16> BuildVector;
4488 SmallVector<Value *, 16> BuildVectorOpds;
4489 if (!findBuildVector(FirstInsertElem, BuildVector, BuildVectorOpds))
4490 continue;
4491
4492 // Vectorize starting with the build vector operands ignoring the
4493 // BuildVector instructions for the purpose of scheduling and user
4494 // extraction.
4495 if (tryToVectorizeList(BuildVectorOpds, R, BuildVector)) {
4496 Changed = true;
4497 it = BB->begin();
4498 e = BB->end();
4499 }
4500
4501 continue;
4502 }
4503
4504 // Try to vectorize trees that start at insertvalue instructions feeding into
4505 // a store.
4506 if (StoreInst *SI = dyn_cast<StoreInst>(it)) {
4507 if (InsertValueInst *LastInsertValue = dyn_cast<InsertValueInst>(SI->getValueOperand())) {
4508 const DataLayout &DL = BB->getModule()->getDataLayout();
4509 if (R.canMapToVector(SI->getValueOperand()->getType(), DL)) {
4510 SmallVector<Value *, 16> BuildVector;
4511 SmallVector<Value *, 16> BuildVectorOpds;
4512 if (!findBuildAggregate(LastInsertValue, BuildVector, BuildVectorOpds))
4513 continue;
4514
4515 DEBUG(dbgs() << "SLP: store of array mappable to vector: " << *SI << "\n");
4516 if (tryToVectorizeList(BuildVectorOpds, R, BuildVector, false)) {
4517 Changed = true;
4518 it = BB->begin();
4519 e = BB->end();
4520 }
4521 continue;
4522 }
4523 }
4524 }
4525 }
4526
4527 return Changed;
4528 }
4529
4530 bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
4531 auto Changed = false;
4532 for (auto &Entry : GEPs) {
4533
4534 // If the getelementptr list has fewer than two elements, there's nothing
4535 // to do.
4536 if (Entry.second.size() < 2)
4537 continue;
4538
4539 DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
4540 << Entry.second.size() << ".\n");
4541
4542 // We process the getelementptr list in chunks of 16 (like we do for
4543 // stores) to minimize compile-time.
4544 for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += 16) {
4545 auto Len = std::min<unsigned>(BE - BI, 16);
4546 auto GEPList = makeArrayRef(&Entry.second[BI], Len);
4547
4548 // Initialize a set of candidate getelementptrs. Note that we use a
4549 // SetVector here to preserve program order. If the index computations
4550 // are vectorizable and begin with loads, we want to minimize the chance
4551 // of having to reorder them later.
4552 SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());
4553
4554 // Some of the candidates may have already been vectorized after we
4555 // initially collected them. If so, the WeakVHs will have nullified the
4556 // values, so remove them from the set of candidates.
4557 Candidates.remove(nullptr);
4558
4559 // Remove from the set of candidates all pairs of getelementptrs with
4560 // constant differences. Such getelementptrs are likely not good
4561 // candidates for vectorization in a bottom-up phase since one can be
4562 // computed from the other. We also ensure all candidate getelementptr
4563 // indices are unique.
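// For instance, getelementptrs for a[i] and a[i + 1] (hypothetical accesses)
// have a constant SCEV difference, so both are dropped from the candidate
// set, while a[i] and a[j] with unrelated indices are kept.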
4564 for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
4565 auto *GEPI = cast<GetElementPtrInst>(GEPList[I]);
4566 if (!Candidates.count(GEPI))
4567 continue;
4568 auto *SCEVI = SE->getSCEV(GEPList[I]);
4569 for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
4570 auto *GEPJ = cast<GetElementPtrInst>(GEPList[J]);
4571 auto *SCEVJ = SE->getSCEV(GEPList[J]);
4572 if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
4573 Candidates.remove(GEPList[I]);
4574 Candidates.remove(GEPList[J]);
4575 } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
4576 Candidates.remove(GEPList[J]);
4577 }
4578 }
4579 }
4580
4581 // We break out of the above computation as soon as we know there are
4582 // fewer than two candidates remaining.
4583 if (Candidates.size() < 2)
4584 continue;
4585
4586 // Add the single, non-constant index of each candidate to the bundle. We
4587 // ensured the indices met these constraints when we originally collected
4588 // the getelementptrs.
4589 SmallVector<Value *, 16> Bundle(Candidates.size());
4590 auto BundleIndex = 0u;
4591 for (auto *V : Candidates) {
4592 auto *GEP = cast<GetElementPtrInst>(V);
4593 auto *GEPIdx = GEP->idx_begin()->get();
4594 assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
4595 Bundle[BundleIndex++] = GEPIdx;
4596 }
4597
4598 // Try and vectorize the indices. We are currently only interested in
4599 // gather-like cases of the form:
4600 //
4601 // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
4602 //
4603 // where the loads of "a", the loads of "b", and the subtractions can be
4604 // performed in parallel. It's likely that detecting this pattern in a
4605 // bottom-up phase will be simpler and less costly than building a
4606 // full-blown top-down phase beginning at the consecutive loads.
4607 Changed |= tryToVectorizeList(Bundle, R);
4608 }
4609 }
4610 return Changed;
4611 }
4612
4613 bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
4614 bool Changed = false;
4615 // Attempt to sort and vectorize each of the store-groups.
4616 for (StoreListMap::iterator it = Stores.begin(), e = Stores.end(); it != e;
4617 ++it) {
4618 if (it->second.size() < 2)
4619 continue;
4620
4621 DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
4622 << it->second.size() << ".\n");
4623
4624 // Process the stores in chunks of 16.
4625 // TODO: The limit of 16 inhibits greater vectorization factors.
4626 // For example, AVX2 supports v32i8. Increasing this limit, however,
4627 // may cause a significant compile-time increase.
4628 for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI+=16) {
4629 unsigned Len = std::min<unsigned>(CE - CI, 16);
4630 Changed |= vectorizeStores(makeArrayRef(&it->second[CI], Len),
4631 -SLPCostThreshold, R);
4632 }
4633 }
4634 return Changed;
4635 }
4636
4637 char SLPVectorizer::ID = 0;
4638 static const char lv_name[] = "SLP Vectorizer";
4639 INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
4640 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
4641 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
4642 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
4643 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
4644 INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
4645 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
4646 INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)
4647
4648 namespace llvm {
4649 Pass *createSLPVectorizerPass() { return new SLPVectorizer(); }
4650 }
4651