//===- Scalarizer.cpp - Scalarize vector operations -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass converts vector operations into scalar operations, in order
// to expose optimization opportunities on the individual scalar operations.
// It is mainly intended for targets that do not have vector units, but it
// may also be useful for revectorizing code to different vector widths.
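//
// As a sketch of the transformation (the ".i<N>" and ".upto<N>" suffixes
// below match the names this pass assigns), an operation such as
//
//   %res = fadd <2 x float> %a, %b
//
// becomes, roughly:
//
//   %a.i0 = extractelement <2 x float> %a, i32 0
//   %b.i0 = extractelement <2 x float> %b, i32 0
//   %res.i0 = fadd float %a.i0, %b.i0
//   %a.i1 = extractelement <2 x float> %a, i32 1
//   %b.i1 = extractelement <2 x float> %b, i32 1
//   %res.i1 = fadd float %a.i1, %b.i1
//
// with the vector %res, if still needed, rebuilt from the scalars via
// insertelement instructions.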
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/Scalarizer.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Transforms/Scalar.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "scalarizer"

// This is disabled by default because having separate loads and stores
// makes it more likely that the -combiner-alias-analysis limits will be
// reached.
static cl::opt<bool>
    ScalarizeLoadStore("scalarize-load-store", cl::init(false), cl::Hidden,
                       cl::desc("Allow the scalarizer pass to scalarize "
                                "loads and stores"));

namespace {

// Used to store the scattered form of a vector.
using ValueVector = SmallVector<Value *, 8>;

// Used to map a vector Value to its scattered form. We use std::map
// because we want iterators to persist across insertion and because the
// values are relatively large.
using ScatterMap = std::map<Value *, ValueVector>;

// Lists Instructions that have been replaced with scalar implementations,
// along with a pointer to their scattered forms.
using GatherList = SmallVector<std::pair<Instruction *, ValueVector *>, 16>;

// Provides a very limited vector-like interface for lazily accessing one
// component of a scattered vector or vector pointer.
class Scatterer {
public:
  Scatterer() = default;

  // Scatter V into Size components. If new instructions are needed,
  // insert them before BBI in BB. If Cache is nonnull, use it to cache
  // the results.
  Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
            ValueVector *cachePtr = nullptr);

  // Return component I, creating a new Value for it if necessary.
  Value *operator[](unsigned I);

  // Return the number of components.
  unsigned size() const { return Size; }

private:
  BasicBlock *BB;
  BasicBlock::iterator BBI;
  Value *V;
  ValueVector *CachePtr;
  PointerType *PtrTy;
  ValueVector Tmp;
  unsigned Size;
};

// FCmpSplitter(FCI)(Builder, X, Y, Name) uses Builder to create an FCmp
// called Name that compares X and Y in the same way as FCI.
struct FCmpSplitter {
  FCmpSplitter(FCmpInst &fci) : FCI(fci) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateFCmp(FCI.getPredicate(), Op0, Op1, Name);
  }

  FCmpInst &FCI;
};

// ICmpSplitter(ICI)(Builder, X, Y, Name) uses Builder to create an ICmp
// called Name that compares X and Y in the same way as ICI.
struct ICmpSplitter {
  ICmpSplitter(ICmpInst &ici) : ICI(ici) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateICmp(ICI.getPredicate(), Op0, Op1, Name);
  }

  ICmpInst &ICI;
};

// UnarySplitter(UO)(Builder, X, Name) uses Builder to create
// a unary operator like UO called Name with operand X.
struct UnarySplitter {
  UnarySplitter(UnaryOperator &uo) : UO(uo) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op, const Twine &Name) const {
    return Builder.CreateUnOp(UO.getOpcode(), Op, Name);
  }

  UnaryOperator &UO;
};

// BinarySplitter(BO)(Builder, X, Y, Name) uses Builder to create
// a binary operator like BO called Name with operands X and Y.
struct BinarySplitter {
  BinarySplitter(BinaryOperator &bo) : BO(bo) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateBinOp(BO.getOpcode(), Op0, Op1, Name);
  }

  BinaryOperator &BO;
};

// Information about a load or store that we're scalarizing.
struct VectorLayout {
  VectorLayout() = default;

  // Return the alignment of element I.
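  // For example (a sketch, not tied to any particular target's ABI): for a
  // 16-byte-aligned <4 x float>, ElemSize is 4 and MinAlign below gives the
  // elements alignments of 16, 4, 8 and 4 respectively.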
  uint64_t getElemAlign(unsigned I) {
    return MinAlign(VecAlign, I * ElemSize);
  }

  // The type of the vector.
  VectorType *VecTy = nullptr;

  // The type of each element.
  Type *ElemTy = nullptr;

  // The alignment of the vector.
  uint64_t VecAlign = 0;

  // The size of each element.
  uint64_t ElemSize = 0;
};

class ScalarizerVisitor : public InstVisitor<ScalarizerVisitor, bool> {
public:
  ScalarizerVisitor(unsigned ParallelLoopAccessMDKind, DominatorTree *DT)
      : ParallelLoopAccessMDKind(ParallelLoopAccessMDKind), DT(DT) {
  }

  bool visit(Function &F);

  // InstVisitor methods. They return true if the instruction was scalarized,
  // false if nothing changed.
  bool visitInstruction(Instruction &I) { return false; }
  bool visitSelectInst(SelectInst &SI);
  bool visitICmpInst(ICmpInst &ICI);
  bool visitFCmpInst(FCmpInst &FCI);
  bool visitUnaryOperator(UnaryOperator &UO);
  bool visitBinaryOperator(BinaryOperator &BO);
  bool visitGetElementPtrInst(GetElementPtrInst &GEPI);
  bool visitCastInst(CastInst &CI);
  bool visitBitCastInst(BitCastInst &BCI);
  bool visitShuffleVectorInst(ShuffleVectorInst &SVI);
  bool visitPHINode(PHINode &PHI);
  bool visitLoadInst(LoadInst &LI);
  bool visitStoreInst(StoreInst &SI);
  bool visitCallInst(CallInst &CI);

private:
  Scatterer scatter(Instruction *Point, Value *V);
  void gather(Instruction *Op, const ValueVector &CV);
  bool canTransferMetadata(unsigned Kind);
  void transferMetadataAndIRFlags(Instruction *Op, const ValueVector &CV);
  bool getVectorLayout(Type *Ty, unsigned Alignment, VectorLayout &Layout,
                       const DataLayout &DL);
  bool finish();

  template<typename T> bool splitUnary(Instruction &, const T &);
  template<typename T> bool splitBinary(Instruction &, const T &);

  bool splitCall(CallInst &CI);

  ScatterMap Scattered;
  GatherList Gathered;

  unsigned ParallelLoopAccessMDKind;

  DominatorTree *DT;
};

class ScalarizerLegacyPass : public FunctionPass {
public:
  static char ID;

  ScalarizerLegacyPass() : FunctionPass(ID) {
    initializeScalarizerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
  }
};

} // end anonymous namespace

char ScalarizerLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(ScalarizerLegacyPass, "scalarizer",
                      "Scalarize vector operations", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(ScalarizerLegacyPass, "scalarizer",
                    "Scalarize vector operations", false, false)

Scatterer::Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
                     ValueVector *cachePtr)
    : BB(bb), BBI(bbi), V(v), CachePtr(cachePtr) {
  Type *Ty = V->getType();
  PtrTy = dyn_cast<PointerType>(Ty);
  if (PtrTy)
    Ty = PtrTy->getElementType();
  Size = Ty->getVectorNumElements();
  if (!CachePtr)
    Tmp.resize(Size, nullptr);
  else if (CachePtr->empty())
    CachePtr->resize(Size, nullptr);
  else
    assert(Size == CachePtr->size() && "Inconsistent vector sizes");
}

// Return component I, creating a new Value for it if necessary.
Value *Scatterer::operator[](unsigned I) {
  ValueVector &CV = (CachePtr ? *CachePtr : Tmp);
  // Try to reuse a previous value.
  if (CV[I])
    return CV[I];
  IRBuilder<> Builder(BB, BBI);
  if (PtrTy) {
    Type *ElTy = PtrTy->getElementType()->getVectorElementType();
    if (!CV[0]) {
      Type *NewPtrTy = PointerType::get(ElTy, PtrTy->getAddressSpace());
      CV[0] = Builder.CreateBitCast(V, NewPtrTy, V->getName() + ".i0");
    }
    if (I != 0)
      CV[I] = Builder.CreateConstGEP1_32(ElTy, CV[0], I,
                                         V->getName() + ".i" + Twine(I));
  } else {
    // Search through a chain of InsertElementInsts looking for element I.
    // Record other elements in the cache. The new V is still suitable
    // for all uncached indices.
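    // For instance (a sketch), given the chain:
    //   %v1 = insertelement <2 x i32> %v0, i32 %x, i32 0
    //   %v2 = insertelement <2 x i32> %v1, i32 %y, i32 1
    // a request for element 0 of %v2 returns %x without creating any new
    // instructions, caching %y for index 1 along the way.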
    while (true) {
      InsertElementInst *Insert = dyn_cast<InsertElementInst>(V);
      if (!Insert)
        break;
      ConstantInt *Idx = dyn_cast<ConstantInt>(Insert->getOperand(2));
      if (!Idx)
        break;
      unsigned J = Idx->getZExtValue();
      V = Insert->getOperand(0);
      if (I == J) {
        CV[J] = Insert->getOperand(1);
        return CV[J];
      } else if (!CV[J]) {
        // Only cache the first entry we find for each index we're not actively
        // searching for. This prevents us from going too far up the chain and
        // caching incorrect entries.
        CV[J] = Insert->getOperand(1);
      }
    }
    CV[I] = Builder.CreateExtractElement(V, Builder.getInt32(I),
                                         V->getName() + ".i" + Twine(I));
  }
  return CV[I];
}

bool ScalarizerLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  Module &M = *F.getParent();
  unsigned ParallelLoopAccessMDKind =
      M.getContext().getMDKindID("llvm.mem.parallel_loop_access");
  DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  ScalarizerVisitor Impl(ParallelLoopAccessMDKind, DT);
  return Impl.visit(F);
}

FunctionPass *llvm::createScalarizerPass() {
  return new ScalarizerLegacyPass();
}

bool ScalarizerVisitor::visit(Function &F) {
  assert(Gathered.empty() && Scattered.empty());

  // To ensure we replace gathered components correctly we need to do an
  // ordered traversal of the basic blocks in the function.
  ReversePostOrderTraversal<BasicBlock *> RPOT(&F.getEntryBlock());
  for (BasicBlock *BB : RPOT) {
    for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
      Instruction *I = &*II;
      bool Done = InstVisitor::visit(I);
      ++II;
      if (Done && I->getType()->isVoidTy())
        I->eraseFromParent();
    }
  }
  return finish();
}

// Return a scattered form of V that can be accessed by Point. V must be a
// vector or a pointer to a vector.
Scatterer ScalarizerVisitor::scatter(Instruction *Point, Value *V) {
  if (Argument *VArg = dyn_cast<Argument>(V)) {
    // Put the scattered form of arguments in the entry block,
    // so that it can be used everywhere.
    Function *F = VArg->getParent();
    BasicBlock *BB = &F->getEntryBlock();
    return Scatterer(BB, BB->begin(), V, &Scattered[V]);
  }
  if (Instruction *VOp = dyn_cast<Instruction>(V)) {
    // When scalarizing PHI nodes we might try to examine/rewrite InsertElement
    // nodes in predecessors. If those predecessors are unreachable from entry,
    // then the IR in those blocks could have unexpected properties resulting
    // in infinite loops in Scatterer::operator[]. By simply treating values
    // originating from instructions in unreachable blocks as undef we do not
    // need to analyse them further.
    if (!DT->isReachableFromEntry(VOp->getParent()))
      return Scatterer(Point->getParent(), Point->getIterator(),
                       UndefValue::get(V->getType()));
    // Put the scattered form of an instruction directly after the
    // instruction.
    BasicBlock *BB = VOp->getParent();
    return Scatterer(BB, std::next(BasicBlock::iterator(VOp)),
                     V, &Scattered[V]);
  }
  // In the fallback case, just put the scattered form before Point and
  // keep the result local to Point.
  return Scatterer(Point->getParent(), Point->getIterator(), V);
}

// Replace Op with the gathered form of the components in CV. Defer the
// deletion of Op and creation of the gathered form to the end of the pass,
// so that we can avoid creating the gathered form if all uses of Op are
// replaced with uses of CV.
void ScalarizerVisitor::gather(Instruction *Op, const ValueVector &CV) {
  // Since we're not deleting Op yet, stub out its operands, so that it
  // doesn't make anything live unnecessarily.
  for (unsigned I = 0, E = Op->getNumOperands(); I != E; ++I)
    Op->setOperand(I, UndefValue::get(Op->getOperand(I)->getType()));

  transferMetadataAndIRFlags(Op, CV);

  // If we already have a scattered form of Op (created from ExtractElements
  // of Op itself), replace them with the new form.
  ValueVector &SV = Scattered[Op];
  if (!SV.empty()) {
    for (unsigned I = 0, E = SV.size(); I != E; ++I) {
      Value *V = SV[I];
      if (V == nullptr)
        continue;

      Instruction *Old = cast<Instruction>(V);
      CV[I]->takeName(Old);
      Old->replaceAllUsesWith(CV[I]);
      Old->eraseFromParent();
    }
  }
  SV = CV;
  Gathered.push_back(GatherList::value_type(Op, &SV));
}

// Return true if it is safe to transfer the given metadata tag from
// vector to scalar instructions.
bool ScalarizerVisitor::canTransferMetadata(unsigned Tag) {
  return (Tag == LLVMContext::MD_tbaa
          || Tag == LLVMContext::MD_fpmath
          || Tag == LLVMContext::MD_tbaa_struct
          || Tag == LLVMContext::MD_invariant_load
          || Tag == LLVMContext::MD_alias_scope
          || Tag == LLVMContext::MD_noalias
          || Tag == ParallelLoopAccessMDKind
          || Tag == LLVMContext::MD_access_group);
}

// Transfer metadata from Op to the instructions in CV if it is known
// to be safe to do so.
void ScalarizerVisitor::transferMetadataAndIRFlags(Instruction *Op,
                                                   const ValueVector &CV) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
  Op->getAllMetadataOtherThanDebugLoc(MDs);
  for (unsigned I = 0, E = CV.size(); I != E; ++I) {
    if (Instruction *New = dyn_cast<Instruction>(CV[I])) {
      for (const auto &MD : MDs)
        if (canTransferMetadata(MD.first))
          New->setMetadata(MD.first, MD.second);
      New->copyIRFlags(Op);
      if (Op->getDebugLoc() && !New->getDebugLoc())
        New->setDebugLoc(Op->getDebugLoc());
    }
  }
}

// Try to fill in Layout from Ty, returning true on success. Alignment is
// the alignment of the vector, or 0 if the ABI default should be used.
bool ScalarizerVisitor::getVectorLayout(Type *Ty, unsigned Alignment,
                                        VectorLayout &Layout,
                                        const DataLayout &DL) {
  // Make sure we're dealing with a vector.
  Layout.VecTy = dyn_cast<VectorType>(Ty);
  if (!Layout.VecTy)
    return false;

  // Check that we're dealing with full-byte elements.
  Layout.ElemTy = Layout.VecTy->getElementType();
  if (!DL.typeSizeEqualsStoreSize(Layout.ElemTy))
    return false;

  if (Alignment)
    Layout.VecAlign = Alignment;
  else
    Layout.VecAlign = DL.getABITypeAlignment(Layout.VecTy);
  Layout.ElemSize = DL.getTypeStoreSize(Layout.ElemTy);
  return true;
}

// Scalarize one-operand instruction I, using Split(Builder, X, Name)
// to create an instruction like I with operand X and name Name.
template<typename Splitter>
bool ScalarizerVisitor::splitUnary(Instruction &I, const Splitter &Split) {
  VectorType *VT = dyn_cast<VectorType>(I.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&I);
  Scatterer Op = scatter(&I, I.getOperand(0));
  assert(Op.size() == NumElems && "Mismatched unary operation");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned Elem = 0; Elem < NumElems; ++Elem)
    Res[Elem] = Split(Builder, Op[Elem], I.getName() + ".i" + Twine(Elem));
  gather(&I, Res);
  return true;
}

// Scalarize two-operand instruction I, using Split(Builder, X, Y, Name)
// to create an instruction like I with operands X and Y and name Name.
template<typename Splitter>
bool ScalarizerVisitor::splitBinary(Instruction &I, const Splitter &Split) {
  VectorType *VT = dyn_cast<VectorType>(I.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&I);
  Scatterer Op0 = scatter(&I, I.getOperand(0));
  Scatterer Op1 = scatter(&I, I.getOperand(1));
  assert(Op0.size() == NumElems && "Mismatched binary operation");
  assert(Op1.size() == NumElems && "Mismatched binary operation");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned Elem = 0; Elem < NumElems; ++Elem)
    Res[Elem] = Split(Builder, Op0[Elem], Op1[Elem],
                      I.getName() + ".i" + Twine(Elem));
  gather(&I, Res);
  return true;
}

static bool isTriviallyScalariable(Intrinsic::ID ID) {
  return isTriviallyVectorizable(ID);
}

// All of the current scalarizable intrinsics only have one mangled type.
static Function *getScalarIntrinsicDeclaration(Module *M,
                                               Intrinsic::ID ID,
                                               VectorType *Ty) {
  return Intrinsic::getDeclaration(M, ID, { Ty->getScalarType() });
}

/// If CI is a call to a vector-typed intrinsic function, split it into one
/// scalar call per element, if the intrinsic allows it.
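///
/// For example (a sketch; the ".i<N>" names follow this pass's convention),
/// a call such as:
///   %r = call <2 x float> @llvm.fma.v2f32(<2 x float> %a,
///                                         <2 x float> %b, <2 x float> %c)
/// becomes two calls to @llvm.fma.f32 named %r.i0 and %r.i1, one per element.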
bool ScalarizerVisitor::splitCall(CallInst &CI) {
  VectorType *VT = dyn_cast<VectorType>(CI.getType());
  if (!VT)
    return false;

  Function *F = CI.getCalledFunction();
  if (!F)
    return false;

  Intrinsic::ID ID = F->getIntrinsicID();
  if (ID == Intrinsic::not_intrinsic || !isTriviallyScalariable(ID))
    return false;

  unsigned NumElems = VT->getNumElements();
  unsigned NumArgs = CI.getNumArgOperands();

  ValueVector ScalarOperands(NumArgs);
  SmallVector<Scatterer, 8> Scattered(NumArgs);

  // Assumes that any vector type has the same number of elements as the return
  // vector type, which is true for all current intrinsics.
  for (unsigned I = 0; I != NumArgs; ++I) {
    Value *OpI = CI.getOperand(I);
    if (OpI->getType()->isVectorTy()) {
      Scattered[I] = scatter(&CI, OpI);
      assert(Scattered[I].size() == NumElems && "mismatched call operands");
    } else {
      ScalarOperands[I] = OpI;
    }
  }

  ValueVector Res(NumElems);
  ValueVector ScalarCallOps(NumArgs);

  Function *NewIntrin = getScalarIntrinsicDeclaration(F->getParent(), ID, VT);
  IRBuilder<> Builder(&CI);

  // Perform actual scalarization, taking care to preserve any scalar operands.
  for (unsigned Elem = 0; Elem < NumElems; ++Elem) {
    ScalarCallOps.clear();

    for (unsigned J = 0; J != NumArgs; ++J) {
      if (hasVectorInstrinsicScalarOpd(ID, J))
        ScalarCallOps.push_back(ScalarOperands[J]);
      else
        ScalarCallOps.push_back(Scattered[J][Elem]);
    }

    Res[Elem] = Builder.CreateCall(NewIntrin, ScalarCallOps,
                                   CI.getName() + ".i" + Twine(Elem));
  }

  gather(&CI, Res);
  return true;
}

bool ScalarizerVisitor::visitSelectInst(SelectInst &SI) {
  VectorType *VT = dyn_cast<VectorType>(SI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&SI);
  Scatterer Op1 = scatter(&SI, SI.getOperand(1));
  Scatterer Op2 = scatter(&SI, SI.getOperand(2));
  assert(Op1.size() == NumElems && "Mismatched select");
  assert(Op2.size() == NumElems && "Mismatched select");
  ValueVector Res;
  Res.resize(NumElems);

  if (SI.getOperand(0)->getType()->isVectorTy()) {
    Scatterer Op0 = scatter(&SI, SI.getOperand(0));
    assert(Op0.size() == NumElems && "Mismatched select");
    for (unsigned I = 0; I < NumElems; ++I)
      Res[I] = Builder.CreateSelect(Op0[I], Op1[I], Op2[I],
                                    SI.getName() + ".i" + Twine(I));
  } else {
    Value *Op0 = SI.getOperand(0);
    for (unsigned I = 0; I < NumElems; ++I)
      Res[I] = Builder.CreateSelect(Op0, Op1[I], Op2[I],
                                    SI.getName() + ".i" + Twine(I));
  }
  gather(&SI, Res);
  return true;
}

bool ScalarizerVisitor::visitICmpInst(ICmpInst &ICI) {
  return splitBinary(ICI, ICmpSplitter(ICI));
}

bool ScalarizerVisitor::visitFCmpInst(FCmpInst &FCI) {
  return splitBinary(FCI, FCmpSplitter(FCI));
}

bool ScalarizerVisitor::visitUnaryOperator(UnaryOperator &UO) {
  return splitUnary(UO, UnarySplitter(UO));
}

bool ScalarizerVisitor::visitBinaryOperator(BinaryOperator &BO) {
  return splitBinary(BO, BinarySplitter(BO));
}

bool ScalarizerVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
  VectorType *VT = dyn_cast<VectorType>(GEPI.getType());
  if (!VT)
    return false;

  IRBuilder<> Builder(&GEPI);
  unsigned NumElems = VT->getNumElements();
  unsigned NumIndices = GEPI.getNumIndices();

  // The base pointer might be scalar even if it's a vector GEP. In those
  // cases, splat the pointer into a vector value, and scatter that vector.
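  // For example (a sketch), in:
  //   %p = getelementptr i32, i32* %base, <4 x i64> %offsets
  // %base is a single pointer, so it is first splatted to a <4 x i32*>
  // before each lane's scalar GEP is built.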
  Value *Op0 = GEPI.getOperand(0);
  if (!Op0->getType()->isVectorTy())
    Op0 = Builder.CreateVectorSplat(NumElems, Op0);
  Scatterer Base = scatter(&GEPI, Op0);

  SmallVector<Scatterer, 8> Ops;
  Ops.resize(NumIndices);
  for (unsigned I = 0; I < NumIndices; ++I) {
    Value *Op = GEPI.getOperand(I + 1);

    // The indices might be scalars even if it's a vector GEP. In those cases,
    // splat the scalar into a vector value, and scatter that vector.
    if (!Op->getType()->isVectorTy())
      Op = Builder.CreateVectorSplat(NumElems, Op);

    Ops[I] = scatter(&GEPI, Op);
  }

  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    SmallVector<Value *, 8> Indices;
    Indices.resize(NumIndices);
    for (unsigned J = 0; J < NumIndices; ++J)
      Indices[J] = Ops[J][I];
    Res[I] = Builder.CreateGEP(GEPI.getSourceElementType(), Base[I], Indices,
                               GEPI.getName() + ".i" + Twine(I));
    if (GEPI.isInBounds())
      if (GetElementPtrInst *NewGEPI = dyn_cast<GetElementPtrInst>(Res[I]))
        NewGEPI->setIsInBounds();
  }
  gather(&GEPI, Res);
  return true;
}

bool ScalarizerVisitor::visitCastInst(CastInst &CI) {
  VectorType *VT = dyn_cast<VectorType>(CI.getDestTy());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&CI);
  Scatterer Op0 = scatter(&CI, CI.getOperand(0));
  assert(Op0.size() == NumElems && "Mismatched cast");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreateCast(CI.getOpcode(), Op0[I], VT->getElementType(),
                                CI.getName() + ".i" + Twine(I));
  gather(&CI, Res);
  return true;
}

bool ScalarizerVisitor::visitBitCastInst(BitCastInst &BCI) {
  VectorType *DstVT = dyn_cast<VectorType>(BCI.getDestTy());
  VectorType *SrcVT = dyn_cast<VectorType>(BCI.getSrcTy());
  if (!DstVT || !SrcVT)
    return false;

  unsigned DstNumElems = DstVT->getNumElements();
  unsigned SrcNumElems = SrcVT->getNumElements();
  IRBuilder<> Builder(&BCI);
  Scatterer Op0 = scatter(&BCI, BCI.getOperand(0));
  ValueVector Res;
  Res.resize(DstNumElems);

  if (DstNumElems == SrcNumElems) {
    for (unsigned I = 0; I < DstNumElems; ++I)
      Res[I] = Builder.CreateBitCast(Op0[I], DstVT->getElementType(),
                                     BCI.getName() + ".i" + Twine(I));
  } else if (DstNumElems > SrcNumElems) {
    // <M x t1> -> <N*M x t2>. Convert each t1 to <N x t2> and copy the
    // individual elements to the destination.
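    // For example (a sketch), <2 x i64> -> <4 x i32> bitcasts each scalar
    // i64 to a <2 x i32> and scatters that, yielding four i32 results.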
    unsigned FanOut = DstNumElems / SrcNumElems;
    Type *MidTy = VectorType::get(DstVT->getElementType(), FanOut);
    unsigned ResI = 0;
    for (unsigned Op0I = 0; Op0I < SrcNumElems; ++Op0I) {
      Value *V = Op0[Op0I];
      Instruction *VI;
      // Look through any existing bitcasts before converting to <N x t2>.
      // In the best case, the resulting conversion might be a no-op.
      while ((VI = dyn_cast<Instruction>(V)) &&
             VI->getOpcode() == Instruction::BitCast)
        V = VI->getOperand(0);
      V = Builder.CreateBitCast(V, MidTy, V->getName() + ".cast");
      Scatterer Mid = scatter(&BCI, V);
      for (unsigned MidI = 0; MidI < FanOut; ++MidI)
        Res[ResI++] = Mid[MidI];
    }
  } else {
    // <N*M x t1> -> <M x t2>. Convert each group of <N x t1> into a t2.
    unsigned FanIn = SrcNumElems / DstNumElems;
    Type *MidTy = VectorType::get(SrcVT->getElementType(), FanIn);
    unsigned Op0I = 0;
    for (unsigned ResI = 0; ResI < DstNumElems; ++ResI) {
      Value *V = UndefValue::get(MidTy);
      for (unsigned MidI = 0; MidI < FanIn; ++MidI)
        V = Builder.CreateInsertElement(V, Op0[Op0I++], Builder.getInt32(MidI),
                                        BCI.getName() + ".i" + Twine(ResI)
                                        + ".upto" + Twine(MidI));
      Res[ResI] = Builder.CreateBitCast(V, DstVT->getElementType(),
                                        BCI.getName() + ".i" + Twine(ResI));
    }
  }
  gather(&BCI, Res);
  return true;
}

bool ScalarizerVisitor::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
  VectorType *VT = dyn_cast<VectorType>(SVI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  Scatterer Op0 = scatter(&SVI, SVI.getOperand(0));
  Scatterer Op1 = scatter(&SVI, SVI.getOperand(1));
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I) {
    int Selector = SVI.getMaskValue(I);
    if (Selector < 0)
      Res[I] = UndefValue::get(VT->getElementType());
    else if (unsigned(Selector) < Op0.size())
      Res[I] = Op0[Selector];
    else
      Res[I] = Op1[Selector - Op0.size()];
  }
  gather(&SVI, Res);
  return true;
}

bool ScalarizerVisitor::visitPHINode(PHINode &PHI) {
  VectorType *VT = dyn_cast<VectorType>(PHI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&PHI);
  ValueVector Res;
  Res.resize(NumElems);

  unsigned NumOps = PHI.getNumOperands();
  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreatePHI(VT->getElementType(), NumOps,
                               PHI.getName() + ".i" + Twine(I));

  for (unsigned I = 0; I < NumOps; ++I) {
    Scatterer Op = scatter(&PHI, PHI.getIncomingValue(I));
    BasicBlock *IncomingBlock = PHI.getIncomingBlock(I);
    for (unsigned J = 0; J < NumElems; ++J)
      cast<PHINode>(Res[J])->addIncoming(Op[J], IncomingBlock);
  }
  gather(&PHI, Res);
  return true;
}

bool ScalarizerVisitor::visitLoadInst(LoadInst &LI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!LI.isSimple())
    return false;

  VectorLayout Layout;
  if (!getVectorLayout(LI.getType(), LI.getAlignment(), Layout,
                       LI.getModule()->getDataLayout()))
    return false;

  unsigned NumElems = Layout.VecTy->getNumElements();
  IRBuilder<> Builder(&LI);
  Scatterer Ptr = scatter(&LI, LI.getPointerOperand());
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreateAlignedLoad(Layout.VecTy->getElementType(), Ptr[I],
                                       Layout.getElemAlign(I),
                                       LI.getName() + ".i" + Twine(I));
  gather(&LI, Res);
  return true;
}

bool ScalarizerVisitor::visitStoreInst(StoreInst &SI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!SI.isSimple())
    return false;

  VectorLayout Layout;
  Value *FullValue = SI.getValueOperand();
  if (!getVectorLayout(FullValue->getType(), SI.getAlignment(), Layout,
                       SI.getModule()->getDataLayout()))
    return false;

  unsigned NumElems = Layout.VecTy->getNumElements();
  IRBuilder<> Builder(&SI);
  Scatterer Ptr = scatter(&SI, SI.getPointerOperand());
  Scatterer Val = scatter(&SI, FullValue);

  ValueVector Stores;
  Stores.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    unsigned Align = Layout.getElemAlign(I);
    Stores[I] = Builder.CreateAlignedStore(Val[I], Ptr[I], Align);
  }
  transferMetadataAndIRFlags(&SI, Stores);
  return true;
}

bool ScalarizerVisitor::visitCallInst(CallInst &CI) {
  return splitCall(CI);
}

// Delete the instructions that we scalarized. If a full vector result
// is still needed, recreate it using InsertElements.
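// For example (a sketch), a still-used <2 x i32> result %res is rebuilt as:
//   %res.upto0 = insertelement <2 x i32> undef, i32 %res.i0, i32 0
//   %res.upto1 = insertelement <2 x i32> %res.upto0, i32 %res.i1, i32 1
// after which the final insertelement takes over the name %res.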
bool ScalarizerVisitor::finish() {
  // The presence of data in Gathered or Scattered indicates changes
  // made to the Function.
  if (Gathered.empty() && Scattered.empty())
    return false;
  for (const auto &GMI : Gathered) {
    Instruction *Op = GMI.first;
    ValueVector &CV = *GMI.second;
    if (!Op->use_empty()) {
      // The value is still needed, so recreate it using a series of
      // InsertElements.
      Type *Ty = Op->getType();
      Value *Res = UndefValue::get(Ty);
      BasicBlock *BB = Op->getParent();
      unsigned Count = Ty->getVectorNumElements();
      IRBuilder<> Builder(Op);
      if (isa<PHINode>(Op))
        Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
      for (unsigned I = 0; I < Count; ++I)
        Res = Builder.CreateInsertElement(Res, CV[I], Builder.getInt32(I),
                                          Op->getName() + ".upto" + Twine(I));
      Res->takeName(Op);
      Op->replaceAllUsesWith(Res);
    }
    Op->eraseFromParent();
  }
  Gathered.clear();
  Scattered.clear();
  return true;
}

PreservedAnalyses ScalarizerPass::run(Function &F,
                                      FunctionAnalysisManager &AM) {
  Module &M = *F.getParent();
  unsigned ParallelLoopAccessMDKind =
      M.getContext().getMDKindID("llvm.mem.parallel_loop_access");
  DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  ScalarizerVisitor Impl(ParallelLoopAccessMDKind, DT);
  bool Changed = Impl.visit(F);
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  return Changed ? PA : PreservedAnalyses::all();
}