//===--------------------- InterleavedAccessPass.cpp ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Interleaved Access pass, which identifies
// interleaved memory accesses and transforms them into target specific
// intrinsics.
//
// An interleaved load reads data from memory into several vectors,
// DE-interleaving the data by a factor. An interleaved store writes several
// vectors to memory, RE-interleaving the data by a factor.
//
// As interleaved accesses are difficult to identify in CodeGen (mainly
// because the VECTOR_SHUFFLE DAG node is quite different from the
// shufflevector IR), we identify and transform them into intrinsics in this
// pass so that the intrinsics can be easily matched into target specific
// instructions later in CodeGen.
//
// E.g. An interleaved load (Factor = 2):
//        %wide.vec = load <8 x i32>, <8 x i32>* %ptr
//        %v0 = shuffle <8 x i32> %wide.vec, <8 x i32> undef, <0, 2, 4, 6>
//        %v1 = shuffle <8 x i32> %wide.vec, <8 x i32> undef, <1, 3, 5, 7>
//
// It could be transformed into a ld2 intrinsic in the AArch64 backend or a
// vld2 intrinsic in the ARM backend.
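//
// For illustration only (the exact intrinsic name, type mangling and the
// pointer bitcast are target specific), the AArch64 lowering would produce
// roughly:
//        %ld2 = call { <4 x i32>, <4 x i32> }
//                    @llvm.aarch64.neon.ld2.v4i32.p0v4i32(<4 x i32>* %ptr)
//        %v0 = extractvalue { <4 x i32>, <4 x i32> } %ld2, 0
//        %v1 = extractvalue { <4 x i32>, <4 x i32> } %ld2, 1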
//
// E.g. An interleaved store (Factor = 3):
//        %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
//                         <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
//        store <12 x i32> %i.vec, <12 x i32>* %ptr
//
// It could be transformed into a st3 intrinsic in the AArch64 backend or a
// vst3 intrinsic in the ARM backend.
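//
// For illustration only (again, the exact intrinsic signature is target
// specific), after splitting the interleaved data back into three <4 x i32>
// sub-vectors (hypothetically %s0, %s1 and %s2), the AArch64 lowering would
// emit roughly:
//        call void @llvm.aarch64.neon.st3.v4i32.p0v4i32(<4 x i32> %s0,
//                  <4 x i32> %s1, <4 x i32> %s2, <4 x i32>* %ptr)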
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

#define DEBUG_TYPE "interleaved-access"

static cl::opt<bool> LowerInterleavedAccesses(
    "lower-interleaved-accesses",
    cl::desc("Enable lowering interleaved accesses to intrinsics"),
    cl::init(true), cl::Hidden);

static unsigned MaxFactor; // The maximum supported interleave factor.

namespace {

class InterleavedAccess : public FunctionPass {

public:
  static char ID;
  InterleavedAccess(const TargetMachine *TM = nullptr)
      : FunctionPass(ID), DT(nullptr), TM(TM), TLI(nullptr) {
    initializeInterleavedAccessPass(*PassRegistry::getPassRegistry());
  }

  const char *getPassName() const override {
    return "Interleaved Access Pass";
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
  }

private:
  DominatorTree *DT;
  const TargetMachine *TM;
  const TargetLowering *TLI;

  /// \brief Transform an interleaved load into target specific intrinsics.
  bool lowerInterleavedLoad(LoadInst *LI,
                            SmallVector<Instruction *, 32> &DeadInsts);

  /// \brief Transform an interleaved store into target specific intrinsics.
  bool lowerInterleavedStore(StoreInst *SI,
                             SmallVector<Instruction *, 32> &DeadInsts);

  /// \brief Returns true if the uses of an interleaved load by the
  /// extractelement instructions in \p Extracts can be replaced by uses of the
  /// shufflevector instructions in \p Shuffles instead. If so, the necessary
  /// replacements are also performed.
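  ///
  /// For example (illustrative, using the names from the file header), given
  /// a DE-interleave shuffle %v0 of %wide.vec with mask <0, 2, 4, 6>, the
  /// instruction
  ///     %elt = extractelement <8 x i32> %wide.vec, i32 2
  /// can be rewritten to extract lane 1 of the narrower shuffle result:
  ///     %elt = extractelement <4 x i32> %v0, i32 1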
  bool tryReplaceExtracts(ArrayRef<ExtractElementInst *> Extracts,
                          ArrayRef<ShuffleVectorInst *> Shuffles);
};
} // end anonymous namespace.

char InterleavedAccess::ID = 0;
INITIALIZE_TM_PASS_BEGIN(
    InterleavedAccess, "interleaved-access",
    "Lower interleaved memory accesses to target specific intrinsics", false,
    false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_TM_PASS_END(
    InterleavedAccess, "interleaved-access",
    "Lower interleaved memory accesses to target specific intrinsics", false,
    false)

FunctionPass *llvm::createInterleavedAccessPass(const TargetMachine *TM) {
  return new InterleavedAccess(TM);
}

/// \brief Check if the mask is a DE-interleave mask of the given factor
/// \p Factor like:
///     <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
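///
/// E.g. (illustrative) for Factor = 4 and a 4-element shuffle result, the
/// DE-interleave mask of Index 1 is <1, 5, 9, 13>.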
static bool isDeInterleaveMaskOfFactor(ArrayRef<int> Mask, unsigned Factor,
                                       unsigned &Index) {
  // Check all potential start indices from 0 to (Factor - 1).
  for (Index = 0; Index < Factor; Index++) {
    unsigned i = 0;

    // Check that elements are in ascending order by Factor. Ignore undef
    // elements.
    for (; i < Mask.size(); i++)
      if (Mask[i] >= 0 && static_cast<unsigned>(Mask[i]) != Index + i * Factor)
        break;

    if (i == Mask.size())
      return true;
  }

  return false;
}

/// \brief Check if the mask is a DE-interleave mask for an interleaved load.
///
/// E.g. DE-interleave masks (Factor = 2) could be:
///     <0, 2, 4, 6>    (mask of index 0 to extract even elements)
///     <1, 3, 5, 7>    (mask of index 1 to extract odd elements)
static bool isDeInterleaveMask(ArrayRef<int> Mask, unsigned &Factor,
                               unsigned &Index) {
  if (Mask.size() < 2)
    return false;

  // Check potential Factors.
  for (Factor = 2; Factor <= MaxFactor; Factor++)
    if (isDeInterleaveMaskOfFactor(Mask, Factor, Index))
      return true;

  return false;
}

/// \brief Check if the mask is a RE-interleave mask for an interleaved store.
///
/// I.e. <0, NumSubElts, ... , NumSubElts*(Factor - 1), 1, NumSubElts + 1, ...>
///
/// E.g. The RE-interleave mask (Factor = 2) could be:
///     <0, 4, 1, 5, 2, 6, 3, 7>
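///
/// E.g. (illustrative) for Factor = 3 and NumSubElts = 4, element i of the
/// mask is (i % Factor) * NumSubElts + i / Factor, i.e.
///     <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>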
static bool isReInterleaveMask(ArrayRef<int> Mask, unsigned &Factor) {
  unsigned NumElts = Mask.size();
  if (NumElts < 4)
    return false;

  // Check potential Factors.
  for (Factor = 2; Factor <= MaxFactor; Factor++) {
    if (NumElts % Factor)
      continue;

    unsigned NumSubElts = NumElts / Factor;
    if (!isPowerOf2_32(NumSubElts))
      continue;

    // Check whether each element matches the RE-interleave rule. Ignore undef
    // elements.
    unsigned i = 0;
    for (; i < NumElts; i++)
      if (Mask[i] >= 0 &&
          static_cast<unsigned>(Mask[i]) !=
              (i % Factor) * NumSubElts + i / Factor)
        break;

    // Found a RE-interleave mask of the current factor.
    if (i == NumElts)
      return true;
  }

  return false;
}

bool InterleavedAccess::lowerInterleavedLoad(
    LoadInst *LI, SmallVector<Instruction *, 32> &DeadInsts) {
  if (!LI->isSimple())
    return false;

  SmallVector<ShuffleVectorInst *, 4> Shuffles;
  SmallVector<ExtractElementInst *, 4> Extracts;

  // Check if all users of this load are shufflevectors. If we encounter any
  // users that are extractelement instructions, we save them to later check if
  // they can be modified to extract from one of the shufflevectors instead of
  // the load.
  for (auto UI = LI->user_begin(), E = LI->user_end(); UI != E; UI++) {
    auto *Extract = dyn_cast<ExtractElementInst>(*UI);
    if (Extract && isa<ConstantInt>(Extract->getIndexOperand())) {
      Extracts.push_back(Extract);
      continue;
    }
    ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(*UI);
    if (!SVI || !isa<UndefValue>(SVI->getOperand(1)))
      return false;

    Shuffles.push_back(SVI);
  }

  if (Shuffles.empty())
    return false;

  unsigned Factor, Index;

  // Check if the first shufflevector is a DE-interleave shuffle.
  if (!isDeInterleaveMask(Shuffles[0]->getShuffleMask(), Factor, Index))
    return false;

  // Holds the corresponding index for each DE-interleave shuffle.
  SmallVector<unsigned, 4> Indices;
  Indices.push_back(Index);

  Type *VecTy = Shuffles[0]->getType();

  // Check if the other shufflevectors are also DE-interleave shuffles of the
  // same type and factor as the first shufflevector.
  for (unsigned i = 1; i < Shuffles.size(); i++) {
    if (Shuffles[i]->getType() != VecTy)
      return false;

    if (!isDeInterleaveMaskOfFactor(Shuffles[i]->getShuffleMask(), Factor,
                                    Index))
      return false;

    Indices.push_back(Index);
  }

  // Try to modify users of the load that are extractelement instructions to
  // use the shufflevector instructions instead of the load.
  if (!tryReplaceExtracts(Extracts, Shuffles))
    return false;

  DEBUG(dbgs() << "IA: Found an interleaved load: " << *LI << "\n");

  // Try to create target specific intrinsics to replace the load and shuffles.
  if (!TLI->lowerInterleavedLoad(LI, Shuffles, Indices, Factor))
    return false;

  for (auto SVI : Shuffles)
    DeadInsts.push_back(SVI);

  DeadInsts.push_back(LI);
  return true;
}

bool InterleavedAccess::tryReplaceExtracts(
    ArrayRef<ExtractElementInst *> Extracts,
    ArrayRef<ShuffleVectorInst *> Shuffles) {

  // If there aren't any extractelement instructions to modify, there's nothing
  // to do.
  if (Extracts.empty())
    return true;

  // Maps extractelement instructions to vector-index pairs. The extractelement
  // instructions will be modified to use the new vector and index operands.
  DenseMap<ExtractElementInst *, std::pair<Value *, int>> ReplacementMap;

  for (auto *Extract : Extracts) {

    // The vector index that is extracted.
    auto *IndexOperand = cast<ConstantInt>(Extract->getIndexOperand());
    auto Index = IndexOperand->getSExtValue();

    // Look for a suitable shufflevector instruction. The goal is to modify the
    // extractelement instruction (which uses an interleaved load) to use one
    // of the shufflevector instructions instead of the load.
    for (auto *Shuffle : Shuffles) {

      // If the shufflevector instruction doesn't dominate the extract, we
      // can't create a use of it.
      if (!DT->dominates(Shuffle, Extract))
        continue;

      // Inspect the indices of the shufflevector instruction. If the shuffle
      // selects the same index that is extracted, we can modify the
      // extractelement instruction.
      SmallVector<int, 4> Indices;
      Shuffle->getShuffleMask(Indices);
      for (unsigned I = 0; I < Indices.size(); ++I)
        if (Indices[I] == Index) {
          assert(Extract->getOperand(0) == Shuffle->getOperand(0) &&
                 "Vector operations do not match");
          ReplacementMap[Extract] = std::make_pair(Shuffle, I);
          break;
        }

      // If we found a suitable shufflevector instruction, stop looking.
      if (ReplacementMap.count(Extract))
        break;
    }

    // If we did not find a suitable shufflevector instruction, the
    // extractelement instruction cannot be modified, so we must give up.
    if (!ReplacementMap.count(Extract))
      return false;
  }

  // Finally, perform the replacements.
  IRBuilder<> Builder(Extracts[0]->getContext());
  for (auto &Replacement : ReplacementMap) {
    auto *Extract = Replacement.first;
    auto *Vector = Replacement.second.first;
    auto Index = Replacement.second.second;
    Builder.SetInsertPoint(Extract);
    Extract->replaceAllUsesWith(Builder.CreateExtractElement(Vector, Index));
    Extract->eraseFromParent();
  }

  return true;
}

bool InterleavedAccess::lowerInterleavedStore(
    StoreInst *SI, SmallVector<Instruction *, 32> &DeadInsts) {
  if (!SI->isSimple())
    return false;

  ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(SI->getValueOperand());
  if (!SVI || !SVI->hasOneUse())
    return false;

  // Check if the shufflevector is a RE-interleave shuffle.
  unsigned Factor;
  if (!isReInterleaveMask(SVI->getShuffleMask(), Factor))
    return false;

  DEBUG(dbgs() << "IA: Found an interleaved store: " << *SI << "\n");

  // Try to create target specific intrinsics to replace the store and shuffle.
  if (!TLI->lowerInterleavedStore(SI, SVI, Factor))
    return false;

  // Already have a new target specific interleaved store. Erase the old store.
  DeadInsts.push_back(SI);
  DeadInsts.push_back(SVI);
  return true;
}

bool InterleavedAccess::runOnFunction(Function &F) {
  if (!TM || !LowerInterleavedAccesses)
    return false;

  DEBUG(dbgs() << "*** " << getPassName() << ": " << F.getName() << "\n");

  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  TLI = TM->getSubtargetImpl(F)->getTargetLowering();
  MaxFactor = TLI->getMaxSupportedInterleaveFactor();

  // Holds dead instructions that will be erased later.
  SmallVector<Instruction *, 32> DeadInsts;
  bool Changed = false;

  for (auto &I : instructions(F)) {
    if (LoadInst *LI = dyn_cast<LoadInst>(&I))
      Changed |= lowerInterleavedLoad(LI, DeadInsts);

    if (StoreInst *SI = dyn_cast<StoreInst>(&I))
      Changed |= lowerInterleavedStore(SI, DeadInsts);
  }

  for (auto I : DeadInsts)
    I->eraseFromParent();

  return Changed;
}