//===- ARMParallelDSP.cpp - Parallel DSP Pass -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Armv6 introduced instructions to perform 32-bit SIMD operations. The
/// purpose of this pass is to do some IR pattern matching to create ACLE
/// DSP intrinsics, which map onto these 32-bit SIMD operations.
/// This pass runs only when unaligned accesses are supported/enabled.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMSubtarget.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/OrderedBasicBlock.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/PassRegistry.h"
#include "llvm/PassSupport.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "arm-parallel-dsp"

STATISTIC(NumSMLAD, "Number of smlad instructions generated");

static cl::opt<bool>
DisableParallelDSP("disable-arm-parallel-dsp", cl::Hidden, cl::init(false),
                   cl::desc("Disable the ARM Parallel DSP pass"));

static cl::opt<unsigned>
NumLoadLimit("arm-parallel-dsp-load-limit", cl::Hidden, cl::init(16),
             cl::desc("Limit the number of loads analysed"));

namespace {
  struct MulCandidate;
  class Reduction;

  using MulCandList = SmallVector<std::unique_ptr<MulCandidate>, 8>;
  using MemInstList = SmallVectorImpl<LoadInst*>;
  using MulPairList = SmallVector<std::pair<MulCandidate*, MulCandidate*>, 8>;

  // 'MulCandidate' holds the multiplication instructions that are candidates
  // for parallel execution.
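  // For example, given this illustrative IR (names assumed, not taken from a
  // real test), Root records %mul and LHS/RHS record the loads under the
  // sexts:
  //   %sext0 = sext i16 %ld0 to i32
  //   %sext1 = sext i16 %ld1 to i32
  //   %mul = mul i32 %sext0, %sext1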
  struct MulCandidate {
    Instruction *Root;
    Value* LHS;
    Value* RHS;
    bool Exchange = false;
    bool ReadOnly = true;
    bool Paired = false;
    SmallVector<LoadInst*, 2> VecLd; // Container for loads to widen.

    MulCandidate(Instruction *I, Value *lhs, Value *rhs) :
      Root(I), LHS(lhs), RHS(rhs) { }

    bool HasTwoLoadInputs() const {
      return isa<LoadInst>(LHS) && isa<LoadInst>(RHS);
    }

    LoadInst *getBaseLoad() const {
      return VecLd.front();
    }
  };

  /// Represent a sequence of multiply-accumulate operations with the aim to
  /// perform the multiplications in parallel.
  class Reduction {
    Instruction *Root = nullptr;
    Value *Acc = nullptr;
    MulCandList Muls;
    MulPairList MulPairs;
    SetVector<Instruction*> Adds;

  public:
    Reduction() = delete;

    Reduction(Instruction *Add) : Root(Add) { }

    /// Record an Add instruction that is part of this reduction.
    void InsertAdd(Instruction *I) { Adds.insert(I); }

    /// Create MulCandidates, each rooted at a Mul instruction, that are part
    /// of this reduction.
    void InsertMuls() {
      auto GetMulOperand = [](Value *V) -> Instruction* {
        if (auto *SExt = dyn_cast<SExtInst>(V)) {
          if (auto *I = dyn_cast<Instruction>(SExt->getOperand(0)))
            if (I->getOpcode() == Instruction::Mul)
              return I;
        } else if (auto *I = dyn_cast<Instruction>(V)) {
          if (I->getOpcode() == Instruction::Mul)
            return I;
        }
        return nullptr;
      };

      auto InsertMul = [this](Instruction *I) {
        Value *LHS = cast<Instruction>(I->getOperand(0))->getOperand(0);
        Value *RHS = cast<Instruction>(I->getOperand(1))->getOperand(0);
        Muls.push_back(std::make_unique<MulCandidate>(I, LHS, RHS));
      };

      for (auto *Add : Adds) {
        if (Add == Acc)
          continue;
        if (auto *Mul = GetMulOperand(Add->getOperand(0)))
          InsertMul(Mul);
        if (auto *Mul = GetMulOperand(Add->getOperand(1)))
          InsertMul(Mul);
      }
    }

    /// Add the incoming accumulator value, returning true if a value has not
    /// already been added. Returning false signals to the user that this
    /// reduction already has a value to initialise the accumulator.
    bool InsertAcc(Value *V) {
      if (Acc)
        return false;
      Acc = V;
      return true;
    }

    /// Set two MulCandidates, rooted at muls, that can be executed as a
    /// single parallel operation.
    void AddMulPair(MulCandidate *Mul0, MulCandidate *Mul1,
                    bool Exchange = false) {
      LLVM_DEBUG(dbgs() << "Pairing:\n"
                 << *Mul0->Root << "\n"
                 << *Mul1->Root << "\n");
      Mul0->Paired = true;
      Mul1->Paired = true;
      if (Exchange)
        Mul1->Exchange = true;
      MulPairs.push_back(std::make_pair(Mul0, Mul1));
    }
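    // Note: with Exchange set, the pair is later lowered to the SMLADX or
    // SMLALDX form, which swaps the halfwords of the second operand before
    // multiplying (see CreateSMLAD in InsertParallelMACs below).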

    /// Return true if enough mul operations are found that can be executed
    /// in parallel.
    bool CreateParallelPairs();

    /// Return the add instruction which is the root of the reduction.
    Instruction *getRoot() { return Root; }

    bool is64Bit() const { return Root->getType()->isIntegerTy(64); }

    Type *getType() const { return Root->getType(); }

    /// Return the incoming value to be accumulated. This may be null.
    Value *getAccumulator() { return Acc; }

    /// Return the set of adds that comprise the reduction.
    SetVector<Instruction*> &getAdds() { return Adds; }

    /// Return the MulCandidates, each rooted at a mul instruction, that
    /// comprise the reduction.
    MulCandList &getMuls() { return Muls; }

    /// Return the MulCandidates, rooted at mul instructions, that have been
    /// paired for parallel execution.
    MulPairList &getMulPairs() { return MulPairs; }

    /// To finalise, replace the uses of the root with the intrinsic call.
    void UpdateRoot(Instruction *SMLAD) {
      Root->replaceAllUsesWith(SMLAD);
    }

    void dump() {
      LLVM_DEBUG(dbgs() << "Reduction:\n";
        for (auto *Add : Adds)
          LLVM_DEBUG(dbgs() << *Add << "\n");
        for (auto &Mul : Muls)
          LLVM_DEBUG(dbgs() << *Mul->Root << "\n"
                     << "  " << *Mul->LHS << "\n"
                     << "  " << *Mul->RHS << "\n");
        LLVM_DEBUG(if (Acc) dbgs() << "Acc in: " << *Acc << "\n")
      );
    }
  };

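  /// Records a group of consecutive narrow loads that have been replaced by
  /// a single wide load, and the wide load that replaced them.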
  class WidenedLoad {
    LoadInst *NewLd = nullptr;
    SmallVector<LoadInst*, 4> Loads;

  public:
    WidenedLoad(SmallVectorImpl<LoadInst*> &Lds, LoadInst *Wide)
      : NewLd(Wide) {
      for (auto *I : Lds)
        Loads.push_back(I);
    }
    LoadInst *getLoad() {
      return NewLd;
    }
  };

  class ARMParallelDSP : public FunctionPass {
    ScalarEvolution *SE;
    AliasAnalysis *AA;
    TargetLibraryInfo *TLI;
    DominatorTree *DT;
    const DataLayout *DL;
    Module *M;
    std::map<LoadInst*, LoadInst*> LoadPairs;
    SmallPtrSet<LoadInst*, 4> OffsetLoads;
    std::map<LoadInst*, std::unique_ptr<WidenedLoad>> WideLoads;

    template<unsigned>
    bool IsNarrowSequence(Value *V);
    bool Search(Value *V, BasicBlock *BB, Reduction &R);
    bool RecordMemoryOps(BasicBlock *BB);
    void InsertParallelMACs(Reduction &Reduction);
    bool AreSequentialLoads(LoadInst *Ld0, LoadInst *Ld1, MemInstList &VecMem);
    LoadInst* CreateWideLoad(MemInstList &Loads, IntegerType *LoadTy);
    bool CreateParallelPairs(Reduction &R);

    /// Try to match and generate: SMLAD, SMLADX - Signed Multiply Accumulate
    /// Dual performs two signed 16x16-bit multiplications. It adds the
    /// products to a 32-bit accumulate operand. Optionally, the instruction
    /// can exchange the halfwords of the second operand before performing the
    /// arithmetic.
    bool MatchSMLAD(Function &F);

  public:
    static char ID;

    ARMParallelDSP() : FunctionPass(ID) { }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      FunctionPass::getAnalysisUsage(AU);
      AU.addRequired<AssumptionCacheTracker>();
      AU.addRequired<ScalarEvolutionWrapperPass>();
      AU.addRequired<AAResultsWrapperPass>();
      AU.addRequired<TargetLibraryInfoWrapperPass>();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addRequired<TargetPassConfig>();
      AU.addPreserved<ScalarEvolutionWrapperPass>();
      AU.addPreserved<GlobalsAAWrapperPass>();
      AU.setPreservesCFG();
    }

    bool runOnFunction(Function &F) override {
      if (DisableParallelDSP)
        return false;
      if (skipFunction(F))
        return false;

      SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
      AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
      TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
      DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
      auto &TPC = getAnalysis<TargetPassConfig>();

      M = F.getParent();
      DL = &M->getDataLayout();

      auto &TM = TPC.getTM<TargetMachine>();
      auto *ST = &TM.getSubtarget<ARMSubtarget>(F);

      if (!ST->allowsUnalignedMem()) {
        LLVM_DEBUG(dbgs() << "Unaligned memory access not supported: not "
                             "running pass ARMParallelDSP\n");
        return false;
      }

      if (!ST->hasDSP()) {
        LLVM_DEBUG(dbgs() << "DSP extension not enabled: not running pass "
                             "ARMParallelDSP\n");
        return false;
      }

      if (!ST->isLittle()) {
        LLVM_DEBUG(dbgs() << "Only supporting little endian: not running pass "
                          << "ARMParallelDSP\n");
        return false;
      }

      LLVM_DEBUG(dbgs() << "\n== Parallel DSP pass ==\n");
      LLVM_DEBUG(dbgs() << " - " << F.getName() << "\n\n");

      bool Changes = MatchSMLAD(F);
      return Changes;
    }
  };
}

template<typename MemInst>
static bool AreSequentialAccesses(MemInst *MemOp0, MemInst *MemOp1,
                                  const DataLayout &DL, ScalarEvolution &SE) {
  if (isConsecutiveAccess(MemOp0, MemOp1, DL, SE))
    return true;
  return false;
}

bool ARMParallelDSP::AreSequentialLoads(LoadInst *Ld0, LoadInst *Ld1,
                                        MemInstList &VecMem) {
  if (!Ld0 || !Ld1)
    return false;

  if (!LoadPairs.count(Ld0) || LoadPairs[Ld0] != Ld1)
    return false;

  LLVM_DEBUG(dbgs() << "Loads are sequential and valid:\n";
             dbgs() << "Ld0:"; Ld0->dump();
             dbgs() << "Ld1:"; Ld1->dump();
            );

  VecMem.clear();
  VecMem.push_back(Ld0);
  VecMem.push_back(Ld1);
  return true;
}

// MaxBitWidth: the maximum supported bitwidth of the elements in the DSP
// instructions, which is set to 16. So here we should collect all i8 and i16
// narrow operations.
// TODO: we currently only collect i16, and will support i8 later, so that's
// why we check that types are equal to MaxBitWidth, and not <= MaxBitWidth.
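// For example, this illustrative snippet is a narrow sequence, because the
// sext source type is i16 (== MaxBitWidth) and the load can be paired:
//   %ld = load i16, i16* %ptr
//   %sext = sext i16 %ld to i32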
template<unsigned MaxBitWidth>
bool ARMParallelDSP::IsNarrowSequence(Value *V) {
  if (auto *SExt = dyn_cast<SExtInst>(V)) {
    if (SExt->getSrcTy()->getIntegerBitWidth() != MaxBitWidth)
      return false;

    if (auto *Ld = dyn_cast<LoadInst>(SExt->getOperand(0))) {
      // Check that this load could be paired.
      return LoadPairs.count(Ld) || OffsetLoads.count(Ld);
    }
  }
  return false;
}

/// Iterate through the block and record base, offset pairs of loads which can
/// be widened into a single load.
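/// For example (illustrative), two sign-extended i16 loads, one from %p and
/// one from the element immediately after it, are recorded as a
/// (base, offset) pair, provided no intervening write prevents merging them
/// into one i32 load.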
bool ARMParallelDSP::RecordMemoryOps(BasicBlock *BB) {
  SmallVector<LoadInst*, 8> Loads;
  SmallVector<Instruction*, 8> Writes;
  LoadPairs.clear();
  WideLoads.clear();
  OrderedBasicBlock OrderedBB(BB);

  // Collect loads and instructions that may write to memory. For now we only
  // record loads which are simple, sign-extended and have a single user.
  // TODO: Allow zero-extended loads.
  for (auto &I : *BB) {
    if (I.mayWriteToMemory())
      Writes.push_back(&I);
    auto *Ld = dyn_cast<LoadInst>(&I);
    if (!Ld || !Ld->isSimple() ||
        !Ld->hasOneUse() || !isa<SExtInst>(Ld->user_back()))
      continue;
    Loads.push_back(Ld);
  }

  if (Loads.empty() || Loads.size() > NumLoadLimit)
    return false;

  using InstSet = std::set<Instruction*>;
  using DepMap = std::map<Instruction*, InstSet>;
  DepMap RAWDeps;

  // Record any writes that may alias a load.
  const auto Size = LocationSize::unknown();
  for (auto Write : Writes) {
    for (auto Read : Loads) {
      MemoryLocation ReadLoc =
        MemoryLocation(Read->getPointerOperand(), Size);

      if (!isModOrRefSet(intersectModRef(AA->getModRefInfo(Write, ReadLoc),
                                         ModRefInfo::ModRef)))
        continue;
      if (OrderedBB.dominates(Write, Read))
        RAWDeps[Read].insert(Write);
    }
  }

  // Check that there is no write between the two loads which would prevent
  // them from being safely merged.
  auto SafeToPair = [&](LoadInst *Base, LoadInst *Offset) {
    LoadInst *Dominator = OrderedBB.dominates(Base, Offset) ? Base : Offset;
    LoadInst *Dominated = OrderedBB.dominates(Base, Offset) ? Offset : Base;

    if (RAWDeps.count(Dominated)) {
      InstSet &WritesBefore = RAWDeps[Dominated];

      for (auto Before : WritesBefore) {
        // We can't move the second load backward, past a write, to merge
        // with the first load.
        if (OrderedBB.dominates(Dominator, Before))
          return false;
      }
    }
    return true;
  };

  // Record base, offset load pairs.
  for (auto *Base : Loads) {
    for (auto *Offset : Loads) {
      if (Base == Offset || OffsetLoads.count(Offset))
        continue;

      if (AreSequentialAccesses<LoadInst>(Base, Offset, *DL, *SE) &&
          SafeToPair(Base, Offset)) {
        LoadPairs[Base] = Offset;
        OffsetLoads.insert(Offset);
        break;
      }
    }
  }

  LLVM_DEBUG(if (!LoadPairs.empty()) {
               dbgs() << "Consecutive load pairs:\n";
               for (auto &MapIt : LoadPairs) {
                 LLVM_DEBUG(dbgs() << *MapIt.first << ", "
                            << *MapIt.second << "\n");
               }
             });
  return LoadPairs.size() > 1;
}

// Search recursively back through the operands to find a tree of values that
// form a multiply-accumulate chain. The search records the Add and Mul
// instructions that form the reduction and allows us to find a single value
// to be used as the initial input to the accumulator.
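// For example, in this illustrative chain:
//   %add0 = add i32 %mul0, %acc
//   %add1 = add i32 %add0, %mul1
// searching from %add1 records both adds and identifies %acc as the value
// that initialises the accumulator.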
bool ARMParallelDSP::Search(Value *V, BasicBlock *BB, Reduction &R) {
  // If we find a non-instruction, try to use it as the initial accumulator
  // value. This may have already been found during the search, in which case
  // this function will return false, signalling a failed search.
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return R.InsertAcc(V);

  if (I->getParent() != BB)
    return false;

  switch (I->getOpcode()) {
  default:
    break;
  case Instruction::PHI:
    // Could be the accumulator value.
    return R.InsertAcc(V);
  case Instruction::Add: {
    // Adds should be adding together two muls, or another add and a mul to
    // be within the mac chain. One of the operands may also be the
    // accumulator value, at which point we should stop searching.
    R.InsertAdd(I);
    Value *LHS = I->getOperand(0);
    Value *RHS = I->getOperand(1);
    bool ValidLHS = Search(LHS, BB, R);
    bool ValidRHS = Search(RHS, BB, R);

    if (ValidLHS && ValidRHS)
      return true;

    return R.InsertAcc(I);
  }
  case Instruction::Mul: {
    Value *MulOp0 = I->getOperand(0);
    Value *MulOp1 = I->getOperand(1);
    return IsNarrowSequence<16>(MulOp0) && IsNarrowSequence<16>(MulOp1);
  }
  case Instruction::SExt:
    return Search(I->getOperand(0), BB, R);
  }
  return false;
}

// The pass needs to identify integer add/sub reductions of 16-bit vector
// multiplications.
// To use SMLAD:
// 1) we first need to find an integer add, then look for this pattern:
//
// acc0 = ...
// ld0 = load i16
// sext0 = sext i16 %ld0 to i32
// ld1 = load i16
// sext1 = sext i16 %ld1 to i32
// mul0 = mul %sext0, %sext1
// ld2 = load i16
// sext2 = sext i16 %ld2 to i32
// ld3 = load i16
// sext3 = sext i16 %ld3 to i32
// mul1 = mul i32 %sext2, %sext3
// add0 = add i32 %mul0, %acc0
// acc1 = add i32 %add0, %mul1
//
// Which can be selected to:
//
// ldr r0
// ldr r1
// smlad r2, r0, r1, r2
//
// If constants are used instead of loads, these will need to be hoisted
// out and into a register.
//
// If loop invariants are used instead of loads, these need to be packed
// before the loop begins.
//
bool ARMParallelDSP::MatchSMLAD(Function &F) {
  bool Changed = false;

  for (auto &BB : F) {
    SmallPtrSet<Instruction*, 4> AllAdds;
    if (!RecordMemoryOps(&BB))
      continue;

    for (Instruction &I : reverse(BB)) {
      if (I.getOpcode() != Instruction::Add)
        continue;

      if (AllAdds.count(&I))
        continue;

      const auto *Ty = I.getType();
      if (!Ty->isIntegerTy(32) && !Ty->isIntegerTy(64))
        continue;

      Reduction R(&I);
      if (!Search(&I, &BB, R))
        continue;

      R.InsertMuls();
      LLVM_DEBUG(dbgs() << "After search, Reduction:\n"; R.dump());

      if (!CreateParallelPairs(R))
        continue;

      InsertParallelMACs(R);
      Changed = true;
      AllAdds.insert(R.getAdds().begin(), R.getAdds().end());
    }
  }

  return Changed;
}

bool ARMParallelDSP::CreateParallelPairs(Reduction &R) {

  // Not enough mul operations to make a pair.
  if (R.getMuls().size() < 2)
    return false;

  // Check that the muls operate directly upon sign extended loads.
  for (auto &MulCand : R.getMuls()) {
    if (!MulCand->HasTwoLoadInputs())
      return false;
  }

  auto CanPair = [&](Reduction &R, MulCandidate *PMul0, MulCandidate *PMul1) {
    // The first elements of each vector should be loads with sexts. If we
    // find that these are two pairs of consecutive loads, then they can be
    // transformed into two wider loads and the users can be replaced with
    // DSP intrinsics.
    auto Ld0 = static_cast<LoadInst*>(PMul0->LHS);
    auto Ld1 = static_cast<LoadInst*>(PMul1->LHS);
    auto Ld2 = static_cast<LoadInst*>(PMul0->RHS);
    auto Ld3 = static_cast<LoadInst*>(PMul1->RHS);

    if (AreSequentialLoads(Ld0, Ld1, PMul0->VecLd)) {
      if (AreSequentialLoads(Ld2, Ld3, PMul1->VecLd)) {
        LLVM_DEBUG(dbgs() << "OK: found two pairs of parallel loads!\n");
        R.AddMulPair(PMul0, PMul1);
        return true;
      } else if (AreSequentialLoads(Ld3, Ld2, PMul1->VecLd)) {
        LLVM_DEBUG(dbgs() << "OK: found two pairs of parallel loads!\n");
        LLVM_DEBUG(dbgs() << "    exchanging Ld2 and Ld3\n");
        R.AddMulPair(PMul0, PMul1, true);
        return true;
      }
    } else if (AreSequentialLoads(Ld1, Ld0, PMul0->VecLd) &&
               AreSequentialLoads(Ld2, Ld3, PMul1->VecLd)) {
      LLVM_DEBUG(dbgs() << "OK: found two pairs of parallel loads!\n");
      LLVM_DEBUG(dbgs() << "    exchanging Ld0 and Ld1\n");
      LLVM_DEBUG(dbgs() << "    and swapping muls\n");
      // Only the second operand can be exchanged, so swap the muls.
      R.AddMulPair(PMul1, PMul0, true);
      return true;
    }
    return false;
  };

  MulCandList &Muls = R.getMuls();
  const unsigned Elems = Muls.size();
  for (unsigned i = 0; i < Elems; ++i) {
    MulCandidate *PMul0 = static_cast<MulCandidate*>(Muls[i].get());
    if (PMul0->Paired)
      continue;

    for (unsigned j = 0; j < Elems; ++j) {
      if (i == j)
        continue;

      MulCandidate *PMul1 = static_cast<MulCandidate*>(Muls[j].get());
      if (PMul1->Paired)
        continue;

      const Instruction *Mul0 = PMul0->Root;
      const Instruction *Mul1 = PMul1->Root;
      if (Mul0 == Mul1)
        continue;

      assert(PMul0 != PMul1 && "expected different chains");

      if (CanPair(R, PMul0, PMul1))
        break;
    }
  }
  return !R.getMulPairs().empty();
}

void ARMParallelDSP::InsertParallelMACs(Reduction &R) {

  auto CreateSMLAD = [&](LoadInst* WideLd0, LoadInst *WideLd1,
                         Value *Acc, bool Exchange,
                         Instruction *InsertAfter) {
    // Replace the reduction chain with an intrinsic call.
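    // A 32-bit accumulator selects SMLAD (or SMLADX when exchanging); a
    // 64-bit accumulator selects the long variants SMLALD/SMLALDX.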

    Value* Args[] = { WideLd0, WideLd1, Acc };
    Function *SMLAD = nullptr;
    if (Exchange)
      SMLAD = Acc->getType()->isIntegerTy(32) ?
        Intrinsic::getDeclaration(M, Intrinsic::arm_smladx) :
        Intrinsic::getDeclaration(M, Intrinsic::arm_smlaldx);
    else
      SMLAD = Acc->getType()->isIntegerTy(32) ?
        Intrinsic::getDeclaration(M, Intrinsic::arm_smlad) :
        Intrinsic::getDeclaration(M, Intrinsic::arm_smlald);

    IRBuilder<NoFolder> Builder(InsertAfter->getParent(),
                                BasicBlock::iterator(InsertAfter));
    Instruction *Call = Builder.CreateCall(SMLAD, Args);
    NumSMLAD++;
    return Call;
  };

  // Return the instruction after the dominated instruction.
  auto GetInsertPoint = [this](Value *A, Value *B) {
    assert((isa<Instruction>(A) || isa<Instruction>(B)) &&
           "expected at least one instruction");

    Value *V = nullptr;
    if (!isa<Instruction>(A))
      V = B;
    else if (!isa<Instruction>(B))
      V = A;
    else
      V = DT->dominates(cast<Instruction>(A), cast<Instruction>(B)) ? B : A;

    return &*++BasicBlock::iterator(cast<Instruction>(V));
  };

  Value *Acc = R.getAccumulator();

  // For any muls that were discovered but not paired, accumulate their values
  // as before.
  IRBuilder<NoFolder> Builder(R.getRoot()->getParent());
  MulCandList &MulCands = R.getMuls();
  for (auto &MulCand : MulCands) {
    if (MulCand->Paired)
      continue;

    Instruction *Mul = cast<Instruction>(MulCand->Root);
    LLVM_DEBUG(dbgs() << "Accumulating unpaired mul: " << *Mul << "\n");

    if (R.getType() != Mul->getType()) {
      assert(R.is64Bit() && "expected 64-bit result");
      Builder.SetInsertPoint(&*++BasicBlock::iterator(Mul));
      Mul = cast<Instruction>(Builder.CreateSExt(Mul, R.getRoot()->getType()));
    }

    if (!Acc) {
      Acc = Mul;
      continue;
    }

    // If Acc is the original incoming value to the reduction, it could be a
    // phi. But the phi will dominate Mul, meaning that Mul will be the
    // insertion point.
    Builder.SetInsertPoint(GetInsertPoint(Mul, Acc));
    Acc = Builder.CreateAdd(Mul, Acc);
  }

  if (!Acc) {
    Acc = R.is64Bit() ?
      ConstantInt::get(IntegerType::get(M->getContext(), 64), 0) :
      ConstantInt::get(IntegerType::get(M->getContext(), 32), 0);
  } else if (Acc->getType() != R.getType()) {
    Builder.SetInsertPoint(R.getRoot());
    Acc = Builder.CreateSExt(Acc, R.getType());
  }

  // Roughly sort the mul pairs in their program order.
  OrderedBasicBlock OrderedBB(R.getRoot()->getParent());
  llvm::sort(R.getMulPairs(), [&OrderedBB](auto &PairA, auto &PairB) {
    const Instruction *A = PairA.first->Root;
    const Instruction *B = PairB.first->Root;
    return OrderedBB.dominates(A, B);
  });

  IntegerType *Ty = IntegerType::get(M->getContext(), 32);
  for (auto &Pair : R.getMulPairs()) {
    MulCandidate *LHSMul = Pair.first;
    MulCandidate *RHSMul = Pair.second;
    LoadInst *BaseLHS = LHSMul->getBaseLoad();
    LoadInst *BaseRHS = RHSMul->getBaseLoad();
    LoadInst *WideLHS = WideLoads.count(BaseLHS) ?
      WideLoads[BaseLHS]->getLoad() : CreateWideLoad(LHSMul->VecLd, Ty);
    LoadInst *WideRHS = WideLoads.count(BaseRHS) ?
      WideLoads[BaseRHS]->getLoad() : CreateWideLoad(RHSMul->VecLd, Ty);

    Instruction *InsertAfter = GetInsertPoint(WideLHS, WideRHS);
    InsertAfter = GetInsertPoint(InsertAfter, Acc);
    Acc = CreateSMLAD(WideLHS, WideRHS, Acc, RHSMul->Exchange, InsertAfter);
  }
  R.UpdateRoot(cast<Instruction>(Acc));
}

LoadInst* ARMParallelDSP::CreateWideLoad(MemInstList &Loads,
                                         IntegerType *LoadTy) {
  assert(Loads.size() == 2 && "currently only support widening two loads");

  LoadInst *Base = Loads[0];
  LoadInst *Offset = Loads[1];

  Instruction *BaseSExt = dyn_cast<SExtInst>(Base->user_back());
  Instruction *OffsetSExt = dyn_cast<SExtInst>(Offset->user_back());

  assert((BaseSExt && OffsetSExt)
         && "Loads should have a single, extending, user");

  std::function<void(Value*, Value*)> MoveBefore =
    [&](Value *A, Value *B) -> void {
      if (!isa<Instruction>(A) || !isa<Instruction>(B))
        return;

      auto *Source = cast<Instruction>(A);
      auto *Sink = cast<Instruction>(B);

      if (DT->dominates(Source, Sink) ||
          Source->getParent() != Sink->getParent() ||
          isa<PHINode>(Source) || isa<PHINode>(Sink))
        return;

      Source->moveBefore(Sink);
      for (auto &Op : Source->operands())
        MoveBefore(Op, Source);
    };

  // Insert the load at the point of the original dominating load.
  LoadInst *DomLoad = DT->dominates(Base, Offset) ? Base : Offset;
  IRBuilder<NoFolder> IRB(DomLoad->getParent(),
                          ++BasicBlock::iterator(DomLoad));

  // Bitcast the pointer to a wider type and create the wide load, while
  // making sure to maintain the original alignment as this prevents ldrd
  // from being generated when it could be illegal due to memory alignment.
  const unsigned AddrSpace = DomLoad->getPointerAddressSpace();
  Value *VecPtr = IRB.CreateBitCast(Base->getPointerOperand(),
                                    LoadTy->getPointerTo(AddrSpace));
  LoadInst *WideLoad = IRB.CreateAlignedLoad(LoadTy, VecPtr,
                                             Base->getAlignment());

  // Make sure everything is in the correct order in the basic block.
  MoveBefore(Base->getPointerOperand(), VecPtr);
  MoveBefore(VecPtr, WideLoad);

  // From the wide load, create two values that equal the original two loads.
  // Loads[0] needs trunc while Loads[1] needs a lshr and trunc.
  // TODO: Support big-endian as well.
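  // For example, widening two i16 loads into an i32 (little endian,
  // illustrative IR):
  //   %bottom = trunc i32 %wide to i16          ; replaces Loads[0]
  //   %shifted = lshr i32 %wide, 16
  //   %top = trunc i32 %shifted to i16          ; replaces Loads[1]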
  Value *Bottom = IRB.CreateTrunc(WideLoad, Base->getType());
  Value *NewBaseSExt = IRB.CreateSExt(Bottom, BaseSExt->getType());
  BaseSExt->replaceAllUsesWith(NewBaseSExt);

  IntegerType *OffsetTy = cast<IntegerType>(Offset->getType());
  Value *ShiftVal = ConstantInt::get(LoadTy, OffsetTy->getBitWidth());
  Value *Top = IRB.CreateLShr(WideLoad, ShiftVal);
  Value *Trunc = IRB.CreateTrunc(Top, OffsetTy);
  Value *NewOffsetSExt = IRB.CreateSExt(Trunc, OffsetSExt->getType());
  OffsetSExt->replaceAllUsesWith(NewOffsetSExt);

  LLVM_DEBUG(dbgs() << "From Base and Offset:\n"
             << *Base << "\n" << *Offset << "\n"
             << "Created Wide Load:\n"
             << *WideLoad << "\n"
             << *Bottom << "\n"
             << *NewBaseSExt << "\n"
             << *Top << "\n"
             << *Trunc << "\n"
             << *NewOffsetSExt << "\n");
  WideLoads.emplace(std::make_pair(Base,
                    std::make_unique<WidenedLoad>(Loads, WideLoad)));
  return WideLoad;
}

Pass *llvm::createARMParallelDSPPass() {
  return new ARMParallelDSP();
}

char ARMParallelDSP::ID = 0;

INITIALIZE_PASS_BEGIN(ARMParallelDSP, "arm-parallel-dsp",
                      "Transform functions to use DSP intrinsics", false, false)
INITIALIZE_PASS_END(ARMParallelDSP, "arm-parallel-dsp",
                    "Transform functions to use DSP intrinsics", false, false)