//===----------- VectorUtils.cpp - Vectorizer utility functions -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines vectorizer utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Constants.h"

using namespace llvm;
using namespace llvm::PatternMatch;
/// \brief Identify if the intrinsic is trivially vectorizable.
/// This method returns true if the intrinsic's argument types are all
/// scalars for the scalar form of the intrinsic and all vectors for
/// the vector form of the intrinsic.
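///
/// For example (illustrative IR, not taken from this file): the scalar form
///   declare float @llvm.sqrt.f32(float)
/// has the trivially vectorizable counterpart
///   declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)
/// where every scalar argument simply becomes a vector.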
bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::sqrt:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::log:
  case Intrinsic::log10:
  case Intrinsic::log2:
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::bswap:
  case Intrinsic::bitreverse:
  case Intrinsic::ctpop:
  case Intrinsic::pow:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::powi:
    return true;
  default:
    return false;
  }
}

/// \brief Identifies if the intrinsic has a scalar operand. It checks for the
/// ctlz, cttz, and powi special intrinsics whose argument is scalar.
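///
/// For example (illustrative IR): in
///   declare float @llvm.powi.f32(float, i32)
/// the i32 exponent at operand index 1 stays scalar even in the vector form,
/// so hasVectorInstrinsicScalarOpd(Intrinsic::powi, 1) returns true.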
bool llvm::hasVectorInstrinsicScalarOpd(Intrinsic::ID ID,
                                        unsigned ScalarOpdIdx) {
  switch (ID) {
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::powi:
    return (ScalarOpdIdx == 1);
  default:
    return false;
  }
}

/// \brief Returns the intrinsic ID for the call.
/// For the input call instruction it finds the corresponding intrinsic and
/// returns its ID; if none is found, it returns Intrinsic::not_intrinsic.
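///
/// A minimal usage sketch (hypothetical caller; CI and TLI are assumed to be
/// in scope):
///   if (Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI))
///     ; // CI maps to a vectorizable or harmless (lifetime/assume) intrinsic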
Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
                                                const TargetLibraryInfo *TLI) {
  Intrinsic::ID ID = getIntrinsicForCallSite(CI, TLI);
  if (ID == Intrinsic::not_intrinsic)
    return Intrinsic::not_intrinsic;

  if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
      ID == Intrinsic::lifetime_end || ID == Intrinsic::assume)
    return ID;
  return Intrinsic::not_intrinsic;
}

/// \brief Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
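///
/// For example (illustrative IR): in
///   %p = getelementptr { i32 }, { i32 }* %s, i64 %i, i32 0
/// the trailing zero indexes a type ({ i32 }) whose allocation size matches
/// the GEP's result element type (i32), so it is peeled off and operand 1
/// (%i) is the one to check.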
unsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) {
  const DataLayout &DL = Gep->getModule()->getDataLayout();
  unsigned LastOperand = Gep->getNumOperands() - 1;
  unsigned GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());

  // Walk backwards and try to peel off zeros.
  while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
    // Find the type we're currently indexing into.
    gep_type_iterator GEPTI = gep_type_begin(Gep);
    std::advance(GEPTI, LastOperand - 1);

    // If it's a type with the same allocation size as the result of the GEP we
    // can peel off the zero index.
    if (DL.getTypeAllocSize(*GEPTI) != GEPAllocSize)
      break;
    --LastOperand;
  }

  return LastOperand;
}

/// \brief If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
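///
/// A minimal sketch of the common case (hypothetical IR): for
///   %p = getelementptr i32, i32* %base, i64 %i
/// inside loop \p Lp, this returns %i when %base is loop-invariant, and %p
/// itself otherwise.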
Value *llvm::stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return Ptr;

  unsigned InductionOperand = getGEPInductionOperand(GEP);

  // Check that all of the gep indices are uniform except for our induction
  // operand.
  for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
    if (i != InductionOperand &&
        !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
      return Ptr;
  return GEP->getOperand(InductionOperand);
}

/// \brief If a value has only one user that is a CastInst, return it.
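///
/// For example (illustrative IR): if the only cast user of %x with the
/// requested type is
///   %c = sext i32 %x to i64
/// then getUniqueCastUse(%x, Lp, i64) returns %c; a second such cast would
/// make the answer ambiguous, and nullptr is returned instead.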
Value *llvm::getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
  Value *UniqueCast = nullptr;
  for (User *U : Ptr->users()) {
    CastInst *CI = dyn_cast<CastInst>(U);
    if (CI && CI->getType() == Ty) {
      if (!UniqueCast)
        UniqueCast = CI;
      else
        return nullptr;
    }
  }
  return UniqueCast;
}

/// \brief Get the stride of a pointer access in a loop. Looks for symbolic
/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
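///
/// For example (illustrative C source): given
///   for (i = 0; i < n; i++)
///     a[i * stride] = 0;
/// the GEP index has the SCEV recurrence {0,+,stride}, and this function digs
/// out the loop-invariant IR value 'stride' so callers can specialize on it.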
Value *llvm::getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy || PtrTy->isAggregateType())
    return nullptr;

  // Try to remove a gep instruction to make the pointer (actually the index
  // at this point) easier to analyze. If OrigPtr is equal to Ptr we are
  // analyzing the pointer, otherwise we are analyzing the index.
  Value *OrigPtr = Ptr;

  // The size of the pointer access.
  int64_t PtrAccessSize = 1;

  Ptr = stripGetElementPtr(Ptr, SE, Lp);
  const SCEV *V = SE->getSCEV(Ptr);

  if (Ptr != OrigPtr)
    // Strip off casts.
    while (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V))
      V = C->getOperand();

  const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
  if (!S)
    return nullptr;

  V = S->getStepRecurrence(*SE);
  if (!V)
    return nullptr;

  // Strip off the size of access multiplication if we are still analyzing the
  // pointer.
  if (OrigPtr == Ptr) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
      if (M->getOperand(0)->getSCEVType() != scConstant)
        return nullptr;

      const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();

      // Huge step value - give up.
      if (APStepVal.getBitWidth() > 64)
        return nullptr;

      int64_t StepVal = APStepVal.getSExtValue();
      if (PtrAccessSize != StepVal)
        return nullptr;
      V = M->getOperand(1);
    }
  }

  // Strip off casts.
  Type *StripedOffRecurrenceCast = nullptr;
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V)) {
    StripedOffRecurrenceCast = C->getType();
    V = C->getOperand();
  }

  // Look for the loop invariant symbolic value.
  const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
  if (!U)
    return nullptr;

  Value *Stride = U->getValue();
  if (!Lp->isLoopInvariant(Stride))
    return nullptr;

  // If we have stripped off the recurrence cast we have to make sure that we
  // return the value that is used in this loop so that we can replace it later.
  if (StripedOffRecurrenceCast)
    Stride = getUniqueCastUse(Stride, Lp, StripedOffRecurrenceCast);

  return Stride;
}

/// \brief Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then extracted
/// from the vector.
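///
/// For example (illustrative IR):
///   %v1 = insertelement <4 x i32> %v0, i32 %x, i32 1
///   %e  = extractelement <4 x i32> %v1, i32 1
/// findScalarElement(%v1, 1) returns %x, so %e can be rewritten to use %x
/// directly.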
Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
  assert(V->getType()->isVectorTy() && "Not looking at a vector?");
  VectorType *VTy = cast<VectorType>(V->getType());
  unsigned Width = VTy->getNumElements();
  if (EltNo >= Width)  // Out of range access.
    return UndefValue::get(VTy->getElementType());

  if (Constant *C = dyn_cast<Constant>(V))
    return C->getAggregateElement(EltNo);

  if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert to a variable element, we don't know what it is.
    if (!isa<ConstantInt>(III->getOperand(2)))
      return nullptr;
    unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();

    // If this is an insert to the element we are looking for, return the
    // inserted value.
    if (EltNo == IIElt)
      return III->getOperand(1);

    // Otherwise, the insertelement doesn't modify the value, recurse on its
    // vector input.
    return findScalarElement(III->getOperand(0), EltNo);
  }

  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
    unsigned LHSWidth = SVI->getOperand(0)->getType()->getVectorNumElements();
    int InEl = SVI->getMaskValue(EltNo);
    if (InEl < 0)
      return UndefValue::get(VTy->getElementType());
    if (InEl < (int)LHSWidth)
      return findScalarElement(SVI->getOperand(0), InEl);
    return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
  }

  // Extract a value from a vector add operation with a constant zero.
  Value *Val = nullptr; Constant *Con = nullptr;
  if (match(V, m_Add(m_Value(Val), m_Constant(Con))))
    if (Constant *Elt = Con->getAggregateElement(EltNo))
      if (Elt->isNullValue())
        return findScalarElement(Val, EltNo);

  // Otherwise, we don't know.
  return nullptr;
}

/// \brief Get splat value if the input is a splat vector or return nullptr.
/// This function is not fully general. It checks only 2 cases:
/// the input value is (1) a constant splat vector or (2) a sequence
/// of instructions that broadcast a single value into a vector.
///
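/// For example (illustrative IR), the canonical broadcast idiom
///   %ins   = insertelement <4 x float> undef, float %x, i32 0
///   %splat = shufflevector <4 x float> %ins, <4 x float> undef,
///                          <4 x i32> zeroinitializer
/// is recognized by case (2) and yields %x.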
const llvm::Value *llvm::getSplatValue(const Value *V) {

  if (auto *C = dyn_cast<Constant>(V))
    if (isa<VectorType>(V->getType()))
      return C->getSplatValue();

  auto *ShuffleInst = dyn_cast<ShuffleVectorInst>(V);
  if (!ShuffleInst)
    return nullptr;
  // All-zero (or undef) shuffle mask elements.
  for (int MaskElt : ShuffleInst->getShuffleMask())
    if (MaskElt != 0 && MaskElt != -1)
      return nullptr;
  // The first shuffle source is 'insertelement' with index 0.
  auto *InsertEltInst =
    dyn_cast<InsertElementInst>(ShuffleInst->getOperand(0));
  if (!InsertEltInst || !isa<ConstantInt>(InsertEltInst->getOperand(2)) ||
      !cast<ConstantInt>(InsertEltInst->getOperand(2))->isNullValue())
    return nullptr;

  return InsertEltInst->getOperand(1);
}

MapVector<Instruction *, uint64_t>
llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
                               const TargetTransformInfo *TTI) {

  // DemandedBits will give us every value's live-out bits. But we want
  // to ensure no extra casts would need to be inserted, so every DAG
  // of connected values must have the same minimum bitwidth.
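  //
  // For example (illustrative IR): in the chain
  //   %e = zext i8 %x to i32
  //   %a = add i32 %e, 3
  //   %t = trunc i32 %a to i8
  // only the low 8 bits of %e and %a are demanded, so the whole DAG could be
  // evaluated in i8; the returned map would record a minimum width of 8 for
  // the wider instructions.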
  EquivalenceClasses<Value *> ECs;
  SmallVector<Value *, 16> Worklist;
  SmallPtrSet<Value *, 4> Roots;
  SmallPtrSet<Value *, 16> Visited;
  DenseMap<Value *, uint64_t> DBits;
  SmallPtrSet<Instruction *, 4> InstructionSet;
  MapVector<Instruction *, uint64_t> MinBWs;

  // Determine the roots. We work bottom-up, from truncs or icmps.
  bool SeenExtFromIllegalType = false;
  for (auto *BB : Blocks)
    for (auto &I : *BB) {
      InstructionSet.insert(&I);

      if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) &&
          !TTI->isTypeLegal(I.getOperand(0)->getType()))
        SeenExtFromIllegalType = true;

      // Only deal with non-vector integers up to 64-bits wide.
      if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) &&
          !I.getType()->isVectorTy() &&
          I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) {
        // Don't make work for ourselves. If we know the loaded type is legal,
        // don't add it to the worklist.
        if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType()))
          continue;

        Worklist.push_back(&I);
        Roots.insert(&I);
      }
    }
  // Early exit.
  if (Worklist.empty() || (TTI && !SeenExtFromIllegalType))
    return MinBWs;

  // Now proceed breadth-first, unioning values together.
  while (!Worklist.empty()) {
    Value *Val = Worklist.pop_back_val();
    Value *Leader = ECs.getOrInsertLeaderValue(Val);

    if (Visited.count(Val))
      continue;
    Visited.insert(Val);

    // Non-instructions terminate a chain successfully.
    if (!isa<Instruction>(Val))
      continue;
    Instruction *I = cast<Instruction>(Val);

    // If we encounter a type that is larger than 64 bits, we can't represent
    // it so bail out.
    if (DB.getDemandedBits(I).getBitWidth() > 64)
      return MapVector<Instruction *, uint64_t>();

    uint64_t V = DB.getDemandedBits(I).getZExtValue();
    DBits[Leader] |= V;
    DBits[I] = V;

    // Casts, loads and instructions outside of our range terminate a chain
    // successfully.
    if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) ||
        !InstructionSet.count(I))
      continue;

    // Unsafe casts terminate a chain unsuccessfully. We can't do anything
    // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to
    // transform anything that relies on them.
    if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) ||
        !I->getType()->isIntegerTy()) {
      DBits[Leader] |= ~0ULL;
      continue;
    }

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    if (isa<PHINode>(I))
      continue;

    if (DBits[Leader] == ~0ULL)
      // All bits demanded, no point continuing.
      continue;

    for (Value *O : cast<User>(I)->operands()) {
      ECs.unionSets(Leader, O);
      Worklist.push_back(O);
    }
  }

  // Now that we've discovered all values, walk them to see if there are
  // any users we didn't see. If there are, we can't optimize that
  // chain.
  for (auto &I : DBits)
    for (auto *U : I.first->users())
      if (U->getType()->isIntegerTy() && DBits.count(U) == 0)
        DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL;

  for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) {
    uint64_t LeaderDemandedBits = 0;
    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI)
      LeaderDemandedBits |= DBits[*MI];

    uint64_t MinBW = (sizeof(LeaderDemandedBits) * 8) -
                     llvm::countLeadingZeros(LeaderDemandedBits);
    // Round up to a power of 2.
    if (!isPowerOf2_64((uint64_t)MinBW))
      MinBW = NextPowerOf2(MinBW);

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    // If we are required to shrink a PHI, abandon this entire equivalence class.
    bool Abort = false;
    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI)
      if (isa<PHINode>(*MI) && MinBW < (*MI)->getType()->getScalarSizeInBits()) {
        Abort = true;
        break;
      }
    if (Abort)
      continue;

    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI) {
      if (!isa<Instruction>(*MI))
        continue;
      Type *Ty = (*MI)->getType();
      if (Roots.count(*MI))
        Ty = cast<Instruction>(*MI)->getOperand(0)->getType();
      if (MinBW < Ty->getScalarSizeInBits())
        MinBWs[cast<Instruction>(*MI)] = MinBW;
    }
  }

  return MinBWs;
}

/// \returns \p Inst after propagating metadata from \p VL.
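///
/// A minimal usage sketch (hypothetical vectorizer code; VecLoad is an
/// assumed new vector load standing in for the scalar loads in VL):
///   propagateMetadata(VecLoad, VL);
/// This merges the !tbaa, !alias.scope, !noalias, !fpmath and !nontemporal
/// metadata of all instructions in VL onto VecLoad.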
Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (auto Kind : { LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                     LLVMContext::MD_noalias, LLVMContext::MD_fpmath,
                     LLVMContext::MD_nontemporal }) {
    MDNode *MD = I0->getMetadata(Kind);

    for (int J = 1, E = VL.size(); MD && J != E; ++J) {
      const Instruction *IJ = cast<Instruction>(VL[J]);
      MDNode *IMD = IJ->getMetadata(Kind);
      switch (Kind) {
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_alias_scope:
        MD = MDNode::getMostGenericAliasScope(MD, IMD);
        break;
      case LLVMContext::MD_noalias:
        MD = MDNode::intersect(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      case LLVMContext::MD_nontemporal:
        MD = MDNode::intersect(MD, IMD);
        break;
      default:
        llvm_unreachable("unhandled metadata");
      }
    }

    Inst->setMetadata(Kind, MD);
  }

  return Inst;
}