//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"

using namespace llvm;

/// Compute the linearized index of a member in a nested aggregate/struct/array
/// by recursing and accumulating CurIndex as long as there are indices in the
/// index list.
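///
/// For example (illustrative), in the aggregate type { i32, [2 x i64], i8 }
/// the index list {1, 1} selects the second i64, whose linearized index is 2
/// (the i32 occupies linear index 0 and the first i64 occupies index 1).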
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(*EI, nullptr, nullptr, CurIndex);
    }
    assert(!Indices && "Unexpected out of bound");
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    unsigned NumElts = ATy->getNumElements();
    // Compute the linear offset when jumping over one element of the array.
    unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
    if (Indices) {
      assert(*Indices < NumElts && "Unexpected out of bound");
      // If the index is inside the array, compute the index of the requested
      // element and recurse into it with the rest of the index list.
      CurIndex += EltLinearOffset* *Indices;
      return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
    }
    CurIndex += EltLinearOffset*NumElts;
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
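/// For example (illustrative), with a typical data layout the struct type
/// { i32, [2 x float] } produces ValueVTs = { i32, f32, f32 } and
/// Offsets = { 0, 4, 8 }.
///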
void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<EVT> *MemVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, DL, *EI, ValueVTs, MemVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(DL, Ty));
  if (MemVTs)
    MemVTs->push_back(TLI.getMemValueType(DL, Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  return ComputeValueVTs(TLI, DL, Ty, ValueVTs, /*MemVTs=*/nullptr, Offsets,
                         StartingOffset);
}

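/// computeValueLLTs - Like ComputeValueVTs, but computes a sequence of LLTs
/// (as used by GlobalISel) instead of EVTs. Note that, unlike ComputeValueVTs,
/// any offsets recorded here are expressed in bits rather than bytes.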
void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
                            SmallVectorImpl<LLT> &ValueTys,
                            SmallVectorImpl<uint64_t> *Offsets,
                            uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(&Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I)
      computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
                       StartingOffset + SL->getElementOffset(I));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                       StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty.isVoidTy())
    return;
  // Base case: we can get an LLT for this LLVM IR type.
  ValueTys.push_back(getLLTForType(Ty, DL));
  if (Offsets != nullptr)
    Offsets->push_back(StartingOffset * 8);
}

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  GlobalVariable *Var = dyn_cast<GlobalVariable>(V);

  if (Var && Var->getName() == "llvm.eh.catch.all.value") {
    assert(Var->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = Var->getInitializer();
    GV = dyn_cast<GlobalValue>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool
llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
                                const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operand accesses access memory.
    if (CI.isIndirect)
      return true;
  }

  return false;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

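/// getFCmpCodeWithoutNaN - Given an ISD floating-point condition code, return
/// the equivalent condition code with the ordered/unordered distinction
/// removed (for example, both SETOLT and SETULT map to SETLT).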
ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
  case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
  case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
  case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
  case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
  case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
  default: return CC;
  }
}

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase& TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}

/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
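///
/// For example (illustrative), for %q = bitcast i8* %p to i32*, this returns
/// %p, because a pointer-to-pointer bitcast generates no code.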
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {
  while (true) {
    // Try to look through V; if V is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits = std::min((uint64_t)DataBits,
                          I->getType()->getPrimitiveSizeInBits().getFixedSize());
      NoopInput = Op;
    } else if (auto CS = ImmutableCallSite(I)) {
      const Value *ReturnedOp = CS.getReturnedArgOperand();
      if (ReturnedOp && isNoopBitcast(ReturnedOp->getType(), I->getType(), TLI))
        NoopInput = ReturnedOp;
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (ValLoc.size() >= InsertLoc.size() &&
          std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in, no
        // change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of the
      // previous aggregate. Combine the two paths to obtain the true address of
      // our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}

/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
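///
/// For example (illustrative), given
///   %v = tail call i32 @callee()
///   %r = trunc i32 %v to i16
///   ret i16 %r
/// only the high bits of %v are discarded on the way to the ret, so this can
/// return true when the target allows truncation for tail calls and
/// AllowDifferingSizes is set.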
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {

  // Trace the sub-value needed by the return value as far back up the graph as
  // possible, in the hope that it will intersect with the value produced by the
  // call. In the simple case with no "returned" attribute, the hope is actually
  // that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there, it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case without
  // a "returned" attribute, the search will be blocked immediately and the loop
  // is a no-op.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look through
  // extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}

/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(CompositeType *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}

/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate.
static bool advanceToNextLeafType(SmallVectorImpl<CompositeType *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of the
  // coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType = SubTypes.back()->getTypeAtIndex(Path.back());
  while (DeeperType->isAggregateType()) {
    CompositeType *CT = cast<CompositeType>(DeeperType);
    if (!indexReallyValid(CT, 0))
      return true;

    SubTypes.push_back(CT);
    Path.push_back(0);

    DeeperType = CT->getTypeAtIndex(0U);
  }

  return true;
}

/// Find the first non-empty, scalar-like type in Next and setup the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would setup
/// Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the first
/// i32 in that type.
static bool firstRealType(Type *Next,
                          SmallVectorImpl<CompositeType *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Next->isAggregateType() &&
         indexReallyValid(cast<CompositeType>(Next), 0)) {
    SubTypes.push_back(cast<CompositeType>(Next));
    Path.push_back(0);
    Next = cast<CompositeType>(Next)->getTypeAtIndex(0U);
  }

  // If there's no Path now, Next was originally scalar already (or empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<CompositeType *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType());

  return true;
}


/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const Instruction *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      ((!TM.Options.GuaranteedTailCallOpt &&
        CS.getCallingConv() != CallingConv::Tail) || !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !isSafeToSpeculativelyExecute(I))
    for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      // A lifetime end or assume intrinsic should not stop tail call
      // optimization.
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(BBI))
        if (II->getIntrinsicID() == Intrinsic::lifetime_end ||
            II->getIntrinsicID() == Intrinsic::assume)
          continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !isSafeToSpeculativelyExecute(&*BBI))
        return false;
    }

  const Function *F = ExitBB->getParent();
  return returnTypeIsEligibleForTailCall(
      F, I, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering());
}

bool llvm::attributesPermitTailCall(const Function *F, const Instruction *I,
                                    const ReturnInst *Ret,
                                    const TargetLoweringBase &TLI,
                                    bool *AllowDifferingSizes) {
  // ADS may be null, so don't write to it directly.
  bool DummyADS;
  bool &ADS = AllowDifferingSizes ? *AllowDifferingSizes : DummyADS;
  ADS = true;

  AttrBuilder CallerAttrs(F->getAttributes(), AttributeList::ReturnIndex);
  AttrBuilder CalleeAttrs(cast<CallInst>(I)->getAttributes(),
                          AttributeList::ReturnIndex);

  // The following attributes are completely benign as far as calling convention
  // goes, they shouldn't affect whether the call is a tail call.
  CallerAttrs.removeAttribute(Attribute::NoAlias);
  CalleeAttrs.removeAttribute(Attribute::NoAlias);
  CallerAttrs.removeAttribute(Attribute::NonNull);
  CalleeAttrs.removeAttribute(Attribute::NonNull);
  CallerAttrs.removeAttribute(Attribute::Dereferenceable);
  CalleeAttrs.removeAttribute(Attribute::Dereferenceable);
  CallerAttrs.removeAttribute(Attribute::DereferenceableOrNull);
  CalleeAttrs.removeAttribute(Attribute::DereferenceableOrNull);

  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // Drop sext and zext return attributes if the result is not used.
  // This enables tail calls for code like:
  //
  // define void @caller() {
  // entry:
  //   %unused_result = tail call zeroext i1 @callee()
  //   br label %retlabel
  // retlabel:
  //   ret void
  // }
  if (I->use_empty()) {
    CalleeAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK but the
  // only safe option is to reject the tail call.
  return CallerAttrs == CalleeAttrs;
}

/// Check whether B is a bitcast of a pointer type to another pointer type,
/// which is equal to A.
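///
/// For example (illustrative), this returns true when A is %p and B is
/// %q = bitcast i8* %p to i32*.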
static bool isPointerBitcastEqualTo(const Value *A, const Value *B) {
  assert(A && B && "Expected non-null inputs!");

  auto *BitCastIn = dyn_cast<BitCastInst>(B);

  if (!BitCastIn)
    return false;

  if (!A->getType()->isPointerTy() || !B->getType()->isPointerTy())
    return false;

  return A == BitCastIn->getOperand(0);
}

bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  bool AllowDifferingSizes;
  if (!attributesPermitTailCall(F, I, Ret, TLI, &AllowDifferingSizes))
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  // Intrinsics like llvm.memcpy have no return value, but the expanded
  // libcall may or may not have a return value. On most platforms, it
  // will be expanded as memcpy in libc, which returns the first
  // argument. On other platforms like arm-none-eabi, memcpy may be
  // expanded as a library call without a return value, like __aeabi_memcpy.
  const CallInst *Call = cast<CallInst>(I);
  if (Function *F = Call->getCalledFunction()) {
    Intrinsic::ID IID = F->getIntrinsicID();
    if (((IID == Intrinsic::memcpy &&
          TLI.getLibcallName(RTLIB::MEMCPY) == StringRef("memcpy")) ||
         (IID == Intrinsic::memmove &&
          TLI.getLibcallName(RTLIB::MEMMOVE) == StringRef("memmove")) ||
         (IID == Intrinsic::memset &&
          TLI.getLibcallName(RTLIB::MEMSET) == StringRef("memset"))) &&
        (RetVal == Call->getArgOperand(0) ||
         isPointerBitcastEqualTo(RetVal, Call->getArgOperand(0))))
      return true;
  }

  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<CompositeType *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing's actually returned, so it doesn't matter what the callee put
  // there; it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction, the
      // rest are essentially undef. The type doesn't really matter, but we need
      // *something*.
      Type *SlotType = RetSubTypes.back()->getTypeAtIndex(RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    SmallVector<unsigned, 4> TmpRetPath(RetPath.rbegin(), RetPath.rend());
    SmallVector<unsigned, 4> TmpCallPath(CallPath.rbegin(), CallPath.rend());

    // Finally, we can check whether the value produced by the tail call at this
    // index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI,
                              F->getParent()->getDataLayout()))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}

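/// Mark every block reachable from MBB as a member of the given EH scope,
/// without entering other EH pads and without following successors of
/// EH-scope return blocks.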
static void collectEHScopeMembers(
    DenseMap<const MachineBasicBlock *, int> &EHScopeMembership, int EHScope,
    const MachineBasicBlock *MBB) {
  SmallVector<const MachineBasicBlock *, 16> Worklist = {MBB};
  while (!Worklist.empty()) {
    const MachineBasicBlock *Visiting = Worklist.pop_back_val();
    // Don't follow blocks which start new scopes.
    if (Visiting->isEHPad() && Visiting != MBB)
      continue;

    // Add this MBB to our scope.
    auto P = EHScopeMembership.insert(std::make_pair(Visiting, EHScope));

    // Don't revisit blocks.
    if (!P.second) {
      assert(P.first->second == EHScope && "MBB is part of two scopes!");
      continue;
    }

    // Returns are boundaries where scope transfer can occur, don't follow
    // successors.
    if (Visiting->isEHScopeReturnBlock())
      continue;

    for (const MachineBasicBlock *Succ : Visiting->successors())
      Worklist.push_back(Succ);
  }
}

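/// Compute a mapping from each machine basic block to the EH scope it belongs
/// to, identified by the block number of the scope's entry; blocks in the
/// parent function map to the entry block's number.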
DenseMap<const MachineBasicBlock *, int>
llvm::getEHScopeMembership(const MachineFunction &MF) {
  DenseMap<const MachineBasicBlock *, int> EHScopeMembership;

  // We don't have anything to do if there aren't any EH pads.
  if (!MF.hasEHScopes())
    return EHScopeMembership;

  int EntryBBNumber = MF.front().getNumber();
  bool IsSEH = isAsynchronousEHPersonality(
      classifyEHPersonality(MF.getFunction().getPersonalityFn()));

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<const MachineBasicBlock *, 16> EHScopeBlocks;
  SmallVector<const MachineBasicBlock *, 16> UnreachableBlocks;
  SmallVector<const MachineBasicBlock *, 16> SEHCatchPads;
  SmallVector<std::pair<const MachineBasicBlock *, int>, 16> CatchRetSuccessors;
  for (const MachineBasicBlock &MBB : MF) {
    if (MBB.isEHScopeEntry()) {
      EHScopeBlocks.push_back(&MBB);
    } else if (IsSEH && MBB.isEHPad()) {
      SEHCatchPads.push_back(&MBB);
    } else if (MBB.pred_empty()) {
      UnreachableBlocks.push_back(&MBB);
    }

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();

    // CatchPads are not scopes for SEH so do not consider CatchRet to
    // transfer control to another scope.
    if (MBBI == MBB.end() || MBBI->getOpcode() != TII->getCatchReturnOpcode())
      continue;

    // FIXME: SEH CatchPads are not necessarily in the parent function:
    // they could be inside a finally block.
    const MachineBasicBlock *Successor = MBBI->getOperand(0).getMBB();
    const MachineBasicBlock *SuccessorColor = MBBI->getOperand(1).getMBB();
    CatchRetSuccessors.push_back(
        {Successor, IsSEH ? EntryBBNumber : SuccessorColor->getNumber()});
  }

  // We don't have anything to do if there aren't any EH pads.
  if (EHScopeBlocks.empty())
    return EHScopeMembership;

  // Identify all the basic blocks reachable from the function entry.
  collectEHScopeMembers(EHScopeMembership, EntryBBNumber, &MF.front());
  // All blocks not part of a scope are in the parent function.
  for (const MachineBasicBlock *MBB : UnreachableBlocks)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Next, identify all the blocks inside the scopes.
  for (const MachineBasicBlock *MBB : EHScopeBlocks)
    collectEHScopeMembers(EHScopeMembership, MBB->getNumber(), MBB);
  // SEH CatchPads aren't really scopes, handle them separately.
  for (const MachineBasicBlock *MBB : SEHCatchPads)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Finally, identify all the targets of a catchret.
  for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
       CatchRetSuccessors)
    collectEHScopeMembers(EHScopeMembership, CatchRetPair.second,
                          CatchRetPair.first);
  return EHScopeMembership;
}