//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"

using namespace llvm;

/// Compute the linearized index of a member in a nested aggregate/struct/array
/// by recursing and accumulating CurIndex as long as there are indices in the
/// index list.
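///
/// For example (a worked example, not exhaustive): given the struct type
/// { i32, { i32, i32 }, i32 }, the scalars are numbered 0..3 in declaration
/// order, so the index list {1, 1} yields linear index 2.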
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(*EI, nullptr, nullptr, CurIndex);
    }
    assert(!Indices && "Unexpected out-of-bounds index");
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    unsigned NumElts = ATy->getNumElements();
    // Compute the linear offset contributed by one element of the array.
    unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
    if (Indices) {
      assert(*Indices < NumElts && "Unexpected out-of-bounds index");
      // If the index is inside the array, compute the linear offset of the
      // requested element and recurse into it with the rest of the index list.
      CurIndex += EltLinearOffset * *Indices;
      return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
    }
    CurIndex += EltLinearOffset * NumElts;
    return CurIndex;
  }
  // Ty is a non-aggregate type, which occupies exactly one linearized slot.
  return CurIndex + 1;
}

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
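/// For example, with a typical DataLayout (an illustrative layout; the real
/// offsets come from DL), the type { i32, [2 x float] } yields
/// ValueVTs = { i32, f32, f32 } and Offsets = { 0, 4, 8 }.
///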
void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, DL, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, DL, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(DL, Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
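/// In particular, if V is the global @llvm.eh.catch.all.value, the type info
/// is taken from its initializer, which may also be a null pointer (denoting
/// a catch-all).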
GlobalValue *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  GlobalVariable *Var = dyn_cast<GlobalVariable>(V);

  if (Var && Var->getName() == "llvm.eh.catch.all.value") {
    assert(Var->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = Var->getInitializer();
    GV = dyn_cast<GlobalValue>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
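///
/// For example (illustrative), an asm with the constraint string "=*m,r"
/// matches: the indirect memory output is treated as a memory access, as is
/// any constraint that the target classifies as C_Memory.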
bool
llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
                                const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operands access memory.
    if (CI.isIndirect)
      return true;
  }

  return false;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
  case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
  case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
  case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
  case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
  case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
  default: return CC;
  }
}

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase &TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}

/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
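///
/// For example (an illustrative trace; the full set of looked-through
/// operations is in the function body), starting from %v in
///   %agg = insertvalue {i8*, i32} undef, i8* %p, 0
///   %v   = extractvalue {i8*, i32} %agg, 0
/// this walks back to %p, with ValLoc tracking which sub-element is involved.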
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {
  while (true) {
    // Try to look through V; if V is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr with all-zero indices.
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits = std::min(DataBits, I->getType()->getPrimitiveSizeInBits());
      NoopInput = Op;
    } else if (auto CS = ImmutableCallSite(I)) {
      // Look through a call that returns one of its arguments via the
      // "returned" attribute.
      const Value *ReturnedOp = CS.getReturnedArgOperand();
      if (ReturnedOp && isNoopBitcast(ReturnedOp->getType(), I->getType(), TLI))
        NoopInput = ReturnedOp;
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // The value may come from either the aggregate or the scalar.
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (ValLoc.size() >= InsertLoc.size() &&
          std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in;
        // no change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of
      // the previous aggregate. Combine the two paths to obtain the true
      // address of our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}

/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
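///
/// For example (illustrative), the sequence
///   %full = tail call i64 @callee()
///   %part = trunc i64 %full to i32
///   ret i32 %part
/// only discards high bits, so it qualifies when the target allows the
/// truncation and AllowDifferingSizes is set.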
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {

  // Trace the sub-value needed by the return value as far back up the graph as
  // possible, in the hope that it will intersect with the value produced by the
  // call. In the simple case with no "returned" attribute, the hope is actually
  // that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there; it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case
  // without a "returned" attribute, the search stops immediately and the loop
  // is a no-op.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look
  // through extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}

/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(CompositeType *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}

/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate.
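///
/// For example (illustrative), when iterating over {i32, {i64, float}}: from
/// the leaf at Path = [0] (the i32), one call advances to Path = [1, 0] with
/// SubTypes = [outer, {i64, float}], i.e. the i64.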
static bool advanceToNextLeafType(SmallVectorImpl<CompositeType *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of
  // the coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType = SubTypes.back()->getTypeAtIndex(Path.back());
  while (DeeperType->isAggregateType()) {
    CompositeType *CT = cast<CompositeType>(DeeperType);
    if (!indexReallyValid(CT, 0))
      return true;

    SubTypes.push_back(CT);
    Path.push_back(0);

    DeeperType = CT->getTypeAtIndex(0U);
  }

  return true;
}

/// Find the first non-empty, scalar-like type in Next and set up the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would set
/// up Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the
/// first i32 in that type.
static bool firstRealType(Type *Next,
                          SmallVectorImpl<CompositeType *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Next->isAggregateType() &&
         indexReallyValid(cast<CompositeType>(Next), 0)) {
    SubTypes.push_back(cast<CompositeType>(Next));
    Path.push_back(0);
    Next = cast<CompositeType>(Next)->getTypeAtIndex(0U);
  }

  // If there's no Path now, Next was originally scalar already (or an empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<CompositeType *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType());

  return true;
}


/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
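///
/// For example (illustrative IR), this call is in tail call position:
///   %res = tail call i32 @callee(i32 %x)
///   ret i32 %res
/// whereas a load or store placed between the call and the ret would
/// disqualify it.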
bool llvm::isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const TerminatorInst *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!TM.Options.GuaranteedTailCallOpt || !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !isSafeToSpeculativelyExecute(I))
    for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !isSafeToSpeculativelyExecute(&*BBI))
        return false;
    }

  const Function *F = ExitBB->getParent();
  return returnTypeIsEligibleForTailCall(
      F, I, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering());
}

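/// Check whether the caller's return-value attributes (on F) and the callee's
/// (on the call instruction I) are compatible with a tail call. A summary of
/// the rules encoded below: noalias is ignored, a zeroext/signext on the
/// caller must be matched by the callee, and any other mismatch rejects the
/// tail call.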
bool llvm::attributesPermitTailCall(const Function *F, const Instruction *I,
                                    const ReturnInst *Ret,
                                    const TargetLoweringBase &TLI,
                                    bool *AllowDifferingSizes) {
  // ADS may be null, so don't write to it directly.
  bool DummyADS;
  bool &ADS = AllowDifferingSizes ? *AllowDifferingSizes : DummyADS;
  ADS = true;

  AttrBuilder CallerAttrs(F->getAttributes(), AttributeList::ReturnIndex);
  AttrBuilder CalleeAttrs(cast<CallInst>(I)->getAttributes(),
                          AttributeList::ReturnIndex);

  // NoAlias is completely benign as far as the calling convention goes; it
  // shouldn't affect whether the call is a tail call.
  CallerAttrs.removeAttribute(Attribute::NoAlias);
  CalleeAttrs.removeAttribute(Attribute::NoAlias);

  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK, but the
  // only safe option is to reject the tail call.
  return CallerAttrs == CalleeAttrs;
}

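/// Check whether the value returned by the "ret" can be produced, modulo
/// code-free operations (and, for memcpy-like intrinsics, the libcall's
/// returned first argument), by the candidate tail call I.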
bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  bool AllowDifferingSizes;
  if (!attributesPermitTailCall(F, I, Ret, TLI, &AllowDifferingSizes))
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  // Intrinsics like llvm.memcpy have no return value, but the expanded
  // libcall may or may not have one. On most platforms, the intrinsic is
  // expanded to a call to memcpy in libc, which returns its first argument.
  // On other platforms, such as arm-none-eabi, it may be expanded to a
  // library call with no return value, such as __aeabi_memcpy.
  const CallInst *Call = cast<CallInst>(I);
  if (Function *F = Call->getCalledFunction()) {
    Intrinsic::ID IID = F->getIntrinsicID();
    if (((IID == Intrinsic::memcpy &&
          TLI.getLibcallName(RTLIB::MEMCPY) == StringRef("memcpy")) ||
         (IID == Intrinsic::memmove &&
          TLI.getLibcallName(RTLIB::MEMMOVE) == StringRef("memmove")) ||
         (IID == Intrinsic::memset &&
          TLI.getLibcallName(RTLIB::MEMSET) == StringRef("memset"))) &&
        RetVal == Call->getArgOperand(0))
      return true;
  }

  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<CompositeType *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing's actually returned, so it doesn't matter what the callee put
  // there; it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction; the
      // rest are essentially undef. The type doesn't really matter, but we
      // need *something*.
      Type *SlotType = RetSubTypes.back()->getTypeAtIndex(RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    SmallVector<unsigned, 4> TmpRetPath(RetPath.rbegin(), RetPath.rend());
    SmallVector<unsigned, 4> TmpCallPath(CallPath.rbegin(), CallPath.rend());

    // Finally, we can check whether the value produced by the tail call at
    // this index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI,
                              F->getParent()->getDataLayout()))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}

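/// Record MBB and every block reachable from it as a member of EHScope,
/// stopping at blocks that start a new EH scope or end in a return: a
/// flood-fill over the CFG, implemented by the worklist loop below.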
static void collectEHScopeMembers(
    DenseMap<const MachineBasicBlock *, int> &EHScopeMembership, int EHScope,
    const MachineBasicBlock *MBB) {
  SmallVector<const MachineBasicBlock *, 16> Worklist = {MBB};
  while (!Worklist.empty()) {
    const MachineBasicBlock *Visiting = Worklist.pop_back_val();
    // Don't follow blocks which start new scopes.
    if (Visiting->isEHPad() && Visiting != MBB)
      continue;

    // Add this MBB to our scope.
    auto P = EHScopeMembership.insert(std::make_pair(Visiting, EHScope));

    // Don't revisit blocks.
    if (!P.second) {
      assert(P.first->second == EHScope && "MBB is part of two scopes!");
      continue;
    }

    // Returns are boundaries where scope transfer can occur; don't follow
    // successors.
    if (Visiting->isReturnBlock())
      continue;

    for (const MachineBasicBlock *Succ : Visiting->successors())
      Worklist.push_back(Succ);
  }
}

DenseMap<const MachineBasicBlock *, int>
llvm::getEHScopeMembership(const MachineFunction &MF) {
  DenseMap<const MachineBasicBlock *, int> EHScopeMembership;

  // We don't have anything to do if there aren't any EH scopes.
  if (!MF.hasEHScopes())
    return EHScopeMembership;

  int EntryBBNumber = MF.front().getNumber();
  bool IsSEH = isAsynchronousEHPersonality(
      classifyEHPersonality(MF.getFunction().getPersonalityFn()));

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<const MachineBasicBlock *, 16> EHScopeBlocks;
  SmallVector<const MachineBasicBlock *, 16> UnreachableBlocks;
  SmallVector<const MachineBasicBlock *, 16> SEHCatchPads;
  SmallVector<std::pair<const MachineBasicBlock *, int>, 16> CatchRetSuccessors;
  for (const MachineBasicBlock &MBB : MF) {
    if (MBB.isEHScopeEntry()) {
      EHScopeBlocks.push_back(&MBB);
    } else if (IsSEH && MBB.isEHPad()) {
      SEHCatchPads.push_back(&MBB);
    } else if (MBB.pred_empty()) {
      UnreachableBlocks.push_back(&MBB);
    }

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();

    // CatchPads are not scopes for SEH, so do not consider CatchRet to
    // transfer control to another scope.
    if (MBBI == MBB.end() || MBBI->getOpcode() != TII->getCatchReturnOpcode())
      continue;

    // FIXME: SEH CatchPads are not necessarily in the parent function:
    // they could be inside a finally block.
    const MachineBasicBlock *Successor = MBBI->getOperand(0).getMBB();
    const MachineBasicBlock *SuccessorColor = MBBI->getOperand(1).getMBB();
    CatchRetSuccessors.push_back(
        {Successor, IsSEH ? EntryBBNumber : SuccessorColor->getNumber()});
  }

  // We don't have anything to do if there aren't any EH scope entry blocks.
  if (EHScopeBlocks.empty())
    return EHScopeMembership;

  // Identify all the basic blocks reachable from the function entry.
  collectEHScopeMembers(EHScopeMembership, EntryBBNumber, &MF.front());
  // All blocks not part of a scope are in the parent function.
  for (const MachineBasicBlock *MBB : UnreachableBlocks)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Next, identify all the blocks inside the scopes.
  for (const MachineBasicBlock *MBB : EHScopeBlocks)
    collectEHScopeMembers(EHScopeMembership, MBB->getNumber(), MBB);
  // SEH CatchPads aren't really scopes; handle them separately.
  for (const MachineBasicBlock *MBB : SEHCatchPads)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Finally, identify all the targets of a catchret.
  for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
       CatchRetSuccessors)
    collectEHScopeMembers(EHScopeMembership, CatchRetPair.second,
                          CatchRetPair.first);
  return EHScopeMembership;
}