//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"

using namespace llvm;
/// Compute the linearized index of a member in a nested aggregate/struct/array
/// by recursing and accumulating CurIndex as long as there are indices in the
/// index list.
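///
/// A small worked example (illustrative only; the type below is hypothetical
/// and not taken from this file): for Ty = {{i32, i32}, i32} the scalar
/// leaves are numbered 0, 1 and 2 in declaration order, so
/// \code
///   unsigned Indices[] = {1};  // select the trailing i32 of the outer struct
///   unsigned Linear = ComputeLinearIndex(Ty, Indices, Indices + 1, 0);
///   // Linear == 2: the two leaves of the nested {i32, i32} come first.
/// \endcode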
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(*EI, nullptr, nullptr, CurIndex);
    }
    assert(!Indices && "Unexpected out of bound");
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    unsigned NumElts = ATy->getNumElements();
    // Compute the linear offset when advancing by one element of the array.
    unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
    if (Indices) {
      assert(*Indices < NumElts && "Unexpected out of bound");
      // If the index is inside the array, compute the index to the requested
      // elt and recurse inside the element with the end of the indices list.
      CurIndex += EltLinearOffset* *Indices;
      return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
    }
    CurIndex += EltLinearOffset*NumElts;
    return CurIndex;
  }
  // Base case: we've reached a non-aggregate leaf type; it occupies one slot.
  return CurIndex + 1;
}

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
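/// A hedged usage sketch (the type, offsets and alignment below are
/// illustrative, assuming a data layout where i32 and float are 4 bytes):
/// \code
///   SmallVector<EVT, 4> VTs;
///   SmallVector<uint64_t, 4> Offs;
///   ComputeValueVTs(TLI, DL, Ty, VTs, &Offs);  // Ty = {i32, [2 x float]}
///   // VTs  == { i32, f32, f32 }
///   // Offs == { 0, 4, 8 }
/// \endcode
///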
void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, DL, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, DL, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(DL, Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  GlobalVariable *Var = dyn_cast<GlobalVariable>(V);

  if (Var && Var->getName() == "llvm.eh.catch.all.value") {
    assert(Var->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = Var->getInitializer();
    GV = dyn_cast<GlobalValue>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool
llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
                                const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operand accesses access memory.
    if (CI.isIndirect)
      return true;
  }

  return false;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
  case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
  case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
  case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
  case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
  case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
  default: return CC;
  }
}

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase &TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}

/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
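///
/// Illustrative sketch (the IR below is hypothetical; looking through the
/// trunc depends on TLI.allowTruncateForTailCall, and the pointer width is
/// assumed to be 64 bits):
/// \code
///   %p = call i8* @f()
///   %i = ptrtoint i8* %p to i64   ; same width as a pointer, so a no-op
///   %t = trunc i64 %i to i32      ; free if the target says so
///   ret i32 %t
/// \endcode
/// Starting from %t, this walks back through the trunc and the ptrtoint and
/// returns the call itself, recording 32 in DataBits.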
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {
  while (true) {
    // Try to look through V; if V is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits = std::min(DataBits, I->getType()->getPrimitiveSizeInBits());
      NoopInput = Op;
    } else if (isa<CallInst>(I)) {
      // Look through call (skipping callee)
      for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 1;
           i != e; ++i) {
        unsigned attrInd = i - I->op_begin() + 1;
        if (cast<CallInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
            isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
          NoopInput = *i;
          break;
        }
      }
    } else if (isa<InvokeInst>(I)) {
      // Look through invoke (skipping BB, BB, Callee)
      for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 3;
           i != e; ++i) {
        unsigned attrInd = i - I->op_begin() + 1;
        if (cast<InvokeInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
            isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
          NoopInput = *i;
          break;
        }
      }
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (ValLoc.size() >= InsertLoc.size() &&
          std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in, no
        // change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of the
      // previous aggregate. Combine the two paths to obtain the true address of
      // our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}

/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
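///
/// For example (hypothetical IR, assuming the target treats the truncate as
/// free):
/// \code
///   %r = tail call i64 @callee()
///   %t = trunc i64 %r to i32
///   ret i32 %t
/// \endcode
/// Here the ret merely discards the top 32 bits of what the call produced, so
/// this returns true when AllowDifferingSizes is set (i.e. no zext/sext
/// attributes force the sizes to match exactly).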
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {

  // Trace the sub-value needed by the return value as far back up the graph as
  // possible, in the hope that it will intersect with the value produced by the
  // call. In the simple case with no "returned" attribute, the hope is actually
  // that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there, it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case
  // without a "returned" attribute, the search is blocked immediately and the
  // loop is a no-op.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look through
  // extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}

/// For an aggregate type, determine whether a given index is within bounds or
/// not.
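///
/// For instance, for [4 x i32] the indices 0 through 3 are valid and 4 is not;
/// for an empty struct ({}) no index is valid at all, which is how empty
/// aggregates end up being treated as leaves by the traversal code below.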
static bool indexReallyValid(CompositeType *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}

/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate
static bool advanceToNextLeafType(SmallVectorImpl<CompositeType *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of the
  // coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType = SubTypes.back()->getTypeAtIndex(Path.back());
  while (DeeperType->isAggregateType()) {
    CompositeType *CT = cast<CompositeType>(DeeperType);
    if (!indexReallyValid(CT, 0))
      return true;

    SubTypes.push_back(CT);
    Path.push_back(0);

    DeeperType = CT->getTypeAtIndex(0U);
  }

  return true;
}

/// Find the first non-empty, scalar-like type in Next and set up the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would set
/// up Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the
/// first i32 in that type.
static bool firstRealType(Type *Next,
                          SmallVectorImpl<CompositeType *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Next->isAggregateType() &&
         indexReallyValid(cast<CompositeType>(Next), 0)) {
    SubTypes.push_back(cast<CompositeType>(Next));
    Path.push_back(0);
    Next = cast<CompositeType>(Next)->getTypeAtIndex(0U);
  }

  // If there's no Path now, Next was originally scalar already (or empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<CompositeType *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType());

  return true;
}


/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
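///
/// A minimal illustration (hypothetical IR): the call below is in tail call
/// position because nothing that needs scheduling sits between it and the ret:
/// \code
///   %r = tail call i32 @callee(i32 %x)
///   ret i32 %r
/// \endcode
/// An intervening instruction with side effects (a store, for instance) would
/// disqualify it.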
bool llvm::isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const TerminatorInst *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!TM.Options.GuaranteedTailCallOpt || !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !isSafeToSpeculativelyExecute(I))
    for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !isSafeToSpeculativelyExecute(&*BBI))
        return false;
    }

  const Function *F = ExitBB->getParent();
  return returnTypeIsEligibleForTailCall(
      F, I, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering());
}

bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  AttrBuilder CallerAttrs(F->getAttributes(),
                          AttributeSet::ReturnIndex);
  AttrBuilder CalleeAttrs(cast<CallInst>(I)->getAttributes(),
                          AttributeSet::ReturnIndex);

  // Noalias is completely benign as far as calling convention goes, it
  // shouldn't affect whether the call is a tail call.
  CallerAttrs = CallerAttrs.removeAttribute(Attribute::NoAlias);
  CalleeAttrs = CalleeAttrs.removeAttribute(Attribute::NoAlias);

  bool AllowDifferingSizes = true;
  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    AllowDifferingSizes = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    AllowDifferingSizes = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK but the
  // only safe option is to reject the tail call.
  if (CallerAttrs != CalleeAttrs)
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<CompositeType *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing's actually returned, so it doesn't matter what the callee put
  // there; it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction, the
      // rest are essentially undef. The type doesn't really matter, but we need
      // *something*.
      Type *SlotType = RetSubTypes.back()->getTypeAtIndex(RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    SmallVector<unsigned, 4> TmpRetPath(RetPath.rbegin(), RetPath.rend());
    SmallVector<unsigned, 4> TmpCallPath(CallPath.rbegin(), CallPath.rend());

    // Finally, we can check whether the value produced by the tail call at this
    // index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI,
                              F->getParent()->getDataLayout()))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while(nextRealType(RetSubTypes, RetPath));

  return true;
}

bool llvm::canBeOmittedFromSymbolTable(const GlobalValue *GV) {
  if (!GV->hasLinkOnceODRLinkage())
    return false;

  // We assume that anyone who sets global unnamed_addr on a non-constant knows
  // what they're doing.
  if (GV->hasGlobalUnnamedAddr())
    return true;

  // If it is a non-constant variable, it needs to be uniqued across shared
  // objects.
  if (const GlobalVariable *Var = dyn_cast<GlobalVariable>(GV)) {
    if (!Var->isConstant())
      return false;
  }

  return GV->hasAtLeastLocalUnnamedAddr();
}

static void collectFuncletMembers(
    DenseMap<const MachineBasicBlock *, int> &FuncletMembership, int Funclet,
    const MachineBasicBlock *MBB) {
  SmallVector<const MachineBasicBlock *, 16> Worklist = {MBB};
  while (!Worklist.empty()) {
    const MachineBasicBlock *Visiting = Worklist.pop_back_val();
    // Don't follow blocks which start new funclets.
    if (Visiting->isEHPad() && Visiting != MBB)
      continue;

    // Add this MBB to our funclet.
    auto P = FuncletMembership.insert(std::make_pair(Visiting, Funclet));

    // Don't revisit blocks.
    if (!P.second) {
      assert(P.first->second == Funclet && "MBB is part of two funclets!");
      continue;
    }

    // Returns are boundaries where funclet transfer can occur, so don't follow
    // successors.
    if (Visiting->isReturnBlock())
      continue;

    for (const MachineBasicBlock *Succ : Visiting->successors())
      Worklist.push_back(Succ);
  }
}

DenseMap<const MachineBasicBlock *, int>
llvm::getFuncletMembership(const MachineFunction &MF) {
  DenseMap<const MachineBasicBlock *, int> FuncletMembership;

  // We don't have anything to do if there aren't any EH pads.
  if (!MF.getMMI().hasEHFunclets())
    return FuncletMembership;

  int EntryBBNumber = MF.front().getNumber();
  bool IsSEH = isAsynchronousEHPersonality(
      classifyEHPersonality(MF.getFunction()->getPersonalityFn()));

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<const MachineBasicBlock *, 16> FuncletBlocks;
  SmallVector<const MachineBasicBlock *, 16> UnreachableBlocks;
  SmallVector<const MachineBasicBlock *, 16> SEHCatchPads;
  SmallVector<std::pair<const MachineBasicBlock *, int>, 16> CatchRetSuccessors;
  for (const MachineBasicBlock &MBB : MF) {
    if (MBB.isEHFuncletEntry()) {
      FuncletBlocks.push_back(&MBB);
    } else if (IsSEH && MBB.isEHPad()) {
      SEHCatchPads.push_back(&MBB);
    } else if (MBB.pred_empty()) {
      UnreachableBlocks.push_back(&MBB);
    }

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();
    // CatchPads are not funclets for SEH so do not consider CatchRet to
    // transfer control to another funclet.
    if (MBBI->getOpcode() != TII->getCatchReturnOpcode())
      continue;

    // FIXME: SEH CatchPads are not necessarily in the parent function:
    // they could be inside a finally block.
    const MachineBasicBlock *Successor = MBBI->getOperand(0).getMBB();
    const MachineBasicBlock *SuccessorColor = MBBI->getOperand(1).getMBB();
    CatchRetSuccessors.push_back(
        {Successor, IsSEH ? EntryBBNumber : SuccessorColor->getNumber()});
  }

  // We don't have anything to do if there aren't any EH pads.
  if (FuncletBlocks.empty())
    return FuncletMembership;

  // Identify all the basic blocks reachable from the function entry.
  collectFuncletMembers(FuncletMembership, EntryBBNumber, &MF.front());
  // All blocks not part of a funclet are in the parent function.
  for (const MachineBasicBlock *MBB : UnreachableBlocks)
    collectFuncletMembers(FuncletMembership, EntryBBNumber, MBB);
  // Next, identify all the blocks inside the funclets.
  for (const MachineBasicBlock *MBB : FuncletBlocks)
    collectFuncletMembers(FuncletMembership, MBB->getNumber(), MBB);
  // SEH CatchPads aren't really funclets, handle them separately.
  for (const MachineBasicBlock *MBB : SEHCatchPads)
    collectFuncletMembers(FuncletMembership, EntryBBNumber, MBB);
  // Finally, identify all the targets of a catchret.
  for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
       CatchRetSuccessors)
    collectFuncletMembers(FuncletMembership, CatchRetPair.second,
                          CatchRetPair.first);
  return FuncletMembership;
}