//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities --*- C++ ------*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
///
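/// For instance, given the aggregate type { i32, { float, float }, i8 },
/// the scalar members linearize in declaration order as i32 -> 0,
/// float -> 1, float -> 2, i8 -> 3.  A hypothetical caller asking for the
/// second field of the nested struct would therefore see:
///
///   unsigned Idx[] = {1, 1};                 // illustrative values only
///   ComputeLinearIndex(Ty, Idx, Idx + 2, 0); // returns 2
///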
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
        EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(*EI, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
      if (Indices && *Indices == i)
        return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(EltTy, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
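/// For instance, on a target whose TargetData lays out i32 and float as
/// 4 bytes each with no inter-element padding (an assumption made here
/// purely for illustration), the type { i32, [2 x float] } would produce:
///
///   ValueVTs = { MVT::i32, MVT::f32, MVT::f32 }
///   Offsets  = { 0, 4, 8 }
///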
void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
                           SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
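///
/// For instance, a typeinfo operand is typically a global such as @_ZTIi
/// bitcast to i8* (an illustrative name; any global works the same way);
/// stripping the cast recovers the global. A null pointer, or an
/// llvm.eh.catch.all.value whose initializer is null, denotes a catch-all,
/// for which this returns null.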
GlobalVariable *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalVariable *GV = dyn_cast<GlobalVariable>(V);

  if (GV && GV->getName() == "llvm.eh.catch.all.value") {
    assert(GV->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = GV->getInitializer();
    GV = dyn_cast<GlobalVariable>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
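///
/// For instance, an inline asm with the constraint string "=*m,r" has an
/// indirect memory output, so this returns true, while a pure register
/// string such as "=r,r" does not. (These strings are illustrative
/// examples, not the only forms that qualify.)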
bool
llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
                                const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operand accesses access memory.
    if (CI.isIndirect)
      return true;
  }

  return false;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code.  This includes
/// consideration of global floating-point math flags.
///
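/// For example, FCMP_UEQ normally maps to the unordered ISD::SETUEQ, but
/// when NoNaNsFPMath is set the ordered/unordered distinction is moot and
/// the plain ISD::SETEQ is returned instead.
///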
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  ISD::CondCode FPC, FOC;
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
  case FCmpInst::FCMP_OEQ:   FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
  case FCmpInst::FCMP_OGT:   FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
  case FCmpInst::FCMP_OGE:   FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
  case FCmpInst::FCMP_OLT:   FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
  case FCmpInst::FCMP_OLE:   FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
  case FCmpInst::FCMP_ONE:   FOC = ISD::SETNE; FPC = ISD::SETONE; break;
  case FCmpInst::FCMP_ORD:   FOC = FPC = ISD::SETO;   break;
  case FCmpInst::FCMP_UNO:   FOC = FPC = ISD::SETUO;  break;
  case FCmpInst::FCMP_UEQ:   FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
  case FCmpInst::FCMP_UGT:   FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
  case FCmpInst::FCMP_UGE:   FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
  case FCmpInst::FCMP_ULT:   FOC = ISD::SETLT; FPC = ISD::SETULT; break;
  case FCmpInst::FCMP_ULE:   FOC = ISD::SETLE; FPC = ISD::SETULE; break;
  case FCmpInst::FCMP_UNE:   FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
  case FCmpInst::FCMP_TRUE:  FOC = FPC = ISD::SETTRUE; break;
  default:
    llvm_unreachable("Invalid FCmp predicate opcode!");
    FOC = FPC = ISD::SETFALSE;
    break;
  }
  if (NoNaNsFPMath)
    return FOC;
  else
    return FPC;
}

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
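/// The mapping preserves signedness: for example, the signed compare
/// ICMP_SLT maps to ISD::SETLT while the unsigned ICMP_ULT maps to
/// ISD::SETULT.
///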
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
    return ISD::SETNE;
  }
}

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
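///
/// A minimal sketch of the two cases (illustrative IR; @f and @g are
/// hypothetical):
///
///   %r = call i32 @f()
///   ret i32 %r                ; qualifies: nothing interposes, and the
///                             ; return value is the unmodified call result
///
///   %r = call i32 @f()
///   store i32 %r, i32* @g     ; a side-effecting instruction between the
///   ret i32 %r                ; call and the return disqualifies the call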
bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
                                const TargetLowering &TLI) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const TerminatorInst *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!GuaranteedTailCallOpt || !isa<UnreachableInst>(Term))) return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !I->isSafeToSpeculativelyExecute())
    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
         --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !BBI->isSafeToSpeculativelyExecute())
        return false;
    }

  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore noalias because it doesn't affect the call sequence.
  const Function *F = ExitBB->getParent();
  unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
  if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
    return false;

  // Otherwise, make sure the unmodified return value of I is the return value.
  for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
       U = dyn_cast<Instruction>(U->getOperand(0))) {
    if (!U)
      return false;
    if (!U->hasOneUse())
      return false;
    if (U == I)
      break;
    // Check for a truly no-op truncate.
    if (isa<TruncInst>(U) &&
        TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
      continue;
    // Check for a truly no-op bitcast.
    if (isa<BitCastInst>(U) &&
        (U->getOperand(0)->getType() == U->getType() ||
         (U->getOperand(0)->getType()->isPointerTy() &&
          U->getType()->isPointerTy())))
      continue;
    // Otherwise it's not a true no-op.
    return false;
  }

  return true;
}

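/// Variant of the above that runs on the SelectionDAG rather than on an IR
/// call site, for cases such as libcalls introduced during legalization,
/// which have no IR instruction to inspect. After the attribute checks,
/// the target hook isUsedByReturnOnly decides whether Node's only user is
/// the function's return.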
bool llvm::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
                                const TargetLowering &TLI) {
  const Function *F = DAG.getMachineFunction().getFunction();

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore noalias because it doesn't affect the call sequence.
  unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
  if (CallerRetAttr & ~Attribute::NoAlias)
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
    return false;

  // Check if the only use is a function return node.
  return TLI.isUsedByReturnOnly(Node);
}
304