//===- CodeMetrics.cpp - Code cost measurements ---------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements code cost measurement utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"

using namespace llvm;

/// callIsSmall - If a call is likely to lower to a single target instruction,
/// or is otherwise deemed small, return true.
/// TODO: Perhaps handle calls like memcpy, strcpy, etc.?
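/// For example (illustrative IR; assumes the target lowers these directly),
/// both of the following calls would be considered small:
///   %a = call double @fabs(double %x)          ; single selection DAG node
///   %b = call double @llvm.sqrt.f64(double %x) ; intrinsic call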
bool llvm::callIsSmall(ImmutableCallSite CS) {
  if (isa<IntrinsicInst>(CS.getInstruction()))
    return true;

  const Function *F = CS.getCalledFunction();
  if (!F) return false;

  if (F->hasLocalLinkage()) return false;

  if (!F->hasName()) return false;

  StringRef Name = F->getName();

  // These will all likely lower to a single selection DAG node.
  if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
      Name == "fabs" || Name == "fabsf" || Name == "fabsl" ||
      Name == "sin" || Name == "sinf" || Name == "sinl" ||
      Name == "cos" || Name == "cosf" || Name == "cosl" ||
      Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl")
    return true;

  // These are all likely to be optimized into something smaller.
  if (Name == "pow" || Name == "powf" || Name == "powl" ||
      Name == "exp2" || Name == "exp2l" || Name == "exp2f" ||
      Name == "floor" || Name == "floorf" || Name == "ceil" ||
      Name == "round" || Name == "ffs" || Name == "ffsl" ||
      Name == "abs" || Name == "labs" || Name == "llabs")
    return true;

  return false;
}

bool llvm::isInstructionFree(const Instruction *I, const TargetData *TD) {
  if (isa<PHINode>(I))
    return true;

  // If a GEP has all constant indices, it will probably be folded with
  // a load/store.
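  // For example (illustrative IR), this GEP typically folds into the
  // addressing mode of the load that consumes it:
  //   %p = getelementptr inbounds %struct.S* %s, i32 0, i32 1
  //   %v = load i32* %p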
  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I))
    return GEP->hasAllConstantIndices();

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_value:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::objectsize:
    case Intrinsic::ptr_annotation:
    case Intrinsic::var_annotation:
      // These intrinsics don't count as size.
      return true;
    }
  }

  if (const CastInst *CI = dyn_cast<CastInst>(I)) {
    // Noop casts, including ptr <-> int, don't count.
    if (CI->isLosslessCast())
      return true;

    Value *Op = CI->getOperand(0);
    // An inttoptr cast is free so long as the input is a legal integer type
    // which doesn't contain values outside the range of a pointer.
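    // For example (illustrative IR; assumes a target with 64-bit pointers
    // where i64 is a legal integer type):
    //   %p = inttoptr i64 %x to i8*   ; free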
    if (isa<IntToPtrInst>(CI) && TD &&
        TD->isLegalInteger(Op->getType()->getScalarSizeInBits()) &&
        Op->getType()->getScalarSizeInBits() <= TD->getPointerSizeInBits())
      return true;

    // A ptrtoint cast is free so long as the result is a legal integer type
    // that is large enough to store the pointer.
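    // For example (illustrative IR; same target assumptions as above):
    //   %i = ptrtoint i8* %p to i64   ; free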
    if (isa<PtrToIntInst>(CI) && TD &&
        TD->isLegalInteger(CI->getType()->getScalarSizeInBits()) &&
        CI->getType()->getScalarSizeInBits() >= TD->getPointerSizeInBits())
      return true;

    // A trunc to a native type is free (assuming the target has compare and
    // shift-right instructions of the same width).
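    // For example (illustrative IR; assumes i32 is a legal integer type):
    //   %t = trunc i64 %x to i32   ; free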
    if (TD && isa<TruncInst>(CI) &&
        TD->isLegalInteger(TD->getTypeSizeInBits(CI->getType())))
      return true;
    // The result of a cmp instruction is often extended (to be used by other
    // cmp instructions, logical operations, or return instructions). These
    // extensions are usually no-ops on most sane targets.
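    // For example (illustrative IR):
    //   %c = icmp eq i32 %a, %b
    //   %z = zext i1 %c to i32   ; the zext is effectively free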
    if (isa<CmpInst>(Op))
      return true;
  }

  return false;
}

/// analyzeBasicBlock - Fill in the current structure with information gleaned
/// from the specified block.
void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
                                    const TargetData *TD) {
  ++NumBlocks;
  unsigned NumInstsBeforeThisBB = NumInsts;
  for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
       II != E; ++II) {
    if (isInstructionFree(II, TD))
      continue;

    // Special handling for calls.
    if (isa<CallInst>(II) || isa<InvokeInst>(II)) {
      ImmutableCallSite CS(cast<Instruction>(II));

      if (const Function *F = CS.getCalledFunction()) {
        // If a function is both internal and has a single use, then it is
        // extremely likely to get inlined in the future (it was probably
        // exposed by an interleaved devirtualization pass).
        if (!CS.isNoInline() && F->hasInternalLinkage() && F->hasOneUse())
          ++NumInlineCandidates;

        // If this call is to the function itself, then the function is
        // recursive. Inlining it into other functions is a bad idea, because
        // this is basically just a form of loop peeling, and our metrics
        // aren't useful for that case.
        if (F == BB->getParent())
          isRecursive = true;
      }

      if (!callIsSmall(CS)) {
        // Each argument to a call takes on average one instruction to set up.
        NumInsts += CS.arg_size();

        // We don't want inline asm to count as a call - that would prevent
        // loop unrolling. The argument setup cost is still real, though.
        if (!isa<InlineAsm>(CS.getCalledValue()))
          ++NumCalls;
      }
    }

    if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
      if (!AI->isStaticAlloca())
        usesDynamicAlloca = true;
    }

    if (isa<ExtractElementInst>(II) || II->getType()->isVectorTy())
      ++NumVectorInsts;

    ++NumInsts;
  }

  if (isa<ReturnInst>(BB->getTerminator()))
    ++NumRets;

  // We never want to inline functions that contain an indirectbr. This is
  // incorrect because all the blockaddresses (in static global initializers,
  // for example) would be referring to the original function, and this
  // indirect jump would jump from the inlined copy of the function into the
  // original function, which is extremely undefined behavior.
  // FIXME: This logic isn't really right; we can safely inline functions
  // with indirectbr's as long as no other function or global references the
  // blockaddress of a block within the current function. And as a QOI issue,
  // if someone is using a blockaddress without an indirectbr, and that
  // reference somehow ends up in another function or global, we probably
  // don't want to inline this function.
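  // For example (illustrative IR), inlining a function containing:
  //   indirectbr i8* %addr, [label %bb1, label %bb2]
  // where %addr was computed from blockaddress(@f, %bb1) would leave the
  // jump targeting a block in the original @f, not the inlined copy.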
  if (isa<IndirectBrInst>(BB->getTerminator()))
    containsIndirectBr = true;

  // Remember NumInsts for this BB.
  NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB;
}

void CodeMetrics::analyzeFunction(Function *F, const TargetData *TD) {
  // If this function contains a call that "returns twice" (e.g., setjmp or
  // _setjmp) and it isn't marked with "returns twice" itself, never inline it.
  // This is a hack because we depend on the user marking their local variables
  // as volatile if they are live across a setjmp call, and they probably
  // won't do this in callers.
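  // For example (illustrative IR), a function containing
  //   %r = call i32 @setjmp(i8* %buf) returns_twice
  // but not itself marked 'returns_twice' exposes this behavior.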
  exposesReturnsTwice = F->callsFunctionThatReturnsTwice() &&
    !F->hasFnAttr(Attribute::ReturnsTwice);

  // Look at the size of the callee.
  for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
    analyzeBasicBlock(&*BB, TD);
}
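
// Example usage (an illustrative sketch, not part of this file; it assumes a
// Function *F and a TargetData *TD provided by the calling pass):
//   CodeMetrics Metrics;
//   Metrics.analyzeFunction(F, TD);
//   if (!Metrics.isRecursive && !Metrics.containsIndirectBr &&
//       !Metrics.usesDynamicAlloca)
//     ; // consider F for inlining, weighing Metrics.NumInsts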