//===-- ArgumentPromotion.cpp - Promote by-reference arguments -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass promotes "by reference" arguments to be "by value" arguments. In
// practice, this means looking for internal functions that have pointer
// arguments. If it can prove, through the use of alias analysis, that an
// argument is *only* loaded, then it can pass the value into the function
// instead of the address of the value. This can cause recursive simplification
// of code and lead to the elimination of allocas (especially in C++ template
// code like the STL).
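//
// For example (an illustrative sketch only, with made-up names; this is not
// IR emitted verbatim by the pass):
//
//   define internal i32 @callee(i32* %p) {     ; before: %p is only loaded
//     %v = load i32* %p
//     ret i32 %v
//   }
//
// can be rewritten, together with all of its (direct) call sites, as:
//
//   define internal i32 @callee(i32 %p.val) {  ; after: the value is passed
//     ret i32 %p.val                           ; directly
//   }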
//
// This pass also handles aggregate arguments that are passed into a function,
// scalarizing them if the elements of the aggregate are only loaded. Note that
// by default it refuses to scalarize aggregates which would require passing in
// more than three operands to the function, because passing thousands of
// operands for a large array or structure is unprofitable! This limit can be
// configured or disabled, however.
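//
// As another illustrative sketch (made-up names again), a small byval struct
// argument whose fields are only loaded:
//
//   %pair = type { i32, i32 }
//   define internal i32 @callee(%pair* byval %p) { ... }
//
// can be rewritten so that each caller loads the two fields and passes them
// as scalars:
//
//   define internal i32 @callee(i32 %p.0, i32 %p.1) { ... }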
//
// Note that this transformation could also be done for arguments that are only
// stored to (returning the value instead), but this is not currently
// implemented. This case would be best handled when and if LLVM begins
// supporting multiple return values from functions.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "argpromotion"
#include "llvm/Transforms/IPO.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/CallGraphSCCPass.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include <set>
using namespace llvm;

STATISTIC(NumArgumentsPromoted , "Number of pointer arguments promoted");
STATISTIC(NumAggregatesPromoted, "Number of aggregate arguments promoted");
STATISTIC(NumByValArgsPromoted , "Number of byval arguments promoted");
STATISTIC(NumArgumentsDead     , "Number of dead pointer args eliminated");

namespace {
  /// ArgPromotion - The 'by reference' to 'by value' argument promotion pass.
  ///
  struct ArgPromotion : public CallGraphSCCPass {
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<AliasAnalysis>();
      CallGraphSCCPass::getAnalysisUsage(AU);
    }

    virtual bool runOnSCC(CallGraphSCC &SCC);
    static char ID; // Pass identification, replacement for typeid
    explicit ArgPromotion(unsigned maxElements = 3)
        : CallGraphSCCPass(ID), maxElements(maxElements) {
      initializeArgPromotionPass(*PassRegistry::getPassRegistry());
    }

    /// A vector used to hold the indices of a single GEP instruction
    typedef std::vector<uint64_t> IndicesVector;

  private:
    CallGraphNode *PromoteArguments(CallGraphNode *CGN);
    bool isSafeToPromoteArgument(Argument *Arg, bool isByVal) const;
    CallGraphNode *DoPromotion(Function *F,
                               SmallPtrSet<Argument*, 8> &ArgsToPromote,
                               SmallPtrSet<Argument*, 8> &ByValArgsToTransform);
    /// The maximum number of elements to expand, or 0 for unlimited.
    unsigned maxElements;
  };
}

char ArgPromotion::ID = 0;
INITIALIZE_PASS_BEGIN(ArgPromotion, "argpromotion",
                "Promote 'by reference' arguments to scalars", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_AG_DEPENDENCY(CallGraph)
INITIALIZE_PASS_END(ArgPromotion, "argpromotion",
                "Promote 'by reference' arguments to scalars", false, false)

Pass *llvm::createArgumentPromotionPass(unsigned maxElements) {
  return new ArgPromotion(maxElements);
}

bool ArgPromotion::runOnSCC(CallGraphSCC &SCC) {
  bool Changed = false, LocalChange;

  do {  // Iterate until we stop promoting from this SCC.
    LocalChange = false;
    // Attempt to promote arguments from all functions in this SCC.
    for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
      if (CallGraphNode *CGN = PromoteArguments(*I)) {
        LocalChange = true;
        SCC.ReplaceNode(*I, CGN);
      }
    }
    Changed |= LocalChange;  // Remember that we changed something.
  } while (LocalChange);

  return Changed;
}

/// PromoteArguments - This method checks the specified function to see if there
/// are any promotable arguments and if it is safe to promote the function (for
/// example, all callers are direct). If safe to promote some arguments, it
/// calls the DoPromotion method.
///
CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
  Function *F = CGN->getFunction();

  // Make sure that it is local to this module.
  if (!F || !F->hasLocalLinkage()) return 0;

  // First check: see if there are any pointer arguments! If not, quick exit.
  SmallVector<std::pair<Argument*, unsigned>, 16> PointerArgs;
  unsigned ArgNo = 0;
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end();
       I != E; ++I, ++ArgNo)
    if (I->getType()->isPointerTy())
      PointerArgs.push_back(std::pair<Argument*, unsigned>(I, ArgNo));
  if (PointerArgs.empty()) return 0;

  // Second check: make sure that all callers are direct callers. We can't
  // transform functions that have indirect callers. Also see if the function
  // is self-recursive.
  bool isSelfRecursive = false;
  for (Value::use_iterator UI = F->use_begin(), E = F->use_end();
       UI != E; ++UI) {
    CallSite CS(*UI);
    // Must be a direct call.
    if (CS.getInstruction() == 0 || !CS.isCallee(UI)) return 0;

    if (CS.getInstruction()->getParent()->getParent() == F)
      isSelfRecursive = true;
  }

  // Check to see which arguments are promotable. If an argument is promotable,
  // add it to ArgsToPromote.
  SmallPtrSet<Argument*, 8> ArgsToPromote;
  SmallPtrSet<Argument*, 8> ByValArgsToTransform;
  for (unsigned i = 0; i != PointerArgs.size(); ++i) {
    bool isByVal = F->paramHasAttr(PointerArgs[i].second+1, Attribute::ByVal);
    Argument *PtrArg = PointerArgs[i].first;
    Type *AgTy = cast<PointerType>(PtrArg->getType())->getElementType();

    // If this is a byval argument, and if the aggregate type is small, just
    // pass the elements, which is always safe.
    if (isByVal) {
      if (StructType *STy = dyn_cast<StructType>(AgTy)) {
        if (maxElements > 0 && STy->getNumElements() > maxElements) {
          DEBUG(dbgs() << "argpromotion not promoting argument '"
                << PtrArg->getName() << "' because it would require adding more"
                << " than " << maxElements << " arguments to the function.\n");
          continue;
        }

        // If all the elements are single-value types, we can promote it.
        bool AllSimple = true;
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          if (!STy->getElementType(i)->isSingleValueType()) {
            AllSimple = false;
            break;
          }
        }

        // Safe to transform, don't even bother trying to "promote" it.
        // Passing the elements as scalars will allow scalarrepl to hack on
        // the new alloca we introduce.
        if (AllSimple) {
          ByValArgsToTransform.insert(PtrArg);
          continue;
        }
      }
    }

    // If the argument is a recursive type and we're in a recursive
    // function, we could end up infinitely peeling the function argument.
    if (isSelfRecursive) {
      if (StructType *STy = dyn_cast<StructType>(AgTy)) {
        bool RecursiveType = false;
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          if (STy->getElementType(i) == PtrArg->getType()) {
            RecursiveType = true;
            break;
          }
        }
        if (RecursiveType)
          continue;
      }
    }

    // Otherwise, see if we can promote the pointer to its value.
    if (isSafeToPromoteArgument(PtrArg, isByVal))
      ArgsToPromote.insert(PtrArg);
  }

  // No promotable pointer arguments.
  if (ArgsToPromote.empty() && ByValArgsToTransform.empty())
    return 0;

  return DoPromotion(F, ArgsToPromote, ByValArgsToTransform);
}

/// AllCallersPassInValidPointerForArgument - Return true if we can prove that
/// all callers pass in a valid pointer for the specified function argument.
static bool AllCallersPassInValidPointerForArgument(Argument *Arg) {
  Function *Callee = Arg->getParent();

  unsigned ArgNo = std::distance(Callee->arg_begin(),
                                 Function::arg_iterator(Arg));

  // Look at all call sites of the function. At this point we know we only
  // have direct callers.
  for (Value::use_iterator UI = Callee->use_begin(), E = Callee->use_end();
       UI != E; ++UI) {
    CallSite CS(*UI);
    assert(CS && "Should only have direct calls!");

    if (!CS.getArgument(ArgNo)->isDereferenceablePointer())
      return false;
  }
  return true;
}

/// Returns true if Prefix is a prefix of Longer. That means Longer has a size
/// that is greater than or equal to the size of Prefix, and each element of
/// Prefix is the same as the corresponding element of Longer.
///
/// This means it also returns true when Prefix and Longer are equal!
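///
/// For example (illustrative): IsPrefix({1,2}, {1,2,3}) and IsPrefix({1,2},
/// {1,2}) are true, while IsPrefix({1,3}, {1,2,3}) and IsPrefix({1,2,3},
/// {1,2}) are false.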
static bool IsPrefix(const ArgPromotion::IndicesVector &Prefix,
                     const ArgPromotion::IndicesVector &Longer) {
  if (Prefix.size() > Longer.size())
    return false;
  for (unsigned i = 0, e = Prefix.size(); i != e; ++i)
    if (Prefix[i] != Longer[i])
      return false;
  return true;
}


/// Checks if Indices, or a prefix of Indices, is in Set.
static bool PrefixIn(const ArgPromotion::IndicesVector &Indices,
                     std::set<ArgPromotion::IndicesVector> &Set) {
  std::set<ArgPromotion::IndicesVector>::iterator Low;
  Low = Set.upper_bound(Indices);
  if (Low != Set.begin())
    Low--;
  // Low is now the last element smaller than or equal to Indices. This means
  // it points to a prefix of Indices (possibly Indices itself), if such
  // a prefix exists.
  //
  // This load is safe if any prefix of its operands is safe to load.
  return Low != Set.end() && IsPrefix(*Low, Indices);
}

/// Mark the given indices (ToMark) as safe in the given set of indices
/// (Safe). Marking safe usually means adding ToMark to Safe. However, if there
/// is already a prefix of ToMark in Safe, then ToMark is implicitly marked safe
/// already. Furthermore, any indices that ToMark is itself a prefix of are
/// removed from Safe (since they are implicitly safe because of ToMark now).
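///
/// For example (illustrative): with Safe = {{1}, {2,3}}, marking {1,2} is a
/// no-op because {1} is already a prefix of it, while marking {2} erases
/// {2,3} and inserts {2} in its place.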
static void MarkIndicesSafe(const ArgPromotion::IndicesVector &ToMark,
                            std::set<ArgPromotion::IndicesVector> &Safe) {
  std::set<ArgPromotion::IndicesVector>::iterator Low;
  Low = Safe.upper_bound(ToMark);
  // Guard against the case where Safe is empty
  if (Low != Safe.begin())
    Low--;
  // Low is now the last element smaller than or equal to ToMark. This
  // means it points to a prefix of ToMark (possibly ToMark itself), if
  // such a prefix exists.
  if (Low != Safe.end()) {
    if (IsPrefix(*Low, ToMark))
      // If there is already a prefix of these indices (or exactly these
      // indices) marked as safe, don't bother adding these indices
      return;

    // Increment Low, so we can use it as an "insert before" hint
    ++Low;
  }
  // Insert
  Low = Safe.insert(Low, ToMark);
  ++Low;
  // If ToMark was a prefix of longer index list(s), remove those
  std::set<ArgPromotion::IndicesVector>::iterator End = Safe.end();
  while (Low != End && IsPrefix(ToMark, *Low)) {
    std::set<ArgPromotion::IndicesVector>::iterator Remove = Low;
    ++Low;
    Safe.erase(Remove);
  }
}

/// isSafeToPromoteArgument - As you might guess from the name of this method,
/// it checks to see if it is both safe and useful to promote the argument.
/// This method limits promotion of aggregates to promoting at most maxElements
/// elements of the aggregate (three by default) in order to avoid exploding
/// the number of arguments passed in.
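///
/// As an illustrative sketch (made-up names), uses such as
///
///   %a = load i32* %arg
///   %p = getelementptr %pair* %arg, i64 0, i32 1
///   %b = load i32* %p
///
/// are promotable, whereas storing through %arg, passing %arg to another
/// function, or indexing it with a non-constant value is not.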
bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg, bool isByVal) const {
  typedef std::set<IndicesVector> GEPIndicesSet;

  // Quick exit for unused arguments
  if (Arg->use_empty())
    return true;

  // We can only promote this argument if all of the uses are loads, or are GEP
  // instructions (with constant indices) that are subsequently loaded.
  //
  // Promoting the argument causes it to be loaded in the caller
  // unconditionally. This is only safe if we can prove that either the load
  // would have happened in the callee anyway (i.e., there is a load in the
  // entry block) or the pointer passed in at every call site is guaranteed to
  // be valid.
  // In the former case, invalid loads can happen, but would have happened
  // anyway; in the latter case, invalid loads won't happen. This prevents us
  // from introducing an invalid load that wouldn't have happened in the
  // original code.
  //
  // This set will contain all sets of indices that are loaded in the entry
  // block, and thus are safe to unconditionally load in the caller.
  GEPIndicesSet SafeToUnconditionallyLoad;

  // This set contains all the sets of indices that we are planning to promote.
  // This makes it possible to limit the number of arguments added.
  GEPIndicesSet ToPromote;

  // If the pointer is always valid, any load with first index 0 is valid.
  if (isByVal || AllCallersPassInValidPointerForArgument(Arg))
    SafeToUnconditionallyLoad.insert(IndicesVector(1, 0));

  // First, iterate the entry block and mark loads of (geps of) arguments as
  // safe.
  BasicBlock *EntryBlock = Arg->getParent()->begin();
  // Declare this here so we can reuse it
  IndicesVector Indices;
  for (BasicBlock::iterator I = EntryBlock->begin(), E = EntryBlock->end();
       I != E; ++I)
    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      Value *V = LI->getPointerOperand();
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
        V = GEP->getPointerOperand();
        if (V == Arg) {
          // This load actually loads (part of) Arg? Check the indices then.
          Indices.reserve(GEP->getNumIndices());
          for (User::op_iterator II = GEP->idx_begin(), IE = GEP->idx_end();
               II != IE; ++II)
            if (ConstantInt *CI = dyn_cast<ConstantInt>(*II))
              Indices.push_back(CI->getSExtValue());
            else
              // We found a non-constant GEP index for this argument? Bail out
              // right away, can't promote this argument at all.
              return false;

          // Indices checked out, mark them as safe
          MarkIndicesSafe(Indices, SafeToUnconditionallyLoad);
          Indices.clear();
        }
      } else if (V == Arg) {
        // Direct loads are equivalent to a GEP with a single 0 index.
        MarkIndicesSafe(IndicesVector(1, 0), SafeToUnconditionallyLoad);
      }
    }

  // Now, iterate all uses of the argument to see if there are any uses that are
  // not (GEP+)loads, or any (GEP+)loads that are not safe to promote.
  SmallVector<LoadInst*, 16> Loads;
  IndicesVector Operands;
  for (Value::use_iterator UI = Arg->use_begin(), E = Arg->use_end();
       UI != E; ++UI) {
    User *U = *UI;
    Operands.clear();
    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      // Don't hack volatile/atomic loads
      if (!LI->isSimple()) return false;
      Loads.push_back(LI);
      // Direct loads are equivalent to a GEP with a zero index and then a load.
      Operands.push_back(0);
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
      if (GEP->use_empty()) {
        // Dead GEPs cause trouble later. Just remove them if we run into
        // them.
        getAnalysis<AliasAnalysis>().deleteValue(GEP);
        GEP->eraseFromParent();
        // TODO: This runs the above loop over and over again for dead GEPs.
        // Couldn't we just increment the UI iterator earlier and erase the
        // use?
        return isSafeToPromoteArgument(Arg, isByVal);
      }

      // Ensure that all of the indices are constants.
      for (User::op_iterator i = GEP->idx_begin(), e = GEP->idx_end();
           i != e; ++i)
        if (ConstantInt *C = dyn_cast<ConstantInt>(*i))
          Operands.push_back(C->getSExtValue());
        else
          return false;  // Not a constant operand GEP!

      // Ensure that the only users of the GEP are load instructions.
      for (Value::use_iterator UI = GEP->use_begin(), E = GEP->use_end();
           UI != E; ++UI)
        if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
          // Don't hack volatile/atomic loads
          if (!LI->isSimple()) return false;
          Loads.push_back(LI);
        } else {
          // Other uses than load?
          return false;
        }
    } else {
      return false;  // Not a load or a GEP.
    }

    // Now, see if it is safe to promote this load / loads of this GEP. Loading
    // is safe if Operands, or a prefix of Operands, is marked as safe.
    if (!PrefixIn(Operands, SafeToUnconditionallyLoad))
      return false;

    // See if we are already promoting a load with these indices. If not, check
    // to make sure that we aren't promoting too many elements. If so, nothing
    // to do.
    if (ToPromote.find(Operands) == ToPromote.end()) {
      if (maxElements > 0 && ToPromote.size() == maxElements) {
        DEBUG(dbgs() << "argpromotion not promoting argument '"
              << Arg->getName() << "' because it would require adding more "
              << "than " << maxElements << " arguments to the function.\n");
        // We limit aggregate promotion to only promoting up to a fixed number
        // of elements of the aggregate.
        return false;
      }
      ToPromote.insert(Operands);
    }
  }

  if (Loads.empty()) return true;  // No users, this is a dead argument.

  // Okay, now we know that the argument is only used by load instructions and
  // it is safe to unconditionally perform all of them. Use alias analysis to
  // check to see if the pointer is guaranteed to not be modified from entry of
  // the function to each of the load instructions.

  // Because there could be several/many load instructions, remember which
  // blocks we know to be transparent to the load.
  SmallPtrSet<BasicBlock*, 16> TranspBlocks;

  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  for (unsigned i = 0, e = Loads.size(); i != e; ++i) {
    // Check to see if the load is invalidated from the start of the block to
    // the load itself.
    LoadInst *Load = Loads[i];
    BasicBlock *BB = Load->getParent();

    AliasAnalysis::Location Loc = AA.getLocation(Load);
    if (AA.canInstructionRangeModify(BB->front(), *Load, Loc))
      return false;  // Pointer is invalidated!

    // Now check every path from the entry block to the load for transparency.
    // To do this, we perform a depth first search on the inverse CFG from the
    // loading block.
    for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
      BasicBlock *P = *PI;
      for (idf_ext_iterator<BasicBlock*, SmallPtrSet<BasicBlock*, 16> >
             I = idf_ext_begin(P, TranspBlocks),
             E = idf_ext_end(P, TranspBlocks); I != E; ++I)
        if (AA.canBasicBlockModify(**I, Loc))
          return false;
    }
  }

  // If the path from the entry of the function to each load is free of
  // instructions that potentially invalidate the load, we can make the
  // transformation!
  return true;
}

/// DoPromotion - This method actually performs the promotion of the specified
/// arguments, and returns the new function. At this point, we know that it's
/// safe to do so.
CallGraphNode *ArgPromotion::DoPromotion(Function *F,
                               SmallPtrSet<Argument*, 8> &ArgsToPromote,
                               SmallPtrSet<Argument*, 8> &ByValArgsToTransform) {

  // Start by computing a new prototype for the function, which is the same as
  // the old function, but has modified arguments.
  FunctionType *FTy = F->getFunctionType();
  std::vector<Type*> Params;

  typedef std::set<IndicesVector> ScalarizeTable;

  // ScalarizedElements - If we are promoting a pointer that has elements
  // accessed out of it, keep track of which elements are accessed so that we
  // can add one argument for each.
  //
  // Arguments that are directly loaded will have a zero-element index vector
  // here, to handle cases where there are both a direct load and GEP accesses.
  //
  std::map<Argument*, ScalarizeTable> ScalarizedElements;
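
  // As an illustrative sketch (made-up names): for an argument %A of type
  // %pair* that is loaded both directly and through a gep to its second
  // field, ScalarizedElements[%A] would be { {}, {0, 1} }: the empty vector
  // for the direct load and {0, 1} for the gep'd load, producing two new
  // scalar arguments.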

  // OriginalLoads - Keep track of a representative load instruction from the
  // original function so that we can tell the alias analysis implementation
  // what the new GEP/Load instructions we are inserting look like.
  std::map<IndicesVector, LoadInst*> OriginalLoads;

  // Attributes - Keep track of the parameter attributes for the arguments
  // that we are *not* promoting. For the ones that we do promote, the
  // parameter attributes are lost.
  SmallVector<AttributeWithIndex, 8> AttributesVec;
  const AttrListPtr &PAL = F->getAttributes();

  // Add any return attributes.
  if (Attributes attrs = PAL.getRetAttributes())
    AttributesVec.push_back(AttributeWithIndex::get(0, attrs));

  // First, determine the new argument list
  unsigned ArgIndex = 1;
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E;
       ++I, ++ArgIndex) {
    if (ByValArgsToTransform.count(I)) {
      // Simple byval argument? Just add all the struct element types.
      Type *AgTy = cast<PointerType>(I->getType())->getElementType();
      StructType *STy = cast<StructType>(AgTy);
      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
        Params.push_back(STy->getElementType(i));
      ++NumByValArgsPromoted;
    } else if (!ArgsToPromote.count(I)) {
      // Unchanged argument
      Params.push_back(I->getType());
      if (Attributes attrs = PAL.getParamAttributes(ArgIndex))
        AttributesVec.push_back(AttributeWithIndex::get(Params.size(), attrs));
    } else if (I->use_empty()) {
      // Dead argument (dead arguments are always marked as promotable).
      ++NumArgumentsDead;
    } else {
      // Okay, this is being promoted. This means that the only uses are loads
      // or GEPs which are only used by loads.

      // In this table, we will track which indices are loaded from the argument
      // (where direct loads are tracked as no indices).
      ScalarizeTable &ArgIndices = ScalarizedElements[I];
      for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E;
           ++UI) {
        Instruction *User = cast<Instruction>(*UI);
        assert(isa<LoadInst>(User) || isa<GetElementPtrInst>(User));
        IndicesVector Indices;
        Indices.reserve(User->getNumOperands() - 1);
        // Since loads will only have a single operand, and GEPs only a single
        // non-index operand, this will record direct loads without any indices,
        // and gep+loads with the GEP indices.
        for (User::op_iterator II = User->op_begin() + 1, IE = User->op_end();
             II != IE; ++II)
          Indices.push_back(cast<ConstantInt>(*II)->getSExtValue());
        // GEPs with a single 0 index can be merged with direct loads
        if (Indices.size() == 1 && Indices.front() == 0)
          Indices.clear();
        ArgIndices.insert(Indices);
        LoadInst *OrigLoad;
        if (LoadInst *L = dyn_cast<LoadInst>(User))
          OrigLoad = L;
        else
          // Take any load, we will use it only to update Alias Analysis
          OrigLoad = cast<LoadInst>(User->use_back());
        OriginalLoads[Indices] = OrigLoad;
      }

      // Add a parameter to the function for each element passed in.
      for (ScalarizeTable::iterator SI = ArgIndices.begin(),
             E = ArgIndices.end(); SI != E; ++SI) {
        // not allowed to dereference ->begin() if size() is 0
        Params.push_back(GetElementPtrInst::getIndexedType(I->getType(), *SI));
        assert(Params.back());
      }

      if (ArgIndices.size() == 1 && ArgIndices.begin()->empty())
        ++NumArgumentsPromoted;
      else
        ++NumAggregatesPromoted;
    }
  }

  // Add any function attributes.
  if (Attributes attrs = PAL.getFnAttributes())
    AttributesVec.push_back(AttributeWithIndex::get(~0, attrs));

  Type *RetTy = FTy->getReturnType();

  // Work around LLVM bug PR56: the CWriter cannot emit varargs functions which
  // have zero fixed arguments.
  bool ExtraArgHack = false;
  if (Params.empty() && FTy->isVarArg()) {
    ExtraArgHack = true;
    Params.push_back(Type::getInt32Ty(F->getContext()));
  }

  // Construct the new function type using the new arguments.
  FunctionType *NFTy = FunctionType::get(RetTy, Params, FTy->isVarArg());

  // Create the new function body and insert it into the module.
  Function *NF = Function::Create(NFTy, F->getLinkage(), F->getName());
  NF->copyAttributesFrom(F);

  DEBUG(dbgs() << "ARG PROMOTION: Promoting to:" << *NF << "\n"
        << "From: " << *F);

  // Recompute the parameter attributes list based on the new arguments for
  // the function.
  NF->setAttributes(AttrListPtr::get(AttributesVec.begin(),
                                     AttributesVec.end()));
  AttributesVec.clear();

  F->getParent()->getFunctionList().insert(F, NF);
  NF->takeName(F);

  // Get the alias analysis information that we need to update to reflect our
  // changes.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // Get the callgraph information that we need to update to reflect our
  // changes.
  CallGraph &CG = getAnalysis<CallGraph>();

  // Get a new callgraph node for NF.
  CallGraphNode *NF_CGN = CG.getOrInsertFunction(NF);

  // Loop over all of the callers of the function, transforming the call sites
  // to pass in the loaded pointers.
  //
  SmallVector<Value*, 16> Args;
  while (!F->use_empty()) {
    CallSite CS(F->use_back());
    assert(CS.getCalledFunction() == F);
    Instruction *Call = CS.getInstruction();
    const AttrListPtr &CallPAL = CS.getAttributes();

    // Add any return attributes.
    if (Attributes attrs = CallPAL.getRetAttributes())
      AttributesVec.push_back(AttributeWithIndex::get(0, attrs));

    // Loop over the operands, inserting GEP and loads in the caller as
    // appropriate.
    CallSite::arg_iterator AI = CS.arg_begin();
    ArgIndex = 1;
    for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end();
         I != E; ++I, ++AI, ++ArgIndex)
      if (!ArgsToPromote.count(I) && !ByValArgsToTransform.count(I)) {
        Args.push_back(*AI);  // Unmodified argument

        if (Attributes Attrs = CallPAL.getParamAttributes(ArgIndex))
          AttributesVec.push_back(AttributeWithIndex::get(Args.size(), Attrs));

      } else if (ByValArgsToTransform.count(I)) {
        // Emit a GEP and load for each element of the struct.
        Type *AgTy = cast<PointerType>(I->getType())->getElementType();
        StructType *STy = cast<StructType>(AgTy);
        Value *Idxs[2] = {
              ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), 0 };
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
          Value *Idx = GetElementPtrInst::Create(*AI, Idxs,
                                                 (*AI)->getName()+"."+utostr(i),
                                                 Call);
          // TODO: Tell AA about the new values?
          Args.push_back(new LoadInst(Idx, Idx->getName()+".val", Call));
        }
      } else if (!I->use_empty()) {
        // Non-dead argument: insert GEPs and loads as appropriate.
        ScalarizeTable &ArgIndices = ScalarizedElements[I];
        // Store the Value* version of the indices in here, but declare it now
        // for reuse.
        std::vector<Value*> Ops;
        for (ScalarizeTable::iterator SI = ArgIndices.begin(),
               E = ArgIndices.end(); SI != E; ++SI) {
          Value *V = *AI;
          LoadInst *OrigLoad = OriginalLoads[*SI];
          if (!SI->empty()) {
            Ops.reserve(SI->size());
            Type *ElTy = V->getType();
            for (IndicesVector::const_iterator II = SI->begin(),
                 IE = SI->end(); II != IE; ++II) {
              // Use i32 to index structs, and i64 for others (pointers/arrays).
              // This satisfies GEP constraints.
              Type *IdxTy = (ElTy->isStructTy() ?
                    Type::getInt32Ty(F->getContext()) :
                    Type::getInt64Ty(F->getContext()));
              Ops.push_back(ConstantInt::get(IdxTy, *II));
              // Keep track of the type we're currently indexing.
              ElTy = cast<CompositeType>(ElTy)->getTypeAtIndex(*II);
            }
            // And create a GEP to extract those indices.
            V = GetElementPtrInst::Create(V, Ops, V->getName()+".idx", Call);
            Ops.clear();
            AA.copyValue(OrigLoad->getOperand(0), V);
          }
          // Since we're replacing a load, make sure we take the alignment
          // of the previous load.
          LoadInst *newLoad = new LoadInst(V, V->getName()+".val", Call);
          newLoad->setAlignment(OrigLoad->getAlignment());
          // Transfer the TBAA info too.
          newLoad->setMetadata(LLVMContext::MD_tbaa,
                               OrigLoad->getMetadata(LLVMContext::MD_tbaa));
          Args.push_back(newLoad);
          AA.copyValue(OrigLoad, Args.back());
        }
      }

    if (ExtraArgHack)
      Args.push_back(Constant::getNullValue(Type::getInt32Ty(F->getContext())));

    // Push any varargs arguments on the list.
    for (; AI != CS.arg_end(); ++AI, ++ArgIndex) {
      Args.push_back(*AI);
      if (Attributes Attrs = CallPAL.getParamAttributes(ArgIndex))
        AttributesVec.push_back(AttributeWithIndex::get(Args.size(), Attrs));
    }

    // Add any function attributes.
    if (Attributes attrs = CallPAL.getFnAttributes())
      AttributesVec.push_back(AttributeWithIndex::get(~0, attrs));

    Instruction *New;
    if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      New = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
                               Args, "", Call);
      cast<InvokeInst>(New)->setCallingConv(CS.getCallingConv());
      cast<InvokeInst>(New)->setAttributes(AttrListPtr::get(AttributesVec.begin(),
                                                            AttributesVec.end()));
    } else {
      New = CallInst::Create(NF, Args, "", Call);
      cast<CallInst>(New)->setCallingConv(CS.getCallingConv());
      cast<CallInst>(New)->setAttributes(AttrListPtr::get(AttributesVec.begin(),
                                                          AttributesVec.end()));
      if (cast<CallInst>(Call)->isTailCall())
        cast<CallInst>(New)->setTailCall();
    }
    Args.clear();
    AttributesVec.clear();

    // Update the alias analysis implementation to know that we are replacing
    // the old call with a new one.
    AA.replaceWithNewValue(Call, New);

    // Update the callgraph to know that the callsite has been transformed.
    CallGraphNode *CalleeNode = CG[Call->getParent()->getParent()];
    CalleeNode->replaceCallEdge(Call, New, NF_CGN);

    if (!Call->use_empty()) {
      Call->replaceAllUsesWith(New);
      New->takeName(Call);
    }

    // Finally, remove the old call from the program, reducing the use-count of
    // F.
    Call->eraseFromParent();
  }

  // Since we have now created the new function, splice the body of the old
  // function right into the new function, leaving the old rotting hulk of the
  // function empty.
  NF->getBasicBlockList().splice(NF->begin(), F->getBasicBlockList());

  // Loop over the argument list, transferring uses of the old arguments over to
  // the new arguments, and transferring the names over as well.
  //
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(),
       I2 = NF->arg_begin(); I != E; ++I) {
    if (!ArgsToPromote.count(I) && !ByValArgsToTransform.count(I)) {
      // If this is an unmodified argument, move the name and users over to the
      // new version.
      I->replaceAllUsesWith(I2);
      I2->takeName(I);
      AA.replaceWithNewValue(I, I2);
      ++I2;
      continue;
    }

    if (ByValArgsToTransform.count(I)) {
      // In the callee, we create an alloca, and store each of the new incoming
      // arguments into the alloca.
      Instruction *InsertPt = NF->begin()->begin();

      // Just add all the struct element types.
      Type *AgTy = cast<PointerType>(I->getType())->getElementType();
      Value *TheAlloca = new AllocaInst(AgTy, 0, "", InsertPt);
      StructType *STy = cast<StructType>(AgTy);
      Value *Idxs[2] = {
            ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), 0 };

      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
        Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
        Value *Idx =
          GetElementPtrInst::Create(TheAlloca, Idxs,
                                    TheAlloca->getName()+"."+Twine(i),
                                    InsertPt);
        I2->setName(I->getName()+"."+Twine(i));
        new StoreInst(I2++, Idx, InsertPt);
      }

      // Anything that used the arg should now use the alloca.
      I->replaceAllUsesWith(TheAlloca);
      TheAlloca->takeName(I);
      AA.replaceWithNewValue(I, TheAlloca);
      continue;
    }

    if (I->use_empty()) {
      AA.deleteValue(I);
      continue;
    }

    // Otherwise, if we promoted this argument, then all users are load
    // instructions (or GEPs with only load users), and all loads should be
    // using the new argument that we added.
    ScalarizeTable &ArgIndices = ScalarizedElements[I];

    while (!I->use_empty()) {
      if (LoadInst *LI = dyn_cast<LoadInst>(I->use_back())) {
        assert(ArgIndices.begin()->empty() &&
               "Load element should sort to front!");
        I2->setName(I->getName()+".val");
        LI->replaceAllUsesWith(I2);
        AA.replaceWithNewValue(LI, I2);
        LI->eraseFromParent();
        DEBUG(dbgs() << "*** Promoted load of argument '" << I->getName()
              << "' in function '" << F->getName() << "'\n");
      } else {
        GetElementPtrInst *GEP = cast<GetElementPtrInst>(I->use_back());
        IndicesVector Operands;
        Operands.reserve(GEP->getNumIndices());
        for (User::op_iterator II = GEP->idx_begin(), IE = GEP->idx_end();
             II != IE; ++II)
          Operands.push_back(cast<ConstantInt>(*II)->getSExtValue());

        // GEPs with a single 0 index can be merged with direct loads
        if (Operands.size() == 1 && Operands.front() == 0)
          Operands.clear();

        Function::arg_iterator TheArg = I2;
        for (ScalarizeTable::iterator It = ArgIndices.begin();
             *It != Operands; ++It, ++TheArg) {
          assert(It != ArgIndices.end() && "GEP not handled??");
        }

        std::string NewName = I->getName();
        for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
          NewName += "." + utostr(Operands[i]);
        }
        NewName += ".val";
        TheArg->setName(NewName);

        DEBUG(dbgs() << "*** Promoted agg argument '" << TheArg->getName()
              << "' of function '" << NF->getName() << "'\n");

        // All of the uses must be load instructions. Replace them all with
        // the scalarized argument found above (TheArg).
        while (!GEP->use_empty()) {
          LoadInst *L = cast<LoadInst>(GEP->use_back());
          L->replaceAllUsesWith(TheArg);
          AA.replaceWithNewValue(L, TheArg);
          L->eraseFromParent();
        }
        AA.deleteValue(GEP);
        GEP->eraseFromParent();
      }
    }

    // Increment I2 past all of the arguments added for this promoted pointer.
    for (unsigned i = 0, e = ArgIndices.size(); i != e; ++i)
      ++I2;
  }

  // Notify the alias analysis implementation that we inserted a new argument.
  if (ExtraArgHack)
    AA.copyValue(Constant::getNullValue(Type::getInt32Ty(F->getContext())),
                 NF->arg_begin());

  // Tell the alias analysis that the old function is about to disappear.
  AA.replaceWithNewValue(F, NF);

  NF_CGN->stealCalledFunctionsFrom(CG[F]);

  // Now that the old function is dead, delete it. If there is a dangling
  // reference to the CallgraphNode, just leave the dead function around for
  // someone else to nuke.
  CallGraphNode *CGN = CG[F];
  if (CGN->getNumReferences() == 0)
    delete CG.removeFunctionFromModule(CGN);
  else
    F->setLinkage(Function::ExternalLinkage);

  return NF_CGN;
}