//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memcpyopt"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include <list>
using namespace llvm;

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet,    "Number of memcpys converted to memset");

static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
                                  bool &VariableIdxFound, const DataLayout &TD){
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (OpC == 0)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the ElementSize.
    uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}
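
// For example (a hypothetical GEP, not taken from this file):
//   %p = getelementptr { i32, [4 x i16] }* %s, i32 0, i32 1, i32 2
// GetOffsetFromIndex(%p, 1, ...) walks the indices starting at operand 1:
// index 0 contributes nothing, field 1 of the struct contributes its field
// offset (4 bytes), and index 2 into the i16 array contributes 2*2 = 4 bytes,
// for a total offset of 8.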

/// IsPointerOffset - Return true if Ptr2 is provably equal to Ptr1 plus a
/// constant offset, and return that constant offset.  For example, if A is an
/// array of 4-byte elements, Ptr1 might be &A[42] and Ptr2 might be &A[40];
/// in that case Offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            const DataLayout &TD) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();
  GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
  GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);

  bool VariableIdxFound = false;

  // If one pointer is a GEP and the other isn't, then see if the GEP is a
  // constant offset from the base, as in "P" and "gep P, 1".
  if (GEP1 && GEP2 == 0 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
    Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, TD);
    return !VariableIdxFound;
  }

  if (GEP2 && GEP1 == 0 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
    Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, TD);
    return !VariableIdxFound;
  }

  // Right now we handle the case where Ptr1/Ptr2 are both GEPs with an
  // identical base.  After that base, they may have some number of common (and
  // potentially variable) indices.  After those, each ends in some run of
  // constant indices, which determines their offset from each other.  We
  // handle no other case at this point.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}


/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the range
/// to [0, 2).  The third makes a new range [2, 3).  The fourth store joins the
/// two ranges into [0, 3) which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A half-open range that describes the span this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &TD) const;

};
} // end anon namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &TD) const {
  // If we found at least 4 stores to merge, or the range covers at least 16
  // bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (unsigned i = 0, e = TheStores.size(); i != e; ++i)
    if (!isa<StoreInst>(TheStores[i]))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the pointer size and assume that
  // this width can be stored.  If so, check to see whether we will end up
  // actually reducing the number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned NumPointerStores = Bytes/TD.getPointerSize();

  // Assume the remaining bytes if any are done a byte at a time.
  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}
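
// A short worked illustration of the heuristic above (assuming a 4-byte
// pointer size, i.e. TD.getPointerSize() == 4): three stores covering 8
// contiguous bytes give NumPointerStores = 2 and NumByteStores = 0, so
// 3 > 2 and the range is lowered to memset; three stores covering 6 bytes
// give 1 + 2 = 3, which is not an improvement, so the stores are left alone.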


namespace {
class MemsetRanges {
  /// Ranges - A sorted list of the memset ranges.  We use std::list here
  /// because each element is relatively large and expensive to copy.
  std::list<MemsetRange> Ranges;
  typedef std::list<MemsetRange>::iterator range_iterator;
  const DataLayout &TD;
public:
  MemsetRanges(const DataLayout &td) : TD(td) {}

  typedef std::list<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = TD.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize,
             SI->getPointerOperand(), SI->getAlignment(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);

};

} // end anon namespace


/// addRange - Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
///
/// Do a linear search of the ranges to see if this can be joined and/or to
/// find the insertion point in the list.  We keep the ranges sorted for
/// simplicity here.  This is a linear search of a linked list, which is ugly,
/// however the number of ranges is limited, so this won't get crazy slow.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;
  range_iterator I = Ranges.begin(), E = Ranges.end();

  while (I != E && Start > I->End)
    ++I;

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
  // to insert a new range.  Handle this now.
  if (I == E || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start        = Start;
    R.End          = End;
    R.StartPtr     = Ptr;
    R.Alignment    = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range.  In this case, it
  // couldn't possibly cause it to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != E && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {
  class MemCpyOpt : public FunctionPass {
    MemoryDependenceAnalysis *MD;
    TargetLibraryInfo *TLI;
    const DataLayout *TD;
  public:
    static char ID; // Pass identification, replacement for typeid
    MemCpyOpt() : FunctionPass(ID) {
      initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
      MD = 0;
      TLI = 0;
      TD = 0;
    }

    bool runOnFunction(Function &F);

  private:
    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTree>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<TargetLibraryInfo>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }

    // Helper functions
    bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
    bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI);
    bool processMemCpy(MemCpyInst *M);
    bool processMemMove(MemMoveInst *M);
    bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
                              uint64_t cpyLen, unsigned cpyAlign, CallInst *C);
    bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                       uint64_t MSize);
    bool processByValArgument(CallSite CS, unsigned ArgNo);
    Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
                                      Value *ByteVal);

    bool iterateOnFunction(Function &F);
  };

  char MemCpyOpt::ID = 0;
}

// createMemCpyOptPass - The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// tryMergingIntoMemset - When scanning forward over instructions, we look for
/// some other patterns to fold away.  In particular, this looks for stores to
/// neighboring locations of memory.  If it sees enough consecutive ones, it
/// attempts to merge them together into a memcpy/memset.
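///
/// For example (an illustrative sketch, not IR taken from a test):
///   store i8 0, i8* %P
///   %P1 = getelementptr i8* %P, i64 1
///   store i8 0, i8* %P1
///   %P2 = getelementptr i8* %P, i64 2
///   store i8 0, i8* %P2
///   %P3 = getelementptr i8* %P, i64 3
///   store i8 0, i8* %P3
/// may be merged into a single
///   call void @llvm.memset.p0i8.i64(i8* %P, i8 0, i64 4, i32 1, i1 false)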
Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
                                             Value *StartPtr, Value *ByteVal) {
  if (TD == 0) return 0;

  // Okay, so we now have a single store with a byte-splattable value.  Scan to
  // find all subsequent stores of the same value to offsets from the same
  // pointer.  Join these together into ranges, so we can decide whether
  // contiguous blocks are stored.
  MemsetRanges Ranges(*TD);

  BasicBlock::iterator BI = StartInst;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out.  We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      // Check to see if this stored value is of the same byte-splattable value.
      if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(),
                           Offset, *TD))
        break;

      Ranges.addStore(Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, *TD))
        break;

      Ranges.addMemSet(Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return 0;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block.  This ensures that the memset is
  // dominated by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  Instruction *AMemSet = 0;
  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemsetRange &Range = *I;

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(*TD))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment.
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      Type *EltType =
        cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = TD->getABITypeAlignment(EltType);
    }

    AMemSet =
      Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);

    DEBUG(dbgs() << "Replace stores:\n";
          for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
            dbgs() << *Range.TheStores[i] << '\n';
          dbgs() << "With: " << *AMemSet << '\n');

    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    // Zap all the stores.
    for (SmallVector<Instruction*, 16>::const_iterator
           SI = Range.TheStores.begin(),
           SE = Range.TheStores.end(); SI != SE; ++SI) {
      MD->removeInstruction(*SI);
      (*SI)->eraseFromParent();
    }
    ++NumMemSetInfer;
  }

  return AMemSet;
}


bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  if (TD == 0) return false;

  // Detect cases where we're performing call slot forwarding, but
  // happen to be using a load-store pair to implement it, rather than
  // a memcpy.
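  // A hypothetical sketch of such a pair (the call writes through %src):
  //   call void @init(%struct.S* %src)
  //   %val = load %struct.S* %src
  //   store %struct.S %val, %struct.S* %dest
  // The load/store pair behaves like memcpy(%dest <- %src), so the call slot
  // optimization below may be able to make the call write into %dest directly.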
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = 0;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
        AliasAnalysis::Location StoreLoc = AA.getLocation(SI);
        for (BasicBlock::iterator I = --BasicBlock::iterator(SI),
                                  E = C; I != E; --I) {
          if (AA.getModRefInfo(&*I, StoreLoc) != AliasAnalysis::NoModRef) {
            C = 0;
            break;
          }
        }
      }

      if (C) {
        unsigned storeAlign = SI->getAlignment();
        if (!storeAlign)
          storeAlign = TD->getABITypeAlignment(SI->getOperand(0)->getType());
        unsigned loadAlign = LI->getAlignment();
        if (!loadAlign)
          loadAlign = TD->getABITypeAlignment(LI->getType());

        bool changed = performCallSlotOptzn(LI,
                        SI->getPointerOperand()->stripPointerCasts(),
                        LI->getPointerOperand()->stripPointerCasts(),
                        TD->getTypeStoreSize(SI->getOperand(0)->getType()),
                        std::min(storeAlign, loadAlign), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset a byte
  // at a time, such as "0" or "-1" of any width, as well as values like
  // 0xA0A0A0A0 and 0.0 whose byte pattern repeats.
  if (Value *ByteVal = isBytewiseValue(SI->getOperand(0)))
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I;  // Don't invalidate iterator.
      return true;
    }

  return false;
}

bool MemCpyOpt::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I;  // Don't invalidate iterator.
      return true;
    }
  return false;
}


/// performCallSlotOptzn - takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
                                     Value *cpyDest, Value *cpySrc,
                                     uint64_t cpyLen, unsigned cpyAlign,
                                     CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.
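  //
  // In C-like pseudocode (a hypothetical illustration, not from a test):
  //   struct S s;                  // cpySrc: an otherwise-unused alloca
  //   init(&s);                    // the call C writes its result into s
  //   memcpy(&d, &s, sizeof(s));   // the memcpy being optimized
  // becomes
  //   init(&d);                    // s and the memcpy both become dead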

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  // Check that all of src is copied to dest.
  if (TD == 0) return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *
    srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = TD->getTypeAllocSize(A->getAllocatedType()) *
      destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // If the destination is an sret parameter then only accesses that are
    // outside of the returned struct type can trap.
    if (!A->hasStructRetAttr())
      return false;

    Type *StructTy = cast<PointerType>(A->getType())->getElementType();
    uint64_t destSize = TD->getTypeAllocSize(StructTy);

    if (destSize < srcSize)
      return false;
  } else {
    return false;
  }

  // Check that dest points to memory that is at least as aligned as src.
  unsigned srcAlign = srcAlloca->getAlignment();
  if (!srcAlign)
    srcAlign = TD->getABITypeAlignment(srcAlloca->getAllocatedType());
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
                                   srcAlloca->use_end());
  while (!srcUseList.empty()) {
    User *UI = srcUseList.pop_back_val();

    if (isa<BitCastInst>(UI)) {
      for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
           I != E; ++I)
        srcUseList.push_back(*I);
    } else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(UI)) {
      if (G->hasAllZeroIndices())
        for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
             I != E; ++I)
          srcUseList.push_back(*I);
      else
        return false;
    } else if (UI != C && UI != cpy) {
      return false;
    }
  }

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = getAnalysis<DominatorTree>();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  AliasAnalysis::ModRefResult MR = AA.getModRefInfo(C, cpyDest, srcSize);
  // If necessary, perform additional analysis.
  if (MR != AliasAnalysis::NoModRef)
    MR = AA.callCapturesBefore(C, cpyDest, srcSize, &DT);
  if (MR != AliasAnalysis::NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest
        : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                      cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == Dest->getType())
        CS.setArgument(i, Dest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(Dest,
                          CS.getArgument(i)->getType(), Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

/// processMemCpyMemCpyDependence - We've found that the (upward scanning)
/// memory dependence of memcpy 'M' is the memcpy 'MDep'.  Try to simplify M to
/// copy from MDep's input if we can.  MSize is the size of M's copy.
///
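/// For example (an illustrative sketch):
///   memcpy(B <- A)      ; MDep
///   memcpy(C <- B)      ; M
/// can, when the checks below succeed, be rewritten so that M reads from A:
///   memcpy(B <- A)
///   memcpy(C <- A)      ; B is no longer read here, exposing MDep to DSE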
bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                              uint64_t MSize) {
  // We can only transform memcpy's where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If the dep instruction is reading from our current input, then it is a noop
  // transfer and substituting the input won't change this instruction.  Just
  // ignore the input and let someone else zap MDep.  This handles cases like:
  //   memcpy(a <- a)
  //   memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the lengths of the memcpy's must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return false;

  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers.  For example, in:
  //   memcpy(a <- b)
  //   *b = 42;
  //   memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AA.getLocationForSource(MDep),
                                 false, M, M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap.  We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.

  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy?  For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());

  IRBuilder<> Builder(M);
  if (UseMemMove)
    Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                          Align, M->isVolatile());
  else
    Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                         Align, M->isVolatile());

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}


/// processMemCpy - perform simplification of memcpy's.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances).  This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  // We can only optimize non-volatile memcpy's with a constant size.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (CopySize == 0 || M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    MD->removeInstruction(M);
    M->eraseFromParent();
    return false;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
        IRBuilder<> Builder(M);
        Builder.CreateMemSet(M->getRawDest(), ByteVal, CopySize,
                             M->getAlignment(), false);
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  // There are two possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  MemDepResult DepInfo = MD->getDependency(M);
  if (DepInfo.isClobber()) {
    if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
      if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                               CopySize->getZExtValue(), M->getAlignment(),
                               C)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        return true;
      }
    }
  }

  AliasAnalysis::Location SrcLoc = AliasAnalysis::getLocationForSource(M);
  MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(SrcLoc, true,
                                                         M, M->getParent());
  if (SrcDepInfo.isClobber()) {
    if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
      return processMemCpyMemCpyDependence(M, MDep, CopySize->getZExtValue());
  }

  return false;
}

/// processMemMove - Transforms memmove calls to memcpy calls when the src/dst
/// are guaranteed not to alias.
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  if (!TLI->has(LibFunc::memmove))
    return false;

  // See if the pointers alias.
  if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(M)))
    return false;

  DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");

  // If not, then we know we can transform this.
  Module *Mod = M->getParent()->getParent()->getParent();
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(Mod, Intrinsic::memcpy,
                                                 ArgTys));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// processByValArgument - This is called on every byval argument in call sites.
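/// For example (an illustrative sketch):
///   memcpy(%tmp <- %src, N)
///   call void @foo(%struct.S* byval %tmp)
/// may, when the checks below succeed, be rewritten to
///   call void @foo(%struct.S* byval %src)
/// since the byval convention already makes the callee operate on a copy.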
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
  if (TD == 0) return false;

  // Find out what feeds this byval argument.
  Value *ByValArg = CS.getArgument(ArgNo);
  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = TD->getTypeAllocSize(ByValTy);
  MemDepResult DepInfo =
    MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),
                                 true, CS.getInstruction(),
                                 CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it.  If it is fed by
  // a memcpy, see if we can pass the source of the memcpy as the byval
  // argument instead of the memcpy's result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (MDep == 0 || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger than or equal to the size of the
  // byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (C1 == 0 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval.  If the call doesn't specify the alignment,
  // then it is some target specific value that we can't know.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo+1);
  if (ByValAlign == 0) return false;

  // If the byval's required alignment is greater than the memcpy's, check to
  // see if we can force the source of the memcpy to the alignment we need.
  // If we fail, we bail out.
  if (MDep->getAlignment() < ByValAlign &&
      getOrEnforceKnownAlignment(MDep->getSource(),ByValAlign, TD) < ByValAlign)
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy and
  // the byval call.
  //   memcpy(a <- b)
  //   *b = 42;
  //   foo(*a)
  // It would be invalid to transform the call into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AliasAnalysis::getLocationForSource(MDep),
                                 false, CS.getInstruction(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  DEBUG(dbgs() << "MemCpyOpt: Forwarding memcpy to byval:\n"
               << "  " << *MDep << "\n"
               << "  " << *CS.getInstruction() << "\n");

  // Otherwise we're good!  Update the byval argument.
  CS.setArgument(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// iterateOnFunction - Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (CallSite CS = (Value*)I) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.isByValArgument(i))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB->begin()) --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

// MemCpyOpt::runOnFunction - This is the main transformation entry point for a
// function.
//
bool MemCpyOpt::runOnFunction(Function &F) {
  bool MadeChange = false;
  MD = &getAnalysis<MemoryDependenceAnalysis>();
  TD = getAnalysisIfAvailable<DataLayout>();
  TLI = &getAnalysis<TargetLibraryInfo>();

  // If we don't have at least memset and memcpy, there is little point in doing
  // anything here.  These are required by a freestanding implementation, so if
  // even they are disabled, there is no point in trying hard.
  if (!TLI->has(LibFunc::memset) || !TLI->has(LibFunc::memcpy))
    return false;

  while (1) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  MD = 0;
  return MadeChange;
}