1 //===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass performs various transformations related to eliminating memcpy
10 // calls, or transforming sets of stores into memset's.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
15 #include "llvm/ADT/DenseSet.h"
16 #include "llvm/ADT/None.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/ADT/iterator_range.h"
21 #include "llvm/Analysis/AliasAnalysis.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/GlobalsModRef.h"
24 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
25 #include "llvm/Analysis/MemoryLocation.h"
26 #include "llvm/Analysis/TargetLibraryInfo.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/IR/Argument.h"
29 #include "llvm/IR/BasicBlock.h"
30 #include "llvm/IR/CallSite.h"
31 #include "llvm/IR/Constants.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/DerivedTypes.h"
34 #include "llvm/IR/Dominators.h"
35 #include "llvm/IR/Function.h"
36 #include "llvm/IR/GetElementPtrTypeIterator.h"
37 #include "llvm/IR/GlobalVariable.h"
38 #include "llvm/IR/IRBuilder.h"
39 #include "llvm/IR/InstrTypes.h"
40 #include "llvm/IR/Instruction.h"
41 #include "llvm/IR/Instructions.h"
42 #include "llvm/IR/IntrinsicInst.h"
43 #include "llvm/IR/Intrinsics.h"
44 #include "llvm/IR/LLVMContext.h"
45 #include "llvm/IR/Module.h"
46 #include "llvm/IR/Operator.h"
47 #include "llvm/IR/PassManager.h"
48 #include "llvm/IR/Type.h"
49 #include "llvm/IR/User.h"
50 #include "llvm/IR/Value.h"
51 #include "llvm/InitializePasses.h"
52 #include "llvm/Pass.h"
53 #include "llvm/Support/Casting.h"
54 #include "llvm/Support/Debug.h"
55 #include "llvm/Support/MathExtras.h"
56 #include "llvm/Support/raw_ostream.h"
57 #include "llvm/Transforms/Scalar.h"
58 #include "llvm/Transforms/Utils/Local.h"
59 #include <algorithm>
60 #include <cassert>
61 #include <cstdint>
62 #include <utility>
63
64 using namespace llvm;
65
66 #define DEBUG_TYPE "memcpyopt"
67
68 STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
69 STATISTIC(NumMemSetInfer, "Number of memsets inferred");
70 STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
71 STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");
72
73 namespace {
74
75 /// Represents a range of memset'd bytes with the ByteVal value.
76 /// This allows us to analyze stores like:
77 /// store 0 -> P+1
78 /// store 0 -> P+0
79 /// store 0 -> P+3
80 /// store 0 -> P+2
81 /// which sometimes happens with stores to arrays of structs etc. When we see
82 /// the first store, we make a range [1, 2). The second store extends the range
83 /// to [0, 2). The third makes a new range [2, 3). The fourth store joins the
84 /// two ranges into [0, 3) which is memset'able.
85 struct MemsetRange {
86 // Start/End - A half-open range that describes the span this range covers.
87 // The range is closed at the start and open at the end: [Start, End).
88 int64_t Start, End;
89
90 /// StartPtr - The getelementptr instruction that points to the start of the
91 /// range.
92 Value *StartPtr;
93
94 /// Alignment - The known alignment of the first store.
95 unsigned Alignment;
96
97 /// TheStores - The actual stores that make up this range.
98 SmallVector<Instruction*, 16> TheStores;
99
100 bool isProfitableToUseMemset(const DataLayout &DL) const;
101 };
102
103 } // end anonymous namespace
104
105 bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
106 // If we find at least 4 stores to merge, or the range covers at least 16 bytes, use memset.
107 if (TheStores.size() >= 4 || End-Start >= 16) return true;
108
109 // If there is nothing to merge, don't do anything.
110 if (TheStores.size() < 2) return false;
111
112 // If any of the stores are a memset, then it is always good to extend the
113 // memset.
114 for (Instruction *SI : TheStores)
115 if (!isa<StoreInst>(SI))
116 return true;
117
118 // Assume that the code generator is capable of merging pairs of stores
119 // together if it wants to.
120 if (TheStores.size() == 2) return false;
121
122 // If we have fewer than 8 stores, it can still be worthwhile to do this.
123 // For example, merging 4 i8 stores into an i32 store is almost always useful.
124 // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
125 // memset will be split into 2 32-bit stores anyway) and doing so can
126 // pessimize the llvm optimizer.
127 //
128 // Since we don't have perfect knowledge here, make some assumptions: assume
129 // the maximum GPR width is the same size as the largest legal integer
130 // size. Under that assumption, check to see whether we will end up actually reducing the
131 // number of stores used.
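// A worked example (illustrative only; assumes a target whose largest legal
// integer is 64 bits, i.e. MaxIntSize == 8 below): three stores covering 8
// contiguous bytes, say one i32 and two i16, give NumPointerStores = 8/8 = 1
// and NumByteStores = 8%8 = 0, so 3 > 1 and we form the memset. Three i32
// stores covering 12 bytes give 12/8 = 1 plus 12%8 = 4, and 3 > 5 is false,
// so we leave them alone.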
132 unsigned Bytes = unsigned(End-Start);
133 unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
134 if (MaxIntSize == 0)
135 MaxIntSize = 1;
136 unsigned NumPointerStores = Bytes / MaxIntSize;
137
138 // Assume that any remaining bytes are stored a byte at a time.
139 unsigned NumByteStores = Bytes % MaxIntSize;
140
141 // If we will reduce the # stores (according to this heuristic), do the
142 // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
143 // etc.
144 return TheStores.size() > NumPointerStores+NumByteStores;
145 }
146
147 namespace {
148
149 class MemsetRanges {
150 using range_iterator = SmallVectorImpl<MemsetRange>::iterator;
151
152 /// A sorted list of the memset ranges.
153 SmallVector<MemsetRange, 8> Ranges;
154
155 const DataLayout &DL;
156
157 public:
158 MemsetRanges(const DataLayout &DL) : DL(DL) {}
159
160 using const_iterator = SmallVectorImpl<MemsetRange>::const_iterator;
161
162 const_iterator begin() const { return Ranges.begin(); }
163 const_iterator end() const { return Ranges.end(); }
164 bool empty() const { return Ranges.empty(); }
165
166 void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
167 if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
168 addStore(OffsetFromFirst, SI);
169 else
170 addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
171 }
172
173 void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
174 int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());
175
176 addRange(OffsetFromFirst, StoreSize,
177 SI->getPointerOperand(), SI->getAlignment(), SI);
178 }
179
180 void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
181 int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
182 addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlignment(), MSI);
183 }
184
185 void addRange(int64_t Start, int64_t Size, Value *Ptr,
186 unsigned Alignment, Instruction *Inst);
187 };
188
189 } // end anonymous namespace
190
191 /// Add a new store to the MemsetRanges data structure. This adds a
192 /// new range for the specified store at the specified offset, merging into
193 /// existing ranges as appropriate.
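/// For example (illustrative): given existing ranges [0, 2) and [4, 6), adding
/// a 2-byte store at offset 2 first extends the first range to [0, 4) and then
/// folds the following range in, leaving a single [0, 6) range.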
194 void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
195 unsigned Alignment, Instruction *Inst) {
196 int64_t End = Start+Size;
197
198 range_iterator I = partition_point(
199 Ranges, [=](const MemsetRange &O) { return O.End < Start; });
200
201 // We now know that I == E, in which case we didn't find anything to merge
202 // with, or that Start <= I->End. If End < I->Start or I == E, then we need
203 // to insert a new range. Handle this now.
204 if (I == Ranges.end() || End < I->Start) {
205 MemsetRange &R = *Ranges.insert(I, MemsetRange());
206 R.Start = Start;
207 R.End = End;
208 R.StartPtr = Ptr;
209 R.Alignment = Alignment;
210 R.TheStores.push_back(Inst);
211 return;
212 }
213
214 // This store overlaps with I, add it.
215 I->TheStores.push_back(Inst);
216
217 // At this point, we may have an interval that completely contains our store.
218 // If so, just add it to the interval and return.
219 if (I->Start <= Start && I->End >= End)
220 return;
221
222 // Now we know that Start <= I->End and End >= I->Start so the range overlaps
223 // but is not entirely contained within the range.
224
225 // See if this store extends the start of the range. In this case, it couldn't
226 // possibly cause it to join the prior range, because otherwise we would have
227 // stopped on *it*.
228 if (Start < I->Start) {
229 I->Start = Start;
230 I->StartPtr = Ptr;
231 I->Alignment = Alignment;
232 }
233
234 // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
235 // is in or right at the end of I), and that End >= I->Start. Extend I out to
236 // End.
237 if (End > I->End) {
238 I->End = End;
239 range_iterator NextI = I;
240 while (++NextI != Ranges.end() && End >= NextI->Start) {
241 // Merge the range in.
242 I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
243 if (NextI->End > I->End)
244 I->End = NextI->End;
245 Ranges.erase(NextI);
246 NextI = I;
247 }
248 }
249 }
250
251 //===----------------------------------------------------------------------===//
252 // MemCpyOptLegacyPass Pass
253 //===----------------------------------------------------------------------===//
254
255 namespace {
256
257 class MemCpyOptLegacyPass : public FunctionPass {
258 MemCpyOptPass Impl;
259
260 public:
261 static char ID; // Pass identification, replacement for typeid
262
263 MemCpyOptLegacyPass() : FunctionPass(ID) {
264 initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry());
265 }
266
267 bool runOnFunction(Function &F) override;
268
269 private:
270 // This transformation requires dominator tree info
271 void getAnalysisUsage(AnalysisUsage &AU) const override {
272 AU.setPreservesCFG();
273 AU.addRequired<AssumptionCacheTracker>();
274 AU.addRequired<DominatorTreeWrapperPass>();
275 AU.addRequired<MemoryDependenceWrapperPass>();
276 AU.addRequired<AAResultsWrapperPass>();
277 AU.addRequired<TargetLibraryInfoWrapperPass>();
278 AU.addPreserved<GlobalsAAWrapperPass>();
279 AU.addPreserved<MemoryDependenceWrapperPass>();
280 }
281 };
282
283 } // end anonymous namespace
284
285 char MemCpyOptLegacyPass::ID = 0;
286
287 /// The public interface to this file...
288 FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); }
289
290 INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
291 false, false)
292 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
293 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
294 INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
295 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
296 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
297 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
298 INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
299 false, false)
300
301 /// When scanning forward over instructions, we look for some other patterns to
302 /// fold away. In particular, this looks for stores to neighboring locations of
303 /// memory. If it sees enough consecutive ones, it attempts to merge them
304 /// together into a memcpy/memset.
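/// For example (illustrative), a run of neighboring byte stores such as:
/// \code
///   p[0] = 0; p[1] = 0; p[2] = 0; p[3] = 0; ...; p[15] = 0;
/// \endcode
/// is collected into ranges and, when profitable, emitted as a single
/// memset(p, 0, 16).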
305 Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
306 Value *StartPtr,
307 Value *ByteVal) {
308 const DataLayout &DL = StartInst->getModule()->getDataLayout();
309
310 // Okay, so we now have a single store whose value can be byte-splatted. Scan to
311 // find all subsequent stores of the same value at offsets from the same pointer.
312 // Join these together into ranges, so we can decide whether contiguous blocks
313 // are stored.
314 MemsetRanges Ranges(DL);
315
316 BasicBlock::iterator BI(StartInst);
317 for (++BI; !BI->isTerminator(); ++BI) {
318 if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
319 // If the instruction is readnone, ignore it, otherwise bail out. We
320 // don't even allow readonly here because we don't want something like:
321 // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
322 if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
323 break;
324 continue;
325 }
326
327 if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
328 // If this is a store, see if we can merge it in.
329 if (!NextStore->isSimple()) break;
330
331 // Check to see if this stored value is the same byte-splattable value.
332 Value *StoredByte = isBytewiseValue(NextStore->getOperand(0), DL);
333 if (isa<UndefValue>(ByteVal) && StoredByte)
334 ByteVal = StoredByte;
335 if (ByteVal != StoredByte)
336 break;
337
338 // Check to see if this store is to a constant offset from the start ptr.
339 Optional<int64_t> Offset =
340 isPointerOffset(StartPtr, NextStore->getPointerOperand(), DL);
341 if (!Offset)
342 break;
343
344 Ranges.addStore(*Offset, NextStore);
345 } else {
346 MemSetInst *MSI = cast<MemSetInst>(BI);
347
348 if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
349 !isa<ConstantInt>(MSI->getLength()))
350 break;
351
352 // Check to see if this store is to a constant offset from the start ptr.
353 Optional<int64_t> Offset = isPointerOffset(StartPtr, MSI->getDest(), DL);
354 if (!Offset)
355 break;
356
357 Ranges.addMemSet(*Offset, MSI);
358 }
359 }
360
361 // If we have no ranges, then we just had a single store with nothing that
362 // could be merged in. This is a very common case of course.
363 if (Ranges.empty())
364 return nullptr;
365
366 // If we had at least one store that could be merged in, add the starting
367 // store as well. We try to avoid this unless there is at least something
368 // interesting as a small compile-time optimization.
369 Ranges.addInst(0, StartInst);
370
371 // If we create any memsets, we put them right before the first instruction that
372 // isn't part of the memset block. This ensures that the memset is dominated
373 // by any addressing instruction needed by the start of the block.
374 IRBuilder<> Builder(&*BI);
375
376 // Now that we have full information about ranges, loop over the ranges and
377 // emit memset's for anything big enough to be worthwhile.
378 Instruction *AMemSet = nullptr;
379 for (const MemsetRange &Range : Ranges) {
380 if (Range.TheStores.size() == 1) continue;
381
382 // If it is profitable to lower this range to memset, do so now.
383 if (!Range.isProfitableToUseMemset(DL))
384 continue;
385
386 // Otherwise, we do want to transform this! Create a new memset.
387 // Get the starting pointer of the block.
388 StartPtr = Range.StartPtr;
389
390 // Determine alignment
391 const Align Alignment = DL.getValueOrABITypeAlignment(
392 MaybeAlign(Range.Alignment),
393 cast<PointerType>(StartPtr->getType())->getElementType());
394
395 AMemSet = Builder.CreateMemSet(StartPtr, ByteVal, Range.End - Range.Start,
396 Alignment);
397 LLVM_DEBUG(dbgs() << "Replace stores:\n"; for (Instruction *SI
398 : Range.TheStores) dbgs()
399 << *SI << '\n';
400 dbgs() << "With: " << *AMemSet << '\n');
401
402 if (!Range.TheStores.empty())
403 AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());
404
405 // Zap all the stores.
406 for (Instruction *SI : Range.TheStores) {
407 MD->removeInstruction(SI);
408 SI->eraseFromParent();
409 }
410 ++NumMemSetInfer;
411 }
412
413 return AMemSet;
414 }
415
416 static Align findStoreAlignment(const DataLayout &DL, const StoreInst *SI) {
417 return DL.getValueOrABITypeAlignment(MaybeAlign(SI->getAlignment()),
418 SI->getOperand(0)->getType());
419 }
420
421 static Align findLoadAlignment(const DataLayout &DL, const LoadInst *LI) {
422 return DL.getValueOrABITypeAlignment(MaybeAlign(LI->getAlignment()),
423 LI->getType());
424 }
425
426 static Align findCommonAlignment(const DataLayout &DL, const StoreInst *SI,
427 const LoadInst *LI) {
428 Align StoreAlign = findStoreAlignment(DL, SI);
429 Align LoadAlign = findLoadAlignment(DL, LI);
430 return commonAlignment(StoreAlign, LoadAlign);
431 }
432
433 // This method tries to lift a store instruction before position P.
434 // It will lift the store and its arguments, plus anything else that
435 // may alias with them.
436 // The method returns true if it was successful.
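// For example (illustrative): in
//   %v = load %src
//   call void @f()        ; may write %src but not %dst
//   store %v, %dst
// the store (and anything it depends on) is lifted above the call so that the
// load/store pair can be promoted to a memcpy at that earlier position.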
437 static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
438 const LoadInst *LI) {
439 // If the store aliases this position, bail out early.
440 MemoryLocation StoreLoc = MemoryLocation::get(SI);
441 if (isModOrRefSet(AA.getModRefInfo(P, StoreLoc)))
442 return false;
443
444 // Keep track of the arguments of all instructions we plan to lift
445 // so we can make sure to lift them as well if appropriate.
446 DenseSet<Instruction*> Args;
447 if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
448 if (Ptr->getParent() == SI->getParent())
449 Args.insert(Ptr);
450
451 // Instructions to lift before P.
452 SmallVector<Instruction*, 8> ToLift;
453
454 // Memory locations of lifted instructions.
455 SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};
456
457 // Lifted calls.
458 SmallVector<const CallBase *, 8> Calls;
459
460 const MemoryLocation LoadLoc = MemoryLocation::get(LI);
461
462 for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
463 auto *C = &*I;
464
465 bool MayAlias = isModOrRefSet(AA.getModRefInfo(C, None));
466
467 bool NeedLift = false;
468 if (Args.erase(C))
469 NeedLift = true;
470 else if (MayAlias) {
471 NeedLift = llvm::any_of(MemLocs, [C, &AA](const MemoryLocation &ML) {
472 return isModOrRefSet(AA.getModRefInfo(C, ML));
473 });
474
475 if (!NeedLift)
476 NeedLift = llvm::any_of(Calls, [C, &AA](const CallBase *Call) {
477 return isModOrRefSet(AA.getModRefInfo(C, Call));
478 });
479 }
480
481 if (!NeedLift)
482 continue;
483
484 if (MayAlias) {
485 // Since LI is implicitly moved downwards past the lifted instructions,
486 // none of them may modify its source.
487 if (isModSet(AA.getModRefInfo(C, LoadLoc)))
488 return false;
489 else if (const auto *Call = dyn_cast<CallBase>(C)) {
490 // If we can't lift this before P, it's game over.
491 if (isModOrRefSet(AA.getModRefInfo(P, Call)))
492 return false;
493
494 Calls.push_back(Call);
495 } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
496 // If we can't lift this before P, it's game over.
497 auto ML = MemoryLocation::get(C);
498 if (isModOrRefSet(AA.getModRefInfo(P, ML)))
499 return false;
500
501 MemLocs.push_back(ML);
502 } else
503 // We don't know how to lift this instruction.
504 return false;
505 }
506
507 ToLift.push_back(C);
508 for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
509 if (auto *A = dyn_cast<Instruction>(C->getOperand(k))) {
510 if (A->getParent() == SI->getParent()) {
511 // Cannot hoist user of P above P
512 if(A == P) return false;
513 Args.insert(A);
514 }
515 }
516 }
517
518 // We made it; now lift the collected instructions before P.
519 for (auto *I : llvm::reverse(ToLift)) {
520 LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
521 I->moveBefore(P);
522 }
523
524 return true;
525 }
526
527 bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
528 if (!SI->isSimple()) return false;
529
530 // Avoid merging nontemporal stores since the resulting
531 // memcpy/memset would not be able to preserve the nontemporal hint.
532 // In theory we could teach this pass how to propagate the !nontemporal metadata to
533 // memset calls. However, that change would force the backend to
534 // conservatively expand !nontemporal memset calls back to sequences of
535 // store instructions (effectively undoing the merging).
536 if (SI->getMetadata(LLVMContext::MD_nontemporal))
537 return false;
538
539 const DataLayout &DL = SI->getModule()->getDataLayout();
540
541 // Load to store forwarding can be interpreted as memcpy.
542 if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
543 if (LI->isSimple() && LI->hasOneUse() &&
544 LI->getParent() == SI->getParent()) {
545
546 auto *T = LI->getType();
547 if (T->isAggregateType()) {
548 AliasAnalysis &AA = LookupAliasAnalysis();
549 MemoryLocation LoadLoc = MemoryLocation::get(LI);
550
551 // We use alias analysis to check if an instruction may store to
552 // the memory we load from in between the load and the store. If
553 // such an instruction is found, we try to promote there instead
554 // of at the store position.
555 Instruction *P = SI;
556 for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
557 if (isModSet(AA.getModRefInfo(&I, LoadLoc))) {
558 P = &I;
559 break;
560 }
561 }
562
563 // We found an instruction that may write to the loaded memory.
564 // We can try to promote at this position instead of the store
565 // position if nothing aliases the store memory after this point and the store
566 // destination is not in the range.
567 if (P && P != SI) {
568 if (!moveUp(AA, SI, P, LI))
569 P = nullptr;
570 }
571
572 // If a valid insertion position is found, then we can promote
573 // the load/store pair to a memcpy.
574 if (P) {
575 // If we load from memory that may alias the memory we store to,
576 // memmove must be used to preserve semantics. If not, memcpy can
577 // be used.
578 bool UseMemMove = false;
579 if (!AA.isNoAlias(MemoryLocation::get(SI), LoadLoc))
580 UseMemMove = true;
581
582 uint64_t Size = DL.getTypeStoreSize(T);
583
584 IRBuilder<> Builder(P);
585 Instruction *M;
586 if (UseMemMove)
587 M = Builder.CreateMemMove(
588 SI->getPointerOperand(), findStoreAlignment(DL, SI),
589 LI->getPointerOperand(), findLoadAlignment(DL, LI), Size);
590 else
591 M = Builder.CreateMemCpy(
592 SI->getPointerOperand(), findStoreAlignment(DL, SI),
593 LI->getPointerOperand(), findLoadAlignment(DL, LI), Size);
594
595 LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => "
596 << *M << "\n");
597
598 MD->removeInstruction(SI);
599 SI->eraseFromParent();
600 MD->removeInstruction(LI);
601 LI->eraseFromParent();
602 ++NumMemCpyInstr;
603
604 // Make sure we do not invalidate the iterator.
605 BBI = M->getIterator();
606 return true;
607 }
608 }
609
610 // Detect cases where we're performing call slot forwarding, but
611 // happen to be using a load-store pair to implement it, rather than
612 // a memcpy.
613 MemDepResult ldep = MD->getDependency(LI);
614 CallInst *C = nullptr;
615 if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
616 C = dyn_cast<CallInst>(ldep.getInst());
617
618 if (C) {
619 // Check that nothing touches the dest of the "copy" between
620 // the call and the store.
621 Value *CpyDest = SI->getPointerOperand()->stripPointerCasts();
622 bool CpyDestIsLocal = isa<AllocaInst>(CpyDest);
623 AliasAnalysis &AA = LookupAliasAnalysis();
624 MemoryLocation StoreLoc = MemoryLocation::get(SI);
625 for (BasicBlock::iterator I = --SI->getIterator(), E = C->getIterator();
626 I != E; --I) {
627 if (isModOrRefSet(AA.getModRefInfo(&*I, StoreLoc))) {
628 C = nullptr;
629 break;
630 }
631 // The store to dest may never happen if an exception can be thrown
632 // between the load and the store.
633 if (I->mayThrow() && !CpyDestIsLocal) {
634 C = nullptr;
635 break;
636 }
637 }
638 }
639
640 if (C) {
641 bool changed = performCallSlotOptzn(
642 LI, SI->getPointerOperand()->stripPointerCasts(),
643 LI->getPointerOperand()->stripPointerCasts(),
644 DL.getTypeStoreSize(SI->getOperand(0)->getType()),
645 findCommonAlignment(DL, SI, LI).value(), C);
646 if (changed) {
647 MD->removeInstruction(SI);
648 SI->eraseFromParent();
649 MD->removeInstruction(LI);
650 LI->eraseFromParent();
651 ++NumMemCpyInstr;
652 return true;
653 }
654 }
655 }
656 }
657
658 // There are two cases that are interesting for this code to handle: memcpy
659 // and memset. Right now we only handle memset.
660
661 // Ensure that the value being stored is something that can be memset a byte
662 // at a time, such as "0" or "-1" of any width, as well as things like
663 // 0xA0A0A0A0 and 0.0.
664 auto *V = SI->getOperand(0);
665 if (Value *ByteVal = isBytewiseValue(V, DL)) {
666 if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
667 ByteVal)) {
668 BBI = I->getIterator(); // Don't invalidate iterator.
669 return true;
670 }
671
672 // If we have an aggregate, we try to promote it to memset regardless
673 // of opportunity for merging as it can expose optimization opportunities
674 // in subsequent passes.
675 auto *T = V->getType();
676 if (T->isAggregateType()) {
677 uint64_t Size = DL.getTypeStoreSize(T);
678 const Align MA =
679 DL.getValueOrABITypeAlignment(MaybeAlign(SI->getAlignment()), T);
680 IRBuilder<> Builder(SI);
681 auto *M =
682 Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size, MA);
683
684 LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");
685
686 MD->removeInstruction(SI);
687 SI->eraseFromParent();
688 NumMemSetInfer++;
689
690 // Make sure we do not invalidate the iterator.
691 BBI = M->getIterator();
692 return true;
693 }
694 }
695
696 return false;
697 }
698
699 bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
700 // See if there is another memset or store neighboring this memset which
701 // allows us to widen out the memset to do a single larger store.
702 if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
703 if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
704 MSI->getValue())) {
705 BBI = I->getIterator(); // Don't invalidate iterator.
706 return true;
707 }
708 return false;
709 }
710
711 /// Takes a memcpy and a call that it depends on,
712 /// and checks for the possibility of a call slot optimization by having
713 /// the call write its result directly into the destination of the memcpy.
714 bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpy, Value *cpyDest,
715 Value *cpySrc, uint64_t cpyLen,
716 unsigned cpyAlign, CallInst *C) {
717 // The general transformation to keep in mind is
718 //
719 // call @func(..., src, ...)
720 // memcpy(dest, src, ...)
721 //
722 // ->
723 //
724 // memcpy(dest, src, ...)
725 // call @func(..., dest, ...)
726 //
727 // Since moving the memcpy is technically awkward, we additionally check that
728 // src only holds uninitialized values at the moment of the call, meaning that
729 // the memcpy can be discarded rather than moved.
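// A C-level sketch of the same idea (illustrative only):
//
//   struct S tmp;
//   f(&tmp);                      // call writes its result into tmp
//   memcpy(dst, &tmp, sizeof(S)); // copy the result to its real home
//
// ->
//
//   f(dst);                       // let the call write to dst directly
//
// subject to all of the size, alignment, capture and aliasing checks below.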
730
731 // Lifetime marks shouldn't be operated on.
732 if (Function *F = C->getCalledFunction())
733 if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
734 return false;
735
736 // Deliberately get the source and destination with bitcasts stripped away,
737 // because we'll need to do type comparisons based on the underlying type.
738 CallSite CS(C);
739
740 // Require that src be an alloca. This simplifies the reasoning considerably.
741 AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
742 if (!srcAlloca)
743 return false;
744
745 ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
746 if (!srcArraySize)
747 return false;
748
749 const DataLayout &DL = cpy->getModule()->getDataLayout();
750 uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
751 srcArraySize->getZExtValue();
752
753 if (cpyLen < srcSize)
754 return false;
755
756 // Check that accessing the first srcSize bytes of dest will not cause a
757 // trap. Otherwise the transform is invalid since it might cause a trap
758 // to occur earlier than it otherwise would.
759 if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
760 // The destination is an alloca. Check it is larger than srcSize.
761 ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
762 if (!destArraySize)
763 return false;
764
765 uint64_t destSize = DL.getTypeAllocSize(A->getAllocatedType()) *
766 destArraySize->getZExtValue();
767
768 if (destSize < srcSize)
769 return false;
770 } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
771 // The store to dest may never happen if the call can throw.
772 if (C->mayThrow())
773 return false;
774
775 if (A->getDereferenceableBytes() < srcSize) {
776 // If the destination is an sret parameter then only accesses that are
777 // outside of the returned struct type can trap.
778 if (!A->hasStructRetAttr())
779 return false;
780
781 Type *StructTy = cast<PointerType>(A->getType())->getElementType();
782 if (!StructTy->isSized()) {
783 // The call may never return and hence the copy-instruction may never
784 // be executed, and therefore it's not safe to say "the destination
785 // has at least <cpyLen> bytes, as implied by the copy-instruction".
786 return false;
787 }
788
789 uint64_t destSize = DL.getTypeAllocSize(StructTy);
790 if (destSize < srcSize)
791 return false;
792 }
793 } else {
794 return false;
795 }
796
797 // Check that dest points to memory that is at least as aligned as src.
798 unsigned srcAlign = srcAlloca->getAlignment();
799 if (!srcAlign)
800 srcAlign = DL.getABITypeAlignment(srcAlloca->getAllocatedType());
801 bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
802 // If dest is not aligned enough and we can't increase its alignment then
803 // bail out.
804 if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
805 return false;
806
807 // Check that src is not accessed except via the call and the memcpy. This
808 // guarantees that it holds only undefined values when passed in (so the final
809 // memcpy can be dropped), that it is not read or written between the call and
810 // the memcpy, and that writing beyond the end of it is undefined.
811 SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
812 srcAlloca->user_end());
813 while (!srcUseList.empty()) {
814 User *U = srcUseList.pop_back_val();
815
816 if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
817 for (User *UU : U->users())
818 srcUseList.push_back(UU);
819 continue;
820 }
821 if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
822 if (!G->hasAllZeroIndices())
823 return false;
824
825 for (User *UU : U->users())
826 srcUseList.push_back(UU);
827 continue;
828 }
829 if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
830 if (IT->isLifetimeStartOrEnd())
831 continue;
832
833 if (U != C && U != cpy)
834 return false;
835 }
836
837 // Check that src isn't captured by the called function since the
838 // transformation can cause aliasing issues in that case.
839 for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
840 if (CS.getArgument(i) == cpySrc && !CS.doesNotCapture(i))
841 return false;
842
843 // Since we're changing the parameter to the callsite, we need to make sure
844 // that what would be the new parameter dominates the callsite.
845 DominatorTree &DT = LookupDomTree();
846 if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
847 if (!DT.dominates(cpyDestInst, C))
848 return false;
849
850 // In addition to knowing that the call does not access src in some
851 // unexpected manner, for example via a global, which we deduce from
852 // the use analysis, we also need to know that it does not sneakily
853 // access dest. We rely on AA to figure this out for us.
854 AliasAnalysis &AA = LookupAliasAnalysis();
855 ModRefInfo MR = AA.getModRefInfo(C, cpyDest, LocationSize::precise(srcSize));
856 // If necessary, perform additional analysis.
857 if (isModOrRefSet(MR))
858 MR = AA.callCapturesBefore(C, cpyDest, LocationSize::precise(srcSize), &DT);
859 if (isModOrRefSet(MR))
860 return false;
861
862 // We can't create address space casts here because we don't know if they're
863 // safe for the target.
864 if (cpySrc->getType()->getPointerAddressSpace() !=
865 cpyDest->getType()->getPointerAddressSpace())
866 return false;
867 for (unsigned i = 0; i < CS.arg_size(); ++i)
868 if (CS.getArgument(i)->stripPointerCasts() == cpySrc &&
869 cpySrc->getType()->getPointerAddressSpace() !=
870 CS.getArgument(i)->getType()->getPointerAddressSpace())
871 return false;
872
873 // All the checks have passed, so do the transformation.
874 bool changedArgument = false;
875 for (unsigned i = 0; i < CS.arg_size(); ++i)
876 if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
877 Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest
878 : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
879 cpyDest->getName(), C);
880 changedArgument = true;
881 if (CS.getArgument(i)->getType() == Dest->getType())
882 CS.setArgument(i, Dest);
883 else
884 CS.setArgument(i, CastInst::CreatePointerCast(Dest,
885 CS.getArgument(i)->getType(), Dest->getName(), C));
886 }
887
888 if (!changedArgument)
889 return false;
890
891 // If the destination wasn't sufficiently aligned then increase its alignment.
892 if (!isDestSufficientlyAligned) {
893 assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
894 cast<AllocaInst>(cpyDest)->setAlignment(MaybeAlign(srcAlign));
895 }
896
897 // Drop any cached information about the call, because we may have changed
898 // its dependence information by changing its parameter.
899 MD->removeInstruction(C);
900
901 // Update AA metadata
902 // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
903 // handled here, but combineMetadata doesn't support them yet
904 unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
905 LLVMContext::MD_noalias,
906 LLVMContext::MD_invariant_group,
907 LLVMContext::MD_access_group};
908 combineMetadata(C, cpy, KnownIDs, true);
909
910 // Remove the memcpy.
911 MD->removeInstruction(cpy);
912 ++NumMemCpyInstr;
913
914 return true;
915 }
916
917 /// We've found that the (upward scanning) memory dependence of memcpy 'M' is
918 /// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can.
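/// In other words (illustrative):
/// \code
///   memcpy(b <- a)   // MDep
///   memcpy(c <- b)   // M
/// \endcode
/// becomes a copy of 'a' directly into 'c' (as a memmove if 'c' may alias
/// 'a'), leaving MDep to be cleaned up by later passes if it becomes dead.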
919 bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
920 MemCpyInst *MDep) {
921 // We can only transform memcpys where the dest of one is the source of the
922 // other.
923 if (M->getSource() != MDep->getDest() || MDep->isVolatile())
924 return false;
925
926 // If the dep instruction is reading from our current input, then it is a noop
927 // transfer and substituting the input won't change this instruction. Just
928 // ignore the input and let someone else zap MDep. This handles cases like:
929 // memcpy(a <- a)
930 // memcpy(b <- a)
931 if (M->getSource() == MDep->getSource())
932 return false;
933
934 // Second, the lengths of the memcpys must be the same, or the preceding one
935 // must be larger than the following one.
936 ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
937 ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
938 if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
939 return false;
940
941 AliasAnalysis &AA = LookupAliasAnalysis();
942
943 // Verify that the copied-from memory doesn't change in between the two
944 // transfers. For example, in:
945 // memcpy(a <- b)
946 // *b = 42;
947 // memcpy(c <- a)
948 // It would be invalid to transform the second memcpy into memcpy(c <- b).
949 //
950 // TODO: If the code between M and MDep is transparent to the destination "c",
951 // then we could still perform the xform by moving M up to the first memcpy.
952 //
953 // NOTE: This is conservative, it will stop on any read from the source loc,
954 // not just the defining memcpy.
955 MemDepResult SourceDep =
956 MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
957 M->getIterator(), M->getParent());
958 if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
959 return false;
960
961 // If the dest of the second might alias the source of the first, then the
962 // source and dest might overlap. We still want to eliminate the intermediate
963 // value, but we have to generate a memmove instead of memcpy.
964 bool UseMemMove = false;
965 if (!AA.isNoAlias(MemoryLocation::getForDest(M),
966 MemoryLocation::getForSource(MDep)))
967 UseMemMove = true;
968
969 // If all checks passed, then we can transform M.
970 LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n"
971 << *MDep << '\n' << *M << '\n');
972
973 // TODO: Is this worth it if we're creating a less aligned memcpy? For
974 // example we could be moving from movaps -> movq on x86.
975 IRBuilder<> Builder(M);
976 if (UseMemMove)
977 Builder.CreateMemMove(M->getRawDest(), M->getDestAlign(),
978 MDep->getRawSource(), MDep->getSourceAlign(),
979 M->getLength(), M->isVolatile());
980 else
981 Builder.CreateMemCpy(M->getRawDest(), M->getDestAlign(),
982 MDep->getRawSource(), MDep->getSourceAlign(),
983 M->getLength(), M->isVolatile());
984
985 // Remove the instruction we're replacing.
986 MD->removeInstruction(M);
987 M->eraseFromParent();
988 ++NumMemCpyInstr;
989 return true;
990 }
991
992 /// We've found that the (upward scanning) memory dependence of \p MemCpy is
993 /// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that
994 /// weren't copied over by \p MemCpy.
995 ///
996 /// In other words, transform:
997 /// \code
998 /// memset(dst, c, dst_size);
999 /// memcpy(dst, src, src_size);
1000 /// \endcode
1001 /// into:
1002 /// \code
1003 /// memcpy(dst, src, src_size);
1004 /// memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
1005 /// \endcode
1006 bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
1007 MemSetInst *MemSet) {
1008 // We can only transform memset/memcpy with the same destination.
1009 if (MemSet->getDest() != MemCpy->getDest())
1010 return false;
1011
1012 // Check that there are no other dependencies on the memset destination.
1013 MemDepResult DstDepInfo =
1014 MD->getPointerDependencyFrom(MemoryLocation::getForDest(MemSet), false,
1015 MemCpy->getIterator(), MemCpy->getParent());
1016 if (DstDepInfo.getInst() != MemSet)
1017 return false;
1018
1019 // Use the same i8* dest as the memcpy, killing the memset dest if different.
1020 Value *Dest = MemCpy->getRawDest();
1021 Value *DestSize = MemSet->getLength();
1022 Value *SrcSize = MemCpy->getLength();
1023
1024 // By default, create an unaligned memset.
1025 unsigned Align = 1;
1026 // If Dest is aligned, and SrcSize is constant, use the minimum alignment
1027 // of the sum.
1028 const unsigned DestAlign =
1029 std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment());
1030 if (DestAlign > 1)
1031 if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
1032 Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);
1033
1034 IRBuilder<> Builder(MemCpy);
1035
1036 // If the sizes have different types, zext the smaller one.
1037 if (DestSize->getType() != SrcSize->getType()) {
1038 if (DestSize->getType()->getIntegerBitWidth() >
1039 SrcSize->getType()->getIntegerBitWidth())
1040 SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
1041 else
1042 DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
1043 }
1044
1045 Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize);
1046 Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize);
1047 Value *MemsetLen = Builder.CreateSelect(
1048 Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff);
1049 Builder.CreateMemSet(
1050 Builder.CreateGEP(Dest->getType()->getPointerElementType(), Dest,
1051 SrcSize),
1052 MemSet->getOperand(1), MemsetLen, MaybeAlign(Align));
1053
1054 MD->removeInstruction(MemSet);
1055 MemSet->eraseFromParent();
1056 return true;
1057 }
1058
1059 /// Determine whether the instruction has undefined content for the given Size,
1060 /// either because it was freshly alloca'd or started its lifetime.
1061 static bool hasUndefContents(Instruction *I, ConstantInt *Size) {
1062 if (isa<AllocaInst>(I))
1063 return true;
1064
1065 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
1066 if (II->getIntrinsicID() == Intrinsic::lifetime_start)
1067 if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
1068 if (LTSize->getZExtValue() >= Size->getZExtValue())
1069 return true;
1070
1071 return false;
1072 }
1073
1074 /// Transform memcpy to memset when its source was just memset.
1075 /// In other words, turn:
1076 /// \code
1077 /// memset(dst1, c, dst1_size);
1078 /// memcpy(dst2, dst1, dst2_size);
1079 /// \endcode
1080 /// into:
1081 /// \code
1082 /// memset(dst1, c, dst1_size);
1083 /// memset(dst2, c, dst2_size);
1084 /// \endcode
1085 /// When dst2_size <= dst1_size.
1086 ///
1087 /// The \p MemCpy must have a Constant length.
1088 bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
1089 MemSetInst *MemSet) {
1090 AliasAnalysis &AA = LookupAliasAnalysis();
1091
1092 // Make sure this is memcpy(..., memset(...), ...), i.e. that we are memsetting and
1093 // memcpying from the same address. Otherwise it is hard to reason about.
1094 if (!AA.isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource()))
1095 return false;
1096
1097 // A known memset size is required.
1098 ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
1099 if (!MemSetSize)
1100 return false;
1101
1102 // Make sure the memcpy doesn't read any more than what the memset wrote.
1103 // Don't worry about sizes larger than i64.
1104 ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
1105 if (CopySize->getZExtValue() > MemSetSize->getZExtValue()) {
1106 // If the memcpy is larger than the memset, but the memory was undef prior
1107 // to the memset, we can just ignore the tail. Technically we're only
1108 // interested in the bytes from MemSetSize..CopySize here, but as we can't
1109 // easily represent this location, we use the full 0..CopySize range.
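    // For example (illustrative): if %p is a freshly alloca'd 16-byte buffer,
    //   memset(%p, 0, 8); memcpy(%q, %p, 16);
    // reads only zeros and undef, so the copy can be treated as if it were
    // only 8 bytes long and becomes memset(%q, 0, 8) below.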
1110 MemoryLocation MemCpyLoc = MemoryLocation::getForSource(MemCpy);
1111 MemDepResult DepInfo = MD->getPointerDependencyFrom(
1112 MemCpyLoc, true, MemSet->getIterator(), MemSet->getParent());
1113 if (DepInfo.isDef() && hasUndefContents(DepInfo.getInst(), CopySize))
1114 CopySize = MemSetSize;
1115 else
1116 return false;
1117 }
1118
1119 IRBuilder<> Builder(MemCpy);
1120 Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1), CopySize,
1121 MaybeAlign(MemCpy->getDestAlignment()));
1122 return true;
1123 }
1124
1125 /// Perform simplification of memcpy's. If we have memcpy A
1126 /// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
1127 /// B to be a memcpy from X to Z (or potentially a memmove, depending on
1128 /// circumstances). This allows later passes to remove the first memcpy
1129 /// altogether.
1130 bool MemCpyOptPass::processMemCpy(MemCpyInst *M) {
1131 // We can only optimize non-volatile memcpy's.
1132 if (M->isVolatile()) return false;
1133
1134 // If the source and destination of the memcpy are the same, then zap it.
1135 if (M->getSource() == M->getDest()) {
1136 MD->removeInstruction(M);
1137 M->eraseFromParent();
1138 return false;
1139 }
1140
1141 // If copying from a constant, try to turn the memcpy into a memset.
1142 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
1143 if (GV->isConstant() && GV->hasDefinitiveInitializer())
1144 if (Value *ByteVal = isBytewiseValue(GV->getInitializer(),
1145 M->getModule()->getDataLayout())) {
1146 IRBuilder<> Builder(M);
1147 Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
1148 MaybeAlign(M->getDestAlignment()), false);
1149 MD->removeInstruction(M);
1150 M->eraseFromParent();
1151 ++NumCpyToSet;
1152 return true;
1153 }
1154
1155 MemDepResult DepInfo = MD->getDependency(M);
1156
1157 // Try to turn a partially redundant memset + memcpy into
1158 // memcpy + smaller memset. We don't need the memcpy size for this.
1159 if (DepInfo.isClobber())
1160 if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
1161 if (processMemSetMemCpyDependence(M, MDep))
1162 return true;
1163
1164 // The optimizations after this point require the memcpy size.
1165 ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
1166 if (!CopySize) return false;
1167
1168 // There are four possible optimizations we can do for memcpy:
1169 // a) memcpy-memcpy xform which exposes redundancy for DSE.
1170 // b) call-memcpy xform for return slot optimization.
1171 // c) memcpy from freshly alloca'd space or space that has just started its
1172 // lifetime copies undefined data, and we can therefore eliminate the
1173 // memcpy in favor of the data that was already at the destination.
1174 // d) memcpy from a just-memset'd source can be turned into memset.
1175 if (DepInfo.isClobber()) {
1176 if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
1177 // FIXME: Can we pass in either of dest/src alignment here instead
1178 // of conservatively taking the minimum?
1179 unsigned Align = MinAlign(M->getDestAlignment(), M->getSourceAlignment());
1180 if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
1181 CopySize->getZExtValue(), Align,
1182 C)) {
1183 MD->removeInstruction(M);
1184 M->eraseFromParent();
1185 return true;
1186 }
1187 }
1188 }
1189
1190 MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
1191 MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
1192 SrcLoc, true, M->getIterator(), M->getParent());
1193
1194 if (SrcDepInfo.isClobber()) {
1195 if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
1196 return processMemCpyMemCpyDependence(M, MDep);
1197 } else if (SrcDepInfo.isDef()) {
1198 if (hasUndefContents(SrcDepInfo.getInst(), CopySize)) {
1199 MD->removeInstruction(M);
1200 M->eraseFromParent();
1201 ++NumMemCpyInstr;
1202 return true;
1203 }
1204 }
1205
1206 if (SrcDepInfo.isClobber())
1207 if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
1208 if (performMemCpyToMemSetOptzn(M, MDep)) {
1209 MD->removeInstruction(M);
1210 M->eraseFromParent();
1211 ++NumCpyToSet;
1212 return true;
1213 }
1214
1215 return false;
1216 }
1217
1218 /// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
1219 /// not to alias.
1220 bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
1221 AliasAnalysis &AA = LookupAliasAnalysis();
1222
1223 if (!TLI->has(LibFunc_memmove))
1224 return false;
1225
1226 // See if the pointers alias.
1227 if (!AA.isNoAlias(MemoryLocation::getForDest(M),
1228 MemoryLocation::getForSource(M)))
1229 return false;
1230
1231 LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
1232 << "\n");
1233
1234 // If not, then we know we can transform this.
1235 Type *ArgTys[3] = { M->getRawDest()->getType(),
1236 M->getRawSource()->getType(),
1237 M->getLength()->getType() };
1238 M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
1239 Intrinsic::memcpy, ArgTys));
1240
1241 // MemDep may have overly conservative information about this instruction; just
1242 // conservatively flush it from the cache.
1243 MD->removeInstruction(M);
1244
1245 ++NumMoveToCpy;
1246 return true;
1247 }
1248
1249 /// This is called on every byval argument in call sites.
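/// If the argument is fed by a memcpy from some other buffer, we can often
/// pass that buffer directly instead (illustrative):
/// \code
///   memcpy(%tmp, %src, size);
///   call void @f(byval %tmp)
/// \endcode
/// becomes
/// \code
///   memcpy(%tmp, %src, size);   // may now be dead
///   call void @f(byval %src)
/// \endcode
/// provided %src is large enough, sufficiently aligned, in the right address
/// space, and not written between the memcpy and the call.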
1250 bool MemCpyOptPass::processByValArgument(CallSite CS, unsigned ArgNo) {
1251 const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout();
1252 // Find out what feeds this byval argument.
1253 Value *ByValArg = CS.getArgument(ArgNo);
1254 Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
1255 uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
1256 MemDepResult DepInfo = MD->getPointerDependencyFrom(
1257 MemoryLocation(ByValArg, LocationSize::precise(ByValSize)), true,
1258 CS.getInstruction()->getIterator(), CS.getInstruction()->getParent());
1259 if (!DepInfo.isClobber())
1260 return false;
1261
1262 // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by
1263 // a memcpy, see if we can byval from the source of the memcpy instead of the
1264 // result.
1265 MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
1266 if (!MDep || MDep->isVolatile() ||
1267 ByValArg->stripPointerCasts() != MDep->getDest())
1268 return false;
1269
1270 // The length of the memcpy must be larger or equal to the size of the byval.
1271 ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
1272 if (!C1 || C1->getValue().getZExtValue() < ByValSize)
1273 return false;
1274
1275 // Get the alignment of the byval. If the call doesn't specify the alignment,
1276 // then it is some target specific value that we can't know.
1277 unsigned ByValAlign = CS.getParamAlignment(ArgNo);
1278 if (ByValAlign == 0) return false;
1279
1280 // If it is greater than the memcpy, then we check to see if we can force the
1281 // source of the memcpy to the alignment we need. If we fail, we bail out.
1282 AssumptionCache &AC = LookupAssumptionCache();
1283 DominatorTree &DT = LookupDomTree();
1284 if (MDep->getSourceAlignment() < ByValAlign &&
1285 getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL,
1286 CS.getInstruction(), &AC, &DT) < ByValAlign)
1287 return false;
1288
1289 // The address space of the memcpy source must match the byval argument
1290 if (MDep->getSource()->getType()->getPointerAddressSpace() !=
1291 ByValArg->getType()->getPointerAddressSpace())
1292 return false;
1293
1294 // Verify that the copied-from memory doesn't change in between the memcpy and
1295 // the byval call.
1296 // memcpy(a <- b)
1297 // *b = 42;
1298 // foo(*a)
1299 // It would be invalid to transform the second memcpy into foo(*b).
1300 //
1301 // NOTE: This is conservative, it will stop on any read from the source loc,
1302 // not just the defining memcpy.
1303 MemDepResult SourceDep = MD->getPointerDependencyFrom(
1304 MemoryLocation::getForSource(MDep), false,
1305 CS.getInstruction()->getIterator(), MDep->getParent());
1306 if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
1307 return false;
1308
1309 Value *TmpCast = MDep->getSource();
1310 if (MDep->getSource()->getType() != ByValArg->getType())
1311 TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
1312 "tmpcast", CS.getInstruction());
1313
1314 LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
1315 << " " << *MDep << "\n"
1316 << " " << *CS.getInstruction() << "\n");
1317
1318 // Otherwise we're good! Update the byval argument.
1319 CS.setArgument(ArgNo, TmpCast);
1320 ++NumMemCpyInstr;
1321 return true;
1322 }
1323
1324 /// Executes one iteration of MemCpyOptPass.
1325 bool MemCpyOptPass::iterateOnFunction(Function &F) {
1326 bool MadeChange = false;
1327
1328 DominatorTree &DT = LookupDomTree();
1329
1330 // Walk all instructions in the function.
1331 for (BasicBlock &BB : F) {
1332 // Skip unreachable blocks. For example processStore assumes that an
1333 // instruction in a BB can't be dominated by a later instruction in the
1334 // same BB (which is a scenario that can happen for an unreachable BB that
1335 // has itself as a predecessor).
1336 if (!DT.isReachableFromEntry(&BB))
1337 continue;
1338
1339 for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
1340 // Avoid invalidating the iterator.
1341 Instruction *I = &*BI++;
1342
1343 bool RepeatInstruction = false;
1344
1345 if (StoreInst *SI = dyn_cast<StoreInst>(I))
1346 MadeChange |= processStore(SI, BI);
1347 else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
1348 RepeatInstruction = processMemSet(M, BI);
1349 else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
1350 RepeatInstruction = processMemCpy(M);
1351 else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
1352 RepeatInstruction = processMemMove(M);
1353 else if (auto CS = CallSite(I)) {
1354 for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
1355 if (CS.isByValArgument(i))
1356 MadeChange |= processByValArgument(CS, i);
1357 }
1358
1359 // Reprocess the instruction if desired.
1360 if (RepeatInstruction) {
1361 if (BI != BB.begin())
1362 --BI;
1363 MadeChange = true;
1364 }
1365 }
1366 }
1367
1368 return MadeChange;
1369 }
1370
1371 PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) {
1372 auto &MD = AM.getResult<MemoryDependenceAnalysis>(F);
1373 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1374
1375 auto LookupAliasAnalysis = [&]() -> AliasAnalysis & {
1376 return AM.getResult<AAManager>(F);
1377 };
1378 auto LookupAssumptionCache = [&]() -> AssumptionCache & {
1379 return AM.getResult<AssumptionAnalysis>(F);
1380 };
1381 auto LookupDomTree = [&]() -> DominatorTree & {
1382 return AM.getResult<DominatorTreeAnalysis>(F);
1383 };
1384
1385 bool MadeChange = runImpl(F, &MD, &TLI, LookupAliasAnalysis,
1386 LookupAssumptionCache, LookupDomTree);
1387 if (!MadeChange)
1388 return PreservedAnalyses::all();
1389
1390 PreservedAnalyses PA;
1391 PA.preserveSet<CFGAnalyses>();
1392 PA.preserve<GlobalsAA>();
1393 PA.preserve<MemoryDependenceAnalysis>();
1394 return PA;
1395 }
1396
1397 bool MemCpyOptPass::runImpl(
1398 Function &F, MemoryDependenceResults *MD_, TargetLibraryInfo *TLI_,
1399 std::function<AliasAnalysis &()> LookupAliasAnalysis_,
1400 std::function<AssumptionCache &()> LookupAssumptionCache_,
1401 std::function<DominatorTree &()> LookupDomTree_) {
1402 bool MadeChange = false;
1403 MD = MD_;
1404 TLI = TLI_;
1405 LookupAliasAnalysis = std::move(LookupAliasAnalysis_);
1406 LookupAssumptionCache = std::move(LookupAssumptionCache_);
1407 LookupDomTree = std::move(LookupDomTree_);
1408
1409 // If we don't have at least memset and memcpy, there is little point in doing
1410 // anything here. These are required by a freestanding implementation, so if
1411 // even they are disabled, there is no point in trying hard.
1412 if (!TLI->has(LibFunc_memset) || !TLI->has(LibFunc_memcpy))
1413 return false;
1414
1415 while (true) {
1416 if (!iterateOnFunction(F))
1417 break;
1418 MadeChange = true;
1419 }
1420
1421 MD = nullptr;
1422 return MadeChange;
1423 }
1424
1425 /// This is the main transformation entry point for a function.
1426 bool MemCpyOptLegacyPass::runOnFunction(Function &F) {
1427 if (skipFunction(F))
1428 return false;
1429
1430 auto *MD = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
1431 auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
1432
1433 auto LookupAliasAnalysis = [this]() -> AliasAnalysis & {
1434 return getAnalysis<AAResultsWrapperPass>().getAAResults();
1435 };
1436 auto LookupAssumptionCache = [this, &F]() -> AssumptionCache & {
1437 return getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1438 };
1439 auto LookupDomTree = [this]() -> DominatorTree & {
1440 return getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1441 };
1442
1443 return Impl.runImpl(F, MD, TLI, LookupAliasAnalysis, LookupAssumptionCache,
1444 LookupDomTree);
1445 }
1446