//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable. This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca. Ignore any reads of the pointer; return false if we
/// see any stores or other unknown uses. If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
/// the uses. If we see a memcpy/memmove that targets an unoffseted pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
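///
/// An illustrative IR sketch of the pattern this function recognizes (the
/// names and types here are hypothetical, not taken from any test case):
///
///   @G = constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
///   ...
///   %A = alloca [4 x i32]
///   %A.i8 = bitcast [4 x i32]* %A to i8*
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %A.i8,
///       i8* bitcast ([4 x i32]* @G to i8*), i64 16, i32 4, i1 false)
///   ... only reads of %A below this point ...
///
/// Here TheCopy is set to the memcpy, and the caller may replace all uses of
/// %A with @G and delete the copy and the alloca.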
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete,
                               bool IsOffset = false) {
  // We track lifetime intrinsics as we encounter them. If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    User *U = cast<Instruction>(*UI);

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      // Ignore simple loads; they are always ok. Volatile/atomic loads are not.
      if (!LI->isSimple()) return false;
      continue;
    }

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
      // If uses of the bitcast are ok, we are ok.
      if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, ToDelete, IsOffset))
        return false;
      continue;
    }
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
      // If the GEP has all zero indices, it doesn't offset the pointer;
      // otherwise it does.
      if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy, ToDelete,
                                          IsOffset || !GEP->hasAllZeroIndices()))
        return false;
      continue;
    }

    if (CallSite CS = U) {
      // If this is the function being called then we treat it like a load and
      // ignore it.
      if (CS.isCallee(UI))
        continue;

      // If this is a readonly/readnone call site, then we know it is just a
      // load (but one that potentially returns the value itself), so we can
      // ignore it if we know that the value isn't captured.
      unsigned ArgNo = CS.getArgumentNo(UI);
      if (CS.onlyReadsMemory() &&
          (CS.getInstruction()->use_empty() || CS.doesNotCapture(ArgNo)))
        continue;

      // If this is being passed as a byval argument, the caller is making a
      // copy, so it is only a read of the alloca.
      if (CS.isByValArgument(ArgNo))
        continue;
    }

    // Lifetime intrinsics can be handled by the caller.
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
          II->getIntrinsicID() == Intrinsic::lifetime_end) {
        assert(II->use_empty() && "Lifetime markers have no result to use!");
        ToDelete.push_back(II);
        continue;
      }
    }

    // If this isn't a memcpy/memmove we recognize, reject it as something we
    // can't handle.
    MemTransferInst *MI = dyn_cast<MemTransferInst>(U);
    if (MI == 0)
      return false;

    // If the transfer is using the alloca as a source, then ignore it since
    // it is a load (unless the transfer is volatile).
    if (UI.getOperandNo() == 1) {
      if (MI->isVolatile()) return false;
      continue;
    }

    // If we have already seen a copy, reject the second one.
    if (TheCopy) return false;

    // If the pointer has been offset from the start of the alloca, we can't
    // safely handle this.
    if (IsOffset) return false;

    // If the memintrinsic isn't using the alloca as the dest, reject it.
    if (UI.getOperandNo() != 0) return false;

    // If the source of the memcpy/move is not a constant global, reject it.
    if (!pointsToConstantGlobal(MI->getSource()))
      return false;

    // Otherwise, the transform is safe. Remember the copy instruction.
    TheCopy = MI;
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
/// modified by a copy from a constant global. If we can prove this, we can
/// replace any uses of the alloca with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = 0;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return 0;
}

/// getPointeeAlignment - Compute the minimum alignment of the value pointed
/// to by the given pointer.
static unsigned getPointeeAlignment(Value *V, const TargetData &TD) {
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::BitCast ||
        (CE->getOpcode() == Instruction::GetElementPtr &&
         cast<GEPOperator>(CE)->hasAllZeroIndices()))
      return getPointeeAlignment(CE->getOperand(0), TD);

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    if (!GV->isDeclaration())
      return TD.getPreferredAlignment(GV);

  if (PointerType *PT = dyn_cast<PointerType>(V->getType()))
    return TD.getABITypeAlignment(PT->getElementType());

  return 0;
}

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  if (TD) {
    Type *IntPtrTy = TD->getIntPtrType(AI.getContext());
    if (AI.getArraySize()->getType() != IntPtrTy) {
      Value *V = Builder->CreateIntCast(AI.getArraySize(),
                                        IntPtrTy, false);
      AI.setOperand(0, V);
      return &AI;
    }
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
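  // For example (a hypothetical IR sketch of this rewrite):
  //   %a = alloca i32, i32 10
  // becomes:
  //   %V = alloca [10 x i32]
  //   %a = getelementptr inbounds [10 x i32]* %V, i32 0, i32 0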
  if (AI.isArrayAllocation()) {  // Check C != 1
    if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
      Type *NewTy =
        ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
      New->setAlignment(AI.getAlignment());

      // Scan to the end of the allocation instructions, to skip over a block of
      // allocas if possible... also skip interleaved debug info
      //
      BasicBlock::iterator It = New;
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;

      // Now that It is pointing to the first non-allocation-inst in the block,
      // insert our getelementptr instruction...
      //
      Value *NullIdx = Constant::getNullValue(Type::getInt32Ty(AI.getContext()));
      Value *Idx[2];
      Idx[0] = NullIdx;
      Idx[1] = NullIdx;
      Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName()+".sub");
      InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return ReplaceInstUsesWith(AI, GEP);
    } else if (isa<UndefValue>(AI.getArraySize())) {
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
    }
  }

  if (TD && AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(TD->getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all allocas of zero-byte objects to the entry block and merge them
    // together. Note that we only do this for allocas, because malloc should
    // allocate and return a unique pointer, even for a zero-byte allocation.
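    // For example (a hypothetical IR sketch): given
    //   entry:
    //     %a = alloca {}
    //   ...
    //   bb:
    //     %b = alloca {}
    // the second alloca is redundant; uses of %b can be replaced with %a
    // (bitcast as needed), since zero-byte allocas need not have distinct
    // addresses.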
    if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block. There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            TD->getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        unsigned MaxAlign =
          std::max(TD->getPrefTypeAlignment(EntryAI->getAllocatedType()),
                   TD->getPrefTypeAlignment(AI.getAllocatedType()));
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return ReplaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  // Check to see if this allocation is only modified by a memcpy/memmove from
  // a constant global whose alignment is equal to or exceeds that of the
  // allocation. If this is the case, we can change all users to use
  // the constant global instead. This is commonly produced by the CFE by
  // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
  // is only subsequently read.
  SmallVector<Instruction *, 4> ToDelete;
  if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
    if (AI.getAlignment() <= getPointeeAlignment(Copy->getSource(), *TD)) {
      DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
      DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
      for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
        EraseInstFromFunction(*ToDelete[i]);
      Constant *TheSrc = cast<Constant>(Copy->getSource());
      Instruction *NewI
        = ReplaceInstUsesWith(AI, ConstantExpr::getBitCast(TheSrc,
                                                           AI.getType()));
      EraseInstFromFunction(*Copy);
      ++NumGlobalCopies;
      return NewI;
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}


/// InstCombineLoadCast - Fold 'load (cast P)' -> 'cast (load P)' when possible.
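///
/// A minimal sketch of a case this can handle (hypothetical IR, relying on
/// the size/type checks below; <4 x i8> and i32 are both 32 bits wide):
///
///   %c = bitcast <4 x i8>* %P to i32*
///   %v = load i32* %c
/// becomes:
///   %t = load <4 x i8>* %P
///   %v = bitcast <4 x i8> %t to i32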
static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
                                        const TargetData *TD) {
  User *CI = cast<User>(LI.getOperand(0));
  Value *CastOp = CI->getOperand(0);

  PointerType *DestTy = cast<PointerType>(CI->getType());
  Type *DestPTy = DestTy->getElementType();
  if (PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {

    // If the address spaces don't match, don't eliminate the cast.
    if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
      return 0;

    Type *SrcPTy = SrcTy->getElementType();

    if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() ||
        DestPTy->isVectorTy()) {
      // If the source is an array, the code below will not succeed. Check to
      // see if a trivial 'gep P, 0, 0' will help matters. Only do this for
      // constants.
      if (ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
        if (Constant *CSrc = dyn_cast<Constant>(CastOp))
          if (ASrcTy->getNumElements() != 0) {
            Value *Idxs[2];
            Idxs[0] = Constant::getNullValue(Type::getInt32Ty(LI.getContext()));
            Idxs[1] = Idxs[0];
            CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs);
            SrcTy = cast<PointerType>(CastOp->getType());
            SrcPTy = SrcTy->getElementType();
          }

      if (IC.getTargetData() &&
          (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() ||
           SrcPTy->isVectorTy()) &&
          // Do not allow turning this into a load of an integer, which is then
          // cast to a pointer; this pessimizes pointer analysis a lot.
          (SrcPTy->isPointerTy() == LI.getType()->isPointerTy()) &&
          IC.getTargetData()->getTypeSizeInBits(SrcPTy) ==
          IC.getTargetData()->getTypeSizeInBits(DestPTy)) {

        // Okay, we are casting from one integer or pointer type to another of
        // the same size. Instead of casting the pointer before the load, cast
        // the result of the loaded value.
        LoadInst *NewLoad =
          IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
        NewLoad->setAlignment(LI.getAlignment());
        NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
        // Now cast the result of the load.
        return new BitCastInst(NewLoad, LI.getType());
      }
    }
  }
  return 0;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Attempt to improve the alignment.
  if (TD) {
    unsigned KnownAlign =
      getOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()),TD);
    unsigned LoadAlign = LI.getAlignment();
    unsigned EffectiveLoadAlign = LoadAlign != 0 ? LoadAlign :
      TD->getABITypeAlignment(LI.getType());

    if (KnownAlign > EffectiveLoadAlign)
      LI.setAlignment(KnownAlign);
    else if (LoadAlign == 0)
      LI.setAlignment(EffectiveLoadAlign);
  }

  // load (cast X) --> cast (load X) iff safe.
  if (isa<CastInst>(Op))
    if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
      return Res;

  // None of the following transforms are legal for volatile/atomic loads.
  // FIXME: Some of it is okay for atomic loads; needs refactoring.
  if (!LI.isSimple()) return 0;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
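  // For example (a hypothetical IR sketch):
  //   store i32 %x, i32* %P
  //   %a = add i32 %y, 1        ; unrelated arithmetic
  //   %v = load i32* %P         ; forwarded: %v is replaced by %x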
  BasicBlock::iterator BBI = &LI;
  if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI,6))
    return ReplaceInstUsesWith(LI, AvailableVal);

  // load(gep null, ...) -> unreachable
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0){
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable. We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    // Insert a new store to null instruction before the load to indicate that
    // this code is not reachable. We do this instead of inserting an
    // unreachable instruction directly because we cannot modify the CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  // Instcombine load (constantexpr_cast global) -> cast (load global)
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op))
    if (CE->isCast())
      if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
        return Res;

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap! Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, TD) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, TD)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        V1->setAlignment(Align);
        V2->setAlignment(Align);
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(1)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(2));
          return &LI;
        }

      // load (select (cond, P, null)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(2)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(1));
          return &LI;
        }
    }
  }
  return 0;
}

/// InstCombineStoreToCast - Fold store V, (cast P) -> store (cast V), P
/// when possible. This makes it generally easy to do alias analysis and/or
/// SROA/mem2reg of the memory object.
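///
/// A sketch of the aggregate case handled below, assuming a 32-bit target
/// where i8* and i32 have the same size (hypothetical IR):
///
///   %c = bitcast {i8*, float}* %P to i32*
///   store i32 %x, i32* %c
/// becomes:
///   %f = getelementptr inbounds {i8*, float}* %P, i32 0, i32 0
///   %v = inttoptr i32 %x to i8*
///   store i8* %v, i8** %f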
static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
  User *CI = cast<User>(SI.getOperand(1));
  Value *CastOp = CI->getOperand(0);

  Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
  PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
  if (SrcTy == 0) return 0;

  Type *SrcPTy = SrcTy->getElementType();

  if (!DestPTy->isIntegerTy() && !DestPTy->isPointerTy())
    return 0;

  /// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
  /// to its first element. This allows us to handle things like:
  ///   store i32 xxx, (bitcast {foo*, float}* %P to i32*)
  /// on 32-bit hosts.
  SmallVector<Value*, 4> NewGEPIndices;

  // If the source is an array, the code below will not succeed. Check to
  // see if a trivial 'gep P, 0, 0' will help matters. Only do this for
  // constants.
  if (SrcPTy->isArrayTy() || SrcPTy->isStructTy()) {
    // Index through pointer.
    Constant *Zero = Constant::getNullValue(Type::getInt32Ty(SI.getContext()));
    NewGEPIndices.push_back(Zero);

    while (1) {
      if (StructType *STy = dyn_cast<StructType>(SrcPTy)) {
        if (!STy->getNumElements()) /* Struct can be empty {} */
          break;
        NewGEPIndices.push_back(Zero);
        SrcPTy = STy->getElementType(0);
      } else if (ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
        NewGEPIndices.push_back(Zero);
        SrcPTy = ATy->getElementType();
      } else {
        break;
      }
    }

    SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
  }

  if (!SrcPTy->isIntegerTy() && !SrcPTy->isPointerTy())
    return 0;

  // If the pointers point into different address spaces or if they point to
  // values with different sizes, we can't do the transformation.
  if (!IC.getTargetData() ||
      SrcTy->getAddressSpace() !=
        cast<PointerType>(CI->getType())->getAddressSpace() ||
      IC.getTargetData()->getTypeSizeInBits(SrcPTy) !=
      IC.getTargetData()->getTypeSizeInBits(DestPTy))
    return 0;

  // Okay, we are casting from one integer or pointer type to another of
  // the same size. Instead of casting the pointer before
  // the store, cast the value to be stored.
  Value *NewCast;
  Value *SIOp0 = SI.getOperand(0);
  Instruction::CastOps opcode = Instruction::BitCast;
  Type* CastSrcTy = SIOp0->getType();
  Type* CastDstTy = SrcPTy;
  if (CastDstTy->isPointerTy()) {
    if (CastSrcTy->isIntegerTy())
      opcode = Instruction::IntToPtr;
  } else if (CastDstTy->isIntegerTy()) {
    if (SIOp0->getType()->isPointerTy())
      opcode = Instruction::PtrToInt;
  }

  // If CastOp is a pointer to an aggregate and this is a store to its first
  // field, emit a GEP to index into that field.
  if (!NewGEPIndices.empty())
    CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices);

  NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
                                   SIOp0->getName()+".c");
  SI.setOperand(0, NewCast);
  SI.setOperand(1, CastOp);
  return &SI;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr @a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr @a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Attempt to improve the alignment.
  if (TD) {
    unsigned KnownAlign =
      getOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()),
                                 TD);
    unsigned StoreAlign = SI.getAlignment();
    unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
      TD->getABITypeAlignment(Val->getType());

    if (KnownAlign > EffectiveStoreAlign)
      SI.setAlignment(KnownAlign);
    else if (StoreAlign == 0)
      SI.setAlignment(EffectiveStoreAlign);
  }

  // Don't hack volatile/atomic stores.
  // FIXME: Some bits are legal for atomic stores; needs refactoring.
  if (!SI.isSimple()) return 0;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return EraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return EraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations. This
  // situation often occurs with bitfield accesses.
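  // For example (a hypothetical IR sketch):
  //   store i32 %x, i32* %P     ; dead, erased by the scan below
  //   %y = or i32 %x, 128       ; unrelated arithmetic
  //   store i32 %y, i32* %P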
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
                                                        SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop. However, if the load is of the value
    // we're storing and from the same pointer we're storing to, then *this*
    // store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          LI->isSimple())
        return EraseInstFromFunction(SI);

      // Otherwise, this is a load from some other location. Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }

  // store X, null -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return 0;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return EraseInstFromFunction(SI);

  // If the pointer destination is a cast, see if we can fold the cast into the
  // source instead.
  if (isa<CastInst>(Ptr))
    if (Instruction *Res = InstCombineStoreToCast(*this, SI))
      return Res;
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
    if (CE->isCast())
      if (Instruction *Res = InstCombineStoreToCast(*this, SI))
        return Res;


  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = &SI;
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return 0;  // xform done!

  return 0;
}

/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2; }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
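/// For the if/then/else form, the merged result is (hypothetical IR sketch):
///
///   merge:
///     %storemerge = phi i32 [ %v1, %then ], [ %v2, %else ]
///     store i32 %storemerge, i32* %P
///
/// with the two original stores erased from %then and %else.
///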
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges. If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = 0;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI = OtherBB->getTerminator();
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case: there must be an instruction before the branch.
  StoreInst *OtherStore = 0;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI==OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBB, we have to
    // make sure nothing reads or overwrites the stored value in
    // StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSynchScope());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(OtherStore->getDebugLoc());

  // Nuke the old stores.
  EraseInstFromFunction(SI);
  EraseInstFromFunction(*OtherStore);
  return true;
}
