/external/swiftshader/third_party/llvm-7.0/llvm/lib/Transforms/Scalar/
  MergedLoadStoreMotion.cpp
    114  StoreInst *canSinkFromBlock(BasicBlock *BB, StoreInst *SI);
    115  PHINode *getPHIOperand(BasicBlock *BB, StoreInst *S0, StoreInst *S1);
    118  bool sinkStore(BasicBlock *BB, StoreInst *SinkCand, StoreInst *ElseInst);
    181  StoreInst *MergedLoadStoreMotion::canSinkFromBlock(BasicBlock *BB1,  in canSinkFromBlock()
    182  StoreInst *Store0) {  in canSinkFromBlock()
    186  auto *Store1 = dyn_cast<StoreInst>(&Inst);  in canSinkFromBlock()
    204  PHINode *MergedLoadStoreMotion::getPHIOperand(BasicBlock *BB, StoreInst *S0,  in getPHIOperand()
    205  StoreInst *S1) {  in getPHIOperand()
    224  bool MergedLoadStoreMotion::sinkStore(BasicBlock *BB, StoreInst *S0,  in sinkStore()
    225  StoreInst *S1) {  in sinkStore()
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/X86/
  X86AvoidStoreForwardingBlocks.cpp
    106  void breakBlockedCopies(MachineInstr *LoadInst, MachineInstr *StoreInst,
    110  MachineInstr *StoreInst, int64_t StDispImm,
    114  MachineInstr *StoreInst, unsigned NStoreOpcode,
    386  int64_t LoadDisp, MachineInstr *StoreInst,  in buildCopy() argument
    391  MachineOperand &StoreBase = getBaseOperand(StoreInst);  in buildCopy()
    394  MachineMemOperand *SMMO = *StoreInst->memoperands_begin();  in buildCopy()
    413  MachineInstr *StInst = StoreInst;  in buildCopy()
    414  if (StoreInst->getPrevNode() == LoadInst)  in buildCopy()
    428  MachineOperand &StoreSrcVReg = StoreInst->getOperand(X86::AddrNumOperands);  in buildCopy()
    435  int64_t LdDispImm, MachineInstr *StoreInst,  in buildCopies() argument
    [all …]

/external/llvm/lib/Transforms/Scalar/
  MergedLoadStoreMotion.cpp
    132  StoreInst *canSinkFromBlock(BasicBlock *BB, StoreInst *SI);
    133  PHINode *getPHIOperand(BasicBlock *BB, StoreInst *S0, StoreInst *S1);
    136  bool sinkStore(BasicBlock *BB, StoreInst *SinkCand, StoreInst *ElseInst);
    378  StoreInst *MergedLoadStoreMotion::canSinkFromBlock(BasicBlock *BB1,  in canSinkFromBlock()
    379  StoreInst *Store0) {  in canSinkFromBlock()
    383  auto *Store1 = dyn_cast<StoreInst>(&Inst);  in canSinkFromBlock()
    401  PHINode *MergedLoadStoreMotion::getPHIOperand(BasicBlock *BB, StoreInst *S0,  in getPHIOperand()
    402  StoreInst *S1) {  in getPHIOperand()
    423  bool MergedLoadStoreMotion::sinkStore(BasicBlock *BB, StoreInst *S0,  in sinkStore()
    424  StoreInst *S1) {  in sinkStore()
    [all …]

  LoopIdiomRecognize.cpp
    92   typedef SmallVector<StoreInst *, 8> StoreList;
    109  bool isLegalStore(StoreInst *SI, bool &ForMemset, bool &ForMemsetPattern,
    111  bool processLoopStores(SmallVectorImpl<StoreInst *> &SL, const SCEV *BECount,
    121  bool processLoopStoreOfLoopLoad(StoreInst *SI, const SCEV *BECount);
    282  static unsigned getStoreSizeInBytes(StoreInst *SI, const DataLayout *DL) {  in getStoreSizeInBytes()
    335  bool LoopIdiomRecognize::isLegalStore(StoreInst *SI, bool &ForMemset,  in isLegalStore()
    431  StoreInst *SI = dyn_cast<StoreInst>(&I);  in collectStores()
    507  bool LoopIdiomRecognize::processLoopStores(SmallVectorImpl<StoreInst *> &SL,  in processLoopStores()
    511  SetVector<StoreInst *> Heads, Tails;  in processLoopStores()
    512  SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;  in processLoopStores()
    [all …]

/external/llvm/unittests/Transforms/Utils/
  MemorySSA.cpp
    83   StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg);  in TEST_F() local
    97   MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreInst));  in TEST_F()
    124  StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg);  in TEST_F() local
    134  MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreInst));  in TEST_F()
    167  StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg);  in TEST_F() local
    180  MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreInst));  in TEST_F()
    228  StoreInst *S1 = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);  in TEST_F()
    229  StoreInst *S2 = B.CreateStore(ConstantInt::get(Int8, 1), Alloca);  in TEST_F()
    230  StoreInst *S3 = B.CreateStore(ConstantInt::get(Int8, 2), Alloca);  in TEST_F()
    237  for (StoreInst *V : {S1, S2, S3}) {  in TEST_F()
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/unittests/Analysis/
  MemorySSA.cpp
    135  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);  in TEST_F()
    159  StoreInst *LeftStore = B.CreateStore(B.getInt8(16), PointerArg);  in TEST_F()
    184  StoreInst *SecondEntryStore = B.CreateStore(B.getInt8(16), PointerArg);  in TEST_F()
    225  StoreInst *SI = B.CreateStore(B.getInt8(16), PointerArg);  in TEST_F()
    307  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);  in TEST_F()
    310  StoreInst *SideStore = B.CreateStore(B.getInt8(16), PointerArg);  in TEST_F()
    343  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);  in TEST_F()
    389  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);  in TEST_F()
    433  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);  in TEST_F()
    490  StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg);  in TEST_F() local
    [all …]

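Both MemorySSA.cpp test files above obtain their stores from IRBuilder: B.CreateStore returns a StoreInst*, which the tests then hand to MSSA.getMemoryAccess. Below is a minimal, self-contained sketch of that construction pattern only; the function and value names are invented for illustration, and the MemorySSA plumbing from the tests is omitted.

    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    // Build "void f(i8* p) { store i8 16, i8* p; ret void }" and return the
    // store, mirroring the B.CreateStore(B.getInt8(16), PointerArg) calls
    // listed above.
    static StoreInst *buildExampleStore(Module &M) {
      LLVMContext &Ctx = M.getContext();
      auto *FTy = FunctionType::get(Type::getVoidTy(Ctx),
                                    {Type::getInt8PtrTy(Ctx)},
                                    /*isVarArg=*/false);
      Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", &M);
      Value *PointerArg = &*F->arg_begin();

      BasicBlock *Entry = BasicBlock::Create(Ctx, "entry", F);
      IRBuilder<> B(Entry);
      StoreInst *SI = B.CreateStore(B.getInt8(16), PointerArg);
      B.CreateRetVoid();
      return SI;
    }
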
/external/llvm/lib/Transforms/Utils/
  PromoteMemoryToRegister.cpp
    63   } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {  in isAllocaPromotable()
    100  StoreInst *OnlyStore;
    128  if (StoreInst *SI = dyn_cast<StoreInst>(User)) {  in AnalyzeAlloca()
    190  (isa<StoreInst>(I) && isa<AllocaInst>(I->getOperand(1)));  in isInterestingInstruction()
    311  if (isa<LoadInst>(I) || isa<StoreInst>(I))  in removeLifetimeIntrinsicUsers()
    340  StoreInst *OnlyStore = Info.OnlyStore;  in rewriteSingleStoreAlloca()
    445  typedef SmallVector<std::pair<unsigned, StoreInst *>, 64> StoresByIndexTy;  in promoteSingleBlockAlloca()
    449  if (StoreInst *SI = dyn_cast<StoreInst>(U))  in promoteSingleBlockAlloca()
    469  static_cast<StoreInst *>(nullptr)),  in promoteSingleBlockAlloca()
    492  StoreInst *SI = cast<StoreInst>(AI->user_back());  in promoteSingleBlockAlloca()
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/lib/Transforms/Utils/
  PromoteMemoryToRegister.cpp
    77   } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {  in isAllocaPromotable()
    114  StoreInst *OnlyStore;
    142  if (StoreInst *SI = dyn_cast<StoreInst>(User)) {  in AnalyzeAlloca()
    199  (isa<StoreInst>(I) && isa<AllocaInst>(I->getOperand(1)));  in isInterestingInstruction()
    333  if (isa<LoadInst>(I) || isa<StoreInst>(I))  in removeLifetimeIntrinsicUsers()
    361  StoreInst *OnlyStore = Info.OnlyStore;  in rewriteSingleStoreAlloca()
    471  using StoresByIndexTy = SmallVector<std::pair<unsigned, StoreInst *>, 64>;  in promoteSingleBlockAlloca()
    475  if (StoreInst *SI = dyn_cast<StoreInst>(U))  in promoteSingleBlockAlloca()
    495  static_cast<StoreInst *>(nullptr)),  in promoteSingleBlockAlloca()
    528  StoreInst *SI = cast<StoreInst>(AI->user_back());  in promoteSingleBlockAlloca()
    [all …]

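Both copies of PromoteMemoryToRegister.cpp gate promotion on a walk over the alloca's users, classifying each one as a simple load or store. The sketch below is an assumption-laden reduction of that check (the real isAllocaPromotable also tolerates bitcasts, GEPs and lifetime intrinsics), and the helper name is invented.

    #include "llvm/IR/Instructions.h"

    using namespace llvm;

    // An alloca can be rewritten into SSA registers only if every user is a
    // non-volatile load from it or a non-volatile store *to* it; a store that
    // uses the alloca as the stored value would let its address escape.
    static bool looksPromotable(const AllocaInst *AI) {
      for (const User *U : AI->users()) {
        if (const auto *LI = dyn_cast<LoadInst>(U)) {
          if (LI->isVolatile())
            return false;
        } else if (const auto *SI = dyn_cast<StoreInst>(U)) {
          if (SI->getValueOperand() == AI || SI->isVolatile())
            return false;
        } else {
          return false; // any other user blocks promotion in this sketch
        }
      }
      return true;
    }
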
/external/llvm/lib/Transforms/Vectorize/
  LoadStoreVectorizer.cpp
    81   unsigned getAlignment(StoreInst *SI) const {  in getAlignment()
    212  if (StoreInst *SI = dyn_cast<StoreInst>(I))  in getPointerOperand()
    220  if (StoreInst *S = dyn_cast<StoreInst>(I))  in getPointerAddressSpace()
    436  if (isa<LoadInst>(I) || isa<StoreInst>(I)) {  in getVectorizablePrefixEndIdx()
    464  if (isa<StoreInst>(MemInstrValue) && isa<LoadInst>(ChainInstrValue) &&  in getVectorizablePrefixEndIdx()
    469  if (isa<LoadInst>(MemInstrValue) && isa<StoreInst>(ChainInstrValue) &&  in getVectorizablePrefixEndIdx()
    540  } else if (StoreInst *SI = dyn_cast<StoreInst>(&I)) {  in collectInstructions()
    665  StoreInst *S0 = cast<StoreInst>(Chain[0]);  in vectorizeStoreChain()
    670  StoreTy = cast<StoreInst>(V)->getValueOperand()->getType();  in vectorizeStoreChain()
    779  StoreInst *Store = cast<StoreInst>(Chain[I]);  in vectorizeStoreChain()
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/NVPTX/
  NVPTXLowerAggrCopies.cpp
    80   if (StoreInst *SI = dyn_cast<StoreInst>(LI->user_back())) {  in runOnFunction()
    107  StoreInst *SI = dyn_cast<StoreInst>(*LI->user_begin());  in runOnFunction()

/external/swiftshader/third_party/llvm-7.0/llvm/lib/Transforms/Vectorize/
  LoadStoreVectorizer.cpp
    139  unsigned getAlignment(StoreInst *SI) const {  in getAlignment()
    289  if (StoreInst *S = dyn_cast<StoreInst>(I))  in getPointerAddressSpace()
    581  assert(isa<StoreInst>(I) &&  in getVectorizablePrefix()
    587  if (isa<LoadInst>(I) || isa<StoreInst>(I)) {  in getVectorizablePrefix()
    643  if (isa<StoreInst>(MemInstr) && ChainLoad &&  in getVectorizablePrefix()
    648  if (MemLoad && isa<StoreInst>(ChainInstr) &&  in getVectorizablePrefix()
    764  } else if (StoreInst *SI = dyn_cast<StoreInst>(&I)) {  in collectInstructions()
    909  StoreInst *S0 = cast<StoreInst>(Chain[0]);  in vectorizeStoreChain()
    914  StoreTy = cast<StoreInst>(I)->getValueOperand()->getType();  in vectorizeStoreChain()
    1013 StoreInst *Store = cast<StoreInst>(Chain[I]);  in vectorizeStoreChain()
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/lib/Transforms/Instrumentation/
  ThreadSanitizer.cpp
    345  if (StoreInst *Store = dyn_cast<StoreInst>(I)) {  in chooseInstructionsToInstrument()
    365  Value *Addr = isa<StoreInst>(*I)  in chooseInstructionsToInstrument()
    366  ? cast<StoreInst>(I)->getPointerOperand()  in chooseInstructionsToInstrument()
    385  if (StoreInst *SI = dyn_cast<StoreInst>(I))  in isAtomic()
    427  else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))  in runOnFunction()
    490  bool IsWrite = isa<StoreInst>(*I);  in instrumentLoadOrStore()
    492  ? cast<StoreInst>(I)->getPointerOperand()  in instrumentLoadOrStore()
    506  Value *StoredValue = cast<StoreInst>(I)->getValueOperand();  in instrumentLoadOrStore()
    529  ? cast<StoreInst>(I)->getAlignment()  in instrumentLoadOrStore()
    614  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {  in instrumentAtomic()

/external/llvm/lib/Transforms/Instrumentation/
  ThreadSanitizer.cpp
    330  if (StoreInst *Store = dyn_cast<StoreInst>(I)) {  in chooseInstructionsToInstrument()
    350  Value *Addr = isa<StoreInst>(*I)  in chooseInstructionsToInstrument()
    351  ? cast<StoreInst>(I)->getPointerOperand()  in chooseInstructionsToInstrument()
    369  if (StoreInst *SI = dyn_cast<StoreInst>(I))  in isAtomic()
    403  else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))  in runOnFunction()
    461  bool IsWrite = isa<StoreInst>(*I);  in instrumentLoadOrStore()
    463  ? cast<StoreInst>(I)->getPointerOperand()  in instrumentLoadOrStore()
    470  Value *StoredValue = cast<StoreInst>(I)->getValueOperand();  in instrumentLoadOrStore()
    493  ? cast<StoreInst>(I)->getAlignment()  in instrumentLoadOrStore()
    589  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {  in instrumentAtomic()

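Both ThreadSanitizer.cpp versions repeatedly perform the same dispatch: test isa<StoreInst> and then take the pointer operand of either the store or the load. A tiny helper in that spirit follows; the name is hypothetical and not part of the pass.

    #include <cassert>

    #include "llvm/IR/Instructions.h"

    using namespace llvm;

    // Return the address touched by a load or store and report whether the
    // access is a write -- the same classification the instrumentation above
    // performs before emitting a __tsan_read*/__tsan_write* call.
    static Value *getAccessedAddress(Instruction *I, bool &IsWrite) {
      assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && "expected load or store");
      IsWrite = isa<StoreInst>(I);
      return IsWrite ? cast<StoreInst>(I)->getPointerOperand()
                     : cast<LoadInst>(I)->getPointerOperand();
    }
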
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/AMDGPU/
  AMDGPURewriteOutArguments.cpp
    141  StoreInst *SI = dyn_cast<StoreInst>(U.getUser());  in checkArgumentUses()
    179  U.getOperandNo() != StoreInst::getPointerOperandIndex())  in checkArgumentUses()
    288  SmallVector<std::pair<ReturnInst *, StoreInst *>, 4> ReplaceableStores;  in runOnFunction()
    308  StoreInst *SI = nullptr;  in runOnFunction()
    310  SI = dyn_cast<StoreInst>(Q.getInst());  in runOnFunction()
    324  for (std::pair<ReturnInst *, StoreInst *> Store : ReplaceableStores) {  in runOnFunction()

/external/llvm/lib/CodeGen/
  GCRootLowering.cpp
    138  if (isa<AllocaInst>(I) || isa<GetElementPtrInst>(I) || isa<StoreInst>(I) ||  in CouldBecomeSafePoint()
    162  if (StoreInst *SI = dyn_cast<StoreInst>(IP))  in InsertRootInitializers()
    172  StoreInst *SI = new StoreInst(  in InsertRootInitializers()
    217  new StoreInst(CI->getArgOperand(0), CI->getArgOperand(2), CI);  in PerformDefaultLowering()

/external/swiftshader/third_party/llvm-7.0/llvm/lib/CodeGen/
  GCRootLowering.cpp
    137  if (isa<AllocaInst>(I) || isa<GetElementPtrInst>(I) || isa<StoreInst>(I) ||  in CouldBecomeSafePoint()
    161  if (StoreInst *SI = dyn_cast<StoreInst>(IP))  in InsertRootInitializers()
    171  StoreInst *SI = new StoreInst(  in InsertRootInitializers()
    216  new StoreInst(CI->getArgOperand(0), CI->getArgOperand(2), CI);  in PerformDefaultLowering()

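Both GCRootLowering.cpp copies build their stores with the raw StoreInst constructor rather than an IRBuilder: new StoreInst(Val, Ptr, InsertBefore) creates the instruction and splices it in front of the insertion point in one step. A hedged sketch of that style, assuming the root is an alloca of pointer type (the names are illustrative, not taken from the pass):

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/Instructions.h"

    using namespace llvm;

    // Null-initialize a GC root slot: emit "store T* null, T** %root" directly
    // before IP, in the spirit of the InsertRootInitializers() hits above.
    static StoreInst *initializeRoot(AllocaInst *Root, Instruction *IP) {
      auto *PtrTy = cast<PointerType>(Root->getAllocatedType());
      return new StoreInst(ConstantPointerNull::get(PtrTy), Root, IP);
    }
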
/external/llvm/lib/Transforms/IPO/
  GlobalOpt.cpp
    168  if (StoreInst *SI = dyn_cast<StoreInst>(U)) {  in CleanupPointerRootUsers()
    256  } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {  in CleanupConstantGlobalUsers()
    333  if (StoreInst *SI = dyn_cast<StoreInst>(I))  in isSafeSROAElementUse()
    586  } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {  in AllUsesOfValueWillTrapIfNull()
    630  } else if (isa<StoreInst>(U)) {  in AllUsesOfLoadedValueWillTrapIfNull()
    647  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {  in OptimizeAwayTrappingUsesOfValue()
    728  } else if (isa<StoreInst>(GlobalUser)) {  in OptimizeAwayTrappingUsesOfLoads()
    846  if (StoreInst *SI = dyn_cast<StoreInst>(GV->user_back())) {  in OptimizeGlobalAddressOfMalloc()
    848  new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0,  in OptimizeGlobalAddressOfMalloc()
    896  cast<StoreInst>(InitBool->user_back())->eraseFromParent();  in OptimizeGlobalAddressOfMalloc()
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/lib/Transforms/InstCombine/
  InstCombineLoadStoreAlloca.cpp
    518  static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value *V) {  in combineStoreToNewValue()
    527  StoreInst *NewStore = IC.Builder.CreateAlignedStore(  in combineStoreToNewValue()
    638  auto *SI = dyn_cast<StoreInst>(U);  in combineLoadToOperationType()
    647  auto *SI = cast<StoreInst>(*UI++);  in combineLoadToOperationType()
    966  static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {  in canSimplifyNullStoreOrGEP()
    1046 StoreInst *SI = new StoreInst(UndefValue::get(LI.getType()),  in visitLoadInst()
    1175 static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {  in combineStoreToValueType()
    1207 static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {  in unpackStoreToAggregate()
    1352 StoreInst &SI) {  in removeBitcastsFromLoadStoreOnMinMax()
    1367 auto *SI = dyn_cast<StoreInst>(U);  in removeBitcastsFromLoadStoreOnMinMax()
    [all …]

/external/llvm/include/llvm/Analysis/
  MemoryLocation.h
    26   class StoreInst;  variable
    66   static MemoryLocation get(const StoreInst *SI);
    73   else if (auto *I = dyn_cast<StoreInst>(Inst))  in get()

/external/llvm/lib/Transforms/InstCombine/
  InstCombineLoadStoreAlloca.cpp
    394  static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value *V) {  in combineStoreToNewValue()
    400  StoreInst *NewStore = IC.Builder->CreateAlignedStore(  in combineStoreToNewValue()
    480  auto *SI = dyn_cast<StoreInst>(U);  in combineLoadToOperationType()
    488  auto *SI = cast<StoreInst>(*UI++);  in combineLoadToOperationType()
    856  new StoreInst(UndefValue::get(LI.getType()),  in visitLoadInst()
    869  new StoreInst(UndefValue::get(LI.getType()),  in visitLoadInst()
    995  static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {  in combineStoreToValueType()
    1020 static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {  in unpackStoreToAggregate()
    1148 Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {  in visitStoreInst()
    1210 if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {  in visitStoreInst()
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/lib/Transforms/IPO/
  GlobalOpt.cpp
    209  if (StoreInst *SI = dyn_cast<StoreInst>(U)) {  in CleanupPointerRootUsers()
    297  } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {  in CleanupConstantGlobalUsers()
    409  if (StoreInst *SI = dyn_cast<StoreInst>(I))  in isSafeSROAElementUse()
    624  } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {  in AllUsesOfValueWillTrapIfNull()
    668  } else if (isa<StoreInst>(U)) {  in AllUsesOfLoadedValueWillTrapIfNull()
    689  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {  in OptimizeAwayTrappingUsesOfValue()
    769  } else if (isa<StoreInst>(GlobalUser)) {  in OptimizeAwayTrappingUsesOfLoads()
    890  if (StoreInst *SI = dyn_cast<StoreInst>(GV->user_back())) {  in OptimizeGlobalAddressOfMalloc()
    892  new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0,  in OptimizeGlobalAddressOfMalloc()
    940  cast<StoreInst>(InitBool->user_back())->eraseFromParent();  in OptimizeGlobalAddressOfMalloc()
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/include/llvm/Transforms/Vectorize/
  SLPVectorizer.h
    46   class StoreInst;  variable
    60   using StoreList = SmallVector<StoreInst *, 8>;
    143  bool vectorizeStores(ArrayRef<StoreInst *> Stores, slpvectorizer::BoUpSLP &R);

/external/llvm/lib/Analysis/
  Delinearization.cpp
    75   else if (StoreInst *Store = dyn_cast<StoreInst>(&Inst))  in getPointerOperand()
    88   if (!isa<StoreInst>(Inst) && !isa<LoadInst>(Inst) &&  in print()

/external/llvm/lib/IR/
  Instruction.cpp
    356  if (const StoreInst *SI = dyn_cast<StoreInst>(I1))  in haveSameSpecialState()
    357  return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&  in haveSameSpecialState()
    358  (SI->getAlignment() == cast<StoreInst>(I2)->getAlignment() ||  in haveSameSpecialState()
    360  SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&  in haveSameSpecialState()
    361  SI->getSynchScope() == cast<StoreInst>(I2)->getSynchScope();  in haveSameSpecialState()
    497  return !cast<StoreInst>(this)->isUnordered();  in mayReadFromMemory()
    534  return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;  in isAtomic()

/external/swiftshader/third_party/llvm-7.0/llvm/include/llvm/Analysis/
  MemoryLocation.h
    27   class StoreInst;  variable
    75   static MemoryLocation get(const StoreInst *SI);
    87   return get(cast<StoreInst>(Inst));  in getOrNone()

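Both MemoryLocation.h headers above only need a forward declaration of StoreInst because MemoryLocation::get(const StoreInst *) takes it by pointer. Below is a short sketch of how such a location is typically consumed, assuming an AAResults reference is available from the surrounding pass; the helper name is invented.

    #include "llvm/Analysis/AliasAnalysis.h"
    #include "llvm/Analysis/MemoryLocation.h"
    #include "llvm/IR/Instructions.h"

    using namespace llvm;

    // Could this store and this load touch overlapping memory?
    // MemoryLocation::get packages the pointer operand and access size of
    // each instruction for the alias query.
    static bool mayOverlap(AAResults &AA, StoreInst *SI, LoadInst *LI) {
      return AA.alias(MemoryLocation::get(SI), MemoryLocation::get(LI)) != NoAlias;
    }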