/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/Scalar/

D | LoopIdiomRecognize.cpp |
    174  bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
    482  unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());   in isLegalStore()   local
    483  if (StoreSize != Stride && StoreSize != -Stride)   in isLegalStore()
    709  unsigned StoreSize = 0;   in processLoopStores()   local
    717  StoreSize += DL->getTypeStoreSize(I->getValueOperand()->getType());   in processLoopStores()
    729  if (StoreSize != Stride && StoreSize != -Stride)   in processLoopStores()
    732  bool NegStride = StoreSize == -Stride;   in processLoopStores()
    734  if (processLoopStridedStore(StorePtr, StoreSize,   in processLoopStores()
    800  const SCEV *BECount, unsigned StoreSize,   in mayLoopAccessLocation()   argument
    812  StoreSize);   in mayLoopAccessLocation()
    [all …]

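The LoopIdiomRecognize hits above all feed one legality test: the number of bytes stored per iteration must equal the pointer stride, in either direction, before the loop can be rewritten as a memset/memcpy. A minimal standalone sketch of that check (the helper name and the main() driver are hypothetical, not LLVM code):

```cpp
#include <cstdint>
#include <iostream>

// Simplified stand-in for the "StoreSize != Stride && StoreSize != -Stride"
// rejection seen above: consecutive iterations must write adjacent,
// non-overlapping bytes, walking the buffer either forwards or backwards.
static bool isMemsetIdiomCandidate(int64_t StoreSizeInBytes, int64_t StrideInBytes) {
  if (StoreSizeInBytes <= 0)
    return false;
  return StoreSizeInBytes == StrideInBytes || StoreSizeInBytes == -StrideInBytes;
}

int main() {
  std::cout << isMemsetIdiomCandidate(4, 4) << '\n';   // forward i32 stores: candidate
  std::cout << isMemsetIdiomCandidate(4, -4) << '\n';  // backward walk: still a candidate
  std::cout << isMemsetIdiomCandidate(4, 8) << '\n';   // gaps between stores: rejected
}
```
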
D | MemCpyOptimizer.cpp |
    174  int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());   in addStore()   local
    176  addRange(OffsetFromFirst, StoreSize,   in addStore()

D | SROA.cpp |
    3965  uint64_t StoreSize = Ty->getBitWidth() / 8;   in presplitLoadsAndStores()   local
    3966  assert(StoreSize > 0 && "Cannot have a zero-sized integer store!");   in presplitLoadsAndStores()
    3969  assert(StoreSize == Offsets.S->endOffset() - Offsets.S->beginOffset() &&   in presplitLoadsAndStores()
    3972  assert(BaseOffset + StoreSize > BaseOffset &&   in presplitLoadsAndStores()
    4044  PartSize = (Idx < Size ? Offsets.Splits[Idx] : StoreSize) - PartOffset;   in presplitLoadsAndStores()

/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/Utils/

D | VNCoercion.cpp |
    27   uint64_t StoreSize = DL.getTypeSizeInBits(StoredTy);   in canCoerceMustAliasedValueToLoad()   local
    30   if (llvm::alignTo(StoreSize, 8) != StoreSize)   in canCoerceMustAliasedValueToLoad()
    34   if (StoreSize < DL.getTypeSizeInBits(LoadTy))   in canCoerceMustAliasedValueToLoad()
    191  uint64_t StoreSize = WriteSizeInBits / 8; // Convert to bytes.   in analyzeLoadFromClobberingWrite()   local
    196  isAAFailure = StoreOffset + int64_t(StoreSize) <= LoadOffset;   in analyzeLoadFromClobberingWrite()
    208  StoreOffset + StoreSize < LoadOffset + LoadSize)   in analyzeLoadFromClobberingWrite()
    237  uint64_t StoreSize =   in analyzeLoadFromClobberingStore()   local
    239  return analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, StorePtr, StoreSize,   in analyzeLoadFromClobberingStore()
    358  uint64_t StoreSize = (DL.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;   in getStoreValueForLoadHelper()   local
    365  SrcVal = Helper.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize * 8));   in getStoreValueForLoadHelper()
    [all …]

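The canCoerceMustAliasedValueToLoad() snippets above reduce to two size constraints: the stored value must occupy a whole number of bytes, and it must be at least as wide as the load it is meant to satisfy. A simplified, self-contained sketch of that test (names are hypothetical; the real function also inspects the involved types):

```cpp
#include <cstdint>
#include <iostream>

// Mirrors "alignTo(StoreSize, 8) != StoreSize" (reject non-byte-sized values)
// and "StoreSize < DL.getTypeSizeInBits(LoadTy)" (the store must cover the load).
static bool canCoerceStoreToLoad(uint64_t StoreSizeInBits, uint64_t LoadSizeInBits) {
  if (StoreSizeInBits % 8 != 0)
    return false;
  return StoreSizeInBits >= LoadSizeInBits;
}

int main() {
  std::cout << canCoerceStoreToLoad(64, 32) << '\n';  // i64 store feeding an i32 load: ok
  std::cout << canCoerceStoreToLoad(32, 64) << '\n';  // store too small: rejected
  std::cout << canCoerceStoreToLoad(17, 8)  << '\n';  // odd bit width: rejected
}
```
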
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/

D | AArch64StackTagging.cpp |
    109  int64_t StoreSize = DL->getTypeStoreSize(SI->getOperand(0)->getType());   in addStore()   local
    110  if (!addRange(Offset, Offset + StoreSize, SI))   in addStore()
    113  applyStore(IRB, Offset, Offset + StoreSize, SI->getOperand(0));   in addStore()
    118  uint64_t StoreSize = cast<ConstantInt>(MSI->getLength())->getZExtValue();   in addMemSet()   local
    119  if (!addRange(Offset, Offset + StoreSize, MSI))   in addMemSet()
    122  applyMemSet(IRB, Offset, Offset + StoreSize,   in addMemSet()

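addStore()/addMemSet() above both register the byte range [Offset, Offset + StoreSize) that an initializing store or memset covers. A rough sketch of that kind of bookkeeping, assuming overlapping contributions are rejected so the tracked ranges stay disjoint (class and method names are made up for illustration):

```cpp
#include <cstdint>
#include <iostream>
#include <iterator>
#include <map>

// Tracks disjoint half-open byte ranges [Start, End); a new range is only
// accepted if it does not overlap any range recorded so far.
class InitRangeTracker {
  std::map<int64_t, int64_t> Ranges; // start offset -> end offset
public:
  bool addRange(int64_t Start, int64_t End) {
    if (Start >= End)
      return false;
    auto It = Ranges.lower_bound(Start);
    if (It != Ranges.end() && It->first < End)
      return false; // overlaps a range that starts inside [Start, End)
    if (It != Ranges.begin() && std::prev(It)->second > Start)
      return false; // overlaps a range that starts before Start
    Ranges[Start] = End;
    return true;
  }
};

int main() {
  InitRangeTracker T;
  std::cout << T.addRange(0, 8)  << '\n'; // 8-byte store at offset 0: accepted
  std::cout << T.addRange(8, 16) << '\n'; // adjacent store: accepted
  std::cout << T.addRange(4, 12) << '\n'; // overlaps both: rejected
}
```
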
D | AArch64LoadStoreOptimizer.cpp |
    581   int StoreSize = TII->getMemScale(StoreInst);   in isLdOffsetInRangeOfSt()   local
    584   : getLdStOffsetOp(StoreInst).getImm() * StoreSize;   in isLdOffsetInRangeOfSt()
    589   (UnscaledLdOffset + LoadSize <= (UnscaledStOffset + StoreSize));   in isLdOffsetInRangeOfSt()
    1015  int StoreSize = TII->getMemScale(*StoreI);   in promoteLoadFromStore()   local
    1026  if (LoadSize == StoreSize && (LoadSize == 4 || LoadSize == 8)) {   in promoteLoadFromStore()
    1059  assert(LoadSize <= StoreSize && "Invalid load size");   in promoteLoadFromStore()
    1065  : getLdStOffsetOp(*StoreI).getImm() * StoreSize;   in promoteLoadFromStore()
    1073  (UnscaledLdOffset + LoadSize) <= UnscaledStOffset + StoreSize) &&   in promoteLoadFromStore()

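isLdOffsetInRangeOfSt() and promoteLoadFromStore() above check that the loaded bytes are fully contained in the bytes written by an earlier store before the load is satisfied from the store's register. A simplified version of that containment test (the unscaling of immediates and the surrounding target checks are omitted):

```cpp
#include <cstdint>
#include <iostream>

// The load's byte range [LdOffset, LdOffset + LoadSize) must lie entirely
// inside the store's byte range [StOffset, StOffset + StoreSize).
static bool loadCoveredByStore(int64_t LdOffset, int64_t LoadSize,
                               int64_t StOffset, int64_t StoreSize) {
  return LdOffset >= StOffset &&
         LdOffset + LoadSize <= StOffset + StoreSize;
}

int main() {
  std::cout << loadCoveredByStore(4, 4, 0, 8) << '\n'; // 4-byte load from the upper half of an 8-byte store: ok
  std::cout << loadCoveredByStore(6, 4, 0, 8) << '\n'; // load straddles the end of the store: no
}
```
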
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/

D | StackSlotColoring.cpp |
    449  unsigned StoreSize = 0;   in RemoveDeadStores()   local
    458  if (!(StoreReg = TII->isStoreToStackSlot(*NextMI, SecondSS, StoreSize)))   in RemoveDeadStores()
    461  LoadSize != StoreSize)   in RemoveDeadStores()

D | MachineFunction.cpp |
    1069  uint64_t StoreSize = DL.getTypeStoreSize(A->getType());   in CanShareConstantPoolEntry()   local
    1070  if (StoreSize != DL.getTypeStoreSize(B->getType()) || StoreSize > 128)   in CanShareConstantPoolEntry()
    1073  Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);   in CanShareConstantPoolEntry()

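CanShareConstantPoolEntry() above only merges two constant-pool entries when both constants occupy the same number of bytes in memory (and no more than 128 bytes), and then compares their bit patterns. A hedged sketch of that shape, using memcmp in place of the integer bitcast-and-compare the real code performs:

```cpp
#include <cstdint>
#include <cstring>
#include <iostream>

// Same store size, not oversized, identical raw bytes -> the two constants
// could share one constant-pool slot.
static bool canShareConstant(const void *A, const void *B,
                             uint64_t StoreSizeA, uint64_t StoreSizeB) {
  if (StoreSizeA != StoreSizeB || StoreSizeA > 128)
    return false;
  return std::memcmp(A, B, StoreSizeA) == 0;
}

int main() {
  float F = 1.0f;
  uint32_t I = 0x3f800000u; // same bit pattern as 1.0f
  std::cout << canShareConstant(&F, &I, sizeof(F), sizeof(I)) << '\n'; // 1: shareable
  double D = 1.0;
  std::cout << canShareConstant(&F, &D, sizeof(F), sizeof(D)) << '\n'; // 0: sizes differ
}
```
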
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/PowerPC/

D | PPCHazardRecognizers.h |
    73  uint64_t StoreSize[4];   variable

D | PPCHazardRecognizers.cpp |
    311  if (int64_t(StoreOffset[i]+StoreSize[i]) > LoadOffset) return true;   in isLoadOfStoredAddress()
    404  StoreSize[NumStores] = MO->getSize();   in EmitInstruction()

D | PPCISelLowering.cpp |
    6916  const unsigned StoreSize = LocVT.getStoreSize();   in CC_AIX()   local
    6919  State.AllocateStack(IsPPC64 ? 8 : StoreSize, 4);   in CC_AIX()
    6929  for (unsigned I = 0; I < StoreSize; I += PtrByteSize) {   in CC_AIX()
    8664  unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize();   in LowerBUILD_VECTOR()   local
    8665  if (StoreSize > 4) {   in LowerBUILD_VECTOR()
    8671  if (StoreSize < 4)   in LowerBUILD_VECTOR()

/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/Hexagon/

D | HexagonLoopIdiomRecognition.cpp |
    1940  unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());   in isLegalStore()   local
    1941  if (StoreSize != unsigned(std::abs(Stride)))   in isLegalStore()
    1970  const SCEV *BECount, unsigned StoreSize,   in mayLoopAccessLocation()   argument
    1982  StoreSize);   in mayLoopAccessLocation()
    2018  unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());   in processCopyingStore()   local
    2019  if (Stride != StoreSize)   in processCopyingStore()
    2055  if (StoreSize != 4 || DL->getTypeSizeInBits(BECountTy) > 32) {   in processCopyingStore()
    2074  StoreSize, *AA, Ignore1)) {   in processCopyingStore()
    2078  BECount, StoreSize, *AA, Ignore1)) {   in processCopyingStore()
    2121  StoreSize, *AA, Ignore2))   in processCopyingStore()
    [all …]

/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/

D | AMDGPUTargetTransformInfo.cpp |
    247  unsigned GCNTTIImpl::getStoreVectorFactor(unsigned VF, unsigned StoreSize,   in getStoreVectorFactor()   argument
    250  unsigned VecRegBitWidth = VF * StoreSize;   in getStoreVectorFactor()
    252  return 128 / StoreSize;   in getStoreVectorFactor()

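getStoreVectorFactor() above clamps the vectorization factor so that VF * StoreSize stays within a 128-bit store. A simplified reconstruction of that clamp (the real implementation has additional conditions on the store chain and vector type that are omitted here):

```cpp
#include <iostream>

// If VF elements of StoreSizeInBits each would exceed 128 bits, shrink the
// factor so the resulting vector still fits in a 128-bit store.
static unsigned storeVectorFactor(unsigned VF, unsigned StoreSizeInBits) {
  unsigned VecRegBitWidth = VF * StoreSizeInBits;
  if (VecRegBitWidth > 128)
    return 128 / StoreSizeInBits;
  return VF;
}

int main() {
  std::cout << storeVectorFactor(8, 32) << '\n'; // 8 x i32 = 256 bits -> clamped to 4
  std::cout << storeVectorFactor(4, 32) << '\n'; // 4 x i32 = 128 bits -> kept at 4
}
```
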
D | AMDGPUTargetTransformInfo.h |
    155  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,

D | AMDGPUISelLowering.cpp |
    43  unsigned StoreSize = VT.getStoreSizeInBits();   in getEquivalentMemType()   local
    44  if (StoreSize <= 32)   in getEquivalentMemType()
    45  return EVT::getIntegerVT(Ctx, StoreSize);   in getEquivalentMemType()
    47  assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");   in getEquivalentMemType()
    48  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);   in getEquivalentMemType()

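getEquivalentMemType() above picks a plain integer type for values of at most 32 stored bits and a vector of i32 lanes otherwise (so larger sizes must be a multiple of 32). A toy illustration of that rule, returning type names as strings instead of building EVTs:

```cpp
#include <cassert>
#include <iostream>
#include <string>

// <= 32 bits -> iN; otherwise -> v(N/32)i32, with N required to be a
// multiple of 32, as asserted in the snippet above.
static std::string equivalentMemType(unsigned StoreSizeInBits) {
  if (StoreSizeInBits <= 32)
    return "i" + std::to_string(StoreSizeInBits);
  assert(StoreSizeInBits % 32 == 0 && "Store size not a multiple of 32");
  return "v" + std::to_string(StoreSizeInBits / 32) + "i32";
}

int main() {
  std::cout << equivalentMemType(16) << '\n';  // i16
  std::cout << equivalentMemType(96) << '\n';  // v3i32
}
```
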
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/include/llvm/Analysis/

D | TargetTransformInfo.h |
    1127  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
    1391  virtual unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
    1864  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,   in getStoreVectorFactor()   argument
    1867  return Impl.getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);   in getStoreVectorFactor()

D | TargetTransformInfoImpl.h |
    600  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,   in getStoreVectorFactor()   argument

/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/

D | X86AvoidStoreForwardingBlocks.cpp |
    629  int64_t StoreDispImm, unsigned StoreSize) {   in isBlockingStore()   argument
    631  (StoreDispImm <= LoadDispImm + (LoadSize - StoreSize)));   in isBlockingStore()

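isBlockingStore() above flags a narrower store whose displacement lands inside the byte range of a wider load, which would defeat store-to-load forwarding. A simplified version of that test; the lower bound on the store displacement is an assumption, since only the upper-bound comparison is visible in the snippet:

```cpp
#include <cstdint>
#include <iostream>

// The store blocks forwarding when it starts at or after the load and its
// last byte is still within the bytes the load reads.
static bool isBlockingStore(int64_t LoadDispImm, unsigned LoadSize,
                            int64_t StoreDispImm, unsigned StoreSize) {
  return StoreDispImm >= LoadDispImm &&
         StoreDispImm <= LoadDispImm + (static_cast<int64_t>(LoadSize) -
                                        static_cast<int64_t>(StoreSize));
}

int main() {
  std::cout << isBlockingStore(0, 16, 4, 4)  << '\n'; // 4-byte store inside a 16-byte load: blocks
  std::cout << isBlockingStore(0, 16, 16, 4) << '\n'; // store past the end of the load: does not block
}
```
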
D | X86ISelDAGToDAG.cpp |
    491  unsigned StoreSize = N->getMemoryVT().getStoreSize();   in useNonTemporalLoad()   local
    493  if (N->getAlignment() < StoreSize)   in useNonTemporalLoad()
    496  switch (StoreSize) {   in useNonTemporalLoad()

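useNonTemporalLoad() above requires the access to be at least as aligned as it is wide and then switches on the store size. A distilled sketch, with the per-width subtarget feature checks left out as an assumption:

```cpp
#include <iostream>

// Only sufficiently aligned 16-, 32-, or 64-byte accesses are considered;
// the real code additionally gates each width on SSE/AVX features.
static bool mayUseNonTemporalLoad(unsigned StoreSize, unsigned Alignment) {
  if (Alignment < StoreSize)
    return false;
  switch (StoreSize) {
  case 16:
  case 32:
  case 64:
    return true;
  default:
    return false;
  }
}

int main() {
  std::cout << mayUseNonTemporalLoad(16, 16) << '\n'; // aligned 128-bit access: candidate
  std::cout << mayUseNonTemporalLoad(32, 16) << '\n'; // under-aligned: rejected
}
```
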
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Analysis/

D | TargetTransformInfo.cpp |
    836  unsigned StoreSize,   in getStoreVectorFactor()   argument
    839  return TTIImpl->getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);   in getStoreVectorFactor()

/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/Instrumentation/

D | MemorySanitizer.cpp |
    1125  unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());   in storeOrigin()   local
    1127  paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,   in storeOrigin()
    1133  paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,   in storeOrigin()
    1154  paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,   in storeOrigin()
    3920  unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());   in visitCallSite()   local
    3921  MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,   in visitCallSite()