/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/
SILoadStoreOptimizer.cpp
    118  unsigned EltSize;  member
    224  unsigned read2Opcode(unsigned EltSize) const;
    225  unsigned read2ST64Opcode(unsigned EltSize) const;
    228  unsigned write2Opcode(unsigned EltSize) const;
    229  unsigned write2ST64Opcode(unsigned EltSize) const;
    479  EltSize =  in setMI()
    484  EltSize =  in setMI()
    489  EltSize = AMDGPU::getSMRDEncodedOffset(STM, 4);  in setMI()
    492  EltSize = 4;  in setMI()
    732  if ((CI.Offset % CI.EltSize != 0) || (Paired.Offset % CI.EltSize != 0))  in offsetsCanBeCombined()
    [all …]

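The check on line 732 above is the gating condition for merging two DS accesses: both byte offsets must be exact multiples of the element size, otherwise they cannot be re-expressed as the element-indexed offset fields of a read2/write2 instruction. A minimal standalone sketch of that precondition (plain C++ with illustrative names, not the LLVM API):

    #include <cstdint>

    // Both offsets must be element-aligned before a read2/write2 merge is
    // even considered; the encoded offsets count elements, not bytes.
    bool offsetsCanBeCombined(uint32_t Offset0, uint32_t Offset1,
                              uint32_t EltSize) {
      if (Offset0 % EltSize != 0 || Offset1 % EltSize != 0)
        return false;
      return true; // further distance/encoding checks follow in the real pass
    }
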
SIRegisterInfo.cpp
    636  const unsigned EltSize = 4;  in buildSpillLoadStore()  local
    638  unsigned NumSubRegs = AMDGPU::getRegBitWidth(RC->getID()) / (EltSize * CHAR_BIT);  in buildSpillLoadStore()
    639  unsigned Size = NumSubRegs * EltSize;  in buildSpillLoadStore()
    650  assert((Offset % EltSize) == 0 && "unexpected VGPR spill offset");  in buildSpillLoadStore()
    652  if (!isUInt<12>(Offset + Size - EltSize)) {  in buildSpillLoadStore()
    686  for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += EltSize) {  in buildSpillLoadStore()
    710  MachinePointerInfo PInfo = BasePtrInfo.getWithOffset(EltSize * i);  in buildSpillLoadStore()
    713  EltSize, MinAlign(Align, EltSize * i));  in buildSpillLoadStore()
    774  unsigned EltSize = 4;  in spillSGPR()  local
    777  ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);  in spillSGPR()
    [all …]

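As the lines above show, buildSpillLoadStore walks a wide register tuple in 4-byte slices: the subregister count is the register bit width divided by EltSize * CHAR_BIT, and the frame offset advances by EltSize per slice. A simplified, self-contained sketch of that loop shape (the printf stands in for building the actual spill instruction):

    #include <cassert>
    #include <climits>
    #include <cstdio>

    void buildSpillSlices(unsigned RegBits, unsigned BaseOffset) {
      const unsigned EltSize = 4; // bytes per spill slice
      unsigned NumSubRegs = RegBits / (EltSize * CHAR_BIT);
      unsigned Size = NumSubRegs * EltSize;
      assert(BaseOffset % EltSize == 0 && "unexpected VGPR spill offset");
      (void)Size; // the real code also range-checks Offset + Size - EltSize
      for (unsigned i = 0, Offset = BaseOffset; i != NumSubRegs;
           ++i, Offset += EltSize)
        printf("spill subreg %u at offset %u\n", i, Offset);
    }

For a 128-bit register, buildSpillSlices(128, 0) walks four slices at offsets 0, 4, 8, 12.
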
AMDGPUTargetTransformInfo.cpp
    556  unsigned EltSize  in getVectorInstrCost()  local
    558  if (EltSize < 32) {  in getVectorInstrCost()
    559  if (EltSize == 16 && Index == 0 && ST->has16BitInsts())  in getVectorInstrCost()
    951  unsigned EltSize  in getVectorInstrCost()  local
    953  if (EltSize < 32) {  in getVectorInstrCost()

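The pattern in both getVectorInstrCost overloads is the same: lanes of 32 bits or more map directly onto registers, while sub-32-bit lanes need extra work unless they are a 16-bit element in lane 0 on a subtarget with 16-bit instructions. A hedged sketch of that shape (the concrete cost numbers here are illustrative, not the values LLVM returns):

    unsigned vectorInstrCost(unsigned EltSize, unsigned Index,
                             bool Has16BitInsts) {
      if (EltSize < 32) {
        if (EltSize == 16 && Index == 0 && Has16BitInsts)
          return 0; // already sits in the low half of a 32-bit register
        return 2;   // illustrative: needs a shift/mask within a 32-bit register
      }
      return 1;     // illustrative: full-width lanes are direct
    }
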
AMDGPULegalizerInfo.cpp
    102  const int EltSize = EltTy.getSizeInBits();  in moreEltsToNext32Bit()  local
    105  assert(EltSize < 32);  in moreEltsToNext32Bit()
    107  const int NewNumElts = (32 * NextMul32 + EltSize - 1) / EltSize;  in moreEltsToNext32Bit()
    139  const int EltSize = Ty.getElementType().getSizeInBits();  in isRegisterType()  local
    140  return EltSize == 32 || EltSize == 64 ||  in isRegisterType()
    141  (EltSize == 16 && Ty.getNumElements() % 2 == 0) ||  in isRegisterType()
    142  EltSize == 128 || EltSize == 256;  in isRegisterType()
    807  unsigned EltSize = EltTy.getSizeInBits();  in AMDGPULegalizerInfo()  local
    808  if (EltSize > Align &&  in AMDGPULegalizerInfo()
    809  (EltSize / Align < DstTy.getNumElements())) {  in AMDGPULegalizerInfo()
    [all …]

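Line 107 is a ceiling division: the vector is widened until its total bit size reaches the next multiple of 32, which is why the assert on line 105 restricts this path to sub-32-bit elements. A standalone sketch with the intermediate step made explicit:

    #include <cassert>

    unsigned moreEltsToNext32Bit(unsigned NumElts, unsigned EltSize) {
      assert(EltSize < 32);
      unsigned SizeInBits = NumElts * EltSize;
      unsigned NextMul32 = (SizeInBits + 31) / 32;     // round size up to 32-bit units
      return (32 * NextMul32 + EltSize - 1) / EltSize; // ceil-div back to elements
    }

For example, a <3 x s16> value (48 bits) yields NextMul32 = 2 and widens to 4 elements (64 bits).
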
SIRegisterInfo.h
    234  unsigned EltSize) const;

SIInstrInfo.cpp
    301  unsigned EltSize;  in getMemOperandWithOffset()  local
    303  EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16;  in getMemOperandWithOffset()
    307  EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8;  in getMemOperandWithOffset()
    311  EltSize *= 64;  in getMemOperandWithOffset()
    317  Offset = EltSize * Offset0;  in getMemOperandWithOffset()
    688  unsigned EltSize = 4;  in copyPhysReg()  local
    694  EltSize = 8;  in copyPhysReg()
    697  EltSize = 4;  in copyPhysReg()
    711  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize);  in copyPhysReg()
    792  unsigned EltSize = 4;  in materializeImmediate()  local
    [all …]

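Lines 301-317 recover the byte offset of a two-offset DS access: the encoded offset0 field counts elements, so it is scaled by the element size, with an extra factor of 64 for the st64 variants. A simplified sketch of that scaling (IsRead2 decides which register supplies the size; illustrative signature, not the LLVM API):

    unsigned dsByteOffset(unsigned RegBits, bool IsRead2, bool IsST64,
                          unsigned Offset0) {
      // A read2 destination holds both elements, so divide by 16 instead of 8.
      unsigned EltSize = IsRead2 ? RegBits / 16 : RegBits / 8;
      if (IsST64)
        EltSize *= 64; // st64 variants stride in units of 64 elements
      return EltSize * Offset0;
    }
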
AMDGPURegisterBankInfo.cpp
    1368  int EltSize = Ty.getScalarSizeInBits();  in selectStoreIntrinsic()  local
    1372  if (EltSize != 32)  in selectStoreIntrinsic()
    2672  unsigned EltSize = getSizeInBits(MI.getOperand(2).getReg(), MRI, *TRI);  in getInstrMapping()  local
    2675  OpdsMapping[2] = AMDGPU::getValueMapping(BankID, EltSize);  in getInstrMapping()

/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/Utils/
X86ShuffleDecode.cpp
    415  void DecodeEXTRQIMask(unsigned NumElts, unsigned EltSize, int Len, int Idx,  in DecodeEXTRQIMask()  argument
    425  if (0 != (Len % EltSize) || 0 != (Idx % EltSize))  in DecodeEXTRQIMask()
    439  Len /= EltSize;  in DecodeEXTRQIMask()
    440  Idx /= EltSize;  in DecodeEXTRQIMask()
    452  void DecodeINSERTQIMask(unsigned NumElts, unsigned EltSize, int Len, int Idx,  in DecodeINSERTQIMask()  argument
    462  if (0 != (Len % EltSize) || 0 != (Idx % EltSize))  in DecodeINSERTQIMask()
    476  Len /= EltSize;  in DecodeINSERTQIMask()
    477  Idx /= EltSize;  in DecodeINSERTQIMask()

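The EXTRQ/INSERTQ immediates give a bit-granular length and index; they are only representable as a per-element shuffle mask when both are multiples of the element size, after which they are rescaled to element units (lines 425, 439-440). A simplified sketch of the decode (the real decoder also distinguishes zeroed upper lanes from undefined ones; SM_SentinelUndef here is a stand-in for LLVM's sentinel):

    #include <vector>

    static const int SM_SentinelUndef = -1;

    bool decodeExtrqMask(unsigned NumElts, unsigned EltSize, int Len, int Idx,
                         std::vector<int> &Mask) {
      if (0 != (Len % EltSize) || 0 != (Idx % EltSize))
        return false; // field is not a whole number of elements
      Len /= EltSize;
      Idx /= EltSize;
      for (int i = 0; i != Len; ++i)
        Mask.push_back(Idx + i);          // extracted field lands in low lanes
      for (unsigned i = Len; i != NumElts; ++i)
        Mask.push_back(SM_SentinelUndef); // simplified: rest marked undef
      return true;
    }
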
X86ShuffleDecode.h
    150  void DecodeEXTRQIMask(unsigned NumElts, unsigned EltSize, int Len, int Idx,
    154  void DecodeINSERTQIMask(unsigned NumElts, unsigned EltSize, int Len, int Idx,

/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Analysis/
Loads.cpp
    203  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),  in isDereferenceableAndAlignedInLoop()  local
    213  return isDereferenceableAndAlignedPointer(Ptr, Alignment, EltSize, DL,  in isDereferenceableAndAlignedInLoop()
    225  if (Step->getAPInt() != EltSize)  in isDereferenceableAndAlignedInLoop()
    234  const APInt AccessSize = TC * EltSize;  in isDereferenceableAndAlignedInLoop()
    245  if (EltSize.urem(Alignment.value()) != 0)  in isDereferenceableAndAlignedInLoop()

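The logic sampled above bounds a whole loop's accesses: if the pointer strides by exactly EltSize each iteration (line 225), the loop touches TC * EltSize bytes in total (line 234), and every access stays aligned only when EltSize is a multiple of the alignment (line 245). A plain-integer sketch of those checks, assuming the unit-stride test has already passed:

    #include <cstdint>

    bool loopAccessIsSafe(uint64_t TC, uint64_t EltSize, uint64_t Alignment,
                          uint64_t DerefBytes) {
      if (EltSize % Alignment != 0)    // mirrors EltSize.urem(Alignment) != 0
        return false;
      uint64_t AccessSize = TC * EltSize;
      return AccessSize <= DerefBytes; // whole range known dereferenceable
    }
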
ConstantFolding.cpp
    433  uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());  in ReadDataFromGlobal()  local
    435  if (ByteOffset < EltSize &&  in ReadDataFromGlobal()
    464  uint64_t EltSize = DL.getTypeAllocSize(EltTy);  in ReadDataFromGlobal()  local
    465  uint64_t Index = ByteOffset / EltSize;  in ReadDataFromGlobal()
    466  uint64_t Offset = ByteOffset - Index * EltSize;  in ReadDataFromGlobal()
    478  uint64_t BytesWritten = EltSize - Offset;  in ReadDataFromGlobal()
    479  assert(BytesWritten <= EltSize && "Not indexing into this element?");  in ReadDataFromGlobal()

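Lines 464-479 are a plain byte-offset decomposition: the flat offset into an array of EltSize-byte elements splits into an element index plus a residual offset, and at most EltSize - Offset bytes can be read from that element before moving to the next. A minimal sketch of the arithmetic (names are illustrative):

    #include <cassert>
    #include <cstdint>

    struct EltPos {
      uint64_t Index;  // which element the byte offset lands in
      uint64_t Offset; // byte offset inside that element
    };

    EltPos splitByteOffset(uint64_t ByteOffset, uint64_t EltSize) {
      EltPos P = {ByteOffset / EltSize, 0};
      P.Offset = ByteOffset - P.Index * EltSize;
      uint64_t BytesAvailable = EltSize - P.Offset;
      assert(BytesAvailable <= EltSize && "Not indexing into this element?");
      (void)BytesAvailable;
      return P;
    }
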
ModuleSummaryAnalysis.cpp
    520  uint64_t EltSize = DL.getTypeAllocSize(EltTy);  in findFuncPointers()  local
    523  StartingOffset + i * EltSize, M, Index, VTableFuncs);  in findFuncPointers()

LoopAccessAnalysis.cpp
    225  unsigned EltSize =  in insert()  local
    227  const SCEV *EltSizeSCEV = SE->getConstant(ScEnd->getType(), EltSize);  in insert()

/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/
AArch64InstructionSelector.cpp
    99  MachineInstr *emitScalarToVector(unsigned EltSize,
    1566  unsigned EltSize = DstTy.getElementType().getSizeInBits();  in select()  local
    1567  if (EltSize == 32)  in select()
    1570  else if (EltSize == 64)  in select()
    2779  unsigned EltSize, const TargetRegisterClass *DstRC, Register Scalar,  in emitScalarToVector()  argument
    2793  switch (EltSize) {  in emitScalarToVector()
    2876  const unsigned EltSize)  in getLaneCopyOpcode()  argument
    2879  switch (EltSize) {  in getLaneCopyOpcode()
    2894  LLVM_DEBUG(dbgs() << "Elt size '" << EltSize << "' unsupported.\n");  in getLaneCopyOpcode()
    3208  getInsertVecEltOpInfo(const RegisterBank &RB, unsigned EltSize) {  in getInsertVecEltOpInfo()  argument
    [all …]

AArch64StackTagging.cpp
    258  uint32_t EltSize = DL->getTypeSizeInBits(EltTy);  in flatten()  local
    259  Type *NewTy = VectorType::get(IntegerType::get(Ctx, EltSize),  in flatten()

AArch64ISelLowering.cpp
    977  unsigned EltSize = Size;  in optimizeLogicalImm()  local
    994  ((InvertedImm << 1) | (InvertedImm >> (EltSize - 1) & 1)) &  in optimizeLogicalImm()
    997  bool Carry = NonDemandedBits & ~Sum & (1ULL << (EltSize - 1));  in optimizeLogicalImm()
    1008  if (EltSize == 2)  in optimizeLogicalImm()
    1011  EltSize /= 2;  in optimizeLogicalImm()
    1012  Mask >>= EltSize;  in optimizeLogicalImm()
    1013  uint64_t Hi = Imm >> EltSize, DemandedBitsHi = DemandedBits >> EltSize;  in optimizeLogicalImm()
    1027  while (EltSize < Size) {  in optimizeLogicalImm()
    1028  NewImm |= NewImm << EltSize;  in optimizeLogicalImm()
    1029  EltSize *= 2;  in optimizeLogicalImm()
    [all …]

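optimizeLogicalImm searches for a smaller repeating bit pattern by repeatedly halving EltSize (lines 1008-1013) and, once a candidate is chosen, rebuilds the full-width immediate by doubling it back up (lines 1027-1029). The rebuild step is self-contained enough to sketch directly:

    #include <cstdint>

    // Broadcast the low EltSize bits of NewImm across a Size-bit register.
    uint64_t replicatePattern(uint64_t NewImm, unsigned EltSize, unsigned Size) {
      while (EltSize < Size) {
        NewImm |= NewImm << EltSize;
        EltSize *= 2;
      }
      return NewImm;
    }

For instance, replicatePattern(0xF0, 8, 32) produces 0xF0F0F0F0, a pattern AArch64's logical immediates can encode.
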
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/
Analysis.cpp
    102  uint64_t EltSize = DL.getTypeAllocSize(EltTy);  in ComputeValueVTs()  local
    105  StartingOffset + i * EltSize);  in ComputeValueVTs()
    142  uint64_t EltSize = DL.getTypeAllocSize(EltTy);  in computeValueLLTs()  local
    145  StartingOffset + i * EltSize);  in computeValueLLTs()

/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/PowerPC/
PPCTargetTransformInfo.cpp
    788  unsigned EltSize = Val->getScalarSizeInBits();  in getVectorInstrCost()  local
    789  if (EltSize == 64) {  in getVectorInstrCost()
    793  } else if (EltSize == 32) {  in getVectorInstrCost()

PPCISelLowering.h
    584  bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);
    599  unsigned getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,

/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/InstCombine/
InstCombineLoadStoreAlloca.cpp
    727  auto EltSize = DL.getTypeAllocSize(ET);  in unpackLoadToAggregate()  local
    751  Offset += EltSize;  in unpackLoadToAggregate()
    1254  auto EltSize = DL.getTypeAllocSize(AT->getElementType());  in unpackStoreToAggregate()  local
    1282  Offset += EltSize;  in unpackStoreToAggregate()

InstructionCombining.cpp
    1168  uint64_t EltSize = DL.getTypeAllocSize(AT->getElementType());  in FindElementAtOffset()  local
    1169  assert(EltSize && "Cannot index into a zero-sized array");  in FindElementAtOffset()
    1170  NewIndices.push_back(ConstantInt::get(IndexTy,Offset/EltSize));  in FindElementAtOffset()
    1171  Offset %= EltSize;  in FindElementAtOffset()

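FindElementAtOffset peels one array dimension per step: divide the byte offset by the element size to get a GEP index, keep the remainder, and recurse into the element type. A sketch of the peeling step with plain integers instead of ConstantInt:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    void peelArrayIndex(uint64_t &Offset, uint64_t EltSize,
                        std::vector<uint64_t> &NewIndices) {
      assert(EltSize && "Cannot index into a zero-sized array");
      NewIndices.push_back(Offset / EltSize); // this dimension's GEP index
      Offset %= EltSize;                      // remainder goes to the next level
    }

With EltSize = 12 and Offset = 30, this pushes index 2 and leaves Offset = 6 for the element type.
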
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/
X86InstrAVX512.td
    56  int EltSize = EltVT.Size;
    507  EVEX_CD8<From.EltSize, From.CD8TupleForm>,
    1176  EVEX_CD8<SrcInfo.EltSize, CD8VT1>, Sched<[SchedRM]>;
    1190  EVEX_CD8<SrcInfo.EltSize, CD8VT1>, Sched<[SchedRM]>;
    1207  EVEX_CD8<SrcInfo.EltSize, CD8VT1>, Sched<[SchedRM]>;
    1976  []>, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>,
    1982  []>, EVEX_4V, EVEX_K, EVEX_CD8<_.EltSize, CD8VF>,
    1988  []>, EVEX_4V, EVEX_KZ, EVEX_CD8<_.EltSize, CD8VF>,
    2001  EVEX_4V, EVEX_K, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>,
    2009  EVEX_4V, EVEX_KZ, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>,
    [all …]

/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/GlobalISel/
LegalizerHelper.cpp
    54  unsigned EltSize = OrigTy.getScalarSizeInBits();  in getNarrowTypeBreakDown()  local
    55  if (LeftoverSize % EltSize != 0)  in getNarrowTypeBreakDown()
    57  LeftoverTy = LLT::scalarOrVector(LeftoverSize / EltSize, EltSize);  in getNarrowTypeBreakDown()
    150  unsigned EltSize = MainTy.getScalarSizeInBits();  in extractParts()  local
    151  if (LeftoverSize % EltSize != 0)  in extractParts()
    153  LeftoverTy = LLT::scalarOrVector(LeftoverSize / EltSize, EltSize);  in extractParts()
    2365  const unsigned EltSize = EltTy.getSizeInBits();  in fewerElementsVectorBasic()  local
    2370  if (BitsForNumParts != Size && BitsForNumParts + EltSize != Size)  in fewerElementsVectorBasic()

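getNarrowTypeBreakDown and extractParts share the same decomposition: cut the value into as many narrow parts as fit, and type the remainder as a smaller vector only if it is a whole number of elements. A standalone sketch of that split (illustrative names, not the GlobalISel API):

    bool breakDown(unsigned OrigSize, unsigned NarrowSize, unsigned EltSize,
                   unsigned &NumParts, unsigned &LeftoverElts) {
      NumParts = OrigSize / NarrowSize;
      unsigned LeftoverSize = OrigSize - NumParts * NarrowSize;
      if (LeftoverSize % EltSize != 0)
        return false; // remainder is not expressible as whole elements
      LeftoverElts = LeftoverSize / EltSize;
      return true;
    }
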
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/ARM/
ARMISelLowering.cpp
    6017  unsigned EltSize = 8;  in LowerCTPOP()  local
    6019  while (EltSize != VT.getScalarSizeInBits()) {  in LowerCTPOP()
    6025  EltSize *= 2;  in LowerCTPOP()
    6027  MVT WidenVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize), NumElts);  in LowerCTPOP()
    7247  unsigned EltSize = VT.getScalarSizeInBits();  in LowerBUILD_VECTOR()  local
    7251  if (hasDominantValue && EltSize <= 32) {  in LowerBUILD_VECTOR()
    7350  if (EltSize >= 32) {  in LowerBUILD_VECTOR()
    7353  EVT EltVT = EVT::getFloatingPointVT(EltSize);  in LowerBUILD_VECTOR()
    7643  unsigned EltSize = VT.getScalarSizeInBits();  in isShuffleMaskLegal()  local
    7644  if (EltSize >= 32 ||  in isShuffleMaskLegal()
    [all …]

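LowerCTPOP starts from per-byte population counts (EltSize = 8) and repeatedly folds adjacent lanes with pairwise widening adds, doubling EltSize each round until it matches the requested element width. A scalar emulation of the same idea for a single 32-bit value (the masked adds play the role of NEON's pairwise-add step):

    #include <cstdint>

    uint32_t ctpop32_by_widening(uint32_t V) {
      // Per-byte popcount, the analogue of the initial vcnt.8 step.
      uint32_t Cnt = 0;
      for (int b = 0; b < 4; ++b) {
        uint32_t Byte = (V >> (8 * b)) & 0xFF, C = 0;
        while (Byte) { C += Byte & 1; Byte >>= 1; }
        Cnt |= C << (8 * b);
      }
      // Pairwise widening adds: fold 8-bit lanes to 16, then 16 to 32.
      for (unsigned EltSize = 8; EltSize != 32; EltSize *= 2) {
        uint32_t Mask = (EltSize == 8) ? 0x00FF00FFu : 0x0000FFFFu;
        Cnt = (Cnt & Mask) + ((Cnt >> EltSize) & Mask);
      }
      return Cnt;
    }
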
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/NVPTX/
NVPTXISelLowering.cpp
    247  unsigned EltSize = EltVT.getStoreSize();  in CanMergeParamLoadStoresStartingAt()  local
    250  if (EltSize >= AccessSize)  in CanMergeParamLoadStoresStartingAt()
    253  unsigned NumElts = AccessSize / EltSize;  in CanMergeParamLoadStoresStartingAt()
    255  if (AccessSize != EltSize * NumElts)  in CanMergeParamLoadStoresStartingAt()
    272  if (Offsets[j] - Offsets[j - 1] != EltSize)  in CanMergeParamLoadStoresStartingAt()

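CanMergeParamLoadStoresStartingAt vectorizes a run of parameter accesses only when the wide access is a whole number of elements and the recorded byte offsets are exactly EltSize apart. A hedged sketch of those checks (illustrative signature, not the NVPTX backend's):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    bool canMergeParamAccesses(const std::vector<uint64_t> &Offsets, size_t Start,
                               unsigned EltSize, unsigned AccessSize) {
      if (EltSize >= AccessSize)
        return false;                 // nothing to merge into
      unsigned NumElts = AccessSize / EltSize;
      if (AccessSize != EltSize * NumElts)
        return false;                 // not a whole element count
      if (Start + NumElts > Offsets.size())
        return false;
      for (unsigned j = 1; j != NumElts; ++j)
        if (Offsets[Start + j] - Offsets[Start + j - 1] != EltSize)
          return false;               // gap or overlap between elements
      return true;
    }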