Home
last modified time | relevance | path

Searched refs: EltSize (Results 1 – 25 of 74) sorted by relevance

123

/external/llvm/lib/Target/AMDGPU/
DSILoadStoreOptimizer.cpp67 unsigned EltSize);
70 unsigned EltSize);
75 unsigned EltSize);
80 unsigned EltSize);
159 unsigned EltSize){ in findMatchingDSInst() argument
185 if (offsetsCanBeCombined(Offset0, Offset1, EltSize)) in findMatchingDSInst()
195 unsigned EltSize) { in mergeRead2Pair() argument
210 unsigned NewOffset0 = Offset0 / EltSize; in mergeRead2Pair()
211 unsigned NewOffset1 = Offset1 / EltSize; in mergeRead2Pair()
212 unsigned Opc = (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64; in mergeRead2Pair()
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/AMDGPU/
DSILoadStoreOptimizer.cpp91 unsigned EltSize; member
117 unsigned read2Opcode(unsigned EltSize) const;
118 unsigned read2ST64Opcode(unsigned EltSize) const;
121 unsigned write2Opcode(unsigned EltSize) const;
122 unsigned write2ST64Opcode(unsigned EltSize) const;
254 if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0)) in offsetsCanBeCombined()
257 unsigned EltOffset0 = CI.Offset0 / CI.EltSize; in offsetsCanBeCombined()
258 unsigned EltOffset1 = CI.Offset1 / CI.EltSize; in offsetsCanBeCombined()
293 CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64; in offsetsCanBeCombined()
294 CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64; in offsetsCanBeCombined()
[all …]
DSIRegisterInfo.cpp538 const unsigned EltSize = 4; in buildSpillLoadStore() local
540 unsigned NumSubRegs = AMDGPU::getRegBitWidth(RC->getID()) / (EltSize * CHAR_BIT); in buildSpillLoadStore()
541 unsigned Size = NumSubRegs * EltSize; in buildSpillLoadStore()
548 assert((Offset % EltSize) == 0 && "unexpected VGPR spill offset"); in buildSpillLoadStore()
550 if (!isUInt<12>(Offset + Size - EltSize)) { in buildSpillLoadStore()
584 for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += EltSize) { in buildSpillLoadStore()
596 MachinePointerInfo PInfo = BasePtrInfo.getWithOffset(EltSize * i); in buildSpillLoadStore()
599 EltSize, MinAlign(Align, EltSize * i)); in buildSpillLoadStore()
686 unsigned EltSize = 4; in spillSGPR() local
691 std::tie(EltSize, ScalarStoreOp) = in spillSGPR()
[all …]
DAMDGPUTargetTransformInfo.cpp494 unsigned EltSize in getVectorInstrCost() local
496 if (EltSize < 32) { in getVectorInstrCost()
497 if (EltSize == 16 && Index == 0 && ST->has16BitInsts()) in getVectorInstrCost()
711 unsigned EltSize in getVectorInstrCost() local
713 if (EltSize < 32) { in getVectorInstrCost()
DSIRegisterInfo.h207 unsigned EltSize) const;
DSIInstrInfo.cpp300 unsigned EltSize; in getMemOpBaseRegImmOfs() local
302 EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16; in getMemOpBaseRegImmOfs()
306 EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8; in getMemOpBaseRegImmOfs()
310 EltSize *= 64; in getMemOpBaseRegImmOfs()
315 Offset = EltSize * Offset0; in getMemOpBaseRegImmOfs()
560 unsigned EltSize = 4; in copyPhysReg() local
565 EltSize = 8; in copyPhysReg()
568 EltSize = 4; in copyPhysReg()
577 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize); in copyPhysReg()
652 unsigned EltSize = 4; in materializeImmediate() local
[all …]
/external/swiftshader/third_party/LLVM/lib/Transforms/Scalar/
DScalarReplAggregates.cpp363 unsigned EltSize = In->getPrimitiveSizeInBits()/8; in MergeInTypeForLoadOrStore() local
364 if (EltSize == AllocaSize) in MergeInTypeForLoadOrStore()
370 if (Offset % EltSize == 0 && AllocaSize % EltSize == 0 && in MergeInTypeForLoadOrStore()
371 (!VectorTy || EltSize == VectorTy->getElementType() in MergeInTypeForLoadOrStore()
375 VectorTy = VectorType::get(In, AllocaSize/EltSize); in MergeInTypeForLoadOrStore()
695 unsigned EltSize = TD.getTypeAllocSizeInBits(VTy->getElementType()); in ConvertScalar_ExtractValue() local
696 Elt = Offset/EltSize; in ConvertScalar_ExtractValue()
697 assert(EltSize*Elt == Offset && "Invalid modulus in validity checking"); in ConvertScalar_ExtractValue()
721 uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType()); in ConvertScalar_ExtractValue() local
725 Offset+i*EltSize, Builder); in ConvertScalar_ExtractValue()
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/X86/Utils/
DX86ShuffleDecode.cpp415 void DecodeEXTRQIMask(unsigned NumElts, unsigned EltSize, int Len, int Idx, in DecodeEXTRQIMask() argument
425 if (0 != (Len % EltSize) || 0 != (Idx % EltSize)) in DecodeEXTRQIMask()
439 Len /= EltSize; in DecodeEXTRQIMask()
440 Idx /= EltSize; in DecodeEXTRQIMask()
452 void DecodeINSERTQIMask(unsigned NumElts, unsigned EltSize, int Len, int Idx, in DecodeINSERTQIMask() argument
462 if (0 != (Len % EltSize) || 0 != (Idx % EltSize)) in DecodeINSERTQIMask()
476 Len /= EltSize; in DecodeINSERTQIMask()
477 Idx /= EltSize; in DecodeINSERTQIMask()
DX86ShuffleDecode.h150 void DecodeEXTRQIMask(unsigned NumElts, unsigned EltSize, int Len, int Idx,
154 void DecodeINSERTQIMask(unsigned NumElts, unsigned EltSize, int Len, int Idx,
/external/llvm/lib/Target/X86/Utils/
DX86ShuffleDecode.cpp515 unsigned EltSize = VT.getScalarSizeInBits(); in DecodeVPERMILPMask() local
520 assert((EltSize == 32 || EltSize == 64) && "Unexpected element size"); in DecodeVPERMILPMask()
524 M = (EltSize == 64 ? ((M >> 1) & 0x1) : (M & 0x3)); in DecodeVPERMILPMask()
533 unsigned EltSize = VT.getScalarSizeInBits(); in DecodeVPERMIL2PMask() local
538 assert((EltSize == 32 || EltSize == 64) && "Unexpected element size"); in DecodeVPERMIL2PMask()
560 if (EltSize == 64) in DecodeVPERMIL2PMask()
/external/clang/lib/CodeGen/
DCGBuilder.h206 Address CreateConstArrayGEP(Address Addr, uint64_t Index, CharUnits EltSize,
212 Addr.getAlignment().alignmentAtOffset(Index * EltSize));
223 CharUnits EltSize,
227 Addr.getAlignment().alignmentAtOffset(Index * EltSize));
237 Address CreateConstGEP(Address Addr, uint64_t Index, CharUnits EltSize,
241 Addr.getAlignment().alignmentAtOffset(Index * EltSize));
/external/swiftshader/third_party/LLVM/lib/Analysis/
DConstantFolding.cpp285 uint64_t EltSize = TD.getTypeAllocSize(CS->getOperand(Index)->getType()); in ReadDataFromGlobal() local
287 if (ByteOffset < EltSize && in ReadDataFromGlobal()
314 uint64_t EltSize = TD.getTypeAllocSize(CA->getType()->getElementType()); in ReadDataFromGlobal() local
315 uint64_t Index = ByteOffset / EltSize; in ReadDataFromGlobal()
316 uint64_t Offset = ByteOffset - Index * EltSize; in ReadDataFromGlobal()
321 if (EltSize >= BytesLeft) in ReadDataFromGlobal()
325 BytesLeft -= EltSize; in ReadDataFromGlobal()
326 CurPtr += EltSize; in ReadDataFromGlobal()
332 uint64_t EltSize = TD.getTypeAllocSize(CV->getType()->getElementType()); in ReadDataFromGlobal() local
333 uint64_t Index = ByteOffset / EltSize; in ReadDataFromGlobal()
[all …]
/external/swiftshader/third_party/LLVM/include/llvm/Support/
DAllocator.h171 size_t EltSize = (sizeof(T)+Alignment-1)&(-Alignment);
172 return static_cast<T*>(Allocate(Num * EltSize, Alignment));
/external/swiftshader/third_party/LLVM/lib/CodeGen/
DAnalysis.cpp93 uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy); in ComputeValueVTs() local
96 StartingOffset + i * EltSize); in ComputeValueVTs()
/external/swiftshader/third_party/LLVM/lib/Target/PowerPC/
DPPCISelLowering.h219 bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);
227 unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize);
/external/llvm/lib/Target/X86/
DX86InstrAVX512.td61 int EltSize = EltVT.Size;
87 !if (!eq (EltSize, 64), "v8i64", "v16i32"),
93 // Note: For EltSize < 32, FloatVT is illegal and TableGen
96 !if (!eq (!srl(EltSize,5),0),
99 "v" # NumElts # "f" # EltSize,
103 !if (!eq (!srl(EltSize,5),0),
106 "v" # NumElts # "i" # EltSize,
452 EVEX_CD8<From.EltSize, From.CD8TupleForm>;
604 (!cast<Instruction>(NAME # To.EltSize # "x" # To.NumElts #
614 (!cast<Instruction>(NAME # To.EltSize # "x" # To.NumElts #
[all …]
/external/llvm/lib/Analysis/
DConstantFolding.cpp339 uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType()); in ReadDataFromGlobal() local
341 if (ByteOffset < EltSize && in ReadDataFromGlobal()
370 uint64_t EltSize = DL.getTypeAllocSize(EltTy); in ReadDataFromGlobal() local
371 uint64_t Index = ByteOffset / EltSize; in ReadDataFromGlobal()
372 uint64_t Offset = ByteOffset - Index * EltSize; in ReadDataFromGlobal()
384 uint64_t BytesWritten = EltSize - Offset; in ReadDataFromGlobal()
385 assert(BytesWritten <= EltSize && "Not indexing into this element?"); in ReadDataFromGlobal()
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Analysis/
DConstantFolding.cpp417 uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType()); in ReadDataFromGlobal() local
419 if (ByteOffset < EltSize && in ReadDataFromGlobal()
448 uint64_t EltSize = DL.getTypeAllocSize(EltTy); in ReadDataFromGlobal() local
449 uint64_t Index = ByteOffset / EltSize; in ReadDataFromGlobal()
450 uint64_t Offset = ByteOffset - Index * EltSize; in ReadDataFromGlobal()
462 uint64_t BytesWritten = EltSize - Offset; in ReadDataFromGlobal()
463 assert(BytesWritten <= EltSize && "Not indexing into this element?"); in ReadDataFromGlobal()
/external/llvm/lib/Transforms/InstCombine/
DInstCombineLoadStoreAlloca.cpp583 auto EltSize = DL.getTypeAllocSize(ET); in unpackLoadToAggregate() local
604 Offset += EltSize; in unpackLoadToAggregate()
1085 auto EltSize = DL.getTypeAllocSize(AT->getElementType()); in unpackStoreToAggregate() local
1110 Offset += EltSize; in unpackStoreToAggregate()
/external/llvm/lib/Target/Hexagon/
DHexagonISelLowering.cpp2465 unsigned EltSize = Size / NElts; in LowerBUILD_VECTOR() local
2467 uint64_t Mask = ~uint64_t(0ULL) >> (64 - EltSize); in LowerBUILD_VECTOR()
2485 Res = (Res << EltSize) | Val; in LowerBUILD_VECTOR()
2602 int EltSize = EltVT.getSizeInBits(); in LowerEXTRACT_VECTOR() local
2604 EltSize : VTN * EltSize, dl, MVT::i64); in LowerEXTRACT_VECTOR()
2609 SDValue Offset = DAG.getConstant(X * EltSize, dl, MVT::i32); in LowerEXTRACT_VECTOR()
2648 DAG.getConstant(EltSize, dl, MVT::i32)); in LowerEXTRACT_VECTOR()
2677 int EltSize = EltVT.getSizeInBits(); in LowerINSERT_VECTOR() local
2679 EltSize : VTN * EltSize, dl, MVT::i64); in LowerINSERT_VECTOR()
2682 SDValue Offset = DAG.getConstant(C->getSExtValue() * EltSize, dl, MVT::i32); in LowerINSERT_VECTOR()
[all …]
/external/llvm/lib/CodeGen/
DAnalysis.cpp103 uint64_t EltSize = DL.getTypeAllocSize(EltTy); in ComputeValueVTs() local
106 StartingOffset + i * EltSize); in ComputeValueVTs()
/external/swiftshader/third_party/llvm-7.0/llvm/lib/CodeGen/
DAnalysis.cpp102 uint64_t EltSize = DL.getTypeAllocSize(EltTy); in ComputeValueVTs() local
105 StartingOffset + i * EltSize); in ComputeValueVTs()
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Transforms/InstCombine/
DInstCombineLoadStoreAlloca.cpp759 auto EltSize = DL.getTypeAllocSize(ET); in unpackLoadToAggregate() local
783 Offset += EltSize; in unpackLoadToAggregate()
1282 auto EltSize = DL.getTypeAllocSize(AT->getElementType()); in unpackStoreToAggregate() local
1310 Offset += EltSize; in unpackStoreToAggregate()
/external/llvm/lib/CodeGen/SelectionDAG/
DLegalizeTypes.cpp1029 unsigned EltSize = EltVT.getSizeInBits() / 8; // FIXME: should be ABI size. in GetVectorElementPointer() local
1030 assert(EltSize * 8 == EltVT.getSizeInBits() && in GetVectorElementPointer()
1034 DAG.getConstant(EltSize, dl, Index.getValueType())); in GetVectorElementPointer()
/external/llvm/lib/Target/PowerPC/
DPPCISelLowering.h429 bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);
443 unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize, SelectionDAG &DAG);

123