
Searched refs:EltSize (Results 1 – 25 of 61) sorted by relevance


/external/llvm/lib/Target/AMDGPU/
SILoadStoreOptimizer.cpp 67 unsigned EltSize);
70 unsigned EltSize);
75 unsigned EltSize);
80 unsigned EltSize);
159 unsigned EltSize) { in findMatchingDSInst() argument
185 if (offsetsCanBeCombined(Offset0, Offset1, EltSize)) in findMatchingDSInst()
195 unsigned EltSize) { in mergeRead2Pair() argument
210 unsigned NewOffset0 = Offset0 / EltSize; in mergeRead2Pair()
211 unsigned NewOffset1 = Offset1 / EltSize; in mergeRead2Pair()
212 unsigned Opc = (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64; in mergeRead2Pair()
[all …]
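
A minimal sketch of the merge arithmetic shown above, assuming the MachineInstr plumbing is already done; Read2Opcode and the 8-bit offset-field limit stand in for the real AMDGPU definitions:

    #include <optional>

    // Stand-ins for AMDGPU::DS_READ2_B32 / AMDGPU::DS_READ2_B64.
    enum Read2Opcode { DS_READ2_B32, DS_READ2_B64 };

    struct Read2Pair {
      Read2Opcode Opc;
      unsigned NewOffset0; // offsets in element units, as DS_READ2 encodes them
      unsigned NewOffset1;
    };

    // Mirrors mergeRead2Pair(): both byte offsets must be multiples of
    // EltSize, the scaled offsets must fit the 8-bit offset fields, and the
    // opcode is chosen by element size (4 bytes -> B32, 8 bytes -> B64).
    std::optional<Read2Pair> mergeRead2(unsigned Offset0, unsigned Offset1,
                                        unsigned EltSize) {
      if (Offset0 % EltSize != 0 || Offset1 % EltSize != 0)
        return std::nullopt;            // not element-aligned: cannot combine
      unsigned NewOffset0 = Offset0 / EltSize;
      unsigned NewOffset1 = Offset1 / EltSize;
      if (NewOffset0 > 255 || NewOffset1 > 255)
        return std::nullopt;            // each encoded offset field is 8 bits
      Read2Opcode Opc = (EltSize == 4) ? DS_READ2_B32 : DS_READ2_B64;
      return Read2Pair{Opc, NewOffset0, NewOffset1};
    }
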
SIInstrInfo.cpp 238 unsigned EltSize; in getMemOpBaseRegImmOfs() local
240 EltSize = getOpRegClass(LdSt, 0)->getSize() / 2; in getMemOpBaseRegImmOfs()
244 EltSize = getOpRegClass(LdSt, Data0Idx)->getSize(); in getMemOpBaseRegImmOfs()
248 EltSize *= 64; in getMemOpBaseRegImmOfs()
253 Offset = EltSize * Offset0; in getMemOpBaseRegImmOfs()
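
The byte offset here is reconstructed from an element-counted operand; a one-function sketch of that decoding, with the ST64 stride handled as in the lines above (IsST64 is an assumed flag, not the real predicate):

    // DS offsets count elements, and the *_ST64 forms stride by 64 elements,
    // so the byte offset is EltSize (possibly x64) times the encoded value.
    unsigned decodeDSByteOffset(unsigned EltSize, unsigned Offset0,
                                bool IsST64) {
      if (IsST64)
        EltSize *= 64;
      return EltSize * Offset0;
    }
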
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/AMDGPU/
SILoadStoreOptimizer.cpp 91 unsigned EltSize; member
117 unsigned read2Opcode(unsigned EltSize) const;
118 unsigned read2ST64Opcode(unsigned EltSize) const;
121 unsigned write2Opcode(unsigned EltSize) const;
122 unsigned write2ST64Opcode(unsigned EltSize) const;
254 if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0)) in offsetsCanBeCombined()
257 unsigned EltOffset0 = CI.Offset0 / CI.EltSize; in offsetsCanBeCombined()
258 unsigned EltOffset1 = CI.Offset1 / CI.EltSize; in offsetsCanBeCombined()
293 CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64; in offsetsCanBeCombined()
294 CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64; in offsetsCanBeCombined()
[all …]
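
Hedged sketch of the ST64 path of offsetsCanBeCombined() as it appears above, with CombineInfo reduced to the visible fields; the 8-bit field limit is an assumption carried over from the DS_READ2 encoding:

    // Reduced CombineInfo: only the fields that show up in the matches.
    struct CombineInfo {
      unsigned Offset0 = 0, Offset1 = 0;
      unsigned BaseOff = 0;
      unsigned EltSize = 4;
    };

    // Both offsets must be element-aligned; after rebasing against BaseOff
    // they must be multiples of 64 elements to be encodable as ST64 offsets.
    bool offsetsCanBeCombinedST64(CombineInfo &CI) {
      if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
        return false;
      unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
      unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
      unsigned Base = CI.BaseOff / CI.EltSize;
      if ((EltOffset0 - Base) % 64 != 0 || (EltOffset1 - Base) % 64 != 0)
        return false;
      CI.Offset0 = (EltOffset0 - Base) / 64;
      CI.Offset1 = (EltOffset1 - Base) / 64;
      return CI.Offset0 <= 255 && CI.Offset1 <= 255;
    }
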
SIRegisterInfo.cpp 538 const unsigned EltSize = 4; in buildSpillLoadStore() local
540 unsigned NumSubRegs = AMDGPU::getRegBitWidth(RC->getID()) / (EltSize * CHAR_BIT); in buildSpillLoadStore()
541 unsigned Size = NumSubRegs * EltSize; in buildSpillLoadStore()
548 assert((Offset % EltSize) == 0 && "unexpected VGPR spill offset"); in buildSpillLoadStore()
550 if (!isUInt<12>(Offset + Size - EltSize)) { in buildSpillLoadStore()
584 for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += EltSize) { in buildSpillLoadStore()
596 MachinePointerInfo PInfo = BasePtrInfo.getWithOffset(EltSize * i); in buildSpillLoadStore()
599 EltSize, MinAlign(Align, EltSize * i)); in buildSpillLoadStore()
686 unsigned EltSize = 4; in spillSGPR() local
691 std::tie(EltSize, ScalarStoreOp) = in spillSGPR()
[all …]
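
The spill walk above is plain offset arithmetic once the MachineInstr building is stripped away; a sketch, with the 12-bit MUBUF offset limit taken from the isUInt<12> check:

    #include <cassert>
    #include <climits>
    #include <cstdint>
    #include <vector>

    // Each 32-bit sub-register of a VGPR tuple is spilled to its own slot,
    // EltSize bytes apart, as in buildSpillLoadStore().
    std::vector<uint64_t> vgprSpillOffsets(unsigned RegBitWidth,
                                           uint64_t Offset) {
      const unsigned EltSize = 4;
      unsigned NumSubRegs = RegBitWidth / (EltSize * CHAR_BIT);
      unsigned Size = NumSubRegs * EltSize;
      assert((Offset % EltSize) == 0 && "unexpected VGPR spill offset");
      // The real code materializes a new base register when the last
      // element's offset would overflow the 12-bit immediate field.
      assert((Offset + Size - EltSize) < (1u << 12) &&
             "offset assumed to fit a 12-bit immediate in this sketch");
      std::vector<uint64_t> Offsets;
      for (unsigned i = 0; i != NumSubRegs; ++i, Offset += EltSize)
        Offsets.push_back(Offset);
      return Offsets;
    }
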
AMDGPUTargetTransformInfo.cpp 494 unsigned EltSize in getVectorInstrCost() local
496 if (EltSize < 32) { in getVectorInstrCost()
497 if (EltSize == 16 && Index == 0 && ST->has16BitInsts()) in getVectorInstrCost()
711 unsigned EltSize in getVectorInstrCost() local
713 if (EltSize < 32) { in getVectorInstrCost()
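
Only the conditions are visible above, so the costs in this sketch are illustrative rather than AMDGPU's actual numbers; the shape of the logic is that sub-32-bit elements need shift/mask work, except a 16-bit element at index 0, which lives in the low half of a register when the target has 16-bit instructions:

    // Illustrative cost shape for getVectorInstrCost(); FallbackCost stands
    // in for the generic BaseT scalarization cost.
    unsigned vectorInstrCostSketch(unsigned EltSize, unsigned Index,
                                   bool Has16BitInsts,
                                   unsigned FallbackCost) {
      if (EltSize < 32) {
        if (EltSize == 16 && Index == 0 && Has16BitInsts)
          return 0;          // low 16 bits of a 32-bit register: free access
        return FallbackCost;
      }
      return 0;              // full-width lanes are plain sub-registers
    }
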
SIRegisterInfo.h 207 unsigned EltSize) const;
SIInstrInfo.cpp 300 unsigned EltSize; in getMemOpBaseRegImmOfs() local
302 EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16; in getMemOpBaseRegImmOfs()
306 EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8; in getMemOpBaseRegImmOfs()
310 EltSize *= 64; in getMemOpBaseRegImmOfs()
315 Offset = EltSize * Offset0; in getMemOpBaseRegImmOfs()
560 unsigned EltSize = 4; in copyPhysReg() local
565 EltSize = 8; in copyPhysReg()
568 EltSize = 4; in copyPhysReg()
577 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize); in copyPhysReg()
652 unsigned EltSize = 4; in materializeImmediate() local
[all …]
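
copyPhysReg() above chooses the chunk width first (8 bytes when a 64-bit move is legal for the class, else 4) and then asks getRegSplitParts() for matching sub-register indices; a sketch of the chunking only, with hypothetical helper names:

    #include <vector>

    // A wide register copy becomes a sequence of EltSize-byte sub-copies.
    std::vector<unsigned> copyChunkOffsets(unsigned RegBytes,
                                           bool Has64BitMove) {
      unsigned EltSize = Has64BitMove ? 8 : 4;
      std::vector<unsigned> Offsets;  // byte offset of each sub-copy
      for (unsigned Off = 0; Off < RegBytes; Off += EltSize)
        Offsets.push_back(Off);
      return Offsets;
    }
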
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/X86/Utils/
X86ShuffleDecode.cpp 415 void DecodeEXTRQIMask(unsigned NumElts, unsigned EltSize, int Len, int Idx, in DecodeEXTRQIMask() argument
425 if (0 != (Len % EltSize) || 0 != (Idx % EltSize)) in DecodeEXTRQIMask()
439 Len /= EltSize; in DecodeEXTRQIMask()
440 Idx /= EltSize; in DecodeEXTRQIMask()
452 void DecodeINSERTQIMask(unsigned NumElts, unsigned EltSize, int Len, int Idx, in DecodeINSERTQIMask() argument
462 if (0 != (Len % EltSize) || 0 != (Idx % EltSize)) in DecodeINSERTQIMask()
476 Len /= EltSize; in DecodeINSERTQIMask()
477 Idx /= EltSize; in DecodeINSERTQIMask()
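
Sketch of the EXTRQ decode shown above, assuming a 128-bit vector (so the low half holds 64/EltSize elements) and LLVM's usual shuffle-mask sentinels; the bail-out when Len or Idx is not element-aligned matches the visible check:

    #include <vector>

    constexpr int SM_SentinelUndef = -1;
    constexpr int SM_SentinelZero  = -2;

    // EXTRQ extracts Len bits starting at bit Idx of the low 64 bits; when
    // both are multiples of EltSize this is a per-element shuffle with the
    // remaining low-half lanes zeroed and the high half undefined.
    std::vector<int> decodeEXTRQ(unsigned NumElts, unsigned EltSize,
                                 int Len, int Idx) {
      std::vector<int> Mask(NumElts, SM_SentinelUndef);
      if ((Len % (int)EltSize) != 0 || (Idx % (int)EltSize) != 0)
        return Mask;                   // not expressible per element
      Len /= EltSize;                  // now counted in elements
      Idx /= EltSize;
      int HalfElts = int(64 / EltSize);
      if (Len <= 0 || Idx < 0 || Idx + Len > HalfElts)
        return Mask;                   // out-of-range fields stay undef
      for (int i = 0; i < Len; ++i)
        Mask[i] = Idx + i;             // selected source elements
      for (int i = Len; i < HalfElts; ++i)
        Mask[i] = SM_SentinelZero;     // EXTRQ zero-fills above Len
      return Mask;
    }
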
X86ShuffleDecode.h 150 void DecodeEXTRQIMask(unsigned NumElts, unsigned EltSize, int Len, int Idx,
154 void DecodeINSERTQIMask(unsigned NumElts, unsigned EltSize, int Len, int Idx,
/external/llvm/lib/Target/X86/Utils/
X86ShuffleDecode.cpp 515 unsigned EltSize = VT.getScalarSizeInBits(); in DecodeVPERMILPMask() local
520 assert((EltSize == 32 || EltSize == 64) && "Unexpected element size"); in DecodeVPERMILPMask()
524 M = (EltSize == 64 ? ((M >> 1) & 0x1) : (M & 0x3)); in DecodeVPERMILPMask()
533 unsigned EltSize = VT.getScalarSizeInBits(); in DecodeVPERMIL2PMask() local
538 assert((EltSize == 32 || EltSize == 64) && "Unexpected element size"); in DecodeVPERMIL2PMask()
560 if (EltSize == 64) in DecodeVPERMIL2PMask()
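
Sketch of the per-element index extraction in DecodeVPERMILPMask(): two selector bits per f32 lane, one meaningful bit per f64 lane, with selection staying inside each 128-bit lane (the lane fold-back is an assumption about the surrounding loop, which the excerpt does not show):

    #include <cassert>
    #include <vector>

    std::vector<int> decodeVPERMILP(unsigned NumElts, unsigned EltSize,
                                    const std::vector<unsigned> &RawBits) {
      assert((EltSize == 32 || EltSize == 64) && "Unexpected element size");
      unsigned EltsPerLane = 128 / EltSize;
      std::vector<int> Mask;
      for (unsigned i = 0; i != NumElts; ++i) {
        unsigned M = RawBits[i];
        // The visible line: one useful bit for f64, two bits for f32.
        M = (EltSize == 64) ? ((M >> 1) & 0x1) : (M & 0x3);
        unsigned LaneBase = (i / EltsPerLane) * EltsPerLane;
        Mask.push_back(int(LaneBase + M));
      }
      return Mask;
    }
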
/external/clang/lib/CodeGen/
CGBuilder.h 206 Address CreateConstArrayGEP(Address Addr, uint64_t Index, CharUnits EltSize,
212 Addr.getAlignment().alignmentAtOffset(Index * EltSize));
223 CharUnits EltSize,
227 Addr.getAlignment().alignmentAtOffset(Index * EltSize));
237 Address CreateConstGEP(Address Addr, uint64_t Index, CharUnits EltSize,
241 Addr.getAlignment().alignmentAtOffset(Index * EltSize));
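
These helpers take EltSize in CharUnits so the returned Address keeps an honest alignment; a sketch of what alignmentAtOffset() computes, assuming the incoming alignment is a power of two (std::gcd then yields the largest power of two dividing the displacement as well):

    #include <cstdint>
    #include <numeric>

    // Stepping Index elements of EltSize bytes from a pointer aligned to
    // Align leaves gcd(Align, Index * EltSize) bytes of known alignment.
    uint64_t alignmentAtOffset(uint64_t Align, uint64_t Index,
                               uint64_t EltSize) {
      uint64_t Offset = Index * EltSize;
      return Offset == 0 ? Align : std::gcd(Align, Offset);
    }
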
TargetInfo.cpp 2511 uint64_t EltSize = getContext().getTypeSize(AT->getElementType()); in classify() local
2517 if (Size > 128 && EltSize != 256) in classify()
2520 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { in classify()
2778 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType()); in BitsContainNoUserData() local
2784 unsigned EltOffset = i*EltSize; in BitsContainNoUserData()
2866 unsigned EltSize = TD.getTypeAllocSize(EltTy); in ContainsFloatAtOffset() local
2867 IROffset -= IROffset/EltSize*EltSize; in ContainsFloatAtOffset()
2955 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy); in GetINTEGERTypeAtOffset() local
2956 unsigned EltOffset = IROffset/EltSize*EltSize; in GetINTEGERTypeAtOffset()
4350 CharUnits EltSize = TypeInfo.first / 2; in EmitVAArg() local
[all …]
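
Several of these x86-64 ABI helpers share one idiom, visible in ContainsFloatAtOffset() and GetINTEGERTypeAtOffset(): round the byte offset down to the start of the containing array element, then recurse into that element. As a sketch:

    #include <cstdint>

    struct EltAddr {
      uint64_t Index;   // which array element the offset lands in
      uint64_t Within;  // byte offset inside that element
    };

    // IROffset / EltSize * EltSize rounds down to the element boundary.
    EltAddr locateInArray(uint64_t IROffset, uint64_t EltSize) {
      uint64_t EltStart = IROffset / EltSize * EltSize;
      return {EltStart / EltSize, IROffset - EltStart};
    }
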
/external/llvm/lib/Target/X86/
X86InstrAVX512.td 61 int EltSize = EltVT.Size;
87 !if (!eq (EltSize, 64), "v8i64", "v16i32"),
93 // Note: For EltSize < 32, FloatVT is illegal and TableGen
96 !if (!eq (!srl(EltSize,5),0),
99 "v" # NumElts # "f" # EltSize,
103 !if (!eq (!srl(EltSize,5),0),
106 "v" # NumElts # "i" # EltSize,
452 EVEX_CD8<From.EltSize, From.CD8TupleForm>;
604 (!cast<Instruction>(NAME # To.EltSize # "x" # To.NumElts #
614 (!cast<Instruction>(NAME # To.EltSize # "x" # To.NumElts #
[all …]
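
In this TableGen class EltSize is the element width in bits, spliced into value-type names ("v" # NumElts # "f" # EltSize) and into the EVEX compressed-displacement encoding. The name splice, restated in C++ for illustration (the !srl(EltSize,5) test in the .td guards the sub-32-bit case, where the float type is illegal):

    #include <string>

    // NumElts=16, EltSize=32 yields "v16f32", mirroring the .td splice.
    std::string floatVTName(unsigned NumElts, unsigned EltSize) {
      if ((EltSize >> 5) == 0)        // EltSize < 32: FloatVT is illegal here
        return "illegal";
      return "v" + std::to_string(NumElts) + "f" + std::to_string(EltSize);
    }
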
/external/llvm/lib/Analysis/
ConstantFolding.cpp 339 uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType()); in ReadDataFromGlobal() local
341 if (ByteOffset < EltSize && in ReadDataFromGlobal()
370 uint64_t EltSize = DL.getTypeAllocSize(EltTy); in ReadDataFromGlobal() local
371 uint64_t Index = ByteOffset / EltSize; in ReadDataFromGlobal()
372 uint64_t Offset = ByteOffset - Index * EltSize; in ReadDataFromGlobal()
384 uint64_t BytesWritten = EltSize - Offset; in ReadDataFromGlobal()
385 assert(BytesWritten <= EltSize && "Not indexing into this element?"); in ReadDataFromGlobal()
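
Sketch of the sequential-type step in ReadDataFromGlobal() as shown above: the byte offset splits into an element index and an intra-element offset, and the first element read contributes at most EltSize - Offset bytes:

    #include <cassert>
    #include <cstdint>

    struct ReadStep {
      uint64_t Index;         // first element touched
      uint64_t Offset;        // byte offset within that element
      uint64_t BytesWritten;  // bytes available from that element
    };

    ReadStep firstElementRead(uint64_t ByteOffset, uint64_t EltSize) {
      uint64_t Index = ByteOffset / EltSize;
      uint64_t Offset = ByteOffset - Index * EltSize;
      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      return {Index, Offset, BytesWritten};
    }
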
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Analysis/
ConstantFolding.cpp 417 uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType()); in ReadDataFromGlobal() local
419 if (ByteOffset < EltSize && in ReadDataFromGlobal()
448 uint64_t EltSize = DL.getTypeAllocSize(EltTy); in ReadDataFromGlobal() local
449 uint64_t Index = ByteOffset / EltSize; in ReadDataFromGlobal()
450 uint64_t Offset = ByteOffset - Index * EltSize; in ReadDataFromGlobal()
462 uint64_t BytesWritten = EltSize - Offset; in ReadDataFromGlobal()
463 assert(BytesWritten <= EltSize && "Not indexing into this element?"); in ReadDataFromGlobal()
/external/llvm/lib/Transforms/InstCombine/
InstCombineLoadStoreAlloca.cpp 583 auto EltSize = DL.getTypeAllocSize(ET); in unpackLoadToAggregate() local
604 Offset += EltSize; in unpackLoadToAggregate()
1085 auto EltSize = DL.getTypeAllocSize(AT->getElementType()); in unpackStoreToAggregate() local
1110 Offset += EltSize; in unpackStoreToAggregate()
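
Both unpack routines step through a homogeneous aggregate with the same schedule; a sketch (EltSize is the alloc size, so tail padding is stepped over):

    #include <cstdint>
    #include <vector>

    // One scalar access per element, EltSize bytes apart.
    std::vector<uint64_t> elementOffsets(uint64_t EltSize,
                                         unsigned NumElements) {
      std::vector<uint64_t> Offsets;
      uint64_t Offset = 0;
      for (unsigned i = 0; i != NumElements; ++i) {
        Offsets.push_back(Offset);
        Offset += EltSize;
      }
      return Offsets;
    }
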
/external/llvm/lib/Target/Hexagon/
HexagonISelLowering.cpp 2465 unsigned EltSize = Size / NElts; in LowerBUILD_VECTOR() local
2467 uint64_t Mask = ~uint64_t(0ULL) >> (64 - EltSize); in LowerBUILD_VECTOR()
2485 Res = (Res << EltSize) | Val; in LowerBUILD_VECTOR()
2602 int EltSize = EltVT.getSizeInBits(); in LowerEXTRACT_VECTOR() local
2604 EltSize : VTN * EltSize, dl, MVT::i64); in LowerEXTRACT_VECTOR()
2609 SDValue Offset = DAG.getConstant(X * EltSize, dl, MVT::i32); in LowerEXTRACT_VECTOR()
2648 DAG.getConstant(EltSize, dl, MVT::i32)); in LowerEXTRACT_VECTOR()
2677 int EltSize = EltVT.getSizeInBits(); in LowerINSERT_VECTOR() local
2679 EltSize : VTN * EltSize, dl, MVT::i64); in LowerINSERT_VECTOR()
2682 SDValue Offset = DAG.getConstant(C->getSExtValue() * EltSize, dl, MVT::i32); in LowerINSERT_VECTOR()
[all …]
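
The LowerBUILD_VECTOR() lines above pack a vector of immediates into one wide integer; a standalone sketch of that packing (operand order is assumed, since the excerpt does not show which element lands in the low bits):

    #include <cstdint>
    #include <vector>

    uint64_t packBuildVector(const std::vector<uint64_t> &Elts,
                             unsigned SizeInBits) {
      unsigned EltSize = SizeInBits / Elts.size();   // bits per element
      uint64_t Mask = ~uint64_t(0ULL) >> (64 - EltSize);
      uint64_t Res = 0;
      for (uint64_t Val : Elts)
        Res = (Res << EltSize) | (Val & Mask);       // mask, then shift in
      return Res;
    }
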
/external/llvm/lib/CodeGen/
Analysis.cpp 103 uint64_t EltSize = DL.getTypeAllocSize(EltTy); in ComputeValueVTs() local
106 StartingOffset + i * EltSize); in ComputeValueVTs()
/external/swiftshader/third_party/llvm-7.0/llvm/lib/CodeGen/
Analysis.cpp 102 uint64_t EltSize = DL.getTypeAllocSize(EltTy); in ComputeValueVTs() local
105 StartingOffset + i * EltSize); in ComputeValueVTs()
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Transforms/InstCombine/
InstCombineLoadStoreAlloca.cpp 759 auto EltSize = DL.getTypeAllocSize(ET); in unpackLoadToAggregate() local
783 Offset += EltSize; in unpackLoadToAggregate()
1282 auto EltSize = DL.getTypeAllocSize(AT->getElementType()); in unpackStoreToAggregate() local
1310 Offset += EltSize; in unpackStoreToAggregate()
/external/llvm/lib/CodeGen/SelectionDAG/
LegalizeTypes.cpp 1029 unsigned EltSize = EltVT.getSizeInBits() / 8; // FIXME: should be ABI size. in GetVectorElementPointer() local
1030 assert(EltSize * 8 == EltVT.getSizeInBits() && in GetVectorElementPointer()
1034 DAG.getConstant(EltSize, dl, Index.getValueType())); in GetVectorElementPointer()
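
GetVectorElementPointer() reduces to base-plus-scaled-index arithmetic; a sketch, keeping the source's own caveat that bits/8 should really be the ABI size:

    #include <cassert>
    #include <cstdint>

    uint64_t vectorElementAddress(uint64_t Base, uint64_t Index,
                                  unsigned EltSizeInBits) {
      unsigned EltSize = EltSizeInBits / 8;  // FIXME in source: ABI size
      assert(EltSize * 8 == EltSizeInBits && "element not byte-sized");
      return Base + Index * EltSize;
    }
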
/external/llvm/lib/Target/PowerPC/
PPCISelLowering.h 429 bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);
443 unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize, SelectionDAG &DAG);
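
Only declarations match here; as a reminder of the semantics, a hedged sketch of the property isSplatShuffleMask() verifies for an EltSize-byte splat of a 16-byte Altivec vector (mask entries are byte indices):

    #include <vector>

    bool isSplatMaskSketch(const std::vector<int> &Mask, unsigned EltSize) {
      if (Mask.empty() || Mask[0] % int(EltSize) != 0)
        return false;    // splatted bytes must form one whole element
      for (unsigned i = 0, e = Mask.size(); i != e; ++i)
        if (Mask[i] != Mask[i % EltSize])
          return false;  // every EltSize-byte group repeats the first
      return true;
    }
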
/external/llvm/lib/IR/
AutoUpgrade.cpp 380 unsigned EltSize = Idx->getScalarSizeInBits(); in UpgradeIntrinsicFunction1() local
382 if (EltSize == 64 && IdxSize == 128) in UpgradeIntrinsicFunction1()
384 else if (EltSize == 32 && IdxSize == 128) in UpgradeIntrinsicFunction1()
386 else if (EltSize == 64 && IdxSize == 256) in UpgradeIntrinsicFunction1()
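
The auto-upgrade picks a concrete vpermilvar intrinsic from the index element size and the index vector width; a sketch of the selection (the intrinsic names are my reading of the surrounding code, not shown in the excerpt):

    #include <string>

    std::string pickVPermilvar(unsigned EltSize, unsigned IdxSize) {
      if (EltSize == 64 && IdxSize == 128) return "x86.avx.vpermilvar.pd";
      if (EltSize == 32 && IdxSize == 128) return "x86.avx.vpermilvar.ps";
      if (EltSize == 64 && IdxSize == 256) return "x86.avx.vpermilvar.pd.256";
      return "x86.avx.vpermilvar.ps.256";
    }
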
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/PowerPC/
PPCISelLowering.h 521 bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);
535 unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize, SelectionDAG &DAG);
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/X86/
X86InstrAVX512.td 53 int EltSize = EltVT.Size;
526 EVEX_CD8<From.EltSize, From.CD8TupleForm>,
1173 T8PD, EVEX, EVEX_CD8<SrcInfo.EltSize, CD8VT1>,
1957 []>, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>,
1963 []>, EVEX_4V, EVEX_K, EVEX_CD8<_.EltSize, CD8VF>,
1969 []>, EVEX_4V, EVEX_KZ, EVEX_CD8<_.EltSize, CD8VF>,
1982 EVEX_4V, EVEX_K, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>,
1990 EVEX_4V, EVEX_KZ, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>,
1998 EVEX_4V, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>,
2069 imm:$cc)>, EVEX_4V, EVEX_CD8<_.EltSize, CD8VT1>,
[all …]
