Searched refs:HalfSize (Results 1 – 8 of 8) sorted by relevance
/external/llvm/lib/Target/X86/Utils/ |
D | X86ShuffleDecode.cpp |
    289  unsigned HalfSize = VT.getVectorNumElements() / 2;                   in DecodeVPERM2X128Mask() local
    293  unsigned HalfBegin = (HalfMask & 0x3) * HalfSize;                    in DecodeVPERM2X128Mask()
    294  for (unsigned i = HalfBegin, e = HalfBegin + HalfSize; i != e; ++i)  in DecodeVPERM2X128Mask()
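
The excerpt above decodes a VPERM2F128/VPERM2I128 control byte: each 4-bit field picks one 128-bit half (HalfSize elements) out of the two concatenated sources, and bit 3 of the field zeroes that half. Below is a minimal standalone sketch of that decode logic; decodeVPerm2x128 and the plain std::vector return type are illustrative stand-ins, not LLVM's DecodeVPERM2X128Mask signature.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Sketch only: decode a vperm2x128-style control byte into element indices.
    // NumElts is the element count of the full 256-bit vector; -1 marks a zeroed
    // element (the real decoder uses a sentinel constant instead).
    std::vector<int> decodeVPerm2x128(unsigned NumElts, uint8_t Imm) {
      std::vector<int> Mask;
      unsigned HalfSize = NumElts / 2;           // elements per 128-bit half
      for (unsigned Half = 0; Half != 2; ++Half) {
        unsigned HalfMask = Imm >> (Half * 4);   // low nibble = low result half
        unsigned HalfBegin = (HalfMask & 0x3) * HalfSize;
        for (unsigned i = HalfBegin, e = HalfBegin + HalfSize; i != e; ++i)
          Mask.push_back((HalfMask & 0x8) ? -1 : (int)i);
      }
      return Mask;
    }

    int main() {
      // Imm = 0x21: low result half <- source half 1, high result half <- half 2.
      for (int Idx : decodeVPerm2x128(/*NumElts=*/4, /*Imm=*/0x21))
        std::printf("%d ", Idx);                 // prints: 2 3 4 5
      std::printf("\n");
    }
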
/external/llvm/lib/Transforms/InstCombine/ |
D | InstCombineCalls.cpp |
    603  unsigned HalfSize = NumElts / 2;                     in SimplifyX86vperm2() local
    624  unsigned StartIndex = LowHalfSelect ? HalfSize : 0;  in SimplifyX86vperm2()
    625  for (unsigned i = 0; i < HalfSize; ++i)              in SimplifyX86vperm2()
    629  StartIndex = HighHalfSelect ? HalfSize : 0;          in SimplifyX86vperm2()
    631  for (unsigned i = 0; i < HalfSize; ++i)              in SimplifyX86vperm2()
    632  ShuffleMask[i + HalfSize] = StartIndex + i;          in SimplifyX86vperm2()
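
SimplifyX86vperm2 rewrites the intrinsic as a generic shufflevector: StartIndex chooses which source half feeds each half of the result, and the two loops write HalfSize sequential indices apiece. The sketch below models only that mask construction with plain containers; buildHalfSelectMask is a made-up helper, and the source-operand selection and zeroing bits handled by the real transform are left out.

    #include <cstdio>
    #include <vector>

    // Sketch only: the element-level shuffle mask implied by selecting a low or
    // high half for each half of the result.
    std::vector<unsigned> buildHalfSelectMask(unsigned NumElts, bool LowHalfSelect,
                                              bool HighHalfSelect) {
      unsigned HalfSize = NumElts / 2;
      std::vector<unsigned> ShuffleMask(NumElts);

      unsigned StartIndex = LowHalfSelect ? HalfSize : 0;
      for (unsigned i = 0; i < HalfSize; ++i)
        ShuffleMask[i] = StartIndex + i;             // low half of the result

      StartIndex = HighHalfSelect ? HalfSize : 0;
      for (unsigned i = 0; i < HalfSize; ++i)
        ShuffleMask[i + HalfSize] = StartIndex + i;  // high half of the result

      return ShuffleMask;
    }

    int main() {
      // Swap the two halves of an 8-element vector: 4 5 6 7 0 1 2 3.
      for (unsigned Idx : buildHalfSelectMask(8, /*LowHalfSelect=*/true,
                                              /*HighHalfSelect=*/false))
        std::printf("%u ", Idx);
      std::printf("\n");
    }
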
/external/llvm/lib/Target/AMDGPU/ |
D | SIInstrInfo.cpp |
    2244  unsigned HalfSize = HalfRC->getSize();    in splitSMRD() local
    2260  unsigned HiOffset = LoOffset + HalfSize;  in splitSMRD()
    2291  .addImm(HalfSize);                        in splitSMRD()
    2300  switch (HalfSize) {                       in splitSMRD()
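
splitSMRD breaks one wide scalar memory load into two loads of half the register size, the second offset by HalfSize bytes. The fragment below is only a schematic of that offset arithmetic; LoadDesc and splitLoad are hypothetical names, and none of the MachineInstr building (.addImm(HalfSize), the switch on HalfSize to pick an opcode) is modelled.

    #include <cstdio>

    // Hypothetical plain-data stand-in for one scalar load; not an LLVM type.
    struct LoadDesc {
      unsigned Offset;  // byte offset from the base pointer
      unsigned Size;    // bytes loaded
    };

    // Sketch of the splitting idea: one load of Full.Size bytes becomes two loads
    // of HalfSize bytes, the second starting HalfSize bytes after the first.
    void splitLoad(const LoadDesc &Full, LoadDesc &Lo, LoadDesc &Hi) {
      unsigned HalfSize = Full.Size / 2;
      unsigned LoOffset = Full.Offset;
      unsigned HiOffset = LoOffset + HalfSize;

      Lo = {LoOffset, HalfSize};
      Hi = {HiOffset, HalfSize};
    }

    int main() {
      LoadDesc Full = {/*Offset=*/16, /*Size=*/32}, Lo, Hi;
      splitLoad(Full, Lo, Hi);
      std::printf("lo: offset %u size %u, hi: offset %u size %u\n",
                  Lo.Offset, Lo.Size, Hi.Offset, Hi.Size);
    }
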
/external/llvm/lib/Target/Mips/ |
D | MipsSEISelLowering.cpp |
    2695  unsigned HalfSize = Indices.size() / 2;                                         in lowerVECTOR_SHUFFLE_ILVL() local
    2703  if (fitsRegularPattern<int>(Begin, 2, End, HalfSize, 1))                        in lowerVECTOR_SHUFFLE_ILVL()
    2705  else if (fitsRegularPattern<int>(Begin, 2, End, Indices.size() + HalfSize, 1))  in lowerVECTOR_SHUFFLE_ILVL()
    2712  if (fitsRegularPattern<int>(Begin + 1, 2, End, HalfSize, 1))                    in lowerVECTOR_SHUFFLE_ILVL()
    2714  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Indices.size() + HalfSize,  in lowerVECTOR_SHUFFLE_ILVL()
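
The Mips MSA lowering recognises an ILVL (interleave the upper halves) shuffle by checking that the even and odd result positions each count up from HalfSize, i.e. from the start of the upper half of one of the two sources (the second source's elements start at index Indices.size()). Below is a self-contained sketch of that test; fitsRegularPattern here is a simplified stand-in for LLVM's helper of the same name (undef entries are not handled), and isInterleaveLeft is a hypothetical wrapper.

    #include <cstdio>
    #include <vector>

    // Simplified stand-in for LLVM's fitsRegularPattern: walk the mask with step
    // CheckStride and require the visited values to be Expected, Expected+Stride, ...
    static bool fitsRegularPattern(std::vector<int>::const_iterator I,
                                   unsigned CheckStride,
                                   std::vector<int>::const_iterator End,
                                   int Expected, int ExpectedStride) {
      while (I != End) {
        if (*I != Expected)
          return false;
        Expected += ExpectedStride;
        // Advance one element at a time so we never step past End.
        for (unsigned n = 0; n < CheckStride && I != End; ++n, ++I)
          ;
      }
      return true;
    }

    // Hypothetical wrapper for the ILVL test: even and odd result positions must
    // each step through the upper half (starting at HalfSize) of one source.
    static bool isInterleaveLeft(const std::vector<int> &Indices) {
      int HalfSize = (int)Indices.size() / 2;
      int NumElts = (int)Indices.size();
      auto Begin = Indices.begin(), End = Indices.end();

      bool EvenOk = fitsRegularPattern(Begin, 2, End, HalfSize, 1) ||
                    fitsRegularPattern(Begin, 2, End, NumElts + HalfSize, 1);
      bool OddOk = fitsRegularPattern(Begin + 1, 2, End, HalfSize, 1) ||
                   fitsRegularPattern(Begin + 1, 2, End, NumElts + HalfSize, 1);
      return EvenOk && OddOk;
    }

    int main() {
      // Interleave the upper halves of two 4-element sources: <2, 6, 3, 7>.
      std::vector<int> Mask = {2, 6, 3, 7};
      std::printf("%s\n", isInterleaveLeft(Mask) ? "ilvl" : "not ilvl");
    }
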
/external/llvm/lib/CodeGen/SelectionDAG/ |
D | SelectionDAG.cpp |
    7208  unsigned HalfSize = sz / 2;                                   in isConstantSplat() local
    7209  APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);  in isConstantSplat()
    7210  APInt LowValue = SplatValue.trunc(HalfSize);                  in isConstantSplat()
    7211  APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);  in isConstantSplat()
    7212  APInt LowUndef = SplatUndef.trunc(HalfSize);                  in isConstantSplat()
    7216  MinSplatBits > HalfSize)                                      in isConstantSplat()
    7222  sz = HalfSize;                                                in isConstantSplat()
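
isConstantSplat keeps halving the candidate splat width: it splits the value (and its undef mask) into high and low halves and, while the two halves agree and the caller still accepts HalfSize bits, continues with the low half. A rough sketch of that loop, using a plain uint64_t instead of APInt and without undef-bit bookkeeping:

    #include <cstdint>
    #include <cstdio>

    // Sketch only: shrink sz (the current splat width in bits, at most 64 here,
    // so the shifts below stay well defined) while both halves are identical.
    unsigned minimumSplatBits(uint64_t SplatValue, unsigned sz, unsigned MinSplatBits) {
      while (sz > 8) {
        unsigned HalfSize = sz / 2;
        uint64_t HalfMask = (UINT64_C(1) << HalfSize) - 1;
        uint64_t HighValue = (SplatValue >> HalfSize) & HalfMask;
        uint64_t LowValue = SplatValue & HalfMask;

        // Stop once the halves differ or the caller needs more bits than a half.
        if (HighValue != LowValue || MinSplatBits > HalfSize)
          break;

        SplatValue = LowValue;
        sz = HalfSize;
      }
      return sz;
    }

    int main() {
      std::printf("%u\n", minimumSplatBits(UINT64_C(0x0101010101010101), 64, 8)); // 8
      std::printf("%u\n", minimumSplatBits(UINT64_C(0x0001000200010002), 64, 8)); // 32
    }
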
/external/llvm/lib/Target/X86/ |
D | X86ISelLowering.cpp |
    7498  int HalfSize = Size / 2;                                       in lowerVectorShuffleWithSSE4A() local
    7502  if (!isUndefInRange(Mask, HalfSize, HalfSize))                 in lowerVectorShuffleWithSSE4A()
    7510  int Len = HalfSize;                                            in lowerVectorShuffleWithSSE4A()
    7528  if (i > M || M >= HalfSize)                                    in lowerVectorShuffleWithSSE4A()
    7542  assert((Idx + Len) <= HalfSize && "Illegal extraction mask");  in lowerVectorShuffleWithSSE4A()
    7557  for (int Idx = 0; Idx != HalfSize; ++Idx) {                    in lowerVectorShuffleWithSSE4A()
    7573  for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {                 in lowerVectorShuffleWithSSE4A()
    7587  if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {                 in lowerVectorShuffleWithSSE4A()
    7590  isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {     in lowerVectorShuffleWithSSE4A()
    7593  isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,            in lowerVectorShuffleWithSSE4A()
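
The SSE4A path tries to express the shuffle as an EXTRQ-style bit-field extraction: the upper half of the mask must be undef, and the defined low-half entries must form one contiguous run of Len elements starting at Idx inside the source's low half (hence the assert that Idx + Len <= HalfSize). A much-reduced standalone sketch of that match follows; matchLowHalfExtract is a made-up name and the zeroable-element handling of the real lowering is omitted.

    #include <cstdio>
    #include <vector>

    // Sketch only: match a mask that extracts one contiguous run from the low
    // half of the source into the low lanes of the result, with the upper half
    // of the result undef (-1).
    static bool matchLowHalfExtract(const std::vector<int> &Mask, int &Idx, int &Len) {
      int Size = (int)Mask.size();
      int HalfSize = Size / 2;

      // The upper half of the result must be completely undef.
      for (int i = HalfSize; i < Size; ++i)
        if (Mask[i] >= 0)
          return false;

      Idx = -1;
      Len = 0;
      for (int i = 0; i < HalfSize; ++i) {
        int M = Mask[i];
        if (M < 0)
          continue;                   // undef entries are fine anywhere
        if (i > M || M >= HalfSize)   // must read the low half, shifting left only
          return false;
        if (Idx < 0)
          Idx = M - i;                // first defined entry fixes the start
        else if (M != Idx + i)        // later entries must continue the run
          return false;
        Len = i + 1;
      }
      return Idx >= 0 && Idx + Len <= HalfSize;
    }

    int main() {
      // v8i16 mask <1,2,3,u,u,u,u,u>: extract 3 elements starting at index 1.
      std::vector<int> Mask = {1, 2, 3, -1, -1, -1, -1, -1};
      int Idx, Len;
      if (matchLowHalfExtract(Mask, Idx, Len))
        std::printf("extract Len=%d at Idx=%d\n", Len, Idx);
    }
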
/external/llvm/lib/Target/AArch64/ |
D | AArch64ISelLowering.cpp |
    2062  unsigned HalfSize = EltSize / 2;           in isExtendedBUILD_VECTOR() local
    2064  if (!isIntN(HalfSize, C->getSExtValue()))  in isExtendedBUILD_VECTOR()
    2067  if (!isUIntN(HalfSize, C->getZExtValue())) in isExtendedBUILD_VECTOR()
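
isExtendedBUILD_VECTOR asks whether every constant element of a BUILD_VECTOR would survive truncation to half the element width, using the signed range for sext and the unsigned range for zext, so a widening operation can use the narrow form. The sketch below mirrors that check with hand-rolled range tests; fitsSignedBits and fitsUnsignedBits are stand-ins for llvm::isIntN / llvm::isUIntN and assume 0 < N < 64.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Hand-rolled range checks standing in for llvm::isIntN / llvm::isUIntN;
    // both assume 0 < N < 64.
    static bool fitsSignedBits(unsigned N, int64_t V) {
      return V >= -(INT64_C(1) << (N - 1)) && V < (INT64_C(1) << (N - 1));
    }
    static bool fitsUnsignedBits(unsigned N, uint64_t V) {
      return V < (UINT64_C(1) << N);
    }

    // Sketch of the BUILD_VECTOR test: every constant must fit in half the
    // element width, signed for a sext-based widening, unsigned for zext.
    static bool isExtendedBuildVector(const std::vector<int64_t> &Elts,
                                      unsigned EltSize, bool IsSigned) {
      unsigned HalfSize = EltSize / 2;
      for (int64_t C : Elts) {
        if (IsSigned ? !fitsSignedBits(HalfSize, C)
                     : !fitsUnsignedBits(HalfSize, (uint64_t)C))
          return false;
      }
      return true;
    }

    int main() {
      // v4i32 constants: do they all survive truncation to i16 plus sign-extension?
      std::vector<int64_t> Elts = {100, -200, 32767, -32768};
      std::printf("%s\n", isExtendedBuildVector(Elts, 32, /*IsSigned=*/true)
                              ? "all fit in 16 bits"
                              : "needs the full width");
    }
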
/external/llvm/lib/Target/ARM/ |
D | ARMISelLowering.cpp |
    6188  unsigned HalfSize = EltSize / 2;           in isExtendedBUILD_VECTOR() local
    6190  if (!isIntN(HalfSize, C->getSExtValue()))  in isExtendedBUILD_VECTOR()
    6193  if (!isUIntN(HalfSize, C->getZExtValue())) in isExtendedBUILD_VECTOR()
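
The ARM (NEON) copy of isExtendedBUILD_VECTOR applies the same half-width test as the AArch64 version above. As a quick worked example of the ranges involved (v4i32 is an assumed element type, not taken from the excerpt), HalfSize is 16, so isIntN accepts [-32768, 32767] and isUIntN accepts [0, 65535]:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Assumed example: a v4i32 build_vector, so EltSize = 32 and HalfSize = 16.
      unsigned EltSize = 32, HalfSize = EltSize / 2;
      int64_t SMin = -(INT64_C(1) << (HalfSize - 1));     // isIntN lower bound
      int64_t SMax = (INT64_C(1) << (HalfSize - 1)) - 1;  // isIntN upper bound
      uint64_t UMax = (UINT64_C(1) << HalfSize) - 1;      // isUIntN upper bound
      std::printf("HalfSize=%u  signed [%lld, %lld]  unsigned [0, %llu]\n",
                  HalfSize, (long long)SMin, (long long)SMax,
                  (unsigned long long)UMax);
    }
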