Lines Matching refs:hasAVX2 (uses of Subtarget.hasAVX2() in LLVM's X86ISelLowering.cpp; a sketch of the recurring pattern these checks share follows the list)
4535 MVT CastVT = Subtarget.hasAVX2() ? MVT::v8i32 : MVT::v8f32; in insert128BitVector()
5842 if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) { in LowerVectorBroadcast()
5851 (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) { in LowerVectorBroadcast()
6449 if (Subtarget.hasAVX2()) in LowerToHorizontalOp()
7314 (Subtarget.hasAVX2() && VT.is256BitVector()) || in lowerVectorShuffleWithPSHUFB()
7531 assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!"); in lowerVectorShuffleAsBlend()
7537 if (Subtarget.hasAVX2()) { in lowerVectorShuffleAsBlend()
7562 assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!"); in lowerVectorShuffleAsBlend()
7578 assert((VT.is128BitVector() || Subtarget.hasAVX2()) && in lowerVectorShuffleAsBlend()
8485 assert(Subtarget.hasAVX2() && in lowerVectorShuffleAsTruncBroadcast()
8543 (Subtarget.hasAVX2() && VT.isInteger()))) in lowerVectorShuffleAsBroadcast()
8550 bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2(); in lowerVectorShuffleAsBroadcast()
10718 if (Subtarget.hasAVX2() && V2.isUndef()) in lowerV2X128VectorShuffle()
10949 if (Subtarget.hasAVX2() && !(UndefUpper && NumUpperHalves == 0)) { in lowerVectorShuffleWithUndefHalf()
11005 if (Subtarget.hasAVX2()) { in lowerShuffleAsRepeatedMaskAndLanePermute()
11054 int SubLaneScale = Subtarget.hasAVX2() && VT.is256BitVector() ? 2 : 1; in lowerShuffleAsRepeatedMaskAndLanePermute()
11190 if (Subtarget.hasAVX2()) in lowerV4F64VectorShuffle()
11229 if (!(Subtarget.hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) || in lowerV4F64VectorShuffle()
11237 if (Subtarget.hasAVX2()) in lowerV4F64VectorShuffle()
11256 assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!"); in lowerV4I64VectorShuffle()
11307 if (!(Subtarget.hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) || in lowerV4I64VectorShuffle()
11383 if (Subtarget.hasAVX2()) in lowerV8F32VectorShuffle()
11400 if (Subtarget.hasAVX2()) in lowerV8F32VectorShuffle()
11419 assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!"); in lowerV8I32VectorShuffle()
11502 assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!"); in lowerV16I16VectorShuffle()
11583 assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!"); in lowerV32I8VectorShuffle()
11672 if (VT.isInteger() && !Subtarget.hasAVX2()) { in lower256BitVectorShuffle()
12303 if (Subtarget.hasAVX2()) in LowerVSELECT()
12596 (Subtarget.hasAVX2() && EltVT == MVT::i32)) { in LowerINSERT_VECTOR_ELT()
21816 assert((!Subtarget.hasAVX2() || RegSize < 256) && in ReplaceNodeResults()
24754 (!Subtarget.hasAVX2() && SrcVT.is256BitVector()); in matchUnaryVectorShuffle()
24827 if (Subtarget.hasAVX2()) { in matchUnaryVectorShuffle()
24907 if (SrcVT.is256BitVector() && !Subtarget.hasAVX2()) in matchPermuteVectorShuffle()
24913 if (Subtarget.hasAVX2() && SrcVT.is256BitVector() && Mask.size() == 4) { in matchPermuteVectorShuffle()
25066 MVT ShuffleVT = (VT.isFloatingPoint() || !Subtarget.hasAVX2() ? MVT::v4f64 in combineX86ShuffleChain()
25099 (VT.isFloatingPoint() || (VT.is256BitVector() && !Subtarget.hasAVX2())) && in combineX86ShuffleChain()
25154 if (Subtarget.hasAVX2()) { in combineX86ShuffleChain()
25263 (VT.is256BitVector() && Subtarget.hasAVX2()) || in combineX86ShuffleChain()
26742 (Subtarget.hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) { in combineSelect()
26880 if (VT == MVT::v32i8 && !Subtarget.hasAVX2()) in combineSelect()
28610 case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break; in foldVectorXorShiftIntoCmp()
28685 } else if (Subtarget.hasAVX2()) { in detectAVGPattern()
29673 if (!Subtarget.hasSSE2() || Subtarget.hasAVX2()) in combineVectorTruncation()
30642 else if (Subtarget.hasAVX2()) in detectSADPattern()
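
The matches above repeatedly pair hasAVX2() with is256BitVector() or isInteger() (e.g. the assertions "256-bit integer blends require AVX2!" and the split in lower256BitVectorShuffle): AVX1 already provides 256-bit floating-point operations, but 256-bit integer operations require AVX2, so without it the lowering falls back to 128-bit halves or a float-domain op. Below is a minimal standalone sketch of that decision shape, not LLVM's actual code; Subtarget, VT, and chooseLowering here are hypothetical stand-ins loosely modeled on the LLVM types of the same names.

#include <cstdio>

// Hypothetical stand-in for LLVM's X86Subtarget feature query.
struct Subtarget {
  bool HasAVX2;
  bool hasAVX2() const { return HasAVX2; }
};

// Hypothetical value-type descriptor, loosely modeled on llvm::MVT.
struct VT {
  int Bits;      // total vector width in bits
  bool Integer;  // integer vs. floating-point element type
  bool is256BitVector() const { return Bits == 256; }
  bool isInteger() const { return Integer; }
};

// Assumed decision logic mirroring the pattern in the matches above:
// a 256-bit *integer* op needs AVX2; otherwise split into 128-bit ops.
const char *chooseLowering(const Subtarget &ST, VT Ty) {
  if (Ty.is256BitVector() && Ty.isInteger() && !ST.hasAVX2())
    return "split into two 128-bit ops (AVX1 integer fallback)";
  return "single 256-bit op";
}

int main() {
  Subtarget AVX1{false}, AVX2{true};
  VT V8I32{256, true};  // e.g. MVT::v8i32 in the real code
  std::printf("AVX1 v8i32: %s\n", chooseLowering(AVX1, V8I32));
  std::printf("AVX2 v8i32: %s\n", chooseLowering(AVX2, V8I32));
  return 0;
}

Running the sketch prints the AVX1 fallback for the first query and the single 256-bit op for the second, which is the same gating that lines such as 7531, 11672, and 24754 above perform in the real lowering code.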