Lines Matching refs:hasNEON
789 if (Subtarget->hasNEON()) { in ARMTargetLowering()
814 if (Subtarget->hasMVEIntegerOps() || Subtarget->hasNEON()) { in ARMTargetLowering()
852 if (Subtarget->hasNEON()) { in ARMTargetLowering()
972 if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) { in ARMTargetLowering()
1437 if (Subtarget->hasNEON()) { in ARMTargetLowering()
1474 if (Subtarget->hasNEON()) { in ARMTargetLowering()
1832 if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) { in getRegClassFor()
5756 bool UseNEON = !InGPR && Subtarget->hasNEON(); in LowerFCOPYSIGN()
6142 if (VT.isVector() && ST->hasNEON()) { in LowerCTTZ()
6199 assert(ST->hasNEON() && "Custom ctpop lowering requires NEON."); in LowerCTPOP()
6419 if (ST->hasNEON()) in LowerVSETCC()
6530 if (ST->hasNEON() && Opc == ARMCC::EQ) { in LowerVSETCC()
6829 if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP())) in LowerConstantFP()
7464 if ((ST->hasNEON() && SplatBitSize <= 64) || in LowerBUILD_VECTOR()
7635 if (ST->hasNEON() && VT.is128BitVector() && VT != MVT::v2f64 && VT != MVT::v4f32) { in LowerBUILD_VECTOR()
7944 if (Cost <= 4 && (Subtarget->hasNEON() || isLegalMVEShuffleOp(PFEntry))) in isShuffleMaskLegal()
7959 else if (Subtarget->hasNEON() && in isShuffleMaskLegal()
7964 else if (Subtarget->hasNEON() && (VT == MVT::v8i16 || VT == MVT::v16i8) && in isShuffleMaskLegal()
8296 if (ST->hasNEON() && isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { in LowerVECTOR_SHUFFLE()
8310 if (ST->hasNEON() && V2->isUndef() && isSingletonVEXTMask(ShuffleMask, VT, Imm)) { in LowerVECTOR_SHUFFLE()
8322 if (ST->hasNEON()) { in LowerVECTOR_SHUFFLE()
8355 if (ST->hasNEON() && V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) { in LowerVECTOR_SHUFFLE()
8398 if (ST->hasNEON()) in LowerVECTOR_SHUFFLE()
8433 if (ST->hasNEON() && (VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(ShuffleMask, VT)) in LowerVECTOR_SHUFFLE()
8436 if (ST->hasNEON() && VT == MVT::v8i8) in LowerVECTOR_SHUFFLE()
10626 Subtarget->hasNEON()) { in EmitStructByval()
11637 if (DCI.isBeforeLegalize() || !Subtarget->hasNEON() in AddCombineBUILD_VECTORToVPADDL()
12517 if (!Subtarget->hasNEON()) { in preferIncOfAddToSubOfNot()
13005 if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) && in PerformANDCombine()
13302 if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) && in PerformORCombine()
13331 if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() && in PerformORCombine()
14525 if (!Subtarget->hasNEON()) in PerformVDUPCombine()
14760 if (Subtarget->hasNEON()) in PerformSTORECombine()
14818 if (Subtarget->hasNEON() && ISD::isNormalStore(N) && VT.isVector() && in PerformSTORECombine()
14836 if (!Subtarget->hasNEON()) in PerformVCVTCombine()
14893 if (!Subtarget->hasNEON()) in PerformVDIVCombine()
15624 if ((ST->hasNEON() || ST->hasMVEIntegerOps()) && in PerformExtendCombine()
16497 if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) { in allowsMisalignedMemoryAccesses()
16547 if ((Op.isMemcpy() || Op.isZeroMemset()) && Subtarget->hasNEON() && in getOptimalMemOpType()
16653 if (Subtarget->hasNEON()) { in shouldSinkOperands()
16894 if (VT.isVector() && Subtarget->hasNEON()) in isLegalT2AddressImmediate()
17671 if (ConstraintVT.isVector() && Subtarget->hasNEON() && in LowerXConstraint()
18688 if (!Subtarget->hasNEON()) in canCombineStoreAndExtract()
18822 if (!Subtarget->hasNEON() && !Subtarget->hasMVEIntegerOps()) in isLegalInterleavedAccessType()
18828 if (Subtarget->hasNEON() && VecTy->getElementType()->isHalfTy()) in isLegalInterleavedAccessType()
18843 if (Subtarget->hasNEON() && VecSize == 64) in isLegalInterleavedAccessType()
18849 if (Subtarget->hasNEON()) in getMaxSupportedInterleaveFactor()
18916 if (Subtarget->hasNEON()) { in lowerInterleavedLoad()
19083 if (Subtarget->hasNEON()) { in lowerInterleavedStore()
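
All of these hits follow the same feature-gating pattern: NEON-specific operation actions, custom lowerings, DAG combines, and register-class choices are enabled only behind Subtarget->hasNEON() (often with hasMVEIntegerOps() as the parallel MVE path, as in the hits at lines 814, 972, 1832, 13005, 13302, 15624, and 18822). A minimal sketch of that pattern, assuming these hits come from LLVM's ARM backend lowering code (e.g. ARMISelLowering.cpp) and using LLVM's setOperationAction API; the specific operations below are illustrative, not a verbatim excerpt:

    // Inside the ARMTargetLowering constructor: register NEON-only
    // legalization actions only when the subtarget actually has NEON.
    if (Subtarget->hasNEON()) {
      // CTPOP needs a custom lowering for these vector types; the hit at
      // line 6199 asserts exactly this ("Custom ctpop lowering requires
      // NEON.").
      setOperationAction(ISD::CTPOP, MVT::v8i8,  Custom);
      setOperationAction(ISD::CTPOP, MVT::v16i8, Custom);
    }

The same guard recurs at query sites rather than setup sites (e.g. LowerVSETCC, PerformSTORECombine, canCombineStoreAndExtract), where a lowering or combine simply bails out early when hasNEON() is false.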