Lines Matching refs:VT
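Most of the matches below sit in the X86TargetLowering constructor in LLVM's X86 backend (X86ISelLowering.cpp), where the target records, per value type VT, how each SelectionDAG operation is to be legalized; the later matches are VT parameters and locals in the file's lowering helpers. For orientation, here is a hedged sketch of the registration pattern those constructor lines repeat. It is a fragment as it would appear inside the constructor body; the concrete types and actions are illustrative only, not a copy of the real configuration, while the setter names (setOperationAction, setLoadExtAction, setTruncStoreAction, setOperationPromotedToType) are the standard TargetLoweringBase hooks used throughout the listing.

    // Illustrative fragment: register per-type legalization actions.
    for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32}) {
      // Custom: the legalizer calls back into X86TargetLowering::LowerOperation.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      // Expand: the op is rewritten in terms of other, already-legal operations.
      setOperationAction(ISD::ROTL, VT, Expand);
      // Extending loads and truncating stores are keyed on (value type, memory type) pairs.
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
      setTruncStoreAction(VT, MVT::v4i8, Expand);
      // Promote: perform the whole op in a wider type (bitcast to v2i64 and back).
      setOperationPromotedToType(ISD::AND, VT, MVT::v2i64);
    }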
141 for (MVT VT : MVT::integer_valuetypes()) in X86TargetLowering() local
142 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); in X86TargetLowering()
282 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) { in X86TargetLowering()
283 setOperationAction(ISD::MULHS, VT, Expand); in X86TargetLowering()
284 setOperationAction(ISD::MULHU, VT, Expand); in X86TargetLowering()
285 setOperationAction(ISD::SDIV, VT, Expand); in X86TargetLowering()
286 setOperationAction(ISD::UDIV, VT, Expand); in X86TargetLowering()
287 setOperationAction(ISD::SREM, VT, Expand); in X86TargetLowering()
288 setOperationAction(ISD::UREM, VT, Expand); in X86TargetLowering()
291 setOperationAction(ISD::ADDC, VT, Custom); in X86TargetLowering()
292 setOperationAction(ISD::ADDE, VT, Custom); in X86TargetLowering()
293 setOperationAction(ISD::SUBC, VT, Custom); in X86TargetLowering()
294 setOperationAction(ISD::SUBE, VT, Custom); in X86TargetLowering()
299 for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128, in X86TargetLowering()
301 setOperationAction(ISD::BR_CC, VT, Expand); in X86TargetLowering()
302 setOperationAction(ISD::SELECT_CC, VT, Expand); in X86TargetLowering()
388 for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) { in X86TargetLowering()
389 setOperationAction(ISD::SELECT, VT, Custom); in X86TargetLowering()
390 setOperationAction(ISD::SETCC, VT, Custom); in X86TargetLowering()
392 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) { in X86TargetLowering()
393 if (VT == MVT::i64 && !Subtarget.is64Bit()) in X86TargetLowering()
395 setOperationAction(ISD::SELECT, VT, Custom); in X86TargetLowering()
396 setOperationAction(ISD::SETCC, VT, Custom); in X86TargetLowering()
397 setOperationAction(ISD::SETCCE, VT, Custom); in X86TargetLowering()
413 for (auto VT : { MVT::i32, MVT::i64 }) { in X86TargetLowering()
414 if (VT == MVT::i64 && !Subtarget.is64Bit()) in X86TargetLowering()
416 setOperationAction(ISD::ConstantPool , VT, Custom); in X86TargetLowering()
417 setOperationAction(ISD::JumpTable , VT, Custom); in X86TargetLowering()
418 setOperationAction(ISD::GlobalAddress , VT, Custom); in X86TargetLowering()
419 setOperationAction(ISD::GlobalTLSAddress, VT, Custom); in X86TargetLowering()
420 setOperationAction(ISD::ExternalSymbol , VT, Custom); in X86TargetLowering()
421 setOperationAction(ISD::BlockAddress , VT, Custom); in X86TargetLowering()
424 for (auto VT : { MVT::i32, MVT::i64 }) { in X86TargetLowering()
425 if (VT == MVT::i64 && !Subtarget.is64Bit()) in X86TargetLowering()
427 setOperationAction(ISD::SHL_PARTS, VT, Custom); in X86TargetLowering()
428 setOperationAction(ISD::SRA_PARTS, VT, Custom); in X86TargetLowering()
429 setOperationAction(ISD::SRL_PARTS, VT, Custom); in X86TargetLowering()
438 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) { in X86TargetLowering()
439 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom); in X86TargetLowering()
440 setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom); in X86TargetLowering()
441 setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom); in X86TargetLowering()
442 setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom); in X86TargetLowering()
443 setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom); in X86TargetLowering()
444 setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom); in X86TargetLowering()
445 setOperationAction(ISD::ATOMIC_STORE, VT, Custom); in X86TargetLowering()
490 for (auto VT : { MVT::f32, MVT::f64 }) { in X86TargetLowering()
492 setOperationAction(ISD::FABS, VT, Custom); in X86TargetLowering()
495 setOperationAction(ISD::FNEG, VT, Custom); in X86TargetLowering()
498 setOperationAction(ISD::FCOPYSIGN, VT, Custom); in X86TargetLowering()
501 setOperationAction(ISD::FSIN , VT, Expand); in X86TargetLowering()
502 setOperationAction(ISD::FCOS , VT, Expand); in X86TargetLowering()
503 setOperationAction(ISD::FSINCOS, VT, Expand); in X86TargetLowering()
555 for (auto VT : { MVT::f32, MVT::f64 }) { in X86TargetLowering()
556 setOperationAction(ISD::UNDEF, VT, Expand); in X86TargetLowering()
557 setOperationAction(ISD::FCOPYSIGN, VT, Expand); in X86TargetLowering()
560 setOperationAction(ISD::FSIN , VT, Expand); in X86TargetLowering()
561 setOperationAction(ISD::FCOS , VT, Expand); in X86TargetLowering()
562 setOperationAction(ISD::FSINCOS, VT, Expand); in X86TargetLowering()
635 for (auto VT : { MVT::v4f32, MVT::v8f32, MVT::v16f32, in X86TargetLowering()
637 setOperationAction(ISD::FSIN, VT, Expand); in X86TargetLowering()
638 setOperationAction(ISD::FSINCOS, VT, Expand); in X86TargetLowering()
639 setOperationAction(ISD::FCOS, VT, Expand); in X86TargetLowering()
640 setOperationAction(ISD::FREM, VT, Expand); in X86TargetLowering()
641 setOperationAction(ISD::FPOWI, VT, Expand); in X86TargetLowering()
642 setOperationAction(ISD::FCOPYSIGN, VT, Expand); in X86TargetLowering()
643 setOperationAction(ISD::FPOW, VT, Expand); in X86TargetLowering()
644 setOperationAction(ISD::FLOG, VT, Expand); in X86TargetLowering()
645 setOperationAction(ISD::FLOG2, VT, Expand); in X86TargetLowering()
646 setOperationAction(ISD::FLOG10, VT, Expand); in X86TargetLowering()
647 setOperationAction(ISD::FEXP, VT, Expand); in X86TargetLowering()
648 setOperationAction(ISD::FEXP2, VT, Expand); in X86TargetLowering()
654 for (MVT VT : MVT::vector_valuetypes()) { in X86TargetLowering() local
655 setOperationAction(ISD::SDIV, VT, Expand); in X86TargetLowering()
656 setOperationAction(ISD::UDIV, VT, Expand); in X86TargetLowering()
657 setOperationAction(ISD::SREM, VT, Expand); in X86TargetLowering()
658 setOperationAction(ISD::UREM, VT, Expand); in X86TargetLowering()
659 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand); in X86TargetLowering()
660 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand); in X86TargetLowering()
661 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand); in X86TargetLowering()
662 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand); in X86TargetLowering()
663 setOperationAction(ISD::FMA, VT, Expand); in X86TargetLowering()
664 setOperationAction(ISD::FFLOOR, VT, Expand); in X86TargetLowering()
665 setOperationAction(ISD::FCEIL, VT, Expand); in X86TargetLowering()
666 setOperationAction(ISD::FTRUNC, VT, Expand); in X86TargetLowering()
667 setOperationAction(ISD::FRINT, VT, Expand); in X86TargetLowering()
668 setOperationAction(ISD::FNEARBYINT, VT, Expand); in X86TargetLowering()
669 setOperationAction(ISD::SMUL_LOHI, VT, Expand); in X86TargetLowering()
670 setOperationAction(ISD::MULHS, VT, Expand); in X86TargetLowering()
671 setOperationAction(ISD::UMUL_LOHI, VT, Expand); in X86TargetLowering()
672 setOperationAction(ISD::MULHU, VT, Expand); in X86TargetLowering()
673 setOperationAction(ISD::SDIVREM, VT, Expand); in X86TargetLowering()
674 setOperationAction(ISD::UDIVREM, VT, Expand); in X86TargetLowering()
675 setOperationAction(ISD::CTPOP, VT, Expand); in X86TargetLowering()
676 setOperationAction(ISD::CTTZ, VT, Expand); in X86TargetLowering()
677 setOperationAction(ISD::CTLZ, VT, Expand); in X86TargetLowering()
678 setOperationAction(ISD::ROTL, VT, Expand); in X86TargetLowering()
679 setOperationAction(ISD::ROTR, VT, Expand); in X86TargetLowering()
680 setOperationAction(ISD::BSWAP, VT, Expand); in X86TargetLowering()
681 setOperationAction(ISD::SETCC, VT, Expand); in X86TargetLowering()
682 setOperationAction(ISD::FP_TO_UINT, VT, Expand); in X86TargetLowering()
683 setOperationAction(ISD::FP_TO_SINT, VT, Expand); in X86TargetLowering()
684 setOperationAction(ISD::UINT_TO_FP, VT, Expand); in X86TargetLowering()
685 setOperationAction(ISD::SINT_TO_FP, VT, Expand); in X86TargetLowering()
686 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand); in X86TargetLowering()
687 setOperationAction(ISD::TRUNCATE, VT, Expand); in X86TargetLowering()
688 setOperationAction(ISD::SIGN_EXTEND, VT, Expand); in X86TargetLowering()
689 setOperationAction(ISD::ZERO_EXTEND, VT, Expand); in X86TargetLowering()
690 setOperationAction(ISD::ANY_EXTEND, VT, Expand); in X86TargetLowering()
691 setOperationAction(ISD::SELECT_CC, VT, Expand); in X86TargetLowering()
693 setTruncStoreAction(InnerVT, VT, Expand); in X86TargetLowering()
695 setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand); in X86TargetLowering()
696 setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand); in X86TargetLowering()
702 if (VT.getVectorElementType() == MVT::i1) in X86TargetLowering()
703 setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand); in X86TargetLowering()
707 if (VT.getVectorElementType() == MVT::f16) in X86TargetLowering()
708 setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand); in X86TargetLowering()
782 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) { in X86TargetLowering()
783 setOperationAction(ISD::BUILD_VECTOR, VT, Custom); in X86TargetLowering()
784 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); in X86TargetLowering()
785 setOperationAction(ISD::VSELECT, VT, Custom); in X86TargetLowering()
786 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); in X86TargetLowering()
793 for (MVT VT : MVT::integer_vector_valuetypes()) { in X86TargetLowering() local
794 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom); in X86TargetLowering()
795 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom); in X86TargetLowering()
796 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom); in X86TargetLowering()
797 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom); in X86TargetLowering()
798 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom); in X86TargetLowering()
799 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom); in X86TargetLowering()
800 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom); in X86TargetLowering()
801 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom); in X86TargetLowering()
802 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom); in X86TargetLowering()
805 for (auto VT : { MVT::v2f64, MVT::v2i64 }) { in X86TargetLowering()
806 setOperationAction(ISD::BUILD_VECTOR, VT, Custom); in X86TargetLowering()
807 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); in X86TargetLowering()
808 setOperationAction(ISD::VSELECT, VT, Custom); in X86TargetLowering()
810 if (VT == MVT::v2i64 && !Subtarget.is64Bit()) in X86TargetLowering()
813 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); in X86TargetLowering()
814 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); in X86TargetLowering()
818 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) { in X86TargetLowering()
819 setOperationPromotedToType(ISD::AND, VT, MVT::v2i64); in X86TargetLowering()
820 setOperationPromotedToType(ISD::OR, VT, MVT::v2i64); in X86TargetLowering()
821 setOperationPromotedToType(ISD::XOR, VT, MVT::v2i64); in X86TargetLowering()
822 setOperationPromotedToType(ISD::LOAD, VT, MVT::v2i64); in X86TargetLowering()
823 setOperationPromotedToType(ISD::SELECT, VT, MVT::v2i64); in X86TargetLowering()
845 for (MVT VT : MVT::fp_vector_valuetypes()) in X86TargetLowering() local
846 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal); in X86TargetLowering()
856 for (auto VT : { MVT::v8i16, MVT::v16i8 }) { in X86TargetLowering()
857 setOperationAction(ISD::SRL, VT, Custom); in X86TargetLowering()
858 setOperationAction(ISD::SHL, VT, Custom); in X86TargetLowering()
859 setOperationAction(ISD::SRA, VT, Custom); in X86TargetLowering()
864 for (auto VT : { MVT::v4i32, MVT::v2i64 }) { in X86TargetLowering()
865 setOperationAction(ISD::SRL, VT, Custom); in X86TargetLowering()
866 setOperationAction(ISD::SHL, VT, Custom); in X86TargetLowering()
867 setOperationAction(ISD::SRA, VT, Custom); in X86TargetLowering()
906 for (MVT VT : MVT::integer_vector_valuetypes()) { in X86TargetLowering() local
907 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom); in X86TargetLowering()
908 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom); in X86TargetLowering()
909 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom); in X86TargetLowering()
933 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, in X86TargetLowering()
935 setOperationAction(ISD::ROTL, VT, Custom); in X86TargetLowering()
938 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) in X86TargetLowering()
939 setOperationAction(ISD::BITREVERSE, VT, Custom); in X86TargetLowering()
941 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, in X86TargetLowering()
943 setOperationAction(ISD::BITREVERSE, VT, Custom); in X86TargetLowering()
956 for (auto VT : { MVT::v8f32, MVT::v4f64 }) { in X86TargetLowering()
957 setOperationAction(ISD::FFLOOR, VT, Legal); in X86TargetLowering()
958 setOperationAction(ISD::FCEIL, VT, Legal); in X86TargetLowering()
959 setOperationAction(ISD::FTRUNC, VT, Legal); in X86TargetLowering()
960 setOperationAction(ISD::FRINT, VT, Legal); in X86TargetLowering()
961 setOperationAction(ISD::FNEARBYINT, VT, Legal); in X86TargetLowering()
962 setOperationAction(ISD::FNEG, VT, Custom); in X86TargetLowering()
963 setOperationAction(ISD::FABS, VT, Custom); in X86TargetLowering()
979 for (MVT VT : MVT::fp_vector_valuetypes()) in X86TargetLowering() local
980 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal); in X86TargetLowering()
982 for (auto VT : { MVT::v32i8, MVT::v16i16 }) { in X86TargetLowering()
983 setOperationAction(ISD::SRL, VT, Custom); in X86TargetLowering()
984 setOperationAction(ISD::SHL, VT, Custom); in X86TargetLowering()
985 setOperationAction(ISD::SRA, VT, Custom); in X86TargetLowering()
1011 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) { in X86TargetLowering()
1012 setOperationAction(ISD::CTPOP, VT, Custom); in X86TargetLowering()
1013 setOperationAction(ISD::CTTZ, VT, Custom); in X86TargetLowering()
1018 for (auto VT : { MVT::v32i8, MVT::v16i16 }) in X86TargetLowering()
1019 setOperationAction(ISD::CTLZ, VT, Custom); in X86TargetLowering()
1022 for (auto VT : { MVT::v8i32, MVT::v4i64 }) in X86TargetLowering()
1023 setOperationAction(ISD::CTLZ, VT, Custom); in X86TargetLowering()
1026 for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32, in X86TargetLowering()
1028 setOperationAction(ISD::FMA, VT, Legal); in X86TargetLowering()
1031 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) { in X86TargetLowering()
1032 setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom); in X86TargetLowering()
1033 setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom); in X86TargetLowering()
1049 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) { in X86TargetLowering()
1050 setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom); in X86TargetLowering()
1051 setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom); in X86TargetLowering()
1052 setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom); in X86TargetLowering()
1053 setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom); in X86TargetLowering()
1083 for (auto VT : { MVT::v8i32, MVT::v4i64 }) { in X86TargetLowering()
1084 setOperationAction(ISD::SRL, VT, Custom); in X86TargetLowering()
1085 setOperationAction(ISD::SHL, VT, Custom); in X86TargetLowering()
1086 setOperationAction(ISD::SRA, VT, Custom); in X86TargetLowering()
1089 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64, in X86TargetLowering()
1091 setOperationAction(ISD::MLOAD, VT, Legal); in X86TargetLowering()
1092 setOperationAction(ISD::MSTORE, VT, Legal); in X86TargetLowering()
1097 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, in X86TargetLowering()
1099 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); in X86TargetLowering()
1103 for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64, in X86TargetLowering()
1105 setOperationAction(ISD::BUILD_VECTOR, VT, Custom); in X86TargetLowering()
1106 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); in X86TargetLowering()
1107 setOperationAction(ISD::VSELECT, VT, Custom); in X86TargetLowering()
1108 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); in X86TargetLowering()
1109 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); in X86TargetLowering()
1110 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); in X86TargetLowering()
1111 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); in X86TargetLowering()
1112 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); in X86TargetLowering()
1119 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) { in X86TargetLowering()
1120 setOperationPromotedToType(ISD::AND, VT, MVT::v4i64); in X86TargetLowering()
1121 setOperationPromotedToType(ISD::OR, VT, MVT::v4i64); in X86TargetLowering()
1122 setOperationPromotedToType(ISD::XOR, VT, MVT::v4i64); in X86TargetLowering()
1123 setOperationPromotedToType(ISD::LOAD, VT, MVT::v4i64); in X86TargetLowering()
1124 setOperationPromotedToType(ISD::SELECT, VT, MVT::v4i64); in X86TargetLowering()
1138 for (MVT VT : MVT::fp_vector_valuetypes()) in X86TargetLowering() local
1139 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal); in X86TargetLowering()
1160 for (MVT VT : {MVT::v2i64, MVT::v4i32, MVT::v8i32, MVT::v4i64, MVT::v8i16, in X86TargetLowering()
1163 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements()); in X86TargetLowering()
1164 setLoadExtAction(ISD::SEXTLOAD, VT, MaskVT, Custom); in X86TargetLowering()
1165 setLoadExtAction(ISD::ZEXTLOAD, VT, MaskVT, Custom); in X86TargetLowering()
1166 setLoadExtAction(ISD::EXTLOAD, VT, MaskVT, Custom); in X86TargetLowering()
1167 setTruncStoreAction(VT, MaskVT, Custom); in X86TargetLowering()
1170 for (MVT VT : { MVT::v16f32, MVT::v8f64 }) { in X86TargetLowering()
1171 setOperationAction(ISD::FNEG, VT, Custom); in X86TargetLowering()
1172 setOperationAction(ISD::FABS, VT, Custom); in X86TargetLowering()
1173 setOperationAction(ISD::FMA, VT, Legal); in X86TargetLowering()
1280 for (auto VT : { MVT::v16f32, MVT::v8f64 }) { in X86TargetLowering()
1281 setOperationAction(ISD::FFLOOR, VT, Legal); in X86TargetLowering()
1282 setOperationAction(ISD::FCEIL, VT, Legal); in X86TargetLowering()
1283 setOperationAction(ISD::FTRUNC, VT, Legal); in X86TargetLowering()
1284 setOperationAction(ISD::FRINT, VT, Legal); in X86TargetLowering()
1285 setOperationAction(ISD::FNEARBYINT, VT, Legal); in X86TargetLowering()
1330 for (auto VT : { MVT::v16i32, MVT::v8i64 }) { in X86TargetLowering()
1331 setOperationAction(ISD::SRL, VT, Custom); in X86TargetLowering()
1332 setOperationAction(ISD::SHL, VT, Custom); in X86TargetLowering()
1333 setOperationAction(ISD::SRA, VT, Custom); in X86TargetLowering()
1334 setOperationAction(ISD::AND, VT, Legal); in X86TargetLowering()
1335 setOperationAction(ISD::OR, VT, Legal); in X86TargetLowering()
1336 setOperationAction(ISD::XOR, VT, Legal); in X86TargetLowering()
1337 setOperationAction(ISD::CTPOP, VT, Custom); in X86TargetLowering()
1338 setOperationAction(ISD::CTTZ, VT, Custom); in X86TargetLowering()
1379 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64, in X86TargetLowering()
1381 setOperationAction(ISD::MGATHER, VT, Custom); in X86TargetLowering()
1382 setOperationAction(ISD::MSCATTER, VT, Custom); in X86TargetLowering()
1387 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64, in X86TargetLowering()
1389 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); in X86TargetLowering()
1390 for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, in X86TargetLowering()
1392 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal); in X86TargetLowering()
1394 for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) { in X86TargetLowering()
1395 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); in X86TargetLowering()
1396 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); in X86TargetLowering()
1397 setOperationAction(ISD::BUILD_VECTOR, VT, Custom); in X86TargetLowering()
1398 setOperationAction(ISD::VSELECT, VT, Legal); in X86TargetLowering()
1399 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); in X86TargetLowering()
1400 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); in X86TargetLowering()
1401 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); in X86TargetLowering()
1402 setOperationAction(ISD::MLOAD, VT, Legal); in X86TargetLowering()
1403 setOperationAction(ISD::MSTORE, VT, Legal); in X86TargetLowering()
1404 setOperationAction(ISD::MGATHER, VT, Legal); in X86TargetLowering()
1405 setOperationAction(ISD::MSCATTER, VT, Custom); in X86TargetLowering()
1407 for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32 }) { in X86TargetLowering()
1408 setOperationPromotedToType(ISD::SELECT, VT, MVT::v8i64); in X86TargetLowering()
1487 for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) { in X86TargetLowering()
1488 setOperationAction(ISD::MLOAD, VT, Action); in X86TargetLowering()
1489 setOperationAction(ISD::MSTORE, VT, Action); in X86TargetLowering()
1497 for (auto VT : { MVT::v64i8, MVT::v32i16 }) { in X86TargetLowering()
1498 setOperationAction(ISD::BUILD_VECTOR, VT, Custom); in X86TargetLowering()
1499 setOperationAction(ISD::VSELECT, VT, Legal); in X86TargetLowering()
1500 setOperationAction(ISD::SRL, VT, Custom); in X86TargetLowering()
1501 setOperationAction(ISD::SHL, VT, Custom); in X86TargetLowering()
1502 setOperationAction(ISD::SRA, VT, Custom); in X86TargetLowering()
1503 setOperationAction(ISD::MLOAD, VT, Legal); in X86TargetLowering()
1504 setOperationAction(ISD::MSTORE, VT, Legal); in X86TargetLowering()
1505 setOperationAction(ISD::CTPOP, VT, Custom); in X86TargetLowering()
1506 setOperationAction(ISD::CTTZ, VT, Custom); in X86TargetLowering()
1508 setOperationPromotedToType(ISD::AND, VT, MVT::v8i64); in X86TargetLowering()
1509 setOperationPromotedToType(ISD::OR, VT, MVT::v8i64); in X86TargetLowering()
1510 setOperationPromotedToType(ISD::XOR, VT, MVT::v8i64); in X86TargetLowering()
1551 for (auto VT : { MVT::v4i32, MVT::v8i32 }) { in X86TargetLowering()
1552 setOperationAction(ISD::AND, VT, Legal); in X86TargetLowering()
1553 setOperationAction(ISD::OR, VT, Legal); in X86TargetLowering()
1554 setOperationAction(ISD::XOR, VT, Legal); in X86TargetLowering()
1557 for (auto VT : { MVT::v2i64, MVT::v4i64 }) { in X86TargetLowering()
1558 setOperationAction(ISD::SMAX, VT, Legal); in X86TargetLowering()
1559 setOperationAction(ISD::UMAX, VT, Legal); in X86TargetLowering()
1560 setOperationAction(ISD::SMIN, VT, Legal); in X86TargetLowering()
1561 setOperationAction(ISD::UMIN, VT, Legal); in X86TargetLowering()
1580 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) { in X86TargetLowering()
1581 if (VT == MVT::i64 && !Subtarget.is64Bit()) in X86TargetLowering()
1584 setOperationAction(ISD::SADDO, VT, Custom); in X86TargetLowering()
1585 setOperationAction(ISD::UADDO, VT, Custom); in X86TargetLowering()
1586 setOperationAction(ISD::SSUBO, VT, Custom); in X86TargetLowering()
1587 setOperationAction(ISD::USUBO, VT, Custom); in X86TargetLowering()
1588 setOperationAction(ISD::SMULO, VT, Custom); in X86TargetLowering()
1589 setOperationAction(ISD::UMULO, VT, Custom); in X86TargetLowering()
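The group at source lines 1580-1589 closes out the constructor's matches: the overflow-arithmetic nodes (SADDO, UADDO, SSUBO, USUBO, SMULO, UMULO) are custom-lowered for each scalar integer type. As elsewhere in this listing, the statement following the if (VT == MVT::i64 && !Subtarget.is64Bit()) guard is not shown because it does not mention VT; below is a hedged reconstruction of the full loop, assuming that elided statement is a plain continue.

    // Sketch: custom-lower overflow arithmetic per scalar integer type,
    // skipping i64 when compiling for 32-bit x86.
    for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
      if (VT == MVT::i64 && !Subtarget.is64Bit())
        continue;   // assumed elided line
      setOperationAction(ISD::SADDO, VT, Custom);
      setOperationAction(ISD::UADDO, VT, Custom);
      setOperationAction(ISD::SSUBO, VT, Custom);
      setOperationAction(ISD::USUBO, VT, Custom);
      setOperationAction(ISD::SMULO, VT, Custom);
      setOperationAction(ISD::UMULO, VT, Custom);
    }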
1692 X86TargetLowering::getPreferredVectorAction(EVT VT) const { in getPreferredVectorAction()
1694 VT.getVectorNumElements() != 1 && in getPreferredVectorAction()
1695 VT.getVectorElementType().getSimpleVT() != MVT::i1) in getPreferredVectorAction()
1698 return TargetLoweringBase::getPreferredVectorAction(VT); in getPreferredVectorAction()
1703 EVT VT) const { in getSetCCResultType()
1704 if (!VT.isVector()) in getSetCCResultType()
1707 if (VT.isSimple()) { in getSetCCResultType()
1708 MVT VVT = VT.getSimpleVT(); in getSetCCResultType()
1730 if (!isTypeLegal(VT) && getTypeAction(Context, VT) == TypePromoteInteger) { in getSetCCResultType()
1731 EVT LegalVT = getTypeToTransformTo(Context, VT); in getSetCCResultType()
1743 return VT.changeVectorElementTypeToInteger(); in getSetCCResultType()
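The getSetCCResultType matches above show only the VT-bearing lines of that hook. The decision they trace is roughly: a scalar compare produces a small scalar integer, and a vector compare produces either a mask type or an integer vector matching VT's shape. A simplified, hedged sketch of that shape; the real function's elided lines handle further subtarget- and legality-dependent cases inside the VT.isSimple() and TypePromoteInteger blocks.

    EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
                                              LLVMContext &Context, EVT VT) const {
      if (!VT.isVector())
        return MVT::i8;    // scalar SETCC results are 8-bit values on x86
      // The elided blocks may instead return a vXi1 mask type (AVX-512) or a
      // wider legal type; the listed fallback keeps VT's shape with integer elements.
      return VT.changeVectorElementTypeToInteger();
    }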
1847 bool X86TargetLowering::isSafeMemOpType(MVT VT) const { in isSafeMemOpType()
1848 if (VT == MVT::f32) in isSafeMemOpType()
1850 else if (VT == MVT::f64) in isSafeMemOpType()
1856 X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT, in allowsMisalignedMemoryAccesses() argument
1861 switch (VT.getSizeInBits()) { in allowsMisalignedMemoryAccesses()
1933 MVT VT) const { in findRepresentativeClass()
1936 switch (VT.SimpleTy) { in findRepresentativeClass()
1938 return TargetLowering::findRepresentativeClass(TRI, VT); in findRepresentativeClass()
2259 EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT, in getTypeForExtReturn() argument
2264 if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) { in getTypeForExtReturn()
2274 return VT.bitsLT(MinVT) ? MinVT : VT; in getTypeForExtReturn()
2581 (Ins.size() == 2 && ((Is64Bit && Ins[1].VT == MVT::i64) || in LowerFormalArguments()
2582 (!Is64Bit && Ins[1].VT == MVT::i32))); in LowerFormalArguments()
2841 SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT); in LowerFormalArguments()
2842 F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT)); in LowerFormalArguments()
2918 EVT VT = getPointerTy(DAG.getDataLayout()); in EmitTailCallLoadRetAddr() local
2922 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(), in EmitTailCallLoadRetAddr()
2949 static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1, in getMOVL() argument
2951 unsigned NumElems = VT.getVectorNumElements(); in getMOVL()
2956 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask); in getMOVL()
3220 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT); in LowerCall()
3837 static SDValue getTargetShuffleNode(unsigned Opc, const SDLoc &dl, MVT VT, in getTargetShuffleNode() argument
3847 return DAG.getNode(Opc, dl, VT, V1, in getTargetShuffleNode()
3852 static SDValue getTargetShuffleNode(unsigned Opc, const SDLoc &dl, MVT VT, in getTargetShuffleNode() argument
3865 return DAG.getNode(Opc, dl, VT, V1, V2); in getTargetShuffleNode()
4103 MVT VT = MVT::getVT(I.getArgOperand(1)->getType()); in getTgtMemIntrinsic() local
4112 Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements()); in getTgtMemIntrinsic()
4127 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { in isFPImmLegal()
4182 EVT VT = Y.getValueType(); in hasAndNotCompare() local
4183 if (VT != MVT::i32 && VT != MVT::i64) in hasAndNotCompare()
4257 MVT VT = N->getSimpleValueType(0); in isVEXTRACTIndex() local
4258 unsigned ElSize = VT.getVectorElementType().getSizeInBits(); in isVEXTRACTIndex()
4275 MVT VT = N->getSimpleValueType(0); in isVINSERTIndex() local
4276 unsigned ElSize = VT.getVectorElementType().getSizeInBits(); in isVINSERTIndex()
4360 static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG, in getConstVector() argument
4366 MVT ConstVecVT = VT; in getConstVector()
4367 unsigned NumElts = VT.getVectorNumElements(); in getConstVector()
4369 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) { in getConstVector()
4386 ConstsNode = DAG.getBitcast(VT, ConstsNode); in getConstVector()
4391 static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget, in getZeroVector() argument
4393 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() || in getZeroVector()
4394 VT.getVectorElementType() == MVT::i1) && in getZeroVector()
4401 if (!Subtarget.hasSSE2() && VT.is128BitVector()) { in getZeroVector()
4403 } else if (VT.getVectorElementType() == MVT::i1) { in getZeroVector()
4404 assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) && in getZeroVector()
4406 assert((Subtarget.hasVLX() || VT.getVectorNumElements() >= 8) && in getZeroVector()
4408 Vec = DAG.getConstant(0, dl, VT); in getZeroVector()
4410 unsigned Num32BitElts = VT.getSizeInBits() / 32; in getZeroVector()
4413 return DAG.getBitcast(VT, Vec); in getZeroVector()
4420 EVT VT = Vec.getValueType(); in extractSubVector() local
4421 EVT ElVT = VT.getVectorElementType(); in extractSubVector()
4422 unsigned Factor = VT.getSizeInBits()/vectorWidth; in extractSubVector()
4424 VT.getVectorNumElements()/Factor); in extractSubVector()
4475 EVT VT = Vec.getValueType(); in insertSubVector() local
4476 EVT ElVT = VT.getVectorElementType(); in insertSubVector()
4661 static SDValue concat128BitVectors(SDValue V1, SDValue V2, EVT VT, in concat128BitVectors() argument
4664 SDValue V = insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl); in concat128BitVectors()
4668 static SDValue concat256BitVectors(SDValue V1, SDValue V2, EVT VT, in concat256BitVectors() argument
4671 SDValue V = insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl); in concat256BitVectors()
4679 static SDValue getOnesVector(EVT VT, const X86Subtarget &Subtarget, in getOnesVector() argument
4681 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) && in getOnesVector()
4685 unsigned NumElts = VT.getSizeInBits() / 32; in getOnesVector()
4693 return DAG.getBitcast(VT, Vec); in getOnesVector()
4697 static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, MVT VT, in getUnpackl() argument
4699 assert(VT.is128BitVector() && "Expected a 128-bit vector type"); in getUnpackl()
4700 unsigned NumElems = VT.getVectorNumElements(); in getUnpackl()
4706 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask); in getUnpackl()
4710 static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, MVT VT, in getUnpackh() argument
4712 assert(VT.is128BitVector() && "Expected a 128-bit vector type"); in getUnpackh()
4713 unsigned NumElems = VT.getVectorNumElements(); in getUnpackh()
4719 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask); in getUnpackh()
4730 MVT VT = V2.getSimpleValueType(); in getShuffleVectorZeroOrUndef() local
4732 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT); in getShuffleVectorZeroOrUndef()
4733 int NumElems = VT.getVectorNumElements(); in getShuffleVectorZeroOrUndef()
4738 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec); in getShuffleVectorZeroOrUndef()
4752 MVT VT = MaskNode.getSimpleValueType(); in getTargetShuffleMaskIndices() local
4753 assert(VT.isVector() && "Can't produce a non-vector with a build_vector!"); in getTargetShuffleMaskIndices()
4760 int Split = VT.getScalarSizeInBits() / MaskEltSizeInBits; in getTargetShuffleMaskIndices()
4771 if (VT.getScalarSizeInBits() != MaskEltSizeInBits) in getTargetShuffleMaskIndices()
4775 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { in getTargetShuffleMaskIndices()
4787 if ((VT.getScalarSizeInBits() % MaskEltSizeInBits) != 0) in getTargetShuffleMaskIndices()
4789 unsigned ElementSplit = VT.getScalarSizeInBits() / MaskEltSizeInBits; in getTargetShuffleMaskIndices()
4794 RawMask.append((VT.getVectorNumElements() - 1) * ElementSplit, 0); in getTargetShuffleMaskIndices()
4806 RawMask.append(VT.getSizeInBits() / MaskEltSizeInBits, 0); in getTargetShuffleMaskIndices()
4811 if ((VT.getScalarSizeInBits() % MaskEltSizeInBits) != 0) in getTargetShuffleMaskIndices()
4852 static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero, in getTargetShuffleMask() argument
4855 unsigned NumElems = VT.getVectorNumElements(); in getTargetShuffleMask()
4866 DecodeBLENDMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); in getTargetShuffleMask()
4870 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); in getTargetShuffleMask()
4879 DecodeUNPCKHMask(VT, Mask); in getTargetShuffleMask()
4883 DecodeUNPCKLMask(VT, Mask); in getTargetShuffleMask()
4895 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected"); in getTargetShuffleMask()
4897 DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); in getTargetShuffleMask()
4900 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected"); in getTargetShuffleMask()
4902 DecodePSLLDQMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); in getTargetShuffleMask()
4906 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected"); in getTargetShuffleMask()
4908 DecodePSRLDQMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); in getTargetShuffleMask()
4914 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); in getTargetShuffleMask()
4919 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); in getTargetShuffleMask()
4924 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); in getTargetShuffleMask()
4928 DecodeZeroMoveLowMask(VT, Mask); in getTargetShuffleMask()
4934 unsigned MaskEltSize = VT.getScalarSizeInBits(); in getTargetShuffleMask()
4937 DecodeVPERMILPMask(VT, RawMask, Mask); in getTargetShuffleMask()
4962 DecodeVPERMMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); in getTargetShuffleMask()
4967 DecodeScalarMoveMask(VT, /* IsLoad */ false, Mask); in getTargetShuffleMask()
4971 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); in getTargetShuffleMask()
4975 DecodeMOVSLDUPMask(VT, Mask); in getTargetShuffleMask()
4979 DecodeMOVSHDUPMask(VT, Mask); in getTargetShuffleMask()
4983 DecodeMOVDDUPMask(VT, Mask); in getTargetShuffleMask()
4993 unsigned MaskEltSize = VT.getScalarSizeInBits(); in getTargetShuffleMask()
5000 DecodeVPERMIL2PMask(VT, CtrlImm, RawMask, Mask); in getTargetShuffleMask()
5030 unsigned MaskEltSize = VT.getScalarSizeInBits(); in getTargetShuffleMask()
5036 DecodeVPERMVMask(C, VT, Mask); in getTargetShuffleMask()
5048 DecodeVPERMV3Mask(C, VT, Mask); in getTargetShuffleMask()
5201 EVT VT = V.getValueType(); in getShuffleScalarElt() local
5209 return DAG.getUNDEF(VT.getVectorElementType()); in getShuffleScalarElt()
5211 unsigned NumElems = VT.getVectorNumElements(); in getShuffleScalarElt()
5246 unsigned NumElems = VT.getVectorNumElements(); in getShuffleScalarElt()
5254 : DAG.getUNDEF(VT.getVectorElementType()); in getShuffleScalarElt()
5388 MVT VT = Elt.getOperand(0).getSimpleValueType(); in LowerBuildVectorv4x32() local
5389 if (!VT.is128BitVector()) in LowerBuildVectorv4x32()
5399 MVT VT = V1.getSimpleValueType(); in LowerBuildVectorv4x32() local
5422 SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op)); in LowerBuildVectorv4x32()
5423 if (V1.getSimpleValueType() != VT) in LowerBuildVectorv4x32()
5424 V1 = DAG.getBitcast(VT, V1); in LowerBuildVectorv4x32()
5425 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, Mask); in LowerBuildVectorv4x32()
5466 return DAG.getBitcast(VT, Result); in LowerBuildVectorv4x32()
5470 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits, in getVShift() argument
5473 assert(VT.is128BitVector() && "Unknown type for VShift"); in getVShift()
5477 MVT ScalarShiftTy = TLI.getScalarShiftAmountTy(DAG.getDataLayout(), VT); in getVShift()
5480 return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal)); in getVShift()
5483 static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl, in LowerAsSplatVectorLoad() argument
5513 unsigned RequiredAlign = VT.getSizeInBits()/8; in LowerAsSplatVectorLoad()
5542 unsigned NumElems = VT.getVectorNumElements(); in LowerAsSplatVectorLoad()
5562 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts, in EltsFromConsecutiveLoads() argument
5588 if ((NumElems * Elt.getValueSizeInBits()) != VT.getSizeInBits()) in EltsFromConsecutiveLoads()
5598 return DAG.getUNDEF(VT); in EltsFromConsecutiveLoads()
5602 return VT.isInteger() ? DAG.getConstant(0, DL, VT) in EltsFromConsecutiveLoads()
5603 : DAG.getConstantFP(0.0, DL, VT); in EltsFromConsecutiveLoads()
5632 auto CreateLoad = [&DAG, &DL](EVT VT, LoadSDNode *LDBase) { in EltsFromConsecutiveLoads() argument
5634 VT, DL, LDBase->getChain(), LDBase->getBasePtr(), in EltsFromConsecutiveLoads()
5660 if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems) in EltsFromConsecutiveLoads()
5663 if (isAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT)) in EltsFromConsecutiveLoads()
5667 return CreateLoad(VT, LDBase); in EltsFromConsecutiveLoads()
5671 if (!isAfterLegalize && NumElems == VT.getVectorNumElements()) { in EltsFromConsecutiveLoads()
5679 SDValue V = CreateLoad(VT, LDBase); in EltsFromConsecutiveLoads()
5680 SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT) in EltsFromConsecutiveLoads()
5681 : DAG.getConstantFP(0.0, DL, VT); in EltsFromConsecutiveLoads()
5682 return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask); in EltsFromConsecutiveLoads()
5691 ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) { in EltsFromConsecutiveLoads()
5692 MVT VecSVT = VT.isFloatingPoint() ? MVT::f64 : MVT::i64; in EltsFromConsecutiveLoads()
5693 MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / 64); in EltsFromConsecutiveLoads()
5716 return DAG.getBitcast(VT, ResNode); in EltsFromConsecutiveLoads()
5722 ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) { in EltsFromConsecutiveLoads()
5723 MVT VecSVT = VT.isFloatingPoint() ? MVT::f32 : MVT::i32; in EltsFromConsecutiveLoads()
5724 MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / 32); in EltsFromConsecutiveLoads()
5730 return DAG.getBitcast(VT, V); in EltsFromConsecutiveLoads()
5752 MVT VT = Op.getSimpleValueType(); in LowerVectorBroadcast() local
5755 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) && in LowerVectorBroadcast()
5773 if (!Splat || (VT.getVectorNumElements() - UndefElements.count()) <= 1) in LowerVectorBroadcast()
5803 if (VT.getSizeInBits() >= 256) in LowerVectorBroadcast()
5805 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc); in LowerVectorBroadcast()
5817 bool hasRegVer = Subtarget.hasAVX512() && VT.is512BitVector() && in LowerVectorBroadcast()
5827 bool IsGE256 = (VT.getSizeInBits() >= 256); in LowerVectorBroadcast()
5869 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); in LowerVectorBroadcast()
5878 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); in LowerVectorBroadcast()
5886 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); in LowerVectorBroadcast()
5892 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); in LowerVectorBroadcast()
5935 MVT VT = Op.getSimpleValueType(); in buildFromShuffleMostly() local
5939 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT)) in buildFromShuffleMostly()
5973 if (ExtractedFromVec.getValueType() != VT) in buildFromShuffleMostly()
5995 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT); in buildFromShuffleMostly()
5996 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask); in buildFromShuffleMostly()
5999 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx), in buildFromShuffleMostly()
6017 MVT VT = in ConvertI1VectorToInteger() local
6019 return DAG.getConstant(Immediate, dl, VT); in ConvertI1VectorToInteger()
6025 MVT VT = Op.getSimpleValueType(); in LowerBUILD_VECTORvXi1() local
6026 assert((VT.getVectorElementType() == MVT::i1) && in LowerBUILD_VECTORvXi1()
6031 return DAG.getTargetConstant(0, dl, VT); in LowerBUILD_VECTORvXi1()
6034 return DAG.getTargetConstant(1, dl, VT); in LowerBUILD_VECTORvXi1()
6038 if (Imm.getValueSizeInBits() == VT.getSizeInBits()) in LowerBUILD_VECTORvXi1()
6039 return DAG.getBitcast(VT, Imm); in LowerBUILD_VECTORvXi1()
6041 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec, in LowerBUILD_VECTORvXi1()
6069 return DAG.getNode(ISD::SELECT, dl, VT, Op.getOperand(SplatIdx), in LowerBUILD_VECTORvXi1()
6070 DAG.getConstant(1, dl, VT), in LowerBUILD_VECTORvXi1()
6071 DAG.getConstant(0, dl, VT)); in LowerBUILD_VECTORvXi1()
6077 MVT ImmVT = MVT::getIntegerVT(std::max((int)VT.getSizeInBits(), 8)); in LowerBUILD_VECTORvXi1()
6081 Imm = DAG.getConstant(0, dl, VT); in LowerBUILD_VECTORvXi1()
6083 Imm = DAG.getUNDEF(VT); in LowerBUILD_VECTORvXi1()
6084 if (Imm.getValueSizeInBits() == VT.getSizeInBits()) in LowerBUILD_VECTORvXi1()
6085 DstVec = DAG.getBitcast(VT, Imm); in LowerBUILD_VECTORvXi1()
6088 DstVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec, in LowerBUILD_VECTORvXi1()
6094 DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec, in LowerBUILD_VECTORvXi1()
6119 EVT VT = N->getValueType(0); in isHorizontalBinOp() local
6122 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx && in isHorizontalBinOp()
6129 V0 = DAG.getUNDEF(VT); in isHorizontalBinOp()
6130 V1 = DAG.getUNDEF(VT); in isHorizontalBinOp()
6169 if (V0.getValueType() != VT) in isHorizontalBinOp()
6175 if (V1.getValueType() != VT) in isHorizontalBinOp()
6233 MVT VT = V0.getSimpleValueType(); in ExpandHorizontalBinOp() local
6234 assert(VT.is256BitVector() && VT == V1.getSimpleValueType() && in ExpandHorizontalBinOp()
6237 unsigned NumElts = VT.getVectorNumElements(); in ExpandHorizontalBinOp()
6262 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI); in ExpandHorizontalBinOp()
6269 MVT VT = BV->getSimpleValueType(0); in LowerToAddSub() local
6270 if ((!Subtarget.hasSSE3() || (VT != MVT::v4f32 && VT != MVT::v2f64)) && in LowerToAddSub()
6271 (!Subtarget.hasAVX() || (VT != MVT::v8f32 && VT != MVT::v4f64))) in LowerToAddSub()
6275 unsigned NumElts = VT.getVectorNumElements(); in LowerToAddSub()
6276 SDValue InVec0 = DAG.getUNDEF(VT); in LowerToAddSub()
6277 SDValue InVec1 = DAG.getUNDEF(VT); in LowerToAddSub()
6279 assert((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v4f32 || in LowerToAddSub()
6280 VT == MVT::v2f64) && "build_vector with an invalid type found!"); in LowerToAddSub()
6331 if (InVec0.getSimpleValueType() != VT) in LowerToAddSub()
6336 if (InVec1.getSimpleValueType() != VT) in LowerToAddSub()
6362 return DAG.getNode(X86ISD::ADDSUB, DL, VT, InVec0, InVec1); in LowerToAddSub()
6371 MVT VT = BV->getSimpleValueType(0); in LowerToHorizontalOp() local
6372 unsigned NumElts = VT.getVectorNumElements(); in LowerToHorizontalOp()
6393 if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) { in LowerToHorizontalOp()
6396 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1); in LowerToHorizontalOp()
6399 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1); in LowerToHorizontalOp()
6400 } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget.hasSSSE3()) { in LowerToHorizontalOp()
6403 return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1); in LowerToHorizontalOp()
6406 return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1); in LowerToHorizontalOp()
6412 if ((VT == MVT::v8f32 || VT == MVT::v4f64)) { in LowerToHorizontalOp()
6420 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1); in LowerToHorizontalOp()
6426 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1); in LowerToHorizontalOp()
6427 } else if (VT == MVT::v8i32 || VT == MVT::v16i16) { in LowerToHorizontalOp()
6450 return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1); in LowerToHorizontalOp()
6466 if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 || in LowerToHorizontalOp()
6467 VT == MVT::v16i16) && Subtarget.hasAVX()) { in LowerToHorizontalOp()
6504 MVT VT = Op.getSimpleValueType(); in lowerBuildVectorToBitOp() local
6505 unsigned NumElems = VT.getVectorNumElements(); in lowerBuildVectorToBitOp()
6522 if (!TLI.isOperationLegalOrPromote(Opcode, VT)) in lowerBuildVectorToBitOp()
6539 SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts); in lowerBuildVectorToBitOp()
6540 SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts); in lowerBuildVectorToBitOp()
6541 return DAG.getNode(Opcode, DL, VT, LHS, RHS); in lowerBuildVectorToBitOp()
6550 MVT VT = Op.getSimpleValueType(); in materializeVectorConstant() local
6556 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) in materializeVectorConstant()
6559 return getZeroVector(VT, Subtarget, DAG, DL); in materializeVectorConstant()
6566 if (VT == MVT::v4i32 || VT == MVT::v16i32 || in materializeVectorConstant()
6567 (VT == MVT::v8i32 && Subtarget.hasInt256())) in materializeVectorConstant()
6570 return getOnesVector(VT, Subtarget, DAG, DL); in materializeVectorConstant()
6580 MVT VT = Op.getSimpleValueType(); in LowerBUILD_VECTOR() local
6581 MVT ExtVT = VT.getVectorElementType(); in LowerBUILD_VECTOR()
6585 if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512()) in LowerBUILD_VECTOR()
6627 return DAG.getUNDEF(VT); in LowerBUILD_VECTOR()
6643 assert(VT == MVT::v2i64 && "Expected an SSE value type!"); in LowerBUILD_VECTOR()
6650 return DAG.getBitcast(VT, getShuffleVectorZeroOrUndef( in LowerBUILD_VECTOR()
6661 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); in LowerBUILD_VECTOR()
6665 if (VT.is512BitVector()) { in LowerBUILD_VECTOR()
6666 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl); in LowerBUILD_VECTOR()
6667 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec, in LowerBUILD_VECTOR()
6670 assert((VT.is128BitVector() || VT.is256BitVector()) && in LowerBUILD_VECTOR()
6672 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); in LowerBUILD_VECTOR()
6681 if (VT.getSizeInBits() >= 256) { in LowerBUILD_VECTOR()
6682 MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32); in LowerBUILD_VECTOR()
6694 assert(VT.is128BitVector() && "Expected an SSE value type!"); in LowerBUILD_VECTOR()
6698 return DAG.getBitcast(VT, Item); in LowerBUILD_VECTOR()
6706 unsigned NumBits = VT.getSizeInBits(); in LowerBUILD_VECTOR()
6707 return getVShift(true, VT, in LowerBUILD_VECTOR()
6709 VT, Op.getOperand(1)), in LowerBUILD_VECTOR()
6722 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); in LowerBUILD_VECTOR()
6737 return LowerAsSplatVectorLoad(Item, VT, dl, DAG); in LowerBUILD_VECTOR()
6748 if (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) { in LowerBUILD_VECTOR()
6750 if (SDValue LD = EltsFromConsecutiveLoads(VT, Ops, dl, DAG, false)) in LowerBUILD_VECTOR()
6756 if (VT.is256BitVector() || VT.is512BitVector()) { in LowerBUILD_VECTOR()
6768 if (VT.is256BitVector()) in LowerBUILD_VECTOR()
6769 return concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl); in LowerBUILD_VECTOR()
6770 return concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl); in LowerBUILD_VECTOR()
6778 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, in LowerBUILD_VECTOR()
6807 Ops[i] = getZeroVector(VT, Subtarget, DAG, dl); in LowerBUILD_VECTOR()
6809 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); in LowerBUILD_VECTOR()
6819 Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]); in LowerBUILD_VECTOR()
6822 Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]); in LowerBUILD_VECTOR()
6825 Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]); in LowerBUILD_VECTOR()
6838 return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec); in LowerBUILD_VECTOR()
6841 if (Values.size() > 1 && VT.is128BitVector()) { in LowerBUILD_VECTOR()
6850 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0)); in LowerBUILD_VECTOR()
6852 Result = DAG.getUNDEF(VT); in LowerBUILD_VECTOR()
6856 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result, in LowerBUILD_VECTOR()
6868 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); in LowerBUILD_VECTOR()
6870 Ops[i] = DAG.getUNDEF(VT); in LowerBUILD_VECTOR()
6889 Ops[i] = getUnpackl(DAG, dl, VT, Ops[i], Ops[i + EltStride]); in LowerBUILD_VECTOR()
7006 MVT VT = Op.getSimpleValueType(); in LowerCONCAT_VECTORS() local
7007 if (VT.getVectorElementType() == MVT::i1) in LowerCONCAT_VECTORS()
7010 assert((VT.is256BitVector() && Op.getNumOperands() == 2) || in LowerCONCAT_VECTORS()
7011 (VT.is512BitVector() && (Op.getNumOperands() == 2 || in LowerCONCAT_VECTORS()
7053 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) { in is128BitLaneCrossingShuffleMask() argument
7054 int LaneSize = 128 / VT.getScalarSizeInBits(); in is128BitLaneCrossingShuffleMask()
7073 static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT, in isRepeatedShuffleMask() argument
7076 int LaneSize = LaneSizeInBits / VT.getScalarSizeInBits(); in isRepeatedShuffleMask()
7102 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask, in is128BitLaneRepeatedShuffleMask() argument
7104 return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask); in is128BitLaneRepeatedShuffleMask()
7109 is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask, in is256BitLaneRepeatedShuffleMask() argument
7111 return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask); in is256BitLaneRepeatedShuffleMask()
7303 static SDValue lowerVectorShuffleWithPSHUFB(const SDLoc &DL, MVT VT, in lowerVectorShuffleWithPSHUFB() argument
7309 int LaneSize = 128 / VT.getScalarSizeInBits(); in lowerVectorShuffleWithPSHUFB()
7310 const int NumBytes = VT.getSizeInBits() / 8; in lowerVectorShuffleWithPSHUFB()
7311 const int NumEltBytes = VT.getScalarSizeInBits() / 8; in lowerVectorShuffleWithPSHUFB()
7313 assert((Subtarget.hasSSSE3() && VT.is128BitVector()) || in lowerVectorShuffleWithPSHUFB()
7314 (Subtarget.hasAVX2() && VT.is256BitVector()) || in lowerVectorShuffleWithPSHUFB()
7315 (Subtarget.hasBWI() && VT.is512BitVector())); in lowerVectorShuffleWithPSHUFB()
7348 VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V1), in lowerVectorShuffleWithPSHUFB()
7354 static SDValue lowerVectorShuffleWithUNPCK(const SDLoc &DL, MVT VT, in lowerVectorShuffleWithUNPCK() argument
7357 int NumElts = VT.getVectorNumElements(); in lowerVectorShuffleWithUNPCK()
7358 int NumEltsInLane = 128 / VT.getScalarSizeInBits(); in lowerVectorShuffleWithUNPCK()
7371 return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2); in lowerVectorShuffleWithUNPCK()
7373 return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2); in lowerVectorShuffleWithUNPCK()
7378 return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1); in lowerVectorShuffleWithUNPCK()
7382 return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1); in lowerVectorShuffleWithUNPCK()
7391 static SDValue lowerVectorShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1, in lowerVectorShuffleAsBitMask() argument
7394 MVT EltVT = VT.getVectorElementType(); in lowerVectorShuffleAsBitMask()
7422 SDValue VMask = DAG.getBuildVector(VT, DL, VMaskOps); in lowerVectorShuffleAsBitMask()
7423 V = DAG.getNode(VT.isFloatingPoint() in lowerVectorShuffleAsBitMask()
7425 DL, VT, V, VMask); in lowerVectorShuffleAsBitMask()
7434 static SDValue lowerVectorShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1, in lowerVectorShuffleAsBitBlend() argument
7437 assert(VT.isInteger() && "Only supports integer vector types!"); in lowerVectorShuffleAsBitBlend()
7438 MVT EltVT = VT.getVectorElementType(); in lowerVectorShuffleAsBitBlend()
7450 SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps); in lowerVectorShuffleAsBitBlend()
7451 V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask); in lowerVectorShuffleAsBitBlend()
7453 MVT MaskVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64); in lowerVectorShuffleAsBitBlend()
7454 V2 = DAG.getBitcast(VT, DAG.getNode(X86ISD::ANDNP, DL, MaskVT, in lowerVectorShuffleAsBitBlend()
7457 return DAG.getNode(ISD::OR, DL, VT, V1, V2); in lowerVectorShuffleAsBitBlend()
7466 static SDValue lowerVectorShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1, in lowerVectorShuffleAsBlend() argument
7508 V1 = getZeroVector(VT, Subtarget, DAG, DL); in lowerVectorShuffleAsBlend()
7510 V2 = getZeroVector(VT, Subtarget, DAG, DL); in lowerVectorShuffleAsBlend()
7521 switch (VT.SimpleTy) { in lowerVectorShuffleAsBlend()
7526 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2, in lowerVectorShuffleAsBlend()
7539 int Scale = VT.getScalarSizeInBits() / 32; in lowerVectorShuffleAsBlend()
7541 MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32; in lowerVectorShuffleAsBlend()
7545 VT, DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2, in lowerVectorShuffleAsBlend()
7552 int Scale = 8 / VT.getVectorNumElements(); in lowerVectorShuffleAsBlend()
7556 return DAG.getBitcast(VT, in lowerVectorShuffleAsBlend()
7578 assert((VT.is128BitVector() || Subtarget.hasAVX2()) && in lowerVectorShuffleAsBlend()
7582 if (SDValue Masked = lowerVectorShuffleAsBitMask(DL, VT, V1, V2, Mask, DAG)) in lowerVectorShuffleAsBlend()
7586 int Scale = VT.getScalarSizeInBits() / 8; in lowerVectorShuffleAsBlend()
7590 MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8); in lowerVectorShuffleAsBlend()
7614 VT, DAG.getNode(ISD::VSELECT, DL, BlendVT, in lowerVectorShuffleAsBlend()
7628 static SDValue lowerVectorShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT, in lowerVectorShuffleAsBlendAndPermute() argument
7651 SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask); in lowerVectorShuffleAsBlendAndPermute()
7652 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask); in lowerVectorShuffleAsBlendAndPermute()
7663 MVT VT, SDValue V1, in lowerVectorShuffleAsDecomposedShuffleBlend() argument
7688 lowerVectorShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask, DAG)) in lowerVectorShuffleAsDecomposedShuffleBlend()
7691 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask); in lowerVectorShuffleAsDecomposedShuffleBlend()
7692 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask); in lowerVectorShuffleAsDecomposedShuffleBlend()
7693 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask); in lowerVectorShuffleAsDecomposedShuffleBlend()
7712 static SDValue lowerVectorShuffleAsByteRotate(const SDLoc &DL, MVT VT, in lowerVectorShuffleAsByteRotate() argument
7720 int NumLanes = VT.getSizeInBits() / 128; in lowerVectorShuffleAsByteRotate()
7799 assert((!VT.is512BitVector() || Subtarget.hasBWI()) && in lowerVectorShuffleAsByteRotate()
7802 VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi, in lowerVectorShuffleAsByteRotate()
7806 assert(VT.is128BitVector() && in lowerVectorShuffleAsByteRotate()
7821 return DAG.getBitcast(VT, in lowerVectorShuffleAsByteRotate()
7848 static SDValue lowerVectorShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1, in lowerVectorShuffleAsShift() argument
7855 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size"); in lowerVectorShuffleAsShift()
7876 int ShiftEltBits = VT.getScalarSizeInBits() * Scale; in lowerVectorShuffleAsShift()
7880 int ShiftAmt = Shift * VT.getScalarSizeInBits() / (ByteShift ? 8 : 1); in lowerVectorShuffleAsShift()
7887 MVT ShiftSVT = MVT::getIntegerVT(VT.getScalarSizeInBits() * Scale); in lowerVectorShuffleAsShift()
7888 MVT ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8) in lowerVectorShuffleAsShift()
7896 return DAG.getBitcast(VT, V); in lowerVectorShuffleAsShift()
7905 unsigned MaxWidth = (VT.is512BitVector() && !Subtarget.hasBWI() ? 64 : 128); in lowerVectorShuffleAsShift()
7906 for (int Scale = 2; Scale * VT.getScalarSizeInBits() <= MaxWidth; Scale *= 2) in lowerVectorShuffleAsShift()
7919 static SDValue lowerVectorShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1, in lowerVectorShuffleWithSSE4A() argument
7927 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size"); in lowerVectorShuffleWithSSE4A()
7971 int BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f; in lowerVectorShuffleWithSSE4A()
7972 int BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f; in lowerVectorShuffleWithSSE4A()
7973 return DAG.getNode(X86ISD::EXTRQI, DL, VT, Src, in lowerVectorShuffleWithSSE4A()
8030 Base = DAG.getUNDEF(VT); in lowerVectorShuffleWithSSE4A()
8032 int BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f; in lowerVectorShuffleWithSSE4A()
8033 int BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f; in lowerVectorShuffleWithSSE4A()
8034 return DAG.getNode(X86ISD::INSERTQI, DL, VT, Base, Insert, in lowerVectorShuffleWithSSE4A()
8059 const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV, in lowerVectorShuffleAsSpecificZeroOrAnyExtend() argument
8062 int EltBits = VT.getScalarSizeInBits(); in lowerVectorShuffleAsSpecificZeroOrAnyExtend()
8063 int NumElements = VT.getVectorNumElements(); in lowerVectorShuffleAsSpecificZeroOrAnyExtend()
8088 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), ShMask); in lowerVectorShuffleAsSpecificZeroOrAnyExtend()
8096 if (Offset && Scale == 2 && VT.is128BitVector()) in lowerVectorShuffleAsSpecificZeroOrAnyExtend()
8103 if (VT.is256BitVector()) in lowerVectorShuffleAsSpecificZeroOrAnyExtend()
8107 return DAG.getBitcast(VT, InputV); in lowerVectorShuffleAsSpecificZeroOrAnyExtend()
8110 assert(VT.is128BitVector() && "Only 128-bit vectors can be extended."); in lowerVectorShuffleAsSpecificZeroOrAnyExtend()
8118 VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, in lowerVectorShuffleAsSpecificZeroOrAnyExtend()
8131 VT, DAG.getNode(OddEvenOp, DL, MVT::v8i16, in lowerVectorShuffleAsSpecificZeroOrAnyExtend()
8140 assert(VT.is128BitVector() && "Unexpected vector width!"); in lowerVectorShuffleAsSpecificZeroOrAnyExtend()
8144 MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV, in lowerVectorShuffleAsSpecificZeroOrAnyExtend()
8150 return DAG.getBitcast(VT, Lo); in lowerVectorShuffleAsSpecificZeroOrAnyExtend()
8154 MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV, in lowerVectorShuffleAsSpecificZeroOrAnyExtend()
8157 return DAG.getBitcast(VT, in lowerVectorShuffleAsSpecificZeroOrAnyExtend()
8174 VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV, in lowerVectorShuffleAsSpecificZeroOrAnyExtend()
8185 InputV = DAG.getVectorShuffle(VT, DL, InputV, DAG.getUNDEF(VT), ShMask); in lowerVectorShuffleAsSpecificZeroOrAnyExtend()
8206 return DAG.getBitcast(VT, InputV); in lowerVectorShuffleAsSpecificZeroOrAnyExtend()
8222 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask, in lowerVectorShuffleAsZeroOrAnyExtend() argument
8226 int Bits = VT.getSizeInBits(); in lowerVectorShuffleAsZeroOrAnyExtend()
8228 int NumElements = VT.getVectorNumElements(); in lowerVectorShuffleAsZeroOrAnyExtend()
8230 assert(VT.getScalarSizeInBits() <= 32 && in lowerVectorShuffleAsZeroOrAnyExtend()
8294 DL, VT, Scale, Offset, AnyExt, InputV, Mask, Subtarget, DAG); in lowerVectorShuffleAsZeroOrAnyExtend()
8331 return DAG.getBitcast(VT, V); in lowerVectorShuffleAsZeroOrAnyExtend()
8343 MVT VT = V.getSimpleValueType(); in getScalarValueForVectorElement() local
8344 MVT EltVT = VT.getVectorElementType(); in getScalarValueForVectorElement()
8350 if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits()) in getScalarValueForVectorElement()
8379 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask, in lowerVectorShuffleAsElementInsertion() argument
8382 MVT ExtVT = VT; in lowerVectorShuffleAsElementInsertion()
8383 MVT EltVT = VT.getVectorElementType(); in lowerVectorShuffleAsElementInsertion()
8427 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!"); in lowerVectorShuffleAsElementInsertion()
8428 if (!VT.isFloatingPoint() || V2Index != 0) in lowerVectorShuffleAsElementInsertion()
8448 if (VT.isFloatingPoint() && V2Index != 0) in lowerVectorShuffleAsElementInsertion()
8452 if (ExtVT != VT) in lowerVectorShuffleAsElementInsertion()
8453 V2 = DAG.getBitcast(VT, V2); in lowerVectorShuffleAsElementInsertion()
8460 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) { in lowerVectorShuffleAsElementInsertion()
8463 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle); in lowerVectorShuffleAsElementInsertion()
8470 DAG.getDataLayout(), VT))); in lowerVectorShuffleAsElementInsertion()
8471 V2 = DAG.getBitcast(VT, V2); in lowerVectorShuffleAsElementInsertion()
8481 static SDValue lowerVectorShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT, in lowerVectorShuffleAsTruncBroadcast() argument
8488 EVT EltVT = VT.getVectorElementType(); in lowerVectorShuffleAsTruncBroadcast()
8491 assert(VT.isInteger() && "Unexpected non-integer trunc broadcast!"); in lowerVectorShuffleAsTruncBroadcast()
8526 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, in lowerVectorShuffleAsTruncBroadcast()
8536 static SDValue lowerVectorShuffleAsBroadcast(const SDLoc &DL, MVT VT, in lowerVectorShuffleAsBroadcast() argument
8541 if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) || in lowerVectorShuffleAsBroadcast()
8542 (Subtarget.hasAVX() && VT.isFloatingPoint()) || in lowerVectorShuffleAsBroadcast()
8543 (Subtarget.hasAVX2() && VT.isInteger()))) in lowerVectorShuffleAsBroadcast()
8549 unsigned Opcode = VT == MVT::v2f64 ? X86ISD::MOVDDUP : X86ISD::VBROADCAST; in lowerVectorShuffleAsBroadcast()
8576 if (VT.getScalarSizeInBits() != SrcVT.getScalarSizeInBits()) in lowerVectorShuffleAsBroadcast()
8613 if (V.getOpcode() == ISD::BITCAST && VT.isInteger()) in lowerVectorShuffleAsBroadcast()
8615 DL, VT, V.getOperand(0), BroadcastIdx, Subtarget, DAG)) in lowerVectorShuffleAsBroadcast()
8618 MVT BroadcastVT = VT; in lowerVectorShuffleAsBroadcast()
8633 if (!Subtarget.is64Bit() && VT.getScalarType() == MVT::i64) { in lowerVectorShuffleAsBroadcast()
8634 BroadcastVT = MVT::getVectorVT(MVT::f64, VT.getVectorNumElements()); in lowerVectorShuffleAsBroadcast()
8655 if (!VT.is256BitVector() && !VT.is512BitVector()) in lowerVectorShuffleAsBroadcast()
8659 if (VT == MVT::v4f64 || VT == MVT::v4i64) in lowerVectorShuffleAsBroadcast()
8663 unsigned EltSize = VT.getScalarSizeInBits(); in lowerVectorShuffleAsBroadcast()
8667 MVT ExtVT = MVT::getVectorVT(VT.getScalarType(), 128 / EltSize); in lowerVectorShuffleAsBroadcast()
8690 return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V)); in lowerVectorShuffleAsBroadcast()
8792 static SDValue lowerVectorShuffleAsPermuteAndUnpack(const SDLoc &DL, MVT VT, in lowerVectorShuffleAsPermuteAndUnpack() argument
8796 assert(!VT.isFloatingPoint() && in lowerVectorShuffleAsPermuteAndUnpack()
8798 assert(VT.is128BitVector() && in lowerVectorShuffleAsPermuteAndUnpack()
8843 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask); in lowerVectorShuffleAsPermuteAndUnpack()
8844 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask); in lowerVectorShuffleAsPermuteAndUnpack()
8852 VT, DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL, in lowerVectorShuffleAsPermuteAndUnpack()
8858 int OrigNumElements = VT.getVectorNumElements(); in lowerVectorShuffleAsPermuteAndUnpack()
8859 int OrigScalarSize = VT.getScalarSizeInBits(); in lowerVectorShuffleAsPermuteAndUnpack()
8891 VT, DL, DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL, in lowerVectorShuffleAsPermuteAndUnpack()
8892 DL, VT, V1, V2), in lowerVectorShuffleAsPermuteAndUnpack()
8893 DAG.getUNDEF(VT), PermMask); in lowerVectorShuffleAsPermuteAndUnpack()
9115 static SDValue lowerVectorShuffleWithSHUFPS(const SDLoc &DL, MVT VT, in lowerVectorShuffleWithSHUFPS() argument
9144 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1, in lowerVectorShuffleWithSHUFPS()
9183 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2, in lowerVectorShuffleWithSHUFPS()
9195 return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV, in lowerVectorShuffleWithSHUFPS()
9398 const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask, in lowerV8I16GeneralSingleInputVectorShuffle() argument
9400 assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!"); in lowerV8I16GeneralSingleInputVectorShuffle()
9401 MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2); in lowerV8I16GeneralSingleInputVectorShuffle()
9435 V = DAG.getNode(ShufWOp, DL, VT, V, in lowerV8I16GeneralSingleInputVectorShuffle()
9440 return DAG.getBitcast(VT, V); in lowerV8I16GeneralSingleInputVectorShuffle()
9578 VT, in lowerV8I16GeneralSingleInputVectorShuffle()
9591 return lowerV8I16GeneralSingleInputVectorShuffle(DL, VT, V, Mask, Subtarget, in lowerV8I16GeneralSingleInputVectorShuffle()
9815 V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V, in lowerV8I16GeneralSingleInputVectorShuffle()
9818 V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V, in lowerV8I16GeneralSingleInputVectorShuffle()
9822 VT, in lowerV8I16GeneralSingleInputVectorShuffle()
9835 V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V, in lowerV8I16GeneralSingleInputVectorShuffle()
9843 V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V, in lowerV8I16GeneralSingleInputVectorShuffle()
9852 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask, in lowerVectorShuffleAsBlendOfPSHUFBs() argument
9898 return DAG.getBitcast(VT, V); in lowerVectorShuffleAsBlendOfPSHUFBs()
10384 MVT VT, SDValue V1, SDValue V2, in lower128BitVectorShuffle() argument
10387 switch (VT.SimpleTy) { in lower128BitVectorShuffle()
10467 static SDValue splitAndLowerVectorShuffle(const SDLoc &DL, MVT VT, SDValue V1, in splitAndLowerVectorShuffle() argument
10470 assert(VT.getSizeInBits() >= 256 && in splitAndLowerVectorShuffle()
10472 assert(V1.getSimpleValueType() == VT && "Bad operand type!"); in splitAndLowerVectorShuffle()
10473 assert(V2.getSimpleValueType() == VT && "Bad operand type!"); in splitAndLowerVectorShuffle()
10478 int NumElements = VT.getVectorNumElements(); in splitAndLowerVectorShuffle()
10480 MVT ScalarVT = VT.getVectorElementType(); in splitAndLowerVectorShuffle()
10582 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi); in splitAndLowerVectorShuffle()
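Note: splitAndLowerVectorShuffle above (and Lower256IntArith / Lower512IntArith further down) follow the same recipe: slice the operands into halves, lower each half with the narrower legal operation, and rejoin the results with CONCAT_VECTORS. A simplified sketch of that recipe over std::vector, purely for illustration (splitAndLower is a made-up name, not the LLVM routine):

  #include <cstdio>
  #include <vector>

  template <typename T, typename Op>
  std::vector<T> splitAndLower(const std::vector<T> &A,
                               const std::vector<T> &B, Op LowerHalf) {
    size_t Half = A.size() / 2;
    std::vector<T> LoA(A.begin(), A.begin() + Half), HiA(A.begin() + Half, A.end());
    std::vector<T> LoB(B.begin(), B.begin() + Half), HiB(B.begin() + Half, B.end());
    std::vector<T> Lo = LowerHalf(LoA, LoB);   // lower each half separately
    std::vector<T> Hi = LowerHalf(HiA, HiB);
    Lo.insert(Lo.end(), Hi.begin(), Hi.end()); // CONCAT_VECTORS equivalent
    return Lo;
  }

  int main() {
    std::vector<int> A{1, 2, 3, 4}, B{10, 20, 30, 40};
    auto Add = [](const std::vector<int> &X, const std::vector<int> &Y) {
      std::vector<int> R(X.size());
      for (size_t i = 0; i < X.size(); ++i) R[i] = X[i] + Y[i];
      return R;
    };
    for (int V : splitAndLower(A, B, Add)) printf("%d ", V); // 11 22 33 44
    printf("\n");
  }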
10593 static SDValue lowerVectorShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT, in lowerVectorShuffleAsSplitOrBlend() argument
10621 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, in lowerVectorShuffleAsSplitOrBlend()
10627 int LaneCount = VT.getSizeInBits() / 128; in lowerVectorShuffleAsSplitOrBlend()
10636 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG); in lowerVectorShuffleAsSplitOrBlend()
10640 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG); in lowerVectorShuffleAsSplitOrBlend()
10652 static SDValue lowerVectorShuffleAsLanePermuteAndBlend(const SDLoc &DL, MVT VT, in lowerVectorShuffleAsLanePermuteAndBlend() argument
10657 assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!"); in lowerVectorShuffleAsLanePermuteAndBlend()
10669 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG); in lowerVectorShuffleAsLanePermuteAndBlend()
10688 SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT), in lowerVectorShuffleAsLanePermuteAndBlend()
10690 return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask); in lowerVectorShuffleAsLanePermuteAndBlend()
10694 static SDValue lowerV2X128VectorShuffle(const SDLoc &DL, MVT VT, SDValue V1, in lowerV2X128VectorShuffle() argument
10703 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask, in lowerV2X128VectorShuffle()
10721 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), in lowerV2X128VectorShuffle()
10722 VT.getVectorNumElements() / 2); in lowerV2X128VectorShuffle()
10728 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV); in lowerV2X128VectorShuffle()
10761 V1 = DAG.getUNDEF(VT); in lowerV2X128VectorShuffle()
10768 V2 = DAG.getUNDEF(VT); in lowerV2X128VectorShuffle()
10775 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2, in lowerV2X128VectorShuffle()
10792 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask, in lowerVectorShuffleByMerging128BitLanes() argument
10797 int LaneSize = 128 / VT.getScalarSizeInBits(); in lowerVectorShuffleByMerging128BitLanes()
10830 MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64, in lowerVectorShuffleByMerging128BitLanes()
10831 VT.getSizeInBits() / 64); in lowerVectorShuffleByMerging128BitLanes()
10844 LaneShuffle = DAG.getBitcast(VT, LaneShuffle); in lowerVectorShuffleByMerging128BitLanes()
10851 assert(!is128BitLaneCrossingShuffleMask(VT, NewMask) && in lowerVectorShuffleByMerging128BitLanes()
10854 return DAG.getVectorShuffle(VT, DL, LaneShuffle, DAG.getUNDEF(VT), NewMask); in lowerVectorShuffleByMerging128BitLanes()
10860 static SDValue lowerVectorShuffleWithUndefHalf(const SDLoc &DL, MVT VT, in lowerVectorShuffleWithUndefHalf() argument
10865 assert(VT.is256BitVector() && "Expected 256-bit vector"); in lowerVectorShuffleWithUndefHalf()
10867 unsigned NumElts = VT.getVectorNumElements(); in lowerVectorShuffleWithUndefHalf()
10869 MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(), HalfNumElts); in lowerVectorShuffleWithUndefHalf()
10882 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi, in lowerVectorShuffleWithUndefHalf()
10892 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi, in lowerVectorShuffleWithUndefHalf()
10951 if (VT == MVT::v4f64 || VT == MVT::v4i64) in lowerVectorShuffleWithUndefHalf()
10954 if (VT == MVT::v8f32 || VT == MVT::v8i32) { in lowerVectorShuffleWithUndefHalf()
10973 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, in lowerVectorShuffleWithUndefHalf()
10997 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask, in lowerShuffleAsRepeatedMaskAndLanePermute() argument
10999 int NumElts = VT.getVectorNumElements(); in lowerShuffleAsRepeatedMaskAndLanePermute()
11000 int NumLanes = VT.getSizeInBits() / 128; in lowerShuffleAsRepeatedMaskAndLanePermute()
11007 if (BroadcastSize <= VT.getScalarSizeInBits()) in lowerShuffleAsRepeatedMaskAndLanePermute()
11009 int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits(); in lowerShuffleAsRepeatedMaskAndLanePermute()
11035 SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask); in lowerShuffleAsRepeatedMaskAndLanePermute()
11042 return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT), in lowerShuffleAsRepeatedMaskAndLanePermute()
11049 if (is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedShuffleMask)) in lowerShuffleAsRepeatedMaskAndLanePermute()
11054 int SubLaneScale = Subtarget.hasAVX2() && VT.is256BitVector() ? 2 : 1; in lowerShuffleAsRepeatedMaskAndLanePermute()
11105 SDValue RepeatedShuffle = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask); in lowerShuffleAsRepeatedMaskAndLanePermute()
11117 return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT), in lowerShuffleAsRepeatedMaskAndLanePermute()
11121 static SDValue lowerVectorShuffleWithSHUFPD(const SDLoc &DL, MVT VT, in lowerVectorShuffleWithSHUFPD() argument
11127 assert(VT.getScalarSizeInBits() == 64 && "Unexpected data type for VSHUFPD"); in lowerVectorShuffleWithSHUFPD()
11128 int NumElts = VT.getVectorNumElements(); in lowerVectorShuffleWithSHUFPD()
11144 return DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2, in lowerVectorShuffleWithSHUFPD()
11147 return DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1, in lowerVectorShuffleWithSHUFPD()
11648 MVT VT, SDValue V1, SDValue V2, in lower256BitVectorShuffle() argument
11653 int NumElts = VT.getVectorNumElements(); in lower256BitVectorShuffle()
11658 DL, VT, V1, V2, Mask, Subtarget, DAG)) in lower256BitVectorShuffle()
11663 lowerVectorShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG)) in lower256BitVectorShuffle()
11672 if (VT.isInteger() && !Subtarget.hasAVX2()) { in lower256BitVectorShuffle()
11673 int ElementBits = VT.getScalarSizeInBits(); in lower256BitVectorShuffle()
11677 if (SDValue V = lowerVectorShuffleAsBitMask(DL, VT, V1, V2, Mask, DAG)) in lower256BitVectorShuffle()
11679 if (SDValue V = lowerVectorShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG)) in lower256BitVectorShuffle()
11681 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG); in lower256BitVectorShuffle()
11685 VT.getVectorNumElements()); in lower256BitVectorShuffle()
11688 return DAG.getBitcast(VT, DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask)); in lower256BitVectorShuffle()
11691 switch (VT.SimpleTy) { in lower256BitVectorShuffle()
11711 static SDValue lowerV4X128VectorShuffle(const SDLoc &DL, MVT VT, in lowerV4X128VectorShuffle() argument
11714 assert(VT.getScalarSizeInBits() == 64 && in lowerV4X128VectorShuffle()
11719 assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle."); in lowerV4X128VectorShuffle()
11725 SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)}; in lowerV4X128VectorShuffle()
11727 int MaxOp1Index = VT.getVectorNumElements()/2 - 1; in lowerV4X128VectorShuffle()
11754 return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1], in lowerV4X128VectorShuffle()
11758 static SDValue lowerVectorShuffleWithPERMV(const SDLoc &DL, MVT VT, in lowerVectorShuffleWithPERMV() argument
11762 assert(VT.getScalarSizeInBits() >= 16 && "Unexpected data type for PERMV"); in lowerVectorShuffleWithPERMV()
11764 MVT MaskEltVT = MVT::getIntegerVT(VT.getScalarSizeInBits()); in lowerVectorShuffleWithPERMV()
11765 MVT MaskVecVT = MVT::getVectorVT(MaskEltVT, VT.getVectorNumElements()); in lowerVectorShuffleWithPERMV()
11769 return DAG.getNode(X86ISD::VPERMV, DL, VT, MaskNode, V1); in lowerVectorShuffleWithPERMV()
11771 return DAG.getNode(X86ISD::VPERMV3, DL, VT, V1, MaskNode, V2); in lowerVectorShuffleWithPERMV()
12021 MVT VT, SDValue V1, SDValue V2, in lower512BitVectorShuffle() argument
12029 lowerVectorShuffleAsBroadcast(DL, VT, V1, V2, Mask, Subtarget, DAG)) in lower512BitVectorShuffle()
12036 switch (VT.SimpleTy) { in lower512BitVectorShuffle()
12060 MVT VT, SDValue V1, SDValue V2, in lower1BitVectorShuffle() argument
12066 switch (VT.SimpleTy) { in lower1BitVectorShuffle()
12104 return DAG.getNode(ISD::TRUNCATE, DL, VT, in lower1BitVectorShuffle()
12120 MVT VT = Op.getSimpleValueType(); in lowerVectorShuffle() local
12121 int NumElements = VT.getVectorNumElements(); in lowerVectorShuffle()
12123 bool Is1BitVector = (VT.getVectorElementType() == MVT::i1); in lowerVectorShuffle()
12125 assert((VT.getSizeInBits() != 64 || Is1BitVector) && in lowerVectorShuffle()
12131 return DAG.getUNDEF(VT); in lowerVectorShuffle()
12149 return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask); in lowerVectorShuffle()
12157 return getZeroVector(VT, Subtarget, DAG, DL); in lowerVectorShuffle()
12164 if (VT.getScalarSizeInBits() < 64 && !Is1BitVector && in lowerVectorShuffle()
12166 MVT NewEltVT = VT.isFloatingPoint() in lowerVectorShuffle()
12167 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2) in lowerVectorShuffle()
12168 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2); in lowerVectorShuffle()
12169 MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2); in lowerVectorShuffle()
12176 VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask)); in lowerVectorShuffle()
12235 if (VT.is128BitVector()) in lowerVectorShuffle()
12236 return lower128BitVectorShuffle(DL, Mask, VT, V1, V2, Subtarget, DAG); in lowerVectorShuffle()
12238 if (VT.is256BitVector()) in lowerVectorShuffle()
12239 return lower256BitVectorShuffle(DL, Mask, VT, V1, V2, Subtarget, DAG); in lowerVectorShuffle()
12241 if (VT.is512BitVector()) in lowerVectorShuffle()
12242 return lower512BitVectorShuffle(DL, Mask, VT, V1, V2, Subtarget, DAG); in lowerVectorShuffle()
12245 return lower1BitVectorShuffle(DL, Mask, VT, V1, V2, Subtarget, DAG); in lowerVectorShuffle()
12258 MVT VT = Op.getSimpleValueType(); in lowerVSELECTtoVectorShuffle() local
12267 for (int i = 0, Size = VT.getVectorNumElements(); i < Size; ++i) { in lowerVSELECTtoVectorShuffle()
12273 return DAG.getVectorShuffle(VT, dl, LHS, RHS, Mask); in lowerVSELECTtoVectorShuffle()
12321 MVT VT = Op.getSimpleValueType(); in LowerEXTRACT_VECTOR_ELT_SSE4() local
12327 if (VT.getSizeInBits() == 8) { in LowerEXTRACT_VECTOR_ELT_SSE4()
12331 DAG.getValueType(VT)); in LowerEXTRACT_VECTOR_ELT_SSE4()
12332 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); in LowerEXTRACT_VECTOR_ELT_SSE4()
12335 if (VT.getSizeInBits() == 16) { in LowerEXTRACT_VECTOR_ELT_SSE4()
12346 DAG.getValueType(VT)); in LowerEXTRACT_VECTOR_ELT_SSE4()
12347 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); in LowerEXTRACT_VECTOR_ELT_SSE4()
12350 if (VT == MVT::f32) { in LowerEXTRACT_VECTOR_ELT_SSE4()
12370 if (VT == MVT::i32 || VT == MVT::i64) { in LowerEXTRACT_VECTOR_ELT_SSE4()
12478 MVT VT = Op.getSimpleValueType(); in LowerEXTRACT_VECTOR_ELT() local
12480 if (VT.getSizeInBits() == 16) { in LowerEXTRACT_VECTOR_ELT()
12490 DAG.getValueType(VT)); in LowerEXTRACT_VECTOR_ELT()
12491 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); in LowerEXTRACT_VECTOR_ELT()
12494 if (VT.getSizeInBits() == 32) { in LowerEXTRACT_VECTOR_ELT()
12501 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec, in LowerEXTRACT_VECTOR_ELT()
12505 if (VT.getSizeInBits() == 64) { in LowerEXTRACT_VECTOR_ELT()
12517 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec, in LowerEXTRACT_VECTOR_ELT()
12557 MVT VT = Op.getSimpleValueType(); in LowerINSERT_VECTOR_ELT() local
12558 MVT EltVT = VT.getVectorElementType(); in LowerINSERT_VECTOR_ELT()
12559 unsigned NumElts = VT.getVectorNumElements(); in LowerINSERT_VECTOR_ELT()
12582 SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, dl); in LowerINSERT_VECTOR_ELT()
12583 return DAG.getVectorShuffle(VT, dl, N0, ZeroVector, ClearMask); in LowerINSERT_VECTOR_ELT()
12588 if (VT.is256BitVector() || VT.is512BitVector()) { in LowerINSERT_VECTOR_ELT()
12591 if (VT.is256BitVector() && IdxVal == 0) { in LowerINSERT_VECTOR_ELT()
12597 SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1); in LowerINSERT_VECTOR_ELT()
12599 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec, N2); in LowerINSERT_VECTOR_ELT()
12618 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!"); in LowerINSERT_VECTOR_ELT()
12623 if (VT == MVT::v8i16) { in LowerINSERT_VECTOR_ELT()
12626 assert(VT == MVT::v16i8); in LowerINSERT_VECTOR_ELT()
12636 return DAG.getNode(Opc, dl, VT, N0, N1, N2); in LowerINSERT_VECTOR_ELT()
12660 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1, N2); in LowerINSERT_VECTOR_ELT()
12665 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2); in LowerINSERT_VECTOR_ELT()
12684 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2); in LowerINSERT_VECTOR_ELT()
13285 MVT VT = Op.getSimpleValueType(); in LowerShiftParts() local
13286 unsigned VTBits = VT.getSizeInBits(); in LowerShiftParts()
13297 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi, in LowerShiftParts()
13299 : DAG.getConstant(0, dl, VT); in LowerShiftParts()
13303 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt); in LowerShiftParts()
13304 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt); in LowerShiftParts()
13306 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt); in LowerShiftParts()
13307 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt); in LowerShiftParts()
13324 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0); in LowerShiftParts()
13325 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1); in LowerShiftParts()
13327 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0); in LowerShiftParts()
13328 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1); in LowerShiftParts()
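Note: LowerShiftParts above builds a double-width shift from two half-width values: a SHLD/SHRD candidate plus a plain shift by the masked amount, then a CMOV-style select when the amount reaches the half width. A minimal scalar model of the SHL_PARTS path, assuming 64-bit halves (shl128/shld64 are illustrative names, not LLVM APIs; the "big shift" test on bit 6 is part of the model):

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  // x86-style double-precision shift: the high half receives bits funneled
  // in from the low half (SHLD). The count is masked to 0..63 like the
  // hardware instruction.
  static uint64_t shld64(uint64_t Hi, uint64_t Lo, unsigned Amt) {
    Amt &= 63;
    return Amt ? (Hi << Amt) | (Lo >> (64 - Amt)) : Hi;
  }

  static void shl128(uint64_t Hi, uint64_t Lo, unsigned Amt,
                     uint64_t &OutHi, uint64_t &OutLo) {
    unsigned Safe = Amt & 63;
    uint64_t Tmp2 = shld64(Hi, Lo, Amt);   // candidate high half (SHLD)
    uint64_t Tmp3 = Lo << Safe;            // candidate low half (SHL, SafeShAmt)
    bool Big = (Amt & 64) != 0;            // shift amount >= 64?
    OutHi = Big ? Tmp3 : Tmp2;             // CMOV-style selection
    OutLo = Big ? 0 : Tmp3;
  }

  int main() {
    uint64_t Hi, Lo;
    shl128(0x0, 0x8000000000000001ULL, 1, Hi, Lo);
    assert(Hi == 1 && Lo == 2);
    shl128(0x0, 0x1, 70, Hi, Lo);          // amount >= 64 path
    assert(Hi == (1ULL << 6) && Lo == 0);
    puts("ok");
  }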
13339 MVT VT = Op.getSimpleValueType(); in LowerSINT_TO_FP() local
13343 if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) { in LowerSINT_TO_FP()
13344 return DAG.getNode(X86ISD::CVTDQ2PD, dl, VT, in LowerSINT_TO_FP()
13979 MVT VT = Op->getSimpleValueType(0); in LowerAVXExtend() local
13984 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1) in LowerAVXExtend()
13985 return DAG.getNode(ISD::ZERO_EXTEND, dl, VT, In); in LowerAVXExtend()
14000 if (((VT != MVT::v16i16) || (InVT != MVT::v16i8)) && in LowerAVXExtend()
14001 ((VT != MVT::v8i32) || (InVT != MVT::v8i16)) && in LowerAVXExtend()
14002 ((VT != MVT::v4i64) || (InVT != MVT::v4i32))) in LowerAVXExtend()
14006 return DAG.getNode(X86ISD::VZEXT, dl, VT, In); in LowerAVXExtend()
14014 MVT HVT = MVT::getVectorVT(VT.getVectorElementType(), in LowerAVXExtend()
14015 VT.getVectorNumElements()/2); in LowerAVXExtend()
14020 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi); in LowerAVXExtend()
14025 MVT VT = Op->getSimpleValueType(0); in LowerZERO_EXTEND_AVX512() local
14029 unsigned int NumElts = VT.getVectorNumElements(); in LowerZERO_EXTEND_AVX512()
14033 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) in LowerZERO_EXTEND_AVX512()
14034 return DAG.getNode(X86ISD::VZEXT, DL, VT, In); in LowerZERO_EXTEND_AVX512()
14039 MVT ExtVT = VT; in LowerZERO_EXTEND_AVX512()
14040 if (!VT.is512BitVector() && !Subtarget.hasVLX()) in LowerZERO_EXTEND_AVX512()
14049 if (VT == ExtVT) in LowerZERO_EXTEND_AVX512()
14051 return DAG.getNode(X86ISD::VTRUNC, DL, VT, SelectedVal); in LowerZERO_EXTEND_AVX512()
14066 MVT VT = Op.getSimpleValueType(); in LowerZERO_EXTEND() local
14070 if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1) in LowerZERO_EXTEND()
14077 assert(!VT.is256BitVector() || !SVT.is128BitVector() || in LowerZERO_EXTEND()
14078 VT.getVectorNumElements() != SVT.getVectorNumElements()); in LowerZERO_EXTEND()
14086 MVT VT = Op.getSimpleValueType(); in LowerTruncateVecI1() local
14090 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type."); in LowerTruncateVecI1()
14103 return DAG.getNode(X86ISD::CVT2MASK, DL, VT, ShiftNode); in LowerTruncateVecI1()
14117 return DAG.getNode(X86ISD::TESTM, DL, VT, ShiftNode, ShiftNode); in LowerTruncateVecI1()
14122 MVT VT = Op.getSimpleValueType(); in LowerTRUNCATE() local
14126 if (VT == MVT::i1) { in LowerTRUNCATE()
14132 return DAG.getNode(ISD::TRUNCATE, DL, VT, In); in LowerTRUNCATE()
14134 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() && in LowerTRUNCATE()
14137 if (VT.getVectorElementType() == MVT::i1) in LowerTRUNCATE()
14144 return DAG.getNode(X86ISD::VTRUNC, DL, VT, in LowerTRUNCATE()
14146 return DAG.getNode(X86ISD::VTRUNC, DL, VT, In); in LowerTRUNCATE()
14148 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) { in LowerTRUNCATE()
14155 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In, in LowerTRUNCATE()
14166 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask); in LowerTRUNCATE()
14169 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) { in LowerTRUNCATE()
14196 return DAG.getBitcast(VT, In); in LowerTRUNCATE()
14226 if (!VT.is128BitVector() || !InVT.is256BitVector()) in LowerTRUNCATE()
14231 unsigned NumElems = VT.getVectorNumElements(); in LowerTRUNCATE()
14232 MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2); in LowerTRUNCATE()
14240 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, in LowerTRUNCATE()
14286 MVT VT = Op.getSimpleValueType(); in LowerFP_EXTEND() local
14292 return DAG.getNode(X86ISD::VFPEXT, DL, VT, in LowerFP_EXTEND()
14313 MVT VT = Op.getSimpleValueType(); in LowerFABSorFNEG() local
14315 bool IsF128 = (VT == MVT::f128); in LowerFABSorFNEG()
14325 if (VT.isVector()) { in LowerFABSorFNEG()
14326 LogicVT = VT; in LowerFABSorFNEG()
14327 EltVT = VT.getVectorElementType(); in LowerFABSorFNEG()
14328 NumElts = VT.getVectorNumElements(); in LowerFABSorFNEG()
14332 EltVT = VT; in LowerFABSorFNEG()
14339 LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32; in LowerFABSorFNEG()
14340 EltVT = VT; in LowerFABSorFNEG()
14341 NumElts = (VT == MVT::f64) ? 2 : 4; in LowerFABSorFNEG()
14365 if (VT.isVector() || IsF128) in LowerFABSorFNEG()
14372 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, LogicNode, in LowerFABSorFNEG()
14382 MVT VT = Op.getSimpleValueType(); in LowerFCOPYSIGN() local
14384 bool IsF128 = (VT == MVT::f128); in LowerFCOPYSIGN()
14387 if (SrcVT.bitsLT(VT)) { in LowerFCOPYSIGN()
14388 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1); in LowerFCOPYSIGN()
14389 SrcVT = VT; in LowerFCOPYSIGN()
14392 if (SrcVT.bitsGT(VT)) { in LowerFCOPYSIGN()
14393 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1, dl)); in LowerFCOPYSIGN()
14394 SrcVT = VT; in LowerFCOPYSIGN()
14399 assert((VT == MVT::f64 || VT == MVT::f32 || IsF128) && in LowerFCOPYSIGN()
14403 VT == MVT::f64 ? APFloat::IEEEdouble : in LowerFCOPYSIGN()
14405 const unsigned SizeInBits = VT.getSizeInBits(); in LowerFCOPYSIGN()
14408 VT == MVT::f64 ? 2 : (IsF128 ? 1 : 4), in LowerFCOPYSIGN()
14421 MVT LogicVT = (VT == MVT::f64) ? MVT::v2f64 : (IsF128 ? MVT::f128 : MVT::v4f32); in LowerFCOPYSIGN()
14468 MVT VT = Op.getSimpleValueType(); in LowerFGETSIGN() local
14478 Res = DAG.getZExtOrTrunc(Res, dl, VT); in LowerFGETSIGN()
14479 Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT)); in LowerFGETSIGN()
14500 EVT VT = MVT::Other; in LowerVectorAllZeroTest() local
14530 VT = ExtractedFromVec.getValueType(); in LowerVectorAllZeroTest()
14532 if (!VT.is128BitVector() && !VT.is256BitVector()) in LowerVectorAllZeroTest()
14536 VT != VecInMap.begin()->first.getValueType()) in LowerVectorAllZeroTest()
14544 assert((VT.is128BitVector() || VT.is256BitVector()) && in LowerVectorAllZeroTest()
14547 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U; in LowerVectorAllZeroTest()
14556 MVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64; in LowerVectorAllZeroTest()
14598 auto hasKTEST = [&](MVT VT) { in EmitKTEST() argument
14599 unsigned SizeInBits = VT.getSizeInBits(); in EmitKTEST()
14739 EVT VT = Op.getValueType(); in EmitTest() local
14740 unsigned BitWidth = VT.getSizeInBits(); in EmitTest()
14749 Op = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0), in EmitTest()
14750 DAG.getConstant(Mask, dl, VT)); in EmitTest()
14760 EVT VT = ArithOp.getValueType(); in EmitTest() local
14762 bool isLegalAndnType = VT == MVT::i32 || VT == MVT::i64; in EmitTest()
14814 EVT VT = Op.getValueType(); in EmitTest() local
14833 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0)); in EmitTest()
14834 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1)); in EmitTest()
14835 Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1); in EmitTest()
14922 EVT VT = Op.getValueType(); in getRsqrtEstimate() local
14932 if (VT == MVT::f32 && Subtarget.hasSSE1()) in getRsqrtEstimate()
14934 else if ((VT == MVT::v4f32 && Subtarget.hasSSE1()) || in getRsqrtEstimate()
14935 (VT == MVT::v8f32 && Subtarget.hasAVX())) in getRsqrtEstimate()
14946 return DCI.DAG.getNode(X86ISD::FRSQRT, SDLoc(Op), VT, Op); in getRsqrtEstimate()
14954 EVT VT = Op.getValueType(); in getRecipEstimate() local
14964 if (VT == MVT::f32 && Subtarget.hasSSE1()) in getRecipEstimate()
14966 else if ((VT == MVT::v4f32 && Subtarget.hasSSE1()) || in getRecipEstimate()
14967 (VT == MVT::v8f32 && Subtarget.hasAVX())) in getRecipEstimate()
14977 return DCI.DAG.getNode(X86ISD::FRCP, SDLoc(Op), VT, Op); in getRecipEstimate()
15107 MVT VT = Op.getSimpleValueType(); in Lower256IntVSETCC() local
15109 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC && in Lower256IntVSETCC()
15112 unsigned NumElems = VT.getVectorNumElements(); in Lower256IntVSETCC()
15127 MVT EltVT = VT.getVectorElementType(); in Lower256IntVSETCC()
15129 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, in Lower256IntVSETCC()
15138 MVT VT = Op.getSimpleValueType(); in LowerBoolVSETCC_AVX512() local
15144 SDValue NotOp0 = DAG.getNode(ISD::XOR, dl, VT, Op0, in LowerBoolVSETCC_AVX512()
15145 DAG.getConstant(-1, dl, VT)); in LowerBoolVSETCC_AVX512()
15146 SDValue NotOp1 = DAG.getNode(ISD::XOR, dl, VT, Op1, in LowerBoolVSETCC_AVX512()
15147 DAG.getConstant(-1, dl, VT)); in LowerBoolVSETCC_AVX512()
15152 return DAG.getNode(ISD::XOR, dl, VT, in LowerBoolVSETCC_AVX512()
15153 DAG.getNode(ISD::XOR, dl, VT, Op0, Op1), in LowerBoolVSETCC_AVX512()
15154 DAG.getConstant(-1, dl, VT)); in LowerBoolVSETCC_AVX512()
15157 return DAG.getNode(ISD::XOR, dl, VT, Op0, Op1); in LowerBoolVSETCC_AVX512()
15161 return DAG.getNode(ISD::AND, dl, VT, Op0, NotOp1); in LowerBoolVSETCC_AVX512()
15165 return DAG.getNode(ISD::AND, dl, VT, NotOp0, Op1); in LowerBoolVSETCC_AVX512()
15169 return DAG.getNode(ISD::OR, dl, VT, NotOp0, Op1); in LowerBoolVSETCC_AVX512()
15173 return DAG.getNode(ISD::OR, dl, VT, Op0, NotOp1); in LowerBoolVSETCC_AVX512()
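Note: LowerBoolVSETCC_AVX512 above rewrites compares of i1 vectors as plain AND/OR/XOR, with NOT formed by XOR against -1. The exact condition-code cases of the switch are not visible in the listing, so the mapping below is only one plausible (unsigned) reading; the truth-table check shows the per-bit identities themselves hold:

  #include <cassert>
  #include <cstdio>

  int main() {
    for (int a = 0; a <= 1; ++a)
      for (int b = 0; b <= 1; ++b) {
        assert(((a == b) ? 1 : 0) == ((~(a ^ b)) & 1));     // NOT(a XOR b)
        assert(((a != b) ? 1 : 0) == (a ^ b));              // a XOR b
        assert(((a >  b) ? 1 : 0) == (a & (~b & 1)));       // a AND (NOT b)
        assert(((a <  b) ? 1 : 0) == ((~a & 1) & b));       // (NOT a) AND b
        assert(((a >= b) ? 1 : 0) == ((a | (~b & 1)) & 1)); // a OR (NOT b)
        assert(((a <= b) ? 1 : 0) == (((~a & 1) | b) & 1)); // (NOT a) OR b
      }
    puts("i1 setcc identities hold");
  }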
15182 MVT VT = Op.getSimpleValueType(); in LowerIntVSETCC_AVX512() local
15185 assert(VT.getVectorElementType() == MVT::i1 && in LowerIntVSETCC_AVX512()
15210 return DAG.getNode(Opc, dl, VT, Op0, Op1); in LowerIntVSETCC_AVX512()
15212 return DAG.getNode(Opc, dl, VT, Op0, Op1, in LowerIntVSETCC_AVX512()
15225 MVT VT = Op1.getSimpleValueType(); in ChangeVSETULTtoVSETULE() local
15226 MVT EVT = VT.getVectorElementType(); in ChangeVSETULTtoVSETULE()
15227 unsigned n = VT.getVectorNumElements(); in ChangeVSETULTtoVSETULE()
15243 return DAG.getBuildVector(VT, dl, ULTOp1); in ChangeVSETULTtoVSETULE()
15251 MVT VT = Op.getSimpleValueType(); in LowerVSETCC() local
15263 if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1) { in LowerVSETCC()
15264 assert(VT.getVectorNumElements() <= 16); in LowerVSETCC()
15271 VT = Op0.getSimpleValueType(); in LowerVSETCC()
15297 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1, in LowerVSETCC()
15299 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1, in LowerVSETCC()
15301 Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1); in LowerVSETCC()
15304 Cmp = DAG.getNode(Opc, dl, VT, Op0, Op1, in LowerVSETCC()
15320 assert(VT.getVectorNumElements() == VTOp0.getVectorNumElements() && in LowerVSETCC()
15323 if (VT.is128BitVector() && VTOp0.is256BitVector()) { in LowerVSETCC()
15338 return DAG.getZExtOrTrunc(NewOp, dl, VT); in LowerVSETCC()
15343 assert((Subtarget.hasAVX512() || (VT == VTOp0)) && in LowerVSETCC()
15347 if (VT.is256BitVector() && !Subtarget.hasInt256()) in LowerVSETCC()
15356 if (VT.getVectorElementType() == MVT::i1) { in LowerVSETCC()
15368 return DAG.getNode(ISD::TRUNCATE, dl, VT, in LowerVSETCC()
15373 if ((VT == MVT::v16i8 || VT == MVT::v8i16 || in LowerVSETCC()
15374 VT == MVT::v4i32 || VT == MVT::v2i64) && Subtarget.hasXOP()) { in LowerVSETCC()
15395 return DAG.getNode(Opc, dl, VT, Op0, Op1, in LowerVSETCC()
15424 MVT VET = VT.getVectorElementType(); in LowerVSETCC()
15477 if (VT == MVT::v2i64) { in LowerVSETCC()
15516 return DAG.getBitcast(VT, Result); in LowerVSETCC()
15539 return DAG.getBitcast(VT, Result); in LowerVSETCC()
15546 MVT EltVT = VT.getVectorElementType(); in LowerVSETCC()
15548 VT); in LowerVSETCC()
15549 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB); in LowerVSETCC()
15550 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB); in LowerVSETCC()
15553 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1); in LowerVSETCC()
15557 Result = DAG.getNOT(dl, Result, VT); in LowerVSETCC()
15560 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result); in LowerVSETCC()
15563 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result, in LowerVSETCC()
15564 getZeroVector(VT, Subtarget, DAG, dl)); in LowerVSETCC()
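Note: near the end of LowerVSETCC above, both operands are XORed with a constant SB before a signed compare and an optional NOT. Assuming SB is the per-element sign-bit pattern (the line that builds it is truncated in the listing), this is the usual unsigned-compare-via-signed-compare trick, checked here on scalars:

  //   ugt(a, b)  ==  sgt(a ^ 0x80..0, b ^ 0x80..0)
  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  bool ugtViaSignedCompare(uint32_t A, uint32_t B) {
    const uint32_t SB = 0x80000000u;               // assumed sign-bit constant
    return (int32_t)(A ^ SB) > (int32_t)(B ^ SB);  // signed compare after flip
  }

  int main() {
    for (uint32_t A : {0u, 1u, 0x7FFFFFFFu, 0x80000000u, 0xFFFFFFFFu})
      for (uint32_t B : {0u, 1u, 0x7FFFFFFFu, 0x80000000u, 0xFFFFFFFFu})
        assert(ugtViaSignedCompare(A, B) == (A > B));
    puts("ok");
  }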
15571 MVT VT = Op.getSimpleValueType(); in LowerSETCC() local
15573 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG); in LowerSETCC()
15575 assert(((!Subtarget.hasAVX512() && VT == MVT::i8) || (VT == MVT::i1)) in LowerSETCC()
15590 if (VT == MVT::i1) { in LowerSETCC()
15616 if (VT == MVT::i1) { in LowerSETCC()
15627 return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, dl, MVT::i1), NewCC); in LowerSETCC()
15631 return DAG.getSetCC(dl, VT, Xor, DAG.getConstant(0, dl, MVT::i1), CC); in LowerSETCC()
15644 if (VT == MVT::i1) { in LowerSETCC()
15726 MVT VT = Op1.getSimpleValueType(); in LowerSELECT() local
15733 ((Subtarget.hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) || in LowerSELECT()
15734 (Subtarget.hasSSE1() && VT == MVT::f32)) && in LowerSELECT()
15735 VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) { in LowerSELECT()
15744 return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2); in LowerSELECT()
15747 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1, in LowerSELECT()
15769 MVT VecVT = VT == MVT::f32 ? MVT::v4f32 : MVT::v2f64; in LowerSELECT()
15774 MVT VCmpVT = VT == MVT::f32 ? MVT::v4i32 : MVT::v2i64; in LowerSELECT()
15779 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, in LowerSELECT()
15782 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2); in LowerSELECT()
15783 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1); in LowerSELECT()
15784 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And); in LowerSELECT()
15788 if (VT.isVector() && VT.getVectorElementType() == MVT::i1) { in LowerSELECT()
15803 if (newSelect.getValueSizeInBits() == VT.getSizeInBits()) in LowerSELECT()
15804 return DAG.getBitcast(VT, newSelect); in LowerSELECT()
15806 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, ExtVec, in LowerSELECT()
15811 if (VT == MVT::v4i1 || VT == MVT::v2i1) { in LowerSELECT()
15819 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, newSelect, zeroConst); in LowerSELECT()
15891 MVT VT = Op.getSimpleValueType(); in LowerSELECT() local
15894 if (VT.isFloatingPoint() && !VT.isVector() && in LowerSELECT()
15895 !isScalarFPTypeInSSEReg(VT)) // FPStack? in LowerSELECT()
16003 MVT VT = Op->getSimpleValueType(0); in LowerSIGN_EXTEND_AVX512() local
16006 MVT VTElt = VT.getVectorElementType(); in LowerSIGN_EXTEND_AVX512()
16013 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() <= 16)) || in LowerSIGN_EXTEND_AVX512()
16015 ((Subtarget.hasBWI() && VT.is512BitVector() && in LowerSIGN_EXTEND_AVX512()
16019 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() >= 32)) || in LowerSIGN_EXTEND_AVX512()
16021 ((Subtarget.hasDQI() && VT.is512BitVector() && in LowerSIGN_EXTEND_AVX512()
16023 return DAG.getNode(X86ISD::VSEXT, dl, VT, In); in LowerSIGN_EXTEND_AVX512()
16025 unsigned int NumElts = VT.getVectorNumElements(); in LowerSIGN_EXTEND_AVX512()
16030 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) { in LowerSIGN_EXTEND_AVX512()
16032 return DAG.getNode(In.getOpcode(), dl, VT, In.getOperand(0)); in LowerSIGN_EXTEND_AVX512()
16033 return DAG.getNode(X86ISD::VSEXT, dl, VT, In); in LowerSIGN_EXTEND_AVX512()
16045 if (VT.is512BitVector()) in LowerSIGN_EXTEND_AVX512()
16047 return DAG.getNode(X86ISD::VTRUNC, dl, VT, V); in LowerSIGN_EXTEND_AVX512()
16054 MVT VT = Op->getSimpleValueType(0); in LowerSIGN_EXTEND_VECTOR_INREG() local
16056 assert(VT.getSizeInBits() == InVT.getSizeInBits()); in LowerSIGN_EXTEND_VECTOR_INREG()
16058 MVT SVT = VT.getVectorElementType(); in LowerSIGN_EXTEND_VECTOR_INREG()
16066 if (!(VT.is128BitVector() && Subtarget.hasSSE2()) && in LowerSIGN_EXTEND_VECTOR_INREG()
16067 !(VT.is256BitVector() && Subtarget.hasInt256())) in LowerSIGN_EXTEND_VECTOR_INREG()
16073 if (VT.is256BitVector()) in LowerSIGN_EXTEND_VECTOR_INREG()
16080 return DAG.getNode(X86ISD::VSEXT, dl, VT, In); in LowerSIGN_EXTEND_VECTOR_INREG()
16088 while (CurrVT != VT && CurrVT.getVectorElementType() != MVT::i32) { in LowerSIGN_EXTEND_VECTOR_INREG()
16103 if (CurrVT == VT) in LowerSIGN_EXTEND_VECTOR_INREG()
16106 if (VT == MVT::v2i64 && CurrVT == MVT::v4i32) { in LowerSIGN_EXTEND_VECTOR_INREG()
16110 return DAG.getBitcast(VT, Ext); in LowerSIGN_EXTEND_VECTOR_INREG()
16118 MVT VT = Op->getSimpleValueType(0); in LowerSIGN_EXTEND() local
16123 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1) in LowerSIGN_EXTEND()
16126 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) && in LowerSIGN_EXTEND()
16127 (VT != MVT::v8i32 || InVT != MVT::v8i16) && in LowerSIGN_EXTEND()
16128 (VT != MVT::v16i16 || InVT != MVT::v16i8)) in LowerSIGN_EXTEND()
16132 return DAG.getNode(X86ISD::VSEXT, dl, VT, In); in LowerSIGN_EXTEND()
16158 MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(), in LowerSIGN_EXTEND()
16159 VT.getVectorNumElements()/2); in LowerSIGN_EXTEND()
16164 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi); in LowerSIGN_EXTEND()
16237 MVT VT = Op.getValueType().getSimpleVT(); in LowerExtended1BitVectorLoad() local
16238 unsigned NumElts = VT.getVectorNumElements(); in LowerExtended1BitVectorLoad()
16250 MVT ExtVT = MVT::getVectorVT(VT.getScalarType(), 8); in LowerExtended1BitVectorLoad()
16253 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec, in LowerExtended1BitVectorLoad()
16282 return DAG.getNode(ExtOpcode, dl, VT, BitVec); in LowerExtended1BitVectorLoad()
16286 MVT ExtVT = MVT::getVectorVT(VT.getScalarType(), 8); in LowerExtended1BitVectorLoad()
16288 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec, in LowerExtended1BitVectorLoad()
16292 assert(VT == MVT::v32i8 && "Unexpected extload type"); in LowerExtended1BitVectorLoad()
16820 EVT VT = Node->getValueType(0); in LowerDYNAMIC_STACKALLOC() local
16836 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT); in LowerDYNAMIC_STACKALLOC()
16840 Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value in LowerDYNAMIC_STACKALLOC()
16842 Result = DAG.getNode(ISD::AND, dl, VT, Result, in LowerDYNAMIC_STACKALLOC()
16843 DAG.getConstant(-(uint64_t)Align, dl, VT)); in LowerDYNAMIC_STACKALLOC()
16875 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0), in LowerDYNAMIC_STACKALLOC()
16876 DAG.getConstant(-(uint64_t)Align, dl, VT)); in LowerDYNAMIC_STACKALLOC()
17036 static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT, in getTargetVShiftByConstNode() argument
17039 MVT ElementType = VT.getVectorElementType(); in getTargetVShiftByConstNode()
17050 return DAG.getConstant(0, dl, VT); in getTargetVShiftByConstNode()
17058 if (VT == SrcOp.getSimpleValueType() && in getTargetVShiftByConstNode()
17104 return DAG.getBuildVector(VT, dl, Elts); in getTargetVShiftByConstNode()
17107 return DAG.getNode(Opc, dl, VT, SrcOp, in getTargetVShiftByConstNode()
17113 static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT, in getTargetVShiftNode() argument
17121 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp, in getTargetVShiftNode()
17157 MVT EltVT = VT.getVectorElementType(); in getTargetVShiftNode()
17161 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt); in getTargetVShiftNode()
17221 MVT VT = Op.getSimpleValueType(); in getVectorMaskingNode() local
17222 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements()); in getVectorMaskingNode()
17237 return DAG.getNode(ISD::AND, dl, VT, Op, VMask); in getVectorMaskingNode()
17240 return DAG.getNode(ISD::OR, dl, VT, Op, VMask); in getVectorMaskingNode()
17252 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl); in getVectorMaskingNode()
17253 return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc); in getVectorMaskingNode()
17270 MVT VT = Op.getSimpleValueType(); in getScalarMaskingNode() local
17276 return DAG.getNode(ISD::AND, dl, VT, Op, IMask); in getScalarMaskingNode()
17279 return DAG.getNode(ISD::OR, dl, VT, Op, IMask); in getScalarMaskingNode()
17282 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl); in getScalarMaskingNode()
17283 return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc); in getScalarMaskingNode()
17351 MVT VT = Op.getSimpleValueType(); in LowerINTRINSIC_WO_CHAIN() local
17389 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src, in LowerINTRINSIC_WO_CHAIN()
17411 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src), in LowerINTRINSIC_WO_CHAIN()
17419 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2), in LowerINTRINSIC_WO_CHAIN()
17433 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, in LowerINTRINSIC_WO_CHAIN()
17440 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2, in LowerINTRINSIC_WO_CHAIN()
17469 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,Src1,Src2), in LowerINTRINSIC_WO_CHAIN()
17486 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, in LowerINTRINSIC_WO_CHAIN()
17498 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, in LowerINTRINSIC_WO_CHAIN()
17517 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, in LowerINTRINSIC_WO_CHAIN()
17554 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, in LowerINTRINSIC_WO_CHAIN()
17565 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,Src2, Src1), in LowerINTRINSIC_WO_CHAIN()
17575 MVT VT = Op.getSimpleValueType(); in LowerINTRINSIC_WO_CHAIN() local
17580 PassThru = getZeroVector(VT, Subtarget, DAG, dl); in LowerINTRINSIC_WO_CHAIN()
17582 PassThru = DAG.getBitcast(VT, Src2); in LowerINTRINSIC_WO_CHAIN()
17597 MVT VT = Op.getSimpleValueType(); in LowerINTRINSIC_WO_CHAIN() local
17602 PassThru = getZeroVector(VT, Subtarget, DAG, dl); in LowerINTRINSIC_WO_CHAIN()
17633 MVT VT = Op.getSimpleValueType(); in LowerINTRINSIC_WO_CHAIN() local
17638 PassThru = getZeroVector(VT, Subtarget, DAG, dl); in LowerINTRINSIC_WO_CHAIN()
17657 MVT VT = Op.getSimpleValueType(); in LowerINTRINSIC_WO_CHAIN() local
17661 PassThru = getZeroVector(VT, Subtarget, DAG, dl); in LowerINTRINSIC_WO_CHAIN()
17663 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, in LowerINTRINSIC_WO_CHAIN()
17670 MVT VT = Src1.getSimpleValueType(); in LowerINTRINSIC_WO_CHAIN() local
17671 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements()); in LowerINTRINSIC_WO_CHAIN()
17705 MVT VT = Op.getOperand(1).getSimpleValueType(); in LowerINTRINSIC_WO_CHAIN() local
17706 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements()); in LowerINTRINSIC_WO_CHAIN()
17842 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, in LowerINTRINSIC_WO_CHAIN()
17854 MVT VT = Op.getSimpleValueType(); in LowerINTRINSIC_WO_CHAIN() local
17855 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getSizeInBits()/2); in LowerINTRINSIC_WO_CHAIN()
17861 MVT::getVectorVT(MVT::i1, VT.getSizeInBits()), in LowerINTRINSIC_WO_CHAIN()
17863 return DAG.getBitcast(VT, Res); in LowerINTRINSIC_WO_CHAIN()
17875 Src1 : getZeroVector(VT, Subtarget, DAG, dl); in LowerINTRINSIC_WO_CHAIN()
17886 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, in LowerINTRINSIC_WO_CHAIN()
17890 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, in LowerINTRINSIC_WO_CHAIN()
17897 MVT BitcastVT = MVT::getVectorVT(MVT::i1, VT.getSizeInBits()); in LowerINTRINSIC_WO_CHAIN()
17908 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements()); in LowerINTRINSIC_WO_CHAIN()
17910 return DAG.getNode(IntrData->Opc0, dl, VT, VMask); in LowerINTRINSIC_WO_CHAIN()
17925 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, in LowerINTRINSIC_WO_CHAIN()
17934 assert((VT.getScalarType() == MVT::i32 || in LowerINTRINSIC_WO_CHAIN()
17935 VT.getScalarType() == MVT::f32) && "Unexpected type!"); in LowerINTRINSIC_WO_CHAIN()
17937 MVT ScalarVT = VT.getScalarType() == MVT::i32 ? MVT::i64 : MVT::f64; in LowerINTRINSIC_WO_CHAIN()
17941 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src), in LowerINTRINSIC_WO_CHAIN()
18119 return DAG.getNode(X86ISD::Wrapper, dl, VT, DAG.getMCSymbol(S, PtrVT)); in LowerINTRINSIC_WO_CHAIN()
18132 SDValue Result = DAG.getMCSymbol(LSDASym, VT); in LowerINTRINSIC_WO_CHAIN()
18133 return DAG.getNode(X86ISD::Wrapper, dl, VT, Result); in LowerINTRINSIC_WO_CHAIN()
18157 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); in LowerINTRINSIC_WO_CHAIN()
18496 MVT VT = DataToCompress.getSimpleValueType(); in LowerINTRINSIC_W_CHAIN() local
18506 getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress), in LowerINTRINSIC_W_CHAIN()
18507 Mask, DAG.getUNDEF(VT), Subtarget, DAG); in LowerINTRINSIC_W_CHAIN()
18522 EVT VT = MemIntr->getMemoryVT(); in LowerINTRINSIC_W_CHAIN() local
18525 return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, VT, in LowerINTRINSIC_W_CHAIN()
18528 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements()); in LowerINTRINSIC_W_CHAIN()
18531 return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, VMask, VT, in LowerINTRINSIC_W_CHAIN()
18539 MVT VT = Op.getSimpleValueType(); in LowerINTRINSIC_W_CHAIN() local
18544 SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr, in LowerINTRINSIC_W_CHAIN()
18551 getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, DataToExpand), in LowerINTRINSIC_W_CHAIN()
18591 EVT VT = Op.getValueType(); in LowerFRAMEADDR() local
18607 return DAG.getFrameIndex(FrameAddrIndex, VT); in LowerFRAMEADDR()
18614 assert(((FrameReg == X86::RBP && VT == MVT::i64) || in LowerFRAMEADDR()
18615 (FrameReg == X86::EBP && VT == MVT::i32)) && in LowerFRAMEADDR()
18617 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); in LowerFRAMEADDR()
18619 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, in LowerFRAMEADDR()
18627 unsigned X86TargetLowering::getRegisterByName(const char* RegName, EVT VT, in getRegisterByName() argument
18924 MVT VT = Op.getSimpleValueType(); in LowerFLT_ROUNDS_() local
18964 return DAG.getNode((VT.getSizeInBits() < 16 ? in LowerFLT_ROUNDS_()
18965 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal); in LowerFLT_ROUNDS_()
18979 MVT VT = Op.getSimpleValueType(); in LowerVectorCTLZ_AVX512() local
18980 MVT EltVT = VT.getVectorElementType(); in LowerVectorCTLZ_AVX512()
18981 unsigned NumElems = VT.getVectorNumElements(); in LowerVectorCTLZ_AVX512()
18985 assert((VT.is256BitVector() || VT.is128BitVector()) && in LowerVectorCTLZ_AVX512()
18988 MVT NewVT = MVT::getVectorVT(EltVT, 512 / VT.getScalarSizeInBits()); in LowerVectorCTLZ_AVX512()
18995 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, CtlzNode, in LowerVectorCTLZ_AVX512()
19011 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi); in LowerVectorCTLZ_AVX512()
19022 SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode); in LowerVectorCTLZ_AVX512()
19023 SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT); in LowerVectorCTLZ_AVX512()
19025 return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta); in LowerVectorCTLZ_AVX512()
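Note: LowerVectorCTLZ_AVX512 above finishes by truncating a 32-bit CTLZ result and subtracting Delta = 32 - EltBits. Assuming the narrow elements were zero-extended to i32 beforehand (the widening lines are only partially visible in the listing), the identity in use is ctlz_n(x) = ctlz_32(zext(x)) - (32 - n), checked here for i8:

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  static int ctlz32(uint32_t X) {
    int N = 0;
    for (uint32_t Bit = 0x80000000u; Bit && !(X & Bit); Bit >>= 1)
      ++N;
    return N;                                 // 32 when X == 0
  }

  static int ctlz8ViaCtlz32(uint8_t X) {
    int Delta = 32 - 8;                       // 32 - EltVT.getSizeInBits()
    return ctlz32((uint32_t)X) - Delta;       // TRUNCATE(ctlz32) - Delta
  }

  int main() {
    for (int V = 0; V < 256; ++V) {
      int Expected = 8;                       // reference ctlz for i8
      for (int B = 7; B >= 0; --B)
        if (V & (1 << B)) { Expected = 7 - B; break; }
      assert(ctlz8ViaCtlz32((uint8_t)V) == Expected);
    }
    puts("ok");
  }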
19032 MVT VT = Op.getSimpleValueType(); in LowerVectorCTLZInRegLUT() local
19033 int NumElts = VT.getVectorNumElements(); in LowerVectorCTLZInRegLUT()
19034 int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8); in LowerVectorCTLZInRegLUT()
19072 while (CurrVT != VT) { in LowerVectorCTLZInRegLUT()
19101 MVT VT = Op.getSimpleValueType(); in LowerVectorCTLZ() local
19108 if (VT.is256BitVector() && !Subtarget.hasInt256()) { in LowerVectorCTLZ()
19109 unsigned NumElems = VT.getVectorNumElements(); in LowerVectorCTLZ()
19115 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, in LowerVectorCTLZ()
19126 MVT VT = Op.getSimpleValueType(); in LowerCTLZ() local
19127 MVT OpVT = VT; in LowerCTLZ()
19128 unsigned NumBits = VT.getSizeInBits(); in LowerCTLZ()
19132 if (VT.isVector()) in LowerCTLZ()
19136 if (VT == MVT::i8) { in LowerCTLZ()
19161 if (VT == MVT::i8) in LowerCTLZ()
19167 MVT VT = Op.getSimpleValueType(); in LowerCTTZ() local
19168 unsigned NumBits = VT.getScalarSizeInBits(); in LowerCTTZ()
19171 if (VT.isVector()) { in LowerCTTZ()
19173 SDValue Zero = DAG.getConstant(0, dl, VT); in LowerCTTZ()
19176 SDValue LSB = DAG.getNode(ISD::AND, dl, VT, N0, in LowerCTTZ()
19177 DAG.getNode(ISD::SUB, dl, VT, Zero, N0)); in LowerCTTZ()
19181 SDValue WidthMinusOne = DAG.getConstant(NumBits - 1, dl, VT); in LowerCTTZ()
19182 return DAG.getNode(ISD::SUB, dl, VT, WidthMinusOne, in LowerCTTZ()
19183 DAG.getNode(ISD::CTLZ, dl, VT, LSB)); in LowerCTTZ()
19187 SDValue One = DAG.getConstant(1, dl, VT); in LowerCTTZ()
19188 return DAG.getNode(ISD::CTPOP, dl, VT, in LowerCTTZ()
19189 DAG.getNode(ISD::SUB, dl, VT, LSB, One)); in LowerCTTZ()
19196 SDVTList VTs = DAG.getVTList(VT, MVT::i32); in LowerCTTZ()
19202 DAG.getConstant(NumBits, dl, VT), in LowerCTTZ()
19206 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops); in LowerCTTZ()
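Note: LowerCTTZ above synthesizes count-trailing-zeros from other primitives: isolate the lowest set bit as LSB = x & (0 - x), then take either (NumBits - 1) - CTLZ(LSB) or CTPOP(LSB - 1). A scalar check of both identities for 32-bit values:

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  static int ctpop32(uint32_t X) {
    int N = 0;
    while (X) { N += X & 1; X >>= 1; }
    return N;
  }
  static int ctlz32(uint32_t X) {
    int N = 0;
    for (uint32_t B = 0x80000000u; B && !(X & B); B >>= 1) ++N;
    return N;
  }
  static int cttzRef(uint32_t X) {
    if (!X) return 32;
    int N = 0;
    while (!(X & 1)) { ++N; X >>= 1; }
    return N;
  }

  int main() {
    for (uint32_t X : {1u, 2u, 3u, 8u, 40u, 0x80000000u, 0xFFFFFFFFu}) {
      uint32_t LSB = X & (0u - X);               // LSB = AND(x, SUB(0, x))
      assert(cttzRef(X) == 31 - ctlz32(LSB));    // (NumBits - 1) - CTLZ(LSB)
      assert(cttzRef(X) == ctpop32(LSB - 1));    // CTPOP(LSB - 1)
    }
    assert(ctpop32((0u & (0u - 0u)) - 1) == 32); // CTPOP form also handles 0
    puts("ok");
  }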
19212 MVT VT = Op.getSimpleValueType(); in Lower256IntArith() local
19214 assert(VT.is256BitVector() && VT.isInteger() && in Lower256IntArith()
19217 unsigned NumElems = VT.getVectorNumElements(); in Lower256IntArith()
19230 MVT EltVT = VT.getVectorElementType(); in Lower256IntArith()
19233 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, in Lower256IntArith()
19241 MVT VT = Op.getSimpleValueType(); in Lower512IntArith() local
19243 assert(VT.is512BitVector() && VT.isInteger() && in Lower512IntArith()
19246 unsigned NumElems = VT.getVectorNumElements(); in Lower512IntArith()
19259 MVT EltVT = VT.getVectorElementType(); in Lower512IntArith()
19262 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, in Lower512IntArith()
19297 MVT VT = Op.getSimpleValueType(); in LowerMUL() local
19299 if (VT == MVT::i1) in LowerMUL()
19300 return DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), Op.getOperand(1)); in LowerMUL()
19303 if (VT.is256BitVector() && !Subtarget.hasInt256()) in LowerMUL()
19311 if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) { in LowerMUL()
19315 if (VT == MVT::v64i8) in LowerMUL()
19321 if (VT == MVT::v32i8 && !Subtarget.hasBWI()) in LowerMUL()
19324 MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements()); in LowerMUL()
19326 ISD::TRUNCATE, dl, VT, in LowerMUL()
19332 assert(VT == MVT::v16i8 && in LowerMUL()
19344 ALo = DAG.getVectorShuffle(VT, dl, A, A, ShufMask); in LowerMUL()
19345 BLo = DAG.getVectorShuffle(VT, dl, B, B, ShufMask); in LowerMUL()
19357 AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask); in LowerMUL()
19358 BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask); in LowerMUL()
19364 AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask); in LowerMUL()
19365 BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask); in LowerMUL()
19377 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi); in LowerMUL()
19381 if (VT == MVT::v4i32) { in LowerMUL()
19387 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask); in LowerMUL()
19388 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask); in LowerMUL()
19395 Evens = DAG.getBitcast(VT, Evens); in LowerMUL()
19396 Odds = DAG.getBitcast(VT, Odds); in LowerMUL()
19401 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask); in LowerMUL()
19404 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) && in LowerMUL()
19418 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG); in LowerMUL()
19419 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG); in LowerMUL()
19424 MVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 : in LowerMUL()
19425 (VT == MVT::v4i64) ? MVT::v8i32 : MVT::v16i32; in LowerMUL()
19431 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B); in LowerMUL()
19434 AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B); in LowerMUL()
19435 AhiBlo = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AhiBlo, 32, DAG); in LowerMUL()
19438 AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi); in LowerMUL()
19439 AloBhi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AloBhi, 32, DAG); in LowerMUL()
19442 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi); in LowerMUL()
19443 return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo); in LowerMUL()
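Note: the 64-bit-lane branch of LowerMUL above builds the product from three PMULUDQ-style 32x32->64 multiplies plus shifts and adds. The same decomposition on scalars, with all arithmetic modulo 2^64 as in the vector lanes (pmuludq here is a scalar stand-in for the instruction; the hi*hi term only affects bits above 64 and is dropped):

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  static uint64_t pmuludq(uint64_t A, uint64_t B) {
    return (A & 0xFFFFFFFFull) * (B & 0xFFFFFFFFull); // low 32 x low 32 -> 64
  }

  static uint64_t mul64ViaPmuludq(uint64_t A, uint64_t B) {
    uint64_t Ahi = A >> 32, Bhi = B >> 32;            // VSRLI by 32
    uint64_t AloBlo = pmuludq(A, B);
    uint64_t AhiBlo = pmuludq(Ahi, B) << 32;          // PMULUDQ then VSHLI 32
    uint64_t AloBhi = pmuludq(A, Bhi) << 32;
    return AloBlo + AloBhi + AhiBlo;                  // ADD, ADD
  }

  int main() {
    uint64_t Vals[] = {0, 1, 0xFFFFFFFFull, 0x123456789ABCDEF0ull,
                       0xFFFFFFFFFFFFFFFFull};
    for (uint64_t A : Vals)
      for (uint64_t B : Vals)
        assert(mul64ViaPmuludq(A, B) == A * B);       // low 64 bits agree
    puts("ok");
  }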
19449 MVT VT = Op.getSimpleValueType(); in LowerMULH() local
19452 if (VT.is256BitVector() && !Subtarget.hasInt256()) in LowerMULH()
19456 assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256())) && in LowerMULH()
19473 SDValue Hi = DAG.getIntPtrConstant(VT.getVectorNumElements() / 2, dl); in LowerMULH()
19475 if (VT == MVT::v32i8) { in LowerMULH()
19496 return DAG.getNode(X86ISD::PACKUS, dl, VT, in LowerMULH()
19508 return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi); in LowerMULH()
19511 assert(VT == MVT::v16i8 && in LowerMULH()
19523 ALo = DAG.getVectorShuffle(VT, dl, A, A, ShufMask); in LowerMULH()
19524 BLo = DAG.getVectorShuffle(VT, dl, B, B, ShufMask); in LowerMULH()
19536 AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask); in LowerMULH()
19537 BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask); in LowerMULH()
19543 AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask); in LowerMULH()
19544 BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask); in LowerMULH()
19557 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi); in LowerMULH()
19562 EVT VT = Op.getValueType(); in LowerWin64_i128OP() local
19563 assert(VT.isInteger() && VT.getSizeInBits() == 128 && in LowerWin64_i128OP()
19609 return DAG.getBitcast(VT, CallInfo.first); in LowerWin64_i128OP()
19615 MVT VT = Op0.getSimpleValueType(); in LowerMUL_LOHI() local
19619 if (VT.is256BitVector() && !Subtarget.hasInt256()) { in LowerMUL_LOHI()
19621 unsigned NumElems = VT.getVectorNumElements(); in LowerMUL_LOHI()
19622 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(), NumElems / 2); in LowerMUL_LOHI()
19630 DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo.getValue(0), Hi.getValue(0)), in LowerMUL_LOHI()
19631 DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo.getValue(1), Hi.getValue(1)) in LowerMUL_LOHI()
19636 assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) || in LowerMUL_LOHI()
19637 (VT == MVT::v8i32 && Subtarget.hasInt256())); in LowerMUL_LOHI()
19653 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0, in LowerMUL_LOHI()
19654 makeArrayRef(&Mask[0], VT.getVectorNumElements())); in LowerMUL_LOHI()
19656 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1, in LowerMUL_LOHI()
19657 makeArrayRef(&Mask[0], VT.getVectorNumElements())); in LowerMUL_LOHI()
19661 MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64; in LowerMUL_LOHI()
19667 SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT, Op0, Op1)); in LowerMUL_LOHI()
19670 SDValue Mul2 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT, Odd0, Odd1)); in LowerMUL_LOHI()
19674 if (VT == MVT::v8i32) { in LowerMUL_LOHI()
19676 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask); in LowerMUL_LOHI()
19678 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask); in LowerMUL_LOHI()
19681 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask); in LowerMUL_LOHI()
19683 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask); in LowerMUL_LOHI()
19691 DAG.getTargetLoweringInfo().getShiftAmountTy(VT, DAG.getDataLayout())); in LowerMUL_LOHI()
19692 SDValue T1 = DAG.getNode(ISD::AND, dl, VT, in LowerMUL_LOHI()
19693 DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1); in LowerMUL_LOHI()
19694 SDValue T2 = DAG.getNode(ISD::AND, dl, VT, in LowerMUL_LOHI()
19695 DAG.getNode(ISD::SRA, dl, VT, Op1, ShAmt), Op0); in LowerMUL_LOHI()
19697 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2); in LowerMUL_LOHI()
19698 Highs = DAG.getNode(ISD::SUB, dl, VT, Highs, Fixup); in LowerMUL_LOHI()
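Note: LowerMUL_LOHI above converts an unsigned high product into a signed one with the T1/T2 fixup, i.e. mulhs(a, b) = mulhu(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0), where each correction term is built as (operand >>arith 31) AND the other operand. A 32-bit scalar check of that identity (assumes arithmetic >> on signed values, as on x86):

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  static uint32_t mulhu32(uint32_t A, uint32_t B) {
    return (uint32_t)(((uint64_t)A * B) >> 32);
  }

  static int32_t mulhsViaFixup(int32_t A, int32_t B) {
    uint32_t Hi = mulhu32((uint32_t)A, (uint32_t)B);
    uint32_t T1 = (uint32_t)(A >> 31) & (uint32_t)B;  // SRA then AND
    uint32_t T2 = (uint32_t)(B >> 31) & (uint32_t)A;
    return (int32_t)(Hi - (T1 + T2));                 // SUB of the fixup
  }

  int main() {
    int32_t Vals[] = {0, 1, -1, 7, -7, INT32_MAX, INT32_MIN};
    for (int32_t A : Vals)
      for (int32_t B : Vals) {
        int32_t Ref = (int32_t)(((int64_t)A * B) >> 32);
        assert(mulhsViaFixup(A, B) == Ref);
      }
    puts("ok");
  }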
19709 static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget &Subtarget, in SupportedVectorShiftWithImm() argument
19711 if (VT.getScalarSizeInBits() < 16) in SupportedVectorShiftWithImm()
19714 if (VT.is512BitVector() && in SupportedVectorShiftWithImm()
19715 (VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI())) in SupportedVectorShiftWithImm()
19718 bool LShift = VT.is128BitVector() || in SupportedVectorShiftWithImm()
19719 (VT.is256BitVector() && Subtarget.hasInt256()); in SupportedVectorShiftWithImm()
19722 (VT != MVT::v2i64 && VT != MVT::v4i64)); in SupportedVectorShiftWithImm()
19729 bool SupportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget &Subtarget, in SupportedVectorShiftWithBaseAmnt() argument
19731 return SupportedVectorShiftWithImm(VT, Subtarget, Opcode); in SupportedVectorShiftWithBaseAmnt()
19736 static bool SupportedVectorVarShift(MVT VT, const X86Subtarget &Subtarget, in SupportedVectorVarShift() argument
19739 if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16) in SupportedVectorVarShift()
19743 if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI()) in SupportedVectorVarShift()
19746 if (VT.is512BitVector() || Subtarget.hasVLX()) in SupportedVectorVarShift()
19749 bool LShift = VT.is128BitVector() || VT.is256BitVector(); in SupportedVectorVarShift()
19750 bool AShift = LShift && VT != MVT::v2i64 && VT != MVT::v4i64; in SupportedVectorVarShift()
19756 MVT VT = Op.getSimpleValueType(); in LowerScalarImmediateShift() local
19765 assert((VT == MVT::v2i64 || VT == MVT::v4i64) && "Unexpected SRA type"); in LowerScalarImmediateShift()
19766 MVT ExVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2); in LowerScalarImmediateShift()
19775 if (VT == MVT::v2i64) in LowerScalarImmediateShift()
19777 if (VT == MVT::v4i64) in LowerScalarImmediateShift()
19785 getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG); in LowerScalarImmediateShift()
19787 if (VT == MVT::v2i64) in LowerScalarImmediateShift()
19789 if (VT == MVT::v4i64) in LowerScalarImmediateShift()
19793 return DAG.getBitcast(VT, Ex); in LowerScalarImmediateShift()
19801 if (SupportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode())) in LowerScalarImmediateShift()
19802 return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG); in LowerScalarImmediateShift()
19805 if ((VT == MVT::v2i64 || (Subtarget.hasInt256() && VT == MVT::v4i64)) && in LowerScalarImmediateShift()
19809 if (VT == MVT::v16i8 || in LowerScalarImmediateShift()
19810 (Subtarget.hasInt256() && VT == MVT::v32i8) || in LowerScalarImmediateShift()
19811 VT == MVT::v64i8) { in LowerScalarImmediateShift()
19812 unsigned NumElts = VT.getVectorNumElements(); in LowerScalarImmediateShift()
19817 return DAG.getNode(ISD::ADD, dl, VT, R, R); in LowerScalarImmediateShift()
19821 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl); in LowerScalarImmediateShift()
19822 if (VT.is512BitVector()) { in LowerScalarImmediateShift()
19823 assert(VT == MVT::v64i8 && "Unexpected element type!"); in LowerScalarImmediateShift()
19825 return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP); in LowerScalarImmediateShift()
19827 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R); in LowerScalarImmediateShift()
19831 if (VT == MVT::v16i8 && Subtarget.hasXOP()) in LowerScalarImmediateShift()
19838 SHL = DAG.getBitcast(VT, SHL); in LowerScalarImmediateShift()
19840 return DAG.getNode(ISD::AND, dl, VT, SHL, in LowerScalarImmediateShift()
19841 DAG.getConstant(uint8_t(-1U << ShiftAmt), dl, VT)); in LowerScalarImmediateShift()
19847 SRL = DAG.getBitcast(VT, SRL); in LowerScalarImmediateShift()
19849 return DAG.getNode(ISD::AND, dl, VT, SRL, in LowerScalarImmediateShift()
19850 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, VT)); in LowerScalarImmediateShift()
19854 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt); in LowerScalarImmediateShift()
19856 SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT); in LowerScalarImmediateShift()
19857 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask); in LowerScalarImmediateShift()
19858 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask); in LowerScalarImmediateShift()
19868 (VT == MVT::v2i64 || (Subtarget.hasInt256() && VT == MVT::v4i64))) { in LowerScalarImmediateShift()
19876 assert(SplatIndex < (int)VT.getVectorNumElements() && in LowerScalarImmediateShift()
19886 VT.getVectorNumElements(); in LowerScalarImmediateShift()
19914 if (SupportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode())) in LowerScalarImmediateShift()
19915 return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG); in LowerScalarImmediateShift()
19926 MVT VT = Op.getSimpleValueType(); in LowerScalarVariableShift() local
19937 if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Op.getOpcode())) { in LowerScalarVariableShift()
19939 MVT EltVT = VT.getVectorElementType(); in LowerScalarVariableShift()
19981 return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, DAG); in LowerScalarVariableShift()
19986 if (!Subtarget.is64Bit() && VT == MVT::v2i64 && in LowerScalarVariableShift()
19991 VT.getVectorNumElements(); in LowerScalarVariableShift()
20001 if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Op.getOpcode())) in LowerScalarVariableShift()
20002 return DAG.getNode(X86OpcV, dl, VT, R, Op.getOperand(1)); in LowerScalarVariableShift()
20009 MVT VT = Op.getSimpleValueType(); in LowerShift() local
20015 assert(VT.isVector() && "Custom lowering only for vector shifts!"); in LowerShift()
20024 if (SupportedVectorVarShift(VT, Subtarget, Op.getOpcode())) in LowerShift()
20030 (VT == MVT::v2i64 || VT == MVT::v4i32 || in LowerShift()
20031 VT == MVT::v8i16 || VT == MVT::v16i8)) { in LowerShift()
20033 SDValue Zero = getZeroVector(VT, Subtarget, DAG, dl); in LowerShift()
20034 Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt); in LowerShift()
20037 return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt); in LowerShift()
20039 return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt); in LowerShift()
20044 if (VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) { in LowerShift()
20046 SDValue Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {0, 0}); in LowerShift()
20047 SDValue Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {1, 1}); in LowerShift()
20048 SDValue R0 = DAG.getNode(Op->getOpcode(), dl, VT, R, Amt0); in LowerShift()
20049 SDValue R1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Amt1); in LowerShift()
20050 return DAG.getVectorShuffle(VT, dl, R0, R1, {0, 3}); in LowerShift()
20056 if ((VT == MVT::v2i64 || (VT == MVT::v4i64 && Subtarget.hasInt256())) && in LowerShift()
20058 SDValue S = DAG.getConstant(APInt::getSignBit(64), dl, VT); in LowerShift()
20059 SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt); in LowerShift()
20060 R = DAG.getNode(ISD::SRL, dl, VT, R, Amt); in LowerShift()
20061 R = DAG.getNode(ISD::XOR, dl, VT, R, M); in LowerShift()
20062 R = DAG.getNode(ISD::SUB, dl, VT, R, M); in LowerShift()
20070 (VT == MVT::v8i16 || VT == MVT::v4i32 || in LowerShift()
20071 (Subtarget.hasInt256() && VT == MVT::v16i16))) { in LowerShift()
20073 MVT SVT = VT.getVectorElementType(); in LowerShift()
20076 unsigned NumElems = VT.getVectorNumElements(); in LowerShift()
20094 SDValue BV = DAG.getBuildVector(VT, dl, Elts); in LowerShift()
20095 return DAG.getNode(ISD::MUL, dl, VT, R, BV); in LowerShift()
20099 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) { in LowerShift()
20100 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT)); in LowerShift()
20102 Op = DAG.getNode(ISD::ADD, dl, VT, Op, in LowerShift()
20103 DAG.getConstant(0x3f800000U, dl, VT)); in LowerShift()
20105 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op); in LowerShift()
20106 return DAG.getNode(ISD::MUL, dl, VT, Op, R); in LowerShift()
20121 if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32)) { in LowerShift()
20127 SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) : Amt->getOperand(2); in LowerShift()
20131 if (VT == MVT::v4i32) { in LowerShift()
20165 DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), dl, VT); in LowerShift()
20166 SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1); in LowerShift()
20168 DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), dl, VT); in LowerShift()
20169 SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2); in LowerShift()
20176 return DAG.getBitcast(VT, Result); in LowerShift()
20185 if (VT == MVT::v4i32) { in LowerShift()
20189 Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0}); in LowerShift()
20190 Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1}); in LowerShift()
20191 Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2}); in LowerShift()
20192 Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3}); in LowerShift()
20211 SDValue Z = getZeroVector(VT, Subtarget, DAG, dl); in LowerShift()
20212 Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1}); in LowerShift()
20213 Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1}); in LowerShift()
20214 Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1}); in LowerShift()
20215 Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1}); in LowerShift()
20218 SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0); in LowerShift()
20219 SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1); in LowerShift()
20220 SDValue R2 = DAG.getNode(Opc, dl, VT, R, Amt2); in LowerShift()
20221 SDValue R3 = DAG.getNode(Opc, dl, VT, R, Amt3); in LowerShift()
20222 SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1}); in LowerShift()
20223 SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7}); in LowerShift()
20224 return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7}); in LowerShift()
20227 if (VT == MVT::v16i8 || in LowerShift()
20228 (VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP())) { in LowerShift()
20229 MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2); in LowerShift()
20236 V0 = DAG.getBitcast(VT, V0); in LowerShift()
20237 V1 = DAG.getBitcast(VT, V1); in LowerShift()
20238 Sel = DAG.getBitcast(VT, Sel); in LowerShift()
20240 DAG.getNode(ISD::VSELECT, dl, VT, Sel, V0, V1)); in LowerShift()
20255 Amt = DAG.getBitcast(VT, Amt); in LowerShift()
20260 DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(4, dl, VT)); in LowerShift()
20261 R = SignBitSelect(VT, Amt, M, R); in LowerShift()
20264 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt); in LowerShift()
20267 M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(2, dl, VT)); in LowerShift()
20268 R = SignBitSelect(VT, Amt, M, R); in LowerShift()
20271 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt); in LowerShift()
20274 M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(1, dl, VT)); in LowerShift()
20275 R = SignBitSelect(VT, Amt, M, R); in LowerShift()
20283 SDValue ALo = DAG.getNode(X86ISD::UNPCKL, dl, VT, DAG.getUNDEF(VT), Amt); in LowerShift()
20284 SDValue AHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, DAG.getUNDEF(VT), Amt); in LowerShift()
20285 SDValue RLo = DAG.getNode(X86ISD::UNPCKL, dl, VT, DAG.getUNDEF(VT), R); in LowerShift()
20286 SDValue RHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, DAG.getUNDEF(VT), R); in LowerShift()
20331 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi); in LowerShift()
20338 if (Subtarget.hasInt256() && VT == MVT::v8i16) { in LowerShift()
20344 return DAG.getNode(ISD::TRUNCATE, dl, VT, in LowerShift()
20348 if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) { in LowerShift()
20350 SDValue Z = getZeroVector(VT, Subtarget, DAG, dl); in LowerShift()
20351 SDValue ALo = DAG.getNode(X86ISD::UNPCKL, dl, VT, Amt, Z); in LowerShift()
20352 SDValue AHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, Amt, Z); in LowerShift()
20353 SDValue RLo = DAG.getNode(X86ISD::UNPCKL, dl, VT, Z, R); in LowerShift()
20354 SDValue RHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, Z, R); in LowerShift()
20363 return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi); in LowerShift()
20366 if (VT == MVT::v8i16) { in LowerShift()
20378 MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2); in LowerShift()
20383 VT, DAG.getNode(ISD::VSELECT, dl, ExtVT, Sel, V0, V1)); in LowerShift()
20389 DAG.getNode(ISD::SRA, dl, VT, Sel, DAG.getConstant(15, dl, VT)); in LowerShift()
20390 return DAG.getNode(ISD::VSELECT, dl, VT, C, V0, V1); in LowerShift()
20398 ISD::OR, dl, VT, in LowerShift()
20399 DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(4, dl, VT)), in LowerShift()
20400 DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(12, dl, VT))); in LowerShift()
20402 Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(12, dl, VT)); in LowerShift()
20406 SDValue M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(8, dl, VT)); in LowerShift()
20410 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt); in LowerShift()
20413 M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(4, dl, VT)); in LowerShift()
20417 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt); in LowerShift()
20420 M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(2, dl, VT)); in LowerShift()
20424 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt); in LowerShift()
20427 M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(1, dl, VT)); in LowerShift()
20433 if (VT.is256BitVector()) in LowerShift()
20441 MVT VT = Op.getSimpleValueType(); in LowerRotate() local
20446 assert(VT.isVector() && "Custom lowering only for vector rotates!"); in LowerRotate()
20454 if (VT.is256BitVector()) in LowerRotate()
20457 assert(VT.is128BitVector() && "Only rotate 128-bit vectors!"); in LowerRotate()
20463 assert(RotateAmt < VT.getScalarSizeInBits() && "Rotation out of range"); in LowerRotate()
20464 return DAG.getNode(X86ISD::VPROTI, DL, VT, R, in LowerRotate()
20470 return DAG.getNode(X86ISD::VPROT, DL, VT, R, Amt); in LowerRotate()
20832 static SDValue LowerHorizontalByteSum(SDValue V, MVT VT, in LowerHorizontalByteSum() argument
20837 MVT EltVT = VT.getVectorElementType(); in LowerHorizontalByteSum()
20842 unsigned VecSize = VT.getSizeInBits(); in LowerHorizontalByteSum()
20851 return DAG.getBitcast(VT, V); in LowerHorizontalByteSum()
20860 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, DL); in LowerHorizontalByteSum()
20861 SDValue Low = DAG.getNode(X86ISD::UNPCKL, DL, VT, V, Zeros); in LowerHorizontalByteSum()
20862 SDValue High = DAG.getNode(X86ISD::UNPCKH, DL, VT, V, Zeros); in LowerHorizontalByteSum()
20878 return DAG.getBitcast(VT, V); in LowerHorizontalByteSum()
20888 SDValue ShifterV = DAG.getConstant(8, DL, VT); in LowerHorizontalByteSum()
20889 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV); in LowerHorizontalByteSum()
20892 return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV); in LowerHorizontalByteSum()
20898 MVT VT = Op.getSimpleValueType(); in LowerVectorCTPOPInRegLUT() local
20899 MVT EltVT = VT.getVectorElementType(); in LowerVectorCTPOPInRegLUT()
20900 unsigned VecSize = VT.getSizeInBits(); in LowerVectorCTPOPInRegLUT()
20949 return LowerHorizontalByteSum(PopCnt, VT, Subtarget, DAG); in LowerVectorCTPOPInRegLUT()
20955 MVT VT = Op.getSimpleValueType(); in LowerVectorCTPOPBitmath() local
20956 assert(VT.is128BitVector() && in LowerVectorCTPOPBitmath()
20959 int VecSize = VT.getSizeInBits(); in LowerVectorCTPOPBitmath()
20960 MVT EltVT = VT.getVectorElementType(); in LowerVectorCTPOPBitmath()
20971 MVT VT = V.getSimpleValueType(); in LowerVectorCTPOPBitmath() local
20972 SDValue ShifterV = DAG.getConstant(Shifter, DL, VT); in LowerVectorCTPOPBitmath()
20973 return DAG.getNode(OpCode, DL, VT, V, ShifterV); in LowerVectorCTPOPBitmath()
20976 MVT VT = V.getSimpleValueType(); in LowerVectorCTPOPBitmath() local
20977 SDValue MaskV = DAG.getConstant(Mask, DL, VT); in LowerVectorCTPOPBitmath()
20978 return DAG.getNode(ISD::AND, DL, VT, V, MaskV); in LowerVectorCTPOPBitmath()
20985 MVT SrlVT = Len > 8 ? VT : MVT::getVectorVT(MVT::i16, VecSize / 16); in LowerVectorCTPOPBitmath()
20991 DAG.getBitcast(VT, GetShift(ISD::SRL, DAG.getBitcast(SrlVT, V), 1)); in LowerVectorCTPOPBitmath()
20993 V = DAG.getNode(ISD::SUB, DL, VT, V, And); in LowerVectorCTPOPBitmath()
20997 Srl = DAG.getBitcast(VT, GetShift(ISD::SRL, DAG.getBitcast(SrlVT, V), 2)); in LowerVectorCTPOPBitmath()
20999 V = DAG.getNode(ISD::ADD, DL, VT, AndLHS, AndRHS); in LowerVectorCTPOPBitmath()
21002 Srl = DAG.getBitcast(VT, GetShift(ISD::SRL, DAG.getBitcast(SrlVT, V), 4)); in LowerVectorCTPOPBitmath()
21003 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, V, Srl); in LowerVectorCTPOPBitmath()
21013 DAG.getBitcast(MVT::getVectorVT(MVT::i8, VecSize / 8), V), VT, Subtarget, in LowerVectorCTPOPBitmath()
21019 MVT VT = Op.getSimpleValueType(); in LowerVectorCTPOP() local
21020 assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) && in LowerVectorCTPOP()
21027 assert(VT.is128BitVector() && "Only 128-bit vectors supported in SSE!"); in LowerVectorCTPOP()
21031 if (VT.is256BitVector() && !Subtarget.hasInt256()) { in LowerVectorCTPOP()
21032 unsigned NumElems = VT.getVectorNumElements(); in LowerVectorCTPOP()
21038 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, in LowerVectorCTPOP()
21043 if (VT.is512BitVector() && !Subtarget.hasBWI()) { in LowerVectorCTPOP()
21044 unsigned NumElems = VT.getVectorNumElements(); in LowerVectorCTPOP()
21050 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, in LowerVectorCTPOP()
21066 MVT VT = Op.getSimpleValueType(); in LowerBITREVERSE_XOP() local
21072 if (!VT.isVector()) { in LowerBITREVERSE_XOP()
21073 MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits()); in LowerBITREVERSE_XOP()
21076 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res, in LowerBITREVERSE_XOP()
21080 MVT SVT = VT.getVectorElementType(); in LowerBITREVERSE_XOP()
21081 int NumElts = VT.getVectorNumElements(); in LowerBITREVERSE_XOP()
21082 int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8; in LowerBITREVERSE_XOP()
21085 if (VT.is256BitVector()) { in LowerBITREVERSE_XOP()
21090 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, in LowerBITREVERSE_XOP()
21095 assert(VT.is128BitVector() && in LowerBITREVERSE_XOP()
21115 return DAG.getBitcast(VT, Res); in LowerBITREVERSE_XOP()
21125 MVT VT = Op.getSimpleValueType(); in LowerBITREVERSE() local
21129 unsigned NumElts = VT.getVectorNumElements(); in LowerBITREVERSE()
21130 assert(VT.getScalarType() == MVT::i8 && in LowerBITREVERSE()
21134 if (VT.is256BitVector() && !Subtarget.hasInt256()) { in LowerBITREVERSE()
21140 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi); in LowerBITREVERSE()
21146 SDValue NibbleMask = DAG.getConstant(0xF, DL, VT); in LowerBITREVERSE()
21147 SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask); in LowerBITREVERSE()
21148 SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT)); in LowerBITREVERSE()
21167 SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts); in LowerBITREVERSE()
21168 SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts); in LowerBITREVERSE()
21169 Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo); in LowerBITREVERSE()
21170 Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi); in LowerBITREVERSE()
21171 return DAG.getNode(ISD::OR, DL, VT, Lo, Hi); in LowerBITREVERSE()
21210 MVT VT = N->getSimpleValueType(0); in lowerAtomicArith() local
21221 RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS); in lowerAtomicArith()
21222 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS, in lowerAtomicArith()
21241 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT(); in LowerATOMIC_STORE() local
21250 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) { in LowerATOMIC_STORE()
21265 MVT VT = Op.getNode()->getSimpleValueType(0); in LowerADDC_ADDE_SUBC_SUBE() local
21268 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) in LowerADDC_ADDE_SUBC_SUBE()
21271 SDVTList VTs = DAG.getVTList(VT, MVT::i32); in LowerADDC_ADDE_SUBC_SUBE()
21407 MVT VT = Src.getSimpleValueType(); in LowerMSCATTER() local
21408 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported scatter op"); in LowerMSCATTER()
21420 if (MemVT.getScalarSizeInBits() < VT.getScalarSizeInBits()) { in LowerMSCATTER()
21425 assert((MemVT == MVT::v2i32 && VT == MVT::v2i64) && in LowerMSCATTER()
21441 VT = MVT::v4i32; in LowerMSCATTER()
21444 unsigned NumElts = VT.getVectorNumElements(); in LowerMSCATTER()
21445 if (!Subtarget.hasVLX() && !VT.is512BitVector() && in LowerMSCATTER()
21471 MVT NewVT = MVT::getVectorVT(VT.getScalarType(), NumElts); in LowerMSCATTER()
21492 MVT VT = Op.getSimpleValueType(); in LowerMLOAD() local
21493 MVT ScalarVT = VT.getScalarType(); in LowerMLOAD()
21497 assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() && in LowerMLOAD()
21507 unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits(); in LowerMLOAD()
21518 SDValue Exract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, in LowerMLOAD()
21529 MVT VT = DataToStore.getSimpleValueType(); in LowerMSTORE() local
21530 MVT ScalarVT = VT.getScalarType(); in LowerMSTORE()
21534 assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() && in LowerMSTORE()
21544 unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits(); in LowerMSTORE()
21561 MVT VT = Op.getSimpleValueType(); in LowerMGATHER() local
21568 unsigned NumElts = VT.getVectorNumElements(); in LowerMGATHER()
21569 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op"); in LowerMGATHER()
21571 if (!Subtarget.hasVLX() && !VT.is512BitVector() && in LowerMGATHER()
21601 MVT NewVT = MVT::getVectorVT(VT.getScalarType(), NumElts); in LowerMGATHER()
21608 SDValue Exract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, in LowerMGATHER()
21841 EVT VT = N->getValueType(0); in ReplaceNodeResults() local
21842 assert(VT == MVT::v2f32 && "Unexpected type (!= v2f32) on FMIN/FMAX."); in ReplaceNodeResults()
21843 SDValue UNDEF = DAG.getUNDEF(VT); in ReplaceNodeResults()
21876 EVT VT = N->getValueType(0); in ReplaceNodeResults() local
21879 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot, in ReplaceNodeResults()
22483 X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { in isFMAFasterThanFMulAndFAdd()
22487 VT = VT.getScalarType(); in isFMAFasterThanFMulAndFAdd()
22489 if (!VT.isSimple()) in isFMAFasterThanFMulAndFAdd()
22492 switch (VT.getSimpleVT().SimpleTy) { in isFMAFasterThanFMulAndFAdd()
22514 EVT VT) const { in isShuffleMaskLegal()
22515 if (!VT.isSimple()) in isShuffleMaskLegal()
22519 if (VT.getSimpleVT().getScalarType() == MVT::i1) in isShuffleMaskLegal()
22523 if (VT.getSimpleVT().getSizeInBits() == 64) in isShuffleMaskLegal()
22528 return isTypeLegal(VT.getSimpleVT()); in isShuffleMaskLegal()
22533 EVT VT) const { in isVectorClearMaskLegal()
22535 return isShuffleMaskLegal(Mask, VT); in isVectorClearMaskLegal()
24679 MVT VT = SVOp->getSimpleValueType(0); in combineShuffle256() local
24680 unsigned NumElems = VT.getVectorNumElements(); in combineShuffle256()
24733 return DAG.getBitcast(VT, ResNode); in combineShuffle256()
24739 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl); in combineShuffle256()
25030 MVT VT = Input.getSimpleValueType(); in combineX86ShuffleChain() local
25062 if (VT.is256BitVector() && NumBaseMaskElts == 2 && in combineX86ShuffleChain()
25066 MVT ShuffleVT = (VT.isFloatingPoint() || !Subtarget.hasAVX2() ? MVT::v4f64 in combineX86ShuffleChain()
25099 (VT.isFloatingPoint() || (VT.is256BitVector() && !Subtarget.hasAVX2())) && in combineX86ShuffleChain()
25109 if (matchUnaryVectorShuffle(VT, Mask, Subtarget, Shuffle, ShuffleVT)) { in combineX86ShuffleChain()
25121 if (matchPermuteVectorShuffle(VT, Mask, Subtarget, Shuffle, ShuffleVT, in combineX86ShuffleChain()
25135 if (matchBinaryVectorShuffle(VT, Mask, Shuffle, ShuffleVT)) { in combineX86ShuffleChain()
25149 ((Subtarget.hasSSE41() && VT.is128BitVector()) || in combineX86ShuffleChain()
25150 (Subtarget.hasAVX() && VT.is256BitVector()))) { in combineX86ShuffleChain()
25196 (VT == MVT::v2f64 || VT == MVT::v4f32)) { in combineX86ShuffleChain()
25262 ((VT.is128BitVector() && Subtarget.hasSSSE3()) || in combineX86ShuffleChain()
25263 (VT.is256BitVector() && Subtarget.hasAVX2()) || in combineX86ShuffleChain()
25264 (VT.is512BitVector() && Subtarget.hasBWI()))) { in combineX86ShuffleChain()
25266 int NumBytes = VT.getSizeInBits() / 8; in combineX86ShuffleChain()
25342 MVT VT = Op.getSimpleValueType(); in combineX86ShufflesRecursively() local
25343 if (!VT.isVector()) in combineX86ShufflesRecursively()
25348 assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() && in combineX86ShufflesRecursively()
25357 assert(VT.getVectorNumElements() == OpMask.size() && in combineX86ShufflesRecursively()
25458 MVT VT = N.getSimpleValueType(); in getPSHUFShuffleMask() local
25463 getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask, IsUnary); in getPSHUFShuffleMask()
25469 if (VT.getSizeInBits() > 128) { in getPSHUFShuffleMask()
25470 int LaneElts = 128 / VT.getScalarSizeInBits(); in getPSHUFShuffleMask()
25472 for (int i = 1, NumLanes = VT.getSizeInBits() / 128; i < NumLanes; ++i) in getPSHUFShuffleMask()
25703 MVT VT = N.getSimpleValueType(); in combineTargetShuffle() local
25728 if (!VT.is128BitVector()) in combineTargetShuffle()
25736 unsigned NumElts = VT.getVectorNumElements(); in combineTargetShuffle()
25743 return DAG.getNode(X86ISD::UNPCKH, DL, VT, N.getOperand(0), ShufOp); in combineTargetShuffle()
25750 assert(VT == V0.getSimpleValueType() && VT == V1.getSimpleValueType() && in combineTargetShuffle()
25763 if (VT == MVT::v2f64) in combineTargetShuffle()
25767 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V0, NewMask); in combineTargetShuffle()
25773 assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32"); in combineTargetShuffle()
25811 assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32"); in combineTargetShuffle()
25822 return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1, in combineTargetShuffle()
25827 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT), in combineTargetShuffle()
25838 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT), in combineTargetShuffle()
25845 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1, in combineTargetShuffle()
25890 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1, in combineTargetShuffle()
25910 assert(VT.getVectorElementType() == MVT::i16 && "Bad word shuffle type!"); in combineTargetShuffle()
25923 MVT DVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2); in combineTargetShuffle()
25929 return DAG.getBitcast(VT, V); in combineTargetShuffle()
25960 V = DAG.getBitcast(VT, D.getOperand(0)); in combineTargetShuffle()
25964 DL, VT, V, V); in combineTargetShuffle()
25991 EVT VT = N->getValueType(0); in combineShuffleToAddSub() local
25992 if ((!Subtarget.hasSSE3() || (VT != MVT::v4f32 && VT != MVT::v2f64)) && in combineShuffleToAddSub()
25993 (!Subtarget.hasAVX() || (VT != MVT::v8f32 && VT != MVT::v4f64))) in combineShuffleToAddSub()
26036 return DAG.getNode(X86ISD::ADDSUB, DL, VT, LHS, RHS); in combineShuffleToAddSub()
26043 EVT VT = N->getValueType(0); in combineShuffle() local
26047 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType())) in combineShuffle()
26052 if (TLI.isTypeLegal(VT)) in combineShuffle()
26057 if (TLI.isTypeLegal(VT) && Subtarget.hasFp256() && VT.is256BitVector() && in combineShuffle()
26083 unsigned NumElts = VT.getVectorNumElements(); in combineShuffle()
26087 TLI.isOperationLegal(Opcode, VT)) { in combineShuffle()
26108 SDValue BC00 = DAG.getBitcast(VT, BC0.getOperand(0)); in combineShuffle()
26109 SDValue BC01 = DAG.getBitcast(VT, BC0.getOperand(1)); in combineShuffle()
26110 SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01); in combineShuffle()
26111 return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, SVOp->getMask()); in combineShuffle()
26120 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) in combineShuffle()
26123 if (SDValue LD = EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true)) in combineShuffle()
26254 EVT VT = N->getValueType(0); in combineBitcast() local
26260 if (VT == MVT::x86mmx && N0.getOpcode() == ISD::BUILD_VECTOR && in combineBitcast()
26265 return DAG.getNode(X86ISD::MMX_MOVW2D, SDLoc(N00), VT, N00); in combineBitcast()
26281 if (((Subtarget.hasSSE1() && VT == MVT::f32) || in combineBitcast()
26282 (Subtarget.hasSSE2() && VT == MVT::f64)) && in combineBitcast()
26285 N0.getOperand(0).getOperand(0).getValueType() == VT) { in combineBitcast()
26287 SDValue FPConst = DAG.getBitcast(VT, N0.getOperand(1)); in combineBitcast()
26288 return DAG.getNode(FPOpcode, SDLoc(N0), VT, N000, FPConst); in combineBitcast()
26318 EVT VT = N->getValueType(0); in combineExtractVectorElt() local
26320 if (VT == MVT::i1 && isa<ConstantSDNode>(N->getOperand(1)) && in combineExtractVectorElt()
26442 EVT VT = LHS.getValueType(); in combineSelect() local
26450 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() && in combineSelect()
26451 VT != MVT::f80 && VT != MVT::f128 && in combineSelect()
26452 (TLI.isTypeLegal(VT) || VT == MVT::v2f32) && in combineSelect()
26454 (Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) { in combineSelect()
26592 if (Subtarget.hasAVX512() && VT.isVector() && CondVT.isVector() && in combineSelect()
26729 return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS); in combineSelect()
26735 if (!TLI.isTypeLegal(VT)) in combineSelect()
26741 ((Subtarget.hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) || in combineSelect()
26742 (Subtarget.hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) { in combineSelect()
26765 return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS); in combineSelect()
26778 X86ISD::SUBUS, DL, VT, OpLHS, in combineSelect()
26779 DAG.getConstant(-OpRHSConst->getAPIntValue(), DL, VT)); in combineSelect()
26792 X86ISD::SUBUS, DL, VT, OpLHS, in combineSelect()
26793 DAG.getConstant(OpRHSConst->getAPIntValue(), DL, VT)); in combineSelect()
26800 if (N->getOpcode() == ISD::VSELECT && CondVT == VT) { in combineSelect()
26813 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) == in combineSelect()
26842 return DAG.getBitcast(VT, Ret); in combineSelect()
26869 if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT)) in combineSelect()
26874 if (VT.getVectorElementType() == MVT::i16) in combineSelect()
26877 if (VT.is128BitVector() && !Subtarget.hasSSE41()) in combineSelect()
26880 if (VT == MVT::v32i8 && !Subtarget.hasAVX2()) in combineSelect()
27396 EVT VT = N->getOperand(0).getValueType(); in canReduceVMulWidth() local
27397 if (VT.getScalarSizeInBits() != 32) in canReduceVMulWidth()
27503 EVT VT = N->getOperand(0).getValueType(); in reduceVMULWidth() local
27507 EVT::getVectorVT(*DAG.getContext(), MVT::i16, VT.getVectorNumElements()); in reduceVMULWidth()
27512 if (VT.getVectorNumElements() >= OpsVT.getVectorNumElements()) { in reduceVMULWidth()
27518 DL, VT, MulLo); in reduceVMULWidth()
27520 MVT ResVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2); in reduceVMULWidth()
27529 SmallVector<int, 16> ShuffleMask(VT.getVectorNumElements()); in reduceVMULWidth()
27530 for (unsigned i = 0; i < VT.getVectorNumElements() / 2; i++) { in reduceVMULWidth()
27532 ShuffleMask[2 * i + 1] = i + VT.getVectorNumElements(); in reduceVMULWidth()
27538 for (unsigned i = 0; i < VT.getVectorNumElements() / 2; i++) { in reduceVMULWidth()
27539 ShuffleMask[2 * i] = i + VT.getVectorNumElements() / 2; in reduceVMULWidth()
27540 ShuffleMask[2 * i + 1] = i + VT.getVectorNumElements() * 3 / 2; in reduceVMULWidth()
27545 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi); in reduceVMULWidth()
27573 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res, in reduceVMULWidth()
27587 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res, in reduceVMULWidth()
27598 EVT VT = N->getValueType(0); in combineMul() local
27599 if (DCI.isBeforeLegalize() && VT.isVector()) in combineMul()
27609 if (VT != MVT::i64 && VT != MVT::i32) in combineMul()
27645 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0), in combineMul()
27648 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0), in combineMul()
27649 DAG.getConstant(MulAmt1, DL, VT)); in combineMul()
27652 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul, in combineMul()
27655 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul, in combineMul()
27656 DAG.getConstant(MulAmt2, DL, VT)); in combineMul()
27660 assert(MulAmt != 0 && MulAmt != (VT == MVT::i64 ? UINT64_MAX : UINT32_MAX) in combineMul()
27665 NewMul = DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0), in combineMul()
27666 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0), in combineMul()
27672 NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getNode(ISD::SHL, DL, VT, in combineMul()
27689 EVT VT = N0.getValueType(); in combineShiftLeft() local
27693 if (VT.isInteger() && !VT.isVector() && in combineShiftLeft()
27724 return DAG.getNode(ISD::AND, DL, VT, N00, DAG.getConstant(Mask, DL, VT)); in combineShiftLeft()
27739 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0); in combineShiftLeft()
27748 EVT VT = N0.getValueType(); in combineShiftRightAlgebraic() local
27749 unsigned Size = VT.getSizeInBits(); in combineShiftRightAlgebraic()
27762 if (!VT.isInteger() || VT.isVector() || N1.getOpcode() != ISD::Constant || in combineShiftRightAlgebraic()
27784 DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT)); in combineShiftRightAlgebraic()
27789 return DAG.getNode(ISD::SHL, DL, VT, NN, in combineShiftRightAlgebraic()
27792 return DAG.getNode(ISD::SRA, DL, VT, NN, in combineShiftRightAlgebraic()
27803 EVT VT = N->getValueType(0); in performShiftToAllZeros() local
27805 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 && in performShiftToAllZeros()
27807 (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16))) in performShiftToAllZeros()
27816 VT.getSimpleVT().getVectorElementType().getSizeInBits(); in performShiftToAllZeros()
27823 return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, DL); in performShiftToAllZeros()
27871 EVT VT = CMP00.getValueType(); in combineCompareEqual() local
27873 if (VT == MVT::f32 || VT == MVT::f64) { in combineCompareEqual()
27955 EVT VT = N->getValueType(0); in combineANDXORWithAllOnesIntoANDNP() local
27960 if (VT != MVT::v2i64 && VT != MVT::v4i64 && in combineANDXORWithAllOnesIntoANDNP()
27961 VT != MVT::v8i64 && VT != MVT::v16i32 && in combineANDXORWithAllOnesIntoANDNP()
27962 VT != MVT::v4i32 && VT != MVT::v8i32) // Legal with VLX in combineANDXORWithAllOnesIntoANDNP()
27980 if (!VT.is256BitVector() || N01->getOpcode() != ISD::INSERT_SUBVECTOR) in combineANDXORWithAllOnesIntoANDNP()
27991 return DAG.getNode(X86ISD::ANDNP, DL, VT, N00, N1); in combineANDXORWithAllOnesIntoANDNP()
28001 EVT VT = N->getValueType(0); in WidenMaskArithmetic() local
28002 if (!VT.is256BitVector()) in WidenMaskArithmetic()
28029 if (WideVT != VT) in WidenMaskArithmetic()
28064 Mask = Mask.zext(VT.getScalarSizeInBits()); in WidenMaskArithmetic()
28065 return DAG.getNode(ISD::AND, DL, VT, in WidenMaskArithmetic()
28066 Op, DAG.getConstant(Mask, DL, VT)); in WidenMaskArithmetic()
28069 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, in WidenMaskArithmetic()
28181 EVT VT = N->getValueType(0); in convertIntLogicToFPLogic() local
28186 ((Subtarget.hasSSE1() && VT == MVT::i32) || in convertIntLogicToFPLogic()
28187 (Subtarget.hasSSE2() && VT == MVT::i64))) { in convertIntLogicToFPLogic()
28194 return DAG.getBitcast(VT, FPLogic); in convertIntLogicToFPLogic()
28257 EVT VT = N->getValueType(0); in combineAnd() local
28264 if (VT != MVT::i32 && VT != MVT::i64) in combineAnd()
28279 if (Shift + MaskSize <= VT.getSizeInBits()) in combineAnd()
28280 return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0), in combineAnd()
28282 VT)); in combineAnd()
28302 EVT VT = N->getValueType(0); in combineLogicBlendIntoPBLENDV() local
28304 if (!((VT == MVT::v2i64) || (VT == MVT::v4i64 && Subtarget.hasInt256()))) in combineLogicBlendIntoPBLENDV()
28397 return DAG.getBitcast(VT, in combineLogicBlendIntoPBLENDV()
28406 MVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8; in combineLogicBlendIntoPBLENDV()
28412 return DAG.getBitcast(VT, Mask); in combineLogicBlendIntoPBLENDV()
28432 EVT VT = N->getValueType(0); in combineOr() local
28434 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64) in combineOr()
28476 unsigned Bits = VT.getSizeInBits(); in combineOr()
28484 return DAG.getNode(Opc, DL, VT, in combineOr()
28493 return DAG.getNode(Opc, DL, VT, in combineOr()
28504 EVT VT = N->getValueType(0); in combineIntegerAbs() local
28508 if (VT.isInteger() && VT.getSizeInBits() == 8) in combineIntegerAbs()
28517 if (VT.isInteger() && N->getOpcode() == ISD::XOR && in combineIntegerAbs()
28523 if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) { in combineIntegerAbs()
28525 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32), in combineIntegerAbs()
28526 DAG.getConstant(0, DL, VT), N0.getOperand(0)); in combineIntegerAbs()
28531 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops); in combineIntegerAbs()
28597 EVT VT = N->getValueType(0); in foldVectorXorShiftIntoCmp() local
28598 if (!VT.isSimple()) in foldVectorXorShiftIntoCmp()
28601 switch (VT.getSimpleVT().SimpleTy) { in foldVectorXorShiftIntoCmp()
28633 return DAG.getNode(X86ISD::PCMPGT, SDLoc(N), VT, Shift.getOperand(0), Ones); in foldVectorXorShiftIntoCmp()
28661 static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG, in detectAVGPattern() argument
28664 if (!VT.isVector() || !VT.isSimple()) in detectAVGPattern()
28667 unsigned NumElems = VT.getVectorNumElements(); in detectAVGPattern()
28669 EVT ScalarVT = VT.getVectorElementType(); in detectAVGPattern()
28683 if (VT.getSizeInBits() > 512) in detectAVGPattern()
28686 if (VT.getSizeInBits() > 256) in detectAVGPattern()
28689 if (VT.getSizeInBits() > 128) in detectAVGPattern()
28741 Operands[0].getOperand(0).getValueType() == VT) { in detectAVGPattern()
28746 Operands[1] = DAG.getNode(ISD::TRUNCATE, DL, VT, Operands[1]); in detectAVGPattern()
28747 return DAG.getNode(X86ISD::AVG, DL, VT, Operands[0].getOperand(0), in detectAVGPattern()
28768 Operands[j].getOperand(0).getValueType() != VT) in detectAVGPattern()
28772 return DAG.getNode(X86ISD::AVG, DL, VT, Operands[0].getOperand(0), in detectAVGPattern()
28907 EVT VT = ML->getValueType(0); in reduceMaskedLoadToScalarLoad() local
28908 EVT EltVT = VT.getVectorElementType(); in reduceMaskedLoadToScalarLoad()
28914 SDValue Insert = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, ML->getSrc0(), in reduceMaskedLoadToScalarLoad()
28926 EVT VT = ML->getValueType(0); in combineMaskedLoadConstantMask() local
28931 unsigned NumElts = VT.getVectorNumElements(); in combineMaskedLoadConstantMask()
28936 SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(), in combineMaskedLoadConstantMask()
28938 SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd, ML->getSrc0()); in combineMaskedLoadConstantMask()
28953 SDValue NewML = DAG.getMaskedLoad(VT, DL, ML->getChain(), ML->getBasePtr(), in combineMaskedLoadConstantMask()
28954 ML->getMask(), DAG.getUNDEF(VT), in combineMaskedLoadConstantMask()
28957 SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML, ML->getSrc0()); in combineMaskedLoadConstantMask()
28979 EVT VT = Mld->getValueType(0); in combineMaskedLoad() local
28980 unsigned NumElems = VT.getVectorNumElements(); in combineMaskedLoad()
28984 assert(LdVT != VT && "Cannot extend to the same type"); in combineMaskedLoad()
28985 unsigned ToSz = VT.getVectorElementType().getSizeInBits(); in combineMaskedLoad()
28992 assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits()); in combineMaskedLoad()
28997 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); in combineMaskedLoad()
29015 if (Mask.getValueType() == VT) { in combineMaskedLoad()
29029 unsigned MaskNumElts = VT.getVectorNumElements(); in combineMaskedLoad()
29047 SDValue NewVec = DAG.getNode(X86ISD::VSEXT, dl, VT, WideLd); in combineMaskedLoad()
29068 EVT VT = MS->getValue().getValueType(); in reduceMaskedStoreToScalarStore() local
29069 EVT EltVT = VT.getVectorElementType(); in reduceMaskedStoreToScalarStore()
29085 EVT VT = Mst->getValue().getValueType(); in combineMaskedStore() local
29086 unsigned NumElems = VT.getVectorNumElements(); in combineMaskedStore()
29090 assert(StVT != VT && "Cannot truncate to the same type"); in combineMaskedStore()
29091 unsigned FromSz = VT.getVectorElementType().getSizeInBits(); in combineMaskedStore()
29100 if (TLI.isTruncStoreLegal(VT, StVT)) in combineMaskedStore()
29112 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits()); in combineMaskedStore()
29118 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); in combineMaskedStore()
29135 if (Mask.getValueType() == VT) { in combineMaskedStore()
29148 unsigned MaskNumElts = VT.getVectorNumElements(); in combineMaskedStore()
29170 EVT VT = St->getValue().getValueType(); in combineStore() local
29181 if (VT.is256BitVector() && StVT == VT && in combineStore()
29182 TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT, in combineStore()
29185 unsigned NumElems = VT.getVectorNumElements(); in combineStore()
29208 if (St->isTruncatingStore() && VT.isVector()) { in combineStore()
29219 unsigned NumElems = VT.getVectorNumElements(); in combineStore()
29220 assert(StVT != VT && "Cannot truncate to the same type"); in combineStore()
29221 unsigned FromSz = VT.getVectorElementType().getSizeInBits(); in combineStore()
29228 if (TLI.isTruncStoreLegalOrCustom(VT, StVT)) in combineStore()
29239 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits()); in combineStore()
29245 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); in combineStore()
29276 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits()); in combineStore()
29277 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); in combineStore()
29303 if (VT.getSizeInBits() != 64) in combineStore()
29310 if ((VT.isVector() || in combineStore()
29311 (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit())) && in combineStore()
29342 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0)) in combineStore()
29409 if (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit() && in combineStore()
29456 MVT VT = LHS.getSimpleValueType(); in isHorizontalBinOp() local
29458 assert((VT.is128BitVector() || VT.is256BitVector()) && in isHorizontalBinOp()
29463 unsigned NumElts = VT.getVectorNumElements(); in isHorizontalBinOp()
29464 unsigned NumLanes = VT.getSizeInBits()/128; in isHorizontalBinOp()
29555 EVT VT = N->getValueType(0); in combineFaddFsub() local
29562 if (((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) || in combineFaddFsub()
29563 (Subtarget.hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) && in combineFaddFsub()
29566 return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS); in combineFaddFsub()
29714 EVT VT = N->getValueType(0); in combineTruncate() local
29719 if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL)) in combineTruncate()
29724 if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) { in combineTruncate()
29736 EVT VT = N->getValueType(0); in combineFneg() local
29737 EVT SVT = VT.getScalarType(); in combineFneg()
29742 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) in combineFneg()
29750 SDValue Zero = DAG.getConstantFP(0.0, DL, VT); in combineFneg()
29751 return DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0), in combineFneg()
29760 return DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0), in combineFneg()
29763 return DAG.getNode(X86ISD::FNMADD, DL, VT, Arg.getOperand(0), in combineFneg()
29766 return DAG.getNode(X86ISD::FMSUB, DL, VT, Arg.getOperand(0), in combineFneg()
29769 return DAG.getNode(X86ISD::FMADD, DL, VT, Arg.getOperand(0), in combineFneg()
29778 EVT VT = N->getValueType(0); in lowerX86FPLogicOp() local
29779 if (VT.is512BitVector() && !Subtarget.hasDQI()) { in lowerX86FPLogicOp()
29783 MVT IntScalar = MVT::getIntegerVT(VT.getScalarSizeInBits()); in lowerX86FPLogicOp()
29784 MVT IntVT = MVT::getVectorVT(IntScalar, VT.getVectorNumElements()); in lowerX86FPLogicOp()
29797 return DAG.getBitcast(VT, IntOp); in lowerX86FPLogicOp()
29850 EVT VT = N->getValueType(0); in combineFMinNumFMaxNum() local
29851 if (!((Subtarget.hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) || in combineFMinNumFMaxNum()
29852 (Subtarget.hasSSE2() && (VT == MVT::f64 || VT == MVT::v2f64)) || in combineFMinNumFMaxNum()
29853 (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64)))) in combineFMinNumFMaxNum()
29858 if (!VT.isVector() && DAG.getMachineFunction().getFunction()->optForMinSize()) in combineFMinNumFMaxNum()
29865 DAG.getDataLayout(), *DAG.getContext(), VT); in combineFMinNumFMaxNum()
29887 SDValue MinOrMax = DAG.getNode(MinMaxOp, DL, VT, Op1, Op0); in combineFMinNumFMaxNum()
29892 auto SelectOpcode = VT.isVector() ? ISD::VSELECT : ISD::SELECT; in combineFMinNumFMaxNum()
29893 return DAG.getNode(SelectOpcode, DL, VT, IsOp0Nan, Op1, MinOrMax); in combineFMinNumFMaxNum()
29948 EVT VT = N->getValueType(0), OpVT = Op.getValueType(); in combineVZextMovl() local
29950 VT.getVectorElementType().getSizeInBits() == in combineVZextMovl()
29952 return DAG.getBitcast(VT, Op); in combineVZextMovl()
29959 EVT VT = N->getValueType(0); in combineSignExtendInReg() local
29960 if (!VT.isVector()) in combineSignExtendInReg()
29973 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND || in combineSignExtendInReg()
29999 EVT VT = Sext->getValueType(0); in promoteSextBeforeAddNSW() local
30000 if (VT != MVT::i64) in promoteSextBeforeAddNSW()
30033 SDValue NewSext = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(Sext), VT, AddOp0); in promoteSextBeforeAddNSW()
30034 SDValue NewConstant = DAG.getConstant(AddConstant, SDLoc(Add), VT); in promoteSextBeforeAddNSW()
30040 return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewSext, NewConstant, &Flags); in promoteSextBeforeAddNSW()
30055 EVT VT = N->getValueType(0); in getDivRem8() local
30057 if (N0.getResNo() != 1 || InVT != MVT::i8 || VT != MVT::i32) in getDivRem8()
30060 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT); in getDivRem8()
30085 EVT VT = N->getValueType(0); in combineToExtendVectorInReg() local
30086 EVT SVT = VT.getScalarType(); in combineToExtendVectorInReg()
30091 if (!VT.isVector()) in combineToExtendVectorInReg()
30100 if (Subtarget.hasInt256() && DAG.getTargetLoweringInfo().isTypeLegal(VT) && in combineToExtendVectorInReg()
30118 if (VT.getSizeInBits() < 128 && !(128 % VT.getSizeInBits())) { in combineToExtendVectorInReg()
30119 unsigned Scale = 128 / VT.getSizeInBits(); in combineToExtendVectorInReg()
30124 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SExt, in combineToExtendVectorInReg()
30131 if (!Subtarget.hasSSE41() || VT.is128BitVector() || in combineToExtendVectorInReg()
30132 (VT.is256BitVector() && Subtarget.hasInt256())) { in combineToExtendVectorInReg()
30133 SDValue ExOp = ExtendVecSize(DL, N0, VT.getSizeInBits()); in combineToExtendVectorInReg()
30135 ? DAG.getSignExtendVectorInReg(ExOp, DL, VT) in combineToExtendVectorInReg()
30136 : DAG.getZeroExtendVectorInReg(ExOp, DL, VT); in combineToExtendVectorInReg()
30141 if (!Subtarget.hasInt256() && !(VT.getSizeInBits() % 128)) { in combineToExtendVectorInReg()
30142 unsigned NumVecs = VT.getSizeInBits() / 128; in combineToExtendVectorInReg()
30157 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Opnds); in combineToExtendVectorInReg()
30167 EVT VT = N->getValueType(0); in combineSext() local
30176 SDValue Zero = DAG.getConstant(0, DL, VT); in combineSext()
30178 DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), DL, VT); in combineSext()
30179 return DAG.getNode(ISD::SELECT, DL, VT, N0, AllOnes, Zero); in combineSext()
30187 if (Subtarget.hasAVX() && VT.is256BitVector()) in combineSext()
30200 EVT VT = N->getValueType(0); in combineFMA() local
30203 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) in combineFMA()
30206 EVT ScalarVT = VT.getScalarType(); in combineFMA()
30233 return DAG.getNode(Opcode, dl, VT, A, B, C); in combineFMA()
30245 EVT VT = N->getValueType(0); in combineZext() local
30254 return DAG.getNode(ISD::AND, dl, VT, in combineZext()
30255 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, in combineZext()
30257 DAG.getConstant(1, dl, VT)); in combineZext()
30266 return DAG.getNode(ISD::AND, dl, VT, in combineZext()
30267 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, in combineZext()
30269 DAG.getConstant(1, dl, VT)); in combineZext()
30276 if (VT.is256BitVector()) in combineZext()
30293 EVT VT = N->getValueType(0); in combineSetCC() local
30311 if (VT.getScalarType() == MVT::i1 && in combineSetCC()
30329 assert(VT == LHS.getOperand(0).getValueType() && in combineSetCC()
30332 return DAG.getConstant(0, DL, VT); in combineSetCC()
30334 return DAG.getConstant(1, DL, VT); in combineSetCC()
30336 return DAG.getNOT(DL, LHS.getOperand(0), VT); in combineSetCC()
30346 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) in combineSetCC()
30370 SelectionDAG &DAG, MVT VT) { in MaterializeSETB() argument
30371 if (VT == MVT::i8) in MaterializeSETB()
30372 return DAG.getNode(ISD::AND, DL, VT, in MaterializeSETB()
30376 DAG.getConstant(1, DL, VT)); in MaterializeSETB()
30377 assert (VT == MVT::i1 && "Unexpected type for SECCC node"); in MaterializeSETB()
30458 EVT VT = N->getValueType(0); in combineVectorCompareAndMaskUnaryOp() local
30459 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND || in combineVectorCompareAndMaskUnaryOp()
30461 VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits()) in combineVectorCompareAndMaskUnaryOp()
30479 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0)); in combineVectorCompareAndMaskUnaryOp()
30484 SDValue Res = DAG.getBitcast(VT, NewAnd); in combineVectorCompareAndMaskUnaryOp()
30494 EVT VT = N->getValueType(0); in combineUIntToFP() local
30508 return DAG.getNode(ISD::UINT_TO_FP, dl, VT, P); in combineUIntToFP()
30510 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P); in combineUIntToFP()
30525 EVT VT = N->getValueType(0); in combineSIntToFP() local
30536 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P); in combineSIntToFP()
30546 if (VT == MVT::f16 || VT == MVT::f128) in combineSIntToFP()
30549 if (!Ld->isVolatile() && !VT.isVector() && in combineSIntToFP()
30573 EVT VT = N->getValueType(0); in combineADC() local
30575 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT, in combineADC()
30576 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT, in combineADC()
30580 DAG.getConstant(1, DL, VT)); in combineADC()
30631 EVT VT = N->getValueType(0); in detectSADPattern() local
30635 if (!VT.isVector() || !VT.isSimple() || in detectSADPattern()
30636 !(VT.getVectorElementType() == MVT::i32)) in detectSADPattern()
30646 if (VT.getSizeInBits() / 4 > RegSize) in detectSADPattern()
30745 if (VT.getSizeInBits() >= ResVT.getSizeInBits()) in detectSADPattern()
30748 Sad = DAG.getNode(ISD::TRUNCATE, DL, VT, Sad); in detectSADPattern()
30750 if (VT.getSizeInBits() > ResVT.getSizeInBits()) { in detectSADPattern()
30757 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Phi, Res, in detectSADPattern()
30760 return DAG.getNode(ISD::ADD, DL, VT, Sad, Phi); in detectSADPattern()
30770 EVT VT = N->getValueType(0); in combineAdd() local
30775 if (((Subtarget.hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) || in combineAdd()
30776 (Subtarget.hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) && in combineAdd()
30778 return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1); in combineAdd()
30797 EVT VT = Op0.getValueType(); in combineSub() local
30798 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT, in combineSub()
30800 DAG.getConstant(~XorC, SDLoc(Op1), VT)); in combineSub()
30801 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor, in combineSub()
30802 DAG.getConstant(C->getAPIntValue() + 1, SDLoc(N), VT)); in combineSub()
30807 EVT VT = N->getValueType(0); in combineSub() local
30808 if (((Subtarget.hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) || in combineSub()
30809 (Subtarget.hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) && in combineSub()
30811 return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1); in combineSub()
30820 MVT VT = N->getSimpleValueType(0); in combineVZext() local
30821 MVT SVT = VT.getVectorElementType(); in combineVZext()
30825 unsigned InputBits = OpEltVT.getSizeInBits() * VT.getVectorNumElements(); in combineVZext()
30830 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) { in combineVZext()
30841 return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Vals); in combineVZext()
30854 return DAG.getNode(X86ISD::VZEXT, DL, VT, V.getOperand(0)); in combineVZext()
30865 return DAG.getNode(X86ISD::VZEXT, DL, VT, DAG.getBitcast(OpVT, V)); in combineVZext()
30887 return DAG.getNode(X86ISD::VZEXT, DL, VT, Op); in combineVZext()
30900 MVT VT = RHS.getSimpleValueType(); in combineLockSub() local
30907 RHS = DAG.getConstant(-1, DL, VT); in combineLockSub()
30911 {Chain, LHS, RHS}, VT, MMO); in combineLockSub()
30922 EVT VT = N->getValueType(0); in combineTestM() local
30925 return DAG.getNode(X86ISD::TESTM, DL, VT, in combineTestM()
30931 MVT VT = N->getSimpleValueType(0); in combineVectorCompare() local
30936 return getOnesVector(VT, Subtarget, DAG, DL); in combineVectorCompare()
30938 return getZeroVector(VT, Subtarget, DAG, DL); in combineVectorCompare()
31037 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const { in isTypeDesirableForOp()
31038 if (!isTypeLegal(VT)) in isTypeDesirableForOp()
31040 if (VT != MVT::i16) in isTypeDesirableForOp()
31078 EVT VT = Op.getValueType(); in IsDesirableToPromoteOp() local
31079 if (VT != MVT::i16) in IsDesirableToPromoteOp()
31633 MVT VT) const { in getRegForInlineAsmConstraint()
31645 if (VT == MVT::i32 || VT == MVT::f32) in getRegForInlineAsmConstraint()
31647 if (VT == MVT::i16) in getRegForInlineAsmConstraint()
31649 if (VT == MVT::i8 || VT == MVT::i1) in getRegForInlineAsmConstraint()
31651 if (VT == MVT::i64 || VT == MVT::f64) in getRegForInlineAsmConstraint()
31657 if (VT == MVT::i32 || VT == MVT::f32) in getRegForInlineAsmConstraint()
31659 if (VT == MVT::i16) in getRegForInlineAsmConstraint()
31661 if (VT == MVT::i8 || VT == MVT::i1) in getRegForInlineAsmConstraint()
31663 if (VT == MVT::i64) in getRegForInlineAsmConstraint()
31668 if (VT == MVT::i8 || VT == MVT::i1) in getRegForInlineAsmConstraint()
31670 if (VT == MVT::i16) in getRegForInlineAsmConstraint()
31672 if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget.is64Bit()) in getRegForInlineAsmConstraint()
31676 if (VT == MVT::i8 || VT == MVT::i1) in getRegForInlineAsmConstraint()
31678 if (VT == MVT::i16) in getRegForInlineAsmConstraint()
31680 if (VT == MVT::i32 || !Subtarget.is64Bit()) in getRegForInlineAsmConstraint()
31686 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT)) in getRegForInlineAsmConstraint()
31688 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT)) in getRegForInlineAsmConstraint()
31700 switch (VT.SimpleTy) { in getRegForInlineAsmConstraint()
31739 Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); in getRegForInlineAsmConstraint()
31784 if (Res.second->hasType(VT) || VT == MVT::Other) in getRegForInlineAsmConstraint()
31796 unsigned Size = VT.getSizeInBits(); in getRegForInlineAsmConstraint()
31818 if (VT == MVT::f32 || VT == MVT::i32) in getRegForInlineAsmConstraint()
31820 else if (VT == MVT::f64 || VT == MVT::i64) in getRegForInlineAsmConstraint()
31822 else if (X86::VR128RegClass.hasType(VT)) in getRegForInlineAsmConstraint()
31824 else if (X86::VR256RegClass.hasType(VT)) in getRegForInlineAsmConstraint()
31826 else if (X86::VR512RegClass.hasType(VT)) in getRegForInlineAsmConstraint()
31866 bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeSet Attr) const { in isIntDivCheap() argument
31876 return OptSize && !VT.isVector(); in isIntDivCheap()