/external/llvm-project/llvm/test/CodeGen/AArch64/ |
D | bitcast.ll | 3 ; PR23065: SCALAR_TO_VECTOR implies the top elements 1 to N-1 of the N-element vector are undefined.
|
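The PR23065 comment in bitcast.ll above states the contract of the SCALAR_TO_VECTOR node: only element 0 of the result is defined, and the remaining lanes may hold anything, which is what allows bitcast-heavy patterns to be folded. As a hedged illustration (the function name is made up, and whether the DAG actually forms a SCALAR_TO_VECTOR here is target- and combine-dependent), IR of this shape is the usual source of the node:

    ; Only lane 0 carries a defined value; lane 1 remains undef, matching the
    ; SCALAR_TO_VECTOR guarantee described in the comment above.
    define <2 x i64> @scalar_to_vector_example(i64 %x) {
      %v = insertelement <2 x i64> undef, i64 %x, i32 0
      ret <2 x i64> %v
    }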
/external/llvm/test/CodeGen/AArch64/ |
D | bitcast.ll | 3 ; PR23065: SCALAR_TO_VECTOR implies the top elements 1 to N-1 of the N-element vector are undefined.
|
/external/llvm/include/llvm/CodeGen/ |
D | ISDOpcodes.h | 316 SCALAR_TO_VECTOR, enumerator
|
/external/swiftshader/third_party/llvm-10.0/llvm/include/llvm/CodeGen/ |
D | ISDOpcodes.h | 429 SCALAR_TO_VECTOR, enumerator
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | scalar_to_vector.ll | 32 ; Getting a SCALAR_TO_VECTOR seems to be tricky. These cases managed
|
/external/llvm-project/llvm/include/llvm/CodeGen/ |
D | ISDOpcodes.h | 558 SCALAR_TO_VECTOR, enumerator
|
/external/llvm-project/llvm/lib/CodeGen/SelectionDAG/ |
D | LegalizeVectorTypes.cpp | 59 case ISD::SCALAR_TO_VECTOR: R = ScalarizeVecRes_SCALAR_TO_VECTOR(N); break; in ScalarizeVectorResult()
    275 ISD::SCALAR_TO_VECTOR, DL, OtherVT, SDValue(ScalarNode, OtherNo)); in ScalarizeVecRes_OverflowOp()
    670 return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Op); in ScalarizeVecOp_UnaryOp()
    687 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res); in ScalarizeVecOp_UnaryOp_StrictFP()
    753 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Res); in ScalarizeVecOp_VSETCC()
    784 return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res); in ScalarizeVecOp_FP_ROUND()
    799 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res); in ScalarizeVecOp_STRICT_FP_ROUND()
    813 return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res); in ScalarizeVecOp_FP_EXTEND()
    828 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res); in ScalarizeVecOp_STRICT_FP_EXTEND()
    895 case ISD::SCALAR_TO_VECTOR: in SplitVectorResult()
    [all …]
|
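The LegalizeVectorTypes.cpp hits above are the type legalizer's answers to a SCALAR_TO_VECTOR whose vector type is not legal: scalarize it (ScalarizeVecRes_SCALAR_TO_VECTOR simply yields the scalar operand), split it, or widen it. A minimal sketch, assuming a target on which <1 x double> is not a legal type (the function name is illustrative):

    ; A one-element vector gets scalarized by the type legalizer, so the
    ; SCALAR_TO_VECTOR produced for this insertelement collapses back into
    ; the plain double operand.
    define <1 x double> @scalarized(double %x) {
      %v = insertelement <1 x double> undef, double %x, i32 0
      ret <1 x double> %v
    }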
D | LegalizeDAG.cpp | 410 SDValue ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, in ExpandINSERT_VECTOR_ELT()
    1890 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, V); in ExpandBVWithShuffles()
    1998 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Node->getOperand(0)); in ExpandBUILD_VECTOR()
    2053 SDValue Vec1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value1); in ExpandBUILD_VECTOR()
    2056 Vec2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value2); in ExpandBUILD_VECTOR()
    3166 case ISD::SCALAR_TO_VECTOR: in ExpandNode()
    4886 case ISD::SCALAR_TO_VECTOR: { in PromoteNode()
|
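The ExpandBUILD_VECTOR hits above are the generic fallback for assembling a vector from scalars: a BUILD_VECTOR whose only defined element is the low one becomes a single SCALAR_TO_VECTOR, and one with at most two distinct values becomes two SCALAR_TO_VECTOR nodes (Value1 and Value2) blended by a vector shuffle, provided the target accepts the shuffle mask. A sketch of IR with exactly two distinct element values; whether this generic path or a target-specific lowering handles it depends on the target:

    ; Two distinct scalars, %a and %b, repeated across four lanes; the generic
    ; expansion can place each in lane 0 of its own SCALAR_TO_VECTOR and then
    ; shuffle the two vectors together.
    define <4 x float> @two_values(float %a, float %b) {
      %v0 = insertelement <4 x float> undef, float %a, i32 0
      %v1 = insertelement <4 x float> %v0, float %b, i32 1
      %v2 = insertelement <4 x float> %v1, float %a, i32 2
      %v3 = insertelement <4 x float> %v2, float %b, i32 3
      ret <4 x float> %v3
    }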
/external/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/SelectionDAG/ |
D | LegalizeVectorTypes.cpp | 58 case ISD::SCALAR_TO_VECTOR: R = ScalarizeVecRes_SCALAR_TO_VECTOR(N); break; in ScalarizeVectorResult()
    262 ISD::SCALAR_TO_VECTOR, DL, OtherVT, SDValue(ScalarNode, OtherNo)); in ScalarizeVecRes_OverflowOp()
    655 return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Op); in ScalarizeVecOp_UnaryOp()
    672 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res); in ScalarizeVecOp_UnaryOp_StrictFP()
    738 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Res); in ScalarizeVecOp_VSETCC()
    768 return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res); in ScalarizeVecOp_FP_ROUND()
    783 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res); in ScalarizeVecOp_STRICT_FP_ROUND()
    838 case ISD::SCALAR_TO_VECTOR: SplitVecRes_SCALAR_TO_VECTOR(N, Lo, Hi); break; in SplitVectorResult()
    1489 Lo = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoVT, N->getOperand(0)); in SplitVecRes_SCALAR_TO_VECTOR()
    2694 case ISD::SCALAR_TO_VECTOR: Res = WidenVecRes_SCALAR_TO_VECTOR(N); break; in WidenVectorResult()
    [all …]
|
D | LegalizeDAG.cpp | 406 SDValue ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, in ExpandINSERT_VECTOR_ELT()
    1833 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, V); in ExpandBVWithShuffles()
    1941 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Node->getOperand(0)); in ExpandBUILD_VECTOR()
    1996 SDValue Vec1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value1); in ExpandBUILD_VECTOR()
    1999 Vec2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value2); in ExpandBUILD_VECTOR()
    3025 case ISD::SCALAR_TO_VECTOR: in ExpandNode()
    4660 case ISD::SCALAR_TO_VECTOR: { in PromoteNode()
|
D | SelectionDAGDumper.cpp | 285 case ISD::SCALAR_TO_VECTOR: return "scalar_to_vector"; in getOperationName()
|
/external/llvm/lib/CodeGen/SelectionDAG/ |
D | LegalizeVectorTypes.cpp | 61 case ISD::SCALAR_TO_VECTOR: R = ScalarizeVecRes_SCALAR_TO_VECTOR(N); break; in ScalarizeVectorResult()
    558 return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res); in ScalarizeVecOp_FP_ROUND()
    603 case ISD::SCALAR_TO_VECTOR: SplitVecRes_SCALAR_TO_VECTOR(N, Lo, Hi); break; in SplitVectorResult()
    1038 Lo = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoVT, N->getOperand(0)); in SplitVecRes_SCALAR_TO_VECTOR()
    2066 case ISD::SCALAR_TO_VECTOR: Res = WidenVecRes_SCALAR_TO_VECTOR(N); break; in WidenVectorResult()
    2932 return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), in WidenVecRes_SCALAR_TO_VECTOR()
    3468 SDValue VecOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NewVecVT,LdOps[Start]); in BuildVectorFromScalar()
    3526 SDValue VecOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NewVecVT, LdOp); in GenWidenVectorLoads()
|
D | SelectionDAGDumper.cpp | 223 case ISD::SCALAR_TO_VECTOR: return "scalar_to_vector"; in getOperationName()
|
D | LegalizeDAG.cpp | 367 SDValue ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, in ExpandINSERT_VECTOR_ELT()
    1751 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, V); in ExpandBVWithShuffles()
    1859 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Node->getOperand(0)); in ExpandBUILD_VECTOR()
    1914 SDValue Vec1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value1); in ExpandBUILD_VECTOR()
    1917 Vec2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value2); in ExpandBUILD_VECTOR()
    2985 case ISD::SCALAR_TO_VECTOR: in ExpandNode()
    4366 case ISD::SCALAR_TO_VECTOR: { in PromoteNode()
|
/external/llvm/lib/Target/X86/ |
D | X86ISelLowering.cpp | 765 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom); in X86TargetLowering()
    766 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom); in X86TargetLowering()
    1110 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); in X86TargetLowering()
    1400 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); in X86TargetLowering()
    1442 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v32i16, Custom); in X86TargetLowering()
    1443 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v64i8, Custom); in X86TargetLowering()
    2137 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, in LowerReturn()
    3119 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg); in LowerCall()
    4784 MaskNode.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR) { in getTargetShuffleMaskIndices()
    5252 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) in getShuffleScalarElt()
    [all …]
|
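The v16i8 and v8i16 Custom entries above mark the element sizes x86 cannot move straight from a general-purpose register into lane 0 of an XMM register with a single SSE2 instruction; my understanding (an assumption, not taken from these hits) is that the custom lowering widens the scalar to 32 bits first and then uses an ordinary 32-bit GPR-to-XMM move. IR that would exercise that path:

    ; A byte-sized SCALAR_TO_VECTOR; v16i8 is marked Custom above because
    ; SSE2 has no direct byte insert, so the lowering is expected (assumed
    ; here) to widen %x to i32 before moving it into the vector register.
    define <16 x i8> @byte_to_vector(i8 %x) {
      %v = insertelement <16 x i8> undef, i8 %x, i32 0
      ret <16 x i8> %v
    }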
/external/llvm-project/llvm/lib/Target/X86/ |
D | X86ISelLowering.cpp | 947 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); in X86TargetLowering()
    1377 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); in X86TargetLowering()
    1692 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); in X86TargetLowering()
    1971 setTargetDAGCombine(ISD::SCALAR_TO_VECTOR); in X86TargetLowering()
    2729 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, in LowerReturn()
    2969 return DAG.getNode(ISD::SCALAR_TO_VECTOR, Dl, MVT::v1i1, ValReturned); in lowerRegToMasks()
    3307 ? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val) in LowerMemArgument()
    4055 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg); in LowerCall()
    6644 Op.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR && in getTargetConstantBitsFromNode()
    7329 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR && in getTargetShuffleAndZeroables()
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | scalar_to_vector.ll | 185 ; Getting a SCALAR_TO_VECTOR seems to be tricky. These cases managed
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/ |
D | X86ISelLowering.cpp | 940 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); in X86TargetLowering()
    1349 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); in X86TargetLowering()
    1619 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); in X86TargetLowering()
    1788 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v32i16, Custom); in X86TargetLowering()
    1789 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v64i8, Custom); in X86TargetLowering()
    1985 setTargetDAGCombine(ISD::SCALAR_TO_VECTOR); in X86TargetLowering()
    2729 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, in LowerReturn()
    2961 return DAG.getNode(ISD::SCALAR_TO_VECTOR, Dl, MVT::v1i1, ValReturned); in lowerRegToMasks()
    3289 ? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val) in LowerMemArgument()
    3957 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg); in LowerCall()
    [all …]
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/ |
D | AMDGPUISelDAGToDAG.cpp | 436 SDValue TiedIn = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Lo); in matchLoadD16FromBuildVector()
    716 assert(N->getOpcode() == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts); in SelectBuildVector()
    788 case ISD::SCALAR_TO_VECTOR: in Select()
    2787 case ISD::SCALAR_TO_VECTOR: in Select()
|
/external/llvm/lib/Target/PowerPC/ |
D | README_ALTIVEC.txt | 61 We currently codegen SCALAR_TO_VECTOR as a store of the scalar to a 16-byte
|
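The README_ALTIVEC.txt note above (its sentence is cut off in this listing) describes a memory-based lowering: the scalar operand of a SCALAR_TO_VECTOR is stored out and the vector value is read back, rather than moved register to register. A sketch of the kind of input that would hit it on an AltiVec target; whether current PowerPC codegen still lowers this through memory (newer CPUs have direct GPR-to-vector moves) is not claimed here:

    ; An i32 in a general-purpose register moved into lane 0 of a vector;
    ; per the note above, the AltiVec-era lowering of the resulting
    ; SCALAR_TO_VECTOR went through a store rather than staying in registers.
    define <4 x i32> @gpr_to_vector(i32 %x) {
      %v = insertelement <4 x i32> undef, i32 %x, i32 0
      ret <4 x i32> %v
    }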
/external/llvm-project/llvm/lib/Target/PowerPC/ |
D | README_ALTIVEC.txt | 56 We currently codegen SCALAR_TO_VECTOR as a store of the scalar to a 16-byte
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/PowerPC/ |
D | README_ALTIVEC.txt | 61 We currently codegen SCALAR_TO_VECTOR as a store of the scalar to a 16-byte
|
/external/llvm-project/llvm/lib/Target/AMDGPU/ |
D | AMDGPUISelDAGToDAG.cpp | 441 SDValue TiedIn = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Lo); in matchLoadD16FromBuildVector()
    701 assert(N->getOpcode() == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts); in SelectBuildVector()
    774 case ISD::SCALAR_TO_VECTOR: in Select()
    3054 case ISD::SCALAR_TO_VECTOR: in Select()
|
/external/llvm/lib/Target/AMDGPU/ |
D | AMDGPUISelDAGToDAG.cpp | 286 case ISD::SCALAR_TO_VECTOR: in Select()
    347 assert(Opc == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts); in Select()
|
/external/llvm/lib/Target/SystemZ/ |
D | SystemZISelLowering.cpp | 319 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); in SystemZTargetLowering()
    381 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal); in SystemZTargetLowering()
    382 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal); in SystemZTargetLowering()
    3962 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value); in buildScalarToVector()
    4298 if (isOperationLegal(ISD::SCALAR_TO_VECTOR, VT) && isScalarToVector(Op)) in lowerBUILD_VECTOR()
    4322 if ((Index == 0 && Op0.getOpcode() == ISD::SCALAR_TO_VECTOR) || in lowerVECTOR_SHUFFLE()
    4467 if ((Index == 0 && VSNOp0.getOpcode() == ISD::SCALAR_TO_VECTOR) || in lowerShift()
    4571 case ISD::SCALAR_TO_VECTOR: in LowerOperation()
|