/external/llvm/test/CodeGen/AArch64/

D | arm64-vfloatintrinsics.ll
  253  %v2f64 = type <2 x double>
  255  define %v2f64 @test_v2f64.sqrt(%v2f64 %a) {
  257    %1 = call %v2f64 @llvm.sqrt.v2f64(%v2f64 %a)
  258    ret %v2f64 %1
  261  define %v2f64 @test_v2f64.powi(%v2f64 %a, i32 %b) {
  263    %1 = call %v2f64 @llvm.powi.v2f64(%v2f64 %a, i32 %b)
  264    ret %v2f64 %1
  267  define %v2f64 @test_v2f64.sin(%v2f64 %a) {
  269    %1 = call %v2f64 @llvm.sin.v2f64(%v2f64 %a)
  270    ret %v2f64 %1
  [all …]
D | arm64-vminmaxnm.ll
  20   …%vmaxnm2.i = tail call <2 x double> @llvm.aarch64.neon.fmaxnm.v2f64(<2 x double> %a, <2 x double> …
  41   …%vminnm2.i = tail call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> %a, <2 x double> …
  45   declare <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double>, <2 x double>) nounwind readnone
  48   declare <2 x double> @llvm.aarch64.neon.fmaxnm.v2f64(<2 x double>, <2 x double>) nounwind readnone
  56   %max = call double @llvm.aarch64.neon.fmaxnmv.f64.v2f64(<2 x double> %in)
  63   %min = call double @llvm.aarch64.neon.fminnmv.f64.v2f64(<2 x double> %in)
  67   declare double @llvm.aarch64.neon.fmaxnmv.f64.v2f64(<2 x double>)
  68   declare double @llvm.aarch64.neon.fminnmv.f64.v2f64(<2 x double>)
D | arm64-vcvt.ll
  26   %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtas.v2i64.v2f64(<2 x double> %A)
  32   declare <2 x i64> @llvm.aarch64.neon.fcvtas.v2i64.v2f64(<2 x double>) nounwind readnone
  57   %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtau.v2i64.v2f64(<2 x double> %A)
  63   declare <2 x i64> @llvm.aarch64.neon.fcvtau.v2i64.v2f64(<2 x double>) nounwind readnone
  88   %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtms.v2i64.v2f64(<2 x double> %A)
  94   declare <2 x i64> @llvm.aarch64.neon.fcvtms.v2i64.v2f64(<2 x double>) nounwind readnone
  119  %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtmu.v2i64.v2f64(<2 x double> %A)
  125  declare <2 x i64> @llvm.aarch64.neon.fcvtmu.v2i64.v2f64(<2 x double>) nounwind readnone
  150  %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtps.v2i64.v2f64(<2 x double> %A)
  156  declare <2 x i64> @llvm.aarch64.neon.fcvtps.v2i64.v2f64(<2 x double>) nounwind readnone
  [all …]
D | arm64-fminv.ll
  20   %min = call double @llvm.aarch64.neon.fminv.f64.v2f64(<2 x double> %in)
  26   declare double @llvm.aarch64.neon.fminv.f64.v2f64(<2 x double>)
  45   %max = call double @llvm.aarch64.neon.fmaxv.f64.v2f64(<2 x double> %in)
  51   declare double @llvm.aarch64.neon.fmaxv.f64.v2f64(<2 x double>)
  70   %minnm = call double @llvm.aarch64.neon.fminnmv.f64.v2f64(<2 x double> %in)
  76   declare double @llvm.aarch64.neon.fminnmv.f64.v2f64(<2 x double>)
  95   %maxnm = call double @llvm.aarch64.neon.fmaxnmv.f64.v2f64(<2 x double> %in)
  101  declare double @llvm.aarch64.neon.fmaxnmv.f64.v2f64(<2 x double>)
D | sincospow-vector-expansion.ll
  8    %1 = call <2 x double> @llvm.cos.v2f64(<2 x double> %v1)
  17   %1 = call <2 x double> @llvm.sin.v2f64(<2 x double> %v1)
  26   %1 = call <2 x double> @llvm.pow.v2f64(<2 x double> %v1, <2 x double> %v2)
  31   declare <2 x double> @llvm.cos.v2f64(<2 x double>)
  32   declare <2 x double> @llvm.sin.v2f64(<2 x double>)
  33   declare <2 x double> @llvm.pow.v2f64(<2 x double>, <2 x double>)
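As the file name suggests, these tests cover expansion of vector trig/pow calls: ARM has no vector libm routines, so each lane ends up in a scalar library call. A minimal C++ sketch of what that expansion amounts to (cos_v2f64 is an invented name, not an LLVM or libm symbol):

  #include <cmath>

  // Illustrative sketch only: legalizing @llvm.cos.v2f64 on a target with
  // no vector cosine boils down to one scalar libm call per lane.
  void cos_v2f64(const double In[2], double Out[2]) {
    Out[0] = std::cos(In[0]);  // lane 0
    Out[1] = std::cos(In[1]);  // lane 1
  }
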
D | arm64-vcvt_n.ll
  35   %vcvt_n1 = tail call <2 x double> @llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64(<2 x i64> %a, i32 12)
  40   %vcvt_n1 = tail call <2 x double> @llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64(<2 x i64> %a, i32 9)
  48   declare <2 x double> @llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64(<2 x i64>, i32) nounwind readnone
  49   declare <2 x double> @llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64(<2 x i64>, i32) nounwind readnone
/external/llvm/test/CodeGen/ARM/

D | vfloatintrinsics.ll
  255  %v2f64 = type <2 x double>
  257  define %v2f64 @test_v2f64.sqrt(%v2f64 %a) {
  259    %1 = call %v2f64 @llvm.sqrt.v2f64(%v2f64 %a)
  260    ret %v2f64 %1
  263  define %v2f64 @test_v2f64.powi(%v2f64 %a, i32 %b) {
  265    %1 = call %v2f64 @llvm.powi.v2f64(%v2f64 %a, i32 %b)
  266    ret %v2f64 %1
  269  define %v2f64 @test_v2f64.sin(%v2f64 %a) {
  271    %1 = call %v2f64 @llvm.sin.v2f64(%v2f64 %a)
  272    ret %v2f64 %1
  [all …]
/external/llvm/lib/Target/ARM/

D | ARMCallingConv.td
  26   // Handle all vector types as either f64 or v2f64.
  28   CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
  30   // f64 and v2f64 are passed in adjacent GPRs, possibly split onto the stack
  31   CCIfType<[f64, v2f64], CCCustom<"CC_ARM_APCS_Custom_f64">>,
  38   CCIfType<[v2f64], CCAssignToStack<16, 4>>
  45   // Handle all vector types as either f64 or v2f64.
  47   CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
  49   CCIfType<[f64, v2f64], CCCustom<"RetCC_ARM_APCS_Custom_f64">>,
  59   // Handle all vector types as either f64 or v2f64.
  61   CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
  [all …]
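The comments in this file compress a fair amount of behavior: 128-bit vectors are first bitconverted to v2f64, an f64 occupies two adjacent GPRs, and whatever does not fit in r0-r3 is split onto the stack. Below is a toy walk-through of that allocation order; ToyCCState and its methods are invented for illustration and are not LLVM's CCState API, whose real logic lives in the CC_ARM_APCS_Custom_f64 hook named above.

  #include <cstdio>

  // Toy stand-in for the allocation state CCState tracks: which APCS
  // argument GPR (r0-r3) or stack slot receives the next 32-bit word.
  struct ToyCCState {
    int NextGPR = 0;
    int StackOffset = 0;

    void assignWord(const char *What) {
      if (NextGPR < 4)
        std::printf("%s word -> r%d\n", What, NextGPR++);
      else {
        std::printf("%s word -> [sp, #%d]\n", What, StackOffset);
        StackOffset += 4;
      }
    }
    // An f64 is two adjacent 32-bit words.
    void assignF64(const char *What) { assignWord(What); assignWord(What); }
    // A v2f64 is handled as two consecutive f64 halves, so one value can
    // start in GPRs and finish on the stack ("split onto the stack").
    void assignV2F64(const char *What) { assignF64(What); assignF64(What); }
  };

  int main() {
    ToyCCState CC;
    CC.assignF64("f64 %a");     // r0, r1
    CC.assignV2F64("v2f64 %b"); // r2, r3, then [sp, #0] and [sp, #4]
  }
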
D | ARMTargetTransformInfo.cpp
  191  { ISD::FP_ROUND, MVT::v2f64, 2 }, in getCastInstrCost()
  271  { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, in getCastInstrCost()
  272  { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, in getCastInstrCost()
  274  { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 }, in getCastInstrCost()
  275  { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 }, in getCastInstrCost()
  276  { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 3 }, in getCastInstrCost()
  277  { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 3 }, in getCastInstrCost()
  278  { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, in getCastInstrCost()
  279  { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, in getCastInstrCost()
  281  { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 }, in getCastInstrCost()
  [all …]
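Each row above pairs an ISD opcode with destination and source MVTs plus an estimated instruction count, and getCastInstrCost() returns the matching row's cost or a generic estimate on a miss. A self-contained sketch of that lookup pattern follows; the names here are invented, while LLVM's actual tables use CostTblEntry-style structs and helpers:

  // Invented illustration of the cost-table shape shown above.
  struct ConversionCostRow {
    unsigned Opcode; // e.g. ISD::SINT_TO_FP
    unsigned DstTy;  // e.g. MVT::v2f64
    unsigned SrcTy;  // e.g. MVT::v2i32
    unsigned Cost;   // estimated number of machine instructions
  };

  unsigned lookupConversionCost(const ConversionCostRow *Table, unsigned Size,
                                unsigned Opcode, unsigned DstTy,
                                unsigned SrcTy, unsigned DefaultCost) {
    for (unsigned I = 0; I != Size; ++I)
      if (Table[I].Opcode == Opcode && Table[I].DstTy == DstTy &&
          Table[I].SrcTy == SrcTy)
        return Table[I].Cost; // first matching row wins
    return DefaultCost;       // no entry: fall back to the generic estimate
  }
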
D | ARMCallingConv.h
  64   if (LocVT == MVT::v2f64 && in CC_ARM_APCS_Custom_f64()
  118  if (LocVT == MVT::v2f64 && in CC_ARM_AAPCS_Custom_f64()
  150  if (LocVT == MVT::v2f64 && !f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State)) in RetCC_ARM_APCS_Custom_f64()
  208  case MVT::v2f64: in CC_ARM_AAPCS_Custom_HA()
  240  if (LocVT.SimpleTy == MVT::v2f64 || LocVT.SimpleTy == MVT::i32) { in CC_ARM_AAPCS_Custom_HA()
/external/llvm/lib/Target/PowerPC/

D | PPCInstrVSX.td
  58   [(set v2f64:$XT, (load xoaddr:$src))]>;
  79   [(store v2f64:$XT, xoaddr:$dst)]>;
  100  [(set v2f64:$XT, (fadd v2f64:$XA, v2f64:$XB))]>;
  110  [(set v2f64:$XT, (fmul v2f64:$XA, v2f64:$XB))]>;
  127  [(set v2f64:$XT, (fsub v2f64:$XA, v2f64:$XB))]>;
  203  [(set v2f64:$XT, (fma v2f64:$XA, v2f64:$XB, v2f64:$XTi))]>,
  235  [(set v2f64:$XT, (fma v2f64:$XA, v2f64:$XB, (fneg v2f64:$XTi)))]>,
  267  [(set v2f64:$XT, (fneg (fma v2f64:$XA, v2f64:$XB, v2f64:$XTi)))]>,
  299  [(set v2f64:$XT, (fneg (fma v2f64:$XA, v2f64:$XB, (fneg v2f64:$XTi))))]>,
  355  [(set v2f64:$XT, (fdiv v2f64:$XA, v2f64:$XB))]>;
  [all …]
/external/llvm/test/CodeGen/Mips/msa/

D | basic_operations_float.ll
  5    @v2f64 = global <2 x double> <double 0.0, double 0.0>
  44   store volatile <2 x double> <double 0.0, double 0.0>, <2 x double>*@v2f64
  47   …volatile <2 x double> <double 72340172838076673.0, double 72340172838076673.0>, <2 x double>*@v2f64
  51   …ore volatile <2 x double> <double 281479271743489.0, double 281479271743489.0>, <2 x double>*@v2f64
  55   store volatile <2 x double> <double 4294967297.0, double 4294967297.0>, <2 x double>*@v2f64
  59   store volatile <2 x double> <double 1.0, double 1.0>, <2 x double>*@v2f64
  63   store volatile <2 x double> <double 1.0, double 31.0>, <2 x double>*@v2f64
  67   store volatile <2 x double> <double 3.0, double 4.0>, <2 x double>*@v2f64
  97   store volatile <2 x double> %3, <2 x double>*@v2f64
  183  %1 = load <2 x double>* @v2f64
  [all …]
/external/llvm/lib/Target/AArch64/

D | AArch64TargetTransformInfo.cpp
  310  { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, in getCastInstrCost()
  313  { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, in getCastInstrCost()
  330  { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 }, in getCastInstrCost()
  331  { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 }, in getCastInstrCost()
  332  { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, in getCastInstrCost()
  333  { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 }, in getCastInstrCost()
  334  { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 }, in getCastInstrCost()
  335  { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, in getCastInstrCost()
  341  { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 }, in getCastInstrCost()
  344  { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 }, in getCastInstrCost()
  [all …]
D | AArch64CallingConvention.td
  27   CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,
  33   CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v16i8],
  43   // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
  64   CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64],
  72   CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64],
  78   CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,
  84   CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v16i8],
  98   CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64],
  109  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,
  118  // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
  [all …]
/external/llvm/test/CodeGen/X86/

D | vec_floor.ll
  8    %t = call <2 x double> @llvm.floor.v2f64(<2 x double> %p)
  11   declare <2 x double> @llvm.floor.v2f64(<2 x double> %p)
  44   %t = call <2 x double> @llvm.ceil.v2f64(<2 x double> %p)
  47   declare <2 x double> @llvm.ceil.v2f64(<2 x double> %p)
  80   %t = call <2 x double> @llvm.trunc.v2f64(<2 x double> %p)
  83   declare <2 x double> @llvm.trunc.v2f64(<2 x double> %p)
  116  %t = call <2 x double> @llvm.rint.v2f64(<2 x double> %p)
  119  declare <2 x double> @llvm.rint.v2f64(<2 x double> %p)
  152  %t = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %p)
  155  declare <2 x double> @llvm.nearbyint.v2f64(<2 x double> %p)
D | vec_round.ll
  13   %tmp = call <2 x double> @llvm.round.v2f64(<2 x double> undef)
  19   declare <2 x double> @llvm.round.v2f64(<2 x double>) #0
/external/llvm/test/CodeGen/PowerPC/

D | vsx-fma-m.ll
  125  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %a)
  127  %1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %e, <2 x double> %a)
  143  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %a)
  145  %1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %e, <2 x double> %a)
  148  %2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %f, <2 x double> %a)
  167  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %a)
  169  %1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %e, <2 x double> %a)
  170  %2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %1)
  173  %3 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %f, <2 x double> %a)
  202  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %a)
  [all …]
D | vec_rounding.ll
  9    declare <2 x double> @llvm.floor.v2f64(<2 x double> %p)
  12   %t = call <2 x double> @llvm.floor.v2f64(<2 x double> %p)
  31   declare <2 x double> @llvm.ceil.v2f64(<2 x double> %p)
  34   %t = call <2 x double> @llvm.ceil.v2f64(<2 x double> %p)
  53   declare <2 x double> @llvm.trunc.v2f64(<2 x double> %p)
  56   %t = call <2 x double> @llvm.trunc.v2f64(<2 x double> %p)
  75   declare <2 x double> @llvm.nearbyint.v2f64(<2 x double> %p)
  78   %t = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %p)
/external/llvm/test/Transforms/SLPVectorizer/X86/

D | call.ll
  14   ; CHECK: call <2 x double> @llvm.sin.v2f64
  35   ; CHECK: call <2 x double> @llvm.cos.v2f64
  56   ; CHECK: call <2 x double> @llvm.pow.v2f64
  78   ; CHECK: call <2 x double> @llvm.exp2.v2f64
  122  ; CHECK: declare <2 x double> @llvm.sin.v2f64(<2 x double>) #0
  123  ; CHECK: declare <2 x double> @llvm.cos.v2f64(<2 x double>) #0
  124  ; CHECK: declare <2 x double> @llvm.pow.v2f64(<2 x double>, <2 x double>) #0
  125  ; CHECK: declare <2 x double> @llvm.exp2.v2f64(<2 x double>) #0
/external/llvm/include/llvm/CodeGen/

D | MachineValueType.h
  101  v2f64 = 51, // 2 x f64 enumerator
  213  SimpleTy == MVT::v4f32 || SimpleTy == MVT::v2f64); in is128BitVector()
  306  case v2f64: in getVectorElementType()
  350  case v2f64: return 2; in getVectorNumElements()
  415  case v2f64: return 128; in getSizeInBits()
  562  if (NumElements == 2) return MVT::v2f64; in getVectorVT()
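The hits above show v2f64 from several angles: its enumerator value, and the switch cases that report its element type, lane count, and bit width. A short sketch tying those queries together (describeV2F64 is a made-up helper; the MVT calls are the ones matched above):

  #include "llvm/CodeGen/MachineValueType.h"
  #include <cassert>

  using namespace llvm;

  // Exercises the MachineValueType.h queries matched in this search.
  void describeV2F64() {
    MVT VT = MVT::getVectorVT(MVT::f64, 2); // hits the NumElements == 2 case
    assert(VT == MVT::v2f64);
    assert(VT.getVectorElementType() == MVT::f64);
    assert(VT.getVectorNumElements() == 2);
    assert(VT.getSizeInBits() == 128);      // 2 x 64-bit lanes
    assert(VT.is128BitVector());
  }
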
/external/llvm/test/Transforms/BBVectorize/

D | simple-int.ll
  39   ; CHECK: %Y1 = call <2 x double> @llvm.fma.v2f64(<2 x double> %X1, <2 x double> %X1.v.i0.2, <2 x do…
  65   ; CHECK: %Y1 = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> %X1, <2 x double> %X1.v.i0.2, <2 …
  89   ; CHECK: %Y1 = call <2 x double> @llvm.cos.v2f64(<2 x double> %X1)
  114  ; CHECK: %Y1 = call <2 x double> @llvm.powi.v2f64(<2 x double> %X1, i32 %P)
  155  ; CHECK: %Y1 = call <2 x double> @llvm.round.v2f64(<2 x double> %X1)
  181  ; CHECK: %Y1 = call <2 x double> @llvm.copysign.v2f64(<2 x double> %X1, <2 x double> %Y1.v.i1.2)
  206  ; CHECK: %Y1 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %X1)
  231  ; CHECK: %Y1 = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %X1)
  256  ; CHECK: %Y1 = call <2 x double> @llvm.rint.v2f64(<2 x double> %X1)
  281  ; CHECK: %Y1 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %X1)
  [all …]
/external/llvm/lib/Target/X86/

D | X86InstrSSE.td
  326  def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
  327  (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64)>;
  338  def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (iPTR 0))),
  339  (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm))>;
  351  def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (iPTR 0)),
  369  def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
  380  def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
  385  def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
  390  def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
  395  def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
  [all …]
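Note that the bitconvert patterns above select to the same VR128 register and emit no instruction: a bitcast between 128-bit vector types only re-tags the bits. A C++ analogy, illustrative only (the helper below is not LLVM code):

  #include <cstdint>
  #include <cstring>

  // What (v2i64 (bitconvert (v2f64 ...))) means at the value level: the
  // same 128 bits reinterpreted, hence the instruction-free pattern.
  void bitcast_v2f64_to_v2i64(const double In[2], uint64_t Out[2]) {
    std::memcpy(Out, In, sizeof(double) * 2);
  }
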
D | X86InstrFMA.td
  103  loadv4f64, X86Fmadd, v2f64, v4f64>, VEX_W;
  105  loadv4f64, X86Fmsub, v2f64, v4f64>, VEX_W;
  108  v2f64, v4f64>, VEX_W;
  111  v2f64, v4f64>, VEX_W;
  123  loadv4f64, X86Fnmadd, v2f64, v4f64>, VEX_W;
  125  loadv2f64, loadv4f64, X86Fnmsub, v2f64,
  377  defm VFMADDPD4 : fma4p<0x69, "vfmaddpd", X86Fmadd, v2f64, v4f64,
  379  defm VFMSUBPD4 : fma4p<0x6D, "vfmsubpd", X86Fmsub, v2f64, v4f64,
  381  defm VFNMADDPD4 : fma4p<0x79, "vfnmaddpd", X86Fnmadd, v2f64, v4f64,
  383  defm VFNMSUBPD4 : fma4p<0x7D, "vfnmsubpd", X86Fnmsub, v2f64, v4f64,
  [all …]
/external/mesa3d/src/gallium/drivers/radeon/

D | AMDILISelLowering.cpp
  65   (int)MVT::v2f64, in InitAMDILLowering()
  93   (int)MVT::v2f64, in InitAMDILLowering()
  189  setOperationAction(ISD::FADD, MVT::v2f64, Expand); in InitAMDILLowering()
  190  setOperationAction(ISD::FSUB, MVT::v2f64, Expand); in InitAMDILLowering()
  191  setOperationAction(ISD::FMUL, MVT::v2f64, Expand); in InitAMDILLowering()
  192  setOperationAction(ISD::FP_ROUND_INREG, MVT::v2f64, Expand); in InitAMDILLowering()
  193  setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand); in InitAMDILLowering()
  197  setOperationAction(ISD::TRUNCATE, MVT::v2f64, Expand); in InitAMDILLowering()
  198  setOperationAction(ISD::SIGN_EXTEND, MVT::v2f64, Expand); in InitAMDILLowering()
  199  setOperationAction(ISD::ZERO_EXTEND, MVT::v2f64, Expand); in InitAMDILLowering()
  [all …]
/external/chromium_org/third_party/mesa/src/src/gallium/drivers/radeon/

D | AMDILISelLowering.cpp
  65   (int)MVT::v2f64, in InitAMDILLowering()
  93   (int)MVT::v2f64, in InitAMDILLowering()
  189  setOperationAction(ISD::FADD, MVT::v2f64, Expand); in InitAMDILLowering()
  190  setOperationAction(ISD::FSUB, MVT::v2f64, Expand); in InitAMDILLowering()
  191  setOperationAction(ISD::FMUL, MVT::v2f64, Expand); in InitAMDILLowering()
  192  setOperationAction(ISD::FP_ROUND_INREG, MVT::v2f64, Expand); in InitAMDILLowering()
  193  setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand); in InitAMDILLowering()
  197  setOperationAction(ISD::TRUNCATE, MVT::v2f64, Expand); in InitAMDILLowering()
  198  setOperationAction(ISD::SIGN_EXTEND, MVT::v2f64, Expand); in InitAMDILLowering()
  199  setOperationAction(ISD::ZERO_EXTEND, MVT::v2f64, Expand); in InitAMDILLowering()
  [all …]