/external/llvm/test/CodeGen/AArch64/ |
D | arm64-vfloatintrinsics.ll |
    253  %v2f64 = type <2 x double>
    255  define %v2f64 @test_v2f64.sqrt(%v2f64 %a) {
    257  %1 = call %v2f64 @llvm.sqrt.v2f64(%v2f64 %a)
    258  ret %v2f64 %1
    261  define %v2f64 @test_v2f64.powi(%v2f64 %a, i32 %b) {
    263  %1 = call %v2f64 @llvm.powi.v2f64(%v2f64 %a, i32 %b)
    264  ret %v2f64 %1
    267  define %v2f64 @test_v2f64.sin(%v2f64 %a) {
    269  %1 = call %v2f64 @llvm.sin.v2f64(%v2f64 %a)
    270  ret %v2f64 %1
    [all …]
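For orientation, each hit above is part of a tiny wrapper function around an overloaded intrinsic. A self-contained sketch of the same pattern (the declare is added here for completeness; it is not one of the hits):

    %v2f64 = type <2 x double>

    declare %v2f64 @llvm.sqrt.v2f64(%v2f64)

    define %v2f64 @test_v2f64.sqrt(%v2f64 %a) {
      ; lowers to a vector square root or a libcall, depending on the target
      %1 = call %v2f64 @llvm.sqrt.v2f64(%v2f64 %a)
      ret %v2f64 %1
    }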
|
D | arm64-vcvt.ll |
    26  %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtas.v2i64.v2f64(<2 x double> %A)
    32  declare <2 x i64> @llvm.aarch64.neon.fcvtas.v2i64.v2f64(<2 x double>) nounwind readnone
    57  %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtau.v2i64.v2f64(<2 x double> %A)
    63  declare <2 x i64> @llvm.aarch64.neon.fcvtau.v2i64.v2f64(<2 x double>) nounwind readnone
    88  %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtms.v2i64.v2f64(<2 x double> %A)
    94  declare <2 x i64> @llvm.aarch64.neon.fcvtms.v2i64.v2f64(<2 x double>) nounwind readnone
    119  %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtmu.v2i64.v2f64(<2 x double> %A)
    125  declare <2 x i64> @llvm.aarch64.neon.fcvtmu.v2i64.v2f64(<2 x double>) nounwind readnone
    150  %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtps.v2i64.v2f64(<2 x double> %A)
    156  declare <2 x i64> @llvm.aarch64.neon.fcvtps.v2i64.v2f64(<2 x double>) nounwind readnone
    [all …]
|
D | arm64-fminv.ll |
    20  %min = call double @llvm.aarch64.neon.fminv.f64.v2f64(<2 x double> %in)
    26  declare double @llvm.aarch64.neon.fminv.f64.v2f64(<2 x double>)
    45  %max = call double @llvm.aarch64.neon.fmaxv.f64.v2f64(<2 x double> %in)
    51  declare double @llvm.aarch64.neon.fmaxv.f64.v2f64(<2 x double>)
    70  %minnm = call double @llvm.aarch64.neon.fminnmv.f64.v2f64(<2 x double> %in)
    76  declare double @llvm.aarch64.neon.fminnmv.f64.v2f64(<2 x double>)
    95  %maxnm = call double @llvm.aarch64.neon.fmaxnmv.f64.v2f64(<2 x double> %in)
    101  declare double @llvm.aarch64.neon.fmaxnmv.f64.v2f64(<2 x double>)
|
/external/llvm/test/CodeGen/ARM/ |
D | vfloatintrinsics.ll |
    255  %v2f64 = type <2 x double>
    257  define %v2f64 @test_v2f64.sqrt(%v2f64 %a) {
    259  %1 = call %v2f64 @llvm.sqrt.v2f64(%v2f64 %a)
    260  ret %v2f64 %1
    263  define %v2f64 @test_v2f64.powi(%v2f64 %a, i32 %b) {
    265  %1 = call %v2f64 @llvm.powi.v2f64(%v2f64 %a, i32 %b)
    266  ret %v2f64 %1
    269  define %v2f64 @test_v2f64.sin(%v2f64 %a) {
    271  %1 = call %v2f64 @llvm.sin.v2f64(%v2f64 %a)
    272  ret %v2f64 %1
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/ARM/ |
D | vfloatintrinsics.ll |
    255  %v2f64 = type <2 x double>
    257  define %v2f64 @test_v2f64.sqrt(%v2f64 %a) {
    259  %1 = call %v2f64 @llvm.sqrt.v2f64(%v2f64 %a)
    260  ret %v2f64 %1
    263  define %v2f64 @test_v2f64.powi(%v2f64 %a, i32 %b) {
    265  %1 = call %v2f64 @llvm.powi.v2f64(%v2f64 %a, i32 %b)
    266  ret %v2f64 %1
    269  define %v2f64 @test_v2f64.sin(%v2f64 %a) {
    271  %1 = call %v2f64 @llvm.sin.v2f64(%v2f64 %a)
    272  ret %v2f64 %1
    [all …]
|
/external/llvm-project/llvm/test/Assembler/ |
D | invalid-vecreduce.ll |
    4  ; CHECK-NEXT: float (double, <2 x double>)* @llvm.vector.reduce.fadd.f32.f64.v2f64
    6  %res = call float @llvm.vector.reduce.fadd.f32.f64.v2f64(double %acc, <2 x double> %in)
    11  ; CHECK-NEXT: double (float, <2 x double>)* @llvm.vector.reduce.fadd.f64.f32.v2f64
    13  %res = call double @llvm.vector.reduce.fadd.f64.f32.v2f64(float %acc, <2 x double> %in)
    18  ; CHECK-NEXT: <2 x double> (double, <2 x double>)* @llvm.vector.reduce.fadd.v2f64.f64.v2f64
    20  %res = call <2 x double> @llvm.vector.reduce.fadd.v2f64.f64.v2f64(double %acc, <2 x double> %in)
    25  ; CHECK-NEXT: double (<2 x double>, <2 x double>)* @llvm.vector.reduce.fadd.f64.v2f64.v2f64
    27  %res = call double @llvm.vector.reduce.fadd.f64.v2f64.v2f64(<2 x double> %acc, <2 x double> %in)
    31  declare float @llvm.vector.reduce.fadd.f32.f64.v2f64(double %acc, <2 x double> %in)
    32  declare double @llvm.vector.reduce.fadd.f64.f32.v2f64(float %acc, <2 x double> %in)
    [all …]
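This test feeds the assembler deliberately invalid signatures; for contrast, a well-formed v2f64 use of the reduction intrinsic (a sketch, not a line from the file) looks like:

    declare double @llvm.vector.reduce.fadd.v2f64(double, <2 x double>)

    define double @reduce_ok(double %acc, <2 x double> %in) {
      ; without the reassoc flag this is a strictly ordered reduction seeded with %acc
      %res = call double @llvm.vector.reduce.fadd.v2f64(double %acc, <2 x double> %in)
      ret double %res
    }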
|
/external/llvm-project/llvm/test/CodeGen/AArch64/ |
D | arm64-vfloatintrinsics.ll |
    882  %v2f64 = type <2 x double>
    886  define %v2f64 @test_v2f64.sqrt(%v2f64 %a) {
    889  %1 = call %v2f64 @llvm.sqrt.v2f64(%v2f64 %a)
    890  ret %v2f64 %1
    893  define %v2f64 @test_v2f64.powi(%v2f64 %a, i32 %b) {
    895  %1 = call %v2f64 @llvm.powi.v2f64(%v2f64 %a, i32 %b)
    896  ret %v2f64 %1
    901  define %v2f64 @test_v2f64.sin(%v2f64 %a) {
    904  %1 = call %v2f64 @llvm.sin.v2f64(%v2f64 %a)
    905  ret %v2f64 %1
    [all …]
|
D | arm64_32-neon.ll |
    33  declare {%vec, %vec} @llvm.aarch64.neon.ld2r.v2f64.p0i8(i8*)
    37  %res = call {%vec, %vec} @llvm.aarch64.neon.ld2r.v2f64.p0i8(i8* %addr)
    41  declare {%vec, %vec} @llvm.aarch64.neon.ld2lane.v2f64.p0i8(%vec, %vec, i64, i8*)
    45  …%res = call {%vec, %vec} @llvm.aarch64.neon.ld2lane.v2f64.p0i8(%vec %in1, %vec %in2, i64 0, i8* %a…
    49  declare void @llvm.aarch64.neon.st2.v2f64.p0i8(%vec, %vec, i8*)
    53  call void @llvm.aarch64.neon.st2.v2f64.p0i8(%vec %in1, %vec %in2, i8* %addr)
    57  declare void @llvm.aarch64.neon.st2lane.v2f64.p0i8(%vec, %vec, i64, i8*)
    61  call void @llvm.aarch64.neon.st2lane.v2f64.p0i8(%vec %in1, %vec %in2, i64 1, i8* %addr)
    65  declare {%vec, %vec} @llvm.aarch64.neon.ld2.v2f64.p0i8(i8*)
    71  %vecs = call {%vec, %vec} @llvm.aarch64.neon.ld2.v2f64.p0i8(i8* %addr)
    [all …]
|
D | arm64-fminv.ll |
    20  %min = call double @llvm.aarch64.neon.fminv.f64.v2f64(<2 x double> %in)
    26  declare double @llvm.aarch64.neon.fminv.f64.v2f64(<2 x double>)
    45  %max = call double @llvm.aarch64.neon.fmaxv.f64.v2f64(<2 x double> %in)
    51  declare double @llvm.aarch64.neon.fmaxv.f64.v2f64(<2 x double>)
    70  %minnm = call double @llvm.aarch64.neon.fminnmv.f64.v2f64(<2 x double> %in)
    76  declare double @llvm.aarch64.neon.fminnmv.f64.v2f64(<2 x double>)
    95  %maxnm = call double @llvm.aarch64.neon.fmaxnmv.f64.v2f64(<2 x double> %in)
    101  declare double @llvm.aarch64.neon.fmaxnmv.f64.v2f64(<2 x double>)
|
/external/llvm/lib/Target/ARM/ |
D | ARMCallingConv.td |
    32  // Handle all vector types as either f64 or v2f64.
    34  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    36  // f64 and v2f64 are passed in adjacent GPRs, possibly split onto the stack
    37  CCIfType<[f64, v2f64], CCCustom<"CC_ARM_APCS_Custom_f64">>,
    44  CCIfType<[v2f64], CCAssignToStack<16, 4>>
    57  // Handle all vector types as either f64 or v2f64.
    59  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    61  CCIfType<[f64, v2f64], CCCustom<"RetCC_ARM_APCS_Custom_f64">>,
    71  // Handle all vector types as either f64 or v2f64.
    73  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    [all …]
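As a rough illustration of what these APCS rules classify, consider an IR function (hypothetical name, not from the file) that takes and returns a v2f64; per the entries above it is either passed in adjacent GPRs via CC_ARM_APCS_Custom_f64 or assigned a 16-byte stack slot with 4-byte alignment:

    ; hypothetical example; the <2 x double> argument is handled as v2f64 by the rules above
    define arm_apcscc <2 x double> @pass_v2f64(<2 x double> %a) {
      ret <2 x double> %a
    }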
|
/external/llvm/test/CodeGen/SystemZ/ |
D | vec-round-01.ll |
    1  ; Test v2f64 rounding.
    11  declare <2 x double> @llvm.rint.v2f64(<2 x double>)
    12  declare <2 x double> @llvm.nearbyint.v2f64(<2 x double>)
    13  declare <2 x double> @llvm.floor.v2f64(<2 x double>)
    14  declare <2 x double> @llvm.ceil.v2f64(<2 x double>)
    15  declare <2 x double> @llvm.trunc.v2f64(<2 x double>)
    16  declare <2 x double> @llvm.round.v2f64(<2 x double>)
    22  %res = call <2 x double> @llvm.rint.v2f64(<2 x double> %val)
    30  %res = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %val)
    38  %res = call <2 x double> @llvm.floor.v2f64(<2 x double> %val)
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/SystemZ/ |
D | vec-round-01.ll |
    1  ; Test v2f64 rounding.
    11  declare <2 x double> @llvm.rint.v2f64(<2 x double>)
    12  declare <2 x double> @llvm.nearbyint.v2f64(<2 x double>)
    13  declare <2 x double> @llvm.floor.v2f64(<2 x double>)
    14  declare <2 x double> @llvm.ceil.v2f64(<2 x double>)
    15  declare <2 x double> @llvm.trunc.v2f64(<2 x double>)
    16  declare <2 x double> @llvm.round.v2f64(<2 x double>)
    22  %res = call <2 x double> @llvm.rint.v2f64(<2 x double> %val)
    30  %res = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %val)
    38  %res = call <2 x double> @llvm.floor.v2f64(<2 x double> %val)
    [all …]
|
D | vec-strict-conv-01.ll |
    5  declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double>, metadata)
    6  declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double>, metadata)
    7  declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64>, metadata, metadat…
    8  declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64>, metadata, metadat…
    10  declare <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(<2 x double>, metadata)
    11  declare <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(<2 x double>, metadata)
    12  declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32>, metadata, metadat…
    13  declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32>, metadata, metadat…
    25  %dwords = call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double> %doubles,
    35  %dwords = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double> %doubles,
    [all …]
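The conversion calls above are truncated before their metadata operand; a complete strict fptosi for v2f64, sketched here together with the strictfp attribute such functions require, has this shape:

    declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double>, metadata)

    define <2 x i64> @strict_fptosi(<2 x double> %doubles) strictfp {
      ; fp-to-int conversions carry only an exception-behaviour operand
      %dwords = call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(
                    <2 x double> %doubles, metadata !"fpexcept.strict")
      ret <2 x i64> %dwords
    }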
|
D | vec-strict-round-01.ll |
    1  ; Test strict v2f64 rounding.
    11  declare <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double>, metadata, metadata)
    12  declare <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double>, metadata, metadat…
    13  declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata)
    14  declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata)
    15  declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
    16  declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata)
    22  %res = call <2 x double> @llvm.experimental.constrained.rint.v2f64(
    33  %res = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(
    44  %res = call <2 x double> @llvm.experimental.constrained.floor.v2f64(
    [all …]
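Per the declarations above, rint and nearbyint take a rounding-mode operand plus an exception operand, while floor, ceil, trunc, and round take only the exception operand. A sketched complete call (not a line from the file):

    declare <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double>, metadata, metadata)

    define <2 x double> @strict_rint(<2 x double> %val) strictfp {
      %res = call <2 x double> @llvm.experimental.constrained.rint.v2f64(
                  <2 x double> %val,
                  metadata !"round.dynamic", metadata !"fpexcept.strict")
      ret <2 x double> %res
    }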
|
D | vec-strict-mul-02.ll |
    5  declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x doub…
    7  ; Test a v2f64 multiply-and-add.
    13  %ret = call <2 x double> @llvm.experimental.constrained.fma.v2f64 (
    22  ; Test a v2f64 multiply-and-subtract.
    29  %ret = call <2 x double> @llvm.experimental.constrained.fma.v2f64 (
|
D | vec-strict-cmps-02.ll |
    1  ; Test f64 and v2f64 signaling comparisons on z14.
    10  %cmp = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(
    25  %cmp = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(
    38  %cmp = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(
    51  %cmp = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(
    64  %cmp = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(
    77  %cmp = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(
    92  %cmp = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(
    106  %cmp = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(
    120  %cmp = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(
    [all …]
|
D | vec-strict-cmp-02.ll |
    1  ; Test f64 and v2f64 strict comparisons.
    10  %cmp = call <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(
    25  %cmp = call <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(
    38  %cmp = call <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(
    51  %cmp = call <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(
    64  %cmp = call <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(
    77  %cmp = call <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(
    92  %cmp = call <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(
    106  %cmp = call <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(
    120  %cmp = call <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(
    [all …]
|
/external/llvm/test/Transforms/InstCombine/ |
D | masked_intrinsics.ll |
    3  declare <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptrs, i32, <2 x i1> %mask, <2 x…
    4  declare void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %ptrs, i32, <2 x i1>…
    5  declare <2 x double> @llvm.masked.gather.v2f64(<2 x double*> %ptrs, i32, <2 x i1> %mask, <2 x doubl…
    6  declare void @llvm.masked.scatter.v2f64(<2 x double> %val, <2 x double*> %ptrs, i32, <2 x i1> %mask)
    9  …%res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 1, <2 x i1> zeroi…
    17  …%res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 2, <2 x i1> <i1 1…
    26  …%res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 2, <2 x i1> <i1 1…
    35  …call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %ptr, i32 3, <2 x i1> …
    43  …call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %ptr, i32 4, <2 x i1> …
    52  …%res = call <2 x double> @llvm.masked.gather.v2f64(<2 x double*> %ptrs, i32 5, <2 x i1> zeroinitia…
    [all …]
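The masked.load calls above are cut off mid-argument; the full operand list is pointer, alignment, mask, and pass-through value. A minimal sketch (hypothetical function name, typed-pointer syntax as in this file):

    declare <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>*, i32, <2 x i1>, <2 x double>)

    define <2 x double> @load_both_lanes(<2 x double>* %ptr, <2 x double> %passthru) {
      ; all-ones mask: both lanes are loaded and %passthru is unused
      %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 8,
                                                               <2 x i1> <i1 1, i1 1>, <2 x double> %passthru)
      ret <2 x double> %res
    }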
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/ARM/ |
D | ARMCallingConv.td |
    32  // Handle all vector types as either f64 or v2f64.
    34  CCIfType<[v2i64, v4i32, v8i16, v8f16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    36  // f64 and v2f64 are passed in adjacent GPRs, possibly split onto the stack
    37  CCIfType<[f64, v2f64], CCCustom<"CC_ARM_APCS_Custom_f64">>,
    44  CCIfType<[v2f64], CCAssignToStack<16, 4>>
    58  // Handle all vector types as either f64 or v2f64.
    60  CCIfType<[v2i64, v4i32, v8i16, v8f16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    62  CCIfType<[f64, v2f64], CCCustom<"RetCC_ARM_APCS_Custom_f64">>,
    73  // Handle all vector types as either f64 or v2f64.
    75  CCIfType<[v2i64, v4i32, v8i16, v8f16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    [all …]
|
/external/llvm-project/llvm/lib/Target/ARM/ |
D | ARMCallingConv.td |
    32  // Handle all vector types as either f64 or v2f64.
    34  CCIfType<[v2i64, v4i32, v8i16, v8f16, v8bf16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    36  // f64 and v2f64 are passed in adjacent GPRs, possibly split onto the stack
    37  CCIfType<[f64, v2f64], CCCustom<"CC_ARM_APCS_Custom_f64">>,
    44  CCIfType<[v2f64], CCAssignToStack<16, 4>>
    58  // Handle all vector types as either f64 or v2f64.
    60  CCIfType<[v2i64, v4i32, v8i16, v8f16, v8bf16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    62  CCIfType<[f64, v2f64], CCCustom<"RetCC_ARM_APCS_Custom_f64">>,
    73  // Handle all vector types as either f64 or v2f64.
    75  CCIfType<[v2i64, v4i32, v8i16, v8f16, v8bf16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    [all …]
|
/external/llvm-project/llvm/test/Transforms/InstCombine/ |
D | masked_intrinsics.ll |
    4  declare <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptrs, i32, <2 x i1> %mask, <2 x…
    5  declare void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %ptrs, i32, <2 x i1>…
    6  declare <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> %ptrs, i32, <2 x i1> %mask, <2…
    8  declare void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> %val, <2 x double*> %ptrs, i32, <2 x i…
    14  …%res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 1, <2 x i1> zeroi…
    23  …%res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 2, <2 x i1> <i1 1…
    32  …%res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 2, <2 x i1> <i1 1…
    40  ; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* [[PT…
    43  …%res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 2, <2 x i1> <i1 1…
    50  ; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* [[PT…
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/Mips/msa/ |
D | basic_operations_float.ll |
    15  @v2f64 = global <2 x double> <double 0.0, double 0.0>
    59  store volatile <2 x double> <double 0.0, double 0.0>, <2 x double>*@v2f64
    62  …volatile <2 x double> <double 72340172838076673.0, double 72340172838076673.0>, <2 x double>*@v2f64
    68  …ore volatile <2 x double> <double 281479271743489.0, double 281479271743489.0>, <2 x double>*@v2f64
    74  store volatile <2 x double> <double 4294967297.0, double 4294967297.0>, <2 x double>*@v2f64
    80  store volatile <2 x double> <double 1.0, double 1.0>, <2 x double>*@v2f64
    86  store volatile <2 x double> <double 1.0, double 31.0>, <2 x double>*@v2f64
    92  store volatile <2 x double> <double 3.0, double 4.0>, <2 x double>*@v2f64
    122  store volatile <2 x double> %3, <2 x double>*@v2f64
    207  %1 = load <2 x double>, <2 x double>* @v2f64
    [all …]
|
/external/llvm/test/CodeGen/Mips/msa/ |
D | basic_operations_float.ll |
    9  @v2f64 = global <2 x double> <double 0.0, double 0.0>
    53  store volatile <2 x double> <double 0.0, double 0.0>, <2 x double>*@v2f64
    56  …volatile <2 x double> <double 72340172838076673.0, double 72340172838076673.0>, <2 x double>*@v2f64
    62  …ore volatile <2 x double> <double 281479271743489.0, double 281479271743489.0>, <2 x double>*@v2f64
    68  store volatile <2 x double> <double 4294967297.0, double 4294967297.0>, <2 x double>*@v2f64
    74  store volatile <2 x double> <double 1.0, double 1.0>, <2 x double>*@v2f64
    80  store volatile <2 x double> <double 1.0, double 31.0>, <2 x double>*@v2f64
    86  store volatile <2 x double> <double 3.0, double 4.0>, <2 x double>*@v2f64
    116  store volatile <2 x double> %3, <2 x double>*@v2f64
    201  %1 = load <2 x double>, <2 x double>* @v2f64
    [all …]
|
/external/llvm/lib/Target/PowerPC/ |
D | PPCInstrVSX.td |
    52  SDTCisVT<0, v2f64>, SDTCisPtrTy<1>
    55  SDTCisVT<0, v2f64>, SDTCisPtrTy<1>
    111  [(set v2f64:$XT, (int_ppc_vsx_lxvd2x xoaddr:$src))]>;
    133  [(store v2f64:$XT, xoaddr:$dst)]>;
    156  [(set v2f64:$XT, (fadd v2f64:$XA, v2f64:$XB))]>;
    166  [(set v2f64:$XT, (fmul v2f64:$XA, v2f64:$XB))]>;
    183  [(set v2f64:$XT, (fsub v2f64:$XA, v2f64:$XB))]>;
    259  [(set v2f64:$XT, (fma v2f64:$XA, v2f64:$XB, v2f64:$XTi))]>,
    291  [(set v2f64:$XT, (fma v2f64:$XA, v2f64:$XB, (fneg v2f64:$XTi)))]>,
    323  [(set v2f64:$XT, (fneg (fma v2f64:$XA, v2f64:$XB, v2f64:$XTi)))]>,
    [all …]
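These fragments are instruction-selection patterns; the v2f64 fadd pattern above, for instance, is what matches plain IR like the following sketch when compiling for a VSX-enabled PowerPC target:

    define <2 x double> @vsx_add(<2 x double> %a, <2 x double> %b) {
      ; matched by the [(set v2f64:$XT, (fadd v2f64:$XA, v2f64:$XB))] pattern above
      %sum = fadd <2 x double> %a, %b
      ret <2 x double> %sum
    }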
|
/external/llvm-project/llvm/lib/Target/PowerPC/ |
D | PPCInstrVSX.td |
    88  SDTCisVT<0, v2f64>, SDTCisVT<1, v4f32>, SDTCisPtrTy<2>
    97  SDTCisVT<0, v2f64>, SDTCisPtrTy<1>
    100  SDTCisVT<0, v2f64>, SDTCisPtrTy<1>
    323  [(set v2f64:$XT, (int_ppc_vsx_lxvd2x xoaddr:$src))]>;
    381  [(set v2f64:$XT, (any_fadd v2f64:$XA, v2f64:$XB))]>;
    391  [(set v2f64:$XT, (any_fmul v2f64:$XA, v2f64:$XB))]>;
    408  [(set v2f64:$XT, (any_fsub v2f64:$XA, v2f64:$XB))]>;
    484  [(set v2f64:$XT, (any_fma v2f64:$XA, v2f64:$XB, v2f64:$XTi))]>,
    516  [(set v2f64:$XT, (any_fma v2f64:$XA, v2f64:$XB, (fneg v2f64:$XTi)))]>,
    548  [(set v2f64:$XT, (fneg (any_fma v2f64:$XA, v2f64:$XB, v2f64:$XTi)))]>,
    [all …]
|