
Searched refs: v2f64 (Results 1 – 25 of 446), sorted by relevance


/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/
vfloatintrinsics.ll:255 %v2f64 = type <2 x double>
257 define %v2f64 @test_v2f64.sqrt(%v2f64 %a) {
259 %1 = call %v2f64 @llvm.sqrt.v2f64(%v2f64 %a)
260 ret %v2f64 %1
263 define %v2f64 @test_v2f64.powi(%v2f64 %a, i32 %b) {
265 %1 = call %v2f64 @llvm.powi.v2f64(%v2f64 %a, i32 %b)
266 ret %v2f64 %1
269 define %v2f64 @test_v2f64.sin(%v2f64 %a) {
271 %1 = call %v2f64 @llvm.sin.v2f64(%v2f64 %a)
272 ret %v2f64 %1
[all …]
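Note: as a point of reference, a minimal standalone sketch (not taken from the indexed files; function name illustrative) of how these overloaded vector intrinsics are declared and called on <2 x double>, using the pre-LLVM-13 powi mangling that these tests use:

    ; Generic vector FP intrinsics are overloaded by a type suffix.
    declare <2 x double> @llvm.sqrt.v2f64(<2 x double>)
    declare <2 x double> @llvm.powi.v2f64(<2 x double>, i32)

    define <2 x double> @sqrt_then_powi(<2 x double> %a, i32 %n) {
      %s = call <2 x double> @llvm.sqrt.v2f64(<2 x double> %a)
      %p = call <2 x double> @llvm.powi.v2f64(<2 x double> %s, i32 %n)
      ret <2 x double> %p
    }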
/external/llvm/test/CodeGen/AArch64/
arm64-vfloatintrinsics.ll:253 %v2f64 = type <2 x double>
255 define %v2f64 @test_v2f64.sqrt(%v2f64 %a) {
257 %1 = call %v2f64 @llvm.sqrt.v2f64(%v2f64 %a)
258 ret %v2f64 %1
261 define %v2f64 @test_v2f64.powi(%v2f64 %a, i32 %b) {
263 %1 = call %v2f64 @llvm.powi.v2f64(%v2f64 %a, i32 %b)
264 ret %v2f64 %1
267 define %v2f64 @test_v2f64.sin(%v2f64 %a) {
269 %1 = call %v2f64 @llvm.sin.v2f64(%v2f64 %a)
270 ret %v2f64 %1
[all …]
arm64-vcvt.ll:26 %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtas.v2i64.v2f64(<2 x double> %A)
32 declare <2 x i64> @llvm.aarch64.neon.fcvtas.v2i64.v2f64(<2 x double>) nounwind readnone
57 %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtau.v2i64.v2f64(<2 x double> %A)
63 declare <2 x i64> @llvm.aarch64.neon.fcvtau.v2i64.v2f64(<2 x double>) nounwind readnone
88 %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtms.v2i64.v2f64(<2 x double> %A)
94 declare <2 x i64> @llvm.aarch64.neon.fcvtms.v2i64.v2f64(<2 x double>) nounwind readnone
119 %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtmu.v2i64.v2f64(<2 x double> %A)
125 declare <2 x i64> @llvm.aarch64.neon.fcvtmu.v2i64.v2f64(<2 x double>) nounwind readnone
150 %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtps.v2i64.v2f64(<2 x double> %A)
156 declare <2 x i64> @llvm.aarch64.neon.fcvtps.v2i64.v2f64(<2 x double>) nounwind readnone
[all …]
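Note: the fcvt* intrinsics above convert <2 x double> to <2 x i64> with an explicit rounding mode baked into the name (as = to nearest, ties away; m = toward minus infinity; p = toward plus infinity; s/u = signed/unsigned result). A minimal call site, with an illustrative function name:

    declare <2 x i64> @llvm.aarch64.neon.fcvtms.v2i64.v2f64(<2 x double>)

    ; Each lane is rounded toward minus infinity and converted to a
    ; signed i64 (the FCVTMS instruction on AArch64).
    define <2 x i64> @floor_to_i64(<2 x double> %v) {
      %r = call <2 x i64> @llvm.aarch64.neon.fcvtms.v2i64.v2f64(<2 x double> %v)
      ret <2 x i64> %r
    }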
arm64-fminv.ll:20 %min = call double @llvm.aarch64.neon.fminv.f64.v2f64(<2 x double> %in)
26 declare double @llvm.aarch64.neon.fminv.f64.v2f64(<2 x double>)
45 %max = call double @llvm.aarch64.neon.fmaxv.f64.v2f64(<2 x double> %in)
51 declare double @llvm.aarch64.neon.fmaxv.f64.v2f64(<2 x double>)
70 %minnm = call double @llvm.aarch64.neon.fminnmv.f64.v2f64(<2 x double> %in)
76 declare double @llvm.aarch64.neon.fminnmv.f64.v2f64(<2 x double>)
95 %maxnm = call double @llvm.aarch64.neon.fmaxnmv.f64.v2f64(<2 x double> %in)
101 declare double @llvm.aarch64.neon.fmaxnmv.f64.v2f64(<2 x double>)
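Note: fminv/fmaxv are across-lane reductions returning a scalar. A sketch (illustrative name; for the two-lane f64 case this should select the pairwise FMINP form):

    declare double @llvm.aarch64.neon.fminv.f64.v2f64(<2 x double>)

    ; Horizontal reduction: the smaller of the two lanes.
    define double @hmin(<2 x double> %in) {
      %m = call double @llvm.aarch64.neon.fminv.f64.v2f64(<2 x double> %in)
      ret double %m
    }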
arm64-vminmaxnm.ll:20 …%vmaxnm2.i = tail call <2 x double> @llvm.aarch64.neon.fmaxnm.v2f64(<2 x double> %a, <2 x double> …
41 …%vminnm2.i = tail call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> %a, <2 x double> …
59 declare <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double>, <2 x double>) nounwind readnone
62 declare <2 x double> @llvm.aarch64.neon.fmaxnm.v2f64(<2 x double>, <2 x double>) nounwind readnone
71 %max = call double @llvm.aarch64.neon.fmaxnmv.f64.v2f64(<2 x double> %in)
78 %min = call double @llvm.aarch64.neon.fminnmv.f64.v2f64(<2 x double> %in)
82 declare double @llvm.aarch64.neon.fmaxnmv.f64.v2f64(<2 x double>)
83 declare double @llvm.aarch64.neon.fminnmv.f64.v2f64(<2 x double>)
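Note: the *nm intrinsics follow IEEE-754 minNum/maxNum semantics, so a quiet-NaN lane yields the other operand's lane instead of propagating NaN. Sketch (illustrative name):

    declare <2 x double> @llvm.aarch64.neon.fmaxnm.v2f64(<2 x double>, <2 x double>)

    ; Lane-wise maxNum: a quiet NaN in %a selects the lane from %b.
    define <2 x double> @maxnum2(<2 x double> %a, <2 x double> %b) {
      %r = tail call <2 x double> @llvm.aarch64.neon.fmaxnm.v2f64(<2 x double> %a, <2 x double> %b)
      ret <2 x double> %r
    }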
/external/llvm/test/CodeGen/ARM/
vfloatintrinsics.ll:255 %v2f64 = type <2 x double>
257 define %v2f64 @test_v2f64.sqrt(%v2f64 %a) {
259 %1 = call %v2f64 @llvm.sqrt.v2f64(%v2f64 %a)
260 ret %v2f64 %1
263 define %v2f64 @test_v2f64.powi(%v2f64 %a, i32 %b) {
265 %1 = call %v2f64 @llvm.powi.v2f64(%v2f64 %a, i32 %b)
266 ret %v2f64 %1
269 define %v2f64 @test_v2f64.sin(%v2f64 %a) {
271 %1 = call %v2f64 @llvm.sin.v2f64(%v2f64 %a)
272 ret %v2f64 %1
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/
arm64-vfloatintrinsics.ll:469 %v2f64 = type <2 x double>
471 define %v2f64 @test_v2f64.sqrt(%v2f64 %a) {
473 %1 = call %v2f64 @llvm.sqrt.v2f64(%v2f64 %a)
474 ret %v2f64 %1
477 define %v2f64 @test_v2f64.powi(%v2f64 %a, i32 %b) {
479 %1 = call %v2f64 @llvm.powi.v2f64(%v2f64 %a, i32 %b)
480 ret %v2f64 %1
483 define %v2f64 @test_v2f64.sin(%v2f64 %a) {
485 %1 = call %v2f64 @llvm.sin.v2f64(%v2f64 %a)
486 ret %v2f64 %1
[all …]
arm64-vcvt.ll:26 %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtas.v2i64.v2f64(<2 x double> %A)
32 declare <2 x i64> @llvm.aarch64.neon.fcvtas.v2i64.v2f64(<2 x double>) nounwind readnone
57 %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtau.v2i64.v2f64(<2 x double> %A)
63 declare <2 x i64> @llvm.aarch64.neon.fcvtau.v2i64.v2f64(<2 x double>) nounwind readnone
88 %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtms.v2i64.v2f64(<2 x double> %A)
94 declare <2 x i64> @llvm.aarch64.neon.fcvtms.v2i64.v2f64(<2 x double>) nounwind readnone
119 %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtmu.v2i64.v2f64(<2 x double> %A)
125 declare <2 x i64> @llvm.aarch64.neon.fcvtmu.v2i64.v2f64(<2 x double>) nounwind readnone
150 %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtps.v2i64.v2f64(<2 x double> %A)
156 declare <2 x i64> @llvm.aarch64.neon.fcvtps.v2i64.v2f64(<2 x double>) nounwind readnone
[all …]
arm64-fminv.ll:20 %min = call double @llvm.aarch64.neon.fminv.f64.v2f64(<2 x double> %in)
26 declare double @llvm.aarch64.neon.fminv.f64.v2f64(<2 x double>)
45 %max = call double @llvm.aarch64.neon.fmaxv.f64.v2f64(<2 x double> %in)
51 declare double @llvm.aarch64.neon.fmaxv.f64.v2f64(<2 x double>)
70 %minnm = call double @llvm.aarch64.neon.fminnmv.f64.v2f64(<2 x double> %in)
76 declare double @llvm.aarch64.neon.fminnmv.f64.v2f64(<2 x double>)
95 %maxnm = call double @llvm.aarch64.neon.fmaxnmv.f64.v2f64(<2 x double> %in)
101 declare double @llvm.aarch64.neon.fmaxnmv.f64.v2f64(<2 x double>)
arm64-vminmaxnm.ll:20 …%vmaxnm2.i = tail call <2 x double> @llvm.aarch64.neon.fmaxnm.v2f64(<2 x double> %a, <2 x double> …
41 …%vminnm2.i = tail call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> %a, <2 x double> …
59 declare <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double>, <2 x double>) nounwind readnone
62 declare <2 x double> @llvm.aarch64.neon.fmaxnm.v2f64(<2 x double>, <2 x double>) nounwind readnone
71 %max = call double @llvm.aarch64.neon.fmaxnmv.f64.v2f64(<2 x double> %in)
78 %min = call double @llvm.aarch64.neon.fminnmv.f64.v2f64(<2 x double> %in)
82 declare double @llvm.aarch64.neon.fmaxnmv.f64.v2f64(<2 x double>)
83 declare double @llvm.aarch64.neon.fminnmv.f64.v2f64(<2 x double>)
/external/swiftshader/third_party/LLVM/lib/Target/ARM/
ARMCallingConv.td:30 // Handle all vector types as either f64 or v2f64.
32 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
34 // f64 and v2f64 are passed in adjacent GPRs, possibly split onto the stack
35 CCIfType<[f64, v2f64], CCCustom<"CC_ARM_APCS_Custom_f64">>,
42 CCIfType<[v2f64], CCAssignToStack<16, 4>>
48 // Handle all vector types as either f64 or v2f64.
50 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
52 CCIfType<[f64, v2f64], CCCustom<"RetCC_ARM_APCS_Custom_f64">>,
62 // Handle all vector types as either f64 or v2f64.
64 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
[all …]
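Note: the TableGen rules above normalize every 128-bit vector type to v2f64 (CCBitConvertToType), pass f64/v2f64 through a custom handler that splits them across adjacent GPRs, and fall back to a 16-byte stack slot with only 4-byte alignment. At the IR level they govern plain vector signatures, as in this sketch (illustrative name):

    ; Under the APCS rules above, %v is bit-converted to v2f64 and split
    ; across GPR pairs (or spilled to a 16-byte, 4-byte-aligned slot).
    define <4 x i32> @pass_vec(<4 x i32> %v) {
      ret <4 x i32> %v
    }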
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/ARM/
ARMCallingConv.td:32 // Handle all vector types as either f64 or v2f64.
34 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
36 // f64 and v2f64 are passed in adjacent GPRs, possibly split onto the stack
37 CCIfType<[f64, v2f64], CCCustom<"CC_ARM_APCS_Custom_f64">>,
44 CCIfType<[v2f64], CCAssignToStack<16, 4>>
57 // Handle all vector types as either f64 or v2f64.
59 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
61 CCIfType<[f64, v2f64], CCCustom<"RetCC_ARM_APCS_Custom_f64">>,
71 // Handle all vector types as either f64 or v2f64.
73 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
[all …]
/external/llvm/lib/Target/ARM/
ARMCallingConv.td:32 // Handle all vector types as either f64 or v2f64.
34 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
36 // f64 and v2f64 are passed in adjacent GPRs, possibly split onto the stack
37 CCIfType<[f64, v2f64], CCCustom<"CC_ARM_APCS_Custom_f64">>,
44 CCIfType<[v2f64], CCAssignToStack<16, 4>>
57 // Handle all vector types as either f64 or v2f64.
59 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
61 CCIfType<[f64, v2f64], CCCustom<"RetCC_ARM_APCS_Custom_f64">>,
71 // Handle all vector types as either f64 or v2f64.
73 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/SystemZ/
vec-round-01.ll:1 ; Test v2f64 rounding.
11 declare <2 x double> @llvm.rint.v2f64(<2 x double>)
12 declare <2 x double> @llvm.nearbyint.v2f64(<2 x double>)
13 declare <2 x double> @llvm.floor.v2f64(<2 x double>)
14 declare <2 x double> @llvm.ceil.v2f64(<2 x double>)
15 declare <2 x double> @llvm.trunc.v2f64(<2 x double>)
16 declare <2 x double> @llvm.round.v2f64(<2 x double>)
22 %res = call <2 x double> @llvm.rint.v2f64(<2 x double> %val)
30 %res = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %val)
38 %res = call <2 x double> @llvm.floor.v2f64(<2 x double> %val)
[all …]
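Note: each generic rounding intrinsic here applies a different rounding mode to the same v2f64 input; on SystemZ z13 and later these should each lower to a single vector round-to-integer instruction rather than a libcall. Minimal caller (illustrative name):

    declare <2 x double> @llvm.floor.v2f64(<2 x double>)

    ; Rounds each lane toward minus infinity.
    define <2 x double> @floor2(<2 x double> %val) {
      %res = call <2 x double> @llvm.floor.v2f64(<2 x double> %val)
      ret <2 x double> %res
    }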
/external/llvm/test/CodeGen/SystemZ/
vec-round-01.ll:1 ; Test v2f64 rounding.
11 declare <2 x double> @llvm.rint.v2f64(<2 x double>)
12 declare <2 x double> @llvm.nearbyint.v2f64(<2 x double>)
13 declare <2 x double> @llvm.floor.v2f64(<2 x double>)
14 declare <2 x double> @llvm.ceil.v2f64(<2 x double>)
15 declare <2 x double> @llvm.trunc.v2f64(<2 x double>)
16 declare <2 x double> @llvm.round.v2f64(<2 x double>)
22 %res = call <2 x double> @llvm.rint.v2f64(<2 x double> %val)
30 %res = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %val)
38 %res = call <2 x double> @llvm.floor.v2f64(<2 x double> %val)
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/
masked_intrinsics.ll:3 declare <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptrs, i32, <2 x i1> %mask, <2 x…
4 declare void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %ptrs, i32, <2 x i1>…
5 declare <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> %ptrs, i32, <2 x i1> %mask, <2…
6 declare void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> %val, <2 x double*> %ptrs, i32, <2 x i…
9 …%res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 1, <2 x i1> zeroi…
17 …%res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 2, <2 x i1> <i1 1…
26 …%res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 2, <2 x i1> <i1 1…
35 …call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %ptr, i32 3, <2 x i1> …
43 …call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %ptr, i32 4, <2 x i1> …
52 …%res = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> %ptrs, i32 5, <2 x i1> ze…
[all …]
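Note: these InstCombine tests cover the constant-mask folds: an all-zero mask reduces a masked.load to its passthru operand (and deletes a masked.store), while an all-ones mask becomes an ordinary load/store. A sketch of the load case (illustrative name):

    declare <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>*, i32, <2 x i1>, <2 x double>)

    ; No lanes are enabled, so InstCombine can replace the call
    ; with the passthru operand %pt.
    define <2 x double> @dead_load(<2 x double>* %ptr, <2 x double> %pt) {
      %res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 8, <2 x i1> zeroinitializer, <2 x double> %pt)
      ret <2 x double> %res
    }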
/external/llvm/test/Transforms/InstCombine/
masked_intrinsics.ll:3 declare <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptrs, i32, <2 x i1> %mask, <2 x…
4 declare void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %ptrs, i32, <2 x i1>…
5 declare <2 x double> @llvm.masked.gather.v2f64(<2 x double*> %ptrs, i32, <2 x i1> %mask, <2 x doubl…
6 declare void @llvm.masked.scatter.v2f64(<2 x double> %val, <2 x double*> %ptrs, i32, <2 x i1> %mask)
9 …%res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 1, <2 x i1> zeroi…
17 …%res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 2, <2 x i1> <i1 1…
26 …%res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptr, i32 2, <2 x i1> <i1 1…
35 …call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %ptr, i32 3, <2 x i1> …
43 …call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %ptr, i32 4, <2 x i1> …
52 …%res = call <2 x double> @llvm.masked.gather.v2f64(<2 x double*> %ptrs, i32 5, <2 x i1> zeroinitia…
[all …]
/external/llvm/lib/Target/PowerPC/
PPCInstrVSX.td:52 SDTCisVT<0, v2f64>, SDTCisPtrTy<1>
55 SDTCisVT<0, v2f64>, SDTCisPtrTy<1>
111 [(set v2f64:$XT, (int_ppc_vsx_lxvd2x xoaddr:$src))]>;
133 [(store v2f64:$XT, xoaddr:$dst)]>;
156 [(set v2f64:$XT, (fadd v2f64:$XA, v2f64:$XB))]>;
166 [(set v2f64:$XT, (fmul v2f64:$XA, v2f64:$XB))]>;
183 [(set v2f64:$XT, (fsub v2f64:$XA, v2f64:$XB))]>;
259 [(set v2f64:$XT, (fma v2f64:$XA, v2f64:$XB, v2f64:$XTi))]>,
291 [(set v2f64:$XT, (fma v2f64:$XA, v2f64:$XB, (fneg v2f64:$XTi)))]>,
323 [(set v2f64:$XT, (fneg (fma v2f64:$XA, v2f64:$XB, v2f64:$XTi)))]>,
[all …]
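Note: these VSX patterns select ordinary IR fadd/fmul/fsub and the generic llvm.fma intrinsic on v2f64 directly into the xvadddp/xvmuldp/xvsubdp and xvmadd/xvmsub/xvnmadd family, with the FMA variants differing only in operand negation; no target intrinsics are needed. IR of the kind these patterns match (illustrative name):

    declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>)

    ; Both operations below are covered by the patterns above.
    define <2 x double> @madd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
      %m = call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c)
      %s = fadd <2 x double> %m, %c
      ret <2 x double> %s
    }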
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Mips/msa/
basic_operations_float.ll:9 @v2f64 = global <2 x double> <double 0.0, double 0.0>
53 store volatile <2 x double> <double 0.0, double 0.0>, <2 x double>*@v2f64
56 …volatile <2 x double> <double 72340172838076673.0, double 72340172838076673.0>, <2 x double>*@v2f64
62 …ore volatile <2 x double> <double 281479271743489.0, double 281479271743489.0>, <2 x double>*@v2f64
68 store volatile <2 x double> <double 4294967297.0, double 4294967297.0>, <2 x double>*@v2f64
74 store volatile <2 x double> <double 1.0, double 1.0>, <2 x double>*@v2f64
80 store volatile <2 x double> <double 1.0, double 31.0>, <2 x double>*@v2f64
86 store volatile <2 x double> <double 3.0, double 4.0>, <2 x double>*@v2f64
116 store volatile <2 x double> %3, <2 x double>*@v2f64
201 %1 = load <2 x double>, <2 x double>* @v2f64
[all …]
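Note: this MSA test stores a range of <2 x double> splat constants to a global and pins down which instruction sequence materializes each one (in-register materialization versus a constant-pool load). Reduced shape of the pattern (illustrative name):

    @v2f64 = global <2 x double> zeroinitializer

    define void @store_splat() {
      store volatile <2 x double> <double 1.0, double 1.0>, <2 x double>* @v2f64
      ret void
    }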
/external/llvm/test/CodeGen/Mips/msa/
basic_operations_float.ll:9 @v2f64 = global <2 x double> <double 0.0, double 0.0>
53 store volatile <2 x double> <double 0.0, double 0.0>, <2 x double>*@v2f64
56 …volatile <2 x double> <double 72340172838076673.0, double 72340172838076673.0>, <2 x double>*@v2f64
62 …ore volatile <2 x double> <double 281479271743489.0, double 281479271743489.0>, <2 x double>*@v2f64
68 store volatile <2 x double> <double 4294967297.0, double 4294967297.0>, <2 x double>*@v2f64
74 store volatile <2 x double> <double 1.0, double 1.0>, <2 x double>*@v2f64
80 store volatile <2 x double> <double 1.0, double 31.0>, <2 x double>*@v2f64
86 store volatile <2 x double> <double 3.0, double 4.0>, <2 x double>*@v2f64
116 store volatile <2 x double> %3, <2 x double>*@v2f64
201 %1 = load <2 x double>, <2 x double>* @v2f64
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/PowerPC/
PPCInstrVSX.td:59 SDTCisVT<0, v2f64>, SDTCisPtrTy<1>
62 SDTCisVT<0, v2f64>, SDTCisPtrTy<1>
144 [(set v2f64:$XT, (int_ppc_vsx_lxvd2x xoaddr:$src))]>;
200 [(set v2f64:$XT, (fadd v2f64:$XA, v2f64:$XB))]>;
210 [(set v2f64:$XT, (fmul v2f64:$XA, v2f64:$XB))]>;
227 [(set v2f64:$XT, (fsub v2f64:$XA, v2f64:$XB))]>;
303 [(set v2f64:$XT, (fma v2f64:$XA, v2f64:$XB, v2f64:$XTi))]>,
335 [(set v2f64:$XT, (fma v2f64:$XA, v2f64:$XB, (fneg v2f64:$XTi)))]>,
367 [(set v2f64:$XT, (fneg (fma v2f64:$XA, v2f64:$XB, v2f64:$XTi)))]>,
399 [(set v2f64:$XT, (fneg (fma v2f64:$XA, v2f64:$XB, (fneg v2f64:$XTi))))]>,
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Assembler/
auto_upgrade_intrinsics.ll:70 declare <2 x double> @llvm.masked.load.v2f64(<2 x double>* %ptrs, i32, <2 x i1> %mask, <2 x double>…
74 ; CHECK: @llvm.masked.load.v2f64.p0v2f64
75 …%res = call <2 x double> @llvm.masked.load.v2f64(<2 x double>* %ptr, i32 1, <2 x i1> %mask, <2 x d…
79 declare void @llvm.masked.store.v2f64(<2 x double> %val, <2 x double>* %ptrs, i32, <2 x i1> %mask)
83 ; CHECK: @llvm.masked.store.v2f64.p0v2f64
84 call void @llvm.masked.store.v2f64(<2 x double> %val, <2 x double>* %ptr, i32 3, <2 x i1> %mask)
88 declare <2 x double> @llvm.masked.gather.v2f64(<2 x double*> %ptrs, i32, <2 x i1> %mask, <2 x doubl…
92 ; CHECK: @llvm.masked.gather.v2f64.v2p0f64
93 …%res = call <2 x double> @llvm.masked.gather.v2f64(<2 x double*> %ptr, i32 1, <2 x i1> %mask, <2 x…
97 declare void @llvm.masked.scatter.v2f64(<2 x double> %val, <2 x double*> %ptrs, i32, <2 x i1> %mask)
[all …]
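Note: this test checks that the old unsuffixed masked-intrinsic names are rewritten to the pointer-mangled forms when stale bitcode is loaded. New IR should use the mangled names directly, e.g.:

    declare <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>*, i32, <2 x i1>, <2 x double>)
    declare void @llvm.masked.store.v2f64.p0v2f64(<2 x double>, <2 x double>*, i32, <2 x i1>)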
/external/llvm/lib/Target/AArch64/
AArch64TargetTransformInfo.cpp:217 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, in getCastInstrCost()
220 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, in getCastInstrCost()
247 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 }, in getCastInstrCost()
248 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 }, in getCastInstrCost()
249 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, in getCastInstrCost()
250 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 }, in getCastInstrCost()
251 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 }, in getCastInstrCost()
252 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, in getCastInstrCost()
258 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 }, in getCastInstrCost()
261 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 }, in getCastInstrCost()
[all …]
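Note: each row in the cost table reads {opcode, destination type, source type, cost}; v2i64 -> v2f64 sint_to_fp costs 1 because it is a single instruction, while narrow integer sources pay extra for widening first. The IR that the cheap row prices (illustrative name):

    ; Costed at 1 by the table above: one scvtf on the .2d vector.
    define <2 x double> @cast(<2 x i64> %v) {
      %f = sitofp <2 x i64> %v to <2 x double>
      ret <2 x double> %f
    }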
/external/swiftshader/third_party/LLVM/lib/Target/CellSPU/
CellSDKIntrinsics.td:412 [(set (v2f64 VECREG:$rT), (int_spu_si_dfa (v2f64 VECREG:$rA),
413 (v2f64 VECREG:$rB)))]>;
418 [(set (v2f64 VECREG:$rT), (int_spu_si_dfs (v2f64 VECREG:$rA),
419 (v2f64 VECREG:$rB)))]>;
424 [(set (v2f64 VECREG:$rT), (int_spu_si_dfm (v2f64 VECREG:$rA),
425 (v2f64 VECREG:$rB)))]>;
430 [(set (v2f64 VECREG:$rT), (int_spu_si_dfma (v2f64 VECREG:$rA),
431 (v2f64 VECREG:$rB)))]>;
436 [(set (v2f64 VECREG:$rT), (int_spu_si_dfnma (v2f64 VECREG:$rA),
437 (v2f64 VECREG:$rB)))]>;
[all …]
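Note: these are the SPU double-precision vector ops (dfa = add, dfs = subtract, dfm = multiply, dfma/dfnma = fused multiply-add variants) exposed as target intrinsics; CellSPU was removed from upstream LLVM long ago, so they survive only in this old vendored copy. Assuming TableGen's standard int_spu_si_dfa -> llvm.spu.si.dfa name mapping, a call would look like:

    declare <2 x double> @llvm.spu.si.dfa(<2 x double>, <2 x double>)

    ; Double-precision vector add via the SPU-specific intrinsic.
    define <2 x double> @spu_add(<2 x double> %a, <2 x double> %b) {
      %r = call <2 x double> @llvm.spu.si.dfa(<2 x double> %a, <2 x double> %b)
      ret <2 x double> %r
    }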
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/X86/
X86InstrVecCompiler.td:24 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
29 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
34 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
39 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
45 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
46 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
47 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
48 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
49 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
50 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
[all …]
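Note: these patterns make every 128-bit bitcast within VR128 free at instruction selection; the same xmm register is simply reinterpreted. In IR (illustrative name):

    ; No instruction is emitted for this: the pattern rewrites the
    ; bitcast to a reinterpretation of the same xmm register.
    define <2 x double> @reinterpret(<2 x i64> %v) {
      %f = bitcast <2 x i64> %v to <2 x double>
      ret <2 x double> %f
    }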
