
Searched refs:v2f64 (Results 1 – 25 of 49) sorted by relevance


/external/llvm/test/CodeGen/ARM/
vfloatintrinsics.ll
255 %v2f64 = type <2 x double>
257 define %v2f64 @test_v2f64.sqrt(%v2f64 %a) {
259 %1 = call %v2f64 @llvm.sqrt.v2f64(%v2f64 %a)
260 ret %v2f64 %1
263 define %v2f64 @test_v2f64.powi(%v2f64 %a, i32 %b) {
265 %1 = call %v2f64 @llvm.powi.v2f64(%v2f64 %a, i32 %b)
266 ret %v2f64 %1
269 define %v2f64 @test_v2f64.sin(%v2f64 %a) {
271 %1 = call %v2f64 @llvm.sin.v2f64(%v2f64 %a)
272 ret %v2f64 %1
[all …]
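Assembled from the excerpt above, a self-contained sketch of the sqrt case reads as follows; the intrinsic declaration is added here for completeness, and its exact placement in the test file is an assumption:

%v2f64 = type <2 x double>

declare %v2f64 @llvm.sqrt.v2f64(%v2f64)

define %v2f64 @test_v2f64.sqrt(%v2f64 %a) {
  ; Elementwise square root of a <2 x double> via the overloaded llvm.sqrt intrinsic.
  %1 = call %v2f64 @llvm.sqrt.v2f64(%v2f64 %a)
  ret %v2f64 %1
}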
/external/llvm/lib/Target/ARM/
ARMCallingConv.td
26 // Handle all vector types as either f64 or v2f64.
28 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
30 // f64 and v2f64 are passed in adjacent GPRs, possibly split onto the stack
31 CCIfType<[f64, v2f64], CCCustom<"CC_ARM_APCS_Custom_f64">>,
38 CCIfType<[v2f64], CCAssignToStack<16, 4>>
45 // Handle all vector types as either f64 or v2f64.
47 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
49 CCIfType<[f64, v2f64], CCCustom<"RetCC_ARM_APCS_Custom_f64">>,
59 // Handle all vector types as either f64 or v2f64.
61 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
[all …]
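For a concrete picture of what these rules apply to, here is a minimal, hypothetical IR function (not taken from the tree; the @add2 name is invented) whose <2 x double> arguments and return value are bit-converted to v2f64 and then assigned by the custom f64/v2f64 handlers or the 16-byte stack slot above:

define <2 x double> @add2(<2 x double> %a, <2 x double> %b) {
  ; Each <2 x double> argument is treated as v2f64 by the calling-convention rules above.
  %sum = fadd <2 x double> %a, %b
  ret <2 x double> %sum
}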
ARMTargetTransformInfo.cpp
187 { ISD::FP_ROUND, MVT::v2f64, 2 }, in getCastInstrCost()
267 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, in getCastInstrCost()
268 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, in getCastInstrCost()
270 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 }, in getCastInstrCost()
271 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 }, in getCastInstrCost()
272 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 3 }, in getCastInstrCost()
273 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 3 }, in getCastInstrCost()
274 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, in getCastInstrCost()
275 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, in getCastInstrCost()
277 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 }, in getCastInstrCost()
[all …]
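The table entries above price int-to-FP casts into v2f64; a hypothetical IR fragment of the shape they cover (the @widen name is illustrative only):

define <2 x double> @widen(<2 x i32> %a, <2 x i32> %b) {
  ; SINT_TO_FP / UINT_TO_FP from v2i32 to v2f64, each listed with cost 2 in the table above.
  %s = sitofp <2 x i32> %a to <2 x double>
  %u = uitofp <2 x i32> %b to <2 x double>
  %r = fadd <2 x double> %s, %u
  ret <2 x double> %r
}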
ARMCallingConv.h
64 if (LocVT == MVT::v2f64 && in CC_ARM_APCS_Custom_f64()
118 if (LocVT == MVT::v2f64 && in CC_ARM_AAPCS_Custom_f64()
150 if (LocVT == MVT::v2f64 && !f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State)) in RetCC_ARM_APCS_Custom_f64()
/external/llvm/test/CodeGen/X86/
vec_floor.ll
8 %t = call <2 x double> @llvm.floor.v2f64(<2 x double> %p)
11 declare <2 x double> @llvm.floor.v2f64(<2 x double> %p)
44 %t = call <2 x double> @llvm.ceil.v2f64(<2 x double> %p)
47 declare <2 x double> @llvm.ceil.v2f64(<2 x double> %p)
80 %t = call <2 x double> @llvm.trunc.v2f64(<2 x double> %p)
83 declare <2 x double> @llvm.trunc.v2f64(<2 x double> %p)
116 %t = call <2 x double> @llvm.rint.v2f64(<2 x double> %p)
119 declare <2 x double> @llvm.rint.v2f64(<2 x double> %p)
152 %t = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %p)
155 declare <2 x double> @llvm.nearbyint.v2f64(<2 x double> %p)
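The declarations above can be combined into a small, hypothetical example (the @frac_span name is invented) that rounds a <2 x double> in both directions:

declare <2 x double> @llvm.floor.v2f64(<2 x double>)
declare <2 x double> @llvm.ceil.v2f64(<2 x double>)

define <2 x double> @frac_span(<2 x double> %p) {
  ; Distance between the elementwise ceiling and floor of %p.
  %down = call <2 x double> @llvm.floor.v2f64(<2 x double> %p)
  %up   = call <2 x double> @llvm.ceil.v2f64(<2 x double> %p)
  %r    = fsub <2 x double> %up, %down
  ret <2 x double> %r
}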
vec_fabs.ll
8 %t = call <2 x double> @llvm.fabs.v2f64(<2 x double> %p)
11 declare <2 x double> @llvm.fabs.v2f64(<2 x double> %p)
/external/llvm/test/CodeGen/PowerPC/
vec_rounding.ll
9 declare <2 x double> @llvm.floor.v2f64(<2 x double> %p)
12 %t = call <2 x double> @llvm.floor.v2f64(<2 x double> %p)
31 declare <2 x double> @llvm.ceil.v2f64(<2 x double> %p)
34 %t = call <2 x double> @llvm.ceil.v2f64(<2 x double> %p)
53 declare <2 x double> @llvm.trunc.v2f64(<2 x double> %p)
56 %t = call <2 x double> @llvm.trunc.v2f64(<2 x double> %p)
75 declare <2 x double> @llvm.nearbyint.v2f64(<2 x double> %p)
78 %t = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %p)
vec_fmuladd.ll
9 declare <2 x double> @llvm.fmuladd.v2f64(<2 x double> %val, <2 x double>, <2 x double>)
40 …%fmuladd = call <2 x double> @llvm.fmuladd.v2f64 (<2 x double> %x, <2 x double> %x, <2 x double> %…
vec_sqrt.ll
12 declare <2 x double> @llvm.sqrt.v2f64(<2 x double> %val)
55 %sqrt = call <2 x double> @llvm.sqrt.v2f64 (<2 x double> %x)
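Combining the two declarations above into one hypothetical function (the @sqrt_fma name is invented) gives a compact sketch of both intrinsics on v2f64:

declare <2 x double> @llvm.fmuladd.v2f64(<2 x double>, <2 x double>, <2 x double>)
declare <2 x double> @llvm.sqrt.v2f64(<2 x double>)

define <2 x double> @sqrt_fma(<2 x double> %x, <2 x double> %y) {
  ; sqrt(x*x + y), computed elementwise on <2 x double>.
  %acc = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> %x, <2 x double> %x, <2 x double> %y)
  %r   = call <2 x double> @llvm.sqrt.v2f64(<2 x double> %acc)
  ret <2 x double> %r
}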
/external/llvm/lib/Target/R600/
AMDILISelLowering.cpp
57 (int)MVT::v2f64, in InitAMDILLowering()
82 (int)MVT::v2f64, in InitAMDILLowering()
172 setOperationAction(ISD::FADD, MVT::v2f64, Expand); in InitAMDILLowering()
173 setOperationAction(ISD::FSUB, MVT::v2f64, Expand); in InitAMDILLowering()
174 setOperationAction(ISD::FMUL, MVT::v2f64, Expand); in InitAMDILLowering()
175 setOperationAction(ISD::FP_ROUND_INREG, MVT::v2f64, Expand); in InitAMDILLowering()
176 setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand); in InitAMDILLowering()
180 setOperationAction(ISD::TRUNCATE, MVT::v2f64, Expand); in InitAMDILLowering()
181 setOperationAction(ISD::SIGN_EXTEND, MVT::v2f64, Expand); in InitAMDILLowering()
182 setOperationAction(ISD::ZERO_EXTEND, MVT::v2f64, Expand); in InitAMDILLowering()
[all …]
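As a rough illustration of what these Expand entries affect, here is a hypothetical IR function (not from the tree; the @widen_mul name is invented) that performs FP_EXTEND and FMUL on v2f64, both of which the code above marks Expand so that legalization splits or scalarizes them:

define <2 x double> @widen_mul(<2 x float> %a, <2 x double> %b) {
  ; fpext to v2f64 and fmul on v2f64; both operations are marked Expand above.
  %w = fpext <2 x float> %a to <2 x double>
  %r = fmul <2 x double> %w, %b
  ret <2 x double> %r
}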
/external/llvm/lib/Target/AArch64/
AArch64InstrNEON.td
170 (ResTy2D (opnode2D (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))))],
185 v2f32, v4f32, v2f64, 1>;
191 v2f32, v4f32, v2f64, 0>;
197 v2f32, v4f32, v2f64, 1>;
266 def FMLAvvv_2D: NeonI_3VSame_Constraint_impl<"fmla", ".2d", VPR128, v2f64,
273 def FMLSvvv_2D: NeonI_3VSame_Constraint_impl<"fmls", ".2d", VPR128, v2f64,
283 def : Pat<(v2f64 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)),
290 def : Pat<(v2f64 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)),
296 v2f32, v4f32, v2f64, 0>;
465 def : Pat<(v2f64 (int_arm_neon_vbsl (v2f64 VPR128:$src),
[all …]
/external/llvm/test/CodeGen/AArch64/
neon-frsqrt-frecp.ll
7 declare <2 x double> @llvm.arm.neon.vrsqrts.v2f64(<2 x double>, <2 x double>)
26 %val = call <2 x double> @llvm.arm.neon.vrsqrts.v2f64(<2 x double> %lhs, <2 x double> %rhs)
32 declare <2 x double> @llvm.arm.neon.vrecps.v2f64(<2 x double>, <2 x double>)
51 %val = call <2 x double> @llvm.arm.neon.vrecps.v2f64(<2 x double> %lhs, <2 x double> %rhs)
neon-fma.ll
51 declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>)
67 %val = call <2 x double> @llvm.fma.v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C)
88 … %val = call <2 x double> @llvm.fma.v2f64(<2 x double> %negA, <2 x double> %B, <2 x double> %C)
94 declare <2 x double> @llvm.fmuladd.v2f64(<2 x double>, <2 x double>, <2 x double>)
110 … %val = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C)
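The fma call on the negated %negA operand in the excerpt above corresponds to IR of roughly this shape (a hypothetical reconstruction; the fsub-from-negative-zero idiom is the usual way to spell fneg in IR of this vintage):

declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>)

define <2 x double> @fms(<2 x double> %A, <2 x double> %B, <2 x double> %C) {
  ; Negate %A, then fused multiply-add: computes C - A*B elementwise.
  %negA = fsub <2 x double> <double -0.0, double -0.0>, %A
  %val = call <2 x double> @llvm.fma.v2f64(<2 x double> %negA, <2 x double> %B, <2 x double> %C)
  ret <2 x double> %val
}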
neon-max-min-pairwise.ll
213 declare <2 x double> @llvm.arm.neon.vpmaxs.v2f64(<2 x double>, <2 x double>)
231 %val = call <2 x double> @llvm.arm.neon.vpmaxs.v2f64(<2 x double> %lhs, <2 x double> %rhs)
238 declare <2 x double> @llvm.arm.neon.vpmins.v2f64(<2 x double>, <2 x double>)
256 %val = call <2 x double> @llvm.arm.neon.vpmins.v2f64(<2 x double> %lhs, <2 x double> %rhs)
263 declare <2 x double> @llvm.aarch64.neon.vpmaxnm.v2f64(<2 x double>, <2 x double>)
281 … %val = call <2 x double> @llvm.aarch64.neon.vpmaxnm.v2f64(<2 x double> %lhs, <2 x double> %rhs)
288 declare <2 x double> @llvm.aarch64.neon.vpminnm.v2f64(<2 x double>, <2 x double>)
306 … %val = call <2 x double> @llvm.aarch64.neon.vpminnm.v2f64(<2 x double> %lhs, <2 x double> %rhs)
neon-max-min.ll
213 declare <2 x double> @llvm.arm.neon.vmaxs.v2f64(<2 x double>, <2 x double>)
231 %val = call <2 x double> @llvm.arm.neon.vmaxs.v2f64(<2 x double> %lhs, <2 x double> %rhs)
238 declare <2 x double> @llvm.arm.neon.vmins.v2f64(<2 x double>, <2 x double>)
256 %val = call <2 x double> @llvm.arm.neon.vmins.v2f64(<2 x double> %lhs, <2 x double> %rhs)
264 declare <2 x double> @llvm.aarch64.neon.vmaxnm.v2f64(<2 x double>, <2 x double>)
282 … %val = call <2 x double> @llvm.aarch64.neon.vmaxnm.v2f64(<2 x double> %lhs, <2 x double> %rhs)
289 declare <2 x double> @llvm.aarch64.neon.vminnm.v2f64(<2 x double>, <2 x double>)
307 … %val = call <2 x double> @llvm.aarch64.neon.vminnm.v2f64(<2 x double> %lhs, <2 x double> %rhs)
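Using the two declarations above, a hypothetical clamp helper (the @clamp name is invented) shows the usual max-then-min pattern on <2 x double>:

declare <2 x double> @llvm.arm.neon.vmaxs.v2f64(<2 x double>, <2 x double>)
declare <2 x double> @llvm.arm.neon.vmins.v2f64(<2 x double>, <2 x double>)

define <2 x double> @clamp(<2 x double> %v, <2 x double> %lo, <2 x double> %hi) {
  ; Clamp %v elementwise into [%lo, %hi] using the NEON max/min intrinsics declared above.
  %1 = call <2 x double> @llvm.arm.neon.vmaxs.v2f64(<2 x double> %v, <2 x double> %lo)
  %2 = call <2 x double> @llvm.arm.neon.vmins.v2f64(<2 x double> %1, <2 x double> %hi)
  ret <2 x double> %2
}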
/external/llvm/test/Transforms/BBVectorize/
simple-int.ll
27 ; CHECK: %Y1 = call <2 x double> @llvm.fma.v2f64(<2 x double> %X1, <2 x double> %X1.v.i0.2, <2 x do…
53 ; CHECK: %Y1 = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> %X1, <2 x double> %X1.v.i0.2, <2 …
77 ; CHECK: %Y1 = call <2 x double> @llvm.cos.v2f64(<2 x double> %X1)
102 ; CHECK: %Y1 = call <2 x double> @llvm.powi.v2f64(<2 x double> %X1, i32 %P)
127 ; CHECK: declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) #0
128 ; CHECK: declare <2 x double> @llvm.fmuladd.v2f64(<2 x double>, <2 x double>, <2 x double>) #0
129 ; CHECK: declare <2 x double> @llvm.cos.v2f64(<2 x double>) #1
130 ; CHECK: declare <2 x double> @llvm.powi.v2f64(<2 x double>, i32) #1
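The CHECK lines above match vectorized output; the scalar input such tests feed BBVectorize looks roughly like the following hypothetical pair of cos calls (whether the pass actually fuses them into @llvm.cos.v2f64 depends on its pairing heuristics):

declare double @llvm.cos.f64(double)

define <2 x double> @cos_pair(double %a, double %b) {
  ; Two independent scalar cos calls that a vectorizer may fuse into one llvm.cos.v2f64 call.
  %ca = call double @llvm.cos.f64(double %a)
  %cb = call double @llvm.cos.f64(double %b)
  %v0 = insertelement <2 x double> undef, double %ca, i32 0
  %v1 = insertelement <2 x double> %v0, double %cb, i32 1
  ret <2 x double> %v1
}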
/external/llvm/lib/Target/X86/
X86TargetTransformInfo.cpp
370 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 }, in getCastInstrCost()
371 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 }, in getCastInstrCost()
372 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 }, in getCastInstrCost()
373 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 }, in getCastInstrCost()
374 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 }, in getCastInstrCost()
375 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 }, in getCastInstrCost()
376 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 }, in getCastInstrCost()
377 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 }, in getCastInstrCost()
470 { ISD::SETCC, MVT::v2f64, 1 }, in getCmpSelInstrCost()
X86InstrFMA.td
91 memopv4f64, X86Fmadd, v2f64, v4f64>, VEX_W;
93 memopv4f64, X86Fmsub, v2f64, v4f64>, VEX_W;
96 v2f64, v4f64>, VEX_W;
99 v2f64, v4f64>, VEX_W;
111 memopv4f64, X86Fnmadd, v2f64, v4f64>, VEX_W;
113 memopv2f64, memopv4f64, X86Fnmsub, v2f64,
355 defm VFMADDPD4 : fma4p<0x69, "vfmaddpd", X86Fmadd, v2f64, v4f64,
357 defm VFMSUBPD4 : fma4p<0x6D, "vfmsubpd", X86Fmsub, v2f64, v4f64,
359 defm VFNMADDPD4 : fma4p<0x79, "vfnmaddpd", X86Fnmadd, v2f64, v4f64,
361 defm VFNMSUBPD4 : fma4p<0x7D, "vfnmsubpd", X86Fnmsub, v2f64, v4f64,
[all …]
X86InstrAVX512.td
21 def : Pat<(v2i64 (bitconvert (v2f64 VR128X:$src))), (v2i64 VR128X:$src)>;
26 def : Pat<(v4i32 (bitconvert (v2f64 VR128X:$src))), (v4i32 VR128X:$src)>;
31 def : Pat<(v8i16 (bitconvert (v2f64 VR128X:$src))), (v8i16 VR128X:$src)>;
36 def : Pat<(v16i8 (bitconvert (v2f64 VR128X:$src))), (v16i8 VR128X:$src)>;
42 def : Pat<(v4f32 (bitconvert (v2f64 VR128X:$src))), (v4f32 VR128X:$src)>;
43 def : Pat<(v2f64 (bitconvert (v2i64 VR128X:$src))), (v2f64 VR128X:$src)>;
44 def : Pat<(v2f64 (bitconvert (v4i32 VR128X:$src))), (v2f64 VR128X:$src)>;
45 def : Pat<(v2f64 (bitconvert (v8i16 VR128X:$src))), (v2f64 VR128X:$src)>;
46 def : Pat<(v2f64 (bitconvert (v16i8 VR128X:$src))), (v2f64 VR128X:$src)>;
47 def : Pat<(v2f64 (bitconvert (v4f32 VR128X:$src))), (v2f64 VR128X:$src)>;
[all …]
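The bitconvert patterns above let instruction selection treat 128-bit reinterpretations as plain register reuses; in IR they correspond to a bitcast, as in this minimal sketch (the @as_double name is invented):

define <2 x double> @as_double(<2 x i64> %x) {
  ; Reinterpret the 128-bit value; the patterns above fold this to the same register.
  %f = bitcast <2 x i64> %x to <2 x double>
  ret <2 x double> %f
}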
X86InstrSSE.td
252 def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
253 (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64)>;
264 def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (iPTR 0))),
265 (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm))>;
277 def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (iPTR 0)),
295 def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
306 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
311 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
316 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
321 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
[all …]
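The vector_extract and extract_subvector patterns above map lane and half-vector extraction to subregister copies; the IR shapes they match look roughly like this hypothetical sketch (the @low_lane name is invented):

define double @low_lane(<4 x double> %v) {
  ; Take the low <2 x double> half of a <4 x double>, then its first lane.
  %lo = shufflevector <4 x double> %v, <4 x double> undef, <2 x i32> <i32 0, i32 1>
  %e  = extractelement <2 x double> %lo, i32 0
  ret double %e
}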
/external/chromium_org/third_party/mesa/src/src/gallium/drivers/radeon/
AMDILISelLowering.cpp
65 (int)MVT::v2f64, in InitAMDILLowering()
93 (int)MVT::v2f64, in InitAMDILLowering()
189 setOperationAction(ISD::FADD, MVT::v2f64, Expand); in InitAMDILLowering()
190 setOperationAction(ISD::FSUB, MVT::v2f64, Expand); in InitAMDILLowering()
191 setOperationAction(ISD::FMUL, MVT::v2f64, Expand); in InitAMDILLowering()
192 setOperationAction(ISD::FP_ROUND_INREG, MVT::v2f64, Expand); in InitAMDILLowering()
193 setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand); in InitAMDILLowering()
197 setOperationAction(ISD::TRUNCATE, MVT::v2f64, Expand); in InitAMDILLowering()
198 setOperationAction(ISD::SIGN_EXTEND, MVT::v2f64, Expand); in InitAMDILLowering()
199 setOperationAction(ISD::ZERO_EXTEND, MVT::v2f64, Expand); in InitAMDILLowering()
[all …]
/external/mesa3d/src/gallium/drivers/radeon/
AMDILISelLowering.cpp
65 (int)MVT::v2f64, in InitAMDILLowering()
93 (int)MVT::v2f64, in InitAMDILLowering()
189 setOperationAction(ISD::FADD, MVT::v2f64, Expand); in InitAMDILLowering()
190 setOperationAction(ISD::FSUB, MVT::v2f64, Expand); in InitAMDILLowering()
191 setOperationAction(ISD::FMUL, MVT::v2f64, Expand); in InitAMDILLowering()
192 setOperationAction(ISD::FP_ROUND_INREG, MVT::v2f64, Expand); in InitAMDILLowering()
193 setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand); in InitAMDILLowering()
197 setOperationAction(ISD::TRUNCATE, MVT::v2f64, Expand); in InitAMDILLowering()
198 setOperationAction(ISD::SIGN_EXTEND, MVT::v2f64, Expand); in InitAMDILLowering()
199 setOperationAction(ISD::ZERO_EXTEND, MVT::v2f64, Expand); in InitAMDILLowering()
[all …]
/external/llvm/include/llvm/CodeGen/
ValueTypes.h
101 v2f64 = 46, // 2 x f64 enumerator
213 SimpleTy == MVT::v4f32 || SimpleTy == MVT::v2f64); in is128BitVector()
295 case v2f64: in getVectorElementType()
337 case v2f64: return 2; in getVectorNumElements()
394 case v2f64: return 128; in getSizeInBits()
532 if (NumElements == 2) return MVT::v2f64; in getVectorVT()
/external/llvm/test/TableGen/
cast.td
9 def v2f64 : ValueType<128, 28>; // 2 x f64 vector value
57 def VR128 : RegisterClass<[v2i64, v2f64],
TargetInstrSpec.td
10 def v2f64 : ValueType<128, 28>; // 2 x f64 vector value
58 def VR128 : RegisterClass<[v2i64, v2f64],
