
Searched refs: float64x2_t (Results 1 – 22 of 22) sorted by relevance
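
float64x2_t is the AArch64 Advanced SIMD (NEON) vector type that packs two double-precision lanes into one 128-bit register; it is declared by <arm_neon.h> and is only available when compiling for a 64-bit Arm target. Most of the hits below are Clang CodeGen tests for its intrinsics. As a quick orientation, a minimal use might look like the sketch below (illustrative only, not taken from any of the listed files):

    /* Minimal float64x2_t sketch; build for AArch64, e.g. clang --target=aarch64-linux-gnu -O2 */
    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        float64_t buf[2] = {1.0, 2.0};
        float64x2_t a = vld1q_f64(buf);       /* load two doubles into one Q register */
        float64x2_t b = vdupq_n_f64(3.0);     /* broadcast a scalar into both lanes */
        float64x2_t c = vfmaq_f64(a, b, b);   /* fused multiply-add: a + b*b, per lane */
        printf("%f %f\n", vgetq_lane_f64(c, 0), vgetq_lane_f64(c, 1));  /* 10.0 11.0 */
        return 0;
    }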

/external/clang/test/CodeGen/
aarch64-neon-fma.c:35 float64x2_t test_vmlaq_n_f64(float64x2_t a, float64x2_t b, float64_t c) { in test_vmlaq_n_f64()
67 float64x2_t test_vmlsq_n_f64(float64x2_t a, float64x2_t b, float64_t c) { in test_vmlsq_n_f64()
225 float64x2_t test_vfmaq_n_f64(float64x2_t a, float64x2_t b, float64_t c) { in test_vfmaq_n_f64()
241 float64x2_t test_vfmsq_n_f64(float64x2_t a, float64x2_t b, float64_t c) { in test_vfmsq_n_f64()
arm64-vrnd.c:9 int64x2_t rnd5(float64x2_t a) { return vrndq_f64(a); } in rnd5()
17 int64x2_t rnd9(float64x2_t a) { return vrndnq_f64(a); } in rnd9()
19 int64x2_t rnd10(float64x2_t a) { return vrndnq_f64(a); } in rnd10()
26 int64x2_t rnd13(float64x2_t a) { return vrndmq_f64(a); } in rnd13()
28 int64x2_t rnd14(float64x2_t a) { return vrndmq_f64(a); } in rnd14()
35 int64x2_t rnd18(float64x2_t a) { return vrndpq_f64(a); } in rnd18()
42 int64x2_t rnd22(float64x2_t a) { return vrndaq_f64(a); } in rnd22()
49 int64x2_t rnd25(float64x2_t a) { return vrndxq_f64(a); } in rnd25()
aarch64-neon-misc.c:243 uint64x2_t test_vceqzq_f64(float64x2_t a) { in test_vceqzq_f64()
369 uint64x2_t test_vcgezq_f64(float64x2_t a) { in test_vcgezq_f64()
485 uint64x2_t test_vclezq_f64(float64x2_t a) { in test_vclezq_f64()
601 uint64x2_t test_vcgtzq_f64(float64x2_t a) { in test_vcgtzq_f64()
717 uint64x2_t test_vcltzq_f64(float64x2_t a) { in test_vcltzq_f64()
1413 float64x2_t test_vnegq_f64(float64x2_t a) { in test_vnegq_f64()
1499 float64x2_t test_vabsq_f64(float64x2_t a) { in test_vabsq_f64()
2383 float32x2_t test_vcvt_f32_f64(float64x2_t a) { in test_vcvt_f32_f64()
2393 float32x4_t test_vcvt_high_f32_f64(float32x2_t a, float64x2_t b) { in test_vcvt_high_f32_f64()
2402 float32x2_t test_vcvtx_f32_f64(float64x2_t a) { in test_vcvtx_f32_f64()
[all …]
aarch64-neon-scalar-x-indexed-elem.c:45 float64_t test_vmuld_laneq_f64(float64_t a, float64x2_t b) { in test_vmuld_laneq_f64()
96 float64_t test_vmulxd_laneq_f64(float64_t a, float64x2_t b) { in test_vmulxd_laneq_f64()
129 float64x1_t test_vmulx_laneq_f64_0(float64x1_t a, float64x2_t b) { in test_vmulx_laneq_f64_0()
145 float64x1_t test_vmulx_laneq_f64_1(float64x1_t a, float64x2_t b) { in test_vmulx_laneq_f64_1()
176 float64_t test_vfmad_laneq_f64(float64_t a, float64_t b, float64x2_t c) { in test_vfmad_laneq_f64()
231 float64x1_t test_vfma_laneq_f64(float64x1_t a, float64x1_t b, float64x2_t v) { in test_vfma_laneq_f64()
247 float64x1_t test_vfms_laneq_f64(float64x1_t a, float64x1_t b, float64x2_t v) { in test_vfms_laneq_f64()
536 float64x2_t arg3; in test_vmulx_laneq_f64_2()
arm64_vdupq_n_f64.c:11 float64x2_t test_vdupq_n_f64(float64_t w) { in test_vdupq_n_f64()
33 float64x2_t test_vdupq_lane_f64(float64x1_t V) { in test_vdupq_lane_f64()
43 float64x2_t test_vmovq_n_f64(float64_t w) { in test_vmovq_n_f64()
aarch64-neon-2velem.c:405 float64x2_t test_vfmaq_lane_f64(float64x2_t a, float64x2_t b, float64x1_t v) { in test_vfmaq_lane_f64()
419 float64x2_t test_vfmaq_laneq_f64(float64x2_t a, float64x2_t b, float64x2_t v) { in test_vfmaq_laneq_f64()
434 float64x2_t test_vfmsq_lane_f64(float64x2_t a, float64x2_t b, float64x1_t v) { in test_vfmsq_lane_f64()
449 float64x2_t test_vfmsq_laneq_f64(float64x2_t a, float64x2_t b, float64x2_t v) { in test_vfmsq_laneq_f64()
492 float64_t test_vfmsd_laneq_f64(float64_t a, float64_t b, float64x2_t v) { in test_vfmsd_laneq_f64()
1515 float64x2_t test_vmulq_lane_f64(float64x2_t a, float64x1_t v) { in test_vmulq_lane_f64()
1536 float64x1_t test_vmul_laneq_f64(float64x1_t a, float64x2_t v) { in test_vmul_laneq_f64()
1553 float64x2_t test_vmulq_laneq_f64(float64x2_t a, float64x2_t v) { in test_vmulq_laneq_f64()
1589 float64x2_t test_vmulxq_lane_f64(float64x2_t a, float64x1_t v) { in test_vmulxq_lane_f64()
1625 float64x2_t test_vmulxq_laneq_f64(float64x2_t a, float64x2_t v) { in test_vmulxq_laneq_f64()
[all …]
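
The aarch64-neon-2velem.c hits above cover the by-lane forms, in which the second multiplicand is one lane selected at compile time from a vector operand. A minimal sketch of that pattern, with a helper name of my own:

    #include <arm_neon.h>

    /* acc + x * v[1] in each lane: the by-lane FMA form exercised by
       test_vfmaq_laneq_f64 above. The lane index must be a constant expression. */
    static inline float64x2_t fma_by_lane1(float64x2_t acc, float64x2_t x, float64x2_t v) {
        return vfmaq_laneq_f64(acc, x, v, 1);
    }
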
aarch64-neon-perm.c:122 float64x2_t test_vuzp1q_f64(float64x2_t a, float64x2_t b) { in test_vuzp1q_f64()
269 float64x2_t test_vuzp2q_f64(float64x2_t a, float64x2_t b) { in test_vuzp2q_f64()
416 float64x2_t test_vzip1q_f64(float64x2_t a, float64x2_t b) { in test_vzip1q_f64()
563 float64x2_t test_vzip2q_f64(float64x2_t a, float64x2_t b) { in test_vzip2q_f64()
710 float64x2_t test_vtrn1q_f64(float64x2_t a, float64x2_t b) { in test_vtrn1q_f64()
857 float64x2_t test_vtrn2q_f64(float64x2_t a, float64x2_t b) { in test_vtrn2q_f64()
arm64_vcopy.c:118 float64x2_t test_vcopyq_laneq_f64(float64x2_t a1, float64x2_t a2) { in test_vcopyq_laneq_f64()
aarch64-neon-intrinsics.c:111 float64x2_t test_vaddq_f64(float64x2_t v1, float64x2_t v2) { in test_vaddq_f64()
242 float64x2_t test_vsubq_f64(float64x2_t v1, float64x2_t v2) { in test_vsubq_f64()
376 float64x2_t test_vmulq_f64(float64x2_t v1, float64x2_t v2) { in test_vmulq_f64()
516 float64x2_t test_vmlaq_f64(float64x2_t v1, float64x2_t v2, float64x2_t v3) { in test_vmlaq_f64()
635 float64x2_t test_vmlsq_f64(float64x2_t v1, float64x2_t v2, float64x2_t v3) { in test_vmlsq_f64()
673 float64x2_t test_vfmaq_f64(float64x2_t v1, float64x2_t v2, float64x2_t v3) { in test_vfmaq_f64()
714 float64x2_t test_vfmsq_f64(float64x2_t v1, float64x2_t v2, float64x2_t v3) { in test_vfmsq_f64()
721 float64x2_t test_vdivq_f64(float64x2_t v1, float64x2_t v2) { in test_vdivq_f64()
1012 float64x2_t test_vabdq_f64(float64x2_t v1, float64x2_t v2) { in test_vabdq_f64()
1367 float64x2_t test_vbslq_f64(uint64x2_t v1, float64x2_t v2, float64x2_t v3) { in test_vbslq_f64()
[all …]
aarch64-neon-extract.c:209 float64x2_t test_vextq_f64(float64x2_t a, float64x2_t b) { in test_vextq_f64()
aarch64-neon-vget-hilo.c:102 float64x1_t test_vget_high_f64(float64x2_t a) { in test_vget_high_f64()
200 float64x1_t test_vget_low_f64(float64x2_t a) { in test_vget_low_f64()
aarch64-poly128.c:135 poly128_t test_vreinterpretq_p128_f64(float64x2_t a) { in test_vreinterpretq_p128_f64()
226 float64x2_t test_vreinterpretq_f64_p128(poly128_t a) { in test_vreinterpretq_f64_p128()
aarch64-neon-vcombine.c:101 float64x2_t test_vcombine_f64(float64x1_t low, float64x1_t high) { in test_vcombine_f64()
aarch64-neon-scalar-copy.c:42 float64_t test_vdupd_laneq_f64(float64x2_t a) { in test_vdupd_laneq_f64()
aarch64-neon-ldst-one.c:121 float64x2_t test_vld1q_dup_f64(float64_t *a) { in test_vld1q_dup_f64()
1930 float64x2_t test_vld1q_lane_f64(float64_t *a, float64x2_t b) { in test_vld1q_lane_f64()
5393 void test_vst1q_lane_f64(float64_t *a, float64x2_t b) { in test_vst1q_lane_f64()
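
aarch64-neon-ldst-one.c above tests the single-lane load/store forms, which read or write one double in memory while leaving the other lane of the register untouched. A small sketch of how the two intrinsics pair up (function name is mine, not from the tests):

    #include <arm_neon.h>

    /* Replace lane 1 of v from *src, then store lane 0 of the result to *dst. */
    static void splice_lane(const float64_t *src, float64_t *dst, float64x2_t v) {
        v = vld1q_lane_f64(src, v, 1);   /* load into lane 1, keep lane 0 */
        vst1q_lane_f64(dst, v, 0);       /* store lane 0 only */
    }
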
/external/clang/test/Sema/
vector-cast.c:49 typedef __attribute__((vector_size(16))) double float64x2_t; typedef
50 float64x1_t vget_low_f64(float64x2_t __p0);
55 float64x2_t v = {0.0, 1.0}; in f4()
aarch64-neon-ranges.c:22 void test_mul_lane_f64(float64x1_t small, float64x2_t big, float64x2_t rhs) { in test_mul_lane_f64()
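
The Sema hit in vector-cast.c builds float64x2_t from the generic GCC/Clang vector_size extension rather than <arm_neon.h>, so the vector supports element-wise operators directly and needs no NEON intrinsics. A tiny self-contained sketch (hypothetical typedef name to avoid clashing with arm_neon.h):

    /* Generic vector-extension analogue of float64x2_t; portable across Clang/GCC targets. */
    typedef __attribute__((vector_size(16))) double f64x2;

    static f64x2 twice(f64x2 v) {
        return v + v;   /* element-wise add, no intrinsics required */
    }
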
/external/clang/test/CodeGenCXX/
mangle-neon-vectors.cpp:26 typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t; typedef
73 void f11(float64x2_t v) { } in f11()
aarch64-mangle-neon-vectors.cpp:33 typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t; typedef
84 void f23(float64x2_t) {} in f23() argument
/external/eigen/Eigen/src/Core/arch/NEON/
PacketMath.h:550 float64x2_t vreinterpretq_f64_u64(T a)
552 return (float64x2_t) a;
555 typedef float64x2_t Packet2d;
674 float64x2_t trn1, trn2;
717 float64x2_t trn1 = vzip1q_f64(kernel.packet[0], kernel.packet[1]);
718 float64x2_t trn2 = vzip2q_f64(kernel.packet[0], kernel.packet[1]);
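
The PacketMath.h hits above use the standard in-register 2x2 transpose: vzip1q_f64 gathers lane 0 of both rows, vzip2q_f64 gathers lane 1. A standalone sketch of the same interleave pattern (names are mine, not Eigen's):

    #include <arm_neon.h>

    /* Transpose two float64x2_t rows in place. */
    static void transpose2x2(float64x2_t *r0, float64x2_t *r1) {
        float64x2_t c0 = vzip1q_f64(*r0, *r1);   /* { r0[0], r1[0] } */
        float64x2_t c1 = vzip2q_f64(*r0, *r1);   /* { r0[1], r1[1] } */
        *r0 = c0;
        *r1 = c1;
    }
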
/external/arm-optimized-routines/math/
v_math.h:372 typedef float64x2_t v_f64_t;
/external/neon_2_sse/
NEON_2_SSE.h:161 typedef __m128d float64x2_t; typedef
877 _NEON2SSESTORAGE float64x2_t vmaxq_f64(float64x2_t a, float64x2_t b); // VMAX.F64 q0,q0,q0
895 _NEON2SSESTORAGE float64x2_t vminq_f64(float64x2_t a, float64x2_t b); // VMIN.F64 q0,q0,q0
1247 _NEON2SSESTORAGE float64x2_t vld1q_f64(__transfersize(4) float64_t const * ptr); // VLD1.64 {d0, d1…
2030 _NEON2SSESTORAGE float64x2_t vabsq_f64(float64x2_t a); // VABS.F64 q0,q0
2277 _NEON2SSESTORAGE float64x2_t vrndnq_f64(float64x2_t a); // VRND.F64 q0,q0
2282 _NEON2SSESTORAGE float64x2_t vsqrtq_f64(float64x2_t a); // VSQRT.F64 q0,q0
6164 _NEON2SSESTORAGE float64x2_t vmaxq_f64(float64x2_t a, float64x2_t b); // VMAX.F64 q0,q0,q0
6253 _NEON2SSESTORAGE float64x2_t vminq_f64(float64x2_t a, float64x2_t b); // VMIN.F64 q0,q0,q0
9325 _NEON2SSESTORAGE float64x2_t vld1q_f64(__transfersize(4) float64_t const * ptr); // VLD1.64 {d0, d1…
[all …]
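
NEON_2_SSE.h emulates the type on x86 by typedef'ing float64x2_t to SSE2's __m128d and wrapping each NEON intrinsic over an SSE equivalent. A minimal sketch of that mapping style for vmaxq_f64 (my own reduction of the idea, not the library's actual code; NaN semantics differ between FMAX and MAXPD, which a faithful port has to account for):

    #include <emmintrin.h>

    typedef __m128d float64x2_t;   /* same emulation typedef as NEON_2_SSE.h above */

    /* vmaxq_f64 maps almost directly onto the SSE2 packed-double maximum. */
    static inline float64x2_t vmaxq_f64_sse(float64x2_t a, float64x2_t b) {
        return _mm_max_pd(a, b);
    }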