/external/clang/test/CodeGen/ |
D | aarch64-neon-fma.c |
     35 float64x2_t test_vmlaq_n_f64(float64x2_t a, float64x2_t b, float64_t c) {  in test_vmlaq_n_f64()
     67 float64x2_t test_vmlsq_n_f64(float64x2_t a, float64x2_t b, float64_t c) {  in test_vmlsq_n_f64()
    225 float64x2_t test_vfmaq_n_f64(float64x2_t a, float64x2_t b, float64_t c) {  in test_vfmaq_n_f64()
    241 float64x2_t test_vfmsq_n_f64(float64x2_t a, float64x2_t b, float64_t c) {  in test_vfmsq_n_f64()
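These hits cover the multiply-accumulate-by-scalar tests. As a rough sketch of what the intrinsics under test compute (my own illustrative names, assuming an AArch64 target with <arm_neon.h>):

    #include <arm_neon.h>

    /* acc + x * k in each lane, fused (single rounding) */
    float64x2_t fma_by_scalar(float64x2_t acc, float64x2_t x, float64_t k) {
        return vfmaq_n_f64(acc, x, k);
    }

    /* acc - x * k in each lane, fused */
    float64x2_t fms_by_scalar(float64x2_t acc, float64x2_t x, float64_t k) {
        return vfmsq_n_f64(acc, x, k);
    }

The vmlaq_n_f64/vmlsq_n_f64 pair listed above computes the same expressions with a separate multiply and add, so it can round twice.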
|
D | arm64-vrnd.c |
      9 int64x2_t rnd5(float64x2_t a) { return vrndq_f64(a); }    in rnd5()
     17 int64x2_t rnd9(float64x2_t a) { return vrndnq_f64(a); }   in rnd9()
     19 int64x2_t rnd10(float64x2_t a) { return vrndnq_f64(a); }  in rnd10()
     26 int64x2_t rnd13(float64x2_t a) { return vrndmq_f64(a); }  in rnd13()
     28 int64x2_t rnd14(float64x2_t a) { return vrndmq_f64(a); }  in rnd14()
     35 int64x2_t rnd18(float64x2_t a) { return vrndpq_f64(a); }  in rnd18()
     42 int64x2_t rnd22(float64x2_t a) { return vrndaq_f64(a); }  in rnd22()
     49 int64x2_t rnd25(float64x2_t a) { return vrndxq_f64(a); }  in rnd25()
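arm64-vrnd.c walks the per-lane rounding family; each hit corresponds to one rounding mode (the int64x2_t return types are verbatim from the test source). A sketch of the modes, assuming <arm_neon.h> on AArch64:

    #include <arm_neon.h>

    void rounding_modes(float64x2_t a) {
        float64x2_t z = vrndq_f64(a);   /* toward zero (truncate)            */
        float64x2_t n = vrndnq_f64(a);  /* to nearest, ties to even          */
        float64x2_t m = vrndmq_f64(a);  /* toward minus infinity (floor)     */
        float64x2_t p = vrndpq_f64(a);  /* toward plus infinity (ceil)       */
        float64x2_t w = vrndaq_f64(a);  /* to nearest, ties away from zero   */
        float64x2_t x = vrndxq_f64(a);  /* current FPCR mode, raises inexact */
        (void)z; (void)n; (void)m; (void)p; (void)w; (void)x;
    }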
|
D | aarch64-neon-misc.c |
    243 uint64x2_t test_vceqzq_f64(float64x2_t a) {  in test_vceqzq_f64()
    369 uint64x2_t test_vcgezq_f64(float64x2_t a) {  in test_vcgezq_f64()
    485 uint64x2_t test_vclezq_f64(float64x2_t a) {  in test_vclezq_f64()
    601 uint64x2_t test_vcgtzq_f64(float64x2_t a) {  in test_vcgtzq_f64()
    717 uint64x2_t test_vcltzq_f64(float64x2_t a) {  in test_vcltzq_f64()
   1413 float64x2_t test_vnegq_f64(float64x2_t a) {  in test_vnegq_f64()
   1499 float64x2_t test_vabsq_f64(float64x2_t a) {  in test_vabsq_f64()
   2383 float32x2_t test_vcvt_f32_f64(float64x2_t a) {  in test_vcvt_f32_f64()
   2393 float32x4_t test_vcvt_high_f32_f64(float32x2_t a, float64x2_t b) {  in test_vcvt_high_f32_f64()
   2402 float32x2_t test_vcvtx_f32_f64(float64x2_t a) {  in test_vcvtx_f32_f64()
   [all …]
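The misc tests mix per-lane compares against zero (which produce all-ones/all-zeros uint64x2_t masks), sign operations, and narrowing conversions. A compressed sketch of the same calls:

    #include <arm_neon.h>

    void misc_ops(float64x2_t a, float64x2_t b, float32x2_t lo) {
        uint64x2_t eqz = vceqzq_f64(a);    /* lane mask: a[i] == 0.0 */
        float64x2_t ng = vnegq_f64(a);     /* per-lane negate        */
        float64x2_t av = vabsq_f64(a);     /* per-lane absolute value*/
        float32x2_t nw = vcvt_f32_f64(a);  /* narrow f64x2 -> f32x2  */
        float32x4_t hi = vcvt_high_f32_f64(lo, b); /* narrow b into the high half */
        (void)eqz; (void)ng; (void)av; (void)nw; (void)hi;
    }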
|
D | aarch64-neon-scalar-x-indexed-elem.c |
     45 float64_t test_vmuld_laneq_f64(float64_t a, float64x2_t b) {  in test_vmuld_laneq_f64()
     96 float64_t test_vmulxd_laneq_f64(float64_t a, float64x2_t b) {  in test_vmulxd_laneq_f64()
    129 float64x1_t test_vmulx_laneq_f64_0(float64x1_t a, float64x2_t b) {  in test_vmulx_laneq_f64_0()
    145 float64x1_t test_vmulx_laneq_f64_1(float64x1_t a, float64x2_t b) {  in test_vmulx_laneq_f64_1()
    176 float64_t test_vfmad_laneq_f64(float64_t a, float64_t b, float64x2_t c) {  in test_vfmad_laneq_f64()
    231 float64x1_t test_vfma_laneq_f64(float64x1_t a, float64x1_t b, float64x2_t v) {  in test_vfma_laneq_f64()
    247 float64x1_t test_vfms_laneq_f64(float64x1_t a, float64x1_t b, float64x2_t v) {  in test_vfms_laneq_f64()
    536 float64x2_t arg3;  in test_vmulx_laneq_f64_2()
|
D | arm64_vdupq_n_f64.c |
     11 float64x2_t test_vdupq_n_f64(float64_t w) {  in test_vdupq_n_f64()
     33 float64x2_t test_vdupq_lane_f64(float64x1_t V) {  in test_vdupq_lane_f64()
     43 float64x2_t test_vmovq_n_f64(float64_t w) {  in test_vmovq_n_f64()
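All three tests broadcast a double into both lanes. A minimal sketch (vmovq_n_f64 is a synonym for vdupq_n_f64):

    #include <arm_neon.h>

    float64x2_t splat(float64_t w) {
        return vdupq_n_f64(w);        /* { w, w } */
    }

    float64x2_t splat_lane(float64x1_t v) {
        return vdupq_lane_f64(v, 0);  /* broadcast lane 0; index must be constant */
    }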
|
D | aarch64-neon-2velem.c |
    405 float64x2_t test_vfmaq_lane_f64(float64x2_t a, float64x2_t b, float64x1_t v) {  in test_vfmaq_lane_f64()
    419 float64x2_t test_vfmaq_laneq_f64(float64x2_t a, float64x2_t b, float64x2_t v) {  in test_vfmaq_laneq_f64()
    434 float64x2_t test_vfmsq_lane_f64(float64x2_t a, float64x2_t b, float64x1_t v) {  in test_vfmsq_lane_f64()
    449 float64x2_t test_vfmsq_laneq_f64(float64x2_t a, float64x2_t b, float64x2_t v) {  in test_vfmsq_laneq_f64()
    492 float64_t test_vfmsd_laneq_f64(float64_t a, float64_t b, float64x2_t v) {  in test_vfmsd_laneq_f64()
   1515 float64x2_t test_vmulq_lane_f64(float64x2_t a, float64x1_t v) {  in test_vmulq_lane_f64()
   1536 float64x1_t test_vmul_laneq_f64(float64x1_t a, float64x2_t v) {  in test_vmul_laneq_f64()
   1553 float64x2_t test_vmulq_laneq_f64(float64x2_t a, float64x2_t v) {  in test_vmulq_laneq_f64()
   1589 float64x2_t test_vmulxq_lane_f64(float64x2_t a, float64x1_t v) {  in test_vmulxq_lane_f64()
   1625 float64x2_t test_vmulxq_laneq_f64(float64x2_t a, float64x2_t v) {  in test_vmulxq_laneq_f64()
   [all …]
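These are the by-lane variants: the multiplier is taken from a constant lane of another vector, so one loaded vector of coefficients can feed several fused operations. A sketch of the q-register forms (my own function names):

    #include <arm_neon.h>

    /* acc + x * v[1], fused; the lane index must be a compile-time constant */
    float64x2_t fma_by_lane(float64x2_t acc, float64x2_t x, float64x2_t v) {
        return vfmaq_laneq_f64(acc, x, v, 1);
    }

    float64x2_t mul_by_lane(float64x2_t x, float64x2_t v) {
        return vmulq_laneq_f64(x, v, 0);  /* x * v[0] per lane */
    }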
|
D | aarch64-neon-perm.c |
    122 float64x2_t test_vuzp1q_f64(float64x2_t a, float64x2_t b) {  in test_vuzp1q_f64()
    269 float64x2_t test_vuzp2q_f64(float64x2_t a, float64x2_t b) {  in test_vuzp2q_f64()
    416 float64x2_t test_vzip1q_f64(float64x2_t a, float64x2_t b) {  in test_vzip1q_f64()
    563 float64x2_t test_vzip2q_f64(float64x2_t a, float64x2_t b) {  in test_vzip2q_f64()
    710 float64x2_t test_vtrn1q_f64(float64x2_t a, float64x2_t b) {  in test_vtrn1q_f64()
    857 float64x2_t test_vtrn2q_f64(float64x2_t a, float64x2_t b) {  in test_vtrn2q_f64()
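With only two lanes, each permute picks one lane from each input, so for f64 the uzp1/zip1/trn1 family all produce { a[0], b[0] } and uzp2/zip2/trn2 all produce { a[1], b[1] }. A sketch of one representative pair:

    #include <arm_neon.h>

    void permutes(float64x2_t a, float64x2_t b) {
        float64x2_t lo = vzip1q_f64(a, b);  /* { a[0], b[0] } */
        float64x2_t hi = vzip2q_f64(a, b);  /* { a[1], b[1] } */
        (void)lo; (void)hi;
    }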
|
D | arm64_vcopy.c | 118 float64x2_t test_vcopyq_laneq_f64(float64x2_t a1, float64x2_t a2) { in test_vcopyq_laneq_f64()
|
D | aarch64-neon-intrinsics.c |
    111 float64x2_t test_vaddq_f64(float64x2_t v1, float64x2_t v2) {  in test_vaddq_f64()
    242 float64x2_t test_vsubq_f64(float64x2_t v1, float64x2_t v2) {  in test_vsubq_f64()
    376 float64x2_t test_vmulq_f64(float64x2_t v1, float64x2_t v2) {  in test_vmulq_f64()
    516 float64x2_t test_vmlaq_f64(float64x2_t v1, float64x2_t v2, float64x2_t v3) {  in test_vmlaq_f64()
    635 float64x2_t test_vmlsq_f64(float64x2_t v1, float64x2_t v2, float64x2_t v3) {  in test_vmlsq_f64()
    673 float64x2_t test_vfmaq_f64(float64x2_t v1, float64x2_t v2, float64x2_t v3) {  in test_vfmaq_f64()
    714 float64x2_t test_vfmsq_f64(float64x2_t v1, float64x2_t v2, float64x2_t v3) {  in test_vfmsq_f64()
    721 float64x2_t test_vdivq_f64(float64x2_t v1, float64x2_t v2) {  in test_vdivq_f64()
   1012 float64x2_t test_vabdq_f64(float64x2_t v1, float64x2_t v2) {  in test_vabdq_f64()
   1367 float64x2_t test_vbslq_f64(uint64x2_t v1, float64x2_t v2, float64x2_t v3) {  in test_vbslq_f64()
   [all …]
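aarch64-neon-intrinsics.c covers the elementwise arithmetic core, including the f64 division and fused forms that only exist on AArch64. A sketch of a few of the calls under test:

    #include <arm_neon.h>

    float64x2_t fused_madd(float64x2_t acc, float64x2_t x, float64x2_t y) {
        return vfmaq_f64(acc, x, y);  /* acc + x*y, single rounding */
    }

    void basics(float64x2_t v1, float64x2_t v2, uint64x2_t mask) {
        float64x2_t s = vaddq_f64(v1, v2);
        float64x2_t q = vdivq_f64(v1, v2);        /* f64 divide, AArch64 only    */
        float64x2_t c = vbslq_f64(mask, v1, v2);  /* bitwise select per mask bit */
        (void)s; (void)q; (void)c;
    }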
|
D | aarch64-neon-extract.c | 209 float64x2_t test_vextq_f64(float64x2_t a, float64x2_t b) { in test_vextq_f64()
|
D | aarch64-neon-vget-hilo.c |
    102 float64x1_t test_vget_high_f64(float64x2_t a) {  in test_vget_high_f64()
    200 float64x1_t test_vget_low_f64(float64x2_t a) {  in test_vget_low_f64()
|
D | aarch64-poly128.c |
    135 poly128_t test_vreinterpretq_p128_f64(float64x2_t a) {  in test_vreinterpretq_p128_f64()
    226 float64x2_t test_vreinterpretq_f64_p128(poly128_t a) {  in test_vreinterpretq_f64_p128()
|
D | aarch64-neon-vcombine.c | 101 float64x2_t test_vcombine_f64(float64x1_t low, float64x1_t high) { in test_vcombine_f64()
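vget_low_f64/vget_high_f64 (two entries up) split a q-register into its two d-register halves, and vcombine_f64 reassembles them, so the round trip is the identity:

    #include <arm_neon.h>

    float64x2_t split_and_combine(float64x2_t a) {
        float64x1_t lo = vget_low_f64(a);
        float64x1_t hi = vget_high_f64(a);
        return vcombine_f64(lo, hi);  /* == a */
    }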
|
D | aarch64-neon-scalar-copy.c | 42 float64_t test_vdupd_laneq_f64(float64x2_t a) { in test_vdupd_laneq_f64()
|
D | aarch64-neon-ldst-one.c |
    121 float64x2_t test_vld1q_dup_f64(float64_t *a) {  in test_vld1q_dup_f64()
   1930 float64x2_t test_vld1q_lane_f64(float64_t *a, float64x2_t b) {  in test_vld1q_lane_f64()
   5393 void test_vst1q_lane_f64(float64_t *a, float64x2_t b) {  in test_vst1q_lane_f64()
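The load/store-one tests move a single lane between memory and a vector without disturbing the other lane. A sketch:

    #include <arm_neon.h>

    void lane_memory_ops(float64_t *p, float64x2_t v) {
        float64x2_t dup = vld1q_dup_f64(p);         /* *p into both lanes     */
        float64x2_t upd = vld1q_lane_f64(p, v, 1);  /* replace lane 1 with *p */
        vst1q_lane_f64(p, v, 0);                    /* store lane 0 to *p     */
        (void)dup; (void)upd;
    }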
|
/external/clang/test/Sema/ |
D | vector-cast.c |
     49 typedef __attribute__((vector_size(16))) double float64x2_t;  typedef
     50 float64x1_t vget_low_f64(float64x2_t __p0);
     55 float64x2_t v = {0.0, 1.0};  in f4()
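The Sema test deliberately builds float64x2_t from the generic vector_size extension instead of <arm_neon.h>, so the cast rules can be checked on any target. A compressed sketch of the pattern the hits show (the float64x1_t typedef is my assumption of the analogous earlier line):

    /* assumed earlier in the file */
    typedef __attribute__((vector_size(8)))  double float64x1_t;
    typedef __attribute__((vector_size(16))) double float64x2_t;

    float64x1_t vget_low_f64(float64x2_t __p0);  /* declaration only, as in the test */

    void f4(void) {
        float64x2_t v = {0.0, 1.0};
        float64x1_t lo = vget_low_f64(v);
        (void)lo;
    }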
|
D | aarch64-neon-ranges.c | 22 void test_mul_lane_f64(float64x1_t small, float64x2_t big, float64x2_t rhs) { in test_mul_lane_f64()
|
/external/clang/test/CodeGenCXX/ |
D | mangle-neon-vectors.cpp |
     26 typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t;  typedef
     73 void f11(float64x2_t v) { }  in f11()
|
D | aarch64-mangle-neon-vectors.cpp |
     33 typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t;  typedef
     84 void f23(float64x2_t) {}  in f23() argument
|
/external/eigen/Eigen/src/Core/arch/NEON/ |
D | PacketMath.h |
    550 float64x2_t vreinterpretq_f64_u64(T a)
    552 return (float64x2_t) a;
    555 typedef float64x2_t Packet2d;
    674 float64x2_t trn1, trn2;
    717 float64x2_t trn1 = vzip1q_f64(kernel.packet[0], kernel.packet[1]);
    718 float64x2_t trn2 = vzip2q_f64(kernel.packet[0], kernel.packet[1]);
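Eigen's NEON backend types its double packet as float64x2_t and, as the ptranspose hits suggest, builds a 2x2 transpose from the zip intrinsics. A standalone sketch of that idea (illustrative names, not Eigen's exact code):

    #include <arm_neon.h>

    /* transpose a 2x2 matrix stored as two row vectors */
    void transpose2x2(float64x2_t *r0, float64x2_t *r1) {
        float64x2_t c0 = vzip1q_f64(*r0, *r1);  /* { r0[0], r1[0] } */
        float64x2_t c1 = vzip2q_f64(*r0, *r1);  /* { r0[1], r1[1] } */
        *r0 = c0;
        *r1 = c1;
    }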
|
/external/arm-optimized-routines/math/ |
D | v_math.h | 372 typedef float64x2_t v_f64_t;
|
/external/neon_2_sse/ |
D | NEON_2_SSE.h |
    161 typedef __m128d float64x2_t;  typedef
    877 _NEON2SSESTORAGE float64x2_t vmaxq_f64(float64x2_t a, float64x2_t b); // VMAX.F64 q0,q0,q0
    895 _NEON2SSESTORAGE float64x2_t vminq_f64(float64x2_t a, float64x2_t b); // VMIN.F64 q0,q0,q0
   1247 _NEON2SSESTORAGE float64x2_t vld1q_f64(__transfersize(4) float64_t const * ptr); // VLD1.64 {d0, d1…
   2030 _NEON2SSESTORAGE float64x2_t vabsq_f64(float64x2_t a); // VABS.F64 q0,q0
   2277 _NEON2SSESTORAGE float64x2_t vrndnq_f64(float64x2_t a); // VRND.F64 q0,q0
   2282 _NEON2SSESTORAGE float64x2_t vsqrtq_f64(float64x2_t a); // VSQRT.F64 q0,q0
   6164 _NEON2SSESTORAGE float64x2_t vmaxq_f64(float64x2_t a, float64x2_t b); // VMAX.F64 q0,q0,q0
   6253 _NEON2SSESTORAGE float64x2_t vminq_f64(float64x2_t a, float64x2_t b); // VMIN.F64 q0,q0,q0
   9325 _NEON2SSESTORAGE float64x2_t vld1q_f64(__transfersize(4) float64_t const * ptr); // VLD1.64 {d0, d1…
   [all …]
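NEON_2_SSE.h retargets the NEON API to x86 by typedef'ing float64x2_t to __m128d and reimplementing each intrinsic over SSE. As one illustration, a plausible SSE2 lowering of two of the prototypes above (my own sketch, not necessarily the library's code):

    #include <emmintrin.h>

    typedef __m128d float64x2_t;  /* as on line 161 of the header */

    /* vabsq_f64: clear each lane's sign bit */
    static inline float64x2_t vabsq_f64_sse(float64x2_t a) {
        return _mm_andnot_pd(_mm_set1_pd(-0.0), a);
    }

    /* vmaxq_f64 maps onto the packed max; note MAXPD and NEON FMAX
       differ on NaN inputs, so a faithful port needs extra handling */
    static inline float64x2_t vmaxq_f64_sse(float64x2_t a, float64x2_t b) {
        return _mm_max_pd(a, b);
    }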
|