Searched refs:vaddq_s64 (Results 1 – 11 of 11) sorted by relevance
/external/libvpx/libvpx/vp9/common/arm/neon/

vp9_highbd_iht4x4_add_neon.c
    46    s[0].val[0] = vaddq_s64(s[0].val[0], s[3].val[0]);    in highbd_iadst4()
    47    s[0].val[1] = vaddq_s64(s[0].val[1], s[3].val[1]);    in highbd_iadst4()
    48    s[0].val[0] = vaddq_s64(s[0].val[0], s[5].val[0]);    in highbd_iadst4()
    49    s[0].val[1] = vaddq_s64(s[0].val[1], s[5].val[1]);    in highbd_iadst4()
    58    t[0].val[0] = vaddq_s64(s[0].val[0], s[3].val[0]);    in highbd_iadst4()
    59    t[0].val[1] = vaddq_s64(s[0].val[1], s[3].val[1]);    in highbd_iadst4()
    60    t[1].val[0] = vaddq_s64(s[1].val[0], s[3].val[0]);    in highbd_iadst4()
    61    t[1].val[1] = vaddq_s64(s[1].val[1], s[3].val[1]);    in highbd_iadst4()
    63    t[3].val[0] = vaddq_s64(s[0].val[0], s[1].val[0]);    in highbd_iadst4()
    64    t[3].val[1] = vaddq_s64(s[0].val[1], s[1].val[1]);    in highbd_iadst4()

vp9_highbd_iht8x8_add_neon.c
    72    const int64x2_t sum_lo = vaddq_s64(in0[0], in1[0]);    in highbd_add_dct_const_round_shift_low_8()
    73    const int64x2_t sum_hi = vaddq_s64(in0[1], in1[1]);    in highbd_add_dct_const_round_shift_low_8()

vp9_highbd_iht16x16_add_neon.c
    91    out.val[0] = vaddq_s64(in0.val[0], in1.val[0]);    in vaddq_s64_dual()
    92    out.val[1] = vaddq_s64(in0.val[1], in1.val[1]);    in vaddq_s64_dual()
/external/libaom/libaom/av1/encoder/arm/neon/

av1_error_neon.c
    36    error = vaddq_s64(error, err2);    in av1_block_error_neon()
    44    sqcoeff = vaddq_s64(sqcoeff, sqcoeff2);    in av1_block_error_neon()
    78    error = vaddq_s64(error, err2);    in av1_block_error_lp_neon()
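
In these error kernels, vaddq_s64 is the 64-bit accumulator step: squared coefficient values are widened and folded into an int64x2_t running sum that is reduced to a scalar at the end. A minimal sketch of that accumulate-and-reduce pattern follows; it is not the actual libaom kernel (the real code processes wider blocks and tracks two sums), just the shape of the idiom, with hypothetical names.

    #include <arm_neon.h>
    #include <stdint.h>

    /* Sketch: sum of squared int16 values, accumulated in 64-bit lanes so the
       running total cannot overflow.  n is assumed to be a multiple of 4. */
    static int64_t sum_squares_sketch(const int16_t *coeff, int n) {
      int64x2_t acc = vdupq_n_s64(0);
      for (int i = 0; i < n; i += 4) {
        const int16x4_t c = vld1_s16(coeff + i);
        const int32x4_t sq = vmull_s16(c, c);      /* 16x16 -> 32-bit squares */
        acc = vaddq_s64(acc, vpaddlq_s32(sq));     /* widen pairs, accumulate */
      }
      return vgetq_lane_s64(acc, 0) + vgetq_lane_s64(acc, 1);  /* final reduce */
    }
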
/external/libvpx/libvpx/vp9/encoder/arm/neon/

vp9_error_neon.c
    34    error = vaddq_s64(error, err2);    in vp9_block_error_fp_neon()
/external/webrtc/webrtc/common_audio/signal_processing/

cross_correlation_neon.c
    53    sum0 = vaddq_s64(sum0, sum1);    in DotProductWithScaleNeon()
/external/libvpx/libvpx/vpx_dsp/arm/

highbd_idct_neon.h
    83    c[6] = vaddq_s64(c[6], c[10]);    in idct4x4_16_kernel_bd12()
    84    c[7] = vaddq_s64(c[7], c[11]);    in idct4x4_16_kernel_bd12()
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/

neon_tensor_utils.cc
    1338    result.val[0] = vaddq_s64(vmovl_s32(vget_low_s32(acc)), mul_0);    in MulAdd()
    1339    result.val[1] = vaddq_s64(vmovl_s32(vget_high_s32(acc)), mul_1);    in MulAdd()
/external/clang/test/CodeGen/

arm_neon_intrinsics.c
    638     return vaddq_s64(a, b);    in test_vaddq_s64()
    3060    return vaddq_s64(tmp, tmp);    in test_vdupq_n_s64()

aarch64-neon-intrinsics.c
    98    return vaddq_s64(v1, v2);    in test_vaddq_s64()
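
The CodeGen tests above only verify that the builtin lowers to a 128-bit integer add. Semantically, vaddq_s64 is a lane-wise addition of two int64x2_t vectors; a minimal standalone sketch (assuming an ARM/AArch64 toolchain with <arm_neon.h>):

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
      const int64x2_t a = vdupq_n_s64(40);                              /* {40, 40} */
      const int64x2_t b = vcombine_s64(vdup_n_s64(2), vdup_n_s64(-2));  /* {2, -2}  */
      const int64x2_t c = vaddq_s64(a, b);                              /* {42, 38} */
      printf("%lld %lld\n", (long long)vgetq_lane_s64(c, 0),
             (long long)vgetq_lane_s64(c, 1));
      return 0;
    }
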
/external/neon_2_sse/

NEON_2_SSE.h
    457     _NEON2SSESTORAGE int64x2_t vaddq_s64(int64x2_t a, int64x2_t b); // VADD.I64 q0,q0,q0
    2855    _NEON2SSESTORAGE int64x2_t vaddq_s64(int64x2_t a, int64x2_t b); // VADD.I64 q0,q0,q0
    2856    #define vaddq_s64 _mm_add_epi64    macro
    8038    return vaddq_s64( a, shift);
    8171    return vaddq_s64(a, shift);
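
NEON_2_SSE.h can map the intrinsic straight onto SSE2 because a lane-wise 64-bit add has an exact x86 counterpart, _mm_add_epi64. A sketch of what that macro expansion amounts to on x86 (SSE2 headers only, no NEON):

    #include <emmintrin.h>  /* SSE2 */
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      /* int64x2_t becomes __m128i; vaddq_s64(a, b) expands to _mm_add_epi64(a, b). */
      const __m128i a = _mm_set_epi64x(40, 40);   /* arguments are high, low */
      const __m128i b = _mm_set_epi64x(-2, 2);
      const __m128i c = _mm_add_epi64(a, b);      /* low lane 42, high lane 38 */
      int64_t out[2];
      _mm_storeu_si128((__m128i *)out, c);
      printf("%lld %lld\n", (long long)out[0], (long long)out[1]);
      return 0;
    }
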