/external/XNNPACK/src/requantization/ |
D | precise-neon.c |
     44  const int64x2_t vshift = vdupq_n_s64(-shift);  in xnn_requantize_precise__neon()
     60  const int64x2_t x01_product = vmull_s32(vget_low_s32(x), vget_low_s32(vmultiplier));  in xnn_requantize_precise__neon()
     61  const int64x2_t x23_product = vmull_high_s32(x, vmultiplier);  in xnn_requantize_precise__neon()
     62  const int64x2_t y01_product = vmull_s32(vget_low_s32(y), vget_low_s32(vmultiplier));  in xnn_requantize_precise__neon()
     63  const int64x2_t y23_product = vmull_high_s32(y, vmultiplier);  in xnn_requantize_precise__neon()
     64  const int64x2_t z01_product = vmull_s32(vget_low_s32(z), vget_low_s32(vmultiplier));  in xnn_requantize_precise__neon()
     65  const int64x2_t z23_product = vmull_high_s32(z, vmultiplier);  in xnn_requantize_precise__neon()
     66  const int64x2_t w01_product = vmull_s32(vget_low_s32(w), vget_low_s32(vmultiplier));  in xnn_requantize_precise__neon()
     67  const int64x2_t w23_product = vmull_high_s32(w, vmultiplier);  in xnn_requantize_precise__neon()
     69  const int64x2_t x01_product = vmull_s32(vget_low_s32(x), vmultiplier);  in xnn_requantize_precise__neon()
     [all …]
|
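All of the requantization hits above follow the same widening pattern: each int32x4_t block is split into low and high halves and multiplied against a fixed-point multiplier with vmull_s32 / vmull_high_s32 so the products stay exact in 64 bits, then shifted back down. A minimal sketch of that step, assuming AArch64 and using illustrative names (not XNNPACK's actual helpers):

    #include <arm_neon.h>

    /* Scale four int32 lanes by a fixed-point multiplier with exact 64-bit
     * intermediates, then rounding-shift right and saturate back to 32 bits.
     * vshift is negative, as in the vdupq_n_s64(-shift) line above. */
    static inline int32x4_t rescale_precise(int32x4_t x, int32x4_t vmultiplier,
                                            int64x2_t vshift) {
      const int64x2_t x01_product = vmull_s32(vget_low_s32(x), vget_low_s32(vmultiplier));
      const int64x2_t x23_product = vmull_high_s32(x, vmultiplier);
      /* vrshlq_s64 with a negative count is a rounding shift right. */
      const int64x2_t x01_scaled = vrshlq_s64(x01_product, vshift);
      const int64x2_t x23_scaled = vrshlq_s64(x23_product, vshift);
      return vcombine_s32(vqmovn_s64(x01_scaled), vqmovn_s64(x23_scaled));
    }

The real kernel also applies a sign-dependent adjustment before the shift (the same idea as the vneg_mask lines in the avgpool entries below); this sketch skips it.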
/external/XNNPACK/src/q8-avgpool/ |
D | up9-neon.c |
     39  const int64x2_t vleft_shift = vld1q_dup_s64(&params->neon.left_shift);  in xnn_q8_avgpool_ukernel_up9__neon()
    108  const int64x2_t vproduct01 = vmull_s32(vget_low_s32(vacc_lo), vget_low_s32(vmultiplier));  in xnn_q8_avgpool_ukernel_up9__neon()
    109  const int64x2_t vproduct23 = vmull_high_s32(vacc_lo, vmultiplier);  in xnn_q8_avgpool_ukernel_up9__neon()
    110  const int64x2_t vproduct45 = vmull_s32(vget_low_s32(vacc_hi), vget_low_s32(vmultiplier));  in xnn_q8_avgpool_ukernel_up9__neon()
    111  const int64x2_t vproduct67 = vmull_high_s32(vacc_hi, vmultiplier);  in xnn_q8_avgpool_ukernel_up9__neon()
    113  const int64x2_t vadjusted_product01 = vaddw_s32(vproduct01, vget_low_s32(vneg_mask_lo));  in xnn_q8_avgpool_ukernel_up9__neon()
    114  const int64x2_t vadjusted_product23 = vaddw_high_s32(vproduct23, vneg_mask_lo);  in xnn_q8_avgpool_ukernel_up9__neon()
    115  const int64x2_t vadjusted_product45 = vaddw_s32(vproduct45, vget_low_s32(vneg_mask_hi));  in xnn_q8_avgpool_ukernel_up9__neon()
    116  const int64x2_t vadjusted_product67 = vaddw_high_s32(vproduct67, vneg_mask_hi);  in xnn_q8_avgpool_ukernel_up9__neon()
    118  const int64x2_t vproduct01 = vmull_s32(vget_low_s32(vacc_lo), vmultiplier);  in xnn_q8_avgpool_ukernel_up9__neon()
     [all …]
|
D | mp9p8q-neon.c |
     39  const int64x2_t vleft_shift = vld1q_dup_s64(&params->neon.left_shift);  in xnn_q8_avgpool_ukernel_mp9p8q__neon()
    188  const int64x2_t vproduct01 = vmull_s32(vget_low_s32(vacc_lo), vget_low_s32(vmultiplier));  in xnn_q8_avgpool_ukernel_mp9p8q__neon()
    189  const int64x2_t vproduct23 = vmull_high_s32(vacc_lo, vmultiplier);  in xnn_q8_avgpool_ukernel_mp9p8q__neon()
    190  const int64x2_t vproduct45 = vmull_s32(vget_low_s32(vacc_hi), vget_low_s32(vmultiplier));  in xnn_q8_avgpool_ukernel_mp9p8q__neon()
    191  const int64x2_t vproduct67 = vmull_high_s32(vacc_hi, vmultiplier);  in xnn_q8_avgpool_ukernel_mp9p8q__neon()
    193  const int64x2_t vadjusted_product01 = vaddw_s32(vproduct01, vget_low_s32(vneg_mask_lo));  in xnn_q8_avgpool_ukernel_mp9p8q__neon()
    194  const int64x2_t vadjusted_product23 = vaddw_high_s32(vproduct23, vneg_mask_lo);  in xnn_q8_avgpool_ukernel_mp9p8q__neon()
    195  const int64x2_t vadjusted_product45 = vaddw_s32(vproduct45, vget_low_s32(vneg_mask_hi));  in xnn_q8_avgpool_ukernel_mp9p8q__neon()
    196  const int64x2_t vadjusted_product67 = vaddw_high_s32(vproduct67, vneg_mask_hi);  in xnn_q8_avgpool_ukernel_mp9p8q__neon()
    198  const int64x2_t vproduct01 = vmull_s32(vget_low_s32(vacc_lo), vmultiplier);  in xnn_q8_avgpool_ukernel_mp9p8q__neon()
     [all …]
|
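Both the up9 and mp9p8q kernels rescale their accumulators identically: widen against the multiplier, add a per-lane correction derived from the accumulator's sign (the vneg_mask / vaddw lines), then apply the stored left_shift as a rounding shift. A hedged sketch of one half of that sequence; the mask derivation and the final narrowing are my reconstruction, not the kernel verbatim:

    #include <arm_neon.h>

    /* Sketch of the avgpool rescale step for the low four lanes.
     * vmultiplier and vleft_shift come from the kernel's params struct. */
    static inline int32x4_t avgpool_rescale_lo(int32x4_t vacc_lo,
                                               int32x4_t vmultiplier,
                                               int64x2_t vleft_shift) {
      const int32x4_t vneg_mask_lo =
          vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vdupq_n_s32(0)));   /* -1 where acc < 0 */
      const int64x2_t vproduct01 = vmull_s32(vget_low_s32(vacc_lo), vget_low_s32(vmultiplier));
      const int64x2_t vproduct23 = vmull_high_s32(vacc_lo, vmultiplier);
      const int64x2_t vadj01 = vaddw_s32(vproduct01, vget_low_s32(vneg_mask_lo));
      const int64x2_t vadj23 = vaddw_high_s32(vproduct23, vneg_mask_lo);
      const int64x2_t vscaled01 = vrshlq_s64(vadj01, vleft_shift);     /* rounding shift */
      const int64x2_t vscaled23 = vrshlq_s64(vadj23, vleft_shift);
      return vcombine_s32(vmovn_s64(vscaled01), vmovn_s64(vscaled23));
    }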
/external/XNNPACK/src/q8-gavgpool/ |
D | up7-neon.c |
     62  const int64x2_t vleft_shift = vld1q_dup_s64(&params->neon.left_shift);  in xnn_q8_gavgpool_ukernel_up7__neon()
     90  const int64x2_t vproduct01 = vmull_s32(vget_low_s32(vacc_lo), vget_low_s32(vmultiplier));  in xnn_q8_gavgpool_ukernel_up7__neon()
     91  const int64x2_t vproduct23 = vmull_high_s32(vacc_lo, vmultiplier);  in xnn_q8_gavgpool_ukernel_up7__neon()
     92  const int64x2_t vproduct45 = vmull_s32(vget_low_s32(vacc_hi), vget_low_s32(vmultiplier));  in xnn_q8_gavgpool_ukernel_up7__neon()
     93  const int64x2_t vproduct67 = vmull_high_s32(vacc_hi, vmultiplier);  in xnn_q8_gavgpool_ukernel_up7__neon()
     95  const int64x2_t vadjusted_product01 = vaddw_s32(vproduct01, vget_low_s32(vneg_mask_lo));  in xnn_q8_gavgpool_ukernel_up7__neon()
     96  const int64x2_t vadjusted_product23 = vaddw_high_s32(vproduct23, vneg_mask_lo);  in xnn_q8_gavgpool_ukernel_up7__neon()
     97  const int64x2_t vadjusted_product45 = vaddw_s32(vproduct45, vget_low_s32(vneg_mask_hi));  in xnn_q8_gavgpool_ukernel_up7__neon()
     98  const int64x2_t vadjusted_product67 = vaddw_high_s32(vproduct67, vneg_mask_hi);  in xnn_q8_gavgpool_ukernel_up7__neon()
    100  const int64x2_t vproduct01 = vmull_s32(vget_low_s32(vacc_lo), vmultiplier);  in xnn_q8_gavgpool_ukernel_up7__neon()
     [all …]
|
D | mp7p7q-neon.c |
    108  const int64x2_t vleft_shift = vld1q_dup_s64(&params->neon.left_shift);  in xnn_q8_gavgpool_ukernel_mp7p7q__neon()
    166  const int64x2_t vproduct01 = vmull_s32(vget_low_s32(vacc_lo), vget_low_s32(vmultiplier));  in xnn_q8_gavgpool_ukernel_mp7p7q__neon()
    167  const int64x2_t vproduct23 = vmull_high_s32(vacc_lo, vmultiplier);  in xnn_q8_gavgpool_ukernel_mp7p7q__neon()
    168  const int64x2_t vproduct45 = vmull_s32(vget_low_s32(vacc_hi), vget_low_s32(vmultiplier));  in xnn_q8_gavgpool_ukernel_mp7p7q__neon()
    169  const int64x2_t vproduct67 = vmull_high_s32(vacc_hi, vmultiplier);  in xnn_q8_gavgpool_ukernel_mp7p7q__neon()
    171  const int64x2_t vadjusted_product01 = vaddw_s32(vproduct01, vget_low_s32(vneg_mask_lo));  in xnn_q8_gavgpool_ukernel_mp7p7q__neon()
    172  const int64x2_t vadjusted_product23 = vaddw_high_s32(vproduct23, vneg_mask_lo);  in xnn_q8_gavgpool_ukernel_mp7p7q__neon()
    173  const int64x2_t vadjusted_product45 = vaddw_s32(vproduct45, vget_low_s32(vneg_mask_hi));  in xnn_q8_gavgpool_ukernel_mp7p7q__neon()
    174  const int64x2_t vadjusted_product67 = vaddw_high_s32(vproduct67, vneg_mask_hi);  in xnn_q8_gavgpool_ukernel_mp7p7q__neon()
    176  const int64x2_t vproduct01 = vmull_s32(vget_low_s32(vacc_lo), vmultiplier);  in xnn_q8_gavgpool_ukernel_mp7p7q__neon()
     [all …]
|
/external/clang/test/CodeGen/ |
D | aarch64-neon-2velem.c |
    518  int64x2_t test_vmlal_lane_s32(int64x2_t a, int32x2_t b, int32x2_t v) {  in test_vmlal_lane_s32()
    544  int64x2_t test_vmlal_laneq_s32(int64x2_t a, int32x2_t b, int32x4_t v) {  in test_vmlal_laneq_s32()
    572  int64x2_t test_vmlal_high_lane_s32(int64x2_t a, int32x4_t b, int32x2_t v) {  in test_vmlal_high_lane_s32()
    600  int64x2_t test_vmlal_high_laneq_s32(int64x2_t a, int32x4_t b, int32x4_t v) {  in test_vmlal_high_laneq_s32()
    626  int64x2_t test_vmlsl_lane_s32(int64x2_t a, int32x2_t b, int32x2_t v) {  in test_vmlsl_lane_s32()
    652  int64x2_t test_vmlsl_laneq_s32(int64x2_t a, int32x2_t b, int32x4_t v) {  in test_vmlsl_laneq_s32()
    680  int64x2_t test_vmlsl_high_lane_s32(int64x2_t a, int32x4_t b, int32x2_t v) {  in test_vmlsl_high_lane_s32()
    708  int64x2_t test_vmlsl_high_laneq_s32(int64x2_t a, int32x4_t b, int32x4_t v) {  in test_vmlsl_high_laneq_s32()
    734  int64x2_t test_vmlal_lane_u32(int64x2_t a, int32x2_t b, int32x2_t v) {  in test_vmlal_lane_u32()
    760  int64x2_t test_vmlal_laneq_u32(int64x2_t a, int32x2_t b, int32x4_t v) {  in test_vmlal_laneq_u32()
     [all …]
|
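These tests cover the by-lane widening multiply-accumulate family; the signed forms are used roughly like this (wrapper names are illustrative):

    #include <arm_neon.h>

    /* acc += (int64)b[i] * v[1], element-wise with 64-bit widening. */
    int64x2_t mac_by_lane(int64x2_t acc, int32x2_t b, int32x2_t v) {
      return vmlal_lane_s32(acc, b, v, 1);
    }

    /* Same idea, but consuming the high half of a 128-bit input. */
    int64x2_t mac_high_by_lane(int64x2_t acc, int32x4_t b, int32x2_t v) {
      return vmlal_high_lane_s32(acc, b, v, 0);
    }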
D | arm64-vrnd.c |
      9  int64x2_t rnd5(float64x2_t a) { return vrndq_f64(a); }  in rnd5()
     17  int64x2_t rnd9(float64x2_t a) { return vrndnq_f64(a); }  in rnd9()
     19  int64x2_t rnd10(float64x2_t a) { return vrndnq_f64(a); }  in rnd10()
     26  int64x2_t rnd13(float64x2_t a) { return vrndmq_f64(a); }  in rnd13()
     28  int64x2_t rnd14(float64x2_t a) { return vrndmq_f64(a); }  in rnd14()
     35  int64x2_t rnd18(float64x2_t a) { return vrndpq_f64(a); }  in rnd18()
     42  int64x2_t rnd22(float64x2_t a) { return vrndaq_f64(a); }  in rnd22()
     49  int64x2_t rnd25(float64x2_t a) { return vrndxq_f64(a); }  in rnd25()
|
D | aarch64-neon-3v.c |
     59  int64x2_t test_vandq_s64(int64x2_t a, int64x2_t b) {  in test_vandq_s64()
    171  int64x2_t test_vorrq_s64(int64x2_t a, int64x2_t b) {  in test_vorrq_s64()
    283  int64x2_t test_veorq_s64(int64x2_t a, int64x2_t b) {  in test_veorq_s64()
    403  int64x2_t test_vbicq_s64(int64x2_t a, int64x2_t b) {  in test_vbicq_s64()
    531  int64x2_t test_vornq_s64(int64x2_t a, int64x2_t b) {  in test_vornq_s64()
|
D | aarch64-neon-misc.c |
    101  uint64x2_t test_vceqzq_s64(int64x2_t a) {  in test_vceqzq_s64()
    329  uint64x2_t test_vcgezq_s64(int64x2_t a) {  in test_vcgezq_s64()
    445  uint64x2_t test_vclezq_s64(int64x2_t a) {  in test_vclezq_s64()
    561  uint64x2_t test_vcgtzq_s64(int64x2_t a) {  in test_vcgtzq_s64()
    677  uint64x2_t test_vcltzq_s64(int64x2_t a) {  in test_vcltzq_s64()
   1044  int64x2_t test_vpaddlq_s32(int32x4_t a) {  in test_vpaddlq_s32()
   1171  int64x2_t test_vpadalq_s32(int64x2_t a, int32x4_t b) {  in test_vpadalq_s32()
   1274  int64x2_t test_vqabsq_s64(int64x2_t a) {  in test_vqabsq_s64()
   1343  int64x2_t test_vqnegq_s64(int64x2_t a) {  in test_vqnegq_s64()
   1392  int64x2_t test_vnegq_s64(int64x2_t a) {  in test_vnegq_s64()
     [all …]
|
D | aarch64-neon-intrinsics.c |
     97  int64x2_t test_vaddq_s64(int64x2_t v1, int64x2_t v2) {  in test_vaddq_s64()
    228  int64x2_t test_vsubq_s64(int64x2_t v1, int64x2_t v2) {  in test_vsubq_s64()
   1249  int64x2_t test_vbslq_s64(uint64x2_t v1, int64x2_t v2, int64x2_t v3) {  in test_vbslq_s64()
   1780  uint64x2_t test_vtstq_s64(int64x2_t v1, int64x2_t v2) {  in test_vtstq_s64()
   2024  uint64x2_t test_vceqq_s64(int64x2_t v1, int64x2_t v2) {  in test_vceqq_s64()
   2183  uint64x2_t test_vcgeq_s64(int64x2_t v1, int64x2_t v2) {  in test_vcgeq_s64()
   2346  uint64x2_t test_vcleq_s64(int64x2_t v1, int64x2_t v2) {  in test_vcleq_s64()
   2507  uint64x2_t test_vcgtq_s64(int64x2_t v1, int64x2_t v2) {  in test_vcgtq_s64()
   2672  uint64x2_t test_vcltq_s64(int64x2_t v1, int64x2_t v2) {  in test_vcltq_s64()
   3224  int64x2_t test_vqaddq_s64(int64x2_t a, int64x2_t b) {  in test_vqaddq_s64()
     [all …]
|
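The intrinsics tests exercise the full-width 64-bit lane operations; two representative ones, shown as plain usage:

    #include <arm_neon.h>

    /* Saturating add: each lane clamps at INT64_MAX / INT64_MIN. */
    int64x2_t sat_add(int64x2_t a, int64x2_t b) {
      return vqaddq_s64(a, b);
    }

    /* Comparison: all-ones per lane where a >= b, else zero. */
    uint64x2_t ge_mask(int64x2_t a, int64x2_t b) {
      return vcgeq_s64(a, b);
    }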
D | arm_neon_intrinsics.c |
    193  int64x2_t test_vabal_s32(int64x2_t a, int32x2_t b, int32x2_t c) {  in test_vabal_s32()
    436  int64x2_t test_vabdl_s32(int32x2_t a, int32x2_t b) {  in test_vabdl_s32()
    637  int64x2_t test_vaddq_s64(int64x2_t a, int64x2_t b) {  in test_vaddq_s64()
    712  int32x2_t test_vaddhn_s64(int64x2_t a, int64x2_t b) {  in test_vaddhn_s64()
    787  int64x2_t test_vaddl_s32(int32x2_t a, int32x2_t b) {  in test_vaddl_s32()
    851  int64x2_t test_vaddw_s32(int64x2_t a, int32x2_t b) {  in test_vaddw_s32()
    964  int64x2_t test_vandq_s64(int64x2_t a, int64x2_t b) {  in test_vandq_s64()
   1089  int64x2_t test_vbicq_s64(int64x2_t a, int64x2_t b) {  in test_vbicq_s64()
   1271  int64x2_t test_vbslq_s64(uint64x2_t a, int64x2_t b, int64x2_t c) {  in test_vbslq_s64()
   2261  int64x2_t test_vcombine_s64(int64x1_t a, int64x1_t b) {  in test_vcombine_s64()
     [all …]
|
D | aarch64-neon-perm.c |
     52  int64x2_t test_vuzp1q_s64(int64x2_t a, int64x2_t b) {  in test_vuzp1q_s64()
    199  int64x2_t test_vuzp2q_s64(int64x2_t a, int64x2_t b) {  in test_vuzp2q_s64()
    346  int64x2_t test_vzip1q_s64(int64x2_t a, int64x2_t b) {  in test_vzip1q_s64()
    493  int64x2_t test_vzip2q_s64(int64x2_t a, int64x2_t b) {  in test_vzip2q_s64()
    640  int64x2_t test_vtrn1q_s64(int64x2_t a, int64x2_t b) {  in test_vtrn1q_s64()
    787  int64x2_t test_vtrn2q_s64(int64x2_t a, int64x2_t b) {  in test_vtrn2q_s64()
|
D | arm64_vcopy.c |
     82  int64x2_t test_vcopyq_laneq_s64(int64x2_t a1, int64x2_t a2) {  in test_vcopyq_laneq_s64()
|
D | arm-asm-diag.c |
      5  typedef __attribute__((neon_vector_type(2))) long long int64x2_t;  typedef
      7  int64x2_t val[4];
|
D | arm-asm-warn.c |
     19  typedef __attribute__((neon_vector_type(2))) long long int64x2_t;  typedef
     21  int64x2_t val[4];
|
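Both asm-diagnostic tests build the type with Clang's vector attribute instead of including <arm_neon.h>; the same declaration works in any translation unit that only needs the type (the struct name below is made up for the example):

    /* Clang-specific: declare a 2-lane 64-bit NEON vector without <arm_neon.h>. */
    typedef __attribute__((neon_vector_type(2))) long long int64x2_t;

    struct int64x2x4_t {
      int64x2_t val[4];   /* four q-registers' worth of data, as in the tests */
    };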
/external/libvpx/libvpx/vp9/common/arm/neon/ |
D | vp9_highbd_iht8x8_add_neon.c |
     25  const int64x2_t t0_lo = vmull_lane_s32(vget_low_s32(sum), c, 0);  in highbd_iadst_half_butterfly_neon()
     26  const int64x2_t t1_lo = vmull_lane_s32(vget_low_s32(sub), c, 0);  in highbd_iadst_half_butterfly_neon()
     27  const int64x2_t t0_hi = vmull_lane_s32(vget_high_s32(sum), c, 0);  in highbd_iadst_half_butterfly_neon()
     28  const int64x2_t t1_hi = vmull_lane_s32(vget_high_s32(sub), c, 0);  in highbd_iadst_half_butterfly_neon()
     41  int64x2_t *const s0,  in highbd_iadst_butterfly_lane_0_1_neon()
     42  int64x2_t *const s1) {  in highbd_iadst_butterfly_lane_0_1_neon()
     43  const int64x2_t t0_lo = vmull_lane_s32(vget_low_s32(in0), c, 0);  in highbd_iadst_butterfly_lane_0_1_neon()
     44  const int64x2_t t1_lo = vmull_lane_s32(vget_low_s32(in0), c, 1);  in highbd_iadst_butterfly_lane_0_1_neon()
     45  const int64x2_t t0_hi = vmull_lane_s32(vget_high_s32(in0), c, 0);  in highbd_iadst_butterfly_lane_0_1_neon()
     46  const int64x2_t t1_hi = vmull_lane_s32(vget_high_s32(in0), c, 1);  in highbd_iadst_butterfly_lane_0_1_neon()
     [all …]
|
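The high-bitdepth IADST helpers widen every 32-bit half to 64 bits with a by-lane constant multiply so the later rounding cannot overflow. A sketch of the half-butterfly step as suggested by the hits above; the rounding and narrowing that follow in the real file are omitted:

    #include <arm_neon.h>

    /* Multiply the low/high halves of sum and sub by constant c[0], producing
     * four exact 64-bit products (the shape of highbd_iadst_half_butterfly_neon). */
    static void half_butterfly(const int32x4_t sum, const int32x4_t sub,
                               const int32x2_t c, int64x2_t t[4]) {
      t[0] = vmull_lane_s32(vget_low_s32(sum), c, 0);
      t[1] = vmull_lane_s32(vget_low_s32(sub), c, 0);
      t[2] = vmull_lane_s32(vget_high_s32(sum), c, 0);
      t[3] = vmull_lane_s32(vget_high_s32(sub), c, 0);
    }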
/external/libaom/libaom/av1/encoder/arm/neon/ |
D | av1_error_neon.c |
     19  int64x2_t error = vdupq_n_s64(0);  in av1_block_error_neon()
     20  int64x2_t sqcoeff = vdupq_n_s64(0);  in av1_block_error_neon()
     35  const int64x2_t err2 = vaddl_s32(vget_low_s32(err1), vget_high_s32(err1));  in av1_block_error_neon()
     42  const int64x2_t sqcoeff2 =  in av1_block_error_neon()
     62  int64x2_t error = vdupq_n_s64(0);  in av1_block_error_lp_neon()
     77  const int64x2_t err2 = vaddl_s32(vget_low_s32(err1), vget_high_s32(err1));  in av1_block_error_lp_neon()
|
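av1_block_error_neon keeps its running sums in int64x2_t so 32-bit squared differences cannot overflow across a block; the accumulation and final reduction look roughly like this (names shortened):

    #include <arm_neon.h>

    /* err1 holds four squared differences as int32; widening-add folds them
     * into the 64-bit accumulator, as in the hits above. */
    static inline int64x2_t accumulate_error(int64x2_t error, int32x4_t err1) {
      const int64x2_t err2 = vaddl_s32(vget_low_s32(err1), vget_high_s32(err1));
      return vaddq_s64(error, err2);
    }

    /* Final reduction of the two 64-bit lanes to a scalar. */
    static inline int64_t reduce_error(int64x2_t error) {
      return vgetq_lane_s64(error, 0) + vgetq_lane_s64(error, 1);
    }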
/external/tensorflow/tensorflow/core/kernels/ |
D | quantization_utils.h |
    432  inline int64x2_t Divide64x2PowRound(const int64x2_t val) {  in Divide64x2PowRound()
    433  const int64x2_t val_sign = vshrq_n_s64(val, 63);  in Divide64x2PowRound()
    434  const int64x2_t val_xor = veorq_s64(val, val_sign);  in Divide64x2PowRound()
    435  const int64x2_t val_pos = vsubq_s64(val_xor, val_sign);  in Divide64x2PowRound()
    436  const int64x2_t shifted_val_pos = vrshrq_n_s64(val_pos, POW);  in Divide64x2PowRound()
    437  const int64x2_t shifted_val_pos_xor = veorq_s64(shifted_val_pos, val_sign);  in Divide64x2PowRound()
    438  const int64x2_t shifted_val = vsubq_s64(shifted_val_pos_xor, val_sign);  in Divide64x2PowRound()
    459  inline int64x2_t Divide64x2Pow(const int64x2_t val) {  in Divide64x2Pow()
    461  static const int64x2_t FIRST_BIT = vmovq_n_s64(FIRST_BIT_VAL);  in Divide64x2Pow()
    462  const int64x2_t val_sign = vshrq_n_s64(val, 63);  in Divide64x2Pow()
     [all …]
|
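Divide64x2PowRound divides each signed 64-bit lane by 2^POW, rounding halves away from zero: it strips the sign with an XOR/SUB pair, rounding-shifts the magnitude, then restores the sign. The header templates this on POW; the same body is shown below with POW fixed at 16 purely for illustration:

    #include <arm_neon.h>

    #define POW 16  /* illustrative; the TensorFlow header templates over POW */

    /* Divide each int64 lane by 2^POW, rounding away from zero on ties.
     * Mirrors the veorq/vsubq sign trick visible in the hits above. */
    static inline int64x2_t Divide64x2PowRound(const int64x2_t val) {
      const int64x2_t val_sign = vshrq_n_s64(val, 63);              /* 0 or -1 per lane */
      const int64x2_t val_xor = veorq_s64(val, val_sign);
      const int64x2_t val_pos = vsubq_s64(val_xor, val_sign);       /* |val| */
      const int64x2_t shifted_val_pos = vrshrq_n_s64(val_pos, POW); /* rounded |val| / 2^POW */
      const int64x2_t shifted_val_pos_xor = veorq_s64(shifted_val_pos, val_sign);
      return vsubq_s64(shifted_val_pos_xor, val_sign);              /* restore the sign */
    }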
/external/libopus/silk/arm/ |
D | NSQ_neon.c |
     60  int64x2_t e = vpaddlq_s32(d);  in silk_noise_shape_quantizer_short_prediction_neon()
     97  int64x2_t b0 = vmull_s32(vget_low_s32(a0), vget_low_s32(coef0));  in silk_NSQ_noise_shape_feedback_loop_neon()
     98  int64x2_t b1 = vmlal_s32(b0, vget_high_s32(a0), vget_high_s32(coef0));  in silk_NSQ_noise_shape_feedback_loop_neon()
     99  int64x2_t b2 = vmlal_s32(b1, vget_low_s32(a1), vget_low_s32(coef1));  in silk_NSQ_noise_shape_feedback_loop_neon()
    100  int64x2_t b3 = vmlal_s32(b2, vget_high_s32(a1), vget_high_s32(coef1));  in silk_NSQ_noise_shape_feedback_loop_neon()
|
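The SILK feedback loop at lines 97-100 is an eight-term dot product held in 64 bits: one vmull followed by three vmlal accumulations. A self-contained sketch of that shape (the real function goes on to shift and saturate the result):

    #include <arm_neon.h>
    #include <stdint.h>

    /* 8-term dot product of data[] and coef[] with a 64-bit accumulator,
     * shaped like the vmull/vmlal chain above. Both buffers must hold
     * at least eight int32 values. */
    static inline int64_t dot8_s32(const int32_t *data, const int32_t *coef) {
      const int32x4_t a0 = vld1q_s32(data);
      const int32x4_t a1 = vld1q_s32(data + 4);
      const int32x4_t c0 = vld1q_s32(coef);
      const int32x4_t c1 = vld1q_s32(coef + 4);
      int64x2_t b = vmull_s32(vget_low_s32(a0), vget_low_s32(c0));
      b = vmlal_s32(b, vget_high_s32(a0), vget_high_s32(c0));
      b = vmlal_s32(b, vget_low_s32(a1), vget_low_s32(c1));
      b = vmlal_s32(b, vget_high_s32(a1), vget_high_s32(c1));
      return vgetq_lane_s64(b, 0) + vgetq_lane_s64(b, 1);
    }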
/external/webrtc/webrtc/modules/audio_coding/codecs/isac/fix/source/ |
D | transform_neon.c |
    406  int64x2_t xr0 = vmull_s32(vget_low_s32(tmpr_0), vget_low_s32(outre1_0));  in PostShiftAndDivideAndDemodulateNeon()
    407  int64x2_t xi0 = vmull_s32(vget_low_s32(tmpr_0), vget_low_s32(outre2_0));  in PostShiftAndDivideAndDemodulateNeon()
    408  int64x2_t xr2 = vmull_s32(vget_low_s32(tmpr_1), vget_low_s32(outre1_1));  in PostShiftAndDivideAndDemodulateNeon()
    409  int64x2_t xi2 = vmull_s32(vget_low_s32(tmpr_1), vget_low_s32(outre2_1));  in PostShiftAndDivideAndDemodulateNeon()
    416  int64x2_t xr1 = vmull_high_s32(tmpr_0, outre1_0);  in PostShiftAndDivideAndDemodulateNeon()
    417  int64x2_t xi1 = vmull_high_s32(tmpr_0, outre2_0);  in PostShiftAndDivideAndDemodulateNeon()
    418  int64x2_t xr3 = vmull_high_s32(tmpr_1, outre1_1);  in PostShiftAndDivideAndDemodulateNeon()
    419  int64x2_t xi3 = vmull_high_s32(tmpr_1, outre2_1);  in PostShiftAndDivideAndDemodulateNeon()
    425  int64x2_t xr1 = vmull_s32(vget_high_s32(tmpr_0), vget_high_s32(outre1_0));  in PostShiftAndDivideAndDemodulateNeon()
    426  int64x2_t xi1 = vmull_s32(vget_high_s32(tmpr_0), vget_high_s32(outre2_0));  in PostShiftAndDivideAndDemodulateNeon()
     [all …]
|
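transform_neon.c carries both spellings of the same high-half 32x32-to-64 multiply: vmull_high_s32 on AArch64 (lines 416-419) and vget_high_s32 plus vmull_s32 on 32-bit ARM (lines 425-426). A condensed sketch of that dispatch:

    #include <arm_neon.h>

    /* Widen-multiply the high halves of two int32x4_t vectors.
     * Same computation either way; AArch64 just has a fused intrinsic for it. */
    static inline int64x2_t mul_high_s32(int32x4_t a, int32x4_t b) {
    #if defined(__aarch64__)
      return vmull_high_s32(a, b);
    #else
      return vmull_s32(vget_high_s32(a), vget_high_s32(b));
    #endif
    }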
/external/neon_2_sse/ |
D | NEON_2_SSE.h |
    166  typedef __m128i int64x2_t;  typedef
    206  int64x2_t val[2];
    274  int64x2_t val[4];
    338  int64x2_t val[3];
    457  _NEON2SSESTORAGE int64x2_t vaddq_s64(int64x2_t a, int64x2_t b); // VADD.I64 q0,q0,q0
    466  _NEON2SSESTORAGE int64x2_t vaddl_s32(int32x2_t a, int32x2_t b); // VADDL.S32 q0,d0,d0
    473  _NEON2SSESTORAGE int64x2_t vaddw_s32(int64x2_t a, int32x2_t b); // VADDW.S32 q0,q0,d0
    515  _NEON2SSESTORAGE int64x2_t vqaddq_s64(int64x2_t a, int64x2_t b); // VQADD.S64 q0,q0,q0
    523  _NEON2SSESTORAGE int32x2_t vaddhn_s64(int64x2_t a, int64x2_t b); // VADDHN.I64 d0,q0,q0
    530  _NEON2SSESTORAGE int32x2_t vraddhn_s64(int64x2_t a, int64x2_t b); // VRADDHN.I64 d0,q0,q0
     [all …]
|
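NEON_2_SSE.h types int64x2_t directly as __m128i, so the 64-bit lane add can map onto a single SSE2 intrinsic. A trimmed sketch of the general shape; the header's own definitions use its _NEON2SSESTORAGE machinery and may differ in detail:

    #include <emmintrin.h>  /* SSE2 */

    typedef __m128i int64x2_t;  /* as in the header's typedef above */

    /* vaddq_s64 maps onto the SSE2 64-bit lane add. */
    static inline int64x2_t vaddq_s64(int64x2_t a, int64x2_t b) {
      return _mm_add_epi64(a, b);
    }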
/external/webrtc/webrtc/modules/video_processing/util/ |
D | denoiser_filter_neon.cc |
     19  const int64x2_t b = vpaddlq_s32(a);  in HorizontalAddS16x8()
     26  const int64x2_t b = vpaddlq_s32(v_32x4);  in HorizontalAddS32x4()
    123  int64x2_t v_sum_diff_total = vdupq_n_s64(0);  in MbDenoise()
    179  const int64x2_t fedcba98_76543210 = vpaddlq_s32(fedc_ba98_7654_3210);  in MbDenoise()
    250  const int64x2_t fedcba98_76543210 =  in MbDenoise()
|
/external/libaom/libaom/aom_dsp/arm/ |
D | sum_neon.h |
     19  const int64x2_t b = vpaddlq_s32(a);  in horizontal_add_s16x8()
     26  const int64x2_t b = vpaddlq_s32(v_32x4);  in horizontal_add_s32x4()
|
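denoiser_filter_neon.cc and sum_neon.h reduce vectors to scalars by pairwise widening into an int64x2_t before folding the two halves; a sketch of the int16x8 case as suggested by horizontal_add_s16x8:

    #include <arm_neon.h>

    /* Sum all eight int16 lanes: widen 16->32 and 32->64 pairwise,
     * then fold the two 64-bit halves. */
    static inline int32_t horizontal_add_s16x8_sketch(const int16x8_t v) {
      const int32x4_t a = vpaddlq_s16(v);
      const int64x2_t b = vpaddlq_s32(a);
      const int64x1_t c = vadd_s64(vget_low_s64(b), vget_high_s64(b));
      return (int32_t)vget_lane_s64(c, 0);
    }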
/external/libvpx/libvpx/vp8/encoder/arm/neon/ |
D | denoising_neon.c |
     69  int64x2_t v_sum_diff_total = vdupq_n_s64(0);  in vp8_denoiser_filter_neon()
    132  const int64x2_t fedcba98_76543210 = vpaddlq_s32(fedc_ba98_7654_3210);  in vp8_denoiser_filter_neon()
    202  const int64x2_t fedcba98_76543210 =  in vp8_denoiser_filter_neon()
    259  int64x2_t v_sum_diff_total = vdupq_n_s64(0);  in vp8_denoiser_filter_uv_neon()
    349  const int64x2_t fedcba98_76543210 = vpaddlq_s32(fedc_ba98_7654_3210);  in vp8_denoiser_filter_uv_neon()
    427  const int64x2_t fedcba98_76543210 =  in vp8_denoiser_filter_uv_neon()
|
/external/libvpx/libvpx/vp9/encoder/arm/neon/ |
D | vp9_error_neon.c |
     18  int64x2_t error = vdupq_n_s64(0);  in vp9_block_error_fp_neon()
     33  const int64x2_t err2 = vaddl_s32(vget_low_s32(err1), vget_high_s32(err1));  in vp9_block_error_fp_neon()
|