/external/libopus/silk/fixed/arm/ |
D | warped_autocorrelation_FIX_neon_intr.c |
  93   vst1q_s32( input_QS, vdupq_n_s32( 0 ) );  in silk_warped_autocorrelation_FIX_neon()
  95   vst1q_s32( input_QS, vdupq_n_s32( 0 ) );  in silk_warped_autocorrelation_FIX_neon()
  97   vst1q_s32( input_QS, vdupq_n_s32( 0 ) );  in silk_warped_autocorrelation_FIX_neon()
  99   vst1q_s32( input_QS, vdupq_n_s32( 0 ) );  in silk_warped_autocorrelation_FIX_neon()
  101  vst1q_s32( input_QS, vdupq_n_s32( 0 ) );  in silk_warped_autocorrelation_FIX_neon()
  103  vst1q_s32( input_QS, vdupq_n_s32( 0 ) );  in silk_warped_autocorrelation_FIX_neon()
  115  vst1q_s32( input_QS, vdupq_n_s32( 0 ) );  in silk_warped_autocorrelation_FIX_neon()
  117  vst1q_s32( input_QS, vdupq_n_s32( 0 ) );  in silk_warped_autocorrelation_FIX_neon()
  119  vst1q_s32( input_QS, vdupq_n_s32( 0 ) );  in silk_warped_autocorrelation_FIX_neon()
  121  vst1q_s32( input_QS, vdupq_n_s32( 0 ) );  in silk_warped_autocorrelation_FIX_neon()
  [all …]
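The repeated matches above are all the same idiom: clearing a 32-bit state buffer four lanes at a time by storing a zero vector built with vdupq_n_s32(0). A minimal sketch of that idiom follows; the buffer name and the assumption that n is a multiple of 4 are illustrative, not the libopus code.

    #include <arm_neon.h>
    #include <stdint.h>

    /* Zero an int32 buffer four lanes per store; n is assumed to be a multiple of 4. */
    static void zero_s32_buffer(int32_t *buf, int n) {
      const int32x4_t zeros = vdupq_n_s32(0);
      for (int i = 0; i < n; i += 4) {
        vst1q_s32(buf + i, zeros);
      }
    }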
|
/external/libvpx/vp9/encoder/arm/neon/ |
D | vp9_diamond_search_sad_neon.c |
  80   const int32x4_t zero_s32 = vdupq_n_s32(0);  in vp9_diamond_search_sad_neon()
  82   const int16x8_t v_max_mv_w = vreinterpretq_s16_s32(vdupq_n_s32(maxmv.as_int));  in vp9_diamond_search_sad_neon()
  84   const int16x8_t v_min_mv_w = vreinterpretq_s16_s32(vdupq_n_s32(minmv.as_int));  in vp9_diamond_search_sad_neon()
  86   const int32x4_t v_spb_d = vdupq_n_s32(sad_per_bit);  in vp9_diamond_search_sad_neon()
  88   const int32x4_t v_joint_cost_0_d = vdupq_n_s32(x->nmvjointsadcost[0]);  in vp9_diamond_search_sad_neon()
  89   const int32x4_t v_joint_cost_1_d = vdupq_n_s32(x->nmvjointsadcost[1]);  in vp9_diamond_search_sad_neon()
  102  const int16x8_t vfcmv = vreinterpretq_s16_s32(vdupq_n_s32(fcenter_mv.as_int));  in vp9_diamond_search_sad_neon()
  109  int16x8_t v_bmv_w = vreinterpretq_s16_s32(vdupq_n_s32(bmv.as_int));  in vp9_diamond_search_sad_neon()
  123  int32x4_t v_ba_d = vdupq_n_s32((intptr_t)best_address);  in vp9_diamond_search_sad_neon()
  250  vaddq_s32(v_cost_d, vdupq_n_s32(1 << (VP9_PROB_COST_SHIFT - 1)));  in vp9_diamond_search_sad_neon()
  [all …]
|
/external/libvpx/vpx_dsp/arm/ |
D | fdct_partial_neon.c |
  109  int32x4_t partial_sum[4] = { vdupq_n_s32(0), vdupq_n_s32(0), vdupq_n_s32(0),  in vpx_highbd_fdct16x16_1_neon()
  110      vdupq_n_s32(0) };  in vpx_highbd_fdct16x16_1_neon()
  136  int32x4_t partial_sum[4] = { vdupq_n_s32(0), vdupq_n_s32(0), vdupq_n_s32(0),  in vpx_highbd_fdct32x32_1_neon()
  137      vdupq_n_s32(0) };  in vpx_highbd_fdct32x32_1_neon()
|
D | variance_neon.c |
  164  int32x4_t sse_s32 = vdupq_n_s32(0);  in variance_4xh_neon()
  195  int32x4_t sse_s32[2] = { vdupq_n_s32(0), vdupq_n_s32(0) };  in variance_8xh_neon()
  226  int32x4_t sse_s32[2] = { vdupq_n_s32(0), vdupq_n_s32(0) };  in variance_16xh_neon()
  267  int32x4_t sum_s32 = vdupq_n_s32(0);  in variance_large_neon()
  268  int32x4_t sse_s32[2] = { vdupq_n_s32(0), vdupq_n_s32(0) };  in variance_large_neon()
  445  int32x4_t sse_vec[4] = { vdupq_n_s32(0), vdupq_n_s32(0), vdupq_n_s32(0),
  446      vdupq_n_s32(0) };
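variance_4xh_neon() and its siblings start from accumulators zeroed with vdupq_n_s32(0) and then widen-multiply-accumulate squared differences into them. A minimal sketch of that shape, assuming an 8-pixel row and illustrative buffer names (this is not the libvpx code itself):

    #include <arm_neon.h>
    #include <stdint.h>

    /* Sum of squared differences over one 8-pixel row, accumulated in an
       int32x4_t that starts from vdupq_n_s32(0). */
    static uint32_t sse_8_u8(const uint8_t *a, const uint8_t *b) {
      int32x4_t sse_s32 = vdupq_n_s32(0);
      const uint8x8_t va = vld1_u8(a);
      const uint8x8_t vb = vld1_u8(b);
      /* Unsigned widening subtract; the wrapped u16 bit pattern is the correct
         signed difference when reinterpreted as s16. */
      const int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(va, vb));
      sse_s32 = vmlal_s16(sse_s32, vget_low_s16(diff), vget_low_s16(diff));
      sse_s32 = vmlal_s16(sse_s32, vget_high_s16(diff), vget_high_s16(diff));
      /* Horizontal reduction of the four 32-bit lanes. */
      const int64x2_t pairs = vpaddlq_s32(sse_s32);
      return (uint32_t)(vgetq_lane_s64(pairs, 0) + vgetq_lane_s64(pairs, 1));
    }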
|
D | vpx_convolve8_neon.h |
  127  sum = vusdotq_lane_s32(vdupq_n_s32(0), samples_lo, filters, 0);  in convolve8_4_usdot_partial()
  147  sum = vusdotq_lane_s32(vdupq_n_s32(0), permuted_samples[0], filters, 0);  in convolve8_4_usdot()
  164  sum0 = vusdotq_lane_s32(vdupq_n_s32(0), samples0_lo, filters, 0);  in convolve8_8_usdot_partial()
  167  sum1 = vusdotq_lane_s32(vdupq_n_s32(0), samples1_lo, filters, 0);  in convolve8_8_usdot_partial()
  191  sum0 = vusdotq_lane_s32(vdupq_n_s32(0), permuted_samples[0], filters, 0);  in convolve8_8_usdot()
  194  sum1 = vusdotq_lane_s32(vdupq_n_s32(0), permuted_samples[1], filters, 0);  in convolve8_8_usdot()
|
/external/flac/src/libFLAC/ |
D | lpc_intrin_neon.c |
  137  …s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantizatio…  in FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_neon()
  138  …s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantizatio…  in FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_neon()
  139  …s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantizatio…  in FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_neon()
  194  …s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantizatio…  in FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_neon()
  195  …s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantizatio…  in FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_neon()
  196  …s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantizatio…  in FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_neon()
  250  …s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantizatio…  in FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_neon()
  251  …s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantizatio…  in FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_neon()
  252  …s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantizatio…  in FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_neon()
  300  …s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantizatio…  in FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_neon()
  [all …]
|
/external/libaom/aom_dsp/arm/ |
D | sum_squares_neon.c |
  36   int32x4_t sum_squares[2] = { vdupq_n_s32(0), vdupq_n_s32(0) };  in aom_sum_squares_2d_i16_4xn_neon()
  65   int32x4_t ss_row[2] = { vdupq_n_s32(0), vdupq_n_s32(0) };  in aom_sum_squares_2d_i16_nxn_neon()
  134  int32x4_t sse[2] = { vdupq_n_s32(0), vdupq_n_s32(0) };  in aom_sum_sse_2d_i16_4xn_neon()
  167  int32x4_t sum_acc = vdupq_n_s32(0);  in aom_sum_sse_2d_i16_nxn_neon()
  171  int32x4_t sse_row[2] = { vdupq_n_s32(0), vdupq_n_s32(0) };  in aom_sum_sse_2d_i16_nxn_neon()
|
D | highbd_variance_neon.c |
  30   int32x4_t v_sse_lo = vdupq_n_s32(0);  in aom_highbd_calc16x16var_neon()
  31   int32x4_t v_sse_hi = vdupq_n_s32(0);  in aom_highbd_calc16x16var_neon()
  57   int32x4_t v_sse_lo = vdupq_n_s32(0);  in aom_highbd_calc8x8var_neon()
  58   int32x4_t v_sse_hi = vdupq_n_s32(0);  in aom_highbd_calc8x8var_neon()
  82   int32x4_t v_sse_lo = vdupq_n_s32(0);  in aom_highbd_calc4x4var_neon()
  83   int32x4_t v_sse_hi = vdupq_n_s32(0);  in aom_highbd_calc4x4var_neon()
|
D | variance_neon.c |
  168  int32x4_t sse_s32 = vdupq_n_s32(0);  in variance_4xh_neon()
  198  int32x4_t sse_s32[2] = { vdupq_n_s32(0), vdupq_n_s32(0) };  in variance_8xh_neon()
  229  int32x4_t sse_s32[2] = { vdupq_n_s32(0), vdupq_n_s32(0) };  in variance_16xh_neon()
  270  int32x4_t sum_s32 = vdupq_n_s32(0);  in variance_large_neon()
  271  int32x4_t sse_s32[2] = { vdupq_n_s32(0), vdupq_n_s32(0) };  in variance_large_neon()
  484  int32x4_t sse_s32[2] = { vdupq_n_s32(0), vdupq_n_s32(0) };  in mse8xh_neon()
  525  int32x4_t sse_s32[4] = { vdupq_n_s32(0), vdupq_n_s32(0), vdupq_n_s32(0),  in mse16xh_neon()
  526      vdupq_n_s32(0) };  in mse16xh_neon()
|
/external/libaom/av1/common/arm/ |
D | highbd_inv_txfm_neon.c |
  66   const int32x4_t v_bit = vdupq_n_s32(-bit);  in av1_round_shift_array_32_neon()
  67   const int32x4_t rnding = vdupq_n_s32(1 << (bit - 1));  in av1_round_shift_array_32_neon()
  80   const int32x4_t sqrt2 = vdupq_n_s32(val);  in av1_round_shift_rect_array_32_neon()
  81   const int32x4_t v_bit = vdupq_n_s32(-bit);  in av1_round_shift_rect_array_32_neon()
  82   const int32x4_t rnding = vdupq_n_s32(1 << (bit - 1));  in av1_round_shift_rect_array_32_neon()
  83   const int32x4_t rnding2 = vdupq_n_s32(1 << (NewSqrt2Bits - 1));  in av1_round_shift_rect_array_32_neon()
  186  const int32x4_t v_shift = vdupq_n_s32(-shift);  in round_shift_4x4()
  200  const int32x4_t v_shift = vdupq_n_s32(-shift);  in round_shift_8x8()
  261  int32x4_t max_clip_val = vdupq_n_s32((1 << bd) - 1);  in highbd_get_recon_8x8_neon()
  550  const int32x4_t clamp_lo_out = vdupq_n_s32(-(1 << (log_range_out - 1)));  in idct32_stage9_neon()
  [all …]
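av1_round_shift_array_32_neon() shows the standard NEON trick for a run-time right shift: vshlq_s32() only takes a (possibly negative) left-shift count, so the amount is negated and broadcast with vdupq_n_s32(-bit), with 1 << (bit - 1) added first for round-to-nearest. A minimal sketch, assuming bit >= 1 (the helper name is illustrative):

    #include <arm_neon.h>

    static int32x4_t round_shift_s32(int32x4_t v, int bit) {
      const int32x4_t v_bit  = vdupq_n_s32(-bit);            /* negative count => shift right */
      const int32x4_t rnding = vdupq_n_s32(1 << (bit - 1));  /* rounding bias                 */
      return vshlq_s32(vaddq_s32(v, rnding), v_bit);
    }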
|
D | convolve_neon.h |
  86   const int32x4_t round_bits = vdupq_n_s32(-round1_bits);  in wiener_convolve8_vert_4x8()
  87   const int32x4_t round_vec = vdupq_n_s32(round_const);  in wiener_convolve8_vert_4x8()
  130  const int32x4_t round_bits = vdupq_n_s32(-round0_bits);  in wiener_convolve8_horiz_8x8()
  132  const int32x4_t round_vec_0 = vdupq_n_s32(round_const_0);  in wiener_convolve8_horiz_8x8()
  133  const int32x4_t round_vec_1 = vdupq_n_s32(round_const_1);  in wiener_convolve8_horiz_8x8()
  180  const int32x4_t round_bits = vdupq_n_s32(-round0_bits);  in wiener_convolve8_horiz_4x8()
  181  const int32x4_t round_vec_0 = vdupq_n_s32(round_const_0);  in wiener_convolve8_horiz_4x8()
  182  const int32x4_t round_vec_1 = vdupq_n_s32(round_const_1);  in wiener_convolve8_horiz_4x8()
|
/external/ComputeLibrary/src/core/NEON/ |
D | NEAsymm.h |
  89   const static int32x4_t zero_s32 = vdupq_n_s32(0);  in finalize_quantization()
  245  const static int32x4_t one_s32 = vdupq_n_s32(1);  in finalize_quantization_symm()
  282  vcltq_s32(result_shift.val[0], vdupq_n_s32(0)),  in finalize_quantization_symm()
  283  vcltq_s32(result_shift.val[1], vdupq_n_s32(0)),  in finalize_quantization_symm()
  284  vcltq_s32(result_shift.val[2], vdupq_n_s32(0)),  in finalize_quantization_symm()
  285  vcltq_s32(result_shift.val[3], vdupq_n_s32(0)),  in finalize_quantization_symm()
  337  int32x4_t in_s32 = vdupq_n_s32(in_value);  in finalize_quantization()
  380  int32x4_t in_s32 = vdupq_n_s32(in_value);  in finalize_quantization()
  419  const int32x4_t voffset = vdupq_n_s32(offset);  in vdequantize()
  442  const int32x4_t voffset = vdupq_n_s32(offset);  in vdequantize()
  [all …]
|
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/integer_ops/ |
D | mean.h |
  53   const int32x4_t bias_dup = vdupq_n_s32(bias);  in MeanImpl()
  54   const int32x4_t min_dup = vdupq_n_s32(kMinValue);  in MeanImpl()
  55   const int32x4_t max_dup = vdupq_n_s32(kMaxValue);  in MeanImpl()
  63   temp_sum.val[0] = vdupq_n_s32(0);  in MeanImpl()
  64   temp_sum.val[1] = vdupq_n_s32(0);  in MeanImpl()
  65   temp_sum.val[2] = vdupq_n_s32(0);  in MeanImpl()
  66   temp_sum.val[3] = vdupq_n_s32(0);  in MeanImpl()
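In MeanImpl() the activation bounds are broadcast once with vdupq_n_s32() and then applied per vector with a max/min pair. A minimal sketch of that clamp, with illustrative names (not the TFLite kernel itself):

    #include <arm_neon.h>
    #include <stdint.h>

    /* Clamp each lane of v to [lo, hi] using broadcast bounds. */
    static int32x4_t clamp_s32(int32x4_t v, int32_t lo, int32_t hi) {
      const int32x4_t min_dup = vdupq_n_s32(lo);
      const int32x4_t max_dup = vdupq_n_s32(hi);
      return vminq_s32(vmaxq_s32(v, min_dup), max_dup);
    }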
|
D | add.h |
  54   const int32x4_t input1_left_dup = vdupq_n_s32(input1_left_shift);  in AddElementwiseInt8()
  55   const int32x4_t input2_left_dup = vdupq_n_s32(input2_left_shift);  in AddElementwiseInt8()
  240  vdupq_n_s32(params.quantized_activation_min);  in AddElementwiseInt16()
  242  vdupq_n_s32(params.quantized_activation_max);  in AddElementwiseInt16()
  246  const int32x4_t input1_left_dup = vdupq_n_s32(input1_left_shift);  in AddElementwiseInt16()
  247  const int32x4_t input2_left_dup = vdupq_n_s32(input2_left_shift);  in AddElementwiseInt16()
  249  const int32x4_t input1_offset_dup = vdupq_n_s32(params.input1_offset);  in AddElementwiseInt16()
  250  const int32x4_t input2_offset_dup = vdupq_n_s32(params.input2_offset);  in AddElementwiseInt16()
  251  const int32x4_t output_offset_dup = vdupq_n_s32(params.output_offset);  in AddElementwiseInt16()
  374  const int32x4_t left_shift_dup = vdupq_n_s32(params.left_shift);  in AddScalarBroadcast()
  [all …]
|
/external/webrtc/common_audio/signal_processing/ |
D | downsample_fast_neon.c |
  44   int32x4_t out32x4_0 = vdupq_n_s32(2048);  in WebRtcSpl_DownsampleFastNeon()
  45   int32x4_t out32x4_1 = vdupq_n_s32(2048);  in WebRtcSpl_DownsampleFastNeon()
  113  int32x4_t out32x4_0 = vdupq_n_s32(2048);  in WebRtcSpl_DownsampleFastNeon()
  114  int32x4_t out32x4_1 = vdupq_n_s32(2048);  in WebRtcSpl_DownsampleFastNeon()
  174  int32x4_t out32x4_0 = vdupq_n_s32(2048);  in WebRtcSpl_DownsampleFastNeon()
  175  int32x4_t out32x4_1 = vdupq_n_s32(2048);  in WebRtcSpl_DownsampleFastNeon()
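WebRtcSpl_DownsampleFastNeon() seeds its accumulators with vdupq_n_s32(2048) instead of zero, folding the rounding bias for a later shift into the initial value. A minimal sketch of the same idea; the constant and the single multiply-accumulate step are illustrative, not the WebRTC filter loop:

    #include <arm_neon.h>

    /* Start the accumulator at the rounding bias, then widen-multiply-accumulate taps. */
    static int32x4_t seeded_mac(int16x4_t samples, int16x4_t coeffs) {
      int32x4_t acc = vdupq_n_s32(2048);
      return vmlal_s16(acc, samples, coeffs);
    }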
|
/external/libvpx/vp8/encoder/arm/neon/ |
D | shortfdct_neon.c |
  26   q9s32 = vdupq_n_s32(14500);  in vp8_short_fdct4x4_neon()
  27   q10s32 = vdupq_n_s32(7500);  in vp8_short_fdct4x4_neon()
  28   q11s32 = vdupq_n_s32(12000);  in vp8_short_fdct4x4_neon()
  29   q12s32 = vdupq_n_s32(51000);  in vp8_short_fdct4x4_neon()
  126  q9s32 = vdupq_n_s32(14500);  in vp8_short_fdct8x4_neon()
  127  q10s32 = vdupq_n_s32(7500);  in vp8_short_fdct8x4_neon()
  187  q9s32 = vdupq_n_s32(12000);  in vp8_short_fdct8x4_neon()
  188  q10s32 = vdupq_n_s32(51000);  in vp8_short_fdct8x4_neon()
|
/external/libgav1/src/dsp/arm/ |
D | inverse_transform_10bit_neon.cc |
  198  const int32x4_t v_src = vdupq_n_s32(dst[0]);  in DctDcOnly()
  206  const int32x4_t xy_shifted = vqrshlq_s32(xy, vdupq_n_s32(-row_shift));  in DctDcOnly()
  281  const int32x4_t min = vdupq_n_s32(-(1 << range));  in Dct4_NEON()
  282  const int32x4_t max = vdupq_n_s32((1 << range) - 1);  in Dct4_NEON()
  303  const int32x4_t v_row_shift = vdupq_n_s32(-row_shift);  in Dct4_NEON()
  356  const int32x4_t min = vdupq_n_s32(-(1 << range));  in Dct8_NEON()
  357  const int32x4_t max = vdupq_n_s32((1 << range) - 1);  in Dct8_NEON()
  384  const int32x4_t v_row_shift = vdupq_n_s32(-row_shift);  in Dct8_NEON()
  463  const int32x4_t min = vdupq_n_s32(-(1 << range));  in Dct16_NEON()
  464  const int32x4_t max = vdupq_n_s32((1 << range) - 1);  in Dct16_NEON()
  [all …]
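DctDcOnly() uses the saturating variant of the same negative-count idiom: vqrshlq_s32() with a count broadcast by vdupq_n_s32(-row_shift) gives a rounding, saturating right shift. A minimal sketch (the wrapper name is illustrative):

    #include <arm_neon.h>

    static int32x4_t round_shift_right_sat(int32x4_t xy, int row_shift) {
      return vqrshlq_s32(xy, vdupq_n_s32(-row_shift));  /* rounding + saturation */
    }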
|
/external/ComputeLibrary/src/cpu/kernels/ |
D | CpuGemmLowpMatrixMultiplyKernel.cpp |
  291  vdupq_n_s32(0),  in vector_matrix_multiply_s8()
  292  vdupq_n_s32(0),  in vector_matrix_multiply_s8()
  293  vdupq_n_s32(0),  in vector_matrix_multiply_s8()
  294  vdupq_n_s32(0)  in vector_matrix_multiply_s8()
  704  vdupq_n_s32(0),  in matrix_multiply_s8()
  705  vdupq_n_s32(0),  in matrix_multiply_s8()
  706  vdupq_n_s32(0),  in matrix_multiply_s8()
  707  vdupq_n_s32(0)  in matrix_multiply_s8()
  715  vdupq_n_s32(0),  in matrix_multiply_s8()
  716  vdupq_n_s32(0),  in matrix_multiply_s8()
  [all …]
|
D | CpuGemmLowpOffsetContributionKernel.cpp |
  147  const int32x4_t b_offset_term_s32_vec = vdupq_n_s32(b_offset_term_s32);  in run_offset_contribution()
  172  vdupq_n_s32(k_offset),  in run_offset_contribution()
  173  vdupq_n_s32(k_offset),  in run_offset_contribution()
  174  vdupq_n_s32(k_offset),  in run_offset_contribution()
  175  vdupq_n_s32(k_offset)  in run_offset_contribution()
  245  const int32x4_t b_offset_term_s32_vec = vdupq_n_s32(b_offset_term_s32);  in run_offset_contribution()
|
/external/libhevc/common/arm/ |
D | ihevc_quant_iquant_ssd_neon_intr.c |
  101  int32x4_t ssd0 = vdupq_n_s32(0);  in ihevc_quant_iquant_ssd_flat_scale_mat_neon()
  129  int32x4_t add_iq = vdupq_n_s32(1);  in ihevc_quant_iquant_ssd_flat_scale_mat_neon()
  145  q_v_bits = vdupq_n_s32(-q_bits);  in ihevc_quant_iquant_ssd_flat_scale_mat_neon()
  146  add_q = vdupq_n_s32(temp);  in ihevc_quant_iquant_ssd_flat_scale_mat_neon()
  151  sh_iq_1 = vdupq_n_s32(sh_tmp);  in ihevc_quant_iquant_ssd_flat_scale_mat_neon()
  155  sh_iq = vdupq_n_s32(s_iq);  in ihevc_quant_iquant_ssd_flat_scale_mat_neon()
  375  int32x4_t ssd0 = vdupq_n_s32(0);  in ihevc_q_iq_ssd_flat_scale_mat_var_rnd_fact_neon()
  413  int32x4_t add_iq = vdupq_n_s32(1);  in ihevc_q_iq_ssd_flat_scale_mat_var_rnd_fact_neon()
  427  stmp = vdupq_n_s32(q_bits - QUANT_ROUND_FACTOR_Q);  in ihevc_q_iq_ssd_flat_scale_mat_var_rnd_fact_neon()
  429  add_q = vdupq_n_s32((1 << QUANT_ROUND_FACTOR_Q) / 2);  in ihevc_q_iq_ssd_flat_scale_mat_var_rnd_fact_neon()
  [all …]
|
/external/libaom/av1/encoder/arm/neon/ |
D | av1_highbd_quantize_neon.c |
  27   vreinterpretq_s32_u32(vcltq_s32(v_coeff, vdupq_n_s32(0)));  in quantize_4()
  28   const int32x4_t v_log_scale = vdupq_n_s32(log_scale);  in quantize_4()
  32   vshlq_s32(v_abs_coeff, vdupq_n_s32(1 + log_scale));  in quantize_4()
  54   const uint32x4_t nz_qcoeff_mask = vcgtq_s32(v_abs_qcoeff, vdupq_n_s32(0));  in quantize_4()
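quantize_4() builds per-lane masks by comparing against a broadcast zero: vcltq_s32(v_coeff, vdupq_n_s32(0)) marks negative coefficients and vcgtq_s32(v_abs_qcoeff, vdupq_n_s32(0)) marks nonzero ones. A minimal sketch of the first mask (the helper name is illustrative):

    #include <arm_neon.h>

    /* All-ones lanes where the coefficient is negative, zero lanes elsewhere. */
    static int32x4_t negative_mask(int32x4_t v_coeff) {
      return vreinterpretq_s32_u32(vcltq_s32(v_coeff, vdupq_n_s32(0)));
    }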
|
/external/XNNPACK/src/qs8-requantization/ |
D | rndnu-neon-qdmulh.c |
  45   const int32x4_t vmultiplier = vdupq_n_s32(multiplier);  in xnn_qs8_requantize_rndnu__neon_qdmulh()
  47   const int32x4_t vpre_shift = vdupq_n_s32(-pre_shift);  in xnn_qs8_requantize_rndnu__neon_qdmulh()
  48   const int32x4_t vpost_shift = vdupq_n_s32(-post_shift);  in xnn_qs8_requantize_rndnu__neon_qdmulh()
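The three vdupq_n_s32() calls in xnn_qs8_requantize_rndnu__neon_qdmulh() broadcast the multiplier and the negated pre/post shift counts for a rounding requantization built around a saturating doubling high multiply. The sketch below shows that general shape only; the exact shift decomposition and ordering in the XNNPACK kernel are assumptions here, so treat this as an illustration rather than the library's routine:

    #include <arm_neon.h>
    #include <stdint.h>

    static int32x4_t requantize_rndnu_sketch(int32x4_t x, int32_t multiplier,
                                             int32_t pre_shift, int32_t post_shift) {
      const int32x4_t vmultiplier = vdupq_n_s32(multiplier);
      const int32x4_t vpre_shift  = vdupq_n_s32(-pre_shift);   /* right shifts expressed */
      const int32x4_t vpost_shift = vdupq_n_s32(-post_shift);  /* as negative left shifts */
      int32x4_t acc = vshlq_s32(x, vpre_shift);
      acc = vqdmulhq_s32(acc, vmultiplier);  /* saturating doubling high multiply */
      return vrshlq_s32(acc, vpost_shift);   /* rounding shift                    */
    }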
|
/external/gemmlowp/fixedpoint/ |
D | fixedpoint_neon.h |
  69   return veorq_s32(a, vdupq_n_s32(-1));
  109  return vshlq_s32(a, vdupq_n_s32(offset));
  129  return vshlq_s32(a, vdupq_n_s32(-offset));
  171  return MaskIfEqual(a, vdupq_n_s32(0));
  281  const int32x4_t shift_vec = vdupq_n_s32(-exponent);
  341  return vdupq_n_s32(x);
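Two of the gemmlowp fixed-point helpers above are one-liners worth seeing whole: bitwise NOT is an XOR with an all-ones vector built by vdupq_n_s32(-1), and run-time shifts go through vshlq_s32() with a broadcast count. A minimal sketch (function names are illustrative, not the gemmlowp API):

    #include <arm_neon.h>

    static int32x4_t bit_not_s32(int32x4_t a) {
      return veorq_s32(a, vdupq_n_s32(-1));      /* XOR with all ones flips every bit   */
    }

    static int32x4_t shift_left_s32(int32x4_t a, int offset) {
      return vshlq_s32(a, vdupq_n_s32(offset));  /* per-lane shift by a broadcast count */
    }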
|
/external/ComputeLibrary/src/core/NEON/kernels/arm_gemm/ |
D | quantized.cpp |
  62   const int32x4_t v_mul = vdupq_n_s32(qp.per_layer_mul);  in requantize_block_32_int()
  63   const int32x4_t v_right_shift = vdupq_n_s32(qp.per_layer_right_shift);  in requantize_block_32_int()
  64   const int32x4_t v_left_shift = vdupq_n_s32(qp.per_layer_left_shift);  in requantize_block_32_int()
  65   const int32x4_t v_minval = vdupq_n_s32(qp.minval);  in requantize_block_32_int()
  66   const int32x4_t v_maxval = vdupq_n_s32(qp.maxval);  in requantize_block_32_int()
  67   const int32x4_t v_c_offset = vdupq_n_s32(qp.c_offset);  in requantize_block_32_int()
  103  const int32x4_t v_row_sum = vdupq_n_s32(row_sum);  in requantize_block_32_int()
  104  const int32x4_t v_row_sum1 = vdupq_n_s32(row_sum1);  in requantize_block_32_int()
  574  int32x4_t v_col0 = vdupq_n_s32(0);  in requantize_block_32_int()
  575  int32x4_t v_in00 = vdupq_n_s32(0);  in requantize_block_32_int()
  [all …]
|
/external/libopus/silk/arm/ |
D | NSQ_del_dec_neon_intr.c |
  189  const int32x4_t shift_s32x4 = vdupq_n_s32( -shift );  in copy_winner_state()
  192  t0_s32x4 = t1_s32x4 = vdupq_n_s32( 0 ); /* initialization */  in copy_winner_state()
  284  vst1q_s32( psDelDec->RD_Q10, vdupq_n_s32( 0 ) );  in silk_NSQ_del_dec_neon()
  363  … RD_Q10_s32x4 = vaddq_s32( RD_Q10_s32x4, vdupq_n_s32( silk_int32_MAX >> 4 ) );  in silk_NSQ_del_dec_neon()
  412  t_s32x4 = vdupq_n_s32( 0 ); /* initialization */  in silk_NSQ_del_dec_neon()
  479  …t0_s32x4 = vdupq_n_s32( 0 ); /* zero zero zero zer…  in silk_short_prediction_create_arch_coef_neon_local()
  521  LPC_pred_Q14_s32x4 = vdupq_n_s32( silk_RSHIFT( order, 1 ) );  in silk_noise_shape_quantizer_short_prediction_neon_local()
  660  …n_AR_Q14_s32x4 = vaddq_s32( vdupq_n_s32( silk_RSHIFT( shapingLPCOrder, 1 ) ), vqdmulhq_lane_s32( t…  in silk_noise_shape_quantizer_del_dec_neon()
  688  tmp2_s32x4 = vaddq_s32( vdupq_n_s32( n_LTP_Q14 ), LPC_pred_Q14_s32x4 ); /* Q13 */  in silk_noise_shape_quantizer_del_dec_neon()
  691  … tmp1_s32x4 = vsubq_s32( vdupq_n_s32( x_Q10[ i ] ), tmp1_s32x4 ); /* residual error Q10 */  in silk_noise_shape_quantizer_del_dec_neon()
  [all …]
|