/external/libaom/libaom/av1/common/arm/ |
D | convolve_neon.h |
    24   int32x4_t sum0, sum1;  in wiener_convolve8_vert_4x8() local
    38   sum0 = vmull_n_s16(vget_low_s16(ss0), filter_y[0]);  in wiener_convolve8_vert_4x8()
    39   sum0 = vmlal_n_s16(sum0, vget_low_s16(ss1), filter_y[1]);  in wiener_convolve8_vert_4x8()
    40   sum0 = vmlal_n_s16(sum0, vget_low_s16(ss2), filter_y[2]);  in wiener_convolve8_vert_4x8()
    41   sum0 = vmlal_n_s16(sum0, vget_low_s16(s3), filter_y[3]);  in wiener_convolve8_vert_4x8()
    48   sum0 = vsubq_s32(sum0, round_vec);  in wiener_convolve8_vert_4x8()
    52   sum0 = vrshlq_s32(sum0, round_bits);  in wiener_convolve8_vert_4x8()
    55   sum0 = vmaxq_s32(sum0, zero);  in wiener_convolve8_vert_4x8()
    59   tmp0 = vqmovn_u32(vreinterpretq_u32_s32(sum0));  in wiener_convolve8_vert_4x8()
    207  int32x4_t sum0;  in convolve8_4x4_s32() local
    [all …]
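Note: the wiener_convolve8_vert_4x8() hits above show the usual vertical filter shape: widening multiply-accumulate of the taps, subtract a rounding constant, rounding right shift, clamp at zero, saturating narrow. A scalar C sketch of that arithmetic follows; the tap count, rounding constant and shift are placeholders rather than the values libaom derives from the Wiener configuration, and round_bits is assumed to be at least 1.

    #include <stdint.h>

    /* Scalar sketch of the vertical multiply-accumulate pattern above. */
    static uint16_t wiener_vert_pixel(const int16_t *src, int stride,
                                      const int16_t *filter_y, int taps,
                                      int32_t round_const, int round_bits) {
      int32_t sum0 = 0;
      for (int k = 0; k < taps; ++k)                        /* vmull/vmlal_n_s16 */
        sum0 += (int32_t)src[k * stride] * filter_y[k];
      sum0 -= round_const;                                  /* vsubq_s32  */
      sum0 = (sum0 + (1 << (round_bits - 1))) >> round_bits; /* vrshlq_s32 */
      if (sum0 < 0) sum0 = 0;                               /* vmaxq_s32  */
      if (sum0 > 65535) sum0 = 65535;                       /* vqmovn_u32 saturates */
      return (uint16_t)sum0;
    }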
|
D | convolve_neon.c |
    127  int32x4_t sum0;  in convolve8_vert_4x4_s32() local
    131  sum0 = vmull_n_s16(s0, y_filter[0]);  in convolve8_vert_4x4_s32()
    132  sum0 = vmlal_n_s16(sum0, s1, y_filter[1]);  in convolve8_vert_4x4_s32()
    133  sum0 = vmlal_n_s16(sum0, s2, y_filter[2]);  in convolve8_vert_4x4_s32()
    134  sum0 = vmlal_n_s16(sum0, s3, y_filter[3]);  in convolve8_vert_4x4_s32()
    135  sum0 = vmlal_n_s16(sum0, s4, y_filter[4]);  in convolve8_vert_4x4_s32()
    136  sum0 = vmlal_n_s16(sum0, s5, y_filter[5]);  in convolve8_vert_4x4_s32()
    137  sum0 = vmlal_n_s16(sum0, s6, y_filter[6]);  in convolve8_vert_4x4_s32()
    138  sum0 = vmlal_n_s16(sum0, s7, y_filter[7]);  in convolve8_vert_4x4_s32()
    140  sum0 = vaddq_s32(sum0, offset_const);  in convolve8_vert_4x4_s32()
    [all …]
|
D | jnt_convolve_neon.c |
    32   uint32x4_t sum0;  in compute_avg_4x1() local
    39   sum0 = vmull_n_u16(res0, fwd_offset);  in compute_avg_4x1()
    40   sum0 = vmlal_n_u16(sum0, d0, bck_offset);  in compute_avg_4x1()
    42   sum0 = vshrq_n_u32(sum0, DIST_PRECISION_BITS);  in compute_avg_4x1()
    44   dst0 = vsubq_s32(vreinterpretq_s32_u32(sum0), vmovl_s16(sub_const_vec));  in compute_avg_4x1()
    72   uint32x4_t sum0, sum2;  in compute_avg_8x1() local
    81   sum0 = vmull_n_u16(vget_low_u16(res0), fwd_offset);  in compute_avg_8x1()
    82   sum0 = vmlal_n_u16(sum0, vget_low_u16(d0), bck_offset);  in compute_avg_8x1()
    83   sum0 = vshrq_n_u32(sum0, DIST_PRECISION_BITS);  in compute_avg_8x1()
    89   dst0 = vsubq_s32(vreinterpretq_s32_u32(sum0), sub_const_vec);  in compute_avg_8x1()
    [all …]
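Note: compute_avg_4x1()/compute_avg_8x1() implement libaom's distance-weighted compound average: the two predictions are blended with the fwd_offset/bck_offset weights (chosen so they sum to 1 << DIST_PRECISION_BITS) and the result is shifted down by DIST_PRECISION_BITS before the compound offset is removed. A scalar sketch, with the final rounding and clamping simplified:

    #include <stdint.h>

    #define DIST_PRECISION_BITS 4  /* libaom's value, restated here for the sketch */

    static uint8_t dist_wtd_avg_pixel(uint16_t res0, uint16_t res1,
                                      uint16_t fwd_offset, uint16_t bck_offset,
                                      int32_t sub_const) {
      uint32_t sum0 = (uint32_t)res0 * fwd_offset + (uint32_t)res1 * bck_offset;
      int32_t dst0 = (int32_t)(sum0 >> DIST_PRECISION_BITS) - sub_const;
      if (dst0 < 0) dst0 = 0;          /* clamp to the 8-bit pixel range */
      if (dst0 > 255) dst0 = 255;
      return (uint8_t)dst0;
    }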
|
/external/libopus/celt/x86/ |
D | pitch_sse4_1.c |
    117  __m128i sum0, sum1, sum2, sum3, vecSum;  in xcorr_kernel_sse4_1() local
    122  sum0 = _mm_setzero_si128();  in xcorr_kernel_sse4_1()
    135  sum0 = _mm_add_epi32(sum0, _mm_madd_epi16(vecX, vecY0));  in xcorr_kernel_sse4_1()
    141  sum0 = _mm_add_epi32(sum0, _mm_unpackhi_epi64( sum0, sum0));  in xcorr_kernel_sse4_1()
    142  sum0 = _mm_add_epi32(sum0, _mm_shufflelo_epi16( sum0, 0x0E));  in xcorr_kernel_sse4_1()
    153  vecSum = _mm_unpacklo_epi64(_mm_unpacklo_epi32(sum0, sum1),  in xcorr_kernel_sse4_1()
    169  sum0 = _mm_mullo_epi32(vecX0, vecY0);  in xcorr_kernel_sse4_1()
    174  sum0 = _mm_add_epi32(sum0, sum1);  in xcorr_kernel_sse4_1()
    176  vecSum = _mm_add_epi32(vecSum, sum0);  in xcorr_kernel_sse4_1()
    187  sum0 = _mm_mullo_epi32(vecX0, vecY0);  in xcorr_kernel_sse4_1()
    [all …]
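Note: xcorr_kernel_sse4_1() computes four cross-correlation lags at once; _mm_madd_epi16 folds pairs of 16-bit products into 32-bit partial sums and the unpack/shuffle lines reduce them horizontally. A scalar sketch of what it computes (the SIMD code folds into an existing accumulator, which is kept here by having the caller pre-initialize sum[]):

    #include <stdint.h>

    /* sum[k] += x[j] * y[j + k] for k = 0..3 over the whole window. */
    static void xcorr_kernel_c(const int16_t *x, const int16_t *y,
                               int32_t sum[4], int len) {
      for (int j = 0; j < len; ++j) {
        sum[0] += (int32_t)x[j] * y[j + 0];
        sum[1] += (int32_t)x[j] * y[j + 1];
        sum[2] += (int32_t)x[j] * y[j + 2];
        sum[3] += (int32_t)x[j] * y[j + 3];
      }
    }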
|
/external/flac/libFLAC/ |
D | lpc_intrin_sse.c |
    61   __m128 sum0;  in FLAC__lpc_compute_autocorrelation_intrin_sse_lag_4_new() local
    67   sum0 = _mm_setzero_ps();  in FLAC__lpc_compute_autocorrelation_intrin_sse_lag_4_new()
    73   sum0 = _mm_add_ps(sum0, _mm_mul_ps(d0, d));  in FLAC__lpc_compute_autocorrelation_intrin_sse_lag_4_new()
    85   sum0 = _mm_add_ps(sum0, _mm_mul_ps(d, d0));  in FLAC__lpc_compute_autocorrelation_intrin_sse_lag_4_new()
    89   _mm_storeu_ps(autoc, sum0);  in FLAC__lpc_compute_autocorrelation_intrin_sse_lag_4_new()
    97   __m128 sum0, sum1;  in FLAC__lpc_compute_autocorrelation_intrin_sse_lag_8_new() local
    103  sum0 = _mm_setzero_ps();  in FLAC__lpc_compute_autocorrelation_intrin_sse_lag_8_new()
    111  sum0 = _mm_add_ps(sum0, _mm_mul_ps(d0, d));  in FLAC__lpc_compute_autocorrelation_intrin_sse_lag_8_new()
    128  sum0 = _mm_add_ps(sum0, _mm_mul_ps(d, d0));  in FLAC__lpc_compute_autocorrelation_intrin_sse_lag_8_new()
    132  _mm_storeu_ps(autoc, sum0);  in FLAC__lpc_compute_autocorrelation_intrin_sse_lag_8_new()
    [all …]
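Note: the two FLAC routines above accumulate a short-lag autocorrelation, autoc[k] = sum over i of d[i] * d[i - k], keeping one __m128 accumulator per group of four lags. A scalar sketch of the lag-4 case, with windowing and edge handling simplified:

    /* autoc[k] = sum_i data[i] * data[i - k] for k = 0..3. */
    static void autocorr_lag4_c(const float *data, unsigned len, float autoc[4]) {
      for (unsigned k = 0; k < 4; ++k) autoc[k] = 0.0f;
      for (unsigned i = 0; i < len; ++i)
        for (unsigned k = 0; k <= 3 && k <= i; ++k)
          autoc[k] += data[i] * data[i - k];
    }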
|
/external/libaom/libaom/aom_dsp/x86/ |
D | sad_impl_avx2.c |
    98   __m128i sum0, sum1;  in aom_sad64x128x4d_avx2() local
    105  sad64x64x4d(src, src_stride, rf, ref_stride, &sum0);  in aom_sad64x128x4d_avx2()
    112  sum0 = _mm_add_epi32(sum0, sum1);  in aom_sad64x128x4d_avx2()
    113  _mm_storeu_si128((__m128i *)res, sum0);  in aom_sad64x128x4d_avx2()
    119  __m128i sum0, sum1;  in aom_sad128x64x4d_avx2() local
    127  sad64x64x4d(src, src_stride, rf, ref_stride, &sum0);  in aom_sad128x64x4d_avx2()
    134  sum0 = _mm_add_epi32(sum0, sum1);  in aom_sad128x64x4d_avx2()
    135  _mm_storeu_si128((__m128i *)res, sum0);  in aom_sad128x64x4d_avx2()
    142  uint32_t sum0[4];  in aom_sad128x128x4d_avx2() local
    149  aom_sad128x64x4d_avx2(src, src_stride, rf, ref_stride, sum0);  in aom_sad128x128x4d_avx2()
    [all …]
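Note: the AVX2 wrappers above build large-block 4-reference SADs out of smaller ones: sum0 holds the four SADs of the first half, sum1 (filled by a second call hidden behind the truncation) the four SADs of the other half, and the two are added lane by lane. A scalar sketch of that composition; sad64x64x4d_c here is a hypothetical helper and the 64-row offset of the second half is an assumption:

    #include <stdint.h>

    /* Hypothetical scalar helper: 4-reference SAD of one 64x64 block. */
    static void sad64x64x4d_c(const uint8_t *src, int src_stride,
                              const uint8_t *const ref[4], int ref_stride,
                              uint32_t res[4]) {
      for (int r = 0; r < 4; ++r) {
        uint32_t sad = 0;
        for (int y = 0; y < 64; ++y)
          for (int x = 0; x < 64; ++x) {
            int d = src[y * src_stride + x] - ref[r][y * ref_stride + x];
            sad += (uint32_t)(d < 0 ? -d : d);
          }
        res[r] = sad;
      }
    }

    /* 64x128: top 64x64 half plus bottom 64x64 half, per reference. */
    static void sad64x128x4d_c(const uint8_t *src, int src_stride,
                               const uint8_t *const ref[4], int ref_stride,
                               uint32_t res[4]) {
      const uint8_t *ref2[4];
      uint32_t sum0[4], sum1[4];
      sad64x64x4d_c(src, src_stride, ref, ref_stride, sum0);
      for (int r = 0; r < 4; ++r) ref2[r] = ref[r] + 64 * ref_stride;
      sad64x64x4d_c(src + 64 * src_stride, src_stride, ref2, ref_stride, sum1);
      for (int r = 0; r < 4; ++r) res[r] = sum0[r] + sum1[r];
    }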
|
D | sad4d_avx2.c |
    175  uint32_t sum0[4];  in aom_sad32x64x4d_avx2() local
    182  aom_sad32x32x4d_avx2(src, src_stride, rf, ref_stride, sum0);  in aom_sad32x64x4d_avx2()
    189  res[0] = sum0[0] + sum1[0];  in aom_sad32x64x4d_avx2()
    190  res[1] = sum0[1] + sum1[1];  in aom_sad32x64x4d_avx2()
    191  res[2] = sum0[2] + sum1[2];  in aom_sad32x64x4d_avx2()
    192  res[3] = sum0[3] + sum1[3];  in aom_sad32x64x4d_avx2()
    199  uint32_t sum0[4];  in aom_sad64x32x4d_avx2() local
    207  aom_sad32x32x4d_avx2(src, src_stride, rf, ref_stride, sum0);  in aom_sad64x32x4d_avx2()
    214  res[0] = sum0[0] + sum1[0];  in aom_sad64x32x4d_avx2()
    215  res[1] = sum0[1] + sum1[1];  in aom_sad64x32x4d_avx2()
    [all …]
|
D | sad_highbd_avx2.c |
    61   __m256i sum0, sum1;  in aom_highbd_sad16x8_avx2() local
    68   sum0 = _mm256_add_epi16(u0, u1);  in aom_highbd_sad16x8_avx2()
    69   sum0 = _mm256_add_epi16(sum0, u2);  in aom_highbd_sad16x8_avx2()
    70   sum0 = _mm256_add_epi16(sum0, u3);  in aom_highbd_sad16x8_avx2()
    100  s0 = _mm256_unpacklo_epi16(sum0, zero);  in aom_highbd_sad16x8_avx2()
    101  s1 = _mm256_unpackhi_epi16(sum0, zero);  in aom_highbd_sad16x8_avx2()
    106  sum0 = _mm256_add_epi32(s0, r0);  in aom_highbd_sad16x8_avx2()
    109  return (unsigned int)get_sad_from_mm256_epi32(&sum0);  in aom_highbd_sad16x8_avx2()
    117  __m256i sum0;  in aom_highbd_sad16x16_avx2() local
    144  sum0 = _mm256_add_epi16(u0, u1);  in aom_highbd_sad16x16_avx2()
    [all …]
|
D | highbd_variance_avx2.c |
    95   int sum0;  in highbd_10_variance_avx2() local
    97   ref_stride, &sse0, &sum0);  in highbd_10_variance_avx2()
    99   sum_long += sum0;  in highbd_10_variance_avx2()
|
/external/webrtc/webrtc/common_audio/signal_processing/ |
D | cross_correlation_neon.c |
    23   int64x2_t sum0 = vdupq_n_s64(0);  in DotProductWithScaleNeon() local
    39   sum0 = vpadalq_s32(sum0, tmp0);  in DotProductWithScaleNeon()
    53   sum0 = vaddq_s64(sum0, sum1);  in DotProductWithScaleNeon()
    55   int64_t sum2 = vaddvq_s64(sum0);  in DotProductWithScaleNeon()
    59   int64x1_t sum2 = vadd_s64(vget_low_s64(sum0), vget_high_s64(sum0));  in DotProductWithScaleNeon()
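Note: DotProductWithScaleNeon() widens the 16-bit products, shifts each one right by the scaling factor, and folds the results into a 64-bit accumulator with vpadalq_s32 (the two tails shown are the AArch64 vaddvq_s64 and the ARMv7 vadd_s64 reductions). A scalar sketch of the same computation:

    #include <stddef.h>
    #include <stdint.h>

    /* Sum of (a[i] * b[i]) >> scaling, accumulated in 64 bits. */
    static int64_t dot_product_with_scale_c(const int16_t *a, const int16_t *b,
                                            size_t length, int scaling) {
      int64_t sum0 = 0;
      for (size_t i = 0; i < length; ++i)
        sum0 += ((int32_t)a[i] * b[i]) >> scaling;
      return sum0;
    }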
|
/external/libvpx/libvpx/vpx_dsp/arm/ |
D | loopfilter_neon.c |
    185  const uint8x16_t add1, uint16x8_t *sum0,  in filter_update_16() argument
    187  *sum0 = vsubw_u8(*sum0, vget_low_u8(sub0));  in filter_update_16()
    189  *sum0 = vsubw_u8(*sum0, vget_low_u8(sub1));  in filter_update_16()
    191  *sum0 = vaddw_u8(*sum0, vget_low_u8(add0));  in filter_update_16()
    193  *sum0 = vaddw_u8(*sum0, vget_low_u8(add1));  in filter_update_16()
    208  const uint8x16_t add1, uint16x8_t *sum0, uint16x8_t *sum1) {  in calc_7_tap_filter_16_kernel() argument
    209  filter_update_16(sub0, sub1, add0, add1, sum0, sum1);  in calc_7_tap_filter_16_kernel()
    210  return vcombine_u8(vrshrn_n_u16(*sum0, 3), vrshrn_n_u16(*sum1, 3));  in calc_7_tap_filter_16_kernel()
    224  uint16x8_t *sum0, uint16x8_t *sum1) {  in apply_15_tap_filter_16_kernel() argument
    226  filter_update_16(sub0, sub1, add0, add1, sum0, sum1);  in apply_15_tap_filter_16_kernel()
    [all …]
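Note: filter_update_16() maintains the sliding-window sums for the wide loop filters: each step drops two outgoing samples from the sum and adds two incoming ones, and the caller emits a rounded shift of the sum (by 3 in the 7-tap kernel at line 210). A scalar sketch of one lane of that update; the NEON code keeps eight such sums per uint16x8_t:

    #include <stdint.h>

    /* Update one running sum and return the rounded, shifted output sample. */
    static uint8_t filter_update_c(uint16_t *sum, uint8_t sub0, uint8_t sub1,
                                   uint8_t add0, uint8_t add1, int shift) {
      *sum = (uint16_t)(*sum - sub0 - sub1 + add0 + add1);
      return (uint8_t)((*sum + (1u << (shift - 1))) >> shift);  /* vrshrn_n_u16 */
    }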
|
D | highbd_vpx_convolve8_neon.c |
    114  int32x4_t sum0, sum1;  in highbd_convolve8_8() local
    117  sum0 = vmull_lane_s16(vget_low_s16(s0), filters_lo, 0);  in highbd_convolve8_8()
    118  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), filters_lo, 1);  in highbd_convolve8_8()
    119  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), filters_lo, 2);  in highbd_convolve8_8()
    120  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), filters_lo, 3);  in highbd_convolve8_8()
    121  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), filters_hi, 0);  in highbd_convolve8_8()
    122  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s5), filters_hi, 1);  in highbd_convolve8_8()
    123  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s6), filters_hi, 2);  in highbd_convolve8_8()
    124  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s7), filters_hi, 3);  in highbd_convolve8_8()
    133  d = vcombine_u16(vqrshrun_n_s32(sum0, 7), vqrshrun_n_s32(sum1, 7));  in highbd_convolve8_8()
|
/external/libavc/common/x86/ |
D | ih264_resi_trans_quant_sse42.c |
    114  __m128i sum0, sum1, sum2, cmp0, cmp1;  in ih264_resi_trans_quant_4x4_sse42() local
    302  sum0 = _mm_hadd_epi16(cmp0, zero_8x16b);  in ih264_resi_trans_quant_4x4_sse42()
    303  sum1 = _mm_hadd_epi16(sum0, zero_8x16b);  in ih264_resi_trans_quant_4x4_sse42()
    315  sum0 = _mm_hadd_epi16(cmp1, zero_8x16b);  in ih264_resi_trans_quant_4x4_sse42()
    316  sum1 = _mm_hadd_epi16(sum0, zero_8x16b);  in ih264_resi_trans_quant_4x4_sse42()
    388  __m128i cmp0, cmp1, sum0, sum1, sum2;  in ih264_resi_trans_quant_chroma_4x4_sse42() local
    587  sum0 = _mm_hadd_epi16(cmp0, zero_8x16b);  in ih264_resi_trans_quant_chroma_4x4_sse42()
    588  sum1 = _mm_hadd_epi16(sum0, zero_8x16b);  in ih264_resi_trans_quant_chroma_4x4_sse42()
    600  sum0 = _mm_hadd_epi16(cmp1, zero_8x16b);  in ih264_resi_trans_quant_chroma_4x4_sse42()
    601  sum1 = _mm_hadd_epi16(sum0, zero_8x16b);  in ih264_resi_trans_quant_chroma_4x4_sse42()
    [all …]
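Note: in the libavc residual-transform-quantize kernels, cmp0/cmp1 hold per-coefficient comparison masks and the chained _mm_hadd_epi16 calls collapse them into a single value the encoder uses to derive the block's non-zero-coefficient count. A scalar sketch of that end result (the mask-and-reduce detail is specific to the SSE4.2 code and is not reproduced here):

    #include <stdint.h>

    /* Count the non-zero quantized coefficients of a block of n values. */
    static int count_nonzero_coeffs(const int16_t *quant_coeffs, int n) {
      int nnz = 0;
      for (int i = 0; i < n; ++i)
        nnz += (quant_coeffs[i] != 0);
      return nnz;
    }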
|
/external/libopus/celt/arm/ |
D | celt_pitch_xcorr_arm_gnu.s |
    360  @ r6 = opus_val32 sum0
    429  @ maxcorr = max(maxcorr, sum0, sum1, sum2, sum3)
    447  @ {r10, r11} = {sum0, sum1} = {0, 0}
    455  SMLABB r10, r6, r8, r10 @ sum0 = MAC16_16(sum0, x_0, y_0)
    459  SMLATT r10, r6, r8, r10 @ sum0 = MAC16_16(sum0, x_1, y_1)
    463  SMLABB r10, r7, r9, r10 @ sum0 = MAC16_16(sum0, x_2, y_2)
    465  SMLATT r10, r7, r9, r10 @ sum0 = MAC16_16(sum0, x_3, y_3)
    474  SMLABB r10, r6, r8, r10 @ sum0 = MAC16_16(sum0, x_0, y_0)
    478  SMLATT r10, r6, r8, r10 @ sum0 = MAC16_16(sum0, x_1, y_1)
    485  SMLABB r10, r6, r8, r10 @ sum0 = MAC16_16(sum0, x_0, y_0)
    [all …]
|
/external/llvm/test/Analysis/BasicAA/ |
D | pure-const-dce.ll |
    14  ; CHECK-NEXT: %sum0 = add i32 %tmp0, %tmp1
    15  ; CHECK-NEXT: %sum1 = add i32 %sum0, %tmp2
    36  %sum0 = add i32 %tmp0, %tmp1 ; <i32> [#uses=1]
    37  %sum1 = add i32 %sum0, %tmp2 ; <i32> [#uses=1]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/BasicAA/ |
D | pure-const-dce.ll |
    14  ; CHECK-NEXT: %sum0 = add i32 %tmp0, %tmp1
    15  ; CHECK-NEXT: %sum1 = add i32 %sum0, %tmp2
    36  %sum0 = add i32 %tmp0, %tmp1 ; <i32> [#uses=1]
    37  %sum1 = add i32 %sum0, %tmp2 ; <i32> [#uses=1]
|
/external/skqp/src/core/ |
D | SkMaskBlurFilter.cpp |
    117  uint32_t sum0 = 0;  in blur() local
    124  sum0 += leadingEdge;  in blur()
    125  sum1 += sum0;  in blur()
    135  *buffer1Cursor = sum0;  in blur()
    138  sum0 -= *buffer0Cursor;  in blur()
    146  sum0 += leadingEdge;  in blur()
    147  sum1 += sum0;  in blur()
    157  *buffer1Cursor = sum0;  in blur()
    160  sum0 -= *buffer0Cursor;  in blur()
    170  sum0 = sum1 = sum2 = 0;  in blur()
    [all …]
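Note: blur() keeps cascaded running sums (sum0 over the input, sum1 over sum0, sum2 over sum1) plus small circular buffers of outgoing values, which is equivalent to applying a box filter three times; repeated box passes are a standard approximation of a Gaussian. The sketch below makes that equivalence explicit as a single clamped box-blur pass intended to be applied three times; it re-sums each window instead of keeping running sums, and edge handling is simplified:

    #include <stdint.h>

    /* One box-blur pass over an n-sample row, window = 2*radius + 1. */
    static void box_blur_pass(const uint8_t *src, uint8_t *dst, int n, int radius) {
      const int window = 2 * radius + 1;
      for (int i = 0; i < n; ++i) {
        uint32_t sum0 = 0;
        for (int k = -radius; k <= radius; ++k) {
          int j = i + k;
          if (j < 0) j = 0;            /* clamp at the row edges */
          if (j >= n) j = n - 1;
          sum0 += src[j];
        }
        dst[i] = (uint8_t)((sum0 + window / 2) / window);
      }
    }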
|
/external/skia/src/core/ |
D | SkMaskBlurFilter.cpp |
    117  uint32_t sum0 = 0;  in blur() local
    124  sum0 += leadingEdge;  in blur()
    125  sum1 += sum0;  in blur()
    135  *buffer1Cursor = sum0;  in blur()
    138  sum0 -= *buffer0Cursor;  in blur()
    146  sum0 += leadingEdge;  in blur()
    147  sum1 += sum0;  in blur()
    157  *buffer1Cursor = sum0;  in blur()
    160  sum0 -= *buffer0Cursor;  in blur()
    170  sum0 = sum1 = sum2 = 0;  in blur()
    [all …]
|
/external/libvpx/libvpx/vpx_dsp/mips/ |
D | avg_msa.c |
    18  v8u16 sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7;  in vpx_avg_8x8_msa() local
    22  HADD_UB4_UH(src0, src1, src2, src3, sum0, sum1, sum2, sum3);  in vpx_avg_8x8_msa()
    24  ADD4(sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum0, sum2, sum4, sum6);  in vpx_avg_8x8_msa()
    25  ADD2(sum0, sum2, sum4, sum6, sum0, sum4);  in vpx_avg_8x8_msa()
    26  sum0 += sum4;  in vpx_avg_8x8_msa()
    28  sum = __msa_hadd_u_w(sum0, sum0);  in vpx_avg_8x8_msa()
    29  sum0 = (v8u16)__msa_pckev_h((v8i16)sum, (v8i16)sum);  in vpx_avg_8x8_msa()
    30  sum = __msa_hadd_u_w(sum0, sum0);  in vpx_avg_8x8_msa()
    41  v8u16 sum0;  in vpx_avg_4x4_msa() local
    48  sum0 = __msa_hadd_u_h(vec, vec);  in vpx_avg_4x4_msa()
    [all …]
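Note: vpx_avg_8x8_msa() reduces an 8x8 block to its rounded mean with MSA horizontal adds; the scalar definition is simply the pixel sum, rounded, divided by 64:

    #include <stdint.h>

    /* Rounded average of an 8x8 block of 8-bit pixels. */
    static int avg_8x8_c(const uint8_t *src, int stride) {
      int sum0 = 0;
      for (int y = 0; y < 8; ++y)
        for (int x = 0; x < 8; ++x)
          sum0 += src[y * stride + x];
      return (sum0 + 32) >> 6;  /* round, then divide by 64 */
    }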
|
/external/aac/libAACdec/src/ |
D | usacdec_ace_ltp.cpp |
    215  FIXP_DBL sum0, sum1, a_exc0, a_exc1;  in Pred_lt4_postfilter() local
    221  sum0 = a_exc0 + fMult(B, exc[i]);  in Pred_lt4_postfilter()
    225  exc[i] = sum0 + a_exc0;  in Pred_lt4_postfilter()
|
/external/libxcam/modules/soft/ |
D | soft_blender_tasks_priv.cpp |
    45  float sum0[7] = {0.0f};  in gauss_luma_2x2() local
    48  multiply_coeff_y (sum0, line, coeffs[0]);  in gauss_luma_2x2()
    50  multiply_coeff_y (sum0, line, coeffs[1]);  in gauss_luma_2x2()
    52  multiply_coeff_y (sum0, line, coeffs[2]);  in gauss_luma_2x2()
    55  multiply_coeff_y (sum0, line, coeffs[3]);  in gauss_luma_2x2()
    58  multiply_coeff_y (sum0, line, coeffs[4]);  in gauss_luma_2x2()
    67  value[0] = gauss_sum (&sum0[0]);  in gauss_luma_2x2()
    68  value[1] = gauss_sum (&sum0[2]);  in gauss_luma_2x2()
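Note: gauss_luma_2x2() is a separable Gaussian: multiply_coeff_y() accumulates coefficient-weighted input lines into sum0[] (the vertical pass), then gauss_sum() weights neighbouring columns of sum0[] (the horizontal pass). A generic scalar sketch of a separable 5-tap Gaussian built the same way; the coefficients are placeholders and edge handling is omitted, so x and y must stay at least two pixels inside the image:

    /* Separable 5x5 Gaussian: vertical pass into sum0[], then horizontal pass. */
    static float gauss_5x5_pixel(const float *img, int stride, int x, int y,
                                 const float coeffs[5]) {
      float sum0[5];
      for (int i = 0; i < 5; ++i) {          /* vertical pass, one column each */
        sum0[i] = 0.0f;
        for (int j = 0; j < 5; ++j)
          sum0[i] += coeffs[j] * img[(y + j - 2) * stride + (x + i - 2)];
      }
      float value = 0.0f;                    /* horizontal pass over the columns */
      for (int i = 0; i < 5; ++i)
        value += coeffs[i] * sum0[i];
      return value;
    }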
|
/external/libvpx/libvpx/vpx_dsp/x86/ |
D | highbd_variance_sse2.c |
    39  int sum0;  in highbd_8_variance_sse2() local
    41  ref_stride, &sse0, &sum0);  in highbd_8_variance_sse2()
    43  *sum += sum0;  in highbd_8_variance_sse2()
    59  int sum0;  in highbd_10_variance_sse2() local
    61  ref_stride, &sse0, &sum0);  in highbd_10_variance_sse2()
    63  sum_long += sum0;  in highbd_10_variance_sse2()
    81  int sum0;  in highbd_12_variance_sse2() local
    83  ref_stride, &sse0, &sum0);  in highbd_12_variance_sse2()
    85  sum_long += sum0;  in highbd_12_variance_sse2()
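Note: the highbd_*_variance_sse2() wrappers run a small per-tile kernel that returns a partial sse and sum, accumulate those over the whole block (sum_long += sum0 above), and finish with variance = sse - sum*sum / N; the 10- and 12-bit variants also shift the accumulators to renormalize, which is omitted here. A scalar sketch of the sse/sum decomposition and the final step:

    #include <stdint.h>

    /* Accumulate the sum of differences and sum of squared differences. */
    static void variance_c(const uint16_t *src, int src_stride,
                           const uint16_t *ref, int ref_stride,
                           int w, int h, uint32_t *sse, int *sum) {
      *sse = 0;
      *sum = 0;
      for (int y = 0; y < h; ++y)
        for (int x = 0; x < w; ++x) {
          const int d = src[y * src_stride + x] - ref[y * ref_stride + x];
          *sum += d;
          *sse += (uint32_t)(d * d);
        }
    }

    /* Sum of squared deviations: sse - sum*sum / N. */
    static uint32_t finish_variance(uint32_t sse, int sum, int64_t n) {
      return sse - (uint32_t)(((int64_t)sum * sum) / n);
    }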
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | fma-do-not-commute.ll |
    20  %sum0 = phi float [ %fma, %loop ], [ %arg, %entry ]
    24  %fma = fadd float %sum0, %fmul
|
/external/llvm/test/CodeGen/X86/ |
D | fma-do-not-commute.ll |
    20  %sum0 = phi float [ %fma, %loop ], [ %arg, %entry ]
    24  %fma = fadd float %sum0, %fmul
|
/external/swiftshader/third_party/LLVM/test/Analysis/BasicAA/ |
D | pure-const-dce.ll |
    18  %sum0 = add i32 %tmp0, %tmp1 ; <i32> [#uses=1]
    19  %sum1 = add i32 %sum0, %tmp2 ; <i32> [#uses=1]
|