
Searched refs: _mm256_add_epi16 (Results 1 – 7 of 7) sorted by relevance
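
Note on the intrinsic: _mm256_add_epi16 adds the sixteen 16-bit lanes of two __m256i vectors element-wise, with each lane wrapping modulo 2^16 on overflow. A minimal standalone sketch (illustrative, not taken from any of the matched files):

    #include <immintrin.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      __m256i a = _mm256_set1_epi16(30000);
      __m256i b = _mm256_set1_epi16(10000);
      /* Lane-wise add; 30000 + 10000 overflows int16_t and wraps to -25536. */
      __m256i c = _mm256_add_epi16(a, b);
      int16_t out[16];
      _mm256_storeu_si256((__m256i *)out, c);
      printf("%d\n", out[0]); /* prints -25536 */
      return 0;
    }

Compile with -mavx2 (gcc/clang).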

/external/libvpx/libvpx/vpx_dsp/x86/
loopfilter_avx2.c
600 pixelFilter_p = _mm256_add_epi16(_mm256_add_epi16(p256_6, p256_5), in vpx_lpf_horizontal_16_dual_avx2()
601 _mm256_add_epi16(p256_4, p256_3)); in vpx_lpf_horizontal_16_dual_avx2()
602 pixelFilter_q = _mm256_add_epi16(_mm256_add_epi16(q256_6, q256_5), in vpx_lpf_horizontal_16_dual_avx2()
603 _mm256_add_epi16(q256_4, q256_3)); in vpx_lpf_horizontal_16_dual_avx2()
606 _mm256_add_epi16(p256_0, _mm256_add_epi16(p256_2, p256_1)); in vpx_lpf_horizontal_16_dual_avx2()
607 pixelFilter_p = _mm256_add_epi16(pixelFilter_p, pixetFilter_p2p1p0); in vpx_lpf_horizontal_16_dual_avx2()
610 _mm256_add_epi16(q256_0, _mm256_add_epi16(q256_2, q256_1)); in vpx_lpf_horizontal_16_dual_avx2()
611 pixelFilter_q = _mm256_add_epi16(pixelFilter_q, pixetFilter_q2q1q0); in vpx_lpf_horizontal_16_dual_avx2()
613 pixelFilter_p = _mm256_add_epi16( in vpx_lpf_horizontal_16_dual_avx2()
614 eight, _mm256_add_epi16(pixelFilter_p, pixelFilter_q)); in vpx_lpf_horizontal_16_dual_avx2()
[all …]
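
The loopfilter matches above accumulate neighboring pixel rows and then bias with a constant (eight) before an averaging shift. A sketch of that accumulate-then-round shape, assuming the filter divides by 16 afterwards (the helper name and the >> 4 are assumptions, not the libvpx code):

    #include <immintrin.h>

    static __m256i round_sum16(__m256i pixel_sum) {
      const __m256i eight = _mm256_set1_epi16(8); /* rounding bias for >> 4 */
      __m256i t = _mm256_add_epi16(pixel_sum, eight);
      return _mm256_srli_epi16(t, 4); /* (sum + 8) >> 4 */
    }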
avg_intrin_avx2.c
28 __m256i b0 = _mm256_add_epi16(a0, a1); in hadamard_col8x2_avx2()
30 __m256i b2 = _mm256_add_epi16(a2, a3); in hadamard_col8x2_avx2()
32 __m256i b4 = _mm256_add_epi16(a4, a5); in hadamard_col8x2_avx2()
34 __m256i b6 = _mm256_add_epi16(a6, a7); in hadamard_col8x2_avx2()
37 a0 = _mm256_add_epi16(b0, b2); in hadamard_col8x2_avx2()
38 a1 = _mm256_add_epi16(b1, b3); in hadamard_col8x2_avx2()
41 a4 = _mm256_add_epi16(b4, b6); in hadamard_col8x2_avx2()
42 a5 = _mm256_add_epi16(b5, b7); in hadamard_col8x2_avx2()
47 b0 = _mm256_add_epi16(a0, a4); in hadamard_col8x2_avx2()
48 b7 = _mm256_add_epi16(a1, a5); in hadamard_col8x2_avx2()
[all …]
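
The avg_intrin matches implement Hadamard butterfly stages: each stage pairs an add with a matching subtract. A sketch of one butterfly (names illustrative):

    #include <immintrin.h>

    static void butterfly16(__m256i a0, __m256i a1,
                            __m256i *sum, __m256i *diff) {
      *sum  = _mm256_add_epi16(a0, a1); /* b0 = a0 + a1 */
      *diff = _mm256_sub_epi16(a0, a1); /* b1 = a0 - a1 */
    }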
fwd_dct32x32_impl_avx2.h
131 step1a[0] = _mm256_add_epi16(ina0, inb0); in FDCT32x32_2D_AVX2()
132 step1a[1] = _mm256_add_epi16(ina1, inb1); in FDCT32x32_2D_AVX2()
133 step1a[2] = _mm256_add_epi16(ina2, inb2); in FDCT32x32_2D_AVX2()
134 step1a[3] = _mm256_add_epi16(ina3, inb3); in FDCT32x32_2D_AVX2()
167 step1a[0] = _mm256_add_epi16(ina0, inb0); in FDCT32x32_2D_AVX2()
168 step1a[1] = _mm256_add_epi16(ina1, inb1); in FDCT32x32_2D_AVX2()
169 step1a[2] = _mm256_add_epi16(ina2, inb2); in FDCT32x32_2D_AVX2()
170 step1a[3] = _mm256_add_epi16(ina3, inb3); in FDCT32x32_2D_AVX2()
203 step1a[0] = _mm256_add_epi16(ina0, inb0); in FDCT32x32_2D_AVX2()
204 step1a[1] = _mm256_add_epi16(ina1, inb1); in FDCT32x32_2D_AVX2()
[all …]
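
The forward-DCT matches fold mirrored input rows: for a 32-point transform the even half consumes x[i] + x[31-i] while the odd half consumes the difference. A sketch of that first-stage fold, assuming rows are already loaded as 16-bit lanes (names illustrative):

    #include <immintrin.h>

    static void fdct32_fold(__m256i in_i, __m256i in_mirror,
                            __m256i *even_in, __m256i *odd_in) {
      *even_in = _mm256_add_epi16(in_i, in_mirror); /* feeds the even half */
      *odd_in  = _mm256_sub_epi16(in_i, in_mirror); /* feeds the odd half */
    }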
convolve_avx2.h
66 sum1 = _mm256_add_epi16(x0, x2); in convolve8_16_avx2()
67 sum2 = _mm256_add_epi16(x1, x3); in convolve8_16_avx2()
69 sum1 = _mm256_add_epi16(sum1, k_64); in convolve8_16_avx2()
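
The convolve matches combine four partial 8-tap sums and round: bias by 64, then arithmetic shift right by 7, matching 7-bit filter coefficients. A sketch of that rounding step (the saturating final add and the exact ordering are assumptions modeled on the excerpt):

    #include <immintrin.h>

    static __m256i convolve8_round(__m256i x0, __m256i x1,
                                   __m256i x2, __m256i x3) {
      const __m256i k_64 = _mm256_set1_epi16(64);
      __m256i sum1 = _mm256_add_epi16(x0, x2);
      __m256i sum2 = _mm256_add_epi16(x1, x3);
      sum1 = _mm256_add_epi16(sum1, k_64);  /* rounding bias */
      sum1 = _mm256_adds_epi16(sum1, sum2); /* saturating final add */
      return _mm256_srai_epi16(sum1, 7);    /* (sum + 64) >> 7 */
    }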
variance_avx2.c
66 sum_reg = _mm256_add_epi16(sum_reg, _mm256_add_epi16(diff0, diff1)); in vpx_get16x16var_avx2()
132 sum_reg = _mm256_add_epi16(sum_reg, _mm256_add_epi16(diff0, diff1)); in get32x16var_avx2()
133 sum_reg = _mm256_add_epi16(sum_reg, _mm256_add_epi16(diff2, diff3)); in get32x16var_avx2()
171 exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8); \
172 exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8); \
186 *sum_reg = _mm256_add_epi16(*sum_reg, exp_src_lo); \
188 *sum_reg = _mm256_add_epi16(*sum_reg, exp_src_hi); \
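
The variance matches keep a running 16-bit sum of src - ref differences. Before the lanes can overflow, such a sum is typically widened to 32 bits by multiply-adding against ones; the widening helper below is an assumption about the surrounding code, not part of the excerpt:

    #include <immintrin.h>

    static __m256i accumulate(__m256i sum_reg, __m256i diff0, __m256i diff1) {
      return _mm256_add_epi16(sum_reg, _mm256_add_epi16(diff0, diff1));
    }

    static __m256i widen_to_epi32(__m256i sum_reg) {
      return _mm256_madd_epi16(sum_reg, _mm256_set1_epi16(1));
    }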
/external/clang/test/CodeGen/
avx2-builtins.c
38 return _mm256_add_epi16(a, b); in test_mm256_add_epi16()
/external/clang/lib/Headers/
avx2intrin.h
88 _mm256_add_epi16(__m256i __a, __m256i __b) in _mm256_add_epi16() function
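
The header match is the intrinsic's definition point in clang's avx2intrin.h. Its semantics are sixteen independent wrapping 16-bit additions; a scalar model (useful for reference testing, not the header's actual body):

    #include <stdint.h>

    static void add_epi16_model(const int16_t a[16], const int16_t b[16],
                                int16_t out[16]) {
      for (int i = 0; i < 16; ++i)
        out[i] = (int16_t)((uint16_t)a[i] + (uint16_t)b[i]); /* wraps mod 2^16 */
    }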