Searched refs: _mm256_srli_si256 (Results 1 – 18 of 18) sorted by relevance
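For context: _mm256_srli_si256(a, imm) shifts each 128-bit lane of the 256-bit register a right by imm bytes, shifting in zeros; bytes never move across the lane boundary, which is why the reductions below still need a final cross-lane step. A minimal standalone check of that behaviour (not taken from any of the indexed files):

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

/* Illustration only: the shift is applied independently to each 128-bit
 * lane, so byte 16 never moves into the low lane.  Build with -mavx2. */
int main(void) {
  uint8_t in[32], out[32];
  for (int i = 0; i < 32; ++i) in[i] = (uint8_t)i;
  __m256i v = _mm256_loadu_si256((const __m256i *)in);
  __m256i s = _mm256_srli_si256(v, 8); /* shift each lane right by 8 bytes */
  _mm256_storeu_si256((__m256i *)out, s);
  /* Prints 8..15 then zeros (low lane), 24..31 then zeros (high lane). */
  for (int i = 0; i < 32; ++i) printf("%2u ", (unsigned)out[i]);
  printf("\n");
  return 0;
}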

/external/libvpx/libvpx/vp9/encoder/x86/
vp9_error_avx2.c
   41  dqcoeff_hi = _mm256_srli_si256(dqcoeff_256, 8);  in vp9_block_error_avx2()
   42  coeff_hi = _mm256_srli_si256(coeff_256, 8);  in vp9_block_error_avx2()
   89  sse_hi = _mm256_srli_si256(sse_256, 8);  in vp9_block_error_avx2()
   90  ssz_hi = _mm256_srli_si256(ssz_256, 8);  in vp9_block_error_avx2()
  127  const __m256i error_hi = _mm256_srli_si256(error_lo, 8);  in vp9_block_error_fp_avx2()
  150  sse_hi = _mm256_srli_si256(sse_256, 8);  in vp9_block_error_fp_avx2()
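The vp9_block_error hits all use the same reduction idiom: 64-bit partial sums are folded within each 128-bit lane by shifting the upper half down 8 bytes and adding, and the two lanes are then combined with a 128-bit extract. A hedged sketch of that idiom with illustrative names (not the actual vp9_error_avx2.c code):

#include <immintrin.h>
#include <stdint.h>

/* Sketch of the lane-wise 64-bit reduction pattern seen above.
 * 'acc' holds four 64-bit partial sums; the result is their total. */
static inline int64_t hsum_epi64_sketch(__m256i acc) {
  /* Bring the high 64 bits of each 128-bit lane down and add. */
  const __m256i hi = _mm256_srli_si256(acc, 8);
  const __m256i sum = _mm256_add_epi64(acc, hi);
  /* Combine the two lanes and read out the low 64 bits. */
  const __m128i lo128 = _mm256_castsi256_si128(sum);
  const __m128i hi128 = _mm256_extracti128_si256(sum, 1);
  return _mm_cvtsi128_si64(_mm_add_epi64(lo128, hi128));
}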
/external/libaom/libaom/av1/encoder/x86/
error_intrin_avx2.c
   49  const __m256i error_hi = _mm256_srli_si256(error_lo, 8);  in av1_block_error_lp_avx2()
   72  sse_hi = _mm256_srli_si256(sse_256, 8);  in av1_block_error_lp_avx2()
  122  sse_reg_64hi = _mm256_srli_si256(sse_reg, 8);  in av1_block_error_avx2()
  123  ssz_reg_64hi = _mm256_srli_si256(ssz_reg, 8);  in av1_block_error_avx2()
wedge_utils_avx2.c
   71  v_acc0_q = _mm256_add_epi64(v_acc0_q, _mm256_srli_si256(v_acc0_q, 8));  in av1_wedge_sse_from_residuals_avx2()
  138  __m256i v_acc_q = _mm256_add_epi64(v_acc0_d, _mm256_srli_si256(v_acc0_d, 8));  in av1_wedge_sign_from_residuals_avx2()
corner_match_avx2.c
   62  __m256i sum_vec1 = _mm256_srli_si256(sum_vec, 8);  in av1_compute_cross_correlation_avx2()
/external/libaom/libaom/aom_dsp/x86/
sad_avx2.c
   37  sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
   70  sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
  124  sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
  163  sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
blk_sse_sum_avx2.c
   24  regx_sum = _mm256_add_epi32(regx_sum, _mm256_srli_si256(regx_sum, 8));  in accumulate_sse_sum()
   25  regx_sum = _mm256_add_epi32(regx_sum, _mm256_srli_si256(regx_sum, 4));  in accumulate_sse_sum()
   29  regx2_sum = _mm256_add_epi64(regx2_sum, _mm256_srli_si256(regx2_sum, 8));  in accumulate_sse_sum()
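accumulate_sse_sum() above applies the same folding to 32-bit sums: shift by 8 bytes and add, then by 4 bytes and add, after which element 0 of each lane holds that lane's total. A hedged sketch of the full horizontal sum, with illustrative names rather than the libaom ones:

#include <immintrin.h>
#include <stdint.h>

/* Sketch of the 32-bit lane-wise reduction: two shift-and-add steps fold
 * eight 32-bit values down to one per 128-bit lane. */
static inline int32_t hsum_epi32_sketch(__m256i acc) {
  acc = _mm256_add_epi32(acc, _mm256_srli_si256(acc, 8)); /* fold upper half of each lane */
  acc = _mm256_add_epi32(acc, _mm256_srli_si256(acc, 4)); /* fold remaining pair */
  /* Element 0 of each lane now holds that lane's sum; add the two lanes. */
  const __m128i lo = _mm256_castsi256_si128(acc);
  const __m128i hi = _mm256_extracti128_si256(acc, 1);
  return _mm_cvtsi128_si32(_mm_add_epi32(lo, hi));
}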
sad_impl_avx2.c
   34  sum = _mm256_add_epi32(sum, _mm256_srli_si256(sum, 8));  in sad32x32()
  112  sum_sad_h = _mm256_srli_si256(sum_sad, 8);  in sad_w64_avg_avx2()
sad_highbd_avx2.c
   25  __m256i u = _mm256_srli_si256(*v, 8);  in get_sad_from_mm256_epi32()
  532  u0 = _mm256_srli_si256(v[0], 4);  in get_4d_sad_from_mm256_epi32()
  533  u1 = _mm256_srli_si256(v[1], 4);  in get_4d_sad_from_mm256_epi32()
  534  u2 = _mm256_srli_si256(v[2], 4);  in get_4d_sad_from_mm256_epi32()
  535  u3 = _mm256_srli_si256(v[3], 4);  in get_4d_sad_from_mm256_epi32()
variance_impl_avx2.c
   89  sse_reg_hi = _mm256_srli_si256(sse_reg, 8); \
   95  sse_reg_hi = _mm256_srli_si256(sse_reg, 4); \
   96  sum_reg_hi = _mm256_srli_si256(sum_reg, 8); \
  102  sum_reg_hi = _mm256_srli_si256(sum_reg, 4); \
blend_a64_mask_avx2.c
  455  _mm256_and_si256(_mm256_srli_si256(v_rvsl_b, 1), v_zmask_b);  in blend_a64_mask_sx_sy_w16_avx2()
  491  _mm256_and_si256(_mm256_srli_si256(v_rvsl_b, 1), v_zmask_b);  in blend_a64_mask_sx_sy_w32n_avx2()
  493  _mm256_and_si256(_mm256_srli_si256(v_rvsh_b, 1), v_zmask_b);  in blend_a64_mask_sx_sy_w32n_avx2()
  589  _mm256_avg_epu8(v_rl_b, _mm256_srli_si256(v_rl_b, 1));  in blend_a64_mask_sx_w16_avx2()
  620  _mm256_avg_epu8(v_r0_s_b, _mm256_srli_si256(v_r0_s_b, 8));  in blend_a64_mask_sx_w32n_avx2()
  622  _mm256_avg_epu8(v_r1_s_b, _mm256_srli_si256(v_r1_s_b, 8));  in blend_a64_mask_sx_w32n_avx2()
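The blend_a64_mask hits use small byte shifts for a different purpose: _mm256_avg_epu8(x, _mm256_srli_si256(x, 1)) averages each mask byte with its right-hand neighbour as part of the 2:1 horizontal subsampling of the mask row (v_zmask_b above is used to mask off alternating bytes). A simplified, hedged sketch of just that averaging step, not the actual libaom routine:

#include <immintrin.h>

/* Simplified sketch of the horizontal pairwise average used for 2:1 mask
 * subsampling: every byte is averaged with the byte to its right.  Note
 * that the last byte of each 128-bit lane averages with zero here; the
 * real code sidesteps that by loading wider rows and masking. */
static inline __m256i avg_with_right_neighbour_sketch(__m256i row) {
  const __m256i shifted = _mm256_srli_si256(row, 1); /* right neighbours, per lane */
  return _mm256_avg_epu8(row, shifted);              /* (a + b + 1) >> 1, unsigned */
}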
avg_intrin_avx2.c
  473  const __m256i a = _mm256_srli_si256(accum, 8);  in aom_satd_avx2()
  496  const __m256i a = _mm256_srli_si256(accum, 8);  in aom_satd_lp_avx2()
/external/libvpx/libvpx/vpx_dsp/x86/
sad_avx2.c
   34  sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
   66  sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
  117  sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
  155  sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
variance_avx2.c
  207  sse_reg_hi = _mm256_srli_si256(sse_reg, 8); \
  213  sse_reg_hi = _mm256_srli_si256(sse_reg, 4); \
  214  sum_reg_hi = _mm256_srli_si256(sum_reg, 8); \
  220  sum_reg_hi = _mm256_srli_si256(sum_reg, 4); \
avg_intrin_avx2.c
  451  const __m256i a = _mm256_srli_si256(accum, 8);  in vpx_satd_avx2()
  473  const __m256i a = _mm256_srli_si256(accum, 8);  in vpx_highbd_satd_avx2()
/external/gemmlowp/fixedpoint/
fixedpoint_avx.h
  187  a1_a3 = _mm256_srli_si256(a, 4);
  189  b1_b3 = _mm256_srli_si256(b, 4);
  204  result = _mm256_blend_epi16(_mm256_srli_si256(a0b0_a2b2_rounded_2x, 4),
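In gemmlowp's fixedpoint_avx.h the 4-byte shift lines operands up for multiplication: _mm256_mul_epi32 only multiplies the even-indexed 32-bit elements of each lane, so shifting a and b right by one element first exposes the odd elements to the same instruction; the products are later re-interleaved with _mm256_blend_epi16. A hedged sketch of the pairing step only (the real fixed-point code also rounds, doubles and saturates):

#include <immintrin.h>

/* Sketch: full 32x32 -> 64-bit products for all eight elements of a and b.
 * _mm256_mul_epi32 reads only elements 0 and 2 of each 128-bit lane, so the
 * odd elements are shifted down by 4 bytes to land in those even slots. */
static inline void mul_all_epi32_sketch(__m256i a, __m256i b,
                                        __m256i *even_products,
                                        __m256i *odd_products) {
  const __m256i a_odd = _mm256_srli_si256(a, 4);
  const __m256i b_odd = _mm256_srli_si256(b, 4);
  *even_products = _mm256_mul_epi32(a, b);         /* a0*b0, a2*b2 per lane */
  *odd_products  = _mm256_mul_epi32(a_odd, b_odd); /* a1*b1, a3*b3 per lane */
}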
/external/libaom/libaom/aom_dsp/simd/
v256_intrinsics_x86.h
  475  rl = _mm256_add_epi32(rl, _mm256_srli_si256(rl, 8));  in v256_ssd_u8()
  476  rl = _mm256_add_epi32(rl, _mm256_srli_si256(rl, 4));  in v256_ssd_u8()
  477  rh = _mm256_add_epi32(rh, _mm256_srli_si256(rh, 8));  in v256_ssd_u8()
  478  rh = _mm256_add_epi32(rh, _mm256_srli_si256(rh, 4));  in v256_ssd_u8()
/external/clang/lib/Headers/
avx2intrin.h
  655  #define _mm256_srli_si256(a, imm) __extension__ ({ \  (macro definition)
  692  #define _mm256_bsrli_epi128(a, count) _mm256_srli_si256((a), (count))
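Since the header implements _mm256_srli_si256 as a macro over a builtin with an immediate operand, the byte count has to be a compile-time constant, and _mm256_bsrli_epi128 is simply an alias for the same macro. A hedged sketch of a plain memory-based fallback for the rare case where the count is only known at run time, reproducing the per-lane semantics (illustrative only, not from any of the indexed files):

#include <immintrin.h>
#include <stdint.h>
#include <string.h>

/* Emulates the per-lane right byte shift for a runtime count by going
 * through memory; counts of 16 or more yield all zeros, as with the
 * immediate form. */
static inline __m256i srli_si256_var_sketch(__m256i a, unsigned nbytes) {
  uint8_t buf[32], out[32] = {0};
  _mm256_storeu_si256((__m256i *)buf, a);
  if (nbytes < 16) {
    memcpy(out, buf + nbytes, 16 - nbytes);            /* low lane  */
    memcpy(out + 16, buf + 16 + nbytes, 16 - nbytes);  /* high lane */
  }
  return _mm256_loadu_si256((const __m256i *)out);
}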
/external/clang/test/CodeGen/
avx2-builtins.c
 1093  return _mm256_srli_si256(a, 3);  in test_mm256_srli_si256()