/external/libvpx/libvpx/vp9/encoder/x86/
vp9_error_avx2.c
     41  dqcoeff_hi = _mm256_srli_si256(dqcoeff_256, 8);           in vp9_block_error_avx2()
     42  coeff_hi = _mm256_srli_si256(coeff_256, 8);               in vp9_block_error_avx2()
     89  sse_hi = _mm256_srli_si256(sse_256, 8);                   in vp9_block_error_avx2()
     90  ssz_hi = _mm256_srli_si256(ssz_256, 8);                   in vp9_block_error_avx2()
    127  const __m256i error_hi = _mm256_srli_si256(error_lo, 8);  in vp9_block_error_fp_avx2()
    150  sse_hi = _mm256_srli_si256(sse_256, 8);                   in vp9_block_error_fp_avx2()

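Every hit above is the same idiom: _mm256_srli_si256 shifts each 128-bit lane right by a byte count, so a shift of 8 drops the upper int64 of each lane onto the lower one for a horizontal add. A minimal sketch of that reduction, with a hypothetical helper name (not the libvpx code itself):

    #include <immintrin.h>
    #include <stdint.h>

    /* Sum the four int64 elements of v. The srli by 8 bytes works per
     * 128-bit lane, pairing each lane's upper element with its lower one;
     * the extract then folds the two lanes together. */
    static int64_t hsum_epi64(__m256i v) {
      const __m256i hi = _mm256_srli_si256(v, 8);
      const __m256i pairs = _mm256_add_epi64(v, hi);
      const __m128i lo128 = _mm256_castsi256_si128(pairs);
      const __m128i hi128 = _mm256_extracti128_si256(pairs, 1);
      return _mm_cvtsi128_si64(_mm_add_epi64(lo128, hi128));
    }
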
/external/libaom/libaom/av1/encoder/x86/
error_intrin_avx2.c
     49  const __m256i error_hi = _mm256_srli_si256(error_lo, 8);  in av1_block_error_lp_avx2()
     72  sse_hi = _mm256_srli_si256(sse_256, 8);                   in av1_block_error_lp_avx2()
    122  sse_reg_64hi = _mm256_srli_si256(sse_reg, 8);             in av1_block_error_avx2()
    123  ssz_reg_64hi = _mm256_srli_si256(ssz_reg, 8);             in av1_block_error_avx2()

wedge_utils_avx2.c
     71  v_acc0_q = _mm256_add_epi64(v_acc0_q, _mm256_srli_si256(v_acc0_q, 8));        in av1_wedge_sse_from_residuals_avx2()
    138  __m256i v_acc_q = _mm256_add_epi64(v_acc0_d, _mm256_srli_si256(v_acc0_d, 8));  in av1_wedge_sign_from_residuals_avx2()

corner_match_avx2.c
     62  __m256i sum_vec1 = _mm256_srli_si256(sum_vec, 8);  in av1_compute_cross_correlation_avx2()

/external/libaom/libaom/aom_dsp/x86/
sad_avx2.c
     37  sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
     70  sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
    124  sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
    163  sum_sad_h = _mm256_srli_si256(sum_sad, 8); \

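These four hits (all inside SAD macros, hence the trailing backslashes) share one reduction: _mm256_sad_epu8 already leaves a 64-bit partial sum per 64-bit group, and the srli by 8 pairs the two sums inside each lane. A hedged sketch of a single 32-byte SAD row with a hypothetical function name, not the libaom macro itself:

    #include <immintrin.h>
    #include <stdint.h>

    /* SAD of one 32-byte row, reduced to a scalar. The partial sums fit
     * in 16 bits, so 32-bit adds are safe for the folds. */
    static uint32_t sad32_row(const uint8_t *a, const uint8_t *b) {
      const __m256i va = _mm256_loadu_si256((const __m256i *)a);
      const __m256i vb = _mm256_loadu_si256((const __m256i *)b);
      __m256i sad = _mm256_sad_epu8(va, vb);                   /* 4 partial sums */
      sad = _mm256_add_epi32(sad, _mm256_srli_si256(sad, 8));  /* 1 per lane */
      const __m128i lo = _mm256_castsi256_si128(sad);
      const __m128i hi = _mm256_extracti128_si256(sad, 1);
      return (uint32_t)_mm_cvtsi128_si32(_mm_add_epi32(lo, hi));
    }
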
blk_sse_sum_avx2.c
     24  regx_sum = _mm256_add_epi32(regx_sum, _mm256_srli_si256(regx_sum, 8));     in accumulate_sse_sum()
     25  regx_sum = _mm256_add_epi32(regx_sum, _mm256_srli_si256(regx_sum, 4));     in accumulate_sse_sum()
     29  regx2_sum = _mm256_add_epi64(regx2_sum, _mm256_srli_si256(regx2_sum, 8));  in accumulate_sse_sum()

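Here the int32 accumulator needs two folds (8 bytes, then 4) before the lanes are combined, while the int64 accumulator needs only the 8-byte fold. A sketch of the 32-bit case, again with a hypothetical helper name:

    #include <immintrin.h>
    #include <stdint.h>

    /* Sum eight int32 elements: two lane-local folds, then one
     * cross-lane add. Each fold halves the number of live elements. */
    static int32_t hsum_epi32(__m256i v) {
      v = _mm256_add_epi32(v, _mm256_srli_si256(v, 8));  /* 2 per lane */
      v = _mm256_add_epi32(v, _mm256_srli_si256(v, 4));  /* 1 per lane */
      const __m128i lo = _mm256_castsi256_si128(v);
      const __m128i hi = _mm256_extracti128_si256(v, 1);
      return _mm_cvtsi128_si32(_mm_add_epi32(lo, hi));
    }
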
sad_impl_avx2.c
     34  sum = _mm256_add_epi32(sum, _mm256_srli_si256(sum, 8));  in sad32x32()
    112  sum_sad_h = _mm256_srli_si256(sum_sad, 8);               in sad_w64_avg_avx2()

sad_highbd_avx2.c
     25  __m256i u = _mm256_srli_si256(*v, 8);  in get_sad_from_mm256_epi32()
    532  u0 = _mm256_srli_si256(v[0], 4);       in get_4d_sad_from_mm256_epi32()
    533  u1 = _mm256_srli_si256(v[1], 4);       in get_4d_sad_from_mm256_epi32()
    534  u2 = _mm256_srli_si256(v[2], 4);       in get_4d_sad_from_mm256_epi32()
    535  u3 = _mm256_srli_si256(v[3], 4);       in get_4d_sad_from_mm256_epi32()

variance_impl_avx2.c
     89  sse_reg_hi = _mm256_srli_si256(sse_reg, 8); \
     95  sse_reg_hi = _mm256_srli_si256(sse_reg, 4); \
     96  sum_reg_hi = _mm256_srli_si256(sum_reg, 8); \
    102  sum_reg_hi = _mm256_srli_si256(sum_reg, 4); \

blend_a64_mask_avx2.c
    455  _mm256_and_si256(_mm256_srli_si256(v_rvsl_b, 1), v_zmask_b);  in blend_a64_mask_sx_sy_w16_avx2()
    491  _mm256_and_si256(_mm256_srli_si256(v_rvsl_b, 1), v_zmask_b);  in blend_a64_mask_sx_sy_w32n_avx2()
    493  _mm256_and_si256(_mm256_srli_si256(v_rvsh_b, 1), v_zmask_b);  in blend_a64_mask_sx_sy_w32n_avx2()
    589  _mm256_avg_epu8(v_rl_b, _mm256_srli_si256(v_rl_b, 1));        in blend_a64_mask_sx_w16_avx2()
    620  _mm256_avg_epu8(v_r0_s_b, _mm256_srli_si256(v_r0_s_b, 8));    in blend_a64_mask_sx_w32n_avx2()
    622  _mm256_avg_epu8(v_r1_s_b, _mm256_srli_si256(v_r1_s_b, 8));    in blend_a64_mask_sx_w32n_avx2()

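Unlike the reductions above, the shift-by-1 hits here average each mask byte with its right-hand neighbour as part of a 2x horizontal subsample. A simplified sketch of that step; in the real code a mask (the v_zmask_b seen above) then keeps only the even-indexed results:

    #include <immintrin.h>

    /* Average horizontally adjacent bytes. Caveat: the shift is per
     * 128-bit lane, so byte 15 of each lane is averaged with zero; the
     * caller is expected to discard the odd-indexed outputs. */
    static __m256i avg_adjacent_u8(__m256i v) {
      return _mm256_avg_epu8(v, _mm256_srli_si256(v, 1));
    }
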
avg_intrin_avx2.c
    473  const __m256i a = _mm256_srli_si256(accum, 8);  in aom_satd_avx2()
    496  const __m256i a = _mm256_srli_si256(accum, 8);  in aom_satd_lp_avx2()

/external/libvpx/libvpx/vpx_dsp/x86/
sad_avx2.c
     34  sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
     66  sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
    117  sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
    155  sum_sad_h = _mm256_srli_si256(sum_sad, 8); \

variance_avx2.c
    207  sse_reg_hi = _mm256_srli_si256(sse_reg, 8); \
    213  sse_reg_hi = _mm256_srli_si256(sse_reg, 4); \
    214  sum_reg_hi = _mm256_srli_si256(sum_reg, 8); \
    220  sum_reg_hi = _mm256_srli_si256(sum_reg, 4); \

avg_intrin_avx2.c
    451  const __m256i a = _mm256_srli_si256(accum, 8);  in vpx_satd_avx2()
    473  const __m256i a = _mm256_srli_si256(accum, 8);  in vpx_highbd_satd_avx2()

/external/gemmlowp/fixedpoint/
fixedpoint_avx.h
    187  a1_a3 = _mm256_srli_si256(a, 4);
    189  b1_b3 = _mm256_srli_si256(b, 4);
    204  result = _mm256_blend_epi16(_mm256_srli_si256(a0b0_a2b2_rounded_2x, 4),

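gemmlowp's use is different again: _mm256_mul_epi32 only multiplies the even-indexed int32 elements of each lane, so the srli by 4 bytes slides the odd elements into even slots to obtain the other half of the products. A sketch of that pairing, with hypothetical names (the header wraps this in its fixed-point multiply):

    #include <immintrin.h>

    /* Produce all four 64-bit products per lane from two int32 vectors:
     * prod02 covers elements 0 and 2 of each lane, prod13 elements 1 and 3
     * (shifted into the even positions that _mm256_mul_epi32 reads). */
    static void mul_all_epi32(__m256i a, __m256i b,
                              __m256i *prod02, __m256i *prod13) {
      const __m256i a1_a3 = _mm256_srli_si256(a, 4);
      const __m256i b1_b3 = _mm256_srli_si256(b, 4);
      *prod02 = _mm256_mul_epi32(a, b);
      *prod13 = _mm256_mul_epi32(a1_a3, b1_b3);
    }
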
/external/libaom/libaom/aom_dsp/simd/
v256_intrinsics_x86.h
    475  rl = _mm256_add_epi32(rl, _mm256_srli_si256(rl, 8));  in v256_ssd_u8()
    476  rl = _mm256_add_epi32(rl, _mm256_srli_si256(rl, 4));  in v256_ssd_u8()
    477  rh = _mm256_add_epi32(rh, _mm256_srli_si256(rh, 8));  in v256_ssd_u8()
    478  rh = _mm256_add_epi32(rh, _mm256_srli_si256(rh, 4));  in v256_ssd_u8()

/external/clang/lib/Headers/
avx2intrin.h
    655  #define _mm256_srli_si256(a, imm) __extension__ ({ \  macro
    692  #define _mm256_bsrli_epi128(a, count) _mm256_srli_si256((a), (count))

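The header entry is the definition itself: _mm256_srli_si256 is a macro because the byte count must be a compile-time immediate, and line 692 defines _mm256_bsrli_epi128 as an alias for it. The name is misleading in one respect that explains every pattern above: the shift is per 128-bit lane, not across the whole 256-bit register. A small check with assumed values, not taken from the source:

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      /* Elements 0..7; the two lanes hold (0,1,2,3) and (4,5,6,7). */
      const __m256i v = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);
      const __m256i s = _mm256_srli_si256(v, 4);  /* one int32 per lane */
      int out[8];
      _mm256_storeu_si256((__m256i *)out, s);
      /* Prints 1 2 3 0 5 6 7 0: element 4 does not move into lane 0. */
      for (int i = 0; i < 8; i++) printf("%d ", out[i]);
      printf("\n");
      return 0;
    }
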
/external/clang/test/CodeGen/
avx2-builtins.c
   1093  return _mm256_srli_si256(a, 3);  in test_mm256_srli_si256()