/external/libaom/libaom/aom_dsp/x86/
D | avg_intrin_avx2.c |
    158  b0 = _mm256_srai_epi16(b0, 1);  in hadamard_16x16_avx2()
    159  b1 = _mm256_srai_epi16(b1, 1);  in hadamard_16x16_avx2()
    160  b2 = _mm256_srai_epi16(b2, 1);  in hadamard_16x16_avx2()
    161  b3 = _mm256_srai_epi16(b3, 1);  in hadamard_16x16_avx2()
    212  b0 = _mm256_srai_epi16(b0, 2);  in aom_hadamard_32x32_avx2()
    213  b1 = _mm256_srai_epi16(b1, 2);  in aom_hadamard_32x32_avx2()
    214  b2 = _mm256_srai_epi16(b2, 2);  in aom_hadamard_32x32_avx2()
    215  b3 = _mm256_srai_epi16(b3, 2);  in aom_hadamard_32x32_avx2()
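Both transforms rescale the accumulating coefficients with the same per-lane arithmetic shift, by 1 for the 16x16 Hadamard and by 2 for the 32x32, so the results stay within int16_t range. A minimal sketch of that step, assuming only <immintrin.h> and AVX2; the helper name is illustrative, not libaom's:

    #include <immintrin.h>

    /* Illustrative only: rescale four vectors of 16 int16_t Hadamard
     * coefficients, mirroring the b0..b3 shifts above; the 32x32 variant
     * shifts by 2 instead of 1. */
    static inline void hadamard_rescale_16x16(__m256i b[4]) {
      for (int i = 0; i < 4; ++i)
        b[i] = _mm256_srai_epi16(b[i], 1); /* sign-preserving >> 1 */
    }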
D | aom_subpixel_8t_intrin_avx2.c |
    116  srcRegFilt32b1_1 = _mm256_srai_epi16(srcRegFilt32b1_1, 6);  in aom_filter_block1d4_h4_avx2()
    212  srcRegFilt32b1_1 = _mm256_srai_epi16(srcRegFilt32b1_1, 6);  in aom_filter_block1d4_h8_avx2()
    312  srcRegFilt32b1_1 = _mm256_srai_epi16(srcRegFilt32b1_1, 6);  in aom_filter_block1d8_h4_avx2()
    426  srcRegFilt32b1_1 = _mm256_srai_epi16(srcRegFilt32b1_1, 6);  in aom_filter_block1d8_h8_avx2()
    555  srcRegFilt32b1_1 = _mm256_srai_epi16(srcRegFilt32b1_1, 6);  in aom_filter_block1d16_h4_avx2()
    556  srcRegFilt32b2_1 = _mm256_srai_epi16(srcRegFilt32b2_1, 6);  in aom_filter_block1d16_h4_avx2()
    590  srcRegFilt1_1 = _mm256_srai_epi16(srcRegFilt1_1, 6);  in aom_filter_block1d16_h4_avx2()
    702  srcRegFilt32b1_1 = _mm256_srai_epi16(srcRegFilt32b1_1, 6);  in aom_filter_block1d16_h8_avx2()
    703  srcRegFilt32b2_1 = _mm256_srai_epi16(srcRegFilt32b2_1, 6);  in aom_filter_block1d16_h8_avx2()
    867  resReglo = _mm256_srai_epi16(resReglo, 6);  in aom_filter_block1d8_v4_avx2()
    [all …]
D | convolve_avx2.h |
    261  const __m256i coeffs_1 = _mm256_srai_epi16(filter_coeffs, 1);  in prepare_coeffs_lowbd()
    395  res = _mm256_srai_epi16(wt_res, 1);  in comp_avg()
    405  const __m256i res_round = _mm256_srai_epi16(  in convolve_rounding()
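The convolve_rounding() hit pairs the shift with a rounding offset added first. A minimal sketch of that round-then-shift idiom, assuming AVX2; the helper name is mine, not libaom's (a variable shift count is fine here, as the round_shift_16bit_w16_avx2() hit below also shows):

    #include <immintrin.h>

    /* Illustrative: round each int16_t lane to nearest, then scale down,
     * i.e. (x + (1 << (bits - 1))) >> bits. */
    static inline __m256i round_shift_s16(__m256i x, int bits) {
      const __m256i round = _mm256_set1_epi16((short)(1 << (bits - 1)));
      return _mm256_srai_epi16(_mm256_add_epi16(x, round), bits);
    }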
D | variance_impl_avx2.c |
     49  exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4); \
     50  exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);
D | intrapred_avx2.c |
    255  sum_left = _mm256_srai_epi16(sum_left, 6);  in aom_dc_predictor_32x32_avx2()
    269  sum = _mm256_srai_epi16(sum, 5);  in aom_dc_top_predictor_32x32_avx2()
    283  sum = _mm256_srai_epi16(sum, 5);  in aom_dc_left_predictor_32x32_avx2()
    444  sum = _mm256_srai_epi16(sum, 5);  in aom_dc_top_predictor_32x16_avx2()
    458  sum = _mm256_srai_epi16(sum, 5);  in aom_dc_top_predictor_32x64_avx2()
    472  sum = _mm256_srai_epi16(sum, 6);  in aom_dc_top_predictor_64x64_avx2()
    486  sum = _mm256_srai_epi16(sum, 6);  in aom_dc_top_predictor_64x32_avx2()
    500  sum = _mm256_srai_epi16(sum, 6);  in aom_dc_top_predictor_64x16_avx2()
    529  sum = _mm256_srai_epi16(sum, 6);  in aom_dc_left_predictor_32x64_avx2()
    543  sum = _mm256_srai_epi16(sum, 6);  in aom_dc_left_predictor_64x64_avx2()
    [all …]
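In these DC predictors the shift is a division: the 32- or 64-pixel border sum is shifted by 5 or 6 to get the average. A scalar sketch of the arithmetic, with illustrative names (libaom's rounding details may differ):

    #include <stdint.h>

    /* Illustrative: average n border pixels, n a power of two (32 or 64),
     * so the divide becomes the shift seen in the hits above. */
    static uint8_t dc_average(const uint8_t *border, int n, int shift) {
      int sum = n >> 1; /* round to nearest rather than truncate */
      for (int i = 0; i < n; ++i) sum += border[i];
      return (uint8_t)(sum >> shift); /* shift = 5 for n = 32, 6 for n = 64 */
    }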
D | txfm_common_avx2.h |
    213  in[i] = _mm256_srai_epi16(in[i], bit);  in round_shift_16bit_w16_avx2()
/external/libvpx/libvpx/vpx_dsp/x86/
D | avg_intrin_avx2.c |
    364  b0 = _mm256_srai_epi16(b0, 1);  in hadamard_16x16_avx2()
    365  b1 = _mm256_srai_epi16(b1, 1);  in hadamard_16x16_avx2()
    366  b2 = _mm256_srai_epi16(b2, 1);  in hadamard_16x16_avx2()
    367  b3 = _mm256_srai_epi16(b3, 1);  in hadamard_16x16_avx2()
    422  b0 = _mm256_srai_epi16(b0, 2);  in vpx_hadamard_32x32_avx2()
    423  b1 = _mm256_srai_epi16(b1, 2);  in vpx_hadamard_32x32_avx2()
    424  b2 = _mm256_srai_epi16(b2, 2);  in vpx_hadamard_32x32_avx2()
    425  b3 = _mm256_srai_epi16(b3, 2);  in vpx_hadamard_32x32_avx2()
D | fwd_dct32x32_impl_avx2.h |
    544  step2[0] = _mm256_srai_epi16(step2[0], 2);  in FDCT32x32_2D_AVX2()
    545  step2[1] = _mm256_srai_epi16(step2[1], 2);  in FDCT32x32_2D_AVX2()
    546  step2[2] = _mm256_srai_epi16(step2[2], 2);  in FDCT32x32_2D_AVX2()
    547  step2[3] = _mm256_srai_epi16(step2[3], 2);  in FDCT32x32_2D_AVX2()
    548  step2[4] = _mm256_srai_epi16(step2[4], 2);  in FDCT32x32_2D_AVX2()
    549  step2[5] = _mm256_srai_epi16(step2[5], 2);  in FDCT32x32_2D_AVX2()
    550  step2[6] = _mm256_srai_epi16(step2[6], 2);  in FDCT32x32_2D_AVX2()
    551  step2[7] = _mm256_srai_epi16(step2[7], 2);  in FDCT32x32_2D_AVX2()
    552  step2[8] = _mm256_srai_epi16(step2[8], 2);  in FDCT32x32_2D_AVX2()
    553  step2[9] = _mm256_srai_epi16(step2[9], 2);  in FDCT32x32_2D_AVX2()
    [all …]
D | convolve_avx2.h |
     72  sum1 = _mm256_srai_epi16(sum1, 7);  in convolve8_16_avx2()
    148  return _mm256_srai_epi16(nearest_src, depth);  in mm256_round_epi16()
D | variance_avx2.c |
    185  exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4); \
    186  exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);
/external/libaom/libaom/aom_dsp/simd/
D | v256_intrinsics_x86.h |
    211  _mm256_packs_epi16(_mm256_srai_epi16(b, 8), _mm256_srai_epi16(a, 8)),  in v256_unziphi_8()
    280  return _mm256_srai_epi16(  in v256_unpacklo_s8_s16()
    287  return _mm256_srai_epi16(  in v256_unpackhi_s8_s16()
    687  _mm256_packs_epi16(_mm256_srai_epi16(_mm256_unpacklo_epi8(a, a), (c) + 8), \
    688  _mm256_srai_epi16(_mm256_unpackhi_epi8(a, a), (c) + 8))
    691  #define v256_shr_n_s16(a, c) _mm256_srai_epi16(a, c)
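The unpack/shift pairing in lines 687-688 is a common sign-extension trick: _mm256_unpacklo_epi8(a, a) duplicates each byte into both halves of a 16-bit lane, so an arithmetic shift by (c) + 8 sign-extends the byte and applies the s8 shift in one step. A minimal sketch of the plain extension case (not libaom's code):

    #include <immintrin.h>

    /* Illustrative: sign-extend the low eight int8_t of each 128-bit half
     * to int16_t. Duplicating the byte leaves a copy in the high half of
     * its 16-bit lane; srai by 8 shifts it down, filling with the sign. */
    static inline __m256i s8_to_s16_lo(__m256i a) {
      return _mm256_srai_epi16(_mm256_unpacklo_epi8(a, a), 8);
    }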
/external/libaom/libaom/av1/encoder/x86/
D | av1_quantize_avx2.c |
     66  *thr = _mm256_srai_epi16(qp[2], 1 + log_scale);  in init_qp()
     73  *thr = _mm256_srai_epi16(qp[2], 1 + log_scale);  in update_qp()
     78  __m256i sign_bits = _mm256_srai_epi16(q, 15); \
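The srai by 15 in line 78 broadcasts each lane's sign bit, yielding 0 or -1 per int16_t lane; such a mask is the usual branch-free way to take or restore a sign. A hedged sketch of the abs idiom built on it (illustrative, not the quantizer's code):

    #include <immintrin.h>

    /* Illustrative: m is 0x0000 for non-negative lanes, 0xFFFF for
     * negative ones; (x ^ m) - m then equals |x| for x > INT16_MIN. */
    static inline __m256i abs_s16(__m256i x) {
      const __m256i m = _mm256_srai_epi16(x, 15);
      return _mm256_sub_epi16(_mm256_xor_si256(x, m), m);
    }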
/external/libvpx/libvpx/vp9/encoder/x86/
D | vp9_quantize_avx2.c |
    109  thr256 = _mm256_srai_epi16(dequant256, 1);  in vp9_quantize_fp_avx2()
/external/libaom/libaom/av1/common/x86/
D | reconinter_avx2.c |
    541  __m256i diff = _mm256_srai_epi16(  in av1_build_compound_diffwtd_mask_highbd_avx2()
    561  __m256i diff = _mm256_srai_epi16(  in av1_build_compound_diffwtd_mask_highbd_avx2()
/external/clang/test/CodeGen/
D | avx2-builtins.c |
    1033  return _mm256_srai_epi16(a, 3);  in test_mm256_srai_epi16()
/external/clang/lib/Headers/
D | avx2intrin.h |
    632  _mm256_srai_epi16(__m256i __a, int __count)  in _mm256_srai_epi16() function
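Per the declaration above, each of the 16 signed 16-bit lanes of __a is shifted right by __count with sign-bit fill (counts of 16 or more fill every lane with its sign). A small self-contained demo, assuming an AVX2-capable toolchain:

    #include <immintrin.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      const __m256i a = _mm256_set1_epi16((short)-32768); /* INT16_MIN */
      int16_t out[16];
      _mm256_storeu_si256((__m256i *)out, _mm256_srai_epi16(a, 3));
      printf("%d\n", out[0]); /* -4096: vacated bits filled with the sign */
      return 0;
    }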