Searched refs:_mm256_sra_epi32 (Results 1 – 11 of 11) sorted by relevance
/external/libaom/libaom/av1/common/x86/
D | highbd_convolve_2d_avx2.c
      85  res_even = _mm256_sra_epi32(_mm256_add_epi32(res_even, round_const_x),   in av1_highbd_convolve_2d_sr_avx2()
      95  res_odd = _mm256_sra_epi32(_mm256_add_epi32(res_odd, round_const_x),     in av1_highbd_convolve_2d_sr_avx2()
     135  __m256i res_a_round = _mm256_sra_epi32(                                  in av1_highbd_convolve_2d_sr_avx2()
     138  res_a_round = _mm256_sra_epi32(                                          in av1_highbd_convolve_2d_sr_avx2()
     143  __m256i res_b_round = _mm256_sra_epi32(                                  in av1_highbd_convolve_2d_sr_avx2()
     146  _mm256_sra_epi32(_mm256_add_epi32(res_b_round, round_const_bits),        in av1_highbd_convolve_2d_sr_avx2()
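Every hit in this file follows the same rounding idiom: add a round constant of half the shift range, then shift right arithmetically, with the runtime shift count packed into the low 64 bits of an __m128i. A minimal sketch of that idiom, assuming a shift of at least one bit (round_sra_epi32 and its parameter names are illustrative, not libaom identifiers):

    #include <immintrin.h>

    /* Rounding arithmetic right shift of eight signed 32-bit lanes:
     * x -> (x + (1 << (bits - 1))) >> bits.  _mm256_sra_epi32 takes its
     * shift count from the low 64 bits of an __m128i, so the scalar count
     * is wrapped with _mm_cvtsi32_si128(). */
    static inline __m256i round_sra_epi32(__m256i x, int bits) {
      const __m256i round_const = _mm256_set1_epi32(1 << (bits - 1));  /* assumes bits >= 1 */
      const __m128i shift = _mm_cvtsi32_si128(bits);
      return _mm256_sra_epi32(_mm256_add_epi32(x, round_const), shift);
    }

The convolve kernel above applies this once per filter pass: first with the horizontal round_const_x, then with the final round_const_bits.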
D | jnt_convolve_avx2.c
     279  const __m256i res_lo_0_round = _mm256_sra_epi32(   in av1_dist_wtd_convolve_y_avx2()
     285  const __m256i res_lo_1_round = _mm256_sra_epi32(   in av1_dist_wtd_convolve_y_avx2()
     337  const __m256i res_hi_0_round = _mm256_sra_epi32(   in av1_dist_wtd_convolve_y_avx2()
     343  const __m256i res_hi_1_round = _mm256_sra_epi32(   in av1_dist_wtd_convolve_y_avx2()
     458  const __m256i res_lo_0_round = _mm256_sra_epi32(   in av1_dist_wtd_convolve_y_avx2()
     464  const __m256i res_lo_1_round = _mm256_sra_epi32(   in av1_dist_wtd_convolve_y_avx2()
     516  const __m256i res_hi_0_round = _mm256_sra_epi32(   in av1_dist_wtd_convolve_y_avx2()
     522  const __m256i res_hi_1_round = _mm256_sra_epi32(   in av1_dist_wtd_convolve_y_avx2()
     710  const __m256i res_a_round = _mm256_sra_epi32(      in av1_dist_wtd_convolve_2d_avx2()
     715  const __m256i res_b_round = _mm256_sra_epi32(      in av1_dist_wtd_convolve_2d_avx2()
D | convolve_2d_avx2.c
     145  _mm256_sra_epi32(_mm256_add_epi32(res_a, sum_round_v), sum_shift_v);   in av1_convolve_2d_sr_avx2()
     147  _mm256_sra_epi32(_mm256_add_epi32(res_b, sum_round_v), sum_shift_v);   in av1_convolve_2d_sr_avx2()
     149  const __m256i res_a_round = _mm256_sra_epi32(                          in av1_convolve_2d_sr_avx2()
     151  const __m256i res_b_round = _mm256_sra_epi32(                          in av1_convolve_2d_sr_avx2()
D | highbd_jnt_convolve_avx2.c
     304  res_even = _mm256_sra_epi32(_mm256_add_epi32(res_even, round_const_x),   in av1_highbd_dist_wtd_convolve_2d_avx2()
     314  res_odd = _mm256_sra_epi32(_mm256_add_epi32(res_odd, round_const_x),     in av1_highbd_dist_wtd_convolve_2d_avx2()
     355  const __m256i res_a_round = _mm256_sra_epi32(                            in av1_highbd_dist_wtd_convolve_2d_avx2()
     402  const __m256i res_b_round = _mm256_sra_epi32(                            in av1_highbd_dist_wtd_convolve_2d_avx2()
     527  res_even = _mm256_sra_epi32(_mm256_add_epi32(res_even, round_const_x),   in av1_highbd_dist_wtd_convolve_x_avx2()
     537  res_odd = _mm256_sra_epi32(_mm256_add_epi32(res_odd, round_const_x),     in av1_highbd_dist_wtd_convolve_x_avx2()
     750  res_a_round = _mm256_sra_epi32(                                          in av1_highbd_dist_wtd_convolve_y_avx2()
     797  res_b_round = _mm256_sra_epi32(                                          in av1_highbd_dist_wtd_convolve_y_avx2()
D | highbd_inv_txfm_avx2.c
     107  a0 = _mm256_sra_epi32(a0, _mm_cvtsi32_si128(shift));             in neg_shift_avx2()
     108  a1 = _mm256_sra_epi32(a1, _mm_cvtsi32_si128(shift));             in neg_shift_avx2()
     256  a0 = _mm256_sra_epi32(a0, _mm_cvtsi32_si128(shift));             in addsub_shift_avx2()
     257  a1 = _mm256_sra_epi32(a1, _mm_cvtsi32_si128(shift));             in addsub_shift_avx2()
     497  x = _mm256_sra_epi32(x, _mm_cvtsi32_si128(out_shift));           in idct32_low1_avx2()
    1245  in[0] = _mm256_sra_epi32(in[0], _mm_cvtsi32_si128(out_shift));   in idct16_low1_avx2()
    2534  x = _mm256_sra_epi32(x, _mm_cvtsi32_si128(out_shift));           in idct8x8_low1_avx2()
    3096  x = _mm256_sra_epi32(x, _mm_cvtsi32_si128(out_shift));           in idct64_low1_avx2()
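In the inverse-transform hits the shift amount arrives as a plain int (shift / out_shift), so each call wraps it with _mm_cvtsi32_si128() to move the count into an __m128i; unlike _mm256_srai_epi32, the count does not have to be a compile-time constant. A small sketch of that variable-count shift (the helper name is illustrative):

    #include <immintrin.h>

    /* Shift every signed 32-bit coefficient right by a run-time amount.
     * The scalar count is placed in the low lane of an __m128i, and all
     * eight elements are shifted by that same count. */
    static inline __m256i shift_right_s32(__m256i v, int shift) {
      return _mm256_sra_epi32(v, _mm_cvtsi32_si128(shift));
    }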
/external/flac/libFLAC/
D | lpc_intrin_avx2.c
      92  summ = _mm256_sra_epi32(summ, cnt);   in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
     123  summ = _mm256_sra_epi32(summ, cnt);   in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
     154  summ = _mm256_sra_epi32(summ, cnt);   in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
     181  summ = _mm256_sra_epi32(summ, cnt);   in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
     210  summ = _mm256_sra_epi32(summ, cnt);   in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
     233  summ = _mm256_sra_epi32(summ, cnt);   in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
     256  summ = _mm256_sra_epi32(summ, cnt);   in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
     275  summ = _mm256_sra_epi32(summ, cnt);   in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
     296  summ = _mm256_sra_epi32(summ, cnt);   in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
     311  summ = _mm256_sra_epi32(summ, cnt);   in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
    [all …]
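All of the FLAC hits are the same statement: the accumulated prediction summ is scaled back down by the quantization level held in cnt before it is subtracted from the input sample (residual = data - (prediction >> lp_quantization)). A simplified sketch of that step for an order-1 predictor; the real routine handles higher orders, and the helper below (residual_order1) is illustrative, not a FLAC function:

    #include <immintrin.h>
    #include <stdint.h>

    /* data is assumed to point just past the warm-up sample, as in FLAC's
     * calling convention, so data[-1] is valid. */
    static void residual_order1(const int32_t *data, uint32_t n, int32_t coeff,
                                int lp_quantization, int32_t *residual) {
      const __m256i q = _mm256_set1_epi32(coeff);
      const __m128i cnt = _mm_cvtsi32_si128(lp_quantization);
      uint32_t i;
      for (i = 0; i + 8 <= n; i += 8) {
        __m256i prev = _mm256_loadu_si256((const __m256i *)(data + i - 1));
        __m256i summ = _mm256_mullo_epi32(prev, q);   /* prediction */
        summ = _mm256_sra_epi32(summ, cnt);           /* >> lp_quantization */
        __m256i cur = _mm256_loadu_si256((const __m256i *)(data + i));
        _mm256_storeu_si256((__m256i *)(residual + i), _mm256_sub_epi32(cur, summ));
      }
      for (; i < n; i++)  /* scalar tail */
        residual[i] = data[i] - ((coeff * data[i - 1]) >> lp_quantization);
    }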
/external/libaom/libaom/aom_dsp/x86/
D | convolve_avx2.h
      92  _mm256_sra_epi32(_mm256_add_epi32(res_a, sum_round_v), sum_shift_v); \
      94  _mm256_sra_epi32(_mm256_add_epi32(res_b, sum_round_v), sum_shift_v); \
      96  const __m256i res_a_round = _mm256_sra_epi32( \
      98  const __m256i res_b_round = _mm256_sra_epi32( \
     171  const __m256i res_a_round = _mm256_sra_epi32( \
     176  const __m256i res_b_round = _mm256_sra_epi32( \
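The macro hits in convolve_avx2.h chain two rounding shifts: an intermediate one (sum_round_v / sum_shift_v) and a final one down to the output bit depth. Expressed as a standalone helper, under the assumption that each stage rounds by half its shift range (the parameter names below are placeholders for the macro's arguments):

    #include <immintrin.h>

    /* Two-stage rounding shift: first drop the intermediate filter
     * precision, then round to the final bit depth.  (1 << b) >> 1 yields a
     * zero round constant when b == 0. */
    static inline __m256i two_stage_round(__m256i res, int sum_bits, int final_bits) {
      const __m256i sum_round = _mm256_set1_epi32((1 << sum_bits) >> 1);
      const __m256i final_round = _mm256_set1_epi32((1 << final_bits) >> 1);
      res = _mm256_sra_epi32(_mm256_add_epi32(res, sum_round),
                             _mm_cvtsi32_si128(sum_bits));
      return _mm256_sra_epi32(_mm256_add_epi32(res, final_round),
                              _mm_cvtsi32_si128(final_bits));
    }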
D | highbd_convolve_avx2.c
     215  __m256i res_a_round = _mm256_sra_epi32(                                      in av1_highbd_convolve_y_sr_avx2()
     220  __m256i res_b_round = _mm256_sra_epi32(                                      in av1_highbd_convolve_y_sr_avx2()
     316  res_even = _mm256_sra_epi32(_mm256_add_epi32(res_even, round_const_x),      in av1_highbd_convolve_x_sr_avx2()
     326  res_odd = _mm256_sra_epi32(_mm256_add_epi32(res_odd, round_const_x),        in av1_highbd_convolve_x_sr_avx2()
     329  res_even = _mm256_sra_epi32(_mm256_add_epi32(res_even, round_const_bits),   in av1_highbd_convolve_x_sr_avx2()
     331  res_odd = _mm256_sra_epi32(_mm256_add_epi32(res_odd, round_const_bits),     in av1_highbd_convolve_x_sr_avx2()
    1109  __m256i res_a_round = _mm256_sra_epi32(                                     in aom_highbd_filter_block1d4_v4_avx2()
    1170  __m256i res_a_round = _mm256_sra_epi32(                                     in aom_highbd_filter_block1d8_v4_avx2()
    1174  __m256i res_b_round = _mm256_sra_epi32(                                     in aom_highbd_filter_block1d8_v4_avx2()
/external/clang/test/CodeGen/
D | avx2-builtins.c
    1027  return _mm256_sra_epi32(a, b);   in test_mm256_sra_epi32()
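The clang test at line 1027 is a thin wrapper that returns the intrinsic's result so the generated code can be checked; the FileCheck annotations in the real file are not shown here, and the parameter types are inferred from the intrinsic's signature:

    #include <immintrin.h>

    /* Shape of the codegen test: forward both operands and return the
     * shifted vector (expected to lower to a vpsrad instruction). */
    __m256i test_mm256_sra_epi32(__m256i a, __m128i b) {
      return _mm256_sra_epi32(a, b);
    }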
/external/libaom/libaom/aom_dsp/simd/
D | v256_intrinsics_x86.h
     637  return _mm256_sra_epi32(a, _mm_cvtsi32_si128(c));   in v256_shr_s32()
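libaom's generic SIMD layer maps its signed 32-bit right shift onto the intrinsic by widening the scalar count into an __m128i. A sketch of the wrapper and a call site; the typedef and the parameter type are assumptions about the surrounding header, not copied from it:

    #include <immintrin.h>

    typedef __m256i v256;  /* assumed vector typedef for the x86 backend */

    static inline v256 v256_shr_s32(v256 a, unsigned int c) {
      return _mm256_sra_epi32(a, _mm_cvtsi32_si128((int)c));
    }

    /* e.g. scale eight 32-bit lanes down by 4 bits:
     *   v256 scaled = v256_shr_s32(acc, 4);               */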
/external/clang/lib/Headers/
D | avx2intrin.h
     650  _mm256_sra_epi32(__m256i __a, __m128i __count)   in _mm256_sra_epi32() function
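The header entry is the definition itself: _mm256_sra_epi32(__m256i __a, __m128i __count) shifts each signed 32-bit element of __a right by the count held in the low 64 bits of __count, replicating the sign bit (counts of 32 or more fill each lane with its sign). A small self-contained demonstration of that behaviour (compile with AVX2 enabled, e.g. -mavx2):

    #include <immintrin.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      __m256i v = _mm256_setr_epi32(-64, 64, -1, 1, INT32_MIN, 0, 1024, -1024);
      __m128i count = _mm_cvtsi32_si128(3);      /* shift amount = 3 */
      __m256i r = _mm256_sra_epi32(v, count);    /* arithmetic >> 3 per lane */

      int32_t out[8];
      _mm256_storeu_si256((__m256i *)out, r);
      for (int i = 0; i < 8; i++) printf("%d ", out[i]);  /* -8 8 -1 0 ... */
      printf("\n");
      return 0;
    }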