/external/libaom/libaom/av1/common/ |
D | convolve.c |
    129  FILTER_BITS * 2 - conv_params->round_0 - conv_params->round_1;  in av1_convolve_2d_sr_c()
    159  int16_t res = ROUND_POWER_OF_TWO(sum, conv_params->round_1) -  in av1_convolve_2d_sr_c()
    160  ((1 << (offset_bits - conv_params->round_1)) +  in av1_convolve_2d_sr_c()
    161  (1 << (offset_bits - conv_params->round_1 - 1)));  in av1_convolve_2d_sr_c()
    179  assert(((conv_params->round_0 + conv_params->round_1) <= (FILTER_BITS + 1)) ||  in av1_convolve_y_sr_c()
    180  ((conv_params->round_0 + conv_params->round_1) == (2 * FILTER_BITS)));  in av1_convolve_y_sr_c()
    210  assert((FILTER_BITS - conv_params->round_1) >= 0 ||  in av1_convolve_x_sr_c()
    211  ((conv_params->round_0 + conv_params->round_1) == 2 * FILTER_BITS));  in av1_convolve_x_sr_c()
    261  2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_dist_wtd_convolve_2d_c()
    291  CONV_BUF_TYPE res = ROUND_POWER_OF_TWO(sum, conv_params->round_1);  in av1_dist_wtd_convolve_2d_c()
    [all …]
|
D | convolve.h |
    26   int round_1;  member
    75   conv_params.round_1 = is_compound ? COMPOUND_ROUND1_BITS  in get_conv_params_no_round()
    81   if (!is_compound) conv_params.round_1 -= intbufrange - 16;  in get_conv_params_no_round()
    106  conv_params.round_1 = 2 * FILTER_BITS - conv_params.round_0;  in get_conv_params_wiener()
    111  conv_params.round_1 -= intbufrange - 16;  in get_conv_params_wiener()
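
Note: the get_conv_params_no_round() / get_conv_params_wiener() fragments above are where round_0 and round_1 are actually chosen. The standalone sketch below reconstructs that derivation for illustration only; the constants (FILTER_BITS = 7, ROUND0_BITS = 3, COMPOUND_ROUND1_BITS = 7), the non-compound branch of line 75 (2 * FILTER_BITS - round_0), and the intbufrange expression are assumptions recalled from the library rather than copied from this listing.

  /* Sketch: how ConvolveParams round_0/round_1 appear to be derived, based on
   * the get_conv_params_no_round() fragments listed above. The constants and
   * the intbufrange formula are assumptions, not taken from this listing. */
  #include <assert.h>
  #include <stdio.h>

  #define FILTER_BITS 7           /* assumed libaom default */
  #define ROUND0_BITS 3           /* assumed libaom default */
  #define COMPOUND_ROUND1_BITS 7  /* assumed libaom default */

  struct conv_rounds {
    int round_0;
    int round_1;
  };

  static struct conv_rounds get_rounds(int is_compound, int bd) {
    struct conv_rounds c;
    c.round_0 = ROUND0_BITS;
    /* Compound (two-reference) prediction keeps extra precision in the
     * CONV_BUF; single prediction rounds the full 2 * FILTER_BITS away. */
    c.round_1 = is_compound ? COMPOUND_ROUND1_BITS
                            : 2 * FILTER_BITS - c.round_0;
    /* Keep the horizontal-pass intermediate within 16 bits; for 12-bit input
     * shift more in round_0 and, if not compound, less in round_1 (line 81). */
    const int intbufrange = bd + FILTER_BITS - c.round_0 + 2;  /* assumption */
    if (intbufrange > 16) {
      c.round_0 += intbufrange - 16;
      if (!is_compound) c.round_1 -= intbufrange - 16;
    }
    assert(c.round_0 + c.round_1 <= 2 * FILTER_BITS);
    return c;
  }

  int main(void) {
    for (int bd = 8; bd <= 12; bd += 2) {
      const struct conv_rounds s = get_rounds(0, bd);
      const struct conv_rounds d = get_rounds(1, bd);
      printf("bd=%2d  single: round_0=%d round_1=%d   compound: round_0=%d round_1=%d\n",
             bd, s.round_0, s.round_1, d.round_0, d.round_1);
    }
    return 0;
  }

Under these assumed defaults the single-prediction pair always sums to 2 * FILTER_BITS, which matches one branch of the av1_convolve_x_sr_c() / av1_convolve_y_sr_c() asserts shown in convolve.c above.
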
|
D | warped_motion.c |
    334  ? conv_params->round_1  in av1_highbd_warp_affine_c()
    340  2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_highbd_warp_affine_c()
    425  tmp32 = tmp32 - (1 << (offset_bits - conv_params->round_1)) -  in av1_highbd_warp_affine_c()
    426  (1 << (offset_bits - conv_params->round_1 - 1));  in av1_highbd_warp_affine_c()
    604  ? conv_params->round_1  in av1_warp_affine_c()
    610  2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_warp_affine_c()
    702  tmp32 = tmp32 - (1 << (offset_bits - conv_params->round_1)) -  in av1_warp_affine_c()
    703  (1 << (offset_bits - conv_params->round_1 - 1));  in av1_warp_affine_c()
|
/external/libaom/libaom/av1/common/x86/ |
D | av1_convolve_scale_sse4.c |
    114  const __m128i round_shift = _mm_cvtsi32_si128(conv_params->round_1);  in vfilter8()
    116  const int32_t sub32 = ((1 << (offset_bits - conv_params->round_1)) +  in vfilter8()
    117  (1 << (offset_bits - conv_params->round_1 - 1)));  in vfilter8()
    123  FILTER_BITS * 2 - conv_params->round_0 - conv_params->round_1;  in vfilter8()
    127  _mm_set1_epi32(((1 << conv_params->round_1) >> 1));  in vfilter8()
    205  CONV_BUF_TYPE res = ROUND_POWER_OF_TWO(sum, conv_params->round_1);  in vfilter8()
    225  int32_t tmp = res - ((1 << (offset_bits - conv_params->round_1)) +  in vfilter8()
    226  (1 << (offset_bits - conv_params->round_1 - 1)));  in vfilter8()
    340  const __m128i round_shift = _mm_cvtsi32_si128(conv_params->round_1);  in highbd_vfilter8()
    342  const int32_t sub32 = ((1 << (offset_bits - conv_params->round_1)) +  in highbd_vfilter8()
    [all …]
|
D | convolve_2d_sse2.c |
    40   FILTER_BITS * 2 - conv_params->round_0 - conv_params->round_1;  in av1_convolve_2d_sr_sse2()
    132  _mm_set1_epi32((1 << offset_bits) + ((1 << conv_params->round_1) >> 1));  in av1_convolve_2d_sr_sse2()
    133  const __m128i sum_shift = _mm_cvtsi32_si128(conv_params->round_1);  in av1_convolve_2d_sr_sse2()
    136  ((1 << bits) >> 1) - (1 << (offset_bits - conv_params->round_1)) -  in av1_convolve_2d_sr_sse2()
    137  ((1 << (offset_bits - conv_params->round_1)) >> 1));  in av1_convolve_2d_sr_sse2()
    371  FILTER_BITS * 2 - conv_params->round_1 - conv_params->round_0;  in av1_dist_wtd_convolve_2d_copy_sse2()
    385  bd + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_dist_wtd_convolve_2d_copy_sse2()
    389  2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_dist_wtd_convolve_2d_copy_sse2()
|
D | jnt_convolve_sse2.c |
    30   const int bits = FILTER_BITS - conv_params->round_1;  in av1_dist_wtd_convolve_x_sse2()
    42   bd + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_dist_wtd_convolve_x_sse2()
    46   2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_dist_wtd_convolve_x_sse2()
    172  bd + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_dist_wtd_convolve_y_sse2()
    176  2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_dist_wtd_convolve_y_sse2()
    178  const __m128i round_const = _mm_set1_epi32((1 << conv_params->round_1) >> 1);  in av1_dist_wtd_convolve_y_sse2()
    179  const __m128i round_shift = _mm_cvtsi32_si128(conv_params->round_1);  in av1_dist_wtd_convolve_y_sse2()
    417  bd + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_dist_wtd_convolve_2d_sse2()
    421  2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_dist_wtd_convolve_2d_sse2()
    528  ((1 << conv_params->round_1) >> 1) -  in av1_dist_wtd_convolve_2d_sse2()
    [all …]
|
D | highbd_jnt_convolve_avx2.c |
    38   FILTER_BITS * 2 - conv_params->round_1 - conv_params->round_0;  in av1_highbd_dist_wtd_convolve_2d_copy_avx2()
    50   bd + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_highbd_dist_wtd_convolve_2d_copy_avx2()
    55   2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_highbd_dist_wtd_convolve_2d_copy_avx2()
    265  ((1 << conv_params->round_1) >> 1) -  in av1_highbd_dist_wtd_convolve_2d_avx2()
    267  const __m128i round_shift_y = _mm_cvtsi32_si128(conv_params->round_1);  in av1_highbd_dist_wtd_convolve_2d_avx2()
    270  bd + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_highbd_dist_wtd_convolve_2d_avx2()
    274  2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_highbd_dist_wtd_convolve_2d_avx2()
    476  const int bits = FILTER_BITS - conv_params->round_1;  in av1_highbd_dist_wtd_convolve_x_avx2()
    497  bd + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_highbd_dist_wtd_convolve_x_avx2()
    501  2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_highbd_dist_wtd_convolve_x_avx2()
    [all …]
|
D | jnt_convolve_avx2.c |
    48   const int bits = FILTER_BITS - conv_params->round_1;  in av1_dist_wtd_convolve_x_avx2()
    53   bd + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_dist_wtd_convolve_x_avx2()
    57   2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_dist_wtd_convolve_x_avx2()
    203  _mm256_set1_epi32((1 << conv_params->round_1) >> 1);  in av1_dist_wtd_convolve_y_avx2()
    204  const __m128i round_shift = _mm_cvtsi32_si128(conv_params->round_1);  in av1_dist_wtd_convolve_y_avx2()
    209  bd + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_dist_wtd_convolve_y_avx2()
    216  2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_dist_wtd_convolve_y_avx2()
    613  bd + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_dist_wtd_convolve_2d_avx2()
    617  2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_dist_wtd_convolve_2d_avx2()
    627  ((1 << conv_params->round_1) >> 1) -  in av1_dist_wtd_convolve_2d_avx2()
    [all …]
|
D | highbd_convolve_2d_sse4.c |
    37   FILTER_BITS * 2 - conv_params->round_1 - conv_params->round_0;  in av1_highbd_dist_wtd_convolve_2d_copy_sse4_1()
    49   bd + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_highbd_dist_wtd_convolve_2d_copy_sse4_1()
    54   2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_highbd_dist_wtd_convolve_2d_copy_sse4_1()
    195  bd + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_highbd_dist_wtd_convolve_2d_sse4_1()
    199  2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_highbd_dist_wtd_convolve_2d_sse4_1()
    296  ((1 << conv_params->round_1) >> 1) -  in av1_highbd_dist_wtd_convolve_2d_sse4_1()
    298  const __m128i round_shift = _mm_cvtsi32_si128(conv_params->round_1);  in av1_highbd_dist_wtd_convolve_2d_sse4_1()
|
D | highbd_jnt_convolve_sse4.c |
    43   _mm_set1_epi32(((1 << conv_params->round_1) >> 1));  in av1_highbd_dist_wtd_convolve_y_sse4_1()
    44   const __m128i round_shift_y = _mm_cvtsi32_si128(conv_params->round_1);  in av1_highbd_dist_wtd_convolve_y_sse4_1()
    48   bd + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_highbd_dist_wtd_convolve_y_sse4_1()
    52   2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_highbd_dist_wtd_convolve_y_sse4_1()
    271  const int bits = FILTER_BITS - conv_params->round_1;  in av1_highbd_dist_wtd_convolve_x_sse4_1()
    292  bd + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_highbd_dist_wtd_convolve_x_sse4_1()
    296  2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_highbd_dist_wtd_convolve_x_sse4_1()
|
D | highbd_wiener_convolve_ssse3.c |
    134  _mm_set1_epi32((1 << (conv_params->round_1 - 1)) -  in av1_highbd_wiener_convolve_add_src_ssse3()
    135  (1 << (bd + conv_params->round_1 - 1)));  in av1_highbd_wiener_convolve_add_src_ssse3()
    189  _mm_add_epi32(res_lo, round_const), conv_params->round_1);  in av1_highbd_wiener_convolve_add_src_ssse3()
    191  _mm_add_epi32(res_hi, round_const), conv_params->round_1);  in av1_highbd_wiener_convolve_add_src_ssse3()
|
D | wiener_convolve_sse2.c |
    132  _mm_set1_epi32((1 << (conv_params->round_1 - 1)) -  in av1_wiener_convolve_add_src_sse2()
    133  (1 << (bd + conv_params->round_1 - 1)));  in av1_wiener_convolve_add_src_sse2()
    187  _mm_add_epi32(res_lo, round_const), conv_params->round_1);  in av1_wiener_convolve_add_src_sse2()
    189  _mm_add_epi32(res_hi, round_const), conv_params->round_1);  in av1_wiener_convolve_add_src_sse2()
|
D | convolve_2d_avx2.c |
    34   FILTER_BITS * 2 - conv_params->round_0 - conv_params->round_1;  in av1_convolve_2d_sr_avx2()
    44   (1 << offset_bits) + ((1 << conv_params->round_1) >> 1));  in av1_convolve_2d_sr_avx2()
    45   const __m128i sum_shift_v = _mm_cvtsi32_si128(conv_params->round_1);  in av1_convolve_2d_sr_avx2()
    48   ((1 << bits) >> 1) - (1 << (offset_bits - conv_params->round_1)) -  in av1_convolve_2d_sr_avx2()
    49   ((1 << (offset_bits - conv_params->round_1)) >> 1));  in av1_convolve_2d_sr_avx2()
|
D | highbd_wiener_convolve_avx2.c |
    171  _mm256_set1_epi32((1 << (conv_params->round_1 - 1)) -  in av1_highbd_wiener_convolve_add_src_avx2()
    172  (1 << (bd + conv_params->round_1 - 1)));  in av1_highbd_wiener_convolve_add_src_avx2()
    229  _mm256_add_epi32(res_lo, round_const), conv_params->round_1);  in av1_highbd_wiener_convolve_add_src_avx2()
    231  _mm256_add_epi32(res_hi, round_const), conv_params->round_1);  in av1_highbd_wiener_convolve_add_src_avx2()
|
D | jnt_convolve_ssse3.c |
    48   bd + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_dist_wtd_convolve_2d_ssse3()
    52   2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in av1_dist_wtd_convolve_2d_ssse3()
    144  ((1 << conv_params->round_1) >> 1) -  in av1_dist_wtd_convolve_2d_ssse3()
    146  const __m128i round_shift = _mm_cvtsi32_si128(conv_params->round_1);  in av1_dist_wtd_convolve_2d_ssse3()
|
D | wiener_convolve_avx2.c |
    116  _mm256_set1_epi32((1 << (conv_params->round_1 - 1)) -  in av1_wiener_convolve_add_src_avx2()
    117  (1 << (bd + conv_params->round_1 - 1)));  in av1_wiener_convolve_add_src_avx2()
    118  const __m128i round_shift_v = _mm_cvtsi32_si128(conv_params->round_1);  in av1_wiener_convolve_add_src_avx2()
|
D | highbd_convolve_2d_ssse3.c |
    45   _mm_set1_epi32(((1 << conv_params->round_1) >> 1) -  in av1_highbd_convolve_2d_sr_ssse3()
    47   const __m128i round_shift_y = _mm_cvtsi32_si128(conv_params->round_1);  in av1_highbd_convolve_2d_sr_ssse3()
    50   FILTER_BITS * 2 - conv_params->round_0 - conv_params->round_1;  in av1_highbd_convolve_2d_sr_ssse3()
|
D | convolve_sse2.c |
    95   assert(((conv_params->round_0 + conv_params->round_1) <= (FILTER_BITS + 1)) ||  in av1_convolve_y_sr_sse2()
    96   ((conv_params->round_0 + conv_params->round_1) == (2 * FILTER_BITS)));  in av1_convolve_y_sr_sse2()
    259  assert((FILTER_BITS - conv_params->round_1) >= 0 ||  in av1_convolve_x_sr_sse2()
    260  ((conv_params->round_0 + conv_params->round_1) == 2 * FILTER_BITS));  in av1_convolve_x_sr_sse2()
|
D | convolve_avx2.c |
    35   assert(((conv_params->round_0 + conv_params->round_1) <= (FILTER_BITS + 1)) ||  in av1_convolve_y_sr_avx2()
    36   ((conv_params->round_0 + conv_params->round_1) == (2 * FILTER_BITS)));  in av1_convolve_y_sr_avx2()
    281  assert((FILTER_BITS - conv_params->round_1) >= 0 ||  in av1_convolve_x_sr_avx2()
    282  ((conv_params->round_0 + conv_params->round_1) == 2 * FILTER_BITS));  in av1_convolve_x_sr_avx2()
|
D | highbd_convolve_2d_avx2.c |
    49   ((1 << conv_params->round_1) >> 1) -  in av1_highbd_convolve_2d_sr_avx2()
    51   const __m128i round_shift_y = _mm_cvtsi32_si128(conv_params->round_1);  in av1_highbd_convolve_2d_sr_avx2()
    54   FILTER_BITS * 2 - conv_params->round_0 - conv_params->round_1;  in av1_highbd_convolve_2d_sr_avx2()
|
/external/libaom/libaom/aom_dsp/ |
D | blend_a64_mask.c |
    44   const int round_offset = (1 << (offset_bits - conv_params->round_1)) +  in aom_lowbd_blend_a64_d16_mask_c()
    45   (1 << (offset_bits - conv_params->round_1 - 1));  in aom_lowbd_blend_a64_d16_mask_c()
    47   2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in aom_lowbd_blend_a64_d16_mask_c()
    130  const int round_offset = (1 << (offset_bits - conv_params->round_1)) +  in aom_highbd_blend_a64_d16_mask_c()
    131  (1 << (offset_bits - conv_params->round_1 - 1));  in aom_highbd_blend_a64_d16_mask_c()
    133  2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in aom_highbd_blend_a64_d16_mask_c()
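
As a concrete check on the d16 blend constants above: with 8-bit input and the default compound rounding parameters (FILTER_BITS = 7, round_0 = 3, round_1 = 7 — assumed libaom defaults, not shown in this listing), and offset_bits = bd + 2 * FILTER_BITS - round_0 as in the dist-weighted convolve entries, the expressions from lines 44–47 evaluate as in this small sketch.

  /* Worked example of the aom_lowbd_blend_a64_d16_mask_c() constants above.
   * The parameter values are assumed libaom defaults; illustrative only. */
  #include <stdio.h>

  #define FILTER_BITS 7  /* assumed default */

  int main(void) {
    const int bd = 8, round_0 = 3, round_1 = 7;                    /* assumed defaults */
    const int offset_bits = bd + 2 * FILTER_BITS - round_0;        /* 19 */
    const int round_offset = (1 << (offset_bits - round_1)) +
                             (1 << (offset_bits - round_1 - 1));   /* 4096 + 2048 = 6144 */
    const int round_bits = 2 * FILTER_BITS - round_0 - round_1;    /* 4 */
    printf("offset_bits=%d round_offset=%d round_bits=%d\n",
           offset_bits, round_offset, round_bits);
    return 0;
  }
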
|
/external/libaom/libaom/av1/common/arm/ |
D | wiener_convolve_neon.c |
    443  bd, conv_params->round_1);  in av1_wiener_convolve_add_src_neon()
    445  bd, conv_params->round_1);  in av1_wiener_convolve_add_src_neon()
    447  bd, conv_params->round_1);  in av1_wiener_convolve_add_src_neon()
    449  bd, conv_params->round_1);  in av1_wiener_convolve_add_src_neon()
    480  filter_y_tmp, bd, conv_params->round_1);  in av1_wiener_convolve_add_src_neon()
    508  bd, conv_params->round_1);  in av1_wiener_convolve_add_src_neon()
|
D | jnt_convolve_neon.c |
    574  const int16_t sub_const = (1 << (offset_bits - conv_params->round_1)) +  in dist_wtd_convolve_2d_vert_neon()
    575  (1 << (offset_bits - conv_params->round_1 - 1));  in dist_wtd_convolve_2d_vert_neon()
    578  2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;  in dist_wtd_convolve_2d_vert_neon()
    580  const int32x4_t round_shift_vec = vdupq_n_s32(-(conv_params->round_1));  in dist_wtd_convolve_2d_vert_neon()
    769  FILTER_BITS * 2 - conv_params->round_1 - conv_params->round_0;  in av1_dist_wtd_convolve_2d_copy_neon()
    772  const int round_offset = (1 << (offset_bits - conv_params->round_1)) +  in av1_dist_wtd_convolve_2d_copy_neon()
    773  (1 << (offset_bits - conv_params->round_1 - 1));  in av1_dist_wtd_convolve_2d_copy_neon()
    891  const int bits = FILTER_BITS - conv_params->round_1;  in av1_dist_wtd_convolve_x_neon()
    894  const int round_offset = (1 << (offset_bits - conv_params->round_1)) +  in av1_dist_wtd_convolve_x_neon()
    895  (1 << (offset_bits - conv_params->round_1 - 1));  in av1_dist_wtd_convolve_x_neon()
    [all …]
|
D | reconinter_neon.c |
    31   2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1 + (bd - 8);  in av1_build_compound_diffwtd_mask_d16_neon()
|
/external/libaom/libaom/test/ |
D | reconinter_test.cc |
    106  bd + 2 * FILTER_BITS - conv_params.round_0 - conv_params.round_1 + 2;  in RunCheckOutput()
    146  bd + 2 * FILTER_BITS - conv_params.round_0 - conv_params.round_1 + 2;  in RunSpeedTest()
|