/external/libvpx/libvpx/vpx_dsp/x86/ |
D | quantize_avx.c |
     34   __m128i zbin, round, quant, dequant, shift;  in vpx_quantize_b_avx() local
     47   load_b_values(zbin_ptr, &zbin, round_ptr, &round, quant_ptr, &quant,  in vpx_quantize_b_avx()
     57   cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);  in vpx_quantize_b_avx()
     58   zbin = _mm_unpackhi_epi64(zbin, zbin);  // Switch DC to AC  in vpx_quantize_b_avx()
     59   cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);  in vpx_quantize_b_avx()
    110   cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);  in vpx_quantize_b_avx()
    111   cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);  in vpx_quantize_b_avx()
    160   __m128i zbin, round, quant, dequant, shift;  in vpx_quantize_b_32x32_avx() local
    174   zbin = _mm_load_si128((const __m128i *)zbin_ptr);  in vpx_quantize_b_32x32_avx()
    176   zbin = _mm_add_epi16(zbin, one);  in vpx_quantize_b_32x32_avx()
    [all …]
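
The pattern repeated across the x86 rows above is the DC/AC split: zbin_ptr stores the DC zero-bin in element 0 and the AC zero-bin from element 1 on, so the first compare tests coefficient 0 against the DC value and _mm_unpackhi_epi64 then broadcasts the AC half for every remaining lane. A minimal SSE2 sketch of just that step, assuming abs0/abs1 already hold absolute coefficient values (the helper name and signature are illustrative, not the library's):

    #include <emmintrin.h> /* SSE2 */
    #include <stdint.h>

    /* Sketch of the DC->AC threshold handling shown above.  zbin_ptr is
     * assumed 16-byte aligned with {DC, AC, AC, ...} contents. */
    static void zbin_masks(const int16_t *zbin_ptr, __m128i abs0, __m128i abs1,
                           __m128i *mask0, __m128i *mask1) {
      __m128i zbin = _mm_load_si128((const __m128i *)zbin_ptr);
      zbin = _mm_sub_epi16(zbin, _mm_set1_epi16(1)); /* bias, see quantize_sse2.h below */
      *mask0 = _mm_cmpgt_epi16(abs0, zbin);          /* lane 0 tested against DC */
      zbin = _mm_unpackhi_epi64(zbin, zbin);         /* broadcast the AC half */
      *mask1 = _mm_cmpgt_epi16(abs1, zbin);          /* all lanes tested against AC */
    }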
|
D | quantize_ssse3.c |
     30   __m128i zbin, round, quant, dequant, shift;  in vpx_quantize_b_ssse3() local
     40   load_b_values(zbin_ptr, &zbin, round_ptr, &round, quant_ptr, &quant,  in vpx_quantize_b_ssse3()
     50   cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);  in vpx_quantize_b_ssse3()
     51   zbin = _mm_unpackhi_epi64(zbin, zbin);  // Switch DC to AC  in vpx_quantize_b_ssse3()
     52   cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);  in vpx_quantize_b_ssse3()
     85   cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);  in vpx_quantize_b_ssse3()
     86   cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);  in vpx_quantize_b_ssse3()
    125   __m128i zbin, round, quant, dequant, shift;  in vpx_quantize_b_32x32_ssse3() local
    139   zbin = _mm_load_si128((const __m128i *)zbin_ptr);  in vpx_quantize_b_32x32_ssse3()
    141   zbin = _mm_add_epi16(zbin, one);  in vpx_quantize_b_32x32_ssse3()
    [all …]
|
D | quantize_sse2.c |
     30   __m128i zbin, round, quant, dequant, shift;  in vpx_quantize_b_sse2() local
     41   load_b_values(zbin_ptr, &zbin, round_ptr, &round, quant_ptr, &quant,  in vpx_quantize_b_sse2()
     54   cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);  in vpx_quantize_b_sse2()
     55   zbin = _mm_unpackhi_epi64(zbin, zbin);  // Switch DC to AC  in vpx_quantize_b_sse2()
     56   cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);  in vpx_quantize_b_sse2()
     93   cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);  in vpx_quantize_b_sse2()
     94   cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);  in vpx_quantize_b_sse2()
|
D | quantize_sse2.h |
     19   static INLINE void load_b_values(const int16_t *zbin_ptr, __m128i *zbin,  in load_b_values() argument
     24   *zbin = _mm_load_si128((const __m128i *)zbin_ptr);  in load_b_values()
     27   *zbin = _mm_sub_epi16(*zbin, _mm_set1_epi16(1));  in load_b_values()
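
The _mm_sub_epi16(*zbin, 1) in load_b_values() exists because SSE2 only provides a strict 16-bit greater-than compare. Lowering zbin by one at load time lets the quantizers evaluate abs(coeff) > (zbin - 1) in place of abs(coeff) >= zbin. A scalar model of the equivalence (helper name is illustrative):

    #include <stdint.h>

    /* abs_coeff > zbin - 1 gives the same result as abs_coeff >= zbin;
     * values are promoted to int, so no 16-bit overflow is involved. */
    static int passes_zbin(int16_t abs_coeff, int16_t zbin) {
      return abs_coeff > zbin - 1;
    }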
|
/external/libaom/libaom/aom_dsp/x86/ |
D | quantize_ssse3.c |
     75   __m128i zbin, round, quant, dequant, shift;  in aom_quantize_b_64x64_ssse3() local
     84   zbin = _mm_load_si128((const __m128i *)zbin_ptr);  in aom_quantize_b_64x64_ssse3()
     91   zbin = _mm_add_epi16(zbin, two);  in aom_quantize_b_64x64_ssse3()
     93   zbin = _mm_srli_epi16(zbin, 2);  in aom_quantize_b_64x64_ssse3()
     95   zbin = _mm_sub_epi16(zbin, one);  in aom_quantize_b_64x64_ssse3()
    103   cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);  in aom_quantize_b_64x64_ssse3()
    104   zbin = _mm_unpackhi_epi64(zbin, zbin);  in aom_quantize_b_64x64_ssse3()
    105   cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);  in aom_quantize_b_64x64_ssse3()
    154   cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);  in aom_quantize_b_64x64_ssse3()
    155   cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);  in aom_quantize_b_64x64_ssse3()
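
Lines 91-95 above implement the 64x64 scaling: with log_scale = 2 the effective zero-bin becomes ROUND_POWER_OF_TWO(zbin, 2), and one more is subtracted so the strict _mm_cmpgt_epi16 compare can stand in for >=. A scalar model of that setup:

    #include <stdint.h>

    /* (zbin + 2) >> 2 rounds to nearest; the final - 1 is the same
     * >-for->= bias used by the other x86 quantizers. */
    static int16_t scaled_zbin_64x64(int16_t zbin) {
      return (int16_t)(((zbin + 2) >> 2) - 1);
    }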
|
D | adaptive_quantize_sse2.c |
     28   __m128i zbin, round, quant, dequant, shift;  in aom_quantize_b_adaptive_sse2() local
     64   load_b_values(zbin_ptr, &zbin, round_ptr, &round, quant_ptr, &quant,  in aom_quantize_b_adaptive_sse2()
     80   cmp_mask0 = _mm_and_si128(prescan0, _mm_cmpgt_epi16(qcoeff0, zbin));  in aom_quantize_b_adaptive_sse2()
     81   zbin = _mm_unpackhi_epi64(zbin, zbin);  // Switch DC to AC  in aom_quantize_b_adaptive_sse2()
     82   cmp_mask1 = _mm_and_si128(prescan1, _mm_cmpgt_epi16(qcoeff1, zbin));  in aom_quantize_b_adaptive_sse2()
    143   cmp_mask0 = _mm_and_si128(prescan0, _mm_cmpgt_epi16(qcoeff0, zbin));  in aom_quantize_b_adaptive_sse2()
    144   cmp_mask1 = _mm_and_si128(prescan1, _mm_cmpgt_epi16(qcoeff1, zbin));  in aom_quantize_b_adaptive_sse2()
    228   __m128i zbin, round, quant, dequant, shift;  in aom_quantize_b_32x32_adaptive_sse2() local
    264   zbin = _mm_load_si128((const __m128i *)zbin_ptr);  in aom_quantize_b_32x32_adaptive_sse2()
    271   zbin = _mm_add_epi16(zbin, log_scale_vec);  in aom_quantize_b_32x32_adaptive_sse2()
    [all …]
|
D | quantize_sse2.c |
     31   __m128i zbin, round, quant, dequant, shift;  in aom_quantize_b_sse2() local
     40   load_b_values(zbin_ptr, &zbin, round_ptr, &round, quant_ptr, &quant,  in aom_quantize_b_sse2()
     53   cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);  in aom_quantize_b_sse2()
     54   zbin = _mm_unpackhi_epi64(zbin, zbin);  // Switch DC to AC  in aom_quantize_b_sse2()
     55   cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);  in aom_quantize_b_sse2()
     96   cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);  in aom_quantize_b_sse2()
     97   cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);  in aom_quantize_b_sse2()
|
D | quantize_avx_x86_64.asm |
     19   cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, zbin, round, quant, \
     40   mova m0, [zbinq]                  ; m0 = zbin
     56   paddw m0, m4                      ; m0 = zbin - 1
     60   pcmpgtw m7, m6, m0                ; m7 = c[i] >= zbin
     62   pcmpgtw m12, m11, m0              ; m12 = c[i] >= zbin
     64   ; Check if all coeffs are less than zbin. If yes, we just write zeros
    173   DEFINE_ARGS coeff, ncoeff, zbin, round, quant, shift, \
    183   mova m0, [zbinq]                  ; m0 = zbin
    222   pcmpgtw m7, m6, m0                ; m7 = c[i] >= zbin
    224   pcmpgtw m12, m11, m0              ; m12 = c[i] >= zbin
    [all …]
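
The comment at line 64 describes a fast path: when no coefficient reaches zbin, the whole block quantizes to zero and the per-coefficient work is skipped. An equivalent check in C intrinsics (a sketch, not a transcription of the assembly):

    #include <emmintrin.h>

    /* If no lane of either zbin mask is set, every coefficient is below the
     * zero-bin; the caller can write zero qcoeff/dqcoeff and eob = 0. */
    static int all_below_zbin(__m128i mask0, __m128i mask1) {
      return _mm_movemask_epi8(_mm_or_si128(mask0, mask1)) == 0;
    }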
|
D | quantize_x86.h |
     16   static INLINE void load_b_values(const int16_t *zbin_ptr, __m128i *zbin,  in load_b_values() argument
     21   *zbin = _mm_load_si128((const __m128i *)zbin_ptr);  in load_b_values()
     24   *zbin = _mm_sub_epi16(*zbin, _mm_set1_epi16(1));  in load_b_values()
|
D | quantize_ssse3_x86_64.asm |
     22   cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, zbin, round, quant, \
     33   mova m0, [zbinq]                  ; m0 = zbin
     67   pcmpgtw m7, m6, m0                ; m7 = c[i] >= zbin
     69   pcmpgtw m12, m11, m0              ; m12 = c[i] >= zbin
    172   pcmpgtw m7, m6, m0                ; m7 = c[i] >= zbin
    173   pcmpgtw m12, m11, m0              ; m12 = c[i] >= zbin
|
D | highbd_quantize_intrin_avx2.c |
     35   const __m128i zbin = _mm_loadu_si128((const __m128i *)zbin_ptr);  in init_qp() local
     40   init_one_qp(&zbin, &qp[0]);  in init_qp()
|
/external/libvpx/libvpx/vpx_dsp/ppc/ |
D | quantize_vsx.c |
    108   int16x8_t zbin = vec_vsx_ld(0, zbin_ptr);  in vpx_quantize_b_vsx() local
    120   zero_mask0 = vec_cmpge(coeff0_abs, zbin);  in vpx_quantize_b_vsx()
    121   zbin = vec_splat(zbin, 1);  in vpx_quantize_b_vsx()
    122   zero_mask1 = vec_cmpge(coeff1_abs, zbin);  in vpx_quantize_b_vsx()
    161   zero_mask0 = vec_cmpge(coeff0_abs, zbin);  in vpx_quantize_b_vsx()
    162   zero_mask1 = vec_cmpge(coeff1_abs, zbin);  in vpx_quantize_b_vsx()
    163   zero_mask2 = vec_cmpge(coeff2_abs, zbin);  in vpx_quantize_b_vsx()
    217   int16x8_t zbin = vec_vsx_ld(0, zbin_ptr);  in vpx_quantize_b_32x32_vsx() local
    235   zbin = vec_sra(vec_add(zbin, vec_ones_s16), vec_ones_u16);  in vpx_quantize_b_32x32_vsx()
    238   zero_mask0 = vec_cmpge(coeff0_abs, zbin);  in vpx_quantize_b_32x32_vsx()
    [all …]
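
The VSX path differs from x86 in two small ways: vec_cmpge is a true >= compare, so no zbin - 1 bias is needed, and the AC threshold is broadcast with vec_splat(zbin, 1) rather than an unpack. Line 235 shows the 32x32 scaling, modeled here in scalar form:

    #include <stdint.h>

    /* 32x32 blocks use log_scale = 1, so the zero-bin is halved with
     * rounding; no - 1 bias because the compare is already >=. */
    static int16_t scaled_zbin_32x32(int16_t zbin) {
      return (int16_t)((zbin + 1) >> 1); /* ROUND_POWER_OF_TWO(zbin, 1) */
    }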
|
/external/libvpx/libvpx/vp9/encoder/ |
D | vp9_quantize.c |
    180   x->skip_block, p->zbin, p->round, p->quant,  in vp9_regular_quantize_b_4x4()
    187   p->zbin, p->round, p->quant, p->quant_shift, qcoeff, dqcoeff,  in vp9_regular_quantize_b_4x4()
    293   x->plane[0].zbin = quants->y_zbin[qindex];  in vp9_init_plane_quantizers()
    297   x->plane[0].quant_thred[0] = x->plane[0].zbin[0] * x->plane[0].zbin[0];  in vp9_init_plane_quantizers()
    298   x->plane[0].quant_thred[1] = x->plane[0].zbin[1] * x->plane[0].zbin[1];  in vp9_init_plane_quantizers()
    306   x->plane[i].zbin = quants->uv_zbin[qindex];  in vp9_init_plane_quantizers()
    310   x->plane[i].quant_thred[0] = x->plane[i].zbin[0] * x->plane[i].zbin[0];  in vp9_init_plane_quantizers()
    311   x->plane[i].quant_thred[1] = x->plane[i].zbin[1] * x->plane[i].zbin[1];  in vp9_init_plane_quantizers()
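
vp9_init_plane_quantizers() attaches the per-qindex zbin table to each plane and caches the squares of the DC and AC zero-bins as quant_thred for threshold checks elsewhere in the encoder. A sketch that mirrors only the fields visible in the listing (struct and field names are illustrative, not the encoder's):

    #include <stdint.h>

    struct plane_quant {
      const int16_t *zbin;    /* {DC, AC, AC, ...} for the current qindex */
      int64_t quant_thred[2]; /* squared DC/AC zero-bins */
    };

    static void init_plane_quant(struct plane_quant *p, const int16_t *zbin_tbl) {
      p->zbin = zbin_tbl;
      p->quant_thred[0] = (int64_t)zbin_tbl[0] * zbin_tbl[0]; /* DC */
      p->quant_thred[1] = (int64_t)zbin_tbl[1] * zbin_tbl[1]; /* AC */
    }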
|
D | vp9_encodemb.c |
    515   vpx_highbd_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin,  in vp9_xform_quant()
    522   vpx_highbd_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,  in vp9_xform_quant()
    529   vpx_highbd_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,  in vp9_xform_quant()
    537   vpx_highbd_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,  in vp9_xform_quant()
    550   vpx_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round,  in vp9_xform_quant()
    557   vpx_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round, p->quant,  in vp9_xform_quant()
    563   vpx_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, p->quant,  in vp9_xform_quant()
    570   vpx_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant,  in vp9_xform_quant()
    817   vpx_highbd_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin,  in vp9_encode_block_intra()
    837   vpx_highbd_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,  in vp9_encode_block_intra()
    [all …]
|
D | vp9_block.h | 41 int16_t *zbin; member
|
/external/libvpx/libvpx/vpx_dsp/arm/ |
D | quantize_neon.c |
     51   const int16x8_t zbin = vld1q_s16(zbin_ptr);  in vpx_quantize_b_neon() local
     65   vreinterpretq_s16_u16(vcgeq_s16(coeff_abs, zbin));  in vpx_quantize_b_neon()
     99   const int16x8_t zbin = vdupq_n_s16(zbin_ptr[1]);  in vpx_quantize_b_neon() local
    115   vreinterpretq_s16_u16(vcgeq_s16(coeff_abs, zbin));  in vpx_quantize_b_neon()
    211   const int16x8_t zbin = vrshrq_n_s16(vld1q_s16(zbin_ptr), 1);  in vpx_quantize_b_32x32_neon() local
    225   vreinterpretq_s16_u16(vcgeq_s16(coeff_abs, zbin));  in vpx_quantize_b_32x32_neon()
    257   const int16x8_t zbin = vrshrq_n_s16(vdupq_n_s16(zbin_ptr[1]), 1);  in vpx_quantize_b_32x32_neon() local
    273   vreinterpretq_s16_u16(vcgeq_s16(coeff_abs, zbin));  in vpx_quantize_b_32x32_neon()
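
The NEON path likewise uses a true >= compare (vcgeq_s16), so no zbin - 1 bias appears; the first eight coefficients use the full {DC, AC, ...} vector and later ones a vdupq_n_s16(zbin_ptr[1]) broadcast, with the 32x32 variant halving zbin via a rounding shift. A minimal sketch of the first-iteration mask, assuming coeff_abs already holds absolute values (helper name is illustrative):

    #include <arm_neon.h>
    #include <stdint.h>

    /* Lane 0 is tested against the DC zero-bin, the rest against AC. */
    static int16x8_t zbin_mask_neon(const int16_t *zbin_ptr, int16x8_t coeff_abs) {
      const int16x8_t zbin = vld1q_s16(zbin_ptr); /* {DC, AC, AC, ...} */
      return vreinterpretq_s16_u16(vcgeq_s16(coeff_abs, zbin));
    }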
|
/external/libvpx/libvpx/vp8/encoder/mips/mmi/ |
D | vp8_quantize_mmi.c |
     21   zbin = zbin_ptr[rc] + *(zbin_boost_ptr++) + zbin_oq_value; \
     22   if (x >= zbin) { \
    203   int x, y, z, sz, zbin;  in vp8_regular_quantize_b_mmi() local
    206   const int16_t *zbin_ptr = b->zbin;  in vp8_regular_quantize_b_mmi()
|
/external/libvpx/libvpx/test/ |
D | vp9_quantize_test.cc |
     41   int skip_block, const int16_t *zbin,
     61   const int16_t *zbin, const int16_t *round,  in QuantFPWrapper() argument
     66   (void)zbin;  in QuantFPWrapper()
    257   void GenerateHelperArrays(ACMRandom *rnd, int16_t *zbin, int16_t *round,  in GenerateHelperArrays() argument
    274   zbin[j] = rnd->RandRange(1200);  in GenerateHelperArrays()
    286   zbin[j] = zbin[1];  in GenerateHelperArrays()
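
GenerateHelperArrays() encodes the invariant that the SIMD paths rely on: only zbin[0] (DC) and zbin[1] (AC) are independent, and every later entry repeats the AC value. A sketch of a table filler with the same shape (the table size of 8 and the helper name are illustrative):

    #include <stdint.h>

    static void fill_zbin_table(int16_t zbin[8], int16_t dc, int16_t ac) {
      zbin[0] = dc;                             /* DC zero-bin */
      zbin[1] = ac;                             /* AC zero-bin */
      for (int j = 2; j < 8; ++j) zbin[j] = ac; /* remaining entries mirror AC */
    }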
|
/external/libaom/libaom/test/ |
D | quantize_func_test.cc |
    122   const int16_t *zbin = qtab_->quant.y_zbin[q];  in QuantizeRun() local
    142   quant_ref_(coeff_ptr, n_coeffs, zbin, round, quant, quant_shift,  in QuantizeRun()
    146   ASM_REGISTER_STATE_CHECK(quant_(coeff_ptr, n_coeffs, zbin, round, quant,  in QuantizeRun()
    290   const int16_t *zbin = qtab_->quant.y_zbin[q];  in TEST_P() local
    304   quant_ref_(coeff_ptr, n_coeffs, zbin, round_fp, quant_fp, quant_shift,  in TEST_P()
    311   quant_(coeff_ptr, n_coeffs, zbin, round_fp, quant_fp, quant_shift, qcoeff,  in TEST_P()
|
/external/libvpx/libvpx/vp8/encoder/ |
D | vp8_quantize.c |
     50   int zbin;  in vp8_regular_quantize_b_c() local
     54   short *zbin_ptr = b->zbin;  in vp8_regular_quantize_b_c()
     72   zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value;  in vp8_regular_quantize_b_c()
     78   if (x >= zbin) {  in vp8_regular_quantize_b_c()
    349   x->block[i].zbin = cpi->Y1zbin[QIndex];  in vp8cx_mb_init_quantizer()
    362   x->block[i].zbin = cpi->UVzbin[QIndex];  in vp8cx_mb_init_quantizer()
    374   x->block[24].zbin = cpi->Y2zbin[QIndex];  in vp8cx_mb_init_quantizer()
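
VP8's scalar quantizer (lines 72 and 78 above) builds the threshold per zig-zag position: the positional zbin, plus a zero-run boost read through zbin_boost_ptr, plus the frame-level zbin_oq offset. A scalar sketch of just that visible threshold test (helper name is illustrative):

    #include <stdint.h>

    static int vp8_passes_zbin(int16_t coeff, int16_t zbin_rc, int16_t boost,
                               int16_t zbin_oq_value) {
      const int sz = coeff >> 15;      /* -1 if negative, else 0 */
      const int x = (coeff ^ sz) - sz; /* abs(coeff), as in the C reference */
      return x >= zbin_rc + boost + zbin_oq_value;
    }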
|
D | block.h | 42 short *zbin; member
|
D | ethreading.c | 365 z->block[i].zbin = x->block[i].zbin; in setup_mbby_copy()
|
/external/libvpx/libvpx/vp8/encoder/x86/ |
D | quantize_sse4.c |
     41   __m128i zbin0 = _mm_load_si128((__m128i *)(b->zbin));  in vp8_regular_quantize_b_sse4_1()
     42   __m128i zbin1 = _mm_load_si128((__m128i *)(b->zbin + 8));  in vp8_regular_quantize_b_sse4_1()
|
D | vp8_quantize_sse2.c |
     46   __m128i zbin0 = _mm_load_si128((__m128i *)(b->zbin));  in vp8_regular_quantize_b_sse2()
     47   __m128i zbin1 = _mm_load_si128((__m128i *)(b->zbin + 8));  in vp8_regular_quantize_b_sse2()
|
/external/libvpx/libvpx/vp8/encoder/mips/msa/ |
D | quantize_msa.c |
     82   int16_t *zbin_boost, int16_t *coeff_ptr, int16_t *zbin, int16_t *round,  in exact_regular_quantize_b_msa() argument
    114   LD_SH2(zbin, 8, coeff0, coeff1);  in exact_regular_quantize_b_msa()
    199   int16_t *zbin_ptr = b->zbin;  in vp8_regular_quantize_b_msa()
|