/external/libaom/libaom/aom_dsp/ |
D | sad.c |
    53   uint8_t comp_pred[m * n]; \
    54   aom_comp_avg_pred(comp_pred, second_pred, m, n, ref, ref_stride); \
    55   return sad(src, src_stride, comp_pred, m, m, n); \
    60   uint8_t comp_pred[m * n]; \
    61   aom_dist_wtd_comp_avg_pred_c(comp_pred, second_pred, m, n, ref, \
    63   return sad(src, src_stride, comp_pred, m, m, n); \
    206  uint16_t comp_pred[m * n]; \
    207  aom_highbd_comp_avg_pred(CONVERT_TO_BYTEPTR(comp_pred), second_pred, m, n, \
    209  return highbd_sadb(src, src_stride, comp_pred, m, m, n); \
    214  uint16_t comp_pred[m * n]; \
    [all …]
|
D | variance.c |
    267  void aom_comp_avg_pred_c(uint8_t *comp_pred, const uint8_t *pred, int width,   in aom_comp_avg_pred_c() argument
    274  comp_pred[j] = ROUND_POWER_OF_TWO(tmp, 1);   in aom_comp_avg_pred_c()
    276  comp_pred += width;   in aom_comp_avg_pred_c()
    285  uint8_t *comp_pred, int width, int height,   in aom_upsampled_pred_c() argument
    361  av1_make_inter_predictor(pre, pre_buf->stride, comp_pred, width,   in aom_upsampled_pred_c()
    375  memcpy(comp_pred, ref, width * sizeof(*comp_pred));   in aom_upsampled_pred_c()
    376  comp_pred += width;   in aom_upsampled_pred_c()
    382  aom_convolve8_horiz_c(ref, ref_stride, comp_pred, width, kernel, 16, NULL,   in aom_upsampled_pred_c()
    387  aom_convolve8_vert_c(ref, ref_stride, comp_pred, width, NULL, -1, kernel,   in aom_upsampled_pred_c()
    403  MAX_SB_SIZE, comp_pred, width, NULL, -1, kernel_y, 16,   in aom_upsampled_pred_c()
    [all …]
|
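The variance.c matches above are the C reference for compound averaging: each output pixel is the rounded mean of the second predictor and the reference block, and the sad.c wrappers higher up fill their comp_pred buffer with exactly this routine before running a plain SAD. A minimal self-contained sketch of that per-pixel step, written from the matched lines (the standalone ROUND_POWER_OF_TWO definition and the helper name are illustrative, not copied from the library):

    #include <stdint.h>

    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

    /* Rounded average of a predictor and a reference block:
     * comp_pred[j] = (pred[j] + ref[j] + 1) >> 1, as the matched
     * aom_comp_avg_pred_c() lines suggest. Output is packed (stride == width). */
    static void comp_avg_pred_sketch(uint8_t *comp_pred, const uint8_t *pred,
                                     int width, int height, const uint8_t *ref,
                                     int ref_stride) {
      for (int i = 0; i < height; ++i) {
        for (int j = 0; j < width; ++j) {
          const int tmp = pred[j] + ref[j];
          comp_pred[j] = (uint8_t)ROUND_POWER_OF_TWO(tmp, 1);
        }
        comp_pred += width;
        pred += width;
        ref += ref_stride;
      }
    }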
D | aom_dsp_rtcd_defs.pl |
    939   … const MV *const mv, uint8_t *comp_pred, int width, int height, int subpel_x_q3,
    944   … const MV *const mv, uint8_t *comp_pred, const uint8_t *pred, int width,
    950   … const MV *const mv, uint8_t *comp_pred, const uint8_t *pred, int width,
    956   … const MV *const mv, uint8_t *comp_pred, const uint8_t *pred, int width,
    1216  …add_proto qw/void aom_comp_avg_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int hei…
    1218  …add_proto qw/void aom_dist_wtd_comp_avg_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width…
    1636  …add_proto qw/void aom_comp_mask_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int he…
    1639  …add_proto qw/void aom_highbd_comp_mask_pred/, "uint8_t *comp_pred, const uint8_t *pred8, int width…
|
/external/libaom/libaom/aom_dsp/x86/ |
D | jnt_variance_ssse3.c |
    48   void aom_dist_wtd_comp_avg_pred_ssse3(uint8_t *comp_pred, const uint8_t *pred,   in aom_dist_wtd_comp_avg_pred_ssse3() argument
    70   compute_dist_wtd_avg(&p0, &p1, &w, &r, comp_pred);   in aom_dist_wtd_comp_avg_pred_ssse3()
    72   comp_pred += 16;   in aom_dist_wtd_comp_avg_pred_ssse3()
    88   compute_dist_wtd_avg(&p0, &p1, &w, &r, comp_pred);   in aom_dist_wtd_comp_avg_pred_ssse3()
    90   comp_pred += 16;   in aom_dist_wtd_comp_avg_pred_ssse3()
    110  compute_dist_wtd_avg(&p0, &p1, &w, &r, comp_pred);   in aom_dist_wtd_comp_avg_pred_ssse3()
    112  comp_pred += 16;   in aom_dist_wtd_comp_avg_pred_ssse3()
    121  const MV *const mv, uint8_t *comp_pred, const uint8_t *pred, int width,   in aom_dist_wtd_comp_avg_upsampled_pred_ssse3() argument
    126  aom_upsampled_pred(xd, cm, mi_row, mi_col, mv, comp_pred, width, height,   in aom_dist_wtd_comp_avg_upsampled_pred_ssse3()
    141  __m128i p0 = xx_loadu_128(comp_pred);   in aom_dist_wtd_comp_avg_upsampled_pred_ssse3()
    [all …]
|
D | variance_sse2.c |
    487  uint8_t *comp_pred, int width, int height,   in aom_upsampled_pred_sse2() argument
    564  av1_make_inter_predictor(pre, pre_buf->stride, comp_pred, width,   in aom_upsampled_pred_sse2()
    587  xx_storeu_128(comp_pred, xx_loadu_128(ref));   in aom_upsampled_pred_sse2()
    588  comp_pred += 16;   in aom_upsampled_pred_sse2()
    601  xx_storeu_128(comp_pred, _mm_unpacklo_epi64(s0, s1));   in aom_upsampled_pred_sse2()
    602  comp_pred += 16;   in aom_upsampled_pred_sse2()
    617  xx_storeu_128(comp_pred, reg);   in aom_upsampled_pred_sse2()
    618  comp_pred += 16;   in aom_upsampled_pred_sse2()
    625  aom_convolve8_horiz(ref, ref_stride, comp_pred, width, kernel, 16, NULL, -1,   in aom_upsampled_pred_sse2()
    630  aom_convolve8_vert(ref, ref_stride, comp_pred, width, NULL, -1, kernel, 16,   in aom_upsampled_pred_sse2()
    [all …]
|
D | variance_avx2.c |
    335  uint8_t *comp_pred) {   in comp_mask_pred_line_avx2() argument
    353  _mm256_storeu_si256((__m256i *)(comp_pred), roundA);   in comp_mask_pred_line_avx2()
    356  void aom_comp_mask_pred_avx2(uint8_t *comp_pred, const uint8_t *pred, int width,   in aom_comp_mask_pred_avx2() argument
    366  comp_mask_pred_8_ssse3(comp_pred, height, src0, stride0, src1, stride1,   in aom_comp_mask_pred_avx2()
    383  comp_mask_pred_line_avx2(sA0, sA1, aA, comp_pred);   in aom_comp_mask_pred_avx2()
    384  comp_mask_pred_line_avx2(sB0, sB1, aB, comp_pred + 32);   in aom_comp_mask_pred_avx2()
    385  comp_pred += (16 << 2);   in aom_comp_mask_pred_avx2()
    399  comp_mask_pred_line_avx2(sA0, sA1, aA, comp_pred);   in aom_comp_mask_pred_avx2()
    400  comp_mask_pred_line_avx2(sB0, sB1, aB, comp_pred + 32);   in aom_comp_mask_pred_avx2()
    401  comp_pred += (32 << 1);   in aom_comp_mask_pred_avx2()
    [all …]
|
D | jnt_sad_ssse3.c |
    199  uint8_t comp_pred[m * n]; \
    200  aom_dist_wtd_comp_avg_pred(comp_pred, second_pred, m, n, ref, ref_stride, \
    202  return aom_sad##m##xh_sse2(src, src_stride, comp_pred, m, m, n); \
    209  uint8_t comp_pred[m * n]; \
    210  aom_dist_wtd_comp_avg_pred(comp_pred, second_pred, m, n, ref, ref_stride, \
    212  return aom_sad##m##xh_avx2(src, src_stride, comp_pred, m, m, n); \
|
D | masked_variance_intrin_ssse3.h |
    50   static INLINE void comp_mask_pred_8_ssse3(uint8_t *comp_pred, int height,   in comp_mask_pred_8_ssse3() argument
    83   _mm_store_si128((__m128i *)(comp_pred), round);   in comp_mask_pred_8_ssse3()
    84   comp_pred += (8 << 1);   in comp_mask_pred_8_ssse3()
|
D | highbd_variance_sse2.c |
    707  uint16_t *comp_pred = CONVERT_TO_SHORTPTR(comp_pred8);   in aom_highbd_upsampled_pred_sse2() local
    716  _mm_storeu_si128((__m128i *)comp_pred, s0);   in aom_highbd_upsampled_pred_sse2()
    717  comp_pred += 8;   in aom_highbd_upsampled_pred_sse2()
    730  _mm_storeu_si128((__m128i *)comp_pred, t0);   in aom_highbd_upsampled_pred_sse2()
    731  comp_pred += 8;   in aom_highbd_upsampled_pred_sse2()
    820  uint16_t *comp_pred = CONVERT_TO_SHORTPTR(comp_pred8);   in aom_highbd_dist_wtd_comp_avg_pred_sse2() local
    831  highbd_compute_dist_wtd_comp_avg(&p0, &p1, &w0, &w1, &r, comp_pred);   in aom_highbd_dist_wtd_comp_avg_pred_sse2()
    833  comp_pred += 8;   in aom_highbd_dist_wtd_comp_avg_pred_sse2()
    848  highbd_compute_dist_wtd_comp_avg(&p0, &p1, &w0, &w1, &r, comp_pred);   in aom_highbd_dist_wtd_comp_avg_pred_sse2()
    850  comp_pred += 8;   in aom_highbd_dist_wtd_comp_avg_pred_sse2()
|
D | masked_variance_intrin_ssse3.c |
    1029  void aom_comp_mask_pred_ssse3(uint8_t *comp_pred, const uint8_t *pred,   in aom_comp_mask_pred_ssse3() argument
    1040  comp_mask_pred_8_ssse3(comp_pred, height, src0, stride0, src1, stride1,   in aom_comp_mask_pred_ssse3()
    1044  comp_mask_pred_16_ssse3(src0, src1, mask, comp_pred);   in aom_comp_mask_pred_ssse3()
    1046  mask + mask_stride, comp_pred + width);   in aom_comp_mask_pred_ssse3()
    1047  comp_pred += (width << 1);   in aom_comp_mask_pred_ssse3()
    1056  comp_mask_pred_16_ssse3(src0, src1, mask, comp_pred);   in aom_comp_mask_pred_ssse3()
    1057  comp_mask_pred_16_ssse3(src0 + 16, src1 + 16, mask + 16, comp_pred + 16);   in aom_comp_mask_pred_ssse3()
    1058  comp_pred += (width);   in aom_comp_mask_pred_ssse3()
|
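The comp_mask matches (comp_mask_pred_8_ssse3, aom_comp_mask_pred_ssse3/avx2) are vectorized forms of masked compound prediction: each output pixel is a weighted blend of two predictors using a per-pixel mask with 6-bit weights. A rough scalar sketch of that blend; the 0..64 mask range follows the usual AV1 blend convention, and which operand takes the mask weight is an assumption here:

    #include <stdint.h>

    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))
    #define MASK_MAX 64 /* 6-bit blend weights, 0..64 */

    /* out = (m * p0 + (64 - m) * p1 + 32) >> 6, computed one pixel at a time;
     * the SSSE3/AVX2 kernels above do the same for 16 or 32 pixels per step. */
    static void comp_mask_pred_sketch(uint8_t *comp_pred, const uint8_t *pred0,
                                      int width, int height,
                                      const uint8_t *pred1, int pred1_stride,
                                      const uint8_t *mask, int mask_stride) {
      for (int i = 0; i < height; ++i) {
        for (int j = 0; j < width; ++j) {
          const int m = mask[j];
          comp_pred[j] = (uint8_t)ROUND_POWER_OF_TWO(
              m * pred0[j] + (MASK_MAX - m) * pred1[j], 6);
        }
        comp_pred += width;
        pred0 += width;
        pred1 += pred1_stride;
        mask += mask_stride;
      }
    }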
/external/libvpx/libvpx/vpx_dsp/x86/ |
D | avg_pred_sse2.c |
    18   void vpx_comp_avg_pred_sse2(uint8_t *comp_pred, const uint8_t *pred, int width,   in vpx_comp_avg_pred_sse2() argument
    21   assert(((intptr_t)comp_pred & 0xf) == 0);   in vpx_comp_avg_pred_sse2()
    30   _mm_store_si128((__m128i *)(comp_pred + x), avg);   in vpx_comp_avg_pred_sse2()
    32   comp_pred += width;   in vpx_comp_avg_pred_sse2()
    63   _mm_store_si128((__m128i *)comp_pred, avg);   in vpx_comp_avg_pred_sse2()
    66   comp_pred += 16;   in vpx_comp_avg_pred_sse2()
|
/external/libvpx/libvpx/vpx_dsp/ppc/ |
D | sad_vsx.c |
    118  DECLARE_ALIGNED(16, uint8_t, comp_pred[16 * height]); \
    119  vpx_comp_avg_pred_vsx(comp_pred, second_pred, 16, height, ref, \
    122  return vpx_sad16x##height##_vsx(src, src_stride, comp_pred, 16); \
    129  DECLARE_ALIGNED(32, uint8_t, comp_pred[32 * height]); \
    130  vpx_comp_avg_pred_vsx(comp_pred, second_pred, 32, height, ref, \
    133  return vpx_sad32x##height##_vsx(src, src_stride, comp_pred, 32); \
    140  DECLARE_ALIGNED(64, uint8_t, comp_pred[64 * height]); \
    141  vpx_comp_avg_pred_vsx(comp_pred, second_pred, 64, height, ref, \
    143  return vpx_sad64x##height##_vsx(src, src_stride, comp_pred, 64); \
|
D | variance_vsx.c |
    54   void vpx_comp_avg_pred_vsx(uint8_t *comp_pred, const uint8_t *pred, int width,   in vpx_comp_avg_pred_vsx() argument
    58   assert(((intptr_t)comp_pred & 0xf) == 0);   in vpx_comp_avg_pred_vsx()
    64   vec_vsx_st(v, j, comp_pred);   in vpx_comp_avg_pred_vsx()
    66   comp_pred += width;   in vpx_comp_avg_pred_vsx()
    77   vec_vsx_st(v, 0, comp_pred);   in vpx_comp_avg_pred_vsx()
    78   comp_pred += 16;  // width * 2;   in vpx_comp_avg_pred_vsx()
    93   vec_vsx_st(v, 0, comp_pred);   in vpx_comp_avg_pred_vsx()
    94   comp_pred += 16;  // width * 4;   in vpx_comp_avg_pred_vsx()
|
/external/libvpx/libvpx/vpx_dsp/ |
D | sad.c |
    43   DECLARE_ALIGNED(16, uint8_t, comp_pred[m * n]); \
    44   vpx_comp_avg_pred_c(comp_pred, second_pred, m, n, ref_ptr, ref_stride); \
    45   return sad(src_ptr, src_stride, comp_pred, m, m, n); \
    177  DECLARE_ALIGNED(16, uint16_t, comp_pred[m * n]); \
    178  vpx_highbd_comp_avg_pred_c(comp_pred, CONVERT_TO_SHORTPTR(second_pred), m, \
    180  return highbd_sadb(src_ptr, src_stride, comp_pred, m, m, n); \
|
D | variance.c |
    223  void vpx_comp_avg_pred_c(uint8_t *comp_pred, const uint8_t *pred, int width,   in vpx_comp_avg_pred_c() argument
    230  comp_pred[j] = ROUND_POWER_OF_TWO(tmp, 1);   in vpx_comp_avg_pred_c()
    232  comp_pred += width;   in vpx_comp_avg_pred_c()
    552  void vpx_highbd_comp_avg_pred(uint16_t *comp_pred, const uint16_t *pred,   in vpx_highbd_comp_avg_pred() argument
    559  comp_pred[j] = ROUND_POWER_OF_TWO(tmp, 1);   in vpx_highbd_comp_avg_pred()
    561  comp_pred += width;   in vpx_highbd_comp_avg_pred()
|
D | vpx_dsp_rtcd_defs.pl |
    1172  add_proto qw/void vpx_comp_avg_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int heig…
    1401  …add_proto qw/void vpx_highbd_comp_avg_pred/, "uint16_t *comp_pred, const uint16_t *pred, int width…
|
/external/libaom/libaom/test/ |
D | sad_test.cc |
    38   typedef void (*DistWtdCompAvgFunc)(uint8_t *comp_pred, const uint8_t *pred,
    196  const uint8_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);   in ReferenceSADavg() local
    197  sad += abs(source8[h * source_stride_ + w] - comp_pred);   in ReferenceSADavg()
    201  const uint16_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);   in ReferenceSADavg() local
    202  sad += abs(source16[h * source_stride_ + w] - comp_pred);   in ReferenceSADavg()
    249  const uint8_t comp_pred = ROUND_POWER_OF_TWO(tmp, 4);   in ReferenceDistWtdSADavg() local
    250  sad += abs(source8[h * source_stride_ + w] - comp_pred);   in ReferenceDistWtdSADavg()
    255  const uint16_t comp_pred = ROUND_POWER_OF_TWO(tmp, 4);   in ReferenceDistWtdSADavg() local
    256  sad += abs(source16[h * source_stride_ + w] - comp_pred);   in ReferenceDistWtdSADavg()
|
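In the sad_test.cc reference loops above, the equal-weight average rounds with a shift of 1 while the distance-weighted average rounds with a shift of 4, i.e. the two DIST_WTD_COMP weights sum to 16. A sketch of the reference computation those lines imply; the weight-pair names and which weight multiplies which predictor are assumptions, only the shift amounts come from the matches:

    #include <stdint.h>
    #include <stdlib.h>

    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

    /* Distance-weighted compound SAD reference: the quantized weights sum to
     * 16, so the weighted sum is scaled back with a shift of 4 (vs. a shift
     * of 1 for the plain average in ReferenceSADavg). */
    static unsigned int ref_dist_wtd_sad_sketch(
        const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride,
        const uint8_t *second_pred, int width, int height,
        int fwd_offset, int bck_offset /* fwd_offset + bck_offset == 16 */) {
      unsigned int sad = 0;
      for (int h = 0; h < height; ++h) {
        for (int w = 0; w < width; ++w) {
          const int tmp = fwd_offset * ref[h * ref_stride + w] +
                          bck_offset * second_pred[h * width + w];
          const uint8_t comp_pred = (uint8_t)ROUND_POWER_OF_TWO(tmp, 4);
          sad += abs(src[h * src_stride + w] - comp_pred);
        }
      }
      return sad;
    }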
D | comp_avg_pred_test.h |
    30   typedef void (*distwtdcompavg_func)(uint8_t *comp_pred, const uint8_t *pred,
    37   const MV *const mv, uint8_t *comp_pred, const uint8_t *pred, int width,
|
D | comp_mask_variance_test.cc |
    32   typedef void (*comp_mask_pred_func)(uint8_t *comp_pred, const uint8_t *pred,
|
/external/libvpx/libvpx/vp9/encoder/ |
D | vp9_rdopt.c |
    3162  int comp_pred, i, k;   in vp9_rd_pick_inter_mode_sb() local
    3439  comp_pred = second_ref_frame > INTRA_FRAME;   in vp9_rd_pick_inter_mode_sb()
    3440  if (comp_pred) {   in vp9_rd_pick_inter_mode_sb()
    3511  if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];   in vp9_rd_pick_inter_mode_sb()
    3547  compmode_cost = vp9_cost_bit(comp_mode_p, comp_pred);   in vp9_rd_pick_inter_mode_sb()
    3554  if (comp_pred) {   in vp9_rd_pick_inter_mode_sb()
    3684  if (!comp_pred) {   in vp9_rd_pick_inter_mode_sb()
    3721  if (x->skip && !comp_pred) break;   in vp9_rd_pick_inter_mode_sb()
    3845  const int comp_pred = 0;   in vp9_rd_pick_inter_mode_sb_seg_skip() local
    3902  rate2 += vp9_cost_bit(comp_mode_p, comp_pred);   in vp9_rd_pick_inter_mode_sb_seg_skip()
    [all …]
|
D | vp9_mcomp.c |
    316  DECLARE_ALIGNED(16, uint8_t, comp_pred[64 * 64]);   in setup_center_error()
    317  vpx_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);   in setup_center_error()
    318  besterr = vfp->vf(comp_pred, w, src, src_stride, sse1);   in setup_center_error()
    331  DECLARE_ALIGNED(16, uint8_t, comp_pred[64 * 64]);   in setup_center_error()
    332  vpx_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);   in setup_center_error()
    333  besterr = vfp->vf(comp_pred, w, src, src_stride, sse1);   in setup_center_error()
    654  DECLARE_ALIGNED(16, uint8_t, comp_pred[64 * 64]);   in accurate_sub_pel_search()
    655  vpx_comp_avg_pred(comp_pred, second_pred, w, h, pred, w);   in accurate_sub_pel_search()
    656  besterr = vfp->vf(comp_pred, w, src_address, src_stride, sse);   in accurate_sub_pel_search()
    673  DECLARE_ALIGNED(16, uint8_t, comp_pred[64 * 64]);   in accurate_sub_pel_search()
    [all …]
|
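The vp9_mcomp.c matches show where the averaged buffer is consumed during sub-pel motion search: a 64x64 comp_pred is filled from the second predictor and the reference at the candidate offset, and the error at that position is the variance-function result against the source. A condensed sketch of that call pattern; the function-pointer typedef and helper name are simplified stand-ins for the real setup_center_error() context, and linking it requires the libvpx vpx_comp_avg_pred implementation:

    #include <stdint.h>

    /* Prototype as exposed through vpx_dsp_rtcd (see the vpx_dsp_rtcd_defs.pl
     * match above). */
    void vpx_comp_avg_pred(uint8_t *comp_pred, const uint8_t *pred, int width,
                           int height, const uint8_t *ref, int ref_stride);

    /* Variance-style error function: vf(pred, pred_stride, src, src_stride, sse). */
    typedef unsigned int (*variance_fn_sketch_t)(const uint8_t *a, int a_stride,
                                                 const uint8_t *b, int b_stride,
                                                 unsigned int *sse);

    /* Error of the compound (averaged) prediction at one candidate position,
     * mirroring the setup_center_error() lines above. */
    static unsigned int compound_center_error_sketch(
        variance_fn_sketch_t vf, const uint8_t *src, int src_stride,
        const uint8_t *y, int y_stride, int offset,
        const uint8_t *second_pred, int w, int h, unsigned int *sse1) {
      uint8_t comp_pred[64 * 64]; /* the real code 16-byte aligns this buffer */
      vpx_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);
      return vf(comp_pred, w, src, src_stride, sse1);
    }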
D | vp9_pickmode.c |
    1954  int comp_pred = 0;   in vp9_pick_inter_mode() local
    1973  comp_pred = 1;   in vp9_pick_inter_mode()
    2003  if (comp_pred) {   in vp9_pick_inter_mode()
    2110  if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];   in vp9_pick_inter_mode()
    2147  if (inter_mv_mode == this_mode || comp_pred) continue;   in vp9_pick_inter_mode()
    2173  if (this_mode != NEARESTMV && !comp_pred &&   in vp9_pick_inter_mode()
|
/external/libvpx/libvpx/test/ |
D | sad_test.cc |
    173  const uint8_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);   in ReferenceSADavg() local
    174  sad += abs(source8[h * source_stride_ + w] - comp_pred);   in ReferenceSADavg()
    179  const uint16_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);   in ReferenceSADavg() local
    180  sad += abs(source16[h * source_stride_ + w] - comp_pred);   in ReferenceSADavg()
|
/external/libaom/libaom/av1/encoder/ |
D | rdopt.c |
    11793  const int comp_pred = ref_frame[1] > INTRA_FRAME;   in inter_mode_compatible_skip() local
    11795  if (comp_pred) {   in inter_mode_compatible_skip()
    11852  const int comp_pred = ref_frame[1] > INTRA_FRAME;   in inter_mode_search_order_independent_skip() local
    11947  (sf->selective_ref_frame == 1 && comp_pred)) {   in inter_mode_search_order_independent_skip()
    11964  if ((sf->selective_ref_frame >= 2) && comp_pred && !cpi->all_one_sided_refs) {   in inter_mode_search_order_independent_skip()
    11982  if (sf->selective_ref_frame >= 4 && comp_pred) {   in inter_mode_search_order_independent_skip()
    12029  if (!cpi->oxcf.enable_onesided_comp && comp_pred && cpi->all_one_sided_refs) {   in inter_mode_search_order_independent_skip()
    12740  const int comp_pred = second_ref_frame > INTRA_FRAME;   in av1_rd_pick_inter_mode_sb() local
    12746  if (comp_pred && args.single_ref_first_pass) {   in av1_rd_pick_inter_mode_sb()
    12754  if (!comp_pred) {  // single ref mode   in av1_rd_pick_inter_mode_sb()
    [all …]
|
D | mcomp.c |
    343  uint8_t *comp_pred = CONVERT_TO_BYTEPTR(comp_pred16);   in setup_center_error() local
    345  aom_highbd_comp_mask_pred(comp_pred, second_pred, w, h, y + offset,   in setup_center_error()
    348  aom_highbd_comp_avg_pred(comp_pred, second_pred, w, h, y + offset,   in setup_center_error()
    351  besterr = vfp->vf(comp_pred, w, src, src_stride, sse1);   in setup_center_error()
    353  DECLARE_ALIGNED(16, uint8_t, comp_pred[MAX_SB_SQUARE]);   in setup_center_error()
    355  aom_comp_mask_pred(comp_pred, second_pred, w, h, y + offset, y_stride,   in setup_center_error()
    358  aom_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);   in setup_center_error()
    360  besterr = vfp->vf(comp_pred, w, src, src_stride, sse1);   in setup_center_error()
|