/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/ |
D | findnearmv.h |
    37   static void vp8_clamp_mv2(int_mv *mv, const MACROBLOCKD *xd)   in vp8_clamp_mv2() argument
    39   if (mv->as_mv.col < (xd->mb_to_left_edge - LEFT_TOP_MARGIN))   in vp8_clamp_mv2()
    40   mv->as_mv.col = xd->mb_to_left_edge - LEFT_TOP_MARGIN;   in vp8_clamp_mv2()
    41   else if (mv->as_mv.col > xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN)   in vp8_clamp_mv2()
    42   mv->as_mv.col = xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN;   in vp8_clamp_mv2()
    44   if (mv->as_mv.row < (xd->mb_to_top_edge - LEFT_TOP_MARGIN))   in vp8_clamp_mv2()
    45   mv->as_mv.row = xd->mb_to_top_edge - LEFT_TOP_MARGIN;   in vp8_clamp_mv2()
    46   else if (mv->as_mv.row > xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN)   in vp8_clamp_mv2()
    47   mv->as_mv.row = xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN;   in vp8_clamp_mv2()
    50   static void vp8_clamp_mv(int_mv *mv, int mb_to_left_edge, int mb_to_right_edge,   in vp8_clamp_mv() argument
    [all …]
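The clamp hits above already show the whole pattern; as a quick illustration, here is a self-contained sketch of the same border clamping with stand-in types and margin values (nothing here comes from the libvpx headers beyond the field names):

    #include <stdio.h>

    /* Hypothetical stand-ins for the libvpx types and margins referenced above. */
    typedef struct { short row, col; } MV;

    static short clamp_component(short v, int lo, int hi)
    {
        return (short)(v < lo ? lo : (v > hi ? hi : v));
    }

    /* Clamp an MV so the predictor stays within the extended frame border,
     * mirroring the structure of vp8_clamp_mv2() shown above. */
    static void clamp_mv_to_borders(MV *mv,
                                    int mb_to_left_edge, int mb_to_right_edge,
                                    int mb_to_top_edge, int mb_to_bottom_edge,
                                    int margin)
    {
        mv->col = clamp_component(mv->col, mb_to_left_edge - margin,
                                  mb_to_right_edge + margin);
        mv->row = clamp_component(mv->row, mb_to_top_edge - margin,
                                  mb_to_bottom_edge + margin);
    }

    int main(void)
    {
        MV mv = { -500, 300 };
        clamp_mv_to_borders(&mv, -128, 128, -128, 128, 16);
        printf("clamped mv: row=%d col=%d\n", mv.row, mv.col);
        return 0;
    }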
|
D | reconinter.c |
    131   … ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);   in vp8_build_inter_predictors_b()
    133   if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)   in vp8_build_inter_predictors_b()
    135   sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch);   in vp8_build_inter_predictors_b()
    154   … ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);   in build_inter_predictors4b()
    156   if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)   in build_inter_predictors4b()
    158   …x->subpixel_predict8x8(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst…   in build_inter_predictors4b()
    169   … ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);   in build_inter_predictors2b()
    171   if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)   in build_inter_predictors2b()
    173   …x->subpixel_predict8x4(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst…   in build_inter_predictors2b()
    185   … ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);   in build_inter_predictors_b()
    [all …]
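Each of the predictor builders above splits the 1/8-pel motion vector into an integer pixel offset (>> 3) and a fractional phase (& 7), and only a non-zero phase invokes the sub-pixel filter. A minimal sketch of that split, with hypothetical function names:

    #include <stdio.h>

    /* Split a motion-vector component stored in 1/8-pel units into its
     * full-pel offset and its 1/8-pel fractional phase, as the
     * build_inter_predictors_* hits above do with (>> 3) and (& 7). */
    static void split_eighth_pel(int mv_component, int *full_pel, int *phase)
    {
        *full_pel = mv_component >> 3;   /* integer pixel offset into the reference */
        *phase    = mv_component & 7;    /* 0..7 selects the interpolation filter */
    }

    int main(void)
    {
        int full, phase;
        split_eighth_pel(19, &full, &phase);     /* 19/8 = 2 pixels + 3/8 */
        printf("full=%d phase=%d\n", full, phase);
        split_eighth_pel(-19, &full, &phase);    /* floor behaviour: -3 pixels + 5/8 */
        printf("full=%d phase=%d\n", full, phase);
        return 0;
    }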
|
D | findnearmv.c |
    40   int_mv *mv = near_mvs;   in vp8_find_near_mvs() local
    45   mv[0].as_int = mv[1].as_int = mv[2].as_int = 0;   in vp8_find_near_mvs()
    51   if (above->mbmi.mv.as_int)   in vp8_find_near_mvs()
    53   (++mv)->as_int = above->mbmi.mv.as_int;   in vp8_find_near_mvs()
    54   mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame], refframe, mv, ref_frame_sign_bias);   in vp8_find_near_mvs()
    64   if (left->mbmi.mv.as_int)   in vp8_find_near_mvs()
    68   this_mv.as_int = left->mbmi.mv.as_int;   in vp8_find_near_mvs()
    71   if (this_mv.as_int != mv->as_int)   in vp8_find_near_mvs()
    73   (++mv)->as_int = this_mv.as_int;   in vp8_find_near_mvs()
    86   if (aboveleft->mbmi.mv.as_int)   in vp8_find_near_mvs()
    [all …]
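vp8_find_near_mvs() walks the above, left and above-left neighbours and appends each non-zero MV that differs from the previously appended candidate. A simplified, hypothetical version of that de-duplicating append (it ignores the reference-frame counting the real function also does):

    #include <stdint.h>
    #include <stdio.h>

    typedef union { uint32_t as_int; struct { int16_t row, col; } as_mv; } int_mv;

    /* Append a neighbour's MV to a small candidate list unless it is zero or
     * equal to the most recently added candidate, the same pattern the
     * vp8_find_near_mvs() hits above implement with (++mv)->as_int. */
    static int append_candidate(int_mv *list, int count, int_mv cand)
    {
        if (cand.as_int == 0)
            return count;                         /* zero MV is already implicit */
        if (count > 0 && list[count - 1].as_int == cand.as_int)
            return count;                         /* duplicate of the last entry */
        list[count] = cand;
        return count + 1;
    }

    int main(void)
    {
        int_mv list[3] = { { 0 }, { 0 }, { 0 } };
        int_mv a, b;
        int n = 0;
        a.as_mv.row = 4;
        a.as_mv.col = -8;
        b = a;                                    /* duplicate neighbour */
        n = append_candidate(list, n, a);
        n = append_candidate(list, n, b);
        printf("candidates kept: %d\n", n);       /* prints 1 */
        return 0;
    }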
|
D | postproc.c |
    955   MV *mv = &bmi->mv.as_mv;   in vp8_post_proc_frame() local
    957   x1 = x0 + 8 + (mv->col >> 3);   in vp8_post_proc_frame()
    958   y1 = y0 + 4 + (mv->row >> 3);   in vp8_post_proc_frame()
    965   x1 = x0 + 8 + (mv->col >> 3);   in vp8_post_proc_frame()
    966   y1 = y0 +12 + (mv->row >> 3);   in vp8_post_proc_frame()
    976   MV *mv = &bmi->mv.as_mv;   in vp8_post_proc_frame() local
    978   x1 = x0 + 4 + (mv->col >> 3);   in vp8_post_proc_frame()
    979   y1 = y0 + 8 + (mv->row >> 3);   in vp8_post_proc_frame()
    986   x1 = x0 +12 + (mv->col >> 3);   in vp8_post_proc_frame()
    987   y1 = y0 + 8 + (mv->row >> 3);   in vp8_post_proc_frame()
    [all …]
|
D | debugmodes.c |
    119   … fprintf(mvs, "%5d:%-5d", mi[mb_index].mbmi.mv.as_mv.row / 2, mi[mb_index].mbmi.mv.as_mv.col / 2);   in vp8_print_modes_and_motion_vectors()
    146   …fprintf(mvs, "%3d:%-3d ", mi[mb_index].bmi[bindex].mv.as_mv.row, mi[mb_index].bmi[bindex].mv.as_mv…   in vp8_print_modes_and_motion_vectors()
|
D | blockd.h |
    143   int_mv mv;   member
    160   int_mv mv;   member
    180   int_mv mv;   member
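These mv members are of type int_mv, a union that overlays the two 16-bit components with one 32-bit word so that the mv.as_int comparisons and copies seen throughout this listing are single integer operations. An illustrative sketch of that layout (treat it as a sketch, not the exact header):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { int16_t row, col; } MV;

    /* Overlaying the two components with one 32-bit word lets callers copy or
     * compare a whole motion vector with a single integer access (mv.as_int),
     * which is how most of the comparisons in this listing are written. */
    typedef union int_mv {
        uint32_t as_int;
        MV       as_mv;
    } int_mv;

    int main(void)
    {
        int_mv a, b;
        a.as_mv.row = -12;
        a.as_mv.col = 7;
        b.as_int = a.as_int;                       /* one-word copy */
        printf("equal: %d (size=%zu)\n", a.as_int == b.as_int, sizeof(int_mv));
        return 0;
    }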
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/ |
D | vp9_mvref_common.c |
    118   static void clamp_mv_ref(MV *mv, const MACROBLOCKD *xd) {   in clamp_mv_ref() argument
    119   clamp_mv(mv, xd->mb_to_left_edge - MV_BORDER,   in clamp_mv_ref()
    132   : candidate->mbmi.mv[which_mv];   in get_sub_block_mv()
    140   int_mv mv = mbmi->mv[ref];   in scale_mv() local
    142   mv.as_mv.row *= -1;   in scale_mv()
    143   mv.as_mv.col *= -1;   in scale_mv()
    145   return mv;   in scale_mv()
    151   #define ADD_MV_REF_LIST(mv) \   argument
    154   if ((mv).as_int != mv_ref_list[0].as_int) { \
    155   mv_ref_list[refmv_count] = (mv); \
    [all …]
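scale_mv() negates both components when the candidate block's reference frame has the opposite temporal sign bias, and ADD_MV_REF_LIST() only appends a candidate that differs from the first list entry. A hedged sketch of the sign-flip half, with stand-in types:

    #include <stdint.h>
    #include <stdio.h>

    typedef union { uint32_t as_int; struct { int16_t row, col; } as_mv; } int_mv;

    /* If the candidate block predicts from a reference frame whose temporal
     * direction (sign bias) differs from the one being predicted, the MV is
     * mirrored, as in the scale_mv() hit above. */
    static int_mv maybe_flip_mv(int_mv mv, int candidate_sign_bias, int this_sign_bias)
    {
        if (candidate_sign_bias != this_sign_bias) {
            mv.as_mv.row = (int16_t)-mv.as_mv.row;
            mv.as_mv.col = (int16_t)-mv.as_mv.col;
        }
        return mv;
    }

    int main(void)
    {
        int_mv mv;
        mv.as_mv.row = 6;
        mv.as_mv.col = -10;
        mv = maybe_flip_mv(mv, /*candidate=*/1, /*this=*/0);
        printf("row=%d col=%d\n", mv.as_mv.row, mv.as_mv.col);   /* -6, 10 */
        return 0;
    }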
|
D | vp9_mv.h |
    22   typedef struct mv {   struct
    37   static INLINE void clamp_mv(MV *mv, int min_col, int max_col,   in clamp_mv() argument
    39   mv->col = clamp(mv->col, min_col, max_col);   in clamp_mv()
    40   mv->row = clamp(mv->row, min_row, max_row);   in clamp_mv()
|
D | vp9_entropymv.h |
    98    static INLINE MV_JOINT_TYPE vp9_get_mv_joint(const MV *mv) {   in vp9_get_mv_joint() argument
    99    if (mv->row == 0) {   in vp9_get_mv_joint()
    100   return mv->col == 0 ? MV_JOINT_ZERO : MV_JOINT_HNZVZ;   in vp9_get_mv_joint()
    102   return mv->col == 0 ? MV_JOINT_HZVNZ : MV_JOINT_HNZVNZ;   in vp9_get_mv_joint()
    126   void vp9_inc_mv(const MV *mv, nmv_context_counts *mvctx);
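vp9_get_mv_joint() classifies an MV into one of four joint classes by which components are non-zero; the class is coded before the components themselves. A self-contained sketch of that classification (the enum names follow the hits above, everything else is illustrative):

    #include <stdio.h>

    typedef struct { short row, col; } MV;

    typedef enum {
        MV_JOINT_ZERO   = 0,   /* both components zero     */
        MV_JOINT_HNZVZ  = 1,   /* col non-zero, row zero   */
        MV_JOINT_HZVNZ  = 2,   /* col zero, row non-zero   */
        MV_JOINT_HNZVNZ = 3    /* both components non-zero */
    } MV_JOINT_TYPE;

    /* Same branch structure as the vp9_get_mv_joint() hit above. */
    static MV_JOINT_TYPE get_mv_joint(const MV *mv)
    {
        if (mv->row == 0)
            return mv->col == 0 ? MV_JOINT_ZERO : MV_JOINT_HNZVZ;
        return mv->col == 0 ? MV_JOINT_HZVNZ : MV_JOINT_HNZVNZ;
    }

    int main(void)
    {
        const MV mv = { 0, 5 };
        printf("joint=%d\n", get_mv_joint(&mv));   /* MV_JOINT_HNZVZ */
        return 0;
    }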
|
D | vp9_postproc.c |
    750   MV *mv = &bmi->mv.as_mv;   in vp9_post_proc_frame()
    752   x1 = x0 + 8 + (mv->col >> 3);   in vp9_post_proc_frame()
    753   y1 = y0 + 4 + (mv->row >> 3);   in vp9_post_proc_frame()
    760   x1 = x0 + 8 + (mv->col >> 3);   in vp9_post_proc_frame()
    761   y1 = y0 + 12 + (mv->row >> 3);   in vp9_post_proc_frame()
    770   MV *mv = &bmi->mv.as_mv;   in vp9_post_proc_frame()
    772   x1 = x0 + 4 + (mv->col >> 3);   in vp9_post_proc_frame()
    773   y1 = y0 + 8 + (mv->row >> 3);   in vp9_post_proc_frame()
    780   x1 = x0 + 12 + (mv->col >> 3);   in vp9_post_proc_frame()
    781   y1 = y0 + 8 + (mv->row >> 3);   in vp9_post_proc_frame()
    [all …]
|
D | vp9_entropymv.c |
    171   void vp9_inc_mv(const MV *mv, nmv_context_counts *counts) {   in vp9_inc_mv() argument
    173   const MV_JOINT_TYPE j = vp9_get_mv_joint(mv);   in vp9_inc_mv()
    177   inc_mv_component(mv->row, &counts->comps[0], 1, 1);   in vp9_inc_mv()
    181   inc_mv_component(mv->col, &counts->comps[1], 1, 1);   in vp9_inc_mv()
    201   const nmv_context_counts *counts = &cm->counts.mv;   in vp9_adapt_mv_probs()
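vp9_inc_mv() accumulates statistics for later probability adaptation: the joint class is always counted, and a component is counted only when it is non-zero. A rough sketch with a deliberately simplified stand-in for nmv_context_counts (the real code bins each component by class, integer bits and fractional bits rather than by raw magnitude):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { short row, col; } MV;

    /* Simplified stand-in for nmv_context_counts: one counter per joint class
     * plus an illustrative magnitude histogram per component. */
    typedef struct {
        unsigned joints[4];
        unsigned comps[2][64];
    } mv_counts;

    /* Count one MV the way vp9_inc_mv() does: always bump the joint class,
     * bump a component histogram only when that component is non-zero. */
    static void inc_mv(const MV *mv, mv_counts *counts)
    {
        const int joint = (mv->row != 0) * 2 + (mv->col != 0);  /* numbering as in the joint sketch above */
        ++counts->joints[joint];
        if (mv->row != 0)
            ++counts->comps[0][abs(mv->row) & 63];
        if (mv->col != 0)
            ++counts->comps[1][abs(mv->col) & 63];
    }

    int main(void)
    {
        mv_counts counts = { { 0 }, { { 0 } } };
        const MV mv = { 0, 12 };
        inc_mv(&mv, &counts);
        printf("joint[1]=%u col-bin[12]=%u\n", counts.joints[1], counts.comps[1][12]);
        return 0;
    }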
|
D | vp9_reconinter.c |
    90    MV32 mv = vp9_scale_mv(&mv_q4, x, y, sf);   in vp9_build_inter_predictor() local
    91    const int subpel_x = mv.col & SUBPEL_MASK;   in vp9_build_inter_predictor()
    92    const int subpel_y = mv.row & SUBPEL_MASK;   in vp9_build_inter_predictor()
    94    src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);   in vp9_build_inter_predictor()
    162   const MV mv = mi->mbmi.sb_type < BLOCK_8X8   in build_inter_predictors() local
    165   : mi->mbmi.mv[ref].as_mv;   in build_inter_predictors()
    172   const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh,   in build_inter_predictors()
    266   const MV mv = mi->mbmi.sb_type < BLOCK_8X8   in dec_build_inter_predictors() local
    269   : mi->mbmi.mv[ref].as_mv;   in dec_build_inter_predictors()
    276   const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh,   in dec_build_inter_predictors()
|
D | vp9_mvref_common.h |
    25   static INLINE void clamp_mv2(MV *mv, const MACROBLOCKD *xd) {   in clamp_mv2() argument
    26   clamp_mv(mv, xd->mb_to_left_edge - LEFT_TOP_MARGIN,   in clamp_mv2()
|
D | vp9_scale.c |
    44   MV32 vp9_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf) {   in vp9_scale_mv() argument
    48   scaled_y(mv->row, sf) + y_off_q4,   in vp9_scale_mv()
    49   scaled_x(mv->col, sf) + x_off_q4   in vp9_scale_mv()
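vp9_scale_mv() rescales an MV when the reference frame has a different resolution, using fixed-point scale factors plus a per-block offset in 1/16-pel units. A sketch of the fixed-point rescale under the assumption of a Q14 ratio (the constant and names are stand-ins, not the library's):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed fixed-point representation for the scale factor:
     * scale_fp = (other_dimension << SCALE_SHIFT) / this_dimension,
     * chosen to mirror the scaled_x()/scaled_y() calls above. */
    #define SCALE_SHIFT 14

    static int scale_component(int value, int scale_fp)
    {
        return (int)(((int64_t)value * scale_fp) >> SCALE_SHIFT);
    }

    int main(void)
    {
        /* Reference frame at half width/height: factor 0.5 in Q14. */
        const int half = 1 << (SCALE_SHIFT - 1);
        printf("row 32 -> %d, col -20 -> %d\n",
               scale_component(32, half), scale_component(-20, half));
        return 0;
    }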
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/decoder/ |
D | decodemv.c |
    111   static void read_mv(vp8_reader *r, MV *mv, const MV_CONTEXT *mvc)   in read_mv() argument
    113   mv->row = (short)(read_mvcomponent(r, mvc) * 2);   in read_mv()
    114   mv->col = (short)(read_mvcomponent(r, ++mvc) * 2);   in read_mv()
    268   leftmv.as_int = left_mb->mbmi.mv.as_int;   in decode_split_mv()
    270   leftmv.as_int = (left_mb->bmi + k + 4 - 1)->mv.as_int;   in decode_split_mv()
    273   leftmv.as_int = (mi->bmi + k - 1)->mv.as_int;   in decode_split_mv()
    279   abovemv.as_int = above_mb->mbmi.mv.as_int;   in decode_split_mv()
    281   abovemv.as_int = (above_mb->bmi + k + 16 - 4)->mv.as_int;   in decode_split_mv()
    284   abovemv.as_int = (mi->bmi + k - 4)->mv.as_int;   in decode_split_mv()
    328   mi->bmi[ *fill_offset].mv.as_int = blockmv.as_int;   in decode_split_mv()
    [all …]
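decode_split_mv() predicts each of the sixteen 4x4 sub-block MVs from its left and above neighbours, reaching into the adjacent macroblock when the sub-block sits on the MB edge; that is what the k + 4 - 1 and k + 16 - 4 index arithmetic above does. A simplified sketch of the neighbour selection on a 4x4 grid (it leaves out the split/non-split distinction the real code makes):

    #include <stdio.h>

    /* Sub-blocks are numbered 0..15 in raster order on a 4x4 grid.  The left
     * neighbour of a block in column 0 lives in the left macroblock (its
     * rightmost column, index k + 3), and the above neighbour of a block in
     * row 0 lives in the above macroblock (its bottom row, index k + 12),
     * the same offsets written as "k + 4 - 1" and "k + 16 - 4" above. */
    static int left_neighbour(int k, int *in_left_mb)
    {
        *in_left_mb = (k & 3) == 0;
        return *in_left_mb ? k + 3 : k - 1;
    }

    static int above_neighbour(int k, int *in_above_mb)
    {
        *in_above_mb = k < 4;
        return *in_above_mb ? k + 12 : k - 4;
    }

    int main(void)
    {
        int ext;
        printf("left of 4  -> %d (external=%d)\n", left_neighbour(4, &ext), ext);
        printf("above of 2 -> %d (external=%d)\n", above_neighbour(2, &ext), ext);
        printf("left of 6  -> %d (external=%d)\n", left_neighbour(6, &ext), ext);
        return 0;
    }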
|
D | error_concealment.c |
    189   new_row = row - bmi->mv.as_mv.row;   in vp8_calculate_overlaps()
    190   new_col = col - bmi->mv.as_mv.col;   in vp8_calculate_overlaps()
    256   bmi->mv.as_int = 0;   in estimate_mv()
    261   col_acc += overlaps[i].overlap * overlaps[i].bmi->mv.as_mv.col;   in estimate_mv()
    262   row_acc += overlaps[i].overlap * overlaps[i].bmi->mv.as_mv.row;   in estimate_mv()
    268   bmi->mv.as_mv.col = col_acc / overlap_sum;   in estimate_mv()
    269   bmi->mv.as_mv.row = row_acc / overlap_sum;   in estimate_mv()
    273   bmi->mv.as_mv.col = 0;   in estimate_mv()
    274   bmi->mv.as_mv.row = 0;   in estimate_mv()
    290   MV * const filtered_mv = &(mi->mbmi.mv.as_mv);   in estimate_mb_mvs()
    [all …]
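estimate_mv() reconstructs a lost block's MV as the overlap-weighted average of the MVs whose motion-compensated areas cover it, falling back to zero when there is no overlap. A condensed sketch of that averaging:

    #include <stdio.h>

    typedef struct { int row, col; } MV;
    typedef struct { int overlap; MV mv; } overlap_entry;

    /* Overlap-weighted average of neighbour MVs, guarded against a zero
     * denominator, following the structure of the estimate_mv() hits above. */
    static MV estimate_mv_from_overlaps(const overlap_entry *o, int n)
    {
        MV est = { 0, 0 };
        int row_acc = 0, col_acc = 0, overlap_sum = 0, i;
        for (i = 0; i < n; ++i) {
            row_acc += o[i].overlap * o[i].mv.row;
            col_acc += o[i].overlap * o[i].mv.col;
            overlap_sum += o[i].overlap;
        }
        if (overlap_sum > 0) {
            est.row = row_acc / overlap_sum;
            est.col = col_acc / overlap_sum;
        }
        return est;
    }

    int main(void)
    {
        const overlap_entry o[2] = { { 12, { 8, -4 } }, { 4, { 0, 8 } } };
        const MV est = estimate_mv_from_overlaps(o, 2);
        printf("estimated mv: row=%d col=%d\n", est.row, est.col);  /* 6, -1 */
        return 0;
    }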
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/ |
D | vp9_mcomp.c |
    27   const MV *mv) {   in get_buf_from_mv() argument
    28   return &buf->buf[mv->row * buf->stride + mv->col];   in get_buf_from_mv()
    31   void vp9_set_mv_search_range(MACROBLOCK *x, const MV *mv) {   in vp9_set_mv_search_range() argument
    32   int col_min = (mv->col >> 3) - MAX_FULL_PEL_VAL + (mv->col & 7 ? 1 : 0);   in vp9_set_mv_search_range()
    33   int row_min = (mv->row >> 3) - MAX_FULL_PEL_VAL + (mv->row & 7 ? 1 : 0);   in vp9_set_mv_search_range()
    34   int col_max = (mv->col >> 3) + MAX_FULL_PEL_VAL;   in vp9_set_mv_search_range()
    35   int row_max = (mv->row >> 3) + MAX_FULL_PEL_VAL;   in vp9_set_mv_search_range()
    68   static INLINE int mv_cost(const MV *mv,   in mv_cost() argument
    70   return joint_cost[vp9_get_mv_joint(mv)] +   in mv_cost()
    71   comp_cost[0][mv->row] + comp_cost[1][mv->col];   in mv_cost()
    [all …]
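vp9_set_mv_search_range() turns the 1/8-pel predictor into a full-pel search window of ±MAX_FULL_PEL_VAL around it; the (col & 7 ? 1 : 0) term pulls the lower bound inward so a fractional predictor never widens the window. A sketch of that conversion with an assumed window radius:

    #include <stdio.h>

    #define MAX_FULL_PEL_VAL 1023   /* illustrative window radius in full pels */

    /* Derive the full-pel search bounds for one component from a 1/8-pel
     * predictor, mirroring vp9_set_mv_search_range() above. */
    static void search_bounds(int mv_q3, int *min_fp, int *max_fp)
    {
        *min_fp = (mv_q3 >> 3) - MAX_FULL_PEL_VAL + ((mv_q3 & 7) ? 1 : 0);
        *max_fp = (mv_q3 >> 3) + MAX_FULL_PEL_VAL;
    }

    int main(void)
    {
        int lo, hi;
        search_bounds(21, &lo, &hi);    /* 21/8 = 2.625 pels */
        printf("min=%d max=%d\n", lo, hi);
        return 0;
    }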
|
D | vp9_encodemv.c |
    167   nmv_context_counts *const counts = &cm->counts.mv;   in vp9_write_nmv_probs()
    203   const MV* mv, const MV* ref,   in vp9_encode_mv() argument
    205   const MV diff = {mv->row - ref->row,   in vp9_encode_mv()
    206   mv->col - ref->col};   in vp9_encode_mv()
    220   unsigned int maxv = MAX(abs(mv->row), abs(mv->col)) >> 3;   in vp9_encode_mv()
    257   inc_mvs(mbmi, mi->bmi[i].as_mv, &cm->counts.mv);   in vp9_update_mv_count()
    262   inc_mvs(mbmi, mbmi->mv, &cm->counts.mv);   in vp9_update_mv_count()
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/ |
D | mr_dissim.c |
    34    mvx[cnt] = x->mbmi.mv.as_mv.row; \
    35    mvy[cnt] = x->mbmi.mv.as_mv.col; \
    42    mvx[cnt] = x->mbmi.mv.as_mv.row; \
    43    mvy[cnt] = x->mbmi.mv.as_mv.col; \
    194   mmvx = MAX(abs(min_mvx - here->mbmi.mv.as_mv.row),   in vp8_cal_dissimilarity()
    195   abs(max_mvx - here->mbmi.mv.as_mv.row));   in vp8_cal_dissimilarity()
    196   mmvy = MAX(abs(min_mvy - here->mbmi.mv.as_mv.col),   in vp8_cal_dissimilarity()
    197   abs(max_mvy - here->mbmi.mv.as_mv.col));   in vp8_cal_dissimilarity()
    205   store_mode_info->mv.as_int = tmp->mbmi.mv.as_int;   in vp8_cal_dissimilarity()
|
D | encodemv.c |
    68   void vp8_encode_motion_vector(vp8_writer *w, const MV *mv, const MV_CONTEXT *mvc)   in vp8_encode_motion_vector() argument
    73   if (abs(mv->row >> 1) > max_mv_r)   in vp8_encode_motion_vector()
    76   max_mv_r = abs(mv->row >> 1);   in vp8_encode_motion_vector()
    77   fprintf(f, "New Mv Row Max %6d\n", (mv->row >> 1));   in vp8_encode_motion_vector()
    79   if ((abs(mv->row) / 2) != max_mv_r)   in vp8_encode_motion_vector()
    80   fprintf(f, "MV Row conversion error %6d\n", abs(mv->row) / 2);   in vp8_encode_motion_vector()
    85   if (abs(mv->col >> 1) > max_mv_c)   in vp8_encode_motion_vector()
    88   fprintf(f, "New Mv Col Max %6d\n", (mv->col >> 1));   in vp8_encode_motion_vector()
    89   max_mv_c = abs(mv->col >> 1);   in vp8_encode_motion_vector()
    95   encode_mvcomponent(w, mv->row >> 1, &mvc[0]);   in vp8_encode_motion_vector()
    [all …]
|
D | mcomp.c |
    26   int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight)   in vp8_mv_bit_cost() argument
    35   …return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] + mvcost[1][(mv->as_mv.col - ref->as_mv.…   in vp8_mv_bit_cost()
    38   static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int error_per_bit)   in mv_err_cost() argument
    42   return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] +   in mv_err_cost()
    43   mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1])   in mv_err_cost()
    48   static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvsadcost[2], int error_per_bit)   in mvsad_err_cost() argument
    53   return ((mvsadcost[0][(mv->as_mv.row - ref->as_mv.row)] +   in mvsad_err_cost()
    54   mvsadcost[1][(mv->as_mv.col - ref->as_mv.col)])   in mvsad_err_cost()
    67   x->ss[search_site_count].mv.col = 0;   in vp8_init_dsmotion_compensation()
    68   x->ss[search_site_count].mv.row = 0;   in vp8_init_dsmotion_compensation()
    [all …]
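vp8_mv_bit_cost() and mv_err_cost() estimate the rate of an MV from per-component cost tables indexed by the half-precision difference to the reference, then scale by a weight (or by error_per_bit). A hedged sketch of that pattern; the toy cost table and the final shift are stand-ins, not the trained tables:

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { short row, col; } MV;

    /* Toy cost table: pretend each half-pel step of magnitude costs about
     * two units plus a constant, a stand-in for the mvcost[] tables above. */
    static int component_cost(int half_pel_diff)
    {
        return 6 + 2 * abs(half_pel_diff);
    }

    /* Rate estimate for coding mv relative to ref, scaled by a lambda-like
     * weight and reduced by a fixed shift, mirroring the structure of
     * vp8_mv_bit_cost()/mv_err_cost() (the exact shift is an assumption). */
    static int mv_rate_cost(const MV *mv, const MV *ref, int weight)
    {
        const int r = component_cost((mv->row - ref->row) >> 1);
        const int c = component_cost((mv->col - ref->col) >> 1);
        return ((r + c) * weight) >> 7;
    }

    int main(void)
    {
        const MV mv = { 24, -16 }, ref = { 0, 0 };
        printf("cost=%d\n", mv_rate_cost(&mv, &ref, 128));
        return 0;
    }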
|
D | pickinter.c |
    401   x->MVcount[0][mv_max+((xd->mode_info_context->mbmi.mv.as_mv.row -   in update_mvcount()
    403   x->MVcount[1][mv_max+((xd->mode_info_context->mbmi.mv.as_mv.col -   in update_mvcount()
    450   (*parent_ref_mv).as_mv.row = store_mode_info[parent_mb_index].mv.as_mv.row   in get_lower_res_motion_info()
    453   (*parent_ref_mv).as_mv.col = store_mode_info[parent_mb_index].mv.as_mv.col   in get_lower_res_motion_info()
    490   int_mv mv = x->e_mbd.mode_info_context->mbmi.mv;   in evaluate_inter_mode() local
    506   sse, mv);   in evaluate_inter_mode()
    538   mv_l = mic->mbmi.mv;   in calculate_zeromv_rd_adjustment()
    546   mv_al = mic->mbmi.mv;   in calculate_zeromv_rd_adjustment()
    554   mv_a = mic->mbmi.mv;   in calculate_zeromv_rd_adjustment()
    939   d->bmi.mv.as_int = mvp_full.as_int;   in vp8_pick_inter_mode()
    [all …]
|
D | rdopt.c |
    479    int mv_row = x->e_mbd.mode_info_context->mbmi.mv.as_mv.row;   in VP8_UVSSE()
    480    int mv_col = x->e_mbd.mode_info_context->mbmi.mv.as_mv.col;   in VP8_UVSSE()
    943    void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv)   in vp8_set_mbmode_and_mvs() argument
    946    x->e_mbd.mode_info_context->mbmi.mv.as_int = mv->as_int;   in vp8_set_mbmode_and_mvs()
    995    this_mv->as_int = col ? d[-1].bmi.mv.as_int : (uint32_t)left_block_mv(mic, i);   in labels2mode()
    998    this_mv->as_int = row ? d[-4].bmi.mv.as_int : (uint32_t)above_block_mv(mic, i, mis);   in labels2mode()
    1011   left_mv.as_int = col ? d[-1].bmi.mv.as_int :   in labels2mode()
    1021   d->bmi.mv.as_int = this_mv->as_int;   in labels2mode()
    1024   x->partition_info->bmi[i].mv.as_int = this_mv->as_int;   in labels2mode()
    1216   bsi->mvp.as_int = x->e_mbd.block[i-1].bmi.mv.as_int;   in rd_check_segment()
    [all …]
|
D | temporal_filter.c |
    190   bestsme = vp8_hex_search(x, b, d, &best_ref_mv1_full, &d->bmi.mv,   in vp8_temporal_filter_find_matching_mb_c()
    202   &d->bmi.mv,   in vp8_temporal_filter_find_matching_mb_c()
    288   mbd->block[0].bmi.mv.as_mv.row = 0;   in vp8_temporal_filter_iterate_c()
    289   mbd->block[0].bmi.mv.as_mv.col = 0;   in vp8_temporal_filter_iterate_c()
    326   mbd->block[0].bmi.mv.as_mv.row,   in vp8_temporal_filter_iterate_c()
    327   mbd->block[0].bmi.mv.as_mv.col,   in vp8_temporal_filter_iterate_c()
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/decoder/ |
D | vp9_decodemv.c |
    237   static INLINE void read_mv(vp9_reader *r, MV *mv, const MV *ref,   in read_mv() argument
    253   mv->row = ref->row + diff.row;   in read_mv()
    254   mv->col = ref->col + diff.col;   in read_mv()
    360   static INLINE int is_mv_valid(const MV *mv) {   in is_mv_valid() argument
    361   return mv->row > MV_LOW && mv->row < MV_UPP &&   in is_mv_valid()
    362   mv->col > MV_LOW && mv->col < MV_UPP;   in is_mv_valid()
    366   int_mv mv[2], int_mv ref_mv[2],   in assign_mv()
    375   NULL : &cm->counts.mv;   in assign_mv()
    377   read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv, &cm->fc.nmvc, mv_counts,   in assign_mv()
    379   ret = ret && is_mv_valid(&mv[i].as_mv);   in assign_mv()
    [all …]
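On the decoder side, read_mv() adds the decoded difference back onto the reference predictor, and assign_mv() rejects the result if either component falls outside (MV_LOW, MV_UPP). A sketch of that reconstruct-and-validate step; the bound values used here are assumptions:

    #include <stdio.h>

    typedef struct { short row, col; } MV;

    /* Assumed component bounds; the real MV_LOW/MV_UPP come from the
     * bitstream limits in vp9_entropymv.h. */
    #define MV_LOW  (-(1 << 13))
    #define MV_UPP  ( (1 << 13))

    static int is_mv_valid(const MV *mv)
    {
        return mv->row > MV_LOW && mv->row < MV_UPP &&
               mv->col > MV_LOW && mv->col < MV_UPP;
    }

    /* Reconstruct an MV from its predictor and the decoded difference, then
     * validate it, mirroring read_mv()/assign_mv() above. */
    static int reconstruct_mv(MV *mv, const MV *ref, const MV *diff)
    {
        mv->row = (short)(ref->row + diff->row);
        mv->col = (short)(ref->col + diff->col);
        return is_mv_valid(mv);
    }

    int main(void)
    {
        const MV ref = { 40, -24 }, diff = { -2, 6 };
        MV mv;
        printf("valid=%d row=%d col=%d\n", reconstruct_mv(&mv, &ref, &diff),
               mv.row, mv.col);
        return 0;
    }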
|