
Searched refs:mb_cols (Results 1 – 25 of 38) sorted by relevance

/external/webrtc/webrtc/modules/video_processing/
video_denoiser.cc
22 int mb_cols, in TrailingReduction() argument
27 for (int mb_col = 1; mb_col < mb_cols - 1; ++mb_col) { in TrailingReduction()
28 int mb_index = mb_row * mb_cols + mb_col; in TrailingReduction()
39 metrics_[mb_index + mb_cols].denoise + in TrailingReduction()
40 metrics_[mb_index - mb_cols].denoise <= in TrailingReduction()
48 metrics_[mb_index + mb_cols + 1].denoise + in TrailingReduction()
49 metrics_[mb_index + mb_cols - 1].denoise + in TrailingReduction()
50 metrics_[mb_index - mb_cols + 1].denoise + in TrailingReduction()
51 metrics_[mb_index - mb_cols - 1].denoise + in TrailingReduction()
52 metrics_[mb_index + mb_cols].denoise + in TrailingReduction()
[all …]
video_denoiser.h
26 int mb_cols,
/external/libvpx/libvpx/vp8/decoder/
error_concealment.c
54 pbi->overlaps = vpx_calloc(pbi->common.mb_rows * pbi->common.mb_cols, in vp8_alloc_overlap_lists()
172 int mb_rows, int mb_cols, in vp8_calculate_overlaps() argument
191 if (new_row >= ((16*mb_rows) << 3) || new_col >= ((16*mb_cols) << 3)) in vp8_calculate_overlaps()
213 end_col = VPXMIN(mb_cols - overlap_mb_col, 2); in vp8_calculate_overlaps()
231 mb_overlap = overlap_ul + (overlap_mb_row + rel_row) * mb_cols + in vp8_calculate_overlaps()
329 int mb_rows, int mb_cols) in calc_prev_mb_overlaps() argument
338 overlaps, mb_rows, mb_cols, in calc_prev_mb_overlaps()
350 int mb_rows, int mb_cols, in estimate_missing_mvs() argument
354 memset(overlaps, 0, sizeof(MB_OVERLAP) * mb_rows * mb_cols); in estimate_missing_mvs()
358 for (mb_col = 0; mb_col < mb_cols; ++mb_col) in estimate_missing_mvs()
[all …]
error_concealment.h
37 int mb_rows, int mb_cols,
threading.c
306 const int first_row_no_sync_above = pc->mb_cols + nsync; in mt_decode_mb_rows()
408 for (mb_col = 0; mb_col < pc->mb_cols; mb_col++) in mt_decode_mb_rows()
426 xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3; in mt_decode_mb_rows()
452 pc->mb_rows, pc->mb_cols, in mt_decode_mb_rows()
509 if(mb_col != pc->mb_cols-1) in mt_decode_mb_rows()
decodeframe.c
599 for (mb_col = 0; mb_col < pc->mb_cols; mb_col++) in decode_mb_rows()
606 xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3; in decode_mb_rows()
628 pc->mb_rows, pc->mb_cols, in decode_mb_rows()
710 lf_mic += pc->mb_cols; in decode_mb_rows()
1341 pbi->mvs_corrupt_from_mb < (unsigned int)pc->mb_cols * pc->mb_rows) in vp8_decode_frame()
1349 memset(pc->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * pc->mb_cols); in vp8_decode_frame()
decodemv.c
630 mb_to_right_edge_start = ((pbi->common.mb_cols - 1) * 16) << 3; in vp8_decode_mode_mvs()
639 while (++mb_col < pbi->common.mb_cols) in vp8_decode_mode_mvs()
642 int mb_num = mb_row * pbi->common.mb_cols + mb_col; in vp8_decode_mode_mvs()
/external/libvpx/libvpx/test/
set_roi.cc
54 cpi.common.mb_cols = 320 >> 4; in TEST()
55 const int mbs = (cpi.common.mb_rows * cpi.common.mb_cols); in TEST()
70 cpi.common.mb_cols, delta_q, delta_lf, in TEST()
138 cpi.common.mb_cols, rand_deltas, in TEST()
148 cpi.common.mb_cols, delta_q, in TEST()
160 cpi.common.mb_cols, delta_q, in TEST()
167 cpi.common.mb_cols, delta_q, in TEST()
172 cpi.common.mb_cols - 1, delta_q, in TEST()
active_map_refresh_test.cc
48 void GenerateMap(int mb_rows, int mb_cols, const vpx_image_t &current, in GenerateMap() argument
51 for (int mb_c = 0; mb_c < mb_cols; ++mb_c) { in GenerateMap()
52 map[mb_r * mb_cols + mb_c] = CheckMb(current, previous, mb_r, mb_c); in GenerateMap()
/external/libvpx/libvpx/vp8/common/
alloccommon.c
84 oci->mb_cols = width >> 4; in vp8_alloc_frame_buffers()
85 oci->MBs = oci->mb_rows * oci->mb_cols; in vp8_alloc_frame_buffers()
86 oci->mode_info_stride = oci->mb_cols + 1; in vp8_alloc_frame_buffers()
87 oci->mip = vpx_calloc((oci->mb_cols + 1) * (oci->mb_rows + 1), sizeof(MODE_INFO)); in vp8_alloc_frame_buffers()
97 oci->above_context = vpx_calloc(sizeof(ENTROPY_CONTEXT_PLANES) * oci->mb_cols, 1); in vp8_alloc_frame_buffers()
115 oci->pp_limits_buffer = vpx_memalign(16, 24 * ((oci->mb_cols + 1) & ~1)); in vp8_alloc_frame_buffers()
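The alloccommon.c hits above are where VP8 sizes its macroblock grid: a macroblock covers 16x16 luma pixels, so mb_cols is the frame width shifted right by 4, MBs is the product of rows and columns, and the mode-info array keeps one extra border column (mb_cols + 1). A minimal standalone sketch of that arithmetic, using a hypothetical 640x480 frame rather than anything taken from these results:

    #include <stdio.h>

    int main(void) {
        /* Hypothetical frame size, assumed to be a multiple of 16 for simplicity. */
        const int width = 640, height = 480;
        const int mb_cols = width >> 4;           /* 40: 16x16 macroblocks per row */
        const int mb_rows = height >> 4;          /* 30: macroblock rows */
        const int MBs = mb_rows * mb_cols;        /* 1200: total macroblocks */
        const int mode_info_stride = mb_cols + 1; /* extra border column, as in the hit at line 86 */
        printf("mb_cols=%d mb_rows=%d MBs=%d mode_info_stride=%d\n",
               mb_cols, mb_rows, MBs, mode_info_stride);
        return 0;
    }
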
vp8_loopfilter.c
208 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) in vp8_loop_filter_row_normal()
265 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) in vp8_loop_filter_row_simple()
316 int mb_cols = cm->mb_cols; in vp8_loop_filter_frame() local
340 for (mb_col = 0; mb_col < mb_cols; mb_col++) in vp8_loop_filter_frame()
396 for (mb_col = 0; mb_col < mb_cols; mb_col++) in vp8_loop_filter_frame()
482 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) in vp8_loop_filter_frame_yonly()
565 int mb_cols = post->y_width >> 4; in vp8_loop_filter_partial_frame() local
592 mode_info_context = cm->mi + (post->y_height >> 5) * (mb_cols + 1); in vp8_loop_filter_partial_frame()
597 for (mb_col = 0; mb_col < mb_cols; mb_col++) in vp8_loop_filter_partial_frame()
mfqe.c
305 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) in vp8_multiframe_quality_enhance()
377 y_ptr += show->y_stride * 16 - 16 * cm->mb_cols; in vp8_multiframe_quality_enhance()
378 u_ptr += show->uv_stride * 8 - 8 * cm->mb_cols; in vp8_multiframe_quality_enhance()
379 v_ptr += show->uv_stride * 8 - 8 * cm->mb_cols; in vp8_multiframe_quality_enhance()
380 yd_ptr += dest->y_stride * 16 - 16 * cm->mb_cols; in vp8_multiframe_quality_enhance()
381 ud_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols; in vp8_multiframe_quality_enhance()
382 vd_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols; in vp8_multiframe_quality_enhance()
postproc.c
339 unsigned char *uvlimits = cm->pp_limits_buffer + 16 * cm->mb_cols; in vp8_deblock()
349 for (mbc = 0; mbc < cm->mb_cols; mbc++) in vp8_deblock()
400 int mb_cols = cm->mb_cols; in vp8_de_noise() local
406 memset(limits, (unsigned char)ppl, 16 * mb_cols); in vp8_de_noise()
851 oci->mb_cols, oci->mb_rows); in vp8_post_proc_frame()
861 int mb_cols = post->y_width >> 4; in vp8_post_proc_frame() local
870 for (j = 0; j < mb_cols; j++) in vp8_post_proc_frame()
893 int mb_cols = post->y_width >> 4; in vp8_post_proc_frame() local
902 for (j = 0; j < mb_cols; j++) in vp8_post_proc_frame()
onyxc_int.h
104 int mb_cols; member
/external/libvpx/libvpx/vp8/encoder/
lookahead.c
114 int mb_cols = (src->y_width + 15) >> 4; in vp8_lookahead_push() local
135 for (; col < mb_cols; ++col) in vp8_lookahead_push()
142 if (col == mb_cols) in vp8_lookahead_push()
148 for (; active_end < mb_cols; ++active_end) in vp8_lookahead_push()
164 active_map += mb_cols; in vp8_lookahead_push()
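The lookahead.c hits compute mb_cols as (y_width + 15) >> 4, a ceiling division by 16, so a frame whose width is not a multiple of 16 still gets a column for the partial macroblocks at the right edge; the active map is then walked one row of mb_cols entries at a time. A small sketch of that rounding, using hypothetical widths:

    #include <stdio.h>

    /* Ceiling division by 16, as used for the lookahead's per-row macroblock count. */
    static int mb_cols_for_width(int y_width) {
        return (y_width + 15) >> 4;
    }

    int main(void) {
        printf("width 640 -> %d mb cols\n", mb_cols_for_width(640)); /* 40 */
        printf("width 636 -> %d mb cols\n", mb_cols_for_width(636)); /* 40: partial column still counted */
        printf("width 624 -> %d mb cols\n", mb_cols_for_width(624)); /* 39 */
        return 0;
    }
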
segmentation.c
26 memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols)); in vp8_update_gf_useage_maps()
27 cpi->gf_active_count = cm->mb_rows * cm->mb_cols; in vp8_update_gf_useage_maps()
35 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) in vp8_update_gf_useage_maps()
mr_dissim.c
94 for (mb_col = 0; mb_col < cm->mb_cols; mb_col ++) in vp8_cal_dissimilarity()
124 if(mb_col < (cm->mb_cols-1)) in vp8_cal_dissimilarity()
140 if(mb_col < (cm->mb_cols-1) in vp8_cal_dissimilarity()
153 if(mb_col < (cm->mb_cols-1)) in vp8_cal_dissimilarity()
169 if(mb_col < (cm->mb_cols-1) in vp8_cal_dissimilarity()
ethreading.c
87 int map_index = (mb_row * cm->mb_cols); in thread_encoding_proc()
94 tp = cpi->tok + (mb_row * (cm->mb_cols * 16 * 24)); in thread_encoding_proc()
114 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) in thread_encoding_proc()
136 xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3; in thread_encoding_proc()
144 x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16); in thread_encoding_proc()
305 … x->src.y_buffer += 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) - 16 * cm->mb_cols; in thread_encoding_proc()
306 … x->src.u_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols; in thread_encoding_proc()
307 … x->src.v_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols; in thread_encoding_proc()
311 x->gf_active_ptr += cm->mb_cols * cpi->encoding_thread_count; in thread_encoding_proc()
532 if(th_count > ((cm->mb_cols / cpi->mt_sync_range) - 1)) in vp8cx_create_encoder_threads()
[all …]
encodeframe.c
227 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) in calc_activity_index()
289 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) in build_activity_map()
317 x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols; in build_activity_map()
378 int map_index = (mb_row * cpi->common.mb_cols); in encode_mb_row()
388 const int rightmost_col = cm->mb_cols + nsync; in encode_mb_row()
432 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) in encode_mb_row()
443 xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3; in encode_mb_row()
449 x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16) in encode_mb_row()
669 memset(cm->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols); in init_encode_frame_mb_context()
848 tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24); in vp8_encode_frame()
[all …]
temporal_filter.c
237 int mb_cols = cpi->common.mb_cols; in vp8_temporal_filter_iterate_c() local
272 for (mb_col = 0; mb_col < mb_cols; mb_col++) in vp8_temporal_filter_iterate_c()
282 cpi->mb.mv_col_max = ((cpi->common.mb_cols - 1 - mb_col) * 16) in vp8_temporal_filter_iterate_c()
420 mb_y_offset += 16*(f->y_stride-mb_cols); in vp8_temporal_filter_iterate_c()
421 mb_uv_offset += 8*(f->uv_stride-mb_cols); in vp8_temporal_filter_iterate_c()
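Several of the hits above (mfqe.c, ethreading.c, temporal_filter.c) share the same pattern for walking a plane one macroblock row at a time: after processing mb_cols blocks the offset has moved 16*mb_cols samples into the row, so adding 16*(stride - mb_cols) drops it 16 lines down to the start of the next macroblock row. A hedged standalone sketch of that offset arithmetic under hypothetical plane dimensions, not the actual filter loops:

    #include <stdio.h>

    int main(void) {
        /* Hypothetical luma-plane geometry; the stride exceeds 16 * mb_cols because of padding. */
        const int mb_cols = 40, mb_rows = 30, y_stride = 672;
        int mb_row, mb_col, mb_y_offset = 0;
        for (mb_row = 0; mb_row < mb_rows; ++mb_row) {
            for (mb_col = 0; mb_col < mb_cols; ++mb_col)
                mb_y_offset += 16;                    /* advance one macroblock to the right */
            mb_y_offset += 16 * (y_stride - mb_cols); /* drop 16 lines to the start of the next MB row */
        }
        /* Each row contributes 16 * y_stride, so the final offset is mb_rows full macroblock strides. */
        printf("final offset = %d (expected %d)\n", mb_y_offset, 16 * y_stride * mb_rows);
        return 0;
    }
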
onyx_if.c
504 memcpy(cpi->segmentation_map, segmentation_map, (cpi->common.mb_rows * cpi->common.mb_cols)); in set_segmentation_map()
537 int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols; in cyclic_background_refresh()
550 (cpi->common.mb_rows * cpi->common.mb_cols) / 10; in cyclic_background_refresh()
557 (cpi->common.mb_rows * cpi->common.mb_cols) / 20; in cyclic_background_refresh()
1166 cpi->mb.pip = vpx_calloc((cpi->common.mb_cols + 1) * in vp8_alloc_partition_data()
1216 unsigned int tokens = cm->mb_rows * cm->mb_cols * 24 * 16; in vp8_alloc_compressor_data()
1229 cm->mb_rows * cm->mb_cols)); in vp8_alloc_compressor_data()
1230 cpi->gf_active_count = cm->mb_rows * cm->mb_cols; in vp8_alloc_compressor_data()
1235 cm->mb_rows * cm->mb_cols)); in vp8_alloc_compressor_data()
1239 CHECK_MEM_ERROR(cpi->lfmv, vpx_calloc((cm->mb_rows+2) * (cm->mb_cols+2), in vp8_alloc_compressor_data()
[all …]
/external/libvpx/libvpx/vp9/encoder/
vp9_lookahead.c
102 int mb_cols = (src->y_width + 15) >> 4; in vp9_lookahead_push() local
141 for (; col < mb_cols; ++col) { in vp9_lookahead_push()
147 if (col == mb_cols) in vp9_lookahead_push()
153 for (; active_end < mb_cols; ++active_end) { in vp9_lookahead_push()
168 active_map += mb_cols; in vp9_lookahead_push()
vp9_mbgraph.c
270 x->mv_col_max = (cm->mb_cols - 1) * 8 + BORDER_MV_PIXELS_B16; in update_mbgraph_frame_stats()
273 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) { in update_mbgraph_frame_stats()
297 offset += cm->mb_cols; in update_mbgraph_frame_stats()
312 vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz), in separate_arf_mbs()
324 offset += cm->mb_cols, mb_row++) { in separate_arf_mbs()
325 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) { in separate_arf_mbs()
348 if (arf_not_zz[mi_row / 2 * cm->mb_cols + mi_col / 2]) { in separate_arf_mbs()
399 cm->mb_rows * cm->mb_cols * sizeof(*cpi->mbgraph_stats[i].mb_stats)); in vp9_update_mbgraph_stats()
vp9_temporal_filter.c
283 int mb_cols = (frames[alt_ref_index]->y_crop_width + 15) >> 4; in temporal_filter_iterate_c() local
332 for (mb_col = 0; mb_col < mb_cols; mb_col++) { in temporal_filter_iterate_c()
340 cpi->td.mb.mv_col_max = ((mb_cols - 1 - mb_col) * 16) in temporal_filter_iterate_c()
584 mb_y_offset += 16 * (f->y_stride - mb_cols); in temporal_filter_iterate_c()
585 mb_uv_offset += mb_uv_height * f->uv_stride - mb_uv_width * mb_cols; in temporal_filter_iterate_c()
/external/libvpx/libvpx/vp9/common/
vp9_alloccommon.c
46 cm->mb_cols = (cm->mi_cols + 1) >> 1; in vp9_set_mb_mi()
48 cm->MBs = cm->mb_rows * cm->mb_cols; in vp9_set_mb_mi()
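The vp9_alloccommon.c hit shows that VP9 no longer derives mb_cols directly from the pixel width: the frame is first described in 8x8 mode-info (mi) units, and a 16x16 macroblock spans two mi units per side, so mb_cols = (mi_cols + 1) >> 1 rounds up. A brief sketch under the assumption that mi_cols itself is the 8-pixel-aligned width divided by 8:

    #include <stdio.h>

    int main(void) {
        const int width = 360;                  /* hypothetical frame width */
        const int aligned = (width + 7) & ~7;   /* assumed 8-pixel alignment */
        const int mi_cols = aligned >> 3;       /* 45 mode-info (8x8) columns */
        const int mb_cols = (mi_cols + 1) >> 1; /* 23: rounded up to whole 16x16 macroblocks */
        printf("mi_cols=%d mb_cols=%d\n", mi_cols, mb_cols);
        return 0;
    }
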
