/external/libvpx/libvpx/vp8/encoder/ |
D | encodemb.c |
    52  x->e_mbd.dst.y_buffer, x->e_mbd.dst.y_stride);  in vp8_subtract_mb()
    54  x->src.uv_stride, x->e_mbd.dst.u_buffer,  in vp8_subtract_mb()
    55  x->e_mbd.dst.v_buffer, x->e_mbd.dst.uv_stride);  in vp8_subtract_mb()
    97  if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) build_dcblock(x);  in transform_mb()
   104  if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) {  in transform_mb()
   117  if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) {  in transform_mby()
   177  d = &mb->e_mbd.block[ib];  in optimize_b()
   188  if (mb->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME) {  in optimize_b()
   399  memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));  in optimize_mb()
   400  memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));  in optimize_mb()
   [all …]
|
D | rdopt.c |
   339  bd = &mb->e_mbd.block[i];  in vp8_mbblock_error_c()
   363  bd = &mb->e_mbd.block[i];  in vp8_mbuverror_c()
   379  int mv_row = x->e_mbd.mode_info_context->mbmi.mv.as_mv.row;  in VP8_UVSSE()
   380  int mv_col = x->e_mbd.mode_info_context->mbmi.mv.as_mv.col;  in VP8_UVSSE()
   382  int pre_stride = x->e_mbd.pre.uv_stride;  in VP8_UVSSE()
   400  uptr = x->e_mbd.pre.u_buffer + offset;  in VP8_UVSSE()
   401  vptr = x->e_mbd.pre.v_buffer + offset;  in VP8_UVSSE()
   449  MACROBLOCKD *x = &mb->e_mbd;  in vp8_rdcost_mby()
   454  memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));  in vp8_rdcost_mby()
   455  memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));  in vp8_rdcost_mby()
   [all …]
|
D | pickinter.c |
   146  BLOCKD *d = &mb->e_mbd.block[0];  in vp8_get_inter_mbpred_error()
   149  int pre_stride = mb->e_mbd.pre.y_stride;  in vp8_get_inter_mbpred_error()
   150  unsigned char *in_what = mb->e_mbd.pre.y_buffer + d->offset;  in vp8_get_inter_mbpred_error()
   179  BLOCKD *b = &x->e_mbd.block[ib];  in pick_intra4x4block()
   181  int dst_stride = x->e_mbd.dst.y_stride;  in pick_intra4x4block()
   182  unsigned char *dst = x->e_mbd.dst.y_buffer + b->offset;  in pick_intra4x4block()
   216  MACROBLOCKD *const xd = &mb->e_mbd;  in pick_intra4x4mby_modes()
   234  if (mb->e_mbd.frame_type == KEY_FRAME) {  in pick_intra4x4mby_modes()
   268  MACROBLOCKD *x = &mb->e_mbd;  in pick_intra_mbuv_mode()
   376  mb->e_mbd.mode_info_context->mbmi.uv_mode = best_mode;  in pick_intra_mbuv_mode()
   [all …]
|
D | encodeintra.c |
    27  x->e_mbd.mode_info_context->mbmi.mode = DC_PRED;  in vp8_encode_intra()
    28  x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;  in vp8_encode_intra()
    29  x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;  in vp8_encode_intra()
    33  vp8_inverse_transform_mby(&x->e_mbd);  in vp8_encode_intra()
    36  x->e_mbd.block[i].bmi.as_mode = B_DC_PRED;  in vp8_encode_intra()
    47  BLOCKD *b = &x->e_mbd.block[ib];  in vp8_encode_intra4x4block()
    49  int dst_stride = x->e_mbd.dst.y_stride;  in vp8_encode_intra4x4block()
    50  unsigned char *dst = x->e_mbd.dst.y_buffer + b->offset;  in vp8_encode_intra4x4block()
    74  MACROBLOCKD *xd = &mb->e_mbd;  in vp8_encode_intra4x4mby()
    83  MACROBLOCKD *xd = &x->e_mbd;  in vp8_encode_intra16x16mby()
   [all …]
|
D | vp8_quantize.c |
    98  int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED &&  in vp8_quantize_mby()
    99  x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);  in vp8_quantize_mby()
   101  for (i = 0; i < 16; ++i) x->quantize_b(&x->block[i], &x->e_mbd.block[i]);  in vp8_quantize_mby()
   103  if (has_2nd_order) x->quantize_b(&x->block[24], &x->e_mbd.block[24]);  in vp8_quantize_mby()
   108  int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED &&  in vp8_quantize_mb()
   109  x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);  in vp8_quantize_mb()
   112  x->quantize_b(&x->block[i], &x->e_mbd.block[i]);  in vp8_quantize_mb()
   119  for (i = 16; i < 24; ++i) x->quantize_b(&x->block[i], &x->e_mbd.block[i]);  in vp8_quantize_mbuv()
   293  MACROBLOCKD *xd = &x->e_mbd;  in vp8cx_mb_init_quantizer()
   337  for (i = 0; i < 16; ++i) x->e_mbd.block[i].dequant = xd->dequant_y1;  in vp8cx_mb_init_quantizer()
   [all …]
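The vp8_quantize.c rows show the pairing that recurs throughout the VP8 encoder: each encoder-side coefficient block x->block[i] is quantized into the matching reconstruction-side block x->e_mbd.block[i], with block 24 (the second-order Y2 block) handled only when the mode is neither B_PRED nor SPLITMV. A minimal, self-contained sketch of that loop follows; BLOCK/BLOCKD and the mode test are reduced to assumed placeholders, the real types come from vp8/encoder/block.h and vp8/common/blockd.h.

    /* Hedged sketch of the quantize loops listed above, not the actual
     * vp8_quantize.c implementation. */
    typedef struct { short *coeff; } BLOCK;               /* placeholder */
    typedef struct { short *qcoeff; char *eob; } BLOCKD;  /* placeholder */

    typedef struct {
      BLOCK block[25];                           /* encoder-side blocks */
      struct { BLOCKD block[25]; } e_mbd;        /* embedded reconstruction side */
      void (*quantize_b)(BLOCK *b, BLOCKD *d);   /* selected quantizer */
      int has_2nd_order;  /* stands in for mode != B_PRED && mode != SPLITMV */
    } MACROBLOCK_SKETCH;

    static void quantize_mb_sketch(MACROBLOCK_SKETCH *x) {
      int i;
      /* Luma blocks 0..15 and chroma blocks 16..23. */
      for (i = 0; i < 24; ++i) x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
      /* Second-order (Y2) block 24 exists only for non-B_PRED/SPLITMV modes. */
      if (x->has_2nd_order) x->quantize_b(&x->block[24], &x->e_mbd.block[24]);
    }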
|
D | picklpf.c |
   172  vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);  in vp8cx_pick_filter_level_fast()
   182  vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);  in vp8cx_pick_filter_level_fast()
   210  vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);  in vp8cx_pick_filter_level_fast()
   244  MACROBLOCKD *mbd = &cpi->mb.e_mbd;  in vp8cx_set_alt_lf_level()
   310  vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_mid);  in vp8cx_pick_filter_level()
   337  vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_low);  in vp8cx_pick_filter_level()
   361  vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_high);  in vp8cx_pick_filter_level()
|
D | temporal_filter.c |
   123  BLOCKD *d = &x->e_mbd.block[0];  in vp8_temporal_filter_find_matching_mb_c()
   131  unsigned char *base_pre = x->e_mbd.pre.y_buffer;  in vp8_temporal_filter_find_matching_mb_c()
   133  int pre_stride = x->e_mbd.pre.y_stride;  in vp8_temporal_filter_find_matching_mb_c()
   146  x->e_mbd.pre.y_buffer = frame_ptr->y_buffer;  in vp8_temporal_filter_find_matching_mb_c()
   147  x->e_mbd.pre.y_stride = frame_ptr->y_stride;  in vp8_temporal_filter_find_matching_mb_c()
   179  x->e_mbd.pre.y_buffer = base_pre;  in vp8_temporal_filter_find_matching_mb_c()
   181  x->e_mbd.pre.y_stride = pre_stride;  in vp8_temporal_filter_find_matching_mb_c()
   199  MACROBLOCKD *mbd = &cpi->mb.e_mbd;  in vp8_temporal_filter_iterate_c()
|
D | mcomp.c |
   253  int pre_stride = x->e_mbd.pre.y_stride;  in vp8_find_best_sub_pixel_step_iteratively()
   254  unsigned char *base_pre = x->e_mbd.pre.y_buffer;  in vp8_find_best_sub_pixel_step_iteratively()
   257  MACROBLOCKD *xd = &x->e_mbd;  in vp8_find_best_sub_pixel_step_iteratively()
   382  int pre_stride = x->e_mbd.pre.y_stride;  in vp8_find_best_sub_pixel_step()
   383  unsigned char *base_pre = x->e_mbd.pre.y_buffer;  in vp8_find_best_sub_pixel_step()
   386  MACROBLOCKD *xd = &x->e_mbd;  in vp8_find_best_sub_pixel_step()
   678  int pre_stride = x->e_mbd.pre.y_stride;  in vp8_find_best_half_pixel_step()
   679  unsigned char *base_pre = x->e_mbd.pre.y_buffer;  in vp8_find_best_half_pixel_step()
   682  MACROBLOCKD *xd = &x->e_mbd;  in vp8_find_best_half_pixel_step()
   851  int pre_stride = x->e_mbd.pre.y_stride;  in vp8_hex_search()
   [all …]
|
D | onyx_if.c |
   380  if (cpi->mb.e_mbd.segmentation_enabled) {  in setup_features()
   381  cpi->mb.e_mbd.update_mb_segmentation_map = 1;  in setup_features()
   382  cpi->mb.e_mbd.update_mb_segmentation_data = 1;  in setup_features()
   384  cpi->mb.e_mbd.update_mb_segmentation_map = 0;  in setup_features()
   385  cpi->mb.e_mbd.update_mb_segmentation_data = 0;  in setup_features()
   388  cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 0;  in setup_features()
   389  cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;  in setup_features()
   390  memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));  in setup_features()
   391  memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));  in setup_features()
   392  memset(cpi->mb.e_mbd.last_ref_lf_deltas, 0,  in setup_features()
   [all …]
|
D | ethreading.c |
    58  MACROBLOCKD *xd = &x->e_mbd;  in thread_encoding_proc()
   376  MACROBLOCKD *xd = &x->e_mbd;  in setup_mbby_copy()
   377  MACROBLOCKD *zd = &z->e_mbd;  in setup_mbby_copy()
   438  MACROBLOCKD *const xd = &x->e_mbd;  in vp8cx_init_mbrthread_data()
   443  MACROBLOCKD *mbd = &mb->e_mbd;  in vp8cx_init_mbrthread_data()
   454  mb->partition_info = x->pi + x->e_mbd.mode_info_stride * (i + 1);  in vp8cx_init_mbrthread_data()
   538  vp8_setup_block_dptrs(&cpi->mb_row_ei[ithread].mb.e_mbd);  in vp8cx_create_encoder_threads()
|
D | encodeframe.c |
   236  MACROBLOCKD *xd = &x->e_mbd;  in build_activity_map()
   578  MACROBLOCKD *const xd = &x->e_mbd;  in init_encode_frame_mb_context()
   670  MACROBLOCKD *const xd = &x->e_mbd;  in vp8_encode_frame()
   990  vp8_build_block_doffsets(&x->e_mbd);  in vp8_build_block_offsets()
  1028  const MACROBLOCKD *xd = &x->e_mbd;  in sum_intra_stats()
  1080  MACROBLOCKD *xd = &x->e_mbd;  in vp8cx_encode_intra_macroblock()
  1094  if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED) {  in vp8cx_encode_intra_macroblock()
  1122  MACROBLOCKD *const xd = &x->e_mbd;  in vp8cx_encode_inter_macroblock()
|
D | tokenize.c |
   106  MACROBLOCKD *xd = &x->e_mbd;  in tokenize2nd_order_b()
   187  MACROBLOCKD *xd = &x->e_mbd;  in tokenize1st_order_b()
   358  MACROBLOCKD *xd = &x->e_mbd;  in vp8_tokenize_mb()
   505  MACROBLOCKD *xd = &x->e_mbd;  in vp8_stuff_mb()
|
D | block.h |
    73  MACROBLOCKD e_mbd;  member
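The single block.h hit is the declaration every other match refers to: the encoder-side MACROBLOCK embeds a MACROBLOCKD by value under the name e_mbd, and encoder code reaches it through a local alias. A minimal sketch of that relationship follows; all fields other than e_mbd are placeholders, since the real structs in vp8/common/blockd.h and vp8/encoder/block.h carry many more members.

    /* Simplified stand-ins for the real libvpx structs. */
    typedef struct macroblockd {
      int segmentation_enabled;  /* placeholder field */
    } MACROBLOCKD;

    typedef struct macroblock {
      MACROBLOCKD e_mbd;  /* shared "decoder-side" state embedded by value, as at block.h:73 */
      int skip;           /* placeholder encoder-only field */
    } MACROBLOCK;

    /* The access pattern seen throughout the listing: alias the embedded
     * MACROBLOCKD instead of repeating x->e_mbd.<field> everywhere. */
    static void use_macroblock(MACROBLOCK *x) {
      MACROBLOCKD *const xd = &x->e_mbd;
      xd->segmentation_enabled = 0;
    }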
|
/external/libvpx/libvpx/test/ |
D | set_roi.cc |
    43  cpi.mb.e_mbd.mb_segement_abs_delta = SEGMENT_DELTADATA;  in TEST()
    45  cpi.mb.e_mbd.segmentation_enabled = 0;  in TEST()
    46  cpi.mb.e_mbd.update_mb_segmentation_map = 0;  in TEST()
    47  cpi.mb.e_mbd.update_mb_segmentation_data = 0;  in TEST()
   108  EXPECT_EQ(1, cpi.mb.e_mbd.segmentation_enabled)  in TEST()
   110  EXPECT_EQ(1, cpi.mb.e_mbd.update_mb_segmentation_map)  in TEST()
   112  EXPECT_EQ(1, cpi.mb.e_mbd.update_mb_segmentation_data)  in TEST()
|
D | quantize_test.cc |
    72  memcpy(macroblockd_dst_, &vp8_comp_->mb.e_mbd, sizeof(*macroblockd_dst_));  in SetupCompressor()
    81  memcpy(macroblockd_dst_, &vp8_comp_->mb.e_mbd, sizeof(*macroblockd_dst_));  in UpdateQuantizer()
    98  EXPECT_EQ(0, memcmp(vp8_comp_->mb.e_mbd.qcoeff, macroblockd_dst_->qcoeff,  in CheckOutput()
   102  EXPECT_EQ(0, memcmp(vp8_comp_->mb.e_mbd.dqcoeff, macroblockd_dst_->dqcoeff,  in CheckOutput()
   106  EXPECT_EQ(0, memcmp(vp8_comp_->mb.e_mbd.eobs, macroblockd_dst_->eobs,  in CheckOutput()
   130  c_quant_(&vp8_comp_->mb.block[i], &vp8_comp_->mb.e_mbd.block[i]));  in RunComparison()
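The quantize_test.cc rows show a snapshot-and-compare pattern: the encoder's e_mbd is copied into a scratch MACROBLOCKD, the reference quantizer writes into one copy, the optimized quantizer into the other, and qcoeff/dqcoeff/eobs are memcmp'd for bit-exactness. A rough sketch of that idea follows, with placeholder types and plain assert() standing in for the libvpx structs and GoogleTest macros the real test uses.

    #include <assert.h>
    #include <string.h>

    /* Placeholder coefficient storage; the real MACROBLOCKD keeps
     * qcoeff/dqcoeff/eobs for all 25 blocks of a macroblock. */
    typedef struct {
      short qcoeff[400];
      short dqcoeff[400];
      char eobs[25];
    } MBD_COEFFS;

    typedef void (*quantize_fn)(MBD_COEFFS *out);

    /* Run a reference and an optimized quantizer on identical inputs and
     * require bit-exact coefficient output. */
    static void compare_quantizers(const MBD_COEFFS *input,
                                   quantize_fn ref, quantize_fn opt) {
      MBD_COEFFS ref_out, opt_out;
      memcpy(&ref_out, input, sizeof(ref_out));  /* snapshot, like the memcpy of e_mbd */
      memcpy(&opt_out, input, sizeof(opt_out));
      ref(&ref_out);
      opt(&opt_out);
      assert(memcmp(ref_out.qcoeff, opt_out.qcoeff, sizeof(ref_out.qcoeff)) == 0);
      assert(memcmp(ref_out.dqcoeff, opt_out.dqcoeff, sizeof(ref_out.dqcoeff)) == 0);
      assert(memcmp(ref_out.eobs, opt_out.eobs, sizeof(ref_out.eobs)) == 0);
    }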
|
/external/libvpx/libvpx/vp8/encoder/x86/ |
D | vp8_enc_stubs_sse2.c |
    19  short *dcoef_ptr = mb->e_mbd.block[0].dqcoeff;  in vp8_mbblock_error_sse2()
    26  short *d_ptr = &mb->e_mbd.dqcoeff[256];  in vp8_mbuverror_sse2()
|
/external/libvpx/libvpx/vp9/encoder/ |
D | vp9_tokenize.c |
   323  MACROBLOCKD *const xd = &x->e_mbd;  in set_entropy_context_b()
   354  MACROBLOCKD *const xd = &x->e_mbd;  in tokenize_b()
   441  vp9_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane, is_skippable,  in vp9_is_skippable_in_plane()
   461  vp9_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane,  in vp9_has_high_freq_in_plane()
   469  MACROBLOCKD *const xd = &x->e_mbd;  in vp9_tokenize_sb()
|
D | vp9_mbgraph.c |
    29  MACROBLOCKD *const xd = &x->e_mbd;  in do_16x16_motion_iteration()
    79  MACROBLOCKD *const xd = &x->e_mbd;  in do_16x16_motion_search()
   116  MACROBLOCKD *const xd = &x->e_mbd;  in do_16x16_zerozero_search()
   130  MACROBLOCKD *const xd = &x->e_mbd;  in find_best_16x16_intra()
   165  MACROBLOCKD *const xd = &x->e_mbd;  in update_mbgraph_mb_stats()
   217  MACROBLOCKD *const xd = &x->e_mbd;  in update_mbgraph_frame_stats()
|
D | vp9_encodemb.c |
    35  const struct macroblockd_plane *const pd = &x->e_mbd.plane[plane];  in vp9_subtract_plane()
    41  if (x->e_mbd.cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {  in vp9_subtract_plane()
    44  x->e_mbd.bd);  in vp9_subtract_plane()
    62  MACROBLOCKD *const xd = &mb->e_mbd;  in vp9_optimize_b()
   325  MACROBLOCKD *const xd = &x->e_mbd;  in vp9_xform_quant_fp()
   402  MACROBLOCKD *const xd = &x->e_mbd;  in vp9_xform_quant_dc()
   475  MACROBLOCKD *const xd = &x->e_mbd;  in vp9_xform_quant()
   560  MACROBLOCKD *const xd = &x->e_mbd;  in encode_block()
   673  MACROBLOCKD *const xd = &x->e_mbd;  in encode_block_pass1()
   696  vp9_foreach_transformed_block_in_plane(&x->e_mbd, bsize, 0,  in vp9_encode_sby_pass1()
   [all …]
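The VP9 encoder files repeat the same embedding with a richer MACROBLOCKD: nearly every function above (and in the rdopt, encodeframe and mcomp listings below) opens with MACROBLOCKD *const xd = &x->e_mbd; and then works through per-plane state such as xd->plane[i].pre[0] or the subsampling fields. A minimal sketch of that idiom follows, with the structs cut down to assumed placeholders; the real definitions live in vp9/common/vp9_blockd.h and vp9/encoder/vp9_block.h.

    /* Reduced stand-ins for the VP9 structs; only fields named in this
     * listing are kept. */
    struct buf_2d { unsigned char *buf; int stride; };
    struct macroblockd_plane {
      struct buf_2d pre[2];            /* reference ("predictor") buffers */
      int subsampling_x, subsampling_y;
    };
    typedef struct {
      struct macroblockd_plane plane[3];  /* Y, U, V */
      int bd;                             /* bit depth */
    } MACROBLOCKD;
    typedef struct { MACROBLOCKD e_mbd; } MACROBLOCK;

    /* The alias taken at the top of cost_coeffs(), encode_block(),
     * vp9_xform_quant() and friends, followed by per-plane access such as
     * addressing into plane[0].pre[0]. */
    static const unsigned char *pred_ptr_sketch(MACROBLOCK *x, int plane,
                                                int row, int col) {
      MACROBLOCKD *const xd = &x->e_mbd;
      const struct buf_2d *const pre = &xd->plane[plane].pre[0];
      return pre->buf + row * pre->stride + col;
    }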
|
D | vp9_picklpf.c |
    45  vp9_loop_filter_frame_mt(cm->frame_to_show, cm, cpi->td.mb.e_mbd.plane,  in try_filter_frame()
    49  vp9_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filt_level,  in try_filter_frame()
|
D | vp9_rdopt.c |
   135  struct macroblockd_plane *const pd = &x->e_mbd.plane[i];  in swap_block_ptr()
   355  MACROBLOCKD *const xd = &x->e_mbd;  in cost_coeffs()
   541  MACROBLOCKD *const xd = &x->e_mbd;  in dist_block()
   661  MACROBLOCKD *const xd = &x->e_mbd;  in block_rd_txfm()
   785  MACROBLOCKD *const xd = &x->e_mbd;  in txfm_rd_in_plane()
   822  MACROBLOCKD *const xd = &x->e_mbd;  in choose_largest_tx_size()
   837  MACROBLOCKD *const xd = &x->e_mbd;  in choose_tx_size_from_rd()
   923  MACROBLOCKD *xd = &x->e_mbd;  in super_block_yrd()
   962  MACROBLOCKD *const xd = &x->e_mbd;  in rd_pick_intra4x4block()
  1186  const MACROBLOCKD *const xd = &mb->e_mbd;  in rd_pick_intra_sub_8x8_y_mode()
   [all …]
|
D | vp9_encodeframe.c |
   224  MACROBLOCKD *const xd = &x->e_mbd;  in set_offsets()
  1135  MACROBLOCKD *xd = &x->e_mbd;  in chroma_check()
  1217  MACROBLOCKD *xd = &x->e_mbd;  in choose_partitioning()
  1676  MACROBLOCKD *const xd = &x->e_mbd;  in update_state()
  1805  x->e_mbd.cur_buf = src;  in vp9_setup_src_planes()
  1809  NULL, x->e_mbd.plane[i].subsampling_x,  in vp9_setup_src_planes()
  1810  x->e_mbd.plane[i].subsampling_y);  in vp9_setup_src_planes()
  1815  MACROBLOCKD *const xd = &x->e_mbd;  in set_mode_info_seg_skip()
  1855  MACROBLOCKD *const xd = &x->e_mbd;  in rd_pick_sb_modes()
  1994  const MACROBLOCKD *const xd = &x->e_mbd;  in update_stats()
   [all …]
|
D | vp9_mcomp.c |
   290  const MACROBLOCKD *xd = &x->e_mbd; \
   650  const MACROBLOCKD *xd = &x->e_mbd;  in vp9_find_best_sub_pixel_tree()
   820  const struct buf_2d *const in_what = &x->e_mbd.plane[0].pre[0];  in calc_int_cost_list()
   869  const MACROBLOCKD *const xd = &x->e_mbd;  in vp9_pattern_search()
  1039  const MACROBLOCKD *const xd = &x->e_mbd;  in vp9_pattern_search_sad()
  1313  const MACROBLOCKD *const xd = &x->e_mbd;  in vp9_get_mvpred_var()
  1339  const MACROBLOCKD *const xd = &x->e_mbd;  in vp9_get_mvpred_av_var()
  1497  const MACROBLOCKD *const xd = &x->e_mbd;  in exhuastive_mesh_search()
  1586  const MACROBLOCKD *const xd = &x->e_mbd;  in vp9_diamond_search_sad_c()
  1794  MACROBLOCKD *xd = &x->e_mbd;  in vp9_int_pro_motion_estimation()
   [all …]
|
/external/libvpx/libvpx/vp8/encoder/mips/msa/ |
D | encodeopt_msa.c |
    60  bd = &mb->e_mbd.block[2 * loop_cnt];  in vp8_mbblock_error_msa()
    70  bd = &mb->e_mbd.block[2 * loop_cnt + 1];  in vp8_mbblock_error_msa()
   120  bd = &mb->e_mbd.block[loop_cnt];  in vp8_mbuverror_msa()
   130  bd = &mb->e_mbd.block[loop_cnt + 1];  in vp8_mbuverror_msa()
|
/external/libvpx/libvpx/vp9/encoder/x86/ |
D | vp9_diamond_search_sad_avx.c |
   109  const int in_what_stride = x->e_mbd.plane[0].pre[0].stride;  in vp9_diamond_search_sad_avx()
   112  x->e_mbd.plane[0].pre[0].buf + ref_row * in_what_stride + ref_col;  in vp9_diamond_search_sad_avx()
|