/hardware/intel/common/libva/test/ |
D | loadsurface.h |
      29  int row=0, col=0;  in scale_2dimage() local
      31  for (row=0; row<dst_imgh; row++) {  in scale_2dimage()
      33  …*(dst_img + row * dst_imgw + col) = *(src_img + (row * src_imgh/dst_imgh) * src_imgw + col * src_i…  in scale_2dimage()
      59  int row, col;  in YUV_blend_with_pic() local
      95  for (row=0; row<height; row++) {  in YUV_blend_with_pic()
      96  unsigned char *p = Y_start + row * Y_pitch;  in YUV_blend_with_pic()
      97  unsigned char *q = pic_y + row * width;  in YUV_blend_with_pic()
     121  for (row=0; row<height/2*v_factor_to_nv12; row++) {  in YUV_blend_with_pic()
     122  unsigned char *pU = U_start + row * U_pitch;  in YUV_blend_with_pic()
     123  unsigned char *pV = V_start + row * V_pitch;  in YUV_blend_with_pic()
     [all …]
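The scale_2dimage() hits above are the inner loop of a nearest-neighbor scaler: each destination pixel is fetched from the source pixel whose coordinates are rescaled by the height/width ratio. A minimal sketch of the same idea (names below are illustrative, not the libva test code itself):

    /* Nearest-neighbor 2D scale, one byte per pixel (sketch, not the libva code). */
    static void scale_2d_nearest(const unsigned char *src, int src_w, int src_h,
                                 unsigned char *dst, int dst_w, int dst_h)
    {
        for (int row = 0; row < dst_h; row++) {
            for (int col = 0; col < dst_w; col++) {
                int src_row = row * src_h / dst_h;   /* nearest source row    */
                int src_col = col * src_w / dst_w;   /* nearest source column */
                dst[row * dst_w + col] = src[src_row * src_w + src_col];
            }
        }
    }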
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/ |
D | mcomp.c |
      35  …return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] + mvcost[1][(mv->as_mv.col - ref->as_mv.…  in vp8_mv_bit_cost()
      42  return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] +  in mv_err_cost()
      53  return ((mvsadcost[0][(mv->as_mv.row - ref->as_mv.row)] +  in mvsad_err_cost()
      68  x->ss[search_site_count].mv.row = 0;  in vp8_init_dsmotion_compensation()
      77  x->ss[search_site_count].mv.row = -Len;  in vp8_init_dsmotion_compensation()
      83  x->ss[search_site_count].mv.row = Len;  in vp8_init_dsmotion_compensation()
      89  x->ss[search_site_count].mv.row = 0;  in vp8_init_dsmotion_compensation()
      95  x->ss[search_site_count].mv.row = 0;  in vp8_init_dsmotion_compensation()
     115  x->ss[search_site_count].mv.row = 0;  in vp8_init3smotion_compensation()
     124  x->ss[search_site_count].mv.row = -Len;  in vp8_init3smotion_compensation()
     [all …]
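vp8_mv_bit_cost() and mv_err_cost() price a candidate motion vector by looking up the cost of its row and column deltas against the reference MV (the deltas are halved before the table lookup, as in the hits above). A hedged sketch of that pattern; the "* weight >> 7" normalization is an assumption, since the matched lines are truncated before that point:

    /* MV rate-cost sketch in the VP8 style.  row_cost/col_cost are assumed to
     * point at the zero-delta entry of a signed-index table (as libvpx does),
     * so negative deltas are valid indices. */
    static int mv_bit_cost_sketch(const int *row_cost, const int *col_cost,
                                  int mv_row, int mv_col,
                                  int ref_row, int ref_col, int weight)
    {
        const int dr = (mv_row - ref_row) >> 1;  /* halved delta indexes the table */
        const int dc = (mv_col - ref_col) >> 1;
        return ((row_cost[dr] + col_cost[dc]) * weight) >> 7;  /* assumed scaling */
    }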
|
D | lookahead.c |
     112  int row, col, active_end;  in vp8_lookahead_push() local
     128  for (row = 0; row < mb_rows; ++row)  in vp8_lookahead_push()
     156  row << 4,  in vp8_lookahead_push()
|
D | pickinter.c |
      59  bestmv->as_mv.row <<= 3;  in vp8_skip_fractional_mv_step()
      79  int yoffset = this_mv.as_mv.row & 7;  in vp8_get_inter_mbpred_error()
      81  in_what += (this_mv.as_mv.row >> 3) * pre_stride + (this_mv.as_mv.col >> 3);  in vp8_get_inter_mbpred_error()
     401  x->MVcount[0][mv_max+((xd->mode_info_context->mbmi.mv.as_mv.row -  in update_mvcount()
     402  best_ref_mv->as_mv.row) >> 1)]++;  in update_mvcount()
     450  (*parent_ref_mv).as_mv.row = store_mode_info[parent_mb_index].mv.as_mv.row  in get_lower_res_motion_info()
     541  if( abs(mv_l.as_mv.row) < 8 && abs(mv_l.as_mv.col) < 8)  in calculate_zeromv_rd_adjustment()
     549  if( abs(mv_al.as_mv.row) < 8 && abs(mv_al.as_mv.col) < 8)  in calculate_zeromv_rd_adjustment()
     557  if( abs(mv_a.as_mv.row) < 8 && abs(mv_a.as_mv.col) < 8)  in calculate_zeromv_rd_adjustment()
     867  int row_min = ((best_ref_mv.as_mv.row+7)>>3) - MAX_FULL_PEL_VAL;  in vp8_pick_inter_mode()
     [all …]
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/decoder/ |
D | error_concealment.c |
      28  int row;  member
     139  int row, col;  in calculate_overlaps_mb() local
     157  for (row = 0; row < end_row; ++row)  in calculate_overlaps_mb()
     163  (((first_blk_row + row) *  in calculate_overlaps_mb()
     167  assign_overlap(b_ol_ul[row * 4 + col].overlaps, bmi, overlap);  in calculate_overlaps_mb()
     178  int row, col, rel_row, rel_col;  in vp8_calculate_overlaps() local
     185  row = (4 * b_row) << 3; /* Q3 */  in vp8_calculate_overlaps()
     189  new_row = row - bmi->mv.as_mv.row;  in vp8_calculate_overlaps()
     262  row_acc += overlaps[i].overlap * overlaps[i].bmi->mv.as_mv.row;  in estimate_mv()
     269  bmi->mv.as_mv.row = row_acc / overlap_sum;  in estimate_mv()
     [all …]
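estimate_mv() recovers a lost block's motion vector as the overlap-weighted average of its neighbors' motion vectors, as the row_acc/overlap_sum hits show. A minimal sketch of that weighted average (the struct layout here is illustrative, not the decoder's overlap type):

    /* Overlap-weighted MV estimate for a concealed block (sketch). */
    struct overlap_mv { int overlap; int mv_row; int mv_col; };

    static void estimate_mv_sketch(const struct overlap_mv *ov, int n,
                                   int *out_row, int *out_col)
    {
        int row_acc = 0, col_acc = 0, overlap_sum = 0;
        for (int i = 0; i < n; i++) {
            row_acc     += ov[i].overlap * ov[i].mv_row;
            col_acc     += ov[i].overlap * ov[i].mv_col;
            overlap_sum += ov[i].overlap;
        }
        if (overlap_sum > 0) {
            *out_row = row_acc / overlap_sum;
            *out_col = col_acc / overlap_sum;
        } else {
            *out_row = *out_col = 0;  /* no overlapping neighbors: zero MV */
        }
    }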
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/ |
D | vp9_mcomp.c |
      28  return &buf->buf[mv->row * buf->stride + mv->col];  in get_buf_from_mv()
      33  int row_min = (mv->row >> 3) - MAX_FULL_PEL_VAL + (mv->row & 7 ? 1 : 0);  in vp9_set_mv_search_range()
      35  int row_max = (mv->row >> 3) + MAX_FULL_PEL_VAL;  in vp9_set_mv_search_range()
      71  comp_cost[0][mv->row] + comp_cost[1][mv->col];  in mv_cost()
      76  const MV diff = { mv->row - ref->row,  in vp9_mv_bit_cost()
      85  const MV diff = { mv->row - ref->row,  in mv_err_cost()
      97  const MV diff = { mv->row - ref->row,  in mvsad_err_cost()
     108  x->ss[0].mv.col = x->ss[0].mv.row = 0;  in vp9_init_dsmotion_compensation()
     118  ss->offset = ss->mv.row * stride + ss->mv.col;  in vp9_init_dsmotion_compensation()
     129  x->ss[0].mv.col = x->ss[0].mv.row = 0;  in vp9_init3smotion_compensation()
     [all …]
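vp9_set_mv_search_range() turns a 1/8-pel predicted MV into a full-pel search window of +/- MAX_FULL_PEL_VAL around it; the `& 7` term tightens the lower bound by one when the center is not on a full-pel position. A sketch of just the row bounds, assuming the caller later intersects them with the frame-border limits as libvpx does:

    /* Derive full-pel row search bounds from a 1/8-pel center MV (sketch). */
    static void set_row_search_range(int mv_row_q3, int max_full_pel,
                                     int *row_min, int *row_max)
    {
        /* Shrink the lower bound by one for an inexact center so every
         * candidate stays within the allowed radius. */
        *row_min = (mv_row_q3 >> 3) - max_full_pel + ((mv_row_q3 & 7) ? 1 : 0);
        *row_max = (mv_row_q3 >> 3) + max_full_pel;
    }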
|
D | vp9_lookahead.c |
      97  int row, col, active_end;  in vp9_lookahead_push() local
     116  for (row = 0; row < mb_rows; ++row) {  in vp9_lookahead_push()
     140  row << 4,  in vp9_lookahead_push()
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/ |
D | findnearmv.h |
      30  mvp->as_mv.row *= -1;  in mv_bias()
      44  if (mv->as_mv.row < (xd->mb_to_top_edge - LEFT_TOP_MARGIN))  in vp8_clamp_mv2()
      45  mv->as_mv.row = xd->mb_to_top_edge - LEFT_TOP_MARGIN;  in vp8_clamp_mv2()
      46  else if (mv->as_mv.row > xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN)  in vp8_clamp_mv2()
      47  mv->as_mv.row = xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN;  in vp8_clamp_mv2()
      57  mv->as_mv.row = (mv->as_mv.row < mb_to_top_edge) ?  in vp8_clamp_mv()
      58  mb_to_top_edge : mv->as_mv.row;  in vp8_clamp_mv()
      59  mv->as_mv.row = (mv->as_mv.row > mb_to_bottom_edge) ?  in vp8_clamp_mv()
      60  mb_to_bottom_edge : mv->as_mv.row;  in vp8_clamp_mv()
      69  need_to_clamp |= (mv->as_mv.row < mb_to_top_edge);  in vp8_check_mv_bounds()
     [all …]
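vp8_clamp_mv2() limits a candidate MV so the predicted block cannot reach more than a fixed margin past the frame border, while vp8_check_mv_bounds() only reports whether clamping would be needed. A compact sketch of the same clamp; the edges and margins are assumed to already be expressed in the same units as the MV, as they are in the decoder's macroblockd fields:

    /* Clamp an MV into [edge - margin, edge + margin] on both axes (sketch). */
    static void clamp_mv_sketch(int *mv_row, int *mv_col,
                                int to_top, int to_bottom,
                                int to_left, int to_right, int margin)
    {
        if (*mv_row < to_top - margin)         *mv_row = to_top - margin;
        else if (*mv_row > to_bottom + margin) *mv_row = to_bottom + margin;

        if (*mv_col < to_left - margin)        *mv_col = to_left - margin;
        else if (*mv_col > to_right + margin)  *mv_col = to_right + margin;
    }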
|
D | reconinter.c |
     131  … ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);  in vp8_build_inter_predictors_b()
     133  if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)  in vp8_build_inter_predictors_b()
     135  sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch);  in vp8_build_inter_predictors_b()
     154  … ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);  in build_inter_predictors4b()
     156  if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)  in build_inter_predictors4b()
     158  …x->subpixel_predict8x8(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst…  in build_inter_predictors4b()
     169  … ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);  in build_inter_predictors2b()
     171  if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)  in build_inter_predictors2b()
     173  …x->subpixel_predict8x4(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst…  in build_inter_predictors2b()
     185  … ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);  in build_inter_predictors_b()
     [all …]
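Each of these predictor builders splits the MV into a full-pel part (>> 3), used to offset into the reference frame, and a 1/8-pel fraction (& 7), used to pick a sub-pixel filter phase; when both fractions are zero the block is plain-copied. A sketch of that dispatch, with hypothetical callbacks standing in for the decoder's subpixel_predict/copy function pointers:

    /* Sub-pel vs. integer-pel dispatch for one predicted block (sketch).
     * subpel_predict and copy_block are hypothetical callbacks, not the
     * decoder's RTCD entries. */
    typedef void (*subpel_fn)(const unsigned char *src, int src_stride,
                              int xoffset, int yoffset,
                              unsigned char *dst, int dst_stride);
    typedef void (*copy_fn)(const unsigned char *src, int src_stride,
                            unsigned char *dst, int dst_stride);

    static void build_predictor_sketch(const unsigned char *base_pre, int offset,
                                       int pre_stride, int mv_row, int mv_col,
                                       unsigned char *dst, int dst_stride,
                                       subpel_fn subpel_predict, copy_fn copy_block)
    {
        const unsigned char *ptr = base_pre + offset +
                                   (mv_row >> 3) * pre_stride + (mv_col >> 3);

        if ((mv_row & 7) || (mv_col & 7))
            subpel_predict(ptr, pre_stride, mv_col & 7, mv_row & 7, dst, dst_stride);
        else
            copy_block(ptr, pre_stride, dst, dst_stride);
    }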
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vpx_scale/generic/ |
D | yv12extend.c |
     127  int row;  in vp8_yv12_copy_frame_c() local
     139  for (row = 0; row < src_ybc->y_height; ++row) {  in vp8_yv12_copy_frame_c()
     148  for (row = 0; row < src_ybc->uv_height; ++row) {  in vp8_yv12_copy_frame_c()
     157  for (row = 0; row < src_ybc->uv_height; ++row) {  in vp8_yv12_copy_frame_c()
     168  int row;  in vpx_yv12_copy_y_c() local
     172  for (row = 0; row < src_ybc->y_height; ++row) {  in vpx_yv12_copy_y_c()
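vp8_yv12_copy_frame_c() copies the Y plane and then both chroma planes one row at a time, because source and destination buffers may have different strides, so a single memcpy over the whole plane is not safe. A sketch of the per-plane loop; for a YV12 frame it would run once for Y and once each for U and V at half width/height:

    #include <string.h>

    /* Row-by-row plane copy for buffers with differing strides (sketch). */
    static void copy_plane(const unsigned char *src, int src_stride,
                           unsigned char *dst, int dst_stride,
                           int width, int height)
    {
        for (int row = 0; row < height; ++row) {
            memcpy(dst, src, (size_t)width);
            src += src_stride;
            dst += dst_stride;
        }
    }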
|
/hardware/intel/common/libmix/mix_vbp/viddec_fw/fw/codecs/vc1/parser/ |
D | vc1parse_bitplane.c |
     157  int32_t row[2], col[2];  in vc1_Norm2ModeDecode() local
     170  row[0] = 0; /* i/width; */  in vc1_Norm2ModeDecode()
     172  row[1] = 0; /* (i+1)/width; */  in vc1_Norm2ModeDecode()
     190  row[0] = 1; /* i/width; */  in vc1_Norm2ModeDecode()
     192  row[1] = 2; /* (i+1)/width; */  in vc1_Norm2ModeDecode()
     209  put_bit(0, col[0],row[0], width, height, pBitplane->invert,  in vc1_Norm2ModeDecode()
     211  put_bit(0, col[1],row[1], width, height, pBitplane->invert,  in vc1_Norm2ModeDecode()
     219  put_bit(1, col[0],row[0], width, height, pBitplane->invert,  in vc1_Norm2ModeDecode()
     221  put_bit(1, col[1],row[1], width, height, pBitplane->invert,  in vc1_Norm2ModeDecode()
     229  put_bit(1, col[0],row[0], width, height, pBitplane->invert,  in vc1_Norm2ModeDecode()
     [all …]
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/arm/armv6/ |
D | vp8_variance_halfpixvar16x16_hv_armv6.asm |
      38  add r9, r0, r1 ; pointer to pixels on the next row
      40  ldr r4, [r0, #0] ; load source pixels a, row N
      41  ldr r6, [r0, #1] ; load source pixels b, row N
      42  ldr r5, [r9, #0] ; load source pixels c, row N+1
      43  ldr r7, [r9, #1] ; load source pixels d, row N+1
      45  ; x = (a + b + 1) >> 1, interpolate pixels horizontally on row N
      49  ; y = (c + d + 1) >> 1, interpolate pixels horizontally on row N+1
      80  ldr r4, [r0, #4] ; load source pixels a, row N
      81  ldr r6, [r0, #5] ; load source pixels b, row N
      82  ldr r5, [r9, #4] ; load source pixels c, row N+1
      [all …]
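The ARMv6 half-pixel variance kernel builds the (horizontal, vertical) half-pel sample by averaging horizontally adjacent pixels on row N and on row N+1 with rounding ((a + b + 1) >> 1), then averaging those two intermediate results. A scalar C sketch of the same interpolation for one output pixel:

    /* Rounded average used for half-pel interpolation. */
    static unsigned char avg_round(unsigned char a, unsigned char b)
    {
        return (unsigned char)((a + b + 1) >> 1);
    }

    /* H+V half-pel sample at (x, y): average the horizontal half-pel values
     * of row y and row y+1 (scalar sketch of the ARMv6 kernel's math). */
    static unsigned char halfpix_hv(const unsigned char *src, int stride,
                                    int x, int y)
    {
        unsigned char h0 = avg_round(src[y * stride + x],
                                     src[y * stride + x + 1]);
        unsigned char h1 = avg_round(src[(y + 1) * stride + x],
                                     src[(y + 1) * stride + x + 1]);
        return avg_round(h0, h1);
    }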
|
D | vp8_sad16x16_armv6.asm |
      37  ; 1st row
      51  add r0, r0, r1 ; set src pointer to next row
      52  add r2, r2, r3 ; set dst pointer to next row
      64  ; 2nd row
      76  add r0, r0, r1 ; set src pointer to next row
      77  add r2, r2, r3 ; set dst pointer to next row
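The SAD kernel walks both blocks one row at a time, advancing each pointer by its own stride once the row has been accumulated. A plain-C reference of a 16x16 SAD that this assembly accelerates (a sketch in the spirit of libvpx's C fallback, not a line-for-line match):

    #include <stdlib.h>

    /* 16x16 sum of absolute differences: accumulate |src - ref| row by row. */
    static unsigned int sad16x16_c(const unsigned char *src, int src_stride,
                                   const unsigned char *ref, int ref_stride)
    {
        unsigned int sad = 0;
        for (int row = 0; row < 16; row++) {
            for (int col = 0; col < 16; col++)
                sad += (unsigned int)abs(src[col] - ref[col]);
            src += src_stride;   /* next source row    */
            ref += ref_stride;   /* next reference row */
        }
        return sad;
    }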
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/ |
D | vp9_mv.h |
      23  int16_t row;  member
      33  int32_t row;  member
      40  mv->row = clamp(mv->row, min_row, max_row);  in clamp_mv()
|
D | vp9_reconinter.c |
      88  const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,  in vp9_build_inter_predictor()
      92  const int subpel_y = mv.row & SUBPEL_MASK;  in vp9_build_inter_predictor()
      94  src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);  in vp9_build_inter_predictor()
     105  MV res = { round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.row +  in mi_mv_pred_q4()
     106  mi->bmi[1].as_mv[idx].as_mv.row +  in mi_mv_pred_q4()
     107  mi->bmi[2].as_mv[idx].as_mv.row +  in mi_mv_pred_q4()
     108  mi->bmi[3].as_mv[idx].as_mv.row),  in mi_mv_pred_q4()
     127  src_mv->row * (1 << (1 - ss_y)),  in clamp_mv_to_umv_border_sb()
     187  scaled_mv.row = mv_q4.row;  in build_inter_predictors()
     192  subpel_y = scaled_mv.row & SUBPEL_MASK;  in build_inter_predictors()
     [all …]
|
D | vp9_mvref_common.c |
      17  int row;  member
     142  mv.as_mv.row *= -1;  in scale_mv()
     183  return !(mi_row + mi_pos->row < 0 ||  in is_inside()
     185  mi_row + mi_pos->row >= mi_rows ||  in is_inside()
     218  const MODE_INFO *const candidate_mi = xd->mi[mv_ref->col + mv_ref->row *  in find_mv_refs_idx()
     238  const MB_MODE_INFO *const candidate = &xd->mi[mv_ref->col + mv_ref->row *  in find_mv_refs_idx()
     264  const MB_MODE_INFO *const candidate = &xd->mi[mv_ref->col + mv_ref->row  in find_mv_refs_idx()
     298  if (mv->row & 1)  in lower_mv_precision()
     299  mv->row += (mv->row > 0 ? -1 : 1);  in lower_mv_precision()
|
D | vp9_tile_common.c |
      24  void vp9_tile_init(TileInfo *tile, const VP9_COMMON *cm, int row, int col) {  in vp9_tile_init() argument
      25  tile->mi_row_start = get_tile_offset(row, cm->mi_rows, cm->log2_tile_rows);  in vp9_tile_init()
      26  tile->mi_row_end = get_tile_offset(row + 1, cm->mi_rows, cm->log2_tile_rows);  in vp9_tile_init()
|
/hardware/ti/omap4-aah/camera/ |
D | Decoder_libjpeg.cpp |
     234  unsigned char *row = &nv12_buffer[0];  in decode() local
     237  for (unsigned int j = 0; j < cinfo.output_height; j++, row += stride)  in decode()
     238  YUV_Planes[0][j] = row;  in decode()
     240  row = &UV_Plane[0];  in decode()
     243  for (unsigned int j = 0; j < cinfo.output_height; j+=2, row += cinfo.output_width / 2){  in decode()
     244  YUV_Planes[1][j+0] = row;  in decode()
     245  YUV_Planes[1][j+1] = row;  in decode()
     249  for (unsigned int j = 0; j < cinfo.output_height; j+=2, row += cinfo.output_width / 2){  in decode()
     250  YUV_Planes[2][j+0] = row;  in decode()
     251  YUV_Planes[2][j+1] = row;  in decode()
|
D | NV12_resize.cpp |
      49  mmUint16 row,col;  in VT_resizeFrame_Video_opt2_lp() local
     116  for ( row = 0; row < cody; row++ ) {  in VT_resizeFrame_Video_opt2_lp()
     119  y = (mmUint16) ((mmUint32) (row*resizeFactorY) >> 9);  in VT_resizeFrame_Video_opt2_lp()
     120  yf = (mmUchar) ((mmUint32)((row*resizeFactorY) >> 6) & 0x7);  in VT_resizeFrame_Video_opt2_lp()
     189  for ( row = 0; row < (((cody)>>1)); row++ ) {  in VT_resizeFrame_Video_opt2_lp()
     195  y = (mmUint16) ((mmUint32) (row*resizeFactorY) >> 9);  in VT_resizeFrame_Video_opt2_lp()
     196  yf = (mmUchar) ((mmUint32)((row*resizeFactorY) >> 6) & 0x7);  in VT_resizeFrame_Video_opt2_lp()
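The OMAP NV12 resizer (here and in the omap4xxx copy below) maps each output row to a fixed-point source coordinate: row*resizeFactorY is treated as a Q9 value, so >> 9 gives the integer source row and the next three fraction bits ((>> 6) & 0x7) give an eighth-step phase for blending adjacent source rows. A small sketch of that coordinate split, assuming factor_q9 is the source/destination height ratio scaled by 512; the exact derivation of resizeFactorY is not shown in the matched lines:

    #include <stdint.h>

    /* Split a Q9 fixed-point source coordinate into integer row + 3-bit phase
     * (sketch; factor_q9 is assumed to encode src_height/dst_height * 512). */
    static void map_output_row(uint32_t out_row, uint32_t factor_q9,
                               uint16_t *src_row, uint8_t *phase_8th)
    {
        uint32_t pos_q9 = out_row * factor_q9;
        *src_row   = (uint16_t)(pos_q9 >> 9);        /* integer source row      */
        *phase_8th = (uint8_t)((pos_q9 >> 6) & 0x7); /* fractional part, 1/8ths */
    }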
|
/hardware/ti/omap4xxx/camera/ |
D | NV12_resize.c |
      36  mmUint16 row,col;  in VT_resizeFrame_Video_opt2_lp() local
     113  for (row=0; row < cody; row++)  in VT_resizeFrame_Video_opt2_lp()
     117  y = (mmUint16) ((mmUint32) (row*resizeFactorY) >> 9);  in VT_resizeFrame_Video_opt2_lp()
     118  yf = (mmUchar) ((mmUint32)((row*resizeFactorY) >> 6) & 0x7);  in VT_resizeFrame_Video_opt2_lp()
     192  for (row=0; row < (((cody)>>1)); row++)  in VT_resizeFrame_Video_opt2_lp()
     199  y = (mmUint16) ((mmUint32) (row*resizeFactorY) >> 9);  in VT_resizeFrame_Video_opt2_lp()
     200  yf = (mmUchar) ((mmUint32)((row*resizeFactorY) >> 6) & 0x7);  in VT_resizeFrame_Video_opt2_lp()
|
/hardware/intel/common/libva/test/videoprocess/ |
D | videoprocess.cpp |
     215  uint32_t row, col;  in construct_nv12_mask_surface() local
     230  for (row = 0; row < surface_image.height; row++) {  in construct_nv12_mask_surface()
     231  if (row < surface_image.height / 4 || row > surface_image.height * 3 / 4)  in construct_nv12_mask_surface()
     240  for (row = 0; row < surface_image.height / 2; row++) {  in construct_nv12_mask_surface()
     264  uint32_t frame_size, i, row, col;  in upload_yv12_frame_to_yuv_surface() local
     302  for (row = 0; row < surface_image.height; row++) {  in upload_yv12_frame_to_yuv_surface()
     312  for (row = 0; row < surface_image.height /2; row ++){  in upload_yv12_frame_to_yuv_surface()
     328  for (row = 0; row < surface_image.height / 2; row++) {  in upload_yv12_frame_to_yuv_surface()
     366  uint32_t frame_size, row, col;  in store_yuv_surface_to_yv12_frame() local
     404  for (row = 0; row < surface_image.height; row++) {  in store_yuv_surface_to_yv12_frame()
     [all …]
|
/hardware/ti/omap4-aah/ion/ |
D | ion_test.c |
      67  size_t row, col;  in _ion_tiler_map_test() local
      69  for (row = 0; row < height; row++)  in _ion_tiler_map_test()
      71  int i = (row * stride) + col;  in _ion_tiler_map_test()
      74  for (row = 0; row < height; row++)  in _ion_tiler_map_test()
      76  int i = (row * stride) + col;  in _ion_tiler_map_test()
|
D | ion_test_2.c |
     121  size_t row, col;  in _ion_tiler_map_test() local
     123  for (row = 0; row < height; row++)  in _ion_tiler_map_test()
     125  int i = (row * stride) + col;  in _ion_tiler_map_test()
     128  for (row = 0; row < height; row++)  in _ion_tiler_map_test()
     130  int i = (row * stride) + col;  in _ion_tiler_map_test()
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/test/android/ |
D | get_files.py |
      98  for row in file_list_reader:
      99  if len(row) != EXPECTED_COL:
     101  file_shas.append(row[SHA_COL])
     102  file_names.append(row[NAME_COL])
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/ |
D | subpixel_mmx.asm |
     106  jnz .nextrow ; next row
     157  movq mm3, [rsi+rdx] ; mm3 = p0..p8 = row -1
     161  movq mm4, [rsi + 4*rdx] ; mm4 = p0..p3 = row 2
     165  movq mm4, [rsi + 2*rdx] ; mm4 = p0..p3 = row 0
     169  movq mm4, [rsi] ; mm4 = p0..p3 = row -2
     175  movq mm4, [rsi + 2*rdx] ; mm4 = p0..p3 = row 1
     179  movq mm4, [rsi + 4*rdx] ; mm4 = p0..p3 = row 3
|