/third_party/ffmpeg/libavcodec/ |
D | vc1_pred.c |
  244  s->mv[0][n][0] = s->current_picture.motion_val[0][xy + v->blocks_off][0] = 0;  in ff_vc1_pred_mv()
  245  s->mv[0][n][1] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = 0;  in ff_vc1_pred_mv()
  246  s->current_picture.motion_val[1][xy + v->blocks_off][0] = 0;  in ff_vc1_pred_mv()
  247  s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;  in ff_vc1_pred_mv()
  249  s->current_picture.motion_val[0][xy + 1 + v->blocks_off][0] = 0;  in ff_vc1_pred_mv()
  250  s->current_picture.motion_val[0][xy + 1 + v->blocks_off][1] = 0;  in ff_vc1_pred_mv()
  251  s->current_picture.motion_val[0][xy + wrap + v->blocks_off][0] = 0;  in ff_vc1_pred_mv()
  252  s->current_picture.motion_val[0][xy + wrap + v->blocks_off][1] = 0;  in ff_vc1_pred_mv()
  253  s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;  in ff_vc1_pred_mv()
  254  s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;  in ff_vc1_pred_mv()
  [all …]
|
D | h263.c |
  60   s->current_picture.mbskip_table[mb_xy] = s->mb_skipped;  in ff_h263_update_motion_val()
  79   s->current_picture.ref_index[0][4*mb_xy    ] =  in ff_h263_update_motion_val()
  80   s->current_picture.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];  in ff_h263_update_motion_val()
  81   s->current_picture.ref_index[0][4*mb_xy + 2] =  in ff_h263_update_motion_val()
  82   s->current_picture.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];  in ff_h263_update_motion_val()
  86   s->current_picture.motion_val[0][xy][0] = motion_x;  in ff_h263_update_motion_val()
  87   s->current_picture.motion_val[0][xy][1] = motion_y;  in ff_h263_update_motion_val()
  88   s->current_picture.motion_val[0][xy + 1][0] = motion_x;  in ff_h263_update_motion_val()
  89   s->current_picture.motion_val[0][xy + 1][1] = motion_y;  in ff_h263_update_motion_val()
  90   s->current_picture.motion_val[0][xy + wrap][0] = motion_x;  in ff_h263_update_motion_val()
  [all …]
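The vc1_pred.c and h263.c hits above share one addressing pattern: a motion vector is written into current_picture.motion_val[0] at four offsets — xy, xy + 1, xy + wrap, and xy + wrap + 1 — i.e. the four 8x8 blocks of one macroblock in a row-major 8x8-block grid with row stride `wrap`. The standalone sketch below only illustrates that indexing; the struct, the stride value, and the base-offset computation are simplified stand-ins, not FFmpeg's real MpegEncContext / b8_stride layout (which additionally accounts for edge padding).

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MB_WIDTH  4                      /* hypothetical picture: 4x2 macroblocks */
    #define MB_HEIGHT 2
    #define B8_WRAP   (2 * MB_WIDTH + 1)     /* 8x8-block row stride, one padding column */

    /* One (x, y) motion vector per 8x8 block, flat row-major array. */
    static int16_t motion_val[(2 * MB_HEIGHT + 1) * B8_WRAP][2];

    /* Store one MV into all four 8x8 blocks of macroblock (mb_x, mb_y),
     * mirroring the xy / xy+1 / xy+wrap / xy+wrap+1 writes listed above. */
    static void set_mb_mv(int mb_x, int mb_y, int mx, int my)
    {
        const int wrap = B8_WRAP;
        const int xy   = 2 * mb_x + 2 * mb_y * wrap;   /* top-left 8x8 block of the MB */
        const int offs[4] = { 0, 1, wrap, wrap + 1 };

        for (int i = 0; i < 4; i++) {
            motion_val[xy + offs[i]][0] = mx;
            motion_val[xy + offs[i]][1] = my;
        }
    }

    int main(void)
    {
        memset(motion_val, 0, sizeof(motion_val));
        set_mb_mv(1, 0, 5, -3);

        const int xy = 2 * 1 + 2 * 0 * B8_WRAP;        /* top-left block of MB (1,0) */
        printf("MB(1,0) top-left block mv = (%d, %d)\n",
               motion_val[xy][0], motion_val[xy][1]);
        return 0;
    }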
|
D | vc1_block.c |
  415  q1 = FFABS(s->current_picture.qscale_table[mb_pos]);  in ff_vc1_pred_dc()
  431  q2 = FFABS(s->current_picture.qscale_table[mb_pos - 1]);  in ff_vc1_pred_dc()
  436  q2 = FFABS(s->current_picture.qscale_table[mb_pos - s->mb_stride]);  in ff_vc1_pred_dc()
  446  q2 = FFABS(s->current_picture.qscale_table[off]);  in ff_vc1_pred_dc()
  775  q1 = s->current_picture.qscale_table[mb_pos];  in vc1_decode_i_block_adv()
  782  q2 = s->current_picture.qscale_table[mb_pos - 1];  in vc1_decode_i_block_adv()
  787  q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];  in vc1_decode_i_block_adv()
  980  q1 = s->current_picture.qscale_table[mb_pos];  in vc1_decode_intra_block()
  982  q2 = s->current_picture.qscale_table[mb_pos - 1];  in vc1_decode_intra_block()
  984  q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];  in vc1_decode_intra_block()
  [all …]
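These vc1_block.c references all read current_picture.qscale_table with the same row-major addressing: the current macroblock at mb_pos, its left neighbour at mb_pos - 1, and its top neighbour at mb_pos - mb_stride. A toy illustration of that lookup follows; the struct, sizes, and boundary handling are invented for the example and are not VC-1's actual availability logic.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Row-major per-macroblock quantizer table, indexed as
     * mb_pos = mb_x + mb_y * mb_stride. */
    typedef struct {
        int     mb_stride;
        int8_t *qscale_table;
    } QTable;

    static int q_left(const QTable *t, int mb_x, int mb_y)
    {
        int mb_pos = mb_x + mb_y * t->mb_stride;
        return mb_x > 0 ? abs(t->qscale_table[mb_pos - 1]) : -1;   /* FFABS() in FFmpeg */
    }

    static int q_top(const QTable *t, int mb_x, int mb_y)
    {
        int mb_pos = mb_x + mb_y * t->mb_stride;
        return mb_y > 0 ? abs(t->qscale_table[mb_pos - t->mb_stride]) : -1;
    }

    int main(void)
    {
        int8_t q[4 * 3] = { 2, 2, 4, 4,
                            6, 6, 8, 8,
                            3, 3, 5, 5 };
        QTable t = { .mb_stride = 4, .qscale_table = q };

        printf("left of MB(2,1) = %d, top of MB(2,1) = %d\n",
               q_left(&t, 2, 1), q_top(&t, 2, 1));
        return 0;
    }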
|
D | h261dec.c |
  208  s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;  in h261_decode_mb_skipped()
  214  if (s->current_picture.motion_val[0]) {  in h261_decode_mb_skipped()
  217  s->current_picture.motion_val[0][b_xy][0] = s->mv[0][0][0];  in h261_decode_mb_skipped()
  218  s->current_picture.motion_val[0][b_xy][1] = s->mv[0][0][1];  in h261_decode_mb_skipped()
  432  s->current_picture.mb_type[xy] = MB_TYPE_INTRA;  in h261_decode_mb()
  439  s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;  in h261_decode_mb()
  443  if (s->current_picture.motion_val[0]) {  in h261_decode_mb()
  446  s->current_picture.motion_val[0][b_xy][0] = s->mv[0][0][0];  in h261_decode_mb()
  447  s->current_picture.motion_val[0][b_xy][1] = s->mv[0][0][1];  in h261_decode_mb()
  626  s->current_picture.f->pict_type = s->pict_type;  in h261_decode_frame()
  [all …]
|
D | svq1enc.c |
  272  s->m.current_picture_ptr = &s->m.current_picture;  in svq1_encode_plane()
  278  s->m.current_picture.f->linesize[0] = stride;  in svq1_encode_plane()
  313  s->m.current_picture.mb_mean = (uint8_t *)s->dummy;  in svq1_encode_plane()
  314  s->m.current_picture.mb_var = (uint16_t *)s->dummy;  in svq1_encode_plane()
  315  s->m.current_picture.mc_mb_var = (uint16_t *)s->dummy;  in svq1_encode_plane()
  316  s->m.current_picture.mb_type = s->dummy;  in svq1_encode_plane()
  318  s->m.current_picture.motion_val[0] = s->motion_val8[plane] + 2;  in svq1_encode_plane()
  510  av_frame_free(&s->current_picture);  in svq1_encode_end()
  530  s->current_picture = av_frame_alloc();  in svq1_encode_init()
  532  if (!s->current_picture || !s->last_picture) {  in svq1_encode_init()
  [all …]
|
D | snowenc.c |
  257   const int stride= s->current_picture->linesize[0];  in encode_q_branch()
  258   const int uvstride= s->current_picture->linesize[1];  in encode_q_branch()
  521   const int ref_stride= s->current_picture->linesize[plane_index];  in get_dc()
  615   const int ref_stride= s->current_picture->linesize[plane_index];  in get_block_rd()
  616   uint8_t *dst= s->current_picture->data[plane_index];  in get_block_rd()
  718   const int ref_stride= s->current_picture->linesize[plane_index];  in get_4block_rd()
  719   uint8_t *dst= s->current_picture->data[plane_index];  in get_4block_rd()
  1075  uint8_t *dst= s->current_picture->data[0];  in iterative_me()
  1076  const int stride= s->current_picture->linesize[0];  in iterative_me()
  1525  s->m.current_picture.mb_var_sum= coef_sum;  in ratecontrol_1pass()
  [all …]
|
D | dxva2_h264.c |
  51   const H264Picture *current_picture = h->cur_pic_ptr;  in fill_picture_parameters() local
  59   ff_dxva2_get_surface_index(avctx, ctx, current_picture->f),  in fill_picture_parameters()
  131  current_picture->field_poc[0] != INT_MAX)  in fill_picture_parameters()
  132  pp->CurrFieldOrderCnt[0] = current_picture->field_poc[0];  in fill_picture_parameters()
  135  current_picture->field_poc[1] != INT_MAX)  in fill_picture_parameters()
  136  pp->CurrFieldOrderCnt[1] = current_picture->field_poc[1];  in fill_picture_parameters()
  304  const H264Picture *current_picture = h->cur_pic_ptr;  in commit_bitstream_and_slice_buffer() local
  305  struct dxva2_picture_context *ctx_pic = current_picture->hwaccel_picture_private;  in commit_bitstream_and_slice_buffer()
  472  const H264Picture *current_picture = h->cur_pic_ptr;  in dxva2_h264_decode_slice() local
  473  struct dxva2_picture_context *ctx_pic = current_picture->hwaccel_picture_private;  in dxva2_h264_decode_slice()
|
D | motion_est.c |
  508  s->current_picture.motion_val[0][mot_xy    ][0] = mx;  in set_p_mv_tables()
  509  s->current_picture.motion_val[0][mot_xy    ][1] = my;  in set_p_mv_tables()
  510  s->current_picture.motion_val[0][mot_xy + 1][0] = mx;  in set_p_mv_tables()
  511  s->current_picture.motion_val[0][mot_xy + 1][1] = my;  in set_p_mv_tables()
  514  s->current_picture.motion_val[0][mot_xy    ][0] = mx;  in set_p_mv_tables()
  515  s->current_picture.motion_val[0][mot_xy    ][1] = my;  in set_p_mv_tables()
  516  s->current_picture.motion_val[0][mot_xy + 1][0] = mx;  in set_p_mv_tables()
  517  s->current_picture.motion_val[0][mot_xy + 1][1] = my;  in set_p_mv_tables()
  599  P_LEFT[0] = s->current_picture.motion_val[0][mot_xy - 1][0];  in h263_mv4_search()
  600  P_LEFT[1] = s->current_picture.motion_val[0][mot_xy - 1][1];  in h263_mv4_search()
  [all …]
|
D | mpegvideo_xvmc.c |
  45   struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.f->data[2];  in ff_xvmc_init_block()
  90   struct xvmc_pix_fmt *last, *next, *render = (struct xvmc_pix_fmt*)s->current_picture.f->data[2];  in ff_xvmc_field_start()
  159  struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.f->data[2];  in ff_xvmc_field_end()
  198  s->current_picture.qscale_table[mb_xy] = s->qscale;  in ff_xvmc_decode_mb()
  201  render = (struct xvmc_pix_fmt*)s->current_picture.f->data[2];  in ff_xvmc_decode_mb()
|
D | dxva2_hevc.c |
  60   const HEVCFrame *current_picture = h->ref;  in fill_picture_parameters() local
  79   …fill_picture_entry(&pp->CurrPic, ff_dxva2_get_surface_index(avctx, ctx, current_picture->frame), 0…  in fill_picture_parameters()
  165  …if (&h->DPB[j] != current_picture && (h->DPB[j].flags & (HEVC_FRAME_FLAG_LONG_REF | HEVC_FRAME_FLA…  in fill_picture_parameters()
  243  const HEVCFrame *current_picture = h->ref;  in commit_bitstream_and_slice_buffer() local
  244  struct hevc_dxva2_picture_context *ctx_pic = current_picture->hwaccel_picture_private;  in commit_bitstream_and_slice_buffer()
  385  const HEVCFrame *current_picture = h->ref;  in dxva2_hevc_decode_slice() local
  386  struct hevc_dxva2_picture_context *ctx_pic = current_picture->hwaccel_picture_private;  in dxva2_hevc_decode_slice()
|
D | mpeg_er.c |
  84  s->dest[0] = s->current_picture.f->data[0] +  in mpeg_er_decode_mb()
  87  s->dest[1] = s->current_picture.f->data[1] +  in mpeg_er_decode_mb()
  90  s->dest[2] = s->current_picture.f->data[2] +  in mpeg_er_decode_mb()
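The mpeg_er.c lines are cut off in this listing after the plane base pointers, but the pattern they start is the usual one: the per-macroblock destination pointers are the current picture's plane pointers plus a macroblock offset within that plane. The sketch below shows that arithmetic generically for a 4:2:0 layout; the 16/8 block sizes, the buffer dimensions, and the function name are assumptions for the example, not copied from mpeg_er.c.

    #include <stdint.h>
    #include <stdio.h>

    /* Compute luma/chroma destination pointers for macroblock (mb_x, mb_y),
     * assuming 16x16 luma macroblocks and 4:2:0 subsampling. data[] and
     * linesize[] play the role of current_picture.f->data / ->linesize. */
    static void mb_dest(uint8_t *const data[3], const int linesize[3],
                        int mb_x, int mb_y, uint8_t *dest[3])
    {
        dest[0] = data[0] + mb_y * 16 * linesize[0] + mb_x * 16;  /* Y  */
        dest[1] = data[1] + mb_y *  8 * linesize[1] + mb_x *  8;  /* Cb */
        dest[2] = data[2] + mb_y *  8 * linesize[2] + mb_x *  8;  /* Cr */
    }

    int main(void)
    {
        static uint8_t y[64 * 64], cb[32 * 32], cr[32 * 32];
        uint8_t *const data[3] = { y, cb, cr };
        const int linesize[3]  = { 64, 32, 32 };
        uint8_t *dest[3];

        mb_dest(data, linesize, 1, 2, dest);
        printf("luma offset of MB(1,2): %td bytes\n", (ptrdiff_t)(dest[0] - y));
        return 0;
    }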
|
D | snow.c |
  512  s->current_picture = av_frame_alloc();  in ff_snow_common_init()
  513  if (!s->mconly_picture || !s->current_picture)  in ff_snow_common_init()
  655  …s->halfpel_plane[s->max_ref_frames-1][1+i/3][i%3] - EDGE_WIDTH*(1+s->current_picture->linesize[i%3…  in ff_snow_release_buffer()
  671  if(USE_HALFPEL_PLANE && s->current_picture->data[0]) {  in ff_snow_frame_start()
  672  if((ret = halfpel_interpol(s, s->halfpel_plane[0], s->current_picture)) < 0)  in ff_snow_frame_start()
  675  s->last_picture[0] = s->current_picture;  in ff_snow_frame_start()
  676  s->current_picture = tmp;  in ff_snow_frame_start()
  691  if ((ret = ff_snow_get_buffer(s, s->current_picture)) < 0)  in ff_snow_frame_start()
  694  s->current_picture->key_frame= s->keyframe;  in ff_snow_frame_start()
  723  av_assert0(s->last_picture[i]->data[0] != s->current_picture->data[0]);  in ff_snow_common_end()
  [all …]
|
D | mpegvideo_enc.c |
  222   int8_t * const qscale_table = s->current_picture.qscale_table;  in ff_init_qscale_tab()
  238   COPY(current_picture);  in update_duplicate_context_after_me()
  1706  ff_mpeg_unref_picture(s->avctx, &s->current_picture);  in select_input_picture()
  1707  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,  in select_input_picture()
  1719  s->current_picture.reference &&  in frame_end()
  1724  s->mpvencdsp.draw_edges(s->current_picture.f->data[0],  in frame_end()
  1725  s->current_picture.f->linesize[0],  in frame_end()
  1729  s->mpvencdsp.draw_edges(s->current_picture.f->data[1],  in frame_end()
  1730  s->current_picture.f->linesize[1],  in frame_end()
  1736  s->mpvencdsp.draw_edges(s->current_picture.f->data[2],  in frame_end()
  [all …]
|
D | vc1_mc.c |
  199  s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][0] = mx;  in ff_vc1_mc_1mv()
  200  s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][1] = my;  in ff_vc1_mc_1mv()
  222  srcY = s->current_picture.f->data[0];  in ff_vc1_mc_1mv()
  223  srcU = s->current_picture.f->data[1];  in ff_vc1_mc_1mv()
  224  srcV = s->current_picture.f->data[2];  in ff_vc1_mc_1mv()
  477  srcY = s->current_picture.f->data[0];  in ff_vc1_mc_4mv_luma()
  506  … &s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],  in ff_vc1_mc_4mv_luma()
  507  … &s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1]);  in ff_vc1_mc_4mv_luma()
  518  s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][0] = mx;  in ff_vc1_mc_4mv_luma()
  519  s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][1] = my;  in ff_vc1_mc_4mv_luma()
  [all …]
|
D | mpeg4videodec.c |
  75   int8_t *const qscale_table = s->current_picture.qscale_table;  in ff_mpeg4_pred_ac()
  724  s->current_picture.mb_type[xy] = MB_TYPE_INTRA;  in mpeg4_decode_partition_a()
  730  s->current_picture.qscale_table[xy] = s->qscale;  in mpeg4_decode_partition_a()
  748  int16_t *const mot_val = s->current_picture.motion_val[0][s->block_index[0]];  in mpeg4_decode_partition_a()
  761  s->current_picture.mb_type[xy] = MB_TYPE_SKIP |  in mpeg4_decode_partition_a()
  768  s->current_picture.mb_type[xy] = MB_TYPE_SKIP |  in mpeg4_decode_partition_a()
  801  s->current_picture.mb_type[xy] = MB_TYPE_INTRA;  in mpeg4_decode_partition_a()
  834  s->current_picture.mb_type[xy] = MB_TYPE_16x16 |  in mpeg4_decode_partition_a()
  839  s->current_picture.mb_type[xy] = MB_TYPE_16x16 |  in mpeg4_decode_partition_a()
  854  s->current_picture.mb_type[xy] = MB_TYPE_8x8 |  in mpeg4_decode_partition_a()
  [all …]
|
D | ituh263dec.c |
  356  mot_val = s->current_picture.motion_val[0][s->block_index[0]];  in preview_obmc()
  362  s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;  in preview_obmc()
  369  s->current_picture.mb_type[xy] = MB_TYPE_INTRA;  in preview_obmc()
  381  s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;  in preview_obmc()
  399  s->current_picture.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;  in preview_obmc()
  715  s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;  in ff_h263_decode_mb()
  753  s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;  in ff_h263_decode_mb()
  778  s->current_picture.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;  in ff_h263_decode_mb()
  806  int16_t *mot_val0 = s->current_picture.motion_val[0][2 * (s->mb_x + s->mb_y * stride)];  in ff_h263_decode_mb()
  807  int16_t *mot_val1 = s->current_picture.motion_val[1][2 * (s->mb_x + s->mb_y * stride)];  in ff_h263_decode_mb()
  [all …]
|
D | wmv2dec.c |
  267  ff_intrax8_decode_picture(&w->x8, &s->current_picture,  in ff_wmv2_decode_secondary_picture_header()
  302  mot_val = s->current_picture.motion_val[0][xy];  in wmv2_pred_motion()
  304  A = s->current_picture.motion_val[0][xy - 1];  in wmv2_pred_motion()
  305  B = s->current_picture.motion_val[0][xy - wrap];  in wmv2_pred_motion()
  306  C = s->current_picture.motion_val[0][xy + 2 - wrap];  in wmv2_pred_motion()
  388  if (IS_SKIP(s->current_picture.mb_type[s->mb_y * s->mb_stride + s->mb_x])) {  in ff_wmv2_decode_mb()
|
D | mpegvideo.c |
  594   UPDATE_PICTURE(current_picture);  in ff_mpeg_update_thread_context()
  837   memset(&s->current_picture, 0, sizeof(s->current_picture));  in clear_context()
  970   !(s->current_picture.f = av_frame_alloc()) ||  in ff_mpv_common_init()
  1142  ff_free_picture_tables(&s->current_picture);  in ff_mpv_common_end()
  1143  ff_mpeg_unref_picture(s->avctx, &s->current_picture);  in ff_mpv_common_end()
  1144  av_frame_free(&s->current_picture.f);  in ff_mpv_common_end()
  1209  ff_mpeg_unref_picture(s->avctx, &s->current_picture);  in ff_mpv_frame_start()
  1261  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,  in ff_mpv_frame_start()
  1374  s->current_picture.f->data[i] +=  in ff_mpv_frame_start()
  1375  s->current_picture.f->linesize[i];  in ff_mpv_frame_start()
  [all …]
|
D | vc1dec.c |
  203   uint8_t *iplane = s->current_picture.f->data[plane];  in vc1_draw_sprites()
  204   int iline = s->current_picture.f->linesize[plane];  in vc1_draw_sprites()
  285   if (!s->current_picture.f || !s->current_picture.f->data[0]) {  in vc1_decode_sprites()
  308   AVFrame *f = s->current_picture.f;  in vc1_sprite_flush()
  859   s->current_picture.f->pict_type = s->pict_type;  in vc1_decode_frame()
  860   s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;  in vc1_decode_frame()
  1032  s->current_picture.f->linesize[0] <<= 1;  in vc1_decode_frame()
  1033  s->current_picture.f->linesize[1] <<= 1;  in vc1_decode_frame()
  1034  s->current_picture.f->linesize[2] <<= 1;  in vc1_decode_frame()
  1109  s->current_picture.f->linesize[0] >>= 1;  in vc1_decode_frame()
  [all …]
|
D | vc1_loopfilter.c |
  503  … &s->current_picture.motion_val[0][s->block_index[i] - 4 * s->b8_stride - 2 + v->blocks_off],  in ff_vc1_p_loop_filter()
  523  … &s->current_picture.motion_val[0][s->block_index[i] - 4 * s->b8_stride + v->blocks_off],  in ff_vc1_p_loop_filter()
  546  … &s->current_picture.motion_val[0][s->block_index[i] - 2 * s->b8_stride - 2 + v->blocks_off],  in ff_vc1_p_loop_filter()
  565  … &s->current_picture.motion_val[0][s->block_index[i] - 2 + v->blocks_off],  in ff_vc1_p_loop_filter()
  586  … &s->current_picture.motion_val[0][s->block_index[i] - 2 * s->b8_stride + v->blocks_off],  in ff_vc1_p_loop_filter()
  605  … &s->current_picture.motion_val[0][s->block_index[i] + v->blocks_off],  in ff_vc1_p_loop_filter()
  628  … &s->current_picture.motion_val[0][s->block_index[i] - 4 * s->b8_stride - 4 + v->blocks_off],  in ff_vc1_p_loop_filter()
  649  … &s->current_picture.motion_val[0][s->block_index[i] - 4 * s->b8_stride - 2 + v->blocks_off],  in ff_vc1_p_loop_filter()
  668  … &s->current_picture.motion_val[0][s->block_index[i] - 4 * s->b8_stride + v->blocks_off],  in ff_vc1_p_loop_filter()
  691  … &s->current_picture.motion_val[0][s->block_index[i] - 2 * s->b8_stride - 4 + v->blocks_off],  in ff_vc1_p_loop_filter()
  [all …]
|
D | snowdec.c |
  45   int ref_stride= s->current_picture->linesize[plane_index];  in predict_slice_buffered()
  46   uint8_t *dst8= s->current_picture->data[plane_index];  in predict_slice_buffered()
  457  s->current_picture->pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P  in decode_frame()
  484  s->current_picture->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;  in decode_frame()
  526  … int v= s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x];  in decode_frame()
  623  res = av_frame_ref(picture, s->current_picture);  in decode_frame()
|
/third_party/gstreamer/gstplugins_bad/gst-libs/gst/codecs/ |
D | gstav1decoder.c |
  44   GstAV1Picture *current_picture;  member
  113  gst_av1_picture_clear (&priv->current_picture);  in gst_av1_decoder_reset()
  313  GstAV1Picture *picture = priv->current_picture;  in gst_av1_decoder_decode_tile_group()
  351  if (priv->current_picture != NULL) {  in gst_av1_decoder_decode_frame_header()
  385  priv->current_picture = picture;  in gst_av1_decoder_decode_frame_header()
  406  priv->current_picture = picture;  in gst_av1_decoder_decode_frame_header()
  417  g_assert (priv->current_picture != NULL);  in gst_av1_decoder_decode_frame_header()
  533  GstAV1Picture *picture = priv->current_picture;  in gst_av1_decoder_update_state()
  571  g_assert (!priv->current_picture);  in gst_av1_decoder_handle_frame()
  597  if (!priv->current_picture) {  in gst_av1_decoder_handle_frame()
  [all …]
|
D | gsth264decoder.c |
  108  GstH264Picture *current_picture;  member
  208  GstH264Picture * current_picture);
  572  gst_h264_picture_clear (&priv->current_picture);  in gst_h264_decoder_handle_frame()
  738  if (!priv->current_picture) {  in gst_h264_decoder_preprocess_slice()
  751  GstH264Picture * current_picture, gint frame_num)  in gst_h264_decoder_update_pic_nums() argument
  764  if (GST_H264_PICTURE_IS_FRAME (current_picture))  in gst_h264_decoder_update_pic_nums()
  766  else if (current_picture->field == picture->field)  in gst_h264_decoder_update_pic_nums()
  776  if (GST_H264_PICTURE_IS_FRAME (current_picture))  in gst_h264_decoder_update_pic_nums()
  778  else if (picture->field == current_picture->field)  in gst_h264_decoder_update_pic_nums()
  916  GstH264Picture * current_picture, GstFlowReturn * ret)  in _bump_dpb() argument
  [all …]
|
D | gstmpeg2decoder.c |
  261  GstMpeg2Picture *current_picture;  member
  818  gst_mpeg2_dpb_get_neighbours (priv->dpb, priv->current_picture,  in gst_mpeg2_decoder_start_current_picture()
  821  if (priv->current_picture->type == GST_MPEG_VIDEO_PICTURE_TYPE_B  in gst_mpeg2_decoder_start_current_picture()
  827  ret = klass->start_picture (decoder, priv->current_picture, slice,  in gst_mpeg2_decoder_start_current_picture()
  847  if (priv->current_picture) {  in gst_mpeg2_decoder_ensure_current_picture()
  918  priv->current_picture = picture;  in gst_mpeg2_decoder_ensure_current_picture()
  938  if (priv->current_picture == NULL)  in gst_mpeg2_decoder_finish_current_field()
  941  ret = klass->end_picture (decoder, priv->current_picture);  in gst_mpeg2_decoder_finish_current_field()
  947  if (priv->current_picture->structure !=  in gst_mpeg2_decoder_finish_current_field()
  949  !priv->current_picture->first_field) {  in gst_mpeg2_decoder_finish_current_field()
  [all …]
|
D | gsth265decoder.c |
  93    GstH265Picture *current_picture;  member
  589   GstH265Picture *picture = priv->current_picture;  in gst_h265_decoder_decode_slice()
  623   if (priv->current_picture && slice_hdr->first_slice_segment_in_pic_flag) {  in gst_h265_decoder_preprocess_slice()
  680   if (!priv->current_picture) {  in gst_h265_decoder_parse_slice()
  692   priv->current_picture = picture;  in gst_h265_decoder_parse_slice()
  699   priv->current_picture = NULL;  in gst_h265_decoder_parse_slice()
  711   if (!priv->current_picture)  in gst_h265_decoder_parse_slice()
  1207  priv->current_picture)) {  in gst_h265_decoder_init_current_picture()
  1212  &priv->current_slice, priv->current_picture))  in gst_h265_decoder_init_current_picture()
  1216  priv->current_picture->pic_struct = priv->cur_pic_struct;  in gst_h265_decoder_init_current_picture()
  [all …]
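Across these gstreamer codec base classes the private member current_picture follows the same lifecycle: it is NULL between frames, is allocated when a frame or picture header is parsed, is passed to the subclass's start_picture/decode_slice/end_picture hooks, and is cleared on reset or once the frame is finished. The sketch below is only a toy model of that state machine under those assumptions; the types, callbacks, and function names are invented and are not the actual GStreamer API.

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Toy model of the current_picture lifecycle; all names are hypothetical. */
    typedef struct { int frame_num; } Picture;

    typedef struct {
        Picture *current_picture;                /* NULL between frames */
        int (*start_picture)(Picture *p);
        int (*decode_slice)(Picture *p, int slice);
        int (*end_picture)(Picture *p);
    } Decoder;

    static void decoder_reset(Decoder *d)
    {
        free(d->current_picture);                /* analogue of gst_*_picture_clear() */
        d->current_picture = NULL;
    }

    static int decoder_handle_frame(Decoder *d, int frame_num, int n_slices)
    {
        assert(d->current_picture == NULL);      /* cf. g_assert (!priv->current_picture) */

        d->current_picture = calloc(1, sizeof(*d->current_picture));
        if (!d->current_picture)
            return -1;
        d->current_picture->frame_num = frame_num;

        if (d->start_picture(d->current_picture) < 0)
            goto fail;
        for (int i = 0; i < n_slices; i++)
            if (d->decode_slice(d->current_picture, i) < 0)
                goto fail;
        if (d->end_picture(d->current_picture) < 0)
            goto fail;

        decoder_reset(d);                        /* picture handed off / dropped */
        return 0;
    fail:
        decoder_reset(d);
        return -1;
    }

    static int ok_start(Picture *p)        { printf("start %d\n", p->frame_num); return 0; }
    static int ok_slice(Picture *p, int s) { printf("slice %d of frame %d\n", s, p->frame_num); return 0; }
    static int ok_end(Picture *p)          { printf("end %d\n", p->frame_num); return 0; }

    int main(void)
    {
        Decoder d = { NULL, ok_start, ok_slice, ok_end };
        return decoder_handle_frame(&d, 0, 3);
    }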
|