1 /* GStreamer
2 * Copyright (C) 2020 Igalia, S.L.
3 * Author: Víctor Jáquez <vjaquez@igalia.com>
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Library General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Library General Public License for more details.
14 *
15 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
17 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
18 * Boston, MA 02110-1301, USA.
19 */
20
21 /**
22 * SECTION:element-vavp9dec
23 * @title: vavp9dec
24 * @short_description: A VA-API based VP9 video decoder
25 *
26 * vavp9dec decodes VP9 bitstreams to VA surfaces using the
27 * installed and chosen [VA-API](https://01.org/linuxmedia/vaapi)
28 * driver.
29 *
30 * The decoding surfaces can be mapped onto main memory as video
31 * frames.
32 *
33 * ## Example launch line
34 * ```
35 * gst-launch-1.0 filesrc location=sample.webm ! parsebin ! vavp9dec ! autovideosink
36 * ```
37 *
38 * Since: 1.20
39 *
40 */
41
42 #ifdef HAVE_CONFIG_H
43 #include "config.h"
44 #endif
45
46 #include "gstvavp9dec.h"
47
48 #include "gstvabasedec.h"
49
50 GST_DEBUG_CATEGORY_STATIC (gst_va_vp9dec_debug);
51 #ifndef GST_DISABLE_GST_DEBUG
52 #define GST_CAT_DEFAULT gst_va_vp9dec_debug
53 #else
54 #define GST_CAT_DEFAULT NULL
55 #endif
56
/* Plain cast helpers: the GType is registered dynamically at runtime in
 * gst_va_vp9_dec_register(), so no G_DECLARE_* boilerplate exists. */
#define GST_VA_VP9_DEC(obj) ((GstVaVp9Dec *) obj)
#define GST_VA_VP9_DEC_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS ((obj), G_TYPE_FROM_INSTANCE (obj), GstVaVp9DecClass))
#define GST_VA_VP9_DEC_CLASS(klass) ((GstVaVp9DecClass *) klass)

typedef struct _GstVaVp9Dec GstVaVp9Dec;
typedef struct _GstVaVp9DecClass GstVaVp9DecClass;

struct _GstVaVp9DecClass
{
  GstVaBaseDecClass parent_class;
};

struct _GstVaVp9Dec
{
  GstVaBaseDec parent;
  /* Per-segment dequant and loop-filter state derived from each frame
   * header by _update_segmentation() and consumed by _fill_slice(). */
  GstVp9Segmentation segmentation[GST_VP9_MAX_SEGMENTS];
};

/* Parent class pointer, fetched in class_init; used to chain up. */
static GstElementClass *parent_class = NULL;

/* *INDENT-OFF* */
/* Source pad template: NV12 either in VA surfaces or, as a fallback,
 * mapped into system memory. */
static const gchar *src_caps_str =
    GST_VIDEO_CAPS_MAKE_WITH_FEATURES (GST_CAPS_FEATURE_MEMORY_VA,
        "{ NV12 }") " ;"
    GST_VIDEO_CAPS_MAKE ("{ NV12 }");
/* *INDENT-ON* */

static const gchar *sink_caps_str = "video/x-vp9";
85
86 static guint
_get_rtformat(GstVaVp9Dec * self,GstVP9Profile profile,GstVp9BitDepth bit_depth,gint subsampling_x,gint subsampling_y)87 _get_rtformat (GstVaVp9Dec * self, GstVP9Profile profile,
88 GstVp9BitDepth bit_depth, gint subsampling_x, gint subsampling_y)
89 {
90 switch (profile) {
91 case GST_VP9_PROFILE_0:
92 return VA_RT_FORMAT_YUV420;
93 case GST_VP9_PROFILE_1:
94 if (subsampling_x == 1 && subsampling_y == 0)
95 return VA_RT_FORMAT_YUV422;
96 else if (subsampling_x == 0 && subsampling_y == 0)
97 return VA_RT_FORMAT_YUV444;
98 break;
99 case GST_VP9_PROFILE_2:
100 if (bit_depth == GST_VP9_BIT_DEPTH_10)
101 return VA_RT_FORMAT_YUV420_10;
102 else if (bit_depth == GST_VP9_BIT_DEPTH_12)
103 return VA_RT_FORMAT_YUV420_12;
104 break;
105 case GST_VP9_PROFILE_3:
106 if (subsampling_x == 1 && subsampling_y == 0) {
107 if (bit_depth == GST_VP9_BIT_DEPTH_10)
108 return VA_RT_FORMAT_YUV422_10;
109 else if (bit_depth == GST_VP9_BIT_DEPTH_12)
110 return VA_RT_FORMAT_YUV422_12;
111 } else if (subsampling_x == 0 && subsampling_y == 0) {
112 if (bit_depth == GST_VP9_BIT_DEPTH_10)
113 return VA_RT_FORMAT_YUV444_10;
114 else if (bit_depth == GST_VP9_BIT_DEPTH_12)
115 return VA_RT_FORMAT_YUV444_12;
116 }
117 break;
118 default:
119 break;
120 }
121
122 GST_ERROR_OBJECT (self, "Unsupported chroma format");
123 return 0;
124 }
125
126 static VAProfile
_get_profile(GstVaVp9Dec * self,GstVP9Profile profile)127 _get_profile (GstVaVp9Dec * self, GstVP9Profile profile)
128 {
129 switch (profile) {
130 case GST_VP9_PROFILE_0:
131 return VAProfileVP9Profile0;
132 case GST_VP9_PROFILE_1:
133 return VAProfileVP9Profile1;
134 case GST_VP9_PROFILE_2:
135 return VAProfileVP9Profile2;
136 case GST_VP9_PROFILE_3:
137 return VAProfileVP9Profile3;
138 default:
139 break;
140 }
141
142 GST_ERROR_OBJECT (self, "Unsupported profile");
143 return VAProfileNone;
144 }
145
146 static GstFlowReturn
gst_va_vp9_new_sequence(GstVp9Decoder * decoder,const GstVp9FrameHeader * frame_hdr)147 gst_va_vp9_new_sequence (GstVp9Decoder * decoder,
148 const GstVp9FrameHeader * frame_hdr)
149 {
150 GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
151 GstVaVp9Dec *self = GST_VA_VP9_DEC (decoder);
152 VAProfile profile;
153 gboolean negotiation_needed = FALSE;
154 guint rt_format;
155
156 profile = _get_profile (self, frame_hdr->profile);
157 if (profile == VAProfileNone)
158 return GST_FLOW_NOT_NEGOTIATED;
159
160 if (!gst_va_decoder_has_profile (base->decoder, profile)) {
161 GST_ERROR_OBJECT (self, "Profile %s is not supported",
162 gst_va_profile_name (profile));
163 return GST_FLOW_NOT_NEGOTIATED;
164 }
165
166 rt_format = _get_rtformat (self, frame_hdr->profile, frame_hdr->bit_depth,
167 frame_hdr->subsampling_x, frame_hdr->subsampling_y);
168 if (rt_format == 0)
169 return GST_FLOW_NOT_NEGOTIATED;
170
171 if (!gst_va_decoder_config_is_equal (base->decoder, profile,
172 rt_format, frame_hdr->width, frame_hdr->height)) {
173 base->profile = profile;
174 base->width = frame_hdr->width;
175 base->height = frame_hdr->height;
176 base->rt_format = rt_format;
177 negotiation_needed = TRUE;
178 }
179
180 base->min_buffers = GST_VP9_REF_FRAMES;
181
182 base->need_negotiation = negotiation_needed;
183
184 return GST_FLOW_OK;
185 }
186
187 static GstFlowReturn
_check_resolution_change(GstVaVp9Dec * self,GstVp9Picture * picture)188 _check_resolution_change (GstVaVp9Dec * self, GstVp9Picture * picture)
189 {
190 GstVaBaseDec *base = GST_VA_BASE_DEC (self);
191 const GstVp9FrameHeader *frame_hdr = &picture->frame_hdr;
192
193 if ((base->width != frame_hdr->width) || base->height != frame_hdr->height) {
194 base->width = frame_hdr->width;
195 base->height = frame_hdr->height;
196
197 base->need_negotiation = TRUE;
198 if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (self))) {
199 GST_ERROR_OBJECT (self, "Resolution changed, but failed to"
200 " negotiate with downstream");
201 return GST_FLOW_NOT_NEGOTIATED;
202
203 /* @TODO: if negotiation fails, decoder should resize output
204 * frame. For that we would need an auxiliar allocator, and
205 * later use GstVaFilter or GstVideoConverter. */
206 }
207 }
208
209 return GST_FLOW_OK;
210 }
211
212 static GstFlowReturn
gst_va_vp9_dec_new_picture(GstVp9Decoder * decoder,GstVideoCodecFrame * frame,GstVp9Picture * picture)213 gst_va_vp9_dec_new_picture (GstVp9Decoder * decoder,
214 GstVideoCodecFrame * frame, GstVp9Picture * picture)
215 {
216 GstFlowReturn ret;
217 GstVaVp9Dec *self = GST_VA_VP9_DEC (decoder);
218 GstVaDecodePicture *pic;
219 GstVideoDecoder *vdec = GST_VIDEO_DECODER (decoder);
220 GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
221
222 ret = _check_resolution_change (self, picture);
223 if (ret != GST_FLOW_OK)
224 return ret;
225
226 if (base->need_negotiation) {
227 if (!gst_video_decoder_negotiate (vdec)) {
228 GST_ERROR_OBJECT (self, "Failed to negotiate with downstream");
229 return GST_FLOW_NOT_NEGOTIATED;
230 }
231 }
232
233 ret = gst_video_decoder_allocate_output_frame (vdec, frame);
234 if (ret != GST_FLOW_OK)
235 goto error;
236
237 pic = gst_va_decode_picture_new (base->decoder, frame->output_buffer);
238
239 gst_vp9_picture_set_user_data (picture, pic,
240 (GDestroyNotify) gst_va_decode_picture_free);
241
242 GST_LOG_OBJECT (self, "New va decode picture %p - %#x", pic,
243 gst_va_decode_picture_get_surface (pic));
244
245 return GST_FLOW_OK;
246
247 error:
248 {
249 GST_WARNING_OBJECT (self, "Failed to allocated output buffer, return %s",
250 gst_flow_get_name (ret));
251 return ret;
252 }
253 }
254
255 static inline gboolean
_fill_param(GstVp9Decoder * decoder,GstVp9Picture * picture,GstVp9Dpb * dpb)256 _fill_param (GstVp9Decoder * decoder, GstVp9Picture * picture, GstVp9Dpb * dpb)
257 {
258 GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
259 GstVaDecodePicture *va_pic;
260 const GstVp9FrameHeader *frame_hdr = &picture->frame_hdr;
261 const GstVp9LoopFilterParams *lfp = &frame_hdr->loop_filter_params;
262 const GstVp9SegmentationParams *sp = &frame_hdr->segmentation_params;
263 VADecPictureParameterBufferVP9 pic_param;
264 guint i;
265
266 /* *INDENT-OFF* */
267 pic_param = (VADecPictureParameterBufferVP9) {
268 .frame_width = base->width,
269 .frame_height = base->height,
270
271 .pic_fields.bits = {
272 .subsampling_x = frame_hdr->subsampling_x,
273 .subsampling_y = frame_hdr->subsampling_x,
274 .frame_type = frame_hdr->frame_type,
275 .show_frame = frame_hdr->show_frame,
276 .error_resilient_mode = frame_hdr->error_resilient_mode,
277 .intra_only = frame_hdr->intra_only,
278 .allow_high_precision_mv = frame_hdr->allow_high_precision_mv,
279 .mcomp_filter_type = frame_hdr->interpolation_filter,
280 .frame_parallel_decoding_mode = frame_hdr->frame_parallel_decoding_mode,
281 .reset_frame_context = frame_hdr->reset_frame_context,
282 .refresh_frame_context = frame_hdr->refresh_frame_context,
283 .frame_context_idx = frame_hdr->frame_context_idx,
284
285 .segmentation_enabled = sp->segmentation_enabled,
286 .segmentation_temporal_update = sp->segmentation_temporal_update,
287 .segmentation_update_map = sp->segmentation_update_map,
288
289 .last_ref_frame =
290 frame_hdr->ref_frame_idx[GST_VP9_REF_FRAME_LAST - 1],
291 .last_ref_frame_sign_bias =
292 frame_hdr->ref_frame_sign_bias[GST_VP9_REF_FRAME_LAST],
293 .golden_ref_frame =
294 frame_hdr->ref_frame_idx[GST_VP9_REF_FRAME_GOLDEN - 1],
295 .golden_ref_frame_sign_bias =
296 frame_hdr->ref_frame_sign_bias[GST_VP9_REF_FRAME_GOLDEN],
297 .alt_ref_frame =
298 frame_hdr->ref_frame_idx[GST_VP9_REF_FRAME_ALTREF - 1],
299 .alt_ref_frame_sign_bias =
300 frame_hdr->ref_frame_sign_bias[GST_VP9_REF_FRAME_ALTREF],
301
302 .lossless_flag = frame_hdr->lossless_flag,
303 },
304
305 .filter_level = lfp->loop_filter_level,
306 .sharpness_level = lfp->loop_filter_sharpness,
307 .log2_tile_rows = frame_hdr->tile_rows_log2,
308 .log2_tile_columns = frame_hdr->tile_cols_log2,
309
310 .frame_header_length_in_bytes = frame_hdr->frame_header_length_in_bytes,
311 .first_partition_size = frame_hdr->header_size_in_bytes,
312
313 .profile = frame_hdr->profile,
314 .bit_depth = frame_hdr->bit_depth
315 };
316 /* *INDENT-ON* */
317
318 memcpy (pic_param.mb_segment_tree_probs, sp->segmentation_tree_probs,
319 sizeof (sp->segmentation_tree_probs));
320
321 if (sp->segmentation_temporal_update) {
322 memcpy (pic_param.segment_pred_probs, sp->segmentation_pred_prob,
323 sizeof (sp->segmentation_pred_prob));
324 } else {
325 memset (pic_param.segment_pred_probs, 255,
326 sizeof (pic_param.segment_pred_probs));
327 }
328
329 for (i = 0; i < GST_VP9_REF_FRAMES; i++) {
330 if (dpb->pic_list[i]) {
331 GstVaDecodePicture *va_pic =
332 gst_vp9_picture_get_user_data (dpb->pic_list[i]);
333
334 pic_param.reference_frames[i] =
335 gst_va_decode_picture_get_surface (va_pic);
336 } else {
337 pic_param.reference_frames[i] = VA_INVALID_ID;
338 }
339 }
340
341 va_pic = gst_vp9_picture_get_user_data (picture);
342
343 return gst_va_decoder_add_param_buffer (base->decoder, va_pic,
344 VAPictureParameterBufferType, &pic_param, sizeof (pic_param));
345 }
346
/* Recomputes self->segmentation[] for the frame described by @header:
 * per-segment dequantization scales (VP9 spec 8.6.1) and the
 * [ref][mode] loop-filter level lookup table (VP9 spec 8.8.1), plus
 * the segment reference-frame/skip features forwarded in the slice
 * parameters. Called once per frame from _fill_slice(). */
static void
_update_segmentation (GstVaVp9Dec * self, GstVp9FrameHeader * header)
{
  const GstVp9LoopFilterParams *lfp = &header->loop_filter_params;
  const GstVp9QuantizationParams *qp = &header->quantization_params;
  const GstVp9SegmentationParams *sp = &header->segmentation_params;
  /* Delta shift from 8.8.1: deltas are scaled up when the base filter
   * level is >= 32. */
  guint8 n_shift = lfp->loop_filter_level >> 5;
  guint i;

  for (i = 0; i < GST_VP9_MAX_SEGMENTS; i++) {
    gint16 luma_dc_quant_scale;
    gint16 luma_ac_quant_scale;
    gint16 chroma_dc_quant_scale;
    gint16 chroma_ac_quant_scale;
    guint8 qindex;
    guint8 lvl_lookup[GST_VP9_MAX_REF_LF_DELTAS][GST_VP9_MAX_MODE_LF_DELTAS];
    gint lvl_seg = lfp->loop_filter_level;

    /* 8.6.1 Dequantization functions */
    qindex = gst_vp9_get_qindex (sp, qp, i);
    luma_dc_quant_scale =
        gst_vp9_get_dc_quant (qindex, qp->delta_q_y_dc, header->bit_depth);
    luma_ac_quant_scale = gst_vp9_get_ac_quant (qindex, 0, header->bit_depth);
    chroma_dc_quant_scale =
        gst_vp9_get_dc_quant (qindex, qp->delta_q_uv_dc, header->bit_depth);
    chroma_ac_quant_scale =
        gst_vp9_get_ac_quant (qindex, qp->delta_q_uv_ac, header->bit_depth);

    if (!lfp->loop_filter_level) {
      /* Filtering disabled: all lookup levels are zero. */
      memset (lvl_lookup, 0, sizeof (lvl_lookup));
    } else {
      /* 8.8.1 Loop filter frame init process */
      if (gst_vp9_seg_feature_active (sp, i, GST_VP9_SEG_LVL_ALT_L)) {
        /* The ALT_L feature either replaces or offsets the base level,
         * depending on the abs-or-delta flag. */
        if (sp->segmentation_abs_or_delta_update) {
          lvl_seg = sp->feature_data[i][GST_VP9_SEG_LVL_ALT_L];
        } else {
          lvl_seg += sp->feature_data[i][GST_VP9_SEG_LVL_ALT_L];
        }

        lvl_seg = CLAMP (lvl_seg, 0, GST_VP9_MAX_LOOP_FILTER);
      }

      if (!lfp->loop_filter_delta_enabled) {
        /* No per-ref/per-mode deltas: the same level everywhere. */
        memset (lvl_lookup, lvl_seg, sizeof (lvl_lookup));
      } else {
        guint8 ref, mode;
        gint intra_lvl = lvl_seg +
            (lfp->loop_filter_ref_deltas[GST_VP9_REF_FRAME_INTRA] << n_shift);

        /* Start from the previous frame's table; INTRA only uses mode
         * index 0, so the remaining entries carry over. */
        memcpy (lvl_lookup, self->segmentation[i].filter_level,
            sizeof (lvl_lookup));

        lvl_lookup[GST_VP9_REF_FRAME_INTRA][0] =
            CLAMP (intra_lvl, 0, GST_VP9_MAX_LOOP_FILTER);
        for (ref = GST_VP9_REF_FRAME_LAST; ref < GST_VP9_REF_FRAME_MAX; ref++) {
          for (mode = 0; mode < GST_VP9_MAX_MODE_LF_DELTAS; mode++) {
            /* intra_lvl is reused here as a scratch value for the
             * inter-frame levels. */
            intra_lvl = lvl_seg + (lfp->loop_filter_ref_deltas[ref] << n_shift)
                + (lfp->loop_filter_mode_deltas[mode] << n_shift);
            lvl_lookup[ref][mode] =
                CLAMP (intra_lvl, 0, GST_VP9_MAX_LOOP_FILTER);
          }
        }
      }
    }

    /* *INDENT-OFF* */
    self->segmentation[i] = (GstVp9Segmentation) {
      .luma_dc_quant_scale = luma_dc_quant_scale,
      .luma_ac_quant_scale = luma_ac_quant_scale,
      .chroma_dc_quant_scale = chroma_dc_quant_scale,
      .chroma_ac_quant_scale = chroma_ac_quant_scale,

      .reference_frame_enabled = sp->feature_enabled[i][GST_VP9_SEG_LVL_REF_FRAME],
      .reference_frame = sp->feature_data[i][GST_VP9_SEG_LVL_REF_FRAME],
      .reference_skip = sp->feature_enabled[i][GST_VP9_SEG_SEG_LVL_SKIP],
    };
    /* *INDENT-ON* */

    memcpy (self->segmentation[i].filter_level, lvl_lookup,
        sizeof (lvl_lookup));
  }
}
429
430 static inline gboolean
_fill_slice(GstVp9Decoder * decoder,GstVp9Picture * picture)431 _fill_slice (GstVp9Decoder * decoder, GstVp9Picture * picture)
432 {
433 GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
434 GstVaVp9Dec *self = GST_VA_VP9_DEC (decoder);
435 GstVaDecodePicture *va_pic;
436 const GstVp9Segmentation *seg;
437 VASliceParameterBufferVP9 slice_param;
438 guint i;
439
440 _update_segmentation (self, &picture->frame_hdr);
441
442 /* *INDENT-OFF* */
443 slice_param = (VASliceParameterBufferVP9) {
444 .slice_data_size = picture->size,
445 .slice_data_offset = 0,
446 .slice_data_flag = VA_SLICE_DATA_FLAG_ALL,
447 };
448 /* *INDENT-ON* */
449
450 for (i = 0; i < GST_VP9_MAX_SEGMENTS; i++) {
451 seg = &self->segmentation[i];
452
453 /* *INDENT-OFF* */
454 slice_param.seg_param[i] = (VASegmentParameterVP9) {
455 .segment_flags.fields = {
456 .segment_reference_enabled = seg->reference_frame_enabled,
457 .segment_reference = seg->reference_frame,
458 .segment_reference_skipped = seg->reference_skip,
459 },
460 .luma_dc_quant_scale = seg->luma_dc_quant_scale,
461 .luma_ac_quant_scale = seg->luma_ac_quant_scale,
462 .chroma_dc_quant_scale = seg->chroma_dc_quant_scale,
463 .chroma_ac_quant_scale = seg->chroma_ac_quant_scale,
464 };
465 /* *INDENT-ON* */
466
467 memcpy (slice_param.seg_param[i].filter_level, seg->filter_level,
468 sizeof (slice_param.seg_param[i].filter_level));
469 }
470
471 va_pic = gst_vp9_picture_get_user_data (picture);
472
473 return gst_va_decoder_add_slice_buffer (base->decoder, va_pic, &slice_param,
474 sizeof (slice_param), (gpointer) picture->data, picture->size);
475 }
476
477 static GstFlowReturn
gst_va_vp9_decode_picture(GstVp9Decoder * decoder,GstVp9Picture * picture,GstVp9Dpb * dpb)478 gst_va_vp9_decode_picture (GstVp9Decoder * decoder, GstVp9Picture * picture,
479 GstVp9Dpb * dpb)
480 {
481 if (_fill_param (decoder, picture, dpb) && _fill_slice (decoder, picture))
482 return GST_FLOW_OK;
483
484 return GST_FLOW_ERROR;
485 }
486
487 static GstFlowReturn
gst_va_vp9_dec_end_picture(GstVp9Decoder * decoder,GstVp9Picture * picture)488 gst_va_vp9_dec_end_picture (GstVp9Decoder * decoder, GstVp9Picture * picture)
489 {
490 GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
491 GstVaDecodePicture *va_pic;
492
493 GST_LOG_OBJECT (base, "end picture %p", picture);
494
495 va_pic = gst_vp9_picture_get_user_data (picture);
496
497 if (!gst_va_decoder_decode (base->decoder, va_pic))
498 return GST_FLOW_ERROR;
499
500 return GST_FLOW_OK;
501 }
502
503 static GstFlowReturn
gst_va_vp9_dec_output_picture(GstVp9Decoder * decoder,GstVideoCodecFrame * frame,GstVp9Picture * picture)504 gst_va_vp9_dec_output_picture (GstVp9Decoder * decoder,
505 GstVideoCodecFrame * frame, GstVp9Picture * picture)
506 {
507 GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
508 GstVaVp9Dec *self = GST_VA_VP9_DEC (decoder);
509
510 GST_LOG_OBJECT (self, "Outputting picture %p", picture);
511
512 if (base->copy_frames)
513 gst_va_base_dec_copy_output_buffer (base, frame);
514
515 gst_vp9_picture_unref (picture);
516
517 return gst_video_decoder_finish_frame (GST_VIDEO_DECODER (self), frame);
518 }
519
520 static GstVp9Picture *
gst_va_vp9_dec_duplicate_picture(GstVp9Decoder * decoder,GstVideoCodecFrame * frame,GstVp9Picture * picture)521 gst_va_vp9_dec_duplicate_picture (GstVp9Decoder * decoder,
522 GstVideoCodecFrame * frame, GstVp9Picture * picture)
523 {
524 GstVaDecodePicture *va_pic, *va_dup;
525 GstVp9Picture *new_picture;
526
527 if (_check_resolution_change (GST_VA_VP9_DEC (decoder), picture) !=
528 GST_FLOW_OK) {
529 return NULL;
530 }
531
532 va_pic = gst_vp9_picture_get_user_data (picture);
533 va_dup = gst_va_decode_picture_dup (va_pic);
534
535 new_picture = gst_vp9_picture_new ();
536 new_picture->frame_hdr = picture->frame_hdr;
537
538 frame->output_buffer = gst_buffer_ref (va_dup->gstbuffer);
539
540 gst_vp9_picture_set_user_data (picture, va_dup,
541 (GDestroyNotify) gst_va_decode_picture_free);
542
543 return new_picture;
544 }
545
/* GstVideoDecoder::negotiate vfunc: (re)opens or updates the VA decoder
 * configuration and sets the output state/caps. A plain resolution
 * change reuses the open decoder; a profile or rt-format change closes
 * and reopens it. */
static gboolean
gst_va_vp9_dec_negotiate (GstVideoDecoder * decoder)
{
  GstCapsFeatures *capsfeatures = NULL;
  GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
  GstVaVp9Dec *self = GST_VA_VP9_DEC (decoder);
  GstVideoFormat format = GST_VIDEO_FORMAT_UNKNOWN;
  GstVp9Decoder *vp9dec = GST_VP9_DECODER (decoder);
  gboolean need_open;

  /* Ignore downstream renegotiation request. */
  if (!base->need_negotiation)
    return TRUE;

  base->need_negotiation = FALSE;

  need_open = TRUE;
  /* VP9 profile entry should have the ability to handle dynamical
   * resolution changes. If only the resolution changes, we should not
   * re-create the config and context. */
  if (gst_va_decoder_is_open (base->decoder)) {
    VAProfile cur_profile;
    guint cur_rtformat;
    gint cur_width, cur_height;

    if (!gst_va_decoder_get_config (base->decoder, &cur_profile,
            &cur_rtformat, &cur_width, &cur_height))
      return FALSE;

    if (base->profile == cur_profile && base->rt_format == cur_rtformat) {
      /* Same config: just tell the driver about the new frame size. */
      if (!gst_va_decoder_update_frame_size (base->decoder, base->width,
              base->height))
        return FALSE;

      GST_INFO_OBJECT (self, "dynamical resolution changes from %dx%d to"
          " %dx%d", cur_width, cur_height, base->width, base->height);

      need_open = FALSE;
    } else {
      /* Profile or rt-format changed: full close/open cycle. */
      if (!gst_va_decoder_close (base->decoder))
        return FALSE;
    }
  }

  if (need_open) {
    if (!gst_va_decoder_open (base->decoder, base->profile, base->rt_format))
      return FALSE;

    if (!gst_va_decoder_set_frame_size (base->decoder, base->width,
            base->height))
      return FALSE;
  }

  /* Replace any previous output state with one matching the new
   * dimensions and preferred format/caps-features. */
  if (base->output_state)
    gst_video_codec_state_unref (base->output_state);

  gst_va_base_dec_get_preferred_format_and_caps_features (base, &format,
      &capsfeatures);

  base->output_state =
      gst_video_decoder_set_output_state (decoder, format,
      base->width, base->height, vp9dec->input_state);

  base->output_state->caps = gst_video_info_to_caps (&base->output_state->info);
  if (capsfeatures)
    gst_caps_set_features_simple (base->output_state->caps, capsfeatures);

  GST_INFO_OBJECT (self, "Negotiated caps %" GST_PTR_FORMAT,
      base->output_state->caps);

  return GST_VIDEO_DECODER_CLASS (parent_class)->negotiate (decoder);
}
618
/* GObject::dispose vfunc: closes the VA decoder held by the base class
 * and chains up to the parent class. */
static void
gst_va_vp9_dec_dispose (GObject * object)
{
  gst_va_base_dec_close (GST_VIDEO_DECODER (object));
  G_OBJECT_CLASS (parent_class)->dispose (object);
}
625
/* GType class_init: called once per registered type with the per-device
 * CData allocated in gst_va_vp9_dec_register(); fills in element
 * metadata, pad caps and vfuncs, then frees the CData (it is not
 * needed after class initialization). */
static void
gst_va_vp9_dec_class_init (gpointer g_class, gpointer class_data)
{
  GstCaps *src_doc_caps, *sink_doc_caps;
  GObjectClass *gobject_class = G_OBJECT_CLASS (g_class);
  GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
  GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_CLASS (g_class);
  GstVp9DecoderClass *vp9_class = GST_VP9_DECODER_CLASS (g_class);
  struct CData *cdata = class_data;
  gchar *long_name;

  /* Non-first devices carry a description (the render node basename)
   * to disambiguate the element's long name. */
  if (cdata->description) {
    long_name = g_strdup_printf ("VA-API VP9 Decoder in %s",
        cdata->description);
  } else {
    long_name = g_strdup ("VA-API VP9 Decoder");
  }

  gst_element_class_set_metadata (element_class, long_name,
      "Codec/Decoder/Video/Hardware", "VA-API based VP9 video decoder",
      "Víctor Jáquez <vjaquez@igalia.com>");

  /* Documentation caps: the generic templates shown in gst-inspect,
   * as opposed to the device-probed cdata caps. */
  sink_doc_caps = gst_caps_from_string (sink_caps_str);
  src_doc_caps = gst_caps_from_string (src_caps_str);

  parent_class = g_type_class_peek_parent (g_class);

  gst_va_base_dec_class_init (GST_VA_BASE_DEC_CLASS (g_class), VP9,
      cdata->render_device_path, cdata->sink_caps, cdata->src_caps,
      src_doc_caps, sink_doc_caps);

  gobject_class->dispose = gst_va_vp9_dec_dispose;

  decoder_class->negotiate = GST_DEBUG_FUNCPTR (gst_va_vp9_dec_negotiate);

  vp9_class->new_sequence = GST_DEBUG_FUNCPTR (gst_va_vp9_new_sequence);
  vp9_class->new_picture = GST_DEBUG_FUNCPTR (gst_va_vp9_dec_new_picture);
  vp9_class->decode_picture = GST_DEBUG_FUNCPTR (gst_va_vp9_decode_picture);
  vp9_class->end_picture = GST_DEBUG_FUNCPTR (gst_va_vp9_dec_end_picture);
  vp9_class->output_picture = GST_DEBUG_FUNCPTR (gst_va_vp9_dec_output_picture);
  vp9_class->duplicate_picture =
      GST_DEBUG_FUNCPTR (gst_va_vp9_dec_duplicate_picture);

  /* The class owns the CData contents; release everything now. */
  g_free (long_name);
  g_free (cdata->description);
  g_free (cdata->render_device_path);
  gst_caps_unref (cdata->src_caps);
  gst_caps_unref (cdata->sink_caps);
  g_free (cdata);
}
676
/* GType instance_init: delegates per-instance setup to the VA base
 * decoder, passing this element's debug category. */
static void
gst_va_vp9_dec_init (GTypeInstance * instance, gpointer g_class)
{
  gst_va_base_dec_init (GST_VA_BASE_DEC (instance), GST_CAT_DEFAULT);
}
682
/* This element doesn't parse superframes. Let's delegate that to the
 * parser. */
/* Adds alignment=frame to @sinkcaps so upstream (vp9parse) splits
 * superframes before they reach this element.
 *
 * Returns: (transfer full): a new reference to the modified caps. */
static GstCaps *
_complete_sink_caps (GstCaps * sinkcaps)
{
  gst_caps_set_simple (sinkcaps, "alignment", G_TYPE_STRING, "frame", NULL);
  return gst_caps_ref (sinkcaps);
}
691
/* GOnce callback: initializes the element's debug category exactly
 * once, no matter how many devices register the element. */
static gpointer
_register_debug_category (gpointer data)
{
  GST_DEBUG_CATEGORY_INIT (gst_va_vp9dec_debug, "vavp9dec", 0,
      "VA VP9 decoder");

  return NULL;
}
700
/**
 * gst_va_vp9_dec_register:
 * @plugin: the plugin to register the element feature in
 * @device: the VA device (render node) this decoder instance targets
 * @sink_caps: (transfer none): device-probed sink caps
 * @src_caps: (transfer none): device-probed source caps
 * @rank: the feature rank; lowered by one for non-first devices
 *
 * Registers a vavp9dec GType/element feature bound to @device. The
 * per-device configuration is passed to class_init through a
 * heap-allocated CData, which class_init frees.
 *
 * Returns: %TRUE on successful registration.
 */
gboolean
gst_va_vp9_dec_register (GstPlugin * plugin, GstVaDevice * device,
    GstCaps * sink_caps, GstCaps * src_caps, guint rank)
{
  static GOnce debug_once = G_ONCE_INIT;
  GType type;
  GTypeInfo type_info = {
    .class_size = sizeof (GstVaVp9DecClass),
    .class_init = gst_va_vp9_dec_class_init,
    .instance_size = sizeof (GstVaVp9Dec),
    .instance_init = gst_va_vp9_dec_init,
  };
  struct CData *cdata;
  gboolean ret;
  gchar *type_name, *feature_name;

  g_return_val_if_fail (GST_IS_PLUGIN (plugin), FALSE);
  g_return_val_if_fail (GST_IS_VA_DEVICE (device), FALSE);
  g_return_val_if_fail (GST_IS_CAPS (sink_caps), FALSE);
  g_return_val_if_fail (GST_IS_CAPS (src_caps), FALSE);

  cdata = g_new (struct CData, 1);
  cdata->description = NULL;
  cdata->render_device_path = g_strdup (device->render_device_path);
  cdata->sink_caps = _complete_sink_caps (sink_caps);
  cdata->src_caps = gst_caps_ref (src_caps);

  /* class data will be leaked if the element never gets instantiated */
  GST_MINI_OBJECT_FLAG_SET (sink_caps, GST_MINI_OBJECT_FLAG_MAY_BE_LEAKED);
  GST_MINI_OBJECT_FLAG_SET (src_caps, GST_MINI_OBJECT_FLAG_MAY_BE_LEAKED);

  type_info.class_data = cdata;

  type_name = g_strdup ("GstVaVp9Dec");
  feature_name = g_strdup ("vavp9dec");

  /* The first decoder to be registered should use a constant name,
   * like vavp9dec; for any additional decoders we create unique names
   * by inserting the render device name. */
  if (g_type_from_name (type_name)) {
    gchar *basename = g_path_get_basename (device->render_device_path);
    g_free (type_name);
    g_free (feature_name);
    type_name = g_strdup_printf ("GstVa%sVp9Dec", basename);
    feature_name = g_strdup_printf ("va%svp9dec", basename);
    /* cdata takes ownership of basename; class_init frees it. */
    cdata->description = basename;

    /* lower rank for non-first device */
    if (rank > 0)
      rank--;
  }

  g_once (&debug_once, _register_debug_category, NULL);

  type = g_type_register_static (GST_TYPE_VP9_DECODER,
      type_name, &type_info, 0);

  ret = gst_element_register (plugin, feature_name, rank, type);

  g_free (type_name);
  g_free (feature_name);

  return ret;
}
765