1 /* GStreamer
2 * Copyright (C) 2020 Intel Corporation
3 * Author: He Junyan <junyan.he@intel.com>
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Library General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Library General Public License for more details.
14 *
15 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
17 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
18 * Boston, MA 02110-1301, USA.
19 */
20
21 /**
22 * SECTION:element-vaav1dec
23 * @title: vaav1dec
24 * @short_description: A VA-API based AV1 video decoder
25 *
26 * vaav1dec decodes AV1 bitstreams to VA surfaces using the
27 * installed and chosen [VA-API](https://01.org/linuxmedia/vaapi)
28 * driver.
29 *
30 * The decoding surfaces can be mapped onto main memory as video
31 * frames.
32 *
33 * ## Example launch line
34 * ```
35 * gst-launch-1.0 filesrc location=sample.av1 ! ivfparse ! av1parse ! vaav1dec ! autovideosink
36 * ```
37 *
38 * Since: 1.20
39 *
40 */
41
42 #ifdef HAVE_CONFIG_H
43 #include "config.h"
44 #endif
45
46 #include <gst/codecs/gstav1decoder.h>
47 #include "gstvaav1dec.h"
48 #include "gstvabasedec.h"
49 #include "gstvaallocator.h"
50
/* Per-element debug category; compiled out when debugging is disabled. */
GST_DEBUG_CATEGORY_STATIC (gst_va_av1dec_debug);
#ifndef GST_DISABLE_GST_DEBUG
#define GST_CAT_DEFAULT gst_va_av1dec_debug
#else
#define GST_CAT_DEFAULT NULL
#endif

/* Plain cast helpers; the type is registered dynamically per render device,
 * so the usual G_DECLARE_* checked-cast macros are not used here. */
#define GST_VA_AV1_DEC(obj) ((GstVaAV1Dec *) obj)
#define GST_VA_AV1_DEC_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS ((obj), G_TYPE_FROM_INSTANCE (obj), GstVaAV1DecClass))
#define GST_VA_AV1_DEC_CLASS(klass) ((GstVaAV1DecClass *) klass)
61
typedef struct _GstVaAV1Dec GstVaAV1Dec;
typedef struct _GstVaAV1DecClass GstVaAV1DecClass;

/* Class structure: nothing of its own, everything is inherited from
 * GstVaBaseDecClass. */
struct _GstVaAV1DecClass
{
  GstVaBaseDecClass parent_class;
};

/* Instance structure of the VA-API AV1 decoder element. */
struct _GstVaAV1Dec
{
  GstVaBaseDec parent;

  /* Result of the last output-buffer allocation in new_picture(); checked
   * again in output_picture() so allocation failures propagate. */
  GstFlowReturn last_ret;

  /* Copy of the latest sequence header OBU (taken in new_sequence()). */
  GstAV1SequenceHeaderOBU seq;
  /* Maximum frame dimensions from the sequence header; the VA context and
   * surfaces are sized for these, individual frames may be smaller. */
  gint max_width;
  gint max_height;
};
80
/* Set in class_init; used to chain up to the parent implementation. */
static GstElementClass *parent_class = NULL;

/* *INDENT-OFF* */
/* Documentation/template source caps: VA memory or system memory, in the
 * formats the decoder can output (8-bit NV12, 10-bit P010). */
static const gchar *src_caps_str =
    GST_VIDEO_CAPS_MAKE_WITH_FEATURES (GST_CAPS_FEATURE_MEMORY_VA,
        "{ NV12, P010_10LE }") " ;"
    GST_VIDEO_CAPS_MAKE ("{ NV12, P010_10LE }");
/* *INDENT-ON* */

/* Documentation/template sink caps; refined at runtime in getcaps(). */
static const gchar *sink_caps_str = "video/x-av1";
91
/* Negotiate output caps with downstream. Re-creates the VA decoder context
 * only when the profile/chroma format/maximum size actually changed, then
 * publishes a fresh output state and chains up to the base class. */
static gboolean
gst_va_av1_dec_negotiate (GstVideoDecoder * decoder)
{
  GstVaAV1Dec *self = GST_VA_AV1_DEC (decoder);
  GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
  GstAV1Decoder *av1dec = GST_AV1_DECODER (decoder);
  GstVideoFormat format = GST_VIDEO_FORMAT_UNKNOWN;
  GstCapsFeatures *capsfeatures = NULL;

  /* Ignore downstream renegotiation request. */
  if (!base->need_negotiation)
    return TRUE;

  base->need_negotiation = FALSE;

  /* Do not re-create the context if only the frame size changes */
  if (!gst_va_decoder_config_is_equal (base->decoder, base->profile,
          base->rt_format, self->max_width, self->max_height)) {
    /* Close the previous decoder context (if any) before opening one with
     * the new configuration. */
    if (gst_va_decoder_is_open (base->decoder)
        && !gst_va_decoder_close (base->decoder))
      return FALSE;

    if (!gst_va_decoder_open (base->decoder, base->profile, base->rt_format))
      return FALSE;

    /* Surfaces are sized for the sequence maximum, not the current frame. */
    if (!gst_va_decoder_set_frame_size (base->decoder, self->max_width,
            self->max_height))
      return FALSE;
  }

  if (base->output_state)
    gst_video_codec_state_unref (base->output_state);

  /* Pick the output format and memory caps-feature downstream prefers. */
  gst_va_base_dec_get_preferred_format_and_caps_features (base, &format,
      &capsfeatures);

  base->output_state = gst_video_decoder_set_output_state (decoder, format,
      base->width, base->height, av1dec->input_state);

  base->output_state->caps = gst_video_info_to_caps (&base->output_state->info);
  if (capsfeatures)
    gst_caps_set_features_simple (base->output_state->caps, capsfeatures);

  GST_INFO_OBJECT (self, "Negotiated caps %" GST_PTR_FORMAT,
      base->output_state->caps);

  return GST_VIDEO_DECODER_CLASS (parent_class)->negotiate (decoder);
}
140
141 static GstCaps *
_complete_sink_caps(GstCaps * sinkcaps)142 _complete_sink_caps (GstCaps * sinkcaps)
143 {
144 GstCaps *caps = gst_caps_copy (sinkcaps);
145 GValue val = G_VALUE_INIT;
146
147 g_value_init (&val, G_TYPE_STRING);
148 g_value_set_string (&val, "frame");
149 gst_caps_set_value (caps, "alignment", &val);
150 g_value_unset (&val);
151
152 return caps;
153 }
154
155 static VAProfile
_get_profile(GstVaAV1Dec * self,const GstAV1SequenceHeaderOBU * seq_hdr)156 _get_profile (GstVaAV1Dec * self, const GstAV1SequenceHeaderOBU * seq_hdr)
157 {
158 GstVaBaseDec *base = GST_VA_BASE_DEC (self);
159 VAProfile profile = VAProfileNone;
160
161 switch (seq_hdr->seq_profile) {
162 case GST_AV1_PROFILE_0:
163 profile = VAProfileAV1Profile0;
164 break;
165 case GST_AV1_PROFILE_1:
166 profile = VAProfileAV1Profile1;
167 break;
168 default:
169 GST_ERROR_OBJECT (self, "Unsupported av1 profile value %d",
170 seq_hdr->seq_profile);
171 return VAProfileNone;
172 }
173
174 if (!gst_va_decoder_has_profile (base->decoder, profile)) {
175 GST_ERROR_OBJECT (self, "Profile %s is not supported by HW",
176 gst_va_profile_name (profile));
177 return VAProfileNone;
178 }
179
180 return profile;
181 }
182
183 static guint
_get_rtformat(GstVaAV1Dec * self,VAProfile profile,const GstAV1SequenceHeaderOBU * seq_header)184 _get_rtformat (GstVaAV1Dec * self, VAProfile profile,
185 const GstAV1SequenceHeaderOBU * seq_header)
186 {
187 /* 6.4.1:
188 seq_profile Bit depth Monochrome support Chroma subsampling
189 0 8 or 10 Yes YUV 4:2:0
190 1 8 or 10 No YUV 4:4:4
191 2 8 or 10 Yes YUV 4:2:2
192 2 12 Yes YUV 4:2:0,YUV 4:2:2,YUV 4:4:4
193 */
194
195 /* TODO: consider Monochrome case. Just return 4:2:0 for Monochrome now. */
196 switch (profile) {
197 case VAProfileAV1Profile0:
198 if (seq_header->bit_depth == 8) {
199 return VA_RT_FORMAT_YUV420;
200 } else if (seq_header->bit_depth == 10) {
201 return VA_RT_FORMAT_YUV420_10;
202 }
203 break;
204 case VAProfileAV1Profile1:
205 if (seq_header->bit_depth == 8) {
206 return VA_RT_FORMAT_YUV444;
207 } else if (seq_header->bit_depth == 10) {
208 return VA_RT_FORMAT_YUV444_10;
209 }
210 break;
211 default:
212 break;
213 }
214
215 GST_ERROR_OBJECT (self, "Fail to find rtformat for profile:%s, bit_depth:%d",
216 gst_va_profile_name (profile), seq_header->bit_depth);
217 return 0;
218 }
219
220 static GstCaps *
gst_va_av1_dec_getcaps(GstVideoDecoder * decoder,GstCaps * filter)221 gst_va_av1_dec_getcaps (GstVideoDecoder * decoder, GstCaps * filter)
222 {
223 GstCaps *sinkcaps, *caps = NULL, *tmp;
224 GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
225
226 if (base->decoder)
227 caps = gst_va_decoder_get_sinkpad_caps (base->decoder);
228
229 if (caps) {
230 sinkcaps = _complete_sink_caps (caps);
231 gst_caps_unref (caps);
232 if (filter) {
233 tmp = gst_caps_intersect_full (filter, sinkcaps,
234 GST_CAPS_INTERSECT_FIRST);
235 gst_caps_unref (sinkcaps);
236 caps = tmp;
237 } else {
238 caps = sinkcaps;
239 }
240 GST_LOG_OBJECT (base, "Returning caps %" GST_PTR_FORMAT, caps);
241 } else if (!caps) {
242 caps = gst_video_decoder_proxy_getcaps (decoder, NULL, filter);
243 }
244
245 return caps;
246 }
247
/* Handle a new sequence header: derive the VA profile and chroma format,
 * remember the header, and flag renegotiation when the decoder
 * configuration (profile/format/max size) actually changed. */
static GstFlowReturn
gst_va_av1_dec_new_sequence (GstAV1Decoder * decoder,
    const GstAV1SequenceHeaderOBU * seq_hdr)
{
  GstVaAV1Dec *self = GST_VA_AV1_DEC (decoder);
  GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
  VAProfile profile;
  guint rt_format;

  GST_LOG_OBJECT (self, "new sequence");

  profile = _get_profile (self, seq_hdr);
  if (profile == VAProfileNone)
    return GST_FLOW_NOT_NEGOTIATED;

  rt_format = _get_rtformat (self, profile, seq_hdr);
  if (!rt_format)
    return GST_FLOW_NOT_NEGOTIATED;

  /* Keep a copy; start_picture() reads it for every frame. */
  self->seq = *seq_hdr;

  if (!gst_va_decoder_config_is_equal (base->decoder, profile,
          rt_format, seq_hdr->max_frame_width_minus_1 + 1,
          seq_hdr->max_frame_height_minus_1 + 1)) {
    base->profile = profile;
    base->rt_format = rt_format;
    self->max_width = seq_hdr->max_frame_width_minus_1 + 1;
    self->max_height = seq_hdr->max_frame_height_minus_1 + 1;
    base->need_negotiation = TRUE;

    base->min_buffers = 7 + 4;  /* dpb size + scratch surfaces */

    /* May be changed by frame header */
    base->width = self->max_width;
    base->height = self->max_height;
    base->need_valign = FALSE;
  }

  return GST_FLOW_OK;
}
288
/* Prepare a decode picture for @frame: track per-frame size changes (with
 * padding alignment when the frame is smaller than the sequence maximum),
 * renegotiate if needed, allocate the output buffer (plus an aux surface
 * when film grain is applied) and attach the VA picture to @picture. */
static GstFlowReturn
gst_va_av1_dec_new_picture (GstAV1Decoder * decoder,
    GstVideoCodecFrame * frame, GstAV1Picture * picture)
{
  GstVaAV1Dec *self = GST_VA_AV1_DEC (decoder);
  GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
  GstVaDecodePicture *pic;
  GstVideoDecoder *vdec = GST_VIDEO_DECODER (decoder);
  GstAV1FrameHeaderOBU *frame_hdr = &picture->frame_hdr;

  if (frame_hdr->upscaled_width != base->width
      || frame_hdr->frame_height != base->height) {
    base->width = frame_hdr->upscaled_width;
    base->height = frame_hdr->frame_height;

    /* Surfaces stay at the sequence maximum; expose the unused right/bottom
     * area as padding so mapped frames have the correct visible size. */
    if (base->width < self->max_width || base->height < self->max_height) {
      base->need_valign = TRUE;
      /* *INDENT-OFF* */
      base->valign = (GstVideoAlignment){
        .padding_bottom = self->max_height - base->height,
        .padding_right = self->max_width - base->width,
      };
      /* *INDENT-ON* */
    }

    base->need_negotiation = TRUE;
  }

  if (base->need_negotiation) {
    if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (self))) {
      GST_ERROR_OBJECT (self, "Failed to negotiate with downstream");
      return GST_FLOW_NOT_NEGOTIATED;
    }
  }

  /* Remember the allocation result; output_picture() re-checks it. */
  self->last_ret = gst_video_decoder_allocate_output_frame (vdec, frame);
  if (self->last_ret != GST_FLOW_OK) {
    GST_WARNING_OBJECT (self,
        "Failed to allocated output buffer, return %s",
        gst_flow_get_name (self->last_ret));
    return self->last_ret;
  }

  /* With film grain the decoder writes the pre-grain picture to an extra
   * (aux) surface; the grain-applied result goes to the output buffer. */
  if (picture->apply_grain) {
    if (!gst_va_buffer_create_aux_surface (frame->output_buffer)) {
      GST_WARNING_OBJECT (self,
          "Failed to allocated aux surface for buffer %p",
          frame->output_buffer);
      return GST_FLOW_ERROR;
    }
  }

  pic = gst_va_decode_picture_new (base->decoder, frame->output_buffer);

  /* The VA picture is freed together with @picture. */
  gst_av1_picture_set_user_data (picture, pic,
      (GDestroyNotify) gst_va_decode_picture_free);

  if (picture->apply_grain) {
    GST_LOG_OBJECT (self, "New va decode picture %p - %#x(aux: %#x)", pic,
        gst_va_decode_picture_get_surface (pic),
        gst_va_decode_picture_get_aux_surface (pic));
  } else {
    GST_LOG_OBJECT (self, "New va decode picture %p - %#x", pic,
        gst_va_decode_picture_get_surface (pic));
  }

  return GST_FLOW_OK;
}
357
358 static GstAV1Picture *
gst_va_av1_dec_duplicate_picture(GstAV1Decoder * decoder,GstAV1Picture * picture)359 gst_va_av1_dec_duplicate_picture (GstAV1Decoder * decoder,
360 GstAV1Picture * picture)
361 {
362 GstVaAV1Dec *self = GST_VA_AV1_DEC (decoder);
363 GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
364 GstVaDecodePicture *pic;
365 GstVaDecodePicture *new_pic;
366 GstAV1Picture *new_picture;
367
368 pic = gst_av1_picture_get_user_data (picture);
369 if (!pic) {
370 GST_ERROR_OBJECT (self, "Parent picture does not have a va picture");
371 return NULL;
372 }
373
374 new_picture = gst_av1_picture_new ();
375 g_assert (pic->gstbuffer);
376 new_pic = gst_va_decode_picture_new (base->decoder, pic->gstbuffer);
377
378 GST_LOG_OBJECT (self, "Duplicate output with buffer %" GST_PTR_FORMAT
379 " (surface %#x)", pic, gst_va_decode_picture_get_surface (pic));
380
381 gst_av1_picture_set_user_data (new_picture, new_pic,
382 (GDestroyNotify) gst_va_decode_picture_free);
383
384 return new_picture;
385 }
386
387 static void
_setup_segment_info(VADecPictureParameterBufferAV1 * pic_param,GstAV1FrameHeaderOBU * frame_header)388 _setup_segment_info (VADecPictureParameterBufferAV1 * pic_param,
389 GstAV1FrameHeaderOBU * frame_header)
390 {
391 guint i, j;
392 uint8_t feature_mask;
393
394 for (i = 0; i < GST_AV1_MAX_SEGMENTS; i++)
395 for (j = 0; j < GST_AV1_SEG_LVL_MAX; j++)
396 pic_param->seg_info.feature_data[i][j] =
397 frame_header->segmentation_params.feature_data[i][j];
398
399 for (i = 0; i < GST_AV1_MAX_SEGMENTS; i++) {
400 feature_mask = 0;
401 for (j = 0; j < GST_AV1_SEG_LVL_MAX; j++) {
402 if (frame_header->segmentation_params.feature_enabled[i][j])
403 feature_mask |= 1 << j;
404 }
405 pic_param->seg_info.feature_mask[i] = feature_mask;
406 }
407 }
408
409 static void
_setup_film_grain_info(VADecPictureParameterBufferAV1 * pic_param,GstAV1FrameHeaderOBU * frame_header)410 _setup_film_grain_info (VADecPictureParameterBufferAV1 * pic_param,
411 GstAV1FrameHeaderOBU * frame_header)
412 {
413 guint i;
414
415 if (!frame_header->film_grain_params.apply_grain)
416 return;
417
418 pic_param->film_grain_info.num_y_points =
419 frame_header->film_grain_params.num_y_points;
420 for (i = 0; i < frame_header->film_grain_params.num_y_points; i++) {
421 pic_param->film_grain_info.point_y_value[i] =
422 frame_header->film_grain_params.point_y_value[i];
423 pic_param->film_grain_info.point_y_scaling[i] =
424 frame_header->film_grain_params.point_y_scaling[i];
425 }
426
427 pic_param->film_grain_info.num_cb_points =
428 frame_header->film_grain_params.num_cb_points;
429 for (i = 0; i < frame_header->film_grain_params.num_cb_points; i++) {
430 pic_param->film_grain_info.point_cb_value[i] =
431 frame_header->film_grain_params.point_cb_value[i];
432 pic_param->film_grain_info.point_cb_scaling[i] =
433 frame_header->film_grain_params.point_cb_scaling[i];
434 }
435
436 pic_param->film_grain_info.num_cr_points =
437 frame_header->film_grain_params.num_cr_points;
438 for (i = 0; i < frame_header->film_grain_params.num_cr_points; i++) {
439 pic_param->film_grain_info.point_cr_value[i] =
440 frame_header->film_grain_params.point_cr_value[i];
441 pic_param->film_grain_info.point_cr_scaling[i] =
442 frame_header->film_grain_params.point_cr_scaling[i];
443 }
444
445
446 if (pic_param->film_grain_info.num_y_points) {
447 for (i = 0; i < 24; i++) {
448 pic_param->film_grain_info.ar_coeffs_y[i] =
449 frame_header->film_grain_params.ar_coeffs_y_plus_128[i] - 128;
450 }
451 }
452 if (frame_header->film_grain_params.chroma_scaling_from_luma
453 || pic_param->film_grain_info.num_cb_points) {
454 for (i = 0; i < GST_AV1_MAX_NUM_POS_LUMA; i++) {
455 pic_param->film_grain_info.ar_coeffs_cb[i] =
456 frame_header->film_grain_params.ar_coeffs_cb_plus_128[i] - 128;
457 }
458 }
459 if (frame_header->film_grain_params.chroma_scaling_from_luma
460 || pic_param->film_grain_info.num_cr_points) {
461 for (i = 0; i < GST_AV1_MAX_NUM_POS_LUMA; i++) {
462 pic_param->film_grain_info.ar_coeffs_cr[i] =
463 frame_header->film_grain_params.ar_coeffs_cr_plus_128[i] - 128;
464 }
465 }
466 }
467
468 static void
_setup_loop_filter_info(VADecPictureParameterBufferAV1 * pic_param,GstAV1FrameHeaderOBU * frame_header)469 _setup_loop_filter_info (VADecPictureParameterBufferAV1 * pic_param,
470 GstAV1FrameHeaderOBU * frame_header)
471 {
472 guint i;
473
474 pic_param->filter_level[0] =
475 frame_header->loop_filter_params.loop_filter_level[0];
476 pic_param->filter_level[1] =
477 frame_header->loop_filter_params.loop_filter_level[1];
478 pic_param->filter_level_u =
479 frame_header->loop_filter_params.loop_filter_level[2];
480 pic_param->filter_level_v =
481 frame_header->loop_filter_params.loop_filter_level[3];
482
483 for (i = 0; i < GST_AV1_TOTAL_REFS_PER_FRAME; i++)
484 pic_param->ref_deltas[i] =
485 frame_header->loop_filter_params.loop_filter_ref_deltas[i];
486 for (i = 0; i < 2; i++)
487 pic_param->mode_deltas[i] =
488 frame_header->loop_filter_params.loop_filter_mode_deltas[i];
489 }
490
491 static void
_setup_quantization_info(VADecPictureParameterBufferAV1 * pic_param,GstAV1FrameHeaderOBU * frame_header)492 _setup_quantization_info (VADecPictureParameterBufferAV1 * pic_param,
493 GstAV1FrameHeaderOBU * frame_header)
494 {
495 pic_param->qmatrix_fields.bits.using_qmatrix =
496 frame_header->quantization_params.using_qmatrix;
497 if (frame_header->quantization_params.using_qmatrix) {
498 pic_param->qmatrix_fields.bits.qm_y =
499 frame_header->quantization_params.qm_y;
500 pic_param->qmatrix_fields.bits.qm_u =
501 frame_header->quantization_params.qm_u;
502 pic_param->qmatrix_fields.bits.qm_v =
503 frame_header->quantization_params.qm_v;
504 } else {
505 pic_param->qmatrix_fields.bits.qm_y = 0;
506 pic_param->qmatrix_fields.bits.qm_u = 0;
507 pic_param->qmatrix_fields.bits.qm_v = 0;
508 }
509 }
510
511 static void
_setup_cdef_info(VADecPictureParameterBufferAV1 * pic_param,GstAV1FrameHeaderOBU * frame_header,guint8 num_planes)512 _setup_cdef_info (VADecPictureParameterBufferAV1 * pic_param,
513 GstAV1FrameHeaderOBU * frame_header, guint8 num_planes)
514 {
515 guint8 sec_strength;
516 guint i;
517
518 pic_param->cdef_damping_minus_3 = frame_header->cdef_params.cdef_damping - 3;
519 pic_param->cdef_bits = frame_header->cdef_params.cdef_bits;
520 for (i = 0; i < GST_AV1_CDEF_MAX; i++) {
521 sec_strength = frame_header->cdef_params.cdef_y_sec_strength[i];
522 g_assert (sec_strength <= 4);
523 /* may need to minus 1 in order to merge with primary value. */
524 if (sec_strength == 4)
525 sec_strength--;
526
527 pic_param->cdef_y_strengths[i] =
528 ((frame_header->cdef_params.cdef_y_pri_strength[i] & 0xf) << 2) |
529 (sec_strength & 0x03);
530 }
531 if (num_planes > 1) {
532 for (i = 0; i < GST_AV1_CDEF_MAX; i++) {
533 sec_strength = frame_header->cdef_params.cdef_uv_sec_strength[i];
534 g_assert (sec_strength <= 4);
535 /* may need to minus 1 in order to merge with primary value. */
536 if (sec_strength == 4)
537 sec_strength--;
538
539 pic_param->cdef_uv_strengths[i] =
540 ((frame_header->cdef_params.cdef_uv_pri_strength[i] & 0xf) << 2) |
541 (sec_strength & 0x03);
542 }
543 } else {
544 for (i = 0; i < GST_AV1_CDEF_MAX; i++) {
545 pic_param->cdef_uv_strengths[i] = 0;
546 }
547 }
548 }
549
550 static void
_setup_global_motion_info(VADecPictureParameterBufferAV1 * pic_param,GstAV1FrameHeaderOBU * frame_header)551 _setup_global_motion_info (VADecPictureParameterBufferAV1 * pic_param,
552 GstAV1FrameHeaderOBU * frame_header)
553 {
554 guint i, j;
555
556 for (i = 0; i < 7; i++) {
557 /* assuming VAAV1TransformationType and GstAV1WarpModelType are
558 * equivalent */
559 pic_param->wm[i].wmtype = (VAAV1TransformationType)
560 frame_header->global_motion_params.gm_type[GST_AV1_REF_LAST_FRAME + i];
561
562 for (j = 0; j < 6; j++)
563 pic_param->wm[i].wmmat[j] =
564 frame_header->global_motion_params.gm_params
565 [GST_AV1_REF_LAST_FRAME + i][j];
566
567 pic_param->wm[i].wmmat[6] = 0;
568 pic_param->wm[i].wmmat[7] = 0;
569
570 pic_param->wm[i].invalid =
571 frame_header->global_motion_params.invalid[GST_AV1_REF_LAST_FRAME + i];
572 }
573 }
574
/* Build the VADecPictureParameterBufferAV1 for @picture from the cached
 * sequence header, the frame header and the DPB, and submit it to the VA
 * decoder. When film grain is applied, the decode target is the aux
 * surface and the display target is the output-buffer surface. */
static GstFlowReturn
gst_va_av1_dec_start_picture (GstAV1Decoder * decoder, GstAV1Picture * picture,
    GstAV1Dpb * dpb)
{
  GstVaAV1Dec *self = GST_VA_AV1_DEC (decoder);
  GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
  GstAV1FrameHeaderOBU *frame_header = &picture->frame_hdr;
  GstAV1SequenceHeaderOBU *seq_header = &self->seq;
  VADecPictureParameterBufferAV1 pic_param = { };
  GstVaDecodePicture *va_pic;
  guint i;

  va_pic = gst_av1_picture_get_user_data (picture);
  g_assert (va_pic);

  /* *INDENT-OFF* */
  pic_param = (VADecPictureParameterBufferAV1){
    .profile = seq_header->seq_profile,
    .order_hint_bits_minus_1 = seq_header->order_hint_bits_minus_1,
    .matrix_coefficients = seq_header->color_config.matrix_coefficients,
    .seq_info_fields.fields = {
      .still_picture = seq_header->still_picture,
      .use_128x128_superblock = seq_header->use_128x128_superblock,
      .enable_filter_intra = seq_header->enable_filter_intra,
      .enable_intra_edge_filter = seq_header->enable_intra_edge_filter,
      .enable_interintra_compound = seq_header->enable_interintra_compound,
      .enable_masked_compound = seq_header->enable_masked_compound,
      .enable_dual_filter = seq_header->enable_dual_filter,
      .enable_order_hint = seq_header->enable_order_hint,
      .enable_jnt_comp = seq_header->enable_jnt_comp,
      .enable_cdef = seq_header->enable_cdef,
      .mono_chrome = seq_header->color_config.mono_chrome,
      .color_range = seq_header->color_config.color_range,
      .subsampling_x = seq_header->color_config.subsampling_x,
      .subsampling_y = seq_header->color_config.subsampling_y,
      .film_grain_params_present = seq_header->film_grain_params_present,
    },
    /* Large-scale-tile anchor frames are not supported here. */
    .anchor_frames_num = 0,
    .anchor_frames_list = NULL,
    .frame_width_minus1 = frame_header->upscaled_width - 1,
    .frame_height_minus1 = frame_header->frame_height - 1,
    .output_frame_width_in_tiles_minus_1 = 0,
    .output_frame_height_in_tiles_minus_1 = 0,
    .order_hint = frame_header->order_hint,
    /* Segmentation */
    .seg_info.segment_info_fields.bits = {
      .enabled = frame_header->segmentation_params.segmentation_enabled,
      .update_map = frame_header->segmentation_params.segmentation_update_map,
      .temporal_update =
        frame_header->segmentation_params.segmentation_temporal_update,
      .update_data =
        frame_header->segmentation_params.segmentation_update_data,
    },
    /* FilmGrain */
    .film_grain_info = {
      .film_grain_info_fields.bits = {
        .apply_grain = frame_header->film_grain_params.apply_grain,
        .chroma_scaling_from_luma =
          frame_header->film_grain_params.chroma_scaling_from_luma,
        .grain_scaling_minus_8 =
          frame_header->film_grain_params.grain_scaling_minus_8,
        .ar_coeff_lag = frame_header->film_grain_params.ar_coeff_lag,
        .ar_coeff_shift_minus_6 =
          frame_header->film_grain_params.ar_coeff_shift_minus_6,
        .grain_scale_shift = frame_header->film_grain_params.grain_scale_shift,
        .overlap_flag = frame_header->film_grain_params.overlap_flag,
        .clip_to_restricted_range =
          frame_header->film_grain_params.clip_to_restricted_range,
      },
      .grain_seed = frame_header->film_grain_params.grain_seed,
      .cb_mult = frame_header->film_grain_params.cb_mult,
      .cb_luma_mult = frame_header->film_grain_params.cb_luma_mult,
      .cb_offset = frame_header->film_grain_params.cb_offset,
      .cr_mult = frame_header->film_grain_params.cr_mult,
      .cr_luma_mult = frame_header->film_grain_params.cr_luma_mult,
      .cr_offset = frame_header->film_grain_params.cr_offset,
    },
    .tile_cols = frame_header->tile_info.tile_cols,
    .tile_rows = frame_header->tile_info.tile_rows,
    .context_update_tile_id = frame_header->tile_info.context_update_tile_id,
    .pic_info_fields.bits = {
      .frame_type = frame_header->frame_type,
      .show_frame = frame_header->show_frame,
      .showable_frame = frame_header->showable_frame,
      .error_resilient_mode = frame_header->error_resilient_mode,
      .disable_cdf_update = frame_header->disable_cdf_update,
      .allow_screen_content_tools = frame_header->allow_screen_content_tools,
      .force_integer_mv = frame_header->force_integer_mv,
      .allow_intrabc = frame_header->allow_intrabc,
      .use_superres = frame_header->use_superres,
      .allow_high_precision_mv = frame_header->allow_high_precision_mv,
      .is_motion_mode_switchable = frame_header->is_motion_mode_switchable,
      .use_ref_frame_mvs = frame_header->use_ref_frame_mvs,
      .disable_frame_end_update_cdf =
        frame_header->disable_frame_end_update_cdf,
      .uniform_tile_spacing_flag =
        frame_header->tile_info.uniform_tile_spacing_flag,
      .allow_warped_motion = frame_header->allow_warped_motion,
    },
    .superres_scale_denominator = frame_header->superres_denom,
    .interp_filter = frame_header->interpolation_filter,
    /* loop filter */
    .loop_filter_info_fields.bits = {
      .sharpness_level =
        frame_header->loop_filter_params.loop_filter_sharpness,
      .mode_ref_delta_enabled =
        frame_header->loop_filter_params.loop_filter_delta_enabled,
      .mode_ref_delta_update =
        frame_header->loop_filter_params.loop_filter_delta_update,
    },
    .mode_control_fields.bits = {
      .delta_lf_present_flag =
        frame_header->loop_filter_params.delta_lf_present,
      .log2_delta_lf_res = frame_header->loop_filter_params.delta_lf_res,
      .delta_lf_multi = frame_header->loop_filter_params.delta_lf_multi,
      .delta_q_present_flag =
        frame_header->quantization_params.delta_q_present,
      .log2_delta_q_res = frame_header->quantization_params.delta_q_res,
      .tx_mode = frame_header->tx_mode,
      .reference_select = frame_header->reference_select,
      .reduced_tx_set_used = frame_header->reduced_tx_set,
      .skip_mode_present = frame_header->skip_mode_present,
    },
    /* quantization */
    .base_qindex = frame_header->quantization_params.base_q_idx,
    .y_dc_delta_q = frame_header->quantization_params.delta_q_y_dc,
    .u_dc_delta_q = frame_header->quantization_params.delta_q_u_dc,
    .u_ac_delta_q = frame_header->quantization_params.delta_q_u_ac,
    .v_dc_delta_q = frame_header->quantization_params.delta_q_v_dc,
    .v_ac_delta_q = frame_header->quantization_params.delta_q_v_ac,
    /* loop restoration */
    .loop_restoration_fields.bits = {
      .yframe_restoration_type =
        frame_header->loop_restoration_params.frame_restoration_type[0],
      .cbframe_restoration_type =
        frame_header->loop_restoration_params.frame_restoration_type[1],
      .crframe_restoration_type =
        frame_header->loop_restoration_params.frame_restoration_type[2],
      .lr_unit_shift = frame_header->loop_restoration_params.lr_unit_shift,
      .lr_uv_shift = frame_header->loop_restoration_params.lr_uv_shift,
    },
  };
  /* *INDENT-ON* */

  /* Map the sequence bit depth onto VA's 0/1/2 index (8/10/12 bit). */
  if (seq_header->bit_depth == 8) {
    pic_param.bit_depth_idx = 0;
  } else if (seq_header->bit_depth == 10) {
    pic_param.bit_depth_idx = 1;
  } else if (seq_header->bit_depth == 12) {
    pic_param.bit_depth_idx = 2;
  } else {
    g_assert_not_reached ();
  }

  /* With film grain the pre-grain reconstruction goes to the aux surface
   * and the grain-applied picture to the display surface. */
  if (frame_header->film_grain_params.apply_grain) {
    pic_param.current_frame = gst_va_decode_picture_get_aux_surface (va_pic);
    pic_param.current_display_picture =
        gst_va_decode_picture_get_surface (va_pic);
  } else {
    pic_param.current_frame = gst_va_decode_picture_get_surface (va_pic);
    pic_param.current_display_picture = VA_INVALID_SURFACE;
  }

  /* Reference surfaces: use each DPB entry's pre-grain (aux) surface when
   * that entry had film grain applied. */
  for (i = 0; i < GST_AV1_NUM_REF_FRAMES; i++) {
    if (dpb->pic_list[i]) {
      if (dpb->pic_list[i]->apply_grain) {
        pic_param.ref_frame_map[i] = gst_va_decode_picture_get_aux_surface
            (gst_av1_picture_get_user_data (dpb->pic_list[i]));
      } else {
        pic_param.ref_frame_map[i] = gst_va_decode_picture_get_surface
            (gst_av1_picture_get_user_data (dpb->pic_list[i]));
      }
    } else {
      pic_param.ref_frame_map[i] = VA_INVALID_SURFACE;
    }
  }
  for (i = 0; i < GST_AV1_REFS_PER_FRAME; i++) {
    pic_param.ref_frame_idx[i] = frame_header->ref_frame_idx[i];
  }
  pic_param.primary_ref_frame = frame_header->primary_ref_frame;

  _setup_segment_info (&pic_param, frame_header);
  _setup_film_grain_info (&pic_param, frame_header);

  for (i = 0; i < 63; i++) {
    pic_param.width_in_sbs_minus_1[i] =
        frame_header->tile_info.width_in_sbs_minus_1[i];
    pic_param.height_in_sbs_minus_1[i] =
        frame_header->tile_info.height_in_sbs_minus_1[i];
  }

  _setup_loop_filter_info (&pic_param, frame_header);
  _setup_quantization_info (&pic_param, frame_header);
  _setup_cdef_info (&pic_param, frame_header, seq_header->num_planes);
  _setup_global_motion_info (&pic_param, frame_header);

  if (!gst_va_decoder_add_param_buffer (base->decoder, va_pic,
          VAPictureParameterBufferType, &pic_param, sizeof (pic_param)))
    return GST_FLOW_ERROR;

  return GST_FLOW_OK;
}
777
778 static GstFlowReturn
gst_va_av1_dec_decode_tile(GstAV1Decoder * decoder,GstAV1Picture * picture,GstAV1Tile * tile)779 gst_va_av1_dec_decode_tile (GstAV1Decoder * decoder, GstAV1Picture * picture,
780 GstAV1Tile * tile)
781 {
782 GstVaAV1Dec *self = GST_VA_AV1_DEC (decoder);
783 GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
784 GstAV1TileGroupOBU *tile_group = &tile->tile_group;
785 GstVaDecodePicture *va_pic;
786 guint i;
787 VASliceParameterBufferAV1 slice_param[GST_AV1_MAX_TILE_COUNT];
788
789 GST_TRACE_OBJECT (self, "-");
790
791 for (i = 0; i < tile_group->tg_end - tile_group->tg_start + 1; i++) {
792 slice_param[i] = (VASliceParameterBufferAV1) {
793 };
794 slice_param[i].slice_data_size =
795 tile_group->entry[tile_group->tg_start + i].tile_size;
796 slice_param[i].slice_data_offset =
797 tile_group->entry[tile_group->tg_start + i].tile_offset;
798 slice_param[i].tile_row =
799 tile_group->entry[tile_group->tg_start + i].tile_row;
800 slice_param[i].tile_column =
801 tile_group->entry[tile_group->tg_start + i].tile_col;
802 slice_param[i].slice_data_flag = 0;
803 }
804
805 va_pic = gst_av1_picture_get_user_data (picture);
806
807 if (!gst_va_decoder_add_slice_buffer_with_n_params (base->decoder, va_pic,
808 slice_param, sizeof (VASliceParameterBufferAV1), i, tile->obu.data,
809 tile->obu.obu_size)) {
810 return GST_FLOW_ERROR;
811 }
812
813 return GST_FLOW_OK;
814 }
815
816 static GstFlowReturn
gst_va_av1_dec_end_picture(GstAV1Decoder * decoder,GstAV1Picture * picture)817 gst_va_av1_dec_end_picture (GstAV1Decoder * decoder, GstAV1Picture * picture)
818 {
819 GstVaAV1Dec *self = GST_VA_AV1_DEC (decoder);
820 GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
821 GstVaDecodePicture *va_pic;
822
823 GST_LOG_OBJECT (self, "end picture %p, (system_frame_number %d)",
824 picture, picture->system_frame_number);
825
826 va_pic = gst_av1_picture_get_user_data (picture);
827
828 if (!gst_va_decoder_decode_with_aux_surface (base->decoder, va_pic,
829 picture->apply_grain)) {
830 return GST_FLOW_ERROR;
831 }
832
833 return GST_FLOW_OK;
834 }
835
/* Push the decoded picture downstream. Drops the frame when the earlier
 * output-buffer allocation failed; for show_existing_frame the duplicated
 * picture's shared buffer becomes the output. Consumes @picture and
 * @frame in all paths. */
static GstFlowReturn
gst_va_av1_dec_output_picture (GstAV1Decoder * decoder,
    GstVideoCodecFrame * frame, GstAV1Picture * picture)
{
  GstVaAV1Dec *self = GST_VA_AV1_DEC (decoder);
  GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);

  g_assert (picture->frame_hdr.show_frame ||
      picture->frame_hdr.show_existing_frame);

  GST_LOG_OBJECT (self,
      "Outputting picture %p (system_frame_number %d)",
      picture, picture->system_frame_number);

  /* Allocation failure recorded in new_picture(): drop instead of push. */
  if (self->last_ret != GST_FLOW_OK) {
    gst_av1_picture_unref (picture);
    gst_video_decoder_drop_frame (GST_VIDEO_DECODER (self), frame);
    return self->last_ret;
  }

  /* Duplicated pictures carry no freshly decoded buffer; reuse the parent
   * picture's buffer attached in duplicate_picture(). */
  if (picture->frame_hdr.show_existing_frame) {
    GstVaDecodePicture *pic;

    g_assert (!frame->output_buffer);
    pic = gst_av1_picture_get_user_data (picture);
    frame->output_buffer = gst_buffer_ref (pic->gstbuffer);
  }

  /* Copy VA memory into ordinary system memory when downstream cannot
   * handle VA surfaces directly. */
  if (base->copy_frames)
    gst_va_base_dec_copy_output_buffer (base, frame);

  gst_av1_picture_unref (picture);

  return gst_video_decoder_finish_frame (GST_VIDEO_DECODER (self), frame);
}
871
/* Instance init: delegate common VA decoder setup to the base class. */
static void
gst_va_av1_dec_init (GTypeInstance * instance, gpointer g_class)
{
  gst_va_base_dec_init (GST_VA_BASE_DEC (instance), GST_CAT_DEFAULT);
}
877
/* Dispose: close the VA decoder held by the base class, then chain up. */
static void
gst_va_av1_dec_dispose (GObject * object)
{
  gst_va_base_dec_close (GST_VIDEO_DECODER (object));
  G_OBJECT_CLASS (parent_class)->dispose (object);
}
884
/* Class initializer for the dynamically registered decoder type.
 * @class_data is the struct CData built in gst_va_av1_dec_register();
 * all of its fields are consumed and freed before this returns. */
static void
gst_va_av1_dec_class_init (gpointer g_class, gpointer class_data)
{
  GstCaps *src_doc_caps, *sink_doc_caps;
  GObjectClass *gobject_class = G_OBJECT_CLASS (g_class);
  GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
  GstAV1DecoderClass *av1decoder_class = GST_AV1_DECODER_CLASS (g_class);
  GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_CLASS (g_class);
  struct CData *cdata = class_data;
  gchar *long_name;

  /* With more than one VA device the description (render node basename)
   * is set, so each registered element gets a distinguishable name. */
  if (cdata->description) {
    long_name = g_strdup_printf ("VA-API AV1 Decoder in %s",
        cdata->description);
  } else {
    long_name = g_strdup ("VA-API AV1 Decoder");
  }

  gst_element_class_set_metadata (element_class, long_name,
      "Codec/Decoder/Video/Hardware",
      "VA-API based AV1 video decoder", "He Junyan <junyan.he@intel.com>");

  /* Caps used for documentation purposes only; presumably ownership is
   * taken by gst_va_base_dec_class_init() below — confirm there. */
  sink_doc_caps = gst_caps_from_string (sink_caps_str);
  src_doc_caps = gst_caps_from_string (src_caps_str);

  /* File-scope parent_class pointer, used by dispose to chain up. */
  parent_class = g_type_class_peek_parent (g_class);

  gst_va_base_dec_class_init (GST_VA_BASE_DEC_CLASS (g_class), AV1,
      cdata->render_device_path, cdata->sink_caps, cdata->src_caps,
      src_doc_caps, sink_doc_caps);

  gobject_class->dispose = gst_va_av1_dec_dispose;

  decoder_class->getcaps = GST_DEBUG_FUNCPTR (gst_va_av1_dec_getcaps);
  decoder_class->negotiate = GST_DEBUG_FUNCPTR (gst_va_av1_dec_negotiate);

  /* Hook up the GstAV1Decoder state-machine vfuncs. */
  av1decoder_class->new_sequence =
      GST_DEBUG_FUNCPTR (gst_va_av1_dec_new_sequence);
  av1decoder_class->new_picture =
      GST_DEBUG_FUNCPTR (gst_va_av1_dec_new_picture);
  av1decoder_class->duplicate_picture =
      GST_DEBUG_FUNCPTR (gst_va_av1_dec_duplicate_picture);
  av1decoder_class->start_picture =
      GST_DEBUG_FUNCPTR (gst_va_av1_dec_start_picture);
  av1decoder_class->decode_tile =
      GST_DEBUG_FUNCPTR (gst_va_av1_dec_decode_tile);
  av1decoder_class->end_picture =
      GST_DEBUG_FUNCPTR (gst_va_av1_dec_end_picture);
  av1decoder_class->output_picture =
      GST_DEBUG_FUNCPTR (gst_va_av1_dec_output_picture);

  /* Release the class-data container; the base class init above is
   * presumed to have copied/reffed whatever it keeps — confirm. */
  g_free (long_name);
  g_free (cdata->description);
  g_free (cdata->render_device_path);
  gst_caps_unref (cdata->src_caps);
  gst_caps_unref (cdata->sink_caps);
  g_free (cdata);
}
943
static gpointer
_register_debug_category (gpointer data)
{
  /* GOnce callback: initialize the vaav1dec debug category exactly once,
   * no matter how many VA devices register this element. */
  GST_DEBUG_CATEGORY_INIT (gst_va_av1dec_debug, "vaav1dec", 0,
      "VA AV1 decoder");

  return NULL;
}
952
953 gboolean
gst_va_av1_dec_register(GstPlugin * plugin,GstVaDevice * device,GstCaps * sink_caps,GstCaps * src_caps,guint rank)954 gst_va_av1_dec_register (GstPlugin * plugin, GstVaDevice * device,
955 GstCaps * sink_caps, GstCaps * src_caps, guint rank)
956 {
957 static GOnce debug_once = G_ONCE_INIT;
958 GType type;
959 GTypeInfo type_info = {
960 .class_size = sizeof (GstVaAV1DecClass),
961 .class_init = gst_va_av1_dec_class_init,
962 .instance_size = sizeof (GstVaAV1Dec),
963 .instance_init = gst_va_av1_dec_init,
964 };
965 struct CData *cdata;
966 gboolean ret;
967 gchar *type_name, *feature_name;
968
969 g_return_val_if_fail (GST_IS_PLUGIN (plugin), FALSE);
970 g_return_val_if_fail (GST_IS_VA_DEVICE (device), FALSE);
971 g_return_val_if_fail (GST_IS_CAPS (sink_caps), FALSE);
972 g_return_val_if_fail (GST_IS_CAPS (src_caps), FALSE);
973
974 cdata = g_new (struct CData, 1);
975 cdata->description = NULL;
976 cdata->render_device_path = g_strdup (device->render_device_path);
977 cdata->sink_caps = _complete_sink_caps (sink_caps);
978 cdata->src_caps = gst_caps_ref (src_caps);
979
980 /* class data will be leaked if the element never gets instantiated */
981 GST_MINI_OBJECT_FLAG_SET (cdata->sink_caps,
982 GST_MINI_OBJECT_FLAG_MAY_BE_LEAKED);
983 GST_MINI_OBJECT_FLAG_SET (src_caps, GST_MINI_OBJECT_FLAG_MAY_BE_LEAKED);
984
985 type_info.class_data = cdata;
986
987 type_name = g_strdup ("GstVaAV1Dec");
988 feature_name = g_strdup ("vaav1dec");
989
990 /* The first decoder to be registered should use a constant name,
991 * like vaav1dec, for any additional decoders, we create unique
992 * names, using inserting the render device name. */
993 if (g_type_from_name (type_name)) {
994 gchar *basename = g_path_get_basename (device->render_device_path);
995 g_free (type_name);
996 g_free (feature_name);
997 type_name = g_strdup_printf ("GstVa%sAV1Dec", basename);
998 feature_name = g_strdup_printf ("va%sav1dec", basename);
999 cdata->description = basename;
1000
1001 /* lower rank for non-first device */
1002 if (rank > 0)
1003 rank--;
1004 }
1005
1006 g_once (&debug_once, _register_debug_category, NULL);
1007
1008 type = g_type_register_static (GST_TYPE_AV1_DECODER,
1009 type_name, &type_info, 0);
1010
1011 ret = gst_element_register (plugin, feature_name, rank, type);
1012
1013 g_free (type_name);
1014 g_free (feature_name);
1015
1016 return ret;
1017 }
1018