1 /* GStreamer
2 * Copyright (C) 2008 David Schleef <ds@schleef.org>
3 * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
4 * Copyright (C) 2011 Nokia Corporation. All rights reserved.
5 * Contact: Stefan Kost <stefan.kost@nokia.com>
6 * Copyright (C) 2012 Collabora Ltd.
7 * Author : Edward Hervey <edward@collabora.com>
8 *
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Library General Public
11 * License as published by the Free Software Foundation; either
12 * version 2 of the License, or (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Library General Public License for more details.
18 *
19 * You should have received a copy of the GNU Library General Public
20 * License along with this library; if not, write to the
21 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
22 * Boston, MA 02110-1301, USA.
23 */
24
25 /**
26 * SECTION:gstvideodecoder
27 * @title: GstVideoDecoder
28 * @short_description: Base class for video decoders
29 * @see_also:
30 *
31 * This base class is for video decoders turning encoded data into raw video
32 * frames.
33 *
34 * The GstVideoDecoder base class and derived subclasses should cooperate as
35 * follows:
36 *
37 * ## Configuration
38 *
39 * * Initially, GstVideoDecoder calls @start when the decoder element
40 * is activated, which allows the subclass to perform any global setup.
41 *
42 * * GstVideoDecoder calls @set_format to inform the subclass of caps
43 * describing input video data that it is about to receive, including
44 * possibly configuration data.
45 * While unlikely, it might be called more than once, if changing input
46 * parameters require reconfiguration.
47 *
48 * * Incoming data buffers are processed as needed, described in Data
49 * Processing below.
50 *
51 * * GstVideoDecoder calls @stop at end of all processing.
52 *
53 * ## Data processing
54 *
55 * * The base class gathers input data, and optionally allows subclass
56 * to parse this into subsequently manageable chunks, typically
57 * corresponding to and referred to as 'frames'.
58 *
59 * * Each input frame is provided in turn to the subclass' @handle_frame
60 * callback.
61 * The ownership of the frame is given to the @handle_frame callback.
62 *
63 * * If codec processing results in decoded data, the subclass should call
64 * @gst_video_decoder_finish_frame to have decoded data pushed.
65 * downstream. Otherwise, the subclass must call
66 * @gst_video_decoder_drop_frame, to allow the base class to do timestamp
67 * and offset tracking, and possibly to requeue the frame for a later
68 * attempt in the case of reverse playback.
69 *
70 * ## Shutdown phase
71 *
72 * * The GstVideoDecoder class calls @stop to inform the subclass that data
73 * parsing will be stopped.
74 *
75 * ## Additional Notes
76 *
77 * * Seeking/Flushing
78 *
79 * * When the pipeline is seeked or otherwise flushed, the subclass is
80 * informed via a call to its @reset callback, with the hard parameter
81 * set to true. This indicates the subclass should drop any internal data
82 * queues and timestamps and prepare for a fresh set of buffers to arrive
83 * for parsing and decoding.
84 *
85 * * End Of Stream
86 *
87 * * At end-of-stream, the subclass @parse function may be called some final
88 * times with the at_eos parameter set to true, indicating that the element
 * should not expect any more data to be arriving, and it should parse any
 * remaining frames and call gst_video_decoder_have_frame() if possible.
91 *
92 * The subclass is responsible for providing pad template caps for
93 * source and sink pads. The pads need to be named "sink" and "src". It also
 * needs to provide information about the output caps, when they are known.
95 * This may be when the base class calls the subclass' @set_format function,
96 * though it might be during decoding, before calling
97 * @gst_video_decoder_finish_frame. This is done via
98 * @gst_video_decoder_set_output_state
99 *
100 * The subclass is also responsible for providing (presentation) timestamps
101 * (likely based on corresponding input ones). If that is not applicable
102 * or possible, the base class provides limited framerate based interpolation.
103 *
104 * Similarly, the base class provides some limited (legacy) seeking support
105 * if specifically requested by the subclass, as full-fledged support
106 * should rather be left to upstream demuxer, parser or alike. This simple
107 * approach caters for seeking and duration reporting using estimated input
108 * bitrates. To enable it, a subclass should call
109 * @gst_video_decoder_set_estimate_rate to enable handling of incoming
110 * byte-streams.
111 *
112 * The base class provides some support for reverse playback, in particular
113 * in case incoming data is not packetized or upstream does not provide
114 * fragments on keyframe boundaries. However, the subclass should then be
115 * prepared for the parsing and frame processing stage to occur separately
 * (in normal forward processing, the latter immediately follows the former).
 * The subclass also needs to ensure the parsing stage properly marks
118 * keyframes, unless it knows the upstream elements will do so properly for
119 * incoming data.
120 *
121 * The bare minimum that a functional subclass needs to implement is:
122 *
123 * * Provide pad templates
124 * * Inform the base class of output caps via
125 * @gst_video_decoder_set_output_state
126 *
127 * * Parse input data, if it is not considered packetized from upstream
128 * Data will be provided to @parse which should invoke
129 * @gst_video_decoder_add_to_frame and @gst_video_decoder_have_frame to
130 * separate the data belonging to each video frame.
131 *
132 * * Accept data in @handle_frame and provide decoded results to
133 * @gst_video_decoder_finish_frame, or call @gst_video_decoder_drop_frame.
134 */
135
136 #ifdef HAVE_CONFIG_H
137 #include "config.h"
138 #endif
139
140 /* TODO
141 *
142 * * Add a flag/boolean for I-frame-only/image decoders so we can do extra
143 * features, like applying QoS on input (as opposed to after the frame is
144 * decoded).
145 * * Add a flag/boolean for decoders that require keyframes, so the base
146 * class can automatically discard non-keyframes before one has arrived
147 * * Detect reordered frame/timestamps and fix the pts/dts
148 * * Support for GstIndex (or shall we not care ?)
149 * * Calculate actual latency based on input/output timestamp/frame_number
150 * and if it exceeds the recorded one, save it and emit a GST_MESSAGE_LATENCY
151 * * Emit latency message when it changes
152 *
153 */
154
155 /* Implementation notes:
156 * The Video Decoder base class operates in 2 primary processing modes, depending
157 * on whether forward or reverse playback is requested.
158 *
159 * Forward playback:
160 * * Incoming buffer -> @parse() -> add_to_frame()/have_frame() ->
161 * handle_frame() -> push downstream
162 *
163 * Reverse playback is more complicated, since it involves gathering incoming
164 * data regions as we loop backwards through the upstream data. The processing
165 * concept (using incoming buffers as containing one frame each to simplify
166 * things) is:
167 *
168 * Upstream data we want to play:
169 * Buffer encoded order: 1 2 3 4 5 6 7 8 9 EOS
170 * Keyframe flag: K K
171 * Groupings: AAAAAAA BBBBBBB CCCCCCC
172 *
173 * Input:
174 * Buffer reception order: 7 8 9 4 5 6 1 2 3 EOS
175 * Keyframe flag: K K
176 * Discont flag: D D D
177 *
178 * - Each Discont marks a discont in the decoding order.
179 * - The keyframes mark where we can start decoding.
180 *
181 * Initially, we prepend incoming buffers to the gather queue. Whenever the
182 * discont flag is set on an incoming buffer, the gather queue is flushed out
183 * before the new buffer is collected.
184 *
185 * The above data will be accumulated in the gather queue like this:
186 *
187 * gather queue: 9 8 7
188 * D
189 *
190 * When buffer 4 is received (with a DISCONT), we flush the gather queue like
191 * this:
192 *
193 * while (gather)
194 * take head of queue and prepend to parse queue (this reverses the
195 * sequence, so parse queue is 7 -> 8 -> 9)
196 *
197 * Next, we process the parse queue, which now contains all un-parsed packets
198 * (including any leftover ones from the previous decode section)
199 *
200 * for each buffer now in the parse queue:
201 * Call the subclass parse function, prepending each resulting frame to
202 * the parse_gather queue. Buffers which precede the first one that
203 * produces a parsed frame are retained in the parse queue for
204 * re-processing on the next cycle of parsing.
205 *
206 * The parse_gather queue now contains frame objects ready for decoding,
207 * in reverse order.
208 * parse_gather: 9 -> 8 -> 7
209 *
210 * while (parse_gather)
211 * Take the head of the queue and prepend it to the decode queue
212 * If the frame was a keyframe, process the decode queue
213 * decode is now 7-8-9
214 *
215 * Processing the decode queue results in frames with attached output buffers
216 * stored in the 'output_queue' ready for outputting in reverse order.
217 *
218 * After we flushed the gather queue and parsed it, we add 4 to the (now empty)
219 * gather queue. We get the following situation:
220 *
221 * gather queue: 4
222 * decode queue: 7 8 9
223 *
224 * After we received 5 (Keyframe) and 6:
225 *
226 * gather queue: 6 5 4
227 * decode queue: 7 8 9
228 *
229 * When we receive 1 (DISCONT) which triggers a flush of the gather queue:
230 *
231 * Copy head of the gather queue (6) to decode queue:
232 *
233 * gather queue: 5 4
234 * decode queue: 6 7 8 9
235 *
236 * Copy head of the gather queue (5) to decode queue. This is a keyframe so we
237 * can start decoding.
238 *
239 * gather queue: 4
240 * decode queue: 5 6 7 8 9
241 *
242 * Decode frames in decode queue, store raw decoded data in output queue, we
243 * can take the head of the decode queue and prepend the decoded result in the
244 * output queue:
245 *
246 * gather queue: 4
247 * decode queue:
248 * output queue: 9 8 7 6 5
249 *
250 * Now output all the frames in the output queue, picking a frame from the
251 * head of the queue.
252 *
253 * Copy head of the gather queue (4) to decode queue, we flushed the gather
254 * queue and can now store input buffer in the gather queue:
255 *
256 * gather queue: 1
257 * decode queue: 4
258 *
259 * When we receive EOS, the queue looks like:
260 *
261 * gather queue: 3 2 1
262 * decode queue: 4
263 *
264 * Fill decode queue, first keyframe we copy is 2:
265 *
266 * gather queue: 1
267 * decode queue: 2 3 4
268 *
269 * Decoded output:
270 *
271 * gather queue: 1
272 * decode queue:
273 * output queue: 4 3 2
274 *
275 * Leftover buffer 1 cannot be decoded and must be discarded.
276 */
277
278 #include "gstvideodecoder.h"
279 #include "gstvideoutils.h"
280 #include "gstvideoutilsprivate.h"
281
282 #include <gst/video/video.h>
283 #include <gst/video/video-event.h>
284 #include <gst/video/gstvideopool.h>
285 #include <gst/video/gstvideometa.h>
286 #include <string.h>
287
288 GST_DEBUG_CATEGORY (videodecoder_debug);
289 #define GST_CAT_DEFAULT videodecoder_debug
290
struct _GstVideoDecoderPrivate
{
  /* FIXME introduce a context ? */

  /* downstream buffer allocation state */
  GstBufferPool *pool;
  GstAllocator *allocator;
  GstAllocationParams params;

  /* parse tracking */
  /* input data */
  GstAdapter *input_adapter;
  /* assembles current frame */
  GstAdapter *output_adapter;

  /* Whether we attempt to convert newsegment from bytes to
   * time using a bitrate estimation */
  gboolean do_estimate_rate;

  /* Whether input is considered packetized or not */
  gboolean packetized;

  /* Error handling */
  gint max_errors;
  gint error_count;
  gboolean had_output_data;
  gboolean had_input_data;

  /* whether a format (input caps) is required before processing data */
  gboolean needs_format;
  /* TRUE while input_segment and output_segment are identical */
  gboolean in_out_segment_sync;

  /* ... being tracked here;
   * only available during parsing */
  GstVideoCodecFrame *current_frame;
  /* events that should apply to the current frame */
  GList *current_frame_events;
  /* events that should be pushed before the next frame */
  GList *pending_events;

  /* relative offset of input data */
  guint64 input_offset;
  /* relative offset of frame */
  guint64 frame_offset;
  /* tracking ts and offsets */
  GList *timestamps;

  /* last outgoing ts */
  GstClockTime last_timestamp_out;
  /* incoming pts - dts */
  GstClockTime pts_delta;
  gboolean reordered_output;

  /* reverse playback queues; see the "Implementation notes" at the top
   * of this file for how they interact */
  /* collect input */
  GList *gather;
  /* to-be-parsed */
  GList *parse;
  /* collected parsed frames */
  GList *parse_gather;
  /* frames to be handled == decoded */
  GList *decode;
  /* collected output - of buffer objects, not frames */
  GList *output_queued;


  /* base_picture_number is the picture number of the reference picture */
  guint64 base_picture_number;
  /* combine with base_picture_number, framerate and calcs to yield (presentation) ts */
  GstClockTime base_timestamp;

  /* FIXME : reorder_depth is never set */
  int reorder_depth;
  int distance_from_sync;

  guint32 system_frame_number;
  guint32 decode_frame_number;

  GList *frames;                /* Protected with OBJECT_LOCK */
  GstVideoCodecState *input_state;
  GstVideoCodecState *output_state;     /* OBJECT_LOCK and STREAM_LOCK */
  gboolean output_state_changed;

  /* QoS properties */
  gdouble proportion;           /* OBJECT_LOCK */
  GstClockTime earliest_time;   /* OBJECT_LOCK */
  GstClockTime qos_frame_duration;      /* OBJECT_LOCK */
  gboolean discont;
  /* qos messages: frames dropped/processed */
  guint dropped;
  guint processed;

  /* Outgoing byte size ? */
  gint64 bytes_out;
  gint64 time;

  gint64 min_latency;
  gint64 max_latency;

  /* upstream stream tags (global tags are passed through as-is) */
  GstTagList *upstream_tags;

  /* subclass tags */
  GstTagList *tags;
  GstTagMergeMode tags_merge_mode;

  gboolean tags_changed;

  /* flags */
  gboolean use_default_pad_acceptcaps;

#ifndef GST_DISABLE_DEBUG
  /* Diagnostic time for reporting the time
   * from flush to first output */
  GstClockTime last_reset_time;
#endif
#ifdef OHOS_OPT_PERFORMANCE // ohos.opt.performance.0001: first frame decoded cost time
  gboolean has_recv_first_key_frame;
  gboolean has_push_first_frame;
#endif
};
411
412 static GstElementClass *parent_class = NULL;
413 static gint private_offset = 0;
414
415 static void gst_video_decoder_class_init (GstVideoDecoderClass * klass);
416 static void gst_video_decoder_init (GstVideoDecoder * dec,
417 GstVideoDecoderClass * klass);
418
419 static void gst_video_decoder_finalize (GObject * object);
420
421 static gboolean gst_video_decoder_setcaps (GstVideoDecoder * dec,
422 GstCaps * caps);
423 static gboolean gst_video_decoder_sink_event (GstPad * pad, GstObject * parent,
424 GstEvent * event);
425 static gboolean gst_video_decoder_src_event (GstPad * pad, GstObject * parent,
426 GstEvent * event);
427 static GstFlowReturn gst_video_decoder_chain (GstPad * pad, GstObject * parent,
428 GstBuffer * buf);
429 static gboolean gst_video_decoder_sink_query (GstPad * pad, GstObject * parent,
430 GstQuery * query);
431 static GstStateChangeReturn gst_video_decoder_change_state (GstElement *
432 element, GstStateChange transition);
433 static gboolean gst_video_decoder_src_query (GstPad * pad, GstObject * parent,
434 GstQuery * query);
435 static void gst_video_decoder_reset (GstVideoDecoder * decoder, gboolean full,
436 gboolean flush_hard);
437
438 static GstFlowReturn gst_video_decoder_decode_frame (GstVideoDecoder * decoder,
439 GstVideoCodecFrame * frame);
440
441 static void gst_video_decoder_push_event_list (GstVideoDecoder * decoder,
442 GList * events);
443 static GstClockTime gst_video_decoder_get_frame_duration (GstVideoDecoder *
444 decoder, GstVideoCodecFrame * frame);
445 static GstVideoCodecFrame *gst_video_decoder_new_frame (GstVideoDecoder *
446 decoder);
447 static GstFlowReturn gst_video_decoder_clip_and_push_buf (GstVideoDecoder *
448 decoder, GstBuffer * buf);
449 static GstFlowReturn gst_video_decoder_flush_parse (GstVideoDecoder * dec,
450 gboolean at_eos);
451
452 static void gst_video_decoder_clear_queues (GstVideoDecoder * dec);
453
454 static gboolean gst_video_decoder_sink_event_default (GstVideoDecoder * decoder,
455 GstEvent * event);
456 static gboolean gst_video_decoder_src_event_default (GstVideoDecoder * decoder,
457 GstEvent * event);
458 static gboolean gst_video_decoder_decide_allocation_default (GstVideoDecoder *
459 decoder, GstQuery * query);
460 static gboolean gst_video_decoder_propose_allocation_default (GstVideoDecoder *
461 decoder, GstQuery * query);
462 static gboolean gst_video_decoder_negotiate_default (GstVideoDecoder * decoder);
463 static GstFlowReturn gst_video_decoder_parse_available (GstVideoDecoder * dec,
464 gboolean at_eos, gboolean new_buffer);
465 static gboolean gst_video_decoder_negotiate_unlocked (GstVideoDecoder *
466 decoder);
467 static gboolean gst_video_decoder_sink_query_default (GstVideoDecoder * decoder,
468 GstQuery * query);
469 static gboolean gst_video_decoder_src_query_default (GstVideoDecoder * decoder,
470 GstQuery * query);
471
472 static gboolean gst_video_decoder_transform_meta_default (GstVideoDecoder *
473 decoder, GstVideoCodecFrame * frame, GstMeta * meta);
474
475 /* we can't use G_DEFINE_ABSTRACT_TYPE because we need the klass in the _init
476 * method to get to the padtemplates */
477 GType
gst_video_decoder_get_type(void)478 gst_video_decoder_get_type (void)
479 {
480 static volatile gsize type = 0;
481
482 if (g_once_init_enter (&type)) {
483 GType _type;
484 static const GTypeInfo info = {
485 sizeof (GstVideoDecoderClass),
486 NULL,
487 NULL,
488 (GClassInitFunc) gst_video_decoder_class_init,
489 NULL,
490 NULL,
491 sizeof (GstVideoDecoder),
492 0,
493 (GInstanceInitFunc) gst_video_decoder_init,
494 };
495
496 _type = g_type_register_static (GST_TYPE_ELEMENT,
497 "GstVideoDecoder", &info, G_TYPE_FLAG_ABSTRACT);
498
499 private_offset =
500 g_type_add_instance_private (_type, sizeof (GstVideoDecoderPrivate));
501
502 g_once_init_leave (&type, _type);
503 }
504 return type;
505 }
506
507 static inline GstVideoDecoderPrivate *
gst_video_decoder_get_instance_private(GstVideoDecoder * self)508 gst_video_decoder_get_instance_private (GstVideoDecoder * self)
509 {
510 return (G_STRUCT_MEMBER_P (self, private_offset));
511 }
512
513 static void
gst_video_decoder_class_init(GstVideoDecoderClass * klass)514 gst_video_decoder_class_init (GstVideoDecoderClass * klass)
515 {
516 GObjectClass *gobject_class;
517 GstElementClass *gstelement_class;
518
519 gobject_class = G_OBJECT_CLASS (klass);
520 gstelement_class = GST_ELEMENT_CLASS (klass);
521
522 GST_DEBUG_CATEGORY_INIT (videodecoder_debug, "videodecoder", 0,
523 "Base Video Decoder");
524
525 parent_class = g_type_class_peek_parent (klass);
526
527 if (private_offset != 0)
528 g_type_class_adjust_private_offset (klass, &private_offset);
529
530 gobject_class->finalize = gst_video_decoder_finalize;
531
532 gstelement_class->change_state =
533 GST_DEBUG_FUNCPTR (gst_video_decoder_change_state);
534
535 klass->sink_event = gst_video_decoder_sink_event_default;
536 klass->src_event = gst_video_decoder_src_event_default;
537 klass->decide_allocation = gst_video_decoder_decide_allocation_default;
538 klass->propose_allocation = gst_video_decoder_propose_allocation_default;
539 klass->negotiate = gst_video_decoder_negotiate_default;
540 klass->sink_query = gst_video_decoder_sink_query_default;
541 klass->src_query = gst_video_decoder_src_query_default;
542 klass->transform_meta = gst_video_decoder_transform_meta_default;
543 }
544
545 static void
gst_video_decoder_init(GstVideoDecoder * decoder,GstVideoDecoderClass * klass)546 gst_video_decoder_init (GstVideoDecoder * decoder, GstVideoDecoderClass * klass)
547 {
548 GstPadTemplate *pad_template;
549 GstPad *pad;
550
551 GST_DEBUG_OBJECT (decoder, "gst_video_decoder_init");
552
553 decoder->priv = gst_video_decoder_get_instance_private (decoder);
554
555 pad_template =
556 gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "sink");
557 g_return_if_fail (pad_template != NULL);
558
559 decoder->sinkpad = pad = gst_pad_new_from_template (pad_template, "sink");
560
561 gst_pad_set_chain_function (pad, GST_DEBUG_FUNCPTR (gst_video_decoder_chain));
562 gst_pad_set_event_function (pad,
563 GST_DEBUG_FUNCPTR (gst_video_decoder_sink_event));
564 gst_pad_set_query_function (pad,
565 GST_DEBUG_FUNCPTR (gst_video_decoder_sink_query));
566 gst_element_add_pad (GST_ELEMENT (decoder), decoder->sinkpad);
567
568 pad_template =
569 gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "src");
570 g_return_if_fail (pad_template != NULL);
571
572 decoder->srcpad = pad = gst_pad_new_from_template (pad_template, "src");
573
574 gst_pad_set_event_function (pad,
575 GST_DEBUG_FUNCPTR (gst_video_decoder_src_event));
576 gst_pad_set_query_function (pad,
577 GST_DEBUG_FUNCPTR (gst_video_decoder_src_query));
578 gst_element_add_pad (GST_ELEMENT (decoder), decoder->srcpad);
579
580 gst_segment_init (&decoder->input_segment, GST_FORMAT_TIME);
581 gst_segment_init (&decoder->output_segment, GST_FORMAT_TIME);
582
583 g_rec_mutex_init (&decoder->stream_lock);
584
585 decoder->priv->input_adapter = gst_adapter_new ();
586 decoder->priv->output_adapter = gst_adapter_new ();
587 decoder->priv->packetized = TRUE;
588 decoder->priv->needs_format = FALSE;
589
590 decoder->priv->min_latency = 0;
591 decoder->priv->max_latency = 0;
592
593 gst_video_decoder_reset (decoder, TRUE, TRUE);
594 }
595
596 static GstVideoCodecState *
_new_input_state(GstCaps * caps)597 _new_input_state (GstCaps * caps)
598 {
599 GstVideoCodecState *state;
600 GstStructure *structure;
601 const GValue *codec_data;
602
603 state = g_slice_new0 (GstVideoCodecState);
604 state->ref_count = 1;
605 gst_video_info_init (&state->info);
606 if (G_UNLIKELY (!gst_video_info_from_caps (&state->info, caps)))
607 goto parse_fail;
608 state->caps = gst_caps_ref (caps);
609
610 structure = gst_caps_get_structure (caps, 0);
611
612 codec_data = gst_structure_get_value (structure, "codec_data");
613 if (codec_data && G_VALUE_TYPE (codec_data) == GST_TYPE_BUFFER)
614 state->codec_data = GST_BUFFER (g_value_dup_boxed (codec_data));
615
616 return state;
617
618 parse_fail:
619 {
620 g_slice_free (GstVideoCodecState, state);
621 return NULL;
622 }
623 }
624
625 static GstVideoCodecState *
_new_output_state(GstVideoFormat fmt,GstVideoInterlaceMode mode,guint width,guint height,GstVideoCodecState * reference)626 _new_output_state (GstVideoFormat fmt, GstVideoInterlaceMode mode, guint width,
627 guint height, GstVideoCodecState * reference)
628 {
629 GstVideoCodecState *state;
630
631 state = g_slice_new0 (GstVideoCodecState);
632 state->ref_count = 1;
633 gst_video_info_init (&state->info);
634 if (!gst_video_info_set_interlaced_format (&state->info, fmt, mode, width,
635 height)) {
636 g_slice_free (GstVideoCodecState, state);
637 return NULL;
638 }
639
640 if (reference) {
641 GstVideoInfo *tgt, *ref;
642
643 tgt = &state->info;
644 ref = &reference->info;
645
646 /* Copy over extra fields from reference state */
647 tgt->interlace_mode = ref->interlace_mode;
648 tgt->flags = ref->flags;
649 /* only copy values that are not unknown so that we don't override the
650 * defaults. subclasses should really fill these in when they know. */
651 if (ref->chroma_site)
652 tgt->chroma_site = ref->chroma_site;
653 if (ref->colorimetry.range)
654 tgt->colorimetry.range = ref->colorimetry.range;
655 if (ref->colorimetry.matrix)
656 tgt->colorimetry.matrix = ref->colorimetry.matrix;
657 if (ref->colorimetry.transfer)
658 tgt->colorimetry.transfer = ref->colorimetry.transfer;
659 if (ref->colorimetry.primaries)
660 tgt->colorimetry.primaries = ref->colorimetry.primaries;
661 GST_DEBUG ("reference par %d/%d fps %d/%d",
662 ref->par_n, ref->par_d, ref->fps_n, ref->fps_d);
663 tgt->par_n = ref->par_n;
664 tgt->par_d = ref->par_d;
665 tgt->fps_n = ref->fps_n;
666 tgt->fps_d = ref->fps_d;
667 tgt->views = ref->views;
668
669 GST_VIDEO_INFO_FIELD_ORDER (tgt) = GST_VIDEO_INFO_FIELD_ORDER (ref);
670
671 if (GST_VIDEO_INFO_MULTIVIEW_MODE (ref) != GST_VIDEO_MULTIVIEW_MODE_NONE) {
672 GST_VIDEO_INFO_MULTIVIEW_MODE (tgt) = GST_VIDEO_INFO_MULTIVIEW_MODE (ref);
673 GST_VIDEO_INFO_MULTIVIEW_FLAGS (tgt) =
674 GST_VIDEO_INFO_MULTIVIEW_FLAGS (ref);
675 } else {
676 /* Default to MONO, overridden as needed by sub-classes */
677 GST_VIDEO_INFO_MULTIVIEW_MODE (tgt) = GST_VIDEO_MULTIVIEW_MODE_MONO;
678 GST_VIDEO_INFO_MULTIVIEW_FLAGS (tgt) = GST_VIDEO_MULTIVIEW_FLAGS_NONE;
679 }
680 }
681
682 GST_DEBUG ("reference par %d/%d fps %d/%d",
683 state->info.par_n, state->info.par_d,
684 state->info.fps_n, state->info.fps_d);
685
686 return state;
687 }
688
689 static gboolean
gst_video_decoder_setcaps(GstVideoDecoder * decoder,GstCaps * caps)690 gst_video_decoder_setcaps (GstVideoDecoder * decoder, GstCaps * caps)
691 {
692 GstVideoDecoderClass *decoder_class;
693 GstVideoCodecState *state;
694 gboolean ret = TRUE;
695
696 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
697
698 GST_DEBUG_OBJECT (decoder, "setcaps %" GST_PTR_FORMAT, caps);
699
700 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
701
702 if (decoder->priv->input_state) {
703 GST_DEBUG_OBJECT (decoder,
704 "Checking if caps changed old %" GST_PTR_FORMAT " new %" GST_PTR_FORMAT,
705 decoder->priv->input_state->caps, caps);
706 if (gst_caps_is_equal (decoder->priv->input_state->caps, caps))
707 goto caps_not_changed;
708 }
709
710 state = _new_input_state (caps);
711
712 if (G_UNLIKELY (state == NULL))
713 goto parse_fail;
714
715 if (decoder_class->set_format)
716 ret = decoder_class->set_format (decoder, state);
717
718 if (!ret)
719 goto refused_format;
720
721 if (decoder->priv->input_state)
722 gst_video_codec_state_unref (decoder->priv->input_state);
723 decoder->priv->input_state = state;
724
725 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
726
727 return ret;
728
729 caps_not_changed:
730 {
731 GST_DEBUG_OBJECT (decoder, "Caps did not change - ignore");
732 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
733 return TRUE;
734 }
735
736 /* ERRORS */
737 parse_fail:
738 {
739 GST_WARNING_OBJECT (decoder, "Failed to parse caps");
740 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
741 return FALSE;
742 }
743
744 refused_format:
745 {
746 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
747 GST_WARNING_OBJECT (decoder, "Subclass refused caps");
748 gst_video_codec_state_unref (state);
749 return FALSE;
750 }
751 }
752
753 static void
gst_video_decoder_finalize(GObject * object)754 gst_video_decoder_finalize (GObject * object)
755 {
756 GstVideoDecoder *decoder;
757
758 decoder = GST_VIDEO_DECODER (object);
759
760 GST_DEBUG_OBJECT (object, "finalize");
761
762 g_rec_mutex_clear (&decoder->stream_lock);
763
764 if (decoder->priv->input_adapter) {
765 g_object_unref (decoder->priv->input_adapter);
766 decoder->priv->input_adapter = NULL;
767 }
768 if (decoder->priv->output_adapter) {
769 g_object_unref (decoder->priv->output_adapter);
770 decoder->priv->output_adapter = NULL;
771 }
772
773 if (decoder->priv->input_state)
774 gst_video_codec_state_unref (decoder->priv->input_state);
775 if (decoder->priv->output_state)
776 gst_video_codec_state_unref (decoder->priv->output_state);
777
778 if (decoder->priv->pool) {
779 gst_object_unref (decoder->priv->pool);
780 decoder->priv->pool = NULL;
781 }
782
783 if (decoder->priv->allocator) {
784 gst_object_unref (decoder->priv->allocator);
785 decoder->priv->allocator = NULL;
786 }
787
788 G_OBJECT_CLASS (parent_class)->finalize (object);
789 }
790
791 /* hard == FLUSH, otherwise discont */
792 static GstFlowReturn
gst_video_decoder_flush(GstVideoDecoder * dec,gboolean hard)793 gst_video_decoder_flush (GstVideoDecoder * dec, gboolean hard)
794 {
795 GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (dec);
796 GstFlowReturn ret = GST_FLOW_OK;
797
798 GST_LOG_OBJECT (dec, "flush hard %d", hard);
799
800 /* Inform subclass */
801 if (klass->reset) {
802 GST_FIXME_OBJECT (dec, "GstVideoDecoder::reset() is deprecated");
803 klass->reset (dec, hard);
804 }
805
806 if (klass->flush)
807 klass->flush (dec);
808
809 /* and get (re)set for the sequel */
810 gst_video_decoder_reset (dec, FALSE, hard);
811
812 return ret;
813 }
814
815 static GstEvent *
gst_video_decoder_create_merged_tags_event(GstVideoDecoder * dec)816 gst_video_decoder_create_merged_tags_event (GstVideoDecoder * dec)
817 {
818 GstTagList *merged_tags;
819
820 GST_LOG_OBJECT (dec, "upstream : %" GST_PTR_FORMAT, dec->priv->upstream_tags);
821 GST_LOG_OBJECT (dec, "decoder : %" GST_PTR_FORMAT, dec->priv->tags);
822 GST_LOG_OBJECT (dec, "mode : %d", dec->priv->tags_merge_mode);
823
824 merged_tags =
825 gst_tag_list_merge (dec->priv->upstream_tags, dec->priv->tags,
826 dec->priv->tags_merge_mode);
827
828 GST_DEBUG_OBJECT (dec, "merged : %" GST_PTR_FORMAT, merged_tags);
829
830 if (merged_tags == NULL)
831 return NULL;
832
833 if (gst_tag_list_is_empty (merged_tags)) {
834 gst_tag_list_unref (merged_tags);
835 return NULL;
836 }
837
838 return gst_event_new_tag (merged_tags);
839 }
840
841 static gboolean
gst_video_decoder_push_event(GstVideoDecoder * decoder,GstEvent * event)842 gst_video_decoder_push_event (GstVideoDecoder * decoder, GstEvent * event)
843 {
844 switch (GST_EVENT_TYPE (event)) {
845 case GST_EVENT_SEGMENT:
846 {
847 GstSegment segment;
848
849 gst_event_copy_segment (event, &segment);
850
851 GST_DEBUG_OBJECT (decoder, "segment %" GST_SEGMENT_FORMAT, &segment);
852
853 if (segment.format != GST_FORMAT_TIME) {
854 GST_DEBUG_OBJECT (decoder, "received non TIME newsegment");
855 break;
856 }
857
858 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
859 decoder->output_segment = segment;
860 decoder->priv->in_out_segment_sync =
861 gst_segment_is_equal (&decoder->input_segment, &segment);
862 decoder->priv->last_timestamp_out = GST_CLOCK_TIME_NONE;
863 decoder->priv->earliest_time = GST_CLOCK_TIME_NONE;
864 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
865 break;
866 }
867 default:
868 break;
869 }
870
871 GST_DEBUG_OBJECT (decoder, "pushing event %s",
872 gst_event_type_get_name (GST_EVENT_TYPE (event)));
873
874 return gst_pad_push_event (decoder->srcpad, event);
875 }
876
/* Repeatedly invoke the subclass' parse() on data accumulated in the input
 * adapter until the adapter is drained or the subclass returns an error.
 * @at_eos is forwarded to parse() so the subclass knows no more data will
 * arrive; @new_buffer forces at least one parse iteration even when the
 * adapter is currently empty. Errors out if the subclass repeatedly
 * reports OK without consuming anything. */
static GstFlowReturn
gst_video_decoder_parse_available (GstVideoDecoder * dec, gboolean at_eos,
    gboolean new_buffer)
{
  GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (dec);
  GstVideoDecoderPrivate *priv = dec->priv;
  GstFlowReturn ret = GST_FLOW_OK;
  gsize was_available, available;
  guint inactive = 0;

  available = gst_adapter_available (priv->input_adapter);

  while (available || new_buffer) {
    new_buffer = FALSE;
    /* current frame may have been parsed and handled,
     * so we need to set up a new one when asking subclass to parse */
    if (priv->current_frame == NULL)
      priv->current_frame = gst_video_decoder_new_frame (dec);

    was_available = available;
    ret = decoder_class->parse (dec, priv->current_frame,
        priv->input_adapter, at_eos);
    if (ret != GST_FLOW_OK)
      break;

    /* if the subclass returned success (GST_FLOW_OK), it is expected
     * to have collected and submitted a frame, i.e. it should have
     * called gst_video_decoder_have_frame(), or at least consumed a
     * few bytes through gst_video_decoder_add_to_frame().
     *
     * Otherwise, this is an implementation bug, and we error out
     * after 2 failed attempts */
    available = gst_adapter_available (priv->input_adapter);
    if (!priv->current_frame || available != was_available)
      inactive = 0;
    else if (++inactive == 2)
      goto error_inactive;
  }

  return ret;

  /* ERRORS */
error_inactive:
  {
    GST_ERROR_OBJECT (dec, "Failed to consume data. Error in subclass?");
    return GST_FLOW_ERROR;
  }
}
925
926 /* This function has to be called with the stream lock taken. */
927 static GstFlowReturn
gst_video_decoder_drain_out(GstVideoDecoder * dec,gboolean at_eos)928 gst_video_decoder_drain_out (GstVideoDecoder * dec, gboolean at_eos)
929 {
930 GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (dec);
931 GstVideoDecoderPrivate *priv = dec->priv;
932 GstFlowReturn ret = GST_FLOW_OK;
933
934 if (dec->input_segment.rate > 0.0) {
935 /* Forward mode, if unpacketized, give the child class
936 * a final chance to flush out packets */
937 if (!priv->packetized) {
938 ret = gst_video_decoder_parse_available (dec, TRUE, FALSE);
939 }
940
941 if (at_eos) {
942 if (decoder_class->finish)
943 ret = decoder_class->finish (dec);
944 } else {
945 if (decoder_class->drain) {
946 ret = decoder_class->drain (dec);
947 } else {
948 GST_FIXME_OBJECT (dec, "Sub-class should implement drain()");
949 }
950 }
951 } else {
952 /* Reverse playback mode */
953 ret = gst_video_decoder_flush_parse (dec, TRUE);
954 }
955
956 return ret;
957 }
958
959 static GList *
_flush_events(GstPad * pad,GList * events)960 _flush_events (GstPad * pad, GList * events)
961 {
962 GList *tmp;
963
964 for (tmp = events; tmp; tmp = tmp->next) {
965 if (GST_EVENT_TYPE (tmp->data) != GST_EVENT_EOS &&
966 GST_EVENT_TYPE (tmp->data) != GST_EVENT_SEGMENT &&
967 GST_EVENT_IS_STICKY (tmp->data)) {
968 gst_pad_store_sticky_event (pad, GST_EVENT_CAST (tmp->data));
969 }
970 gst_event_unref (tmp->data);
971 }
972 g_list_free (events);
973
974 return NULL;
975 }
976
977 /* Must be called holding the GST_VIDEO_DECODER_STREAM_LOCK */
978 static gboolean
gst_video_decoder_negotiate_default_caps(GstVideoDecoder * decoder)979 gst_video_decoder_negotiate_default_caps (GstVideoDecoder * decoder)
980 {
981 GstCaps *caps, *templcaps;
982 GstVideoCodecState *state;
983 GstVideoInfo info;
984 gint i;
985 gint caps_size;
986 GstStructure *structure;
987
988 templcaps = gst_pad_get_pad_template_caps (decoder->srcpad);
989 caps = gst_pad_peer_query_caps (decoder->srcpad, templcaps);
990 if (caps)
991 gst_caps_unref (templcaps);
992 else
993 caps = templcaps;
994 templcaps = NULL;
995
996 if (!caps || gst_caps_is_empty (caps) || gst_caps_is_any (caps))
997 goto caps_error;
998
999 GST_LOG_OBJECT (decoder, "peer caps %" GST_PTR_FORMAT, caps);
1000
1001 /* before fixating, try to use whatever upstream provided */
1002 caps = gst_caps_make_writable (caps);
1003 caps_size = gst_caps_get_size (caps);
1004 if (decoder->priv->input_state && decoder->priv->input_state->caps) {
1005 GstCaps *sinkcaps = decoder->priv->input_state->caps;
1006 GstStructure *structure = gst_caps_get_structure (sinkcaps, 0);
1007 gint width, height;
1008
1009 if (gst_structure_get_int (structure, "width", &width)) {
1010 for (i = 0; i < caps_size; i++) {
1011 gst_structure_set (gst_caps_get_structure (caps, i), "width",
1012 G_TYPE_INT, width, NULL);
1013 }
1014 }
1015
1016 if (gst_structure_get_int (structure, "height", &height)) {
1017 for (i = 0; i < caps_size; i++) {
1018 gst_structure_set (gst_caps_get_structure (caps, i), "height",
1019 G_TYPE_INT, height, NULL);
1020 }
1021 }
1022 }
1023
1024 for (i = 0; i < caps_size; i++) {
1025 structure = gst_caps_get_structure (caps, i);
1026 /* Random I420 1280x720 for fixation */
1027 if (gst_structure_has_field (structure, "format"))
1028 gst_structure_fixate_field_string (structure, "format", "I420");
1029 else
1030 gst_structure_set (structure, "format", G_TYPE_STRING, "I420", NULL);
1031
1032 if (gst_structure_has_field (structure, "width"))
1033 gst_structure_fixate_field_nearest_int (structure, "width", 1280);
1034 else
1035 gst_structure_set (structure, "width", G_TYPE_INT, 1280, NULL);
1036
1037 if (gst_structure_has_field (structure, "height"))
1038 gst_structure_fixate_field_nearest_int (structure, "height", 720);
1039 else
1040 gst_structure_set (structure, "height", G_TYPE_INT, 720, NULL);
1041 }
1042 caps = gst_caps_fixate (caps);
1043
1044 if (!caps || !gst_video_info_from_caps (&info, caps))
1045 goto caps_error;
1046
1047 GST_INFO_OBJECT (decoder,
1048 "Chose default caps %" GST_PTR_FORMAT " for initial gap", caps);
1049 state =
1050 gst_video_decoder_set_output_state (decoder, info.finfo->format,
1051 info.width, info.height, decoder->priv->input_state);
1052 gst_video_codec_state_unref (state);
1053 gst_caps_unref (caps);
1054
1055 return TRUE;
1056
1057 caps_error:
1058 {
1059 if (caps)
1060 gst_caps_unref (caps);
1061 return FALSE;
1062 }
1063 }
1064
/* Default handler for sink-pad events (invoked via the ->sink_event vfunc).
 *
 * Serialized events that must not overtake pending frames are queued on
 * priv->current_frame_events and pushed later with the corresponding
 * frame; events that are safe (or required) to send right away set
 * forward_immediate and are pushed downstream at the end of this
 * function, as are all non-serialized events.
 *
 * Takes ownership of @event (it is pushed, queued, or unreffed on every
 * path). Returns TRUE when the event was handled/forwarded successfully. */
static gboolean
gst_video_decoder_sink_event_default (GstVideoDecoder * decoder,
    GstEvent * event)
{
  GstVideoDecoderPrivate *priv;
  gboolean ret = FALSE;
  gboolean forward_immediate = FALSE;

  priv = decoder->priv;

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_STREAM_START:
    {
      GstFlowReturn flow_ret = GST_FLOW_OK;

      /* drain (not finish) the previous stream before starting a new one */
      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
      ret = (flow_ret == GST_FLOW_OK);

      GST_DEBUG_OBJECT (decoder, "received STREAM_START. Clearing taglist");
      /* Flush upstream tags after a STREAM_START */
      if (priv->upstream_tags) {
        gst_tag_list_unref (priv->upstream_tags);
        priv->upstream_tags = NULL;
        priv->tags_changed = TRUE;
      }
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

      /* Forward STREAM_START immediately. Everything is drained after
       * the STREAM_START event and we can forward this event immediately
       * now without having buffers out of order.
       */
      forward_immediate = TRUE;
      break;
    }
    case GST_EVENT_CAPS:
    {
      GstCaps *caps;

      /* caps are consumed by setcaps; the event itself is dropped here
       * (event = NULL skips the forwarding code below) */
      gst_event_parse_caps (event, &caps);
      ret = gst_video_decoder_setcaps (decoder, caps);
      gst_event_unref (event);
      event = NULL;
      break;
    }
    case GST_EVENT_SEGMENT_DONE:
    {
      GstFlowReturn flow_ret = GST_FLOW_OK;

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      flow_ret = gst_video_decoder_drain_out (decoder, TRUE);
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
      ret = (flow_ret == GST_FLOW_OK);

      /* Forward SEGMENT_DONE immediately. This is required
       * because no buffer or serialized event might come
       * after SEGMENT_DONE and nothing could trigger another
       * _finish_frame() call.
       *
       * The subclass can override this behaviour by overriding
       * the ::sink_event() vfunc and not chaining up to the
       * parent class' ::sink_event() until a later time.
       */
      forward_immediate = TRUE;
      break;
    }
    case GST_EVENT_EOS:
    {
      GstFlowReturn flow_ret = GST_FLOW_OK;

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      flow_ret = gst_video_decoder_drain_out (decoder, TRUE);
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
      ret = (flow_ret == GST_FLOW_OK);

      /* Error out even if EOS was ok when we had input, but no output */
      if (ret && priv->had_input_data && !priv->had_output_data) {
        GST_ELEMENT_ERROR (decoder, STREAM, DECODE,
            ("No valid frames decoded before end of stream"),
            ("no valid frames found"));
      }

      /* Forward EOS immediately. This is required because no
       * buffer or serialized event will come after EOS and
       * nothing could trigger another _finish_frame() call.
       *
       * The subclass can override this behaviour by overriding
       * the ::sink_event() vfunc and not chaining up to the
       * parent class' ::sink_event() until a later time.
       */
      forward_immediate = TRUE;
      break;
    }
    case GST_EVENT_GAP:
    {
      GstFlowReturn flow_ret = GST_FLOW_OK;
      gboolean needs_reconfigure = FALSE;
      GList *events;
      GList *frame_events;

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      /* NOTE(review): compares the segment flags against a seek flag;
       * presumably intentional since the trickmode flag values are shared
       * between GstSeekFlags and GstSegmentFlags — confirm */
      if (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS)
        flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
      ret = (flow_ret == GST_FLOW_OK);

      /* Ensure we have caps before forwarding the event */
      if (!decoder->priv->output_state) {
        if (!gst_video_decoder_negotiate_default_caps (decoder)) {
          GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
          GST_ELEMENT_ERROR (decoder, STREAM, FORMAT, (NULL),
              ("Decoder output not negotiated before GAP event."));
          forward_immediate = TRUE;
          break;
        }
        needs_reconfigure = TRUE;
      }

      needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad)
          || needs_reconfigure;
      if (decoder->priv->output_state_changed || needs_reconfigure) {
        if (!gst_video_decoder_negotiate_unlocked (decoder)) {
          GST_WARNING_OBJECT (decoder, "Failed to negotiate with downstream");
          gst_pad_mark_reconfigure (decoder->srcpad);
        }
      }

      GST_DEBUG_OBJECT (decoder, "Pushing all pending serialized events"
          " before the gap");
      /* take ownership of both pending lists before dropping the lock */
      events = decoder->priv->pending_events;
      frame_events = decoder->priv->current_frame_events;
      decoder->priv->pending_events = NULL;
      decoder->priv->current_frame_events = NULL;

      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

      gst_video_decoder_push_event_list (decoder, events);
      gst_video_decoder_push_event_list (decoder, frame_events);

      /* Forward GAP immediately. Everything is drained after
       * the GAP event and we can forward this event immediately
       * now without having buffers out of order.
       */
      forward_immediate = TRUE;
      break;
    }
    case GST_EVENT_CUSTOM_DOWNSTREAM:
    {
      gboolean in_still;
      GstFlowReturn flow_ret = GST_FLOW_OK;

      if (gst_video_event_parse_still_frame (event, &in_still)) {
        if (in_still) {
          GST_DEBUG_OBJECT (decoder, "draining current data for still-frame");
          GST_VIDEO_DECODER_STREAM_LOCK (decoder);
          flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
          GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
          ret = (flow_ret == GST_FLOW_OK);
        }
        /* Forward STILL_FRAME immediately. Everything is drained after
         * the STILL_FRAME event and we can forward this event immediately
         * now without having buffers out of order.
         */
        forward_immediate = TRUE;
      }
      break;
    }
    case GST_EVENT_SEGMENT:
    {
      GstSegment segment;

      gst_event_copy_segment (event, &segment);

      if (segment.format == GST_FORMAT_TIME) {
        GST_DEBUG_OBJECT (decoder,
            "received TIME SEGMENT %" GST_SEGMENT_FORMAT, &segment);
      } else {
        gint64 start;

        GST_DEBUG_OBJECT (decoder,
            "received SEGMENT %" GST_SEGMENT_FORMAT, &segment);

        /* handle newsegment as a result from our legacy simple seeking */
        /* note that initial 0 should convert to 0 in any case */
        if (priv->do_estimate_rate &&
            gst_pad_query_convert (decoder->sinkpad, GST_FORMAT_BYTES,
                segment.start, GST_FORMAT_TIME, &start)) {
          /* best attempt convert */
          /* as these are only estimates, stop is kept open-ended to avoid
           * premature cutting */
          GST_DEBUG_OBJECT (decoder,
              "converted to TIME start %" GST_TIME_FORMAT,
              GST_TIME_ARGS (start));
          segment.start = start;
          segment.stop = GST_CLOCK_TIME_NONE;
          segment.time = start;
          /* replace event */
          gst_event_unref (event);
          event = gst_event_new_segment (&segment);
        } else {
          goto newseg_wrong_format;
        }
      }

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);

      /* new segment invalidates the interpolation bases */
      priv->base_timestamp = GST_CLOCK_TIME_NONE;
      priv->base_picture_number = 0;

      decoder->input_segment = segment;
      decoder->priv->in_out_segment_sync = FALSE;

      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
      break;
    }
    case GST_EVENT_FLUSH_STOP:
    {
      GList *l;

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      /* keep the sticky events (except EOS/SEGMENT) of every queued frame
       * alive on the srcpad while dropping the queued lists themselves */
      for (l = priv->frames; l; l = l->next) {
        GstVideoCodecFrame *frame = l->data;

        frame->events = _flush_events (decoder->srcpad, frame->events);
      }
      priv->current_frame_events = _flush_events (decoder->srcpad,
          decoder->priv->current_frame_events);

      /* well, this is kind of worse than a DISCONT */
      gst_video_decoder_flush (decoder, TRUE);
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
      /* Forward FLUSH_STOP immediately. This is required because it is
       * expected to be forwarded immediately and no buffers are queued
       * anyway.
       */
      forward_immediate = TRUE;
      break;
    }
    case GST_EVENT_TAG:
    {
      GstTagList *tags;

      gst_event_parse_tag (event, &tags);

      if (gst_tag_list_get_scope (tags) == GST_TAG_SCOPE_STREAM) {
        GST_VIDEO_DECODER_STREAM_LOCK (decoder);
        if (priv->upstream_tags != tags) {
          if (priv->upstream_tags)
            gst_tag_list_unref (priv->upstream_tags);
          priv->upstream_tags = gst_tag_list_ref (tags);
          GST_INFO_OBJECT (decoder, "upstream tags: %" GST_PTR_FORMAT, tags);
        }
        /* replace the incoming event with one merging upstream and
         * decoder tags; NULL means there is nothing to push right now */
        gst_event_unref (event);
        event = gst_video_decoder_create_merged_tags_event (decoder);
        GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
        if (!event)
          ret = TRUE;
      }
      break;
    }
    default:
      break;
  }

  /* Forward non-serialized events immediately, and all other
   * events which can be forwarded immediately without potentially
   * causing the event to go out of order with other events and
   * buffers as decided above.
   */
  if (event) {
    if (!GST_EVENT_IS_SERIALIZED (event) || forward_immediate) {
      ret = gst_video_decoder_push_event (decoder, event);
    } else {
      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      decoder->priv->current_frame_events =
          g_list_prepend (decoder->priv->current_frame_events, event);
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
      ret = TRUE;
    }
  }

  return ret;

newseg_wrong_format:
  {
    GST_DEBUG_OBJECT (decoder, "received non TIME newsegment");
    gst_event_unref (event);
    /* SWALLOW EVENT */
    return TRUE;
  }
}
1355
1356 static gboolean
gst_video_decoder_sink_event(GstPad * pad,GstObject * parent,GstEvent * event)1357 gst_video_decoder_sink_event (GstPad * pad, GstObject * parent,
1358 GstEvent * event)
1359 {
1360 GstVideoDecoder *decoder;
1361 GstVideoDecoderClass *decoder_class;
1362 gboolean ret = FALSE;
1363
1364 decoder = GST_VIDEO_DECODER (parent);
1365 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
1366
1367 GST_DEBUG_OBJECT (decoder, "received event %d, %s", GST_EVENT_TYPE (event),
1368 GST_EVENT_TYPE_NAME (event));
1369
1370 if (decoder_class->sink_event)
1371 ret = decoder_class->sink_event (decoder, event);
1372
1373 return ret;
1374 }
1375
1376 /* perform upstream byte <-> time conversion (duration, seeking)
1377 * if subclass allows and if enough data for moderately decent conversion */
1378 static inline gboolean
gst_video_decoder_do_byte(GstVideoDecoder * dec)1379 gst_video_decoder_do_byte (GstVideoDecoder * dec)
1380 {
1381 gboolean ret;
1382
1383 GST_OBJECT_LOCK (dec);
1384 ret = dec->priv->do_estimate_rate && (dec->priv->bytes_out > 0)
1385 && (dec->priv->time > GST_SECOND);
1386 GST_OBJECT_UNLOCK (dec);
1387
1388 return ret;
1389 }
1390
1391 static gboolean
gst_video_decoder_do_seek(GstVideoDecoder * dec,GstEvent * event)1392 gst_video_decoder_do_seek (GstVideoDecoder * dec, GstEvent * event)
1393 {
1394 GstFormat format;
1395 GstSeekFlags flags;
1396 GstSeekType start_type, end_type;
1397 gdouble rate;
1398 gint64 start, start_time, end_time;
1399 GstSegment seek_segment;
1400 guint32 seqnum;
1401
1402 gst_event_parse_seek (event, &rate, &format, &flags, &start_type,
1403 &start_time, &end_type, &end_time);
1404
1405 /* we'll handle plain open-ended flushing seeks with the simple approach */
1406 if (rate != 1.0) {
1407 GST_DEBUG_OBJECT (dec, "unsupported seek: rate");
1408 return FALSE;
1409 }
1410
1411 if (start_type != GST_SEEK_TYPE_SET) {
1412 GST_DEBUG_OBJECT (dec, "unsupported seek: start time");
1413 return FALSE;
1414 }
1415
1416 if ((end_type != GST_SEEK_TYPE_SET && end_type != GST_SEEK_TYPE_NONE) ||
1417 (end_type == GST_SEEK_TYPE_SET && end_time != GST_CLOCK_TIME_NONE)) {
1418 GST_DEBUG_OBJECT (dec, "unsupported seek: end time");
1419 return FALSE;
1420 }
1421
1422 if (!(flags & GST_SEEK_FLAG_FLUSH)) {
1423 GST_DEBUG_OBJECT (dec, "unsupported seek: not flushing");
1424 return FALSE;
1425 }
1426
1427 memcpy (&seek_segment, &dec->output_segment, sizeof (seek_segment));
1428 gst_segment_do_seek (&seek_segment, rate, format, flags, start_type,
1429 start_time, end_type, end_time, NULL);
1430 start_time = seek_segment.position;
1431
1432 if (!gst_pad_query_convert (dec->sinkpad, GST_FORMAT_TIME, start_time,
1433 GST_FORMAT_BYTES, &start)) {
1434 GST_DEBUG_OBJECT (dec, "conversion failed");
1435 return FALSE;
1436 }
1437
1438 seqnum = gst_event_get_seqnum (event);
1439 event = gst_event_new_seek (1.0, GST_FORMAT_BYTES, flags,
1440 GST_SEEK_TYPE_SET, start, GST_SEEK_TYPE_NONE, -1);
1441 gst_event_set_seqnum (event, seqnum);
1442
1443 GST_DEBUG_OBJECT (dec, "seeking to %" GST_TIME_FORMAT " at byte offset %"
1444 G_GINT64_FORMAT, GST_TIME_ARGS (start_time), start);
1445
1446 return gst_pad_push_event (dec->sinkpad, event);
1447 }
1448
/* Default handler for src-pad events.
 *
 * SEEK: offered upstream first; if that fails, a TIME seek may be handled
 * locally via byte<->time estimation, and a non-TIME seek is converted to
 * TIME and retried upstream.
 * QOS: updates the QoS bookkeeping (proportion/earliest_time) under the
 * object lock, then forwards the event upstream.
 * Everything else is forwarded upstream unchanged.
 *
 * Takes ownership of @event on every path; returns TRUE on success. */
static gboolean
gst_video_decoder_src_event_default (GstVideoDecoder * decoder,
    GstEvent * event)
{
  GstVideoDecoderPrivate *priv;
  gboolean res = FALSE;

  priv = decoder->priv;

  GST_DEBUG_OBJECT (decoder,
      "received event %d, %s", GST_EVENT_TYPE (event),
      GST_EVENT_TYPE_NAME (event));

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_SEEK:
    {
      GstFormat format;
      gdouble rate;
      GstSeekFlags flags;
      GstSeekType start_type, stop_type;
      gint64 start, stop;
      gint64 tstart, tstop;
      guint32 seqnum;

      /* parse before pushing — the push below transfers ownership */
      gst_event_parse_seek (event, &rate, &format, &flags, &start_type, &start,
          &stop_type, &stop);
      seqnum = gst_event_get_seqnum (event);

      /* upstream gets a chance first */
      if ((res = gst_pad_push_event (decoder->sinkpad, event)))
        break;

      /* if upstream fails for a time seek, maybe we can help if allowed */
      if (format == GST_FORMAT_TIME) {
        if (gst_video_decoder_do_byte (decoder))
          res = gst_video_decoder_do_seek (decoder, event);
        break;
      }

      /* ... though a non-time seek can be aided as well */
      /* First bring the requested format to time */
      if (!(res =
              gst_pad_query_convert (decoder->srcpad, format, start,
                  GST_FORMAT_TIME, &tstart)))
        goto convert_error;
      if (!(res =
              gst_pad_query_convert (decoder->srcpad, format, stop,
                  GST_FORMAT_TIME, &tstop)))
        goto convert_error;

      /* then seek with time on the peer; reuse the original seqnum so the
       * retried seek stays correlated with the caller's request */
      event = gst_event_new_seek (rate, GST_FORMAT_TIME,
          flags, start_type, tstart, stop_type, tstop);
      gst_event_set_seqnum (event, seqnum);

      res = gst_pad_push_event (decoder->sinkpad, event);
      break;
    }
    case GST_EVENT_QOS:
    {
      GstQOSType type;
      gdouble proportion;
      GstClockTimeDiff diff;
      GstClockTime timestamp;

      gst_event_parse_qos (event, &type, &proportion, &diff, &timestamp);

      GST_OBJECT_LOCK (decoder);
      priv->proportion = proportion;
      if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (timestamp))) {
        if (G_UNLIKELY (diff > 0)) {
          /* we're late: skip further ahead to catch up */
          priv->earliest_time = timestamp + 2 * diff + priv->qos_frame_duration;
        } else {
          priv->earliest_time = timestamp + diff;
        }
      } else {
        priv->earliest_time = GST_CLOCK_TIME_NONE;
      }
      GST_OBJECT_UNLOCK (decoder);

      GST_DEBUG_OBJECT (decoder,
          "got QoS %" GST_TIME_FORMAT ", %" GST_STIME_FORMAT ", %g",
          GST_TIME_ARGS (timestamp), GST_STIME_ARGS (diff), proportion);

      res = gst_pad_push_event (decoder->sinkpad, event);
      break;
    }
    default:
      res = gst_pad_push_event (decoder->sinkpad, event);
      break;
  }
done:
  return res;

convert_error:
  GST_DEBUG_OBJECT (decoder, "could not convert format");
  goto done;
}
1547
1548 static gboolean
gst_video_decoder_src_event(GstPad * pad,GstObject * parent,GstEvent * event)1549 gst_video_decoder_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
1550 {
1551 GstVideoDecoder *decoder;
1552 GstVideoDecoderClass *decoder_class;
1553 gboolean ret = FALSE;
1554
1555 decoder = GST_VIDEO_DECODER (parent);
1556 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
1557
1558 GST_DEBUG_OBJECT (decoder, "received event %d, %s", GST_EVENT_TYPE (event),
1559 GST_EVENT_TYPE_NAME (event));
1560
1561 if (decoder_class->src_event)
1562 ret = decoder_class->src_event (decoder, event);
1563
1564 return ret;
1565 }
1566
/* Default handler for src-pad queries.
 *
 * POSITION: peer first, then derived from last_timestamp_out; DURATION:
 * upstream first, then estimated from upstream size via byte<->time
 * conversion when allowed; CONVERT: answered from the output state;
 * LATENCY: peer values plus this decoder's min/max latency.
 * Returns TRUE when the query was answered. */
static gboolean
gst_video_decoder_src_query_default (GstVideoDecoder * dec, GstQuery * query)
{
  GstPad *pad = GST_VIDEO_DECODER_SRC_PAD (dec);
  gboolean res = TRUE;

  GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query);

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_POSITION:
    {
      GstFormat format;
      gint64 time, value;

      /* upstream gets a chance first */
      if ((res = gst_pad_peer_query (dec->sinkpad, query))) {
        GST_LOG_OBJECT (dec, "returning peer response");
        break;
      }

      /* Refuse BYTES format queries. If it made sense to
       * answer them, upstream would have already */
      gst_query_parse_position (query, &format, NULL);

      if (format == GST_FORMAT_BYTES) {
        GST_LOG_OBJECT (dec, "Ignoring BYTES position query");
        break;
      }

      /* we start from the last seen time */
      time = dec->priv->last_timestamp_out;
      /* correct for the segment values */
      time = gst_segment_to_stream_time (&dec->output_segment,
          GST_FORMAT_TIME, time);

      GST_LOG_OBJECT (dec,
          "query %p: our time: %" GST_TIME_FORMAT, query, GST_TIME_ARGS (time));

      /* and convert to the final format */
      if (!(res = gst_pad_query_convert (pad, GST_FORMAT_TIME, time,
                  format, &value)))
        break;

      gst_query_set_position (query, format, value);

      GST_LOG_OBJECT (dec,
          "query %p: we return %" G_GINT64_FORMAT " (format %u)", query, value,
          format);
      break;
    }
    case GST_QUERY_DURATION:
    {
      GstFormat format;

      /* upstream in any case */
      if ((res = gst_pad_query_default (pad, GST_OBJECT (dec), query)))
        break;

      gst_query_parse_duration (query, &format, NULL);
      /* try answering TIME by converting from BYTE if subclass allows */
      if (format == GST_FORMAT_TIME && gst_video_decoder_do_byte (dec)) {
        gint64 value;

        if (gst_pad_peer_query_duration (dec->sinkpad, GST_FORMAT_BYTES,
                &value)) {
          GST_LOG_OBJECT (dec, "upstream size %" G_GINT64_FORMAT, value);
          if (gst_pad_query_convert (dec->sinkpad,
                  GST_FORMAT_BYTES, value, GST_FORMAT_TIME, &value)) {
            gst_query_set_duration (query, GST_FORMAT_TIME, value);
            res = TRUE;
          }
        }
      }
      break;
    }
    case GST_QUERY_CONVERT:
    {
      GstFormat src_fmt, dest_fmt;
      gint64 src_val, dest_val;

      GST_DEBUG_OBJECT (dec, "convert query");

      gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
      /* output_state is read under the object lock */
      GST_OBJECT_LOCK (dec);
      if (dec->priv->output_state != NULL)
        res = __gst_video_rawvideo_convert (dec->priv->output_state,
            src_fmt, src_val, &dest_fmt, &dest_val);
      else
        res = FALSE;
      GST_OBJECT_UNLOCK (dec);
      if (!res)
        goto error;
      gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
      break;
    }
    case GST_QUERY_LATENCY:
    {
      gboolean live;
      GstClockTime min_latency, max_latency;

      res = gst_pad_peer_query (dec->sinkpad, query);
      if (res) {
        gst_query_parse_latency (query, &live, &min_latency, &max_latency);
        GST_DEBUG_OBJECT (dec, "Peer qlatency: live %d, min %"
            GST_TIME_FORMAT " max %" GST_TIME_FORMAT, live,
            GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));

        /* add our own latency; NONE on either side keeps max unbounded */
        GST_OBJECT_LOCK (dec);
        min_latency += dec->priv->min_latency;
        if (max_latency == GST_CLOCK_TIME_NONE
            || dec->priv->max_latency == GST_CLOCK_TIME_NONE)
          max_latency = GST_CLOCK_TIME_NONE;
        else
          max_latency += dec->priv->max_latency;
        GST_OBJECT_UNLOCK (dec);

        gst_query_set_latency (query, live, min_latency, max_latency);
      }
    }
      break;
    default:
      res = gst_pad_query_default (pad, GST_OBJECT (dec), query);
  }
  return res;

error:
  GST_ERROR_OBJECT (dec, "query failed");
  return res;
}
1696
1697 static gboolean
gst_video_decoder_src_query(GstPad * pad,GstObject * parent,GstQuery * query)1698 gst_video_decoder_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
1699 {
1700 GstVideoDecoder *decoder;
1701 GstVideoDecoderClass *decoder_class;
1702 gboolean ret = FALSE;
1703
1704 decoder = GST_VIDEO_DECODER (parent);
1705 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
1706
1707 GST_DEBUG_OBJECT (decoder, "received query %d, %s", GST_QUERY_TYPE (query),
1708 GST_QUERY_TYPE_NAME (query));
1709
1710 if (decoder_class->src_query)
1711 ret = decoder_class->src_query (decoder, query);
1712
1713 return ret;
1714 }
1715
1716 /**
1717 * gst_video_decoder_proxy_getcaps:
1718 * @decoder: a #GstVideoDecoder
1719 * @caps: (allow-none): initial caps
1720 * @filter: (allow-none): filter caps
1721 *
1722 * Returns caps that express @caps (or sink template caps if @caps == NULL)
1723 * restricted to resolution/format/... combinations supported by downstream
1724 * elements.
1725 *
1726 * Returns: (transfer full): a #GstCaps owned by caller
1727 *
1728 * Since: 1.6
1729 */
1730 GstCaps *
gst_video_decoder_proxy_getcaps(GstVideoDecoder * decoder,GstCaps * caps,GstCaps * filter)1731 gst_video_decoder_proxy_getcaps (GstVideoDecoder * decoder, GstCaps * caps,
1732 GstCaps * filter)
1733 {
1734 return __gst_video_element_proxy_getcaps (GST_ELEMENT_CAST (decoder),
1735 GST_VIDEO_DECODER_SINK_PAD (decoder),
1736 GST_VIDEO_DECODER_SRC_PAD (decoder), caps, filter);
1737 }
1738
1739 static GstCaps *
gst_video_decoder_sink_getcaps(GstVideoDecoder * decoder,GstCaps * filter)1740 gst_video_decoder_sink_getcaps (GstVideoDecoder * decoder, GstCaps * filter)
1741 {
1742 GstVideoDecoderClass *klass;
1743 GstCaps *caps;
1744
1745 klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
1746
1747 if (klass->getcaps)
1748 caps = klass->getcaps (decoder, filter);
1749 else
1750 caps = gst_video_decoder_proxy_getcaps (decoder, NULL, filter);
1751
1752 GST_LOG_OBJECT (decoder, "Returning caps %" GST_PTR_FORMAT, caps);
1753
1754 return caps;
1755 }
1756
/* Default handler for sink-pad queries.
 *
 * CONVERT: answered from the encoded-stream byte/time statistics;
 * ALLOCATION: delegated to the subclass' ->propose_allocation vfunc;
 * CAPS: answered via gst_video_decoder_sink_getcaps(); ACCEPT_CAPS:
 * checked against the template and queried caps (or the default pad
 * handler when use_default_pad_acceptcaps is set).
 * Returns TRUE when the query was answered. */
static gboolean
gst_video_decoder_sink_query_default (GstVideoDecoder * decoder,
    GstQuery * query)
{
  GstPad *pad = GST_VIDEO_DECODER_SINK_PAD (decoder);
  GstVideoDecoderPrivate *priv;
  gboolean res = FALSE;

  priv = decoder->priv;

  GST_LOG_OBJECT (decoder, "handling query: %" GST_PTR_FORMAT, query);

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_CONVERT:
    {
      GstFormat src_fmt, dest_fmt;
      gint64 src_val, dest_val;

      gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
      /* bytes_out/time are read under the object lock */
      GST_OBJECT_LOCK (decoder);
      res =
          __gst_video_encoded_video_convert (priv->bytes_out, priv->time,
          src_fmt, src_val, &dest_fmt, &dest_val);
      GST_OBJECT_UNLOCK (decoder);
      if (!res)
        goto error;
      gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
      break;
    }
    case GST_QUERY_ALLOCATION:{
      GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);

      if (klass->propose_allocation)
        res = klass->propose_allocation (decoder, query);
      break;
    }
    case GST_QUERY_CAPS:{
      GstCaps *filter, *caps;

      gst_query_parse_caps (query, &filter);
      caps = gst_video_decoder_sink_getcaps (decoder, filter);
      gst_query_set_caps_result (query, caps);
      gst_caps_unref (caps);
      res = TRUE;
      break;
    }
    case GST_QUERY_ACCEPT_CAPS:{
      if (decoder->priv->use_default_pad_acceptcaps) {
        res =
            gst_pad_query_default (GST_VIDEO_DECODER_SINK_PAD (decoder),
            GST_OBJECT_CAST (decoder), query);
      } else {
        GstCaps *caps;
        GstCaps *allowed_caps;
        GstCaps *template_caps;
        gboolean accept;

        gst_query_parse_accept_caps (query, &caps);

        /* cheap template-subset check first, then the full caps query */
        template_caps = gst_pad_get_pad_template_caps (pad);
        accept = gst_caps_is_subset (caps, template_caps);
        gst_caps_unref (template_caps);

        if (accept) {
          allowed_caps =
              gst_pad_query_caps (GST_VIDEO_DECODER_SINK_PAD (decoder), caps);

          accept = gst_caps_can_intersect (caps, allowed_caps);

          gst_caps_unref (allowed_caps);
        }

        gst_query_set_accept_caps_result (query, accept);
        res = TRUE;
      }
      break;
    }
    default:
      res = gst_pad_query_default (pad, GST_OBJECT (decoder), query);
      break;
  }
done:

  return res;
error:
  GST_DEBUG_OBJECT (decoder, "query failed");
  goto done;

}
1846
1847 static gboolean
gst_video_decoder_sink_query(GstPad * pad,GstObject * parent,GstQuery * query)1848 gst_video_decoder_sink_query (GstPad * pad, GstObject * parent,
1849 GstQuery * query)
1850 {
1851 GstVideoDecoder *decoder;
1852 GstVideoDecoderClass *decoder_class;
1853 gboolean ret = FALSE;
1854
1855 decoder = GST_VIDEO_DECODER (parent);
1856 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
1857
1858 GST_DEBUG_OBJECT (decoder, "received query %d, %s", GST_QUERY_TYPE (query),
1859 GST_QUERY_TYPE_NAME (query));
1860
1861 if (decoder_class->sink_query)
1862 ret = decoder_class->sink_query (decoder, query);
1863
1864 return ret;
1865 }
1866
/* Timing info recorded per input buffer, keyed by the input byte offset at
 * which the buffer was received (see gst_video_decoder_add_buffer_info()),
 * so it can be matched back to parsed frames later
 * (see gst_video_decoder_get_buffer_info_at_offset()). */
typedef struct _Timestamp Timestamp;
struct _Timestamp
{
  guint64 offset;               /* input byte offset (priv->input_offset) */
  GstClockTime pts;
  GstClockTime dts;
  GstClockTime duration;
  guint flags;                  /* GstBufferFlags of the input buffer */
};
1876
/* Release a Timestamp record allocated with g_slice_new() in
 * gst_video_decoder_add_buffer_info(). */
static void
timestamp_free (Timestamp * ts)
{
  g_slice_free (Timestamp, ts);
}
1882
1883 static void
gst_video_decoder_add_buffer_info(GstVideoDecoder * decoder,GstBuffer * buffer)1884 gst_video_decoder_add_buffer_info (GstVideoDecoder * decoder,
1885 GstBuffer * buffer)
1886 {
1887 GstVideoDecoderPrivate *priv = decoder->priv;
1888 Timestamp *ts;
1889
1890 if (!GST_BUFFER_PTS_IS_VALID (buffer) &&
1891 !GST_BUFFER_DTS_IS_VALID (buffer) &&
1892 !GST_BUFFER_DURATION_IS_VALID (buffer) &&
1893 GST_BUFFER_FLAGS (buffer) == 0) {
1894 /* Save memory - don't bother storing info
1895 * for buffers with no distinguishing info */
1896 return;
1897 }
1898
1899 ts = g_slice_new (Timestamp);
1900
1901 GST_LOG_OBJECT (decoder,
1902 "adding PTS %" GST_TIME_FORMAT " DTS %" GST_TIME_FORMAT
1903 " (offset:%" G_GUINT64_FORMAT ")",
1904 GST_TIME_ARGS (GST_BUFFER_PTS (buffer)),
1905 GST_TIME_ARGS (GST_BUFFER_DTS (buffer)), priv->input_offset);
1906
1907 ts->offset = priv->input_offset;
1908 ts->pts = GST_BUFFER_PTS (buffer);
1909 ts->dts = GST_BUFFER_DTS (buffer);
1910 ts->duration = GST_BUFFER_DURATION (buffer);
1911 ts->flags = GST_BUFFER_FLAGS (buffer);
1912
1913 priv->timestamps = g_list_append (priv->timestamps, ts);
1914 }
1915
1916 static void
gst_video_decoder_get_buffer_info_at_offset(GstVideoDecoder * decoder,guint64 offset,GstClockTime * pts,GstClockTime * dts,GstClockTime * duration,guint * flags)1917 gst_video_decoder_get_buffer_info_at_offset (GstVideoDecoder *
1918 decoder, guint64 offset, GstClockTime * pts, GstClockTime * dts,
1919 GstClockTime * duration, guint * flags)
1920 {
1921 #ifndef GST_DISABLE_GST_DEBUG
1922 guint64 got_offset = 0;
1923 #endif
1924 Timestamp *ts;
1925 GList *g;
1926
1927 *pts = GST_CLOCK_TIME_NONE;
1928 *dts = GST_CLOCK_TIME_NONE;
1929 *duration = GST_CLOCK_TIME_NONE;
1930 *flags = 0;
1931
1932 g = decoder->priv->timestamps;
1933 while (g) {
1934 ts = g->data;
1935 if (ts->offset <= offset) {
1936 #ifndef GST_DISABLE_GST_DEBUG
1937 got_offset = ts->offset;
1938 #endif
1939 *pts = ts->pts;
1940 *dts = ts->dts;
1941 *duration = ts->duration;
1942 *flags = ts->flags;
1943 g = g->next;
1944 decoder->priv->timestamps = g_list_remove (decoder->priv->timestamps, ts);
1945 timestamp_free (ts);
1946 } else {
1947 break;
1948 }
1949 }
1950
1951 GST_LOG_OBJECT (decoder,
1952 "got PTS %" GST_TIME_FORMAT " DTS %" GST_TIME_FORMAT " flags %x @ offs %"
1953 G_GUINT64_FORMAT " (wanted offset:%" G_GUINT64_FORMAT ")",
1954 GST_TIME_ARGS (*pts), GST_TIME_ARGS (*dts), *flags, got_offset, offset);
1955 }
1956
1957 static void
gst_video_decoder_clear_queues(GstVideoDecoder * dec)1958 gst_video_decoder_clear_queues (GstVideoDecoder * dec)
1959 {
1960 GstVideoDecoderPrivate *priv = dec->priv;
1961
1962 g_list_free_full (priv->output_queued,
1963 (GDestroyNotify) gst_mini_object_unref);
1964 priv->output_queued = NULL;
1965
1966 g_list_free_full (priv->gather, (GDestroyNotify) gst_mini_object_unref);
1967 priv->gather = NULL;
1968 g_list_free_full (priv->decode, (GDestroyNotify) gst_video_codec_frame_unref);
1969 priv->decode = NULL;
1970 g_list_free_full (priv->parse, (GDestroyNotify) gst_mini_object_unref);
1971 priv->parse = NULL;
1972 g_list_free_full (priv->parse_gather,
1973 (GDestroyNotify) gst_video_codec_frame_unref);
1974 priv->parse_gather = NULL;
1975 g_list_free_full (priv->frames, (GDestroyNotify) gst_video_codec_frame_unref);
1976 priv->frames = NULL;
1977 }
1978
/* Reset decoder state.  Three levels, selected by the flags:
 *  - always: parsing/timestamp tracking state (adapters, offsets,
 *    base/last timestamps, stored buffer info);
 *  - @flush_hard or @full: segments, all queues, the current frame,
 *    pending events, error counters and QoS state;
 *  - @full only: input/output codec state, tags, statistics, and the
 *    negotiated buffer pool/allocator.
 * Called with the caller expected not to hold the stream lock (taken
 * here for the whole body). */
static void
gst_video_decoder_reset (GstVideoDecoder * decoder, gboolean full,
    gboolean flush_hard)
{
  GstVideoDecoderPrivate *priv = decoder->priv;

  GST_DEBUG_OBJECT (decoder, "reset full %d", full);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  if (full || flush_hard) {
    gst_segment_init (&decoder->input_segment, GST_FORMAT_UNDEFINED);
    gst_segment_init (&decoder->output_segment, GST_FORMAT_UNDEFINED);
    gst_video_decoder_clear_queues (decoder);
    decoder->priv->in_out_segment_sync = TRUE;

    if (priv->current_frame) {
      gst_video_codec_frame_unref (priv->current_frame);
      priv->current_frame = NULL;
    }

    /* drop events that were waiting to be attached to the next frame
     * or pushed before the next output */
    g_list_free_full (priv->current_frame_events,
        (GDestroyNotify) gst_event_unref);
    priv->current_frame_events = NULL;
    g_list_free_full (priv->pending_events, (GDestroyNotify) gst_event_unref);
    priv->pending_events = NULL;

    priv->error_count = 0;
    priv->max_errors = GST_VIDEO_DECODER_MAX_ERRORS;
    priv->had_output_data = FALSE;
    priv->had_input_data = FALSE;

    /* QoS fields are shared with the src-pad event handler, hence the
     * object lock */
    GST_OBJECT_LOCK (decoder);
    priv->earliest_time = GST_CLOCK_TIME_NONE;
    priv->proportion = 0.5;
    GST_OBJECT_UNLOCK (decoder);
  }

  if (full) {
    if (priv->input_state)
      gst_video_codec_state_unref (priv->input_state);
    priv->input_state = NULL;
    GST_OBJECT_LOCK (decoder);
    if (priv->output_state)
      gst_video_codec_state_unref (priv->output_state);
    priv->output_state = NULL;

    priv->qos_frame_duration = 0;
    GST_OBJECT_UNLOCK (decoder);

    if (priv->tags)
      gst_tag_list_unref (priv->tags);
    priv->tags = NULL;
    priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
    if (priv->upstream_tags) {
      gst_tag_list_unref (priv->upstream_tags);
      priv->upstream_tags = NULL;
    }
    priv->tags_changed = FALSE;
    priv->reordered_output = FALSE;

    priv->dropped = 0;
    priv->processed = 0;

    priv->decode_frame_number = 0;
    priv->base_picture_number = 0;

    /* release the negotiated allocation objects */
    if (priv->pool) {
      GST_DEBUG_OBJECT (decoder, "deactivate pool %" GST_PTR_FORMAT,
          priv->pool);
      gst_buffer_pool_set_active (priv->pool, FALSE);
      gst_object_unref (priv->pool);
      priv->pool = NULL;
    }

    if (priv->allocator) {
      gst_object_unref (priv->allocator);
      priv->allocator = NULL;
    }
  }

  priv->discont = TRUE;

  priv->base_timestamp = GST_CLOCK_TIME_NONE;
  priv->last_timestamp_out = GST_CLOCK_TIME_NONE;
  priv->pts_delta = GST_CLOCK_TIME_NONE;

  priv->input_offset = 0;
  priv->frame_offset = 0;
  gst_adapter_clear (priv->input_adapter);
  gst_adapter_clear (priv->output_adapter);
  /* stored per-buffer metadata is keyed on input_offset, which was just
   * reset, so it must go too */
  g_list_free_full (priv->timestamps, (GDestroyNotify) timestamp_free);
  priv->timestamps = NULL;

  GST_OBJECT_LOCK (decoder);
  priv->bytes_out = 0;
  priv->time = 0;
  GST_OBJECT_UNLOCK (decoder);

#ifndef GST_DISABLE_DEBUG
  priv->last_reset_time = gst_util_get_timestamp ();
#endif

#ifdef OHOS_OPT_PERFORMANCE // ohos.opt.performance.0001: first frame decoded cost time
  priv->has_recv_first_key_frame = FALSE;
  priv->has_push_first_frame = FALSE;
#endif
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
}
2088
/* Process one input buffer in forward direction.  For packetized input the
 * buffer becomes the current frame's input and is decoded immediately (or
 * gathered on parse_gather when the input segment runs in reverse); for
 * non-packetized input the buffer's timestamps are recorded and the data is
 * pushed into the input adapter for the subclass parser.  @at_eos tells the
 * parsing path that no more data will follow.  Called with the stream lock
 * held by the caller. */
static GstFlowReturn
gst_video_decoder_chain_forward (GstVideoDecoder * decoder,
    GstBuffer * buf, gboolean at_eos)
{
  GstVideoDecoderPrivate *priv;
  GstVideoDecoderClass *klass;
  GstFlowReturn ret = GST_FLOW_OK;

  klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
  priv = decoder->priv;

  /* non-packetized input requires a parse vfunc to find frame boundaries */
  g_return_val_if_fail (priv->packetized || klass->parse, GST_FLOW_ERROR);

  /* Draining on DISCONT is handled in chain_reverse() for reverse playback,
   * and this function would only be called to get everything collected GOP
   * by GOP in the parse_gather list */
  if (decoder->input_segment.rate > 0.0 && GST_BUFFER_IS_DISCONT (buf)
      && (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS))
    ret = gst_video_decoder_drain_out (decoder, FALSE);

  if (priv->current_frame == NULL)
    priv->current_frame = gst_video_decoder_new_frame (decoder);

  if (!priv->packetized)
    gst_video_decoder_add_buffer_info (decoder, buf);

  priv->input_offset += gst_buffer_get_size (buf);

  if (priv->packetized) {
    gboolean was_keyframe = FALSE;
    /* absence of DELTA_UNIT marks a keyframe / sync point */
    if (!GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT)) {
      was_keyframe = TRUE;
      GST_LOG_OBJECT (decoder, "Marking current_frame as sync point");
      GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (priv->current_frame);
    }

    priv->current_frame->input_buffer = buf;

    if (decoder->input_segment.rate < 0.0) {
      priv->parse_gather =
          g_list_prepend (priv->parse_gather, priv->current_frame);
    } else {
      ret = gst_video_decoder_decode_frame (decoder, priv->current_frame);
    }
    priv->current_frame = NULL;
    /* If in trick mode and it was a keyframe, drain decoder to avoid extra
     * latency. Only do this for forwards playback as reverse playback handles
     * draining on keyframes in flush_parse(), and would otherwise call back
     * from drain_out() to here causing an infinite loop.
     * Also this function is only called for reverse playback to gather frames
     * GOP by GOP, and does not do any actual decoding. That would be done by
     * flush_decode() */
    if (ret == GST_FLOW_OK && was_keyframe && decoder->input_segment.rate > 0.0
        && (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS))
      ret = gst_video_decoder_drain_out (decoder, FALSE);
  } else {
    gst_adapter_push (priv->input_adapter, buf);

    ret = gst_video_decoder_parse_available (decoder, at_eos, TRUE);
  }

  /* NEED_DATA just means the parser wants more input; not an error */
  if (ret == GST_VIDEO_DECODER_FLOW_NEED_DATA)
    return GST_FLOW_OK;

  return ret;
}
2155
2156 static GstFlowReturn
gst_video_decoder_flush_decode(GstVideoDecoder * dec)2157 gst_video_decoder_flush_decode (GstVideoDecoder * dec)
2158 {
2159 GstVideoDecoderPrivate *priv = dec->priv;
2160 GstFlowReturn res = GST_FLOW_OK;
2161 GList *walk;
2162
2163 GST_DEBUG_OBJECT (dec, "flushing buffers to decode");
2164
2165 walk = priv->decode;
2166 while (walk) {
2167 GList *next;
2168 GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);
2169
2170 GST_DEBUG_OBJECT (dec, "decoding frame %p buffer %p, PTS %" GST_TIME_FORMAT
2171 ", DTS %" GST_TIME_FORMAT, frame, frame->input_buffer,
2172 GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)),
2173 GST_TIME_ARGS (GST_BUFFER_DTS (frame->input_buffer)));
2174
2175 next = walk->next;
2176
2177 priv->decode = g_list_delete_link (priv->decode, walk);
2178
2179 /* decode buffer, resulting data prepended to queue */
2180 res = gst_video_decoder_decode_frame (dec, frame);
2181 if (res != GST_FLOW_OK)
2182 break;
2183
2184 walk = next;
2185 }
2186
2187 return res;
2188 }
2189
/* gst_video_decoder_flush_parse is called from the
 * chain_reverse() function when a buffer containing
 * a DISCONT - indicating that reverse playback
 * looped back to the next data block, and therefore
 * all available data should be fed through the
 * decoder and frames gathered for reversed output.
 *
 * Overall flow: the gather list (newest-first) is reversed onto the parse
 * list, every buffer is re-chained forward so the subclass parser produces
 * frames on parse_gather, segment events carried by those frames are made
 * pending, and then frames are moved GOP-wise (keyframe first) onto the
 * decode list and decoded, with the resulting output pushed in reverse
 * timestamp order.
 */
static GstFlowReturn
gst_video_decoder_flush_parse (GstVideoDecoder * dec, gboolean at_eos)
{
  GstVideoDecoderPrivate *priv = dec->priv;
  GstFlowReturn res = GST_FLOW_OK;
  GList *walk;
  GstVideoDecoderClass *decoder_class;

  decoder_class = GST_VIDEO_DECODER_GET_CLASS (dec);

  GST_DEBUG_OBJECT (dec, "flushing buffers to parsing");

  /* Reverse the gather list, and prepend it to the parse list,
   * then flush to parse whatever we can */
  priv->gather = g_list_reverse (priv->gather);
  priv->parse = g_list_concat (priv->gather, priv->parse);
  priv->gather = NULL;

  /* clear buffer and decoder state */
  gst_video_decoder_flush (dec, FALSE);

  walk = priv->parse;
  while (walk) {
    GstBuffer *buf = GST_BUFFER_CAST (walk->data);
    GList *next = walk->next;

    GST_DEBUG_OBJECT (dec, "parsing buffer %p, PTS %" GST_TIME_FORMAT
        ", DTS %" GST_TIME_FORMAT " flags %x", buf,
        GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
        GST_TIME_ARGS (GST_BUFFER_DTS (buf)), GST_BUFFER_FLAGS (buf));

    /* parse buffer, resulting frames prepended to parse_gather queue */
    gst_buffer_ref (buf);
    res = gst_video_decoder_chain_forward (dec, buf, at_eos);

    /* if we generated output, we can discard the buffer, else we
     * keep it in the queue */
    if (priv->parse_gather) {
      GST_DEBUG_OBJECT (dec, "parsed buffer to %p", priv->parse_gather->data);
      priv->parse = g_list_delete_link (priv->parse, walk);
      gst_buffer_unref (buf);
    } else {
      GST_DEBUG_OBJECT (dec, "buffer did not decode, keeping");
    }
    walk = next;
  }

  /* Migrate sticky events (up to and including SEGMENT) that were attached
   * to the parsed frames onto the pending_events list */
  walk = priv->parse_gather;
  while (walk) {
    GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);
    GList *walk2;

    /* this is reverse playback, check if we need to apply some segment
     * to the output before decoding, as during decoding the segment.rate
     * must be used to determine if a buffer should be pushed or added to
     * the output list for reverse pushing.
     *
     * The new segment is not immediately pushed here because we must
     * wait for negotiation to happen before it can be pushed to avoid
     * pushing a segment before caps event. Negotiation only happens
     * when finish_frame is called.
     */
    for (walk2 = frame->events; walk2;) {
      GList *cur = walk2;
      GstEvent *event = walk2->data;

      /* advance before possibly deleting the current link below */
      walk2 = g_list_next (walk2);
      if (GST_EVENT_TYPE (event) <= GST_EVENT_SEGMENT) {

        if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
          GstSegment segment;

          GST_DEBUG_OBJECT (dec, "Segment at frame %p %" GST_TIME_FORMAT,
              frame, GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)));
          gst_event_copy_segment (event, &segment);
          if (segment.format == GST_FORMAT_TIME) {
            dec->output_segment = segment;
            dec->priv->in_out_segment_sync =
                gst_segment_is_equal (&dec->input_segment, &segment);
          }
        }
        dec->priv->pending_events =
            g_list_append (dec->priv->pending_events, event);
        frame->events = g_list_delete_link (frame->events, cur);
      }
    }

    walk = walk->next;
  }

  /* now we can process frames. Start by moving each frame from the parse_gather
   * to the decode list, reverse the order as we go, and stopping when/if we
   * copy a keyframe. */
  GST_DEBUG_OBJECT (dec, "checking parsed frames for a keyframe to decode");
  walk = priv->parse_gather;
  while (walk) {
    GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);

    /* remove from the gather list */
    priv->parse_gather = g_list_remove_link (priv->parse_gather, walk);

    /* move it to the front of the decode queue */
    priv->decode = g_list_concat (walk, priv->decode);

    /* if we copied a keyframe, flush and decode the decode queue */
    if (GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
      GST_DEBUG_OBJECT (dec, "found keyframe %p with PTS %" GST_TIME_FORMAT
          ", DTS %" GST_TIME_FORMAT, frame,
          GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)),
          GST_TIME_ARGS (GST_BUFFER_DTS (frame->input_buffer)));
      res = gst_video_decoder_flush_decode (dec);
      if (res != GST_FLOW_OK)
        goto done;

      /* We need to tell the subclass to drain now.
       * We prefer the drain vfunc, but for backward-compat
       * we use a finish() vfunc if drain isn't implemented */
      if (decoder_class->drain) {
        GST_DEBUG_OBJECT (dec, "Draining");
        res = decoder_class->drain (dec);
      } else if (decoder_class->finish) {
        GST_FIXME_OBJECT (dec, "Sub-class should implement drain(). "
            "Calling finish() for backwards-compat");
        res = decoder_class->finish (dec);
      }

      if (res != GST_FLOW_OK)
        goto done;

      /* now send queued data downstream */
      walk = priv->output_queued;
      while (walk) {
        GstBuffer *buf = GST_BUFFER_CAST (walk->data);

        priv->output_queued =
            g_list_delete_link (priv->output_queued, priv->output_queued);

        if (G_LIKELY (res == GST_FLOW_OK)) {
          /* avoid stray DISCONT from forward processing,
           * which have no meaning in reverse pushing */
          GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DISCONT);

          /* Last chance to calculate a timestamp as we loop backwards
           * through the list */
          if (GST_BUFFER_TIMESTAMP (buf) != GST_CLOCK_TIME_NONE)
            priv->last_timestamp_out = GST_BUFFER_TIMESTAMP (buf);
          else if (priv->last_timestamp_out != GST_CLOCK_TIME_NONE &&
              GST_BUFFER_DURATION (buf) != GST_CLOCK_TIME_NONE) {
            GST_BUFFER_TIMESTAMP (buf) =
                priv->last_timestamp_out - GST_BUFFER_DURATION (buf);
            priv->last_timestamp_out = GST_BUFFER_TIMESTAMP (buf);
            GST_LOG_OBJECT (dec,
                "Calculated TS %" GST_TIME_FORMAT " working backwards",
                GST_TIME_ARGS (priv->last_timestamp_out));
          }

          res = gst_video_decoder_clip_and_push_buf (dec, buf);
        } else {
          /* a previous push failed; drop the remaining queued buffers */
          gst_buffer_unref (buf);
        }

        walk = priv->output_queued;
      }

      /* clear buffer and decoder state again
       * before moving to the previous keyframe */
      gst_video_decoder_flush (dec, FALSE);
    }

    walk = priv->parse_gather;
  }

done:
  return res;
}
2372
2373 static GstFlowReturn
gst_video_decoder_chain_reverse(GstVideoDecoder * dec,GstBuffer * buf)2374 gst_video_decoder_chain_reverse (GstVideoDecoder * dec, GstBuffer * buf)
2375 {
2376 GstVideoDecoderPrivate *priv = dec->priv;
2377 GstFlowReturn result = GST_FLOW_OK;
2378
2379 /* if we have a discont, move buffers to the decode list */
2380 if (!buf || GST_BUFFER_IS_DISCONT (buf)) {
2381 GST_DEBUG_OBJECT (dec, "received discont");
2382
2383 /* parse and decode stuff in the gather and parse queues */
2384 result = gst_video_decoder_flush_parse (dec, FALSE);
2385 }
2386
2387 if (G_LIKELY (buf)) {
2388 GST_DEBUG_OBJECT (dec, "gathering buffer %p of size %" G_GSIZE_FORMAT ", "
2389 "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dur %"
2390 GST_TIME_FORMAT, buf, gst_buffer_get_size (buf),
2391 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2392 GST_TIME_ARGS (GST_BUFFER_DTS (buf)),
2393 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
2394
2395 /* add buffer to gather queue */
2396 priv->gather = g_list_prepend (priv->gather, buf);
2397 }
2398
2399 return result;
2400 }
2401
/* Sink pad chain function.  Errors out if the subclass requires caps and
 * none were set yet; synthesizes a default TIME segment when upstream sent
 * none; then dispatches to the forward or reverse chain path depending on
 * the input segment rate. */
static GstFlowReturn
gst_video_decoder_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
{
  GstVideoDecoder *decoder;
  GstFlowReturn ret = GST_FLOW_OK;

  decoder = GST_VIDEO_DECODER (parent);

  /* subclass demands set_format before data (needs_format) */
  if (G_UNLIKELY (!decoder->priv->input_state && decoder->priv->needs_format))
    goto not_negotiated;

  GST_LOG_OBJECT (decoder,
      "chain PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT " duration %"
      GST_TIME_FORMAT " size %" G_GSIZE_FORMAT " flags %x",
      GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
      GST_TIME_ARGS (GST_BUFFER_DTS (buf)),
      GST_TIME_ARGS (GST_BUFFER_DURATION (buf)),
      gst_buffer_get_size (buf), GST_BUFFER_FLAGS (buf));

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  /* NOTE:
   * requiring the pad to be negotiated makes it impossible to use
   * oggdemux or filesrc ! decoder */

  if (decoder->input_segment.format == GST_FORMAT_UNDEFINED) {
    GstEvent *event;
    GstSegment *segment = &decoder->input_segment;

    GST_WARNING_OBJECT (decoder,
        "Received buffer without a new-segment. "
        "Assuming timestamps start from 0.");

    gst_segment_init (segment, GST_FORMAT_TIME);

    event = gst_event_new_segment (segment);

    /* queue the synthetic segment so it is pushed before the first frame */
    decoder->priv->current_frame_events =
        g_list_prepend (decoder->priv->current_frame_events, event);
  }

  decoder->priv->had_input_data = TRUE;

  if (decoder->input_segment.rate > 0.0)
    ret = gst_video_decoder_chain_forward (decoder, buf, FALSE);
  else
    ret = gst_video_decoder_chain_reverse (decoder, buf);

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
  return ret;

  /* ERRORS */
not_negotiated:
  {
    GST_ELEMENT_ERROR (decoder, CORE, NEGOTIATION, (NULL),
        ("decoder not initialized"));
    gst_buffer_unref (buf);
    return GST_FLOW_NOT_NEGOTIATED;
  }
}
2462
/* GstElement::change_state implementation.  Maps element state changes to
 * the subclass lifecycle vfuncs: open/close around NULL<->READY, start/stop
 * around READY<->PAUSED, with a full decoder reset on both entering and
 * leaving PAUSED. */
static GstStateChangeReturn
gst_video_decoder_change_state (GstElement * element, GstStateChange transition)
{
  GstVideoDecoder *decoder;
  GstVideoDecoderClass *decoder_class;
  GstStateChangeReturn ret;

  decoder = GST_VIDEO_DECODER (element);
  decoder_class = GST_VIDEO_DECODER_GET_CLASS (element);

  /* upward transitions: prepare before chaining up */
  switch (transition) {
    case GST_STATE_CHANGE_NULL_TO_READY:
      /* open device/library if needed */
      if (decoder_class->open && !decoder_class->open (decoder))
        goto open_failed;
      break;
    case GST_STATE_CHANGE_READY_TO_PAUSED:
      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      gst_video_decoder_reset (decoder, TRUE, TRUE);
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

      /* Initialize device/library if needed */
      if (decoder_class->start && !decoder_class->start (decoder))
        goto start_failed;
      break;
    default:
      break;
  }

  ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);

  /* downward transitions: tear down after chaining up */
  switch (transition) {
    case GST_STATE_CHANGE_PAUSED_TO_READY:{
      gboolean stopped = TRUE;

      if (decoder_class->stop)
        stopped = decoder_class->stop (decoder);

      /* reset even if stop() failed, so the decoder is left clean */
      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      gst_video_decoder_reset (decoder, TRUE, TRUE);
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

      if (!stopped)
        goto stop_failed;

      break;
    }
    case GST_STATE_CHANGE_READY_TO_NULL:
      /* close device/library if needed */
      if (decoder_class->close && !decoder_class->close (decoder))
        goto close_failed;
      break;
    default:
      break;
  }

  return ret;

  /* Errors */
open_failed:
  {
    GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
        ("Failed to open decoder"));
    return GST_STATE_CHANGE_FAILURE;
  }

start_failed:
  {
    GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
        ("Failed to start decoder"));
    return GST_STATE_CHANGE_FAILURE;
  }

stop_failed:
  {
    GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
        ("Failed to stop decoder"));
    return GST_STATE_CHANGE_FAILURE;
  }

close_failed:
  {
    GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
        ("Failed to close decoder"));
    return GST_STATE_CHANGE_FAILURE;
  }
}
2550
/* Allocate a new GstVideoCodecFrame (refcount 1) with the next system and
 * decode frame numbers, no timestamps yet, and ownership of any events
 * collected since the previous frame.  Takes the stream lock while touching
 * the shared counters and event list. */
static GstVideoCodecFrame *
gst_video_decoder_new_frame (GstVideoDecoder * decoder)
{
  GstVideoDecoderPrivate *priv = decoder->priv;
  GstVideoCodecFrame *frame;

  frame = g_slice_new0 (GstVideoCodecFrame);

  frame->ref_count = 1;

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  frame->system_frame_number = priv->system_frame_number;
  priv->system_frame_number++;
  frame->decode_frame_number = priv->decode_frame_number;
  priv->decode_frame_number++;

  frame->dts = GST_CLOCK_TIME_NONE;
  frame->pts = GST_CLOCK_TIME_NONE;
  frame->duration = GST_CLOCK_TIME_NONE;
  /* the new frame takes ownership of events received since the last frame */
  frame->events = priv->current_frame_events;
  priv->current_frame_events = NULL;

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  GST_LOG_OBJECT (decoder, "Created new frame %p (sfn:%d)",
      frame, frame->system_frame_number);

  return frame;
}
2580
2581 static void
gst_video_decoder_push_event_list(GstVideoDecoder * decoder,GList * events)2582 gst_video_decoder_push_event_list (GstVideoDecoder * decoder, GList * events)
2583 {
2584 GList *l;
2585
2586 /* events are stored in reverse order */
2587 for (l = g_list_last (events); l; l = g_list_previous (l)) {
2588 GST_LOG_OBJECT (decoder, "pushing %s event", GST_EVENT_TYPE_NAME (l->data));
2589 gst_video_decoder_push_event (decoder, l->data);
2590 }
2591 g_list_free (events);
2592 }
2593
/* Common bookkeeping before a frame is finished (pushed) or dropped:
 * flushes events attached to this and earlier pending frames, and applies
 * a cascade of heuristics to give the frame a usable PTS/duration when the
 * subclass did not provide them.  @dropping indicates the frame will be
 * dropped rather than output (events are then kept pending instead of
 * pushed). */
static void
gst_video_decoder_prepare_finish_frame (GstVideoDecoder *
    decoder, GstVideoCodecFrame * frame, gboolean dropping)
{
  GstVideoDecoderPrivate *priv = decoder->priv;
  GList *l, *events = NULL;
  gboolean sync;

#ifndef GST_DISABLE_GST_DEBUG
  GST_LOG_OBJECT (decoder, "n %d in %" G_GSIZE_FORMAT " out %" G_GSIZE_FORMAT,
      g_list_length (priv->frames),
      gst_adapter_available (priv->input_adapter),
      gst_adapter_available (priv->output_adapter));
#endif

  sync = GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame);

  GST_LOG_OBJECT (decoder,
      "finish frame %p (#%d) sync:%d PTS:%" GST_TIME_FORMAT " DTS:%"
      GST_TIME_FORMAT,
      frame, frame->system_frame_number,
      sync, GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->dts));

  /* Push all pending events that arrived before this frame */
  for (l = priv->frames; l; l = l->next) {
    GstVideoCodecFrame *tmp = l->data;

    if (tmp->events) {
      /* collected newest-first; concat keeps overall reverse order */
      events = g_list_concat (tmp->events, events);
      tmp->events = NULL;
    }

    if (tmp == frame)
      break;
  }

  if (dropping || !decoder->priv->output_state) {
    /* Push before the next frame that is not dropped */
    decoder->priv->pending_events =
        g_list_concat (events, decoder->priv->pending_events);
  } else {
    gst_video_decoder_push_event_list (decoder, decoder->priv->pending_events);
    decoder->priv->pending_events = NULL;

    gst_video_decoder_push_event_list (decoder, events);
  }

  /* Check if the data should not be displayed. For example altref/invisible
   * frame in vp8. In this case we should not update the timestamps. */
  if (GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (frame))
    return;

  /* If the frame is meant to be output but we don't have an output_buffer
   * we have a problem :) */
  if (G_UNLIKELY ((frame->output_buffer == NULL) && !dropping))
    goto no_output_buffer;

  /* track the most recent valid PTS as the new base timestamp */
  if (GST_CLOCK_TIME_IS_VALID (frame->pts)) {
    if (frame->pts != priv->base_timestamp) {
      GST_DEBUG_OBJECT (decoder,
          "sync timestamp %" GST_TIME_FORMAT " diff %" GST_STIME_FORMAT,
          GST_TIME_ARGS (frame->pts),
          GST_STIME_ARGS (GST_CLOCK_DIFF (frame->pts,
                  decoder->output_segment.start)));
      priv->base_timestamp = frame->pts;
      priv->base_picture_number = frame->decode_frame_number;
    }
  }

  if (frame->duration == GST_CLOCK_TIME_NONE) {
    frame->duration = gst_video_decoder_get_frame_duration (decoder, frame);
    GST_LOG_OBJECT (decoder,
        "Guessing duration %" GST_TIME_FORMAT " for frame...",
        GST_TIME_ARGS (frame->duration));
  }

  /* PTS is expected montone ascending,
   * so a good guess is lowest unsent DTS */
  {
    GstClockTime min_ts = GST_CLOCK_TIME_NONE;
    GstVideoCodecFrame *oframe = NULL;
    gboolean seen_none = FALSE;

    /* some maintenance regardless */
    /* first pass: ts holds DTS; find the lowest unsent one */
    for (l = priv->frames; l; l = l->next) {
      GstVideoCodecFrame *tmp = l->data;

      if (!GST_CLOCK_TIME_IS_VALID (tmp->abidata.ABI.ts)) {
        seen_none = TRUE;
        continue;
      }

      if (!GST_CLOCK_TIME_IS_VALID (min_ts) || tmp->abidata.ABI.ts < min_ts) {
        min_ts = tmp->abidata.ABI.ts;
        oframe = tmp;
      }
    }
    /* save a ts if needed */
    if (oframe && oframe != frame) {
      oframe->abidata.ABI.ts = frame->abidata.ABI.ts;
    }

    /* and set if needed;
     * valid delta means we have reasonable DTS input */
    /* also, if we ended up reordered, means this approach is conflicting
     * with some sparse existing PTS, and so it does not work out */
    if (!priv->reordered_output &&
        !GST_CLOCK_TIME_IS_VALID (frame->pts) && !seen_none &&
        GST_CLOCK_TIME_IS_VALID (priv->pts_delta)) {
      frame->pts = min_ts + priv->pts_delta;
      GST_DEBUG_OBJECT (decoder,
          "no valid PTS, using oldest DTS %" GST_TIME_FORMAT,
          GST_TIME_ARGS (frame->pts));
    }

    /* some more maintenance, ts2 holds PTS */
    min_ts = GST_CLOCK_TIME_NONE;
    seen_none = FALSE;
    for (l = priv->frames; l; l = l->next) {
      GstVideoCodecFrame *tmp = l->data;

      if (!GST_CLOCK_TIME_IS_VALID (tmp->abidata.ABI.ts2)) {
        seen_none = TRUE;
        continue;
      }

      if (!GST_CLOCK_TIME_IS_VALID (min_ts) || tmp->abidata.ABI.ts2 < min_ts) {
        min_ts = tmp->abidata.ABI.ts2;
        oframe = tmp;
      }
    }
    /* save a ts if needed */
    if (oframe && oframe != frame) {
      oframe->abidata.ABI.ts2 = frame->abidata.ABI.ts2;
    }

    /* if we detected reordered output, then PTS are void,
     * however those were obtained; bogus input, subclass etc */
    if (priv->reordered_output && !seen_none) {
      GST_DEBUG_OBJECT (decoder, "invalidating PTS");
      frame->pts = GST_CLOCK_TIME_NONE;
    }

    if (!GST_CLOCK_TIME_IS_VALID (frame->pts) && !seen_none) {
      frame->pts = min_ts;
      GST_DEBUG_OBJECT (decoder,
          "no valid PTS, using oldest PTS %" GST_TIME_FORMAT,
          GST_TIME_ARGS (frame->pts));
    }
  }


  if (frame->pts == GST_CLOCK_TIME_NONE) {
    /* Last ditch timestamp guess: Just add the duration to the previous
     * frame. If it's the first frame, just use the segment start. */
    if (frame->duration != GST_CLOCK_TIME_NONE) {
      if (GST_CLOCK_TIME_IS_VALID (priv->last_timestamp_out))
        frame->pts = priv->last_timestamp_out + frame->duration;
      else if (decoder->output_segment.rate > 0.0)
        frame->pts = decoder->output_segment.start;
      GST_LOG_OBJECT (decoder,
          "Guessing timestamp %" GST_TIME_FORMAT " for frame...",
          GST_TIME_ARGS (frame->pts));
    } else if (sync && frame->dts != GST_CLOCK_TIME_NONE) {
      frame->pts = frame->dts;
      GST_LOG_OBJECT (decoder,
          "Setting DTS as PTS %" GST_TIME_FORMAT " for frame...",
          GST_TIME_ARGS (frame->pts));
    }
  }

  /* clamp backwards-going timestamps to keep output monotonic */
  if (GST_CLOCK_TIME_IS_VALID (priv->last_timestamp_out)) {
    if (frame->pts < priv->last_timestamp_out) {
      GST_WARNING_OBJECT (decoder,
          "decreasing timestamp (%" GST_TIME_FORMAT " < %"
          GST_TIME_FORMAT ")",
          GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (priv->last_timestamp_out));
      priv->reordered_output = TRUE;
      /* make it a bit less weird downstream */
      frame->pts = priv->last_timestamp_out;
    }
  }

  if (GST_CLOCK_TIME_IS_VALID (frame->pts))
    priv->last_timestamp_out = frame->pts;

  return;

  /* ERRORS */
no_output_buffer:
  {
    /* only logged; the frame proceeds without timestamp adjustment */
    GST_ERROR_OBJECT (decoder, "No buffer to output !");
  }
}
2788
2789 /**
2790 * gst_video_decoder_release_frame:
2791 * @dec: a #GstVideoDecoder
2792 * @frame: (transfer full): the #GstVideoCodecFrame to release
2793 *
2794 * Similar to gst_video_decoder_drop_frame(), but simply releases @frame
2795 * without any processing other than removing it from list of pending frames,
2796 * after which it is considered finished and released.
2797 *
2798 * Since: 1.2.2
2799 */
2800 void
gst_video_decoder_release_frame(GstVideoDecoder * dec,GstVideoCodecFrame * frame)2801 gst_video_decoder_release_frame (GstVideoDecoder * dec,
2802 GstVideoCodecFrame * frame)
2803 {
2804 GList *link;
2805
2806 /* unref once from the list */
2807 GST_VIDEO_DECODER_STREAM_LOCK (dec);
2808 link = g_list_find (dec->priv->frames, frame);
2809 if (link) {
2810 gst_video_codec_frame_unref (frame);
2811 dec->priv->frames = g_list_delete_link (dec->priv->frames, link);
2812 }
2813 if (frame->events) {
2814 dec->priv->pending_events =
2815 g_list_concat (frame->events, dec->priv->pending_events);
2816 frame->events = NULL;
2817 }
2818 GST_VIDEO_DECODER_STREAM_UNLOCK (dec);
2819
2820 /* unref because this function takes ownership */
2821 gst_video_codec_frame_unref (frame);
2822 }
2823
2824 /**
2825 * gst_video_decoder_drop_frame:
2826 * @dec: a #GstVideoDecoder
2827 * @frame: (transfer full): the #GstVideoCodecFrame to drop
2828 *
2829 * Similar to gst_video_decoder_finish_frame(), but drops @frame in any
2830 * case and posts a QoS message with the frame's details on the bus.
2831 * In any case, the frame is considered finished and released.
2832 *
2833 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
2834 */
2835 GstFlowReturn
gst_video_decoder_drop_frame(GstVideoDecoder * dec,GstVideoCodecFrame * frame)2836 gst_video_decoder_drop_frame (GstVideoDecoder * dec, GstVideoCodecFrame * frame)
2837 {
2838 GstClockTime stream_time, jitter, earliest_time, qostime, timestamp;
2839 GstSegment *segment;
2840 GstMessage *qos_msg;
2841 gdouble proportion;
2842
2843 GST_LOG_OBJECT (dec, "drop frame %p", frame);
2844
2845 GST_VIDEO_DECODER_STREAM_LOCK (dec);
2846
2847 gst_video_decoder_prepare_finish_frame (dec, frame, TRUE);
2848
2849 GST_DEBUG_OBJECT (dec, "dropping frame %" GST_TIME_FORMAT,
2850 GST_TIME_ARGS (frame->pts));
2851
2852 dec->priv->dropped++;
2853
2854 /* post QoS message */
2855 GST_OBJECT_LOCK (dec);
2856 proportion = dec->priv->proportion;
2857 earliest_time = dec->priv->earliest_time;
2858 GST_OBJECT_UNLOCK (dec);
2859
2860 timestamp = frame->pts;
2861 segment = &dec->output_segment;
2862 if (G_UNLIKELY (segment->format == GST_FORMAT_UNDEFINED))
2863 segment = &dec->input_segment;
2864 stream_time =
2865 gst_segment_to_stream_time (segment, GST_FORMAT_TIME, timestamp);
2866 qostime = gst_segment_to_running_time (segment, GST_FORMAT_TIME, timestamp);
2867 jitter = GST_CLOCK_DIFF (qostime, earliest_time);
2868 qos_msg =
2869 gst_message_new_qos (GST_OBJECT_CAST (dec), FALSE, qostime, stream_time,
2870 timestamp, GST_CLOCK_TIME_NONE);
2871 gst_message_set_qos_values (qos_msg, jitter, proportion, 1000000);
2872 gst_message_set_qos_stats (qos_msg, GST_FORMAT_BUFFERS,
2873 dec->priv->processed, dec->priv->dropped);
2874 gst_element_post_message (GST_ELEMENT_CAST (dec), qos_msg);
2875
2876 /* now free the frame */
2877 gst_video_decoder_release_frame (dec, frame);
2878
2879 GST_VIDEO_DECODER_STREAM_UNLOCK (dec);
2880
2881 return GST_FLOW_OK;
2882 }
2883
2884 static gboolean
gst_video_decoder_transform_meta_default(GstVideoDecoder * decoder,GstVideoCodecFrame * frame,GstMeta * meta)2885 gst_video_decoder_transform_meta_default (GstVideoDecoder *
2886 decoder, GstVideoCodecFrame * frame, GstMeta * meta)
2887 {
2888 const GstMetaInfo *info = meta->info;
2889 const gchar *const *tags;
2890
2891 tags = gst_meta_api_type_get_tags (info->api);
2892
2893 if (!tags || (g_strv_length ((gchar **) tags) == 1
2894 && gst_meta_api_type_has_tag (info->api,
2895 g_quark_from_string (GST_META_TAG_VIDEO_STR))))
2896 return TRUE;
2897
2898 return FALSE;
2899 }
2900
/* Closure passed to foreach_metadata() when copying metadata from an input
 * buffer over to the corresponding frame's output buffer. */
typedef struct
{
  GstVideoDecoder *decoder;     /* decoder whose transform_meta vfunc decides what to copy */
  GstVideoCodecFrame *frame;    /* frame owning the destination output_buffer */
} CopyMetaData;
2906
2907 static gboolean
foreach_metadata(GstBuffer * inbuf,GstMeta ** meta,gpointer user_data)2908 foreach_metadata (GstBuffer * inbuf, GstMeta ** meta, gpointer user_data)
2909 {
2910 CopyMetaData *data = user_data;
2911 GstVideoDecoder *decoder = data->decoder;
2912 GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
2913 GstVideoCodecFrame *frame = data->frame;
2914 const GstMetaInfo *info = (*meta)->info;
2915 gboolean do_copy = FALSE;
2916
2917 if (gst_meta_api_type_has_tag (info->api, _gst_meta_tag_memory)) {
2918 /* never call the transform_meta with memory specific metadata */
2919 GST_DEBUG_OBJECT (decoder, "not copying memory specific metadata %s",
2920 g_type_name (info->api));
2921 do_copy = FALSE;
2922 } else if (klass->transform_meta) {
2923 do_copy = klass->transform_meta (decoder, frame, *meta);
2924 GST_DEBUG_OBJECT (decoder, "transformed metadata %s: copy: %d",
2925 g_type_name (info->api), do_copy);
2926 }
2927
2928 /* we only copy metadata when the subclass implemented a transform_meta
2929 * function and when it returns %TRUE */
2930 if (do_copy && info->transform_func) {
2931 GstMetaTransformCopy copy_data = { FALSE, 0, -1 };
2932 GST_DEBUG_OBJECT (decoder, "copy metadata %s", g_type_name (info->api));
2933 /* simply copy then */
2934 info->transform_func (frame->output_buffer, *meta, inbuf,
2935 _gst_meta_transform_copy, ©_data);
2936 }
2937 return TRUE;
2938 }
2939
/**
 * gst_video_decoder_finish_frame:
 * @decoder: a #GstVideoDecoder
 * @frame: (transfer full): a decoded #GstVideoCodecFrame
 *
 * @frame should have a valid decoded data buffer, whose metadata fields
 * are then appropriately set according to frame data and pushed downstream.
 * If no output data is provided, @frame is considered skipped.
 * In any case, the frame is considered finished and released.
 *
 * After calling this function the output buffer of the frame is to be
 * considered read-only. This function will also change the metadata
 * of the buffer.
 *
 * Returns: a #GstFlowReturn resulting from sending data downstream
 */
GstFlowReturn
gst_video_decoder_finish_frame (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
  GstVideoDecoderPrivate *priv = decoder->priv;
  GstBuffer *output_buffer;
  gboolean needs_reconfigure = FALSE;

  GST_LOG_OBJECT (decoder, "finish frame %p", frame);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  /* Renegotiate first if the output state changed or downstream requested a
   * reconfigure.  On failure, re-mark the pad for reconfiguration so the
   * attempt is retried, and report FLUSHING vs. NOT_NEGOTIATED accordingly. */
  needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
  if (G_UNLIKELY (priv->output_state_changed || (priv->output_state
              && needs_reconfigure))) {
    if (!gst_video_decoder_negotiate_unlocked (decoder)) {
      gst_pad_mark_reconfigure (decoder->srcpad);
      if (GST_PAD_IS_FLUSHING (decoder->srcpad))
        ret = GST_FLOW_FLUSHING;
      else
        ret = GST_FLOW_NOT_NEGOTIATED;
      goto done;
    }
  }

  gst_video_decoder_prepare_finish_frame (decoder, frame, FALSE);
  priv->processed++;

  /* Push any pending merged tags downstream before the first data buffer
   * that follows the tag change */
  if (priv->tags_changed) {
    GstEvent *tags_event;

    tags_event = gst_video_decoder_create_merged_tags_event (decoder);

    if (tags_event != NULL)
      gst_video_decoder_push_event (decoder, tags_event);

    priv->tags_changed = FALSE;
  }

  /* no buffer data means this frame is skipped */
  if (!frame->output_buffer || GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (frame)) {
    GST_DEBUG_OBJECT (decoder, "skipping frame %" GST_TIME_FORMAT,
        GST_TIME_ARGS (frame->pts));
    goto done;
  }

  /* We need a writable buffer for the metadata changes below */
  output_buffer = frame->output_buffer =
      gst_buffer_make_writable (frame->output_buffer);

  /* Decoded frames are independently displayable, never delta units */
  GST_BUFFER_FLAG_UNSET (output_buffer, GST_BUFFER_FLAG_DELTA_UNIT);

  /* DTS makes no sense on raw decoded video; only PTS/duration survive */
  GST_BUFFER_PTS (output_buffer) = frame->pts;
  GST_BUFFER_DTS (output_buffer) = GST_CLOCK_TIME_NONE;
  GST_BUFFER_DURATION (output_buffer) = frame->duration;

  GST_BUFFER_OFFSET (output_buffer) = GST_BUFFER_OFFSET_NONE;
  GST_BUFFER_OFFSET_END (output_buffer) = GST_BUFFER_OFFSET_NONE;

  if (priv->discont) {
    GST_BUFFER_FLAG_SET (output_buffer, GST_BUFFER_FLAG_DISCONT);
  }

  /* Let the subclass decide which input-buffer metadata carries over to the
   * output buffer (see foreach_metadata / transform_meta) */
  if (decoder_class->transform_meta) {
    if (G_LIKELY (frame->input_buffer)) {
      CopyMetaData data;

      data.decoder = decoder;
      data.frame = frame;
      gst_buffer_foreach_meta (frame->input_buffer, foreach_metadata, &data);
    } else {
      GST_WARNING_OBJECT (decoder,
          "Can't copy metadata because input frame disappeared");
    }
  }

  /* Get an additional ref to the buffer, which is going to be pushed
   * downstream, the original ref is owned by the frame
   */
  output_buffer = gst_buffer_ref (output_buffer);

  /* Release frame so the buffer is writable when we push it downstream
   * if possible, i.e. if the subclass does not hold additional references
   * to the frame
   */
  gst_video_decoder_release_frame (decoder, frame);
  frame = NULL;

  /* In reverse playback (unless in key-units trick mode) output buffers are
   * queued and sent out in reverse order later; otherwise clip and push now */
  if (decoder->output_segment.rate < 0.0
      && !(decoder->output_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS)) {
    GST_LOG_OBJECT (decoder, "queued frame");
    priv->output_queued = g_list_prepend (priv->output_queued, output_buffer);
  } else {
    ret = gst_video_decoder_clip_and_push_buf (decoder, output_buffer);
  }

done:
  /* On early exit the frame was not released above; do it now */
  if (frame)
    gst_video_decoder_release_frame (decoder, frame);
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
  return ret;
}
3060
/* With stream lock, takes the frame reference */
/* Clips @buf against the output segment, applies QoS-based dropping, updates
 * rate-estimation bookkeeping and finally pushes the buffer on the source
 * pad (dropping the stream lock around the push).  Consumes @buf. */
static GstFlowReturn
gst_video_decoder_clip_and_push_buf (GstVideoDecoder * decoder, GstBuffer * buf)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstVideoDecoderPrivate *priv = decoder->priv;
  guint64 start, stop;
  guint64 cstart, cstop;
  GstSegment *segment;
  GstClockTime duration;

  /* Check for clipping */
  start = GST_BUFFER_PTS (buf);
  duration = GST_BUFFER_DURATION (buf);

  /* store that we have valid decoded data */
  priv->had_output_data = TRUE;

  stop = GST_CLOCK_TIME_NONE;

  if (GST_CLOCK_TIME_IS_VALID (start) && GST_CLOCK_TIME_IS_VALID (duration)) {
    stop = start + duration;
  } else if (GST_CLOCK_TIME_IS_VALID (start)
      && !GST_CLOCK_TIME_IS_VALID (duration)) {
    /* If we don't clip away buffers that far before the segment we
     * can cause the pipeline to lockup. This can happen if audio is
     * properly clipped, and thus the audio sink does not preroll yet
     * but the video sink prerolls because we already outputted a
     * buffer here... and then queues run full.
     *
     * In the worst case we will clip one buffer too many here now if no
     * framerate is given, no buffer duration is given and the actual
     * framerate is lower than 25fps */
    stop = start + 40 * GST_MSECOND;
  }

  segment = &decoder->output_segment;
  if (gst_segment_clip (segment, GST_FORMAT_TIME, start, stop, &cstart, &cstop)) {
    /* Buffer (at least partially) inside the segment: adjust its
     * timestamp/duration to the clipped values */
    GST_BUFFER_PTS (buf) = cstart;

    if (stop != GST_CLOCK_TIME_NONE && GST_CLOCK_TIME_IS_VALID (duration))
      GST_BUFFER_DURATION (buf) = cstop - cstart;

    GST_LOG_OBJECT (decoder,
        "accepting buffer inside segment: %" GST_TIME_FORMAT " %"
        GST_TIME_FORMAT " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
        " time %" GST_TIME_FORMAT,
        GST_TIME_ARGS (cstart),
        GST_TIME_ARGS (cstop),
        GST_TIME_ARGS (segment->start), GST_TIME_ARGS (segment->stop),
        GST_TIME_ARGS (segment->time));
  } else {
    GST_LOG_OBJECT (decoder,
        "dropping buffer outside segment: %" GST_TIME_FORMAT
        " %" GST_TIME_FORMAT
        " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
        " time %" GST_TIME_FORMAT,
        GST_TIME_ARGS (start), GST_TIME_ARGS (stop),
        GST_TIME_ARGS (segment->start),
        GST_TIME_ARGS (segment->stop), GST_TIME_ARGS (segment->time));
    /* only check and return EOS if upstream still
     * in the same segment and interested as such */
    if (decoder->priv->in_out_segment_sync) {
      if (segment->rate >= 0) {
        if (GST_BUFFER_PTS (buf) >= segment->stop)
          ret = GST_FLOW_EOS;
      } else if (GST_BUFFER_PTS (buf) < segment->start) {
        ret = GST_FLOW_EOS;
      }
    }
    gst_buffer_unref (buf);
    goto done;
  }

  /* Is buffer too late (QoS) ? */
  if (GST_CLOCK_TIME_IS_VALID (priv->earliest_time)
      && GST_CLOCK_TIME_IS_VALID (cstart)) {
    GstClockTime deadline =
        gst_segment_to_running_time (segment, GST_FORMAT_TIME, cstart);
    if (GST_CLOCK_TIME_IS_VALID (deadline) && deadline < priv->earliest_time) {
      GST_DEBUG_OBJECT (decoder,
          "Dropping frame due to QoS. start:%" GST_TIME_FORMAT " deadline:%"
          GST_TIME_FORMAT " earliest_time:%" GST_TIME_FORMAT,
          GST_TIME_ARGS (start), GST_TIME_ARGS (deadline),
          GST_TIME_ARGS (priv->earliest_time));
      gst_buffer_unref (buf);
      /* mark the next pushed buffer discontinuous */
      priv->discont = TRUE;
      goto done;
    }
  }

  /* Set DISCONT flag here ! */

  if (priv->discont) {
    GST_DEBUG_OBJECT (decoder, "Setting discont on output buffer");
    GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
    priv->discont = FALSE;
  }

  /* update rate estimate */
  GST_OBJECT_LOCK (decoder);
  priv->bytes_out += gst_buffer_get_size (buf);
  if (GST_CLOCK_TIME_IS_VALID (duration)) {
    priv->time += duration;
  } else {
    /* FIXME : Use difference between current and previous outgoing
     * timestamp, and relate to difference between current and previous
     * bytes */
    /* better none than nothing valid */
    priv->time = GST_CLOCK_TIME_NONE;
  }
  GST_OBJECT_UNLOCK (decoder);

  GST_DEBUG_OBJECT (decoder, "pushing buffer %p of size %" G_GSIZE_FORMAT ", "
      "PTS %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf,
      gst_buffer_get_size (buf),
      GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
      GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));

  /* we got data, so note things are looking up again, reduce
   * the error count, if there is one */
  if (G_UNLIKELY (priv->error_count))
    priv->error_count = 0;

#ifndef GST_DISABLE_DEBUG
  if (G_UNLIKELY (priv->last_reset_time != GST_CLOCK_TIME_NONE)) {
    GstClockTime elapsed = gst_util_get_timestamp () - priv->last_reset_time;

    /* First buffer since reset, report how long we took */
    GST_INFO_OBJECT (decoder, "First buffer since flush took %" GST_TIME_FORMAT
        " to produce", GST_TIME_ARGS (elapsed));
    priv->last_reset_time = GST_CLOCK_TIME_NONE;
  }
#endif

#ifdef OHOS_OPT_PERFORMANCE // ohos.opt.performance.0001: first frame decoded cost time
  if (!priv->has_push_first_frame) {
    priv->has_push_first_frame = TRUE;
    GST_WARNING_OBJECT (decoder, "KPI-TRACE: FIRST-VIDEO-FRAME videodecoder push first frame");
  }
#endif
  /* release STREAM_LOCK not to block upstream
   * while pushing buffer downstream */
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
  ret = gst_pad_push (decoder->srcpad, buf);
  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

done:
  return ret;
}
3211
3212 /**
3213 * gst_video_decoder_add_to_frame:
3214 * @decoder: a #GstVideoDecoder
3215 * @n_bytes: the number of bytes to add
3216 *
3217 * Removes next @n_bytes of input data and adds it to currently parsed frame.
3218 */
3219 void
gst_video_decoder_add_to_frame(GstVideoDecoder * decoder,int n_bytes)3220 gst_video_decoder_add_to_frame (GstVideoDecoder * decoder, int n_bytes)
3221 {
3222 GstVideoDecoderPrivate *priv = decoder->priv;
3223 GstBuffer *buf;
3224
3225 GST_LOG_OBJECT (decoder, "add %d bytes to frame", n_bytes);
3226
3227 if (n_bytes == 0)
3228 return;
3229
3230 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3231 if (gst_adapter_available (priv->output_adapter) == 0) {
3232 priv->frame_offset =
3233 priv->input_offset - gst_adapter_available (priv->input_adapter);
3234 }
3235 buf = gst_adapter_take_buffer (priv->input_adapter, n_bytes);
3236
3237 gst_adapter_push (priv->output_adapter, buf);
3238 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3239 }
3240
3241 /**
3242 * gst_video_decoder_get_pending_frame_size:
3243 * @decoder: a #GstVideoDecoder
3244 *
3245 * Returns the number of bytes previously added to the current frame
3246 * by calling gst_video_decoder_add_to_frame().
3247 *
3248 * Returns: The number of bytes pending for the current frame
3249 *
3250 * Since: 1.4
3251 */
3252 gsize
gst_video_decoder_get_pending_frame_size(GstVideoDecoder * decoder)3253 gst_video_decoder_get_pending_frame_size (GstVideoDecoder * decoder)
3254 {
3255 GstVideoDecoderPrivate *priv = decoder->priv;
3256 gsize ret;
3257
3258 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3259 ret = gst_adapter_available (priv->output_adapter);
3260 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3261
3262 GST_LOG_OBJECT (decoder, "Current pending frame has %" G_GSIZE_FORMAT "bytes",
3263 ret);
3264
3265 return ret;
3266 }
3267
3268 static guint64
gst_video_decoder_get_frame_duration(GstVideoDecoder * decoder,GstVideoCodecFrame * frame)3269 gst_video_decoder_get_frame_duration (GstVideoDecoder * decoder,
3270 GstVideoCodecFrame * frame)
3271 {
3272 GstVideoCodecState *state = decoder->priv->output_state;
3273
3274 /* it's possible that we don't have a state yet when we are dropping the
3275 * initial buffers */
3276 if (state == NULL)
3277 return GST_CLOCK_TIME_NONE;
3278
3279 if (state->info.fps_d == 0 || state->info.fps_n == 0) {
3280 return GST_CLOCK_TIME_NONE;
3281 }
3282
3283 /* FIXME: For interlaced frames this needs to take into account
3284 * the number of valid fields in the frame
3285 */
3286
3287 return gst_util_uint64_scale (GST_SECOND, state->info.fps_d,
3288 state->info.fps_n);
3289 }
3290
/**
 * gst_video_decoder_have_frame:
 * @decoder: a #GstVideoDecoder
 *
 * Gathers all data collected for currently parsed frame, gathers corresponding
 * metadata and passes it along for further processing, i.e. @handle_frame.
 *
 * Returns: a #GstFlowReturn
 */
GstFlowReturn
gst_video_decoder_have_frame (GstVideoDecoder * decoder)
{
  GstVideoDecoderPrivate *priv = decoder->priv;
  GstBuffer *buffer;
  int n_available;
  GstClockTime pts, dts, duration;
  guint flags;
  GstFlowReturn ret = GST_FLOW_OK;

  GST_LOG_OBJECT (decoder, "have_frame at offset %" G_GUINT64_FORMAT,
      priv->frame_offset);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  /* Collect all bytes gathered so far into one input buffer for the frame;
   * an empty buffer is still produced if nothing was gathered */
  n_available = gst_adapter_available (priv->output_adapter);
  if (n_available) {
    buffer = gst_adapter_take_buffer (priv->output_adapter, n_available);
  } else {
    buffer = gst_buffer_new_and_alloc (0);
  }

  /* buffer ownership transfers to the frame */
  priv->current_frame->input_buffer = buffer;

  /* Look up the timestamps/flags recorded for the input data at the offset
   * where this frame started */
  gst_video_decoder_get_buffer_info_at_offset (decoder,
      priv->frame_offset, &pts, &dts, &duration, &flags);

  GST_BUFFER_PTS (buffer) = pts;
  GST_BUFFER_DTS (buffer) = dts;
  GST_BUFFER_DURATION (buffer) = duration;
  GST_BUFFER_FLAGS (buffer) = flags;

  GST_LOG_OBJECT (decoder, "collected frame size %d, "
      "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dur %"
      GST_TIME_FORMAT, n_available, GST_TIME_ARGS (pts), GST_TIME_ARGS (dts),
      GST_TIME_ARGS (duration));

  /* Non-delta input data marks a sync point (keyframe) */
  if (!GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DELTA_UNIT)) {
    GST_LOG_OBJECT (decoder, "Marking as sync point");
    GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (priv->current_frame);
  }

  /* In reverse playback, just capture and queue frames for later processing */
  if (decoder->input_segment.rate < 0.0) {
    priv->parse_gather =
        g_list_prepend (priv->parse_gather, priv->current_frame);
  } else {
    /* Otherwise, decode the frame, which gives away our ref */
    ret = gst_video_decoder_decode_frame (decoder, priv->current_frame);
  }
  /* Current frame is gone now, either way */
  priv->current_frame = NULL;

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return ret;
}
3357
/* Pass the frame in priv->current_frame through the
 * handle_frame() callback for decoding and passing to gvd_finish_frame(),
 * or dropping by passing to gvd_drop_frame() */
/* Called with the stream lock held; consumes the caller's frame reference
 * (either handle_frame or the parse_gather list takes it over). */
static GstFlowReturn
gst_video_decoder_decode_frame (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  GstVideoDecoderPrivate *priv = decoder->priv;
  GstVideoDecoderClass *decoder_class;
  GstFlowReturn ret = GST_FLOW_OK;

  decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);

  /* FIXME : This should only have to be checked once (either the subclass has an
   * implementation, or it doesn't) */
  g_return_val_if_fail (decoder_class->handle_frame != NULL, GST_FLOW_ERROR);

  /* Track how far this frame is from the last sync point and copy the input
   * buffer's timing over to the frame */
  frame->distance_from_sync = priv->distance_from_sync;
  priv->distance_from_sync++;
  frame->pts = GST_BUFFER_PTS (frame->input_buffer);
  frame->dts = GST_BUFFER_DTS (frame->input_buffer);
  frame->duration = GST_BUFFER_DURATION (frame->input_buffer);

  /* For keyframes, PTS = DTS + constant_offset, usually 0 to 3 frame
   * durations. */
  /* FIXME upstream can be quite wrong about the keyframe aspect,
   * so we could be going off here as well,
   * maybe let subclass decide if it really is/was a keyframe */
  if (GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame) &&
      GST_CLOCK_TIME_IS_VALID (frame->pts)
      && GST_CLOCK_TIME_IS_VALID (frame->dts)) {
    /* just in case they are not equal as might ideally be,
     * e.g. quicktime has a (positive) delta approach */
    priv->pts_delta = frame->pts - frame->dts;
    GST_DEBUG_OBJECT (decoder, "PTS delta %d ms",
        (gint) (priv->pts_delta / GST_MSECOND));
  }

  /* Remember the original input timestamps in the ABI scratch fields */
  frame->abidata.ABI.ts = frame->dts;
  frame->abidata.ABI.ts2 = frame->pts;

  GST_LOG_OBJECT (decoder, "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT
      ", dist %d", GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->dts),
      frame->distance_from_sync);

  /* The pending-frames list holds its own reference */
  gst_video_codec_frame_ref (frame);
  priv->frames = g_list_append (priv->frames, frame);

  if (g_list_length (priv->frames) > 10) {
    GST_DEBUG_OBJECT (decoder, "decoder frame list getting long: %d frames,"
        "possible internal leaking?", g_list_length (priv->frames));
  }

  /* Deadline (running time) used later for QoS decisions */
  frame->deadline =
      gst_segment_to_running_time (&decoder->input_segment, GST_FORMAT_TIME,
      frame->pts);

#ifdef OHOS_OPT_PERFORMANCE // ohos.opt.performance.0001: first frame decoded cost time
  if (!priv->has_recv_first_key_frame) {
    priv->has_recv_first_key_frame = TRUE;
    GST_WARNING_OBJECT (decoder, "KPI-TRACE: FIRST-VIDEO-FRAME videodecoder recv first key frame");
  }
#endif
  /* do something with frame */
  ret = decoder_class->handle_frame (decoder, frame);
  if (ret != GST_FLOW_OK)
    GST_DEBUG_OBJECT (decoder, "flow error %s", gst_flow_get_name (ret));

  /* the frame has either been added to parse_gather or sent to
     handle frame so there is no need to unref it */
  return ret;
}
3430
3431
3432 /**
3433 * gst_video_decoder_get_output_state:
3434 * @decoder: a #GstVideoDecoder
3435 *
3436 * Get the #GstVideoCodecState currently describing the output stream.
3437 *
3438 * Returns: (transfer full): #GstVideoCodecState describing format of video data.
3439 */
3440 GstVideoCodecState *
gst_video_decoder_get_output_state(GstVideoDecoder * decoder)3441 gst_video_decoder_get_output_state (GstVideoDecoder * decoder)
3442 {
3443 GstVideoCodecState *state = NULL;
3444
3445 GST_OBJECT_LOCK (decoder);
3446 if (decoder->priv->output_state)
3447 state = gst_video_codec_state_ref (decoder->priv->output_state);
3448 GST_OBJECT_UNLOCK (decoder);
3449
3450 return state;
3451 }
3452
3453 /**
3454 * gst_video_decoder_set_output_state:
3455 * @decoder: a #GstVideoDecoder
3456 * @fmt: a #GstVideoFormat
3457 * @width: The width in pixels
3458 * @height: The height in pixels
3459 * @reference: (allow-none) (transfer none): An optional reference #GstVideoCodecState
3460 *
3461 * Creates a new #GstVideoCodecState with the specified @fmt, @width and @height
3462 * as the output state for the decoder.
3463 * Any previously set output state on @decoder will be replaced by the newly
3464 * created one.
3465 *
3466 * If the subclass wishes to copy over existing fields (like pixel aspec ratio,
3467 * or framerate) from an existing #GstVideoCodecState, it can be provided as a
3468 * @reference.
3469 *
3470 * If the subclass wishes to override some fields from the output state (like
3471 * pixel-aspect-ratio or framerate) it can do so on the returned #GstVideoCodecState.
3472 *
3473 * The new output state will only take effect (set on pads and buffers) starting
3474 * from the next call to #gst_video_decoder_finish_frame().
3475 *
3476 * Returns: (transfer full): the newly configured output state.
3477 */
3478 GstVideoCodecState *
gst_video_decoder_set_output_state(GstVideoDecoder * decoder,GstVideoFormat fmt,guint width,guint height,GstVideoCodecState * reference)3479 gst_video_decoder_set_output_state (GstVideoDecoder * decoder,
3480 GstVideoFormat fmt, guint width, guint height,
3481 GstVideoCodecState * reference)
3482 {
3483 return gst_video_decoder_set_interlaced_output_state (decoder, fmt,
3484 GST_VIDEO_INTERLACE_MODE_PROGRESSIVE, width, height, reference);
3485 }
3486
/**
 * gst_video_decoder_set_interlaced_output_state:
 * @decoder: a #GstVideoDecoder
 * @fmt: a #GstVideoFormat
 * @width: The width in pixels
 * @height: The height in pixels
 * @mode: A #GstVideoInterlaceMode
 * @reference: (allow-none) (transfer none): An optional reference #GstVideoCodecState
 *
 * Same as #gst_video_decoder_set_output_state() but also allows you to also set
 * the interlacing mode.
 *
 * Returns: (transfer full): the newly configured output state.
 *
 * Since: 1.16.
 */
GstVideoCodecState *
gst_video_decoder_set_interlaced_output_state (GstVideoDecoder * decoder,
    GstVideoFormat fmt, GstVideoInterlaceMode mode, guint width, guint height,
    GstVideoCodecState * reference)
{
  GstVideoDecoderPrivate *priv = decoder->priv;
  GstVideoCodecState *state;

  GST_DEBUG_OBJECT (decoder, "fmt:%d, width:%d, height:%d, reference:%p",
      fmt, width, height, reference);

  /* Create the new output state */
  state = _new_output_state (fmt, mode, width, height, reference);
  if (!state)
    return NULL;

  /* Stream lock first, then object lock: same order as elsewhere to
   * avoid deadlocks */
  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  GST_OBJECT_LOCK (decoder);
  /* Replace existing output state by new one */
  if (priv->output_state)
    gst_video_codec_state_unref (priv->output_state);
  priv->output_state = gst_video_codec_state_ref (state);

  /* Pre-compute the nominal frame duration used for QoS; 0 when the
   * framerate is unknown or variable */
  if (priv->output_state != NULL && priv->output_state->info.fps_n > 0) {
    priv->qos_frame_duration =
        gst_util_uint64_scale (GST_SECOND, priv->output_state->info.fps_d,
        priv->output_state->info.fps_n);
  } else {
    priv->qos_frame_duration = 0;
  }
  /* Caps will be (re)negotiated on the next finish_frame() */
  priv->output_state_changed = TRUE;
  GST_OBJECT_UNLOCK (decoder);

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return state;
}
3541
3542
3543 /**
3544 * gst_video_decoder_get_oldest_frame:
3545 * @decoder: a #GstVideoDecoder
3546 *
3547 * Get the oldest pending unfinished #GstVideoCodecFrame
3548 *
3549 * Returns: (transfer full): oldest pending unfinished #GstVideoCodecFrame.
3550 */
3551 GstVideoCodecFrame *
gst_video_decoder_get_oldest_frame(GstVideoDecoder * decoder)3552 gst_video_decoder_get_oldest_frame (GstVideoDecoder * decoder)
3553 {
3554 GstVideoCodecFrame *frame = NULL;
3555
3556 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3557 if (decoder->priv->frames)
3558 frame = gst_video_codec_frame_ref (decoder->priv->frames->data);
3559 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3560
3561 return (GstVideoCodecFrame *) frame;
3562 }
3563
3564 /**
3565 * gst_video_decoder_get_frame:
3566 * @decoder: a #GstVideoDecoder
3567 * @frame_number: system_frame_number of a frame
3568 *
3569 * Get a pending unfinished #GstVideoCodecFrame
3570 *
3571 * Returns: (transfer full): pending unfinished #GstVideoCodecFrame identified by @frame_number.
3572 */
3573 GstVideoCodecFrame *
gst_video_decoder_get_frame(GstVideoDecoder * decoder,int frame_number)3574 gst_video_decoder_get_frame (GstVideoDecoder * decoder, int frame_number)
3575 {
3576 GList *g;
3577 GstVideoCodecFrame *frame = NULL;
3578
3579 GST_DEBUG_OBJECT (decoder, "frame_number : %d", frame_number);
3580
3581 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3582 for (g = decoder->priv->frames; g; g = g->next) {
3583 GstVideoCodecFrame *tmp = g->data;
3584
3585 if (tmp->system_frame_number == frame_number) {
3586 frame = gst_video_codec_frame_ref (tmp);
3587 break;
3588 }
3589 }
3590 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3591
3592 return frame;
3593 }
3594
3595 /**
3596 * gst_video_decoder_get_frames:
3597 * @decoder: a #GstVideoDecoder
3598 *
3599 * Get all pending unfinished #GstVideoCodecFrame
3600 *
3601 * Returns: (transfer full) (element-type GstVideoCodecFrame): pending unfinished #GstVideoCodecFrame.
3602 */
3603 GList *
gst_video_decoder_get_frames(GstVideoDecoder * decoder)3604 gst_video_decoder_get_frames (GstVideoDecoder * decoder)
3605 {
3606 GList *frames;
3607
3608 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3609 frames = g_list_copy (decoder->priv->frames);
3610 g_list_foreach (frames, (GFunc) gst_video_codec_frame_ref, NULL);
3611 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3612
3613 return frames;
3614 }
3615
3616 static gboolean
gst_video_decoder_decide_allocation_default(GstVideoDecoder * decoder,GstQuery * query)3617 gst_video_decoder_decide_allocation_default (GstVideoDecoder * decoder,
3618 GstQuery * query)
3619 {
3620 GstCaps *outcaps = NULL;
3621 GstBufferPool *pool = NULL;
3622 guint size, min, max;
3623 GstAllocator *allocator = NULL;
3624 GstAllocationParams params;
3625 GstStructure *config;
3626 gboolean update_pool, update_allocator;
3627 GstVideoInfo vinfo;
3628
3629 gst_query_parse_allocation (query, &outcaps, NULL);
3630 gst_video_info_init (&vinfo);
3631 if (outcaps)
3632 gst_video_info_from_caps (&vinfo, outcaps);
3633
3634 /* we got configuration from our peer or the decide_allocation method,
3635 * parse them */
3636 if (gst_query_get_n_allocation_params (query) > 0) {
3637 /* try the allocator */
3638 gst_query_parse_nth_allocation_param (query, 0, &allocator, ¶ms);
3639 update_allocator = TRUE;
3640 } else {
3641 allocator = NULL;
3642 gst_allocation_params_init (¶ms);
3643 update_allocator = FALSE;
3644 }
3645
3646 if (gst_query_get_n_allocation_pools (query) > 0) {
3647 gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);
3648 size = MAX (size, vinfo.size);
3649 update_pool = TRUE;
3650 } else {
3651 pool = NULL;
3652 size = vinfo.size;
3653 min = max = 0;
3654
3655 update_pool = FALSE;
3656 }
3657
3658 if (pool == NULL) {
3659 /* no pool, we can make our own */
3660 GST_DEBUG_OBJECT (decoder, "no pool, making new pool");
3661 pool = gst_video_buffer_pool_new ();
3662 }
3663
3664 /* now configure */
3665 config = gst_buffer_pool_get_config (pool);
3666 gst_buffer_pool_config_set_params (config, outcaps, size, min, max);
3667 gst_buffer_pool_config_set_allocator (config, allocator, ¶ms);
3668
3669 GST_DEBUG_OBJECT (decoder,
3670 "setting config %" GST_PTR_FORMAT " in pool %" GST_PTR_FORMAT, config,
3671 pool);
3672 if (!gst_buffer_pool_set_config (pool, config)) {
3673 config = gst_buffer_pool_get_config (pool);
3674
3675 /* If change are not acceptable, fallback to generic pool */
3676 if (!gst_buffer_pool_config_validate_params (config, outcaps, size, min,
3677 max)) {
3678 GST_DEBUG_OBJECT (decoder, "unsuported pool, making new pool");
3679
3680 gst_object_unref (pool);
3681 pool = gst_video_buffer_pool_new ();
3682 gst_buffer_pool_config_set_params (config, outcaps, size, min, max);
3683 gst_buffer_pool_config_set_allocator (config, allocator, ¶ms);
3684 }
3685
3686 if (!gst_buffer_pool_set_config (pool, config))
3687 goto config_failed;
3688 }
3689
3690 if (update_allocator)
3691 gst_query_set_nth_allocation_param (query, 0, allocator, ¶ms);
3692 else
3693 gst_query_add_allocation_param (query, allocator, ¶ms);
3694 if (allocator)
3695 gst_object_unref (allocator);
3696
3697 if (update_pool)
3698 gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max);
3699 else
3700 gst_query_add_allocation_pool (query, pool, size, min, max);
3701
3702 if (pool)
3703 gst_object_unref (pool);
3704
3705 return TRUE;
3706
3707 config_failed:
3708 if (allocator)
3709 gst_object_unref (allocator);
3710 if (pool)
3711 gst_object_unref (pool);
3712 GST_ELEMENT_ERROR (decoder, RESOURCE, SETTINGS,
3713 ("Failed to configure the buffer pool"),
3714 ("Configuration is most likely invalid, please report this issue."));
3715 return FALSE;
3716 }
3717
static gboolean
gst_video_decoder_propose_allocation_default (GstVideoDecoder * decoder,
    GstQuery * query)
{
  /* Default propose_allocation vmethod: accept the upstream ALLOCATION
   * query without adding any pools, allocators or metas. Subclasses
   * override this to advertise allocation options to upstream. */
  return TRUE;
}
3724
3725 static gboolean
gst_video_decoder_negotiate_pool(GstVideoDecoder * decoder,GstCaps * caps)3726 gst_video_decoder_negotiate_pool (GstVideoDecoder * decoder, GstCaps * caps)
3727 {
3728 GstVideoDecoderClass *klass;
3729 GstQuery *query = NULL;
3730 GstBufferPool *pool = NULL;
3731 GstAllocator *allocator;
3732 GstAllocationParams params;
3733 gboolean ret = TRUE;
3734
3735 klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
3736
3737 query = gst_query_new_allocation (caps, TRUE);
3738
3739 GST_DEBUG_OBJECT (decoder, "do query ALLOCATION");
3740
3741 if (!gst_pad_peer_query (decoder->srcpad, query)) {
3742 GST_DEBUG_OBJECT (decoder, "didn't get downstream ALLOCATION hints");
3743 }
3744
3745 g_assert (klass->decide_allocation != NULL);
3746 ret = klass->decide_allocation (decoder, query);
3747
3748 GST_DEBUG_OBJECT (decoder, "ALLOCATION (%d) params: %" GST_PTR_FORMAT, ret,
3749 query);
3750
3751 if (!ret)
3752 goto no_decide_allocation;
3753
3754 /* we got configuration from our peer or the decide_allocation method,
3755 * parse them */
3756 if (gst_query_get_n_allocation_params (query) > 0) {
3757 gst_query_parse_nth_allocation_param (query, 0, &allocator, ¶ms);
3758 } else {
3759 allocator = NULL;
3760 gst_allocation_params_init (¶ms);
3761 }
3762
3763 if (gst_query_get_n_allocation_pools (query) > 0)
3764 gst_query_parse_nth_allocation_pool (query, 0, &pool, NULL, NULL, NULL);
3765 if (!pool) {
3766 if (allocator)
3767 gst_object_unref (allocator);
3768 ret = FALSE;
3769 goto no_decide_allocation;
3770 }
3771
3772 if (decoder->priv->allocator)
3773 gst_object_unref (decoder->priv->allocator);
3774 decoder->priv->allocator = allocator;
3775 decoder->priv->params = params;
3776
3777 if (decoder->priv->pool) {
3778 /* do not set the bufferpool to inactive here, it will be done
3779 * on its finalize function. As videodecoder do late renegotiation
3780 * it might happen that some element downstream is already using this
3781 * same bufferpool and deactivating it will make it fail.
3782 * Happens when a downstream element changes from passthrough to
3783 * non-passthrough and gets this same bufferpool to use */
3784 GST_DEBUG_OBJECT (decoder, "unref pool %" GST_PTR_FORMAT,
3785 decoder->priv->pool);
3786 gst_object_unref (decoder->priv->pool);
3787 }
3788 decoder->priv->pool = pool;
3789
3790 /* and activate */
3791 GST_DEBUG_OBJECT (decoder, "activate pool %" GST_PTR_FORMAT, pool);
3792 gst_buffer_pool_set_active (pool, TRUE);
3793
3794 done:
3795 if (query)
3796 gst_query_unref (query);
3797
3798 return ret;
3799
3800 /* Errors */
3801 no_decide_allocation:
3802 {
3803 GST_WARNING_OBJECT (decoder, "Subclass failed to decide allocation");
3804 goto done;
3805 }
3806 }
3807
/* Default negotiate vmethod: finalize the output state, push pending
 * pre-caps events, set src caps if they changed, then negotiate the
 * buffer pool. Called with the stream lock held by the caller. */
static gboolean
gst_video_decoder_negotiate_default (GstVideoDecoder * decoder)
{
  GstVideoCodecState *state = decoder->priv->output_state;
  gboolean ret = TRUE;
  GstVideoCodecFrame *frame;
  GstCaps *prevcaps;

  /* no output state yet: only try to negotiate a pool, with NULL caps */
  if (!state) {
    GST_DEBUG_OBJECT (decoder,
        "Trying to negotiate the pool with out setting the o/p format");
    ret = gst_video_decoder_negotiate_pool (decoder, NULL);
    goto done;
  }

  g_return_val_if_fail (GST_VIDEO_INFO_WIDTH (&state->info) != 0, FALSE);
  g_return_val_if_fail (GST_VIDEO_INFO_HEIGHT (&state->info) != 0, FALSE);

  /* If the base class didn't set any multiview params, assume mono
   * now */
  if (GST_VIDEO_INFO_MULTIVIEW_MODE (&state->info) ==
      GST_VIDEO_MULTIVIEW_MODE_NONE) {
    GST_VIDEO_INFO_MULTIVIEW_MODE (&state->info) =
        GST_VIDEO_MULTIVIEW_MODE_MONO;
    GST_VIDEO_INFO_MULTIVIEW_FLAGS (&state->info) =
        GST_VIDEO_MULTIVIEW_FLAGS_NONE;
  }

  GST_DEBUG_OBJECT (decoder, "output_state par %d/%d fps %d/%d",
      state->info.par_n, state->info.par_d,
      state->info.fps_n, state->info.fps_d);

  /* derive caps from the video info if the subclass didn't set them */
  if (state->caps == NULL)
    state->caps = gst_video_info_to_caps (&state->info);
  if (state->allocation_caps == NULL)
    state->allocation_caps = gst_caps_ref (state->caps);

  GST_DEBUG_OBJECT (decoder, "setting caps %" GST_PTR_FORMAT, state->caps);

  /* Push all pending pre-caps events of the oldest frame before
   * setting caps */
  frame = decoder->priv->frames ? decoder->priv->frames->data : NULL;
  if (frame || decoder->priv->current_frame_events) {
    GList **events, *l;

    if (frame) {
      events = &frame->events;
    } else {
      events = &decoder->priv->current_frame_events;
    }

    /* events are stored newest-first; walk from the tail so they are
     * pushed in their original arrival order. Event types sorting before
     * CAPS (e.g. STREAM_START, segment-less sticky events) must go out
     * before the caps event. */
    for (l = g_list_last (*events); l;) {
      GstEvent *event = GST_EVENT (l->data);
      GList *tmp;

      if (GST_EVENT_TYPE (event) < GST_EVENT_CAPS) {
        /* push_event presumably takes ownership of the event
         * (gst_pad_push_event semantics), so just unlink the node */
        gst_video_decoder_push_event (decoder, event);
        tmp = l;
        l = l->prev;
        *events = g_list_delete_link (*events, tmp);
      } else {
        l = l->prev;
      }
    }
  }

  prevcaps = gst_pad_get_current_caps (decoder->srcpad);
  /* only re-set src caps when they actually changed */
  if (!prevcaps || !gst_caps_is_equal (prevcaps, state->caps)) {
    if (!prevcaps) {
      GST_DEBUG_OBJECT (decoder, "decoder src pad has currently NULL caps");
    }
    ret = gst_pad_set_caps (decoder->srcpad, state->caps);
  } else {
    ret = TRUE;
    GST_DEBUG_OBJECT (decoder,
        "current src pad and output state caps are the same");
  }
  if (prevcaps)
    gst_caps_unref (prevcaps);

  if (!ret)
    goto done;
  decoder->priv->output_state_changed = FALSE;
  /* Negotiate pool */
  ret = gst_video_decoder_negotiate_pool (decoder, state->allocation_caps);

done:
  return ret;
}
3897
3898 static gboolean
gst_video_decoder_negotiate_unlocked(GstVideoDecoder * decoder)3899 gst_video_decoder_negotiate_unlocked (GstVideoDecoder * decoder)
3900 {
3901 GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
3902 gboolean ret = TRUE;
3903
3904 if (G_LIKELY (klass->negotiate))
3905 ret = klass->negotiate (decoder);
3906
3907 return ret;
3908 }
3909
3910 /**
3911 * gst_video_decoder_negotiate:
3912 * @decoder: a #GstVideoDecoder
3913 *
3914 * Negotiate with downstream elements to currently configured #GstVideoCodecState.
3915 * Unmark GST_PAD_FLAG_NEED_RECONFIGURE in any case. But mark it again if
3916 * negotiate fails.
3917 *
3918 * Returns: %TRUE if the negotiation succeeded, else %FALSE.
3919 */
3920 gboolean
gst_video_decoder_negotiate(GstVideoDecoder * decoder)3921 gst_video_decoder_negotiate (GstVideoDecoder * decoder)
3922 {
3923 GstVideoDecoderClass *klass;
3924 gboolean ret = TRUE;
3925
3926 g_return_val_if_fail (GST_IS_VIDEO_DECODER (decoder), FALSE);
3927
3928 klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
3929
3930 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3931 gst_pad_check_reconfigure (decoder->srcpad);
3932 if (klass->negotiate) {
3933 ret = klass->negotiate (decoder);
3934 if (!ret)
3935 gst_pad_mark_reconfigure (decoder->srcpad);
3936 }
3937 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3938
3939 return ret;
3940 }
3941
3942 /**
3943 * gst_video_decoder_allocate_output_buffer:
3944 * @decoder: a #GstVideoDecoder
3945 *
3946 * Helper function that allocates a buffer to hold a video frame for @decoder's
3947 * current #GstVideoCodecState.
3948 *
3949 * You should use gst_video_decoder_allocate_output_frame() instead of this
3950 * function, if possible at all.
3951 *
3952 * Returns: (transfer full): allocated buffer, or NULL if no buffer could be
3953 * allocated (e.g. when downstream is flushing or shutting down)
3954 */
3955 GstBuffer *
gst_video_decoder_allocate_output_buffer(GstVideoDecoder * decoder)3956 gst_video_decoder_allocate_output_buffer (GstVideoDecoder * decoder)
3957 {
3958 GstFlowReturn flow;
3959 GstBuffer *buffer = NULL;
3960 gboolean needs_reconfigure = FALSE;
3961
3962 GST_DEBUG ("alloc src buffer");
3963
3964 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3965 needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
3966 if (G_UNLIKELY (!decoder->priv->output_state
3967 || decoder->priv->output_state_changed || needs_reconfigure)) {
3968 if (!gst_video_decoder_negotiate_unlocked (decoder)) {
3969 if (decoder->priv->output_state) {
3970 GST_DEBUG_OBJECT (decoder, "Failed to negotiate, fallback allocation");
3971 gst_pad_mark_reconfigure (decoder->srcpad);
3972 goto fallback;
3973 } else {
3974 GST_DEBUG_OBJECT (decoder, "Failed to negotiate, output_buffer=NULL");
3975 goto failed_allocation;
3976 }
3977 }
3978 }
3979
3980 flow = gst_buffer_pool_acquire_buffer (decoder->priv->pool, &buffer, NULL);
3981
3982 if (flow != GST_FLOW_OK) {
3983 GST_INFO_OBJECT (decoder, "couldn't allocate output buffer, flow %s",
3984 gst_flow_get_name (flow));
3985 if (decoder->priv->output_state && decoder->priv->output_state->info.size)
3986 goto fallback;
3987 else
3988 goto failed_allocation;
3989 }
3990 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3991
3992 return buffer;
3993
3994 fallback:
3995 GST_INFO_OBJECT (decoder,
3996 "Fallback allocation, creating new buffer which doesn't belongs to any buffer pool");
3997 buffer =
3998 gst_buffer_new_allocate (NULL, decoder->priv->output_state->info.size,
3999 NULL);
4000
4001 failed_allocation:
4002 GST_ERROR_OBJECT (decoder, "Failed to allocate the buffer..");
4003 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4004
4005 return buffer;
4006 }
4007
4008 /**
4009 * gst_video_decoder_allocate_output_frame:
4010 * @decoder: a #GstVideoDecoder
4011 * @frame: a #GstVideoCodecFrame
4012 *
4013 * Helper function that allocates a buffer to hold a video frame for @decoder's
4014 * current #GstVideoCodecState. Subclass should already have configured video
4015 * state and set src pad caps.
4016 *
4017 * The buffer allocated here is owned by the frame and you should only
4018 * keep references to the frame, not the buffer.
4019 *
4020 * Returns: %GST_FLOW_OK if an output buffer could be allocated
4021 */
GstFlowReturn
gst_video_decoder_allocate_output_frame (GstVideoDecoder *
    decoder, GstVideoCodecFrame * frame)
{
  /* Convenience wrapper: identical to the _with_params variant with no
   * GstBufferPoolAcquireParams */
  return gst_video_decoder_allocate_output_frame_with_params (decoder, frame,
      NULL);
}
4029
4030 /**
4031 * gst_video_decoder_allocate_output_frame_with_params:
4032 * @decoder: a #GstVideoDecoder
4033 * @frame: a #GstVideoCodecFrame
4034 * @params: a #GstBufferPoolAcquireParams
4035 *
4036 * Same as #gst_video_decoder_allocate_output_frame except it allows passing
4037 * #GstBufferPoolAcquireParams to the sub call gst_buffer_pool_acquire_buffer.
4038 *
4039 * Returns: %GST_FLOW_OK if an output buffer could be allocated
4040 *
4041 * Since: 1.12
4042 */
GstFlowReturn
gst_video_decoder_allocate_output_frame_with_params (GstVideoDecoder *
    decoder, GstVideoCodecFrame * frame, GstBufferPoolAcquireParams * params)
{
  GstFlowReturn flow_ret;
  GstVideoCodecState *state;
  /* NOTE(review): GST_VIDEO_INFO_SIZE is a gsize; int could truncate for
   * very large frames -- confirm before changing */
  int num_bytes;
  gboolean needs_reconfigure = FALSE;

  g_return_val_if_fail (decoder->priv->output_state, GST_FLOW_NOT_NEGOTIATED);
  g_return_val_if_fail (frame->output_buffer == NULL, GST_FLOW_ERROR);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  /* re-check under the stream lock (g_return_val_if_fail above may be
   * compiled out with G_DISABLE_CHECKS) */
  state = decoder->priv->output_state;
  if (state == NULL) {
    g_warning ("Output state should be set before allocating frame");
    goto error;
  }
  num_bytes = GST_VIDEO_INFO_SIZE (&state->info);
  if (num_bytes == 0) {
    g_warning ("Frame size should not be 0");
    goto error;
  }

  /* renegotiate if needed; on failure keep the current pool and re-arm
   * the reconfigure flag so a later attempt retries */
  needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
  if (G_UNLIKELY (decoder->priv->output_state_changed || needs_reconfigure)) {
    if (!gst_video_decoder_negotiate_unlocked (decoder)) {
      GST_DEBUG_OBJECT (decoder, "Failed to negotiate, fallback allocation");
      gst_pad_mark_reconfigure (decoder->srcpad);
    }
  }

  GST_LOG_OBJECT (decoder, "alloc buffer size %d", num_bytes);

  /* the acquired buffer becomes owned by @frame (see gtk-doc above) */
  flow_ret = gst_buffer_pool_acquire_buffer (decoder->priv->pool,
      &frame->output_buffer, params);

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return flow_ret;

error:
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
  return GST_FLOW_ERROR;
}
4089
4090 /**
4091 * gst_video_decoder_get_max_decode_time:
4092 * @decoder: a #GstVideoDecoder
4093 * @frame: a #GstVideoCodecFrame
4094 *
4095 * Determines maximum possible decoding time for @frame that will
4096 * allow it to decode and arrive in time (as determined by QoS events).
4097 * In particular, a negative result means decoding in time is no longer possible
4098 * and should therefore occur as soon/skippy as possible.
4099 *
4100 * Returns: max decoding time.
4101 */
4102 GstClockTimeDiff
gst_video_decoder_get_max_decode_time(GstVideoDecoder * decoder,GstVideoCodecFrame * frame)4103 gst_video_decoder_get_max_decode_time (GstVideoDecoder *
4104 decoder, GstVideoCodecFrame * frame)
4105 {
4106 GstClockTimeDiff deadline;
4107 GstClockTime earliest_time;
4108
4109 GST_OBJECT_LOCK (decoder);
4110 earliest_time = decoder->priv->earliest_time;
4111 if (GST_CLOCK_TIME_IS_VALID (earliest_time)
4112 && GST_CLOCK_TIME_IS_VALID (frame->deadline))
4113 deadline = GST_CLOCK_DIFF (earliest_time, frame->deadline);
4114 else
4115 deadline = G_MAXINT64;
4116
4117 GST_LOG_OBJECT (decoder, "earliest %" GST_TIME_FORMAT
4118 ", frame deadline %" GST_TIME_FORMAT ", deadline %" GST_STIME_FORMAT,
4119 GST_TIME_ARGS (earliest_time), GST_TIME_ARGS (frame->deadline),
4120 GST_STIME_ARGS (deadline));
4121
4122 GST_OBJECT_UNLOCK (decoder);
4123
4124 return deadline;
4125 }
4126
/**
 * gst_video_decoder_get_qos_proportion:
 * @decoder: a #GstVideoDecoder
 *
 * Returns: The current QoS proportion.
 *
 * Since: 1.0.3
 */
4136 gdouble
gst_video_decoder_get_qos_proportion(GstVideoDecoder * decoder)4137 gst_video_decoder_get_qos_proportion (GstVideoDecoder * decoder)
4138 {
4139 gdouble proportion;
4140
4141 g_return_val_if_fail (GST_IS_VIDEO_DECODER (decoder), 1.0);
4142
4143 GST_OBJECT_LOCK (decoder);
4144 proportion = decoder->priv->proportion;
4145 GST_OBJECT_UNLOCK (decoder);
4146
4147 return proportion;
4148 }
4149
4150 GstFlowReturn
_gst_video_decoder_error(GstVideoDecoder * dec,gint weight,GQuark domain,gint code,gchar * txt,gchar * dbg,const gchar * file,const gchar * function,gint line)4151 _gst_video_decoder_error (GstVideoDecoder * dec, gint weight,
4152 GQuark domain, gint code, gchar * txt, gchar * dbg, const gchar * file,
4153 const gchar * function, gint line)
4154 {
4155 if (txt)
4156 GST_WARNING_OBJECT (dec, "error: %s", txt);
4157 if (dbg)
4158 GST_WARNING_OBJECT (dec, "error: %s", dbg);
4159 dec->priv->error_count += weight;
4160 dec->priv->discont = TRUE;
4161 if (dec->priv->max_errors >= 0 &&
4162 dec->priv->error_count > dec->priv->max_errors) {
4163 gst_element_message_full (GST_ELEMENT (dec), GST_MESSAGE_ERROR,
4164 domain, code, txt, dbg, file, function, line);
4165 return GST_FLOW_ERROR;
4166 } else {
4167 g_free (txt);
4168 g_free (dbg);
4169 return GST_FLOW_OK;
4170 }
4171 }
4172
4173 /**
4174 * gst_video_decoder_set_max_errors:
4175 * @dec: a #GstVideoDecoder
4176 * @num: max tolerated errors
4177 *
4178 * Sets numbers of tolerated decoder errors, where a tolerated one is then only
4179 * warned about, but more than tolerated will lead to fatal error. You can set
4180 * -1 for never returning fatal errors. Default is set to
4181 * GST_VIDEO_DECODER_MAX_ERRORS.
4182 *
4183 * The '-1' option was added in 1.4
4184 */
void
gst_video_decoder_set_max_errors (GstVideoDecoder * dec, gint num)
{
  g_return_if_fail (GST_IS_VIDEO_DECODER (dec));

  /* -1 disables fatal errors entirely (see _gst_video_decoder_error,
   * which only errors out when max_errors >= 0) */
  dec->priv->max_errors = num;
}
4192
4193 /**
4194 * gst_video_decoder_get_max_errors:
4195 * @dec: a #GstVideoDecoder
4196 *
4197 * Returns: currently configured decoder tolerated error count.
4198 */
gint
gst_video_decoder_get_max_errors (GstVideoDecoder * dec)
{
  g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), 0);

  /* value set via gst_video_decoder_set_max_errors() */
  return dec->priv->max_errors;
}
4206
4207 /**
4208 * gst_video_decoder_set_needs_format:
4209 * @dec: a #GstVideoDecoder
4210 * @enabled: new state
4211 *
4212 * Configures decoder format needs. If enabled, subclass needs to be
4213 * negotiated with format caps before it can process any data. It will then
4214 * never be handed any data before it has been configured.
4215 * Otherwise, it might be handed data without having been configured and
4216 * is then expected being able to do so either by default
4217 * or based on the input data.
4218 *
4219 * Since: 1.4
4220 */
void
gst_video_decoder_set_needs_format (GstVideoDecoder * dec, gboolean enabled)
{
  g_return_if_fail (GST_IS_VIDEO_DECODER (dec));

  /* when TRUE the base class withholds data from the subclass until
   * input caps have been configured (see gtk-doc above) */
  dec->priv->needs_format = enabled;
}
4228
4229 /**
4230 * gst_video_decoder_get_needs_format:
4231 * @dec: a #GstVideoDecoder
4232 *
4233 * Queries decoder required format handling.
4234 *
4235 * Returns: %TRUE if required format handling is enabled.
4236 *
4237 * Since: 1.4
4238 */
4239 gboolean
gst_video_decoder_get_needs_format(GstVideoDecoder * dec)4240 gst_video_decoder_get_needs_format (GstVideoDecoder * dec)
4241 {
4242 gboolean result;
4243
4244 g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), FALSE);
4245
4246 result = dec->priv->needs_format;
4247
4248 return result;
4249 }
4250
4251 /**
4252 * gst_video_decoder_set_packetized:
4253 * @decoder: a #GstVideoDecoder
4254 * @packetized: whether the input data should be considered as packetized.
4255 *
4256 * Allows baseclass to consider input data as packetized or not. If the
4257 * input is packetized, then the @parse method will not be called.
4258 */
void
gst_video_decoder_set_packetized (GstVideoDecoder * decoder,
    gboolean packetized)
{
  /* when TRUE the parse vmethod is bypassed (see gtk-doc above);
   * NOTE(review): unlike the sibling setters this one has no
   * GST_IS_VIDEO_DECODER check */
  decoder->priv->packetized = packetized;
}
4265
4266 /**
4267 * gst_video_decoder_get_packetized:
4268 * @decoder: a #GstVideoDecoder
4269 *
4270 * Queries whether input data is considered packetized or not by the
4271 * base class.
4272 *
4273 * Returns: TRUE if input data is considered packetized.
4274 */
gboolean
gst_video_decoder_get_packetized (GstVideoDecoder * decoder)
{
  /* flag set via gst_video_decoder_set_packetized() */
  return decoder->priv->packetized;
}
4280
4281 /**
4282 * gst_video_decoder_set_estimate_rate:
4283 * @dec: a #GstVideoDecoder
4284 * @enabled: whether to enable byte to time conversion
4285 *
4286 * Allows baseclass to perform byte to time estimated conversion.
4287 */
void
gst_video_decoder_set_estimate_rate (GstVideoDecoder * dec, gboolean enabled)
{
  g_return_if_fail (GST_IS_VIDEO_DECODER (dec));

  /* enables estimated byte-to-time conversion in the base class */
  dec->priv->do_estimate_rate = enabled;
}
4295
4296 /**
4297 * gst_video_decoder_get_estimate_rate:
4298 * @dec: a #GstVideoDecoder
4299 *
4300 * Returns: currently configured byte to time conversion setting
4301 */
4302 gboolean
gst_video_decoder_get_estimate_rate(GstVideoDecoder * dec)4303 gst_video_decoder_get_estimate_rate (GstVideoDecoder * dec)
4304 {
4305 g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), 0);
4306
4307 return dec->priv->do_estimate_rate;
4308 }
4309
4310 /**
4311 * gst_video_decoder_set_latency:
4312 * @decoder: a #GstVideoDecoder
4313 * @min_latency: minimum latency
4314 * @max_latency: maximum latency
4315 *
4316 * Lets #GstVideoDecoder sub-classes tell the baseclass what the decoder
4317 * latency is. Will also post a LATENCY message on the bus so the pipeline
4318 * can reconfigure its global latency.
4319 */
4320 void
gst_video_decoder_set_latency(GstVideoDecoder * decoder,GstClockTime min_latency,GstClockTime max_latency)4321 gst_video_decoder_set_latency (GstVideoDecoder * decoder,
4322 GstClockTime min_latency, GstClockTime max_latency)
4323 {
4324 g_return_if_fail (GST_CLOCK_TIME_IS_VALID (min_latency));
4325 g_return_if_fail (max_latency >= min_latency);
4326
4327 GST_OBJECT_LOCK (decoder);
4328 decoder->priv->min_latency = min_latency;
4329 decoder->priv->max_latency = max_latency;
4330 GST_OBJECT_UNLOCK (decoder);
4331
4332 gst_element_post_message (GST_ELEMENT_CAST (decoder),
4333 gst_message_new_latency (GST_OBJECT_CAST (decoder)));
4334 }
4335
4336 /**
4337 * gst_video_decoder_get_latency:
4338 * @decoder: a #GstVideoDecoder
4339 * @min_latency: (out) (allow-none): address of variable in which to store the
4340 * configured minimum latency, or %NULL
4341 * @max_latency: (out) (allow-none): address of variable in which to store the
 *     configured maximum latency, or %NULL
4343 *
4344 * Query the configured decoder latency. Results will be returned via
4345 * @min_latency and @max_latency.
4346 */
void
gst_video_decoder_get_latency (GstVideoDecoder * decoder,
    GstClockTime * min_latency, GstClockTime * max_latency)
{
  /* both out parameters are optional; read under the object lock to be
   * consistent with gst_video_decoder_set_latency() */
  GST_OBJECT_LOCK (decoder);
  if (min_latency)
    *min_latency = decoder->priv->min_latency;
  if (max_latency)
    *max_latency = decoder->priv->max_latency;
  GST_OBJECT_UNLOCK (decoder);
}
4358
4359 /**
4360 * gst_video_decoder_merge_tags:
4361 * @decoder: a #GstVideoDecoder
4362 * @tags: (allow-none): a #GstTagList to merge, or NULL to unset
4363 * previously-set tags
4364 * @mode: the #GstTagMergeMode to use, usually #GST_TAG_MERGE_REPLACE
4365 *
 * Sets the video decoder tags and how they should be merged with any
 * upstream stream tags. This will override any tags previously-set
 * with gst_video_decoder_merge_tags().
4369 *
4370 * Note that this is provided for convenience, and the subclass is
4371 * not required to use this and can still do tag handling on its own.
4372 *
4373 * MT safe.
4374 */
void
gst_video_decoder_merge_tags (GstVideoDecoder * decoder,
    const GstTagList * tags, GstTagMergeMode mode)
{
  g_return_if_fail (GST_IS_VIDEO_DECODER (decoder));
  g_return_if_fail (tags == NULL || GST_IS_TAG_LIST (tags));
  g_return_if_fail (tags == NULL || mode != GST_TAG_MERGE_UNDEFINED);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  /* only update when the list actually differs (pointer comparison) */
  if (decoder->priv->tags != tags) {
    if (decoder->priv->tags) {
      /* drop the previous list and reset the merge mode to its default */
      gst_tag_list_unref (decoder->priv->tags);
      decoder->priv->tags = NULL;
      decoder->priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
    }
    if (tags) {
      decoder->priv->tags = gst_tag_list_ref ((GstTagList *) tags);
      decoder->priv->tags_merge_mode = mode;
    }

    GST_DEBUG_OBJECT (decoder, "set decoder tags to %" GST_PTR_FORMAT, tags);
    /* flag so the merged tags are pushed downstream later */
    decoder->priv->tags_changed = TRUE;
  }
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
}
4400
4401 /**
4402 * gst_video_decoder_get_buffer_pool:
4403 * @decoder: a #GstVideoDecoder
4404 *
4405 * Returns: (transfer full): the instance of the #GstBufferPool used
4406 * by the decoder; free it after use it
4407 */
4408 GstBufferPool *
gst_video_decoder_get_buffer_pool(GstVideoDecoder * decoder)4409 gst_video_decoder_get_buffer_pool (GstVideoDecoder * decoder)
4410 {
4411 g_return_val_if_fail (GST_IS_VIDEO_DECODER (decoder), NULL);
4412
4413 if (decoder->priv->pool)
4414 return gst_object_ref (decoder->priv->pool);
4415
4416 return NULL;
4417 }
4418
4419 /**
4420 * gst_video_decoder_get_allocator:
4421 * @decoder: a #GstVideoDecoder
4422 * @allocator: (out) (allow-none) (transfer full): the #GstAllocator
4423 * used
4424 * @params: (out) (allow-none) (transfer full): the
4425 * #GstAllocationParams of @allocator
4426 *
4427 * Lets #GstVideoDecoder sub-classes to know the memory @allocator
4428 * used by the base class and its @params.
4429 *
4430 * Unref the @allocator after use it.
4431 */
4432 void
gst_video_decoder_get_allocator(GstVideoDecoder * decoder,GstAllocator ** allocator,GstAllocationParams * params)4433 gst_video_decoder_get_allocator (GstVideoDecoder * decoder,
4434 GstAllocator ** allocator, GstAllocationParams * params)
4435 {
4436 g_return_if_fail (GST_IS_VIDEO_DECODER (decoder));
4437
4438 if (allocator)
4439 *allocator = decoder->priv->allocator ?
4440 gst_object_ref (decoder->priv->allocator) : NULL;
4441
4442 if (params)
4443 *params = decoder->priv->params;
4444 }
4445
4446 /**
4447 * gst_video_decoder_set_use_default_pad_acceptcaps:
4448 * @decoder: a #GstVideoDecoder
4449 * @use: if the default pad accept-caps query handling should be used
4450 *
4451 * Lets #GstVideoDecoder sub-classes decide if they want the sink pad
4452 * to use the default pad query handler to reply to accept-caps queries.
4453 *
4454 * By setting this to true it is possible to further customize the default
4455 * handler with %GST_PAD_SET_ACCEPT_INTERSECT and
4456 * %GST_PAD_SET_ACCEPT_TEMPLATE
4457 *
4458 * Since: 1.6
4459 */
void
gst_video_decoder_set_use_default_pad_acceptcaps (GstVideoDecoder * decoder,
    gboolean use)
{
  /* when TRUE the sink pad answers accept-caps queries with the default
   * pad handler (see gtk-doc above) */
  decoder->priv->use_default_pad_acceptcaps = use;
}
4466