/* GStreamer
 * Copyright (C) 2008 David Schleef <ds@schleef.org>
 * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
 * Copyright (C) 2011 Nokia Corporation. All rights reserved.
 *   Contact: Stefan Kost <stefan.kost@nokia.com>
 * Copyright (C) 2012 Collabora Ltd.
 *   Author : Edward Hervey <edward@collabora.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

/**
 * SECTION:gstvideodecoder
 * @title: GstVideoDecoder
 * @short_description: Base class for video decoders
 *
 * This base class is for video decoders turning encoded data into raw video
 * frames.
 *
 * The GstVideoDecoder base class and derived subclasses should cooperate as
 * follows:
 *
 * ## Configuration
 *
 *   * Initially, GstVideoDecoder calls @start when the decoder element
 *     is activated, which allows the subclass to perform any global setup.
 *
 *   * GstVideoDecoder calls @set_format to inform the subclass of caps
 *     describing input video data that it is about to receive, possibly
 *     including configuration data.
 *     While unlikely, it might be called more than once if changing input
 *     parameters requires reconfiguration.
 *
 *   * Incoming data buffers are processed as needed, as described in Data
 *     Processing below.
 *
 *   * GstVideoDecoder calls @stop at the end of all processing.
 *
 * ## Data processing
 *
 *   * The base class gathers input data, and optionally allows the subclass
 *     to parse this into manageable chunks, typically corresponding to, and
 *     referred to as, 'frames'.
 *
 *   * Each input frame is provided in turn to the subclass' @handle_frame
 *     callback.
 *   * When the subclass enables subframe mode with `gst_video_decoder_set_subframe_mode`,
 *     the base class will provide the same input frame, with different
 *     input buffers, to the subclass' @handle_frame
 *     callback. During this call, the subclass needs to take
 *     ownership of the input buffer, as @GstVideoCodecFrame.input_buffer
 *     will have been changed before the next subframe buffer is received.
 *     The subclass calls `gst_video_decoder_have_last_subframe`
 *     once the last subframe has been received, so that a new input frame
 *     can be created by the base class.
 *     Every subframe will share the same @GstVideoCodecFrame.output_buffer
 *     to write the decoding result to. The subclass is responsible for
 *     protecting access to it.
 *
 *   * If codec processing results in decoded data, the subclass should call
 *     @gst_video_decoder_finish_frame to have decoded data pushed
 *     downstream. In subframe mode
 *     the subclass should call @gst_video_decoder_finish_subframe until the
 *     last subframe, where it should call @gst_video_decoder_finish_frame.
 *     The subclass can detect the last subframe using GST_VIDEO_BUFFER_FLAG_MARKER
 *     on buffers or using its own logic to collect the subframes.
 *     In case of decoding failure, the subclass must call
 *     @gst_video_decoder_drop_frame or @gst_video_decoder_drop_subframe,
 *     to allow the base class to do timestamp and offset tracking, and possibly
 *     to requeue the frame for a later attempt in the case of reverse playback.
 *
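 *   * As an illustration only (a sketch, not part of the original
 *     documentation), a typical @handle_frame in non-subframe mode follows
 *     the pattern below, where my_codec_decode() stands in for the
 *     codec-specific decoding call:
 *
 * |[<!-- language="C" -->
 * static GstFlowReturn
 * my_decoder_handle_frame (GstVideoDecoder * decoder,
 *     GstVideoCodecFrame * frame)
 * {
 *   GstFlowReturn ret;
 *
 *   if (!my_codec_decode (frame->input_buffer)) {
 *     // Decoding failed: drop the frame so the base class keeps its
 *     // timestamp and offset tracking consistent
 *     return gst_video_decoder_drop_frame (decoder, frame);
 *   }
 *
 *   // Acquire frame->output_buffer from the negotiated downstream pool
 *   ret = gst_video_decoder_allocate_output_frame (decoder, frame);
 *   if (ret != GST_FLOW_OK) {
 *     gst_video_decoder_release_frame (decoder, frame);
 *     return ret;
 *   }
 *
 *   // ... write the decoded pixels into frame->output_buffer here ...
 *
 *   // Push the decoded frame downstream with proper timestamping
 *   return gst_video_decoder_finish_frame (decoder, frame);
 * }
 * ]|
 *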
 * ## Shutdown phase
 *
 *   * The GstVideoDecoder class calls @stop to inform the subclass that data
 *     parsing will be stopped.
 *
 * ## Additional Notes
 *
 *   * Seeking/Flushing
 *
 *     * When the pipeline is seeked or otherwise flushed, the subclass is
 *       informed via a call to its @reset callback, with the hard parameter
 *       set to true. This indicates the subclass should drop any internal data
 *       queues and timestamps and prepare for a fresh set of buffers to arrive
 *       for parsing and decoding.
 *
 *   * End Of Stream
 *
 *     * At end-of-stream, the subclass @parse function may be called some final
 *       times with the at_eos parameter set to true, indicating that the element
 *       should not expect any more data to be arriving, and it should parse any
 *       remaining frames and call gst_video_decoder_have_frame() if possible.
 *
 * The subclass is responsible for providing pad template caps for the
 * source and sink pads. The pads need to be named "sink" and "src". It also
 * needs to provide information about the output caps, when they are known.
 * This may be when the base class calls the subclass' @set_format function,
 * though it might be during decoding, before calling
 * @gst_video_decoder_finish_frame. This is done via
 * @gst_video_decoder_set_output_state.
 *
 * The subclass is also responsible for providing (presentation) timestamps
 * (likely based on corresponding input ones). If that is not applicable
 * or possible, the base class provides limited framerate-based interpolation.
 *
 * Similarly, the base class provides some limited (legacy) seeking support
 * if specifically requested by the subclass, as full-fledged support
 * should rather be left to an upstream demuxer, parser or the like. This simple
 * approach caters for seeking and duration reporting using estimated input
 * bitrates. To enable it, a subclass should call
 * @gst_video_decoder_set_estimate_rate to enable handling of incoming
 * byte-streams.
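 *
 * For example (an illustrative sketch, not part of the original
 * documentation), a subclass would typically opt in from its @start
 * implementation:
 *
 * |[<!-- language="C" -->
 * static gboolean
 * my_decoder_start (GstVideoDecoder * decoder)
 * {
 *   // Enable bitrate-based seeking/duration estimation for byte streams
 *   gst_video_decoder_set_estimate_rate (decoder, TRUE);
 *   return TRUE;
 * }
 * ]|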
 *
 * The base class provides some support for reverse playback, in particular
 * in case incoming data is not packetized or upstream does not provide
 * fragments on keyframe boundaries. However, the subclass should then be
 * prepared for the parsing and frame processing stage to occur separately
 * (in normal forward processing, the latter immediately follows the former).
 * The subclass also needs to ensure the parsing stage properly marks
 * keyframes, unless it knows the upstream elements will do so properly for
 * incoming data.
 *
 * The bare minimum that a functional subclass needs to implement is:
 *
 *   * Provide pad templates
 *   * Inform the base class of output caps via
 *     @gst_video_decoder_set_output_state
 *
 *   * Parse input data, if it is not considered packetized from upstream.
 *     Data will be provided to @parse, which should invoke
 *     @gst_video_decoder_add_to_frame and @gst_video_decoder_have_frame to
 *     separate the data belonging to each video frame.
 *
 *   * Accept data in @handle_frame and provide decoded results to
 *     @gst_video_decoder_finish_frame, or call @gst_video_decoder_drop_frame,
 *     as sketched below.
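 *
 * A minimal skeleton tying these pieces together is sketched below. It is
 * illustrative only: MyDecoder and the my_decoder_* functions are placeholder
 * names ("video/x-mycodec" is a made-up format), and my_decoder_handle_frame
 * is as sketched in the Data processing section above.
 *
 * |[<!-- language="C" -->
 * static GstStaticPadTemplate sink_template =
 *     GST_STATIC_PAD_TEMPLATE ("sink", GST_PAD_SINK, GST_PAD_ALWAYS,
 *     GST_STATIC_CAPS ("video/x-mycodec"));
 * static GstStaticPadTemplate src_template =
 *     GST_STATIC_PAD_TEMPLATE ("src", GST_PAD_SRC, GST_PAD_ALWAYS,
 *     GST_STATIC_CAPS ("video/x-raw, format=I420"));
 *
 * static gboolean
 * my_decoder_set_format (GstVideoDecoder * decoder, GstVideoCodecState * state)
 * {
 *   GstVideoCodecState *output;
 *
 *   // Announce the output format as soon as it is known
 *   output = gst_video_decoder_set_output_state (decoder,
 *       GST_VIDEO_FORMAT_I420, state->info.width, state->info.height, state);
 *   gst_video_codec_state_unref (output);
 *   return TRUE;
 * }
 *
 * static void
 * my_decoder_class_init (MyDecoderClass * klass)
 * {
 *   GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
 *   GstVideoDecoderClass *vdec_class = GST_VIDEO_DECODER_CLASS (klass);
 *
 *   gst_element_class_add_static_pad_template (element_class, &sink_template);
 *   gst_element_class_add_static_pad_template (element_class, &src_template);
 *
 *   vdec_class->set_format = my_decoder_set_format;
 *   vdec_class->handle_frame = my_decoder_handle_frame;
 * }
 * ]|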
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

/* TODO
 *
 * * Add a flag/boolean for I-frame-only/image decoders so we can do extra
 *   features, like applying QoS on input (as opposed to after the frame is
 *   decoded).
 * * Add a flag/boolean for decoders that require keyframes, so the base
 *   class can automatically discard non-keyframes before one has arrived
 * * Detect reordered frame/timestamps and fix the pts/dts
 * * Support for GstIndex (or shall we not care ?)
 * * Calculate actual latency based on input/output timestamp/frame_number
 *   and if it exceeds the recorded one, save it and emit a GST_MESSAGE_LATENCY
 * * Emit latency message when it changes
 *
 */

/* Implementation notes:
 * The Video Decoder base class operates in 2 primary processing modes, depending
 * on whether forward or reverse playback is requested.
 *
 * Forward playback:
 *   * Incoming buffer -> @parse() -> add_to_frame()/have_frame() ->
 *     handle_frame() -> push downstream
 *
 * Reverse playback is more complicated, since it involves gathering incoming
 * data regions as we loop backwards through the upstream data. The processing
 * concept (treating incoming buffers as containing one frame each to simplify
 * things) is:
 *
 * Upstream data we want to play:
 *  Buffer encoded order:  1  2  3  4  5  6  7  8  9  EOS
 *  Keyframe flag:            K        K
 *  Groupings:             AAAAAAA  BBBBBBB  CCCCCCC
 *
 * Input:
 *  Buffer reception order:  7  8  9  4  5  6  1  2  3  EOS
 *  Keyframe flag:                       K        K
 *  Discont flag:            D        D        D
 *
 * - Each Discont marks a discont in the decoding order.
 * - The keyframes mark where we can start decoding.
 *
 * Initially, we prepend incoming buffers to the gather queue. Whenever the
 * discont flag is set on an incoming buffer, the gather queue is flushed out
 * before the new buffer is collected.
 *
 * The above data will be accumulated in the gather queue like this:
 *
 *  gather queue:  9  8  7
 *                       D
 *
 * When buffer 4 is received (with a DISCONT), we flush the gather queue like
 * this:
 *
 *  while (gather)
 *    take head of queue and prepend to parse queue (this reverses the
 *    sequence, so parse queue is 7 -> 8 -> 9)
 *
 * Next, we process the parse queue, which now contains all un-parsed packets
 * (including any leftover ones from the previous decode section).
 *
 * for each buffer now in the parse queue:
 *   Call the subclass parse function, prepending each resulting frame to
 *   the parse_gather queue. Buffers which precede the first one that
 *   produces a parsed frame are retained in the parse queue for
 *   re-processing on the next cycle of parsing.
 *
 * The parse_gather queue now contains frame objects ready for decoding,
 * in reverse order.
 *  parse_gather: 9 -> 8 -> 7
 *
 * while (parse_gather)
 *   Take the head of the queue and prepend it to the decode queue
 *   If the frame was a keyframe, process the decode queue
 * decode is now 7-8-9
 *
 * Processing the decode queue results in frames with attached output buffers
 * stored in the 'output_queue' ready for outputting in reverse order.
 *
 * After we flushed the gather queue and parsed it, we add 4 to the (now empty)
 * gather queue. We get the following situation:
 *
 *  gather queue:  4
 *  decode queue:  7  8  9
 *
 * After we received 5 (Keyframe) and 6:
 *
 *  gather queue:  6  5  4
 *  decode queue:  7  8  9
 *
 * When we receive 1 (DISCONT), which triggers a flush of the gather queue:
 *
 * Copy head of the gather queue (6) to decode queue:
 *
 *  gather queue:  5  4
 *  decode queue:  6  7  8  9
 *
 * Copy head of the gather queue (5) to decode queue. This is a keyframe so we
 * can start decoding.
 *
 *  gather queue:  4
 *  decode queue:  5  6  7  8  9
 *
 * Decode frames in decode queue, store raw decoded data in output queue; we
 * can take the head of the decode queue and prepend the decoded result in the
 * output queue:
 *
 *  gather queue:  4
 *  decode queue:
 *  output queue:  9  8  7  6  5
 *
 * Now output all the frames in the output queue, picking a frame from the
 * head of the queue.
 *
 * Copy head of the gather queue (4) to decode queue; the gather queue has been
 * flushed and can now store the next input buffer:
 *
 *  gather queue:  1
 *  decode queue:  4
 *
 * When we receive EOS, the queue looks like:
 *
 *  gather queue:  3  2  1
 *  decode queue:  4
 *
 * Fill the decode queue; the first keyframe we copy is 2:
 *
 *  gather queue:  1
 *  decode queue:  2  3  4
 *
 * Decoded output:
 *
 *  gather queue:  1
 *  decode queue:
 *  output queue:  4  3  2
 *
 * Leftover buffer 1 cannot be decoded and must be discarded.
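 *
 * A condensed sketch (illustrative only, simplified from the real
 * gst_video_decoder_flush_parse() machinery) of the gather-to-parse move
 * described above, using the same GList fields of GstVideoDecoderPrivate:
 *
 *   static void
 *   flush_gather_to_parse (GstVideoDecoderPrivate * priv)
 *   {
 *     // Moving each head of 'gather' to the head of 'parse' restores
 *     // decoding order: gather 9 8 7 becomes parse 7 -> 8 -> 9
 *     while (priv->gather) {
 *       GList *node = priv->gather;
 *       priv->gather = g_list_remove_link (priv->gather, node);
 *       priv->parse = g_list_concat (node, priv->parse);
 *     }
 *   }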
 */

#include "gstvideodecoder.h"
#include "gstvideoutils.h"
#include "gstvideoutilsprivate.h"

#include <gst/video/video.h>
#include <gst/video/video-event.h>
#include <gst/video/gstvideopool.h>
#include <gst/video/gstvideometa.h>
#include <string.h>

#ifdef OHOS_OPT_PERFORMANCE
// ohos.opt.performance.0005
// add trace
#include "gst_trace.h"
#endif

GST_DEBUG_CATEGORY (videodecoder_debug);
#define GST_CAT_DEFAULT videodecoder_debug

/* properties */
#define DEFAULT_QOS TRUE
#define DEFAULT_MAX_ERRORS GST_VIDEO_DECODER_MAX_ERRORS
#define DEFAULT_MIN_FORCE_KEY_UNIT_INTERVAL 0
#define DEFAULT_DISCARD_CORRUPTED_FRAMES FALSE
#define DEFAULT_AUTOMATIC_REQUEST_SYNC_POINTS FALSE
#define DEFAULT_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS (GST_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT | GST_VIDEO_DECODER_REQUEST_SYNC_POINT_CORRUPT_OUTPUT)

/* Used for request_sync_point_frame_number. These are out of range for the
 * frame numbers and can be given special meaning */
#define REQUEST_SYNC_POINT_PENDING ((guint64) G_MAXUINT + 1)
#define REQUEST_SYNC_POINT_UNSET G_MAXUINT64

enum
{
  PROP_0,
  PROP_QOS,
  PROP_MAX_ERRORS,
  PROP_MIN_FORCE_KEY_UNIT_INTERVAL,
  PROP_DISCARD_CORRUPTED_FRAMES,
  PROP_AUTOMATIC_REQUEST_SYNC_POINTS,
  PROP_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS,
#ifdef OHOS_OPT_COMPAT
  // ohos.opt.compat.0053: in the avmetadatahelper service, only one frame is needed.
  PROP_ONLY_ONE_FRAME_REQUIRED,
#endif
};

struct _GstVideoDecoderPrivate
{
  /* FIXME introduce a context ? */

  GstBufferPool *pool;
  GstAllocator *allocator;
  GstAllocationParams params;

  /* parse tracking */
  /* input data */
  GstAdapter *input_adapter;
  /* assembles current frame */
  GstAdapter *output_adapter;

  /* Whether we attempt to convert newsegment from bytes to
   * time using a bitrate estimation */
  gboolean do_estimate_rate;

  /* Whether input is considered packetized or not */
  gboolean packetized;

  /* whether input is considered as subframes */
  gboolean subframe_mode;

  /* Error handling */
  gint max_errors;
  gint error_count;
  gboolean had_output_data;
#ifdef OHOS_OPT_COMPAT
  gboolean stream_had_output_data;
#endif
  gboolean had_input_data;

  gboolean needs_format;
  /* whether input_segment and output_segment are identical */
  gboolean in_out_segment_sync;

  /* TRUE if we have an active set of instant rate flags */
  gboolean decode_flags_override;
  GstSegmentFlags decode_flags;

  /* ... being tracked here;
   * only available during parsing or when doing subframe decoding */
  GstVideoCodecFrame *current_frame;
  /* events that should apply to the current frame */
  /* FIXME 2.0: Use a GQueue or similar, see GstVideoCodecFrame::events */
  GList *current_frame_events;
  /* events that should be pushed before the next frame */
  /* FIXME 2.0: Use a GQueue or similar, see GstVideoCodecFrame::events */
  GList *pending_events;

  /* relative offset of input data */
  guint64 input_offset;
  /* relative offset of frame */
  guint64 frame_offset;
  /* tracking ts and offsets */
  GQueue timestamps;

  /* last outgoing ts */
  GstClockTime last_timestamp_out;
  /* incoming pts - dts */
  GstClockTime pts_delta;
  gboolean reordered_output;

  /* FIXME: Consider using a GQueue or other better fitting data structure */
  /* reverse playback */
  /* collect input */
  GList *gather;
  /* to-be-parsed */
  GList *parse;
  /* collected parsed frames */
  GList *parse_gather;
  /* frames to be handled == decoded */
  GList *decode;
  /* collected output - of buffer objects, not frames */
  GList *output_queued;


  /* base_picture_number is the picture number of the reference picture */
  guint64 base_picture_number;
  /* combined with base_picture_number and framerate calculations to yield
   * the (presentation) ts */
  GstClockTime base_timestamp;

  /* Properties */
  GstClockTime min_force_key_unit_interval;
  gboolean discard_corrupted_frames;

  /* Key unit related state */
  gboolean needs_sync_point;
  GstVideoDecoderRequestSyncPointFlags request_sync_point_flags;
  guint64 request_sync_point_frame_number;
  GstClockTime last_force_key_unit_time;
  /* -1 if we saw no sync point yet */
  guint64 distance_from_sync;

  gboolean automatic_request_sync_points;
  GstVideoDecoderRequestSyncPointFlags automatic_request_sync_point_flags;

  guint32 system_frame_number;
  guint32 decode_frame_number;

  GQueue frames;                /* Protected with OBJECT_LOCK */
  GstVideoCodecState *input_state;
  GstVideoCodecState *output_state;     /* OBJECT_LOCK and STREAM_LOCK */
  gboolean output_state_changed;

  /* QoS properties */
  gboolean do_qos;
  gdouble proportion;           /* OBJECT_LOCK */
  GstClockTime earliest_time;   /* OBJECT_LOCK */
  GstClockTime qos_frame_duration;      /* OBJECT_LOCK */
  gboolean discont;
  /* qos messages: frames dropped/processed */
  guint dropped;
  guint processed;

  /* Outgoing byte size ? */
  gint64 bytes_out;
  gint64 time;

  gint64 min_latency;
  gint64 max_latency;

  /* upstream stream tags (global tags are passed through as-is) */
  GstTagList *upstream_tags;

  /* subclass tags */
  GstTagList *tags;
  GstTagMergeMode tags_merge_mode;

  gboolean tags_changed;

  /* flags */
  gboolean use_default_pad_acceptcaps;

#ifndef GST_DISABLE_DEBUG
  /* Diagnostic time for reporting the time
   * from flush to first output */
  GstClockTime last_reset_time;
#endif
#ifdef OHOS_OPT_PERFORMANCE
  // ohos.opt.performance.0006: the PTS segment of the first frame is calibrated to improve the performance.
  gboolean has_recv_first_key_frame;
  gboolean has_push_first_frame;
#endif
#ifdef OHOS_OPT_COMPAT
  // ohos.opt.compat.0053
  gboolean only_one_frame_required;
#endif
};

static GstElementClass *parent_class = NULL;
static gint private_offset = 0;

/* cached quark to avoid contention on the global quark table lock */
#define META_TAG_VIDEO meta_tag_video_quark
static GQuark meta_tag_video_quark;

static void gst_video_decoder_class_init (GstVideoDecoderClass * klass);
static void gst_video_decoder_init (GstVideoDecoder * dec,
    GstVideoDecoderClass * klass);

static void gst_video_decoder_finalize (GObject * object);
static void gst_video_decoder_get_property (GObject * object, guint property_id,
    GValue * value, GParamSpec * pspec);
static void gst_video_decoder_set_property (GObject * object, guint property_id,
    const GValue * value, GParamSpec * pspec);

static gboolean gst_video_decoder_setcaps (GstVideoDecoder * dec,
    GstCaps * caps);
static gboolean gst_video_decoder_sink_event (GstPad * pad, GstObject * parent,
    GstEvent * event);
static gboolean gst_video_decoder_src_event (GstPad * pad, GstObject * parent,
    GstEvent * event);
#ifdef OHOS_OPT_PERFORMANCE
// ohos.opt.performance.0005
// add trace
static GstFlowReturn gst_video_decoder_chain_trace (GstPad * pad, GstObject * parent,
    GstBuffer * buf);
#endif
static GstFlowReturn gst_video_decoder_chain (GstPad * pad, GstObject * parent,
    GstBuffer * buf);
static gboolean gst_video_decoder_sink_query (GstPad * pad, GstObject * parent,
    GstQuery * query);
static GstStateChangeReturn gst_video_decoder_change_state (GstElement *
    element, GstStateChange transition);
static gboolean gst_video_decoder_src_query (GstPad * pad, GstObject * parent,
    GstQuery * query);
static void gst_video_decoder_reset (GstVideoDecoder * decoder, gboolean full,
    gboolean flush_hard);

static GstFlowReturn gst_video_decoder_decode_frame (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame);

static void gst_video_decoder_push_event_list (GstVideoDecoder * decoder,
    GList * events);
static GstClockTime gst_video_decoder_get_frame_duration (GstVideoDecoder *
    decoder, GstVideoCodecFrame * frame);
static GstVideoCodecFrame *gst_video_decoder_new_frame (GstVideoDecoder *
    decoder);
static GstFlowReturn gst_video_decoder_clip_and_push_buf (GstVideoDecoder *
    decoder, GstBuffer * buf);
static GstFlowReturn gst_video_decoder_flush_parse (GstVideoDecoder * dec,
    gboolean at_eos);

static void gst_video_decoder_clear_queues (GstVideoDecoder * dec);

static gboolean gst_video_decoder_sink_event_default (GstVideoDecoder * decoder,
    GstEvent * event);
static gboolean gst_video_decoder_src_event_default (GstVideoDecoder * decoder,
    GstEvent * event);
static gboolean gst_video_decoder_decide_allocation_default (GstVideoDecoder *
    decoder, GstQuery * query);
static gboolean gst_video_decoder_propose_allocation_default (GstVideoDecoder *
    decoder, GstQuery * query);
static gboolean gst_video_decoder_negotiate_default (GstVideoDecoder * decoder);
static GstFlowReturn gst_video_decoder_parse_available (GstVideoDecoder * dec,
    gboolean at_eos, gboolean new_buffer);
static gboolean gst_video_decoder_negotiate_unlocked (GstVideoDecoder *
    decoder);
static gboolean gst_video_decoder_sink_query_default (GstVideoDecoder * decoder,
    GstQuery * query);
static gboolean gst_video_decoder_src_query_default (GstVideoDecoder * decoder,
    GstQuery * query);

static gboolean gst_video_decoder_transform_meta_default (GstVideoDecoder *
    decoder, GstVideoCodecFrame * frame, GstMeta * meta);

static gboolean gst_video_decoder_handle_missing_data_default (GstVideoDecoder *
    decoder, GstClockTime timestamp, GstClockTime duration);

static void gst_video_decoder_copy_metas (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame, GstBuffer * src_buffer,
    GstBuffer * dest_buffer);

static void gst_video_decoder_request_sync_point_internal (GstVideoDecoder *
    dec, GstClockTime deadline, GstVideoDecoderRequestSyncPointFlags flags);

/* we can't use G_DEFINE_ABSTRACT_TYPE because we need the klass in the _init
 * method to get to the padtemplates */
GType
gst_video_decoder_get_type (void)
{
  static gsize type = 0;

  if (g_once_init_enter (&type)) {
    GType _type;
    static const GTypeInfo info = {
      sizeof (GstVideoDecoderClass),
      NULL,
      NULL,
      (GClassInitFunc) gst_video_decoder_class_init,
      NULL,
      NULL,
      sizeof (GstVideoDecoder),
      0,
      (GInstanceInitFunc) gst_video_decoder_init,
    };

    _type = g_type_register_static (GST_TYPE_ELEMENT,
        "GstVideoDecoder", &info, G_TYPE_FLAG_ABSTRACT);

    private_offset =
        g_type_add_instance_private (_type, sizeof (GstVideoDecoderPrivate));

    g_once_init_leave (&type, _type);
  }
  return type;
}

static inline GstVideoDecoderPrivate *
gst_video_decoder_get_instance_private (GstVideoDecoder * self)
{
  return (G_STRUCT_MEMBER_P (self, private_offset));
}

static void
gst_video_decoder_class_init (GstVideoDecoderClass * klass)
{
  GObjectClass *gobject_class;
  GstElementClass *gstelement_class;

  gobject_class = G_OBJECT_CLASS (klass);
  gstelement_class = GST_ELEMENT_CLASS (klass);

  GST_DEBUG_CATEGORY_INIT (videodecoder_debug, "videodecoder", 0,
      "Base Video Decoder");

  parent_class = g_type_class_peek_parent (klass);

  if (private_offset != 0)
    g_type_class_adjust_private_offset (klass, &private_offset);

  gobject_class->finalize = gst_video_decoder_finalize;
  gobject_class->get_property = gst_video_decoder_get_property;
  gobject_class->set_property = gst_video_decoder_set_property;

  gstelement_class->change_state =
      GST_DEBUG_FUNCPTR (gst_video_decoder_change_state);

  klass->sink_event = gst_video_decoder_sink_event_default;
  klass->src_event = gst_video_decoder_src_event_default;
  klass->decide_allocation = gst_video_decoder_decide_allocation_default;
  klass->propose_allocation = gst_video_decoder_propose_allocation_default;
  klass->negotiate = gst_video_decoder_negotiate_default;
  klass->sink_query = gst_video_decoder_sink_query_default;
  klass->src_query = gst_video_decoder_src_query_default;
  klass->transform_meta = gst_video_decoder_transform_meta_default;
  klass->handle_missing_data = gst_video_decoder_handle_missing_data_default;

  /**
   * GstVideoDecoder:qos:
   *
   * If set to %TRUE the decoder will handle QoS events received
   * from downstream elements.
   * This includes dropping output frames which are detected as late
   * using the metrics reported by those events.
   *
   * Since: 1.18
   */
  g_object_class_install_property (gobject_class, PROP_QOS,
      g_param_spec_boolean ("qos", "Quality of Service",
          "Handle Quality-of-Service events from downstream",
          DEFAULT_QOS, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstVideoDecoder:max-errors:
   *
   * Maximum number of tolerated consecutive decode errors. See
   * gst_video_decoder_set_max_errors() for more details.
   *
   * Since: 1.18
   */
  g_object_class_install_property (gobject_class, PROP_MAX_ERRORS,
      g_param_spec_int ("max-errors", "Max errors",
          "Max consecutive decoder errors before returning flow error",
          -1, G_MAXINT, DEFAULT_MAX_ERRORS,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstVideoDecoder:min-force-key-unit-interval:
   *
   * Minimum interval between force-key-unit events sent upstream by the
   * decoder. Setting this to 0 will cause every event to be handled, setting
   * this to %GST_CLOCK_TIME_NONE will cause every event to be ignored.
   *
   * See gst_video_event_new_upstream_force_key_unit() for more details about
   * force-key-unit events.
   *
   * Since: 1.20
   */
  g_object_class_install_property (gobject_class,
      PROP_MIN_FORCE_KEY_UNIT_INTERVAL,
      g_param_spec_uint64 ("min-force-key-unit-interval",
          "Minimum Force Keyunit Interval",
          "Minimum interval between force-keyunit requests in nanoseconds", 0,
          G_MAXUINT64, DEFAULT_MIN_FORCE_KEY_UNIT_INTERVAL,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstVideoDecoder:discard-corrupted-frames:
   *
   * If set to %TRUE the decoder will discard frames that are marked as
   * corrupted instead of outputting them.
   *
   * Since: 1.20
   */
  g_object_class_install_property (gobject_class, PROP_DISCARD_CORRUPTED_FRAMES,
      g_param_spec_boolean ("discard-corrupted-frames",
          "Discard Corrupted Frames",
          "Discard frames marked as corrupted instead of outputting them",
          DEFAULT_DISCARD_CORRUPTED_FRAMES,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstVideoDecoder:automatic-request-sync-points:
   *
   * If set to %TRUE the decoder will automatically request sync points when
   * it seems like a good idea, e.g. if the first frames are not key frames or
   * if packet loss was reported by upstream.
   *
   * Since: 1.20
   */
  g_object_class_install_property (gobject_class,
      PROP_AUTOMATIC_REQUEST_SYNC_POINTS,
      g_param_spec_boolean ("automatic-request-sync-points",
          "Automatic Request Sync Points",
          "Automatically request sync points when it would be useful",
          DEFAULT_AUTOMATIC_REQUEST_SYNC_POINTS,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstVideoDecoder:automatic-request-sync-point-flags:
   *
   * GstVideoDecoderRequestSyncPointFlags to use for the automatically
   * requested sync points if `automatic-request-sync-points` is enabled.
   *
   * Since: 1.20
   */
  g_object_class_install_property (gobject_class,
      PROP_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS,
      g_param_spec_flags ("automatic-request-sync-point-flags",
          "Automatic Request Sync Point Flags",
          "Flags to use when automatically requesting sync points",
          GST_TYPE_VIDEO_DECODER_REQUEST_SYNC_POINT_FLAGS,
          DEFAULT_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
#ifdef OHOS_OPT_COMPAT
  // ohos.opt.compat.0053
  g_object_class_install_property (gobject_class,
      PROP_ONLY_ONE_FRAME_REQUIRED,
      g_param_spec_boolean ("only-one-frame-required",
          "Only one frame required",
          "Only one frame required for avmetadatahelper service",
          FALSE,
          G_PARAM_WRITABLE | G_PARAM_STATIC_STRINGS));
#endif

  meta_tag_video_quark = g_quark_from_static_string (GST_META_TAG_VIDEO_STR);
}

static void
gst_video_decoder_init (GstVideoDecoder * decoder, GstVideoDecoderClass * klass)
{
  GstPadTemplate *pad_template;
  GstPad *pad;

  GST_DEBUG_OBJECT (decoder, "gst_video_decoder_init");

  decoder->priv = gst_video_decoder_get_instance_private (decoder);

  pad_template =
      gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "sink");
  g_return_if_fail (pad_template != NULL);

  decoder->sinkpad = pad = gst_pad_new_from_template (pad_template, "sink");

#ifdef OHOS_OPT_PERFORMANCE
  // ohos.opt.performance.0005
  // add trace
  gst_pad_set_chain_function (pad, GST_DEBUG_FUNCPTR (gst_video_decoder_chain_trace));
#else
  gst_pad_set_chain_function (pad, GST_DEBUG_FUNCPTR (gst_video_decoder_chain));
#endif
  gst_pad_set_event_function (pad,
      GST_DEBUG_FUNCPTR (gst_video_decoder_sink_event));
  gst_pad_set_query_function (pad,
      GST_DEBUG_FUNCPTR (gst_video_decoder_sink_query));
  gst_element_add_pad (GST_ELEMENT (decoder), decoder->sinkpad);

  pad_template =
      gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "src");
  g_return_if_fail (pad_template != NULL);

  decoder->srcpad = pad = gst_pad_new_from_template (pad_template, "src");

  gst_pad_set_event_function (pad,
      GST_DEBUG_FUNCPTR (gst_video_decoder_src_event));
  gst_pad_set_query_function (pad,
      GST_DEBUG_FUNCPTR (gst_video_decoder_src_query));
  gst_element_add_pad (GST_ELEMENT (decoder), decoder->srcpad);

  gst_segment_init (&decoder->input_segment, GST_FORMAT_TIME);
  gst_segment_init (&decoder->output_segment, GST_FORMAT_TIME);

  g_rec_mutex_init (&decoder->stream_lock);

  decoder->priv->input_adapter = gst_adapter_new ();
  decoder->priv->output_adapter = gst_adapter_new ();
  decoder->priv->packetized = TRUE;
  decoder->priv->needs_format = FALSE;
#ifdef OHOS_OPT_COMPAT
  decoder->priv->stream_had_output_data = FALSE;
#endif

  g_queue_init (&decoder->priv->frames);
  g_queue_init (&decoder->priv->timestamps);

  /* properties */
  decoder->priv->do_qos = DEFAULT_QOS;
  decoder->priv->max_errors = GST_VIDEO_DECODER_MAX_ERRORS;

  decoder->priv->min_latency = 0;
  decoder->priv->max_latency = 0;

  decoder->priv->automatic_request_sync_points =
      DEFAULT_AUTOMATIC_REQUEST_SYNC_POINTS;
  decoder->priv->automatic_request_sync_point_flags =
      DEFAULT_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS;
#ifdef OHOS_OPT_COMPAT
  // ohos.opt.compat.0053
  decoder->priv->only_one_frame_required = FALSE;
#endif

  gst_video_decoder_reset (decoder, TRUE, TRUE);
}

static GstVideoCodecState *
_new_input_state (GstCaps * caps)
{
  GstVideoCodecState *state;
  GstStructure *structure;
  const GValue *codec_data;

  state = g_slice_new0 (GstVideoCodecState);
  state->ref_count = 1;
  gst_video_info_init (&state->info);
  if (G_UNLIKELY (!gst_video_info_from_caps (&state->info, caps)))
    goto parse_fail;
  state->caps = gst_caps_ref (caps);

  structure = gst_caps_get_structure (caps, 0);

  codec_data = gst_structure_get_value (structure, "codec_data");
  if (codec_data && G_VALUE_TYPE (codec_data) == GST_TYPE_BUFFER)
    state->codec_data = GST_BUFFER (g_value_dup_boxed (codec_data));

  return state;

parse_fail:
  {
    g_slice_free (GstVideoCodecState, state);
    return NULL;
  }
}

static GstVideoCodecState *
_new_output_state (GstVideoFormat fmt, GstVideoInterlaceMode interlace_mode,
    guint width, guint height, GstVideoCodecState * reference,
    gboolean copy_interlace_mode)
{
  GstVideoCodecState *state;

  state = g_slice_new0 (GstVideoCodecState);
  state->ref_count = 1;
  gst_video_info_init (&state->info);
  if (!gst_video_info_set_interlaced_format (&state->info, fmt, interlace_mode,
          width, height)) {
    g_slice_free (GstVideoCodecState, state);
    return NULL;
  }

  if (reference) {
    GstVideoInfo *tgt, *ref;

    tgt = &state->info;
    ref = &reference->info;

    /* Copy over extra fields from reference state */
    if (copy_interlace_mode)
      tgt->interlace_mode = ref->interlace_mode;
    tgt->flags = ref->flags;
    tgt->chroma_site = ref->chroma_site;
    tgt->colorimetry = ref->colorimetry;
    GST_DEBUG ("reference par %d/%d fps %d/%d",
        ref->par_n, ref->par_d, ref->fps_n, ref->fps_d);
    tgt->par_n = ref->par_n;
    tgt->par_d = ref->par_d;
    tgt->fps_n = ref->fps_n;
    tgt->fps_d = ref->fps_d;
    tgt->views = ref->views;

    GST_VIDEO_INFO_FIELD_ORDER (tgt) = GST_VIDEO_INFO_FIELD_ORDER (ref);

    if (GST_VIDEO_INFO_MULTIVIEW_MODE (ref) != GST_VIDEO_MULTIVIEW_MODE_NONE) {
      GST_VIDEO_INFO_MULTIVIEW_MODE (tgt) = GST_VIDEO_INFO_MULTIVIEW_MODE (ref);
      GST_VIDEO_INFO_MULTIVIEW_FLAGS (tgt) =
          GST_VIDEO_INFO_MULTIVIEW_FLAGS (ref);
    } else {
      /* Default to MONO, overridden as needed by sub-classes */
      GST_VIDEO_INFO_MULTIVIEW_MODE (tgt) = GST_VIDEO_MULTIVIEW_MODE_MONO;
      GST_VIDEO_INFO_MULTIVIEW_FLAGS (tgt) = GST_VIDEO_MULTIVIEW_FLAGS_NONE;
    }
  }

  GST_DEBUG ("reference par %d/%d fps %d/%d",
      state->info.par_n, state->info.par_d,
      state->info.fps_n, state->info.fps_d);

  return state;
}

static gboolean
gst_video_decoder_setcaps (GstVideoDecoder * decoder, GstCaps * caps)
{
  GstVideoDecoderClass *decoder_class;
  GstVideoCodecState *state;
  gboolean ret = TRUE;

  decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);

  GST_DEBUG_OBJECT (decoder, "setcaps %" GST_PTR_FORMAT, caps);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  if (decoder->priv->input_state) {
    GST_DEBUG_OBJECT (decoder,
        "Checking if caps changed old %" GST_PTR_FORMAT " new %" GST_PTR_FORMAT,
        decoder->priv->input_state->caps, caps);
    if (gst_caps_is_equal (decoder->priv->input_state->caps, caps))
      goto caps_not_changed;
  }

  state = _new_input_state (caps);

  if (G_UNLIKELY (state == NULL))
    goto parse_fail;

  if (decoder_class->set_format)
    ret = decoder_class->set_format (decoder, state);

  if (!ret)
    goto refused_format;

  if (decoder->priv->input_state)
    gst_video_codec_state_unref (decoder->priv->input_state);
  decoder->priv->input_state = state;

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return ret;

caps_not_changed:
  {
    GST_DEBUG_OBJECT (decoder, "Caps did not change - ignore");
    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    return TRUE;
  }

  /* ERRORS */
parse_fail:
  {
    GST_WARNING_OBJECT (decoder, "Failed to parse caps");
    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    return FALSE;
  }

refused_format:
  {
    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    GST_WARNING_OBJECT (decoder, "Subclass refused caps");
    gst_video_codec_state_unref (state);
    return FALSE;
  }
}

static void
gst_video_decoder_finalize (GObject * object)
{
  GstVideoDecoder *decoder;

  decoder = GST_VIDEO_DECODER (object);

  GST_DEBUG_OBJECT (object, "finalize");

  g_rec_mutex_clear (&decoder->stream_lock);

  if (decoder->priv->input_adapter) {
    g_object_unref (decoder->priv->input_adapter);
    decoder->priv->input_adapter = NULL;
  }
  if (decoder->priv->output_adapter) {
    g_object_unref (decoder->priv->output_adapter);
    decoder->priv->output_adapter = NULL;
  }

  if (decoder->priv->input_state)
    gst_video_codec_state_unref (decoder->priv->input_state);
  if (decoder->priv->output_state)
    gst_video_codec_state_unref (decoder->priv->output_state);

  if (decoder->priv->pool) {
    gst_object_unref (decoder->priv->pool);
    decoder->priv->pool = NULL;
  }

  if (decoder->priv->allocator) {
    gst_object_unref (decoder->priv->allocator);
    decoder->priv->allocator = NULL;
  }

  G_OBJECT_CLASS (parent_class)->finalize (object);
}

static void
gst_video_decoder_get_property (GObject * object, guint property_id,
    GValue * value, GParamSpec * pspec)
{
  GstVideoDecoder *dec = GST_VIDEO_DECODER (object);
  GstVideoDecoderPrivate *priv = dec->priv;

  switch (property_id) {
    case PROP_QOS:
      g_value_set_boolean (value, priv->do_qos);
      break;
    case PROP_MAX_ERRORS:
      g_value_set_int (value, gst_video_decoder_get_max_errors (dec));
      break;
    case PROP_MIN_FORCE_KEY_UNIT_INTERVAL:
      g_value_set_uint64 (value, priv->min_force_key_unit_interval);
      break;
    case PROP_DISCARD_CORRUPTED_FRAMES:
      g_value_set_boolean (value, priv->discard_corrupted_frames);
      break;
    case PROP_AUTOMATIC_REQUEST_SYNC_POINTS:
      g_value_set_boolean (value, priv->automatic_request_sync_points);
      break;
    case PROP_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS:
      g_value_set_flags (value, priv->automatic_request_sync_point_flags);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
      break;
  }
}

static void
gst_video_decoder_set_property (GObject * object, guint property_id,
    const GValue * value, GParamSpec * pspec)
{
  GstVideoDecoder *dec = GST_VIDEO_DECODER (object);
  GstVideoDecoderPrivate *priv = dec->priv;

  switch (property_id) {
    case PROP_QOS:
      priv->do_qos = g_value_get_boolean (value);
      break;
    case PROP_MAX_ERRORS:
      gst_video_decoder_set_max_errors (dec, g_value_get_int (value));
      break;
    case PROP_MIN_FORCE_KEY_UNIT_INTERVAL:
      priv->min_force_key_unit_interval = g_value_get_uint64 (value);
      break;
    case PROP_DISCARD_CORRUPTED_FRAMES:
      priv->discard_corrupted_frames = g_value_get_boolean (value);
      break;
    case PROP_AUTOMATIC_REQUEST_SYNC_POINTS:
      priv->automatic_request_sync_points = g_value_get_boolean (value);
      break;
    case PROP_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS:
      priv->automatic_request_sync_point_flags = g_value_get_flags (value);
      break;
#ifdef OHOS_OPT_COMPAT
    // ohos.opt.compat.0053
    case PROP_ONLY_ONE_FRAME_REQUIRED:
      priv->only_one_frame_required = g_value_get_boolean (value);
      break;
#endif
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
      break;
  }
}

/* hard == FLUSH, otherwise discont */
static GstFlowReturn
gst_video_decoder_flush (GstVideoDecoder * dec, gboolean hard)
{
  GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (dec);
  GstFlowReturn ret = GST_FLOW_OK;

  GST_LOG_OBJECT (dec, "flush hard %d", hard);

  /* Inform subclass */
  if (klass->reset) {
    GST_FIXME_OBJECT (dec, "GstVideoDecoder::reset() is deprecated");
    klass->reset (dec, hard);
  }

  if (klass->flush)
    klass->flush (dec);

  /* and get (re)set for the sequel */
  gst_video_decoder_reset (dec, FALSE, hard);

  return ret;
}

static GstEvent *
gst_video_decoder_create_merged_tags_event (GstVideoDecoder * dec)
{
  GstTagList *merged_tags;

  GST_LOG_OBJECT (dec, "upstream : %" GST_PTR_FORMAT, dec->priv->upstream_tags);
  GST_LOG_OBJECT (dec, "decoder  : %" GST_PTR_FORMAT, dec->priv->tags);
  GST_LOG_OBJECT (dec, "mode     : %d", dec->priv->tags_merge_mode);

  merged_tags =
      gst_tag_list_merge (dec->priv->upstream_tags, dec->priv->tags,
      dec->priv->tags_merge_mode);

  GST_DEBUG_OBJECT (dec, "merged   : %" GST_PTR_FORMAT, merged_tags);

  if (merged_tags == NULL)
    return NULL;

  if (gst_tag_list_is_empty (merged_tags)) {
    gst_tag_list_unref (merged_tags);
    return NULL;
  }

  return gst_event_new_tag (merged_tags);
}

static gboolean
gst_video_decoder_push_event (GstVideoDecoder * decoder, GstEvent * event)
{
  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_SEGMENT:
    {
      GstSegment segment;

      gst_event_copy_segment (event, &segment);

#ifdef OHOS_OPT_PERFORMANCE
      GST_INFO_OBJECT (decoder, "segment %" GST_SEGMENT_FORMAT, &segment);
#else
      GST_DEBUG_OBJECT (decoder, "segment %" GST_SEGMENT_FORMAT, &segment);
#endif

      if (segment.format != GST_FORMAT_TIME) {
        GST_DEBUG_OBJECT (decoder, "received non TIME newsegment");
        break;
      }

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      decoder->output_segment = segment;
      decoder->priv->in_out_segment_sync =
          gst_segment_is_equal (&decoder->input_segment, &segment);
      decoder->priv->last_timestamp_out = GST_CLOCK_TIME_NONE;
      decoder->priv->earliest_time = GST_CLOCK_TIME_NONE;
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
      break;
    }
    default:
      break;
  }

  GST_DEBUG_OBJECT (decoder, "pushing event %s",
      gst_event_type_get_name (GST_EVENT_TYPE (event)));

  return gst_pad_push_event (decoder->srcpad, event);
}

static GstFlowReturn
gst_video_decoder_parse_available (GstVideoDecoder * dec, gboolean at_eos,
    gboolean new_buffer)
{
  GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (dec);
  GstVideoDecoderPrivate *priv = dec->priv;
  GstFlowReturn ret = GST_FLOW_OK;
  gsize was_available, available;
  guint inactive = 0;

  available = gst_adapter_available (priv->input_adapter);

  while (available || new_buffer) {
    new_buffer = FALSE;
    /* current frame may have been parsed and handled,
     * so we need to set up a new one when asking subclass to parse */
    if (priv->current_frame == NULL)
      priv->current_frame = gst_video_decoder_new_frame (dec);

    was_available = available;
    ret = decoder_class->parse (dec, priv->current_frame,
        priv->input_adapter, at_eos);
    if (ret != GST_FLOW_OK)
      break;

    /* if the subclass returned success (GST_FLOW_OK), it is expected
     * to have collected and submitted a frame, i.e. it should have
     * called gst_video_decoder_have_frame(), or at least consumed a
     * few bytes through gst_video_decoder_add_to_frame().
     *
     * Otherwise, this is an implementation bug, and we error out
     * after 2 failed attempts */
    available = gst_adapter_available (priv->input_adapter);
    if (!priv->current_frame || available != was_available)
      inactive = 0;
    else if (++inactive == 2)
      goto error_inactive;
  }

  return ret;

  /* ERRORS */
error_inactive:
  {
    GST_ERROR_OBJECT (dec, "Failed to consume data. Error in subclass?");
    return GST_FLOW_ERROR;
  }
}

/* This function has to be called with the stream lock taken. */
static GstFlowReturn
gst_video_decoder_drain_out (GstVideoDecoder * dec, gboolean at_eos)
{
  GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (dec);
  GstVideoDecoderPrivate *priv = dec->priv;
  GstFlowReturn ret = GST_FLOW_OK;

  if (dec->input_segment.rate > 0.0) {
    /* Forward mode, if unpacketized, give the child class
     * a final chance to flush out packets */
    if (!priv->packetized) {
      ret = gst_video_decoder_parse_available (dec, TRUE, FALSE);
    }

    if (at_eos) {
      if (decoder_class->finish)
        ret = decoder_class->finish (dec);
    } else {
      if (decoder_class->drain) {
        ret = decoder_class->drain (dec);
      } else {
        GST_FIXME_OBJECT (dec, "Sub-class should implement drain()");
      }
    }
  } else {
    /* Reverse playback mode */
    ret = gst_video_decoder_flush_parse (dec, TRUE);
  }

  return ret;
}

static GList *
_flush_events (GstPad * pad, GList * events)
{
  GList *tmp;

  for (tmp = events; tmp; tmp = tmp->next) {
    if (GST_EVENT_TYPE (tmp->data) != GST_EVENT_EOS &&
        GST_EVENT_TYPE (tmp->data) != GST_EVENT_SEGMENT &&
        GST_EVENT_IS_STICKY (tmp->data)) {
      gst_pad_store_sticky_event (pad, GST_EVENT_CAST (tmp->data));
    }
    gst_event_unref (tmp->data);
  }
  g_list_free (events);

  return NULL;
}

/* Must be called holding the GST_VIDEO_DECODER_STREAM_LOCK */
static gboolean
gst_video_decoder_negotiate_default_caps (GstVideoDecoder * decoder)
{
  GstCaps *caps, *templcaps;
  GstVideoCodecState *state;
  GstVideoInfo info;
  gint i;
  gint caps_size;
  GstStructure *structure;

  templcaps = gst_pad_get_pad_template_caps (decoder->srcpad);
  caps = gst_pad_peer_query_caps (decoder->srcpad, templcaps);
  if (caps)
    gst_caps_unref (templcaps);
  else
    caps = templcaps;
  templcaps = NULL;

  if (!caps || gst_caps_is_empty (caps) || gst_caps_is_any (caps))
    goto caps_error;

  GST_LOG_OBJECT (decoder, "peer caps %" GST_PTR_FORMAT, caps);

  /* before fixating, try to use whatever upstream provided */
  caps = gst_caps_make_writable (caps);
  caps_size = gst_caps_get_size (caps);
  if (decoder->priv->input_state && decoder->priv->input_state->caps) {
    GstCaps *sinkcaps = decoder->priv->input_state->caps;
    GstStructure *structure = gst_caps_get_structure (sinkcaps, 0);
    gint width, height;

    if (gst_structure_get_int (structure, "width", &width)) {
      for (i = 0; i < caps_size; i++) {
        gst_structure_set (gst_caps_get_structure (caps, i), "width",
            G_TYPE_INT, width, NULL);
      }
    }

    if (gst_structure_get_int (structure, "height", &height)) {
      for (i = 0; i < caps_size; i++) {
        gst_structure_set (gst_caps_get_structure (caps, i), "height",
            G_TYPE_INT, height, NULL);
      }
    }
  }

  for (i = 0; i < caps_size; i++) {
    structure = gst_caps_get_structure (caps, i);
    /* Random I420 1280x720 for fixation */
    if (gst_structure_has_field (structure, "format"))
      gst_structure_fixate_field_string (structure, "format", "I420");
    else
      gst_structure_set (structure, "format", G_TYPE_STRING, "I420", NULL);

    if (gst_structure_has_field (structure, "width"))
      gst_structure_fixate_field_nearest_int (structure, "width", 1280);
    else
      gst_structure_set (structure, "width", G_TYPE_INT, 1280, NULL);

    if (gst_structure_has_field (structure, "height"))
      gst_structure_fixate_field_nearest_int (structure, "height", 720);
    else
      gst_structure_set (structure, "height", G_TYPE_INT, 720, NULL);
  }
  caps = gst_caps_fixate (caps);

  if (!caps || !gst_video_info_from_caps (&info, caps))
    goto caps_error;

  GST_INFO_OBJECT (decoder,
      "Chose default caps %" GST_PTR_FORMAT " for initial gap", caps);
  state =
      gst_video_decoder_set_output_state (decoder, info.finfo->format,
      info.width, info.height, decoder->priv->input_state);
  gst_video_codec_state_unref (state);
  gst_caps_unref (caps);

  return TRUE;

caps_error:
  {
    if (caps)
      gst_caps_unref (caps);
    return FALSE;
  }
}

static gboolean
gst_video_decoder_handle_missing_data_default (GstVideoDecoder *
    decoder, GstClockTime timestamp, GstClockTime duration)
{
  GstVideoDecoderPrivate *priv;

  priv = decoder->priv;

  if (priv->automatic_request_sync_points) {
    GstClockTime deadline =
        gst_segment_to_running_time (&decoder->input_segment, GST_FORMAT_TIME,
        timestamp);

    GST_DEBUG_OBJECT (decoder,
        "Requesting sync point for missing data at running time %"
        GST_TIME_FORMAT " timestamp %" GST_TIME_FORMAT " with duration %"
        GST_TIME_FORMAT, GST_TIME_ARGS (deadline), GST_TIME_ARGS (timestamp),
        GST_TIME_ARGS (duration));

    gst_video_decoder_request_sync_point_internal (decoder, deadline,
        priv->automatic_request_sync_point_flags);
  }

  return TRUE;
}

static gboolean
gst_video_decoder_sink_event_default (GstVideoDecoder * decoder,
    GstEvent * event)
{
  GstVideoDecoderClass *decoder_class;
  GstVideoDecoderPrivate *priv;
  gboolean ret = FALSE;
  gboolean forward_immediate = FALSE;

  decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);

  priv = decoder->priv;

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_STREAM_START:
    {
      GstFlowReturn flow_ret = GST_FLOW_OK;

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
      ret = (flow_ret == GST_FLOW_OK);

      GST_DEBUG_OBJECT (decoder, "received STREAM_START. Clearing taglist");
      /* Flush upstream tags after a STREAM_START */
      if (priv->upstream_tags) {
        gst_tag_list_unref (priv->upstream_tags);
        priv->upstream_tags = NULL;
        priv->tags_changed = TRUE;
      }
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

      /* Forward STREAM_START immediately. Everything is drained after
       * the STREAM_START event and we can forward this event immediately
       * now without having buffers out of order.
       */
      forward_immediate = TRUE;
      break;
    }
    case GST_EVENT_CAPS:
    {
      GstCaps *caps;

      gst_event_parse_caps (event, &caps);
      ret = gst_video_decoder_setcaps (decoder, caps);
      gst_event_unref (event);
      event = NULL;
      break;
    }
    case GST_EVENT_SEGMENT_DONE:
    {
      GstFlowReturn flow_ret = GST_FLOW_OK;

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
      ret = (flow_ret == GST_FLOW_OK);

      /* Forward SEGMENT_DONE immediately. This is required
       * because no buffer or serialized event might come
       * after SEGMENT_DONE and nothing could trigger another
       * _finish_frame() call.
       *
       * The subclass can override this behaviour by overriding
       * the ::sink_event() vfunc and not chaining up to the
       * parent class' ::sink_event() until a later time.
       */
      forward_immediate = TRUE;
      break;
    }
    case GST_EVENT_EOS:
    {
      GstFlowReturn flow_ret = GST_FLOW_OK;

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      flow_ret = gst_video_decoder_drain_out (decoder, TRUE);
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
      ret = (flow_ret == GST_FLOW_OK);

      /* Error out even if EOS was ok when we had input, but no output */
#ifdef OHOS_OPT_COMPAT
      /* ohos.opt.compat.0049
       * When we seek to a position where there is no keyframe and the decoding
       * fails, we don't consider it an error. For example, tsdemux does not
       * guarantee that the stream pushed when seeking contains keyframes, and
       * mkvdemux may incorrectly treat non-keyframes as keyframes.
       */
      if (ret && priv->had_input_data) {
        if (!priv->had_output_data)
          GST_WARNING_OBJECT (decoder, "No valid frames decoded before end of stream");
        else if (!priv->stream_had_output_data)
          GST_ELEMENT_ERROR (decoder, STREAM, DECODE,
              ("No valid frames decoded before end of stream"),
              ("no valid frames found"));
      }
#else
      if (ret && priv->had_input_data && !priv->had_output_data) {
        GST_ELEMENT_ERROR (decoder, STREAM, DECODE,
            ("No valid frames decoded before end of stream"),
            ("no valid frames found"));
      }
#endif

      /* Forward EOS immediately. This is required because no
       * buffer or serialized event will come after EOS and
       * nothing could trigger another _finish_frame() call.
       *
       * The subclass can override this behaviour by overriding
       * the ::sink_event() vfunc and not chaining up to the
       * parent class' ::sink_event() until a later time.
       */
      forward_immediate = TRUE;
      break;
    }
    case GST_EVENT_GAP:
    {
      GstClockTime timestamp, duration;
      GstGapFlags gap_flags = 0;
      GstFlowReturn flow_ret = GST_FLOW_OK;
      gboolean needs_reconfigure = FALSE;
      GList *events;
      GList *frame_events;

      gst_event_parse_gap (event, &timestamp, &duration);
      gst_event_parse_gap_flags (event, &gap_flags);

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      /* If this is not missing data, or the subclass does not handle it
       * specifically, then drain out the decoder and forward the event
       * directly. */
      if ((gap_flags & GST_GAP_FLAG_MISSING_DATA) == 0
          || !decoder_class->handle_missing_data
          || decoder_class->handle_missing_data (decoder, timestamp,
              duration)) {
        if (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS)
          flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
        ret = (flow_ret == GST_FLOW_OK);

        /* Ensure we have caps before forwarding the event */
        if (!decoder->priv->output_state) {
          if (!gst_video_decoder_negotiate_default_caps (decoder)) {
            GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
            GST_ELEMENT_ERROR (decoder, STREAM, FORMAT, (NULL),
                ("Decoder output not negotiated before GAP event."));
            forward_immediate = TRUE;
            break;
          }
          needs_reconfigure = TRUE;
        }

        needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad)
            || needs_reconfigure;
        if (decoder->priv->output_state_changed || needs_reconfigure) {
          if (!gst_video_decoder_negotiate_unlocked (decoder)) {
            GST_WARNING_OBJECT (decoder, "Failed to negotiate with downstream");
            gst_pad_mark_reconfigure (decoder->srcpad);
          }
        }

        GST_DEBUG_OBJECT (decoder, "Pushing all pending serialized events"
            " before the gap");
        events = decoder->priv->pending_events;
        frame_events = decoder->priv->current_frame_events;
        decoder->priv->pending_events = NULL;
        decoder->priv->current_frame_events = NULL;

        GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

        gst_video_decoder_push_event_list (decoder, events);
        gst_video_decoder_push_event_list (decoder, frame_events);

        /* Forward GAP immediately. Everything is drained after
         * the GAP event and we can forward this event immediately
         * now without having buffers out of order.
         */
        forward_immediate = TRUE;
      } else {
        GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
        gst_clear_event (&event);
      }
      break;
    }
    case GST_EVENT_CUSTOM_DOWNSTREAM:
    {
      gboolean in_still;
      GstFlowReturn flow_ret = GST_FLOW_OK;

      if (gst_video_event_parse_still_frame (event, &in_still)) {
        if (in_still) {
          GST_DEBUG_OBJECT (decoder, "draining current data for still-frame");
          GST_VIDEO_DECODER_STREAM_LOCK (decoder);
          flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
          GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
          ret = (flow_ret == GST_FLOW_OK);
        }
        /* Forward STILL_FRAME immediately. Everything is drained after
         * the STILL_FRAME event and we can forward this event immediately
         * now without having buffers out of order.
         */
        forward_immediate = TRUE;
      }
      break;
    }
1599 case GST_EVENT_SEGMENT:
1600 {
1601 GstSegment segment;
1602
1603 gst_event_copy_segment (event, &segment);
1604
1605 if (segment.format == GST_FORMAT_TIME) {
1606 GST_DEBUG_OBJECT (decoder,
1607 "received TIME SEGMENT %" GST_SEGMENT_FORMAT, &segment);
1608 } else {
1609 gint64 start;
1610
1611 GST_DEBUG_OBJECT (decoder,
1612 "received SEGMENT %" GST_SEGMENT_FORMAT, &segment);
1613
1614 /* handle newsegment as a result from our legacy simple seeking */
1615 /* note that initial 0 should convert to 0 in any case */
1616 if (priv->do_estimate_rate &&
1617 gst_pad_query_convert (decoder->sinkpad, GST_FORMAT_BYTES,
1618 segment.start, GST_FORMAT_TIME, &start)) {
1619 /* best attempt convert */
1620 /* as these are only estimates, stop is kept open-ended to avoid
1621 * premature cutting */
1622 GST_DEBUG_OBJECT (decoder,
1623 "converted to TIME start %" GST_TIME_FORMAT,
1624 GST_TIME_ARGS (start));
1625 segment.start = start;
1626 segment.stop = GST_CLOCK_TIME_NONE;
1627 segment.time = start;
1628 /* replace event */
1629 gst_event_unref (event);
1630 event = gst_event_new_segment (&segment);
1631 } else {
1632 goto newseg_wrong_format;
1633 }
1634 }
1635
1636 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1637
1638 /* Update the decode flags in the segment if we have an instant-rate
1639 * override active */
1640 GST_OBJECT_LOCK (decoder);
1641 if (!priv->decode_flags_override)
1642 priv->decode_flags = segment.flags;
1643 else {
1644 segment.flags &= ~GST_SEGMENT_INSTANT_FLAGS;
1645 segment.flags |= priv->decode_flags & GST_SEGMENT_INSTANT_FLAGS;
1646 }
1647
1648 priv->base_timestamp = GST_CLOCK_TIME_NONE;
1649 priv->base_picture_number = 0;
1650
1651 decoder->input_segment = segment;
1652 decoder->priv->in_out_segment_sync = FALSE;
1653
1654 GST_OBJECT_UNLOCK (decoder);
1655 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1656
1657 break;
1658 }
1659 case GST_EVENT_INSTANT_RATE_CHANGE:
1660 {
1661 GstSegmentFlags flags;
1662 GstSegment *seg;
1663
1664 gst_event_parse_instant_rate_change (event, NULL, &flags);
1665
1666 GST_OBJECT_LOCK (decoder);
1667 priv->decode_flags_override = TRUE;
1668 priv->decode_flags = flags;
1669
1670 /* Update the input segment flags */
1671 seg = &decoder->input_segment;
1672 seg->flags &= ~GST_SEGMENT_INSTANT_FLAGS;
1673 seg->flags |= priv->decode_flags & GST_SEGMENT_INSTANT_FLAGS;
1674 GST_OBJECT_UNLOCK (decoder);
1675 break;
1676 }
1677 case GST_EVENT_FLUSH_STOP:
1678 {
1679 GList *l;
1680
1681 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1682 for (l = priv->frames.head; l; l = l->next) {
1683 GstVideoCodecFrame *frame = l->data;
1684
1685 frame->events = _flush_events (decoder->srcpad, frame->events);
1686 }
1687 priv->current_frame_events = _flush_events (decoder->srcpad,
1688 decoder->priv->current_frame_events);
1689
1690 /* well, this is kind of worse than a DISCONT */
1691 gst_video_decoder_flush (decoder, TRUE);
1692 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1693 /* Forward FLUSH_STOP immediately. This is required because it is
1694 * expected to be forwarded immediately and no buffers are queued
1695 * anyway.
1696 */
1697 forward_immediate = TRUE;
1698 break;
1699 }
1700 case GST_EVENT_TAG:
1701 {
1702 GstTagList *tags;
1703
1704 gst_event_parse_tag (event, &tags);
1705
1706 if (gst_tag_list_get_scope (tags) == GST_TAG_SCOPE_STREAM) {
1707 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1708 if (priv->upstream_tags != tags) {
1709 if (priv->upstream_tags)
1710 gst_tag_list_unref (priv->upstream_tags);
1711 priv->upstream_tags = gst_tag_list_ref (tags);
1712 GST_INFO_OBJECT (decoder, "upstream tags: %" GST_PTR_FORMAT, tags);
1713 }
1714 gst_event_unref (event);
1715 event = gst_video_decoder_create_merged_tags_event (decoder);
1716 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1717 if (!event)
1718 ret = TRUE;
1719 }
1720 break;
1721 }
1722 default:
1723 break;
1724 }
1725
1726 /* Forward non-serialized events immediately, and all other
1727 * events which can be forwarded immediately without potentially
1728 * causing the event to go out of order with other events and
1729 * buffers as decided above.
1730 */
1731 if (event) {
1732 if (!GST_EVENT_IS_SERIALIZED (event) || forward_immediate) {
1733 ret = gst_video_decoder_push_event (decoder, event);
1734 } else {
1735 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1736 decoder->priv->current_frame_events =
1737 g_list_prepend (decoder->priv->current_frame_events, event);
1738 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1739 ret = TRUE;
1740 }
1741 }
1742
1743 return ret;
1744
1745 newseg_wrong_format:
1746 {
1747     GST_DEBUG_OBJECT (decoder, "received non-TIME newsegment");
1748 gst_event_unref (event);
1749 /* SWALLOW EVENT */
1750 return TRUE;
1751 }
1752 }
1753
1754 static gboolean
1755 gst_video_decoder_sink_event (GstPad * pad, GstObject * parent,
1756 GstEvent * event)
1757 {
1758 GstVideoDecoder *decoder;
1759 GstVideoDecoderClass *decoder_class;
1760 gboolean ret = FALSE;
1761
1762 decoder = GST_VIDEO_DECODER (parent);
1763 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
1764
1765 GST_DEBUG_OBJECT (decoder, "received event %d, %s", GST_EVENT_TYPE (event),
1766 GST_EVENT_TYPE_NAME (event));
1767
1768 if (decoder_class->sink_event)
1769 ret = decoder_class->sink_event (decoder, event);
1770
1771 return ret;
1772 }
1773
1774 /* perform upstream byte <-> time conversion (duration, seeking)
1775 * if subclass allows and if enough data for moderately decent conversion */
1776 static inline gboolean
1777 gst_video_decoder_do_byte (GstVideoDecoder * dec)
1778 {
1779 gboolean ret;
1780
1781 GST_OBJECT_LOCK (dec);
1782 ret = dec->priv->do_estimate_rate && (dec->priv->bytes_out > 0)
1783 && (dec->priv->time > GST_SECOND);
1784 GST_OBJECT_UNLOCK (dec);
1785
1786 return ret;
1787 }
1788
1789 static gboolean
1790 gst_video_decoder_do_seek (GstVideoDecoder * dec, GstEvent * event)
1791 {
1792 GstFormat format;
1793 GstSeekFlags flags;
1794 GstSeekType start_type, end_type;
1795 gdouble rate;
1796 gint64 start, start_time, end_time;
1797 GstSegment seek_segment;
1798 guint32 seqnum;
1799
1800 gst_event_parse_seek (event, &rate, &format, &flags, &start_type,
1801 &start_time, &end_type, &end_time);
1802
1803 /* we'll handle plain open-ended flushing seeks with the simple approach */
1804 if (rate != 1.0) {
1805 GST_DEBUG_OBJECT (dec, "unsupported seek: rate");
1806 return FALSE;
1807 }
1808
1809 if (start_type != GST_SEEK_TYPE_SET) {
1810 GST_DEBUG_OBJECT (dec, "unsupported seek: start time");
1811 return FALSE;
1812 }
1813
1814 if ((end_type != GST_SEEK_TYPE_SET && end_type != GST_SEEK_TYPE_NONE) ||
1815 (end_type == GST_SEEK_TYPE_SET && end_time != GST_CLOCK_TIME_NONE)) {
1816 GST_DEBUG_OBJECT (dec, "unsupported seek: end time");
1817 return FALSE;
1818 }
1819
1820 if (!(flags & GST_SEEK_FLAG_FLUSH)) {
1821 GST_DEBUG_OBJECT (dec, "unsupported seek: not flushing");
1822 return FALSE;
1823 }
1824
1825 memcpy (&seek_segment, &dec->output_segment, sizeof (seek_segment));
1826 gst_segment_do_seek (&seek_segment, rate, format, flags, start_type,
1827 start_time, end_type, end_time, NULL);
1828 start_time = seek_segment.position;
1829
1830 if (!gst_pad_query_convert (dec->sinkpad, GST_FORMAT_TIME, start_time,
1831 GST_FORMAT_BYTES, &start)) {
1832 GST_DEBUG_OBJECT (dec, "conversion failed");
1833 return FALSE;
1834 }
1835
1836 seqnum = gst_event_get_seqnum (event);
1837 event = gst_event_new_seek (1.0, GST_FORMAT_BYTES, flags,
1838 GST_SEEK_TYPE_SET, start, GST_SEEK_TYPE_NONE, -1);
1839 gst_event_set_seqnum (event, seqnum);
1840
1841 GST_DEBUG_OBJECT (dec, "seeking to %" GST_TIME_FORMAT " at byte offset %"
1842 G_GINT64_FORMAT, GST_TIME_ARGS (start_time), start);
1843
1844 return gst_pad_push_event (dec->sinkpad, event);
1845 }
1846
1847 static gboolean
1848 gst_video_decoder_src_event_default (GstVideoDecoder * decoder,
1849 GstEvent * event)
1850 {
1851 GstVideoDecoderPrivate *priv;
1852 gboolean res = FALSE;
1853
1854 priv = decoder->priv;
1855
1856 GST_DEBUG_OBJECT (decoder,
1857 "received event %d, %s", GST_EVENT_TYPE (event),
1858 GST_EVENT_TYPE_NAME (event));
1859
1860 switch (GST_EVENT_TYPE (event)) {
1861 case GST_EVENT_SEEK:
1862 {
1863 GstFormat format;
1864 gdouble rate;
1865 GstSeekFlags flags;
1866 GstSeekType start_type, stop_type;
1867 gint64 start, stop;
1868 gint64 tstart, tstop;
1869 guint32 seqnum;
1870
1871 gst_event_parse_seek (event, &rate, &format, &flags, &start_type, &start,
1872 &stop_type, &stop);
1873 seqnum = gst_event_get_seqnum (event);
1874
1875 /* upstream gets a chance first */
1876 if ((res = gst_pad_push_event (decoder->sinkpad, event)))
1877 break;
1878
1879 /* if upstream fails for a time seek, maybe we can help if allowed */
1880 if (format == GST_FORMAT_TIME) {
1881 if (gst_video_decoder_do_byte (decoder))
1882 res = gst_video_decoder_do_seek (decoder, event);
1883 break;
1884 }
1885
1886 /* ... though a non-time seek can be aided as well */
1887 /* First bring the requested format to time */
1888 if (!(res =
1889 gst_pad_query_convert (decoder->srcpad, format, start,
1890 GST_FORMAT_TIME, &tstart)))
1891 goto convert_error;
1892 if (!(res =
1893 gst_pad_query_convert (decoder->srcpad, format, stop,
1894 GST_FORMAT_TIME, &tstop)))
1895 goto convert_error;
1896
1897 /* then seek with time on the peer */
1898 event = gst_event_new_seek (rate, GST_FORMAT_TIME,
1899 flags, start_type, tstart, stop_type, tstop);
1900 gst_event_set_seqnum (event, seqnum);
1901
1902 res = gst_pad_push_event (decoder->sinkpad, event);
1903 break;
1904 }
1905 case GST_EVENT_QOS:
1906 {
1907 GstQOSType type;
1908 gdouble proportion;
1909 GstClockTimeDiff diff;
1910 GstClockTime timestamp;
1911
1912       gst_event_parse_qos (event, &type, &proportion, &diff, &timestamp);
1913
1914 GST_OBJECT_LOCK (decoder);
1915 priv->proportion = proportion;
1916 if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (timestamp))) {
1917 if (G_UNLIKELY (diff > 0)) {
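          /* late buffer: back off by twice the reported lateness plus one
           * frame duration before considering frames on time again */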
1918 priv->earliest_time = timestamp + 2 * diff + priv->qos_frame_duration;
1919 } else {
1920 priv->earliest_time = timestamp + diff;
1921 }
1922 } else {
1923 priv->earliest_time = GST_CLOCK_TIME_NONE;
1924 }
1925 GST_OBJECT_UNLOCK (decoder);
1926
1927 GST_DEBUG_OBJECT (decoder,
1928 "got QoS %" GST_TIME_FORMAT ", %" GST_STIME_FORMAT ", %g",
1929 GST_TIME_ARGS (timestamp), GST_STIME_ARGS (diff), proportion);
1930
1931 res = gst_pad_push_event (decoder->sinkpad, event);
1932 break;
1933 }
1934 default:
1935 res = gst_pad_push_event (decoder->sinkpad, event);
1936 break;
1937 }
1938 done:
1939 return res;
1940
1941 convert_error:
1942 GST_DEBUG_OBJECT (decoder, "could not convert format");
1943 goto done;
1944 }
1945
1946 static gboolean
1947 gst_video_decoder_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
1948 {
1949 GstVideoDecoder *decoder;
1950 GstVideoDecoderClass *decoder_class;
1951 gboolean ret = FALSE;
1952
1953 decoder = GST_VIDEO_DECODER (parent);
1954 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
1955
1956 GST_DEBUG_OBJECT (decoder, "received event %d, %s", GST_EVENT_TYPE (event),
1957 GST_EVENT_TYPE_NAME (event));
1958
1959 if (decoder_class->src_event)
1960 ret = decoder_class->src_event (decoder, event);
1961
1962 return ret;
1963 }
1964
1965 static gboolean
1966 gst_video_decoder_src_query_default (GstVideoDecoder * dec, GstQuery * query)
1967 {
1968 GstPad *pad = GST_VIDEO_DECODER_SRC_PAD (dec);
1969 gboolean res = TRUE;
1970
1971 GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query);
1972
1973 switch (GST_QUERY_TYPE (query)) {
1974 case GST_QUERY_POSITION:
1975 {
1976 GstFormat format;
1977 gint64 time, value;
1978
1979 /* upstream gets a chance first */
1980 if ((res = gst_pad_peer_query (dec->sinkpad, query))) {
1981 GST_LOG_OBJECT (dec, "returning peer response");
1982 break;
1983 }
1984
1985 /* Refuse BYTES format queries. If it made sense to
1986 * answer them, upstream would have already */
1987 gst_query_parse_position (query, &format, NULL);
1988
1989 if (format == GST_FORMAT_BYTES) {
1990 GST_LOG_OBJECT (dec, "Ignoring BYTES position query");
1991 break;
1992 }
1993
1994 /* we start from the last seen time */
1995 time = dec->priv->last_timestamp_out;
1996 /* correct for the segment values */
1997 time = gst_segment_to_stream_time (&dec->output_segment,
1998 GST_FORMAT_TIME, time);
1999
2000 GST_LOG_OBJECT (dec,
2001 "query %p: our time: %" GST_TIME_FORMAT, query, GST_TIME_ARGS (time));
2002
2003 /* and convert to the final format */
2004 if (!(res = gst_pad_query_convert (pad, GST_FORMAT_TIME, time,
2005 format, &value)))
2006 break;
2007
2008 gst_query_set_position (query, format, value);
2009
2010 GST_LOG_OBJECT (dec,
2011 "query %p: we return %" G_GINT64_FORMAT " (format %u)", query, value,
2012 format);
2013 break;
2014 }
2015 case GST_QUERY_DURATION:
2016 {
2017 GstFormat format;
2018
2019 /* upstream in any case */
2020 if ((res = gst_pad_query_default (pad, GST_OBJECT (dec), query)))
2021 break;
2022
2023 gst_query_parse_duration (query, &format, NULL);
2024 /* try answering TIME by converting from BYTE if subclass allows */
2025 if (format == GST_FORMAT_TIME && gst_video_decoder_do_byte (dec)) {
2026 gint64 value;
2027
2028 if (gst_pad_peer_query_duration (dec->sinkpad, GST_FORMAT_BYTES,
2029 &value)) {
2030 GST_LOG_OBJECT (dec, "upstream size %" G_GINT64_FORMAT, value);
2031 if (gst_pad_query_convert (dec->sinkpad,
2032 GST_FORMAT_BYTES, value, GST_FORMAT_TIME, &value)) {
2033 gst_query_set_duration (query, GST_FORMAT_TIME, value);
2034 res = TRUE;
2035 }
2036 }
2037 }
2038 break;
2039 }
2040 case GST_QUERY_CONVERT:
2041 {
2042 GstFormat src_fmt, dest_fmt;
2043 gint64 src_val, dest_val;
2044
2045 GST_DEBUG_OBJECT (dec, "convert query");
2046
2047 gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
2048 GST_OBJECT_LOCK (dec);
2049 if (dec->priv->output_state != NULL)
2050 res = __gst_video_rawvideo_convert (dec->priv->output_state,
2051 src_fmt, src_val, &dest_fmt, &dest_val);
2052 else
2053 res = FALSE;
2054 GST_OBJECT_UNLOCK (dec);
2055 if (!res)
2056 goto error;
2057 gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
2058 break;
2059 }
2060 case GST_QUERY_LATENCY:
2061 {
2062 gboolean live;
2063 GstClockTime min_latency, max_latency;
2064
2065 res = gst_pad_peer_query (dec->sinkpad, query);
2066 if (res) {
2067 gst_query_parse_latency (query, &live, &min_latency, &max_latency);
2068 GST_DEBUG_OBJECT (dec, "Peer qlatency: live %d, min %"
2069 GST_TIME_FORMAT " max %" GST_TIME_FORMAT, live,
2070 GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));
2071
2072 GST_OBJECT_LOCK (dec);
2073 min_latency += dec->priv->min_latency;
2074 if (max_latency == GST_CLOCK_TIME_NONE
2075 || dec->priv->max_latency == GST_CLOCK_TIME_NONE)
2076 max_latency = GST_CLOCK_TIME_NONE;
2077 else
2078 max_latency += dec->priv->max_latency;
2079 GST_OBJECT_UNLOCK (dec);
2080
2081 gst_query_set_latency (query, live, min_latency, max_latency);
2082 }
2083 }
2084 break;
2085 default:
2086 res = gst_pad_query_default (pad, GST_OBJECT (dec), query);
2087 }
2088 return res;
2089
2090 error:
2091 GST_ERROR_OBJECT (dec, "query failed");
2092 return res;
2093 }
2094
2095 static gboolean
2096 gst_video_decoder_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
2097 {
2098 GstVideoDecoder *decoder;
2099 GstVideoDecoderClass *decoder_class;
2100 gboolean ret = FALSE;
2101
2102 decoder = GST_VIDEO_DECODER (parent);
2103 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
2104
2105 GST_DEBUG_OBJECT (decoder, "received query %d, %s", GST_QUERY_TYPE (query),
2106 GST_QUERY_TYPE_NAME (query));
2107
2108 if (decoder_class->src_query)
2109 ret = decoder_class->src_query (decoder, query);
2110
2111 return ret;
2112 }
2113
2114 /**
2115 * gst_video_decoder_proxy_getcaps:
2116 * @decoder: a #GstVideoDecoder
2117 * @caps: (allow-none): initial caps
2118 * @filter: (allow-none): filter caps
2119 *
2120 * Returns caps that express @caps (or sink template caps if @caps == NULL)
2121 * restricted to resolution/format/... combinations supported by downstream
2122 * elements.
2123 *
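 * A minimal usage sketch from a hypothetical subclass ::getcaps
 * implementation (my_decoder_getcaps is an assumed name, not part of
 * this API):
 *
 * |[<!-- language="C" -->
 * static GstCaps *
 * my_decoder_getcaps (GstVideoDecoder * decoder, GstCaps * filter)
 * {
 *   return gst_video_decoder_proxy_getcaps (decoder, NULL, filter);
 * }
 * ]|
 *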
2124 * Returns: (transfer full): a #GstCaps owned by caller
2125 *
2126 * Since: 1.6
2127 */
2128 GstCaps *
2129 gst_video_decoder_proxy_getcaps (GstVideoDecoder * decoder, GstCaps * caps,
2130 GstCaps * filter)
2131 {
2132 return __gst_video_element_proxy_getcaps (GST_ELEMENT_CAST (decoder),
2133 GST_VIDEO_DECODER_SINK_PAD (decoder),
2134 GST_VIDEO_DECODER_SRC_PAD (decoder), caps, filter);
2135 }
2136
2137 static GstCaps *
2138 gst_video_decoder_sink_getcaps (GstVideoDecoder * decoder, GstCaps * filter)
2139 {
2140 GstVideoDecoderClass *klass;
2141 GstCaps *caps;
2142
2143 klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
2144
2145 if (klass->getcaps)
2146 caps = klass->getcaps (decoder, filter);
2147 else
2148 caps = gst_video_decoder_proxy_getcaps (decoder, NULL, filter);
2149
2150 GST_LOG_OBJECT (decoder, "Returning caps %" GST_PTR_FORMAT, caps);
2151
2152 return caps;
2153 }
2154
2155 static gboolean
2156 gst_video_decoder_sink_query_default (GstVideoDecoder * decoder,
2157 GstQuery * query)
2158 {
2159 GstPad *pad = GST_VIDEO_DECODER_SINK_PAD (decoder);
2160 GstVideoDecoderPrivate *priv;
2161 gboolean res = FALSE;
2162
2163 priv = decoder->priv;
2164
2165 GST_LOG_OBJECT (decoder, "handling query: %" GST_PTR_FORMAT, query);
2166
2167 switch (GST_QUERY_TYPE (query)) {
2168 case GST_QUERY_CONVERT:
2169 {
2170 GstFormat src_fmt, dest_fmt;
2171 gint64 src_val, dest_val;
2172
2173 gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
2174 GST_OBJECT_LOCK (decoder);
2175 res =
2176 __gst_video_encoded_video_convert (priv->bytes_out, priv->time,
2177 src_fmt, src_val, &dest_fmt, &dest_val);
2178 GST_OBJECT_UNLOCK (decoder);
2179 if (!res)
2180 goto error;
2181 gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
2182 break;
2183 }
2184 case GST_QUERY_ALLOCATION:{
2185 GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
2186
2187 if (klass->propose_allocation)
2188 res = klass->propose_allocation (decoder, query);
2189 break;
2190 }
2191 case GST_QUERY_CAPS:{
2192 GstCaps *filter, *caps;
2193
2194 gst_query_parse_caps (query, &filter);
2195 caps = gst_video_decoder_sink_getcaps (decoder, filter);
2196 gst_query_set_caps_result (query, caps);
2197 gst_caps_unref (caps);
2198 res = TRUE;
2199 break;
2200 }
2201 case GST_QUERY_ACCEPT_CAPS:{
2202 if (decoder->priv->use_default_pad_acceptcaps) {
2203 res =
2204 gst_pad_query_default (GST_VIDEO_DECODER_SINK_PAD (decoder),
2205 GST_OBJECT_CAST (decoder), query);
2206 } else {
2207 GstCaps *caps;
2208 GstCaps *allowed_caps;
2209 GstCaps *template_caps;
2210 gboolean accept;
2211
2212 gst_query_parse_accept_caps (query, &caps);
2213
2214 template_caps = gst_pad_get_pad_template_caps (pad);
2215 accept = gst_caps_is_subset (caps, template_caps);
2216 gst_caps_unref (template_caps);
2217
2218 if (accept) {
2219 allowed_caps =
2220 gst_pad_query_caps (GST_VIDEO_DECODER_SINK_PAD (decoder), caps);
2221
2222 accept = gst_caps_can_intersect (caps, allowed_caps);
2223
2224 gst_caps_unref (allowed_caps);
2225 }
2226
2227 gst_query_set_accept_caps_result (query, accept);
2228 res = TRUE;
2229 }
2230 break;
2231 }
2232 default:
2233 res = gst_pad_query_default (pad, GST_OBJECT (decoder), query);
2234 break;
2235 }
2236 done:
2237
2238 return res;
2239 error:
2240 GST_DEBUG_OBJECT (decoder, "query failed");
2241 goto done;
2242
2243 }
2244
2245 static gboolean
2246 gst_video_decoder_sink_query (GstPad * pad, GstObject * parent,
2247 GstQuery * query)
2248 {
2249 GstVideoDecoder *decoder;
2250 GstVideoDecoderClass *decoder_class;
2251 gboolean ret = FALSE;
2252
2253 decoder = GST_VIDEO_DECODER (parent);
2254 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
2255
2256 GST_DEBUG_OBJECT (decoder, "received query %d, %s", GST_QUERY_TYPE (query),
2257 GST_QUERY_TYPE_NAME (query));
2258
2259 if (decoder_class->sink_query)
2260 ret = decoder_class->sink_query (decoder, query);
2261
2262 return ret;
2263 }
2264
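/* Per input buffer timing info, keyed by the accumulated input byte
 * offset at the start of the buffer (see
 * gst_video_decoder_add_buffer_info () below) */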
2265 typedef struct _Timestamp Timestamp;
2266 struct _Timestamp
2267 {
2268 guint64 offset;
2269 GstClockTime pts;
2270 GstClockTime dts;
2271 GstClockTime duration;
2272 guint flags;
2273 };
2274
2275 static void
2276 timestamp_free (Timestamp * ts)
2277 {
2278 g_slice_free (Timestamp, ts);
2279 }
2280
2281 static void
2282 gst_video_decoder_add_buffer_info (GstVideoDecoder * decoder,
2283 GstBuffer * buffer)
2284 {
2285 GstVideoDecoderPrivate *priv = decoder->priv;
2286 Timestamp *ts;
2287
2288 if (!GST_BUFFER_PTS_IS_VALID (buffer) &&
2289 !GST_BUFFER_DTS_IS_VALID (buffer) &&
2290 !GST_BUFFER_DURATION_IS_VALID (buffer) &&
2291 GST_BUFFER_FLAGS (buffer) == 0) {
2292 /* Save memory - don't bother storing info
2293 * for buffers with no distinguishing info */
2294 return;
2295 }
2296
2297 ts = g_slice_new (Timestamp);
2298
2299 GST_LOG_OBJECT (decoder,
2300 "adding PTS %" GST_TIME_FORMAT " DTS %" GST_TIME_FORMAT
2301 " (offset:%" G_GUINT64_FORMAT ")",
2302 GST_TIME_ARGS (GST_BUFFER_PTS (buffer)),
2303 GST_TIME_ARGS (GST_BUFFER_DTS (buffer)), priv->input_offset);
2304
2305 ts->offset = priv->input_offset;
2306 ts->pts = GST_BUFFER_PTS (buffer);
2307 ts->dts = GST_BUFFER_DTS (buffer);
2308 ts->duration = GST_BUFFER_DURATION (buffer);
2309 ts->flags = GST_BUFFER_FLAGS (buffer);
2310
2311 g_queue_push_tail (&priv->timestamps, ts);
2312
2313 if (g_queue_get_length (&priv->timestamps) > 40) {
2314 GST_WARNING_OBJECT (decoder,
2315 "decoder timestamp list getting long: %d timestamps,"
2316 "possible internal leaking?", g_queue_get_length (&priv->timestamps));
2317 }
2318 }
2319
2320 static void
2321 gst_video_decoder_get_buffer_info_at_offset (GstVideoDecoder *
2322 decoder, guint64 offset, GstClockTime * pts, GstClockTime * dts,
2323 GstClockTime * duration, guint * flags)
2324 {
2325 #ifndef GST_DISABLE_GST_DEBUG
2326 guint64 got_offset = 0;
2327 #endif
2328 Timestamp *ts;
2329 GList *g;
2330
2331 *pts = GST_CLOCK_TIME_NONE;
2332 *dts = GST_CLOCK_TIME_NONE;
2333 *duration = GST_CLOCK_TIME_NONE;
2334 *flags = 0;
2335
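  /* walk stored timestamps from oldest to newest, consuming every entry
   * recorded at or before the requested offset; the last one consumed
   * supplies the returned values */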
2336 g = decoder->priv->timestamps.head;
2337 while (g) {
2338 ts = g->data;
2339 if (ts->offset <= offset) {
2340 GList *next = g->next;
2341 #ifndef GST_DISABLE_GST_DEBUG
2342 got_offset = ts->offset;
2343 #endif
2344 *pts = ts->pts;
2345 *dts = ts->dts;
2346 *duration = ts->duration;
2347 *flags = ts->flags;
2348 g_queue_delete_link (&decoder->priv->timestamps, g);
2349 g = next;
2350 timestamp_free (ts);
2351 } else {
2352 break;
2353 }
2354 }
2355
2356 GST_LOG_OBJECT (decoder,
2357 "got PTS %" GST_TIME_FORMAT " DTS %" GST_TIME_FORMAT " flags %x @ offs %"
2358 G_GUINT64_FORMAT " (wanted offset:%" G_GUINT64_FORMAT ")",
2359 GST_TIME_ARGS (*pts), GST_TIME_ARGS (*dts), *flags, got_offset, offset);
2360 }
2361
2362 #if !GLIB_CHECK_VERSION(2, 60, 0)
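/* g_queue_clear_full () is only available from GLib 2.60 on; provide a
 * minimal fallback with the same semantics for older versions */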
2363 #define g_queue_clear_full queue_clear_full
2364 static void
2365 queue_clear_full (GQueue * queue, GDestroyNotify free_func)
2366 {
2367 gpointer data;
2368
2369 while ((data = g_queue_pop_head (queue)) != NULL)
2370 free_func (data);
2371 }
2372 #endif
2373
2374 static void
2375 gst_video_decoder_clear_queues (GstVideoDecoder * dec)
2376 {
2377 GstVideoDecoderPrivate *priv = dec->priv;
2378
2379 g_list_free_full (priv->output_queued,
2380 (GDestroyNotify) gst_mini_object_unref);
2381 priv->output_queued = NULL;
2382
2383 g_list_free_full (priv->gather, (GDestroyNotify) gst_mini_object_unref);
2384 priv->gather = NULL;
2385 g_list_free_full (priv->decode, (GDestroyNotify) gst_video_codec_frame_unref);
2386 priv->decode = NULL;
2387 g_list_free_full (priv->parse, (GDestroyNotify) gst_mini_object_unref);
2388 priv->parse = NULL;
2389 g_list_free_full (priv->parse_gather,
2390 (GDestroyNotify) gst_video_codec_frame_unref);
2391 priv->parse_gather = NULL;
2392 g_queue_clear_full (&priv->frames,
2393 (GDestroyNotify) gst_video_codec_frame_unref);
2394 }
2395
2396 static void
2397 gst_video_decoder_reset (GstVideoDecoder * decoder, gboolean full,
2398 gboolean flush_hard)
2399 {
2400 GstVideoDecoderPrivate *priv = decoder->priv;
2401
2402 GST_DEBUG_OBJECT (decoder, "reset full %d", full);
2403
2404 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
2405
2406 if (full || flush_hard) {
2407 gst_segment_init (&decoder->input_segment, GST_FORMAT_UNDEFINED);
2408 gst_segment_init (&decoder->output_segment, GST_FORMAT_UNDEFINED);
2409 gst_video_decoder_clear_queues (decoder);
2410 decoder->priv->in_out_segment_sync = TRUE;
2411 #ifdef OHOS_OPT_PERFORMANCE
2412 // ohos.opt.performance.0006: the PTS segment of the first frame is calibrated to improve the performance.
2413 priv->has_recv_first_key_frame = FALSE;
2414 priv->has_push_first_frame = FALSE;
2415 #endif
2416
2417 if (priv->current_frame) {
2418 gst_video_codec_frame_unref (priv->current_frame);
2419 priv->current_frame = NULL;
2420 }
2421
2422 g_list_free_full (priv->current_frame_events,
2423 (GDestroyNotify) gst_event_unref);
2424 priv->current_frame_events = NULL;
2425 g_list_free_full (priv->pending_events, (GDestroyNotify) gst_event_unref);
2426 priv->pending_events = NULL;
2427
2428 priv->error_count = 0;
2429 priv->had_output_data = FALSE;
2430 priv->had_input_data = FALSE;
2431
2432 GST_OBJECT_LOCK (decoder);
2433 priv->earliest_time = GST_CLOCK_TIME_NONE;
2434 priv->proportion = 0.5;
2435 priv->decode_flags_override = FALSE;
2436
2437 priv->request_sync_point_flags = 0;
2438 priv->request_sync_point_frame_number = REQUEST_SYNC_POINT_UNSET;
2439 priv->last_force_key_unit_time = GST_CLOCK_TIME_NONE;
2440 GST_OBJECT_UNLOCK (decoder);
2441 priv->distance_from_sync = -1;
2442 }
2443
2444 if (full) {
2445 if (priv->input_state)
2446 gst_video_codec_state_unref (priv->input_state);
2447 priv->input_state = NULL;
2448 GST_OBJECT_LOCK (decoder);
2449 if (priv->output_state)
2450 gst_video_codec_state_unref (priv->output_state);
2451 priv->output_state = NULL;
2452
2453 priv->qos_frame_duration = 0;
2454 GST_OBJECT_UNLOCK (decoder);
2455
2456 if (priv->tags)
2457 gst_tag_list_unref (priv->tags);
2458 priv->tags = NULL;
2459 priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
2460 if (priv->upstream_tags) {
2461 gst_tag_list_unref (priv->upstream_tags);
2462 priv->upstream_tags = NULL;
2463 }
2464 priv->tags_changed = FALSE;
2465 priv->reordered_output = FALSE;
2466
2467 priv->dropped = 0;
2468 priv->processed = 0;
2469
2470 priv->decode_frame_number = 0;
2471 priv->base_picture_number = 0;
2472
2473 if (priv->pool) {
2474 GST_DEBUG_OBJECT (decoder, "deactivate pool %" GST_PTR_FORMAT,
2475 priv->pool);
2476 gst_buffer_pool_set_active (priv->pool, FALSE);
2477 gst_object_unref (priv->pool);
2478 priv->pool = NULL;
2479 }
2480
2481 if (priv->allocator) {
2482 gst_object_unref (priv->allocator);
2483 priv->allocator = NULL;
2484 }
2485 }
2486
2487 priv->discont = TRUE;
2488
2489 priv->base_timestamp = GST_CLOCK_TIME_NONE;
2490 priv->last_timestamp_out = GST_CLOCK_TIME_NONE;
2491 priv->pts_delta = GST_CLOCK_TIME_NONE;
2492
2493 priv->input_offset = 0;
2494 priv->frame_offset = 0;
2495 gst_adapter_clear (priv->input_adapter);
2496 gst_adapter_clear (priv->output_adapter);
2497 g_queue_clear_full (&priv->timestamps, (GDestroyNotify) timestamp_free);
2498
2499 GST_OBJECT_LOCK (decoder);
2500 priv->bytes_out = 0;
2501 priv->time = 0;
2502 GST_OBJECT_UNLOCK (decoder);
2503
2504 #ifndef GST_DISABLE_DEBUG
2505 priv->last_reset_time = gst_util_get_timestamp ();
2506 #endif
2507
2508 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2509 }
2510
2511 static GstFlowReturn
2512 gst_video_decoder_chain_forward (GstVideoDecoder * decoder,
2513 GstBuffer * buf, gboolean at_eos)
2514 {
2515 GstVideoDecoderPrivate *priv;
2516 GstVideoDecoderClass *klass;
2517 GstFlowReturn ret = GST_FLOW_OK;
2518
2519 klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
2520 priv = decoder->priv;
2521
2522 g_return_val_if_fail (priv->packetized || klass->parse, GST_FLOW_ERROR);
2523
2524   /* Draining on DISCONT is handled in chain_reverse() for reverse playback;
2525    * in that case this function is only called to collect everything GOP
2526    * by GOP into the parse_gather list */
2527 if (decoder->input_segment.rate > 0.0 && GST_BUFFER_IS_DISCONT (buf)
2528 && (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS))
2529 ret = gst_video_decoder_drain_out (decoder, FALSE);
2530
2531 if (priv->current_frame == NULL)
2532 priv->current_frame = gst_video_decoder_new_frame (decoder);
2533
2534 if (!priv->packetized)
2535 gst_video_decoder_add_buffer_info (decoder, buf);
2536
2537 priv->input_offset += gst_buffer_get_size (buf);
2538
2539 if (priv->packetized) {
2540 GstVideoCodecFrame *frame;
2541 gboolean was_keyframe = FALSE;
2542
2543 frame = priv->current_frame;
2544
2545 frame->abidata.ABI.num_subframes++;
2546 if (gst_video_decoder_get_subframe_mode (decoder)) {
2547 /* End the frame if the marker flag is set */
2548 if (!GST_BUFFER_FLAG_IS_SET (buf, GST_VIDEO_BUFFER_FLAG_MARKER)
2549 && (decoder->input_segment.rate > 0.0))
2550 priv->current_frame = gst_video_codec_frame_ref (frame);
2551 else
2552 priv->current_frame = NULL;
2553 } else {
2554 priv->current_frame = frame;
2555 }
2556
2557 if (!GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT)) {
2558 was_keyframe = TRUE;
2559 GST_DEBUG_OBJECT (decoder, "Marking current_frame as sync point");
2560 GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);
2561 }
2562
2563 if (frame->input_buffer) {
2564 gst_video_decoder_copy_metas (decoder, frame, frame->input_buffer, buf);
2565 gst_buffer_unref (frame->input_buffer);
2566 }
2567 frame->input_buffer = buf;
2568
2569 if (decoder->input_segment.rate < 0.0) {
2570 priv->parse_gather = g_list_prepend (priv->parse_gather, frame);
2571 priv->current_frame = NULL;
2572 } else {
2573 ret = gst_video_decoder_decode_frame (decoder, frame);
2574 if (!gst_video_decoder_get_subframe_mode (decoder))
2575 priv->current_frame = NULL;
2576 }
2577     /* If in trick mode and it was a keyframe, drain the decoder to avoid
2578      * extra latency. Only do this for forwards playback, as reverse playback
2579      * handles draining on keyframes in flush_parse(), and would otherwise
2580      * call back from drain_out() to here, causing an infinite loop.
2581      * Also, for reverse playback this function is only called to gather
2582      * frames GOP by GOP; it does not do any actual decoding, which is
2583      * handled by flush_decode() */
2584 if (ret == GST_FLOW_OK && was_keyframe && decoder->input_segment.rate > 0.0
2585 && (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS))
2586 ret = gst_video_decoder_drain_out (decoder, FALSE);
2587 } else {
2588 gst_adapter_push (priv->input_adapter, buf);
2589
2590 ret = gst_video_decoder_parse_available (decoder, at_eos, TRUE);
2591 }
2592
2593 if (ret == GST_VIDEO_DECODER_FLOW_NEED_DATA)
2594 return GST_FLOW_OK;
2595
2596 return ret;
2597 }
2598
2599 static GstFlowReturn
2600 gst_video_decoder_flush_decode (GstVideoDecoder * dec)
2601 {
2602 GstVideoDecoderPrivate *priv = dec->priv;
2603 GstFlowReturn res = GST_FLOW_OK;
2604 GList *walk;
2605 GstVideoCodecFrame *current_frame = NULL;
2606 gboolean last_subframe;
2607 GST_DEBUG_OBJECT (dec, "flushing buffers to decode");
2608
2609 walk = priv->decode;
2610 while (walk) {
2611 GList *next;
2612 GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);
2613 last_subframe = TRUE;
2614     /* In subframe mode, we need to get rid of the intermediary frames
2615      * created during the buffer gather stage. That's why we keep a current
2616      * frame as the main frame and drop all subsequent frames until the end
2617      * of the subframes batch.
2618      */
2619 if (gst_video_decoder_get_subframe_mode (dec)) {
2620 if (current_frame == NULL) {
2621 current_frame = gst_video_codec_frame_ref (frame);
2622 } else {
2623 if (current_frame->input_buffer) {
2624 gst_video_decoder_copy_metas (dec, current_frame,
2625 current_frame->input_buffer, current_frame->output_buffer);
2626 gst_buffer_unref (current_frame->input_buffer);
2627 }
2628 current_frame->input_buffer = gst_buffer_ref (frame->input_buffer);
2629 gst_video_codec_frame_unref (frame);
2630 }
2631 last_subframe = GST_BUFFER_FLAG_IS_SET (current_frame->input_buffer,
2632 GST_VIDEO_BUFFER_FLAG_MARKER);
2633 } else {
2634 current_frame = frame;
2635 }
2636
2637 GST_DEBUG_OBJECT (dec, "decoding frame %p buffer %p, PTS %" GST_TIME_FORMAT
2638 ", DTS %" GST_TIME_FORMAT, frame, frame->input_buffer,
2639 GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)),
2640 GST_TIME_ARGS (GST_BUFFER_DTS (frame->input_buffer)));
2641
2642 next = walk->next;
2643
2644 priv->decode = g_list_delete_link (priv->decode, walk);
2645
2646 /* decode buffer, resulting data prepended to queue */
2647 res = gst_video_decoder_decode_frame (dec, current_frame);
2648 if (res != GST_FLOW_OK)
2649 break;
2650 if (!gst_video_decoder_get_subframe_mode (dec)
2651 || last_subframe)
2652 current_frame = NULL;
2653 walk = next;
2654 }
2655
2656 return res;
2657 }
2658
2659 /* gst_video_decoder_flush_parse is called from the
2660  * chain_reverse() function when a buffer carrying
2661  * a DISCONT arrives - indicating that reverse playback
2662  * looped back to the next data block - and therefore
2663  * all available data should be fed through the
2664  * decoder and the frames gathered for reversed output
2665  */
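/* Rough data flow for reverse playback, as implemented below:
 *   chain_reverse(): buffers collected in the gather list
 *   flush_parse(): gather -> parse -> (chain_forward) -> parse_gather
 *                  -> decode -> (flush_decode) -> output_queued -> push
 */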
2666 static GstFlowReturn
2667 gst_video_decoder_flush_parse (GstVideoDecoder * dec, gboolean at_eos)
2668 {
2669 GstVideoDecoderPrivate *priv = dec->priv;
2670 GstFlowReturn res = GST_FLOW_OK;
2671 GList *walk;
2672 GstVideoDecoderClass *decoder_class;
2673
2674 decoder_class = GST_VIDEO_DECODER_GET_CLASS (dec);
2675
2676 GST_DEBUG_OBJECT (dec, "flushing buffers to parsing");
2677
2678 /* Reverse the gather list, and prepend it to the parse list,
2679 * then flush to parse whatever we can */
2680 priv->gather = g_list_reverse (priv->gather);
2681 priv->parse = g_list_concat (priv->gather, priv->parse);
2682 priv->gather = NULL;
2683
2684 /* clear buffer and decoder state */
2685 gst_video_decoder_flush (dec, FALSE);
2686
2687 walk = priv->parse;
2688 while (walk) {
2689 GstBuffer *buf = GST_BUFFER_CAST (walk->data);
2690 GList *next = walk->next;
2691
2692 GST_DEBUG_OBJECT (dec, "parsing buffer %p, PTS %" GST_TIME_FORMAT
2693 ", DTS %" GST_TIME_FORMAT " flags %x", buf,
2694 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2695 GST_TIME_ARGS (GST_BUFFER_DTS (buf)), GST_BUFFER_FLAGS (buf));
2696
2697 /* parse buffer, resulting frames prepended to parse_gather queue */
2698 gst_buffer_ref (buf);
2699 res = gst_video_decoder_chain_forward (dec, buf, at_eos);
2700
2701 /* if we generated output, we can discard the buffer, else we
2702 * keep it in the queue */
2703 if (priv->parse_gather) {
2704 GST_DEBUG_OBJECT (dec, "parsed buffer to %p", priv->parse_gather->data);
2705 priv->parse = g_list_delete_link (priv->parse, walk);
2706 gst_buffer_unref (buf);
2707 } else {
2708 GST_DEBUG_OBJECT (dec, "buffer did not decode, keeping");
2709 }
2710 walk = next;
2711 }
2712
2713 walk = priv->parse_gather;
2714 while (walk) {
2715 GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);
2716 GList *walk2;
2717
2718 /* this is reverse playback, check if we need to apply some segment
2719 * to the output before decoding, as during decoding the segment.rate
2720 * must be used to determine if a buffer should be pushed or added to
2721 * the output list for reverse pushing.
2722 *
2723 * The new segment is not immediately pushed here because we must
2724 * wait for negotiation to happen before it can be pushed to avoid
2725 * pushing a segment before caps event. Negotiation only happens
2726 * when finish_frame is called.
2727 */
2728 for (walk2 = frame->events; walk2;) {
2729 GList *cur = walk2;
2730 GstEvent *event = walk2->data;
2731
2732 walk2 = g_list_next (walk2);
2733 if (GST_EVENT_TYPE (event) <= GST_EVENT_SEGMENT) {
2734
2735 if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
2736 GstSegment segment;
2737
2738 GST_DEBUG_OBJECT (dec, "Segment at frame %p %" GST_TIME_FORMAT,
2739 frame, GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)));
2740 gst_event_copy_segment (event, &segment);
2741 if (segment.format == GST_FORMAT_TIME) {
2742 dec->output_segment = segment;
2743 dec->priv->in_out_segment_sync =
2744 gst_segment_is_equal (&dec->input_segment, &segment);
2745 }
2746 }
2747 dec->priv->pending_events =
2748 g_list_append (dec->priv->pending_events, event);
2749 frame->events = g_list_delete_link (frame->events, cur);
2750 }
2751 }
2752
2753 walk = walk->next;
2754 }
2755
2756   /* now we can process frames. Start by moving each frame from the parse_gather
2757    * to the decode list, reversing the order as we go, and stopping when/if we
2758    * copy a keyframe. */
2759 GST_DEBUG_OBJECT (dec, "checking parsed frames for a keyframe to decode");
2760 walk = priv->parse_gather;
2761 while (walk) {
2762 GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);
2763
2764 /* remove from the gather list */
2765 priv->parse_gather = g_list_remove_link (priv->parse_gather, walk);
2766
2767 /* move it to the front of the decode queue */
2768 priv->decode = g_list_concat (walk, priv->decode);
2769
2770 /* if we copied a keyframe, flush and decode the decode queue */
2771 if (GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
2772 GST_DEBUG_OBJECT (dec, "found keyframe %p with PTS %" GST_TIME_FORMAT
2773 ", DTS %" GST_TIME_FORMAT, frame,
2774 GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)),
2775 GST_TIME_ARGS (GST_BUFFER_DTS (frame->input_buffer)));
2776 res = gst_video_decoder_flush_decode (dec);
2777 if (res != GST_FLOW_OK)
2778 goto done;
2779
2780 /* We need to tell the subclass to drain now.
2781 * We prefer the drain vfunc, but for backward-compat
2782 * we use a finish() vfunc if drain isn't implemented */
2783 if (decoder_class->drain) {
2784 GST_DEBUG_OBJECT (dec, "Draining");
2785 res = decoder_class->drain (dec);
2786 } else if (decoder_class->finish) {
2787 GST_FIXME_OBJECT (dec, "Sub-class should implement drain(). "
2788 "Calling finish() for backwards-compat");
2789 res = decoder_class->finish (dec);
2790 }
2791
2792 if (res != GST_FLOW_OK)
2793 goto done;
2794
2795 /* now send queued data downstream */
2796 walk = priv->output_queued;
2797 while (walk) {
2798 GstBuffer *buf = GST_BUFFER_CAST (walk->data);
2799
2800 priv->output_queued =
2801 g_list_delete_link (priv->output_queued, priv->output_queued);
2802
2803 if (G_LIKELY (res == GST_FLOW_OK)) {
2804           /* avoid stray DISCONT from forward processing,
2805            * which has no meaning in reverse pushing */
2806 GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DISCONT);
2807
2808 /* Last chance to calculate a timestamp as we loop backwards
2809 * through the list */
2810 if (GST_BUFFER_TIMESTAMP (buf) != GST_CLOCK_TIME_NONE)
2811 priv->last_timestamp_out = GST_BUFFER_TIMESTAMP (buf);
2812 else if (priv->last_timestamp_out != GST_CLOCK_TIME_NONE &&
2813 GST_BUFFER_DURATION (buf) != GST_CLOCK_TIME_NONE) {
2814 GST_BUFFER_TIMESTAMP (buf) =
2815 priv->last_timestamp_out - GST_BUFFER_DURATION (buf);
2816 priv->last_timestamp_out = GST_BUFFER_TIMESTAMP (buf);
2817 GST_LOG_OBJECT (dec,
2818 "Calculated TS %" GST_TIME_FORMAT " working backwards",
2819 GST_TIME_ARGS (priv->last_timestamp_out));
2820 }
2821
2822 res = gst_video_decoder_clip_and_push_buf (dec, buf);
2823 } else {
2824 gst_buffer_unref (buf);
2825 }
2826
2827 walk = priv->output_queued;
2828 }
2829
2830 /* clear buffer and decoder state again
2831 * before moving to the previous keyframe */
2832 gst_video_decoder_flush (dec, FALSE);
2833 }
2834
2835 walk = priv->parse_gather;
2836 }
2837
2838 done:
2839 return res;
2840 }
2841
2842 static GstFlowReturn
2843 gst_video_decoder_chain_reverse (GstVideoDecoder * dec, GstBuffer * buf)
2844 {
2845 GstVideoDecoderPrivate *priv = dec->priv;
2846 GstFlowReturn result = GST_FLOW_OK;
2847
2848 /* if we have a discont, move buffers to the decode list */
2849 if (!buf || GST_BUFFER_IS_DISCONT (buf)) {
2850 GST_DEBUG_OBJECT (dec, "received discont");
2851
2852 /* parse and decode stuff in the gather and parse queues */
2853 result = gst_video_decoder_flush_parse (dec, FALSE);
2854 }
2855
2856 if (G_LIKELY (buf)) {
2857 GST_DEBUG_OBJECT (dec, "gathering buffer %p of size %" G_GSIZE_FORMAT ", "
2858 "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dur %"
2859 GST_TIME_FORMAT, buf, gst_buffer_get_size (buf),
2860 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2861 GST_TIME_ARGS (GST_BUFFER_DTS (buf)),
2862 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
2863
2864 /* add buffer to gather queue */
2865 priv->gather = g_list_prepend (priv->gather, buf);
2866 }
2867
2868 return result;
2869 }
2870
2871 #ifdef OHOS_OPT_PERFORMANCE
2872 // ohos.opt.performance.0005
2873 // add trace
2874 static GstFlowReturn
2875 gst_video_decoder_chain_trace (GstPad * pad, GstObject * parent, GstBuffer * buf)
2876 {
2877 GstStartTrace("Decoder:chain");
2878 GstFlowReturn ret = gst_video_decoder_chain (pad, parent, buf);
2879 GstFinishTrace();
2880 return ret;
2881 }
2882 #endif
2883 static GstFlowReturn
2884 gst_video_decoder_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
2885 {
2886 GstVideoDecoder *decoder;
2887 GstFlowReturn ret = GST_FLOW_OK;
2888
2889 decoder = GST_VIDEO_DECODER (parent);
2890
2891 if (G_UNLIKELY (!decoder->priv->input_state && decoder->priv->needs_format))
2892 goto not_negotiated;
2893
2894 GST_LOG_OBJECT (decoder,
2895 "chain PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT " duration %"
2896 GST_TIME_FORMAT " size %" G_GSIZE_FORMAT " flags %x",
2897 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2898 GST_TIME_ARGS (GST_BUFFER_DTS (buf)),
2899 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)),
2900 gst_buffer_get_size (buf), GST_BUFFER_FLAGS (buf));
2901
2902 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
2903
2904 /* NOTE:
2905 * requiring the pad to be negotiated makes it impossible to use
2906 * oggdemux or filesrc ! decoder */
2907
2908 if (decoder->input_segment.format == GST_FORMAT_UNDEFINED) {
2909 GstEvent *event;
2910 GstSegment *segment = &decoder->input_segment;
2911
2912 GST_WARNING_OBJECT (decoder,
2913 "Received buffer without a new-segment. "
2914 "Assuming timestamps start from 0.");
2915
2916 gst_segment_init (segment, GST_FORMAT_TIME);
2917
2918 event = gst_event_new_segment (segment);
2919
2920 decoder->priv->current_frame_events =
2921 g_list_prepend (decoder->priv->current_frame_events, event);
2922 }
2923
2924 decoder->priv->had_input_data = TRUE;
2925
2926 if (decoder->input_segment.rate > 0.0)
2927 ret = gst_video_decoder_chain_forward (decoder, buf, FALSE);
2928 else
2929 ret = gst_video_decoder_chain_reverse (decoder, buf);
2930
2931 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2932 return ret;
2933
2934 /* ERRORS */
2935 not_negotiated:
2936 {
2937 GST_ELEMENT_ERROR (decoder, CORE, NEGOTIATION, (NULL),
2938 ("decoder not initialized"));
2939 gst_buffer_unref (buf);
2940 return GST_FLOW_NOT_NEGOTIATED;
2941 }
2942 }
2943
2944 static GstStateChangeReturn
2945 gst_video_decoder_change_state (GstElement * element, GstStateChange transition)
2946 {
2947 GstVideoDecoder *decoder;
2948 GstVideoDecoderClass *decoder_class;
2949 GstStateChangeReturn ret;
2950
2951 decoder = GST_VIDEO_DECODER (element);
2952 decoder_class = GST_VIDEO_DECODER_GET_CLASS (element);
2953
2954 switch (transition) {
2955 case GST_STATE_CHANGE_NULL_TO_READY:
2956 /* open device/library if needed */
2957 if (decoder_class->open && !decoder_class->open (decoder))
2958 goto open_failed;
2959 break;
2960 case GST_STATE_CHANGE_READY_TO_PAUSED:
2961 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
2962 gst_video_decoder_reset (decoder, TRUE, TRUE);
2963 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2964
2965 /* Initialize device/library if needed */
2966 if (decoder_class->start && !decoder_class->start (decoder))
2967 goto start_failed;
2968 break;
2969 default:
2970 break;
2971 }
2972
2973 ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
2974
2975 switch (transition) {
2976 case GST_STATE_CHANGE_PAUSED_TO_READY:{
2977 gboolean stopped = TRUE;
2978
2979 if (decoder_class->stop)
2980 stopped = decoder_class->stop (decoder);
2981
2982 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
2983 gst_video_decoder_reset (decoder, TRUE, TRUE);
2984 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2985
2986 if (!stopped)
2987 goto stop_failed;
2988
2989 break;
2990 }
2991 case GST_STATE_CHANGE_READY_TO_NULL:
2992 /* close device/library if needed */
2993 if (decoder_class->close && !decoder_class->close (decoder))
2994 goto close_failed;
2995 break;
2996 default:
2997 break;
2998 }
2999
3000 return ret;
3001
3002 /* Errors */
3003 open_failed:
3004 {
3005 GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
3006 ("Failed to open decoder"));
3007 return GST_STATE_CHANGE_FAILURE;
3008 }
3009
3010 start_failed:
3011 {
3012 GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
3013 ("Failed to start decoder"));
3014 return GST_STATE_CHANGE_FAILURE;
3015 }
3016
3017 stop_failed:
3018 {
3019 GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
3020 ("Failed to stop decoder"));
3021 return GST_STATE_CHANGE_FAILURE;
3022 }
3023
3024 close_failed:
3025 {
3026 GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
3027 ("Failed to close decoder"));
3028 return GST_STATE_CHANGE_FAILURE;
3029 }
3030 }
3031
3032 static GstVideoCodecFrame *
3033 gst_video_decoder_new_frame (GstVideoDecoder * decoder)
3034 {
3035 GstVideoDecoderPrivate *priv = decoder->priv;
3036 GstVideoCodecFrame *frame;
3037
3038 frame = g_slice_new0 (GstVideoCodecFrame);
3039
3040 frame->ref_count = 1;
3041
3042 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3043 frame->system_frame_number = priv->system_frame_number;
3044 priv->system_frame_number++;
3045 frame->decode_frame_number = priv->decode_frame_number;
3046 priv->decode_frame_number++;
3047
3048 frame->dts = GST_CLOCK_TIME_NONE;
3049 frame->pts = GST_CLOCK_TIME_NONE;
3050 frame->duration = GST_CLOCK_TIME_NONE;
3051 frame->events = priv->current_frame_events;
3052 priv->current_frame_events = NULL;
3053
3054 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3055
3056 GST_LOG_OBJECT (decoder, "Created new frame %p (sfn:%d)",
3057 frame, frame->system_frame_number);
3058
3059 return frame;
3060 }
3061
3062 static void
3063 gst_video_decoder_push_event_list (GstVideoDecoder * decoder, GList * events)
3064 {
3065 GList *l;
3066
3067 /* events are stored in reverse order */
3068 for (l = g_list_last (events); l; l = g_list_previous (l)) {
3069 GST_LOG_OBJECT (decoder, "pushing %s event", GST_EVENT_TYPE_NAME (l->data));
3070 gst_video_decoder_push_event (decoder, l->data);
3071 }
3072 g_list_free (events);
3073 }
3074
3075 static void
3076 gst_video_decoder_prepare_finish_frame (GstVideoDecoder *
3077 decoder, GstVideoCodecFrame * frame, gboolean dropping)
3078 {
3079 GstVideoDecoderPrivate *priv = decoder->priv;
3080 GList *l, *events = NULL;
3081 gboolean sync;
3082
3083 #ifndef GST_DISABLE_GST_DEBUG
3084 GST_LOG_OBJECT (decoder, "n %d in %" G_GSIZE_FORMAT " out %" G_GSIZE_FORMAT,
3085 priv->frames.length,
3086 gst_adapter_available (priv->input_adapter),
3087 gst_adapter_available (priv->output_adapter));
3088 #endif
3089
3090 sync = GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame);
3091
3092 GST_LOG_OBJECT (decoder,
3093 "finish frame %p (#%d)(sub=#%d) sync:%d PTS:%" GST_TIME_FORMAT " DTS:%"
3094 GST_TIME_FORMAT,
3095 frame, frame->system_frame_number, frame->abidata.ABI.num_subframes,
3096 sync, GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->dts));
3097
3098 /* Push all pending events that arrived before this frame */
3099 for (l = priv->frames.head; l; l = l->next) {
3100 GstVideoCodecFrame *tmp = l->data;
3101
3102 if (tmp->events) {
3103 events = g_list_concat (tmp->events, events);
3104 tmp->events = NULL;
3105 }
3106
3107 if (tmp == frame)
3108 break;
3109 }
3110
3111 if (dropping || !decoder->priv->output_state) {
3112 /* Push before the next frame that is not dropped */
3113 decoder->priv->pending_events =
3114 g_list_concat (events, decoder->priv->pending_events);
3115 } else {
3116 gst_video_decoder_push_event_list (decoder, decoder->priv->pending_events);
3117 decoder->priv->pending_events = NULL;
3118
3119 gst_video_decoder_push_event_list (decoder, events);
3120 }
3121
3122   /* Check if the data should not be displayed. For example an altref/invisible
3123    * frame in VP8. In this case we should not update the timestamps. */
3124 if (GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (frame))
3125 return;
3126
3127 /* If the frame is meant to be output but we don't have an output_buffer
3128 * we have a problem :) */
3129 if (G_UNLIKELY ((frame->output_buffer == NULL) && !dropping))
3130 goto no_output_buffer;
3131
3132 if (GST_CLOCK_TIME_IS_VALID (frame->pts)) {
3133 if (frame->pts != priv->base_timestamp) {
3134 GST_DEBUG_OBJECT (decoder,
3135 "sync timestamp %" GST_TIME_FORMAT " diff %" GST_STIME_FORMAT,
3136 GST_TIME_ARGS (frame->pts),
3137 GST_STIME_ARGS (GST_CLOCK_DIFF (frame->pts,
3138 decoder->output_segment.start)));
3139 priv->base_timestamp = frame->pts;
3140 priv->base_picture_number = frame->decode_frame_number;
3141 }
3142 }
3143
3144 if (frame->duration == GST_CLOCK_TIME_NONE) {
3145 frame->duration = gst_video_decoder_get_frame_duration (decoder, frame);
3146 GST_LOG_OBJECT (decoder,
3147 "Guessing duration %" GST_TIME_FORMAT " for frame...",
3148 GST_TIME_ARGS (frame->duration));
3149 }
3150
3151   /* PTS is expected to be monotonically ascending,
3152    * so a good guess is the lowest unsent DTS */
3153 {
3154 GstClockTime min_ts = GST_CLOCK_TIME_NONE;
3155 GstVideoCodecFrame *oframe = NULL;
3156 gboolean seen_none = FALSE;
3157
3158     /* some maintenance regardless; here ts holds the input DTS */
3159 for (l = priv->frames.head; l; l = l->next) {
3160 GstVideoCodecFrame *tmp = l->data;
3161
3162 if (!GST_CLOCK_TIME_IS_VALID (tmp->abidata.ABI.ts)) {
3163 seen_none = TRUE;
3164 continue;
3165 }
3166
3167 if (!GST_CLOCK_TIME_IS_VALID (min_ts) || tmp->abidata.ABI.ts < min_ts) {
3168 min_ts = tmp->abidata.ABI.ts;
3169 oframe = tmp;
3170 }
3171 }
3172 /* save a ts if needed */
3173 if (oframe && oframe != frame) {
3174 oframe->abidata.ABI.ts = frame->abidata.ABI.ts;
3175 }
3176
3177 /* and set if needed;
3178 * valid delta means we have reasonable DTS input */
3179     /* also, if we ended up reordered, it means this approach conflicts
3180      * with some sparse existing PTS, and so it does not work out */
3181 if (!priv->reordered_output &&
3182 !GST_CLOCK_TIME_IS_VALID (frame->pts) && !seen_none &&
3183 GST_CLOCK_TIME_IS_VALID (priv->pts_delta)) {
3184 frame->pts = min_ts + priv->pts_delta;
3185 GST_DEBUG_OBJECT (decoder,
3186 "no valid PTS, using oldest DTS %" GST_TIME_FORMAT,
3187 GST_TIME_ARGS (frame->pts));
3188 }
3189
3190 /* some more maintenance, ts2 holds PTS */
3191 min_ts = GST_CLOCK_TIME_NONE;
3192 seen_none = FALSE;
3193 for (l = priv->frames.head; l; l = l->next) {
3194 GstVideoCodecFrame *tmp = l->data;
3195
3196 if (!GST_CLOCK_TIME_IS_VALID (tmp->abidata.ABI.ts2)) {
3197 seen_none = TRUE;
3198 continue;
3199 }
3200
3201 if (!GST_CLOCK_TIME_IS_VALID (min_ts) || tmp->abidata.ABI.ts2 < min_ts) {
3202 min_ts = tmp->abidata.ABI.ts2;
3203 oframe = tmp;
3204 }
3205 }
3206 /* save a ts if needed */
3207 if (oframe && oframe != frame) {
3208 oframe->abidata.ABI.ts2 = frame->abidata.ABI.ts2;
3209 }
3210
3211 /* if we detected reordered output, then PTS are void,
3212 * however those were obtained; bogus input, subclass etc */
3213 if (priv->reordered_output && !seen_none) {
3214 #ifdef OHOS_OPT_COMPAT
3215 /**
3216 * ohos.ext.compat.0046
3217      * Reset reordered_output to FALSE to avoid persistent PTS exceptions
3218 */
3219 if (GST_CLOCK_TIME_IS_VALID (frame->pts) && frame->pts >= priv->last_timestamp_out &&
3220 (!(frame->duration != GST_CLOCK_TIME_NONE) || !(sync && frame->dts != GST_CLOCK_TIME_NONE))) {
3221 GST_DEBUG_OBJECT (decoder, "pts %" GST_TIME_FORMAT "last_timestamp_out %" GST_TIME_FORMAT,
3222 GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (priv->last_timestamp_out));
3223 priv->reordered_output = FALSE;
3224 } else if (GST_CLOCK_TIME_IS_VALID(frame->pts) && !GST_CLOCK_TIME_IS_VALID(priv->last_timestamp_out)) {
3225 priv->last_timestamp_out = frame->pts;
3226 } else {
3227 GST_DEBUG_OBJECT (decoder, "invalidating PTS");
3228 frame->pts = GST_CLOCK_TIME_NONE;
3229 }
3230 #else
3231 GST_DEBUG_OBJECT (decoder, "invalidating PTS");
3232 frame->pts = GST_CLOCK_TIME_NONE;
3233 #endif
3234 }
3235
3236 if (!GST_CLOCK_TIME_IS_VALID (frame->pts) && !seen_none) {
3237 frame->pts = min_ts;
3238 GST_DEBUG_OBJECT (decoder,
3239 "no valid PTS, using oldest PTS %" GST_TIME_FORMAT,
3240 GST_TIME_ARGS (frame->pts));
3241 }
3242 }
3243
3244
3245 if (frame->pts == GST_CLOCK_TIME_NONE) {
3246 /* Last ditch timestamp guess: Just add the duration to the previous
3247 * frame. If it's the first frame, just use the segment start. */
3248 if (frame->duration != GST_CLOCK_TIME_NONE) {
3249 if (GST_CLOCK_TIME_IS_VALID (priv->last_timestamp_out))
3250 frame->pts = priv->last_timestamp_out + frame->duration;
3251 else if (frame->dts != GST_CLOCK_TIME_NONE) {
3252 frame->pts = frame->dts;
3253 GST_LOG_OBJECT (decoder,
3254 "Setting DTS as PTS %" GST_TIME_FORMAT " for frame...",
3255 GST_TIME_ARGS (frame->pts));
3256 } else if (decoder->output_segment.rate > 0.0)
3257 frame->pts = decoder->output_segment.start;
3258 GST_INFO_OBJECT (decoder,
3259 "Guessing PTS=%" GST_TIME_FORMAT " for frame... DTS=%"
3260 GST_TIME_FORMAT, GST_TIME_ARGS (frame->pts),
3261 GST_TIME_ARGS (frame->dts));
3262 } else if (sync && frame->dts != GST_CLOCK_TIME_NONE) {
3263 frame->pts = frame->dts;
3264 GST_LOG_OBJECT (decoder,
3265 "Setting DTS as PTS %" GST_TIME_FORMAT " for frame...",
3266 GST_TIME_ARGS (frame->pts));
3267 }
3268 }
3269
3270 if (GST_CLOCK_TIME_IS_VALID (priv->last_timestamp_out)) {
3271 if (frame->pts < priv->last_timestamp_out) {
3272 GST_WARNING_OBJECT (decoder,
3273 "decreasing timestamp (%" GST_TIME_FORMAT " < %"
3274 GST_TIME_FORMAT ")",
3275 GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (priv->last_timestamp_out));
3276 priv->reordered_output = TRUE;
3277 /* make it a bit less weird downstream */
3278 frame->pts = priv->last_timestamp_out;
3279 }
3280 }
3281
3282 if (GST_CLOCK_TIME_IS_VALID (frame->pts))
3283 priv->last_timestamp_out = frame->pts;
3284
3285 return;
3286
3287 /* ERRORS */
3288 no_output_buffer:
3289 {
3290 GST_ERROR_OBJECT (decoder, "No buffer to output !");
3291 }
3292 }
3293
3294 /**
3295 * gst_video_decoder_release_frame:
3296 * @dec: a #GstVideoDecoder
3297 * @frame: (transfer full): the #GstVideoCodecFrame to release
3298 *
3299 * Similar to gst_video_decoder_drop_frame(), but simply releases @frame
3300 * without any processing other than removing it from the list of pending
3301 * frames, after which it is considered finished and released.
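*
* A minimal sketch of a subclass discarding input it cannot decode
* (illustrative only; my_dec_is_decodable() and my_dec_decode() are
* hypothetical helpers):
*
* ```c
* static GstFlowReturn
* my_dec_handle_frame (GstVideoDecoder * dec, GstVideoCodecFrame * frame)
* {
*   if (!my_dec_is_decodable (frame->input_buffer)) {
*     // Discard silently: no QoS message is posted, nothing is pushed.
*     gst_video_decoder_release_frame (dec, frame);
*     return GST_FLOW_OK;
*   }
*   return my_dec_decode (dec, frame);
* }
* ```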
3302 *
3303 * Since: 1.2.2
3304 */
3305 void
3306 gst_video_decoder_release_frame (GstVideoDecoder * dec,
3307 GstVideoCodecFrame * frame)
3308 {
3309 GList *link;
3310
3311 /* unref once from the list */
3312 GST_VIDEO_DECODER_STREAM_LOCK (dec);
3313 link = g_queue_find (&dec->priv->frames, frame);
3314 if (link) {
3315 gst_video_codec_frame_unref (frame);
3316 g_queue_delete_link (&dec->priv->frames, link);
3317 }
3318 if (frame->events) {
3319 dec->priv->pending_events =
3320 g_list_concat (frame->events, dec->priv->pending_events);
3321 frame->events = NULL;
3322 }
3323 GST_VIDEO_DECODER_STREAM_UNLOCK (dec);
3324
3325 /* unref because this function takes ownership */
3326 gst_video_codec_frame_unref (frame);
3327 }
3328
3329 /* called with STREAM_LOCK */
3330 static void
3331 gst_video_decoder_post_qos_drop (GstVideoDecoder * dec, GstClockTime timestamp)
3332 {
3333 GstClockTime stream_time, jitter, earliest_time, qostime;
3334 GstSegment *segment;
3335 GstMessage *qos_msg;
3336 gdouble proportion;
3337 dec->priv->dropped++;
3338
3339 /* post QoS message */
3340 GST_OBJECT_LOCK (dec);
3341 proportion = dec->priv->proportion;
3342 earliest_time = dec->priv->earliest_time;
3343 GST_OBJECT_UNLOCK (dec);
3344
3345 segment = &dec->output_segment;
3346 if (G_UNLIKELY (segment->format == GST_FORMAT_UNDEFINED))
3347 segment = &dec->input_segment;
3348 stream_time =
3349 gst_segment_to_stream_time (segment, GST_FORMAT_TIME, timestamp);
3350 qostime = gst_segment_to_running_time (segment, GST_FORMAT_TIME, timestamp);
3351 jitter = GST_CLOCK_DIFF (qostime, earliest_time);
3352 qos_msg =
3353 gst_message_new_qos (GST_OBJECT_CAST (dec), FALSE, qostime, stream_time,
3354 timestamp, GST_CLOCK_TIME_NONE);
3355 gst_message_set_qos_values (qos_msg, jitter, proportion, 1000000);
3356 gst_message_set_qos_stats (qos_msg, GST_FORMAT_BUFFERS,
3357 dec->priv->processed, dec->priv->dropped);
3358 gst_element_post_message (GST_ELEMENT_CAST (dec), qos_msg);
3359 }
3360
3361 /**
3362 * gst_video_decoder_drop_frame:
3363 * @dec: a #GstVideoDecoder
3364 * @frame: (transfer full): the #GstVideoCodecFrame to drop
3365 *
3366 * Similar to gst_video_decoder_finish_frame(), but drops @frame in any
3367 * case and posts a QoS message with the frame's details on the bus.
3368 * In any case, the frame is considered finished and released.
3369 *
3370 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
3371 */
3372 GstFlowReturn
3373 gst_video_decoder_drop_frame (GstVideoDecoder * dec, GstVideoCodecFrame * frame)
3374 {
3375 GST_LOG_OBJECT (dec, "drop frame %p", frame);
3376
3377 if (gst_video_decoder_get_subframe_mode (dec))
3378 GST_DEBUG_OBJECT (dec, "Drop subframe %d. Must be the last one.",
3379 frame->abidata.ABI.num_subframes);
3380
3381 GST_VIDEO_DECODER_STREAM_LOCK (dec);
3382
3383 gst_video_decoder_prepare_finish_frame (dec, frame, TRUE);
3384
3385 GST_DEBUG_OBJECT (dec, "dropping frame %" GST_TIME_FORMAT,
3386 GST_TIME_ARGS (frame->pts));
3387
3388 gst_video_decoder_post_qos_drop (dec, frame->pts);
3389
3390 /* now free the frame */
3391 gst_video_decoder_release_frame (dec, frame);
3392
3393 GST_VIDEO_DECODER_STREAM_UNLOCK (dec);
3394
3395 return GST_FLOW_OK;
3396 }
3397
3398 /**
3399 * gst_video_decoder_drop_subframe:
3400 * @dec: a #GstVideoDecoder
3401 * @frame: (transfer full): the #GstVideoCodecFrame
3402 *
3403 * Drops input data.
3404 * The frame is not considered finished until the whole frame
3405 * is finished or dropped by the subclass.
3406 *
3407 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
3408 *
3409 * Since: 1.20
3410 */
3411 GstFlowReturn
3412 gst_video_decoder_drop_subframe (GstVideoDecoder * dec,
3413 GstVideoCodecFrame * frame)
3414 {
3415 g_return_val_if_fail (gst_video_decoder_get_subframe_mode (dec),
3416 GST_FLOW_NOT_SUPPORTED);
3417
3418 GST_LOG_OBJECT (dec, "drop subframe %p num=%d", frame->input_buffer,
3419 gst_video_decoder_get_input_subframe_index (dec, frame));
3420
3421 GST_VIDEO_DECODER_STREAM_LOCK (dec);
3422
3423 gst_video_codec_frame_unref (frame);
3424
3425 GST_VIDEO_DECODER_STREAM_UNLOCK (dec);
3426
3427 return GST_FLOW_OK;
3428 }
3429
3430 static gboolean
3431 gst_video_decoder_transform_meta_default (GstVideoDecoder *
3432 decoder, GstVideoCodecFrame * frame, GstMeta * meta)
3433 {
3434 const GstMetaInfo *info = meta->info;
3435 const gchar *const *tags;
3436 const gchar *const supported_tags[] = {
3437 GST_META_TAG_VIDEO_STR,
3438 GST_META_TAG_VIDEO_ORIENTATION_STR,
3439 GST_META_TAG_VIDEO_SIZE_STR,
3440 NULL,
3441 };
3442
3443 tags = gst_meta_api_type_get_tags (info->api);
3444
3445 if (!tags)
3446 return TRUE;
3447
3448 while (*tags) {
3449 if (!g_strv_contains (supported_tags, *tags))
3450 return FALSE;
3451 tags++;
3452 }
3453
3454 return TRUE;
3455 }
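/* Example: a subclass can override GstVideoDecoderClass::transform_meta to
 * extend this default set; a minimal sketch (illustrative only, the
 * "my-custom-tag" meta tag is hypothetical):
 *
 *   static gboolean
 *   my_dec_transform_meta (GstVideoDecoder * dec, GstVideoCodecFrame * frame,
 *       GstMeta * meta)
 *   {
 *     if (gst_meta_api_type_has_tag (meta->info->api,
 *             g_quark_from_static_string ("my-custom-tag")))
 *       return TRUE;
 *     // Otherwise defer to the default tag-based check above.
 *     return GST_VIDEO_DECODER_CLASS (parent_class)->transform_meta (dec,
 *         frame, meta);
 *   }
 */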
3456
3457 typedef struct
3458 {
3459 GstVideoDecoder *decoder;
3460 GstVideoCodecFrame *frame;
3461 GstBuffer *buffer;
3462 } CopyMetaData;
3463
3464 static gboolean
3465 foreach_metadata (GstBuffer * inbuf, GstMeta ** meta, gpointer user_data)
3466 {
3467 CopyMetaData *data = user_data;
3468 GstVideoDecoder *decoder = data->decoder;
3469 GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
3470 GstVideoCodecFrame *frame = data->frame;
3471 GstBuffer *buffer = data->buffer;
3472 const GstMetaInfo *info = (*meta)->info;
3473 gboolean do_copy = FALSE;
3474
3475 if (gst_meta_api_type_has_tag (info->api, _gst_meta_tag_memory)) {
3476 /* never call the transform_meta with memory specific metadata */
3477 GST_DEBUG_OBJECT (decoder, "not copying memory specific metadata %s",
3478 g_type_name (info->api));
3479 do_copy = FALSE;
3480 } else if (klass->transform_meta) {
3481 do_copy = klass->transform_meta (decoder, frame, *meta);
3482 GST_DEBUG_OBJECT (decoder, "transformed metadata %s: copy: %d",
3483 g_type_name (info->api), do_copy);
3484 }
3485
3486 /* we only copy metadata when the subclass implemented a transform_meta
3487 * function and when it returns %TRUE */
3488 if (do_copy && info->transform_func) {
3489 GstMetaTransformCopy copy_data = { FALSE, 0, -1 };
3490 GST_DEBUG_OBJECT (decoder, "copy metadata %s", g_type_name (info->api));
3491 /* simply copy then */
3492
3493 info->transform_func (buffer, *meta, inbuf, _gst_meta_transform_copy,
3494 &copy_data);
3495 }
3496 return TRUE;
3497 }
3498
3499 static void
3500 gst_video_decoder_copy_metas (GstVideoDecoder * decoder,
3501 GstVideoCodecFrame * frame, GstBuffer * src_buffer, GstBuffer * dest_buffer)
3502 {
3503 GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
3504
3505 if (decoder_class->transform_meta) {
3506 if (G_LIKELY (frame)) {
3507 CopyMetaData data;
3508
3509 data.decoder = decoder;
3510 data.frame = frame;
3511 data.buffer = dest_buffer;
3512 gst_buffer_foreach_meta (src_buffer, foreach_metadata, &data);
3513 } else {
3514 GST_WARNING_OBJECT (decoder,
3515 "Can't copy metadata because input frame disappeared");
3516 }
3517 }
3518 }
3519
3520 /**
3521 * gst_video_decoder_finish_frame:
3522 * @decoder: a #GstVideoDecoder
3523 * @frame: (transfer full): a decoded #GstVideoCodecFrame
3524 *
3525 * @frame should have a valid decoded data buffer, whose metadata fields
3526 * are then appropriately set according to frame data, before it is pushed downstream.
3527 * If no output data is provided, @frame is considered skipped.
3528 * In any case, the frame is considered finished and released.
3529 *
3530 * After calling this function the output buffer of the frame is to be
3531 * considered read-only. This function will also change the metadata
3532 * of the buffer.
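*
* A typical @handle_frame implementation ends like this minimal sketch
* (illustrative only; my_dec_decode_into() is a hypothetical helper that
* writes the decoded image into the output buffer):
*
* ```c
* static GstFlowReturn
* my_dec_handle_frame (GstVideoDecoder * dec, GstVideoCodecFrame * frame)
* {
*   GstFlowReturn ret;
*
*   ret = gst_video_decoder_allocate_output_frame (dec, frame);
*   if (ret != GST_FLOW_OK) {
*     gst_video_decoder_release_frame (dec, frame);
*     return ret;
*   }
*   if (!my_dec_decode_into (dec, frame->input_buffer, frame->output_buffer))
*     return gst_video_decoder_drop_frame (dec, frame);
*   // Takes ownership of frame and pushes its output buffer downstream.
*   return gst_video_decoder_finish_frame (dec, frame);
* }
* ```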
3533 *
3534 * Returns: a #GstFlowReturn resulting from sending data downstream
3535 */
3536 GstFlowReturn
3537 gst_video_decoder_finish_frame (GstVideoDecoder * decoder,
3538 GstVideoCodecFrame * frame)
3539 {
3540 GstFlowReturn ret = GST_FLOW_OK;
3541 GstVideoDecoderPrivate *priv = decoder->priv;
3542 GstBuffer *output_buffer;
3543 gboolean needs_reconfigure = FALSE;
3544
3545 GST_LOG_OBJECT (decoder, "finish frame %p", frame);
3546
3547 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3548
3549 needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
3550 if (G_UNLIKELY (priv->output_state_changed || (priv->output_state
3551 && needs_reconfigure))) {
3552 if (!gst_video_decoder_negotiate_unlocked (decoder)) {
3553 gst_pad_mark_reconfigure (decoder->srcpad);
3554 if (GST_PAD_IS_FLUSHING (decoder->srcpad))
3555 ret = GST_FLOW_FLUSHING;
3556 else
3557 ret = GST_FLOW_NOT_NEGOTIATED;
3558 goto done;
3559 }
3560 }
3561
3562 gst_video_decoder_prepare_finish_frame (decoder, frame, FALSE);
3563 priv->processed++;
3564
3565 if (priv->tags_changed) {
3566 GstEvent *tags_event;
3567
3568 tags_event = gst_video_decoder_create_merged_tags_event (decoder);
3569
3570 if (tags_event != NULL)
3571 gst_video_decoder_push_event (decoder, tags_event);
3572
3573 priv->tags_changed = FALSE;
3574 }
3575
3576 /* no buffer data means this frame is skipped */
3577 if (!frame->output_buffer || GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (frame)) {
3578 GST_DEBUG_OBJECT (decoder,
3579 "skipping frame %" GST_TIME_FORMAT " because not output was produced",
3580 GST_TIME_ARGS (frame->pts));
3581 goto done;
3582 }
3583
3584 /* Mark output as corrupted if the subclass requested so and we're either
3585 * still before the sync point after the request, or we don't even know the
3586 * frame number of the sync point yet (it is 0) */
3587 GST_OBJECT_LOCK (decoder);
3588 if (frame->system_frame_number <= priv->request_sync_point_frame_number
3589 && priv->request_sync_point_frame_number != REQUEST_SYNC_POINT_UNSET) {
3590 if (priv->request_sync_point_flags &
3591 GST_VIDEO_DECODER_REQUEST_SYNC_POINT_CORRUPT_OUTPUT) {
3592 GST_DEBUG_OBJECT (decoder,
3593 "marking frame %" GST_TIME_FORMAT
3594 " as corrupted because it is still before the sync point",
3595 GST_TIME_ARGS (frame->pts));
3596 GST_VIDEO_CODEC_FRAME_FLAG_SET (frame,
3597 GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED);
3598 }
3599 } else {
3600 /* Reset to -1 to mark it as unset now that we've reached the frame */
3601 priv->request_sync_point_frame_number = REQUEST_SYNC_POINT_UNSET;
3602 }
3603 GST_OBJECT_UNLOCK (decoder);
3604
3605 if (priv->discard_corrupted_frames
3606 && (GST_VIDEO_CODEC_FRAME_FLAG_IS_SET (frame,
3607 GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED)
3608 || GST_BUFFER_FLAG_IS_SET (frame->output_buffer,
3609 GST_BUFFER_FLAG_CORRUPTED))) {
3610 GST_DEBUG_OBJECT (decoder,
3611 "skipping frame %" GST_TIME_FORMAT " because it is corrupted",
3612 GST_TIME_ARGS (frame->pts));
3613 goto done;
3614 }
3615
3616 /* We need a writable buffer for the metadata changes below */
3617 output_buffer = frame->output_buffer =
3618 gst_buffer_make_writable (frame->output_buffer);
3619
3620 GST_BUFFER_FLAG_UNSET (output_buffer, GST_BUFFER_FLAG_DELTA_UNIT);
3621
3622 GST_BUFFER_PTS (output_buffer) = frame->pts;
3623 GST_BUFFER_DTS (output_buffer) = GST_CLOCK_TIME_NONE;
3624 GST_BUFFER_DURATION (output_buffer) = frame->duration;
3625
3626 GST_BUFFER_OFFSET (output_buffer) = GST_BUFFER_OFFSET_NONE;
3627 GST_BUFFER_OFFSET_END (output_buffer) = GST_BUFFER_OFFSET_NONE;
3628
3629 if (priv->discont) {
3630 GST_BUFFER_FLAG_SET (output_buffer, GST_BUFFER_FLAG_DISCONT);
3631 }
3632
3633 if (GST_VIDEO_CODEC_FRAME_FLAG_IS_SET (frame,
3634 GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED)) {
3635 GST_DEBUG_OBJECT (decoder,
3636 "marking frame %" GST_TIME_FORMAT " as corrupted",
3637 GST_TIME_ARGS (frame->pts));
3638 GST_BUFFER_FLAG_SET (output_buffer, GST_BUFFER_FLAG_CORRUPTED);
3639 }
3640
3641 gst_video_decoder_copy_metas (decoder, frame, frame->input_buffer,
3642 frame->output_buffer);
3643
3644 /* Get an additional ref to the buffer, which is going to be pushed
3645 * downstream, the original ref is owned by the frame
3646 */
3647 output_buffer = gst_buffer_ref (output_buffer);
3648
3649 /* Release frame so the buffer is writable when we push it downstream
3650 * if possible, i.e. if the subclass does not hold additional references
3651 * to the frame
3652 */
3653 gst_video_decoder_release_frame (decoder, frame);
3654 frame = NULL;
3655
3656 if (decoder->output_segment.rate < 0.0
3657 && !(decoder->output_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS)) {
3658 GST_LOG_OBJECT (decoder, "queued frame");
3659 priv->output_queued = g_list_prepend (priv->output_queued, output_buffer);
3660 } else {
3661 ret = gst_video_decoder_clip_and_push_buf (decoder, output_buffer);
3662 }
3663
3664 done:
3665 if (frame)
3666 gst_video_decoder_release_frame (decoder, frame);
3667 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3668 return ret;
3669 }
3670
3671 /**
3672 * gst_video_decoder_finish_subframe:
3673 * @decoder: a #GstVideoDecoder
3674 * @frame: (transfer full): the #GstVideoCodecFrame
3675 *
3676 * Indicates that the subclass has finished decoding a subframe.
3677 * This method should be called for all subframes except the last
3678 * subframe, for which @gst_video_decoder_finish_frame
3679 * should be called instead.
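*
* A sketch of the per-subframe decision in subframe mode (illustrative
* only; my_dec_decode_subframe() is a hypothetical decode step):
*
* ```c
* static GstFlowReturn
* my_dec_handle_frame (GstVideoDecoder * dec, GstVideoCodecFrame * frame)
* {
*   my_dec_decode_subframe (dec, frame->input_buffer);
*
*   // The marker flag identifies the last subframe of the frame.
*   if (GST_BUFFER_FLAG_IS_SET (frame->input_buffer,
*           GST_VIDEO_BUFFER_FLAG_MARKER))
*     return gst_video_decoder_finish_frame (dec, frame);
*   return gst_video_decoder_finish_subframe (dec, frame);
* }
* ```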
3680 *
3681 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
3682 *
3683 * Since: 1.20
3684 */
3685 GstFlowReturn
3686 gst_video_decoder_finish_subframe (GstVideoDecoder * decoder,
3687 GstVideoCodecFrame * frame)
3688 {
3689 g_return_val_if_fail (gst_video_decoder_get_subframe_mode (decoder),
3690 GST_FLOW_NOT_SUPPORTED);
3691
3692 GST_LOG_OBJECT (decoder, "finish subframe %p num=%d", frame->input_buffer,
3693 gst_video_decoder_get_input_subframe_index (decoder, frame));
3694
3695 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3696 frame->abidata.ABI.subframes_processed++;
3697 gst_video_codec_frame_unref (frame);
3698
3699 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3700
3701 return GST_FLOW_OK;
3702 }
3703
3704 /* With stream lock, takes the frame reference */
3705 static GstFlowReturn
3706 gst_video_decoder_clip_and_push_buf (GstVideoDecoder * decoder, GstBuffer * buf)
3707 {
3708 GstFlowReturn ret = GST_FLOW_OK;
3709 GstVideoDecoderPrivate *priv = decoder->priv;
3710 guint64 start, stop;
3711 guint64 cstart, cstop;
3712 GstSegment *segment;
3713 GstClockTime duration;
3714
3715 /* Check for clipping */
3716 start = GST_BUFFER_PTS (buf);
3717 duration = GST_BUFFER_DURATION (buf);
3718
3719 /* store that we have valid decoded data */
3720 priv->had_output_data = TRUE;
3721 #ifdef OHOS_OPT_COMPAT
3722 priv->stream_had_output_data = TRUE;
3723 #endif
3724
3725 stop = GST_CLOCK_TIME_NONE;
3726
3727 if (GST_CLOCK_TIME_IS_VALID (start) && GST_CLOCK_TIME_IS_VALID (duration)) {
3728 stop = start + duration;
3729 } else if (GST_CLOCK_TIME_IS_VALID (start)
3730 && !GST_CLOCK_TIME_IS_VALID (duration)) {
3731 /* If we don't clip away buffers that far before the segment we
3732 * can cause the pipeline to lock up. This can happen if audio is
3733 * properly clipped, and thus the audio sink does not preroll yet
3734 * but the video sink prerolls because we already outputted a
3735 * buffer here... and then queues run full.
3736 *
3737 * In the worst case we will clip one buffer too many here now if no
3738 * framerate is given, no buffer duration is given and the actual
3739 * framerate is lower than 25fps */
3740 stop = start + 40 * GST_MSECOND;
3741 }
3742
3743 segment = &decoder->output_segment;
3744 if (gst_segment_clip (segment, GST_FORMAT_TIME, start, stop, &cstart, &cstop)) {
3745 GST_BUFFER_PTS (buf) = cstart;
3746
3747 if (stop != GST_CLOCK_TIME_NONE && GST_CLOCK_TIME_IS_VALID (duration))
3748 GST_BUFFER_DURATION (buf) = cstop - cstart;
3749
3750 GST_LOG_OBJECT (decoder,
3751 "accepting buffer inside segment: %" GST_TIME_FORMAT " %"
3752 GST_TIME_FORMAT " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
3753 " time %" GST_TIME_FORMAT,
3754 GST_TIME_ARGS (cstart),
3755 GST_TIME_ARGS (cstop),
3756 GST_TIME_ARGS (segment->start), GST_TIME_ARGS (segment->stop),
3757 GST_TIME_ARGS (segment->time));
3758 } else {
3759 #ifdef OHOS_OPT_PERFORMANCE
3760 // ohos.opt.performance.0005
3761 // add trace
3762 GST_INFO_OBJECT (decoder,
3763 "dropping buffer outside segment: %" GST_TIME_FORMAT
3764 " %" GST_TIME_FORMAT
3765 " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
3766 " time %" GST_TIME_FORMAT,
3767 GST_TIME_ARGS (start), GST_TIME_ARGS (stop),
3768 GST_TIME_ARGS (segment->start),
3769 GST_TIME_ARGS (segment->stop), GST_TIME_ARGS (segment->time));
3770 #else
3771 GST_LOG_OBJECT (decoder,
3772 "dropping buffer outside segment: %" GST_TIME_FORMAT
3773 " %" GST_TIME_FORMAT
3774 " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
3775 " time %" GST_TIME_FORMAT,
3776 GST_TIME_ARGS (start), GST_TIME_ARGS (stop),
3777 GST_TIME_ARGS (segment->start),
3778 GST_TIME_ARGS (segment->stop), GST_TIME_ARGS (segment->time));
3779 #endif
3780 /* only check and return EOS if upstream is still
3781 * in the same segment and thus still interested */
3782 if (decoder->priv->in_out_segment_sync) {
3783 if (segment->rate >= 0) {
3784 if (GST_BUFFER_PTS (buf) >= segment->stop)
3785 ret = GST_FLOW_EOS;
3786 } else if (GST_BUFFER_PTS (buf) < segment->start) {
3787 ret = GST_FLOW_EOS;
3788 }
3789 }
3790 gst_buffer_unref (buf);
3791 goto done;
3792 }
3793
3794 /* Is buffer too late (QoS) ? */
3795 if (priv->do_qos && GST_CLOCK_TIME_IS_VALID (priv->earliest_time)
3796 && GST_CLOCK_TIME_IS_VALID (cstart)) {
3797 GstClockTime deadline =
3798 gst_segment_to_running_time (segment, GST_FORMAT_TIME, cstart);
3799 if (GST_CLOCK_TIME_IS_VALID (deadline) && deadline < priv->earliest_time) {
3800 GST_WARNING_OBJECT (decoder,
3801 "Dropping frame due to QoS. start:%" GST_TIME_FORMAT " deadline:%"
3802 GST_TIME_FORMAT " earliest_time:%" GST_TIME_FORMAT,
3803 GST_TIME_ARGS (start), GST_TIME_ARGS (deadline),
3804 GST_TIME_ARGS (priv->earliest_time));
3805 gst_video_decoder_post_qos_drop (decoder, cstart);
3806 gst_buffer_unref (buf);
3807 priv->discont = TRUE;
3808 goto done;
3809 }
3810 }
3811
3812 /* Set DISCONT flag here ! */
3813
3814 if (priv->discont) {
3815 GST_DEBUG_OBJECT (decoder, "Setting discont on output buffer");
3816 GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
3817 priv->discont = FALSE;
3818 }
3819
3820 /* update rate estimate */
3821 GST_OBJECT_LOCK (decoder);
3822 priv->bytes_out += gst_buffer_get_size (buf);
3823 if (GST_CLOCK_TIME_IS_VALID (duration)) {
3824 priv->time += duration;
3825 } else {
3826 /* FIXME : Use difference between current and previous outgoing
3827 * timestamp, and relate to difference between current and previous
3828 * bytes */
3829 /* better none than nothing valid */
3830 priv->time = GST_CLOCK_TIME_NONE;
3831 }
3832 GST_OBJECT_UNLOCK (decoder);
3833
3834 GST_DEBUG_OBJECT (decoder, "pushing buffer %p of size %" G_GSIZE_FORMAT ", "
3835 "PTS %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf,
3836 gst_buffer_get_size (buf),
3837 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
3838 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
3839
3840 /* we got data, so note things are looking up again, reduce
3841 * the error count, if there is one */
3842 if (G_UNLIKELY (priv->error_count))
3843 priv->error_count = 0;
3844
3845 #ifndef GST_DISABLE_DEBUG
3846 if (G_UNLIKELY (priv->last_reset_time != GST_CLOCK_TIME_NONE)) {
3847 GstClockTime elapsed = gst_util_get_timestamp () - priv->last_reset_time;
3848
3849 /* First buffer since reset, report how long we took */
3850 GST_INFO_OBJECT (decoder, "First buffer since flush took %" GST_TIME_FORMAT
3851 " to produce", GST_TIME_ARGS (elapsed));
3852 priv->last_reset_time = GST_CLOCK_TIME_NONE;
3853 }
3854 #endif
3855
3856 #ifdef OHOS_OPT_PERFORMANCE
3857 // ohos.opt.performance.0006: the output segment is calibrated to the first frame's PTS to improve performance.
3858 if (!priv->has_push_first_frame && GST_BUFFER_PTS (buf) != GST_CLOCK_TIME_NONE) {
3859 priv->has_push_first_frame = TRUE;
3860 GST_WARNING_OBJECT (decoder, "videodecoder push first frame");
3861
3862 decoder->output_segment.flags |= GST_SEGMENT_FLAG_FIRST_FRAME;
3863 decoder->output_segment.start = GST_BUFFER_PTS (buf);
3864 GstEvent *event = gst_event_new_segment (&decoder->output_segment);
3865 if (event) {
3866 ret = gst_pad_push_event (decoder->srcpad, event);
3867 }
3868 }
3869 #endif
3870 /* release STREAM_LOCK not to block upstream
3871 * while pushing buffer downstream */
3872 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3873 #ifdef OHOS_OPT_PERFORMANCE
3874 // ohos.opt.performance.0005
3875 GstStartTrace("Decoder:push buffer to sink");
3876 #endif
3877 ret = gst_pad_push (decoder->srcpad, buf);
3878 #ifdef OHOS_OPT_PERFORMANCE
3879 GstFinishTrace();
3880 #endif
3881 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3882
3883 done:
3884 return ret;
3885 }
3886
3887 /**
3888 * gst_video_decoder_add_to_frame:
3889 * @decoder: a #GstVideoDecoder
3890 * @n_bytes: the number of bytes to add
3891 *
3892 * Removes the next @n_bytes of input data and adds them to the currently parsed frame.
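*
* Typically called from a @parse implementation; a minimal sketch, assuming
* a hypothetical my_dec_find_frame_end() scanner that returns the size of
* one complete encoded frame, or 0 if more data is needed:
*
* ```c
* static GstFlowReturn
* my_dec_parse (GstVideoDecoder * dec, GstVideoCodecFrame * frame,
*     GstAdapter * adapter, gboolean at_eos)
* {
*   int frame_size = my_dec_find_frame_end (adapter, at_eos);
*
*   if (frame_size == 0)
*     return GST_VIDEO_DECODER_FLOW_NEED_DATA;  // wait for more input
*
*   gst_video_decoder_add_to_frame (dec, frame_size);
*   return gst_video_decoder_have_frame (dec);
* }
* ```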
3893 */
3894 void
3895 gst_video_decoder_add_to_frame (GstVideoDecoder * decoder, int n_bytes)
3896 {
3897 GstVideoDecoderPrivate *priv = decoder->priv;
3898 GstBuffer *buf;
3899
3900 GST_LOG_OBJECT (decoder, "add %d bytes to frame", n_bytes);
3901
3902 if (n_bytes == 0)
3903 return;
3904
3905 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3906 if (gst_adapter_available (priv->output_adapter) == 0) {
3907 priv->frame_offset =
3908 priv->input_offset - gst_adapter_available (priv->input_adapter);
3909 }
3910 buf = gst_adapter_take_buffer (priv->input_adapter, n_bytes);
3911
3912 gst_adapter_push (priv->output_adapter, buf);
3913 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3914 }
3915
3916 /**
3917 * gst_video_decoder_get_pending_frame_size:
3918 * @decoder: a #GstVideoDecoder
3919 *
3920 * Returns the number of bytes previously added to the current frame
3921 * by calling gst_video_decoder_add_to_frame().
3922 *
3923 * Returns: The number of bytes pending for the current frame
3924 *
3925 * Since: 1.4
3926 */
3927 gsize
3928 gst_video_decoder_get_pending_frame_size (GstVideoDecoder * decoder)
3929 {
3930 GstVideoDecoderPrivate *priv = decoder->priv;
3931 gsize ret;
3932
3933 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3934 ret = gst_adapter_available (priv->output_adapter);
3935 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3936
3937 GST_LOG_OBJECT (decoder, "Current pending frame has %" G_GSIZE_FORMAT " bytes",
3938 ret);
3939
3940 return ret;
3941 }
3942
3943 static guint64
3944 gst_video_decoder_get_frame_duration (GstVideoDecoder * decoder,
3945 GstVideoCodecFrame * frame)
3946 {
3947 GstVideoCodecState *state = decoder->priv->output_state;
3948
3949 /* it's possible that we don't have a state yet when we are dropping the
3950 * initial buffers */
3951 if (state == NULL)
3952 return GST_CLOCK_TIME_NONE;
3953
3954 if (state->info.fps_d == 0 || state->info.fps_n == 0) {
3955 return GST_CLOCK_TIME_NONE;
3956 }
3957
3958 /* FIXME: For interlaced frames this needs to take into account
3959 * the number of valid fields in the frame
3960 */
3961
3962 return gst_util_uint64_scale (GST_SECOND, state->info.fps_d,
3963 state->info.fps_n);
3964 }
3965
3966 /**
3967 * gst_video_decoder_have_frame:
3968 * @decoder: a #GstVideoDecoder
3969 *
3970 * Gathers all data collected for the currently parsed frame, attaches the
3971 * corresponding metadata and passes it along for further processing, i.e. @handle_frame.
3972 *
3973 * Returns: a #GstFlowReturn
3974 */
3975 GstFlowReturn
3976 gst_video_decoder_have_frame (GstVideoDecoder * decoder)
3977 {
3978 GstVideoDecoderPrivate *priv = decoder->priv;
3979 GstBuffer *buffer;
3980 int n_available;
3981 GstClockTime pts, dts, duration;
3982 guint flags;
3983 GstFlowReturn ret = GST_FLOW_OK;
3984
3985 GST_LOG_OBJECT (decoder, "have_frame at offset %" G_GUINT64_FORMAT,
3986 priv->frame_offset);
3987
3988 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3989
3990 n_available = gst_adapter_available (priv->output_adapter);
3991 if (n_available) {
3992 buffer = gst_adapter_take_buffer (priv->output_adapter, n_available);
3993 } else {
3994 buffer = gst_buffer_new_and_alloc (0);
3995 }
3996
3997 if (priv->current_frame->input_buffer) {
3998 gst_video_decoder_copy_metas (decoder, priv->current_frame,
3999 priv->current_frame->input_buffer, buffer);
4000 gst_buffer_unref (priv->current_frame->input_buffer);
4001 }
4002 priv->current_frame->input_buffer = buffer;
4003
4004 gst_video_decoder_get_buffer_info_at_offset (decoder,
4005 priv->frame_offset, &pts, &dts, &duration, &flags);
4006
4007 GST_BUFFER_PTS (buffer) = pts;
4008 GST_BUFFER_DTS (buffer) = dts;
4009 GST_BUFFER_DURATION (buffer) = duration;
4010 GST_BUFFER_FLAGS (buffer) = flags;
4011
4012 GST_LOG_OBJECT (decoder, "collected frame size %d, "
4013 "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dur %"
4014 GST_TIME_FORMAT, n_available, GST_TIME_ARGS (pts), GST_TIME_ARGS (dts),
4015 GST_TIME_ARGS (duration));
4016
4017 if (!GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DELTA_UNIT)) {
4018 GST_DEBUG_OBJECT (decoder, "Marking as sync point");
4019 GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (priv->current_frame);
4020 }
4021
4022 if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_CORRUPTED)) {
4023 GST_DEBUG_OBJECT (decoder, "Marking as corrupted");
4024 GST_VIDEO_CODEC_FRAME_FLAG_SET (priv->current_frame,
4025 GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED);
4026 }
4027
4028 /* In reverse playback, just capture and queue frames for later processing */
4029 if (decoder->input_segment.rate < 0.0) {
4030 priv->parse_gather =
4031 g_list_prepend (priv->parse_gather, priv->current_frame);
4032 priv->current_frame = NULL;
4033 } else {
4034 GstVideoCodecFrame *frame = priv->current_frame;
4035 frame->abidata.ABI.num_subframes++;
4036 /* In subframe mode, we keep a ref for ourselves
4037 * as this frame will be kept during the data collection
4038 * in parsed mode. The frame reference will be released by
4039 * finish_(sub)frame or drop_(sub)frame.*/
4040 if (gst_video_decoder_get_subframe_mode (decoder))
4041 gst_video_codec_frame_ref (priv->current_frame);
4042 else
4043 priv->current_frame = NULL;
4044
4045 /* Decode the frame, which gives away our ref */
4046 ret = gst_video_decoder_decode_frame (decoder, frame);
4047 }
4048
4049 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4050
4051 return ret;
4052 }
4053
4054 /* Pass the frame in priv->current_frame through the
4055 * handle_frame() callback for decoding and passing to gvd_finish_frame(),
4056 * or dropping by passing to gvd_drop_frame() */
4057 static GstFlowReturn
4058 gst_video_decoder_decode_frame (GstVideoDecoder * decoder,
4059 GstVideoCodecFrame * frame)
4060 {
4061 GstVideoDecoderPrivate *priv = decoder->priv;
4062 GstVideoDecoderClass *decoder_class;
4063 GstFlowReturn ret = GST_FLOW_OK;
4064
4065 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
4066
4067 /* FIXME : This should only have to be checked once (either the subclass has an
4068 * implementation, or it doesn't) */
4069 g_return_val_if_fail (decoder_class->handle_frame != NULL, GST_FLOW_ERROR);
4070 g_return_val_if_fail (frame != NULL, GST_FLOW_ERROR);
4071
4072 frame->pts = GST_BUFFER_PTS (frame->input_buffer);
4073 frame->dts = GST_BUFFER_DTS (frame->input_buffer);
4074 frame->duration = GST_BUFFER_DURATION (frame->input_buffer);
4075 frame->deadline =
4076 gst_segment_to_running_time (&decoder->input_segment, GST_FORMAT_TIME,
4077 frame->pts);
4078
4079 /* For keyframes, PTS = DTS + constant_offset, usually 0 to 3 frame
4080 * durations. */
4081 /* FIXME upstream can be quite wrong about the keyframe aspect,
4082 * so we could be going off here as well,
4083 * maybe let subclass decide if it really is/was a keyframe */
4084 if (GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
4085 priv->distance_from_sync = 0;
4086
4087 GST_OBJECT_LOCK (decoder);
4088 priv->request_sync_point_flags &=
4089 ~GST_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT;
4090 if (priv->request_sync_point_frame_number == REQUEST_SYNC_POINT_PENDING)
4091 priv->request_sync_point_frame_number = frame->system_frame_number;
4092 GST_OBJECT_UNLOCK (decoder);
4093
4094 if (GST_CLOCK_TIME_IS_VALID (frame->pts)
4095 && GST_CLOCK_TIME_IS_VALID (frame->dts)) {
4096 /* just in case they are not equal, as they ideally would be,
4097 * e.g. quicktime has a (positive) delta approach */
4098 priv->pts_delta = frame->pts - frame->dts;
4099 GST_DEBUG_OBJECT (decoder, "PTS delta %d ms",
4100 (gint) (priv->pts_delta / GST_MSECOND));
4101 }
4102 } else {
4103 if (priv->distance_from_sync == -1 && priv->automatic_request_sync_points) {
4104 GST_DEBUG_OBJECT (decoder,
4105 "Didn't receive a keyframe yet, requesting sync point");
4106 gst_video_decoder_request_sync_point (decoder, frame,
4107 priv->automatic_request_sync_point_flags);
4108 }
4109
4110 GST_OBJECT_LOCK (decoder);
4111 if ((priv->needs_sync_point && priv->distance_from_sync == -1)
4112 || (priv->request_sync_point_flags &
4113 GST_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT)) {
4114 GST_WARNING_OBJECT (decoder,
4115 "Subclass requires a sync point but we didn't receive one yet, discarding input");
4116 GST_OBJECT_UNLOCK (decoder);
4117 if (priv->automatic_request_sync_points) {
4118 gst_video_decoder_request_sync_point (decoder, frame,
4119 priv->automatic_request_sync_point_flags);
4120 }
4121 gst_video_decoder_release_frame (decoder, frame);
4122 return GST_FLOW_OK;
4123 }
4124 GST_OBJECT_UNLOCK (decoder);
4125
4126 priv->distance_from_sync++;
4127 }
4128
4129 frame->distance_from_sync = priv->distance_from_sync;
4130
4131 if (frame->abidata.ABI.num_subframes == 1) {
4132 frame->abidata.ABI.ts = frame->dts;
4133 frame->abidata.ABI.ts2 = frame->pts;
4134 }
4135
4136 GST_LOG_OBJECT (decoder,
4137 "frame %p PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dist %d",
4138 frame, GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->dts),
4139 frame->distance_from_sync);
4140 /* FIXME: suboptimal way to add a unique frame to the list, in case of subframe mode. */
4141 if (!g_queue_find (&priv->frames, frame)) {
4142 g_queue_push_tail (&priv->frames, gst_video_codec_frame_ref (frame));
4143 } else {
4144 GST_LOG_OBJECT (decoder,
4145 "Do not add an existing frame used to decode subframes");
4146 }
4147
4148 if (priv->frames.length > 10) {
4149 GST_DEBUG_OBJECT (decoder, "decoder frame list getting long: %d frames, "
4150 "possible internal leaking?", priv->frames.length);
4151 }
4152
4153 #ifdef OHOS_OPT_PERFORMANCE // ohos.opt.performance.0001: first frame decoded cost time
4154 if (!priv->has_recv_first_key_frame) {
4155 priv->has_recv_first_key_frame = TRUE;
4156 GST_WARNING_OBJECT (decoder, "videodecoder recv first key frame");
4157 }
4158 #endif
4159 #ifdef OHOS_OPT_COMPAT
4160 // ohos.opt.compat.0053
4161 if (priv->has_push_first_frame && priv->only_one_frame_required) {
4162 gst_video_decoder_release_frame(decoder, frame);
4163 GST_DEBUG_OBJECT(decoder, "only need one frame, release!");
4164 return GST_FLOW_EOS;
4165 }
4166 #endif
4167 /* do something with frame */
4168 ret = decoder_class->handle_frame (decoder, frame);
4169 if (ret != GST_FLOW_OK)
4170 GST_DEBUG_OBJECT (decoder, "flow error %s", gst_flow_get_name (ret));
4171
4172 /* the frame has either been added to parse_gather or sent to
4173 handle_frame so there is no need to unref it */
4174 return ret;
4175 }
4176
4177
4178 /**
4179 * gst_video_decoder_get_output_state:
4180 * @decoder: a #GstVideoDecoder
4181 *
4182 * Get the #GstVideoCodecState currently describing the output stream.
4183 *
4184 * Returns: (transfer full): #GstVideoCodecState describing format of video data.
4185 */
4186 GstVideoCodecState *
4187 gst_video_decoder_get_output_state (GstVideoDecoder * decoder)
4188 {
4189 GstVideoCodecState *state = NULL;
4190
4191 GST_OBJECT_LOCK (decoder);
4192 if (decoder->priv->output_state)
4193 state = gst_video_codec_state_ref (decoder->priv->output_state);
4194 GST_OBJECT_UNLOCK (decoder);
4195
4196 return state;
4197 }
4198
4199 static GstVideoCodecState *
4200 _set_interlaced_output_state (GstVideoDecoder * decoder,
4201 GstVideoFormat fmt, GstVideoInterlaceMode interlace_mode, guint width,
4202 guint height, GstVideoCodecState * reference, gboolean copy_interlace_mode)
4203 {
4204 GstVideoDecoderPrivate *priv = decoder->priv;
4205 GstVideoCodecState *state;
4206
4207 g_assert ((copy_interlace_mode
4208 && interlace_mode == GST_VIDEO_INTERLACE_MODE_PROGRESSIVE)
4209 || !copy_interlace_mode);
4210
4211 GST_DEBUG_OBJECT (decoder,
4212 "fmt:%d, width:%d, height:%d, interlace-mode: %s, reference:%p", fmt,
4213 width, height, gst_video_interlace_mode_to_string (interlace_mode),
4214 reference);
4215
4216 /* Create the new output state */
4217 state =
4218 _new_output_state (fmt, interlace_mode, width, height, reference,
4219 copy_interlace_mode);
4220 if (!state)
4221 return NULL;
4222
4223 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
4224
4225 GST_OBJECT_LOCK (decoder);
4226 /* Replace existing output state by new one */
4227 if (priv->output_state)
4228 gst_video_codec_state_unref (priv->output_state);
4229 priv->output_state = gst_video_codec_state_ref (state);
4230
4231 if (priv->output_state != NULL && priv->output_state->info.fps_n > 0) {
4232 priv->qos_frame_duration =
4233 gst_util_uint64_scale (GST_SECOND, priv->output_state->info.fps_d,
4234 priv->output_state->info.fps_n);
4235 } else {
4236 priv->qos_frame_duration = 0;
4237 }
4238 priv->output_state_changed = TRUE;
4239 GST_OBJECT_UNLOCK (decoder);
4240
4241 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4242
4243 return state;
4244 }
4245
4246 /**
4247 * gst_video_decoder_set_output_state:
4248 * @decoder: a #GstVideoDecoder
4249 * @fmt: a #GstVideoFormat
4250 * @width: The width in pixels
4251 * @height: The height in pixels
4252 * @reference: (allow-none) (transfer none): An optional reference #GstVideoCodecState
4253 *
4254 * Creates a new #GstVideoCodecState with the specified @fmt, @width and @height
4255 * as the output state for the decoder.
4256 * Any previously set output state on @decoder will be replaced by the newly
4257 * created one.
4258 *
4259 * If the subclass wishes to copy over existing fields (like pixel aspect ratio,
4260 * or framerate) from an existing #GstVideoCodecState, it can be provided as a
4261 * @reference.
4262 *
4263 * If the subclass wishes to override some fields from the output state (like
4264 * pixel-aspect-ratio or framerate) it can do so on the returned #GstVideoCodecState.
4265 *
4266 * The new output state will only take effect (set on pads and buffers) starting
4267 * from the next call to #gst_video_decoder_finish_frame().
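*
* A typical call site is the subclass' @set_format implementation; a minimal
* sketch (illustrative only; the 8-bit 4:2:0 output format is an assumption):
*
* ```c
* static gboolean
* my_dec_set_format (GstVideoDecoder * dec, GstVideoCodecState * state)
* {
*   GstVideoCodecState *out;
*
*   // Pass the input state as reference to inherit framerate,
*   // pixel-aspect-ratio, etc.
*   out = gst_video_decoder_set_output_state (dec, GST_VIDEO_FORMAT_I420,
*       state->info.width, state->info.height, state);
*   gst_video_codec_state_unref (out);
*   return TRUE;
* }
* ```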
4268 *
4269 * Returns: (transfer full): the newly configured output state.
4270 */
4271 GstVideoCodecState *
4272 gst_video_decoder_set_output_state (GstVideoDecoder * decoder,
4273 GstVideoFormat fmt, guint width, guint height,
4274 GstVideoCodecState * reference)
4275 {
4276 return _set_interlaced_output_state (decoder, fmt,
4277 GST_VIDEO_INTERLACE_MODE_PROGRESSIVE, width, height, reference, TRUE);
4278 }
4279
4280 /**
4281 * gst_video_decoder_set_interlaced_output_state:
4282 * @decoder: a #GstVideoDecoder
4283 * @fmt: a #GstVideoFormat
4284 * @width: The width in pixels
4285 * @height: The height in pixels
4286 * @interlace_mode: A #GstVideoInterlaceMode
4287 * @reference: (allow-none) (transfer none): An optional reference #GstVideoCodecState
4288 *
4289 * Same as #gst_video_decoder_set_output_state() but also allows you to set
4290 * the interlacing mode.
4291 *
4292 * Returns: (transfer full): the newly configured output state.
4293 *
4294 * Since: 1.16.
4295 */
4296 GstVideoCodecState *
4297 gst_video_decoder_set_interlaced_output_state (GstVideoDecoder * decoder,
4298 GstVideoFormat fmt, GstVideoInterlaceMode interlace_mode, guint width,
4299 guint height, GstVideoCodecState * reference)
4300 {
4301 return _set_interlaced_output_state (decoder, fmt, interlace_mode, width,
4302 height, reference, FALSE);
4303 }
4304
4305
4306 /**
4307 * gst_video_decoder_get_oldest_frame:
4308 * @decoder: a #GstVideoDecoder
4309 *
4310 * Get the oldest pending unfinished #GstVideoCodecFrame
4311 *
4312 * Returns: (transfer full): oldest pending unfinished #GstVideoCodecFrame.
4313 */
4314 GstVideoCodecFrame *
4315 gst_video_decoder_get_oldest_frame (GstVideoDecoder * decoder)
4316 {
4317 GstVideoCodecFrame *frame = NULL;
4318
4319 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
4320 if (decoder->priv->frames.head)
4321 frame = gst_video_codec_frame_ref (decoder->priv->frames.head->data);
4322 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4323
4324 return (GstVideoCodecFrame *) frame;
4325 }
4326
4327 /**
4328 * gst_video_decoder_get_frame:
4329 * @decoder: a #GstVideoDecoder
4330 * @frame_number: system_frame_number of a frame
4331 *
4332 * Get a pending unfinished #GstVideoCodecFrame
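*
* This is useful when decoding completes asynchronously and results are
* identified by their system frame number; a minimal sketch (illustrative
* only; my_dec_on_picture_done() is a hypothetical completion callback):
*
* ```c
* static void
* my_dec_on_picture_done (GstVideoDecoder * dec, int frame_number)
* {
*   GstVideoCodecFrame *frame;
*
*   frame = gst_video_decoder_get_frame (dec, frame_number);
*   if (frame == NULL)
*     return;  // already finished or dropped
*
*   // ... set frame->output_buffer from the decoded picture ...
*   gst_video_decoder_finish_frame (dec, frame);
* }
* ```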
4333 *
4334 * Returns: (transfer full): pending unfinished #GstVideoCodecFrame identified by @frame_number.
4335 */
4336 GstVideoCodecFrame *
4337 gst_video_decoder_get_frame (GstVideoDecoder * decoder, int frame_number)
4338 {
4339 GList *g;
4340 GstVideoCodecFrame *frame = NULL;
4341
4342 GST_DEBUG_OBJECT (decoder, "frame_number : %d", frame_number);
4343
4344 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
4345 for (g = decoder->priv->frames.head; g; g = g->next) {
4346 GstVideoCodecFrame *tmp = g->data;
4347
4348 if (tmp->system_frame_number == frame_number) {
4349 frame = gst_video_codec_frame_ref (tmp);
4350 break;
4351 }
4352 }
4353 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4354
4355 return frame;
4356 }
4357
4358 /**
4359 * gst_video_decoder_get_frames:
4360 * @decoder: a #GstVideoDecoder
4361 *
4362 * Get all pending unfinished #GstVideoCodecFrame
4363 *
4364 * Returns: (transfer full) (element-type GstVideoCodecFrame): pending unfinished #GstVideoCodecFrame.
4365 */
4366 GList *
4367 gst_video_decoder_get_frames (GstVideoDecoder * decoder)
4368 {
4369 GList *frames;
4370
4371 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
4372 frames =
4373 g_list_copy_deep (decoder->priv->frames.head,
4374 (GCopyFunc) gst_video_codec_frame_ref, NULL);
4375 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4376
4377 return frames;
4378 }
4379
4380 static gboolean
4381 gst_video_decoder_decide_allocation_default (GstVideoDecoder * decoder,
4382 GstQuery * query)
4383 {
4384 GstCaps *outcaps = NULL;
4385 GstBufferPool *pool = NULL;
4386 guint size, min, max;
4387 GstAllocator *allocator = NULL;
4388 GstAllocationParams params;
4389 GstStructure *config;
4390 gboolean update_pool, update_allocator;
4391 GstVideoInfo vinfo;
4392
4393 gst_query_parse_allocation (query, &outcaps, NULL);
4394 gst_video_info_init (&vinfo);
4395 if (outcaps)
4396 gst_video_info_from_caps (&vinfo, outcaps);
4397
4398 /* we got configuration from our peer or the decide_allocation method,
4399 * parse them */
4400 if (gst_query_get_n_allocation_params (query) > 0) {
4401 /* try the allocator */
4402 gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
4403 update_allocator = TRUE;
4404 } else {
4405 allocator = NULL;
4406 gst_allocation_params_init (&params);
4407 update_allocator = FALSE;
4408 }
4409
4410 if (gst_query_get_n_allocation_pools (query) > 0) {
4411 gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);
4412 size = MAX (size, vinfo.size);
4413 update_pool = TRUE;
4414 } else {
4415 pool = NULL;
4416 size = vinfo.size;
4417 min = max = 0;
4418
4419 update_pool = FALSE;
4420 }
4421
4422 if (pool == NULL) {
4423 /* no pool, we can make our own */
4424 GST_DEBUG_OBJECT (decoder, "no pool, making new pool");
4425 pool = gst_video_buffer_pool_new ();
4426 }
4427
4428 /* now configure */
4429 config = gst_buffer_pool_get_config (pool);
4430 gst_buffer_pool_config_set_params (config, outcaps, size, min, max);
4431 gst_buffer_pool_config_set_allocator (config, allocator, &params);
4432
4433 GST_DEBUG_OBJECT (decoder,
4434 "setting config %" GST_PTR_FORMAT " in pool %" GST_PTR_FORMAT, config,
4435 pool);
4436 if (!gst_buffer_pool_set_config (pool, config)) {
4437 config = gst_buffer_pool_get_config (pool);
4438
4439 /* If the changes are not acceptable, fall back to a generic pool */
4440 if (!gst_buffer_pool_config_validate_params (config, outcaps, size, min,
4441 max)) {
4442 GST_DEBUG_OBJECT (decoder, "unsupported pool, making new pool");
4443
4444 gst_object_unref (pool);
4445 pool = gst_video_buffer_pool_new ();
4446 gst_buffer_pool_config_set_params (config, outcaps, size, min, max);
4447 gst_buffer_pool_config_set_allocator (config, allocator, &params);
4448 }
4449
4450 if (!gst_buffer_pool_set_config (pool, config))
4451 goto config_failed;
4452 }
4453
4454 if (update_allocator)
4455 gst_query_set_nth_allocation_param (query, 0, allocator, &params);
4456 else
4457 gst_query_add_allocation_param (query, allocator, &params);
4458 if (allocator)
4459 gst_object_unref (allocator);
4460
4461 if (update_pool)
4462 gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max);
4463 else
4464 gst_query_add_allocation_pool (query, pool, size, min, max);
4465
4466 if (pool)
4467 gst_object_unref (pool);
4468
4469 return TRUE;
4470
4471 config_failed:
4472 if (allocator)
4473 gst_object_unref (allocator);
4474 if (pool)
4475 gst_object_unref (pool);
4476 GST_ELEMENT_ERROR (decoder, RESOURCE, SETTINGS,
4477 ("Failed to configure the buffer pool"),
4478 ("Configuration is most likely invalid, please report this issue."));
4479 return FALSE;
4480 }
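/* Example: a subclass override of GstVideoDecoderClass::decide_allocation
 * usually chains up to this default first and then adjusts the negotiated
 * pool; a minimal sketch (illustrative only, the minimum of 4 buffers for
 * reference frames is an assumption):
 *
 *   static gboolean
 *   my_dec_decide_allocation (GstVideoDecoder * dec, GstQuery * query)
 *   {
 *     GstBufferPool *pool = NULL;
 *     guint size, min, max;
 *
 *     if (!GST_VIDEO_DECODER_CLASS (parent_class)->decide_allocation (dec,
 *             query))
 *       return FALSE;
 *
 *     // The default always leaves at least one pool in the query.
 *     gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);
 *     min = MAX (min, 4);
 *     gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max);
 *     if (pool)
 *       gst_object_unref (pool);
 *     return TRUE;
 *   }
 */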
4481
4482 static gboolean
4483 gst_video_decoder_propose_allocation_default (GstVideoDecoder * decoder,
4484 GstQuery * query)
4485 {
4486 return TRUE;
4487 }
4488
4489 static gboolean
4490 gst_video_decoder_negotiate_pool (GstVideoDecoder * decoder, GstCaps * caps)
4491 {
4492 GstVideoDecoderClass *klass;
4493 GstQuery *query = NULL;
4494 GstBufferPool *pool = NULL;
4495 GstAllocator *allocator;
4496 GstAllocationParams params;
4497 gboolean ret = TRUE;
4498
4499 klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
4500
4501 query = gst_query_new_allocation (caps, TRUE);
4502
4503 GST_DEBUG_OBJECT (decoder, "do query ALLOCATION");
4504
4505 if (!gst_pad_peer_query (decoder->srcpad, query)) {
4506 GST_DEBUG_OBJECT (decoder, "didn't get downstream ALLOCATION hints");
4507 }
4508
4509 g_assert (klass->decide_allocation != NULL);
4510 ret = klass->decide_allocation (decoder, query);
4511
4512 GST_DEBUG_OBJECT (decoder, "ALLOCATION (%d) params: %" GST_PTR_FORMAT, ret,
4513 query);
4514
4515 if (!ret)
4516 goto no_decide_allocation;
4517
4518 /* we got configuration from our peer or the decide_allocation method,
4519 * parse them */
4520 if (gst_query_get_n_allocation_params (query) > 0) {
4521 gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
4522 } else {
4523 allocator = NULL;
4524 gst_allocation_params_init (&params);
4525 }
4526
4527 if (gst_query_get_n_allocation_pools (query) > 0)
4528 gst_query_parse_nth_allocation_pool (query, 0, &pool, NULL, NULL, NULL);
4529 if (!pool) {
4530 if (allocator)
4531 gst_object_unref (allocator);
4532 ret = FALSE;
4533 goto no_decide_allocation;
4534 }
4535
4536 if (decoder->priv->allocator)
4537 gst_object_unref (decoder->priv->allocator);
4538 decoder->priv->allocator = allocator;
4539 decoder->priv->params = params;
4540
4541 if (decoder->priv->pool) {
4542 /* do not set the bufferpool to inactive here, it will be done
4543 * in its finalize function. As videodecoder does late renegotiation
4544 * it might happen that some element downstream is already using this
4545 * same bufferpool and deactivating it will make it fail.
4546 * Happens when a downstream element changes from passthrough to
4547 * non-passthrough and gets this same bufferpool to use */
4548 GST_DEBUG_OBJECT (decoder, "unref pool %" GST_PTR_FORMAT,
4549 decoder->priv->pool);
4550 gst_object_unref (decoder->priv->pool);
4551 }
4552 decoder->priv->pool = pool;
4553
4554 /* and activate */
4555 GST_DEBUG_OBJECT (decoder, "activate pool %" GST_PTR_FORMAT, pool);
4556 gst_buffer_pool_set_active (pool, TRUE);
4557
4558 done:
4559 if (query)
4560 gst_query_unref (query);
4561
4562 return ret;
4563
4564 /* Errors */
4565 no_decide_allocation:
4566 {
4567 GST_WARNING_OBJECT (decoder, "Subclass failed to decide allocation");
4568 goto done;
4569 }
4570 }
4571
4572 static gboolean
4573 gst_video_decoder_negotiate_default (GstVideoDecoder * decoder)
4574 {
4575 GstVideoCodecState *state = decoder->priv->output_state;
4576 gboolean ret = TRUE;
4577 GstVideoCodecFrame *frame;
4578 GstCaps *prevcaps;
4579 GstCaps *incaps;
4580
4581 if (!state) {
4582 GST_DEBUG_OBJECT (decoder,
4583 "Trying to negotiate the pool with out setting the o/p format");
4584 ret = gst_video_decoder_negotiate_pool (decoder, NULL);
4585 goto done;
4586 }
4587
4588 g_return_val_if_fail (GST_VIDEO_INFO_WIDTH (&state->info) != 0, FALSE);
4589 g_return_val_if_fail (GST_VIDEO_INFO_HEIGHT (&state->info) != 0, FALSE);
4590
4591 /* If the base class didn't set any multiview params, assume mono
4592 * now */
4593 if (GST_VIDEO_INFO_MULTIVIEW_MODE (&state->info) ==
4594 GST_VIDEO_MULTIVIEW_MODE_NONE) {
4595 GST_VIDEO_INFO_MULTIVIEW_MODE (&state->info) =
4596 GST_VIDEO_MULTIVIEW_MODE_MONO;
4597 GST_VIDEO_INFO_MULTIVIEW_FLAGS (&state->info) =
4598 GST_VIDEO_MULTIVIEW_FLAGS_NONE;
4599 }
4600
4601 GST_DEBUG_OBJECT (decoder, "output_state par %d/%d fps %d/%d",
4602 state->info.par_n, state->info.par_d,
4603 state->info.fps_n, state->info.fps_d);
4604
4605 if (state->caps == NULL)
4606 state->caps = gst_video_info_to_caps (&state->info);
4607
4608 incaps = gst_pad_get_current_caps (GST_VIDEO_DECODER_SINK_PAD (decoder));
4609 if (incaps) {
4610 GstStructure *in_struct;
4611
4612 in_struct = gst_caps_get_structure (incaps, 0);
4613 if (gst_structure_has_field (in_struct, "mastering-display-info") ||
4614 gst_structure_has_field (in_struct, "content-light-level")) {
4615 const gchar *s;
4616
4617 /* prefer upstream information */
4618 state->caps = gst_caps_make_writable (state->caps);
4619 if ((s = gst_structure_get_string (in_struct, "mastering-display-info"))) {
4620 gst_caps_set_simple (state->caps,
4621 "mastering-display-info", G_TYPE_STRING, s, NULL);
4622 }
4623
4624 if ((s = gst_structure_get_string (in_struct, "content-light-level"))) {
4625 gst_caps_set_simple (state->caps,
4626 "content-light-level", G_TYPE_STRING, s, NULL);
4627 }
4628 }
4629
4630 gst_caps_unref (incaps);
4631 }
4632
4633 if (state->allocation_caps == NULL)
4634 state->allocation_caps = gst_caps_ref (state->caps);
4635
4636 GST_DEBUG_OBJECT (decoder, "setting caps %" GST_PTR_FORMAT, state->caps);
4637
4638 /* Push all pending pre-caps events of the oldest frame before
4639 * setting caps */
4640 frame = decoder->priv->frames.head ? decoder->priv->frames.head->data : NULL;
4641 if (frame || decoder->priv->current_frame_events) {
4642 GList **events, *l;
4643
4644 if (frame) {
4645 events = &frame->events;
4646 } else {
4647 events = &decoder->priv->current_frame_events;
4648 }
4649
4650 for (l = g_list_last (*events); l;) {
4651 GstEvent *event = GST_EVENT (l->data);
4652 GList *tmp;
4653
4654 if (GST_EVENT_TYPE (event) < GST_EVENT_CAPS) {
4655 gst_video_decoder_push_event (decoder, event);
4656 tmp = l;
4657 l = l->prev;
4658 *events = g_list_delete_link (*events, tmp);
4659 } else {
4660 l = l->prev;
4661 }
4662 }
4663 }
4664
4665 prevcaps = gst_pad_get_current_caps (decoder->srcpad);
4666 if (!prevcaps || !gst_caps_is_equal (prevcaps, state->caps)) {
4667 if (!prevcaps) {
4668 GST_DEBUG_OBJECT (decoder, "decoder src pad has currently NULL caps");
4669 }
4670 ret = gst_pad_set_caps (decoder->srcpad, state->caps);
4671 } else {
4672 ret = TRUE;
4673 GST_DEBUG_OBJECT (decoder,
4674 "current src pad and output state caps are the same");
4675 }
4676 if (prevcaps)
4677 gst_caps_unref (prevcaps);
4678
4679 if (!ret)
4680 goto done;
4681 decoder->priv->output_state_changed = FALSE;
4682 /* Negotiate pool */
4683 ret = gst_video_decoder_negotiate_pool (decoder, state->allocation_caps);
4684
4685 done:
4686 return ret;
4687 }
4688
4689 static gboolean
4690 gst_video_decoder_negotiate_unlocked (GstVideoDecoder * decoder)
4691 {
4692 GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
4693 gboolean ret = TRUE;
4694
4695 if (G_LIKELY (klass->negotiate))
4696 ret = klass->negotiate (decoder);
4697
4698 return ret;
4699 }
4700
4701 /**
4702 * gst_video_decoder_negotiate:
4703 * @decoder: a #GstVideoDecoder
4704 *
4705 * Negotiate with downstream elements to currently configured #GstVideoCodecState.
4706 * Unmarks GST_PAD_FLAG_NEED_RECONFIGURE in any case, but marks it again if
4707 * negotiation fails.
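*
* A minimal sketch of explicit renegotiation after the output state changed
* (illustrative only; dec, new_width and new_height are assumed to be in
* scope, e.g. inside a @handle_frame implementation):
*
* ```c
* GstVideoCodecState *out;
*
* out = gst_video_decoder_set_output_state (dec, GST_VIDEO_FORMAT_NV12,
*     new_width, new_height, NULL);
* gst_video_codec_state_unref (out);
* if (!gst_video_decoder_negotiate (dec))
*   return GST_FLOW_NOT_NEGOTIATED;
* ```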
4708 *
4709 * Returns: %TRUE if the negotiation succeeded, else %FALSE.
4710 */
4711 gboolean
4712 gst_video_decoder_negotiate (GstVideoDecoder * decoder)
4713 {
4714 GstVideoDecoderClass *klass;
4715 gboolean ret = TRUE;
4716
4717 g_return_val_if_fail (GST_IS_VIDEO_DECODER (decoder), FALSE);
4718
4719 klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
4720
4721 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
4722 gst_pad_check_reconfigure (decoder->srcpad);
4723 if (klass->negotiate) {
4724 ret = klass->negotiate (decoder);
4725 if (!ret)
4726 gst_pad_mark_reconfigure (decoder->srcpad);
4727 }
4728 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4729
4730 return ret;
4731 }
4732
4733 /**
4734 * gst_video_decoder_allocate_output_buffer:
4735 * @decoder: a #GstVideoDecoder
4736 *
4737 * Helper function that allocates a buffer to hold a video frame for @decoder's
4738 * current #GstVideoCodecState.
4739 *
4740 * You should use gst_video_decoder_allocate_output_frame() instead of this
4741 * function, if possible at all.
4742 *
4743 * Returns: (transfer full): allocated buffer, or NULL if no buffer could be
4744 * allocated (e.g. when downstream is flushing or shutting down)
4745 */
4746 GstBuffer *
4747 gst_video_decoder_allocate_output_buffer (GstVideoDecoder * decoder)
4748 {
4749 GstFlowReturn flow;
4750 GstBuffer *buffer = NULL;
4751 gboolean needs_reconfigure = FALSE;
4752
4753 GST_DEBUG ("alloc src buffer");
4754
4755 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
4756 needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
4757 if (G_UNLIKELY (!decoder->priv->output_state
4758 || decoder->priv->output_state_changed || needs_reconfigure)) {
4759 if (!gst_video_decoder_negotiate_unlocked (decoder)) {
4760 if (decoder->priv->output_state) {
4761 GST_DEBUG_OBJECT (decoder, "Failed to negotiate, fallback allocation");
4762 gst_pad_mark_reconfigure (decoder->srcpad);
4763 goto fallback;
4764 } else {
4765 GST_DEBUG_OBJECT (decoder, "Failed to negotiate, output_buffer=NULL");
4766 goto failed_allocation;
4767 }
4768 }
4769 }
4770
4771 flow = gst_buffer_pool_acquire_buffer (decoder->priv->pool, &buffer, NULL);
4772
4773 if (flow != GST_FLOW_OK) {
4774 GST_INFO_OBJECT (decoder, "couldn't allocate output buffer, flow %s",
4775 gst_flow_get_name (flow));
4776 if (decoder->priv->output_state && decoder->priv->output_state->info.size)
4777 goto fallback;
4778 else
4779 goto failed_allocation;
4780 }
4781 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4782
4783 return buffer;
4784
4785 fallback:
4786 GST_INFO_OBJECT (decoder,
4787 "Fallback allocation, creating new buffer which doesn't belongs to any buffer pool");
4788 buffer =
4789 gst_buffer_new_allocate (NULL, decoder->priv->output_state->info.size,
4790 NULL);
4791
4792 failed_allocation:
4793 GST_ERROR_OBJECT (decoder, "Failed to allocate the buffer..");
4794 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4795
4796 return buffer;
4797 }
4798
4799 /**
4800 * gst_video_decoder_allocate_output_frame:
4801 * @decoder: a #GstVideoDecoder
4802 * @frame: a #GstVideoCodecFrame
4803 *
4804 * Helper function that allocates a buffer to hold a video frame for @decoder's
4805 * current #GstVideoCodecState. Subclass should already have configured video
4806 * state and set src pad caps.
4807 *
4808 * The buffer allocated here is owned by the frame and you should only
4809 * keep references to the frame, not the buffer.
4810 *
4811 * Returns: %GST_FLOW_OK if an output buffer could be allocated
4812 */
4813 GstFlowReturn
4814 gst_video_decoder_allocate_output_frame (GstVideoDecoder *
4815 decoder, GstVideoCodecFrame * frame)
4816 {
4817 return gst_video_decoder_allocate_output_frame_with_params (decoder, frame,
4818 NULL);
4819 }
4820
4821 /**
4822 * gst_video_decoder_allocate_output_frame_with_params:
4823 * @decoder: a #GstVideoDecoder
4824 * @frame: a #GstVideoCodecFrame
4825 * @params: a #GstBufferPoolAcquireParams
4826 *
4827 * Same as #gst_video_decoder_allocate_output_frame except it allows passing
4828 * #GstBufferPoolAcquireParams to the underlying gst_buffer_pool_acquire_buffer() call.
4829 *
4830 * Returns: %GST_FLOW_OK if an output buffer could be allocated
4831 *
4832 * Since: 1.12
4833 */
4834 GstFlowReturn
4835 gst_video_decoder_allocate_output_frame_with_params (GstVideoDecoder *
4836 decoder, GstVideoCodecFrame * frame, GstBufferPoolAcquireParams * params)
4837 {
4838 GstFlowReturn flow_ret;
4839 GstVideoCodecState *state;
4840 int num_bytes;
4841 gboolean needs_reconfigure = FALSE;
4842
4843 g_return_val_if_fail (decoder->priv->output_state, GST_FLOW_NOT_NEGOTIATED);
4844 g_return_val_if_fail (frame->output_buffer == NULL, GST_FLOW_ERROR);
4845
4846 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
4847
4848 state = decoder->priv->output_state;
4849 if (state == NULL) {
4850 g_warning ("Output state should be set before allocating frame");
4851 goto error;
4852 }
4853 num_bytes = GST_VIDEO_INFO_SIZE (&state->info);
4854 if (num_bytes == 0) {
4855 g_warning ("Frame size should not be 0");
4856 goto error;
4857 }
4858
4859 needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
4860 if (G_UNLIKELY (decoder->priv->output_state_changed || needs_reconfigure)) {
4861 if (!gst_video_decoder_negotiate_unlocked (decoder)) {
4862 gst_pad_mark_reconfigure (decoder->srcpad);
4863 if (GST_PAD_IS_FLUSHING (decoder->srcpad)) {
4864 GST_DEBUG_OBJECT (decoder,
4865 "Failed to negotiate a pool: pad is flushing");
4866 goto flushing;
4867 } else if (!decoder->priv->pool || decoder->priv->output_state_changed) {
4868 GST_DEBUG_OBJECT (decoder,
4869 "Failed to negotiate a pool and no previous pool to reuse");
4870 goto error;
4871 } else {
4872 GST_DEBUG_OBJECT (decoder,
4873 "Failed to negotiate a pool, falling back to the previous pool");
4874 }
4875 }
4876 }
4877
4878 GST_LOG_OBJECT (decoder, "alloc buffer size %d", num_bytes);
4879
4880 flow_ret = gst_buffer_pool_acquire_buffer (decoder->priv->pool,
4881 &frame->output_buffer, params);
4882
4883 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4884
4885 return flow_ret;
4886
4887 flushing:
4888 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4889 return GST_FLOW_FLUSHING;
4890
4891 error:
4892 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4893 return GST_FLOW_ERROR;
4894 }

/**
 * gst_video_decoder_get_max_decode_time:
 * @decoder: a #GstVideoDecoder
 * @frame: a #GstVideoCodecFrame
 *
 * Determines the maximum possible decoding time for @frame that will
 * allow it to decode and arrive in time (as determined by QoS events).
 * In particular, a negative result means decoding in time is no longer
 * possible and the frame should therefore be decoded (or dropped) as soon
 * as possible.
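 *
 * A subclass might use it in its #GstVideoDecoderClass:handle_frame to skip
 * late frames, along these lines (one possible QoS policy, sketched):
 * |[<!-- language="C" -->
 * if (gst_video_decoder_get_max_decode_time (decoder, frame) < 0) {
 *   // already too late: drop the frame instead of decoding it
 *   return gst_video_decoder_drop_frame (decoder, frame);
 * }
 * ]|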
 *
 * Returns: max decoding time.
 */
GstClockTimeDiff
gst_video_decoder_get_max_decode_time (GstVideoDecoder *
    decoder, GstVideoCodecFrame * frame)
{
  GstClockTimeDiff deadline;
  GstClockTime earliest_time;

  GST_OBJECT_LOCK (decoder);
  earliest_time = decoder->priv->earliest_time;
  if (GST_CLOCK_TIME_IS_VALID (earliest_time)
      && GST_CLOCK_TIME_IS_VALID (frame->deadline))
    deadline = GST_CLOCK_DIFF (earliest_time, frame->deadline);
  else
    deadline = G_MAXINT64;

  GST_LOG_OBJECT (decoder, "earliest %" GST_TIME_FORMAT
      ", frame deadline %" GST_TIME_FORMAT ", deadline %" GST_STIME_FORMAT,
      GST_TIME_ARGS (earliest_time), GST_TIME_ARGS (frame->deadline),
      GST_STIME_ARGS (deadline));

  GST_OBJECT_UNLOCK (decoder);

  return deadline;
}

/**
 * gst_video_decoder_get_qos_proportion:
 * @decoder: a #GstVideoDecoder
 *
 * Returns: The current QoS proportion.
 *
 * Since: 1.0.3
 */
gdouble
gst_video_decoder_get_qos_proportion (GstVideoDecoder * decoder)
{
  gdouble proportion;

  g_return_val_if_fail (GST_IS_VIDEO_DECODER (decoder), 1.0);

  GST_OBJECT_LOCK (decoder);
  proportion = decoder->priv->proportion;
  GST_OBJECT_UNLOCK (decoder);

  return proportion;
}

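/* Helper behind the GST_VIDEO_DECODER_ERROR macro. Subclasses normally
 * invoke it through that macro rather than calling it directly, along these
 * lines (a sketch; the weight, domain/code and messages are up to the
 * subclass):
 *
 *   GstFlowReturn ret = GST_FLOW_OK;
 *
 *   GST_VIDEO_DECODER_ERROR (decoder, 1, STREAM, DECODE,
 *       ("decoding error"), ("bitstream is corrupt"), ret);
 *   if (ret != GST_FLOW_OK)
 *     return ret;
 */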
GstFlowReturn
_gst_video_decoder_error (GstVideoDecoder * dec, gint weight,
    GQuark domain, gint code, gchar * txt, gchar * dbg, const gchar * file,
    const gchar * function, gint line)
{
  if (txt)
    GST_WARNING_OBJECT (dec, "error: %s", txt);
  if (dbg)
    GST_WARNING_OBJECT (dec, "error: %s", dbg);
  dec->priv->error_count += weight;
  dec->priv->discont = TRUE;
  if (dec->priv->max_errors >= 0 &&
      dec->priv->error_count > dec->priv->max_errors) {
    gst_element_message_full (GST_ELEMENT (dec), GST_MESSAGE_ERROR,
        domain, code, txt, dbg, file, function, line);
    return GST_FLOW_ERROR;
  } else {
    g_free (txt);
    g_free (dbg);
    return GST_FLOW_OK;
  }
}

/**
 * gst_video_decoder_set_max_errors:
 * @dec: a #GstVideoDecoder
 * @num: max tolerated errors
 *
 * Sets the number of tolerated decoder errors: a tolerated error is only
 * warned about, but exceeding that number leads to a fatal error. You can
 * set -1 to never return a fatal error. The default is
 * GST_VIDEO_DECODER_MAX_ERRORS.
 *
 * The '-1' option was added in 1.4
 */
void
gst_video_decoder_set_max_errors (GstVideoDecoder * dec, gint num)
{
  g_return_if_fail (GST_IS_VIDEO_DECODER (dec));

  dec->priv->max_errors = num;
}

/**
 * gst_video_decoder_get_max_errors:
 * @dec: a #GstVideoDecoder
 *
 * Returns: currently configured decoder tolerated error count.
 */
gint
gst_video_decoder_get_max_errors (GstVideoDecoder * dec)
{
  g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), 0);

  return dec->priv->max_errors;
}

/**
 * gst_video_decoder_set_needs_format:
 * @dec: a #GstVideoDecoder
 * @enabled: new state
 *
 * Configures decoder format needs. If enabled, the subclass needs to be
 * negotiated with format caps before it can process any data. It will then
 * never be handed any data before it has been configured.
 * Otherwise, it might be handed data without having been configured and is
 * then expected to be able to do so, either by default or based on the
 * input data.
 *
 * Since: 1.4
 */
void
gst_video_decoder_set_needs_format (GstVideoDecoder * dec, gboolean enabled)
{
  g_return_if_fail (GST_IS_VIDEO_DECODER (dec));

  dec->priv->needs_format = enabled;
}

/**
 * gst_video_decoder_get_needs_format:
 * @dec: a #GstVideoDecoder
 *
 * Queries decoder required format handling.
 *
 * Returns: %TRUE if required format handling is enabled.
 *
 * Since: 1.4
 */
gboolean
gst_video_decoder_get_needs_format (GstVideoDecoder * dec)
{
  gboolean result;

  g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), FALSE);

  result = dec->priv->needs_format;

  return result;
}

/**
 * gst_video_decoder_set_packetized:
 * @decoder: a #GstVideoDecoder
 * @packetized: whether the input data should be considered as packetized.
 *
 * Allows the base class to consider input data as packetized or not. If the
 * input is packetized, then the @parse method will not be called.
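 *
 * A subclass whose upstream always delivers exactly one encoded frame per
 * buffer would typically enable this once, e.g. in its instance init
 * (a sketch, assuming that delivery guarantee holds for @self):
 * |[<!-- language="C" -->
 * gst_video_decoder_set_packetized (GST_VIDEO_DECODER (self), TRUE);
 * ]|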
 */
void
gst_video_decoder_set_packetized (GstVideoDecoder * decoder,
    gboolean packetized)
{
  decoder->priv->packetized = packetized;
}

/**
 * gst_video_decoder_get_packetized:
 * @decoder: a #GstVideoDecoder
 *
 * Queries whether input data is considered packetized or not by the
 * base class.
 *
 * Returns: %TRUE if input data is considered packetized.
 */
gboolean
gst_video_decoder_get_packetized (GstVideoDecoder * decoder)
{
  return decoder->priv->packetized;
}

/**
 * gst_video_decoder_have_last_subframe:
 * @decoder: a #GstVideoDecoder
 * @frame: (transfer none): the #GstVideoCodecFrame to update
 *
 * Indicates that the last subframe has been processed by the decoder
 * in @frame. This will release the current frame in the video decoder,
 * allowing new frames to be received from upstream elements. This method
 * must be called in the subclass @handle_frame callback.
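 *
 * For example, a subframe-mode subclass might detect the last subframe via
 * the buffer marker flag (one possible approach, sketched):
 * |[<!-- language="C" -->
 * // inside handle_frame, in subframe mode
 * if (GST_BUFFER_FLAG_IS_SET (frame->input_buffer, GST_VIDEO_BUFFER_FLAG_MARKER))
 *   gst_video_decoder_have_last_subframe (decoder, frame);
 * ]|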
 *
 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
 *
 * Since: 1.20
 */
GstFlowReturn
gst_video_decoder_have_last_subframe (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  g_return_val_if_fail (gst_video_decoder_get_subframe_mode (decoder),
      GST_FLOW_OK);
  /* unref once from the list */
  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  if (decoder->priv->current_frame == frame) {
    gst_video_codec_frame_unref (decoder->priv->current_frame);
    decoder->priv->current_frame = NULL;
  }
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return GST_FLOW_OK;
}

/**
 * gst_video_decoder_set_subframe_mode:
 * @decoder: a #GstVideoDecoder
 * @subframe_mode: whether the input data should be considered as subframes.
 *
 * If this is set to TRUE, it informs the base class that the subclass
 * can receive the data at a granularity lower than one frame.
 *
 * Note that in this mode, the subclass has two options. It can either
 * require the presence of a GST_VIDEO_BUFFER_FLAG_MARKER to mark the
 * end of a frame. Or it can operate in such a way that it will decode
 * a single frame at a time. In this second case, every buffer that
 * arrives to the element is considered part of the same frame until
 * gst_video_decoder_finish_frame() is called.
 *
 * In either case, the same #GstVideoCodecFrame will be passed to the
 * GstVideoDecoderClass:handle_frame vmethod repeatedly with a
 * different GstVideoCodecFrame:input_buffer every time until the end of the
 * frame has been signaled using either method.
 * This method must be called during the decoder subclass @set_format call,
 * as shown in the sketch below.
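 *
 * |[<!-- language="C" -->
 * static gboolean
 * my_decoder_set_format (GstVideoDecoder * decoder, GstVideoCodecState * state)
 * {
 *   // hypothetical subclass that wants to receive e.g. individual slices
 *   gst_video_decoder_set_subframe_mode (decoder, TRUE);
 *
 *   return TRUE;
 * }
 * ]|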
 *
 * Since: 1.20
 */
void
gst_video_decoder_set_subframe_mode (GstVideoDecoder * decoder,
    gboolean subframe_mode)
{
  decoder->priv->subframe_mode = subframe_mode;
}

/**
 * gst_video_decoder_get_subframe_mode:
 * @decoder: a #GstVideoDecoder
 *
 * Queries whether input data is considered as subframes or not by the
 * base class. If FALSE, each input buffer will be considered as a full
 * frame.
 *
 * Returns: %TRUE if input data is considered as sub frames.
 *
 * Since: 1.20
 */
gboolean
gst_video_decoder_get_subframe_mode (GstVideoDecoder * decoder)
{
  return decoder->priv->subframe_mode;
}

/**
 * gst_video_decoder_get_input_subframe_index:
 * @decoder: a #GstVideoDecoder
 * @frame: (transfer none): the #GstVideoCodecFrame to update
 *
 * Queries the index of the last subframe received by
 * the decoder base class in @frame.
 *
 * Returns: the current subframe index received in subframe mode, 1 otherwise.
 *
 * Since: 1.20
 */
guint
gst_video_decoder_get_input_subframe_index (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  return frame->abidata.ABI.num_subframes;
}

/**
 * gst_video_decoder_get_processed_subframe_index:
 * @decoder: a #GstVideoDecoder
 * @frame: (transfer none): the #GstVideoCodecFrame to update
 *
 * Queries the number of subframes of @frame processed by
 * the decoder base class.
 *
 * Returns: the number of subframes processed so far in subframe mode.
 *
 * Since: 1.20
 */
guint
gst_video_decoder_get_processed_subframe_index (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  return frame->abidata.ABI.subframes_processed;
}

/**
 * gst_video_decoder_set_estimate_rate:
 * @dec: a #GstVideoDecoder
 * @enabled: whether to enable byte to time conversion
 *
 * Allows the base class to perform byte-to-time estimated conversion.
 */
void
gst_video_decoder_set_estimate_rate (GstVideoDecoder * dec, gboolean enabled)
{
  g_return_if_fail (GST_IS_VIDEO_DECODER (dec));

  dec->priv->do_estimate_rate = enabled;
}

/**
 * gst_video_decoder_get_estimate_rate:
 * @dec: a #GstVideoDecoder
 *
 * Returns: currently configured byte-to-time conversion setting
 */
gboolean
gst_video_decoder_get_estimate_rate (GstVideoDecoder * dec)
{
  g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), 0);

  return dec->priv->do_estimate_rate;
}

/**
 * gst_video_decoder_set_latency:
 * @decoder: a #GstVideoDecoder
 * @min_latency: minimum latency
 * @max_latency: maximum latency
 *
 * Lets #GstVideoDecoder sub-classes tell the baseclass what the decoder
 * latency is. This will also post a LATENCY message on the bus so the
 * pipeline can reconfigure its global latency.
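 *
 * For instance, a decoder that buffers two frames before producing output
 * might report that as latency (a sketch, assuming a valid framerate in the
 * negotiated #GstVideoInfo pointed to by info):
 * |[<!-- language="C" -->
 * GstClockTime latency = gst_util_uint64_scale (2 * GST_SECOND,
 *     info->fps_d, info->fps_n);
 *
 * gst_video_decoder_set_latency (decoder, latency, latency);
 * ]|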
 */
void
gst_video_decoder_set_latency (GstVideoDecoder * decoder,
    GstClockTime min_latency, GstClockTime max_latency)
{
  g_return_if_fail (GST_CLOCK_TIME_IS_VALID (min_latency));
  g_return_if_fail (max_latency >= min_latency);

  GST_OBJECT_LOCK (decoder);
  decoder->priv->min_latency = min_latency;
  decoder->priv->max_latency = max_latency;
  GST_OBJECT_UNLOCK (decoder);

  gst_element_post_message (GST_ELEMENT_CAST (decoder),
      gst_message_new_latency (GST_OBJECT_CAST (decoder)));
}

/**
 * gst_video_decoder_get_latency:
 * @decoder: a #GstVideoDecoder
 * @min_latency: (out) (allow-none): address of variable in which to store the
 *     configured minimum latency, or %NULL
 * @max_latency: (out) (allow-none): address of variable in which to store the
 *     configured maximum latency, or %NULL
 *
 * Query the configured decoder latency. Results will be returned via
 * @min_latency and @max_latency.
 */
void
gst_video_decoder_get_latency (GstVideoDecoder * decoder,
    GstClockTime * min_latency, GstClockTime * max_latency)
{
  GST_OBJECT_LOCK (decoder);
  if (min_latency)
    *min_latency = decoder->priv->min_latency;
  if (max_latency)
    *max_latency = decoder->priv->max_latency;
  GST_OBJECT_UNLOCK (decoder);
}

/**
 * gst_video_decoder_merge_tags:
 * @decoder: a #GstVideoDecoder
 * @tags: (allow-none): a #GstTagList to merge, or NULL to unset
 *     previously-set tags
 * @mode: the #GstTagMergeMode to use, usually #GST_TAG_MERGE_REPLACE
 *
 * Sets the video decoder tags and how they should be merged with any
 * upstream stream tags. This will override any tags previously-set
 * with gst_video_decoder_merge_tags().
 *
 * Note that this is provided for convenience, and the subclass is
 * not required to use this and can still do tag handling on its own.
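 *
 * For example (a sketch; the tag value is made up):
 * |[<!-- language="C" -->
 * GstTagList *tags = gst_tag_list_new (GST_TAG_VIDEO_CODEC, "My Codec", NULL);
 *
 * gst_video_decoder_merge_tags (decoder, tags, GST_TAG_MERGE_REPLACE);
 * gst_tag_list_unref (tags);
 * ]|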
 *
 * MT safe.
 */
void
gst_video_decoder_merge_tags (GstVideoDecoder * decoder,
    const GstTagList * tags, GstTagMergeMode mode)
{
  g_return_if_fail (GST_IS_VIDEO_DECODER (decoder));
  g_return_if_fail (tags == NULL || GST_IS_TAG_LIST (tags));
  g_return_if_fail (tags == NULL || mode != GST_TAG_MERGE_UNDEFINED);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  if (decoder->priv->tags != tags) {
    if (decoder->priv->tags) {
      gst_tag_list_unref (decoder->priv->tags);
      decoder->priv->tags = NULL;
      decoder->priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
    }
    if (tags) {
      decoder->priv->tags = gst_tag_list_ref ((GstTagList *) tags);
      decoder->priv->tags_merge_mode = mode;
    }

    GST_DEBUG_OBJECT (decoder, "set decoder tags to %" GST_PTR_FORMAT, tags);
    decoder->priv->tags_changed = TRUE;
  }
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
}

/**
 * gst_video_decoder_get_buffer_pool:
 * @decoder: a #GstVideoDecoder
 *
 * Returns: (transfer full): the instance of the #GstBufferPool used
 *     by the decoder; unref it after use
 */
GstBufferPool *
gst_video_decoder_get_buffer_pool (GstVideoDecoder * decoder)
{
  g_return_val_if_fail (GST_IS_VIDEO_DECODER (decoder), NULL);

  if (decoder->priv->pool)
    return gst_object_ref (decoder->priv->pool);

  return NULL;
}

/**
 * gst_video_decoder_get_allocator:
 * @decoder: a #GstVideoDecoder
 * @allocator: (out) (allow-none) (transfer full): the #GstAllocator
 *     used
 * @params: (out) (allow-none) (transfer full): the
 *     #GstAllocationParams of @allocator
 *
 * Lets #GstVideoDecoder sub-classes know the memory @allocator
 * used by the base class and its @params.
 *
 * Unref the @allocator after use.
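 *
 * Typical usage (sketch):
 * |[<!-- language="C" -->
 * GstAllocator *allocator;
 * GstAllocationParams params;
 *
 * gst_video_decoder_get_allocator (decoder, &allocator, &params);
 * // ... allocate custom memory with allocator/params ...
 * if (allocator)
 *   gst_object_unref (allocator);
 * ]|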
 */
void
gst_video_decoder_get_allocator (GstVideoDecoder * decoder,
    GstAllocator ** allocator, GstAllocationParams * params)
{
  g_return_if_fail (GST_IS_VIDEO_DECODER (decoder));

  if (allocator)
    *allocator = decoder->priv->allocator ?
        gst_object_ref (decoder->priv->allocator) : NULL;

  if (params)
    *params = decoder->priv->params;
}

/**
 * gst_video_decoder_set_use_default_pad_acceptcaps:
 * @decoder: a #GstVideoDecoder
 * @use: if the default pad accept-caps query handling should be used
 *
 * Lets #GstVideoDecoder sub-classes decide if they want the sink pad
 * to use the default pad query handler to reply to accept-caps queries.
 *
 * By setting this to true it is possible to further customize the default
 * handler with %GST_PAD_SET_ACCEPT_INTERSECT and
 * %GST_PAD_SET_ACCEPT_TEMPLATE
 *
 * Since: 1.6
 */
void
gst_video_decoder_set_use_default_pad_acceptcaps (GstVideoDecoder * decoder,
    gboolean use)
{
  decoder->priv->use_default_pad_acceptcaps = use;
}

static void
gst_video_decoder_request_sync_point_internal (GstVideoDecoder * dec,
    GstClockTime deadline, GstVideoDecoderRequestSyncPointFlags flags)
{
  GstEvent *fku = NULL;
  GstVideoDecoderPrivate *priv;

  g_return_if_fail (GST_IS_VIDEO_DECODER (dec));

  priv = dec->priv;

  GST_OBJECT_LOCK (dec);

  /* Check if we're allowed to send a new force-keyunit event.
   * frame->deadline is set to the running time of the PTS. */
  if (priv->min_force_key_unit_interval == 0 ||
      deadline == GST_CLOCK_TIME_NONE ||
      (priv->min_force_key_unit_interval != GST_CLOCK_TIME_NONE &&
          (priv->last_force_key_unit_time == GST_CLOCK_TIME_NONE
              || (priv->last_force_key_unit_time +
                  priv->min_force_key_unit_interval <= deadline)))) {
    GST_DEBUG_OBJECT (dec,
        "Requesting a new key-unit for frame with deadline %" GST_TIME_FORMAT,
        GST_TIME_ARGS (deadline));
    fku =
        gst_video_event_new_upstream_force_key_unit (GST_CLOCK_TIME_NONE,
        FALSE, 0);
    priv->last_force_key_unit_time = deadline;
  } else {
    GST_DEBUG_OBJECT (dec,
        "Can't request a new key-unit for frame with deadline %"
        GST_TIME_FORMAT, GST_TIME_ARGS (deadline));
  }
  priv->request_sync_point_flags |= flags;
  /* We don't know yet the frame number of the sync point so set it to a
   * frame number higher than any allowed frame number */
  priv->request_sync_point_frame_number = REQUEST_SYNC_POINT_PENDING;
  GST_OBJECT_UNLOCK (dec);

  if (fku)
    gst_pad_push_event (dec->sinkpad, fku);
}

/**
 * gst_video_decoder_request_sync_point:
 * @dec: a #GstVideoDecoder
 * @frame: a #GstVideoCodecFrame
 * @flags: #GstVideoDecoderRequestSyncPointFlags
 *
 * Allows the #GstVideoDecoder subclass to request from the base class that
 * a new sync point should be requested from upstream, and that @frame was
 * the frame when the subclass noticed that a new sync point is required. A
 * reason for the subclass to do this could be missing reference frames, for
 * example.
 *
 * The base class will then request a new sync point from upstream as long as
 * the time that passed since the last one exceeds
 * #GstVideoDecoder:min-force-key-unit-interval.
 *
 * The subclass can signal via @flags how the frames until the next sync point
 * should be handled (see the sketch after this list):
 *
 * * If %GST_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT is selected then
 *   all following input frames until the next sync point are discarded.
 *   This can be useful if the lack of a sync point will prevent all further
 *   decoding and the decoder implementation is not very robust in handling
 *   missing reference frames.
 * * If %GST_VIDEO_DECODER_REQUEST_SYNC_POINT_CORRUPT_OUTPUT is selected
 *   then all output frames following @frame are marked as corrupted via
 *   %GST_BUFFER_FLAG_CORRUPTED. Corrupted frames can be automatically
 *   dropped by the base class, see #GstVideoDecoder:discard-corrupted-frames.
 *   Subclasses can manually mark frames as corrupted via
 *   %GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED before calling
 *   gst_video_decoder_finish_frame().
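 *
 * For example, a subclass that detected a missing reference frame might
 * call (a sketch):
 * |[<!-- language="C" -->
 * gst_video_decoder_request_sync_point (dec, frame,
 *     GST_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT);
 * ]|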
 *
 * Since: 1.20
 */
void
gst_video_decoder_request_sync_point (GstVideoDecoder * dec,
    GstVideoCodecFrame * frame, GstVideoDecoderRequestSyncPointFlags flags)
{
  g_return_if_fail (GST_IS_VIDEO_DECODER (dec));
  g_return_if_fail (frame != NULL);

  gst_video_decoder_request_sync_point_internal (dec, frame->deadline, flags);
}

/**
 * gst_video_decoder_set_needs_sync_point:
 * @dec: a #GstVideoDecoder
 * @enabled: new state
 *
 * Configures whether the decoder requires a sync point before it starts
 * outputting data in the beginning. If enabled, the base class will discard
 * all non-sync point frames in the beginning and after a flush and will not
 * pass them to the subclass.
 *
 * If the first frame is not a sync point, the base class will request a sync
 * point via the force-key-unit event.
 *
 * Since: 1.20
 */
void
gst_video_decoder_set_needs_sync_point (GstVideoDecoder * dec, gboolean enabled)
{
  g_return_if_fail (GST_IS_VIDEO_DECODER (dec));

  dec->priv->needs_sync_point = enabled;
}

/**
 * gst_video_decoder_get_needs_sync_point:
 * @dec: a #GstVideoDecoder
 *
 * Queries if the decoder requires a sync point before it starts outputting
 * data in the beginning.
 *
 * Returns: %TRUE if a sync point is required in the beginning.
 *
 * Since: 1.20
 */
gboolean
gst_video_decoder_get_needs_sync_point (GstVideoDecoder * dec)
{
  gboolean result;

  g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), FALSE);

  result = dec->priv->needs_sync_point;

  return result;
}

#ifdef OHOS_OPT_COMPAT
// ohos.opt.compat.0053
gboolean
gst_video_decoder_need_decode (GstVideoDecoder * dec)
{
  return !(dec->priv->has_push_first_frame
      && dec->priv->only_one_frame_required);
}
#endif