/* GStreamer
 * Copyright (C) 2008 David Schleef <ds@schleef.org>
 * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
 * Copyright (C) 2011 Nokia Corporation. All rights reserved.
 *   Contact: Stefan Kost <stefan.kost@nokia.com>
 * Copyright (C) 2012 Collabora Ltd.
 *   Author : Edward Hervey <edward@collabora.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

/**
 * SECTION:gstvideodecoder
 * @title: GstVideoDecoder
 * @short_description: Base class for video decoders
 *
 * This base class is for video decoders turning encoded data into raw video
 * frames.
 *
 * The GstVideoDecoder base class and derived subclasses should cooperate as
 * follows:
 *
 * ## Configuration
 *
 *   * Initially, GstVideoDecoder calls @start when the decoder element
 *     is activated, which allows the subclass to perform any global setup.
 *
 *   * GstVideoDecoder calls @set_format to inform the subclass of caps
 *     describing input video data that it is about to receive, including
 *     possibly configuration data.
 *     While unlikely, it might be called more than once, if changing input
 *     parameters requires reconfiguration.
 *
 *   * Incoming data buffers are processed as needed, as described in Data
 *     Processing below.
 *
 *   * GstVideoDecoder calls @stop at the end of all processing.
 *
 * ## Data processing
 *
 *   * The base class gathers input data, and optionally allows the subclass
 *     to parse this into subsequently manageable chunks, typically
 *     corresponding to and referred to as 'frames'.
 *
 *   * Each input frame is provided in turn to the subclass' @handle_frame
 *     callback.
 *   * When the subclass enables subframe mode with `gst_video_decoder_set_subframe_mode`,
 *     the base class will provide the same input frame with different input
 *     buffers to the subclass' @handle_frame callback. During this call,
 *     the subclass needs to take ownership of the input buffer, as
 *     @GstVideoCodecFrame.input_buffer will have been changed before the
 *     next subframe buffer is received.
 *     The subclass will call `gst_video_decoder_have_last_subframe`
 *     when a new input frame can be created by the base class.
 *     Every subframe will share the same @GstVideoCodecFrame.output_buffer
 *     to write the decoding result. The subclass is responsible for
 *     protecting its access.
 *
 *   * If codec processing results in decoded data, the subclass should call
 *     @gst_video_decoder_finish_frame to have decoded data pushed
 *     downstream. In subframe mode,
 *     the subclass should call @gst_video_decoder_finish_subframe until the
 *     last subframe, where it should call @gst_video_decoder_finish_frame.
 *     The subclass can detect the last subframe using GST_VIDEO_BUFFER_FLAG_MARKER
 *     on buffers or using its own logic to collect the subframes.
 *     In case of decoding failure, the subclass must call
 *     @gst_video_decoder_drop_frame or @gst_video_decoder_drop_subframe,
 *     to allow the base class to do timestamp and offset tracking, and possibly
 *     to requeue the frame for a later attempt in the case of reverse playback.
 *
 * ## Shutdown phase
 *
 *   * The GstVideoDecoder class calls @stop to inform the subclass that data
 *     parsing will be stopped.
 *
 * ## Additional Notes
 *
 *   * Seeking/Flushing
 *
 *     * When the pipeline is seeked or otherwise flushed, the subclass is
 *       informed via a call to its @reset callback, with the hard parameter
 *       set to true. This indicates the subclass should drop any internal data
 *       queues and timestamps and prepare for a fresh set of buffers to arrive
 *       for parsing and decoding.
 *
 *   * End Of Stream
 *
 *     * At end-of-stream, the subclass @parse function may be called some final
 *       times with the at_eos parameter set to true, indicating that the element
 *       should not expect any more data to be arriving, and it should parse any
 *       remaining frames and call gst_video_decoder_have_frame() if possible.
 *
 * The subclass is responsible for providing pad template caps for
 * source and sink pads. The pads need to be named "sink" and "src". It also
 * needs to provide information about the output caps, when they are known.
 * This may be when the base class calls the subclass' @set_format function,
 * though it might be during decoding, before calling
 * @gst_video_decoder_finish_frame. This is done via
 * @gst_video_decoder_set_output_state.
 *
 * The subclass is also responsible for providing (presentation) timestamps
 * (likely based on corresponding input ones).  If that is not applicable
 * or possible, the base class provides limited framerate-based interpolation.
 *
 * Similarly, the base class provides some limited (legacy) seeking support
 * if specifically requested by the subclass, as full-fledged support
 * should rather be left to an upstream demuxer, parser or similar.  This
 * simple approach caters for seeking and duration reporting using estimated
 * input bitrates. To enable it, a subclass should call
 * @gst_video_decoder_set_estimate_rate, which enables handling of incoming
 * byte-streams.
 *
 * The base class provides some support for reverse playback, in particular
 * in case incoming data is not packetized or upstream does not provide
 * fragments on keyframe boundaries.  However, the subclass should then be
 * prepared for the parsing and frame processing stages to occur separately
 * (in normal forward processing, the latter immediately follows the former).
 * The subclass also needs to ensure the parsing stage properly marks
 * keyframes, unless it knows the upstream elements will do so properly for
 * incoming data.
 *
 * The bare minimum that a functional subclass needs to implement is:
 *
 *   * Provide pad templates
 *   * Inform the base class of output caps via
 *      @gst_video_decoder_set_output_state
 *
 *   * Parse input data, if it is not considered packetized from upstream.
 *      Data will be provided to @parse, which should invoke
 *      @gst_video_decoder_add_to_frame and @gst_video_decoder_have_frame to
 *      separate the data belonging to each video frame.
 *
 *   * Accept data in @handle_frame and provide decoded results to
 *      @gst_video_decoder_finish_frame, or call @gst_video_decoder_drop_frame.
 *
 * A minimal sketch of such a subclass follows this comment.
 */
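
/* A minimal sketch of a GstVideoDecoder subclass handling packetized input,
 * to illustrate the contract described above. The "GstFooDec" type, its
 * header boilerplate and the foo_decode() helper are hypothetical
 * placeholders, not part of this library:
 *
 *   static GstStaticPadTemplate foo_dec_sink_template =
 *       GST_STATIC_PAD_TEMPLATE ("sink", GST_PAD_SINK, GST_PAD_ALWAYS,
 *       GST_STATIC_CAPS ("video/x-foo"));
 *   static GstStaticPadTemplate foo_dec_src_template =
 *       GST_STATIC_PAD_TEMPLATE ("src", GST_PAD_SRC, GST_PAD_ALWAYS,
 *       GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("I420")));
 *
 *   G_DEFINE_TYPE (GstFooDec, gst_foo_dec, GST_TYPE_VIDEO_DECODER);
 *
 *   static gboolean
 *   gst_foo_dec_set_format (GstVideoDecoder * dec, GstVideoCodecState * state)
 *   {
 *     // Announce the output caps as soon as they are known
 *     GstVideoCodecState *out = gst_video_decoder_set_output_state (dec,
 *         GST_VIDEO_FORMAT_I420, state->info.width, state->info.height, state);
 *
 *     gst_video_codec_state_unref (out);
 *     return TRUE;
 *   }
 *
 *   static GstFlowReturn
 *   gst_foo_dec_handle_frame (GstVideoDecoder * dec, GstVideoCodecFrame * frame)
 *   {
 *     // Let the base class allocate frame->output_buffer, then decode into it
 *     GstFlowReturn ret = gst_video_decoder_allocate_output_frame (dec, frame);
 *
 *     if (ret != GST_FLOW_OK ||
 *         !foo_decode (frame->input_buffer, frame->output_buffer))
 *       return gst_video_decoder_drop_frame (dec, frame);
 *     return gst_video_decoder_finish_frame (dec, frame);
 *   }
 *
 *   static void
 *   gst_foo_dec_class_init (GstFooDecClass * klass)
 *   {
 *     GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
 *     GstVideoDecoderClass *vdec_class = GST_VIDEO_DECODER_CLASS (klass);
 *
 *     gst_element_class_add_static_pad_template (element_class,
 *         &foo_dec_sink_template);
 *     gst_element_class_add_static_pad_template (element_class,
 *         &foo_dec_src_template);
 *     vdec_class->set_format = gst_foo_dec_set_format;
 *     vdec_class->handle_frame = gst_foo_dec_handle_frame;
 *   }
 *
 *   static void
 *   gst_foo_dec_init (GstFooDec * self)
 *   {
 *     // Input is already framed by upstream, no @parse implementation needed
 *     gst_video_decoder_set_packetized (GST_VIDEO_DECODER (self), TRUE);
 *   }
 */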

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

/* TODO
 *
 * * Add a flag/boolean for I-frame-only/image decoders so we can do extra
 *   features, like applying QoS on input (as opposed to after the frame is
 *   decoded).
 * * Add a flag/boolean for decoders that require keyframes, so the base
 *   class can automatically discard non-keyframes before one has arrived
 * * Detect reordered frame/timestamps and fix the pts/dts
 * * Support for GstIndex (or shall we not care ?)
 * * Calculate actual latency based on input/output timestamp/frame_number
 *   and if it exceeds the recorded one, save it and emit a GST_MESSAGE_LATENCY
 * * Emit latency message when it changes
 *
 */

/* Implementation notes:
 * The Video Decoder base class operates in 2 primary processing modes, depending
 * on whether forward or reverse playback is requested.
 *
 * Forward playback:
 *   * Incoming buffer -> @parse() -> add_to_frame()/have_frame() ->
 *     handle_frame() -> push downstream
 *
 * Reverse playback is more complicated, since it involves gathering incoming
 * data regions as we loop backwards through the upstream data. The processing
 * concept (using incoming buffers as containing one frame each to simplify
 * things) is:
 *
 * Upstream data we want to play:
 *  Buffer encoded order:  1  2  3  4  5  6  7  8  9  EOS
 *  Keyframe flag:            K        K
 *  Groupings:             AAAAAAA  BBBBBBB  CCCCCCC
 *
 * Input:
 *  Buffer reception order:  7  8  9  4  5  6  1  2  3  EOS
 *  Keyframe flag:                       K        K
 *  Discont flag:            D        D        D
 *
 * - Each Discont marks a discont in the decoding order.
 * - The keyframes mark where we can start decoding.
 *
 * Initially, we prepend incoming buffers to the gather queue. Whenever the
 * discont flag is set on an incoming buffer, the gather queue is flushed out
 * before the new buffer is collected.
 *
 * The above data will be accumulated in the gather queue like this:
 *
 *   gather queue:  9  8  7
 *                        D
 *
 * When buffer 4 is received (with a DISCONT), we flush the gather queue like
 * this:
 *
 *   while (gather)
 *     take head of queue and prepend to parse queue (this reverses the
 *     sequence, so parse queue is 7 -> 8 -> 9)
 *
 *   Next, we process the parse queue, which now contains all un-parsed packets
 *   (including any leftover ones from the previous decode section)
 *
 *   for each buffer now in the parse queue:
 *     Call the subclass parse function, prepending each resulting frame to
 *     the parse_gather queue. Buffers which precede the first one that
 *     produces a parsed frame are retained in the parse queue for
 *     re-processing on the next cycle of parsing.
 *
 *   The parse_gather queue now contains frame objects ready for decoding,
 *   in reverse order.
 *   parse_gather: 9 -> 8 -> 7
 *
 *   while (parse_gather)
 *     Take the head of the queue and prepend it to the decode queue
 *     If the frame was a keyframe, process the decode queue
 *   decode is now 7-8-9
 *
 *  Processing the decode queue results in frames with attached output buffers
 *  stored in the 'output_queue' ready for outputting in reverse order.
 *
 * After we flushed the gather queue and parsed it, we add 4 to the (now empty)
 * gather queue. We get the following situation:
 *
 *  gather queue:    4
 *  decode queue:    7  8  9
 *
 * After we received 5 (Keyframe) and 6:
 *
 *  gather queue:    6  5  4
 *  decode queue:    7  8  9
 *
 * When we receive 1 (DISCONT) which triggers a flush of the gather queue:
 *
 *   Copy head of the gather queue (6) to decode queue:
 *
 *    gather queue:    5  4
 *    decode queue:    6  7  8  9
 *
 *   Copy head of the gather queue (5) to decode queue. This is a keyframe so we
 *   can start decoding.
 *
 *    gather queue:    4
 *    decode queue:    5  6  7  8  9
 *
 *   Decode frames in decode queue, store raw decoded data in output queue; we
 *   can take the head of the decode queue and prepend the decoded result in the
 *   output queue:
 *
 *    gather queue:    4
 *    decode queue:
 *    output queue:    9  8  7  6  5
 *
 *   Now output all the frames in the output queue, picking a frame from the
 *   head of the queue.
 *
 *   Copy head of the gather queue (4) to decode queue; we flushed the gather
 *   queue and can now store the input buffer in the gather queue:
 *
 *    gather queue:    1
 *    decode queue:    4
 *
 *  When we receive EOS, the queue looks like:
 *
 *    gather queue:    3  2  1
 *    decode queue:    4
 *
 *  Fill decode queue, first keyframe we copy is 2:
 *
 *    gather queue:    1
 *    decode queue:    2  3  4
 *
 *  Decoded output:
 *
 *    gather queue:    1
 *    decode queue:
 *    output queue:    4  3  2
 *
 *  Leftover buffer 1 cannot be decoded and must be discarded.
 */
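
/* The queue reversal described above amounts to repeated head-to-head list
 * moves; an illustrative sketch only (the real logic, which also tracks
 * events and timestamps, lives in gst_video_decoder_flush_parse()):
 *
 *   while (priv->gather) {
 *     // Move the head of the gather queue (most recently received buffer)
 *     // to the head of the parse queue, turning 9 -> 8 -> 7 back into
 *     // decoding order 7 -> 8 -> 9
 *     GstBuffer *buf = priv->gather->data;
 *
 *     priv->gather = g_list_delete_link (priv->gather, priv->gather);
 *     priv->parse = g_list_prepend (priv->parse, buf);
 *   }
 */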

#include "gstvideodecoder.h"
#include "gstvideoutils.h"
#include "gstvideoutilsprivate.h"

#include <gst/video/video.h>
#include <gst/video/video-event.h>
#include <gst/video/gstvideopool.h>
#include <gst/video/gstvideometa.h>
#include <string.h>

GST_DEBUG_CATEGORY (videodecoder_debug);
#define GST_CAT_DEFAULT videodecoder_debug

/* properties */
#define DEFAULT_QOS                 TRUE
#define DEFAULT_MAX_ERRORS          GST_VIDEO_DECODER_MAX_ERRORS
#define DEFAULT_MIN_FORCE_KEY_UNIT_INTERVAL 0
#define DEFAULT_DISCARD_CORRUPTED_FRAMES FALSE
#define DEFAULT_AUTOMATIC_REQUEST_SYNC_POINTS FALSE
#define DEFAULT_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS (GST_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT | GST_VIDEO_DECODER_REQUEST_SYNC_POINT_CORRUPT_OUTPUT)

/* Used for request_sync_point_frame_number. These are out of range for the
 * frame numbers and can be given special meaning */
/* Note: the guint64 cast makes the addition happen in 64-bit arithmetic;
 * a plain G_MAXUINT + 1 would wrap around to 0 in unsigned int arithmetic,
 * which is a valid frame number */
#define REQUEST_SYNC_POINT_PENDING ((guint64) G_MAXUINT + 1)
#define REQUEST_SYNC_POINT_UNSET G_MAXUINT64

enum
{
  PROP_0,
  PROP_QOS,
  PROP_MAX_ERRORS,
  PROP_MIN_FORCE_KEY_UNIT_INTERVAL,
  PROP_DISCARD_CORRUPTED_FRAMES,
  PROP_AUTOMATIC_REQUEST_SYNC_POINTS,
  PROP_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS,
#ifdef OHOS_OPT_COMPAT
  // ohos.opt.compat.0053 In avmetadatahelper service, only need one frame.
  PROP_ONLY_ONE_FRAME_REQUIRED,
#endif
};
332 
333 struct _GstVideoDecoderPrivate
334 {
335   /* FIXME introduce a context ? */
336 
337   GstBufferPool *pool;
338   GstAllocator *allocator;
339   GstAllocationParams params;
340 
341   /* parse tracking */
342   /* input data */
343   GstAdapter *input_adapter;
344   /* assembles current frame */
345   GstAdapter *output_adapter;
346 
347   /* Whether we attempt to convert newsegment from bytes to
348    * time using a bitrate estimation */
349   gboolean do_estimate_rate;
350 
351   /* Whether input is considered packetized or not */
352   gboolean packetized;
353 
354   /* whether input is considered as subframes */
355   gboolean subframe_mode;
356 
357   /* Error handling */
358   gint max_errors;
359   gint error_count;
360   gboolean had_output_data;
361 #ifdef OHOS_OPT_COMPAT
362   gboolean stream_had_output_data;
363 #endif
364   gboolean had_input_data;
365 
366   gboolean needs_format;
367   /* input_segment are output_segment identical */
368   gboolean in_out_segment_sync;
369 
370   /* TRUE if we have an active set of instant rate flags */
371   gboolean decode_flags_override;
372   GstSegmentFlags decode_flags;
373 
374   /* ... being tracked here;
375    * only available during parsing or when doing subframe decoding */
376   GstVideoCodecFrame *current_frame;
377   /* events that should apply to the current frame */
378   /* FIXME 2.0: Use a GQueue or similar, see GstVideoCodecFrame::events */
379   GList *current_frame_events;
380   /* events that should be pushed before the next frame */
381   /* FIXME 2.0: Use a GQueue or similar, see GstVideoCodecFrame::events */
382   GList *pending_events;
383 
384   /* relative offset of input data */
385   guint64 input_offset;
386   /* relative offset of frame */
387   guint64 frame_offset;
388   /* tracking ts and offsets */
389   GQueue timestamps;
390 
391   /* last outgoing ts */
392   GstClockTime last_timestamp_out;
393   /* incoming pts - dts */
394   GstClockTime pts_delta;
395   gboolean reordered_output;
396 
397   /* FIXME: Consider using a GQueue or other better fitting data structure */
398   /* reverse playback */
399   /* collect input */
400   GList *gather;
401   /* to-be-parsed */
402   GList *parse;
403   /* collected parsed frames */
404   GList *parse_gather;
405   /* frames to be handled == decoded */
406   GList *decode;
407   /* collected output - of buffer objects, not frames */
408   GList *output_queued;
409 
410 
411   /* base_picture_number is the picture number of the reference picture */
412   guint64 base_picture_number;
413   /* combine with base_picture_number, framerate and calcs to yield (presentation) ts */
414   GstClockTime base_timestamp;
415 
416   /* Properties */
417   GstClockTime min_force_key_unit_interval;
418   gboolean discard_corrupted_frames;
419 
420   /* Key unit related state */
421   gboolean needs_sync_point;
422   GstVideoDecoderRequestSyncPointFlags request_sync_point_flags;
423   guint64 request_sync_point_frame_number;
424   GstClockTime last_force_key_unit_time;
425   /* -1 if we saw no sync point yet */
426   guint64 distance_from_sync;
427 
428   gboolean automatic_request_sync_points;
429   GstVideoDecoderRequestSyncPointFlags automatic_request_sync_point_flags;
430 
431   guint32 system_frame_number;
432   guint32 decode_frame_number;
433 
434   GQueue frames;                /* Protected with OBJECT_LOCK */
435   GstVideoCodecState *input_state;
436   GstVideoCodecState *output_state;     /* OBJECT_LOCK and STREAM_LOCK */
437   gboolean output_state_changed;
438 
439   /* QoS properties */
440   gboolean do_qos;
441   gdouble proportion;           /* OBJECT_LOCK */
442   GstClockTime earliest_time;   /* OBJECT_LOCK */
443   GstClockTime qos_frame_duration;      /* OBJECT_LOCK */
444   gboolean discont;
445   /* qos messages: frames dropped/processed */
446   guint dropped;
447   guint processed;
448 
449   /* Outgoing byte size ? */
450   gint64 bytes_out;
451   gint64 time;
452 
453   gint64 min_latency;
454   gint64 max_latency;
455 
456   /* upstream stream tags (global tags are passed through as-is) */
457   GstTagList *upstream_tags;
458 
459   /* subclass tags */
460   GstTagList *tags;
461   GstTagMergeMode tags_merge_mode;
462 
463   gboolean tags_changed;
464 
465   /* flags */
466   gboolean use_default_pad_acceptcaps;
467 
468 #ifndef GST_DISABLE_DEBUG
469   /* Diagnostic time for reporting the time
470    * from flush to first output */
471   GstClockTime last_reset_time;
472 #endif
473 #ifdef OHOS_OPT_PERFORMANCE // ohos.opt.performance.0001: first frame decoded cost time
474   gboolean has_recv_first_key_frame;
475   gboolean has_push_first_frame;
476 #endif
477 #ifdef OHOS_OPT_COMPAT
478   // ohos.opt.compat.0053
479   gboolean only_one_frame_required;
480 #endif
481 };

static GstElementClass *parent_class = NULL;
static gint private_offset = 0;

/* cached quark to avoid contention on the global quark table lock */
#define META_TAG_VIDEO meta_tag_video_quark
static GQuark meta_tag_video_quark;

static void gst_video_decoder_class_init (GstVideoDecoderClass * klass);
static void gst_video_decoder_init (GstVideoDecoder * dec,
    GstVideoDecoderClass * klass);

static void gst_video_decoder_finalize (GObject * object);
static void gst_video_decoder_get_property (GObject * object, guint property_id,
    GValue * value, GParamSpec * pspec);
static void gst_video_decoder_set_property (GObject * object, guint property_id,
    const GValue * value, GParamSpec * pspec);

static gboolean gst_video_decoder_setcaps (GstVideoDecoder * dec,
    GstCaps * caps);
static gboolean gst_video_decoder_sink_event (GstPad * pad, GstObject * parent,
    GstEvent * event);
static gboolean gst_video_decoder_src_event (GstPad * pad, GstObject * parent,
    GstEvent * event);
static GstFlowReturn gst_video_decoder_chain (GstPad * pad, GstObject * parent,
    GstBuffer * buf);
static gboolean gst_video_decoder_sink_query (GstPad * pad, GstObject * parent,
    GstQuery * query);
static GstStateChangeReturn gst_video_decoder_change_state (GstElement *
    element, GstStateChange transition);
static gboolean gst_video_decoder_src_query (GstPad * pad, GstObject * parent,
    GstQuery * query);
static void gst_video_decoder_reset (GstVideoDecoder * decoder, gboolean full,
    gboolean flush_hard);

static GstFlowReturn gst_video_decoder_decode_frame (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame);

static void gst_video_decoder_push_event_list (GstVideoDecoder * decoder,
    GList * events);
static GstClockTime gst_video_decoder_get_frame_duration (GstVideoDecoder *
    decoder, GstVideoCodecFrame * frame);
static GstVideoCodecFrame *gst_video_decoder_new_frame (GstVideoDecoder *
    decoder);
static GstFlowReturn gst_video_decoder_clip_and_push_buf (GstVideoDecoder *
    decoder, GstBuffer * buf);
static GstFlowReturn gst_video_decoder_flush_parse (GstVideoDecoder * dec,
    gboolean at_eos);

static void gst_video_decoder_clear_queues (GstVideoDecoder * dec);

static gboolean gst_video_decoder_sink_event_default (GstVideoDecoder * decoder,
    GstEvent * event);
static gboolean gst_video_decoder_src_event_default (GstVideoDecoder * decoder,
    GstEvent * event);
static gboolean gst_video_decoder_decide_allocation_default (GstVideoDecoder *
    decoder, GstQuery * query);
static gboolean gst_video_decoder_propose_allocation_default (GstVideoDecoder *
    decoder, GstQuery * query);
static gboolean gst_video_decoder_negotiate_default (GstVideoDecoder * decoder);
static GstFlowReturn gst_video_decoder_parse_available (GstVideoDecoder * dec,
    gboolean at_eos, gboolean new_buffer);
static gboolean gst_video_decoder_negotiate_unlocked (GstVideoDecoder *
    decoder);
static gboolean gst_video_decoder_sink_query_default (GstVideoDecoder * decoder,
    GstQuery * query);
static gboolean gst_video_decoder_src_query_default (GstVideoDecoder * decoder,
    GstQuery * query);

static gboolean gst_video_decoder_transform_meta_default (GstVideoDecoder *
    decoder, GstVideoCodecFrame * frame, GstMeta * meta);

static gboolean gst_video_decoder_handle_missing_data_default (GstVideoDecoder *
    decoder, GstClockTime timestamp, GstClockTime duration);

static void gst_video_decoder_copy_metas (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame, GstBuffer * src_buffer,
    GstBuffer * dest_buffer);

static void gst_video_decoder_request_sync_point_internal (GstVideoDecoder *
    dec, GstClockTime deadline, GstVideoDecoderRequestSyncPointFlags flags);

/* we can't use G_DEFINE_ABSTRACT_TYPE because we need the klass in the _init
 * method to get to the padtemplates */
GType
gst_video_decoder_get_type (void)
{
  static gsize type = 0;

  if (g_once_init_enter (&type)) {
    GType _type;
    static const GTypeInfo info = {
      sizeof (GstVideoDecoderClass),
      NULL,
      NULL,
      (GClassInitFunc) gst_video_decoder_class_init,
      NULL,
      NULL,
      sizeof (GstVideoDecoder),
      0,
      (GInstanceInitFunc) gst_video_decoder_init,
    };

    _type = g_type_register_static (GST_TYPE_ELEMENT,
        "GstVideoDecoder", &info, G_TYPE_FLAG_ABSTRACT);

    private_offset =
        g_type_add_instance_private (_type, sizeof (GstVideoDecoderPrivate));

    g_once_init_leave (&type, _type);
  }
  return type;
}

static inline GstVideoDecoderPrivate *
gst_video_decoder_get_instance_private (GstVideoDecoder * self)
{
  return (G_STRUCT_MEMBER_P (self, private_offset));
}

static void
gst_video_decoder_class_init (GstVideoDecoderClass * klass)
{
  GObjectClass *gobject_class;
  GstElementClass *gstelement_class;

  gobject_class = G_OBJECT_CLASS (klass);
  gstelement_class = GST_ELEMENT_CLASS (klass);

  GST_DEBUG_CATEGORY_INIT (videodecoder_debug, "videodecoder", 0,
      "Base Video Decoder");

  parent_class = g_type_class_peek_parent (klass);

  if (private_offset != 0)
    g_type_class_adjust_private_offset (klass, &private_offset);

  gobject_class->finalize = gst_video_decoder_finalize;
  gobject_class->get_property = gst_video_decoder_get_property;
  gobject_class->set_property = gst_video_decoder_set_property;

  gstelement_class->change_state =
      GST_DEBUG_FUNCPTR (gst_video_decoder_change_state);

  klass->sink_event = gst_video_decoder_sink_event_default;
  klass->src_event = gst_video_decoder_src_event_default;
  klass->decide_allocation = gst_video_decoder_decide_allocation_default;
  klass->propose_allocation = gst_video_decoder_propose_allocation_default;
  klass->negotiate = gst_video_decoder_negotiate_default;
  klass->sink_query = gst_video_decoder_sink_query_default;
  klass->src_query = gst_video_decoder_src_query_default;
  klass->transform_meta = gst_video_decoder_transform_meta_default;
  klass->handle_missing_data = gst_video_decoder_handle_missing_data_default;

  /**
   * GstVideoDecoder:qos:
   *
   * If set to %TRUE the decoder will handle QoS events received
   * from downstream elements.
   * This includes dropping output frames which are detected as late
   * using the metrics reported by those events.
   *
   * Since: 1.18
   */
  g_object_class_install_property (gobject_class, PROP_QOS,
      g_param_spec_boolean ("qos", "Quality of Service",
          "Handle Quality-of-Service events from downstream",
          DEFAULT_QOS, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstVideoDecoder:max-errors:
   *
   * Maximum number of tolerated consecutive decode errors. See
   * gst_video_decoder_set_max_errors() for more details.
   *
   * Since: 1.18
   */
  g_object_class_install_property (gobject_class, PROP_MAX_ERRORS,
      g_param_spec_int ("max-errors", "Max errors",
          "Max consecutive decoder errors before returning flow error",
          -1, G_MAXINT, DEFAULT_MAX_ERRORS,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstVideoDecoder:min-force-key-unit-interval:
   *
   * Minimum interval between force-key-unit events sent upstream by the
   * decoder. Setting this to 0 will cause every event to be handled, setting
   * this to %GST_CLOCK_TIME_NONE will cause every event to be ignored.
   *
   * See gst_video_event_new_upstream_force_key_unit() for more details about
   * force-key-unit events.
   *
   * Since: 1.20
   */
  g_object_class_install_property (gobject_class,
      PROP_MIN_FORCE_KEY_UNIT_INTERVAL,
      g_param_spec_uint64 ("min-force-key-unit-interval",
          "Minimum Force Keyunit Interval",
          "Minimum interval between force-keyunit requests in nanoseconds", 0,
          G_MAXUINT64, DEFAULT_MIN_FORCE_KEY_UNIT_INTERVAL,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstVideoDecoder:discard-corrupted-frames:
   *
   * If set to %TRUE the decoder will discard frames that are marked as
   * corrupted instead of outputting them.
   *
   * Since: 1.20
   */
  g_object_class_install_property (gobject_class, PROP_DISCARD_CORRUPTED_FRAMES,
      g_param_spec_boolean ("discard-corrupted-frames",
          "Discard Corrupted Frames",
          "Discard frames marked as corrupted instead of outputting them",
          DEFAULT_DISCARD_CORRUPTED_FRAMES,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstVideoDecoder:automatic-request-sync-points:
   *
   * If set to %TRUE the decoder will automatically request sync points when
   * it seems like a good idea, e.g. if the first frames are not key frames or
   * if packet loss was reported by upstream.
   *
   * Since: 1.20
   */
  g_object_class_install_property (gobject_class,
      PROP_AUTOMATIC_REQUEST_SYNC_POINTS,
      g_param_spec_boolean ("automatic-request-sync-points",
          "Automatic Request Sync Points",
          "Automatically request sync points when it would be useful",
          DEFAULT_AUTOMATIC_REQUEST_SYNC_POINTS,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstVideoDecoder:automatic-request-sync-point-flags:
   *
   * GstVideoDecoderRequestSyncPointFlags to use for the automatically
   * requested sync points if `automatic-request-sync-points` is enabled.
   *
   * Since: 1.20
   */
  g_object_class_install_property (gobject_class,
      PROP_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS,
      g_param_spec_flags ("automatic-request-sync-point-flags",
          "Automatic Request Sync Point Flags",
          "Flags to use when automatically requesting sync points",
          GST_TYPE_VIDEO_DECODER_REQUEST_SYNC_POINT_FLAGS,
          DEFAULT_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
#ifdef OHOS_OPT_COMPAT
  // ohos.opt.compat.0053
  g_object_class_install_property (gobject_class,
      PROP_ONLY_ONE_FRAME_REQUIRED,
      g_param_spec_boolean ("only-one-frame-required",
          "Only one frame required",
          "Only one frame required for avmetadatahelper service",
          FALSE,
          G_PARAM_WRITABLE | G_PARAM_STATIC_STRINGS));
#endif
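
  /* These properties are configured through the standard GObject property
   * API; an illustrative application-side sketch (not part of this file):
   *
   *   g_object_set (decoder, "qos", TRUE,
   *       "automatic-request-sync-points", TRUE, NULL);
   */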

  meta_tag_video_quark = g_quark_from_static_string (GST_META_TAG_VIDEO_STR);
}

static void
gst_video_decoder_init (GstVideoDecoder * decoder, GstVideoDecoderClass * klass)
{
  GstPadTemplate *pad_template;
  GstPad *pad;

  GST_DEBUG_OBJECT (decoder, "gst_video_decoder_init");

  decoder->priv = gst_video_decoder_get_instance_private (decoder);

  pad_template =
      gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "sink");
  g_return_if_fail (pad_template != NULL);

  decoder->sinkpad = pad = gst_pad_new_from_template (pad_template, "sink");

  gst_pad_set_chain_function (pad, GST_DEBUG_FUNCPTR (gst_video_decoder_chain));
  gst_pad_set_event_function (pad,
      GST_DEBUG_FUNCPTR (gst_video_decoder_sink_event));
  gst_pad_set_query_function (pad,
      GST_DEBUG_FUNCPTR (gst_video_decoder_sink_query));
  gst_element_add_pad (GST_ELEMENT (decoder), decoder->sinkpad);

  pad_template =
      gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "src");
  g_return_if_fail (pad_template != NULL);

  decoder->srcpad = pad = gst_pad_new_from_template (pad_template, "src");

  gst_pad_set_event_function (pad,
      GST_DEBUG_FUNCPTR (gst_video_decoder_src_event));
  gst_pad_set_query_function (pad,
      GST_DEBUG_FUNCPTR (gst_video_decoder_src_query));
  gst_element_add_pad (GST_ELEMENT (decoder), decoder->srcpad);

  gst_segment_init (&decoder->input_segment, GST_FORMAT_TIME);
  gst_segment_init (&decoder->output_segment, GST_FORMAT_TIME);

  g_rec_mutex_init (&decoder->stream_lock);

  decoder->priv->input_adapter = gst_adapter_new ();
  decoder->priv->output_adapter = gst_adapter_new ();
  decoder->priv->packetized = TRUE;
  decoder->priv->needs_format = FALSE;
#ifdef OHOS_OPT_COMPAT
  decoder->priv->stream_had_output_data = FALSE;
#endif

  g_queue_init (&decoder->priv->frames);
  g_queue_init (&decoder->priv->timestamps);

  /* properties */
  decoder->priv->do_qos = DEFAULT_QOS;
  decoder->priv->max_errors = GST_VIDEO_DECODER_MAX_ERRORS;

  decoder->priv->min_latency = 0;
  decoder->priv->max_latency = 0;

  decoder->priv->automatic_request_sync_points =
      DEFAULT_AUTOMATIC_REQUEST_SYNC_POINTS;
  decoder->priv->automatic_request_sync_point_flags =
      DEFAULT_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS;
#ifdef OHOS_OPT_COMPAT
  // ohos.opt.compat.0053
  decoder->priv->only_one_frame_required = FALSE;
#endif

  gst_video_decoder_reset (decoder, TRUE, TRUE);
}

static GstVideoCodecState *
_new_input_state (GstCaps * caps)
{
  GstVideoCodecState *state;
  GstStructure *structure;
  const GValue *codec_data;

  state = g_slice_new0 (GstVideoCodecState);
  state->ref_count = 1;
  gst_video_info_init (&state->info);
  if (G_UNLIKELY (!gst_video_info_from_caps (&state->info, caps)))
    goto parse_fail;
  state->caps = gst_caps_ref (caps);

  structure = gst_caps_get_structure (caps, 0);

  codec_data = gst_structure_get_value (structure, "codec_data");
  if (codec_data && G_VALUE_TYPE (codec_data) == GST_TYPE_BUFFER)
    state->codec_data = GST_BUFFER (g_value_dup_boxed (codec_data));

  return state;

parse_fail:
  {
    g_slice_free (GstVideoCodecState, state);
    return NULL;
  }
}

static GstVideoCodecState *
_new_output_state (GstVideoFormat fmt, GstVideoInterlaceMode interlace_mode,
    guint width, guint height, GstVideoCodecState * reference,
    gboolean copy_interlace_mode)
{
  GstVideoCodecState *state;

  state = g_slice_new0 (GstVideoCodecState);
  state->ref_count = 1;
  gst_video_info_init (&state->info);
  if (!gst_video_info_set_interlaced_format (&state->info, fmt, interlace_mode,
          width, height)) {
    g_slice_free (GstVideoCodecState, state);
    return NULL;
  }

  if (reference) {
    GstVideoInfo *tgt, *ref;

    tgt = &state->info;
    ref = &reference->info;

    /* Copy over extra fields from reference state */
    if (copy_interlace_mode)
      tgt->interlace_mode = ref->interlace_mode;
    tgt->flags = ref->flags;
    tgt->chroma_site = ref->chroma_site;
    tgt->colorimetry = ref->colorimetry;
    GST_DEBUG ("reference par %d/%d fps %d/%d",
        ref->par_n, ref->par_d, ref->fps_n, ref->fps_d);
    tgt->par_n = ref->par_n;
    tgt->par_d = ref->par_d;
    tgt->fps_n = ref->fps_n;
    tgt->fps_d = ref->fps_d;
    tgt->views = ref->views;

    GST_VIDEO_INFO_FIELD_ORDER (tgt) = GST_VIDEO_INFO_FIELD_ORDER (ref);

    if (GST_VIDEO_INFO_MULTIVIEW_MODE (ref) != GST_VIDEO_MULTIVIEW_MODE_NONE) {
      GST_VIDEO_INFO_MULTIVIEW_MODE (tgt) = GST_VIDEO_INFO_MULTIVIEW_MODE (ref);
      GST_VIDEO_INFO_MULTIVIEW_FLAGS (tgt) =
          GST_VIDEO_INFO_MULTIVIEW_FLAGS (ref);
    } else {
      /* Default to MONO, overridden as needed by sub-classes */
      GST_VIDEO_INFO_MULTIVIEW_MODE (tgt) = GST_VIDEO_MULTIVIEW_MODE_MONO;
      GST_VIDEO_INFO_MULTIVIEW_FLAGS (tgt) = GST_VIDEO_MULTIVIEW_FLAGS_NONE;
    }
  }

  GST_DEBUG ("reference par %d/%d fps %d/%d",
      state->info.par_n, state->info.par_d,
      state->info.fps_n, state->info.fps_d);

  return state;
}

static gboolean
gst_video_decoder_setcaps (GstVideoDecoder * decoder, GstCaps * caps)
{
  GstVideoDecoderClass *decoder_class;
  GstVideoCodecState *state;
  gboolean ret = TRUE;

  decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);

  GST_DEBUG_OBJECT (decoder, "setcaps %" GST_PTR_FORMAT, caps);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  if (decoder->priv->input_state) {
    GST_DEBUG_OBJECT (decoder,
        "Checking if caps changed old %" GST_PTR_FORMAT " new %" GST_PTR_FORMAT,
        decoder->priv->input_state->caps, caps);
    if (gst_caps_is_equal (decoder->priv->input_state->caps, caps))
      goto caps_not_changed;
  }

  state = _new_input_state (caps);

  if (G_UNLIKELY (state == NULL))
    goto parse_fail;

  if (decoder_class->set_format)
    ret = decoder_class->set_format (decoder, state);

  if (!ret)
    goto refused_format;

  if (decoder->priv->input_state)
    gst_video_codec_state_unref (decoder->priv->input_state);
  decoder->priv->input_state = state;

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return ret;

caps_not_changed:
  {
    GST_DEBUG_OBJECT (decoder, "Caps did not change - ignore");
    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    return TRUE;
  }

  /* ERRORS */
parse_fail:
  {
    GST_WARNING_OBJECT (decoder, "Failed to parse caps");
    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    return FALSE;
  }

refused_format:
  {
    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    GST_WARNING_OBJECT (decoder, "Subclass refused caps");
    gst_video_codec_state_unref (state);
    return FALSE;
  }
}

static void
gst_video_decoder_finalize (GObject * object)
{
  GstVideoDecoder *decoder;

  decoder = GST_VIDEO_DECODER (object);

  GST_DEBUG_OBJECT (object, "finalize");

  g_rec_mutex_clear (&decoder->stream_lock);

  if (decoder->priv->input_adapter) {
    g_object_unref (decoder->priv->input_adapter);
    decoder->priv->input_adapter = NULL;
  }
  if (decoder->priv->output_adapter) {
    g_object_unref (decoder->priv->output_adapter);
    decoder->priv->output_adapter = NULL;
  }

  if (decoder->priv->input_state)
    gst_video_codec_state_unref (decoder->priv->input_state);
  if (decoder->priv->output_state)
    gst_video_codec_state_unref (decoder->priv->output_state);

  if (decoder->priv->pool) {
    gst_object_unref (decoder->priv->pool);
    decoder->priv->pool = NULL;
  }

  if (decoder->priv->allocator) {
    gst_object_unref (decoder->priv->allocator);
    decoder->priv->allocator = NULL;
  }

  G_OBJECT_CLASS (parent_class)->finalize (object);
}

static void
gst_video_decoder_get_property (GObject * object, guint property_id,
    GValue * value, GParamSpec * pspec)
{
  GstVideoDecoder *dec = GST_VIDEO_DECODER (object);
  GstVideoDecoderPrivate *priv = dec->priv;

  switch (property_id) {
    case PROP_QOS:
      g_value_set_boolean (value, priv->do_qos);
      break;
    case PROP_MAX_ERRORS:
      g_value_set_int (value, gst_video_decoder_get_max_errors (dec));
      break;
    case PROP_MIN_FORCE_KEY_UNIT_INTERVAL:
      g_value_set_uint64 (value, priv->min_force_key_unit_interval);
      break;
    case PROP_DISCARD_CORRUPTED_FRAMES:
      g_value_set_boolean (value, priv->discard_corrupted_frames);
      break;
    case PROP_AUTOMATIC_REQUEST_SYNC_POINTS:
      g_value_set_boolean (value, priv->automatic_request_sync_points);
      break;
    case PROP_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS:
      g_value_set_flags (value, priv->automatic_request_sync_point_flags);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
      break;
  }
}

static void
gst_video_decoder_set_property (GObject * object, guint property_id,
    const GValue * value, GParamSpec * pspec)
{
  GstVideoDecoder *dec = GST_VIDEO_DECODER (object);
  GstVideoDecoderPrivate *priv = dec->priv;

  switch (property_id) {
    case PROP_QOS:
      priv->do_qos = g_value_get_boolean (value);
      break;
    case PROP_MAX_ERRORS:
      gst_video_decoder_set_max_errors (dec, g_value_get_int (value));
      break;
    case PROP_MIN_FORCE_KEY_UNIT_INTERVAL:
      priv->min_force_key_unit_interval = g_value_get_uint64 (value);
      break;
    case PROP_DISCARD_CORRUPTED_FRAMES:
      priv->discard_corrupted_frames = g_value_get_boolean (value);
      break;
    case PROP_AUTOMATIC_REQUEST_SYNC_POINTS:
      priv->automatic_request_sync_points = g_value_get_boolean (value);
      break;
    case PROP_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS:
      priv->automatic_request_sync_point_flags = g_value_get_flags (value);
      break;
#ifdef OHOS_OPT_COMPAT
    // ohos.opt.compat.0053
    case PROP_ONLY_ONE_FRAME_REQUIRED:
      priv->only_one_frame_required = g_value_get_boolean (value);
      break;
#endif
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
      break;
  }
}

/* hard == FLUSH, otherwise discont */
static GstFlowReturn
gst_video_decoder_flush (GstVideoDecoder * dec, gboolean hard)
{
  GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (dec);
  GstFlowReturn ret = GST_FLOW_OK;

  GST_LOG_OBJECT (dec, "flush hard %d", hard);

  /* Inform subclass */
  if (klass->reset) {
    GST_FIXME_OBJECT (dec, "GstVideoDecoder::reset() is deprecated");
    klass->reset (dec, hard);
  }

  if (klass->flush)
    klass->flush (dec);

  /* and get (re)set for the sequel */
  gst_video_decoder_reset (dec, FALSE, hard);

  return ret;
}

static GstEvent *
gst_video_decoder_create_merged_tags_event (GstVideoDecoder * dec)
{
  GstTagList *merged_tags;

  GST_LOG_OBJECT (dec, "upstream : %" GST_PTR_FORMAT, dec->priv->upstream_tags);
  GST_LOG_OBJECT (dec, "decoder  : %" GST_PTR_FORMAT, dec->priv->tags);
  GST_LOG_OBJECT (dec, "mode     : %d", dec->priv->tags_merge_mode);

  merged_tags =
      gst_tag_list_merge (dec->priv->upstream_tags, dec->priv->tags,
      dec->priv->tags_merge_mode);

  GST_DEBUG_OBJECT (dec, "merged   : %" GST_PTR_FORMAT, merged_tags);

  if (merged_tags == NULL)
    return NULL;

  if (gst_tag_list_is_empty (merged_tags)) {
    gst_tag_list_unref (merged_tags);
    return NULL;
  }

  return gst_event_new_tag (merged_tags);
}

static gboolean
gst_video_decoder_push_event (GstVideoDecoder * decoder, GstEvent * event)
{
  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_SEGMENT:
    {
      GstSegment segment;

      gst_event_copy_segment (event, &segment);

      GST_DEBUG_OBJECT (decoder, "segment %" GST_SEGMENT_FORMAT, &segment);

      if (segment.format != GST_FORMAT_TIME) {
        GST_DEBUG_OBJECT (decoder, "received non TIME newsegment");
        break;
      }

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      decoder->output_segment = segment;
      decoder->priv->in_out_segment_sync =
          gst_segment_is_equal (&decoder->input_segment, &segment);
      decoder->priv->last_timestamp_out = GST_CLOCK_TIME_NONE;
      decoder->priv->earliest_time = GST_CLOCK_TIME_NONE;
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
      break;
    }
    default:
      break;
  }

  GST_DEBUG_OBJECT (decoder, "pushing event %s",
      gst_event_type_get_name (GST_EVENT_TYPE (event)));

  return gst_pad_push_event (decoder->srcpad, event);
}

static GstFlowReturn
gst_video_decoder_parse_available (GstVideoDecoder * dec, gboolean at_eos,
    gboolean new_buffer)
{
  GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (dec);
  GstVideoDecoderPrivate *priv = dec->priv;
  GstFlowReturn ret = GST_FLOW_OK;
  gsize was_available, available;
  guint inactive = 0;

  available = gst_adapter_available (priv->input_adapter);

  while (available || new_buffer) {
    new_buffer = FALSE;
    /* the current frame may have been parsed and handled,
     * so we need to set up a new one when asking the subclass to parse */
    if (priv->current_frame == NULL)
      priv->current_frame = gst_video_decoder_new_frame (dec);

    was_available = available;
    ret = decoder_class->parse (dec, priv->current_frame,
        priv->input_adapter, at_eos);
    if (ret != GST_FLOW_OK)
      break;

    /* if the subclass returned success (GST_FLOW_OK), it is expected
     * to have collected and submitted a frame, i.e. it should have
     * called gst_video_decoder_have_frame(), or at least consumed a
     * few bytes through gst_video_decoder_add_to_frame().
     *
     * Otherwise, this is an implementation bug, and we error out
     * after 2 failed attempts */
    available = gst_adapter_available (priv->input_adapter);
    if (!priv->current_frame || available != was_available)
      inactive = 0;
    else if (++inactive == 2)
      goto error_inactive;
  }

  return ret;

  /* ERRORS */
error_inactive:
  {
    GST_ERROR_OBJECT (dec, "Failed to consume data. Error in subclass?");
    return GST_FLOW_ERROR;
  }
}

/* This function has to be called with the stream lock taken. */
static GstFlowReturn
gst_video_decoder_drain_out (GstVideoDecoder * dec, gboolean at_eos)
{
  GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (dec);
  GstVideoDecoderPrivate *priv = dec->priv;
  GstFlowReturn ret = GST_FLOW_OK;

  if (dec->input_segment.rate > 0.0) {
    /* Forward mode, if unpacketized, give the child class
     * a final chance to flush out packets */
    if (!priv->packetized) {
      ret = gst_video_decoder_parse_available (dec, TRUE, FALSE);
    }

    if (at_eos) {
      if (decoder_class->finish)
        ret = decoder_class->finish (dec);
    } else {
      if (decoder_class->drain) {
        ret = decoder_class->drain (dec);
      } else {
        GST_FIXME_OBJECT (dec, "Sub-class should implement drain()");
      }
    }
  } else {
    /* Reverse playback mode */
    ret = gst_video_decoder_flush_parse (dec, TRUE);
  }

  return ret;
}

static GList *
_flush_events (GstPad * pad, GList * events)
{
  GList *tmp;

  for (tmp = events; tmp; tmp = tmp->next) {
    if (GST_EVENT_TYPE (tmp->data) != GST_EVENT_EOS &&
        GST_EVENT_TYPE (tmp->data) != GST_EVENT_SEGMENT &&
        GST_EVENT_IS_STICKY (tmp->data)) {
      gst_pad_store_sticky_event (pad, GST_EVENT_CAST (tmp->data));
    }
    gst_event_unref (tmp->data);
  }
  g_list_free (events);

  return NULL;
}

/* Must be called holding the GST_VIDEO_DECODER_STREAM_LOCK */
static gboolean
gst_video_decoder_negotiate_default_caps (GstVideoDecoder * decoder)
{
  GstCaps *caps, *templcaps;
  GstVideoCodecState *state;
  GstVideoInfo info;
  gint i;
  gint caps_size;
  GstStructure *structure;

  templcaps = gst_pad_get_pad_template_caps (decoder->srcpad);
  caps = gst_pad_peer_query_caps (decoder->srcpad, templcaps);
  if (caps)
    gst_caps_unref (templcaps);
  else
    caps = templcaps;
  templcaps = NULL;

  if (!caps || gst_caps_is_empty (caps) || gst_caps_is_any (caps))
    goto caps_error;

  GST_LOG_OBJECT (decoder, "peer caps %" GST_PTR_FORMAT, caps);

  /* before fixating, try to use whatever upstream provided */
  caps = gst_caps_make_writable (caps);
  caps_size = gst_caps_get_size (caps);
  if (decoder->priv->input_state && decoder->priv->input_state->caps) {
    GstCaps *sinkcaps = decoder->priv->input_state->caps;
    GstStructure *structure = gst_caps_get_structure (sinkcaps, 0);
    gint width, height;

    if (gst_structure_get_int (structure, "width", &width)) {
      for (i = 0; i < caps_size; i++) {
        gst_structure_set (gst_caps_get_structure (caps, i), "width",
            G_TYPE_INT, width, NULL);
      }
    }

    if (gst_structure_get_int (structure, "height", &height)) {
      for (i = 0; i < caps_size; i++) {
        gst_structure_set (gst_caps_get_structure (caps, i), "height",
            G_TYPE_INT, height, NULL);
      }
    }
  }

  for (i = 0; i < caps_size; i++) {
    structure = gst_caps_get_structure (caps, i);
    /* Random I420 1280x720 for fixation */
    if (gst_structure_has_field (structure, "format"))
      gst_structure_fixate_field_string (structure, "format", "I420");
    else
      gst_structure_set (structure, "format", G_TYPE_STRING, "I420", NULL);

    if (gst_structure_has_field (structure, "width"))
      gst_structure_fixate_field_nearest_int (structure, "width", 1280);
    else
      gst_structure_set (structure, "width", G_TYPE_INT, 1280, NULL);

    if (gst_structure_has_field (structure, "height"))
      gst_structure_fixate_field_nearest_int (structure, "height", 720);
    else
      gst_structure_set (structure, "height", G_TYPE_INT, 720, NULL);
  }
  caps = gst_caps_fixate (caps);

  if (!caps || !gst_video_info_from_caps (&info, caps))
    goto caps_error;

  GST_INFO_OBJECT (decoder,
      "Chose default caps %" GST_PTR_FORMAT " for initial gap", caps);
  state =
      gst_video_decoder_set_output_state (decoder, info.finfo->format,
      info.width, info.height, decoder->priv->input_state);
  gst_video_codec_state_unref (state);
  gst_caps_unref (caps);

  return TRUE;

caps_error:
  {
    if (caps)
      gst_caps_unref (caps);
    return FALSE;
  }
}

static gboolean
gst_video_decoder_handle_missing_data_default (GstVideoDecoder *
    decoder, GstClockTime timestamp, GstClockTime duration)
{
  GstVideoDecoderPrivate *priv;

  priv = decoder->priv;

  if (priv->automatic_request_sync_points) {
    GstClockTime deadline =
        gst_segment_to_running_time (&decoder->input_segment, GST_FORMAT_TIME,
        timestamp);

    GST_DEBUG_OBJECT (decoder,
        "Requesting sync point for missing data at running time %"
        GST_TIME_FORMAT " timestamp %" GST_TIME_FORMAT " with duration %"
        GST_TIME_FORMAT, GST_TIME_ARGS (deadline), GST_TIME_ARGS (timestamp),
        GST_TIME_ARGS (duration));

    gst_video_decoder_request_sync_point_internal (decoder, deadline,
        priv->automatic_request_sync_point_flags);
  }

  return TRUE;
}

static gboolean
gst_video_decoder_sink_event_default (GstVideoDecoder * decoder,
    GstEvent * event)
{
  GstVideoDecoderClass *decoder_class;
  GstVideoDecoderPrivate *priv;
  gboolean ret = FALSE;
  gboolean forward_immediate = FALSE;

  decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);

  priv = decoder->priv;

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_STREAM_START:
    {
      GstFlowReturn flow_ret = GST_FLOW_OK;

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
      ret = (flow_ret == GST_FLOW_OK);

      GST_DEBUG_OBJECT (decoder, "received STREAM_START. Clearing taglist");
      /* Flush upstream tags after a STREAM_START */
      if (priv->upstream_tags) {
        gst_tag_list_unref (priv->upstream_tags);
        priv->upstream_tags = NULL;
        priv->tags_changed = TRUE;
      }
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

      /* Forward STREAM_START immediately. Everything is drained after
       * the STREAM_START event and we can forward this event immediately
       * now without having buffers out of order.
       */
      forward_immediate = TRUE;
      break;
    }
    case GST_EVENT_CAPS:
    {
      GstCaps *caps;

      gst_event_parse_caps (event, &caps);
      ret = gst_video_decoder_setcaps (decoder, caps);
      gst_event_unref (event);
      event = NULL;
      break;
    }
    case GST_EVENT_SEGMENT_DONE:
    {
      GstFlowReturn flow_ret = GST_FLOW_OK;

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
      ret = (flow_ret == GST_FLOW_OK);

      /* Forward SEGMENT_DONE immediately. This is required
       * because no buffer or serialized event might come
       * after SEGMENT_DONE and nothing could trigger another
       * _finish_frame() call.
       *
       * The subclass can override this behaviour by overriding
       * the ::sink_event() vfunc and not chaining up to the
       * parent class' ::sink_event() until a later time.
       */
      forward_immediate = TRUE;
      break;
    }
    case GST_EVENT_EOS:
    {
      GstFlowReturn flow_ret = GST_FLOW_OK;

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      flow_ret = gst_video_decoder_drain_out (decoder, TRUE);
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
      ret = (flow_ret == GST_FLOW_OK);

      /* Error out even if EOS was ok when we had input, but no output */
#ifdef OHOS_OPT_COMPAT
      /* ohos.opt.compat.0049
       * When we seek to a position where there is no keyframe and decoding
       * fails, we don't treat it as an error. For example, tsdemux does not
       * guarantee that the stream pushed when seeking contains keyframes,
       * and mkvdemux may incorrectly treat non-keyframes as keyframes.
       */
      if (ret && priv->had_input_data) {
        if (!priv->had_output_data)
          GST_WARNING_OBJECT (decoder, "No valid frames decoded before end of stream");
        else if (!priv->stream_had_output_data)
          GST_ELEMENT_ERROR (decoder, STREAM, DECODE,
            ("No valid frames decoded before end of stream"),
            ("no valid frames found"));
      }
#else
      if (ret && priv->had_input_data && !priv->had_output_data) {
        GST_ELEMENT_ERROR (decoder, STREAM, DECODE,
            ("No valid frames decoded before end of stream"),
            ("no valid frames found"));
      }
#endif

      /* Forward EOS immediately. This is required because no
       * buffer or serialized event will come after EOS and
       * nothing could trigger another _finish_frame() call.
       *
       * The subclass can override this behaviour by overriding
       * the ::sink_event() vfunc and not chaining up to the
       * parent class' ::sink_event() until a later time.
       */
      forward_immediate = TRUE;
      break;
    }
    case GST_EVENT_GAP:
    {
      GstClockTime timestamp, duration;
      GstGapFlags gap_flags = 0;
      GstFlowReturn flow_ret = GST_FLOW_OK;
      gboolean needs_reconfigure = FALSE;
      GList *events;
      GList *frame_events;

      gst_event_parse_gap (event, &timestamp, &duration);
      gst_event_parse_gap_flags (event, &gap_flags);

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      /* If this is not missing data, or the subclass does not handle it
       * specifically, then drain out the decoder and forward the event
       * directly. */
      if ((gap_flags & GST_GAP_FLAG_MISSING_DATA) == 0
          || !decoder_class->handle_missing_data
          || decoder_class->handle_missing_data (decoder, timestamp,
              duration)) {
        if (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS)
          flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
        ret = (flow_ret == GST_FLOW_OK);

        /* Ensure we have caps before forwarding the event */
        if (!decoder->priv->output_state) {
          if (!gst_video_decoder_negotiate_default_caps (decoder)) {
            GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
            GST_ELEMENT_ERROR (decoder, STREAM, FORMAT, (NULL),
                ("Decoder output not negotiated before GAP event."));
            forward_immediate = TRUE;
            break;
          }
          needs_reconfigure = TRUE;
        }

        needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad)
            || needs_reconfigure;
        if (decoder->priv->output_state_changed || needs_reconfigure) {
          if (!gst_video_decoder_negotiate_unlocked (decoder)) {
            GST_WARNING_OBJECT (decoder, "Failed to negotiate with downstream");
            gst_pad_mark_reconfigure (decoder->srcpad);
          }
        }

        GST_DEBUG_OBJECT (decoder, "Pushing all pending serialized events"
            " before the gap");
        events = decoder->priv->pending_events;
        frame_events = decoder->priv->current_frame_events;
        decoder->priv->pending_events = NULL;
        decoder->priv->current_frame_events = NULL;

        GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

        gst_video_decoder_push_event_list (decoder, events);
        gst_video_decoder_push_event_list (decoder, frame_events);
1543 
1544         /* Forward GAP immediately. Everything is drained after
1545          * the GAP event and we can forward this event immediately
1546          * now without having buffers out of order.
1547          */
1548         forward_immediate = TRUE;
1549       } else {
1550         GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1551         gst_clear_event (&event);
1552       }
1553       break;
1554     }
1555     case GST_EVENT_CUSTOM_DOWNSTREAM:
1556     {
1557       gboolean in_still;
1558       GstFlowReturn flow_ret = GST_FLOW_OK;
1559 
1560       if (gst_video_event_parse_still_frame (event, &in_still)) {
1561         if (in_still) {
1562           GST_DEBUG_OBJECT (decoder, "draining current data for still-frame");
1563           GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1564           flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
1565           GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1566           ret = (flow_ret == GST_FLOW_OK);
1567         }
1568         /* Forward STILL_FRAME immediately. Everything is drained after
1569          * the STILL_FRAME event and we can forward this event immediately
1570          * now without having buffers out of order.
1571          */
1572         forward_immediate = TRUE;
1573       }
1574       break;
1575     }
1576     case GST_EVENT_SEGMENT:
1577     {
1578       GstSegment segment;
1579 
1580       gst_event_copy_segment (event, &segment);
1581 
1582       if (segment.format == GST_FORMAT_TIME) {
1583         GST_DEBUG_OBJECT (decoder,
1584             "received TIME SEGMENT %" GST_SEGMENT_FORMAT, &segment);
1585       } else {
1586         gint64 start;
1587 
1588         GST_DEBUG_OBJECT (decoder,
1589             "received SEGMENT %" GST_SEGMENT_FORMAT, &segment);
1590 
1591         /* handle newsegment as a result from our legacy simple seeking */
1592         /* note that initial 0 should convert to 0 in any case */
1593         if (priv->do_estimate_rate &&
1594             gst_pad_query_convert (decoder->sinkpad, GST_FORMAT_BYTES,
1595                 segment.start, GST_FORMAT_TIME, &start)) {
1596           /* best attempt convert */
1597           /* as these are only estimates, stop is kept open-ended to avoid
1598            * premature cutting */
1599           GST_DEBUG_OBJECT (decoder,
1600               "converted to TIME start %" GST_TIME_FORMAT,
1601               GST_TIME_ARGS (start));
1602           segment.start = start;
1603           segment.stop = GST_CLOCK_TIME_NONE;
1604           segment.time = start;
1605           /* replace event */
1606           gst_event_unref (event);
1607           event = gst_event_new_segment (&segment);
1608         } else {
1609           goto newseg_wrong_format;
1610         }
1611       }
1612 
1613       GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1614 
1615       /* Update the decode flags in the segment if we have an instant-rate
1616        * override active */
1617       GST_OBJECT_LOCK (decoder);
1618       if (!priv->decode_flags_override)
1619         priv->decode_flags = segment.flags;
1620       else {
1621         segment.flags &= ~GST_SEGMENT_INSTANT_FLAGS;
1622         segment.flags |= priv->decode_flags & GST_SEGMENT_INSTANT_FLAGS;
1623       }
1624 
1625       priv->base_timestamp = GST_CLOCK_TIME_NONE;
1626       priv->base_picture_number = 0;
1627 
1628       decoder->input_segment = segment;
1629       decoder->priv->in_out_segment_sync = FALSE;
1630 
1631       GST_OBJECT_UNLOCK (decoder);
1632       GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1633 
1634       break;
1635     }
1636     case GST_EVENT_INSTANT_RATE_CHANGE:
1637     {
1638       GstSegmentFlags flags;
1639       GstSegment *seg;
1640 
1641       gst_event_parse_instant_rate_change (event, NULL, &flags);
1642 
1643       GST_OBJECT_LOCK (decoder);
1644       priv->decode_flags_override = TRUE;
1645       priv->decode_flags = flags;
1646 
1647       /* Update the input segment flags */
1648       seg = &decoder->input_segment;
1649       seg->flags &= ~GST_SEGMENT_INSTANT_FLAGS;
1650       seg->flags |= priv->decode_flags & GST_SEGMENT_INSTANT_FLAGS;
1651       GST_OBJECT_UNLOCK (decoder);
1652       break;
1653     }
1654     case GST_EVENT_FLUSH_STOP:
1655     {
1656       GList *l;
1657 
1658       GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1659       for (l = priv->frames.head; l; l = l->next) {
1660         GstVideoCodecFrame *frame = l->data;
1661 
1662         frame->events = _flush_events (decoder->srcpad, frame->events);
1663       }
1664       priv->current_frame_events = _flush_events (decoder->srcpad,
1665           decoder->priv->current_frame_events);
1666 
1667       /* well, this is kind of worse than a DISCONT */
1668       gst_video_decoder_flush (decoder, TRUE);
1669       GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1670       /* Forward FLUSH_STOP immediately. This is required because it is
1671        * expected to be forwarded immediately and no buffers are queued
1672        * anyway.
1673        */
1674       forward_immediate = TRUE;
1675       break;
1676     }
1677     case GST_EVENT_TAG:
1678     {
1679       GstTagList *tags;
1680 
1681       gst_event_parse_tag (event, &tags);
1682 
1683       if (gst_tag_list_get_scope (tags) == GST_TAG_SCOPE_STREAM) {
1684         GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1685         if (priv->upstream_tags != tags) {
1686           if (priv->upstream_tags)
1687             gst_tag_list_unref (priv->upstream_tags);
1688           priv->upstream_tags = gst_tag_list_ref (tags);
1689           GST_INFO_OBJECT (decoder, "upstream tags: %" GST_PTR_FORMAT, tags);
1690         }
1691         gst_event_unref (event);
1692         event = gst_video_decoder_create_merged_tags_event (decoder);
1693         GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1694         if (!event)
1695           ret = TRUE;
1696       }
1697       break;
1698     }
1699     default:
1700       break;
1701   }
1702 
1703   /* Forward non-serialized events immediately, and all other
1704    * events which can be forwarded immediately without potentially
1705    * causing the event to go out of order with other events and
1706    * buffers as decided above.
1707    */
1708   if (event) {
1709     if (!GST_EVENT_IS_SERIALIZED (event) || forward_immediate) {
1710       ret = gst_video_decoder_push_event (decoder, event);
1711     } else {
1712       GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1713       decoder->priv->current_frame_events =
1714           g_list_prepend (decoder->priv->current_frame_events, event);
1715       GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1716       ret = TRUE;
1717     }
1718   }
1719 
1720   return ret;
1721 
1722 newseg_wrong_format:
1723   {
1724     GST_DEBUG_OBJECT (decoder, "received non TIME newsegment");
1725     gst_event_unref (event);
1726     /* SWALLOW EVENT */
1727     return TRUE;
1728   }
1729 }
1730 
1731 static gboolean
1732 gst_video_decoder_sink_event (GstPad * pad, GstObject * parent,
1733     GstEvent * event)
1734 {
1735   GstVideoDecoder *decoder;
1736   GstVideoDecoderClass *decoder_class;
1737   gboolean ret = FALSE;
1738 
1739   decoder = GST_VIDEO_DECODER (parent);
1740   decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
1741 
1742   GST_DEBUG_OBJECT (decoder, "received event %d, %s", GST_EVENT_TYPE (event),
1743       GST_EVENT_TYPE_NAME (event));
1744 
1745   if (decoder_class->sink_event)
1746     ret = decoder_class->sink_event (decoder, event);
1747 
1748   return ret;
1749 }
1750 
1751 /* perform upstream byte <-> time conversion (duration, seeking)
1752  * if subclass allows and if enough data for moderately decent conversion */
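/* e.g. an upstream-refused TIME seek to 0:00:10 is converted (using the
 * byte<->time estimates) into a flushing BYTES seek to the estimated
 * offset, and the resulting BYTES segment is converted back to TIME in
 * the SEGMENT handler above. */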
1753 static inline gboolean
1754 gst_video_decoder_do_byte (GstVideoDecoder * dec)
1755 {
1756   gboolean ret;
1757 
1758   GST_OBJECT_LOCK (dec);
1759   ret = dec->priv->do_estimate_rate && (dec->priv->bytes_out > 0)
1760       && (dec->priv->time > GST_SECOND);
1761   GST_OBJECT_UNLOCK (dec);
1762 
1763   return ret;
1764 }
1765 
1766 static gboolean
1767 gst_video_decoder_do_seek (GstVideoDecoder * dec, GstEvent * event)
1768 {
1769   GstFormat format;
1770   GstSeekFlags flags;
1771   GstSeekType start_type, end_type;
1772   gdouble rate;
1773   gint64 start, start_time, end_time;
1774   GstSegment seek_segment;
1775   guint32 seqnum;
1776 
1777   gst_event_parse_seek (event, &rate, &format, &flags, &start_type,
1778       &start_time, &end_type, &end_time);
1779 
1780   /* we'll handle plain open-ended flushing seeks with the simple approach */
1781   if (rate != 1.0) {
1782     GST_DEBUG_OBJECT (dec, "unsupported seek: rate");
1783     return FALSE;
1784   }
1785 
1786   if (start_type != GST_SEEK_TYPE_SET) {
1787     GST_DEBUG_OBJECT (dec, "unsupported seek: start time");
1788     return FALSE;
1789   }
1790 
1791   if ((end_type != GST_SEEK_TYPE_SET && end_type != GST_SEEK_TYPE_NONE) ||
1792       (end_type == GST_SEEK_TYPE_SET && end_time != GST_CLOCK_TIME_NONE)) {
1793     GST_DEBUG_OBJECT (dec, "unsupported seek: end time");
1794     return FALSE;
1795   }
1796 
1797   if (!(flags & GST_SEEK_FLAG_FLUSH)) {
1798     GST_DEBUG_OBJECT (dec, "unsupported seek: not flushing");
1799     return FALSE;
1800   }
1801 
1802   memcpy (&seek_segment, &dec->output_segment, sizeof (seek_segment));
1803   gst_segment_do_seek (&seek_segment, rate, format, flags, start_type,
1804       start_time, end_type, end_time, NULL);
1805   start_time = seek_segment.position;
1806 
1807   if (!gst_pad_query_convert (dec->sinkpad, GST_FORMAT_TIME, start_time,
1808           GST_FORMAT_BYTES, &start)) {
1809     GST_DEBUG_OBJECT (dec, "conversion failed");
1810     return FALSE;
1811   }
1812 
1813   seqnum = gst_event_get_seqnum (event);
1814   event = gst_event_new_seek (1.0, GST_FORMAT_BYTES, flags,
1815       GST_SEEK_TYPE_SET, start, GST_SEEK_TYPE_NONE, -1);
1816   gst_event_set_seqnum (event, seqnum);
1817 
1818   GST_DEBUG_OBJECT (dec, "seeking to %" GST_TIME_FORMAT " at byte offset %"
1819       G_GINT64_FORMAT, GST_TIME_ARGS (start_time), start);
1820 
1821   return gst_pad_push_event (dec->sinkpad, event);
1822 }
1823 
1824 static gboolean
1825 gst_video_decoder_src_event_default (GstVideoDecoder * decoder,
1826     GstEvent * event)
1827 {
1828   GstVideoDecoderPrivate *priv;
1829   gboolean res = FALSE;
1830 
1831   priv = decoder->priv;
1832 
1833   GST_DEBUG_OBJECT (decoder,
1834       "received event %d, %s", GST_EVENT_TYPE (event),
1835       GST_EVENT_TYPE_NAME (event));
1836 
1837   switch (GST_EVENT_TYPE (event)) {
1838     case GST_EVENT_SEEK:
1839     {
1840       GstFormat format;
1841       gdouble rate;
1842       GstSeekFlags flags;
1843       GstSeekType start_type, stop_type;
1844       gint64 start, stop;
1845       gint64 tstart, tstop;
1846       guint32 seqnum;
1847 
1848       gst_event_parse_seek (event, &rate, &format, &flags, &start_type, &start,
1849           &stop_type, &stop);
1850       seqnum = gst_event_get_seqnum (event);
1851 
1852       /* upstream gets a chance first */
1853       if ((res = gst_pad_push_event (decoder->sinkpad, event)))
1854         break;
1855 
1856       /* if upstream fails for a time seek, maybe we can help if allowed */
1857       if (format == GST_FORMAT_TIME) {
1858         if (gst_video_decoder_do_byte (decoder))
1859           res = gst_video_decoder_do_seek (decoder, event);
1860         break;
1861       }
1862 
1863       /* ... though a non-time seek can be aided as well */
1864       /* First bring the requested format to time */
1865       if (!(res =
1866               gst_pad_query_convert (decoder->srcpad, format, start,
1867                   GST_FORMAT_TIME, &tstart)))
1868         goto convert_error;
1869       if (!(res =
1870               gst_pad_query_convert (decoder->srcpad, format, stop,
1871                   GST_FORMAT_TIME, &tstop)))
1872         goto convert_error;
1873 
1874       /* then seek with time on the peer */
1875       event = gst_event_new_seek (rate, GST_FORMAT_TIME,
1876           flags, start_type, tstart, stop_type, tstop);
1877       gst_event_set_seqnum (event, seqnum);
1878 
1879       res = gst_pad_push_event (decoder->sinkpad, event);
1880       break;
1881     }
1882     case GST_EVENT_QOS:
1883     {
1884       GstQOSType type;
1885       gdouble proportion;
1886       GstClockTimeDiff diff;
1887       GstClockTime timestamp;
1888 
1889       gst_event_parse_qos (event, &type, &proportion, &diff, &timestamp);
1890 
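      /* The earliest time computed below is the decoding deadline later
       * reported by gst_video_decoder_get_max_decode_time(): with positive
       * jitter (we are running late) it is pushed out by twice the reported
       * lateness plus one frame duration, so frames are only skipped once we
       * are consistently behind; with negative jitter (early) it simply
       * moves back by the reported slack. */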
1891       GST_OBJECT_LOCK (decoder);
1892       priv->proportion = proportion;
1893       if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (timestamp))) {
1894         if (G_UNLIKELY (diff > 0)) {
1895           priv->earliest_time = timestamp + 2 * diff + priv->qos_frame_duration;
1896         } else {
1897           priv->earliest_time = timestamp + diff;
1898         }
1899       } else {
1900         priv->earliest_time = GST_CLOCK_TIME_NONE;
1901       }
1902       GST_OBJECT_UNLOCK (decoder);
1903 
1904       GST_DEBUG_OBJECT (decoder,
1905           "got QoS %" GST_TIME_FORMAT ", %" GST_STIME_FORMAT ", %g",
1906           GST_TIME_ARGS (timestamp), GST_STIME_ARGS (diff), proportion);
1907 
1908       res = gst_pad_push_event (decoder->sinkpad, event);
1909       break;
1910     }
1911     default:
1912       res = gst_pad_push_event (decoder->sinkpad, event);
1913       break;
1914   }
1915 done:
1916   return res;
1917 
1918 convert_error:
1919   GST_DEBUG_OBJECT (decoder, "could not convert format");
1920   goto done;
1921 }
1922 
1923 static gboolean
1924 gst_video_decoder_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
1925 {
1926   GstVideoDecoder *decoder;
1927   GstVideoDecoderClass *decoder_class;
1928   gboolean ret = FALSE;
1929 
1930   decoder = GST_VIDEO_DECODER (parent);
1931   decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
1932 
1933   GST_DEBUG_OBJECT (decoder, "received event %d, %s", GST_EVENT_TYPE (event),
1934       GST_EVENT_TYPE_NAME (event));
1935 
1936   if (decoder_class->src_event)
1937     ret = decoder_class->src_event (decoder, event);
1938 
1939   return ret;
1940 }
1941 
1942 static gboolean
1943 gst_video_decoder_src_query_default (GstVideoDecoder * dec, GstQuery * query)
1944 {
1945   GstPad *pad = GST_VIDEO_DECODER_SRC_PAD (dec);
1946   gboolean res = TRUE;
1947 
1948   GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query);
1949 
1950   switch (GST_QUERY_TYPE (query)) {
1951     case GST_QUERY_POSITION:
1952     {
1953       GstFormat format;
1954       gint64 time, value;
1955 
1956       /* upstream gets a chance first */
1957       if ((res = gst_pad_peer_query (dec->sinkpad, query))) {
1958         GST_LOG_OBJECT (dec, "returning peer response");
1959         break;
1960       }
1961 
1962       /* Refuse BYTES format queries. If it made sense to
1963        * answer them, upstream would have already */
1964       gst_query_parse_position (query, &format, NULL);
1965 
1966       if (format == GST_FORMAT_BYTES) {
1967         GST_LOG_OBJECT (dec, "Ignoring BYTES position query");
1968         break;
1969       }
1970 
1971       /* we start from the last seen time */
1972       time = dec->priv->last_timestamp_out;
1973       /* correct for the segment values */
1974       time = gst_segment_to_stream_time (&dec->output_segment,
1975           GST_FORMAT_TIME, time);
1976 
1977       GST_LOG_OBJECT (dec,
1978           "query %p: our time: %" GST_TIME_FORMAT, query, GST_TIME_ARGS (time));
1979 
1980       /* and convert to the final format */
1981       if (!(res = gst_pad_query_convert (pad, GST_FORMAT_TIME, time,
1982                   format, &value)))
1983         break;
1984 
1985       gst_query_set_position (query, format, value);
1986 
1987       GST_LOG_OBJECT (dec,
1988           "query %p: we return %" G_GINT64_FORMAT " (format %u)", query, value,
1989           format);
1990       break;
1991     }
1992     case GST_QUERY_DURATION:
1993     {
1994       GstFormat format;
1995 
1996       /* upstream in any case */
1997       if ((res = gst_pad_query_default (pad, GST_OBJECT (dec), query)))
1998         break;
1999 
2000       gst_query_parse_duration (query, &format, NULL);
2001       /* try answering TIME by converting from BYTES if subclass allows */
2002       if (format == GST_FORMAT_TIME && gst_video_decoder_do_byte (dec)) {
2003         gint64 value;
2004 
2005         if (gst_pad_peer_query_duration (dec->sinkpad, GST_FORMAT_BYTES,
2006                 &value)) {
2007           GST_LOG_OBJECT (dec, "upstream size %" G_GINT64_FORMAT, value);
2008           if (gst_pad_query_convert (dec->sinkpad,
2009                   GST_FORMAT_BYTES, value, GST_FORMAT_TIME, &value)) {
2010             gst_query_set_duration (query, GST_FORMAT_TIME, value);
2011             res = TRUE;
2012           }
2013         }
2014       }
2015       break;
2016     }
2017     case GST_QUERY_CONVERT:
2018     {
2019       GstFormat src_fmt, dest_fmt;
2020       gint64 src_val, dest_val;
2021 
2022       GST_DEBUG_OBJECT (dec, "convert query");
2023 
2024       gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
2025       GST_OBJECT_LOCK (dec);
2026       if (dec->priv->output_state != NULL)
2027         res = __gst_video_rawvideo_convert (dec->priv->output_state,
2028             src_fmt, src_val, &dest_fmt, &dest_val);
2029       else
2030         res = FALSE;
2031       GST_OBJECT_UNLOCK (dec);
2032       if (!res)
2033         goto error;
2034       gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
2035       break;
2036     }
2037     case GST_QUERY_LATENCY:
2038     {
2039       gboolean live;
2040       GstClockTime min_latency, max_latency;
2041 
2042       res = gst_pad_peer_query (dec->sinkpad, query);
2043       if (res) {
2044         gst_query_parse_latency (query, &live, &min_latency, &max_latency);
2045         GST_DEBUG_OBJECT (dec, "Peer qlatency: live %d, min %"
2046             GST_TIME_FORMAT " max %" GST_TIME_FORMAT, live,
2047             GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));
2048 
2049         GST_OBJECT_LOCK (dec);
2050         min_latency += dec->priv->min_latency;
2051         if (max_latency == GST_CLOCK_TIME_NONE
2052             || dec->priv->max_latency == GST_CLOCK_TIME_NONE)
2053           max_latency = GST_CLOCK_TIME_NONE;
2054         else
2055           max_latency += dec->priv->max_latency;
2056         GST_OBJECT_UNLOCK (dec);
2057 
2058         gst_query_set_latency (query, live, min_latency, max_latency);
2059       }
2060     }
2061       break;
2062     default:
2063       res = gst_pad_query_default (pad, GST_OBJECT (dec), query);
2064   }
2065   return res;
2066 
2067 error:
2068   GST_ERROR_OBJECT (dec, "query failed");
2069   return res;
2070 }
2071 
2072 static gboolean
2073 gst_video_decoder_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
2074 {
2075   GstVideoDecoder *decoder;
2076   GstVideoDecoderClass *decoder_class;
2077   gboolean ret = FALSE;
2078 
2079   decoder = GST_VIDEO_DECODER (parent);
2080   decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
2081 
2082   GST_DEBUG_OBJECT (decoder, "received query %d, %s", GST_QUERY_TYPE (query),
2083       GST_QUERY_TYPE_NAME (query));
2084 
2085   if (decoder_class->src_query)
2086     ret = decoder_class->src_query (decoder, query);
2087 
2088   return ret;
2089 }
2090 
2091 /**
2092  * gst_video_decoder_proxy_getcaps:
2093  * @decoder: a #GstVideoDecoder
2094  * @caps: (allow-none): initial caps
2095  * @filter: (allow-none): filter caps
2096  *
2097  * Returns caps that express @caps (or sink template caps if @caps == NULL)
2098  * restricted to resolution/format/... combinations supported by downstream
2099  * elements.
2100  *
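 * A minimal usage sketch for a subclass ->getcaps() implementation
 * (hypothetical code, mirroring what the default handler does when no
 * getcaps vfunc is set):
 *
 * |[<!-- language="C" -->
 * static GstCaps *
 * my_dec_getcaps (GstVideoDecoder * decoder, GstCaps * filter)
 * {
 *   return gst_video_decoder_proxy_getcaps (decoder, NULL, filter);
 * }
 * ]|
 *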
2101  * Returns: (transfer full): a #GstCaps owned by caller
2102  *
2103  * Since: 1.6
2104  */
2105 GstCaps *
2106 gst_video_decoder_proxy_getcaps (GstVideoDecoder * decoder, GstCaps * caps,
2107     GstCaps * filter)
2108 {
2109   return __gst_video_element_proxy_getcaps (GST_ELEMENT_CAST (decoder),
2110       GST_VIDEO_DECODER_SINK_PAD (decoder),
2111       GST_VIDEO_DECODER_SRC_PAD (decoder), caps, filter);
2112 }
2113 
2114 static GstCaps *
2115 gst_video_decoder_sink_getcaps (GstVideoDecoder * decoder, GstCaps * filter)
2116 {
2117   GstVideoDecoderClass *klass;
2118   GstCaps *caps;
2119 
2120   klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
2121 
2122   if (klass->getcaps)
2123     caps = klass->getcaps (decoder, filter);
2124   else
2125     caps = gst_video_decoder_proxy_getcaps (decoder, NULL, filter);
2126 
2127   GST_LOG_OBJECT (decoder, "Returning caps %" GST_PTR_FORMAT, caps);
2128 
2129   return caps;
2130 }
2131 
2132 static gboolean
2133 gst_video_decoder_sink_query_default (GstVideoDecoder * decoder,
2134     GstQuery * query)
2135 {
2136   GstPad *pad = GST_VIDEO_DECODER_SINK_PAD (decoder);
2137   GstVideoDecoderPrivate *priv;
2138   gboolean res = FALSE;
2139 
2140   priv = decoder->priv;
2141 
2142   GST_LOG_OBJECT (decoder, "handling query: %" GST_PTR_FORMAT, query);
2143 
2144   switch (GST_QUERY_TYPE (query)) {
2145     case GST_QUERY_CONVERT:
2146     {
2147       GstFormat src_fmt, dest_fmt;
2148       gint64 src_val, dest_val;
2149 
2150       gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
2151       GST_OBJECT_LOCK (decoder);
2152       res =
2153           __gst_video_encoded_video_convert (priv->bytes_out, priv->time,
2154           src_fmt, src_val, &dest_fmt, &dest_val);
2155       GST_OBJECT_UNLOCK (decoder);
2156       if (!res)
2157         goto error;
2158       gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
2159       break;
2160     }
2161     case GST_QUERY_ALLOCATION:{
2162       GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
2163 
2164       if (klass->propose_allocation)
2165         res = klass->propose_allocation (decoder, query);
2166       break;
2167     }
2168     case GST_QUERY_CAPS:{
2169       GstCaps *filter, *caps;
2170 
2171       gst_query_parse_caps (query, &filter);
2172       caps = gst_video_decoder_sink_getcaps (decoder, filter);
2173       gst_query_set_caps_result (query, caps);
2174       gst_caps_unref (caps);
2175       res = TRUE;
2176       break;
2177     }
2178     case GST_QUERY_ACCEPT_CAPS:{
2179       if (decoder->priv->use_default_pad_acceptcaps) {
2180         res =
2181             gst_pad_query_default (GST_VIDEO_DECODER_SINK_PAD (decoder),
2182             GST_OBJECT_CAST (decoder), query);
2183       } else {
2184         GstCaps *caps;
2185         GstCaps *allowed_caps;
2186         GstCaps *template_caps;
2187         gboolean accept;
2188 
2189         gst_query_parse_accept_caps (query, &caps);
2190 
2191         template_caps = gst_pad_get_pad_template_caps (pad);
2192         accept = gst_caps_is_subset (caps, template_caps);
2193         gst_caps_unref (template_caps);
2194 
2195         if (accept) {
2196           allowed_caps =
2197               gst_pad_query_caps (GST_VIDEO_DECODER_SINK_PAD (decoder), caps);
2198 
2199           accept = gst_caps_can_intersect (caps, allowed_caps);
2200 
2201           gst_caps_unref (allowed_caps);
2202         }
2203 
2204         gst_query_set_accept_caps_result (query, accept);
2205         res = TRUE;
2206       }
2207       break;
2208     }
2209     default:
2210       res = gst_pad_query_default (pad, GST_OBJECT (decoder), query);
2211       break;
2212   }
2213 done:
2214 
2215   return res;
2216 error:
2217   GST_DEBUG_OBJECT (decoder, "query failed");
2218   goto done;
2219 
2220 }
2221 
2222 static gboolean
2223 gst_video_decoder_sink_query (GstPad * pad, GstObject * parent,
2224     GstQuery * query)
2225 {
2226   GstVideoDecoder *decoder;
2227   GstVideoDecoderClass *decoder_class;
2228   gboolean ret = FALSE;
2229 
2230   decoder = GST_VIDEO_DECODER (parent);
2231   decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
2232 
2233   GST_DEBUG_OBJECT (decoder, "received query %d, %s", GST_QUERY_TYPE (query),
2234       GST_QUERY_TYPE_NAME (query));
2235 
2236   if (decoder_class->sink_query)
2237     ret = decoder_class->sink_query (decoder, query);
2238 
2239   return ret;
2240 }
2241 
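/* For non-packetized input the base class records each input buffer's
 * PTS/DTS/duration and flags below, keyed by the byte offset at which the
 * buffer started in the input stream. When the subclass' parser later
 * delimits a frame at some offset, the matching metadata is consumed again
 * via gst_video_decoder_get_buffer_info_at_offset(): e.g. with 100-byte
 * input buffers at offsets 0, 100 and 200, a frame starting at offset 100
 * picks up (and discards) all entries at offsets <= 100 and keeps the
 * values of the last one. */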
2242 typedef struct _Timestamp Timestamp;
2243 struct _Timestamp
2244 {
2245   guint64 offset;
2246   GstClockTime pts;
2247   GstClockTime dts;
2248   GstClockTime duration;
2249   guint flags;
2250 };
2251 
2252 static void
2253 timestamp_free (Timestamp * ts)
2254 {
2255   g_slice_free (Timestamp, ts);
2256 }
2257 
2258 static void
2259 gst_video_decoder_add_buffer_info (GstVideoDecoder * decoder,
2260     GstBuffer * buffer)
2261 {
2262   GstVideoDecoderPrivate *priv = decoder->priv;
2263   Timestamp *ts;
2264 
2265   if (!GST_BUFFER_PTS_IS_VALID (buffer) &&
2266       !GST_BUFFER_DTS_IS_VALID (buffer) &&
2267       !GST_BUFFER_DURATION_IS_VALID (buffer) &&
2268       GST_BUFFER_FLAGS (buffer) == 0) {
2269     /* Save memory - don't bother storing info
2270      * for buffers with no distinguishing info */
2271     return;
2272   }
2273 
2274   ts = g_slice_new (Timestamp);
2275 
2276   GST_LOG_OBJECT (decoder,
2277       "adding PTS %" GST_TIME_FORMAT " DTS %" GST_TIME_FORMAT
2278       " (offset:%" G_GUINT64_FORMAT ")",
2279       GST_TIME_ARGS (GST_BUFFER_PTS (buffer)),
2280       GST_TIME_ARGS (GST_BUFFER_DTS (buffer)), priv->input_offset);
2281 
2282   ts->offset = priv->input_offset;
2283   ts->pts = GST_BUFFER_PTS (buffer);
2284   ts->dts = GST_BUFFER_DTS (buffer);
2285   ts->duration = GST_BUFFER_DURATION (buffer);
2286   ts->flags = GST_BUFFER_FLAGS (buffer);
2287 
2288   g_queue_push_tail (&priv->timestamps, ts);
2289 
2290   if (g_queue_get_length (&priv->timestamps) > 40) {
2291     GST_WARNING_OBJECT (decoder,
2292         "decoder timestamp list getting long: %d timestamps,"
2293         "possible internal leaking?", g_queue_get_length (&priv->timestamps));
2294   }
2295 }
2296 
2297 static void
2298 gst_video_decoder_get_buffer_info_at_offset (GstVideoDecoder *
2299     decoder, guint64 offset, GstClockTime * pts, GstClockTime * dts,
2300     GstClockTime * duration, guint * flags)
2301 {
2302 #ifndef GST_DISABLE_GST_DEBUG
2303   guint64 got_offset = 0;
2304 #endif
2305   Timestamp *ts;
2306   GList *g;
2307 
2308   *pts = GST_CLOCK_TIME_NONE;
2309   *dts = GST_CLOCK_TIME_NONE;
2310   *duration = GST_CLOCK_TIME_NONE;
2311   *flags = 0;
2312 
2313   g = decoder->priv->timestamps.head;
2314   while (g) {
2315     ts = g->data;
2316     if (ts->offset <= offset) {
2317       GList *next = g->next;
2318 #ifndef GST_DISABLE_GST_DEBUG
2319       got_offset = ts->offset;
2320 #endif
2321       *pts = ts->pts;
2322       *dts = ts->dts;
2323       *duration = ts->duration;
2324       *flags = ts->flags;
2325       g_queue_delete_link (&decoder->priv->timestamps, g);
2326       g = next;
2327       timestamp_free (ts);
2328     } else {
2329       break;
2330     }
2331   }
2332 
2333   GST_LOG_OBJECT (decoder,
2334       "got PTS %" GST_TIME_FORMAT " DTS %" GST_TIME_FORMAT " flags %x @ offs %"
2335       G_GUINT64_FORMAT " (wanted offset:%" G_GUINT64_FORMAT ")",
2336       GST_TIME_ARGS (*pts), GST_TIME_ARGS (*dts), *flags, got_offset, offset);
2337 }
2338 
2339 #if !GLIB_CHECK_VERSION(2, 60, 0)
2340 #define g_queue_clear_full queue_clear_full
2341 static void
2342 queue_clear_full (GQueue * queue, GDestroyNotify free_func)
2343 {
2344   gpointer data;
2345 
2346   while ((data = g_queue_pop_head (queue)) != NULL)
2347     free_func (data);
2348 }
2349 #endif
2350 
2351 static void
2352 gst_video_decoder_clear_queues (GstVideoDecoder * dec)
2353 {
2354   GstVideoDecoderPrivate *priv = dec->priv;
2355 
2356   g_list_free_full (priv->output_queued,
2357       (GDestroyNotify) gst_mini_object_unref);
2358   priv->output_queued = NULL;
2359 
2360   g_list_free_full (priv->gather, (GDestroyNotify) gst_mini_object_unref);
2361   priv->gather = NULL;
2362   g_list_free_full (priv->decode, (GDestroyNotify) gst_video_codec_frame_unref);
2363   priv->decode = NULL;
2364   g_list_free_full (priv->parse, (GDestroyNotify) gst_mini_object_unref);
2365   priv->parse = NULL;
2366   g_list_free_full (priv->parse_gather,
2367       (GDestroyNotify) gst_video_codec_frame_unref);
2368   priv->parse_gather = NULL;
2369   g_queue_clear_full (&priv->frames,
2370       (GDestroyNotify) gst_video_codec_frame_unref);
2371 }
2372 
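/* Resets decoder state: with @flush_hard the segments, queued frames and
 * pending events are cleared as well (FLUSH_STOP); with @full the
 * negotiated input/output state, tags, buffer pool and statistics are
 * additionally dropped (stop/start). */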
2373 static void
2374 gst_video_decoder_reset (GstVideoDecoder * decoder, gboolean full,
2375     gboolean flush_hard)
2376 {
2377   GstVideoDecoderPrivate *priv = decoder->priv;
2378 
2379   GST_DEBUG_OBJECT (decoder, "reset full %d", full);
2380 
2381   GST_VIDEO_DECODER_STREAM_LOCK (decoder);
2382 
2383   if (full || flush_hard) {
2384     gst_segment_init (&decoder->input_segment, GST_FORMAT_UNDEFINED);
2385     gst_segment_init (&decoder->output_segment, GST_FORMAT_UNDEFINED);
2386     gst_video_decoder_clear_queues (decoder);
2387     decoder->priv->in_out_segment_sync = TRUE;
2388 
2389     if (priv->current_frame) {
2390       gst_video_codec_frame_unref (priv->current_frame);
2391       priv->current_frame = NULL;
2392     }
2393 
2394     g_list_free_full (priv->current_frame_events,
2395         (GDestroyNotify) gst_event_unref);
2396     priv->current_frame_events = NULL;
2397     g_list_free_full (priv->pending_events, (GDestroyNotify) gst_event_unref);
2398     priv->pending_events = NULL;
2399 
2400     priv->error_count = 0;
2401     priv->had_output_data = FALSE;
2402     priv->had_input_data = FALSE;
2403 
2404     GST_OBJECT_LOCK (decoder);
2405     priv->earliest_time = GST_CLOCK_TIME_NONE;
2406     priv->proportion = 0.5;
2407     priv->decode_flags_override = FALSE;
2408 
2409     priv->request_sync_point_flags = 0;
2410     priv->request_sync_point_frame_number = REQUEST_SYNC_POINT_UNSET;
2411     priv->last_force_key_unit_time = GST_CLOCK_TIME_NONE;
2412     GST_OBJECT_UNLOCK (decoder);
2413     priv->distance_from_sync = -1;
2414   }
2415 
2416   if (full) {
2417     if (priv->input_state)
2418       gst_video_codec_state_unref (priv->input_state);
2419     priv->input_state = NULL;
2420     GST_OBJECT_LOCK (decoder);
2421     if (priv->output_state)
2422       gst_video_codec_state_unref (priv->output_state);
2423     priv->output_state = NULL;
2424 
2425     priv->qos_frame_duration = 0;
2426     GST_OBJECT_UNLOCK (decoder);
2427 
2428     if (priv->tags)
2429       gst_tag_list_unref (priv->tags);
2430     priv->tags = NULL;
2431     priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
2432     if (priv->upstream_tags) {
2433       gst_tag_list_unref (priv->upstream_tags);
2434       priv->upstream_tags = NULL;
2435     }
2436     priv->tags_changed = FALSE;
2437     priv->reordered_output = FALSE;
2438 
2439     priv->dropped = 0;
2440     priv->processed = 0;
2441 
2442     priv->decode_frame_number = 0;
2443     priv->base_picture_number = 0;
2444 
2445     if (priv->pool) {
2446       GST_DEBUG_OBJECT (decoder, "deactivate pool %" GST_PTR_FORMAT,
2447           priv->pool);
2448       gst_buffer_pool_set_active (priv->pool, FALSE);
2449       gst_object_unref (priv->pool);
2450       priv->pool = NULL;
2451     }
2452 
2453     if (priv->allocator) {
2454       gst_object_unref (priv->allocator);
2455       priv->allocator = NULL;
2456     }
2457   }
2458 
2459   priv->discont = TRUE;
2460 
2461   priv->base_timestamp = GST_CLOCK_TIME_NONE;
2462   priv->last_timestamp_out = GST_CLOCK_TIME_NONE;
2463   priv->pts_delta = GST_CLOCK_TIME_NONE;
2464 
2465   priv->input_offset = 0;
2466   priv->frame_offset = 0;
2467   gst_adapter_clear (priv->input_adapter);
2468   gst_adapter_clear (priv->output_adapter);
2469   g_queue_clear_full (&priv->timestamps, (GDestroyNotify) timestamp_free);
2470 
2471   GST_OBJECT_LOCK (decoder);
2472   priv->bytes_out = 0;
2473   priv->time = 0;
2474   GST_OBJECT_UNLOCK (decoder);
2475 
2476 #ifndef GST_DISABLE_DEBUG
2477   priv->last_reset_time = gst_util_get_timestamp ();
2478 #endif
2479 
2480 #ifdef OHOS_OPT_PERFORMANCE // ohos.opt.performance.0001: first frame decoded cost time
2481   priv->has_recv_first_key_frame = FALSE;
2482   priv->has_push_first_frame = FALSE;
2483 #endif
2484   GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2485 }
2486 
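/* Forward-playback input path. In packetized mode every input buffer (or
 * every subframe until a buffer with the MARKER flag closes the frame)
 * maps onto the current codec frame, which is decoded right away; in
 * parsed mode buffers are pushed into the input adapter and the subclass'
 * parse() splits them into frames. */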
2487 static GstFlowReturn
2488 gst_video_decoder_chain_forward (GstVideoDecoder * decoder,
2489     GstBuffer * buf, gboolean at_eos)
2490 {
2491   GstVideoDecoderPrivate *priv;
2492   GstVideoDecoderClass *klass;
2493   GstFlowReturn ret = GST_FLOW_OK;
2494 
2495   klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
2496   priv = decoder->priv;
2497 
2498   g_return_val_if_fail (priv->packetized || klass->parse, GST_FLOW_ERROR);
2499 
2500   /* Draining on DISCONT is handled in chain_reverse() for reverse playback;
2501    * there this function is only called to collect everything GOP by GOP
2502    * into the parse_gather list */
2503   if (decoder->input_segment.rate > 0.0 && GST_BUFFER_IS_DISCONT (buf)
2504       && (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS))
2505     ret = gst_video_decoder_drain_out (decoder, FALSE);
2506 
2507   if (priv->current_frame == NULL)
2508     priv->current_frame = gst_video_decoder_new_frame (decoder);
2509 
2510   if (!priv->packetized)
2511     gst_video_decoder_add_buffer_info (decoder, buf);
2512 
2513   priv->input_offset += gst_buffer_get_size (buf);
2514 
2515   if (priv->packetized) {
2516     GstVideoCodecFrame *frame;
2517     gboolean was_keyframe = FALSE;
2518 
2519     frame = priv->current_frame;
2520 
2521     frame->abidata.ABI.num_subframes++;
2522     if (gst_video_decoder_get_subframe_mode (decoder)) {
2523       /* End the frame if the marker flag is set */
2524       if (!GST_BUFFER_FLAG_IS_SET (buf, GST_VIDEO_BUFFER_FLAG_MARKER)
2525           && (decoder->input_segment.rate > 0.0))
2526         priv->current_frame = gst_video_codec_frame_ref (frame);
2527       else
2528         priv->current_frame = NULL;
2529     } else {
2530       priv->current_frame = frame;
2531     }
2532 
2533     if (!GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT)) {
2534       was_keyframe = TRUE;
2535       GST_DEBUG_OBJECT (decoder, "Marking current_frame as sync point");
2536       GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);
2537     }
2538 
2539     if (frame->input_buffer) {
2540       gst_video_decoder_copy_metas (decoder, frame, frame->input_buffer, buf);
2541       gst_buffer_unref (frame->input_buffer);
2542     }
2543     frame->input_buffer = buf;
2544 
2545     if (decoder->input_segment.rate < 0.0) {
2546       priv->parse_gather = g_list_prepend (priv->parse_gather, frame);
2547       priv->current_frame = NULL;
2548     } else {
2549       ret = gst_video_decoder_decode_frame (decoder, frame);
2550       if (!gst_video_decoder_get_subframe_mode (decoder))
2551         priv->current_frame = NULL;
2552     }
2553     /* If in trick mode and it was a keyframe, drain decoder to avoid extra
2554      * latency. Only do this for forwards playback as reverse playback handles
2555      * draining on keyframes in flush_parse(), and would otherwise call back
2556      * from drain_out() to here causing an infinite loop.
2557      * Also, in reverse playback this function is only called to gather
2558      * frames GOP by GOP and does not do any actual decoding; that is done
2559      * by flush_decode(). */
2560     if (ret == GST_FLOW_OK && was_keyframe && decoder->input_segment.rate > 0.0
2561         && (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS))
2562       ret = gst_video_decoder_drain_out (decoder, FALSE);
2563   } else {
2564     gst_adapter_push (priv->input_adapter, buf);
2565 
2566     ret = gst_video_decoder_parse_available (decoder, at_eos, TRUE);
2567   }
2568 
2569   if (ret == GST_VIDEO_DECODER_FLOW_NEED_DATA)
2570     return GST_FLOW_OK;
2571 
2572   return ret;
2573 }
2574 
2575 static GstFlowReturn
2576 gst_video_decoder_flush_decode (GstVideoDecoder * dec)
2577 {
2578   GstVideoDecoderPrivate *priv = dec->priv;
2579   GstFlowReturn res = GST_FLOW_OK;
2580   GList *walk;
2581   GstVideoCodecFrame *current_frame = NULL;
2582   gboolean last_subframe;
2583   GST_DEBUG_OBJECT (dec, "flushing buffers to decode");
2584 
2585   walk = priv->decode;
2586   while (walk) {
2587     GList *next;
2588     GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);
2589     last_subframe = TRUE;
2590     /* In subframe mode, we need to get rid of the intermediary frames
2591      * created during the buffer gather stage. That's why we keep the first
2592      * frame as the main frame and drop every following frame until the end
2593      * of the subframe batch.
2594      */
2595     if (gst_video_decoder_get_subframe_mode (dec)) {
2596       if (current_frame == NULL) {
2597         current_frame = gst_video_codec_frame_ref (frame);
2598       } else {
2599         if (current_frame->input_buffer) {
2600           gst_video_decoder_copy_metas (dec, current_frame,
2601               current_frame->input_buffer, current_frame->output_buffer);
2602           gst_buffer_unref (current_frame->input_buffer);
2603         }
2604         current_frame->input_buffer = gst_buffer_ref (frame->input_buffer);
2605         gst_video_codec_frame_unref (frame);
2606       }
2607       last_subframe = GST_BUFFER_FLAG_IS_SET (current_frame->input_buffer,
2608           GST_VIDEO_BUFFER_FLAG_MARKER);
2609     } else {
2610       current_frame = frame;
2611     }
2612 
2613     GST_DEBUG_OBJECT (dec, "decoding frame %p buffer %p, PTS %" GST_TIME_FORMAT
2614         ", DTS %" GST_TIME_FORMAT, frame, frame->input_buffer,
2615         GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)),
2616         GST_TIME_ARGS (GST_BUFFER_DTS (frame->input_buffer)));
2617 
2618     next = walk->next;
2619 
2620     priv->decode = g_list_delete_link (priv->decode, walk);
2621 
2622     /* decode buffer, resulting data prepended to queue */
2623     res = gst_video_decoder_decode_frame (dec, current_frame);
2624     if (res != GST_FLOW_OK)
2625       break;
2626     if (!gst_video_decoder_get_subframe_mode (dec)
2627         || last_subframe)
2628       current_frame = NULL;
2629     walk = next;
2630   }
2631 
2632   return res;
2633 }
2634 
2635 /* gst_video_decoder_flush_parse is called from the
2636  * chain_reverse() function when a buffer carrying a
2637  * DISCONT arrives, indicating that reverse playback
2638  * looped back to the next data block; therefore all
2639  * available data should be fed through the decoder
2640  * and the resulting frames gathered for reversed
2641  * output */
2642 static GstFlowReturn
2643 gst_video_decoder_flush_parse (GstVideoDecoder * dec, gboolean at_eos)
2644 {
2645   GstVideoDecoderPrivate *priv = dec->priv;
2646   GstFlowReturn res = GST_FLOW_OK;
2647   GList *walk;
2648   GstVideoDecoderClass *decoder_class;
2649 
2650   decoder_class = GST_VIDEO_DECODER_GET_CLASS (dec);
2651 
2652   GST_DEBUG_OBJECT (dec, "flushing buffers to parsing");
2653 
2654   /* Reverse the gather list, and prepend it to the parse list,
2655    * then flush to parse whatever we can */
2656   priv->gather = g_list_reverse (priv->gather);
2657   priv->parse = g_list_concat (priv->gather, priv->parse);
2658   priv->gather = NULL;
2659 
2660   /* clear buffer and decoder state */
2661   gst_video_decoder_flush (dec, FALSE);
2662 
2663   walk = priv->parse;
2664   while (walk) {
2665     GstBuffer *buf = GST_BUFFER_CAST (walk->data);
2666     GList *next = walk->next;
2667 
2668     GST_DEBUG_OBJECT (dec, "parsing buffer %p, PTS %" GST_TIME_FORMAT
2669         ", DTS %" GST_TIME_FORMAT " flags %x", buf,
2670         GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2671         GST_TIME_ARGS (GST_BUFFER_DTS (buf)), GST_BUFFER_FLAGS (buf));
2672 
2673     /* parse buffer, resulting frames prepended to parse_gather queue */
2674     gst_buffer_ref (buf);
2675     res = gst_video_decoder_chain_forward (dec, buf, at_eos);
2676 
2677     /* if we generated output, we can discard the buffer, else we
2678      * keep it in the queue */
2679     if (priv->parse_gather) {
2680       GST_DEBUG_OBJECT (dec, "parsed buffer to %p", priv->parse_gather->data);
2681       priv->parse = g_list_delete_link (priv->parse, walk);
2682       gst_buffer_unref (buf);
2683     } else {
2684       GST_DEBUG_OBJECT (dec, "buffer did not decode, keeping");
2685     }
2686     walk = next;
2687   }
2688 
2689   walk = priv->parse_gather;
2690   while (walk) {
2691     GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);
2692     GList *walk2;
2693 
2694     /* this is reverse playback, check if we need to apply some segment
2695      * to the output before decoding, as during decoding the segment.rate
2696      * must be used to determine if a buffer should be pushed or added to
2697      * the output list for reverse pushing.
2698      *
2699      * The new segment is not immediately pushed here because we must
2700      * wait for negotiation to happen before it can be pushed to avoid
2701      * pushing a segment before caps event. Negotiation only happens
2702      * when finish_frame is called.
2703      */
2704     for (walk2 = frame->events; walk2;) {
2705       GList *cur = walk2;
2706       GstEvent *event = walk2->data;
2707 
2708       walk2 = g_list_next (walk2);
2709       if (GST_EVENT_TYPE (event) <= GST_EVENT_SEGMENT) {
2710 
2711         if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
2712           GstSegment segment;
2713 
2714           GST_DEBUG_OBJECT (dec, "Segment at frame %p %" GST_TIME_FORMAT,
2715               frame, GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)));
2716           gst_event_copy_segment (event, &segment);
2717           if (segment.format == GST_FORMAT_TIME) {
2718             dec->output_segment = segment;
2719             dec->priv->in_out_segment_sync =
2720                 gst_segment_is_equal (&dec->input_segment, &segment);
2721           }
2722         }
2723         dec->priv->pending_events =
2724             g_list_append (dec->priv->pending_events, event);
2725         frame->events = g_list_delete_link (frame->events, cur);
2726       }
2727     }
2728 
2729     walk = walk->next;
2730   }
2731 
2732   /* now we can process frames. Start by moving each frame from the
2733    * parse_gather to the decode list, reversing the order as we go, and
2734    * stopping when/if we copy a keyframe. */
2735   GST_DEBUG_OBJECT (dec, "checking parsed frames for a keyframe to decode");
2736   walk = priv->parse_gather;
2737   while (walk) {
2738     GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);
2739 
2740     /* remove from the gather list */
2741     priv->parse_gather = g_list_remove_link (priv->parse_gather, walk);
2742 
2743     /* move it to the front of the decode queue */
2744     priv->decode = g_list_concat (walk, priv->decode);
2745 
2746     /* if we copied a keyframe, flush and decode the decode queue */
2747     if (GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
2748       GST_DEBUG_OBJECT (dec, "found keyframe %p with PTS %" GST_TIME_FORMAT
2749           ", DTS %" GST_TIME_FORMAT, frame,
2750           GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)),
2751           GST_TIME_ARGS (GST_BUFFER_DTS (frame->input_buffer)));
2752       res = gst_video_decoder_flush_decode (dec);
2753       if (res != GST_FLOW_OK)
2754         goto done;
2755 
2756       /* We need to tell the subclass to drain now.
2757        * We prefer the drain vfunc, but for backward-compat
2758        * we use a finish() vfunc if drain isn't implemented */
2759       if (decoder_class->drain) {
2760         GST_DEBUG_OBJECT (dec, "Draining");
2761         res = decoder_class->drain (dec);
2762       } else if (decoder_class->finish) {
2763         GST_FIXME_OBJECT (dec, "Sub-class should implement drain(). "
2764             "Calling finish() for backwards-compat");
2765         res = decoder_class->finish (dec);
2766       }
2767 
2768       if (res != GST_FLOW_OK)
2769         goto done;
2770 
2771       /* now send queued data downstream */
2772       walk = priv->output_queued;
2773       while (walk) {
2774         GstBuffer *buf = GST_BUFFER_CAST (walk->data);
2775 
2776         priv->output_queued =
2777             g_list_delete_link (priv->output_queued, priv->output_queued);
2778 
2779         if (G_LIKELY (res == GST_FLOW_OK)) {
2780           /* avoid stray DISCONT from forward processing,
2781            * which have no meaning in reverse pushing */
2782           GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DISCONT);
2783 
2784           /* Last chance to calculate a timestamp as we loop backwards
2785            * through the list */
2786           if (GST_BUFFER_TIMESTAMP (buf) != GST_CLOCK_TIME_NONE)
2787             priv->last_timestamp_out = GST_BUFFER_TIMESTAMP (buf);
2788           else if (priv->last_timestamp_out != GST_CLOCK_TIME_NONE &&
2789               GST_BUFFER_DURATION (buf) != GST_CLOCK_TIME_NONE) {
2790             GST_BUFFER_TIMESTAMP (buf) =
2791                 priv->last_timestamp_out - GST_BUFFER_DURATION (buf);
2792             priv->last_timestamp_out = GST_BUFFER_TIMESTAMP (buf);
2793             GST_LOG_OBJECT (dec,
2794                 "Calculated TS %" GST_TIME_FORMAT " working backwards",
2795                 GST_TIME_ARGS (priv->last_timestamp_out));
2796           }
2797 
2798           res = gst_video_decoder_clip_and_push_buf (dec, buf);
2799         } else {
2800           gst_buffer_unref (buf);
2801         }
2802 
2803         walk = priv->output_queued;
2804       }
2805 
2806       /* clear buffer and decoder state again
2807        * before moving to the previous keyframe */
2808       gst_video_decoder_flush (dec, FALSE);
2809     }
2810 
2811     walk = priv->parse_gather;
2812   }
2813 
2814 done:
2815   return res;
2816 }
2817 
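/* Reverse playback: upstream pushes GOPs in reverse order while buffers
 * inside each GOP still run forward, with a DISCONT marking every GOP
 * boundary. Buffers are therefore prepended to the gather list until such
 * a DISCONT arrives, at which point the gathered GOP is parsed and decoded
 * and its frames are pushed out backwards. A sketch, assuming two GOPs in
 * decode order K1 B2 B3 and K4 B5 B6:
 *
 *   input :  K4 B5 B6 | K1 B2 B3   ('|' = DISCONT)
 *   output:  6 5 4 3 2 1           (reversed presentation order)
 */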
2818 static GstFlowReturn
2819 gst_video_decoder_chain_reverse (GstVideoDecoder * dec, GstBuffer * buf)
2820 {
2821   GstVideoDecoderPrivate *priv = dec->priv;
2822   GstFlowReturn result = GST_FLOW_OK;
2823 
2824   /* if we have a discont, move buffers to the decode list */
2825   if (!buf || GST_BUFFER_IS_DISCONT (buf)) {
2826     GST_DEBUG_OBJECT (dec, "received discont");
2827 
2828     /* parse and decode stuff in the gather and parse queues */
2829     result = gst_video_decoder_flush_parse (dec, FALSE);
2830   }
2831 
2832   if (G_LIKELY (buf)) {
2833     GST_DEBUG_OBJECT (dec, "gathering buffer %p of size %" G_GSIZE_FORMAT ", "
2834         "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dur %"
2835         GST_TIME_FORMAT, buf, gst_buffer_get_size (buf),
2836         GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2837         GST_TIME_ARGS (GST_BUFFER_DTS (buf)),
2838         GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
2839 
2840     /* add buffer to gather queue */
2841     priv->gather = g_list_prepend (priv->gather, buf);
2842   }
2843 
2844   return result;
2845 }
2846 
2847 static GstFlowReturn
2848 gst_video_decoder_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
2849 {
2850   GstVideoDecoder *decoder;
2851   GstFlowReturn ret = GST_FLOW_OK;
2852 
2853   decoder = GST_VIDEO_DECODER (parent);
2854 
2855   if (G_UNLIKELY (!decoder->priv->input_state && decoder->priv->needs_format))
2856     goto not_negotiated;
2857 
2858   GST_LOG_OBJECT (decoder,
2859       "chain PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT " duration %"
2860       GST_TIME_FORMAT " size %" G_GSIZE_FORMAT " flags %x",
2861       GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2862       GST_TIME_ARGS (GST_BUFFER_DTS (buf)),
2863       GST_TIME_ARGS (GST_BUFFER_DURATION (buf)),
2864       gst_buffer_get_size (buf), GST_BUFFER_FLAGS (buf));
2865 
2866   GST_VIDEO_DECODER_STREAM_LOCK (decoder);
2867 
2868   /* NOTE:
2869    * requiring the pad to be negotiated makes it impossible to use
2870    * oggdemux or filesrc ! decoder */
2871 
2872   if (decoder->input_segment.format == GST_FORMAT_UNDEFINED) {
2873     GstEvent *event;
2874     GstSegment *segment = &decoder->input_segment;
2875 
2876     GST_WARNING_OBJECT (decoder,
2877         "Received buffer without a new-segment. "
2878         "Assuming timestamps start from 0.");
2879 
2880     gst_segment_init (segment, GST_FORMAT_TIME);
2881 
2882     event = gst_event_new_segment (segment);
2883 
2884     decoder->priv->current_frame_events =
2885         g_list_prepend (decoder->priv->current_frame_events, event);
2886   }
2887 
2888   decoder->priv->had_input_data = TRUE;
2889 
2890   if (decoder->input_segment.rate > 0.0)
2891     ret = gst_video_decoder_chain_forward (decoder, buf, FALSE);
2892   else
2893     ret = gst_video_decoder_chain_reverse (decoder, buf);
2894 
2895   GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2896   return ret;
2897 
2898   /* ERRORS */
2899 not_negotiated:
2900   {
2901     GST_ELEMENT_ERROR (decoder, CORE, NEGOTIATION, (NULL),
2902         ("decoder not initialized"));
2903     gst_buffer_unref (buf);
2904     return GST_FLOW_NOT_NEGOTIATED;
2905   }
2906 }
2907 
2908 static GstStateChangeReturn
2909 gst_video_decoder_change_state (GstElement * element, GstStateChange transition)
2910 {
2911   GstVideoDecoder *decoder;
2912   GstVideoDecoderClass *decoder_class;
2913   GstStateChangeReturn ret;
2914 
2915   decoder = GST_VIDEO_DECODER (element);
2916   decoder_class = GST_VIDEO_DECODER_GET_CLASS (element);
2917 
2918   switch (transition) {
2919     case GST_STATE_CHANGE_NULL_TO_READY:
2920       /* open device/library if needed */
2921       if (decoder_class->open && !decoder_class->open (decoder))
2922         goto open_failed;
2923       break;
2924     case GST_STATE_CHANGE_READY_TO_PAUSED:
2925       GST_VIDEO_DECODER_STREAM_LOCK (decoder);
2926       gst_video_decoder_reset (decoder, TRUE, TRUE);
2927       GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2928 
2929       /* Initialize device/library if needed */
2930       if (decoder_class->start && !decoder_class->start (decoder))
2931         goto start_failed;
2932       break;
2933     default:
2934       break;
2935   }
2936 
2937   ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
2938 
2939   switch (transition) {
2940     case GST_STATE_CHANGE_PAUSED_TO_READY:{
2941       gboolean stopped = TRUE;
2942 
2943       if (decoder_class->stop)
2944         stopped = decoder_class->stop (decoder);
2945 
2946       GST_VIDEO_DECODER_STREAM_LOCK (decoder);
2947       gst_video_decoder_reset (decoder, TRUE, TRUE);
2948       GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2949 
2950       if (!stopped)
2951         goto stop_failed;
2952 
2953       break;
2954     }
2955     case GST_STATE_CHANGE_READY_TO_NULL:
2956       /* close device/library if needed */
2957       if (decoder_class->close && !decoder_class->close (decoder))
2958         goto close_failed;
2959       break;
2960     default:
2961       break;
2962   }
2963 
2964   return ret;
2965 
2966   /* Errors */
2967 open_failed:
2968   {
2969     GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
2970         ("Failed to open decoder"));
2971     return GST_STATE_CHANGE_FAILURE;
2972   }
2973 
2974 start_failed:
2975   {
2976     GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
2977         ("Failed to start decoder"));
2978     return GST_STATE_CHANGE_FAILURE;
2979   }
2980 
2981 stop_failed:
2982   {
2983     GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
2984         ("Failed to stop decoder"));
2985     return GST_STATE_CHANGE_FAILURE;
2986   }
2987 
2988 close_failed:
2989   {
2990     GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
2991         ("Failed to close decoder"));
2992     return GST_STATE_CHANGE_FAILURE;
2993   }
2994 }
2995 
2996 static GstVideoCodecFrame *
gst_video_decoder_new_frame(GstVideoDecoder * decoder)2997 gst_video_decoder_new_frame (GstVideoDecoder * decoder)
2998 {
2999   GstVideoDecoderPrivate *priv = decoder->priv;
3000   GstVideoCodecFrame *frame;
3001 
3002   frame = g_slice_new0 (GstVideoCodecFrame);
3003 
3004   frame->ref_count = 1;
3005 
3006   GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3007   frame->system_frame_number = priv->system_frame_number;
3008   priv->system_frame_number++;
3009   frame->decode_frame_number = priv->decode_frame_number;
3010   priv->decode_frame_number++;
3011 
3012   frame->dts = GST_CLOCK_TIME_NONE;
3013   frame->pts = GST_CLOCK_TIME_NONE;
3014   frame->duration = GST_CLOCK_TIME_NONE;
3015   frame->events = priv->current_frame_events;
3016   priv->current_frame_events = NULL;
3017 
3018   GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3019 
3020   GST_LOG_OBJECT (decoder, "Created new frame %p (sfn:%d)",
3021       frame, frame->system_frame_number);
3022 
3023   return frame;
3024 }
3025 
3026 static void
gst_video_decoder_push_event_list(GstVideoDecoder * decoder,GList * events)3027 gst_video_decoder_push_event_list (GstVideoDecoder * decoder, GList * events)
3028 {
3029   GList *l;
3030 
3031   /* events are stored in reverse order */
3032   for (l = g_list_last (events); l; l = g_list_previous (l)) {
3033     GST_LOG_OBJECT (decoder, "pushing %s event", GST_EVENT_TYPE_NAME (l->data));
3034     gst_video_decoder_push_event (decoder, l->data);
3035   }
3036   g_list_free (events);
3037 }
3038 
3039 static void
gst_video_decoder_prepare_finish_frame(GstVideoDecoder * decoder,GstVideoCodecFrame * frame,gboolean dropping)3040 gst_video_decoder_prepare_finish_frame (GstVideoDecoder *
3041     decoder, GstVideoCodecFrame * frame, gboolean dropping)
3042 {
3043   GstVideoDecoderPrivate *priv = decoder->priv;
3044   GList *l, *events = NULL;
3045   gboolean sync;
3046 
3047 #ifndef GST_DISABLE_GST_DEBUG
3048   GST_LOG_OBJECT (decoder, "n %d in %" G_GSIZE_FORMAT " out %" G_GSIZE_FORMAT,
3049       priv->frames.length,
3050       gst_adapter_available (priv->input_adapter),
3051       gst_adapter_available (priv->output_adapter));
3052 #endif
3053 
3054   sync = GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame);
3055 
3056   GST_LOG_OBJECT (decoder,
3057       "finish frame %p (#%d)(sub=#%d) sync:%d PTS:%" GST_TIME_FORMAT " DTS:%"
3058       GST_TIME_FORMAT,
3059       frame, frame->system_frame_number, frame->abidata.ABI.num_subframes,
3060       sync, GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->dts));
3061 
3062   /* Push all pending events that arrived before this frame */
3063   for (l = priv->frames.head; l; l = l->next) {
3064     GstVideoCodecFrame *tmp = l->data;
3065 
3066     if (tmp->events) {
3067       events = g_list_concat (tmp->events, events);
3068       tmp->events = NULL;
3069     }
3070 
3071     if (tmp == frame)
3072       break;
3073   }
3074 
3075   if (dropping || !decoder->priv->output_state) {
3076     /* Push before the next frame that is not dropped */
3077     decoder->priv->pending_events =
3078         g_list_concat (events, decoder->priv->pending_events);
3079   } else {
3080     gst_video_decoder_push_event_list (decoder, decoder->priv->pending_events);
3081     decoder->priv->pending_events = NULL;
3082 
3083     gst_video_decoder_push_event_list (decoder, events);
3084   }
3085 
3086   /* Check if the data should not be displayed. For example altref/invisible
3087    * frame in vp8. In this case we should not update the timestamps. */
3088   if (GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (frame))
3089     return;
3090 
3091   /* If the frame is meant to be output but we don't have an output_buffer
3092    * we have a problem :) */
3093   if (G_UNLIKELY ((frame->output_buffer == NULL) && !dropping))
3094     goto no_output_buffer;
3095 
3096   if (GST_CLOCK_TIME_IS_VALID (frame->pts)) {
3097     if (frame->pts != priv->base_timestamp) {
3098       GST_DEBUG_OBJECT (decoder,
3099           "sync timestamp %" GST_TIME_FORMAT " diff %" GST_STIME_FORMAT,
3100           GST_TIME_ARGS (frame->pts),
3101           GST_STIME_ARGS (GST_CLOCK_DIFF (frame->pts,
3102                   decoder->output_segment.start)));
3103       priv->base_timestamp = frame->pts;
3104       priv->base_picture_number = frame->decode_frame_number;
3105     }
3106   }
3107 
3108   if (frame->duration == GST_CLOCK_TIME_NONE) {
3109     frame->duration = gst_video_decoder_get_frame_duration (decoder, frame);
3110     GST_LOG_OBJECT (decoder,
3111         "Guessing duration %" GST_TIME_FORMAT " for frame...",
3112         GST_TIME_ARGS (frame->duration));
3113   }
3114 
  /* PTS is expected to be monotonically ascending,
   * so a good guess is the lowest unsent DTS */
  {
    GstClockTime min_ts = GST_CLOCK_TIME_NONE;
    GstVideoCodecFrame *oframe = NULL;
    gboolean seen_none = FALSE;

    /* some maintenance regardless */
    for (l = priv->frames.head; l; l = l->next) {
      GstVideoCodecFrame *tmp = l->data;

      if (!GST_CLOCK_TIME_IS_VALID (tmp->abidata.ABI.ts)) {
        seen_none = TRUE;
        continue;
      }

      if (!GST_CLOCK_TIME_IS_VALID (min_ts) || tmp->abidata.ABI.ts < min_ts) {
        min_ts = tmp->abidata.ABI.ts;
        oframe = tmp;
      }
    }
    /* save a ts if needed */
    if (oframe && oframe != frame) {
      oframe->abidata.ABI.ts = frame->abidata.ABI.ts;
    }

    /* and set if needed;
     * valid delta means we have reasonable DTS input */
    /* also, if we ended up reordered, means this approach is conflicting
     * with some sparse existing PTS, and so it does not work out */
    if (!priv->reordered_output &&
        !GST_CLOCK_TIME_IS_VALID (frame->pts) && !seen_none &&
        GST_CLOCK_TIME_IS_VALID (priv->pts_delta)) {
      frame->pts = min_ts + priv->pts_delta;
      GST_DEBUG_OBJECT (decoder,
          "no valid PTS, using oldest DTS %" GST_TIME_FORMAT,
          GST_TIME_ARGS (frame->pts));
    }

    /* some more maintenance, ts2 holds PTS */
    min_ts = GST_CLOCK_TIME_NONE;
    seen_none = FALSE;
    for (l = priv->frames.head; l; l = l->next) {
      GstVideoCodecFrame *tmp = l->data;

      if (!GST_CLOCK_TIME_IS_VALID (tmp->abidata.ABI.ts2)) {
        seen_none = TRUE;
        continue;
      }

      if (!GST_CLOCK_TIME_IS_VALID (min_ts) || tmp->abidata.ABI.ts2 < min_ts) {
        min_ts = tmp->abidata.ABI.ts2;
        oframe = tmp;
      }
    }
    /* save a ts if needed */
    if (oframe && oframe != frame) {
      oframe->abidata.ABI.ts2 = frame->abidata.ABI.ts2;
    }

    /* if we detected reordered output, then PTS are void,
     * however those were obtained; bogus input, subclass etc */
    if (priv->reordered_output && !seen_none) {
#ifdef OHOS_OPT_COMPAT
      /**
       * ohos.ext.compat.0046
       * Reset reordered_output to FALSE to avoid persistent PTS exceptions
       */
      if (GST_CLOCK_TIME_IS_VALID (frame->pts) && frame->pts >= priv->last_timestamp_out &&
          (!(frame->duration != GST_CLOCK_TIME_NONE) || !(sync && frame->dts != GST_CLOCK_TIME_NONE))) {
        GST_DEBUG_OBJECT (decoder, "pts %" GST_TIME_FORMAT " last_timestamp_out %" GST_TIME_FORMAT,
          GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (priv->last_timestamp_out));
        priv->reordered_output = FALSE;
      } else {
        GST_DEBUG_OBJECT (decoder, "invalidating PTS");
        frame->pts = GST_CLOCK_TIME_NONE;
      }
#else
      GST_DEBUG_OBJECT (decoder, "invalidating PTS");
      frame->pts = GST_CLOCK_TIME_NONE;
#endif
    }

    if (!GST_CLOCK_TIME_IS_VALID (frame->pts) && !seen_none) {
      frame->pts = min_ts;
      GST_DEBUG_OBJECT (decoder,
          "no valid PTS, using oldest PTS %" GST_TIME_FORMAT,
          GST_TIME_ARGS (frame->pts));
    }
  }


  if (frame->pts == GST_CLOCK_TIME_NONE) {
    /* Last ditch timestamp guess: Just add the duration to the previous
     * frame. If it's the first frame, just use the segment start. */
    if (frame->duration != GST_CLOCK_TIME_NONE) {
      if (GST_CLOCK_TIME_IS_VALID (priv->last_timestamp_out))
        frame->pts = priv->last_timestamp_out + frame->duration;
      else if (frame->dts != GST_CLOCK_TIME_NONE) {
        frame->pts = frame->dts;
        GST_LOG_OBJECT (decoder,
            "Setting DTS as PTS %" GST_TIME_FORMAT " for frame...",
            GST_TIME_ARGS (frame->pts));
      } else if (decoder->output_segment.rate > 0.0)
        frame->pts = decoder->output_segment.start;
      GST_INFO_OBJECT (decoder,
          "Guessing PTS=%" GST_TIME_FORMAT " for frame... DTS=%"
          GST_TIME_FORMAT, GST_TIME_ARGS (frame->pts),
          GST_TIME_ARGS (frame->dts));
    } else if (sync && frame->dts != GST_CLOCK_TIME_NONE) {
      frame->pts = frame->dts;
      GST_LOG_OBJECT (decoder,
          "Setting DTS as PTS %" GST_TIME_FORMAT " for frame...",
          GST_TIME_ARGS (frame->pts));
    }
  }

  if (GST_CLOCK_TIME_IS_VALID (priv->last_timestamp_out)) {
    if (frame->pts < priv->last_timestamp_out) {
      GST_WARNING_OBJECT (decoder,
          "decreasing timestamp (%" GST_TIME_FORMAT " < %"
          GST_TIME_FORMAT ")",
          GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (priv->last_timestamp_out));
      priv->reordered_output = TRUE;
      /* make it a bit less weird downstream */
      frame->pts = priv->last_timestamp_out;
    }
  }

  if (GST_CLOCK_TIME_IS_VALID (frame->pts))
    priv->last_timestamp_out = frame->pts;

  return;

  /* ERRORS */
no_output_buffer:
  {
    GST_ERROR_OBJECT (decoder, "No buffer to output!");
  }
}

/**
 * gst_video_decoder_release_frame:
 * @dec: a #GstVideoDecoder
 * @frame: (transfer full): the #GstVideoCodecFrame to release
 *
 * Similar to gst_video_decoder_drop_frame(), but simply releases @frame
 * without any processing other than removing it from the list of pending
 * frames, after which it is considered finished and released.
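 *
 * As an illustrative sketch (not from the original documentation), a
 * subclass flushing its internal state could discard every frame it still
 * holds; gst_video_decoder_get_frames() returns one extra reference per
 * frame, which gst_video_decoder_release_frame() consumes:
 * |[<!-- language="C" -->
 * GList *frames, *l;
 *
 * frames = gst_video_decoder_get_frames (dec);
 * for (l = frames; l; l = l->next)
 *   gst_video_decoder_release_frame (dec, l->data);
 * g_list_free (frames);
 * ]|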
 *
 * Since: 1.2.2
 */
void
gst_video_decoder_release_frame (GstVideoDecoder * dec,
    GstVideoCodecFrame * frame)
{
  GList *link;

  /* unref once from the list */
  GST_VIDEO_DECODER_STREAM_LOCK (dec);
  link = g_queue_find (&dec->priv->frames, frame);
  if (link) {
    gst_video_codec_frame_unref (frame);
    g_queue_delete_link (&dec->priv->frames, link);
  }
  if (frame->events) {
    dec->priv->pending_events =
        g_list_concat (frame->events, dec->priv->pending_events);
    frame->events = NULL;
  }
  GST_VIDEO_DECODER_STREAM_UNLOCK (dec);

  /* unref because this function takes ownership */
  gst_video_codec_frame_unref (frame);
}

/* called with STREAM_LOCK */
static void
gst_video_decoder_post_qos_drop (GstVideoDecoder * dec, GstClockTime timestamp)
{
  GstClockTime stream_time, jitter, earliest_time, qostime;
  GstSegment *segment;
  GstMessage *qos_msg;
  gdouble proportion;

  dec->priv->dropped++;

  /* post QoS message */
  GST_OBJECT_LOCK (dec);
  proportion = dec->priv->proportion;
  earliest_time = dec->priv->earliest_time;
  GST_OBJECT_UNLOCK (dec);

  segment = &dec->output_segment;
  if (G_UNLIKELY (segment->format == GST_FORMAT_UNDEFINED))
    segment = &dec->input_segment;
  stream_time =
      gst_segment_to_stream_time (segment, GST_FORMAT_TIME, timestamp);
  qostime = gst_segment_to_running_time (segment, GST_FORMAT_TIME, timestamp);
  jitter = GST_CLOCK_DIFF (qostime, earliest_time);
  qos_msg =
      gst_message_new_qos (GST_OBJECT_CAST (dec), FALSE, qostime, stream_time,
      timestamp, GST_CLOCK_TIME_NONE);
  gst_message_set_qos_values (qos_msg, jitter, proportion, 1000000);
  gst_message_set_qos_stats (qos_msg, GST_FORMAT_BUFFERS,
      dec->priv->processed, dec->priv->dropped);
  gst_element_post_message (GST_ELEMENT_CAST (dec), qos_msg);
}

/**
 * gst_video_decoder_drop_frame:
 * @dec: a #GstVideoDecoder
 * @frame: (transfer full): the #GstVideoCodecFrame to drop
 *
 * Similar to gst_video_decoder_finish_frame(), but drops @frame in any
 * case and posts a QoS message with the frame's details on the bus.
 * Either way, the frame is considered finished and released.
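 *
 * As an illustrative sketch (not from the original documentation), a
 * subclass might skip decoding frames that QoS already reports as late;
 * a negative max decode time means the frame's deadline has passed:
 * |[<!-- language="C" -->
 * if (gst_video_decoder_get_max_decode_time (dec, frame) < 0)
 *   return gst_video_decoder_drop_frame (dec, frame);
 * ]|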
 *
 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
 */
GstFlowReturn
gst_video_decoder_drop_frame (GstVideoDecoder * dec, GstVideoCodecFrame * frame)
{
  GST_LOG_OBJECT (dec, "drop frame %p", frame);

  if (gst_video_decoder_get_subframe_mode (dec))
    GST_DEBUG_OBJECT (dec, "Drop subframe %d. Must be the last one.",
        frame->abidata.ABI.num_subframes);

  GST_VIDEO_DECODER_STREAM_LOCK (dec);

  gst_video_decoder_prepare_finish_frame (dec, frame, TRUE);

  GST_DEBUG_OBJECT (dec, "dropping frame %" GST_TIME_FORMAT,
      GST_TIME_ARGS (frame->pts));

  gst_video_decoder_post_qos_drop (dec, frame->pts);

  /* now free the frame */
  gst_video_decoder_release_frame (dec, frame);

  GST_VIDEO_DECODER_STREAM_UNLOCK (dec);

  return GST_FLOW_OK;
}

/**
 * gst_video_decoder_drop_subframe:
 * @dec: a #GstVideoDecoder
 * @frame: (transfer full): the #GstVideoCodecFrame
 *
 * Drops input data.
 * The frame is not considered finished until the whole frame
 * is finished or dropped by the subclass.
 *
 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
 *
 * Since: 1.20
 */
GstFlowReturn
gst_video_decoder_drop_subframe (GstVideoDecoder * dec,
    GstVideoCodecFrame * frame)
{
  g_return_val_if_fail (gst_video_decoder_get_subframe_mode (dec),
      GST_FLOW_NOT_SUPPORTED);

  GST_LOG_OBJECT (dec, "drop subframe %p num=%d", frame->input_buffer,
      gst_video_decoder_get_input_subframe_index (dec, frame));

  GST_VIDEO_DECODER_STREAM_LOCK (dec);

  gst_video_codec_frame_unref (frame);

  GST_VIDEO_DECODER_STREAM_UNLOCK (dec);

  return GST_FLOW_OK;
}

static gboolean
gst_video_decoder_transform_meta_default (GstVideoDecoder *
    decoder, GstVideoCodecFrame * frame, GstMeta * meta)
{
  const GstMetaInfo *info = meta->info;
  const gchar *const *tags;
  const gchar *const supported_tags[] = {
    GST_META_TAG_VIDEO_STR,
    GST_META_TAG_VIDEO_ORIENTATION_STR,
    GST_META_TAG_VIDEO_SIZE_STR,
    NULL,
  };

  tags = gst_meta_api_type_get_tags (info->api);

  if (!tags)
    return TRUE;

  while (*tags) {
    if (!g_strv_contains (supported_tags, *tags))
      return FALSE;
    tags++;
  }

  return TRUE;
}

typedef struct
{
  GstVideoDecoder *decoder;
  GstVideoCodecFrame *frame;
  GstBuffer *buffer;
} CopyMetaData;

static gboolean
foreach_metadata (GstBuffer * inbuf, GstMeta ** meta, gpointer user_data)
{
  CopyMetaData *data = user_data;
  GstVideoDecoder *decoder = data->decoder;
  GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
  GstVideoCodecFrame *frame = data->frame;
  GstBuffer *buffer = data->buffer;
  const GstMetaInfo *info = (*meta)->info;
  gboolean do_copy = FALSE;

  if (gst_meta_api_type_has_tag (info->api, _gst_meta_tag_memory)) {
    /* never call the transform_meta with memory specific metadata */
    GST_DEBUG_OBJECT (decoder, "not copying memory specific metadata %s",
        g_type_name (info->api));
    do_copy = FALSE;
  } else if (klass->transform_meta) {
    do_copy = klass->transform_meta (decoder, frame, *meta);
    GST_DEBUG_OBJECT (decoder, "transformed metadata %s: copy: %d",
        g_type_name (info->api), do_copy);
  }

  /* we only copy metadata when the subclass implemented a transform_meta
   * function and when it returns %TRUE */
  if (do_copy && info->transform_func) {
    GstMetaTransformCopy copy_data = { FALSE, 0, -1 };
    GST_DEBUG_OBJECT (decoder, "copy metadata %s", g_type_name (info->api));
    /* simply copy then */

    info->transform_func (buffer, *meta, inbuf, _gst_meta_transform_copy,
        &copy_data);
  }
  return TRUE;
}

static void
gst_video_decoder_copy_metas (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame, GstBuffer * src_buffer, GstBuffer * dest_buffer)
{
  GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);

  if (decoder_class->transform_meta) {
    if (G_LIKELY (frame)) {
      CopyMetaData data;

      data.decoder = decoder;
      data.frame = frame;
      data.buffer = dest_buffer;
      gst_buffer_foreach_meta (src_buffer, foreach_metadata, &data);
    } else {
      GST_WARNING_OBJECT (decoder,
          "Can't copy metadata because input frame disappeared");
    }
  }
}

/**
 * gst_video_decoder_finish_frame:
 * @decoder: a #GstVideoDecoder
 * @frame: (transfer full): a decoded #GstVideoCodecFrame
 *
 * @frame should have a valid decoded data buffer, whose metadata fields
 * are then appropriately set according to frame data, before it is pushed
 * downstream. If no output data is provided, @frame is considered skipped.
 * In any case, the frame is considered finished and released.
 *
 * After calling this function the output buffer of the frame is to be
 * considered read-only. This function will also change the metadata
 * of the buffer.
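 *
 * As an illustrative sketch (not from the original documentation), the tail
 * of a typical @handle_frame implementation might look like this, where
 * my_codec_decode() is a hypothetical decoding helper:
 * |[<!-- language="C" -->
 * static GstFlowReturn
 * my_decoder_handle_frame (GstVideoDecoder * decoder,
 *     GstVideoCodecFrame * frame)
 * {
 *   GstFlowReturn ret;
 *
 *   ret = gst_video_decoder_allocate_output_frame (decoder, frame);
 *   if (ret != GST_FLOW_OK) {
 *     gst_video_decoder_release_frame (decoder, frame);
 *     return ret;
 *   }
 *
 *   if (!my_codec_decode (frame->input_buffer, frame->output_buffer))
 *     return gst_video_decoder_drop_frame (decoder, frame);
 *
 *   return gst_video_decoder_finish_frame (decoder, frame);
 * }
 * ]|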
 *
 * Returns: a #GstFlowReturn resulting from sending data downstream
 */
GstFlowReturn
gst_video_decoder_finish_frame (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstVideoDecoderPrivate *priv = decoder->priv;
  GstBuffer *output_buffer;
  gboolean needs_reconfigure = FALSE;

  GST_LOG_OBJECT (decoder, "finish frame %p", frame);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
  if (G_UNLIKELY (priv->output_state_changed || (priv->output_state
              && needs_reconfigure))) {
    if (!gst_video_decoder_negotiate_unlocked (decoder)) {
      gst_pad_mark_reconfigure (decoder->srcpad);
      if (GST_PAD_IS_FLUSHING (decoder->srcpad))
        ret = GST_FLOW_FLUSHING;
      else
        ret = GST_FLOW_NOT_NEGOTIATED;
      goto done;
    }
  }

  gst_video_decoder_prepare_finish_frame (decoder, frame, FALSE);
  priv->processed++;

  if (priv->tags_changed) {
    GstEvent *tags_event;

    tags_event = gst_video_decoder_create_merged_tags_event (decoder);

    if (tags_event != NULL)
      gst_video_decoder_push_event (decoder, tags_event);

    priv->tags_changed = FALSE;
  }

  /* no buffer data means this frame is skipped */
  if (!frame->output_buffer || GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (frame)) {
    GST_DEBUG_OBJECT (decoder,
        "skipping frame %" GST_TIME_FORMAT " because no output was produced",
        GST_TIME_ARGS (frame->pts));
    goto done;
  }

  /* Mark output as corrupted if the subclass requested so and we're either
   * still before the sync point after the request, or we don't even know the
   * frame number of the sync point yet (it is 0) */
  GST_OBJECT_LOCK (decoder);
  if (frame->system_frame_number <= priv->request_sync_point_frame_number
      && priv->request_sync_point_frame_number != REQUEST_SYNC_POINT_UNSET) {
    if (priv->request_sync_point_flags &
        GST_VIDEO_DECODER_REQUEST_SYNC_POINT_CORRUPT_OUTPUT) {
      GST_DEBUG_OBJECT (decoder,
          "marking frame %" GST_TIME_FORMAT
          " as corrupted because it is still before the sync point",
          GST_TIME_ARGS (frame->pts));
      GST_VIDEO_CODEC_FRAME_FLAG_SET (frame,
          GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED);
    }
  } else {
    /* Reset to -1 to mark it as unset now that we've reached the frame */
    priv->request_sync_point_frame_number = REQUEST_SYNC_POINT_UNSET;
  }
  GST_OBJECT_UNLOCK (decoder);

  if (priv->discard_corrupted_frames
      && (GST_VIDEO_CODEC_FRAME_FLAG_IS_SET (frame,
              GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED)
          || GST_BUFFER_FLAG_IS_SET (frame->output_buffer,
              GST_BUFFER_FLAG_CORRUPTED))) {
    GST_DEBUG_OBJECT (decoder,
        "skipping frame %" GST_TIME_FORMAT " because it is corrupted",
        GST_TIME_ARGS (frame->pts));
    goto done;
  }

  /* We need a writable buffer for the metadata changes below */
  output_buffer = frame->output_buffer =
      gst_buffer_make_writable (frame->output_buffer);

  GST_BUFFER_FLAG_UNSET (output_buffer, GST_BUFFER_FLAG_DELTA_UNIT);

  GST_BUFFER_PTS (output_buffer) = frame->pts;
  GST_BUFFER_DTS (output_buffer) = GST_CLOCK_TIME_NONE;
  GST_BUFFER_DURATION (output_buffer) = frame->duration;

  GST_BUFFER_OFFSET (output_buffer) = GST_BUFFER_OFFSET_NONE;
  GST_BUFFER_OFFSET_END (output_buffer) = GST_BUFFER_OFFSET_NONE;

  if (priv->discont) {
    GST_BUFFER_FLAG_SET (output_buffer, GST_BUFFER_FLAG_DISCONT);
  }

  if (GST_VIDEO_CODEC_FRAME_FLAG_IS_SET (frame,
          GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED)) {
    GST_DEBUG_OBJECT (decoder,
        "marking frame %" GST_TIME_FORMAT " as corrupted",
        GST_TIME_ARGS (frame->pts));
    GST_BUFFER_FLAG_SET (output_buffer, GST_BUFFER_FLAG_CORRUPTED);
  }

  gst_video_decoder_copy_metas (decoder, frame, frame->input_buffer,
      frame->output_buffer);

  /* Get an additional ref to the buffer, which is going to be pushed
   * downstream, the original ref is owned by the frame
   */
  output_buffer = gst_buffer_ref (output_buffer);

  /* Release frame so the buffer is writable when we push it downstream
   * if possible, i.e. if the subclass does not hold additional references
   * to the frame
   */
  gst_video_decoder_release_frame (decoder, frame);
  frame = NULL;

  if (decoder->output_segment.rate < 0.0
      && !(decoder->output_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS)) {
    GST_LOG_OBJECT (decoder, "queued frame");
    priv->output_queued = g_list_prepend (priv->output_queued, output_buffer);
  } else {
    ret = gst_video_decoder_clip_and_push_buf (decoder, output_buffer);
  }

done:
  if (frame)
    gst_video_decoder_release_frame (decoder, frame);
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
  return ret;
}

/**
 * gst_video_decoder_finish_subframe:
 * @decoder: a #GstVideoDecoder
 * @frame: (transfer full): the #GstVideoCodecFrame
 *
 * Indicates that a subframe has been finished decoding by the subclass.
 * This method should be called for all subframes except the last one,
 * for which @gst_video_decoder_finish_frame should be called instead.
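 *
 * As an illustrative sketch (not from the original documentation), a
 * subframe-mode @handle_frame could end like this, where
 * my_codec_decode_slice() and my_codec_is_last_slice() are hypothetical
 * helpers:
 * |[<!-- language="C" -->
 * if (!my_codec_decode_slice (frame->input_buffer, frame->output_buffer))
 *   return gst_video_decoder_drop_subframe (decoder, frame);
 *
 * if (my_codec_is_last_slice (frame->input_buffer))
 *   return gst_video_decoder_finish_frame (decoder, frame);
 *
 * return gst_video_decoder_finish_subframe (decoder, frame);
 * ]|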
 *
 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
 *
 * Since: 1.20
 */
GstFlowReturn
gst_video_decoder_finish_subframe (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  g_return_val_if_fail (gst_video_decoder_get_subframe_mode (decoder),
      GST_FLOW_NOT_SUPPORTED);

  GST_LOG_OBJECT (decoder, "finish subframe %p num=%d", frame->input_buffer,
      gst_video_decoder_get_input_subframe_index (decoder, frame));

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  frame->abidata.ABI.subframes_processed++;
  gst_video_codec_frame_unref (frame);

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return GST_FLOW_OK;
}

/* With stream lock, takes the frame reference */
static GstFlowReturn
gst_video_decoder_clip_and_push_buf (GstVideoDecoder * decoder, GstBuffer * buf)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstVideoDecoderPrivate *priv = decoder->priv;
  guint64 start, stop;
  guint64 cstart, cstop;
  GstSegment *segment;
  GstClockTime duration;

  /* Check for clipping */
  start = GST_BUFFER_PTS (buf);
  duration = GST_BUFFER_DURATION (buf);

  /* store that we have valid decoded data */
  priv->had_output_data = TRUE;
#ifdef OHOS_OPT_COMPAT
  priv->stream_had_output_data = TRUE;
#endif

  stop = GST_CLOCK_TIME_NONE;

  if (GST_CLOCK_TIME_IS_VALID (start) && GST_CLOCK_TIME_IS_VALID (duration)) {
    stop = start + duration;
  } else if (GST_CLOCK_TIME_IS_VALID (start)
      && !GST_CLOCK_TIME_IS_VALID (duration)) {
    /* If we don't clip away buffers that far before the segment we
     * can cause the pipeline to lock up. This can happen if audio is
     * properly clipped, and thus the audio sink does not preroll yet
     * but the video sink prerolls because we already outputted a
     * buffer here... and then queues run full.
     *
     * In the worst case we will clip one buffer too many here now if no
     * framerate is given, no buffer duration is given and the actual
     * framerate is lower than 25fps */
    stop = start + 40 * GST_MSECOND;
  }

  segment = &decoder->output_segment;
  if (gst_segment_clip (segment, GST_FORMAT_TIME, start, stop, &cstart, &cstop)) {
    GST_BUFFER_PTS (buf) = cstart;

    if (stop != GST_CLOCK_TIME_NONE && GST_CLOCK_TIME_IS_VALID (duration))
      GST_BUFFER_DURATION (buf) = cstop - cstart;

    GST_LOG_OBJECT (decoder,
        "accepting buffer inside segment: %" GST_TIME_FORMAT " %"
        GST_TIME_FORMAT " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
        " time %" GST_TIME_FORMAT,
        GST_TIME_ARGS (cstart),
        GST_TIME_ARGS (cstop),
        GST_TIME_ARGS (segment->start), GST_TIME_ARGS (segment->stop),
        GST_TIME_ARGS (segment->time));
  } else {
    GST_LOG_OBJECT (decoder,
        "dropping buffer outside segment: %" GST_TIME_FORMAT
        " %" GST_TIME_FORMAT
        " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
        " time %" GST_TIME_FORMAT,
        GST_TIME_ARGS (start), GST_TIME_ARGS (stop),
        GST_TIME_ARGS (segment->start),
        GST_TIME_ARGS (segment->stop), GST_TIME_ARGS (segment->time));
    /* only check and return EOS if upstream still
     * in the same segment and interested as such */
    if (decoder->priv->in_out_segment_sync) {
      if (segment->rate >= 0) {
        if (GST_BUFFER_PTS (buf) >= segment->stop)
          ret = GST_FLOW_EOS;
      } else if (GST_BUFFER_PTS (buf) < segment->start) {
        ret = GST_FLOW_EOS;
      }
    }
    gst_buffer_unref (buf);
    goto done;
  }

  /* Is buffer too late (QoS) ? */
  if (priv->do_qos && GST_CLOCK_TIME_IS_VALID (priv->earliest_time)
      && GST_CLOCK_TIME_IS_VALID (cstart)) {
    GstClockTime deadline =
        gst_segment_to_running_time (segment, GST_FORMAT_TIME, cstart);
    if (GST_CLOCK_TIME_IS_VALID (deadline) && deadline < priv->earliest_time) {
      GST_WARNING_OBJECT (decoder,
          "Dropping frame due to QoS. start:%" GST_TIME_FORMAT " deadline:%"
          GST_TIME_FORMAT " earliest_time:%" GST_TIME_FORMAT,
          GST_TIME_ARGS (start), GST_TIME_ARGS (deadline),
          GST_TIME_ARGS (priv->earliest_time));
      gst_video_decoder_post_qos_drop (decoder, cstart);
      gst_buffer_unref (buf);
      priv->discont = TRUE;
      goto done;
    }
  }

  /* Set DISCONT flag here ! */

  if (priv->discont) {
    GST_DEBUG_OBJECT (decoder, "Setting discont on output buffer");
    GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
    priv->discont = FALSE;
  }

  /* update rate estimate */
  GST_OBJECT_LOCK (decoder);
  priv->bytes_out += gst_buffer_get_size (buf);
  if (GST_CLOCK_TIME_IS_VALID (duration)) {
    priv->time += duration;
  } else {
    /* FIXME : Use difference between current and previous outgoing
     * timestamp, and relate to difference between current and previous
     * bytes */
    /* better none than nothing valid */
    priv->time = GST_CLOCK_TIME_NONE;
  }
  GST_OBJECT_UNLOCK (decoder);

  GST_DEBUG_OBJECT (decoder, "pushing buffer %p of size %" G_GSIZE_FORMAT ", "
      "PTS %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf,
      gst_buffer_get_size (buf),
      GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
      GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));

  /* we got data, so note things are looking up again, reduce
   * the error count, if there is one */
  if (G_UNLIKELY (priv->error_count))
    priv->error_count = 0;

#ifndef GST_DISABLE_DEBUG
  if (G_UNLIKELY (priv->last_reset_time != GST_CLOCK_TIME_NONE)) {
    GstClockTime elapsed = gst_util_get_timestamp () - priv->last_reset_time;

    /* First buffer since reset, report how long we took */
    GST_INFO_OBJECT (decoder, "First buffer since flush took %" GST_TIME_FORMAT
        " to produce", GST_TIME_ARGS (elapsed));
    priv->last_reset_time = GST_CLOCK_TIME_NONE;
  }
#endif

#ifdef OHOS_OPT_PERFORMANCE // ohos.opt.performance.0001: first frame decoded cost time
  if (!priv->has_push_first_frame) {
    priv->has_push_first_frame = TRUE;
    GST_WARNING_OBJECT (decoder, "KPI-TRACE: FIRST-VIDEO-FRAME videodecoder push first frame");
  }
#endif
  /* release STREAM_LOCK not to block upstream
   * while pushing buffer downstream */
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
  ret = gst_pad_push (decoder->srcpad, buf);
  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

done:
  return ret;
}

/**
 * gst_video_decoder_add_to_frame:
 * @decoder: a #GstVideoDecoder
 * @n_bytes: the number of bytes to add
 *
 * Removes the next @n_bytes of input data and adds them to the currently
 * parsed frame.
 */
void
gst_video_decoder_add_to_frame (GstVideoDecoder * decoder, int n_bytes)
{
  GstVideoDecoderPrivate *priv = decoder->priv;
  GstBuffer *buf;

  GST_LOG_OBJECT (decoder, "add %d bytes to frame", n_bytes);

  if (n_bytes == 0)
    return;

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  if (gst_adapter_available (priv->output_adapter) == 0) {
    priv->frame_offset =
        priv->input_offset - gst_adapter_available (priv->input_adapter);
  }
  buf = gst_adapter_take_buffer (priv->input_adapter, n_bytes);

  gst_adapter_push (priv->output_adapter, buf);
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
}

/**
 * gst_video_decoder_get_pending_frame_size:
 * @decoder: a #GstVideoDecoder
 *
 * Returns the number of bytes previously added to the current frame
 * by calling gst_video_decoder_add_to_frame().
 *
 * Returns: The number of bytes pending for the current frame
 *
 * Since: 1.4
 */
gsize
gst_video_decoder_get_pending_frame_size (GstVideoDecoder * decoder)
{
  GstVideoDecoderPrivate *priv = decoder->priv;
  gsize ret;

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  ret = gst_adapter_available (priv->output_adapter);
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
  GST_LOG_OBJECT (decoder, "Current pending frame has %" G_GSIZE_FORMAT " bytes",
      ret);

  return ret;
}

static guint64
gst_video_decoder_get_frame_duration (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  GstVideoCodecState *state = decoder->priv->output_state;

  /* it's possible that we don't have a state yet when we are dropping the
   * initial buffers */
  if (state == NULL)
    return GST_CLOCK_TIME_NONE;

  if (state->info.fps_d == 0 || state->info.fps_n == 0) {
    return GST_CLOCK_TIME_NONE;
  }

  /* FIXME: For interlaced frames this needs to take into account
   * the number of valid fields in the frame
   */

  return gst_util_uint64_scale (GST_SECOND, state->info.fps_d,
      state->info.fps_n);
}

/**
 * gst_video_decoder_have_frame:
 * @decoder: a #GstVideoDecoder
 *
 * Gathers all data collected for the currently parsed frame, gathers the
 * corresponding metadata and passes it along for further processing, i.e.
 * @handle_frame.
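 *
 * As an illustrative sketch (not from the original documentation), a @parse
 * implementation could slice the input into frames like this, where
 * my_codec_find_frame_end() is a hypothetical scanner returning the size of
 * a complete frame or 0 when more data is needed:
 * |[<!-- language="C" -->
 * static GstFlowReturn
 * my_decoder_parse (GstVideoDecoder * decoder, GstVideoCodecFrame * frame,
 *     GstAdapter * adapter, gboolean at_eos)
 * {
 *   gsize size = my_codec_find_frame_end (adapter);
 *
 *   if (size == 0)
 *     return GST_VIDEO_DECODER_FLOW_NEED_DATA;
 *
 *   gst_video_decoder_add_to_frame (decoder, size);
 *   return gst_video_decoder_have_frame (decoder);
 * }
 * ]|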
 *
 * Returns: a #GstFlowReturn
 */
GstFlowReturn
gst_video_decoder_have_frame (GstVideoDecoder * decoder)
{
  GstVideoDecoderPrivate *priv = decoder->priv;
  GstBuffer *buffer;
  int n_available;
  GstClockTime pts, dts, duration;
  guint flags;
  GstFlowReturn ret = GST_FLOW_OK;

  GST_LOG_OBJECT (decoder, "have_frame at offset %" G_GUINT64_FORMAT,
      priv->frame_offset);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  n_available = gst_adapter_available (priv->output_adapter);
  if (n_available) {
    buffer = gst_adapter_take_buffer (priv->output_adapter, n_available);
  } else {
    buffer = gst_buffer_new_and_alloc (0);
  }

  if (priv->current_frame->input_buffer) {
    gst_video_decoder_copy_metas (decoder, priv->current_frame,
        priv->current_frame->input_buffer, buffer);
    gst_buffer_unref (priv->current_frame->input_buffer);
  }
  priv->current_frame->input_buffer = buffer;

  gst_video_decoder_get_buffer_info_at_offset (decoder,
      priv->frame_offset, &pts, &dts, &duration, &flags);

  GST_BUFFER_PTS (buffer) = pts;
  GST_BUFFER_DTS (buffer) = dts;
  GST_BUFFER_DURATION (buffer) = duration;
  GST_BUFFER_FLAGS (buffer) = flags;

  GST_LOG_OBJECT (decoder, "collected frame size %d, "
      "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dur %"
      GST_TIME_FORMAT, n_available, GST_TIME_ARGS (pts), GST_TIME_ARGS (dts),
      GST_TIME_ARGS (duration));

  if (!GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DELTA_UNIT)) {
    GST_DEBUG_OBJECT (decoder, "Marking as sync point");
    GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (priv->current_frame);
  }

  if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_CORRUPTED)) {
    GST_DEBUG_OBJECT (decoder, "Marking as corrupted");
    GST_VIDEO_CODEC_FRAME_FLAG_SET (priv->current_frame,
        GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED);
  }

  /* In reverse playback, just capture and queue frames for later processing */
  if (decoder->input_segment.rate < 0.0) {
    priv->parse_gather =
        g_list_prepend (priv->parse_gather, priv->current_frame);
    priv->current_frame = NULL;
  } else {
    GstVideoCodecFrame *frame = priv->current_frame;
    frame->abidata.ABI.num_subframes++;
    /* In subframe mode, we keep a ref for ourselves
     * as this frame will be kept during the data collection
     * in parsed mode. The frame reference will be released by
     * finish_(sub)frame or drop_(sub)frame.*/
    if (gst_video_decoder_get_subframe_mode (decoder))
      gst_video_codec_frame_ref (priv->current_frame);
    else
      priv->current_frame = NULL;

    /* Decode the frame, which gives away our ref */
    ret = gst_video_decoder_decode_frame (decoder, frame);
  }

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return ret;
}

/* Pass the frame in priv->current_frame through the
 * handle_frame() callback for decoding and passing to gvd_finish_frame(),
 * or dropping by passing to gvd_drop_frame() */
static GstFlowReturn
gst_video_decoder_decode_frame (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  GstVideoDecoderPrivate *priv = decoder->priv;
  GstVideoDecoderClass *decoder_class;
  GstFlowReturn ret = GST_FLOW_OK;

  decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);

  /* FIXME : This should only have to be checked once (either the subclass has an
   * implementation, or it doesn't) */
  g_return_val_if_fail (decoder_class->handle_frame != NULL, GST_FLOW_ERROR);
  g_return_val_if_fail (frame != NULL, GST_FLOW_ERROR);

  frame->pts = GST_BUFFER_PTS (frame->input_buffer);
  frame->dts = GST_BUFFER_DTS (frame->input_buffer);
  frame->duration = GST_BUFFER_DURATION (frame->input_buffer);
  frame->deadline =
      gst_segment_to_running_time (&decoder->input_segment, GST_FORMAT_TIME,
      frame->pts);

  /* For keyframes, PTS = DTS + constant_offset, usually 0 to 3 frame
   * durations. */
  /* FIXME upstream can be quite wrong about the keyframe aspect,
   * so we could be going off here as well,
   * maybe let subclass decide if it really is/was a keyframe */
  if (GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
    priv->distance_from_sync = 0;

    GST_OBJECT_LOCK (decoder);
    priv->request_sync_point_flags &=
        ~GST_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT;
    if (priv->request_sync_point_frame_number == REQUEST_SYNC_POINT_PENDING)
      priv->request_sync_point_frame_number = frame->system_frame_number;
    GST_OBJECT_UNLOCK (decoder);

    if (GST_CLOCK_TIME_IS_VALID (frame->pts)
        && GST_CLOCK_TIME_IS_VALID (frame->dts)) {
      /* just in case they are not equal as might ideally be,
       * e.g. quicktime has a (positive) delta approach */
      priv->pts_delta = frame->pts - frame->dts;
      GST_DEBUG_OBJECT (decoder, "PTS delta %d ms",
          (gint) (priv->pts_delta / GST_MSECOND));
    }
  } else {
    if (priv->distance_from_sync == -1 && priv->automatic_request_sync_points) {
      GST_DEBUG_OBJECT (decoder,
          "Didn't receive a keyframe yet, requesting sync point");
      gst_video_decoder_request_sync_point (decoder, frame,
          priv->automatic_request_sync_point_flags);
    }

    GST_OBJECT_LOCK (decoder);
    if ((priv->needs_sync_point && priv->distance_from_sync == -1)
        || (priv->request_sync_point_flags &
            GST_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT)) {
      GST_WARNING_OBJECT (decoder,
          "Subclass requires a sync point but we didn't receive one yet, discarding input");
      GST_OBJECT_UNLOCK (decoder);
      if (priv->automatic_request_sync_points) {
        gst_video_decoder_request_sync_point (decoder, frame,
            priv->automatic_request_sync_point_flags);
      }
      gst_video_decoder_release_frame (decoder, frame);
      return GST_FLOW_OK;
    }
    GST_OBJECT_UNLOCK (decoder);

    priv->distance_from_sync++;
  }

  frame->distance_from_sync = priv->distance_from_sync;

  if (frame->abidata.ABI.num_subframes == 1) {
    frame->abidata.ABI.ts = frame->dts;
    frame->abidata.ABI.ts2 = frame->pts;
  }

  GST_LOG_OBJECT (decoder,
      "frame %p PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dist %d",
      frame, GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->dts),
      frame->distance_from_sync);
  /* FIXME: suboptimal way to add a unique frame to the list, in case of subframe mode. */
  if (!g_queue_find (&priv->frames, frame)) {
    g_queue_push_tail (&priv->frames, gst_video_codec_frame_ref (frame));
  } else {
    GST_LOG_OBJECT (decoder,
        "Do not add an existing frame used to decode subframes");
  }

  if (priv->frames.length > 10) {
    GST_DEBUG_OBJECT (decoder, "decoder frame list getting long: %d frames, "
        "possible internal leak?", priv->frames.length);
  }

#ifdef OHOS_OPT_PERFORMANCE // ohos.opt.performance.0001: first frame decoded cost time
  if (!priv->has_recv_first_key_frame) {
    priv->has_recv_first_key_frame = TRUE;
    GST_WARNING_OBJECT (decoder, "KPI-TRACE: FIRST-VIDEO-FRAME videodecoder recv first key frame");
  }
#endif
#ifdef OHOS_OPT_COMPAT
  // ohos.opt.compat.0053
  if (priv->has_push_first_frame && priv->only_one_frame_required) {
    gst_video_decoder_release_frame (decoder, frame);
    GST_DEBUG_OBJECT (decoder, "only need one frame, release!");
    return GST_FLOW_EOS;
  }
#endif
  /* do something with frame */
  ret = decoder_class->handle_frame (decoder, frame);
  if (ret != GST_FLOW_OK)
    GST_DEBUG_OBJECT (decoder, "flow error %s", gst_flow_get_name (ret));

  /* the frame has either been added to parse_gather or sent to
     handle frame so there is no need to unref it */
  return ret;
}


/**
 * gst_video_decoder_get_output_state:
 * @decoder: a #GstVideoDecoder
 *
 * Get the #GstVideoCodecState currently describing the output stream.
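 *
 * As an illustrative sketch (not from the original documentation):
 * |[<!-- language="C" -->
 * GstVideoCodecState *state;
 *
 * state = gst_video_decoder_get_output_state (decoder);
 * if (state) {
 *   GST_INFO_OBJECT (decoder, "output is %dx%d",
 *       GST_VIDEO_INFO_WIDTH (&state->info),
 *       GST_VIDEO_INFO_HEIGHT (&state->info));
 *   gst_video_codec_state_unref (state);
 * }
 * ]|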
 *
 * Returns: (transfer full): #GstVideoCodecState describing format of video data.
 */
GstVideoCodecState *
gst_video_decoder_get_output_state (GstVideoDecoder * decoder)
{
  GstVideoCodecState *state = NULL;

  GST_OBJECT_LOCK (decoder);
  if (decoder->priv->output_state)
    state = gst_video_codec_state_ref (decoder->priv->output_state);
  GST_OBJECT_UNLOCK (decoder);

  return state;
}

static GstVideoCodecState *
_set_interlaced_output_state (GstVideoDecoder * decoder,
    GstVideoFormat fmt, GstVideoInterlaceMode interlace_mode, guint width,
    guint height, GstVideoCodecState * reference, gboolean copy_interlace_mode)
{
  GstVideoDecoderPrivate *priv = decoder->priv;
  GstVideoCodecState *state;

  g_assert ((copy_interlace_mode
          && interlace_mode == GST_VIDEO_INTERLACE_MODE_PROGRESSIVE)
      || !copy_interlace_mode);

  GST_DEBUG_OBJECT (decoder,
      "fmt:%d, width:%d, height:%d, interlace-mode: %s, reference:%p", fmt,
      width, height, gst_video_interlace_mode_to_string (interlace_mode),
      reference);

  /* Create the new output state */
  state =
      _new_output_state (fmt, interlace_mode, width, height, reference,
      copy_interlace_mode);
  if (!state)
    return NULL;

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  GST_OBJECT_LOCK (decoder);
  /* Replace existing output state by new one */
  if (priv->output_state)
    gst_video_codec_state_unref (priv->output_state);
  priv->output_state = gst_video_codec_state_ref (state);

  if (priv->output_state != NULL && priv->output_state->info.fps_n > 0) {
    priv->qos_frame_duration =
        gst_util_uint64_scale (GST_SECOND, priv->output_state->info.fps_d,
        priv->output_state->info.fps_n);
  } else {
    priv->qos_frame_duration = 0;
  }
  priv->output_state_changed = TRUE;
  GST_OBJECT_UNLOCK (decoder);

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return state;
}

/**
 * gst_video_decoder_set_output_state:
 * @decoder: a #GstVideoDecoder
 * @fmt: a #GstVideoFormat
 * @width: The width in pixels
 * @height: The height in pixels
 * @reference: (allow-none) (transfer none): An optional reference #GstVideoCodecState
 *
 * Creates a new #GstVideoCodecState with the specified @fmt, @width and @height
 * as the output state for the decoder.
 * Any previously set output state on @decoder will be replaced by the newly
 * created one.
 *
 * If the subclass wishes to copy over existing fields (like pixel aspect ratio,
 * or framerate) from an existing #GstVideoCodecState, it can be provided as a
 * @reference.
 *
 * If the subclass wishes to override some fields from the output state (like
 * pixel-aspect-ratio or framerate) it can do so on the returned #GstVideoCodecState.
 *
 * The new output state will only take effect (set on pads and buffers) starting
 * from the next call to #gst_video_decoder_finish_frame().
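 *
 * As an illustrative sketch (not from the original documentation), a
 * @set_format implementation decoding to I420 might configure its output
 * from the parsed input state like this:
 * |[<!-- language="C" -->
 * GstVideoCodecState *output_state;
 *
 * output_state = gst_video_decoder_set_output_state (decoder,
 *     GST_VIDEO_FORMAT_I420, GST_VIDEO_INFO_WIDTH (&state->info),
 *     GST_VIDEO_INFO_HEIGHT (&state->info), state);
 * gst_video_codec_state_unref (output_state);
 * ]|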
 *
 * Returns: (transfer full): the newly configured output state.
 */
GstVideoCodecState *
gst_video_decoder_set_output_state (GstVideoDecoder * decoder,
    GstVideoFormat fmt, guint width, guint height,
    GstVideoCodecState * reference)
{
  return _set_interlaced_output_state (decoder, fmt,
      GST_VIDEO_INTERLACE_MODE_PROGRESSIVE, width, height, reference, TRUE);
}

/**
 * gst_video_decoder_set_interlaced_output_state:
 * @decoder: a #GstVideoDecoder
 * @fmt: a #GstVideoFormat
 * @width: The width in pixels
 * @height: The height in pixels
 * @interlace_mode: A #GstVideoInterlaceMode
 * @reference: (allow-none) (transfer none): An optional reference #GstVideoCodecState
 *
 * Same as #gst_video_decoder_set_output_state() but also allows you to set
 * the interlacing mode.
 *
 * Returns: (transfer full): the newly configured output state.
 *
 * Since: 1.16
 */
GstVideoCodecState *
gst_video_decoder_set_interlaced_output_state (GstVideoDecoder * decoder,
    GstVideoFormat fmt, GstVideoInterlaceMode interlace_mode, guint width,
    guint height, GstVideoCodecState * reference)
{
  return _set_interlaced_output_state (decoder, fmt, interlace_mode, width,
      height, reference, FALSE);
}


/**
 * gst_video_decoder_get_oldest_frame:
 * @decoder: a #GstVideoDecoder
 *
 * Get the oldest pending unfinished #GstVideoCodecFrame
 *
 * Returns: (transfer full): oldest pending unfinished #GstVideoCodecFrame.
 */
GstVideoCodecFrame *
gst_video_decoder_get_oldest_frame (GstVideoDecoder * decoder)
{
  GstVideoCodecFrame *frame = NULL;

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  if (decoder->priv->frames.head)
    frame = gst_video_codec_frame_ref (decoder->priv->frames.head->data);
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return (GstVideoCodecFrame *) frame;
}

/**
 * gst_video_decoder_get_frame:
 * @decoder: a #GstVideoDecoder
 * @frame_number: system_frame_number of a frame
 *
 * Get a pending unfinished #GstVideoCodecFrame
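 *
 * As an illustrative sketch (not from the original documentation), a
 * decoder driving an asynchronous codec might store the frame's
 * system_frame_number with the submitted job and look the frame up again
 * when the result arrives; my_job_take_buffer() and the job structure are
 * hypothetical:
 * |[<!-- language="C" -->
 * GstVideoCodecFrame *frame;
 *
 * frame = gst_video_decoder_get_frame (decoder, job->frame_number);
 * if (frame) {
 *   frame->output_buffer = my_job_take_buffer (job);
 *   gst_video_decoder_finish_frame (decoder, frame);
 * }
 * ]|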
 *
 * Returns: (transfer full): pending unfinished #GstVideoCodecFrame identified by @frame_number.
 */
GstVideoCodecFrame *
gst_video_decoder_get_frame (GstVideoDecoder * decoder, int frame_number)
{
  GList *g;
  GstVideoCodecFrame *frame = NULL;

  GST_DEBUG_OBJECT (decoder, "frame_number : %d", frame_number);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  for (g = decoder->priv->frames.head; g; g = g->next) {
    GstVideoCodecFrame *tmp = g->data;

    if (tmp->system_frame_number == frame_number) {
      frame = gst_video_codec_frame_ref (tmp);
      break;
    }
  }
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return frame;
}

/**
 * gst_video_decoder_get_frames:
 * @decoder: a #GstVideoDecoder
 *
 * Get all pending unfinished #GstVideoCodecFrame
 *
 * Returns: (transfer full) (element-type GstVideoCodecFrame): pending unfinished #GstVideoCodecFrame.
 */
GList *
gst_video_decoder_get_frames (GstVideoDecoder * decoder)
{
  GList *frames;

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  frames =
      g_list_copy_deep (decoder->priv->frames.head,
      (GCopyFunc) gst_video_codec_frame_ref, NULL);
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return frames;
}

static gboolean
gst_video_decoder_decide_allocation_default (GstVideoDecoder * decoder,
    GstQuery * query)
{
  GstCaps *outcaps = NULL;
  GstBufferPool *pool = NULL;
  guint size, min, max;
  GstAllocator *allocator = NULL;
  GstAllocationParams params;
  GstStructure *config;
  gboolean update_pool, update_allocator;
  GstVideoInfo vinfo;

  gst_query_parse_allocation (query, &outcaps, NULL);
  gst_video_info_init (&vinfo);
  if (outcaps)
    gst_video_info_from_caps (&vinfo, outcaps);

  /* we got configuration from our peer or the decide_allocation method,
   * parse them */
  if (gst_query_get_n_allocation_params (query) > 0) {
    /* try the allocator */
    gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
    update_allocator = TRUE;
  } else {
    allocator = NULL;
    gst_allocation_params_init (&params);
    update_allocator = FALSE;
  }

  if (gst_query_get_n_allocation_pools (query) > 0) {
    gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);
    size = MAX (size, vinfo.size);
    update_pool = TRUE;
  } else {
    pool = NULL;
    size = vinfo.size;
    min = max = 0;

    update_pool = FALSE;
  }

  if (pool == NULL) {
    /* no pool, we can make our own */
    GST_DEBUG_OBJECT (decoder, "no pool, making new pool");
    pool = gst_video_buffer_pool_new ();
  }

  /* now configure */
  config = gst_buffer_pool_get_config (pool);
  gst_buffer_pool_config_set_params (config, outcaps, size, min, max);
  gst_buffer_pool_config_set_allocator (config, allocator, &params);

  GST_DEBUG_OBJECT (decoder,
      "setting config %" GST_PTR_FORMAT " in pool %" GST_PTR_FORMAT, config,
      pool);
  if (!gst_buffer_pool_set_config (pool, config)) {
    config = gst_buffer_pool_get_config (pool);

    /* If the changes are not acceptable, fall back to the generic pool */
    if (!gst_buffer_pool_config_validate_params (config, outcaps, size, min,
            max)) {
      GST_DEBUG_OBJECT (decoder, "unsupported pool, making new pool");

      gst_object_unref (pool);
      pool = gst_video_buffer_pool_new ();
      gst_buffer_pool_config_set_params (config, outcaps, size, min, max);
      gst_buffer_pool_config_set_allocator (config, allocator, &params);
    }

    if (!gst_buffer_pool_set_config (pool, config))
      goto config_failed;
  }

  if (update_allocator)
    gst_query_set_nth_allocation_param (query, 0, allocator, &params);
  else
    gst_query_add_allocation_param (query, allocator, &params);
  if (allocator)
    gst_object_unref (allocator);

  if (update_pool)
    gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max);
  else
    gst_query_add_allocation_pool (query, pool, size, min, max);

  if (pool)
    gst_object_unref (pool);

  return TRUE;

config_failed:
  if (allocator)
    gst_object_unref (allocator);
  if (pool)
    gst_object_unref (pool);
  GST_ELEMENT_ERROR (decoder, RESOURCE, SETTINGS,
      ("Failed to configure the buffer pool"),
      ("Configuration is most likely invalid, please report this issue."));
  return FALSE;
}

static gboolean
gst_video_decoder_propose_allocation_default (GstVideoDecoder * decoder,
    GstQuery * query)
{
  return TRUE;
}

static gboolean
gst_video_decoder_negotiate_pool (GstVideoDecoder * decoder, GstCaps * caps)
{
  GstVideoDecoderClass *klass;
  GstQuery *query = NULL;
  GstBufferPool *pool = NULL;
  GstAllocator *allocator;
  GstAllocationParams params;
  gboolean ret = TRUE;

  klass = GST_VIDEO_DECODER_GET_CLASS (decoder);

  query = gst_query_new_allocation (caps, TRUE);

  GST_DEBUG_OBJECT (decoder, "do query ALLOCATION");

  if (!gst_pad_peer_query (decoder->srcpad, query)) {
    GST_DEBUG_OBJECT (decoder, "didn't get downstream ALLOCATION hints");
  }

  g_assert (klass->decide_allocation != NULL);
  ret = klass->decide_allocation (decoder, query);

  GST_DEBUG_OBJECT (decoder, "ALLOCATION (%d) params: %" GST_PTR_FORMAT, ret,
      query);

  if (!ret)
    goto no_decide_allocation;

  /* we got configuration from our peer or the decide_allocation method,
   * parse them */
  if (gst_query_get_n_allocation_params (query) > 0) {
    gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
  } else {
    allocator = NULL;
    gst_allocation_params_init (&params);
  }

  if (gst_query_get_n_allocation_pools (query) > 0)
    gst_query_parse_nth_allocation_pool (query, 0, &pool, NULL, NULL, NULL);
  if (!pool) {
    if (allocator)
      gst_object_unref (allocator);
    ret = FALSE;
    goto no_decide_allocation;
  }

  if (decoder->priv->allocator)
    gst_object_unref (decoder->priv->allocator);
  decoder->priv->allocator = allocator;
  decoder->priv->params = params;

  if (decoder->priv->pool) {
4476     /* do not set the bufferpool to inactive here, it will be done
4477      * on its finalize function. As videodecoder do late renegotiation
4478      * it might happen that some element downstream is already using this
4479      * same bufferpool and deactivating it will make it fail.
4480      * Happens when a downstream element changes from passthrough to
4481      * non-passthrough and gets this same bufferpool to use */
4482     GST_DEBUG_OBJECT (decoder, "unref pool %" GST_PTR_FORMAT,
4483         decoder->priv->pool);
4484     gst_object_unref (decoder->priv->pool);
4485   }
4486   decoder->priv->pool = pool;
4487 
4488   /* and activate */
4489   GST_DEBUG_OBJECT (decoder, "activate pool %" GST_PTR_FORMAT, pool);
4490   gst_buffer_pool_set_active (pool, TRUE);
4491 
4492 done:
4493   if (query)
4494     gst_query_unref (query);
4495 
4496   return ret;
4497 
4498   /* Errors */
4499 no_decide_allocation:
4500   {
4501     GST_WARNING_OBJECT (decoder, "Subclass failed to decide allocation");
4502     goto done;
4503   }
4504 }
4505 
static gboolean
gst_video_decoder_negotiate_default (GstVideoDecoder * decoder)
{
  GstVideoCodecState *state = decoder->priv->output_state;
  gboolean ret = TRUE;
  GstVideoCodecFrame *frame;
  GstCaps *prevcaps;
  GstCaps *incaps;

  if (!state) {
    GST_DEBUG_OBJECT (decoder,
        "Trying to negotiate the pool without setting the output format");
    ret = gst_video_decoder_negotiate_pool (decoder, NULL);
    goto done;
  }

  g_return_val_if_fail (GST_VIDEO_INFO_WIDTH (&state->info) != 0, FALSE);
  g_return_val_if_fail (GST_VIDEO_INFO_HEIGHT (&state->info) != 0, FALSE);

  /* If the base class didn't set any multiview params, assume mono
   * now */
  if (GST_VIDEO_INFO_MULTIVIEW_MODE (&state->info) ==
      GST_VIDEO_MULTIVIEW_MODE_NONE) {
    GST_VIDEO_INFO_MULTIVIEW_MODE (&state->info) =
        GST_VIDEO_MULTIVIEW_MODE_MONO;
    GST_VIDEO_INFO_MULTIVIEW_FLAGS (&state->info) =
        GST_VIDEO_MULTIVIEW_FLAGS_NONE;
  }

  GST_DEBUG_OBJECT (decoder, "output_state par %d/%d fps %d/%d",
      state->info.par_n, state->info.par_d,
      state->info.fps_n, state->info.fps_d);

  if (state->caps == NULL)
    state->caps = gst_video_info_to_caps (&state->info);

  incaps = gst_pad_get_current_caps (GST_VIDEO_DECODER_SINK_PAD (decoder));
  if (incaps) {
    GstStructure *in_struct;

    in_struct = gst_caps_get_structure (incaps, 0);
    if (gst_structure_has_field (in_struct, "mastering-display-info") ||
        gst_structure_has_field (in_struct, "content-light-level")) {
      const gchar *s;

      /* prefer upstream information */
      state->caps = gst_caps_make_writable (state->caps);
      if ((s = gst_structure_get_string (in_struct, "mastering-display-info"))) {
        gst_caps_set_simple (state->caps,
            "mastering-display-info", G_TYPE_STRING, s, NULL);
      }

      if ((s = gst_structure_get_string (in_struct, "content-light-level"))) {
        gst_caps_set_simple (state->caps,
            "content-light-level", G_TYPE_STRING, s, NULL);
      }
    }

    gst_caps_unref (incaps);
  }

  if (state->allocation_caps == NULL)
    state->allocation_caps = gst_caps_ref (state->caps);

  GST_DEBUG_OBJECT (decoder, "setting caps %" GST_PTR_FORMAT, state->caps);

  /* Push all pending pre-caps events of the oldest frame before
   * setting caps */
  frame = decoder->priv->frames.head ? decoder->priv->frames.head->data : NULL;
  if (frame || decoder->priv->current_frame_events) {
    GList **events, *l;

    if (frame) {
      events = &frame->events;
    } else {
      events = &decoder->priv->current_frame_events;
    }

    for (l = g_list_last (*events); l;) {
      GstEvent *event = GST_EVENT (l->data);
      GList *tmp;

      if (GST_EVENT_TYPE (event) < GST_EVENT_CAPS) {
        gst_video_decoder_push_event (decoder, event);
        tmp = l;
        l = l->prev;
        *events = g_list_delete_link (*events, tmp);
      } else {
        l = l->prev;
      }
    }
  }

  prevcaps = gst_pad_get_current_caps (decoder->srcpad);
  if (!prevcaps || !gst_caps_is_equal (prevcaps, state->caps)) {
    if (!prevcaps) {
      GST_DEBUG_OBJECT (decoder, "decoder src pad has currently NULL caps");
    }
    ret = gst_pad_set_caps (decoder->srcpad, state->caps);
  } else {
    ret = TRUE;
    GST_DEBUG_OBJECT (decoder,
        "current src pad and output state caps are the same");
  }
  if (prevcaps)
    gst_caps_unref (prevcaps);

  if (!ret)
    goto done;
  decoder->priv->output_state_changed = FALSE;
  /* Negotiate pool */
  ret = gst_video_decoder_negotiate_pool (decoder, state->allocation_caps);

done:
  return ret;
}

static gboolean
gst_video_decoder_negotiate_unlocked (GstVideoDecoder * decoder)
{
  GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
  gboolean ret = TRUE;

  if (G_LIKELY (klass->negotiate))
    ret = klass->negotiate (decoder);

  return ret;
}

/**
 * gst_video_decoder_negotiate:
 * @decoder: a #GstVideoDecoder
 *
 * Negotiates src pad caps with downstream elements according to the currently
 * configured #GstVideoCodecState. Unmarks GST_PAD_FLAG_NEED_RECONFIGURE in
 * any case, but marks it again if negotiation fails.
 *
 * Returns: %TRUE if the negotiation succeeded, else %FALSE.
 */
gboolean
gst_video_decoder_negotiate (GstVideoDecoder * decoder)
{
  GstVideoDecoderClass *klass;
  gboolean ret = TRUE;

  g_return_val_if_fail (GST_IS_VIDEO_DECODER (decoder), FALSE);

  klass = GST_VIDEO_DECODER_GET_CLASS (decoder);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  gst_pad_check_reconfigure (decoder->srcpad);
  if (klass->negotiate) {
    ret = klass->negotiate (decoder);
    if (!ret)
      gst_pad_mark_reconfigure (decoder->srcpad);
  }
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return ret;
}
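
/* A minimal usage sketch (hypothetical subclass, not part of this file):
 * a subclass would typically set its output state in @set_format and then
 * renegotiate explicitly before pushing buffers, e.g.:
 *
 *   GstVideoCodecState *state =
 *       gst_video_decoder_set_output_state (decoder, GST_VIDEO_FORMAT_I420,
 *       width, height, input_state);
 *   gst_video_codec_state_unref (state);
 *   if (!gst_video_decoder_negotiate (decoder))
 *     return GST_FLOW_NOT_NEGOTIATED;
 */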

/**
 * gst_video_decoder_allocate_output_buffer:
 * @decoder: a #GstVideoDecoder
 *
 * Helper function that allocates a buffer to hold a video frame for @decoder's
 * current #GstVideoCodecState.
 *
 * You should use gst_video_decoder_allocate_output_frame() instead of this
 * function, if at all possible.
 *
 * Returns: (transfer full): allocated buffer, or NULL if no buffer could be
 *     allocated (e.g. when downstream is flushing or shutting down)
 */
GstBuffer *
gst_video_decoder_allocate_output_buffer (GstVideoDecoder * decoder)
{
  GstFlowReturn flow;
  GstBuffer *buffer = NULL;
  gboolean needs_reconfigure = FALSE;

  GST_DEBUG ("alloc src buffer");

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
  if (G_UNLIKELY (!decoder->priv->output_state
          || decoder->priv->output_state_changed || needs_reconfigure)) {
    if (!gst_video_decoder_negotiate_unlocked (decoder)) {
      if (decoder->priv->output_state) {
        GST_DEBUG_OBJECT (decoder, "Failed to negotiate, fallback allocation");
        gst_pad_mark_reconfigure (decoder->srcpad);
        goto fallback;
      } else {
        GST_DEBUG_OBJECT (decoder, "Failed to negotiate, output_buffer=NULL");
        goto failed_allocation;
      }
    }
  }

  flow = gst_buffer_pool_acquire_buffer (decoder->priv->pool, &buffer, NULL);

  if (flow != GST_FLOW_OK) {
    GST_INFO_OBJECT (decoder, "couldn't allocate output buffer, flow %s",
        gst_flow_get_name (flow));
    if (decoder->priv->output_state && decoder->priv->output_state->info.size)
      goto fallback;
    else
      goto failed_allocation;
  }
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return buffer;

fallback:
  GST_INFO_OBJECT (decoder,
      "Fallback allocation, creating a new buffer which doesn't belong to any buffer pool");
  buffer =
      gst_buffer_new_allocate (NULL, decoder->priv->output_state->info.size,
      NULL);
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return buffer;

failed_allocation:
  GST_ERROR_OBJECT (decoder, "Failed to allocate the buffer");
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return buffer;
}

/**
 * gst_video_decoder_allocate_output_frame:
 * @decoder: a #GstVideoDecoder
 * @frame: a #GstVideoCodecFrame
 *
 * Helper function that allocates a buffer to hold a video frame for @decoder's
 * current #GstVideoCodecState.  The subclass should already have configured
 * the video state and set the src pad caps.
 *
 * The buffer allocated here is owned by the frame and you should only
 * keep references to the frame, not the buffer.
 *
 * Returns: %GST_FLOW_OK if an output buffer could be allocated
 */
GstFlowReturn
gst_video_decoder_allocate_output_frame (GstVideoDecoder *
    decoder, GstVideoCodecFrame * frame)
{
  return gst_video_decoder_allocate_output_frame_with_params (decoder, frame,
      NULL);
}

/**
 * gst_video_decoder_allocate_output_frame_with_params:
 * @decoder: a #GstVideoDecoder
 * @frame: a #GstVideoCodecFrame
 * @params: a #GstBufferPoolAcquireParams
 *
 * Same as gst_video_decoder_allocate_output_frame() except it allows passing
 * #GstBufferPoolAcquireParams to the underlying
 * gst_buffer_pool_acquire_buffer() call.
 *
 * Returns: %GST_FLOW_OK if an output buffer could be allocated
 *
 * Since: 1.12
 */
GstFlowReturn
gst_video_decoder_allocate_output_frame_with_params (GstVideoDecoder *
    decoder, GstVideoCodecFrame * frame, GstBufferPoolAcquireParams * params)
{
  GstFlowReturn flow_ret;
  GstVideoCodecState *state;
  int num_bytes;
  gboolean needs_reconfigure = FALSE;

  g_return_val_if_fail (decoder->priv->output_state, GST_FLOW_NOT_NEGOTIATED);
  g_return_val_if_fail (frame->output_buffer == NULL, GST_FLOW_ERROR);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  state = decoder->priv->output_state;
  if (state == NULL) {
    g_warning ("Output state should be set before allocating frame");
    goto error;
  }
  num_bytes = GST_VIDEO_INFO_SIZE (&state->info);
  if (num_bytes == 0) {
    g_warning ("Frame size should not be 0");
    goto error;
  }

  needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
  if (G_UNLIKELY (decoder->priv->output_state_changed || needs_reconfigure)) {
    if (!gst_video_decoder_negotiate_unlocked (decoder)) {
      gst_pad_mark_reconfigure (decoder->srcpad);
      if (GST_PAD_IS_FLUSHING (decoder->srcpad)) {
        GST_DEBUG_OBJECT (decoder,
            "Failed to negotiate a pool: pad is flushing");
        goto flushing;
      } else if (!decoder->priv->pool || decoder->priv->output_state_changed) {
        GST_DEBUG_OBJECT (decoder,
            "Failed to negotiate a pool and no previous pool to reuse");
        goto error;
      } else {
        GST_DEBUG_OBJECT (decoder,
            "Failed to negotiate a pool, falling back to the previous pool");
      }
    }
  }

  GST_LOG_OBJECT (decoder, "alloc buffer size %d", num_bytes);

  flow_ret = gst_buffer_pool_acquire_buffer (decoder->priv->pool,
      &frame->output_buffer, params);

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return flow_ret;

flushing:
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
  return GST_FLOW_FLUSHING;

error:
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
  return GST_FLOW_ERROR;
}
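
/* A minimal handle_frame sketch (hypothetical subclass, not part of this
 * file): allocate the output buffer via the base class, fill it, then hand
 * the frame back with gst_video_decoder_finish_frame(), e.g.:
 *
 *   static GstFlowReturn
 *   my_dec_handle_frame (GstVideoDecoder * decoder, GstVideoCodecFrame * frame)
 *   {
 *     GstFlowReturn ret;
 *
 *     ret = gst_video_decoder_allocate_output_frame (decoder, frame);
 *     if (ret != GST_FLOW_OK) {
 *       gst_video_decoder_release_frame (decoder, frame);
 *       return ret;
 *     }
 *     // ... decode frame->input_buffer into frame->output_buffer ...
 *     return gst_video_decoder_finish_frame (decoder, frame);
 *   }
 */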

/**
 * gst_video_decoder_get_max_decode_time:
 * @decoder: a #GstVideoDecoder
 * @frame: a #GstVideoCodecFrame
 *
 * Determines the maximum possible decoding time for @frame that will
 * allow it to decode and arrive in time (as determined by QoS events).
 * In particular, a negative result means decoding in time is no longer
 * possible, and the frame should therefore be decoded as quickly as
 * possible or be skipped.
 *
 * Returns: max decoding time.
 */
GstClockTimeDiff
gst_video_decoder_get_max_decode_time (GstVideoDecoder *
    decoder, GstVideoCodecFrame * frame)
{
  GstClockTimeDiff deadline;
  GstClockTime earliest_time;

  GST_OBJECT_LOCK (decoder);
  earliest_time = decoder->priv->earliest_time;
  if (GST_CLOCK_TIME_IS_VALID (earliest_time)
      && GST_CLOCK_TIME_IS_VALID (frame->deadline))
    deadline = GST_CLOCK_DIFF (earliest_time, frame->deadline);
  else
    deadline = G_MAXINT64;

  GST_LOG_OBJECT (decoder, "earliest %" GST_TIME_FORMAT
      ", frame deadline %" GST_TIME_FORMAT ", deadline %" GST_STIME_FORMAT,
      GST_TIME_ARGS (earliest_time), GST_TIME_ARGS (frame->deadline),
      GST_STIME_ARGS (deadline));

  GST_OBJECT_UNLOCK (decoder);

  return deadline;
}
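
/* A minimal QoS sketch (hypothetical subclass, not part of this file):
 * inside @handle_frame, skip expensive decoding when the frame is already
 * too late, e.g.:
 *
 *   GstClockTimeDiff deadline =
 *       gst_video_decoder_get_max_decode_time (decoder, frame);
 *   if (deadline < 0) {
 *     GST_DEBUG_OBJECT (decoder, "frame is late, dropping");
 *     return gst_video_decoder_drop_frame (decoder, frame);
 *   }
 */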

/**
 * gst_video_decoder_get_qos_proportion:
 * @decoder: a #GstVideoDecoder
 *
 * Returns: The current QoS proportion.
 *
 * Since: 1.0.3
 */
gdouble
gst_video_decoder_get_qos_proportion (GstVideoDecoder * decoder)
{
  gdouble proportion;

  g_return_val_if_fail (GST_IS_VIDEO_DECODER (decoder), 1.0);

  GST_OBJECT_LOCK (decoder);
  proportion = decoder->priv->proportion;
  GST_OBJECT_UNLOCK (decoder);

  return proportion;
}

GstFlowReturn
_gst_video_decoder_error (GstVideoDecoder * dec, gint weight,
    GQuark domain, gint code, gchar * txt, gchar * dbg, const gchar * file,
    const gchar * function, gint line)
{
  if (txt)
    GST_WARNING_OBJECT (dec, "error: %s", txt);
  if (dbg)
    GST_WARNING_OBJECT (dec, "error: %s", dbg);
  dec->priv->error_count += weight;
  dec->priv->discont = TRUE;
  if (dec->priv->max_errors >= 0 &&
      dec->priv->error_count > dec->priv->max_errors) {
    gst_element_message_full (GST_ELEMENT (dec), GST_MESSAGE_ERROR,
        domain, code, txt, dbg, file, function, line);
    return GST_FLOW_ERROR;
  } else {
    g_free (txt);
    g_free (dbg);
    return GST_FLOW_OK;
  }
}
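
/* Subclasses normally reach this through the GST_VIDEO_DECODER_ERROR macro
 * rather than calling it directly. A minimal sketch (hypothetical subclass,
 * not part of this file):
 *
 *   GstFlowReturn ret;
 *
 *   GST_VIDEO_DECODER_ERROR (decoder, 1, STREAM, DECODE,
 *       ("Could not decode frame"), ("corrupted bitstream"), ret);
 *   if (ret != GST_FLOW_OK)
 *     return ret;
 */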

/**
 * gst_video_decoder_set_max_errors:
 * @dec: a #GstVideoDecoder
 * @num: max tolerated errors
 *
 * Sets the number of tolerated decoder errors, where a tolerated one is only
 * warned about, but exceeding that number leads to a fatal error. You can set
 * -1 to never return fatal errors. The default is
 * GST_VIDEO_DECODER_MAX_ERRORS.
 *
 * The '-1' option was added in 1.4
 */
void
gst_video_decoder_set_max_errors (GstVideoDecoder * dec, gint num)
{
  g_return_if_fail (GST_IS_VIDEO_DECODER (dec));

  dec->priv->max_errors = num;
}

/**
 * gst_video_decoder_get_max_errors:
 * @dec: a #GstVideoDecoder
 *
 * Returns: currently configured decoder tolerated error count.
 */
gint
gst_video_decoder_get_max_errors (GstVideoDecoder * dec)
{
  g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), 0);

  return dec->priv->max_errors;
}

/**
 * gst_video_decoder_set_needs_format:
 * @dec: a #GstVideoDecoder
 * @enabled: new state
 *
 * Configures decoder format needs.  If enabled, the subclass needs to be
 * negotiated with format caps before it can process any data.  It will then
 * never be handed any data before it has been configured.
 * Otherwise, it might be handed data without having been configured and is
 * then expected to be able to cope, either by relying on defaults or by
 * deriving the needed configuration from the input data.
 *
 * Since: 1.4
 */
void
gst_video_decoder_set_needs_format (GstVideoDecoder * dec, gboolean enabled)
{
  g_return_if_fail (GST_IS_VIDEO_DECODER (dec));

  dec->priv->needs_format = enabled;
}

/**
 * gst_video_decoder_get_needs_format:
 * @dec: a #GstVideoDecoder
 *
 * Queries decoder required format handling.
 *
 * Returns: %TRUE if required format handling is enabled.
 *
 * Since: 1.4
 */
gboolean
gst_video_decoder_get_needs_format (GstVideoDecoder * dec)
{
  gboolean result;

  g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), FALSE);

  result = dec->priv->needs_format;

  return result;
}

/**
 * gst_video_decoder_set_packetized:
 * @decoder: a #GstVideoDecoder
 * @packetized: whether the input data should be considered as packetized.
 *
 * Allows the baseclass to consider input data as packetized or not. If the
 * input is packetized, then the @parse method will not be called.
 */
void
gst_video_decoder_set_packetized (GstVideoDecoder * decoder,
    gboolean packetized)
{
  decoder->priv->packetized = packetized;
}

/**
 * gst_video_decoder_get_packetized:
 * @decoder: a #GstVideoDecoder
 *
 * Queries whether input data is considered packetized or not by the
 * base class.
 *
 * Returns: %TRUE if input data is considered packetized.
 */
gboolean
gst_video_decoder_get_packetized (GstVideoDecoder * decoder)
{
  return decoder->priv->packetized;
}
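
/* A minimal configuration sketch (hypothetical subclass, not part of this
 * file): a decoder for a packetized format would typically configure the
 * base class in its instance-init function, e.g.:
 *
 *   static void
 *   my_dec_init (MyDec * self)
 *   {
 *     GstVideoDecoder *decoder = GST_VIDEO_DECODER (self);
 *
 *     // one parsed/packetized input buffer per frame, no @parse needed
 *     gst_video_decoder_set_packetized (decoder, TRUE);
 *     // require caps negotiation before any data is handled
 *     gst_video_decoder_set_needs_format (decoder, TRUE);
 *   }
 */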

/**
 * gst_video_decoder_have_last_subframe:
 * @decoder: a #GstVideoDecoder
 * @frame: (transfer none): the #GstVideoCodecFrame to update
 *
 * Indicates that the last subframe of @frame has been processed by the
 * decoder. This will release the current frame in the video decoder,
 * allowing it to receive new frames from upstream elements. This method
 * must be called in the subclass @handle_frame callback.
 *
 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
 *
 * Since: 1.20
 */
GstFlowReturn
gst_video_decoder_have_last_subframe (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  g_return_val_if_fail (gst_video_decoder_get_subframe_mode (decoder),
      GST_FLOW_OK);
  /* unref once from the list */
  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  if (decoder->priv->current_frame == frame) {
    gst_video_codec_frame_unref (decoder->priv->current_frame);
    decoder->priv->current_frame = NULL;
  }
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return GST_FLOW_OK;
}

/**
 * gst_video_decoder_set_subframe_mode:
 * @decoder: a #GstVideoDecoder
 * @subframe_mode: whether the input data should be considered as subframes.
 *
 * If this is set to %TRUE, it informs the base class that the subclass
 * can receive data at a granularity lower than one frame.
 *
 * Note that in this mode, the subclass has two options. It can either
 * require the presence of a GST_VIDEO_BUFFER_FLAG_MARKER to mark the
 * end of a frame, or it can operate in such a way that it will decode
 * a single frame at a time. In this second case, every buffer that
 * arrives at the element is considered part of the same frame until
 * gst_video_decoder_finish_frame() is called.
 *
 * In either case, the same #GstVideoCodecFrame will be passed to the
 * GstVideoDecoderClass:handle_frame vmethod repeatedly with a
 * different GstVideoCodecFrame:input_buffer every time until the end of the
 * frame has been signaled using either method.
 * This method must be called during the decoder subclass @set_format call.
 *
 * Since: 1.20
 */
void
gst_video_decoder_set_subframe_mode (GstVideoDecoder * decoder,
    gboolean subframe_mode)
{
  decoder->priv->subframe_mode = subframe_mode;
}

/**
 * gst_video_decoder_get_subframe_mode:
 * @decoder: a #GstVideoDecoder
 *
 * Queries whether input data is considered as subframes or not by the
 * base class. If %FALSE, each input buffer will be considered as a full
 * frame.
 *
 * Returns: %TRUE if input data is considered as subframes.
 *
 * Since: 1.20
 */
gboolean
gst_video_decoder_get_subframe_mode (GstVideoDecoder * decoder)
{
  return decoder->priv->subframe_mode;
}
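
/* A minimal subframe-mode sketch (hypothetical subclass, not part of this
 * file): enable the mode in @set_format, then signal the last subframe from
 * @handle_frame, e.g.:
 *
 *   // in @set_format:
 *   gst_video_decoder_set_subframe_mode (decoder, TRUE);
 *
 *   // in @handle_frame, once the buffer carrying the end of the frame
 *   // (e.g. flagged with GST_VIDEO_BUFFER_FLAG_MARKER) has been consumed:
 *   gst_video_decoder_have_last_subframe (decoder, frame);
 *   return gst_video_decoder_finish_frame (decoder, frame);
 */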

/**
 * gst_video_decoder_get_input_subframe_index:
 * @decoder: a #GstVideoDecoder
 * @frame: (transfer none): the #GstVideoCodecFrame to query
 *
 * Queries the number of subframes received so far by the decoder baseclass
 * for @frame.
 *
 * Returns: the current subframe index received in subframe mode, 1 otherwise.
 *
 * Since: 1.20
 */
guint
gst_video_decoder_get_input_subframe_index (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  return frame->abidata.ABI.num_subframes;
}

/**
 * gst_video_decoder_get_processed_subframe_index:
 * @decoder: a #GstVideoDecoder
 * @frame: (transfer none): the #GstVideoCodecFrame to query
 *
 * Queries the number of subframes of @frame processed so far by
 * the decoder baseclass.
 *
 * Returns: the number of subframes processed so far in subframe mode.
 *
 * Since: 1.20
 */
guint
gst_video_decoder_get_processed_subframe_index (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  return frame->abidata.ABI.subframes_processed;
}

/**
 * gst_video_decoder_set_estimate_rate:
 * @dec: a #GstVideoDecoder
 * @enabled: whether to enable byte to time conversion
 *
 * Allows the baseclass to perform byte to time estimated conversion.
 */
void
gst_video_decoder_set_estimate_rate (GstVideoDecoder * dec, gboolean enabled)
{
  g_return_if_fail (GST_IS_VIDEO_DECODER (dec));

  dec->priv->do_estimate_rate = enabled;
}

/**
 * gst_video_decoder_get_estimate_rate:
 * @dec: a #GstVideoDecoder
 *
 * Returns: currently configured byte to time conversion setting
 */
gboolean
gst_video_decoder_get_estimate_rate (GstVideoDecoder * dec)
{
  g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), 0);

  return dec->priv->do_estimate_rate;
}

/**
 * gst_video_decoder_set_latency:
 * @decoder: a #GstVideoDecoder
 * @min_latency: minimum latency
 * @max_latency: maximum latency
 *
 * Lets #GstVideoDecoder sub-classes tell the baseclass what the decoder
 * latency is. Will also post a LATENCY message on the bus so the pipeline
 * can reconfigure its global latency.
 */
void
gst_video_decoder_set_latency (GstVideoDecoder * decoder,
    GstClockTime min_latency, GstClockTime max_latency)
{
  g_return_if_fail (GST_CLOCK_TIME_IS_VALID (min_latency));
  g_return_if_fail (max_latency >= min_latency);

  GST_OBJECT_LOCK (decoder);
  decoder->priv->min_latency = min_latency;
  decoder->priv->max_latency = max_latency;
  GST_OBJECT_UNLOCK (decoder);

  gst_element_post_message (GST_ELEMENT_CAST (decoder),
      gst_message_new_latency (GST_OBJECT_CAST (decoder)));
}
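
/* A minimal latency sketch (hypothetical subclass, not part of this file):
 * a decoder that buffers up to N frames of reordering delay could report
 * that in @set_format once the framerate is known; here `info` and `N` are
 * assumed names, e.g.:
 *
 *   if (info->fps_n > 0) {
 *     GstClockTime latency =
 *         gst_util_uint64_scale (N * GST_SECOND, info->fps_d, info->fps_n);
 *     gst_video_decoder_set_latency (decoder, latency, latency);
 *   }
 */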

/**
 * gst_video_decoder_get_latency:
 * @decoder: a #GstVideoDecoder
 * @min_latency: (out) (allow-none): address of variable in which to store the
 *     configured minimum latency, or %NULL
 * @max_latency: (out) (allow-none): address of variable in which to store the
 *     configured maximum latency, or %NULL
 *
 * Query the configured decoder latency. Results will be returned via
 * @min_latency and @max_latency.
 */
void
gst_video_decoder_get_latency (GstVideoDecoder * decoder,
    GstClockTime * min_latency, GstClockTime * max_latency)
{
  GST_OBJECT_LOCK (decoder);
  if (min_latency)
    *min_latency = decoder->priv->min_latency;
  if (max_latency)
    *max_latency = decoder->priv->max_latency;
  GST_OBJECT_UNLOCK (decoder);
}

/**
 * gst_video_decoder_merge_tags:
 * @decoder: a #GstVideoDecoder
 * @tags: (allow-none): a #GstTagList to merge, or NULL to unset
 *     previously-set tags
 * @mode: the #GstTagMergeMode to use, usually #GST_TAG_MERGE_REPLACE
 *
 * Sets the video decoder tags and how they should be merged with any
 * upstream stream tags. This will override any tags previously-set
 * with gst_video_decoder_merge_tags().
 *
 * Note that this is provided for convenience, and the subclass is
 * not required to use this and can still do tag handling on its own.
 *
 * MT safe.
 */
void
gst_video_decoder_merge_tags (GstVideoDecoder * decoder,
    const GstTagList * tags, GstTagMergeMode mode)
{
  g_return_if_fail (GST_IS_VIDEO_DECODER (decoder));
  g_return_if_fail (tags == NULL || GST_IS_TAG_LIST (tags));
  g_return_if_fail (tags == NULL || mode != GST_TAG_MERGE_UNDEFINED);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  if (decoder->priv->tags != tags) {
    if (decoder->priv->tags) {
      gst_tag_list_unref (decoder->priv->tags);
      decoder->priv->tags = NULL;
      decoder->priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
    }
    if (tags) {
      decoder->priv->tags = gst_tag_list_ref ((GstTagList *) tags);
      decoder->priv->tags_merge_mode = mode;
    }

    GST_DEBUG_OBJECT (decoder, "set decoder tags to %" GST_PTR_FORMAT, tags);
    decoder->priv->tags_changed = TRUE;
  }
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
}
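
/* A minimal tag sketch (hypothetical subclass, not part of this file):
 * advertise decoder metadata once the bitstream has been parsed; `bitrate`
 * is an assumed variable, e.g.:
 *
 *   GstTagList *tags = gst_tag_list_new (GST_TAG_VIDEO_CODEC, "My Codec",
 *       GST_TAG_BITRATE, (guint) bitrate, NULL);
 *   gst_video_decoder_merge_tags (decoder, tags, GST_TAG_MERGE_REPLACE);
 *   gst_tag_list_unref (tags);
 */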

/**
 * gst_video_decoder_get_buffer_pool:
 * @decoder: a #GstVideoDecoder
 *
 * Returns: (transfer full): the instance of the #GstBufferPool used
 * by the decoder; unref it after use
 */
GstBufferPool *
gst_video_decoder_get_buffer_pool (GstVideoDecoder * decoder)
{
  g_return_val_if_fail (GST_IS_VIDEO_DECODER (decoder), NULL);

  if (decoder->priv->pool)
    return gst_object_ref (decoder->priv->pool);

  return NULL;
}

/**
 * gst_video_decoder_get_allocator:
 * @decoder: a #GstVideoDecoder
 * @allocator: (out) (allow-none) (transfer full): the #GstAllocator
 * used
 * @params: (out) (allow-none) (transfer full): the
 * #GstAllocationParams of @allocator
 *
 * Lets #GstVideoDecoder sub-classes know the memory @allocator
 * used by the base class and its @params.
 *
 * Unref the @allocator after use.
 */
void
gst_video_decoder_get_allocator (GstVideoDecoder * decoder,
    GstAllocator ** allocator, GstAllocationParams * params)
{
  g_return_if_fail (GST_IS_VIDEO_DECODER (decoder));

  if (allocator)
    *allocator = decoder->priv->allocator ?
        gst_object_ref (decoder->priv->allocator) : NULL;

  if (params)
    *params = decoder->priv->params;
}
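
/* A minimal decide_allocation sketch (hypothetical subclass, not part of
 * this file; assumes the usual `parent_class` from G_DEFINE_TYPE): chain up
 * to the default implementation first, then inspect what was negotiated,
 * e.g.:
 *
 *   static gboolean
 *   my_dec_decide_allocation (GstVideoDecoder * decoder, GstQuery * query)
 *   {
 *     GstAllocator *allocator;
 *     GstAllocationParams params;
 *
 *     if (!GST_VIDEO_DECODER_CLASS (parent_class)->decide_allocation (decoder,
 *             query))
 *       return FALSE;
 *
 *     gst_video_decoder_get_allocator (decoder, &allocator, &params);
 *     // ... check for e.g. a special-purpose allocator here ...
 *     if (allocator)
 *       gst_object_unref (allocator);
 *     return TRUE;
 *   }
 */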

/**
 * gst_video_decoder_set_use_default_pad_acceptcaps:
 * @decoder: a #GstVideoDecoder
 * @use: if the default pad accept-caps query handling should be used
 *
 * Lets #GstVideoDecoder sub-classes decide if they want the sink pad
 * to use the default pad query handler to reply to accept-caps queries.
 *
 * By setting this to %TRUE it is possible to further customize the default
 * handler with %GST_PAD_SET_ACCEPT_INTERSECT and
 * %GST_PAD_SET_ACCEPT_TEMPLATE
 *
 * Since: 1.6
 */
void
gst_video_decoder_set_use_default_pad_acceptcaps (GstVideoDecoder * decoder,
    gboolean use)
{
  decoder->priv->use_default_pad_acceptcaps = use;
}

static void
gst_video_decoder_request_sync_point_internal (GstVideoDecoder * dec,
    GstClockTime deadline, GstVideoDecoderRequestSyncPointFlags flags)
{
  GstEvent *fku = NULL;
  GstVideoDecoderPrivate *priv;

  g_return_if_fail (GST_IS_VIDEO_DECODER (dec));

  priv = dec->priv;

  GST_OBJECT_LOCK (dec);

  /* Check if we're allowed to send a new force-keyunit event.
   * frame->deadline is set to the running time of the PTS. */
  if (priv->min_force_key_unit_interval == 0 ||
      deadline == GST_CLOCK_TIME_NONE ||
      (priv->min_force_key_unit_interval != GST_CLOCK_TIME_NONE &&
          (priv->last_force_key_unit_time == GST_CLOCK_TIME_NONE
              || (priv->last_force_key_unit_time +
                  priv->min_force_key_unit_interval <= deadline)))) {
    GST_DEBUG_OBJECT (dec,
        "Requesting a new key-unit for frame with deadline %" GST_TIME_FORMAT,
        GST_TIME_ARGS (deadline));
    fku =
        gst_video_event_new_upstream_force_key_unit (GST_CLOCK_TIME_NONE, FALSE,
        0);
    priv->last_force_key_unit_time = deadline;
  } else {
    GST_DEBUG_OBJECT (dec,
        "Can't request a new key-unit for frame with deadline %"
        GST_TIME_FORMAT, GST_TIME_ARGS (deadline));
  }
  priv->request_sync_point_flags |= flags;
  /* We don't yet know the frame number of the sync point, so set it to a
   * frame number higher than any allowed frame number */
  priv->request_sync_point_frame_number = REQUEST_SYNC_POINT_PENDING;
  GST_OBJECT_UNLOCK (dec);

  if (fku)
    gst_pad_push_event (dec->sinkpad, fku);
}

/**
 * gst_video_decoder_request_sync_point:
 * @dec: a #GstVideoDecoder
 * @frame: a #GstVideoCodecFrame
 * @flags: #GstVideoDecoderRequestSyncPointFlags
 *
 * Allows the #GstVideoDecoder subclass to request from the base class that
 * a new sync point should be requested from upstream, and that @frame was
 * the frame when the subclass noticed that a new sync point is required. A
 * reason for the subclass to do this could be missing reference frames, for
 * example.
 *
 * The base class will then request a new sync point from upstream as long as
 * the time that passed since the last one exceeds
 * #GstVideoDecoder:min-force-key-unit-interval.
 *
 * The subclass can signal via @flags how the frames until the next sync point
 * should be handled:
 *
 *   * If %GST_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT is selected then
 *     all following input frames until the next sync point are discarded.
 *     This can be useful if the lack of a sync point will prevent all further
 *     decoding and the decoder implementation is not very robust in handling
 *     missing reference frames.
 *   * If %GST_VIDEO_DECODER_REQUEST_SYNC_POINT_CORRUPT_OUTPUT is selected
 *     then all output frames following @frame are marked as corrupted via
 *     %GST_BUFFER_FLAG_CORRUPTED. Corrupted frames can be automatically
 *     dropped by the base class, see #GstVideoDecoder:discard-corrupted-frames.
 *     Subclasses can manually mark frames as corrupted via %GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED
 *     before calling gst_video_decoder_finish_frame().
 *
 * Since: 1.20
 */
void
gst_video_decoder_request_sync_point (GstVideoDecoder * dec,
    GstVideoCodecFrame * frame, GstVideoDecoderRequestSyncPointFlags flags)
{
  g_return_if_fail (GST_IS_VIDEO_DECODER (dec));
  g_return_if_fail (frame != NULL);

  gst_video_decoder_request_sync_point_internal (dec, frame->deadline, flags);
}
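
/* A minimal sketch (hypothetical subclass, not part of this file): when a
 * reference frame turns out to be missing during @handle_frame, ask for a
 * new sync point and discard input until it arrives; `reference_frame_missing`
 * is an assumed variable, e.g.:
 *
 *   if (reference_frame_missing) {
 *     gst_video_decoder_request_sync_point (decoder, frame,
 *         GST_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT);
 *     return gst_video_decoder_drop_frame (decoder, frame);
 *   }
 */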

/**
 * gst_video_decoder_set_needs_sync_point:
 * @dec: a #GstVideoDecoder
 * @enabled: new state
 *
 * Configures whether the decoder requires a sync point before it starts
 * outputting data in the beginning. If enabled, the base class will discard
 * all non-sync point frames in the beginning and after a flush and will not
 * pass them to the subclass.
 *
 * If the first frame is not a sync point, the base class will request a sync
 * point via the force-key-unit event.
 *
 * Since: 1.20
 */
void
gst_video_decoder_set_needs_sync_point (GstVideoDecoder * dec, gboolean enabled)
{
  g_return_if_fail (GST_IS_VIDEO_DECODER (dec));

  dec->priv->needs_sync_point = enabled;
}

/**
 * gst_video_decoder_get_needs_sync_point:
 * @dec: a #GstVideoDecoder
 *
 * Queries if the decoder requires a sync point before it starts outputting
 * data in the beginning.
 *
 * Returns: %TRUE if a sync point is required in the beginning.
 *
 * Since: 1.20
 */
gboolean
gst_video_decoder_get_needs_sync_point (GstVideoDecoder * dec)
{
  gboolean result;

  g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), FALSE);

  result = dec->priv->needs_sync_point;

  return result;
}

#ifdef OHOS_OPT_COMPAT
/* ohos.opt.compat.0053 */
gboolean
gst_video_decoder_need_decode (GstVideoDecoder * dec)
{
  return !(dec->priv->has_push_first_frame && dec->priv->only_one_frame_required);
}
#endif