1 /* GStreamer
2  * Copyright (C) 2009 Igalia S.L.
3  * Author: Iago Toral Quiroga <itoral@igalia.com>
4  * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
5  * Copyright (C) 2011 Nokia Corporation. All rights reserved.
6  *   Contact: Stefan Kost <stefan.kost@nokia.com>
7  *
8  * This library is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Library General Public
10  * License as published by the Free Software Foundation; either
11  * version 2 of the License, or (at your option) any later version.
12  *
13  * This library is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * Library General Public License for more details.
17  *
18  * You should have received a copy of the GNU Library General Public
19  * License along with this library; if not, write to the
20  * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
21  * Boston, MA 02110-1301, USA.
22  */
23 
24 /**
25  * SECTION:gstaudiodecoder
26  * @title: GstAudioDecoder
27  * @short_description: Base class for audio decoders
28  * @see_also: #GstBaseTransform
29  *
30  * This base class is for audio decoders turning encoded data into
31  * raw audio samples.
32  *
33  * GstAudioDecoder and its subclass should cooperate as follows.
34  *
35  * ## Configuration
36  *
37  *   * Initially, GstAudioDecoder calls @start when the decoder element
38  *     is activated, which allows the subclass to perform any global setup.
39  *     Base class (context) parameters can already be set according to subclass
40  *     capabilities (or possibly upon receiving more information in a subsequent
41  *     @set_format).
42  *   * GstAudioDecoder calls @set_format to inform the subclass of the format
43  *     of input audio data that it is about to receive.
44  *     While unlikely, it might be called more than once, if changing input
45  *     parameters requires reconfiguration.
46  *   * GstAudioDecoder calls @stop at end of all processing.
47  *
48  * From the configuration stage onward, and throughout processing, GstAudioDecoder
49  * provides various (context) parameters, e.g. describing the format of
50  * output audio data (valid when output caps have been set) or the current parsing state.
51  * Conversely, the subclass can and should configure the context to inform the
52  * base class of its expectations w.r.t. buffer handling.
53  *
54  * ## Data processing
55  *   * Base class gathers input data, and optionally allows the subclass
56  *     to parse this into subsequently manageable (as defined by subclass)
57  *     chunks.  Such chunks are subsequently referred to as 'frames',
58  *     though they may or may not correspond to 1 (or more) audio format frames.
59  *   * Each input frame is provided to the subclass' @handle_frame.
60  *   * If codec processing results in decoded data, the subclass should call
61  *     @gst_audio_decoder_finish_frame to have the decoded data pushed
62  *     downstream.
63  *   * Just prior to actually pushing a buffer downstream,
64  *     it is passed to @pre_push.  The subclass should either use this callback
65  *     to arrange for additional downstream pushing, or otherwise ensure that such
66  *     custom pushing only occurs after at least one method call has finished since
67  *     the src pad caps were set.
68  *   * During the parsing process the GstAudioDecoder base class will handle both
69  *     srcpad and sinkpad events. Sink events will be passed to the subclass
70  *     if the @event callback has been provided.
71  *
72  * ## Shutdown phase
73  *
74  *   * GstAudioDecoder class calls @stop to inform the subclass that data
75  *     parsing will be stopped.
76  *
77  * The subclass is responsible for providing pad template caps for
78  * source and sink pads. The pads need to be named "sink" and "src". It also
79  * needs to set fixed caps on the srcpad once the format is known.  This
80  * is typically when the base class calls the subclass' @set_format function, though
81  * it might be delayed until @gst_audio_decoder_finish_frame is called.
82  *
83  * In summary, the above process should have the subclass concentrating on
84  * codec data processing while leaving other matters, most notably timestamp
85  * handling, to the base class.  While the subclass may exert more control
86  * in this area (see e.g. @pre_push), doing so is very much not recommended.
87  *
88  * In particular, the base class will try to arrange for perfect output timestamps
89  * as much as possible while tracking upstream timestamps.
90  * To this end, if the deviation between the next ideal expected perfect timestamp
91  * and the upstream timestamp exceeds #GstAudioDecoder:tolerance, a resync to
92  * upstream occurs (which always happens if the tolerance mechanism is disabled).
93  *
94  * In non-live pipelines, the base class can also (configurably) arrange for
95  * output buffer aggregation, which may help to reduce the number of
96  * small(er) buffers being pushed and processed downstream. Note that this
97  * feature is only available if the buffer layout is interleaved. For planar
98  * buffers, the decoder implementation is fully responsible for the output
99  * buffer size.
100  *
101  * On the other hand, it should be noted that the base class only provides
102  * limited seeking support (upon explicit subclass request), as full-fledged
103  * support should rather be left to an upstream demuxer, parser or the like.
104  * This simple approach caters for seeking and duration reporting using
105  * estimated input bitrates.
106  *
107  * Things that the subclass needs to take care of:
108  *
109  *   * Provide pad templates
110  *   * Set source pad caps when appropriate
111  *   * Set user-configurable properties to sane defaults for the format and
112  *     codec at hand, and convey some subclass capabilities and
113  *     expectations in context.
114  *
115  *   * Accept data in @handle_frame and provide decoded results to
116  *     @gst_audio_decoder_finish_frame.  If it is prepared to perform
117  *     PLC, it should also accept NULL data in @handle_frame and provide
118  *     data for the indicated duration.
119  *
120  */
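/*
 * Illustrative sketch of the cooperation described above: a minimal subclass
 * skeleton wiring up @start, @set_format, @handle_frame and @stop.  GstMyDec
 * and my_codec_decode() are hypothetical stand-ins for an actual decoder and
 * its codec library; the fixed S16/48000/stereo output is just an example.
 *
 *   static gboolean
 *   gst_my_dec_start (GstAudioDecoder * dec)
 *   {
 *     // one-time global setup, e.g. open the codec library
 *     return TRUE;
 *   }
 *
 *   static gboolean
 *   gst_my_dec_set_format (GstAudioDecoder * dec, GstCaps * caps)
 *   {
 *     GstAudioInfo info;
 *
 *     // configure the output format as soon as it is known
 *     gst_audio_info_init (&info);
 *     gst_audio_info_set_format (&info, GST_AUDIO_FORMAT_S16, 48000, 2, NULL);
 *     return gst_audio_decoder_set_output_format (dec, &info);
 *   }
 *
 *   static GstFlowReturn
 *   gst_my_dec_handle_frame (GstAudioDecoder * dec, GstBuffer * inbuf)
 *   {
 *     GstBuffer *outbuf;
 *
 *     // NULL input signals draining (or PLC, if enabled); this sketch
 *     // simply produces no output for it
 *     if (inbuf == NULL)
 *       return GST_FLOW_OK;
 *
 *     outbuf = my_codec_decode (inbuf);       // hypothetical codec call
 *     return gst_audio_decoder_finish_frame (dec, outbuf, 1);
 *   }
 *
 *   static gboolean
 *   gst_my_dec_stop (GstAudioDecoder * dec)
 *   {
 *     // release whatever @start acquired
 *     return TRUE;
 *   }
 */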
121 
122 #ifdef HAVE_CONFIG_H
123 #include "config.h"
124 #endif
125 
126 #include "gstaudiodecoder.h"
127 #include "gstaudioutilsprivate.h"
128 #include <gst/pbutils/descriptions.h>
129 
130 #include <string.h>
131 #ifdef OHOS_OPT_PERFORMANCE
132 // ohos.opt.performance.0005
133 // add trace
134 #include "gst_trace.h"
135 #endif
136 
137 GST_DEBUG_CATEGORY (audiodecoder_debug);
138 #define GST_CAT_DEFAULT audiodecoder_debug
139 
140 enum
141 {
142   LAST_SIGNAL
143 };
144 
145 enum
146 {
147   PROP_0,
148   PROP_LATENCY,
149   PROP_TOLERANCE,
150   PROP_PLC,
151   PROP_MAX_ERRORS
152 };
153 
154 #define DEFAULT_LATENCY    0
155 #define DEFAULT_TOLERANCE  0
156 #define DEFAULT_PLC        FALSE
157 #define DEFAULT_DRAINABLE  TRUE
158 #define DEFAULT_NEEDS_FORMAT  FALSE
159 #define DEFAULT_MAX_ERRORS GST_AUDIO_DECODER_MAX_ERRORS
160 
161 typedef struct _GstAudioDecoderContext
162 {
163   /* last negotiated input caps */
164   GstCaps *input_caps;
165 
166   /* (output) audio format */
167   GstAudioInfo info;
168   GstCaps *caps;
169   gboolean output_format_changed;
170 
171   /* parsing state */
172   gboolean eos;
173   gboolean sync;
174 
175   gboolean had_output_data;
176   gboolean had_input_data;
177 
178   /* misc */
179   gint delay;
180 
181   /* output */
182   gboolean do_plc;
183   gboolean do_estimate_rate;
184   GstCaps *allocation_caps;
185   /* MT-protected (with LOCK) */
186   GstClockTime min_latency;
187   GstClockTime max_latency;
188 
189   GstAllocator *allocator;
190   GstAllocationParams params;
191 } GstAudioDecoderContext;
192 
193 struct _GstAudioDecoderPrivate
194 {
195   /* activation status */
196   gboolean active;
197 
198   /* input base/first ts as basis for output ts */
199   GstClockTime base_ts;
200   /* input samples processed and sent downstream so far (w.r.t. base_ts) */
201   guint64 samples;
202 
203   /* collected input data */
204   GstAdapter *adapter;
205   /* tracking input ts for changes */
206   GstClockTime prev_ts;
207   guint64 prev_distance;
208   /* frames obtained from input */
209   GQueue frames;
210   /* collected output data */
211   GstAdapter *adapter_out;
212   /* ts and duration for output data collected above */
213   GstClockTime out_ts, out_dur;
214   /* mark outgoing discont */
215   gboolean discont;
216 
217   /* subclass gave all it could already */
218   gboolean drained;
219   /* subclass currently being forcibly drained */
220   gboolean force;
221   /* input_segment and output_segment are identical */
222   gboolean in_out_segment_sync;
223   /* TRUE if we have an active set of instant rate flags */
224   gboolean decode_flags_override;
225   GstSegmentFlags decode_flags;
226 
227   /* expecting the buffer with DISCONT flag */
228   gboolean expecting_discont_buf;
229 
230   /* number of samples pushed out via _finish_subframe(), resets on _finish_frame() */
231   guint subframe_samples;
232 
233   /* input bps estimation */
234   /* global in bytes seen */
235   guint64 bytes_in;
236   /* global samples sent out */
237   guint64 samples_out;
238   /* bytes flushed during parsing */
239   guint sync_flush;
240   /* error count */
241   gint error_count;
242   /* max errors */
243   gint max_errors;
244 
245   /* upstream stream tags (global tags are passed through as-is) */
246   GstTagList *upstream_tags;
247 
248   /* subclass tags */
249   GstTagList *taglist;          /* FIXME: rename to decoder_tags */
250   GstTagMergeMode decoder_tags_merge_mode;
251 
252   gboolean taglist_changed;     /* FIXME: rename to tags_changed */
253 
254   /* whether circumstances allow output aggregation */
255   gint agg;
256 
257   /* reverse playback queues */
258   /* collect input */
259   GList *gather;
260   /* to-be-decoded */
261   GList *decode;
262   /* reversed output */
263   GList *queued;
264 
265   /* context storage */
266   GstAudioDecoderContext ctx;
267 
268   /* properties */
269   GstClockTime latency;
270   GstClockTime tolerance;
271   gboolean plc;
272   gboolean drainable;
273   gboolean needs_format;
274 
275   /* pending serialized sink events, will be sent from finish_frame() */
276   GList *pending_events;
277 
278   /* flags */
279   gboolean use_default_pad_acceptcaps;
280 #ifdef OHOS_OPT_PERFORMANCE
281   // ohos.opt.performance.0006: the PTS segment of the first frame is calibrated to improve the performance.
282   gboolean has_recv_first_frame;
283   gboolean has_push_first_frame;
284 #endif
285 };
286 
287 /* cached quark to avoid contention on the global quark table lock */
288 #define META_TAG_AUDIO meta_tag_audio_quark
289 static GQuark meta_tag_audio_quark;
290 
291 static void gst_audio_decoder_finalize (GObject * object);
292 static void gst_audio_decoder_set_property (GObject * object,
293     guint prop_id, const GValue * value, GParamSpec * pspec);
294 static void gst_audio_decoder_get_property (GObject * object,
295     guint prop_id, GValue * value, GParamSpec * pspec);
296 
297 static void gst_audio_decoder_clear_queues (GstAudioDecoder * dec);
298 static GstFlowReturn gst_audio_decoder_chain_reverse (GstAudioDecoder *
299     dec, GstBuffer * buf);
300 
301 static GstStateChangeReturn gst_audio_decoder_change_state (GstElement *
302     element, GstStateChange transition);
303 static gboolean gst_audio_decoder_sink_eventfunc (GstAudioDecoder * dec,
304     GstEvent * event);
305 static gboolean gst_audio_decoder_src_eventfunc (GstAudioDecoder * dec,
306     GstEvent * event);
307 static gboolean gst_audio_decoder_sink_event (GstPad * pad, GstObject * parent,
308     GstEvent * event);
309 static gboolean gst_audio_decoder_src_event (GstPad * pad, GstObject * parent,
310     GstEvent * event);
311 static gboolean gst_audio_decoder_sink_setcaps (GstAudioDecoder * dec,
312     GstCaps * caps);
313 static GstFlowReturn gst_audio_decoder_chain (GstPad * pad, GstObject * parent,
314     GstBuffer * buf);
315 static gboolean gst_audio_decoder_src_query (GstPad * pad, GstObject * parent,
316     GstQuery * query);
317 static gboolean gst_audio_decoder_sink_query (GstPad * pad, GstObject * parent,
318     GstQuery * query);
319 static void gst_audio_decoder_reset (GstAudioDecoder * dec, gboolean full);
320 
321 static gboolean gst_audio_decoder_decide_allocation_default (GstAudioDecoder *
322     dec, GstQuery * query);
323 static gboolean gst_audio_decoder_propose_allocation_default (GstAudioDecoder *
324     dec, GstQuery * query);
325 static gboolean gst_audio_decoder_negotiate_default (GstAudioDecoder * dec);
326 static gboolean gst_audio_decoder_negotiate_unlocked (GstAudioDecoder * dec);
327 static gboolean gst_audio_decoder_handle_gap (GstAudioDecoder * dec,
328     GstEvent * event);
329 static gboolean gst_audio_decoder_sink_query_default (GstAudioDecoder * dec,
330     GstQuery * query);
331 static gboolean gst_audio_decoder_src_query_default (GstAudioDecoder * dec,
332     GstQuery * query);
333 
334 static gboolean gst_audio_decoder_transform_meta_default (GstAudioDecoder *
335     decoder, GstBuffer * outbuf, GstMeta * meta, GstBuffer * inbuf);
336 
337 static GstFlowReturn
338 gst_audio_decoder_finish_frame_or_subframe (GstAudioDecoder * dec,
339     GstBuffer * buf, gint frames);
340 
341 static GstElementClass *parent_class = NULL;
342 static gint private_offset = 0;
343 
344 static void gst_audio_decoder_class_init (GstAudioDecoderClass * klass);
345 static void gst_audio_decoder_init (GstAudioDecoder * dec,
346     GstAudioDecoderClass * klass);
347 
348 GType
349 gst_audio_decoder_get_type (void)
350 {
351   static gsize audio_decoder_type = 0;
352 
353   if (g_once_init_enter (&audio_decoder_type)) {
354     GType _type;
355     static const GTypeInfo audio_decoder_info = {
356       sizeof (GstAudioDecoderClass),
357       NULL,
358       NULL,
359       (GClassInitFunc) gst_audio_decoder_class_init,
360       NULL,
361       NULL,
362       sizeof (GstAudioDecoder),
363       0,
364       (GInstanceInitFunc) gst_audio_decoder_init,
365     };
366 
367     _type = g_type_register_static (GST_TYPE_ELEMENT,
368         "GstAudioDecoder", &audio_decoder_info, G_TYPE_FLAG_ABSTRACT);
369 
370     private_offset =
371         g_type_add_instance_private (_type, sizeof (GstAudioDecoderPrivate));
372 
373     g_once_init_leave (&audio_decoder_type, _type);
374   }
375   return audio_decoder_type;
376 }
377 
378 static inline GstAudioDecoderPrivate *
379 gst_audio_decoder_get_instance_private (GstAudioDecoder * self)
380 {
381   return (G_STRUCT_MEMBER_P (self, private_offset));
382 }
383 
384 static void
385 gst_audio_decoder_class_init (GstAudioDecoderClass * klass)
386 {
387   GObjectClass *gobject_class;
388   GstElementClass *element_class;
389   GstAudioDecoderClass *audiodecoder_class;
390 
391   gobject_class = G_OBJECT_CLASS (klass);
392   element_class = GST_ELEMENT_CLASS (klass);
393   audiodecoder_class = GST_AUDIO_DECODER_CLASS (klass);
394 
395   parent_class = g_type_class_peek_parent (klass);
396 
397   if (private_offset != 0)
398     g_type_class_adjust_private_offset (klass, &private_offset);
399 
400   GST_DEBUG_CATEGORY_INIT (audiodecoder_debug, "audiodecoder", 0,
401       "audio decoder base class");
402 
403   gobject_class->set_property = gst_audio_decoder_set_property;
404   gobject_class->get_property = gst_audio_decoder_get_property;
405   gobject_class->finalize = gst_audio_decoder_finalize;
406 
407   element_class->change_state =
408       GST_DEBUG_FUNCPTR (gst_audio_decoder_change_state);
409 
410   /* Properties */
411   g_object_class_install_property (gobject_class, PROP_LATENCY,
412       g_param_spec_int64 ("min-latency", "Minimum Latency",
413           "Aggregate output data to a minimum of latency time (ns)",
414           0, G_MAXINT64, DEFAULT_LATENCY,
415           G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
416 
417   g_object_class_install_property (gobject_class, PROP_TOLERANCE,
418       g_param_spec_int64 ("tolerance", "Tolerance",
419           "Perfect ts while timestamp jitter/imperfection within tolerance (ns)",
420           0, G_MAXINT64, DEFAULT_TOLERANCE,
421           G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
422 
423   g_object_class_install_property (gobject_class, PROP_PLC,
424       g_param_spec_boolean ("plc", "Packet Loss Concealment",
425           "Perform packet loss concealment (if supported)",
426           DEFAULT_PLC, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
427 
428   /**
429    * GstAudioDecoder:max-errors:
430    *
431    * Maximum number of tolerated consecutive decode errors. See
432    * gst_audio_decoder_set_max_errors() for more details.
433    *
434    * Since: 1.18
435    */
436   g_object_class_install_property (gobject_class, PROP_MAX_ERRORS,
437       g_param_spec_int ("max-errors", "Max errors",
438           "Max consecutive decoder errors before returning flow error",
439           -1, G_MAXINT, DEFAULT_MAX_ERRORS,
440           G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
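  /* For example, an application can disable the consecutive-error limit
   * entirely with g_object_set (decoder, "max-errors", -1, NULL); a subclass
   * would more typically call gst_audio_decoder_set_max_errors() from its
   * init function. */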
441 
442   audiodecoder_class->sink_event =
443       GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_eventfunc);
444   audiodecoder_class->src_event =
445       GST_DEBUG_FUNCPTR (gst_audio_decoder_src_eventfunc);
446   audiodecoder_class->propose_allocation =
447       GST_DEBUG_FUNCPTR (gst_audio_decoder_propose_allocation_default);
448   audiodecoder_class->decide_allocation =
449       GST_DEBUG_FUNCPTR (gst_audio_decoder_decide_allocation_default);
450   audiodecoder_class->negotiate =
451       GST_DEBUG_FUNCPTR (gst_audio_decoder_negotiate_default);
452   audiodecoder_class->sink_query =
453       GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_query_default);
454   audiodecoder_class->src_query =
455       GST_DEBUG_FUNCPTR (gst_audio_decoder_src_query_default);
456   audiodecoder_class->transform_meta =
457       GST_DEBUG_FUNCPTR (gst_audio_decoder_transform_meta_default);
458 
459   meta_tag_audio_quark = g_quark_from_static_string (GST_META_TAG_AUDIO_STR);
460 }
461 
462 static void
463 gst_audio_decoder_init (GstAudioDecoder * dec, GstAudioDecoderClass * klass)
464 {
465   GstPadTemplate *pad_template;
466 
467   GST_DEBUG_OBJECT (dec, "gst_audio_decoder_init");
468 
469   dec->priv = gst_audio_decoder_get_instance_private (dec);
470 
471   /* Setup sink pad */
472   pad_template =
473       gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "sink");
474   g_return_if_fail (pad_template != NULL);
475 
476   dec->sinkpad = gst_pad_new_from_template (pad_template, "sink");
477   gst_pad_set_event_function (dec->sinkpad,
478       GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_event));
479   gst_pad_set_chain_function (dec->sinkpad,
480       GST_DEBUG_FUNCPTR (gst_audio_decoder_chain));
481   gst_pad_set_query_function (dec->sinkpad,
482       GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_query));
483   gst_element_add_pad (GST_ELEMENT (dec), dec->sinkpad);
484   GST_DEBUG_OBJECT (dec, "sinkpad created");
485 
486   /* Setup source pad */
487   pad_template =
488       gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "src");
489   g_return_if_fail (pad_template != NULL);
490 
491   dec->srcpad = gst_pad_new_from_template (pad_template, "src");
492   gst_pad_set_event_function (dec->srcpad,
493       GST_DEBUG_FUNCPTR (gst_audio_decoder_src_event));
494   gst_pad_set_query_function (dec->srcpad,
495       GST_DEBUG_FUNCPTR (gst_audio_decoder_src_query));
496   gst_element_add_pad (GST_ELEMENT (dec), dec->srcpad);
497   GST_DEBUG_OBJECT (dec, "srcpad created");
498 
499   dec->priv->adapter = gst_adapter_new ();
500   dec->priv->adapter_out = gst_adapter_new ();
501   g_queue_init (&dec->priv->frames);
502 
503   g_rec_mutex_init (&dec->stream_lock);
504 
505   /* property default */
506   dec->priv->latency = DEFAULT_LATENCY;
507   dec->priv->tolerance = DEFAULT_TOLERANCE;
508   dec->priv->plc = DEFAULT_PLC;
509   dec->priv->drainable = DEFAULT_DRAINABLE;
510   dec->priv->needs_format = DEFAULT_NEEDS_FORMAT;
511   dec->priv->max_errors = GST_AUDIO_DECODER_MAX_ERRORS;
512 
513   /* init state */
514   dec->priv->ctx.min_latency = 0;
515   dec->priv->ctx.max_latency = 0;
516   gst_audio_decoder_reset (dec, TRUE);
517   GST_DEBUG_OBJECT (dec, "init ok");
518 }
519 
520 static void
521 gst_audio_decoder_reset (GstAudioDecoder * dec, gboolean full)
522 {
523   GST_DEBUG_OBJECT (dec, "gst_audio_decoder_reset");
524 
525   GST_AUDIO_DECODER_STREAM_LOCK (dec);
526 
527   if (full) {
528     dec->priv->active = FALSE;
529     GST_OBJECT_LOCK (dec);
530     dec->priv->bytes_in = 0;
531     dec->priv->samples_out = 0;
532     GST_OBJECT_UNLOCK (dec);
533     dec->priv->agg = -1;
534     dec->priv->error_count = 0;
535     gst_audio_decoder_clear_queues (dec);
536 
537     if (dec->priv->taglist) {
538       gst_tag_list_unref (dec->priv->taglist);
539       dec->priv->taglist = NULL;
540     }
541     dec->priv->decoder_tags_merge_mode = GST_TAG_MERGE_KEEP_ALL;
542     if (dec->priv->upstream_tags) {
543       gst_tag_list_unref (dec->priv->upstream_tags);
544       dec->priv->upstream_tags = NULL;
545     }
546     dec->priv->taglist_changed = FALSE;
547 
548     gst_segment_init (&dec->input_segment, GST_FORMAT_TIME);
549     gst_segment_init (&dec->output_segment, GST_FORMAT_TIME);
550     dec->priv->in_out_segment_sync = TRUE;
551 #ifdef OHOS_OPT_PERFORMANCE
552     // ohos.opt.performance.0006: the PTS segment of the first frame is calibrated to improve the performance.
553     dec->priv->has_recv_first_frame = FALSE;
554     dec->priv->has_push_first_frame = FALSE;
555 #endif
556 
557     g_list_foreach (dec->priv->pending_events, (GFunc) gst_event_unref, NULL);
558     g_list_free (dec->priv->pending_events);
559     dec->priv->pending_events = NULL;
560 
561     if (dec->priv->ctx.allocator)
562       gst_object_unref (dec->priv->ctx.allocator);
563 
564     GST_OBJECT_LOCK (dec);
565     dec->priv->decode_flags_override = FALSE;
566     gst_caps_replace (&dec->priv->ctx.input_caps, NULL);
567     gst_caps_replace (&dec->priv->ctx.caps, NULL);
568     gst_caps_replace (&dec->priv->ctx.allocation_caps, NULL);
569 
570     memset (&dec->priv->ctx, 0, sizeof (dec->priv->ctx));
571 
572     gst_audio_info_init (&dec->priv->ctx.info);
573     GST_OBJECT_UNLOCK (dec);
574     dec->priv->ctx.had_output_data = FALSE;
575     dec->priv->ctx.had_input_data = FALSE;
576   }
577 
578   g_queue_foreach (&dec->priv->frames, (GFunc) gst_buffer_unref, NULL);
579   g_queue_clear (&dec->priv->frames);
580   gst_adapter_clear (dec->priv->adapter);
581   gst_adapter_clear (dec->priv->adapter_out);
582   dec->priv->out_ts = GST_CLOCK_TIME_NONE;
583   dec->priv->out_dur = 0;
584   dec->priv->prev_ts = GST_CLOCK_TIME_NONE;
585   dec->priv->prev_distance = 0;
586   dec->priv->drained = TRUE;
587   dec->priv->base_ts = GST_CLOCK_TIME_NONE;
588   dec->priv->samples = 0;
589   dec->priv->discont = TRUE;
590   dec->priv->sync_flush = FALSE;
591 
592   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
593 }
594 
595 static void
596 gst_audio_decoder_finalize (GObject * object)
597 {
598   GstAudioDecoder *dec;
599 
600   g_return_if_fail (GST_IS_AUDIO_DECODER (object));
601   dec = GST_AUDIO_DECODER (object);
602 
603   if (dec->priv->adapter) {
604     g_object_unref (dec->priv->adapter);
605   }
606   if (dec->priv->adapter_out) {
607     g_object_unref (dec->priv->adapter_out);
608   }
609 
610   g_rec_mutex_clear (&dec->stream_lock);
611 
612   G_OBJECT_CLASS (parent_class)->finalize (object);
613 }
614 
615 static GstEvent *
616 gst_audio_decoder_create_merged_tags_event (GstAudioDecoder * dec)
617 {
618   GstTagList *merged_tags;
619 
620   GST_LOG_OBJECT (dec, "upstream : %" GST_PTR_FORMAT, dec->priv->upstream_tags);
621   GST_LOG_OBJECT (dec, "decoder  : %" GST_PTR_FORMAT, dec->priv->taglist);
622   GST_LOG_OBJECT (dec, "mode     : %d", dec->priv->decoder_tags_merge_mode);
623 
624   merged_tags =
625       gst_tag_list_merge (dec->priv->upstream_tags,
626       dec->priv->taglist, dec->priv->decoder_tags_merge_mode);
627 
628   GST_DEBUG_OBJECT (dec, "merged   : %" GST_PTR_FORMAT, merged_tags);
629 
630   if (merged_tags == NULL)
631     return NULL;
632 
633   if (gst_tag_list_is_empty (merged_tags)) {
634     gst_tag_list_unref (merged_tags);
635     return NULL;
636   }
637 
638   return gst_event_new_tag (merged_tags);
639 }
640 
641 static gboolean
642 gst_audio_decoder_push_event (GstAudioDecoder * dec, GstEvent * event)
643 {
644   switch (GST_EVENT_TYPE (event)) {
645     case GST_EVENT_SEGMENT:{
646       GstSegment seg;
647 
648       GST_AUDIO_DECODER_STREAM_LOCK (dec);
649       gst_event_copy_segment (event, &seg);
650 
651 #ifdef OHOS_OPT_PERFORMANCE
652       // ohos.opt.performance.0006: add segment info
653       GST_INFO_OBJECT (dec, "starting segment %" GST_SEGMENT_FORMAT, &seg);
654 #else
655       GST_DEBUG_OBJECT (dec, "starting segment %" GST_SEGMENT_FORMAT, &seg);
656 #endif
657 
658       dec->output_segment = seg;
659       dec->priv->in_out_segment_sync =
660           gst_segment_is_equal (&dec->input_segment, &seg);
661       GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
662       break;
663     }
664     default:
665       break;
666   }
667 
668   return gst_pad_push_event (dec->srcpad, event);
669 }
670 
671 static gboolean
672 gst_audio_decoder_negotiate_default (GstAudioDecoder * dec)
673 {
674   GstAudioDecoderClass *klass;
675   gboolean res = TRUE;
676   GstCaps *caps;
677   GstCaps *prevcaps;
678   GstQuery *query = NULL;
679   GstAllocator *allocator;
680   GstAllocationParams params;
681 
682   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
683   g_return_val_if_fail (GST_AUDIO_INFO_IS_VALID (&dec->priv->ctx.info), FALSE);
684   g_return_val_if_fail (GST_IS_CAPS (dec->priv->ctx.caps), FALSE);
685 
686   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
687 
688   caps = dec->priv->ctx.caps;
689   if (dec->priv->ctx.allocation_caps == NULL)
690     dec->priv->ctx.allocation_caps = gst_caps_ref (caps);
691 
692   GST_DEBUG_OBJECT (dec, "setting src caps %" GST_PTR_FORMAT, caps);
693 
694   if (dec->priv->pending_events) {
695     GList **pending_events, *l;
696 
697     pending_events = &dec->priv->pending_events;
698 
699     GST_DEBUG_OBJECT (dec, "Pushing pending events");
700     for (l = *pending_events; l;) {
701       GstEvent *event = GST_EVENT (l->data);
702       GList *tmp;
703 
704       if (GST_EVENT_TYPE (event) < GST_EVENT_CAPS) {
705         gst_audio_decoder_push_event (dec, l->data);
706         tmp = l;
707         l = l->next;
708         *pending_events = g_list_delete_link (*pending_events, tmp);
709       } else {
710         l = l->next;
711       }
712     }
713   }
714 
715   prevcaps = gst_pad_get_current_caps (dec->srcpad);
716   if (!prevcaps || !gst_caps_is_equal (prevcaps, caps))
717     res = gst_pad_set_caps (dec->srcpad, caps);
718   if (prevcaps)
719     gst_caps_unref (prevcaps);
720 
721   if (!res)
722     goto done;
723   dec->priv->ctx.output_format_changed = FALSE;
724 
725   query = gst_query_new_allocation (dec->priv->ctx.allocation_caps, TRUE);
726   if (!gst_pad_peer_query (dec->srcpad, query)) {
727     GST_DEBUG_OBJECT (dec, "didn't get downstream ALLOCATION hints");
728   }
729 
730   g_assert (klass->decide_allocation != NULL);
731   res = klass->decide_allocation (dec, query);
732 
733   GST_DEBUG_OBJECT (dec, "ALLOCATION (%d) params: %" GST_PTR_FORMAT, res,
734       query);
735 
736   if (!res)
737     goto no_decide_allocation;
738 
739   /* we got configuration from our peer or the decide_allocation method,
740    * parse them */
741   if (gst_query_get_n_allocation_params (query) > 0) {
742     gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
743   } else {
744     allocator = NULL;
745     gst_allocation_params_init (&params);
746   }
747 
748   if (dec->priv->ctx.allocator)
749     gst_object_unref (dec->priv->ctx.allocator);
750   dec->priv->ctx.allocator = allocator;
751   dec->priv->ctx.params = params;
752 
753 done:
754 
755   if (query)
756     gst_query_unref (query);
757 
758   return res;
759 
760   /* ERRORS */
761 no_decide_allocation:
762   {
763     GST_WARNING_OBJECT (dec, "Subclass failed to decide allocation");
764     goto done;
765   }
766 }
767 
768 static gboolean
769 gst_audio_decoder_negotiate_unlocked (GstAudioDecoder * dec)
770 {
771   GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (dec);
772   gboolean ret = TRUE;
773 
774   if (G_LIKELY (klass->negotiate))
775     ret = klass->negotiate (dec);
776 
777   return ret;
778 }
779 
780 /**
781  * gst_audio_decoder_negotiate:
782  * @dec: a #GstAudioDecoder
783  *
784  * Negotiate with downstream elements the currently configured #GstAudioInfo.
785  * Unmarks GST_PAD_FLAG_NEED_RECONFIGURE in any case, but marks it again if
786  * negotiation fails.
787  *
788  * Returns: %TRUE if the negotiation succeeded, else %FALSE.
789  */
790 gboolean
791 gst_audio_decoder_negotiate (GstAudioDecoder * dec)
792 {
793   GstAudioDecoderClass *klass;
794   gboolean res = TRUE;
795 
796   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
797 
798   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
799 
800   GST_AUDIO_DECODER_STREAM_LOCK (dec);
801   gst_pad_check_reconfigure (dec->srcpad);
802   if (klass->negotiate) {
803     res = klass->negotiate (dec);
804     if (!res)
805       gst_pad_mark_reconfigure (dec->srcpad);
806   }
807   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
808 
809   return res;
810 }
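/* Illustrative subclass usage (not part of the base class itself): after
 * reconfiguring its output, e.g. via gst_audio_decoder_set_output_format(),
 * a subclass may force the new caps to be negotiated immediately instead of
 * waiting for the next output buffer:
 *
 *   if (!gst_audio_decoder_negotiate (dec))
 *     return GST_FLOW_NOT_NEGOTIATED;
 */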
811 
812 /**
813  * gst_audio_decoder_set_output_format:
814  * @dec: a #GstAudioDecoder
815  * @info: #GstAudioInfo
816  *
817  * Configure output info on the srcpad of @dec.
818  *
819  * Returns: %TRUE on success.
820  **/
821 gboolean
822 gst_audio_decoder_set_output_format (GstAudioDecoder * dec,
823     const GstAudioInfo * info)
824 {
825   gboolean res = TRUE;
826   GstCaps *caps = NULL;
827 
828   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
829   g_return_val_if_fail (GST_AUDIO_INFO_IS_VALID (info), FALSE);
830 
831   /* If the audio info can't be converted to caps,
832    * it was invalid */
833   caps = gst_audio_info_to_caps (info);
834   if (!caps) {
835     GST_WARNING_OBJECT (dec, "invalid output format");
836     return FALSE;
837   }
838 
839   res = gst_audio_decoder_set_output_caps (dec, caps);
840   gst_caps_unref (caps);
841 
842   return res;
843 }
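/* Illustrative subclass usage (rate and channels are assumed to come from the
 * codec's parsed headers rather than being hard-coded):
 *
 *   GstAudioInfo info;
 *
 *   gst_audio_info_init (&info);
 *   gst_audio_info_set_format (&info, GST_AUDIO_FORMAT_F32, rate, channels,
 *       NULL);
 *   if (!gst_audio_decoder_set_output_format (dec, &info))
 *     return FALSE;
 */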
844 
845 /**
846  * gst_audio_decoder_set_output_caps:
847  * @dec: a #GstAudioDecoder
848  * @caps: (transfer none): (fixed) #GstCaps
849  *
850  * Configure output caps on the srcpad of @dec. Similar to
851  * gst_audio_decoder_set_output_format(), but allows subclasses to specify
852  * output caps that can't be expressed via #GstAudioInfo e.g. caps that have
853  * caps features.
854  *
855  * Returns: %TRUE on success.
856  *
857  * Since: 1.16
858  **/
859 gboolean
860 gst_audio_decoder_set_output_caps (GstAudioDecoder * dec, GstCaps * caps)
861 {
862   gboolean res = TRUE;
863   guint old_rate;
864   GstCaps *templ_caps;
865   GstAudioInfo info;
866 
867   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
868 
869   GST_DEBUG_OBJECT (dec, "Setting srcpad caps %" GST_PTR_FORMAT, caps);
870 
871   GST_AUDIO_DECODER_STREAM_LOCK (dec);
872 
873   if (!gst_caps_is_fixed (caps))
874     goto refuse_caps;
875 
876   /* check if caps can be parsed */
877   if (!gst_audio_info_from_caps (&info, caps))
878     goto refuse_caps;
879 
880   /* Only allow caps that are a subset of the template caps */
881   templ_caps = gst_pad_get_pad_template_caps (dec->srcpad);
882   if (!gst_caps_is_subset (caps, templ_caps)) {
883     GST_WARNING_OBJECT (dec, "Requested output format %" GST_PTR_FORMAT
884         " does not match template %" GST_PTR_FORMAT, caps, templ_caps);
885     gst_caps_unref (templ_caps);
886     goto refuse_caps;
887   }
888   gst_caps_unref (templ_caps);
889 
890   /* adjust ts tracking to new sample rate */
891   old_rate = GST_AUDIO_INFO_RATE (&dec->priv->ctx.info);
892   if (GST_CLOCK_TIME_IS_VALID (dec->priv->base_ts) && old_rate) {
893     dec->priv->base_ts +=
894         GST_FRAMES_TO_CLOCK_TIME (dec->priv->samples, old_rate);
895     dec->priv->samples = 0;
896   }
897 
898   /* copy the GstAudioInfo */
899   GST_OBJECT_LOCK (dec);
900   dec->priv->ctx.info = info;
901   GST_OBJECT_UNLOCK (dec);
902 
903   gst_caps_replace (&dec->priv->ctx.caps, caps);
904   dec->priv->ctx.output_format_changed = TRUE;
905 
906 done:
907   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
908 
909   return res;
910 
911   /* ERRORS */
912 refuse_caps:
913   {
914     GST_WARNING_OBJECT (dec, "invalid output format");
915     res = FALSE;
916     goto done;
917   }
918 }
919 
920 static gboolean
921 gst_audio_decoder_sink_setcaps (GstAudioDecoder * dec, GstCaps * caps)
922 {
923   GstAudioDecoderClass *klass;
924   gboolean res = TRUE;
925 
926   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
927 
928   GST_DEBUG_OBJECT (dec, "caps: %" GST_PTR_FORMAT, caps);
929 
930   GST_AUDIO_DECODER_STREAM_LOCK (dec);
931 
932   if (dec->priv->ctx.input_caps
933       && gst_caps_is_equal (dec->priv->ctx.input_caps, caps)) {
934     GST_DEBUG_OBJECT (dec, "Caps did not change, not setting again");
935     goto done;
936   }
937 
938   /* NOTE pbutils only needed here */
939   /* TODO maybe (only) upstream demuxer/parser etc should handle this ? */
940 #if 0
941   if (!dec->priv->taglist)
942     dec->priv->taglist = gst_tag_list_new ();
943   dec->priv->taglist = gst_tag_list_make_writable (dec->priv->taglist);
944   gst_pb_utils_add_codec_description_to_tag_list (dec->priv->taglist,
945       GST_TAG_AUDIO_CODEC, caps);
946   dec->priv->taglist_changed = TRUE;
947 #endif
948 
949   if (klass->set_format)
950     res = klass->set_format (dec, caps);
951 
952   if (res)
953     gst_caps_replace (&dec->priv->ctx.input_caps, caps);
954 
955 done:
956   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
957 
958   return res;
959 }
960 
961 static void
962 gst_audio_decoder_setup (GstAudioDecoder * dec)
963 {
964   GstQuery *query;
965   gboolean res;
966 
967   /* if we are in a live pipeline, latency messing is a no-no */
968   query = gst_query_new_latency ();
969   res = gst_pad_peer_query (dec->sinkpad, query);
970   if (res) {
971     gst_query_parse_latency (query, &res, NULL, NULL);
972     res = !res;
973   }
974   gst_query_unref (query);
975 
976   /* normalize to bool */
977   dec->priv->agg = ! !res;
978 }
979 
980 static GstFlowReturn
981 gst_audio_decoder_push_forward (GstAudioDecoder * dec, GstBuffer * buf)
982 {
983   GstAudioDecoderClass *klass;
984   GstAudioDecoderPrivate *priv;
985   GstAudioDecoderContext *ctx;
986   GstFlowReturn ret = GST_FLOW_OK;
987   GstClockTime ts;
988 
989   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
990   priv = dec->priv;
991   ctx = &dec->priv->ctx;
992 
993   g_return_val_if_fail (ctx->info.bpf != 0, GST_FLOW_ERROR);
994 
995   if (G_UNLIKELY (!buf)) {
996     g_assert_not_reached ();
997     return GST_FLOW_OK;
998   }
999 
1000   ctx->had_output_data = TRUE;
1001   ts = GST_BUFFER_PTS (buf);
1002 
1003   GST_LOG_OBJECT (dec,
1004       "clipping buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT
1005       ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buf),
1006       GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
1007       GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
1008 
1009   /* clip buffer */
1010   buf = gst_audio_buffer_clip (buf, &dec->output_segment, ctx->info.rate,
1011       ctx->info.bpf);
1012   if (G_UNLIKELY (!buf)) {
1013     GST_DEBUG_OBJECT (dec, "no data after clipping to segment");
1014     /* only check and return EOS if upstream is still
1015      * in the same segment and thus interested in it */
1016     if (dec->priv->in_out_segment_sync) {
1017       if (dec->output_segment.rate >= 0) {
1018         if (ts >= dec->output_segment.stop)
1019           ret = GST_FLOW_EOS;
1020       } else if (ts < dec->output_segment.start) {
1021         ret = GST_FLOW_EOS;
1022       }
1023     }
1024     goto exit;
1025   }
1026 
1027   /* decorate */
1028   if (G_UNLIKELY (priv->discont)) {
1029     GST_LOG_OBJECT (dec, "marking discont");
1030     GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
1031     priv->discont = FALSE;
1032   }
1033 
1034   /* track where we are */
1035   if (G_LIKELY (GST_BUFFER_PTS_IS_VALID (buf))) {
1036     /* duration should always be valid for raw audio */
1037     g_assert (GST_BUFFER_DURATION_IS_VALID (buf));
1038     dec->output_segment.position =
1039         GST_BUFFER_PTS (buf) + GST_BUFFER_DURATION (buf);
1040   }
1041 
1042   if (klass->pre_push) {
1043     /* last chance for subclass to do some dirty stuff */
1044     ret = klass->pre_push (dec, &buf);
1045     if (ret != GST_FLOW_OK || !buf) {
1046       GST_DEBUG_OBJECT (dec, "subclass returned %s, buf %p",
1047           gst_flow_get_name (ret), buf);
1048       if (buf)
1049         gst_buffer_unref (buf);
1050       goto exit;
1051     }
1052   }
1053 
1054   GST_LOG_OBJECT (dec,
1055       "pushing buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT
1056       ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buf),
1057       GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
1058       GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
1059 
1060 #ifdef OHOS_OPT_PERFORMANCE
1061   // ohos.opt.performance.0006: the PTS segment of the first frame is calibrated to improve the performance.
1062   if (!priv->has_push_first_frame && GST_BUFFER_PTS (buf) != GST_CLOCK_TIME_NONE) {
1063     priv->has_push_first_frame = TRUE;
1064     GST_WARNING_OBJECT (dec, "audiodecoder push first frame");
1065     dec->output_segment.flags |= GST_SEGMENT_FLAG_FIRST_FRAME;
1066     dec->output_segment.start = GST_BUFFER_PTS (buf);
1067     GstEvent *event = gst_event_new_segment (&dec->output_segment);
1068     if (event) {
1069       ret = gst_pad_push_event (dec->srcpad, event);
1070     }
1071   }
1072 #endif
1073 
1074 #ifdef OHOS_OPT_PERFORMANCE
1075   // ohos.opt.performance.0005
1076   // add trace
1077   GstStartTrace("AudioDecoder:push buffer to sink");
1078 #endif
1079   ret = gst_pad_push (dec->srcpad, buf);
1080 #ifdef OHOS_OPT_PERFORMANCE
1081   GstFinishTrace();
1082 #endif
1083 
1084 exit:
1085   return ret;
1086 }
1087 
1088 /* mini aggregator combining output buffers into fewer larger ones,
1089  * if so allowed/configured */
1090 static GstFlowReturn
1091 gst_audio_decoder_output (GstAudioDecoder * dec, GstBuffer * buf)
1092 {
1093   GstAudioDecoderPrivate *priv;
1094   GstFlowReturn ret = GST_FLOW_OK;
1095   GstBuffer *inbuf = NULL;
1096 
1097   priv = dec->priv;
1098 
1099   if (G_UNLIKELY (priv->agg < 0))
1100     gst_audio_decoder_setup (dec);
1101 
1102   if (G_LIKELY (buf)) {
1103     GST_LOG_OBJECT (dec,
1104         "output buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT
1105         ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buf),
1106         GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
1107         GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
1108   }
1109 
1110 again:
1111   inbuf = NULL;
1112   if (priv->agg && dec->priv->latency > 0 &&
1113       priv->ctx.info.layout == GST_AUDIO_LAYOUT_INTERLEAVED) {
1114     gint av;
1115     gboolean assemble = FALSE;
1116     const GstClockTimeDiff tol = 10 * GST_MSECOND;
1117     GstClockTimeDiff diff = -100 * GST_MSECOND;
1118 
1119     av = gst_adapter_available (priv->adapter_out);
1120     if (G_UNLIKELY (!buf)) {
1121       /* forcibly send current */
1122       assemble = TRUE;
1123       GST_LOG_OBJECT (dec, "forcing fragment flush");
1124     } else if (av && (!GST_BUFFER_PTS_IS_VALID (buf) ||
1125             !GST_CLOCK_TIME_IS_VALID (priv->out_ts) ||
1126             ((diff = GST_CLOCK_DIFF (GST_BUFFER_PTS (buf),
1127                         priv->out_ts + priv->out_dur)) > tol) || diff < -tol)) {
1128       assemble = TRUE;
1129       GST_LOG_OBJECT (dec, "buffer %d ms apart from current fragment",
1130           (gint) (diff / GST_MSECOND));
1131     } else {
1132       /* add or start collecting */
1133       if (!av) {
1134         GST_LOG_OBJECT (dec, "starting new fragment");
1135         priv->out_ts = GST_BUFFER_PTS (buf);
1136       } else {
1137         GST_LOG_OBJECT (dec, "adding to fragment");
1138       }
1139       gst_adapter_push (priv->adapter_out, buf);
1140       priv->out_dur += GST_BUFFER_DURATION (buf);
1141       av += gst_buffer_get_size (buf);
1142       buf = NULL;
1143     }
1144     if (priv->out_dur > dec->priv->latency)
1145       assemble = TRUE;
1146     if (av && assemble) {
1147       GST_LOG_OBJECT (dec, "assembling fragment");
1148       inbuf = buf;
1149       buf = gst_adapter_take_buffer (priv->adapter_out, av);
1150       GST_BUFFER_PTS (buf) = priv->out_ts;
1151       GST_BUFFER_DURATION (buf) = priv->out_dur;
1152       priv->out_ts = GST_CLOCK_TIME_NONE;
1153       priv->out_dur = 0;
1154     }
1155   }
1156 
1157   if (G_LIKELY (buf)) {
1158     if (dec->output_segment.rate > 0.0) {
1159 #ifdef OHOS_OPT_PERFORMANCE
1160       // ohos.opt.performance.0005
1161       // add trace
1162       GstStartTrace("AudioDecoder:push buffer");
1163 #endif
1164       ret = gst_audio_decoder_push_forward (dec, buf);
1165 #ifdef OHOS_OPT_PERFORMANCE
1166       GstFinishTrace();
1167 #endif
1168       GST_LOG_OBJECT (dec, "buffer pushed: %s", gst_flow_get_name (ret));
1169     } else {
1170       ret = GST_FLOW_OK;
1171       priv->queued = g_list_prepend (priv->queued, buf);
1172       GST_LOG_OBJECT (dec, "buffer queued");
1173     }
1174 
1175     if (inbuf) {
1176       buf = inbuf;
1177       goto again;
1178     }
1179   }
1180 
1181   return ret;
1182 }
1183 
1184 static void
1185 send_pending_events (GstAudioDecoder * dec)
1186 {
1187   GstAudioDecoderPrivate *priv = dec->priv;
1188   GList *pending_events, *l;
1189 
1190   pending_events = priv->pending_events;
1191   priv->pending_events = NULL;
1192 
1193   GST_DEBUG_OBJECT (dec, "Pushing pending events");
1194   for (l = pending_events; l; l = l->next)
1195     gst_audio_decoder_push_event (dec, l->data);
1196   g_list_free (pending_events);
1197 }
1198 
1199 /* Iterate the list of pending events, and ensure
1200  * the current output segment is up to date for
1201  * decoding */
1202 static void
1203 apply_pending_events (GstAudioDecoder * dec)
1204 {
1205   GstAudioDecoderPrivate *priv = dec->priv;
1206   GList *l;
1207 
1208   GST_DEBUG_OBJECT (dec, "Applying pending segments");
1209   for (l = priv->pending_events; l; l = l->next) {
1210     GstEvent *event = GST_EVENT (l->data);
1211     switch (GST_EVENT_TYPE (event)) {
1212       case GST_EVENT_SEGMENT:{
1213         GstSegment seg;
1214 
1215         GST_AUDIO_DECODER_STREAM_LOCK (dec);
1216         gst_event_copy_segment (event, &seg);
1217 
1218         GST_DEBUG_OBJECT (dec, "starting segment %" GST_SEGMENT_FORMAT, &seg);
1219 
1220         dec->output_segment = seg;
1221         dec->priv->in_out_segment_sync =
1222             gst_segment_is_equal (&dec->input_segment, &seg);
1223         GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
1224         break;
1225       }
1226       default:
1227         break;
1228     }
1229   }
1230 }
1231 
1232 static GstFlowReturn
1233 check_pending_reconfigure (GstAudioDecoder * dec)
1234 {
1235   GstFlowReturn ret = GST_FLOW_OK;
1236   GstAudioDecoderContext *ctx;
1237   gboolean needs_reconfigure;
1238 
1239   ctx = &dec->priv->ctx;
1240 
1241   needs_reconfigure = gst_pad_check_reconfigure (dec->srcpad);
1242   if (G_UNLIKELY (ctx->output_format_changed ||
1243           (GST_AUDIO_INFO_IS_VALID (&ctx->info)
1244               && needs_reconfigure))) {
1245     if (!gst_audio_decoder_negotiate_unlocked (dec)) {
1246       gst_pad_mark_reconfigure (dec->srcpad);
1247       if (GST_PAD_IS_FLUSHING (dec->srcpad))
1248         ret = GST_FLOW_FLUSHING;
1249       else
1250         ret = GST_FLOW_NOT_NEGOTIATED;
1251     }
1252   }
1253   return ret;
1254 }
1255 
1256 static gboolean
1257 gst_audio_decoder_transform_meta_default (GstAudioDecoder *
1258     decoder, GstBuffer * outbuf, GstMeta * meta, GstBuffer * inbuf)
1259 {
1260   const GstMetaInfo *info = meta->info;
1261   const gchar *const *tags;
1262   const gchar *const supported_tags[] = {
1263     GST_META_TAG_AUDIO_STR,
1264     GST_META_TAG_AUDIO_CHANNELS_STR,
1265     NULL,
1266   };
1267 
1268   tags = gst_meta_api_type_get_tags (info->api);
1269 
1270   if (!tags)
1271     return TRUE;
1272 
1273   while (*tags) {
1274     if (!g_strv_contains (supported_tags, *tags))
1275       return FALSE;
1276     tags++;
1277   }
1278 
1279   return TRUE;
1280 }
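/* A subclass that wants additional metas preserved can override
 * @transform_meta.  Illustrative override (MY_CUSTOM_META_API_TYPE is a
 * hypothetical meta API type registered by the subclass):
 *
 *   static gboolean
 *   gst_my_dec_transform_meta (GstAudioDecoder * dec, GstBuffer * outbuf,
 *       GstMeta * meta, GstBuffer * inbuf)
 *   {
 *     if (meta->info->api == MY_CUSTOM_META_API_TYPE)
 *       return TRUE;
 *     // fall back to the default tag-based decision for everything else
 *     return GST_AUDIO_DECODER_CLASS (gst_my_dec_parent_class)->transform_meta
 *         (dec, outbuf, meta, inbuf);
 *   }
 */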
1281 
1282 typedef struct
1283 {
1284   GstAudioDecoder *decoder;
1285   GstBuffer *outbuf;
1286 } CopyMetaData;
1287 
1288 static gboolean
1289 foreach_metadata (GstBuffer * inbuf, GstMeta ** meta, gpointer user_data)
1290 {
1291   CopyMetaData *data = user_data;
1292   GstAudioDecoder *decoder = data->decoder;
1293   GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (decoder);
1294   GstBuffer *outbuf = data->outbuf;
1295   const GstMetaInfo *info = (*meta)->info;
1296   gboolean do_copy = FALSE;
1297 
1298   if (gst_meta_api_type_has_tag (info->api, _gst_meta_tag_memory)) {
1299     /* never call the transform_meta with memory specific metadata */
1300     GST_DEBUG_OBJECT (decoder, "not copying memory specific metadata %s",
1301         g_type_name (info->api));
1302     do_copy = FALSE;
1303   } else if (klass->transform_meta) {
1304     do_copy = klass->transform_meta (decoder, outbuf, *meta, inbuf);
1305     GST_DEBUG_OBJECT (decoder, "transformed metadata %s: copy: %d",
1306         g_type_name (info->api), do_copy);
1307   }
1308 
1309   /* we only copy metadata when the subclass implemented a transform_meta
1310    * function and when it returns %TRUE */
1311   if (do_copy && info->transform_func) {
1312     GstMetaTransformCopy copy_data = { FALSE, 0, -1 };
1313     GST_DEBUG_OBJECT (decoder, "copy metadata %s", g_type_name (info->api));
1314     /* simply copy then */
1315     info->transform_func (outbuf, *meta, inbuf,
1316         _gst_meta_transform_copy, &copy_data);
1317   }
1318   return TRUE;
1319 }
1320 
1321 /**
1322  * gst_audio_decoder_finish_subframe:
1323  * @dec: a #GstAudioDecoder
1324  * @buf: (transfer full) (allow-none): decoded data
1325  *
1326  * Collects decoded data and pushes it downstream. This function may be called
1327  * multiple times for a given input frame.
1328  *
1329  * @buf may be NULL in which case it is assumed that the current input frame is
1330  * finished. This is equivalent to calling gst_audio_decoder_finish_frame()
1331  * with a NULL buffer and frames=1 after having pushed out all decoded audio
1332  * subframes using this function.
1333  *
1334  * When called with valid data in @buf the source pad caps must have been set
1335  * already.
1336  *
1337  * Note that a frame received in #GstAudioDecoderClass.handle_frame() may be
1338  * invalidated by a call to this function.
1339  *
1340  * Returns: a #GstFlowReturn that should be escalated to caller (of caller)
1341  *
1342  * Since: 1.16
1343  */
1344 GstFlowReturn
1345 gst_audio_decoder_finish_subframe (GstAudioDecoder * dec, GstBuffer * buf)
1346 {
1347   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), GST_FLOW_ERROR);
1348 
1349   if (buf == NULL)
1350     return gst_audio_decoder_finish_frame_or_subframe (dec, NULL, 1);
1351   else
1352     return gst_audio_decoder_finish_frame_or_subframe (dec, buf, 0);
1353 }
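/* Illustrative subclass usage from @handle_frame (my_codec_take_subframe() is
 * a hypothetical helper): push several decoded chunks for one input frame and
 * then mark that frame as consumed:
 *
 *   while ((outbuf = my_codec_take_subframe (codec)) != NULL)
 *     ret = gst_audio_decoder_finish_subframe (dec, outbuf);
 *   // all subframes pushed; consume the pending input frame
 *   ret = gst_audio_decoder_finish_frame (dec, NULL, 1);
 */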
1354 
1355 /**
1356  * gst_audio_decoder_finish_frame:
1357  * @dec: a #GstAudioDecoder
1358  * @buf: (transfer full) (allow-none): decoded data
1359  * @frames: number of decoded frames represented by decoded data
1360  *
1361  * Collects decoded data and pushes it downstream.
1362  *
1363  * @buf may be NULL in which case the indicated number of frames
1364  * are discarded and considered to have produced no output
1365  * (e.g. lead-in or setup frames).
1366  * Otherwise, source pad caps must be set when it is called with valid
1367  * data in @buf.
1368  *
1369  * Note that a frame received in #GstAudioDecoderClass.handle_frame() may be
1370  * invalidated by a call to this function.
1371  *
1372  * Returns: a #GstFlowReturn that should be escalated to caller (of caller)
1373  */
1374 GstFlowReturn
1375 gst_audio_decoder_finish_frame (GstAudioDecoder * dec, GstBuffer * buf,
1376     gint frames)
1377 {
1378   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), GST_FLOW_ERROR);
1379 
1380   /* no dummy calls please */
1381   g_return_val_if_fail (frames != 0, GST_FLOW_ERROR);
1382 
1383   return gst_audio_decoder_finish_frame_or_subframe (dec, buf, frames);
1384 }
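/* Illustrative subclass usage (my_codec_frame_is_header() is a hypothetical
 * check): a frame that yields no audio, such as a header or lead-in packet,
 * is still accounted for by passing a NULL buffer:
 *
 *   if (my_codec_frame_is_header (frame))
 *     return gst_audio_decoder_finish_frame (dec, NULL, 1);
 */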
1385 
1386 /* frames == 0 indicates that this is a sub-frame and further sub-frames may
1387  * follow for the current input frame. */
1388 static GstFlowReturn
1389 gst_audio_decoder_finish_frame_or_subframe (GstAudioDecoder * dec,
1390     GstBuffer * buf, gint frames)
1391 {
1392   GstAudioDecoderPrivate *priv;
1393   GstAudioDecoderContext *ctx;
1394   GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (dec);
1395   GstAudioMeta *meta;
1396   GstClockTime ts, next_ts;
1397   gsize size, samples = 0;
1398   GstFlowReturn ret = GST_FLOW_OK;
1399   GQueue inbufs = G_QUEUE_INIT;
1400   gboolean is_subframe = (frames == 0);
1401   gboolean do_check_resync;
1402 
1403   /* subclass should not hand us an empty (zero-sized) buffer */
1404   g_return_val_if_fail (buf == NULL || gst_buffer_get_size (buf) > 0,
1405       GST_FLOW_ERROR);
1406 
1407   /* if it's a subframe (frames == 0) we must have a valid buffer */
1408   g_assert (!is_subframe || buf != NULL);
1409 
1410   priv = dec->priv;
1411   ctx = &dec->priv->ctx;
1412   meta = buf ? gst_buffer_get_audio_meta (buf) : NULL;
1413   size = buf ? gst_buffer_get_size (buf) : 0;
1414   samples = buf ? (meta ? meta->samples : size / ctx->info.bpf) : 0;
1415 
1416   /* must know the output format by now */
1417   g_return_val_if_fail (buf == NULL || GST_AUDIO_INFO_IS_VALID (&ctx->info),
1418       GST_FLOW_ERROR);
1419 
1420   GST_LOG_OBJECT (dec,
1421       "accepting %" G_GSIZE_FORMAT " bytes == %" G_GSIZE_FORMAT
1422       " samples for %d frames", buf ? size : 0, samples, frames);
1423 
1424   GST_AUDIO_DECODER_STREAM_LOCK (dec);
1425 
1426   if (buf != NULL && priv->subframe_samples == 0) {
1427     ret = check_pending_reconfigure (dec);
1428     if (ret == GST_FLOW_FLUSHING || ret == GST_FLOW_NOT_NEGOTIATED) {
1429       gst_buffer_unref (buf);
1430       goto exit;
1431     }
1432 
1433     if (priv->pending_events)
1434       send_pending_events (dec);
1435   }
1436 
1437   /* sanity checking */
1438   if (G_LIKELY (buf && ctx->info.bpf)) {
1439     if (!meta || meta->info.layout == GST_AUDIO_LAYOUT_INTERLEAVED) {
1440       /* output should be whole number of sample frames */
1441       if (size % ctx->info.bpf)
1442         goto wrong_buffer;
1443       /* output should have no additional padding */
1444       if (samples != size / ctx->info.bpf)
1445         goto wrong_samples;
1446     } else {
1447       /* can't have more samples than what the buffer fits */
1448       if (samples > size / ctx->info.bpf)
1449         goto wrong_samples;
1450     }
1451   }
1452 
1453   /* frame and ts book-keeping */
1454   if (G_UNLIKELY (frames < 0)) {
1455     if (G_UNLIKELY (-frames - 1 > priv->frames.length)) {
1456       GST_ELEMENT_WARNING (dec, STREAM, DECODE,
1457           ("received more decoded frames %d than provided %d", frames,
1458               priv->frames.length), (NULL));
1459       frames = 0;
1460     } else {
1461       frames = priv->frames.length + frames + 1;
1462     }
1463   } else if (G_UNLIKELY (frames > priv->frames.length)) {
1464     if (G_LIKELY (!priv->force)) {
1465       GST_ELEMENT_WARNING (dec, STREAM, DECODE,
1466           ("received more decoded frames %d than provided %d", frames,
1467               priv->frames.length), (NULL));
1468     }
1469     frames = priv->frames.length;
1470   }
1471 
1472   if (G_LIKELY (priv->frames.length))
1473     ts = GST_BUFFER_PTS (priv->frames.head->data);
1474   else
1475     ts = GST_CLOCK_TIME_NONE;
1476 
1477   GST_DEBUG_OBJECT (dec, "leading frame ts %" GST_TIME_FORMAT,
1478       GST_TIME_ARGS (ts));
1479 
1480   if (is_subframe && priv->frames.length == 0)
1481     goto subframe_without_pending_input_frame;
1482 
1483   /* this will be skipped in the is_subframe case because frames will be 0 */
1484   while (priv->frames.length && frames) {
1485     g_queue_push_tail (&inbufs, g_queue_pop_head (&priv->frames));
1486     dec->priv->ctx.delay = dec->priv->frames.length;
1487     frames--;
1488   }
1489 
1490   if (G_UNLIKELY (!buf))
1491     goto exit;
1492 
1493   /* lock on */
1494   if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (priv->base_ts))) {
1495     priv->base_ts = ts;
1496     GST_DEBUG_OBJECT (dec, "base_ts now %" GST_TIME_FORMAT, GST_TIME_ARGS (ts));
1497   }
1498 
1499   /* still no valid ts, track the segment one */
1500   if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (priv->base_ts)) &&
1501       dec->output_segment.rate > 0.0) {
1502     priv->base_ts = dec->output_segment.start;
1503   }
1504 
1505   /* only check for resync at the beginning of an input/output frame */
1506   do_check_resync = !is_subframe || priv->subframe_samples == 0;
1507 
1508   /* slightly convoluted approach caters for perfect ts if subclass desires. */
1509   if (do_check_resync && GST_CLOCK_TIME_IS_VALID (ts)) {
1510     if (dec->priv->tolerance > 0) {
1511       GstClockTimeDiff diff;
1512 
1513       g_assert (GST_CLOCK_TIME_IS_VALID (priv->base_ts));
1514       next_ts = priv->base_ts +
1515           gst_util_uint64_scale (priv->samples, GST_SECOND, ctx->info.rate);
1516       GST_LOG_OBJECT (dec,
1517           "buffer is %" G_GUINT64_FORMAT " samples past base_ts %"
1518           GST_TIME_FORMAT ", expected ts %" GST_TIME_FORMAT, priv->samples,
1519           GST_TIME_ARGS (priv->base_ts), GST_TIME_ARGS (next_ts));
1520       diff = GST_CLOCK_DIFF (next_ts, ts);
1521       GST_LOG_OBJECT (dec, "ts diff %d ms", (gint) (diff / GST_MSECOND));
1522       /* if within tolerance,
1523        * discard buffer ts and carry on producing perfect stream,
1524        * otherwise resync to ts */
1525       if (G_UNLIKELY (diff < (gint64) - dec->priv->tolerance ||
1526               diff > (gint64) dec->priv->tolerance)) {
1527         GST_DEBUG_OBJECT (dec, "base_ts resync");
1528         priv->base_ts = ts;
1529         priv->samples = 0;
1530       }
1531     } else {
1532       GST_DEBUG_OBJECT (dec, "base_ts resync");
1533       priv->base_ts = ts;
1534       priv->samples = 0;
1535     }
1536   }
1537 
1538   /* delayed one-shot stuff until confirmed data */
1539   if (priv->taglist && priv->taglist_changed) {
1540     GstEvent *tags_event;
1541 
1542     tags_event = gst_audio_decoder_create_merged_tags_event (dec);
1543 
1544     if (tags_event != NULL)
1545       gst_audio_decoder_push_event (dec, tags_event);
1546 
1547     priv->taglist_changed = FALSE;
1548   }
1549 
1550   buf = gst_buffer_make_writable (buf);
1551   if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (priv->base_ts))) {
1552     GST_BUFFER_PTS (buf) =
1553         priv->base_ts +
1554         GST_FRAMES_TO_CLOCK_TIME (priv->samples, ctx->info.rate);
1555     GST_BUFFER_DURATION (buf) = priv->base_ts +
1556         GST_FRAMES_TO_CLOCK_TIME (priv->samples + samples, ctx->info.rate) -
1557         GST_BUFFER_PTS (buf);
1558   } else {
1559     GST_BUFFER_PTS (buf) = GST_CLOCK_TIME_NONE;
1560     GST_BUFFER_DURATION (buf) =
1561         GST_FRAMES_TO_CLOCK_TIME (samples, ctx->info.rate);
1562   }
1563 
1564   if (klass->transform_meta) {
1565     if (inbufs.length) {
1566       GList *l;
1567       for (l = inbufs.head; l; l = l->next) {
1568         CopyMetaData data;
1569 
1570         data.decoder = dec;
1571         data.outbuf = buf;
1572         gst_buffer_foreach_meta (l->data, foreach_metadata, &data);
1573       }
1574     } else if (is_subframe) {
1575       CopyMetaData data;
1576       GstBuffer *in_buf;
1577 
1578       /* For subframes we assume a 1:N relationship for now, so we just take
1579        * metas from the first pending input buf */
1580       in_buf = g_queue_peek_head (&priv->frames);
1581       data.decoder = dec;
1582       data.outbuf = buf;
1583       gst_buffer_foreach_meta (in_buf, foreach_metadata, &data);
1584     } else {
1585       GST_WARNING_OBJECT (dec,
1586           "Can't copy metadata because input buffers disappeared");
1587     }
1588   }
1589 
1590   GST_OBJECT_LOCK (dec);
1591   priv->samples += samples;
1592   priv->samples_out += samples;
1593   GST_OBJECT_UNLOCK (dec);
1594 
1595   /* we got data, so note things are looking up */
1596   if (G_UNLIKELY (dec->priv->error_count))
1597     dec->priv->error_count = 0;
1598 
1599   ret = gst_audio_decoder_output (dec, buf);
1600 
1601 exit:
1602   g_queue_foreach (&inbufs, (GFunc) gst_buffer_unref, NULL);
1603   g_queue_clear (&inbufs);
1604 
1605   if (is_subframe)
1606     dec->priv->subframe_samples += samples;
1607   else
1608     dec->priv->subframe_samples = 0;
1609 
1610   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
1611 
1612   return ret;
1613 
1614   /* ERRORS */
1615 wrong_buffer:
1616   {
1617     /* arguably more of a programming error? */
1618     GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL),
1619         ("buffer size %" G_GSIZE_FORMAT " not a multiple of %d", size,
1620             ctx->info.bpf));
1621     gst_buffer_unref (buf);
1622     ret = GST_FLOW_ERROR;
1623     goto exit;
1624   }
1625 wrong_samples:
1626   {
1627     /* arguably more of a programming error? */
1628     GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL),
1629         ("GstAudioMeta samples (%" G_GSIZE_FORMAT ") are inconsistent with "
1630             "the buffer size and layout (size/bpf = %" G_GSIZE_FORMAT ")",
1631             meta->samples, size / ctx->info.bpf));
1632     gst_buffer_unref (buf);
1633     ret = GST_FLOW_ERROR;
1634     goto exit;
1635   }
1636 subframe_without_pending_input_frame:
1637   {
1638     /* arguably more of a programming error? */
1639     GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL),
1640         ("Received decoded subframe, but no pending frame"));
1641     gst_buffer_unref (buf);
1642     ret = GST_FLOW_ERROR;
1643     goto exit;
1644   }
1645 }
1646 
1647 static GstFlowReturn
1648 gst_audio_decoder_handle_frame (GstAudioDecoder * dec,
1649     GstAudioDecoderClass * klass, GstBuffer * buffer)
1650 {
1651   /* Skip decoding and send a GAP instead if
1652    * GST_SEGMENT_FLAG_TRICKMODE_NO_AUDIO is set and we have timestamps
1653    * FIXME: We only do this for forward playback atm, because reverse
1654    * playback would require accumulating GAP events and pushing them
1655    * out in reverse order as for normal audio samples
1656    */
1657   if (G_UNLIKELY (dec->input_segment.rate > 0.0
1658           && dec->input_segment.flags & GST_SEGMENT_FLAG_TRICKMODE_NO_AUDIO)) {
1659     if (buffer) {
1660       GstClockTime ts = GST_BUFFER_PTS (buffer);
1661       if (GST_CLOCK_TIME_IS_VALID (ts)) {
1662         GstEvent *event = gst_event_new_gap (ts, GST_BUFFER_DURATION (buffer));
1663 
1664         gst_buffer_unref (buffer);
1665         GST_LOG_OBJECT (dec, "Skipping decode in trickmode and sending gap");
1666         gst_audio_decoder_handle_gap (dec, event);
1667         return GST_FLOW_OK;
1668       }
1669     }
1670   }
1671 
1672   if (G_LIKELY (buffer)) {
1673     gsize size = gst_buffer_get_size (buffer);
1674     /* keep around for admin */
1675     GST_LOG_OBJECT (dec,
1676         "tracking frame size %" G_GSIZE_FORMAT ", ts %" GST_TIME_FORMAT, size,
1677         GST_TIME_ARGS (GST_BUFFER_PTS (buffer)));
1678     g_queue_push_tail (&dec->priv->frames, buffer);
1679     dec->priv->ctx.delay = dec->priv->frames.length;
1680     GST_OBJECT_LOCK (dec);
1681     dec->priv->bytes_in += size;
1682     GST_OBJECT_UNLOCK (dec);
1683   } else {
1684     GST_LOG_OBJECT (dec, "providing subclass with NULL frame");
1685   }
1686 
1687 #ifdef OHOS_OPT_PERFORMANCE
1688   // ohos.opt.performance.0006: the PTS segment of the first frame is calibrated to improve performance.
1689   if (!dec->priv->has_recv_first_frame) {
1690     dec->priv->has_recv_first_frame = TRUE;
1691     GST_WARNING_OBJECT (dec, "audiodecoder recv first frame");
1692   }
1693 #endif
1694 
1695   return klass->handle_frame (dec, buffer);
1696 }
1697 
1698 /* maybe make this subclass-configurable instead, but this already allows for a
1699  * whole lot of raw samples, so at least quite some encoded data ... */
1700 #define GST_AUDIO_DECODER_MAX_SYNC     10 * 8 * 2 * 1024
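/* (10 * 8 * 2 * 1024 = 163840 bytes, i.e. up to 160 KiB of input may be
 * flushed while (re)searching for sync before parsing is given up on,
 * see the sync_flush check in gst_audio_decoder_push_buffers () below) */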
1701 
1702 static GstFlowReturn
1703 gst_audio_decoder_push_buffers (GstAudioDecoder * dec, gboolean force)
1704 {
1705   GstAudioDecoderClass *klass;
1706   GstAudioDecoderPrivate *priv;
1707   GstAudioDecoderContext *ctx;
1708   GstFlowReturn ret = GST_FLOW_OK;
1709   GstBuffer *buffer;
1710   gint av, flush;
1711 
1712   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
1713   priv = dec->priv;
1714   ctx = &dec->priv->ctx;
1715 
1716   g_return_val_if_fail (klass->handle_frame != NULL, GST_FLOW_ERROR);
1717 
1718   av = gst_adapter_available (priv->adapter);
1719   GST_DEBUG_OBJECT (dec, "available: %d", av);
1720 
1721   while (ret == GST_FLOW_OK) {
1722 
1723     flush = 0;
1724     ctx->eos = force;
1725 
1726     if (G_LIKELY (av)) {
1727       gint len;
1728       GstClockTime ts;
1729       guint64 distance;
1730 
1731       /* parse if needed */
1732       if (klass->parse) {
1733         gint offset = 0;
1734 
1735         /* limited (legacy) parsing; avoid whole of baseparse */
1736         GST_DEBUG_OBJECT (dec, "parsing available: %d", av);
1737         /* piggyback sync state on discont */
1738         ctx->sync = !priv->discont;
1739         ret = klass->parse (dec, priv->adapter, &offset, &len);
1740 
1741         g_assert (offset <= av);
1742         if (offset) {
1743           /* jumped a bit */
1744           GST_DEBUG_OBJECT (dec, "skipped %d; setting DISCONT", offset);
1745           gst_adapter_flush (priv->adapter, offset);
1746           flush = offset;
1747           /* avoid parsing indefinitely */
1748           priv->sync_flush += offset;
1749           if (priv->sync_flush > GST_AUDIO_DECODER_MAX_SYNC)
1750             goto parse_failed;
1751         }
1752 
1753         if (ret == GST_FLOW_EOS) {
1754           GST_LOG_OBJECT (dec, "no frame yet");
1755           ret = GST_FLOW_OK;
1756           break;
1757         } else if (ret == GST_FLOW_OK) {
1758           GST_LOG_OBJECT (dec, "frame at offset %d of length %d", offset, len);
1759           g_assert (len);
1760           g_assert (offset + len <= av);
1761           priv->sync_flush = 0;
1762         } else {
1763           break;
1764         }
1765       } else {
1766         len = av;
1767       }
1768       /* track upstream ts, but do not get stuck if nothing new upstream */
1769       ts = gst_adapter_prev_pts (priv->adapter, &distance);
1770       if (ts != priv->prev_ts || distance <= priv->prev_distance) {
1771         priv->prev_ts = ts;
1772         priv->prev_distance = distance;
1773       } else {
1774         GST_LOG_OBJECT (dec, "ts == prev_ts; discarding");
1775         ts = GST_CLOCK_TIME_NONE;
1776       }
1777       buffer = gst_adapter_take_buffer (priv->adapter, len);
1778       buffer = gst_buffer_make_writable (buffer);
1779       GST_BUFFER_PTS (buffer) = ts;
1780       flush += len;
1781       priv->force = FALSE;
1782     } else {
1783       if (!force)
1784         break;
1785       if (!priv->drainable) {
1786         priv->drained = TRUE;
1787         break;
1788       }
1789       buffer = NULL;
1790       priv->force = TRUE;
1791     }
1792 
1793 #ifdef OHOS_OPT_PERFORMANCE
1794       // ohos.opt.performance.0005
1795       // add trace
1796       GstStartTrace("AudioDecoder:HandleFrame");
1797 #endif
1798     ret = gst_audio_decoder_handle_frame (dec, klass, buffer);
1799 #ifdef OHOS_OPT_PERFORMANCE
1800       GstFinishTrace();
1801 #endif
1802 
1803     /* do not keep pushing it ... */
1804     if (G_UNLIKELY (!av)) {
1805       priv->drained = TRUE;
1806       break;
1807     }
1808 
1809     av -= flush;
1810     g_assert (av >= 0);
1811   }
1812 
1813   GST_LOG_OBJECT (dec, "done pushing to subclass");
1814   return ret;
1815 
1816   /* ERRORS */
1817 parse_failed:
1818   {
1819     GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL), ("failed to parse stream"));
1820     return GST_FLOW_ERROR;
1821   }
1822 }
1823 
1824 static GstFlowReturn
1825 gst_audio_decoder_drain (GstAudioDecoder * dec)
1826 {
1827   GstFlowReturn ret;
1828 
1829   if (dec->priv->drained && !dec->priv->gather)
1830     return GST_FLOW_OK;
1831 
1832   /* Apply any pending events before draining, as that
1833    * may update the pending segment info */
1834   apply_pending_events (dec);
1835 
1836   /* dispatch reverse pending buffers */
1837   /* chain eventually calls upon drain as well, but by that time
1838    * gather list should be clear, so ok ... */
1839   if (dec->output_segment.rate < 0.0 && dec->priv->gather)
1840     gst_audio_decoder_chain_reverse (dec, NULL);
1841   /* have subclass give all it can */
1842   ret = gst_audio_decoder_push_buffers (dec, TRUE);
1843   if (ret != GST_FLOW_OK) {
1844     GST_WARNING_OBJECT (dec, "audio decoder push buffers failed");
1845     goto drain_failed;
1846   }
1847   /* ensure all output sent */
1848   ret = gst_audio_decoder_output (dec, NULL);
1849   if (ret != GST_FLOW_OK)
1850     GST_WARNING_OBJECT (dec, "audio decoder output failed");
1851 
1852 drain_failed:
1853   /* everything should be away now */
1854   if (dec->priv->frames.length) {
1855     /* not fatal/impossible though if subclass/codec eats stuff */
1856     GST_WARNING_OBJECT (dec, "still %d frames left after draining",
1857         dec->priv->frames.length);
1858     g_queue_foreach (&dec->priv->frames, (GFunc) gst_buffer_unref, NULL);
1859     g_queue_clear (&dec->priv->frames);
1860   }
1861 
1862   /* discard (unparsed) leftover */
1863   gst_adapter_clear (dec->priv->adapter);
1864   return ret;
1865 }
1866 
1867 /* hard == FLUSH, otherwise discont */
1868 static GstFlowReturn
1869 gst_audio_decoder_flush (GstAudioDecoder * dec, gboolean hard)
1870 {
1871   GstAudioDecoderClass *klass;
1872   GstFlowReturn ret = GST_FLOW_OK;
1873 
1874   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
1875 
1876   GST_LOG_OBJECT (dec, "flush hard %d", hard);
1877 
1878   if (!hard) {
1879     ret = gst_audio_decoder_drain (dec);
1880   } else {
1881     gst_audio_decoder_clear_queues (dec);
1882     gst_segment_init (&dec->input_segment, GST_FORMAT_TIME);
1883     gst_segment_init (&dec->output_segment, GST_FORMAT_TIME);
1884     dec->priv->error_count = 0;
1885 #ifdef OHOS_OPT_PERFORMANCE
1886     // ohos.opt.performance.0006: the PTS segment of the first frame is calibrated to improve performance.
1887     dec->priv->has_recv_first_frame = FALSE;
1888     dec->priv->has_push_first_frame = FALSE;
1889 #endif
1890   }
1891   /* only bother subclass with flushing if known it is already alive
1892    * and kicking out stuff */
1893   if (klass->flush && dec->priv->samples_out > 0)
1894     klass->flush (dec, hard);
1895   /* and get (re)set for the sequel */
1896   gst_audio_decoder_reset (dec, FALSE);
1897 
1898   return ret;
1899 }
1900 
1901 static GstFlowReturn
1902 gst_audio_decoder_chain_forward (GstAudioDecoder * dec, GstBuffer * buffer)
1903 {
1904   GstFlowReturn ret = GST_FLOW_OK;
1905 
1906   /* discard the silly empty-buffer case, though its ts might still be of value ?? */
1907   if (G_UNLIKELY (gst_buffer_get_size (buffer) == 0)) {
1908     GST_DEBUG_OBJECT (dec, "discarding empty buffer");
1909     gst_buffer_unref (buffer);
1910     goto exit;
1911   }
1912 
1913   /* grab buffer */
1914   gst_adapter_push (dec->priv->adapter, buffer);
1915   buffer = NULL;
1916   /* new stuff, so we can push subclass again */
1917   dec->priv->drained = FALSE;
1918 
1919   /* hand to subclass */
1920   ret = gst_audio_decoder_push_buffers (dec, FALSE);
1921 
1922 exit:
1923   GST_LOG_OBJECT (dec, "chain-done");
1924   return ret;
1925 }
1926 
1927 static void
1928 gst_audio_decoder_clear_queues (GstAudioDecoder * dec)
1929 {
1930   GstAudioDecoderPrivate *priv = dec->priv;
1931 
1932   g_list_foreach (priv->queued, (GFunc) gst_mini_object_unref, NULL);
1933   g_list_free (priv->queued);
1934   priv->queued = NULL;
1935   g_list_foreach (priv->gather, (GFunc) gst_mini_object_unref, NULL);
1936   g_list_free (priv->gather);
1937   priv->gather = NULL;
1938   g_list_foreach (priv->decode, (GFunc) gst_mini_object_unref, NULL);
1939   g_list_free (priv->decode);
1940   priv->decode = NULL;
1941 }
1942 
1943 /*
1944  * Input:
1945  *  Buffer decoding order:  7  8  9  4  5  6  3  1  2  EOS
1946  *  Discont flag:           D        D        D  D
1947  *
1948  * - Each Discont marks a discont in the decoding order.
1949  *
1950  * for vorbis, each buffer is a keyframe when we have the previous
1951  * buffer. This means that to decode buffer 7, we need buffer 6, which
1952  * arrives out of order.
1953  *
1954  * we first gather buffers in the gather queue until we get a DISCONT. We
1955  * prepend each incoming buffer so that they are in reversed order.
1956  *
1957  *    gather queue:    9  8  7
1958  *    decode queue:
1959  *    output queue:
1960  *
1961  * When a DISCONT is received (buffer 4), we move the gather queue to the
1962  * decode queue. This is simply done by taking the head of the gather queue
1963  * and prepending it to the decode queue. This yields:
1964  *
1965  *    gather queue:
1966  *    decode queue:    7  8  9
1967  *    output queue:
1968  *
1969  * Then we decode each buffer in the decode queue in order and put the output
1970  * buffer in the output queue. The first buffer (7) will not produce any output
1971  * because it needs the previous buffer (6) which did not arrive yet. This
1972  * yields:
1973  *
1974  *    gather queue:
1975  *    decode queue:    7  8  9
1976  *    output queue:    9  8
1977  *
1978  * Then we remove the consumed buffers from the decode queue. Buffer 7 is not
1979  * completely consumed, we need to keep it around for when we receive buffer
1980  * 6. This yields:
1981  *
1982  *    gather queue:
1983  *    decode queue:    7
1984  *    output queue:    9  8
1985  *
1986  * Then we accumulate more buffers:
1987  *
1988  *    gather queue:    6  5  4
1989  *    decode queue:    7
1990  *    output queue:
1991  *
1992  * prepending to the decode queue on DISCONT yields:
1993  *
1994  *    gather queue:
1995  *    decode queue:    4  5  6  7
1996  *    output queue:
1997  *
1998  * after decoding and keeping buffer 4:
1999  *
2000  *    gather queue:
2001  *    decode queue:    4
2002  *    output queue:    7  6  5
2003  *
2004  * Etc..
2005  */
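/* Illustration only (not part of the element code): a minimal sketch of the
 * gather -> decode move performed on DISCONT as described above, using the
 * same GLib list idiom as gst_audio_decoder_chain_reverse () further below;
 * the variable names are just for this example:
 *
 *   while (gather) {
 *     GstBuffer *gbuf = GST_BUFFER_CAST (gather->data);
 *     gather = g_list_delete_link (gather, gather);   // pop the gather head
 *     decode = g_list_prepend (decode, gbuf);         // restores decoding order
 *   }
 */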
2006 static GstFlowReturn
2007 gst_audio_decoder_flush_decode (GstAudioDecoder * dec)
2008 {
2009   GstAudioDecoderPrivate *priv = dec->priv;
2010   GstFlowReturn res = GST_FLOW_OK;
2011   GstClockTime timestamp;
2012   GList *walk;
2013 
2014   walk = priv->decode;
2015 
2016   GST_DEBUG_OBJECT (dec, "flushing buffers to decoder");
2017 
2018   /* clear buffer and decoder state */
2019   gst_audio_decoder_flush (dec, FALSE);
2020 
2021   while (walk) {
2022     GList *next;
2023     GstBuffer *buf = GST_BUFFER_CAST (walk->data);
2024 
2025     GST_DEBUG_OBJECT (dec, "decoding buffer %p, ts %" GST_TIME_FORMAT,
2026         buf, GST_TIME_ARGS (GST_BUFFER_PTS (buf)));
2027 
2028     next = g_list_next (walk);
2029     /* decode buffer, resulting data prepended to output queue */
2030     gst_buffer_ref (buf);
2031     res = gst_audio_decoder_chain_forward (dec, buf);
2032 
2033     /* if we generated output, we can discard the buffer, else we
2034      * keep it in the queue */
2035     if (priv->queued) {
2036       GST_DEBUG_OBJECT (dec, "decoded buffer to %p", priv->queued->data);
2037       priv->decode = g_list_delete_link (priv->decode, walk);
2038       gst_buffer_unref (buf);
2039     } else {
2040       GST_DEBUG_OBJECT (dec, "buffer did not decode, keeping");
2041     }
2042     walk = next;
2043   }
2044 
2045   /* drain any aggregation (or otherwise) leftover */
2046   gst_audio_decoder_drain (dec);
2047 
2048   /* now send queued data downstream */
2049   timestamp = GST_CLOCK_TIME_NONE;
2050   while (priv->queued) {
2051     GstBuffer *buf = GST_BUFFER_CAST (priv->queued->data);
2052     GstClockTime duration;
2053 
2054     duration = GST_BUFFER_DURATION (buf);
2055 
2056     /* duration should always be valid for raw audio */
2057     g_assert (GST_CLOCK_TIME_IS_VALID (duration));
2058 
2059     /* interpolate (backward) if needed */
2060     if (G_LIKELY (timestamp != -1)) {
2061       if (timestamp > duration)
2062         timestamp -= duration;
2063       else
2064         timestamp = 0;
2065     }
2066 
2067     if (!GST_BUFFER_PTS_IS_VALID (buf)) {
2068       GST_LOG_OBJECT (dec, "applying reverse interpolated ts %"
2069           GST_TIME_FORMAT, GST_TIME_ARGS (timestamp));
2070       GST_BUFFER_PTS (buf) = timestamp;
2071     } else {
2072       /* track otherwise */
2073       timestamp = GST_BUFFER_PTS (buf);
2074       GST_LOG_OBJECT (dec, "tracking ts %" GST_TIME_FORMAT,
2075           GST_TIME_ARGS (timestamp));
2076     }
2077 
2078     if (G_LIKELY (res == GST_FLOW_OK)) {
2079       GST_DEBUG_OBJECT (dec, "pushing buffer %p of size %" G_GSIZE_FORMAT ", "
2080           "time %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf,
2081           gst_buffer_get_size (buf), GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2082           GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
2083       /* should already be writable, but let's be sure */
2084       buf = gst_buffer_make_writable (buf);
2085       /* avoid stray DISCONT from forward processing,
2086        * which have no meaning in reverse pushing */
2087       GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DISCONT);
2088       res = gst_audio_decoder_push_forward (dec, buf);
2089     } else {
2090       gst_buffer_unref (buf);
2091     }
2092 
2093     priv->queued = g_list_delete_link (priv->queued, priv->queued);
2094   }
2095 
2096   return res;
2097 }
2098 
2099 static GstFlowReturn
2100 gst_audio_decoder_chain_reverse (GstAudioDecoder * dec, GstBuffer * buf)
2101 {
2102   GstAudioDecoderPrivate *priv = dec->priv;
2103   GstFlowReturn result = GST_FLOW_OK;
2104 
2105   /* if we have a discont, move buffers to the decode list */
2106   if (!buf || GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DISCONT)) {
2107     GST_DEBUG_OBJECT (dec, "received discont");
2108     while (priv->gather) {
2109       GstBuffer *gbuf;
2110 
2111       gbuf = GST_BUFFER_CAST (priv->gather->data);
2112       /* remove from the gather list */
2113       priv->gather = g_list_delete_link (priv->gather, priv->gather);
2114       /* copy to decode queue */
2115       priv->decode = g_list_prepend (priv->decode, gbuf);
2116     }
2117     /* decode stuff in the decode queue */
2118     gst_audio_decoder_flush_decode (dec);
2119   }
2120 
2121   if (G_LIKELY (buf)) {
2122     GST_DEBUG_OBJECT (dec, "gathering buffer %p of size %" G_GSIZE_FORMAT ", "
2123         "time %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf,
2124         gst_buffer_get_size (buf), GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2125         GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
2126 
2127     /* add buffer to gather queue */
2128     priv->gather = g_list_prepend (priv->gather, buf);
2129   }
2130 
2131   return result;
2132 }
2133 
2134 static GstFlowReturn
2135 gst_audio_decoder_chain (GstPad * pad, GstObject * parent, GstBuffer * buffer)
2136 {
2137   GstAudioDecoder *dec;
2138   GstFlowReturn ret;
2139 
2140   dec = GST_AUDIO_DECODER (parent);
2141 
2142   GST_LOG_OBJECT (dec,
2143       "received buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT
2144       ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buffer),
2145       GST_TIME_ARGS (GST_BUFFER_PTS (buffer)),
2146       GST_TIME_ARGS (GST_BUFFER_DURATION (buffer)));
2147 
2148   GST_AUDIO_DECODER_STREAM_LOCK (dec);
2149 
2150   if (G_UNLIKELY (dec->priv->ctx.input_caps == NULL && dec->priv->needs_format))
2151     goto not_negotiated;
2152 
2153   dec->priv->ctx.had_input_data = TRUE;
2154 
2155   if (!dec->priv->expecting_discont_buf &&
2156       GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DISCONT)) {
2157     gint64 samples, ts;
2158 
2159     /* track present position */
2160     ts = dec->priv->base_ts;
2161     samples = dec->priv->samples;
2162 
2163     GST_DEBUG_OBJECT (dec, "handling discont");
2164     gst_audio_decoder_flush (dec, FALSE);
2165     dec->priv->discont = TRUE;
2166 
2167     /* the buffer may claim DISCONT loudly, but if it can't tell us where we are now,
2168      * we'll stick to where we were ...
2169      * Particularly useful/needed for upstream BYTE-based input */
2170     if (dec->input_segment.rate > 0.0 && !GST_BUFFER_PTS_IS_VALID (buffer)) {
2171       GST_DEBUG_OBJECT (dec, "... but restoring previous ts tracking");
2172       dec->priv->base_ts = ts;
2173       dec->priv->samples = samples;
2174     }
2175   }
2176   dec->priv->expecting_discont_buf = FALSE;
2177 
2178   if (dec->input_segment.rate > 0.0)
2179     ret = gst_audio_decoder_chain_forward (dec, buffer);
2180   else
2181     ret = gst_audio_decoder_chain_reverse (dec, buffer);
2182 
2183   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2184 
2185   return ret;
2186 
2187   /* ERRORS */
2188 not_negotiated:
2189   {
2190     GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2191     GST_ELEMENT_ERROR (dec, CORE, NEGOTIATION, (NULL),
2192         ("decoder not initialized"));
2193     gst_buffer_unref (buffer);
2194     return GST_FLOW_NOT_NEGOTIATED;
2195   }
2196 }
2197 
2198 /* perform upstream byte <-> time conversion (duration, seeking)
2199  * if subclass allows and if enough data for moderately decent conversion */
2200 static inline gboolean
2201 gst_audio_decoder_do_byte (GstAudioDecoder * dec)
2202 {
2203   gboolean ret;
2204 
2205   GST_OBJECT_LOCK (dec);
2206   ret = dec->priv->ctx.do_estimate_rate && dec->priv->ctx.info.bpf &&
2207       dec->priv->ctx.info.rate <= dec->priv->samples_out;
2208   GST_OBJECT_UNLOCK (dec);
2209 
2210   return ret;
2211 }
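/* Rough illustration of the estimate this enables (an assumption about the
 * shared __gst_audio_encoded_audio_convert helper, not a verbatim copy of it):
 * with bytes_in encoded bytes having produced samples_out raw samples at
 * ctx.info.rate, an upstream BYTES value converts to TIME approximately as
 *
 *   time = gst_util_uint64_scale (bytes,
 *       gst_util_uint64_scale (samples_out, GST_SECOND, ctx.info.rate),
 *       bytes_in);
 */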
2212 
2213 /* Must be called holding the GST_AUDIO_DECODER_STREAM_LOCK */
2214 static gboolean
2215 gst_audio_decoder_negotiate_default_caps (GstAudioDecoder * dec)
2216 {
2217   GstCaps *caps, *templcaps;
2218   gint i;
2219   gint channels = 0;
2220   gint rate;
2221   guint64 channel_mask = 0;
2222   gint caps_size;
2223   GstStructure *structure;
2224   GstAudioInfo info;
2225 
2226   templcaps = gst_pad_get_pad_template_caps (dec->srcpad);
2227   caps = gst_pad_peer_query_caps (dec->srcpad, templcaps);
2228   if (caps)
2229     gst_caps_unref (templcaps);
2230   else
2231     caps = templcaps;
2232   templcaps = NULL;
2233 
2234   if (!caps || gst_caps_is_empty (caps) || gst_caps_is_any (caps))
2235     goto caps_error;
2236 
2237   GST_LOG_OBJECT (dec, "peer caps  %" GST_PTR_FORMAT, caps);
2238 
2239   /* before fixating, try to use whatever upstream provided */
2240   caps = gst_caps_make_writable (caps);
2241   caps_size = gst_caps_get_size (caps);
2242   if (dec->priv->ctx.input_caps) {
2243     GstCaps *sinkcaps = dec->priv->ctx.input_caps;
2244     GstStructure *structure = gst_caps_get_structure (sinkcaps, 0);
2245 
2246     if (gst_structure_get_int (structure, "rate", &rate)) {
2247       for (i = 0; i < caps_size; i++) {
2248         gst_structure_set (gst_caps_get_structure (caps, i), "rate",
2249             G_TYPE_INT, rate, NULL);
2250       }
2251     }
2252 
2253     if (gst_structure_get_int (structure, "channels", &channels)) {
2254       for (i = 0; i < caps_size; i++) {
2255         gst_structure_set (gst_caps_get_structure (caps, i), "channels",
2256             G_TYPE_INT, channels, NULL);
2257       }
2258     }
2259 
2260     if (gst_structure_get (structure, "channel-mask", GST_TYPE_BITMASK,
2261             &channel_mask, NULL)) {
2262       for (i = 0; i < caps_size; i++) {
2263         gst_structure_set (gst_caps_get_structure (caps, i), "channel-mask",
2264             GST_TYPE_BITMASK, channel_mask, NULL);
2265       }
2266     }
2267   }
2268 
2269   for (i = 0; i < caps_size; i++) {
2270     structure = gst_caps_get_structure (caps, i);
2271     if (gst_structure_has_field (structure, "channels"))
2272       gst_structure_fixate_field_nearest_int (structure,
2273           "channels", GST_AUDIO_DEF_CHANNELS);
2274     else
2275       gst_structure_set (structure, "channels", G_TYPE_INT,
2276           GST_AUDIO_DEF_CHANNELS, NULL);
2277     if (gst_structure_has_field (structure, "rate"))
2278       gst_structure_fixate_field_nearest_int (structure,
2279           "rate", GST_AUDIO_DEF_RATE);
2280     else
2281       gst_structure_set (structure, "rate", G_TYPE_INT, GST_AUDIO_DEF_RATE,
2282           NULL);
2283   }
2284   caps = gst_caps_fixate (caps);
2285   structure = gst_caps_get_structure (caps, 0);
2286 
2287   /* Need to add a channel-mask if channels > 2 */
2288   gst_structure_get_int (structure, "channels", &channels);
2289   if (channels > 2 && !gst_structure_has_field (structure, "channel-mask")) {
2290     channel_mask = gst_audio_channel_get_fallback_mask (channels);
2291     if (channel_mask != 0) {
2292       gst_structure_set (structure, "channel-mask",
2293           GST_TYPE_BITMASK, channel_mask, NULL);
2294     } else {
2295       GST_WARNING_OBJECT (dec, "No default channel-mask for %d channels",
2296           channels);
2297     }
2298   }
2299 
2300   if (!caps || !gst_audio_info_from_caps (&info, caps))
2301     goto caps_error;
2302 
2303   GST_OBJECT_LOCK (dec);
2304   dec->priv->ctx.info = info;
2305   dec->priv->ctx.caps = caps;
2306   GST_OBJECT_UNLOCK (dec);
2307 
2308   GST_INFO_OBJECT (dec,
2309       "Chose default caps %" GST_PTR_FORMAT " for initial gap", caps);
2310 
2311   return TRUE;
2312 
2313 caps_error:
2314   {
2315     if (caps)
2316       gst_caps_unref (caps);
2317     return FALSE;
2318   }
2319 }
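/* With no usable hints from the input caps, the fixation above typically lands
 * on the library defaults, e.g. something like rate=44100 (GST_AUDIO_DEF_RATE)
 * and channels=2 (GST_AUDIO_DEF_CHANNELS), further constrained by whatever the
 * peer/template caps allow. */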
2320 
2321 static gboolean
2322 gst_audio_decoder_handle_gap (GstAudioDecoder * dec, GstEvent * event)
2323 {
2324   gboolean ret;
2325   GstClockTime timestamp, duration;
2326   gboolean needs_reconfigure = FALSE;
2327 
2328   /* Ensure we have caps first */
2329   GST_AUDIO_DECODER_STREAM_LOCK (dec);
2330   if (!GST_AUDIO_INFO_IS_VALID (&dec->priv->ctx.info)) {
2331     if (!gst_audio_decoder_negotiate_default_caps (dec)) {
2332       GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2333       GST_ELEMENT_ERROR (dec, STREAM, FORMAT, (NULL),
2334           ("Decoder output not negotiated before GAP event."));
2335       gst_event_unref (event);
2336       return FALSE;
2337     }
2338     needs_reconfigure = TRUE;
2339   }
2340   needs_reconfigure = gst_pad_check_reconfigure (dec->srcpad)
2341       || needs_reconfigure;
2342   if (G_UNLIKELY (dec->priv->ctx.output_format_changed || needs_reconfigure)) {
2343     if (!gst_audio_decoder_negotiate_unlocked (dec)) {
2344       GST_WARNING_OBJECT (dec, "Failed to negotiate with downstream");
2345       gst_pad_mark_reconfigure (dec->srcpad);
2346     }
2347   }
2348   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2349 
2350   gst_event_parse_gap (event, &timestamp, &duration);
2351 
2352   /* time progressed without data, see if we can fill the gap with
2353    * some concealment data */
2354   GST_DEBUG_OBJECT (dec,
2355       "gap event: plc %d, do_plc %d, position %" GST_TIME_FORMAT
2356       " duration %" GST_TIME_FORMAT,
2357       dec->priv->plc, dec->priv->ctx.do_plc,
2358       GST_TIME_ARGS (timestamp), GST_TIME_ARGS (duration));
2359 
2360   if (dec->priv->plc && dec->priv->ctx.do_plc && dec->input_segment.rate > 0.0) {
2361     GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (dec);
2362     GstBuffer *buf;
2363 
2364     /* hand subclass empty frame with duration that needs covering */
2365     buf = gst_buffer_new ();
2366     GST_BUFFER_PTS (buf) = timestamp;
2367     GST_BUFFER_DURATION (buf) = duration;
2368     /* best effort, not much error handling */
2369     gst_audio_decoder_handle_frame (dec, klass, buf);
2370     ret = TRUE;
2371     dec->priv->expecting_discont_buf = TRUE;
2372     gst_event_unref (event);
2373   } else {
2374     GstFlowReturn flowret;
2375 
2376     /* sub-class doesn't know how to handle empty buffers,
2377      * so just try sending GAP downstream */
2378     flowret = check_pending_reconfigure (dec);
2379     if (flowret == GST_FLOW_OK) {
2380       send_pending_events (dec);
2381       ret = gst_audio_decoder_push_event (dec, event);
2382     } else {
2383       ret = FALSE;
2384       gst_event_unref (event);
2385     }
2386   }
2387   return ret;
2388 }
2389 
2390 static GList *
2391 _flush_events (GstPad * pad, GList * events)
2392 {
2393   GList *tmp;
2394 
2395   for (tmp = events; tmp; tmp = tmp->next) {
2396     if (GST_EVENT_TYPE (tmp->data) != GST_EVENT_EOS &&
2397         GST_EVENT_TYPE (tmp->data) != GST_EVENT_SEGMENT &&
2398         GST_EVENT_IS_STICKY (tmp->data)) {
2399       gst_pad_store_sticky_event (pad, GST_EVENT_CAST (tmp->data));
2400     }
2401     gst_event_unref (tmp->data);
2402   }
2403   g_list_free (events);
2404 
2405   return NULL;
2406 }
2407 
2408 static gboolean
2409 gst_audio_decoder_sink_eventfunc (GstAudioDecoder * dec, GstEvent * event)
2410 {
2411   gboolean ret;
2412 
2413   switch (GST_EVENT_TYPE (event)) {
2414     case GST_EVENT_STREAM_START:
2415       GST_AUDIO_DECODER_STREAM_LOCK (dec);
2416       /* finish any data in current segment and clear the decoder
2417        * to be ready for new stream data */
2418       gst_audio_decoder_drain (dec);
2419       gst_audio_decoder_flush (dec, FALSE);
2420 
2421       GST_DEBUG_OBJECT (dec, "received STREAM_START. Clearing taglist");
2422       /* Flush upstream tags after a STREAM_START */
2423       if (dec->priv->upstream_tags) {
2424         gst_tag_list_unref (dec->priv->upstream_tags);
2425         dec->priv->upstream_tags = NULL;
2426         dec->priv->taglist_changed = TRUE;
2427       }
2428       GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2429 
2430       ret = gst_audio_decoder_push_event (dec, event);
2431       break;
2432     case GST_EVENT_SEGMENT:
2433     {
2434       GstSegment seg;
2435       GstFormat format;
2436 
2437       GST_AUDIO_DECODER_STREAM_LOCK (dec);
2438       gst_event_copy_segment (event, &seg);
2439 
2440       format = seg.format;
2441       if (format == GST_FORMAT_TIME) {
2442         GST_DEBUG_OBJECT (dec, "received TIME SEGMENT %" GST_SEGMENT_FORMAT,
2443             &seg);
2444       } else {
2445         gint64 nstart;
2446         GST_DEBUG_OBJECT (dec, "received SEGMENT %" GST_SEGMENT_FORMAT, &seg);
2447         /* handle newsegment resulting from legacy simple seeking */
2448         /* note that we need to convert this whether or not there is enough data
2449          * to handle the initial newsegment */
2450         if (dec->priv->ctx.do_estimate_rate &&
2451             gst_pad_query_convert (dec->sinkpad, GST_FORMAT_BYTES, seg.start,
2452                 GST_FORMAT_TIME, &nstart)) {
2453           /* best attempt convert */
2454           /* as these are only estimates, stop is kept open-ended to avoid
2455            * premature cutting */
2456           GST_DEBUG_OBJECT (dec, "converted to TIME start %" GST_TIME_FORMAT,
2457               GST_TIME_ARGS (nstart));
2458           seg.format = GST_FORMAT_TIME;
2459           seg.start = nstart;
2460           seg.time = nstart;
2461           seg.stop = GST_CLOCK_TIME_NONE;
2462           /* replace event */
2463           gst_event_unref (event);
2464           event = gst_event_new_segment (&seg);
2465         } else {
2466           GST_DEBUG_OBJECT (dec, "unsupported format; ignoring");
2467           GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2468           gst_event_unref (event);
2469           ret = FALSE;
2470           break;
2471         }
2472       }
2473 
2474       /* prepare for next segment */
2475       /* Use the segment start as a base timestamp
2476        * in case upstream does not come up with anything better
2477        * (e.g. upstream BYTE) */
2478       if (format != GST_FORMAT_TIME) {
2479         dec->priv->base_ts = seg.start;
2480         dec->priv->samples = 0;
2481       }
2482 
2483       /* Update the decode flags in the segment if we have an instant-rate
2484        * override active */
2485       GST_OBJECT_LOCK (dec);
2486       if (dec->priv->decode_flags_override) {
2487         seg.flags &= ~GST_SEGMENT_INSTANT_FLAGS;
2488         seg.flags |= dec->priv->decode_flags & GST_SEGMENT_INSTANT_FLAGS;
2489       }
2490 
2491       /* and follow along with segment */
2492       dec->priv->in_out_segment_sync = FALSE;
2493       dec->input_segment = seg;
2494       GST_OBJECT_UNLOCK (dec);
2495 
2496       dec->priv->pending_events =
2497           g_list_append (dec->priv->pending_events, event);
2498       GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2499 
2500       ret = TRUE;
2501       break;
2502     }
2503     case GST_EVENT_INSTANT_RATE_CHANGE:
2504     {
2505       GstSegmentFlags flags;
2506       GstSegment *seg;
2507 
2508       gst_event_parse_instant_rate_change (event, NULL, &flags);
2509 
2510       GST_OBJECT_LOCK (dec);
2511       dec->priv->decode_flags_override = TRUE;
2512       dec->priv->decode_flags = flags;
2513 
2514       /* Update the input segment flags */
2515       seg = &dec->input_segment;
2516       seg->flags &= ~GST_SEGMENT_INSTANT_FLAGS;
2517       seg->flags |= dec->priv->decode_flags & GST_SEGMENT_INSTANT_FLAGS;
2518       GST_OBJECT_UNLOCK (dec);
2519 
2520       /* Forward downstream */
2521       ret = gst_pad_event_default (dec->sinkpad, GST_OBJECT_CAST (dec), event);
2522       break;
2523     }
2524     case GST_EVENT_GAP:
2525       ret = gst_audio_decoder_handle_gap (dec, event);
2526       break;
2527     case GST_EVENT_FLUSH_STOP:
2528       GST_AUDIO_DECODER_STREAM_LOCK (dec);
2529       /* prepare for fresh start */
2530       gst_audio_decoder_flush (dec, TRUE);
2531 
2532       dec->priv->pending_events = _flush_events (dec->srcpad,
2533           dec->priv->pending_events);
2534       GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2535 
2536       /* Forward FLUSH_STOP, it is expected to be forwarded immediately
2537        * and no buffers are queued anyway. */
2538       ret = gst_audio_decoder_push_event (dec, event);
2539       break;
2540 
2541     case GST_EVENT_SEGMENT_DONE:
2542       GST_AUDIO_DECODER_STREAM_LOCK (dec);
2543       gst_audio_decoder_drain (dec);
2544       GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2545 
2546       /* Forward SEGMENT_DONE because no buffer or serialized event might come after
2547        * SEGMENT_DONE and nothing could trigger another _finish_frame() call. */
2548       if (dec->priv->pending_events)
2549         send_pending_events (dec);
2550       ret = gst_audio_decoder_push_event (dec, event);
2551       break;
2552 
2553     case GST_EVENT_EOS:
2554       GST_AUDIO_DECODER_STREAM_LOCK (dec);
2555       gst_audio_decoder_drain (dec);
2556       GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2557 
2558       if (dec->priv->ctx.had_input_data && !dec->priv->ctx.had_output_data) {
2559         GST_ELEMENT_ERROR (dec, STREAM, DECODE,
2560             ("No valid frames decoded before end of stream"),
2561             ("no valid frames found"));
2562 #ifdef OHOS_OPT_COMPAT
2563         /**
2564         * ohos.opt.compat.0058
2565         * If we push the EOS event downstream while the engine is being destroyed
2566         * at the same time, a deadlock may occur.
2567         */
2568         ret = TRUE;
2569         break;
2570 #endif
2571       }
2572 
2573       /* Forward EOS because no buffer or serialized event will come after
2574        * EOS and nothing could trigger another _finish_frame() call. */
2575       if (dec->priv->pending_events)
2576         send_pending_events (dec);
2577       ret = gst_audio_decoder_push_event (dec, event);
2578       break;
2579 
2580     case GST_EVENT_CAPS:
2581     {
2582       GstCaps *caps;
2583 
2584       gst_event_parse_caps (event, &caps);
2585       ret = gst_audio_decoder_sink_setcaps (dec, caps);
2586       gst_event_unref (event);
2587       break;
2588     }
2589     case GST_EVENT_TAG:
2590     {
2591       GstTagList *tags;
2592 
2593       gst_event_parse_tag (event, &tags);
2594 
2595       if (gst_tag_list_get_scope (tags) == GST_TAG_SCOPE_STREAM) {
2596         GST_AUDIO_DECODER_STREAM_LOCK (dec);
2597         if (dec->priv->upstream_tags != tags) {
2598           if (dec->priv->upstream_tags)
2599             gst_tag_list_unref (dec->priv->upstream_tags);
2600           dec->priv->upstream_tags = gst_tag_list_ref (tags);
2601           GST_INFO_OBJECT (dec, "upstream stream tags: %" GST_PTR_FORMAT, tags);
2602         }
2603         gst_event_unref (event);
2604         event = gst_audio_decoder_create_merged_tags_event (dec);
2605         dec->priv->taglist_changed = FALSE;
2606         GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2607 
2608         /* No tags, go out of here instead of falling through */
2609         if (!event) {
2610           ret = TRUE;
2611           break;
2612         }
2613       }
2614 
2615       /* fall through */
2616     }
2617     default:
2618       if (!GST_EVENT_IS_SERIALIZED (event)) {
2619         ret =
2620             gst_pad_event_default (dec->sinkpad, GST_OBJECT_CAST (dec), event);
2621       } else {
2622         GST_DEBUG_OBJECT (dec, "Enqueuing event %d, %s", GST_EVENT_TYPE (event),
2623             GST_EVENT_TYPE_NAME (event));
2624         GST_AUDIO_DECODER_STREAM_LOCK (dec);
2625         dec->priv->pending_events =
2626             g_list_append (dec->priv->pending_events, event);
2627         GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2628         ret = TRUE;
2629       }
2630       break;
2631   }
2632   return ret;
2633 }
2634 
2635 static gboolean
2636 gst_audio_decoder_sink_event (GstPad * pad, GstObject * parent,
2637     GstEvent * event)
2638 {
2639   GstAudioDecoder *dec;
2640   GstAudioDecoderClass *klass;
2641   gboolean ret;
2642 
2643   dec = GST_AUDIO_DECODER (parent);
2644   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
2645 
2646   GST_DEBUG_OBJECT (dec, "received event %d, %s", GST_EVENT_TYPE (event),
2647       GST_EVENT_TYPE_NAME (event));
2648 
2649   if (klass->sink_event)
2650     ret = klass->sink_event (dec, event);
2651   else {
2652     gst_event_unref (event);
2653     ret = FALSE;
2654   }
2655   return ret;
2656 }
2657 
2658 static gboolean
2659 gst_audio_decoder_do_seek (GstAudioDecoder * dec, GstEvent * event)
2660 {
2661   GstSeekFlags flags;
2662   GstSeekType start_type, end_type;
2663   GstFormat format;
2664   gdouble rate;
2665   gint64 start, start_time, end_time;
2666   GstSegment seek_segment;
2667   guint32 seqnum;
2668 
2669   gst_event_parse_seek (event, &rate, &format, &flags, &start_type,
2670       &start_time, &end_type, &end_time);
2671 
2672   /* we'll handle plain open-ended flushing seeks with the simple approach */
2673   if (rate != 1.0) {
2674     GST_DEBUG_OBJECT (dec, "unsupported seek: rate");
2675     return FALSE;
2676   }
2677 
2678   if (start_type != GST_SEEK_TYPE_SET) {
2679     GST_DEBUG_OBJECT (dec, "unsupported seek: start time");
2680     return FALSE;
2681   }
2682 
2683   if ((end_type != GST_SEEK_TYPE_SET && end_type != GST_SEEK_TYPE_NONE) ||
2684       (end_type == GST_SEEK_TYPE_SET && end_time != GST_CLOCK_TIME_NONE)) {
2685     GST_DEBUG_OBJECT (dec, "unsupported seek: end time");
2686     return FALSE;
2687   }
2688 
2689   if (!(flags & GST_SEEK_FLAG_FLUSH)) {
2690     GST_DEBUG_OBJECT (dec, "unsupported seek: not flushing");
2691     return FALSE;
2692   }
2693 
2694   memcpy (&seek_segment, &dec->output_segment, sizeof (seek_segment));
2695   gst_segment_do_seek (&seek_segment, rate, format, flags, start_type,
2696       start_time, end_type, end_time, NULL);
2697   start_time = seek_segment.position;
2698 
2699   if (!gst_pad_query_convert (dec->sinkpad, GST_FORMAT_TIME, start_time,
2700           GST_FORMAT_BYTES, &start)) {
2701     GST_DEBUG_OBJECT (dec, "conversion failed");
2702     return FALSE;
2703   }
2704 
2705   seqnum = gst_event_get_seqnum (event);
2706   event = gst_event_new_seek (1.0, GST_FORMAT_BYTES, flags,
2707       GST_SEEK_TYPE_SET, start, GST_SEEK_TYPE_NONE, -1);
2708   gst_event_set_seqnum (event, seqnum);
2709 
2710   GST_DEBUG_OBJECT (dec, "seeking to %" GST_TIME_FORMAT " at byte offset %"
2711       G_GINT64_FORMAT, GST_TIME_ARGS (start_time), start);
2712 
2713   return gst_pad_push_event (dec->sinkpad, event);
2714 }
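/* Example (illustrative only) of a seek that takes this simple path: a
 * flushing, open-ended, rate-1.0 TIME seek such as
 *
 *   gst_event_new_seek (1.0, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH,
 *       GST_SEEK_TYPE_SET, 10 * GST_SECOND,
 *       GST_SEEK_TYPE_NONE, GST_CLOCK_TIME_NONE);
 *
 * which is converted to a BYTES seek above and pushed upstream. */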
2715 
2716 static gboolean
2717 gst_audio_decoder_src_eventfunc (GstAudioDecoder * dec, GstEvent * event)
2718 {
2719   gboolean res;
2720 
2721   switch (GST_EVENT_TYPE (event)) {
2722     case GST_EVENT_SEEK:
2723     {
2724       GstFormat format;
2725       gdouble rate;
2726       GstSeekFlags flags;
2727       GstSeekType start_type, stop_type;
2728       gint64 start, stop;
2729       gint64 tstart, tstop;
2730       guint32 seqnum;
2731 
2732       gst_event_parse_seek (event, &rate, &format, &flags, &start_type, &start,
2733           &stop_type, &stop);
2734       seqnum = gst_event_get_seqnum (event);
2735 
2736       /* upstream gets a chance first */
2737       if ((res = gst_pad_push_event (dec->sinkpad, event)))
2738         break;
2739 
2740       /* if upstream fails for a time seek, maybe we can help if allowed */
2741       if (format == GST_FORMAT_TIME) {
2742         if (gst_audio_decoder_do_byte (dec))
2743           res = gst_audio_decoder_do_seek (dec, event);
2744         break;
2745       }
2746 
2747       /* ... though a non-time seek can be aided as well */
2748       /* First bring the requested format to time */
2749       if (!(res =
2750               gst_pad_query_convert (dec->srcpad, format, start,
2751                   GST_FORMAT_TIME, &tstart)))
2752         goto convert_error;
2753       if (!(res =
2754               gst_pad_query_convert (dec->srcpad, format, stop, GST_FORMAT_TIME,
2755                   &tstop)))
2756         goto convert_error;
2757 
2758       /* then seek with time on the peer */
2759       event = gst_event_new_seek (rate, GST_FORMAT_TIME,
2760           flags, start_type, tstart, stop_type, tstop);
2761       gst_event_set_seqnum (event, seqnum);
2762 
2763       res = gst_pad_push_event (dec->sinkpad, event);
2764       break;
2765     }
2766     default:
2767       res = gst_pad_event_default (dec->srcpad, GST_OBJECT_CAST (dec), event);
2768       break;
2769   }
2770 done:
2771   return res;
2772 
2773   /* ERRORS */
2774 convert_error:
2775   {
2776     GST_DEBUG_OBJECT (dec, "cannot convert start/stop for seek");
2777     goto done;
2778   }
2779 }
2780 
2781 static gboolean
2782 gst_audio_decoder_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
2783 {
2784   GstAudioDecoder *dec;
2785   GstAudioDecoderClass *klass;
2786   gboolean ret;
2787 
2788   dec = GST_AUDIO_DECODER (parent);
2789   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
2790 
2791   GST_DEBUG_OBJECT (dec, "received event %d, %s", GST_EVENT_TYPE (event),
2792       GST_EVENT_TYPE_NAME (event));
2793 
2794   if (klass->src_event)
2795     ret = klass->src_event (dec, event);
2796   else {
2797     gst_event_unref (event);
2798     ret = FALSE;
2799   }
2800 
2801   return ret;
2802 }
2803 
2804 static gboolean
2805 gst_audio_decoder_decide_allocation_default (GstAudioDecoder * dec,
2806     GstQuery * query)
2807 {
2808   GstAllocator *allocator = NULL;
2809   GstAllocationParams params;
2810   gboolean update_allocator;
2811 
2812   /* we got configuration from our peer or the decide_allocation method,
2813    * parse them */
2814   if (gst_query_get_n_allocation_params (query) > 0) {
2815     /* try the allocator */
2816     gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
2817     update_allocator = TRUE;
2818   } else {
2819     allocator = NULL;
2820     gst_allocation_params_init (&params);
2821     update_allocator = FALSE;
2822   }
2823 
2824   if (update_allocator)
2825     gst_query_set_nth_allocation_param (query, 0, allocator, &params);
2826   else
2827     gst_query_add_allocation_param (query, allocator, &params);
2828   if (allocator)
2829     gst_object_unref (allocator);
2830 
2831   return TRUE;
2832 }
2833 
2834 static gboolean
2835 gst_audio_decoder_propose_allocation_default (GstAudioDecoder * dec,
2836     GstQuery * query)
2837 {
2838   return TRUE;
2839 }
2840 
2841 /**
2842  * gst_audio_decoder_proxy_getcaps:
2843  * @decoder: a #GstAudioDecoder
2844  * @caps: (allow-none): initial caps
2845  * @filter: (allow-none): filter caps
2846  *
2847  * Returns caps that express @caps (or sink template caps if @caps == NULL)
2848  * restricted to rate/channels/... combinations supported by downstream
2849  * elements.
2850  *
2851  * Returns: (transfer full): a #GstCaps owned by caller
2852  *
2853  * Since: 1.6
2854  */
2855 GstCaps *
2856 gst_audio_decoder_proxy_getcaps (GstAudioDecoder * decoder, GstCaps * caps,
2857     GstCaps * filter)
2858 {
2859   return __gst_audio_element_proxy_getcaps (GST_ELEMENT_CAST (decoder),
2860       GST_AUDIO_DECODER_SINK_PAD (decoder),
2861       GST_AUDIO_DECODER_SRC_PAD (decoder), caps, filter);
2862 }
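/* Typical use from a hypothetical subclass getcaps vfunc (this is also what
 * the default sink getcaps below effectively does):
 *
 *   static GstCaps *
 *   my_dec_getcaps (GstAudioDecoder * dec, GstCaps * filter)
 *   {
 *     return gst_audio_decoder_proxy_getcaps (dec, NULL, filter);
 *   }
 */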
2863 
2864 static GstCaps *
2865 gst_audio_decoder_sink_getcaps (GstAudioDecoder * decoder, GstCaps * filter)
2866 {
2867   GstAudioDecoderClass *klass;
2868   GstCaps *caps;
2869 
2870   klass = GST_AUDIO_DECODER_GET_CLASS (decoder);
2871 
2872   if (klass->getcaps)
2873     caps = klass->getcaps (decoder, filter);
2874   else
2875     caps = gst_audio_decoder_proxy_getcaps (decoder, NULL, filter);
2876 
2877   GST_LOG_OBJECT (decoder, "Returning caps %" GST_PTR_FORMAT, caps);
2878 
2879   return caps;
2880 }
2881 
2882 static gboolean
2883 gst_audio_decoder_sink_query_default (GstAudioDecoder * dec, GstQuery * query)
2884 {
2885   GstPad *pad = GST_AUDIO_DECODER_SINK_PAD (dec);
2886   gboolean res = FALSE;
2887 
2888   GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query);
2889 
2890   switch (GST_QUERY_TYPE (query)) {
2891     case GST_QUERY_FORMATS:
2892     {
2893       gst_query_set_formats (query, 2, GST_FORMAT_TIME, GST_FORMAT_BYTES);
2894       res = TRUE;
2895       break;
2896     }
2897     case GST_QUERY_CONVERT:
2898     {
2899       GstFormat src_fmt, dest_fmt;
2900       gint64 src_val, dest_val;
2901 
2902       gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
2903       GST_OBJECT_LOCK (dec);
2904       res = __gst_audio_encoded_audio_convert (&dec->priv->ctx.info,
2905           dec->priv->bytes_in, dec->priv->samples_out,
2906           src_fmt, src_val, &dest_fmt, &dest_val);
2907       GST_OBJECT_UNLOCK (dec);
2908       if (!res)
2909         goto error;
2910       gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
2911       break;
2912     }
2913     case GST_QUERY_ALLOCATION:
2914     {
2915       GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (dec);
2916 
2917       if (klass->propose_allocation)
2918         res = klass->propose_allocation (dec, query);
2919       break;
2920     }
2921     case GST_QUERY_CAPS:{
2922       GstCaps *filter, *caps;
2923 
2924       gst_query_parse_caps (query, &filter);
2925       caps = gst_audio_decoder_sink_getcaps (dec, filter);
2926       gst_query_set_caps_result (query, caps);
2927       gst_caps_unref (caps);
2928       res = TRUE;
2929       break;
2930     }
2931     case GST_QUERY_ACCEPT_CAPS:{
2932       if (dec->priv->use_default_pad_acceptcaps) {
2933         res =
2934             gst_pad_query_default (GST_AUDIO_DECODER_SINK_PAD (dec),
2935             GST_OBJECT_CAST (dec), query);
2936       } else {
2937         GstCaps *caps;
2938         GstCaps *allowed_caps;
2939         GstCaps *template_caps;
2940         gboolean accept;
2941 
2942         gst_query_parse_accept_caps (query, &caps);
2943 
2944         template_caps = gst_pad_get_pad_template_caps (pad);
2945         accept = gst_caps_is_subset (caps, template_caps);
2946         gst_caps_unref (template_caps);
2947 
2948         if (accept) {
2949           allowed_caps = gst_pad_query_caps (GST_AUDIO_DECODER_SINK_PAD (dec),
2950               caps);
2951 
2952           accept = gst_caps_can_intersect (caps, allowed_caps);
2953 
2954           gst_caps_unref (allowed_caps);
2955         }
2956 
2957         gst_query_set_accept_caps_result (query, accept);
2958         res = TRUE;
2959       }
2960       break;
2961     }
2962     case GST_QUERY_SEEKING:
2963     {
2964       GstFormat format;
2965 
2966       /* non-TIME segments are discarded, so we won't seek that way either */
2967       gst_query_parse_seeking (query, &format, NULL, NULL, NULL);
2968       if (format != GST_FORMAT_TIME) {
2969         GST_DEBUG_OBJECT (dec, "discarding non-TIME SEEKING query");
2970         res = FALSE;
2971         break;
2972       }
2973       /* fall-through */
2974     }
2975     default:
2976       res = gst_pad_query_default (pad, GST_OBJECT_CAST (dec), query);
2977       break;
2978   }
2979 
2980 error:
2981   return res;
2982 }
2983 
2984 static gboolean
2985 gst_audio_decoder_sink_query (GstPad * pad, GstObject * parent,
2986     GstQuery * query)
2987 {
2988   GstAudioDecoderClass *dec_class;
2989   GstAudioDecoder *dec;
2990   gboolean ret = FALSE;
2991 
2992   dec = GST_AUDIO_DECODER (parent);
2993   dec_class = GST_AUDIO_DECODER_GET_CLASS (dec);
2994 
2995   GST_DEBUG_OBJECT (pad, "received query %" GST_PTR_FORMAT, query);
2996 
2997   if (dec_class->sink_query)
2998     ret = dec_class->sink_query (dec, query);
2999 
3000   return ret;
3001 }
3002 
3003 /* FIXME ? are any of these queries (other than latency) a decoder's business ??
3004  * also, the conversion stuff might seem to make sense, but seems to not mind
3005  * segment stuff etc at all
3006  * Supposedly that's backward compatibility ... */
3007 static gboolean
3008 gst_audio_decoder_src_query_default (GstAudioDecoder * dec, GstQuery * query)
3009 {
3010   GstPad *pad = GST_AUDIO_DECODER_SRC_PAD (dec);
3011   gboolean res = FALSE;
3012 
3013   GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query);
3014 
3015   switch (GST_QUERY_TYPE (query)) {
3016     case GST_QUERY_DURATION:
3017     {
3018       GstFormat format;
3019 
3020       /* upstream in any case */
3021       if ((res = gst_pad_query_default (pad, GST_OBJECT_CAST (dec), query)))
3022         break;
3023 
3024       gst_query_parse_duration (query, &format, NULL);
3025       /* try answering TIME by converting from BYTE if subclass allows  */
3026       if (format == GST_FORMAT_TIME && gst_audio_decoder_do_byte (dec)) {
3027         gint64 value;
3028 
3029         if (gst_pad_peer_query_duration (dec->sinkpad, GST_FORMAT_BYTES,
3030                 &value)) {
3031           GST_LOG_OBJECT (dec, "upstream size %" G_GINT64_FORMAT, value);
3032           if (gst_pad_query_convert (dec->sinkpad, GST_FORMAT_BYTES, value,
3033                   GST_FORMAT_TIME, &value)) {
3034             gst_query_set_duration (query, GST_FORMAT_TIME, value);
3035             res = TRUE;
3036           }
3037         }
3038       }
3039       break;
3040     }
3041     case GST_QUERY_POSITION:
3042     {
3043       GstFormat format;
3044       gint64 time, value;
3045 
3046       if ((res = gst_pad_peer_query (dec->sinkpad, query))) {
3047         GST_LOG_OBJECT (dec, "returning peer response");
3048         break;
3049       }
3050 
3051       /* Refuse BYTES format queries. If it made sense to
3052        * answer them, upstream would have already */
3053       gst_query_parse_position (query, &format, NULL);
3054 
3055       if (format == GST_FORMAT_BYTES) {
3056         GST_LOG_OBJECT (dec, "Ignoring BYTES position query");
3057         break;
3058       }
3059 
3060       /* we start from the last seen time */
3061       time = dec->output_segment.position;
3062       /* correct for the segment values */
3063       time =
3064           gst_segment_to_stream_time (&dec->output_segment, GST_FORMAT_TIME,
3065           time);
3066 
3067       GST_LOG_OBJECT (dec,
3068           "query %p: our time: %" GST_TIME_FORMAT, query, GST_TIME_ARGS (time));
3069 
3070       /* and convert to the final format */
3071       if (!(res = gst_pad_query_convert (pad, GST_FORMAT_TIME, time,
3072                   format, &value)))
3073         break;
3074 
3075       gst_query_set_position (query, format, value);
3076 
3077       GST_LOG_OBJECT (dec,
3078           "query %p: we return %" G_GINT64_FORMAT " (format %u)", query, value,
3079           format);
3080       break;
3081     }
3082     case GST_QUERY_FORMATS:
3083     {
3084       gst_query_set_formats (query, 3,
3085           GST_FORMAT_TIME, GST_FORMAT_BYTES, GST_FORMAT_DEFAULT);
3086       res = TRUE;
3087       break;
3088     }
3089     case GST_QUERY_CONVERT:
3090     {
3091       GstFormat src_fmt, dest_fmt;
3092       gint64 src_val, dest_val;
3093 
3094       gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
3095       GST_OBJECT_LOCK (dec);
3096       res = gst_audio_info_convert (&dec->priv->ctx.info,
3097           src_fmt, src_val, dest_fmt, &dest_val);
3098       GST_OBJECT_UNLOCK (dec);
3099       if (!res)
3100         break;
3101       gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
3102       break;
3103     }
3104     case GST_QUERY_LATENCY:
3105     {
3106       if ((res = gst_pad_peer_query (dec->sinkpad, query))) {
3107         gboolean live;
3108         GstClockTime min_latency, max_latency;
3109 
3110         gst_query_parse_latency (query, &live, &min_latency, &max_latency);
3111         GST_DEBUG_OBJECT (dec, "Peer latency: live %d, min %"
3112             GST_TIME_FORMAT " max %" GST_TIME_FORMAT, live,
3113             GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));
3114 
3115         GST_OBJECT_LOCK (dec);
3116         /* add our latency */
3117         min_latency += dec->priv->ctx.min_latency;
3118         if (max_latency == -1 || dec->priv->ctx.max_latency == -1)
3119           max_latency = -1;
3120         else
3121           max_latency += dec->priv->ctx.max_latency;
3122         GST_OBJECT_UNLOCK (dec);
3123 
3124         gst_query_set_latency (query, live, min_latency, max_latency);
3125       }
3126       break;
3127     }
3128     default:
3129       res = gst_pad_query_default (pad, GST_OBJECT_CAST (dec), query);
3130       break;
3131   }
3132 
3133   return res;
3134 }
3135 
3136 static gboolean
3137 gst_audio_decoder_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
3138 {
3139   GstAudioDecoder *dec;
3140   GstAudioDecoderClass *dec_class;
3141   gboolean ret = FALSE;
3142 
3143   dec = GST_AUDIO_DECODER (parent);
3144   dec_class = GST_AUDIO_DECODER_GET_CLASS (dec);
3145 
3146   GST_DEBUG_OBJECT (pad, "received query %" GST_PTR_FORMAT, query);
3147 
3148   if (dec_class->src_query)
3149     ret = dec_class->src_query (dec, query);
3150 
3151   return ret;
3152 }
3153 
3154 static gboolean
3155 gst_audio_decoder_stop (GstAudioDecoder * dec)
3156 {
3157   GstAudioDecoderClass *klass;
3158   gboolean ret = TRUE;
3159 
3160   GST_DEBUG_OBJECT (dec, "gst_audio_decoder_stop");
3161 
3162   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
3163 
3164   if (klass->stop) {
3165     ret = klass->stop (dec);
3166   }
3167 
3168   /* clean up */
3169   gst_audio_decoder_reset (dec, TRUE);
3170 
3171   if (ret)
3172     dec->priv->active = FALSE;
3173 
3174   return ret;
3175 }
3176 
3177 static gboolean
3178 gst_audio_decoder_start (GstAudioDecoder * dec)
3179 {
3180   GstAudioDecoderClass *klass;
3181   gboolean ret = TRUE;
3182 
3183   GST_DEBUG_OBJECT (dec, "gst_audio_decoder_start");
3184 
3185   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
3186 
3187   /* arrange clean state */
3188   gst_audio_decoder_reset (dec, TRUE);
3189 
3190   if (klass->start) {
3191     ret = klass->start (dec);
3192   }
3193 
3194   if (ret)
3195     dec->priv->active = TRUE;
3196 
3197   return ret;
3198 }
3199 
3200 static void
3201 gst_audio_decoder_get_property (GObject * object, guint prop_id,
3202     GValue * value, GParamSpec * pspec)
3203 {
3204   GstAudioDecoder *dec;
3205 
3206   dec = GST_AUDIO_DECODER (object);
3207 
3208   switch (prop_id) {
3209     case PROP_LATENCY:
3210       g_value_set_int64 (value, dec->priv->latency);
3211       break;
3212     case PROP_TOLERANCE:
3213       g_value_set_int64 (value, dec->priv->tolerance);
3214       break;
3215     case PROP_PLC:
3216       g_value_set_boolean (value, dec->priv->plc);
3217       break;
3218     case PROP_MAX_ERRORS:
3219       g_value_set_int (value, gst_audio_decoder_get_max_errors (dec));
3220       break;
3221     default:
3222       G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
3223       break;
3224   }
3225 }
3226 
3227 static void
3228 gst_audio_decoder_set_property (GObject * object, guint prop_id,
3229     const GValue * value, GParamSpec * pspec)
3230 {
3231   GstAudioDecoder *dec;
3232 
3233   dec = GST_AUDIO_DECODER (object);
3234 
3235   switch (prop_id) {
3236     case PROP_LATENCY:
3237       dec->priv->latency = g_value_get_int64 (value);
3238       break;
3239     case PROP_TOLERANCE:
3240       dec->priv->tolerance = g_value_get_int64 (value);
3241       break;
3242     case PROP_PLC:
3243       dec->priv->plc = g_value_get_boolean (value);
3244       break;
3245     case PROP_MAX_ERRORS:
3246       gst_audio_decoder_set_max_errors (dec, g_value_get_int (value));
3247       break;
3248     default:
3249       G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
3250       break;
3251   }
3252 }
3253 
3254 static GstStateChangeReturn
3255 gst_audio_decoder_change_state (GstElement * element, GstStateChange transition)
3256 {
3257   GstAudioDecoder *codec;
3258   GstAudioDecoderClass *klass;
3259   GstStateChangeReturn ret;
3260 
3261   codec = GST_AUDIO_DECODER (element);
3262   klass = GST_AUDIO_DECODER_GET_CLASS (codec);
3263 
3264   switch (transition) {
3265     case GST_STATE_CHANGE_NULL_TO_READY:
3266       if (klass->open) {
3267         if (!klass->open (codec))
3268           goto open_failed;
3269       }
3270       break;
3271     case GST_STATE_CHANGE_READY_TO_PAUSED:
3272       if (!gst_audio_decoder_start (codec)) {
3273         goto start_failed;
3274       }
3275       break;
3276     case GST_STATE_CHANGE_PAUSED_TO_PLAYING:
3277       break;
3278     default:
3279       break;
3280   }
3281 
3282   ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
3283 
3284   switch (transition) {
3285     case GST_STATE_CHANGE_PLAYING_TO_PAUSED:
3286       break;
3287     case GST_STATE_CHANGE_PAUSED_TO_READY:
3288       if (!gst_audio_decoder_stop (codec)) {
3289         goto stop_failed;
3290       }
3291       break;
3292     case GST_STATE_CHANGE_READY_TO_NULL:
3293       if (klass->close) {
3294         if (!klass->close (codec))
3295           goto close_failed;
3296       }
3297       break;
3298     default:
3299       break;
3300   }
3301 
3302   return ret;
3303 
3304 start_failed:
3305   {
3306     GST_ELEMENT_ERROR (codec, LIBRARY, INIT, (NULL), ("Failed to start codec"));
3307     return GST_STATE_CHANGE_FAILURE;
3308   }
3309 stop_failed:
3310   {
3311     GST_ELEMENT_ERROR (codec, LIBRARY, INIT, (NULL), ("Failed to stop codec"));
3312     return GST_STATE_CHANGE_FAILURE;
3313   }
3314 open_failed:
3315   {
3316     GST_ELEMENT_ERROR (codec, LIBRARY, INIT, (NULL), ("Failed to open codec"));
3317     return GST_STATE_CHANGE_FAILURE;
3318   }
3319 close_failed:
3320   {
3321     GST_ELEMENT_ERROR (codec, LIBRARY, INIT, (NULL), ("Failed to close codec"));
3322     return GST_STATE_CHANGE_FAILURE;
3323   }
3324 }
3325 
3326 GstFlowReturn
3327 _gst_audio_decoder_error (GstAudioDecoder * dec, gint weight,
3328     GQuark domain, gint code, gchar * txt, gchar * dbg, const gchar * file,
3329     const gchar * function, gint line)
3330 {
3331   if (txt)
3332     GST_WARNING_OBJECT (dec, "error: %s", txt);
3333   if (dbg)
3334     GST_WARNING_OBJECT (dec, "error: %s", dbg);
3335   dec->priv->error_count += weight;
3336   dec->priv->discont = TRUE;
3337   if (dec->priv->max_errors >= 0
3338       && dec->priv->max_errors < dec->priv->error_count) {
3339     gst_element_message_full (GST_ELEMENT (dec), GST_MESSAGE_ERROR, domain,
3340         code, txt, dbg, file, function, line);
3341     return GST_FLOW_ERROR;
3342   } else {
3343     g_free (txt);
3344     g_free (dbg);
3345     return GST_FLOW_OK;
3346   }
3347 }
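/* Note for subclass implementors: rather than calling _gst_audio_decoder_error()
 * directly, use the GST_AUDIO_DECODER_ERROR() convenience macro, which fills in
 * file/function/line and lets the error-count/max-errors logic above decide
 * whether the problem is merely warned about or becomes fatal.
 *
 * A minimal sketch of typical use in a subclass's handle_frame implementation
 * (gst_my_dec_* names are hypothetical, for illustration only):
 *
 *   GstFlowReturn ret = GST_FLOW_OK;
 *
 *   if (!gst_my_dec_decode_packet (self, buffer)) {
 *     GST_AUDIO_DECODER_ERROR (self, 1, STREAM, DECODE, (NULL),
 *         ("could not decode packet"), ret);
 *     if (ret != GST_FLOW_OK)
 *       return ret;
 *     return gst_audio_decoder_finish_frame (GST_AUDIO_DECODER (self), NULL, 1);
 *   }
 */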
3348 
3349 /**
3350  * gst_audio_decoder_get_audio_info:
3351  * @dec: a #GstAudioDecoder
3352  *
3353  * Returns: (transfer none): a #GstAudioInfo describing the output (decoded) audio format
3354  */
3355 GstAudioInfo *
3356 gst_audio_decoder_get_audio_info (GstAudioDecoder * dec)
3357 {
3358   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), NULL);
3359 
3360   return &dec->priv->ctx.info;
3361 }
3362 
3363 /**
3364  * gst_audio_decoder_set_plc_aware:
3365  * @dec: a #GstAudioDecoder
3366  * @plc: new plc state
3367  *
3368  * Indicates whether or not the subclass handles packet loss concealment (plc).
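 *
 * A minimal sketch (gst_my_dec_start is a hypothetical subclass vfunc, for
 * illustration only): a plc-capable subclass would typically declare this
 * from its @start vfunc and later consult gst_audio_decoder_get_plc() in
 * @handle_frame to see whether the user actually enabled concealment:
 *
 * |[<!-- language="C" -->
 *   static gboolean
 *   gst_my_dec_start (GstAudioDecoder * dec)
 *   {
 *     gst_audio_decoder_set_plc_aware (dec, TRUE);
 *     return TRUE;
 *   }
 * ]|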
3369  */
3370 void
3371 gst_audio_decoder_set_plc_aware (GstAudioDecoder * dec, gboolean plc)
3372 {
3373   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3374 
3375   dec->priv->ctx.do_plc = plc;
3376 }
3377 
3378 /**
3379  * gst_audio_decoder_get_plc_aware:
3380  * @dec: a #GstAudioDecoder
3381  *
3382  * Returns: currently configured plc handling
3383  */
3384 gint
3385 gst_audio_decoder_get_plc_aware (GstAudioDecoder * dec)
3386 {
3387   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0);
3388 
3389   return dec->priv->ctx.do_plc;
3390 }
3391 
3392 /**
3393  * gst_audio_decoder_set_estimate_rate:
3394  * @dec: a #GstAudioDecoder
3395  * @enabled: whether to enable byte to time conversion
3396  *
3397  * Allows the base class to perform estimated byte-to-time conversion.
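 *
 * A minimal sketch (illustrative only): a subclass that wants the base class
 * to answer TIME duration/position queries by estimating from upstream BYTE
 * sizes, as in the query handling above, could enable this from its @start
 * vfunc:
 *
 * |[<!-- language="C" -->
 *   gst_audio_decoder_set_estimate_rate (dec, TRUE);
 * ]|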
3398  */
3399 void
3400 gst_audio_decoder_set_estimate_rate (GstAudioDecoder * dec, gboolean enabled)
3401 {
3402   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3403 
3404   dec->priv->ctx.do_estimate_rate = enabled;
3405 }
3406 
3407 /**
3408  * gst_audio_decoder_get_estimate_rate:
3409  * @dec: a #GstAudioDecoder
3410  *
3411  * Returns: currently configured byte to time conversion setting
3412  */
3413 gint
3414 gst_audio_decoder_get_estimate_rate (GstAudioDecoder * dec)
3415 {
3416   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0);
3417 
3418   return dec->priv->ctx.do_estimate_rate;
3419 }
3420 
3421 /**
3422  * gst_audio_decoder_get_delay:
3423  * @dec: a #GstAudioDecoder
3424  *
3425  * Returns: currently configured decoder delay
3426  */
3427 gint
3428 gst_audio_decoder_get_delay (GstAudioDecoder * dec)
3429 {
3430   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0);
3431 
3432   return dec->priv->ctx.delay;
3433 }
3434 
3435 /**
3436  * gst_audio_decoder_set_max_errors:
3437  * @dec: a #GstAudioDecoder
3438  * @num: max tolerated errors
3439  *
3440  * Sets the number of tolerated decoder errors: a tolerated error is only
3441  * warned about, but exceeding the tolerance leads to a fatal error. Set
3442  * -1 to never return a fatal error. The default is
3443  * GST_AUDIO_DECODER_MAX_ERRORS.
3444  */
3445 void
3446 gst_audio_decoder_set_max_errors (GstAudioDecoder * dec, gint num)
3447 {
3448   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3449 
3450   dec->priv->max_errors = num;
3451 }
3452 
3453 /**
3454  * gst_audio_decoder_get_max_errors:
3455  * @dec: a #GstAudioDecoder
3456  *
3457  * Returns: currently configured decoder tolerated error count.
3458  */
3459 gint
3460 gst_audio_decoder_get_max_errors (GstAudioDecoder * dec)
3461 {
3462   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0);
3463 
3464   return dec->priv->max_errors;
3465 }
3466 
3467 /**
3468  * gst_audio_decoder_set_latency:
3469  * @dec: a #GstAudioDecoder
3470  * @min: minimum latency
3471  * @max: maximum latency
3472  *
3473  * Sets decoder latency.
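 *
 * A minimal sketch, e.g. from a subclass's @set_format once the codec's
 * algorithmic delay is known (frame_samples and rate are hypothetical
 * variables, for illustration only):
 *
 * |[<!-- language="C" -->
 *   GstClockTime latency =
 *       gst_util_uint64_scale (frame_samples, GST_SECOND, rate);
 *
 *   gst_audio_decoder_set_latency (dec, latency, latency);
 * ]|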
3474  */
3475 void
3476 gst_audio_decoder_set_latency (GstAudioDecoder * dec,
3477     GstClockTime min, GstClockTime max)
3478 {
3479   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3480   g_return_if_fail (GST_CLOCK_TIME_IS_VALID (min));
3481   g_return_if_fail (min <= max);
3482 
3483   GST_OBJECT_LOCK (dec);
3484   dec->priv->ctx.min_latency = min;
3485   dec->priv->ctx.max_latency = max;
3486   GST_OBJECT_UNLOCK (dec);
3487 
3488   /* post latency message on the bus */
3489   gst_element_post_message (GST_ELEMENT (dec),
3490       gst_message_new_latency (GST_OBJECT (dec)));
3491 }
3492 
3493 /**
3494  * gst_audio_decoder_get_latency:
3495  * @dec: a #GstAudioDecoder
3496  * @min: (out) (allow-none): a pointer to storage to hold minimum latency
3497  * @max: (out) (allow-none): a pointer to storage to hold maximum latency
3498  *
3499  * Sets the variables pointed to by @min and @max to the currently configured
3500  * latency.
3501  */
3502 void
3503 gst_audio_decoder_get_latency (GstAudioDecoder * dec,
3504     GstClockTime * min, GstClockTime * max)
3505 {
3506   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3507 
3508   GST_OBJECT_LOCK (dec);
3509   if (min)
3510     *min = dec->priv->ctx.min_latency;
3511   if (max)
3512     *max = dec->priv->ctx.max_latency;
3513   GST_OBJECT_UNLOCK (dec);
3514 }
3515 
3516 /**
3517  * gst_audio_decoder_get_parse_state:
3518  * @dec: a #GstAudioDecoder
3519  * @sync: (out) (optional): a pointer to a variable to hold the current sync state
3520  * @eos: (out) (optional): a pointer to a variable to hold the current eos state
3521  *
3522  * Returns the current parsing (sync and eos) state.
3523  */
3524 void
3525 gst_audio_decoder_get_parse_state (GstAudioDecoder * dec,
3526     gboolean * sync, gboolean * eos)
3527 {
3528   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3529 
3530   if (sync)
3531     *sync = dec->priv->ctx.sync;
3532   if (eos)
3533     *eos = dec->priv->ctx.eos;
3534 }
3535 
3536 /**
3537  * gst_audio_decoder_set_allocation_caps:
3538  * @dec: a #GstAudioDecoder
3539  * @allocation_caps: (allow-none): a #GstCaps or %NULL
3540  *
3541  * Sets the caps to use in the allocation query when they differ from the
3542  * caps set on the pad. Use this function before calling
3543  * gst_audio_decoder_negotiate(). When set to %NULL, the allocation
3544  * query will use the caps from the pad.
3545  *
3546  * Since: 1.10
3547  */
3548 void
3549 gst_audio_decoder_set_allocation_caps (GstAudioDecoder * dec,
3550     GstCaps * allocation_caps)
3551 {
3552   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3553 
3554   gst_caps_replace (&dec->priv->ctx.allocation_caps, allocation_caps);
3555 }
3556 
3557 /**
3558  * gst_audio_decoder_set_plc:
3559  * @dec: a #GstAudioDecoder
3560  * @enabled: new state
3561  *
3562  * Enables or disables decoder packet loss concealment, provided the subclass
3563  * and codec are capable of and allow handling plc.
3564  *
3565  * MT safe.
3566  */
3567 void
3568 gst_audio_decoder_set_plc (GstAudioDecoder * dec, gboolean enabled)
3569 {
3570   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3571 
3572   GST_LOG_OBJECT (dec, "enabled: %d", enabled);
3573 
3574   GST_OBJECT_LOCK (dec);
3575   dec->priv->plc = enabled;
3576   GST_OBJECT_UNLOCK (dec);
3577 }
3578 
3579 /**
3580  * gst_audio_decoder_get_plc:
3581  * @dec: a #GstAudioDecoder
3582  *
3583  * Queries decoder packet loss concealment handling.
3584  *
3585  * Returns: TRUE if packet loss concealment is enabled.
3586  *
3587  * MT safe.
3588  */
3589 gboolean
3590 gst_audio_decoder_get_plc (GstAudioDecoder * dec)
3591 {
3592   gboolean result;
3593 
3594   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
3595 
3596   GST_OBJECT_LOCK (dec);
3597   result = dec->priv->plc;
3598   GST_OBJECT_UNLOCK (dec);
3599 
3600   return result;
3601 }
3602 
3603 /**
3604  * gst_audio_decoder_set_min_latency:
3605  * @dec: a #GstAudioDecoder
3606  * @num: new minimum latency
3607  *
3608  * Sets decoder minimum aggregation latency.
3609  *
3610  * MT safe.
3611  */
3612 void
3613 gst_audio_decoder_set_min_latency (GstAudioDecoder * dec, GstClockTime num)
3614 {
3615   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3616   g_return_if_fail (GST_CLOCK_TIME_IS_VALID (num));
3617 
3618   GST_OBJECT_LOCK (dec);
3619   dec->priv->latency = num;
3620   GST_OBJECT_UNLOCK (dec);
3621 }
3622 
3623 /**
3624  * gst_audio_decoder_get_min_latency:
3625  * @dec: a #GstAudioDecoder
3626  *
3627  * Queries decoder's latency aggregation.
3628  *
3629  * Returns: aggregation latency.
3630  *
3631  * MT safe.
3632  */
3633 GstClockTime
3634 gst_audio_decoder_get_min_latency (GstAudioDecoder * dec)
3635 {
3636   GstClockTime result;
3637 
3638   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
3639 
3640   GST_OBJECT_LOCK (dec);
3641   result = dec->priv->latency;
3642   GST_OBJECT_UNLOCK (dec);
3643 
3644   return result;
3645 }
3646 
3647 /**
3648  * gst_audio_decoder_set_tolerance:
3649  * @dec: a #GstAudioDecoder
3650  * @tolerance: new tolerance
3651  *
3652  * Configures decoder audio jitter tolerance threshold.
3653  *
3654  * MT safe.
3655  */
3656 void
3657 gst_audio_decoder_set_tolerance (GstAudioDecoder * dec, GstClockTime tolerance)
3658 {
3659   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3660   g_return_if_fail (GST_CLOCK_TIME_IS_VALID (tolerance));
3661 
3662   GST_OBJECT_LOCK (dec);
3663   dec->priv->tolerance = tolerance;
3664   GST_OBJECT_UNLOCK (dec);
3665 }
3666 
3667 /**
3668  * gst_audio_decoder_get_tolerance:
3669  * @dec: a #GstAudioDecoder
3670  *
3671  * Queries current audio jitter tolerance threshold.
3672  *
3673  * Returns: decoder audio jitter tolerance threshold.
3674  *
3675  * MT safe.
3676  */
3677 GstClockTime
3678 gst_audio_decoder_get_tolerance (GstAudioDecoder * dec)
3679 {
3680   GstClockTime result;
3681 
3682   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0);
3683 
3684   GST_OBJECT_LOCK (dec);
3685   result = dec->priv->tolerance;
3686   GST_OBJECT_UNLOCK (dec);
3687 
3688   return result;
3689 }
3690 
3691 /**
3692  * gst_audio_decoder_set_drainable:
3693  * @dec: a #GstAudioDecoder
3694  * @enabled: new state
3695  *
3696  * Configures decoder drain handling.  If drainable, the subclass might
3697  * be handed a NULL buffer so it can return any leftover decoded data.
3698  * Otherwise, it is not considered capable of draining and will only ever
3699  * be passed real data.
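 *
 * A minimal sketch of the corresponding @handle_frame logic in a drainable
 * subclass (gst_my_dec_* and GST_MY_DEC names are hypothetical, for
 * illustration only):
 *
 * |[<!-- language="C" -->
 *   static GstFlowReturn
 *   gst_my_dec_handle_frame (GstAudioDecoder * dec, GstBuffer * buffer)
 *   {
 *     if (buffer == NULL)
 *       return gst_my_dec_push_remaining (GST_MY_DEC (dec));
 *
 *     return gst_my_dec_decode (GST_MY_DEC (dec), buffer);
 *   }
 * ]|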
3700  *
3701  * MT safe.
3702  */
3703 void
3704 gst_audio_decoder_set_drainable (GstAudioDecoder * dec, gboolean enabled)
3705 {
3706   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3707 
3708   GST_OBJECT_LOCK (dec);
3709   dec->priv->drainable = enabled;
3710   GST_OBJECT_UNLOCK (dec);
3711 }
3712 
3713 /**
3714  * gst_audio_decoder_get_drainable:
3715  * @dec: a #GstAudioDecoder
3716  *
3717  * Queries decoder drain handling.
3718  *
3719  * Returns: TRUE if drainable handling is enabled.
3720  *
3721  * MT safe.
3722  */
3723 gboolean
3724 gst_audio_decoder_get_drainable (GstAudioDecoder * dec)
3725 {
3726   gboolean result;
3727 
3728   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0);
3729 
3730   GST_OBJECT_LOCK (dec);
3731   result = dec->priv->drainable;
3732   GST_OBJECT_UNLOCK (dec);
3733 
3734   return result;
3735 }
3736 
3737 /**
3738  * gst_audio_decoder_set_needs_format:
3739  * @dec: a #GstAudioDecoder
3740  * @enabled: new state
3741  *
3742  * Configures decoder format needs.  If enabled, the subclass needs to be
3743  * negotiated with format caps before it can process any data.  It will then
3744  * never be handed any data before it has been configured.
3745  * Otherwise, it might be handed data without having been configured and
3746  * is then expected to be able to process it, either by default
3747  * or based on the input data.
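 *
 * A minimal sketch (illustrative only), typically called from the subclass's
 * instance init function:
 *
 * |[<!-- language="C" -->
 *   gst_audio_decoder_set_needs_format (GST_AUDIO_DECODER (self), TRUE);
 * ]|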
3748  *
3749  * MT safe.
3750  */
3751 void
3752 gst_audio_decoder_set_needs_format (GstAudioDecoder * dec, gboolean enabled)
3753 {
3754   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3755 
3756   GST_OBJECT_LOCK (dec);
3757   dec->priv->needs_format = enabled;
3758   GST_OBJECT_UNLOCK (dec);
3759 }
3760 
3761 /**
3762  * gst_audio_decoder_get_needs_format:
3763  * @dec: a #GstAudioDecoder
3764  *
3765  * Queries decoder required format handling.
3766  *
3767  * Returns: TRUE if required format handling is enabled.
3768  *
3769  * MT safe.
3770  */
3771 gboolean
3772 gst_audio_decoder_get_needs_format (GstAudioDecoder * dec)
3773 {
3774   gboolean result;
3775 
3776   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
3777 
3778   GST_OBJECT_LOCK (dec);
3779   result = dec->priv->needs_format;
3780   GST_OBJECT_UNLOCK (dec);
3781 
3782   return result;
3783 }
3784 
3785 /**
3786  * gst_audio_decoder_merge_tags:
3787  * @dec: a #GstAudioDecoder
3788  * @tags: (allow-none): a #GstTagList to merge, or NULL
3789  * @mode: the #GstTagMergeMode to use, usually #GST_TAG_MERGE_REPLACE
3790  *
3791  * Sets the audio decoder tags and how they should be merged with any
3792  * upstream stream tags. This will override any tags previously set
3793  * with gst_audio_decoder_merge_tags().
3794  *
3795  * Note that this is provided for convenience, and the subclass is
3796  * not required to use this and can still do tag handling on its own.
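 *
 * A minimal sketch (tag values are illustrative only):
 *
 * |[<!-- language="C" -->
 *   GstTagList *tags = gst_tag_list_new (GST_TAG_AUDIO_CODEC, "My Codec",
 *       GST_TAG_BITRATE, (guint) 128000, NULL);
 *
 *   gst_audio_decoder_merge_tags (dec, tags, GST_TAG_MERGE_REPLACE);
 *   gst_tag_list_unref (tags);
 * ]|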
3797  */
3798 void
3799 gst_audio_decoder_merge_tags (GstAudioDecoder * dec,
3800     const GstTagList * tags, GstTagMergeMode mode)
3801 {
3802   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3803   g_return_if_fail (tags == NULL || GST_IS_TAG_LIST (tags));
3804   g_return_if_fail (mode != GST_TAG_MERGE_UNDEFINED);
3805 
3806   GST_AUDIO_DECODER_STREAM_LOCK (dec);
3807   if (dec->priv->taglist != tags) {
3808     if (dec->priv->taglist) {
3809       gst_tag_list_unref (dec->priv->taglist);
3810       dec->priv->taglist = NULL;
3811       dec->priv->decoder_tags_merge_mode = GST_TAG_MERGE_KEEP_ALL;
3812     }
3813     if (tags) {
3814       dec->priv->taglist = gst_tag_list_ref ((GstTagList *) tags);
3815       dec->priv->decoder_tags_merge_mode = mode;
3816     }
3817 
3818     GST_DEBUG_OBJECT (dec, "setting decoder tags to %" GST_PTR_FORMAT, tags);
3819     dec->priv->taglist_changed = TRUE;
3820   }
3821   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
3822 }
3823 
3824 /**
3825  * gst_audio_decoder_allocate_output_buffer:
3826  * @dec: a #GstAudioDecoder
3827  * @size: size of the buffer
3828  *
3829  * Helper function that allocates a buffer to hold an audio frame
3830  * for @dec's current output format.
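 *
 * A minimal sketch of typical use from @handle_frame, where decoded,
 * out_samples and bpf (bytes per audio frame) are hypothetical variables:
 *
 * |[<!-- language="C" -->
 *   GstBuffer *out =
 *       gst_audio_decoder_allocate_output_buffer (dec, out_samples * bpf);
 *
 *   gst_buffer_fill (out, 0, decoded, out_samples * bpf);
 *   ret = gst_audio_decoder_finish_frame (dec, out, 1);
 * ]|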
3831  *
3832  * Returns: (transfer full): allocated buffer
3833  */
3834 GstBuffer *
3835 gst_audio_decoder_allocate_output_buffer (GstAudioDecoder * dec, gsize size)
3836 {
3837   GstBuffer *buffer = NULL;
3838   gboolean needs_reconfigure = FALSE;
3839 
3840   g_return_val_if_fail (size > 0, NULL);
3841 
3842   GST_DEBUG ("alloc src buffer");
3843 
3844   GST_AUDIO_DECODER_STREAM_LOCK (dec);
3845 
3846   needs_reconfigure = gst_pad_check_reconfigure (dec->srcpad);
3847   if (G_UNLIKELY (dec->priv->ctx.output_format_changed ||
3848           (GST_AUDIO_INFO_IS_VALID (&dec->priv->ctx.info)
3849               && needs_reconfigure))) {
3850     if (!gst_audio_decoder_negotiate_unlocked (dec)) {
3851       GST_INFO_OBJECT (dec, "Failed to negotiate, fallback allocation");
3852       gst_pad_mark_reconfigure (dec->srcpad);
3853       goto fallback;
3854     }
3855   }
3856 
3857   buffer =
3858       gst_buffer_new_allocate (dec->priv->ctx.allocator, size,
3859       &dec->priv->ctx.params);
3860   if (!buffer) {
3861     GST_INFO_OBJECT (dec, "couldn't allocate output buffer");
3862     goto fallback;
3863   }
3864 
3865   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
3866 
3867   return buffer;
3868 fallback:
3869   buffer = gst_buffer_new_allocate (NULL, size, NULL);
3870   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
3871 
3872   return buffer;
3873 }
3874 
3875 /**
3876  * gst_audio_decoder_get_allocator:
3877  * @dec: a #GstAudioDecoder
3878  * @allocator: (out) (allow-none) (transfer full): the #GstAllocator
3879  * used
3880  * @params: (out) (allow-none) (transfer full): the
3881  * #GstAllocationParams of @allocator
3882  *
3883  * Lets #GstAudioDecoder sub-classes know the memory @allocator
3884  * used by the base class and its @params.
3885  *
3886  * Unref the @allocator after use.
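 *
 * A minimal sketch (size is a hypothetical variable, for illustration only):
 *
 * |[<!-- language="C" -->
 *   GstAllocator *allocator = NULL;
 *   GstAllocationParams params;
 *   GstMemory *mem;
 *
 *   gst_audio_decoder_get_allocator (dec, &allocator, &params);
 *   mem = gst_allocator_alloc (allocator, size, &params);
 *   if (allocator)
 *     gst_object_unref (allocator);
 * ]|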
3887  */
3888 void
3889 gst_audio_decoder_get_allocator (GstAudioDecoder * dec,
3890     GstAllocator ** allocator, GstAllocationParams * params)
3891 {
3892   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3893 
3894   if (allocator)
3895     *allocator = dec->priv->ctx.allocator ?
3896         gst_object_ref (dec->priv->ctx.allocator) : NULL;
3897 
3898   if (params)
3899     *params = dec->priv->ctx.params;
3900 }
3901 
3902 /**
3903  * gst_audio_decoder_set_use_default_pad_acceptcaps:
3904  * @decoder: a #GstAudioDecoder
3905  * @use: if the default pad accept-caps query handling should be used
3906  *
3907  * Lets #GstAudioDecoder sub-classes decide if they want the sink pad
3908  * to use the default pad query handler to reply to accept-caps queries.
3909  *
3910  * By setting this to %TRUE it is possible to further customize the default
3911  * handler with %GST_PAD_SET_ACCEPT_INTERSECT and
3912  * %GST_PAD_SET_ACCEPT_TEMPLATE.
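 *
 * A minimal sketch (illustrative only), typically from the subclass's
 * instance init function:
 *
 * |[<!-- language="C" -->
 *   gst_audio_decoder_set_use_default_pad_acceptcaps (decoder, TRUE);
 *   GST_PAD_SET_ACCEPT_TEMPLATE (GST_AUDIO_DECODER_SINK_PAD (decoder));
 * ]|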
3913  *
3914  * Since: 1.6
3915  */
3916 void
3917 gst_audio_decoder_set_use_default_pad_acceptcaps (GstAudioDecoder * decoder,
3918     gboolean use)
3919 {
3920   decoder->priv->use_default_pad_acceptcaps = use;
3921 }
3922