1 /* GStreamer
2  * Copyright (C) 2009 Igalia S.L.
3  * Author: Iago Toral Quiroga <itoral@igalia.com>
4  * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
5  * Copyright (C) 2011 Nokia Corporation. All rights reserved.
6  *   Contact: Stefan Kost <stefan.kost@nokia.com>
7  *
8  * This library is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Library General Public
10  * License as published by the Free Software Foundation; either
11  * version 2 of the License, or (at your option) any later version.
12  *
13  * This library is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * Library General Public License for more details.
17  *
18  * You should have received a copy of the GNU Library General Public
19  * License along with this library; if not, write to the
20  * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
21  * Boston, MA 02110-1301, USA.
22  */
23 
24 /**
25  * SECTION:gstaudiodecoder
26  * @title: GstAudioDecoder
27  * @short_description: Base class for audio decoders
28  * @see_also: #GstBaseTransform
29  *
30  * This base class is for audio decoders turning encoded data into
31  * raw audio samples.
32  *
33  * GstAudioDecoder and subclass should cooperate as follows.
34  *
35  * ## Configuration
36  *
37  *   * Initially, GstAudioDecoder calls @start when the decoder element
38  *     is activated, which allows subclass to perform any global setup.
39  *     Base class (context) parameters can already be set according to subclass
 40  *     capabilities (or possibly upon receiving more information in subsequent
41  *     @set_format).
42  *   * GstAudioDecoder calls @set_format to inform subclass of the format
43  *     of input audio data that it is about to receive.
44  *     While unlikely, it might be called more than once, if changing input
45  *     parameters require reconfiguration.
46  *   * GstAudioDecoder calls @stop at end of all processing.
47  *
48  * As of configuration stage, and throughout processing, GstAudioDecoder
49  * provides various (context) parameters, e.g. describing the format of
50  * output audio data (valid when output caps have been set) or current parsing state.
51  * Conversely, subclass can and should configure context to inform
52  * base class of its expectation w.r.t. buffer handling.
53  *
54  * ## Data processing
55  *     * Base class gathers input data, and optionally allows subclass
56  *       to parse this into subsequently manageable (as defined by subclass)
57  *       chunks.  Such chunks are subsequently referred to as 'frames',
58  *       though they may or may not correspond to 1 (or more) audio format frame.
59  *     * Input frame is provided to subclass' @handle_frame.
60  *     * If codec processing results in decoded data, subclass should call
61  *       @gst_audio_decoder_finish_frame to have decoded data pushed
62  *       downstream.
63  *     * Just prior to actually pushing a buffer downstream,
64  *       it is passed to @pre_push.  Subclass should either use this callback
65  *       to arrange for additional downstream pushing or otherwise ensure such
66  *       custom pushing occurs after at least a method call has finished since
67  *       setting src pad caps.
68  *     * During the parsing process GstAudioDecoderClass will handle both
69  *       srcpad and sinkpad events. Sink events will be passed to subclass
70  *       if @event callback has been provided.
71  *
72  * ## Shutdown phase
73  *
74  *   * GstAudioDecoder class calls @stop to inform the subclass that data
75  *     parsing will be stopped.
76  *
77  * Subclass is responsible for providing pad template caps for
78  * source and sink pads. The pads need to be named "sink" and "src". It also
 79  * needs to set fixed caps on the srcpad when the format is known.  This
80  * is typically when base class calls subclass' @set_format function, though
81  * it might be delayed until calling @gst_audio_decoder_finish_frame.
82  *
 83  * In summary, the above process should have the subclass concentrating on
 84  * codec data processing while leaving other matters to the base class,
 85  * most notably timestamp handling.  While the subclass may exert more control
 86  * in this area (see e.g. @pre_push), doing so is very much not recommended.
87  *
88  * In particular, base class will try to arrange for perfect output timestamps
89  * as much as possible while tracking upstream timestamps.
90  * To this end, if deviation between the next ideal expected perfect timestamp
91  * and upstream exceeds #GstAudioDecoder:tolerance, then resync to upstream
92  * occurs (which would happen always if the tolerance mechanism is disabled).
93  *
 94  * In non-live pipelines, the base class can also (configurably) arrange for
 95  * output buffer aggregation, which may help to reduce large(r) numbers of
 96  * small(er) buffers being pushed and processed downstream. Note that this
97  * feature is only available if the buffer layout is interleaved. For planar
98  * buffers, the decoder implementation is fully responsible for the output
99  * buffer size.
100  *
 101  * On the other hand, it should be noted that the base class only provides limited
 102  * seeking support (upon explicit subclass request), as full-fledged support
 103  * should rather be left to an upstream demuxer, parser or the like.  This simple
104  * approach caters for seeking and duration reporting using estimated input
105  * bitrates.
106  *
 107  * Things that the subclass needs to take care of:
108  *
109  *   * Provide pad templates
110  *   * Set source pad caps when appropriate
111  *   * Set user-configurable properties to sane defaults for format and
112  *      implementing codec at hand, and convey some subclass capabilities and
113  *      expectations in context.
114  *
 115  *   * Accept data in @handle_frame and provide decoded results to
 116  *      @gst_audio_decoder_finish_frame.  If it is prepared to perform
 117  *      PLC, it should also accept NULL data in @handle_frame and provide
 118  *      data for the indicated duration.
119  *
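 *
 * Below is a minimal, hedged sketch of such a subclass.  It is purely
 * illustrative: MyDec, MyDecClass, my_dec_set_format() and
 * my_dec_handle_frame() are hypothetical names, and the fixed S16/48000/stereo
 * output plus the silence "decoding" merely stand in for real codec setup and
 * decoding.
 *
 * |[<!-- language="C" -->
 * static gboolean
 * my_dec_set_format (GstAudioDecoder * dec, GstCaps * caps)
 * {
 *   GstAudioInfo info;
 *
 *   // configure the wrapped codec from the input caps here, then
 *   // describe the raw audio format that will be produced
 *   gst_audio_info_init (&info);
 *   gst_audio_info_set_format (&info, GST_AUDIO_FORMAT_S16, 48000, 2, NULL);
 *
 *   return gst_audio_decoder_set_output_format (dec, &info);
 * }
 *
 * static GstFlowReturn
 * my_dec_handle_frame (GstAudioDecoder * dec, GstBuffer * inbuf)
 * {
 *   GstBuffer *outbuf;
 *   gsize out_size = 1024 * 2 * sizeof (gint16);   // 1024 stereo S16 samples
 *
 *   if (inbuf == NULL) {
 *     // NULL input: the base class asks for packet loss concealment
 *     // (only if the plc property is enabled); produce concealment data
 *     // for the indicated duration or simply return GST_FLOW_OK
 *     return GST_FLOW_OK;
 *   }
 *
 *   // real decoding of inbuf would happen here; this sketch just
 *   // outputs one buffer of silence per input frame
 *   outbuf = gst_buffer_new_and_alloc (out_size);
 *   gst_buffer_memset (outbuf, 0, 0, out_size);
 *
 *   // hand decoded data for exactly one input frame to the base class
 *   return gst_audio_decoder_finish_frame (dec, outbuf, 1);
 * }
 *
 * static void
 * my_dec_class_init (MyDecClass * klass)
 * {
 *   GstAudioDecoderClass *audio_decoder_class = GST_AUDIO_DECODER_CLASS (klass);
 *
 *   audio_decoder_class->set_format = GST_DEBUG_FUNCPTR (my_dec_set_format);
 *   audio_decoder_class->handle_frame = GST_DEBUG_FUNCPTR (my_dec_handle_frame);
 * }
 * ]|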
120  */
121 
122 #ifdef HAVE_CONFIG_H
123 #include "config.h"
124 #endif
125 
126 #include "gstaudiodecoder.h"
127 #include "gstaudioutilsprivate.h"
128 #include <gst/pbutils/descriptions.h>
129 
130 #include <string.h>
131 
132 GST_DEBUG_CATEGORY (audiodecoder_debug);
133 #define GST_CAT_DEFAULT audiodecoder_debug
134 
135 enum
136 {
137   LAST_SIGNAL
138 };
139 
140 enum
141 {
142   PROP_0,
143   PROP_LATENCY,
144   PROP_TOLERANCE,
145   PROP_PLC,
146   PROP_MAX_ERRORS
147 };
148 
149 #define DEFAULT_LATENCY    0
150 #define DEFAULT_TOLERANCE  0
151 #define DEFAULT_PLC        FALSE
152 #define DEFAULT_DRAINABLE  TRUE
153 #define DEFAULT_NEEDS_FORMAT  FALSE
154 #define DEFAULT_MAX_ERRORS GST_AUDIO_DECODER_MAX_ERRORS
155 
156 typedef struct _GstAudioDecoderContext
157 {
158   /* last negotiated input caps */
159   GstCaps *input_caps;
160 
161   /* (output) audio format */
162   GstAudioInfo info;
163   GstCaps *caps;
164   gboolean output_format_changed;
165 
166   /* parsing state */
167   gboolean eos;
168   gboolean sync;
169 
170   gboolean had_output_data;
171   gboolean had_input_data;
172 
173   /* misc */
174   gint delay;
175 
176   /* output */
177   gboolean do_plc;
178   gboolean do_estimate_rate;
179   GstCaps *allocation_caps;
180   /* MT-protected (with LOCK) */
181   GstClockTime min_latency;
182   GstClockTime max_latency;
183 
184   GstAllocator *allocator;
185   GstAllocationParams params;
186 } GstAudioDecoderContext;
187 
188 struct _GstAudioDecoderPrivate
189 {
190   /* activation status */
191   gboolean active;
192 
193   /* input base/first ts as basis for output ts */
194   GstClockTime base_ts;
195   /* input samples processed and sent downstream so far (w.r.t. base_ts) */
196   guint64 samples;
197 
198   /* collected input data */
199   GstAdapter *adapter;
200   /* tracking input ts for changes */
201   GstClockTime prev_ts;
202   guint64 prev_distance;
203   /* frames obtained from input */
204   GQueue frames;
205   /* collected output data */
206   GstAdapter *adapter_out;
207   /* ts and duration for output data collected above */
208   GstClockTime out_ts, out_dur;
209   /* mark outgoing discont */
210   gboolean discont;
211 
212   /* subclass gave all it could already */
213   gboolean drained;
214   /* subclass currently being forcibly drained */
215   gboolean force;
 216   /* input_segment and output_segment are identical */
217   gboolean in_out_segment_sync;
218   /* TRUE if we have an active set of instant rate flags */
219   gboolean decode_flags_override;
220   GstSegmentFlags decode_flags;
221 
222   /* expecting the buffer with DISCONT flag */
223   gboolean expecting_discont_buf;
224 
225   /* number of samples pushed out via _finish_subframe(), resets on _finish_frame() */
226   guint subframe_samples;
227 
 228   /* input bps estimation */
229   /* global in bytes seen */
230   guint64 bytes_in;
231   /* global samples sent out */
232   guint64 samples_out;
233   /* bytes flushed during parsing */
234   guint sync_flush;
235   /* error count */
236   gint error_count;
237   /* max errors */
238   gint max_errors;
239 
240   /* upstream stream tags (global tags are passed through as-is) */
241   GstTagList *upstream_tags;
242 
243   /* subclass tags */
244   GstTagList *taglist;          /* FIXME: rename to decoder_tags */
245   GstTagMergeMode decoder_tags_merge_mode;
246 
247   gboolean taglist_changed;     /* FIXME: rename to tags_changed */
248 
249   /* whether circumstances allow output aggregation */
250   gint agg;
251 
252   /* reverse playback queues */
253   /* collect input */
254   GList *gather;
255   /* to-be-decoded */
256   GList *decode;
257   /* reversed output */
258   GList *queued;
259 
260   /* context storage */
261   GstAudioDecoderContext ctx;
262 
263   /* properties */
264   GstClockTime latency;
265   GstClockTime tolerance;
266   gboolean plc;
267   gboolean drainable;
268   gboolean needs_format;
269 
270   /* pending serialized sink events, will be sent from finish_frame() */
271   GList *pending_events;
272 
273   /* flags */
274   gboolean use_default_pad_acceptcaps;
275 #ifdef OHOS_OPT_PERFORMANCE // ohos.opt.performance.0001: first frame decoded cost time
276   gboolean has_recv_first_frame;
277   gboolean has_push_first_frame;
278 #endif
279 };
280 
281 /* cached quark to avoid contention on the global quark table lock */
282 #define META_TAG_AUDIO meta_tag_audio_quark
283 static GQuark meta_tag_audio_quark;
284 
285 static void gst_audio_decoder_finalize (GObject * object);
286 static void gst_audio_decoder_set_property (GObject * object,
287     guint prop_id, const GValue * value, GParamSpec * pspec);
288 static void gst_audio_decoder_get_property (GObject * object,
289     guint prop_id, GValue * value, GParamSpec * pspec);
290 
291 static void gst_audio_decoder_clear_queues (GstAudioDecoder * dec);
292 static GstFlowReturn gst_audio_decoder_chain_reverse (GstAudioDecoder *
293     dec, GstBuffer * buf);
294 
295 static GstStateChangeReturn gst_audio_decoder_change_state (GstElement *
296     element, GstStateChange transition);
297 static gboolean gst_audio_decoder_sink_eventfunc (GstAudioDecoder * dec,
298     GstEvent * event);
299 static gboolean gst_audio_decoder_src_eventfunc (GstAudioDecoder * dec,
300     GstEvent * event);
301 static gboolean gst_audio_decoder_sink_event (GstPad * pad, GstObject * parent,
302     GstEvent * event);
303 static gboolean gst_audio_decoder_src_event (GstPad * pad, GstObject * parent,
304     GstEvent * event);
305 static gboolean gst_audio_decoder_sink_setcaps (GstAudioDecoder * dec,
306     GstCaps * caps);
307 static GstFlowReturn gst_audio_decoder_chain (GstPad * pad, GstObject * parent,
308     GstBuffer * buf);
309 static gboolean gst_audio_decoder_src_query (GstPad * pad, GstObject * parent,
310     GstQuery * query);
311 static gboolean gst_audio_decoder_sink_query (GstPad * pad, GstObject * parent,
312     GstQuery * query);
313 static void gst_audio_decoder_reset (GstAudioDecoder * dec, gboolean full);
314 
315 static gboolean gst_audio_decoder_decide_allocation_default (GstAudioDecoder *
316     dec, GstQuery * query);
317 static gboolean gst_audio_decoder_propose_allocation_default (GstAudioDecoder *
318     dec, GstQuery * query);
319 static gboolean gst_audio_decoder_negotiate_default (GstAudioDecoder * dec);
320 static gboolean gst_audio_decoder_negotiate_unlocked (GstAudioDecoder * dec);
321 static gboolean gst_audio_decoder_handle_gap (GstAudioDecoder * dec,
322     GstEvent * event);
323 static gboolean gst_audio_decoder_sink_query_default (GstAudioDecoder * dec,
324     GstQuery * query);
325 static gboolean gst_audio_decoder_src_query_default (GstAudioDecoder * dec,
326     GstQuery * query);
327 
328 static gboolean gst_audio_decoder_transform_meta_default (GstAudioDecoder *
329     decoder, GstBuffer * outbuf, GstMeta * meta, GstBuffer * inbuf);
330 
331 static GstFlowReturn
332 gst_audio_decoder_finish_frame_or_subframe (GstAudioDecoder * dec,
333     GstBuffer * buf, gint frames);
334 
335 static GstElementClass *parent_class = NULL;
336 static gint private_offset = 0;
337 
338 static void gst_audio_decoder_class_init (GstAudioDecoderClass * klass);
339 static void gst_audio_decoder_init (GstAudioDecoder * dec,
340     GstAudioDecoderClass * klass);
341 
342 GType
343 gst_audio_decoder_get_type (void)
344 {
345   static gsize audio_decoder_type = 0;
346 
347   if (g_once_init_enter (&audio_decoder_type)) {
348     GType _type;
349     static const GTypeInfo audio_decoder_info = {
350       sizeof (GstAudioDecoderClass),
351       NULL,
352       NULL,
353       (GClassInitFunc) gst_audio_decoder_class_init,
354       NULL,
355       NULL,
356       sizeof (GstAudioDecoder),
357       0,
358       (GInstanceInitFunc) gst_audio_decoder_init,
359     };
360 
361     _type = g_type_register_static (GST_TYPE_ELEMENT,
362         "GstAudioDecoder", &audio_decoder_info, G_TYPE_FLAG_ABSTRACT);
363 
364     private_offset =
365         g_type_add_instance_private (_type, sizeof (GstAudioDecoderPrivate));
366 
367     g_once_init_leave (&audio_decoder_type, _type);
368   }
369   return audio_decoder_type;
370 }
371 
372 static inline GstAudioDecoderPrivate *
373 gst_audio_decoder_get_instance_private (GstAudioDecoder * self)
374 {
375   return (G_STRUCT_MEMBER_P (self, private_offset));
376 }
377 
378 static void
379 gst_audio_decoder_class_init (GstAudioDecoderClass * klass)
380 {
381   GObjectClass *gobject_class;
382   GstElementClass *element_class;
383   GstAudioDecoderClass *audiodecoder_class;
384 
385   gobject_class = G_OBJECT_CLASS (klass);
386   element_class = GST_ELEMENT_CLASS (klass);
387   audiodecoder_class = GST_AUDIO_DECODER_CLASS (klass);
388 
389   parent_class = g_type_class_peek_parent (klass);
390 
391   if (private_offset != 0)
392     g_type_class_adjust_private_offset (klass, &private_offset);
393 
394   GST_DEBUG_CATEGORY_INIT (audiodecoder_debug, "audiodecoder", 0,
395       "audio decoder base class");
396 
397   gobject_class->set_property = gst_audio_decoder_set_property;
398   gobject_class->get_property = gst_audio_decoder_get_property;
399   gobject_class->finalize = gst_audio_decoder_finalize;
400 
401   element_class->change_state =
402       GST_DEBUG_FUNCPTR (gst_audio_decoder_change_state);
403 
404   /* Properties */
405   g_object_class_install_property (gobject_class, PROP_LATENCY,
406       g_param_spec_int64 ("min-latency", "Minimum Latency",
407           "Aggregate output data to a minimum of latency time (ns)",
408           0, G_MAXINT64, DEFAULT_LATENCY,
409           G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
410 
411   g_object_class_install_property (gobject_class, PROP_TOLERANCE,
412       g_param_spec_int64 ("tolerance", "Tolerance",
413           "Perfect ts while timestamp jitter/imperfection within tolerance (ns)",
414           0, G_MAXINT64, DEFAULT_TOLERANCE,
415           G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
416 
417   g_object_class_install_property (gobject_class, PROP_PLC,
418       g_param_spec_boolean ("plc", "Packet Loss Concealment",
419           "Perform packet loss concealment (if supported)",
420           DEFAULT_PLC, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
421 
422   /**
423    * GstAudioDecoder:max-errors:
424    *
425    * Maximum number of tolerated consecutive decode errors. See
426    * gst_audio_decoder_set_max_errors() for more details.
427    *
428    * Since: 1.18
429    */
430   g_object_class_install_property (gobject_class, PROP_MAX_ERRORS,
431       g_param_spec_int ("max-errors", "Max errors",
432           "Max consecutive decoder errors before returning flow error",
433           -1, G_MAXINT, DEFAULT_MAX_ERRORS,
434           G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
435 
436   audiodecoder_class->sink_event =
437       GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_eventfunc);
438   audiodecoder_class->src_event =
439       GST_DEBUG_FUNCPTR (gst_audio_decoder_src_eventfunc);
440   audiodecoder_class->propose_allocation =
441       GST_DEBUG_FUNCPTR (gst_audio_decoder_propose_allocation_default);
442   audiodecoder_class->decide_allocation =
443       GST_DEBUG_FUNCPTR (gst_audio_decoder_decide_allocation_default);
444   audiodecoder_class->negotiate =
445       GST_DEBUG_FUNCPTR (gst_audio_decoder_negotiate_default);
446   audiodecoder_class->sink_query =
447       GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_query_default);
448   audiodecoder_class->src_query =
449       GST_DEBUG_FUNCPTR (gst_audio_decoder_src_query_default);
450   audiodecoder_class->transform_meta =
451       GST_DEBUG_FUNCPTR (gst_audio_decoder_transform_meta_default);
452 
453   meta_tag_audio_quark = g_quark_from_static_string (GST_META_TAG_AUDIO_STR);
454 }
455 
456 static void
457 gst_audio_decoder_init (GstAudioDecoder * dec, GstAudioDecoderClass * klass)
458 {
459   GstPadTemplate *pad_template;
460 
461   GST_DEBUG_OBJECT (dec, "gst_audio_decoder_init");
462 
463   dec->priv = gst_audio_decoder_get_instance_private (dec);
464 
465   /* Setup sink pad */
466   pad_template =
467       gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "sink");
468   g_return_if_fail (pad_template != NULL);
469 
470   dec->sinkpad = gst_pad_new_from_template (pad_template, "sink");
471   gst_pad_set_event_function (dec->sinkpad,
472       GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_event));
473   gst_pad_set_chain_function (dec->sinkpad,
474       GST_DEBUG_FUNCPTR (gst_audio_decoder_chain));
475   gst_pad_set_query_function (dec->sinkpad,
476       GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_query));
477   gst_element_add_pad (GST_ELEMENT (dec), dec->sinkpad);
478   GST_DEBUG_OBJECT (dec, "sinkpad created");
479 
480   /* Setup source pad */
481   pad_template =
482       gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "src");
483   g_return_if_fail (pad_template != NULL);
484 
485   dec->srcpad = gst_pad_new_from_template (pad_template, "src");
486   gst_pad_set_event_function (dec->srcpad,
487       GST_DEBUG_FUNCPTR (gst_audio_decoder_src_event));
488   gst_pad_set_query_function (dec->srcpad,
489       GST_DEBUG_FUNCPTR (gst_audio_decoder_src_query));
490   gst_element_add_pad (GST_ELEMENT (dec), dec->srcpad);
491   GST_DEBUG_OBJECT (dec, "srcpad created");
492 
493   dec->priv->adapter = gst_adapter_new ();
494   dec->priv->adapter_out = gst_adapter_new ();
495   g_queue_init (&dec->priv->frames);
496 
497   g_rec_mutex_init (&dec->stream_lock);
498 
499   /* property default */
500   dec->priv->latency = DEFAULT_LATENCY;
501   dec->priv->tolerance = DEFAULT_TOLERANCE;
502   dec->priv->plc = DEFAULT_PLC;
503   dec->priv->drainable = DEFAULT_DRAINABLE;
504   dec->priv->needs_format = DEFAULT_NEEDS_FORMAT;
505   dec->priv->max_errors = GST_AUDIO_DECODER_MAX_ERRORS;
506 
507   /* init state */
508   dec->priv->ctx.min_latency = 0;
509   dec->priv->ctx.max_latency = 0;
510   gst_audio_decoder_reset (dec, TRUE);
511   GST_DEBUG_OBJECT (dec, "init ok");
512 }
513 
514 static void
515 gst_audio_decoder_reset (GstAudioDecoder * dec, gboolean full)
516 {
517   GST_DEBUG_OBJECT (dec, "gst_audio_decoder_reset");
518 
519   GST_AUDIO_DECODER_STREAM_LOCK (dec);
520 
521   if (full) {
522     dec->priv->active = FALSE;
523     GST_OBJECT_LOCK (dec);
524     dec->priv->bytes_in = 0;
525     dec->priv->samples_out = 0;
526     GST_OBJECT_UNLOCK (dec);
527     dec->priv->agg = -1;
528     dec->priv->error_count = 0;
529     gst_audio_decoder_clear_queues (dec);
530 
531     if (dec->priv->taglist) {
532       gst_tag_list_unref (dec->priv->taglist);
533       dec->priv->taglist = NULL;
534     }
535     dec->priv->decoder_tags_merge_mode = GST_TAG_MERGE_KEEP_ALL;
536     if (dec->priv->upstream_tags) {
537       gst_tag_list_unref (dec->priv->upstream_tags);
538       dec->priv->upstream_tags = NULL;
539     }
540     dec->priv->taglist_changed = FALSE;
541 
542     gst_segment_init (&dec->input_segment, GST_FORMAT_TIME);
543     gst_segment_init (&dec->output_segment, GST_FORMAT_TIME);
544     dec->priv->in_out_segment_sync = TRUE;
545 
546     g_list_foreach (dec->priv->pending_events, (GFunc) gst_event_unref, NULL);
547     g_list_free (dec->priv->pending_events);
548     dec->priv->pending_events = NULL;
549 
550     if (dec->priv->ctx.allocator)
551       gst_object_unref (dec->priv->ctx.allocator);
552 
553     GST_OBJECT_LOCK (dec);
554     dec->priv->decode_flags_override = FALSE;
555     gst_caps_replace (&dec->priv->ctx.input_caps, NULL);
556     gst_caps_replace (&dec->priv->ctx.caps, NULL);
557     gst_caps_replace (&dec->priv->ctx.allocation_caps, NULL);
558 
559     memset (&dec->priv->ctx, 0, sizeof (dec->priv->ctx));
560 
561     gst_audio_info_init (&dec->priv->ctx.info);
562     GST_OBJECT_UNLOCK (dec);
563     dec->priv->ctx.had_output_data = FALSE;
564     dec->priv->ctx.had_input_data = FALSE;
565   }
566 
567   g_queue_foreach (&dec->priv->frames, (GFunc) gst_buffer_unref, NULL);
568   g_queue_clear (&dec->priv->frames);
569   gst_adapter_clear (dec->priv->adapter);
570   gst_adapter_clear (dec->priv->adapter_out);
571   dec->priv->out_ts = GST_CLOCK_TIME_NONE;
572   dec->priv->out_dur = 0;
573   dec->priv->prev_ts = GST_CLOCK_TIME_NONE;
574   dec->priv->prev_distance = 0;
575   dec->priv->drained = TRUE;
576   dec->priv->base_ts = GST_CLOCK_TIME_NONE;
577   dec->priv->samples = 0;
578   dec->priv->discont = TRUE;
579   dec->priv->sync_flush = FALSE;
580 
581   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
582 }
583 
584 static void
585 gst_audio_decoder_finalize (GObject * object)
586 {
587   GstAudioDecoder *dec;
588 
589   g_return_if_fail (GST_IS_AUDIO_DECODER (object));
590   dec = GST_AUDIO_DECODER (object);
591 
592   if (dec->priv->adapter) {
593     g_object_unref (dec->priv->adapter);
594   }
595   if (dec->priv->adapter_out) {
596     g_object_unref (dec->priv->adapter_out);
597   }
598 
599   g_rec_mutex_clear (&dec->stream_lock);
600 
601   G_OBJECT_CLASS (parent_class)->finalize (object);
602 }
603 
604 static GstEvent *
605 gst_audio_decoder_create_merged_tags_event (GstAudioDecoder * dec)
606 {
607   GstTagList *merged_tags;
608 
609   GST_LOG_OBJECT (dec, "upstream : %" GST_PTR_FORMAT, dec->priv->upstream_tags);
610   GST_LOG_OBJECT (dec, "decoder  : %" GST_PTR_FORMAT, dec->priv->taglist);
611   GST_LOG_OBJECT (dec, "mode     : %d", dec->priv->decoder_tags_merge_mode);
612 
613   merged_tags =
614       gst_tag_list_merge (dec->priv->upstream_tags,
615       dec->priv->taglist, dec->priv->decoder_tags_merge_mode);
616 
617   GST_DEBUG_OBJECT (dec, "merged   : %" GST_PTR_FORMAT, merged_tags);
618 
619   if (merged_tags == NULL)
620     return NULL;
621 
622   if (gst_tag_list_is_empty (merged_tags)) {
623     gst_tag_list_unref (merged_tags);
624     return NULL;
625   }
626 
627   return gst_event_new_tag (merged_tags);
628 }
629 
630 static gboolean
631 gst_audio_decoder_push_event (GstAudioDecoder * dec, GstEvent * event)
632 {
633   switch (GST_EVENT_TYPE (event)) {
634     case GST_EVENT_SEGMENT:{
635       GstSegment seg;
636 
637       GST_AUDIO_DECODER_STREAM_LOCK (dec);
638       gst_event_copy_segment (event, &seg);
639 
640       GST_DEBUG_OBJECT (dec, "starting segment %" GST_SEGMENT_FORMAT, &seg);
641 
642       dec->output_segment = seg;
643       dec->priv->in_out_segment_sync =
644           gst_segment_is_equal (&dec->input_segment, &seg);
645       GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
646       break;
647     }
648     default:
649       break;
650   }
651 
652   return gst_pad_push_event (dec->srcpad, event);
653 }
654 
655 static gboolean
656 gst_audio_decoder_negotiate_default (GstAudioDecoder * dec)
657 {
658   GstAudioDecoderClass *klass;
659   gboolean res = TRUE;
660   GstCaps *caps;
661   GstCaps *prevcaps;
662   GstQuery *query = NULL;
663   GstAllocator *allocator;
664   GstAllocationParams params;
665 
666   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
667   g_return_val_if_fail (GST_AUDIO_INFO_IS_VALID (&dec->priv->ctx.info), FALSE);
668   g_return_val_if_fail (GST_IS_CAPS (dec->priv->ctx.caps), FALSE);
669 
670   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
671 
672   caps = dec->priv->ctx.caps;
673   if (dec->priv->ctx.allocation_caps == NULL)
674     dec->priv->ctx.allocation_caps = gst_caps_ref (caps);
675 
676   GST_DEBUG_OBJECT (dec, "setting src caps %" GST_PTR_FORMAT, caps);
677 
678   if (dec->priv->pending_events) {
679     GList **pending_events, *l;
680 
681     pending_events = &dec->priv->pending_events;
682 
683     GST_DEBUG_OBJECT (dec, "Pushing pending events");
684     for (l = *pending_events; l;) {
685       GstEvent *event = GST_EVENT (l->data);
686       GList *tmp;
687 
688       if (GST_EVENT_TYPE (event) < GST_EVENT_CAPS) {
689         gst_audio_decoder_push_event (dec, l->data);
690         tmp = l;
691         l = l->next;
692         *pending_events = g_list_delete_link (*pending_events, tmp);
693       } else {
694         l = l->next;
695       }
696     }
697   }
698 
699   prevcaps = gst_pad_get_current_caps (dec->srcpad);
700   if (!prevcaps || !gst_caps_is_equal (prevcaps, caps))
701     res = gst_pad_set_caps (dec->srcpad, caps);
702   if (prevcaps)
703     gst_caps_unref (prevcaps);
704 
705   if (!res)
706     goto done;
707   dec->priv->ctx.output_format_changed = FALSE;
708 
709   query = gst_query_new_allocation (dec->priv->ctx.allocation_caps, TRUE);
710   if (!gst_pad_peer_query (dec->srcpad, query)) {
711     GST_DEBUG_OBJECT (dec, "didn't get downstream ALLOCATION hints");
712   }
713 
714   g_assert (klass->decide_allocation != NULL);
715   res = klass->decide_allocation (dec, query);
716 
717   GST_DEBUG_OBJECT (dec, "ALLOCATION (%d) params: %" GST_PTR_FORMAT, res,
718       query);
719 
720   if (!res)
721     goto no_decide_allocation;
722 
723   /* we got configuration from our peer or the decide_allocation method,
724    * parse them */
725   if (gst_query_get_n_allocation_params (query) > 0) {
726     gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
727   } else {
728     allocator = NULL;
729     gst_allocation_params_init (&params);
730   }
731 
732   if (dec->priv->ctx.allocator)
733     gst_object_unref (dec->priv->ctx.allocator);
734   dec->priv->ctx.allocator = allocator;
735   dec->priv->ctx.params = params;
736 
737 done:
738 
739   if (query)
740     gst_query_unref (query);
741 
742   return res;
743 
744   /* ERRORS */
745 no_decide_allocation:
746   {
747     GST_WARNING_OBJECT (dec, "Subclass failed to decide allocation");
748     goto done;
749   }
750 }
751 
752 static gboolean
753 gst_audio_decoder_negotiate_unlocked (GstAudioDecoder * dec)
754 {
755   GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (dec);
756   gboolean ret = TRUE;
757 
758   if (G_LIKELY (klass->negotiate))
759     ret = klass->negotiate (dec);
760 
761   return ret;
762 }
763 
764 /**
765  * gst_audio_decoder_negotiate:
766  * @dec: a #GstAudioDecoder
767  *
 768  * Negotiate with downstream elements to the currently configured #GstAudioInfo.
 769  * Unmark GST_PAD_FLAG_NEED_RECONFIGURE in any case. But mark it again if
 770  * negotiation fails.
771  *
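 * The base class negotiates automatically before pushing output, so a
 * subclass typically does not need to call this directly; it may do so e.g.
 * after changing the output format from within @handle_frame.  A hedged
 * sketch:
 *
 * |[<!-- language="C" -->
 * // after gst_audio_decoder_set_output_format() succeeded
 * if (!gst_audio_decoder_negotiate (dec))
 *   return GST_FLOW_NOT_NEGOTIATED;
 * ]|
 *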
772  * Returns: %TRUE if the negotiation succeeded, else %FALSE.
773  */
774 gboolean
775 gst_audio_decoder_negotiate (GstAudioDecoder * dec)
776 {
777   GstAudioDecoderClass *klass;
778   gboolean res = TRUE;
779 
780   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
781 
782   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
783 
784   GST_AUDIO_DECODER_STREAM_LOCK (dec);
785   gst_pad_check_reconfigure (dec->srcpad);
786   if (klass->negotiate) {
787     res = klass->negotiate (dec);
788     if (!res)
789       gst_pad_mark_reconfigure (dec->srcpad);
790   }
791   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
792 
793   return res;
794 }
795 
796 /**
797  * gst_audio_decoder_set_output_format:
798  * @dec: a #GstAudioDecoder
799  * @info: #GstAudioInfo
800  *
801  * Configure output info on the srcpad of @dec.
802  *
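 * A minimal usage sketch (the concrete format, rate and channel values here
 * are illustrative; a real subclass derives them from its codec setup):
 *
 * |[<!-- language="C" -->
 * GstAudioInfo info;
 *
 * gst_audio_info_init (&info);
 * gst_audio_info_set_format (&info, GST_AUDIO_FORMAT_S16, 44100, 2, NULL);
 * if (!gst_audio_decoder_set_output_format (dec, &info))
 *   GST_WARNING_OBJECT (dec, "failed to configure output format");
 * ]|
 *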
803  * Returns: %TRUE on success.
804  **/
805 gboolean
806 gst_audio_decoder_set_output_format (GstAudioDecoder * dec,
807     const GstAudioInfo * info)
808 {
809   gboolean res = TRUE;
810   GstCaps *caps = NULL;
811 
812   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
813   g_return_val_if_fail (GST_AUDIO_INFO_IS_VALID (info), FALSE);
814 
815   /* If the audio info can't be converted to caps,
816    * it was invalid */
817   caps = gst_audio_info_to_caps (info);
818   if (!caps) {
819     GST_WARNING_OBJECT (dec, "invalid output format");
820     return FALSE;
821   }
822 
823   res = gst_audio_decoder_set_output_caps (dec, caps);
824   gst_caps_unref (caps);
825 
826   return res;
827 }
828 
829 /**
830  * gst_audio_decoder_set_output_caps:
831  * @dec: a #GstAudioDecoder
832  * @caps: (transfer none): (fixed) #GstCaps
833  *
834  * Configure output caps on the srcpad of @dec. Similar to
835  * gst_audio_decoder_set_output_format(), but allows subclasses to specify
836  * output caps that can't be expressed via #GstAudioInfo e.g. caps that have
837  * caps features.
838  *
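 * A hedged sketch with plain fixed caps (a subclass needing caps features
 * would additionally set those on the caps):
 *
 * |[<!-- language="C" -->
 * GstCaps *caps;
 * gboolean res;
 *
 * caps = gst_caps_new_simple ("audio/x-raw",
 *     "format", G_TYPE_STRING, "S16LE",
 *     "layout", G_TYPE_STRING, "interleaved",
 *     "rate", G_TYPE_INT, 48000,
 *     "channels", G_TYPE_INT, 2, NULL);
 * res = gst_audio_decoder_set_output_caps (dec, caps);
 * gst_caps_unref (caps);
 * ]|
 *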
839  * Returns: %TRUE on success.
840  *
841  * Since: 1.16
842  **/
843 gboolean
844 gst_audio_decoder_set_output_caps (GstAudioDecoder * dec, GstCaps * caps)
845 {
846   gboolean res = TRUE;
847   guint old_rate;
848   GstCaps *templ_caps;
849   GstAudioInfo info;
850 
851   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
852 
853   GST_DEBUG_OBJECT (dec, "Setting srcpad caps %" GST_PTR_FORMAT, caps);
854 
855   GST_AUDIO_DECODER_STREAM_LOCK (dec);
856 
857   if (!gst_caps_is_fixed (caps))
858     goto refuse_caps;
859 
860   /* check if caps can be parsed */
861   if (!gst_audio_info_from_caps (&info, caps))
862     goto refuse_caps;
863 
864   /* Only allow caps that are a subset of the template caps */
865   templ_caps = gst_pad_get_pad_template_caps (dec->srcpad);
866   if (!gst_caps_is_subset (caps, templ_caps)) {
867     GST_WARNING_OBJECT (dec, "Requested output format %" GST_PTR_FORMAT
868         " do not match template %" GST_PTR_FORMAT, caps, templ_caps);
869     gst_caps_unref (templ_caps);
870     goto refuse_caps;
871   }
872   gst_caps_unref (templ_caps);
873 
874   /* adjust ts tracking to new sample rate */
875   old_rate = GST_AUDIO_INFO_RATE (&dec->priv->ctx.info);
876   if (GST_CLOCK_TIME_IS_VALID (dec->priv->base_ts) && old_rate) {
877     dec->priv->base_ts +=
878         GST_FRAMES_TO_CLOCK_TIME (dec->priv->samples, old_rate);
879     dec->priv->samples = 0;
880   }
881 
882   /* copy the GstAudioInfo */
883   GST_OBJECT_LOCK (dec);
884   dec->priv->ctx.info = info;
885   GST_OBJECT_UNLOCK (dec);
886 
887   gst_caps_replace (&dec->priv->ctx.caps, caps);
888   dec->priv->ctx.output_format_changed = TRUE;
889 
890 done:
891   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
892 
893   return res;
894 
895   /* ERRORS */
896 refuse_caps:
897   {
898     GST_WARNING_OBJECT (dec, "invalid output format");
899     res = FALSE;
900     goto done;
901   }
902 }
903 
904 static gboolean
905 gst_audio_decoder_sink_setcaps (GstAudioDecoder * dec, GstCaps * caps)
906 {
907   GstAudioDecoderClass *klass;
908   gboolean res = TRUE;
909 
910   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
911 
912   GST_DEBUG_OBJECT (dec, "caps: %" GST_PTR_FORMAT, caps);
913 
914   GST_AUDIO_DECODER_STREAM_LOCK (dec);
915 
916   if (dec->priv->ctx.input_caps
917       && gst_caps_is_equal (dec->priv->ctx.input_caps, caps)) {
918     GST_DEBUG_OBJECT (dec, "Caps did not change, not setting again");
919     goto done;
920   }
921 
922   /* NOTE pbutils only needed here */
923   /* TODO maybe (only) upstream demuxer/parser etc should handle this ? */
924 #if 0
925   if (!dec->priv->taglist)
926     dec->priv->taglist = gst_tag_list_new ();
927   dec->priv->taglist = gst_tag_list_make_writable (dec->priv->taglist);
928   gst_pb_utils_add_codec_description_to_tag_list (dec->priv->taglist,
929       GST_TAG_AUDIO_CODEC, caps);
930   dec->priv->taglist_changed = TRUE;
931 #endif
932 
933   if (klass->set_format)
934     res = klass->set_format (dec, caps);
935 
936   if (res)
937     gst_caps_replace (&dec->priv->ctx.input_caps, caps);
938 
939 done:
940   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
941 
942   return res;
943 }
944 
945 static void
946 gst_audio_decoder_setup (GstAudioDecoder * dec)
947 {
948   GstQuery *query;
949   gboolean res;
950 
951   /* check if in live pipeline, then latency messing is no-no */
952   query = gst_query_new_latency ();
953   res = gst_pad_peer_query (dec->sinkpad, query);
954   if (res) {
955     gst_query_parse_latency (query, &res, NULL, NULL);
956     res = !res;
957   }
958   gst_query_unref (query);
959 
960   /* normalize to bool */
961   dec->priv->agg = ! !res;
962 }
963 
964 static GstFlowReturn
965 gst_audio_decoder_push_forward (GstAudioDecoder * dec, GstBuffer * buf)
966 {
967   GstAudioDecoderClass *klass;
968   GstAudioDecoderPrivate *priv;
969   GstAudioDecoderContext *ctx;
970   GstFlowReturn ret = GST_FLOW_OK;
971   GstClockTime ts;
972 
973   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
974   priv = dec->priv;
975   ctx = &dec->priv->ctx;
976 
977   g_return_val_if_fail (ctx->info.bpf != 0, GST_FLOW_ERROR);
978 
979   if (G_UNLIKELY (!buf)) {
980     g_assert_not_reached ();
981     return GST_FLOW_OK;
982   }
983 
984   ctx->had_output_data = TRUE;
985   ts = GST_BUFFER_PTS (buf);
986 
987   GST_LOG_OBJECT (dec,
988       "clipping buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT
989       ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buf),
990       GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
991       GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
992 
993   /* clip buffer */
994   buf = gst_audio_buffer_clip (buf, &dec->output_segment, ctx->info.rate,
995       ctx->info.bpf);
996   if (G_UNLIKELY (!buf)) {
997     GST_DEBUG_OBJECT (dec, "no data after clipping to segment");
998     /* only check and return EOS if upstream still
999      * in the same segment and interested as such */
1000     if (dec->priv->in_out_segment_sync) {
1001       if (dec->output_segment.rate >= 0) {
1002         if (ts >= dec->output_segment.stop)
1003           ret = GST_FLOW_EOS;
1004       } else if (ts < dec->output_segment.start) {
1005         ret = GST_FLOW_EOS;
1006       }
1007     }
1008     goto exit;
1009   }
1010 
1011   /* decorate */
1012   if (G_UNLIKELY (priv->discont)) {
1013     GST_LOG_OBJECT (dec, "marking discont");
1014     GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
1015     priv->discont = FALSE;
1016   }
1017 
1018   /* track where we are */
1019   if (G_LIKELY (GST_BUFFER_PTS_IS_VALID (buf))) {
1020     /* duration should always be valid for raw audio */
1021     g_assert (GST_BUFFER_DURATION_IS_VALID (buf));
1022     dec->output_segment.position =
1023         GST_BUFFER_PTS (buf) + GST_BUFFER_DURATION (buf);
1024   }
1025 
1026   if (klass->pre_push) {
1027     /* last chance for subclass to do some dirty stuff */
1028     ret = klass->pre_push (dec, &buf);
1029     if (ret != GST_FLOW_OK || !buf) {
1030       GST_DEBUG_OBJECT (dec, "subclass returned %s, buf %p",
1031           gst_flow_get_name (ret), buf);
1032       if (buf)
1033         gst_buffer_unref (buf);
1034       goto exit;
1035     }
1036   }
1037 
1038   GST_LOG_OBJECT (dec,
1039       "pushing buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT
1040       ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buf),
1041       GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
1042       GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
1043 
1044 #ifdef OHOS_OPT_PERFORMANCE // ohos.opt.performance.0001: first frame decoded cost time
1045   if (!priv->has_push_first_frame) {
1046     priv->has_push_first_frame = TRUE;
1047     GST_WARNING_OBJECT (dec, "KPI-TRACE: audiodecoder push first frame");
1048   }
1049 #endif
1050   ret = gst_pad_push (dec->srcpad, buf);
1051 
1052 exit:
1053   return ret;
1054 }
1055 
1056 /* mini aggregator combining output buffers into fewer larger ones,
1057  * if so allowed/configured */
1058 static GstFlowReturn
1059 gst_audio_decoder_output (GstAudioDecoder * dec, GstBuffer * buf)
1060 {
1061   GstAudioDecoderPrivate *priv;
1062   GstFlowReturn ret = GST_FLOW_OK;
1063   GstBuffer *inbuf = NULL;
1064 
1065   priv = dec->priv;
1066 
1067   if (G_UNLIKELY (priv->agg < 0))
1068     gst_audio_decoder_setup (dec);
1069 
1070   if (G_LIKELY (buf)) {
1071     GST_LOG_OBJECT (dec,
1072         "output buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT
1073         ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buf),
1074         GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
1075         GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
1076   }
1077 
1078 again:
1079   inbuf = NULL;
1080   if (priv->agg && dec->priv->latency > 0 &&
1081       priv->ctx.info.layout == GST_AUDIO_LAYOUT_INTERLEAVED) {
1082     gint av;
1083     gboolean assemble = FALSE;
1084     const GstClockTimeDiff tol = 10 * GST_MSECOND;
1085     GstClockTimeDiff diff = -100 * GST_MSECOND;
1086 
1087     av = gst_adapter_available (priv->adapter_out);
1088     if (G_UNLIKELY (!buf)) {
1089       /* forcibly send current */
1090       assemble = TRUE;
1091       GST_LOG_OBJECT (dec, "forcing fragment flush");
1092     } else if (av && (!GST_BUFFER_PTS_IS_VALID (buf) ||
1093             !GST_CLOCK_TIME_IS_VALID (priv->out_ts) ||
1094             ((diff = GST_CLOCK_DIFF (GST_BUFFER_PTS (buf),
1095                         priv->out_ts + priv->out_dur)) > tol) || diff < -tol)) {
1096       assemble = TRUE;
1097       GST_LOG_OBJECT (dec, "buffer %d ms apart from current fragment",
1098           (gint) (diff / GST_MSECOND));
1099     } else {
1100       /* add or start collecting */
1101       if (!av) {
1102         GST_LOG_OBJECT (dec, "starting new fragment");
1103         priv->out_ts = GST_BUFFER_PTS (buf);
1104       } else {
1105         GST_LOG_OBJECT (dec, "adding to fragment");
1106       }
1107       gst_adapter_push (priv->adapter_out, buf);
1108       priv->out_dur += GST_BUFFER_DURATION (buf);
1109       av += gst_buffer_get_size (buf);
1110       buf = NULL;
1111     }
1112     if (priv->out_dur > dec->priv->latency)
1113       assemble = TRUE;
1114     if (av && assemble) {
1115       GST_LOG_OBJECT (dec, "assembling fragment");
1116       inbuf = buf;
1117       buf = gst_adapter_take_buffer (priv->adapter_out, av);
1118       GST_BUFFER_PTS (buf) = priv->out_ts;
1119       GST_BUFFER_DURATION (buf) = priv->out_dur;
1120       priv->out_ts = GST_CLOCK_TIME_NONE;
1121       priv->out_dur = 0;
1122     }
1123   }
1124 
1125   if (G_LIKELY (buf)) {
1126     if (dec->output_segment.rate > 0.0) {
1127       ret = gst_audio_decoder_push_forward (dec, buf);
1128       GST_LOG_OBJECT (dec, "buffer pushed: %s", gst_flow_get_name (ret));
1129     } else {
1130       ret = GST_FLOW_OK;
1131       priv->queued = g_list_prepend (priv->queued, buf);
1132       GST_LOG_OBJECT (dec, "buffer queued");
1133     }
1134 
1135     if (inbuf) {
1136       buf = inbuf;
1137       goto again;
1138     }
1139   }
1140 
1141   return ret;
1142 }
1143 
1144 static void
1145 send_pending_events (GstAudioDecoder * dec)
1146 {
1147   GstAudioDecoderPrivate *priv = dec->priv;
1148   GList *pending_events, *l;
1149 
1150   pending_events = priv->pending_events;
1151   priv->pending_events = NULL;
1152 
1153   GST_DEBUG_OBJECT (dec, "Pushing pending events");
1154   for (l = pending_events; l; l = l->next)
1155     gst_audio_decoder_push_event (dec, l->data);
1156   g_list_free (pending_events);
1157 }
1158 
1159 /* Iterate the list of pending events, and ensure
1160  * the current output segment is up to date for
1161  * decoding */
1162 static void
1163 apply_pending_events (GstAudioDecoder * dec)
1164 {
1165   GstAudioDecoderPrivate *priv = dec->priv;
1166   GList *l;
1167 
1168   GST_DEBUG_OBJECT (dec, "Applying pending segments");
1169   for (l = priv->pending_events; l; l = l->next) {
1170     GstEvent *event = GST_EVENT (l->data);
1171     switch (GST_EVENT_TYPE (event)) {
1172       case GST_EVENT_SEGMENT:{
1173         GstSegment seg;
1174 
1175         GST_AUDIO_DECODER_STREAM_LOCK (dec);
1176         gst_event_copy_segment (event, &seg);
1177 
1178         GST_DEBUG_OBJECT (dec, "starting segment %" GST_SEGMENT_FORMAT, &seg);
1179 
1180         dec->output_segment = seg;
1181         dec->priv->in_out_segment_sync =
1182             gst_segment_is_equal (&dec->input_segment, &seg);
1183         GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
1184         break;
1185       }
1186       default:
1187         break;
1188     }
1189   }
1190 }
1191 
1192 static GstFlowReturn
1193 check_pending_reconfigure (GstAudioDecoder * dec)
1194 {
1195   GstFlowReturn ret = GST_FLOW_OK;
1196   GstAudioDecoderContext *ctx;
1197   gboolean needs_reconfigure;
1198 
1199   ctx = &dec->priv->ctx;
1200 
1201   needs_reconfigure = gst_pad_check_reconfigure (dec->srcpad);
1202   if (G_UNLIKELY (ctx->output_format_changed ||
1203           (GST_AUDIO_INFO_IS_VALID (&ctx->info)
1204               && needs_reconfigure))) {
1205     if (!gst_audio_decoder_negotiate_unlocked (dec)) {
1206       gst_pad_mark_reconfigure (dec->srcpad);
1207       if (GST_PAD_IS_FLUSHING (dec->srcpad))
1208         ret = GST_FLOW_FLUSHING;
1209       else
1210         ret = GST_FLOW_NOT_NEGOTIATED;
1211     }
1212   }
1213   return ret;
1214 }
1215 
1216 static gboolean
1217 gst_audio_decoder_transform_meta_default (GstAudioDecoder *
1218     decoder, GstBuffer * outbuf, GstMeta * meta, GstBuffer * inbuf)
1219 {
1220   const GstMetaInfo *info = meta->info;
1221   const gchar *const *tags;
1222   const gchar *const supported_tags[] = {
1223     GST_META_TAG_AUDIO_STR,
1224     GST_META_TAG_AUDIO_CHANNELS_STR,
1225     NULL,
1226   };
1227 
1228   tags = gst_meta_api_type_get_tags (info->api);
1229 
1230   if (!tags)
1231     return TRUE;
1232 
1233   while (*tags) {
1234     if (!g_strv_contains (supported_tags, *tags))
1235       return FALSE;
1236     tags++;
1237   }
1238 
1239   return TRUE;
1240 }
1241 
1242 typedef struct
1243 {
1244   GstAudioDecoder *decoder;
1245   GstBuffer *outbuf;
1246 } CopyMetaData;
1247 
1248 static gboolean
1249 foreach_metadata (GstBuffer * inbuf, GstMeta ** meta, gpointer user_data)
1250 {
1251   CopyMetaData *data = user_data;
1252   GstAudioDecoder *decoder = data->decoder;
1253   GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (decoder);
1254   GstBuffer *outbuf = data->outbuf;
1255   const GstMetaInfo *info = (*meta)->info;
1256   gboolean do_copy = FALSE;
1257 
1258   if (gst_meta_api_type_has_tag (info->api, _gst_meta_tag_memory)) {
1259     /* never call the transform_meta with memory specific metadata */
1260     GST_DEBUG_OBJECT (decoder, "not copying memory specific metadata %s",
1261         g_type_name (info->api));
1262     do_copy = FALSE;
1263   } else if (klass->transform_meta) {
1264     do_copy = klass->transform_meta (decoder, outbuf, *meta, inbuf);
1265     GST_DEBUG_OBJECT (decoder, "transformed metadata %s: copy: %d",
1266         g_type_name (info->api), do_copy);
1267   }
1268 
1269   /* we only copy metadata when the subclass implemented a transform_meta
1270    * function and when it returns %TRUE */
1271   if (do_copy && info->transform_func) {
1272     GstMetaTransformCopy copy_data = { FALSE, 0, -1 };
1273     GST_DEBUG_OBJECT (decoder, "copy metadata %s", g_type_name (info->api));
1274     /* simply copy then */
1275     info->transform_func (outbuf, *meta, inbuf,
1276         _gst_meta_transform_copy, &copy_data);
1277   }
1278   return TRUE;
1279 }
1280 
1281 /**
1282  * gst_audio_decoder_finish_subframe:
1283  * @dec: a #GstAudioDecoder
1284  * @buf: (transfer full) (allow-none): decoded data
1285  *
1286  * Collects decoded data and pushes it downstream. This function may be called
1287  * multiple times for a given input frame.
1288  *
1289  * @buf may be NULL in which case it is assumed that the current input frame is
 1290  * finished. This is equivalent to calling gst_audio_decoder_finish_frame()
1291  * with a NULL buffer and frames=1 after having pushed out all decoded audio
1292  * subframes using this function.
1293  *
1294  * When called with valid data in @buf the source pad caps must have been set
1295  * already.
1296  *
1297  * Note that a frame received in #GstAudioDecoderClass.handle_frame() may be
1298  * invalidated by a call to this function.
1299  *
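 * A hedged sketch of how a subclass' @handle_frame might use this when one
 * input frame yields several decoded chunks (decode_next_chunk() is a
 * hypothetical codec-specific helper):
 *
 * |[<!-- language="C" -->
 * GstBuffer *chunk;
 * GstFlowReturn ret = GST_FLOW_OK;
 *
 * while (ret == GST_FLOW_OK && (chunk = decode_next_chunk (dec)) != NULL)
 *   ret = gst_audio_decoder_finish_subframe (dec, chunk);
 *
 * // all subframes pushed; mark the pending input frame as fully handled
 * if (ret == GST_FLOW_OK)
 *   ret = gst_audio_decoder_finish_frame (dec, NULL, 1);
 * ]|
 *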
1300  * Returns: a #GstFlowReturn that should be escalated to caller (of caller)
1301  *
1302  * Since: 1.16
1303  */
1304 GstFlowReturn
1305 gst_audio_decoder_finish_subframe (GstAudioDecoder * dec, GstBuffer * buf)
1306 {
1307   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), GST_FLOW_ERROR);
1308 
1309   if (buf == NULL)
1310     return gst_audio_decoder_finish_frame_or_subframe (dec, NULL, 1);
1311   else
1312     return gst_audio_decoder_finish_frame_or_subframe (dec, buf, 0);
1313 }
1314 
1315 /**
1316  * gst_audio_decoder_finish_frame:
1317  * @dec: a #GstAudioDecoder
1318  * @buf: (transfer full) (allow-none): decoded data
1319  * @frames: number of decoded frames represented by decoded data
1320  *
1321  * Collects decoded data and pushes it downstream.
1322  *
1323  * @buf may be NULL in which case the indicated number of frames
1324  * are discarded and considered to have produced no output
1325  * (e.g. lead-in or setup frames).
1326  * Otherwise, source pad caps must be set when it is called with valid
1327  * data in @buf.
1328  *
1329  * Note that a frame received in #GstAudioDecoderClass.handle_frame() may be
1330  * invalidated by a call to this function.
1331  *
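 * For example, an input frame that should produce no output (such as a codec
 * lead-in or setup frame) can simply be discarded from @handle_frame:
 *
 * |[<!-- language="C" -->
 * // consume one input frame without producing output
 * return gst_audio_decoder_finish_frame (dec, NULL, 1);
 * ]|
 *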
1332  * Returns: a #GstFlowReturn that should be escalated to caller (of caller)
1333  */
1334 GstFlowReturn
1335 gst_audio_decoder_finish_frame (GstAudioDecoder * dec, GstBuffer * buf,
1336     gint frames)
1337 {
1338   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), GST_FLOW_ERROR);
1339 
1340   /* no dummy calls please */
1341   g_return_val_if_fail (frames != 0, GST_FLOW_ERROR);
1342 
1343   return gst_audio_decoder_finish_frame_or_subframe (dec, buf, frames);
1344 }
1345 
1346 /* frames == 0 indicates that this is a sub-frame and further sub-frames may
1347  * follow for the current input frame. */
1348 static GstFlowReturn
1349 gst_audio_decoder_finish_frame_or_subframe (GstAudioDecoder * dec,
1350     GstBuffer * buf, gint frames)
1351 {
1352   GstAudioDecoderPrivate *priv;
1353   GstAudioDecoderContext *ctx;
1354   GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (dec);
1355   GstAudioMeta *meta;
1356   GstClockTime ts, next_ts;
1357   gsize size, samples = 0;
1358   GstFlowReturn ret = GST_FLOW_OK;
1359   GQueue inbufs = G_QUEUE_INIT;
1360   gboolean is_subframe = (frames == 0);
1361   gboolean do_check_resync;
1362 
1363   /* subclass should not hand us no data */
1364   g_return_val_if_fail (buf == NULL || gst_buffer_get_size (buf) > 0,
1365       GST_FLOW_ERROR);
1366 
1367   /* if it's a subframe (frames == 0) we must have a valid buffer */
1368   g_assert (!is_subframe || buf != NULL);
1369 
1370   priv = dec->priv;
1371   ctx = &dec->priv->ctx;
1372   meta = buf ? gst_buffer_get_audio_meta (buf) : NULL;
1373   size = buf ? gst_buffer_get_size (buf) : 0;
1374   samples = buf ? (meta ? meta->samples : size / ctx->info.bpf) : 0;
1375 
1376   /* must know the output format by now */
1377   g_return_val_if_fail (buf == NULL || GST_AUDIO_INFO_IS_VALID (&ctx->info),
1378       GST_FLOW_ERROR);
1379 
1380   GST_LOG_OBJECT (dec,
1381       "accepting %" G_GSIZE_FORMAT " bytes == %" G_GSIZE_FORMAT
1382       " samples for %d frames", buf ? size : 0, samples, frames);
1383 
1384   GST_AUDIO_DECODER_STREAM_LOCK (dec);
1385 
1386   if (buf != NULL && priv->subframe_samples == 0) {
1387     ret = check_pending_reconfigure (dec);
1388     if (ret == GST_FLOW_FLUSHING || ret == GST_FLOW_NOT_NEGOTIATED) {
1389       gst_buffer_unref (buf);
1390       goto exit;
1391     }
1392 
1393     if (priv->pending_events)
1394       send_pending_events (dec);
1395   }
1396 
1397   /* sanity checking */
1398   if (G_LIKELY (buf && ctx->info.bpf)) {
1399     if (!meta || meta->info.layout == GST_AUDIO_LAYOUT_INTERLEAVED) {
1400       /* output should be whole number of sample frames */
1401       if (size % ctx->info.bpf)
1402         goto wrong_buffer;
1403       /* output should have no additional padding */
1404       if (samples != size / ctx->info.bpf)
1405         goto wrong_samples;
1406     } else {
1407       /* can't have more samples than what the buffer fits */
1408       if (samples > size / ctx->info.bpf)
1409         goto wrong_samples;
1410     }
1411   }
1412 
1413   /* frame and ts book-keeping */
1414   if (G_UNLIKELY (frames < 0)) {
1415     if (G_UNLIKELY (-frames - 1 > priv->frames.length)) {
1416       GST_ELEMENT_WARNING (dec, STREAM, DECODE,
1417           ("received more decoded frames %d than provided %d", frames,
1418               priv->frames.length), (NULL));
1419       frames = 0;
1420     } else {
1421       frames = priv->frames.length + frames + 1;
1422     }
1423   } else if (G_UNLIKELY (frames > priv->frames.length)) {
1424     if (G_LIKELY (!priv->force)) {
1425       GST_ELEMENT_WARNING (dec, STREAM, DECODE,
1426           ("received more decoded frames %d than provided %d", frames,
1427               priv->frames.length), (NULL));
1428     }
1429     frames = priv->frames.length;
1430   }
1431 
1432   if (G_LIKELY (priv->frames.length))
1433     ts = GST_BUFFER_PTS (priv->frames.head->data);
1434   else
1435     ts = GST_CLOCK_TIME_NONE;
1436 
1437   GST_DEBUG_OBJECT (dec, "leading frame ts %" GST_TIME_FORMAT,
1438       GST_TIME_ARGS (ts));
1439 
1440   if (is_subframe && priv->frames.length == 0)
1441     goto subframe_without_pending_input_frame;
1442 
1443   /* this will be skipped in the is_subframe case because frames will be 0 */
1444   while (priv->frames.length && frames) {
1445     g_queue_push_tail (&inbufs, g_queue_pop_head (&priv->frames));
1446     dec->priv->ctx.delay = dec->priv->frames.length;
1447     frames--;
1448   }
1449 
1450   if (G_UNLIKELY (!buf))
1451     goto exit;
1452 
1453   /* lock on */
1454   if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (priv->base_ts))) {
1455     priv->base_ts = ts;
1456     GST_DEBUG_OBJECT (dec, "base_ts now %" GST_TIME_FORMAT, GST_TIME_ARGS (ts));
1457   }
1458 
1459   /* still no valid ts, track the segment one */
1460   if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (priv->base_ts)) &&
1461       dec->output_segment.rate > 0.0) {
1462     priv->base_ts = dec->output_segment.start;
1463   }
1464 
1465   /* only check for resync at the beginning of an input/output frame */
1466   do_check_resync = !is_subframe || priv->subframe_samples == 0;
1467 
1468   /* slightly convoluted approach caters for perfect ts if subclass desires. */
1469   if (do_check_resync && GST_CLOCK_TIME_IS_VALID (ts)) {
1470     if (dec->priv->tolerance > 0) {
1471       GstClockTimeDiff diff;
1472 
1473       g_assert (GST_CLOCK_TIME_IS_VALID (priv->base_ts));
1474       next_ts = priv->base_ts +
1475           gst_util_uint64_scale (priv->samples, GST_SECOND, ctx->info.rate);
1476       GST_LOG_OBJECT (dec,
1477           "buffer is %" G_GUINT64_FORMAT " samples past base_ts %"
1478           GST_TIME_FORMAT ", expected ts %" GST_TIME_FORMAT, priv->samples,
1479           GST_TIME_ARGS (priv->base_ts), GST_TIME_ARGS (next_ts));
1480       diff = GST_CLOCK_DIFF (next_ts, ts);
1481       GST_LOG_OBJECT (dec, "ts diff %d ms", (gint) (diff / GST_MSECOND));
1482       /* if within tolerance,
1483        * discard buffer ts and carry on producing perfect stream,
1484        * otherwise resync to ts */
1485       if (G_UNLIKELY (diff < (gint64) - dec->priv->tolerance ||
1486               diff > (gint64) dec->priv->tolerance)) {
1487         GST_DEBUG_OBJECT (dec, "base_ts resync");
1488         priv->base_ts = ts;
1489         priv->samples = 0;
1490       }
1491     } else {
1492       GST_DEBUG_OBJECT (dec, "base_ts resync");
1493       priv->base_ts = ts;
1494       priv->samples = 0;
1495     }
1496   }
1497 
1498   /* delayed one-shot stuff until confirmed data */
1499   if (priv->taglist && priv->taglist_changed) {
1500     GstEvent *tags_event;
1501 
1502     tags_event = gst_audio_decoder_create_merged_tags_event (dec);
1503 
1504     if (tags_event != NULL)
1505       gst_audio_decoder_push_event (dec, tags_event);
1506 
1507     priv->taglist_changed = FALSE;
1508   }
1509 
1510   buf = gst_buffer_make_writable (buf);
1511   if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (priv->base_ts))) {
1512     GST_BUFFER_PTS (buf) =
1513         priv->base_ts +
1514         GST_FRAMES_TO_CLOCK_TIME (priv->samples, ctx->info.rate);
1515     GST_BUFFER_DURATION (buf) = priv->base_ts +
1516         GST_FRAMES_TO_CLOCK_TIME (priv->samples + samples, ctx->info.rate) -
1517         GST_BUFFER_PTS (buf);
1518   } else {
1519     GST_BUFFER_PTS (buf) = GST_CLOCK_TIME_NONE;
1520     GST_BUFFER_DURATION (buf) =
1521         GST_FRAMES_TO_CLOCK_TIME (samples, ctx->info.rate);
1522   }
1523 
1524   if (klass->transform_meta) {
1525     if (inbufs.length) {
1526       GList *l;
1527       for (l = inbufs.head; l; l = l->next) {
1528         CopyMetaData data;
1529 
1530         data.decoder = dec;
1531         data.outbuf = buf;
1532         gst_buffer_foreach_meta (l->data, foreach_metadata, &data);
1533       }
1534     } else if (is_subframe) {
1535       CopyMetaData data;
1536       GstBuffer *in_buf;
1537 
1538       /* For subframes we assume a 1:N relationship for now, so we just take
1539        * metas from the first pending input buf */
1540       in_buf = g_queue_peek_head (&priv->frames);
1541       data.decoder = dec;
1542       data.outbuf = buf;
1543       gst_buffer_foreach_meta (in_buf, foreach_metadata, &data);
1544     } else {
1545       GST_WARNING_OBJECT (dec,
1546           "Can't copy metadata because input buffers disappeared");
1547     }
1548   }
1549 
1550   GST_OBJECT_LOCK (dec);
1551   priv->samples += samples;
1552   priv->samples_out += samples;
1553   GST_OBJECT_UNLOCK (dec);
1554 
1555   /* we got data, so note things are looking up */
1556   if (G_UNLIKELY (dec->priv->error_count))
1557     dec->priv->error_count = 0;
1558 
1559   ret = gst_audio_decoder_output (dec, buf);
1560 
1561 exit:
1562   g_queue_foreach (&inbufs, (GFunc) gst_buffer_unref, NULL);
1563   g_queue_clear (&inbufs);
1564 
1565   if (is_subframe)
1566     dec->priv->subframe_samples += samples;
1567   else
1568     dec->priv->subframe_samples = 0;
1569 
1570   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
1571 
1572   return ret;
1573 
1574   /* ERRORS */
1575 wrong_buffer:
1576   {
1577     /* arguably more of a programming error? */
1578     GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL),
1579         ("buffer size %" G_GSIZE_FORMAT " not a multiple of %d", size,
1580             ctx->info.bpf));
1581     gst_buffer_unref (buf);
1582     ret = GST_FLOW_ERROR;
1583     goto exit;
1584   }
1585 wrong_samples:
1586   {
1587     /* arguably more of a programming error? */
1588     GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL),
1589         ("GstAudioMeta samples (%" G_GSIZE_FORMAT ") are inconsistent with "
1590             "the buffer size and layout (size/bpf = %" G_GSIZE_FORMAT ")",
1591             meta->samples, size / ctx->info.bpf));
1592     gst_buffer_unref (buf);
1593     ret = GST_FLOW_ERROR;
1594     goto exit;
1595   }
1596 subframe_without_pending_input_frame:
1597   {
1598     /* arguably more of a programming error? */
1599     GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL),
1600         ("Received decoded subframe, but no pending frame"));
1601     gst_buffer_unref (buf);
1602     ret = GST_FLOW_ERROR;
1603     goto exit;
1604   }
1605 }
1606 
1607 static GstFlowReturn
1608 gst_audio_decoder_handle_frame (GstAudioDecoder * dec,
1609     GstAudioDecoderClass * klass, GstBuffer * buffer)
1610 {
1611   /* Skip decoding and send a GAP instead if
1612    * GST_SEGMENT_FLAG_TRICKMODE_NO_AUDIO is set and we have timestamps
1613    * FIXME: We only do this for forward playback atm, because reverse
1614    * playback would require accumulating GAP events and pushing them
1615    * out in reverse order as for normal audio samples
1616    */
1617   if (G_UNLIKELY (dec->input_segment.rate > 0.0
1618           && dec->input_segment.flags & GST_SEGMENT_FLAG_TRICKMODE_NO_AUDIO)) {
1619     if (buffer) {
1620       GstClockTime ts = GST_BUFFER_PTS (buffer);
1621       if (GST_CLOCK_TIME_IS_VALID (ts)) {
1622         GstEvent *event = gst_event_new_gap (ts, GST_BUFFER_DURATION (buffer));
1623 
1624         gst_buffer_unref (buffer);
1625         GST_LOG_OBJECT (dec, "Skipping decode in trickmode and sending gap");
1626         gst_audio_decoder_handle_gap (dec, event);
1627         return GST_FLOW_OK;
1628       }
1629     }
1630   }
1631 
1632   if (G_LIKELY (buffer)) {
1633     gsize size = gst_buffer_get_size (buffer);
1634     /* keep around for admin */
1635     GST_LOG_OBJECT (dec,
1636         "tracking frame size %" G_GSIZE_FORMAT ", ts %" GST_TIME_FORMAT, size,
1637         GST_TIME_ARGS (GST_BUFFER_PTS (buffer)));
1638     g_queue_push_tail (&dec->priv->frames, buffer);
1639     dec->priv->ctx.delay = dec->priv->frames.length;
1640     GST_OBJECT_LOCK (dec);
1641     dec->priv->bytes_in += size;
1642     GST_OBJECT_UNLOCK (dec);
1643   } else {
1644     GST_LOG_OBJECT (dec, "providing subclass with NULL frame");
1645   }
1646 
1647 #ifdef OHOS_OPT_PERFORMANCE // ohos.opt.performance.0001: first frame decoded cost time
1648   if (!dec->priv->has_recv_first_frame) {
1649     dec->priv->has_recv_first_frame = TRUE;
1650     GST_WARNING_OBJECT (dec, "KPI-TRACE: audiodecoder recv first frame");
1651   }
1652 #endif
1653 
1654   return klass->handle_frame (dec, buffer);
1655 }
1656 
1657 /* could be made subclass configurable instead, but this already allows for a
1658  * whole lot of raw samples, and hence quite some encoded data ... */
1659 #define GST_AUDIO_DECODER_MAX_SYNC     (10 * 8 * 2 * 1024)
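/* (10 * 8 * 2 * 1024 = 163840 bytes, i.e. up to 160 KiB of encoded data may be
 * skipped while searching for sync before parsing is considered failed) */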
1660 
1661 static GstFlowReturn
1662 gst_audio_decoder_push_buffers (GstAudioDecoder * dec, gboolean force)
1663 {
1664   GstAudioDecoderClass *klass;
1665   GstAudioDecoderPrivate *priv;
1666   GstAudioDecoderContext *ctx;
1667   GstFlowReturn ret = GST_FLOW_OK;
1668   GstBuffer *buffer;
1669   gint av, flush;
1670 
1671   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
1672   priv = dec->priv;
1673   ctx = &dec->priv->ctx;
1674 
1675   g_return_val_if_fail (klass->handle_frame != NULL, GST_FLOW_ERROR);
1676 
1677   av = gst_adapter_available (priv->adapter);
1678   GST_DEBUG_OBJECT (dec, "available: %d", av);
1679 
1680   while (ret == GST_FLOW_OK) {
1681 
1682     flush = 0;
1683     ctx->eos = force;
1684 
1685     if (G_LIKELY (av)) {
1686       gint len;
1687       GstClockTime ts;
1688       guint64 distance;
1689 
1690       /* parse if needed */
1691       if (klass->parse) {
1692         gint offset = 0;
1693 
1694         /* limited (legacy) parsing; avoid whole of baseparse */
1695         GST_DEBUG_OBJECT (dec, "parsing available: %d", av);
1696         /* piggyback sync state on discont */
1697         ctx->sync = !priv->discont;
1698         ret = klass->parse (dec, priv->adapter, &offset, &len);
1699 
1700         g_assert (offset <= av);
1701         if (offset) {
1702           /* jumped a bit */
1703           GST_DEBUG_OBJECT (dec, "skipped %d; setting DISCONT", offset);
1704           gst_adapter_flush (priv->adapter, offset);
1705           flush = offset;
1706           /* avoid parsing indefinitely */
1707           priv->sync_flush += offset;
1708           if (priv->sync_flush > GST_AUDIO_DECODER_MAX_SYNC)
1709             goto parse_failed;
1710         }
1711 
1712         if (ret == GST_FLOW_EOS) {
1713           GST_LOG_OBJECT (dec, "no frame yet");
1714           ret = GST_FLOW_OK;
1715           break;
1716         } else if (ret == GST_FLOW_OK) {
1717           GST_LOG_OBJECT (dec, "frame at offset %d of length %d", offset, len);
1718           g_assert (len);
1719           g_assert (offset + len <= av);
1720           priv->sync_flush = 0;
1721         } else {
1722           break;
1723         }
1724       } else {
1725         len = av;
1726       }
1727       /* track upstream ts, but do not get stuck if nothing new upstream */
1728       ts = gst_adapter_prev_pts (priv->adapter, &distance);
1729       if (ts != priv->prev_ts || distance <= priv->prev_distance) {
1730         priv->prev_ts = ts;
1731         priv->prev_distance = distance;
1732       } else {
1733         GST_LOG_OBJECT (dec, "ts == prev_ts; discarding");
1734         ts = GST_CLOCK_TIME_NONE;
1735       }
1736       buffer = gst_adapter_take_buffer (priv->adapter, len);
1737       buffer = gst_buffer_make_writable (buffer);
1738       GST_BUFFER_PTS (buffer) = ts;
1739       flush += len;
1740       priv->force = FALSE;
1741     } else {
1742       if (!force)
1743         break;
1744       if (!priv->drainable) {
1745         priv->drained = TRUE;
1746         break;
1747       }
1748       buffer = NULL;
1749       priv->force = TRUE;
1750     }
1751 
1752     ret = gst_audio_decoder_handle_frame (dec, klass, buffer);
1753 
1754     /* do not keep pushing it ... */
1755     if (G_UNLIKELY (!av)) {
1756       priv->drained = TRUE;
1757       break;
1758     }
1759 
1760     av -= flush;
1761     g_assert (av >= 0);
1762   }
1763 
1764   GST_LOG_OBJECT (dec, "done pushing to subclass");
1765   return ret;
1766 
1767   /* ERRORS */
1768 parse_failed:
1769   {
1770     GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL), ("failed to parse stream"));
1771     return GST_FLOW_ERROR;
1772   }
1773 }
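/* Illustrative sketch (editorial addition, not part of this file): a minimal
 * subclass ::parse implementation matching the contract used above, for a
 * hypothetical codec with fixed-size frames of MY_FRAME_SIZE bytes:
 *
 *   static GstFlowReturn
 *   my_dec_parse (GstAudioDecoder * dec, GstAdapter * adapter,
 *       gint * offset, gint * length)
 *   {
 *     if (gst_adapter_available (adapter) < MY_FRAME_SIZE)
 *       return GST_FLOW_EOS;      // no complete frame yet, wait for more data
 *     *offset = 0;                // no junk to skip before the frame
 *     *length = MY_FRAME_SIZE;    // one complete frame is available
 *     return GST_FLOW_OK;
 *   }
 *
 * As used above, GST_FLOW_EOS means "no frame yet", a non-zero *offset is
 * flushed as junk (and counted towards GST_AUDIO_DECODER_MAX_SYNC), and
 * GST_FLOW_OK announces a frame of *length bytes at *offset. */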
1774 
1775 static GstFlowReturn
1776 gst_audio_decoder_drain (GstAudioDecoder * dec)
1777 {
1778   GstFlowReturn ret;
1779 
1780   if (dec->priv->drained && !dec->priv->gather)
1781     return GST_FLOW_OK;
1782 
1783   /* Apply any pending events before draining, as that
1784    * may update the pending segment info */
1785   apply_pending_events (dec);
1786 
1787   /* dispatch reverse pending buffers */
1788   /* chain eventually calls upon drain as well, but by that time
1789    * gather list should be clear, so ok ... */
1790   if (dec->output_segment.rate < 0.0 && dec->priv->gather)
1791     gst_audio_decoder_chain_reverse (dec, NULL);
1792   /* have subclass give all it can */
1793   ret = gst_audio_decoder_push_buffers (dec, TRUE);
1794   if (ret != GST_FLOW_OK) {
1795     GST_WARNING_OBJECT (dec, "audio decoder push buffers failed");
1796     goto drain_failed;
1797   }
1798   /* ensure all output sent */
1799   ret = gst_audio_decoder_output (dec, NULL);
1800   if (ret != GST_FLOW_OK)
1801     GST_WARNING_OBJECT (dec, "audio decoder output failed");
1802 
1803 drain_failed:
1804   /* everything should be away now */
1805   if (dec->priv->frames.length) {
1806     /* not fatal/impossible though if subclass/codec eats stuff */
1807     GST_WARNING_OBJECT (dec, "still %d frames left after draining",
1808         dec->priv->frames.length);
1809     g_queue_foreach (&dec->priv->frames, (GFunc) gst_buffer_unref, NULL);
1810     g_queue_clear (&dec->priv->frames);
1811   }
1812 
1813   /* discard (unparsed) leftover */
1814   gst_adapter_clear (dec->priv->adapter);
1815   return ret;
1816 }
1817 
1818 /* hard == FLUSH, otherwise discont */
1819 static GstFlowReturn
1820 gst_audio_decoder_flush (GstAudioDecoder * dec, gboolean hard)
1821 {
1822   GstAudioDecoderClass *klass;
1823   GstFlowReturn ret = GST_FLOW_OK;
1824 
1825   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
1826 
1827   GST_LOG_OBJECT (dec, "flush hard %d", hard);
1828 
1829   if (!hard) {
1830     ret = gst_audio_decoder_drain (dec);
1831   } else {
1832     gst_audio_decoder_clear_queues (dec);
1833     gst_segment_init (&dec->input_segment, GST_FORMAT_TIME);
1834     gst_segment_init (&dec->output_segment, GST_FORMAT_TIME);
1835     dec->priv->error_count = 0;
1836   }
1837   /* only bother subclass with flushing if we know it is already alive
1838    * and kicking out stuff */
1839   if (klass->flush && dec->priv->samples_out > 0)
1840     klass->flush (dec, hard);
1841   /* and get (re)set for the sequel */
1842   gst_audio_decoder_reset (dec, FALSE);
1843 
1844   return ret;
1845 }
1846 
1847 static GstFlowReturn
1848 gst_audio_decoder_chain_forward (GstAudioDecoder * dec, GstBuffer * buffer)
1849 {
1850   GstFlowReturn ret = GST_FLOW_OK;
1851 
1852   /* discard silly case, though its ts might still be of value ?? */
1853   if (G_UNLIKELY (gst_buffer_get_size (buffer) == 0)) {
1854     GST_DEBUG_OBJECT (dec, "discarding empty buffer");
1855     gst_buffer_unref (buffer);
1856     goto exit;
1857   }
1858 
1859   /* grab buffer */
1860   gst_adapter_push (dec->priv->adapter, buffer);
1861   buffer = NULL;
1862   /* new stuff, so we can push subclass again */
1863   dec->priv->drained = FALSE;
1864 
1865   /* hand to subclass */
1866   ret = gst_audio_decoder_push_buffers (dec, FALSE);
1867 
1868 exit:
1869   GST_LOG_OBJECT (dec, "chain-done");
1870   return ret;
1871 }
1872 
1873 static void
1874 gst_audio_decoder_clear_queues (GstAudioDecoder * dec)
1875 {
1876   GstAudioDecoderPrivate *priv = dec->priv;
1877 
1878   g_list_foreach (priv->queued, (GFunc) gst_mini_object_unref, NULL);
1879   g_list_free (priv->queued);
1880   priv->queued = NULL;
1881   g_list_foreach (priv->gather, (GFunc) gst_mini_object_unref, NULL);
1882   g_list_free (priv->gather);
1883   priv->gather = NULL;
1884   g_list_foreach (priv->decode, (GFunc) gst_mini_object_unref, NULL);
1885   g_list_free (priv->decode);
1886   priv->decode = NULL;
1887 }
1888 
1889 /*
1890  * Input:
1891  *  Buffer decoding order:  7  8  9  4  5  6  3  1  2  EOS
1892  *  Discont flag:           D        D        D  D
1893  *
1894  * - Each Discont marks a discont in the decoding order.
1895  *
1896  * for vorbis, each buffer is a keyframe when we have the previous
1897  * buffer. This means that to decode buffer 7, we need buffer 6, which
1898  * arrives out of order.
1899  *
1900  * we first gather buffers in the gather queue until we get a DISCONT. We
1901  * prepend each incoming buffer so that they are in reversed order.
1902  *
1903  *    gather queue:    9  8  7
1904  *    decode queue:
1905  *    output queue:
1906  *
1907  * When a DISCONT is received (buffer 4), we move the gather queue to the
1908  * decode queue. This is simply done by taking the head of the gather queue
1909  * and prepending it to the decode queue. This yields:
1910  *
1911  *    gather queue:
1912  *    decode queue:    7  8  9
1913  *    output queue:
1914  *
1915  * Then we decode each buffer in the decode queue in order and put the output
1916  * buffer in the output queue. The first buffer (7) will not produce any output
1917  * because it needs the previous buffer (6) which did not arrive yet. This
1918  * yields:
1919  *
1920  *    gather queue:
1921  *    decode queue:    7  8  9
1922  *    output queue:    9  8
1923  *
1924  * Then we remove the consumed buffers from the decode queue. Buffer 7 is not
1925  * completely consumed, we need to keep it around for when we receive buffer
1926  * 6. This yields:
1927  *
1928  *    gather queue:
1929  *    decode queue:    7
1930  *    output queue:    9  8
1931  *
1932  * Then we accumulate more buffers:
1933  *
1934  *    gather queue:    6  5  4
1935  *    decode queue:    7
1936  *    output queue:
1937  *
1938  * prepending to the decode queue on DISCONT yields:
1939  *
1940  *    gather queue:
1941  *    decode queue:    4  5  6  7
1942  *    output queue:
1943  *
1944  * after decoding and keeping buffer 4:
1945  *
1946  *    gather queue:
1947  *    decode queue:    4
1948  *    output queue:    7  6  5
1949  *
1950  * Etc..
1951  */
1952 static GstFlowReturn
1953 gst_audio_decoder_flush_decode (GstAudioDecoder * dec)
1954 {
1955   GstAudioDecoderPrivate *priv = dec->priv;
1956   GstFlowReturn res = GST_FLOW_OK;
1957   GstClockTime timestamp;
1958   GList *walk;
1959 
1960   walk = priv->decode;
1961 
1962   GST_DEBUG_OBJECT (dec, "flushing buffers to decoder");
1963 
1964   /* clear buffer and decoder state */
1965   gst_audio_decoder_flush (dec, FALSE);
1966 
1967   while (walk) {
1968     GList *next;
1969     GstBuffer *buf = GST_BUFFER_CAST (walk->data);
1970 
1971     GST_DEBUG_OBJECT (dec, "decoding buffer %p, ts %" GST_TIME_FORMAT,
1972         buf, GST_TIME_ARGS (GST_BUFFER_PTS (buf)));
1973 
1974     next = g_list_next (walk);
1975     /* decode buffer, resulting data prepended to output queue */
1976     gst_buffer_ref (buf);
1977     res = gst_audio_decoder_chain_forward (dec, buf);
1978 
1979     /* if we generated output, we can discard the buffer, else we
1980      * keep it in the queue */
1981     if (priv->queued) {
1982       GST_DEBUG_OBJECT (dec, "decoded buffer to %p", priv->queued->data);
1983       priv->decode = g_list_delete_link (priv->decode, walk);
1984       gst_buffer_unref (buf);
1985     } else {
1986       GST_DEBUG_OBJECT (dec, "buffer did not decode, keeping");
1987     }
1988     walk = next;
1989   }
1990 
1991   /* drain any aggregation (or otherwise) leftover */
1992   gst_audio_decoder_drain (dec);
1993 
1994   /* now send queued data downstream */
1995   timestamp = GST_CLOCK_TIME_NONE;
1996   while (priv->queued) {
1997     GstBuffer *buf = GST_BUFFER_CAST (priv->queued->data);
1998     GstClockTime duration;
1999 
2000     duration = GST_BUFFER_DURATION (buf);
2001 
2002     /* duration should always be valid for raw audio */
2003     g_assert (GST_CLOCK_TIME_IS_VALID (duration));
2004 
2005     /* interpolate (backward) if needed */
2006     if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (timestamp))) {
2007       if (timestamp > duration)
2008         timestamp -= duration;
2009       else
2010         timestamp = 0;
2011     }
2012 
2013     if (!GST_BUFFER_PTS_IS_VALID (buf)) {
2014       GST_LOG_OBJECT (dec, "applying reverse interpolated ts %"
2015           GST_TIME_FORMAT, GST_TIME_ARGS (timestamp));
2016       GST_BUFFER_PTS (buf) = timestamp;
2017     } else {
2018       /* track otherwise */
2019       timestamp = GST_BUFFER_PTS (buf);
2020       GST_LOG_OBJECT (dec, "tracking ts %" GST_TIME_FORMAT,
2021           GST_TIME_ARGS (timestamp));
2022     }
2023 
2024     if (G_LIKELY (res == GST_FLOW_OK)) {
2025       GST_DEBUG_OBJECT (dec, "pushing buffer %p of size %" G_GSIZE_FORMAT ", "
2026           "time %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf,
2027           gst_buffer_get_size (buf), GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2028           GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
2029       /* should be already, but let's be sure */
2030       buf = gst_buffer_make_writable (buf);
2031       /* avoid stray DISCONT from forward processing,
2032        * which have no meaning in reverse pushing */
2033       GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DISCONT);
2034       res = gst_audio_decoder_push_forward (dec, buf);
2035     } else {
2036       gst_buffer_unref (buf);
2037     }
2038 
2039     priv->queued = g_list_delete_link (priv->queued, priv->queued);
2040   }
2041 
2042   return res;
2043 }
2044 
2045 static GstFlowReturn
2046 gst_audio_decoder_chain_reverse (GstAudioDecoder * dec, GstBuffer * buf)
2047 {
2048   GstAudioDecoderPrivate *priv = dec->priv;
2049   GstFlowReturn result = GST_FLOW_OK;
2050 
2051   /* if we have a discont, move buffers to the decode list */
2052   if (!buf || GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DISCONT)) {
2053     GST_DEBUG_OBJECT (dec, "received discont");
2054     while (priv->gather) {
2055       GstBuffer *gbuf;
2056 
2057       gbuf = GST_BUFFER_CAST (priv->gather->data);
2058       /* remove from the gather list */
2059       priv->gather = g_list_delete_link (priv->gather, priv->gather);
2060       /* copy to decode queue */
2061       priv->decode = g_list_prepend (priv->decode, gbuf);
2062     }
2063     /* decode stuff in the decode queue */
2064     gst_audio_decoder_flush_decode (dec);
2065   }
2066 
2067   if (G_LIKELY (buf)) {
2068     GST_DEBUG_OBJECT (dec, "gathering buffer %p of size %" G_GSIZE_FORMAT ", "
2069         "time %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf,
2070         gst_buffer_get_size (buf), GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2071         GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
2072 
2073     /* add buffer to gather queue */
2074     priv->gather = g_list_prepend (priv->gather, buf);
2075   }
2076 
2077   return result;
2078 }
2079 
2080 static GstFlowReturn
2081 gst_audio_decoder_chain (GstPad * pad, GstObject * parent, GstBuffer * buffer)
2082 {
2083   GstAudioDecoder *dec;
2084   GstFlowReturn ret;
2085 
2086   dec = GST_AUDIO_DECODER (parent);
2087 
2088   GST_LOG_OBJECT (dec,
2089       "received buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT
2090       ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buffer),
2091       GST_TIME_ARGS (GST_BUFFER_PTS (buffer)),
2092       GST_TIME_ARGS (GST_BUFFER_DURATION (buffer)));
2093 
2094   GST_AUDIO_DECODER_STREAM_LOCK (dec);
2095 
2096   if (G_UNLIKELY (dec->priv->ctx.input_caps == NULL && dec->priv->needs_format))
2097     goto not_negotiated;
2098 
2099   dec->priv->ctx.had_input_data = TRUE;
2100 
2101   if (!dec->priv->expecting_discont_buf &&
2102       GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DISCONT)) {
2103     gint64 samples, ts;
2104 
2105     /* track present position */
2106     ts = dec->priv->base_ts;
2107     samples = dec->priv->samples;
2108 
2109     GST_DEBUG_OBJECT (dec, "handling discont");
2110     gst_audio_decoder_flush (dec, FALSE);
2111     dec->priv->discont = TRUE;
2112 
2113     /* the buffer may claim DISCONT loudly, but if it can't tell us where we
2114      * are now, we'll stick to where we were ...
2115      * Particularly useful/needed for upstream BYTE based formats */
2116     if (dec->input_segment.rate > 0.0 && !GST_BUFFER_PTS_IS_VALID (buffer)) {
2117       GST_DEBUG_OBJECT (dec, "... but restoring previous ts tracking");
2118       dec->priv->base_ts = ts;
2119       dec->priv->samples = samples;
2120     }
2121   }
2122   dec->priv->expecting_discont_buf = FALSE;
2123 
2124   if (dec->input_segment.rate > 0.0)
2125     ret = gst_audio_decoder_chain_forward (dec, buffer);
2126   else
2127     ret = gst_audio_decoder_chain_reverse (dec, buffer);
2128 
2129   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2130 
2131   return ret;
2132 
2133   /* ERRORS */
2134 not_negotiated:
2135   {
2136     GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2137     GST_ELEMENT_ERROR (dec, CORE, NEGOTIATION, (NULL),
2138         ("decoder not initialized"));
2139     gst_buffer_unref (buffer);
2140     return GST_FLOW_NOT_NEGOTIATED;
2141   }
2142 }
2143 
2144 /* perform upstream byte <-> time conversion (duration, seeking)
2145  * if subclass allows and if enough data for moderately decent conversion */
2146 static inline gboolean
2147 gst_audio_decoder_do_byte (GstAudioDecoder * dec)
2148 {
2149   gboolean ret;
2150 
2151   GST_OBJECT_LOCK (dec);
2152   ret = dec->priv->ctx.do_estimate_rate && dec->priv->ctx.info.bpf &&
2153       dec->priv->ctx.info.rate <= dec->priv->samples_out;
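  /* (i.e. only once at least a second's worth of samples has been produced,
   * so the byte <-> time estimate is based on a reasonable amount of data) */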
2154   GST_OBJECT_UNLOCK (dec);
2155 
2156   return ret;
2157 }
2158 
2159 /* Must be called holding the GST_AUDIO_DECODER_STREAM_LOCK */
2160 static gboolean
2161 gst_audio_decoder_negotiate_default_caps (GstAudioDecoder * dec)
2162 {
2163   GstCaps *caps, *templcaps;
2164   gint i;
2165   gint channels = 0;
2166   gint rate;
2167   guint64 channel_mask = 0;
2168   gint caps_size;
2169   GstStructure *structure;
2170   GstAudioInfo info;
2171 
2172   templcaps = gst_pad_get_pad_template_caps (dec->srcpad);
2173   caps = gst_pad_peer_query_caps (dec->srcpad, templcaps);
2174   if (caps)
2175     gst_caps_unref (templcaps);
2176   else
2177     caps = templcaps;
2178   templcaps = NULL;
2179 
2180   if (!caps || gst_caps_is_empty (caps) || gst_caps_is_any (caps))
2181     goto caps_error;
2182 
2183   GST_LOG_OBJECT (dec, "peer caps  %" GST_PTR_FORMAT, caps);
2184 
2185   /* before fixating, try to use whatever upstream provided */
2186   caps = gst_caps_make_writable (caps);
2187   caps_size = gst_caps_get_size (caps);
2188   if (dec->priv->ctx.input_caps) {
2189     GstCaps *sinkcaps = dec->priv->ctx.input_caps;
2190     GstStructure *structure = gst_caps_get_structure (sinkcaps, 0);
2191 
2192     if (gst_structure_get_int (structure, "rate", &rate)) {
2193       for (i = 0; i < caps_size; i++) {
2194         gst_structure_set (gst_caps_get_structure (caps, i), "rate",
2195             G_TYPE_INT, rate, NULL);
2196       }
2197     }
2198 
2199     if (gst_structure_get_int (structure, "channels", &channels)) {
2200       for (i = 0; i < caps_size; i++) {
2201         gst_structure_set (gst_caps_get_structure (caps, i), "channels",
2202             G_TYPE_INT, channels, NULL);
2203       }
2204     }
2205 
2206     if (gst_structure_get (structure, "channel-mask", GST_TYPE_BITMASK,
2207             &channel_mask, NULL)) {
2208       for (i = 0; i < caps_size; i++) {
2209         gst_structure_set (gst_caps_get_structure (caps, i), "channel-mask",
2210             GST_TYPE_BITMASK, channel_mask, NULL);
2211       }
2212     }
2213   }
2214 
2215   for (i = 0; i < caps_size; i++) {
2216     structure = gst_caps_get_structure (caps, i);
2217     if (gst_structure_has_field (structure, "channels"))
2218       gst_structure_fixate_field_nearest_int (structure,
2219           "channels", GST_AUDIO_DEF_CHANNELS);
2220     else
2221       gst_structure_set (structure, "channels", G_TYPE_INT,
2222           GST_AUDIO_DEF_CHANNELS, NULL);
2223     if (gst_structure_has_field (structure, "rate"))
2224       gst_structure_fixate_field_nearest_int (structure,
2225           "rate", GST_AUDIO_DEF_RATE);
2226     else
2227       gst_structure_set (structure, "rate", G_TYPE_INT, GST_AUDIO_DEF_RATE,
2228           NULL);
2229   }
2230   caps = gst_caps_fixate (caps);
2231   structure = gst_caps_get_structure (caps, 0);
2232 
2233   /* Need to add a channel-mask if channels > 2 */
2234   gst_structure_get_int (structure, "channels", &channels);
2235   if (channels > 2 && !gst_structure_has_field (structure, "channel-mask")) {
2236     channel_mask = gst_audio_channel_get_fallback_mask (channels);
2237     if (channel_mask != 0) {
2238       gst_structure_set (structure, "channel-mask",
2239           GST_TYPE_BITMASK, channel_mask, NULL);
2240     } else {
2241       GST_WARNING_OBJECT (dec, "No default channel-mask for %d channels",
2242           channels);
2243     }
2244   }
2245 
2246   if (!caps || !gst_audio_info_from_caps (&info, caps))
2247     goto caps_error;
2248 
2249   GST_OBJECT_LOCK (dec);
2250   dec->priv->ctx.info = info;
2251   dec->priv->ctx.caps = caps;
2252   GST_OBJECT_UNLOCK (dec);
2253 
2254   GST_INFO_OBJECT (dec,
2255       "Chose default caps %" GST_PTR_FORMAT " for initial gap", caps);
2256 
2257   return TRUE;
2258 
2259 caps_error:
2260   {
2261     if (caps)
2262       gst_caps_unref (caps);
2263     return FALSE;
2264   }
2265 }
2266 
2267 static gboolean
2268 gst_audio_decoder_handle_gap (GstAudioDecoder * dec, GstEvent * event)
2269 {
2270   gboolean ret;
2271   GstClockTime timestamp, duration;
2272   gboolean needs_reconfigure = FALSE;
2273 
2274   /* Ensure we have caps first */
2275   GST_AUDIO_DECODER_STREAM_LOCK (dec);
2276   if (!GST_AUDIO_INFO_IS_VALID (&dec->priv->ctx.info)) {
2277     if (!gst_audio_decoder_negotiate_default_caps (dec)) {
2278       GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2279       GST_ELEMENT_ERROR (dec, STREAM, FORMAT, (NULL),
2280           ("Decoder output not negotiated before GAP event."));
2281       gst_event_unref (event);
2282       return FALSE;
2283     }
2284     needs_reconfigure = TRUE;
2285   }
2286   needs_reconfigure = gst_pad_check_reconfigure (dec->srcpad)
2287       || needs_reconfigure;
2288   if (G_UNLIKELY (dec->priv->ctx.output_format_changed || needs_reconfigure)) {
2289     if (!gst_audio_decoder_negotiate_unlocked (dec)) {
2290       GST_WARNING_OBJECT (dec, "Failed to negotiate with downstream");
2291       gst_pad_mark_reconfigure (dec->srcpad);
2292     }
2293   }
2294   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2295 
2296   gst_event_parse_gap (event, &timestamp, &duration);
2297 
2298   /* time progressed without data, see if we can fill the gap with
2299    * some concealment data */
2300   GST_DEBUG_OBJECT (dec,
2301       "gap event: plc %d, do_plc %d, position %" GST_TIME_FORMAT
2302       " duration %" GST_TIME_FORMAT,
2303       dec->priv->plc, dec->priv->ctx.do_plc,
2304       GST_TIME_ARGS (timestamp), GST_TIME_ARGS (duration));
2305 
2306   if (dec->priv->plc && dec->priv->ctx.do_plc && dec->input_segment.rate > 0.0) {
2307     GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (dec);
2308     GstBuffer *buf;
2309 
2310     /* hand subclass empty frame with duration that needs covering */
2311     buf = gst_buffer_new ();
2312     GST_BUFFER_PTS (buf) = timestamp;
2313     GST_BUFFER_DURATION (buf) = duration;
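    /* (a PLC-capable subclass is expected to synthesize roughly this much
     * concealment audio for the empty buffer and push it out with
     * gst_audio_decoder_finish_frame(), just as for a regular frame) */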
2314     /* best effort, not much error handling */
2315     gst_audio_decoder_handle_frame (dec, klass, buf);
2316     ret = TRUE;
2317     dec->priv->expecting_discont_buf = TRUE;
2318     gst_event_unref (event);
2319   } else {
2320     GstFlowReturn flowret;
2321 
2322     /* sub-class doesn't know how to handle empty buffers,
2323      * so just try sending GAP downstream */
2324     flowret = check_pending_reconfigure (dec);
2325     if (flowret == GST_FLOW_OK) {
2326       send_pending_events (dec);
2327       ret = gst_audio_decoder_push_event (dec, event);
2328     } else {
2329       ret = FALSE;
2330       gst_event_unref (event);
2331     }
2332   }
2333   return ret;
2334 }
2335 
2336 static GList *
2337 _flush_events (GstPad * pad, GList * events)
2338 {
2339   GList *tmp;
2340 
2341   for (tmp = events; tmp; tmp = tmp->next) {
2342     if (GST_EVENT_TYPE (tmp->data) != GST_EVENT_EOS &&
2343         GST_EVENT_TYPE (tmp->data) != GST_EVENT_SEGMENT &&
2344         GST_EVENT_IS_STICKY (tmp->data)) {
2345       gst_pad_store_sticky_event (pad, GST_EVENT_CAST (tmp->data));
2346     }
2347     gst_event_unref (tmp->data);
2348   }
2349   g_list_free (events);
2350 
2351   return NULL;
2352 }
2353 
2354 static gboolean
2355 gst_audio_decoder_sink_eventfunc (GstAudioDecoder * dec, GstEvent * event)
2356 {
2357   gboolean ret;
2358 
2359   switch (GST_EVENT_TYPE (event)) {
2360     case GST_EVENT_STREAM_START:
2361       GST_AUDIO_DECODER_STREAM_LOCK (dec);
2362       /* finish any data in current segment and clear the decoder
2363        * to be ready for new stream data */
2364       gst_audio_decoder_drain (dec);
2365       gst_audio_decoder_flush (dec, FALSE);
2366 
2367       GST_DEBUG_OBJECT (dec, "received STREAM_START. Clearing taglist");
2368       /* Flush upstream tags after a STREAM_START */
2369       if (dec->priv->upstream_tags) {
2370         gst_tag_list_unref (dec->priv->upstream_tags);
2371         dec->priv->upstream_tags = NULL;
2372         dec->priv->taglist_changed = TRUE;
2373       }
2374       GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2375 
2376       ret = gst_audio_decoder_push_event (dec, event);
2377       break;
2378     case GST_EVENT_SEGMENT:
2379     {
2380       GstSegment seg;
2381       GstFormat format;
2382 
2383       GST_AUDIO_DECODER_STREAM_LOCK (dec);
2384       gst_event_copy_segment (event, &seg);
2385 
2386       format = seg.format;
2387       if (format == GST_FORMAT_TIME) {
2388         GST_DEBUG_OBJECT (dec, "received TIME SEGMENT %" GST_SEGMENT_FORMAT,
2389             &seg);
2390       } else {
2391         gint64 nstart;
2392         GST_DEBUG_OBJECT (dec, "received SEGMENT %" GST_SEGMENT_FORMAT, &seg);
2393         /* handle newsegment resulting from legacy simple seeking */
2394         /* note that we need to convert this whether or not there is enough
2395          * data to handle the initial newsegment */
2396         if (dec->priv->ctx.do_estimate_rate &&
2397             gst_pad_query_convert (dec->sinkpad, GST_FORMAT_BYTES, seg.start,
2398                 GST_FORMAT_TIME, &nstart)) {
2399           /* best attempt convert */
2400           /* as these are only estimates, stop is kept open-ended to avoid
2401            * premature cutting */
2402           GST_DEBUG_OBJECT (dec, "converted to TIME start %" GST_TIME_FORMAT,
2403               GST_TIME_ARGS (nstart));
2404           seg.format = GST_FORMAT_TIME;
2405           seg.start = nstart;
2406           seg.time = nstart;
2407           seg.stop = GST_CLOCK_TIME_NONE;
2408           /* replace event */
2409           gst_event_unref (event);
2410           event = gst_event_new_segment (&seg);
2411         } else {
2412           GST_DEBUG_OBJECT (dec, "unsupported format; ignoring");
2413           GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2414           gst_event_unref (event);
2415           ret = FALSE;
2416           break;
2417         }
2418       }
2419 
2420       /* prepare for next segment */
2421       /* Use the segment start as a base timestamp
2422        * in case upstream does not come up with anything better
2423        * (e.g. upstream BYTE) */
2424       if (format != GST_FORMAT_TIME) {
2425         dec->priv->base_ts = seg.start;
2426         dec->priv->samples = 0;
2427       }
2428 
2429       /* Update the decode flags in the segment if we have an instant-rate
2430        * override active */
2431       GST_OBJECT_LOCK (dec);
2432       if (dec->priv->decode_flags_override) {
2433         seg.flags &= ~GST_SEGMENT_INSTANT_FLAGS;
2434         seg.flags |= dec->priv->decode_flags & GST_SEGMENT_INSTANT_FLAGS;
2435       }
2436 
2437       /* and follow along with segment */
2438       dec->priv->in_out_segment_sync = FALSE;
2439       dec->input_segment = seg;
2440       GST_OBJECT_UNLOCK (dec);
2441 
2442       dec->priv->pending_events =
2443           g_list_append (dec->priv->pending_events, event);
2444       GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2445 
2446       ret = TRUE;
2447       break;
2448     }
2449     case GST_EVENT_INSTANT_RATE_CHANGE:
2450     {
2451       GstSegmentFlags flags;
2452       GstSegment *seg;
2453 
2454       gst_event_parse_instant_rate_change (event, NULL, &flags);
2455 
2456       GST_OBJECT_LOCK (dec);
2457       dec->priv->decode_flags_override = TRUE;
2458       dec->priv->decode_flags = flags;
2459 
2460       /* Update the input segment flags */
2461       seg = &dec->input_segment;
2462       seg->flags &= ~GST_SEGMENT_INSTANT_FLAGS;
2463       seg->flags |= dec->priv->decode_flags & GST_SEGMENT_INSTANT_FLAGS;
2464       GST_OBJECT_UNLOCK (dec);
2465 
2466       /* Forward downstream */
2467       ret = gst_pad_event_default (dec->sinkpad, GST_OBJECT_CAST (dec), event);
2468       break;
2469     }
2470     case GST_EVENT_GAP:
2471       ret = gst_audio_decoder_handle_gap (dec, event);
2472       break;
2473     case GST_EVENT_FLUSH_STOP:
2474       GST_AUDIO_DECODER_STREAM_LOCK (dec);
2475       /* prepare for fresh start */
2476       gst_audio_decoder_flush (dec, TRUE);
2477 
2478       dec->priv->pending_events = _flush_events (dec->srcpad,
2479           dec->priv->pending_events);
2480       GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2481 
2482       /* Forward FLUSH_STOP, it is expected to be forwarded immediately
2483        * and no buffers are queued anyway. */
2484       ret = gst_audio_decoder_push_event (dec, event);
2485       break;
2486 
2487     case GST_EVENT_SEGMENT_DONE:
2488       GST_AUDIO_DECODER_STREAM_LOCK (dec);
2489       gst_audio_decoder_drain (dec);
2490       GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2491 
2492       /* Forward SEGMENT_DONE because no buffer or serialized event will come
2493        * after SEGMENT_DONE and nothing could trigger another _finish_frame() call. */
2494       if (dec->priv->pending_events)
2495         send_pending_events (dec);
2496       ret = gst_audio_decoder_push_event (dec, event);
2497       break;
2498 
2499     case GST_EVENT_EOS:
2500       GST_AUDIO_DECODER_STREAM_LOCK (dec);
2501       gst_audio_decoder_drain (dec);
2502       GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2503 
2504       if (dec->priv->ctx.had_input_data && !dec->priv->ctx.had_output_data) {
2505         GST_ELEMENT_ERROR (dec, STREAM, DECODE,
2506             ("No valid frames decoded before end of stream"),
2507             ("no valid frames found"));
2508 #ifdef OHOS_OPT_COMPAT
2509         /**
2510         * ohos.opt.compat.0058
2511         * If we push the EOS event downstream while the engine is in the process
2512         * of being destroyed, a deadlock may occur.
2513         */
2514         ret = TRUE;
2515         break;
2516 #endif
2517       }
2518 
2519       /* Forward EOS because no buffer or serialized event will come after
2520        * EOS and nothing could trigger another _finish_frame() call. */
2521       if (dec->priv->pending_events)
2522         send_pending_events (dec);
2523       ret = gst_audio_decoder_push_event (dec, event);
2524       break;
2525 
2526     case GST_EVENT_CAPS:
2527     {
2528       GstCaps *caps;
2529 
2530       gst_event_parse_caps (event, &caps);
2531       ret = gst_audio_decoder_sink_setcaps (dec, caps);
2532       gst_event_unref (event);
2533       break;
2534     }
2535     case GST_EVENT_TAG:
2536     {
2537       GstTagList *tags;
2538 
2539       gst_event_parse_tag (event, &tags);
2540 
2541       if (gst_tag_list_get_scope (tags) == GST_TAG_SCOPE_STREAM) {
2542         GST_AUDIO_DECODER_STREAM_LOCK (dec);
2543         if (dec->priv->upstream_tags != tags) {
2544           if (dec->priv->upstream_tags)
2545             gst_tag_list_unref (dec->priv->upstream_tags);
2546           dec->priv->upstream_tags = gst_tag_list_ref (tags);
2547           GST_INFO_OBJECT (dec, "upstream stream tags: %" GST_PTR_FORMAT, tags);
2548         }
2549         gst_event_unref (event);
2550         event = gst_audio_decoder_create_merged_tags_event (dec);
2551         dec->priv->taglist_changed = FALSE;
2552         GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2553 
2554         /* No tags, bail out here instead of falling through */
2555         if (!event) {
2556           ret = TRUE;
2557           break;
2558         }
2559       }
2560 
2561       /* fall through */
2562     }
2563     default:
2564       if (!GST_EVENT_IS_SERIALIZED (event)) {
2565         ret =
2566             gst_pad_event_default (dec->sinkpad, GST_OBJECT_CAST (dec), event);
2567       } else {
2568         GST_DEBUG_OBJECT (dec, "Enqueuing event %d, %s", GST_EVENT_TYPE (event),
2569             GST_EVENT_TYPE_NAME (event));
2570         GST_AUDIO_DECODER_STREAM_LOCK (dec);
2571         dec->priv->pending_events =
2572             g_list_append (dec->priv->pending_events, event);
2573         GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2574         ret = TRUE;
2575       }
2576       break;
2577   }
2578   return ret;
2579 }
2580 
2581 static gboolean
2582 gst_audio_decoder_sink_event (GstPad * pad, GstObject * parent,
2583     GstEvent * event)
2584 {
2585   GstAudioDecoder *dec;
2586   GstAudioDecoderClass *klass;
2587   gboolean ret;
2588 
2589   dec = GST_AUDIO_DECODER (parent);
2590   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
2591 
2592   GST_DEBUG_OBJECT (dec, "received event %d, %s", GST_EVENT_TYPE (event),
2593       GST_EVENT_TYPE_NAME (event));
2594 
2595   if (klass->sink_event)
2596     ret = klass->sink_event (dec, event);
2597   else {
2598     gst_event_unref (event);
2599     ret = FALSE;
2600   }
2601   return ret;
2602 }
2603 
2604 static gboolean
2605 gst_audio_decoder_do_seek (GstAudioDecoder * dec, GstEvent * event)
2606 {
2607   GstSeekFlags flags;
2608   GstSeekType start_type, end_type;
2609   GstFormat format;
2610   gdouble rate;
2611   gint64 start, start_time, end_time;
2612   GstSegment seek_segment;
2613   guint32 seqnum;
2614 
2615   gst_event_parse_seek (event, &rate, &format, &flags, &start_type,
2616       &start_time, &end_type, &end_time);
2617 
2618   /* we'll handle plain open-ended flushing seeks with the simple approach */
2619   if (rate != 1.0) {
2620     GST_DEBUG_OBJECT (dec, "unsupported seek: rate");
2621     return FALSE;
2622   }
2623 
2624   if (start_type != GST_SEEK_TYPE_SET) {
2625     GST_DEBUG_OBJECT (dec, "unsupported seek: start time");
2626     return FALSE;
2627   }
2628 
2629   if ((end_type != GST_SEEK_TYPE_SET && end_type != GST_SEEK_TYPE_NONE) ||
2630       (end_type == GST_SEEK_TYPE_SET && end_time != GST_CLOCK_TIME_NONE)) {
2631     GST_DEBUG_OBJECT (dec, "unsupported seek: end time");
2632     return FALSE;
2633   }
2634 
2635   if (!(flags & GST_SEEK_FLAG_FLUSH)) {
2636     GST_DEBUG_OBJECT (dec, "unsupported seek: not flushing");
2637     return FALSE;
2638   }
2639 
2640   memcpy (&seek_segment, &dec->output_segment, sizeof (seek_segment));
2641   gst_segment_do_seek (&seek_segment, rate, format, flags, start_type,
2642       start_time, end_type, end_time, NULL);
2643   start_time = seek_segment.position;
2644 
2645   if (!gst_pad_query_convert (dec->sinkpad, GST_FORMAT_TIME, start_time,
2646           GST_FORMAT_BYTES, &start)) {
2647     GST_DEBUG_OBJECT (dec, "conversion failed");
2648     return FALSE;
2649   }
2650 
2651   seqnum = gst_event_get_seqnum (event);
2652   event = gst_event_new_seek (1.0, GST_FORMAT_BYTES, flags,
2653       GST_SEEK_TYPE_SET, start, GST_SEEK_TYPE_NONE, -1);
2654   gst_event_set_seqnum (event, seqnum);
2655 
2656   GST_DEBUG_OBJECT (dec, "seeking to %" GST_TIME_FORMAT " at byte offset %"
2657       G_GINT64_FORMAT, GST_TIME_ARGS (start_time), start);
2658 
2659   return gst_pad_push_event (dec->sinkpad, event);
2660 }
2661 
2662 static gboolean
2663 gst_audio_decoder_src_eventfunc (GstAudioDecoder * dec, GstEvent * event)
2664 {
2665   gboolean res;
2666 
2667   switch (GST_EVENT_TYPE (event)) {
2668     case GST_EVENT_SEEK:
2669     {
2670       GstFormat format;
2671       gdouble rate;
2672       GstSeekFlags flags;
2673       GstSeekType start_type, stop_type;
2674       gint64 start, stop;
2675       gint64 tstart, tstop;
2676       guint32 seqnum;
2677 
2678       gst_event_parse_seek (event, &rate, &format, &flags, &start_type, &start,
2679           &stop_type, &stop);
2680       seqnum = gst_event_get_seqnum (event);
2681 
2682       /* upstream gets a chance first */
2683       if ((res = gst_pad_push_event (dec->sinkpad, event)))
2684         break;
2685 
2686       /* if upstream fails for a time seek, maybe we can help if allowed */
2687       if (format == GST_FORMAT_TIME) {
2688         if (gst_audio_decoder_do_byte (dec))
2689           res = gst_audio_decoder_do_seek (dec, event);
2690         break;
2691       }
2692 
2693       /* ... though a non-time seek can be aided as well */
2694       /* First bring the requested format to time */
2695       if (!(res =
2696               gst_pad_query_convert (dec->srcpad, format, start,
2697                   GST_FORMAT_TIME, &tstart)))
2698         goto convert_error;
2699       if (!(res =
2700               gst_pad_query_convert (dec->srcpad, format, stop, GST_FORMAT_TIME,
2701                   &tstop)))
2702         goto convert_error;
2703 
2704       /* then seek with time on the peer */
2705       event = gst_event_new_seek (rate, GST_FORMAT_TIME,
2706           flags, start_type, tstart, stop_type, tstop);
2707       gst_event_set_seqnum (event, seqnum);
2708 
2709       res = gst_pad_push_event (dec->sinkpad, event);
2710       break;
2711     }
2712     default:
2713       res = gst_pad_event_default (dec->srcpad, GST_OBJECT_CAST (dec), event);
2714       break;
2715   }
2716 done:
2717   return res;
2718 
2719   /* ERRORS */
2720 convert_error:
2721   {
2722     GST_DEBUG_OBJECT (dec, "cannot convert start/stop for seek");
2723     goto done;
2724   }
2725 }
2726 
2727 static gboolean
2728 gst_audio_decoder_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
2729 {
2730   GstAudioDecoder *dec;
2731   GstAudioDecoderClass *klass;
2732   gboolean ret;
2733 
2734   dec = GST_AUDIO_DECODER (parent);
2735   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
2736 
2737   GST_DEBUG_OBJECT (dec, "received event %d, %s", GST_EVENT_TYPE (event),
2738       GST_EVENT_TYPE_NAME (event));
2739 
2740   if (klass->src_event)
2741     ret = klass->src_event (dec, event);
2742   else {
2743     gst_event_unref (event);
2744     ret = FALSE;
2745   }
2746 
2747   return ret;
2748 }
2749 
2750 static gboolean
2751 gst_audio_decoder_decide_allocation_default (GstAudioDecoder * dec,
2752     GstQuery * query)
2753 {
2754   GstAllocator *allocator = NULL;
2755   GstAllocationParams params;
2756   gboolean update_allocator;
2757 
2758   /* we got configuration from our peer or the decide_allocation method,
2759    * parse them */
2760   if (gst_query_get_n_allocation_params (query) > 0) {
2761     /* try the allocator */
2762     gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
2763     update_allocator = TRUE;
2764   } else {
2765     allocator = NULL;
2766     gst_allocation_params_init (&params);
2767     update_allocator = FALSE;
2768   }
2769 
2770   if (update_allocator)
2771     gst_query_set_nth_allocation_param (query, 0, allocator, &params);
2772   else
2773     gst_query_add_allocation_param (query, allocator, &params);
2774   if (allocator)
2775     gst_object_unref (allocator);
2776 
2777   return TRUE;
2778 }
2779 
2780 static gboolean
2781 gst_audio_decoder_propose_allocation_default (GstAudioDecoder * dec,
2782     GstQuery * query)
2783 {
2784   return TRUE;
2785 }
2786 
2787 /**
2788  * gst_audio_decoder_proxy_getcaps:
2789  * @decoder: a #GstAudioDecoder
2790  * @caps: (allow-none): initial caps
2791  * @filter: (allow-none): filter caps
2792  *
2793  * Returns caps that express @caps (or sink template caps if @caps == NULL)
2794  * restricted to rate/channels/... combinations supported by downstream
2795  * elements.
2796  *
2797  * Returns: (transfer full): a #GstCaps owned by caller
2798  *
2799  * Since: 1.6
2800  */
2801 GstCaps *
2802 gst_audio_decoder_proxy_getcaps (GstAudioDecoder * decoder, GstCaps * caps,
2803     GstCaps * filter)
2804 {
2805   return __gst_audio_element_proxy_getcaps (GST_ELEMENT_CAST (decoder),
2806       GST_AUDIO_DECODER_SINK_PAD (decoder),
2807       GST_AUDIO_DECODER_SRC_PAD (decoder), caps, filter);
2808 }
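/* Illustrative sketch (editorial addition): a subclass ::getcaps vfunc that
 * only wants its sink caps restricted to rate/channels combinations supported
 * downstream can simply proxy:
 *
 *   static GstCaps *
 *   my_dec_getcaps (GstAudioDecoder * dec, GstCaps * filter)
 *   {
 *     return gst_audio_decoder_proxy_getcaps (dec, NULL, filter);
 *   }
 *
 * which matches what the default sink getcaps handling below does when no
 * ::getcaps is provided. */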
2809 
2810 static GstCaps *
2811 gst_audio_decoder_sink_getcaps (GstAudioDecoder * decoder, GstCaps * filter)
2812 {
2813   GstAudioDecoderClass *klass;
2814   GstCaps *caps;
2815 
2816   klass = GST_AUDIO_DECODER_GET_CLASS (decoder);
2817 
2818   if (klass->getcaps)
2819     caps = klass->getcaps (decoder, filter);
2820   else
2821     caps = gst_audio_decoder_proxy_getcaps (decoder, NULL, filter);
2822 
2823   GST_LOG_OBJECT (decoder, "Returning caps %" GST_PTR_FORMAT, caps);
2824 
2825   return caps;
2826 }
2827 
2828 static gboolean
2829 gst_audio_decoder_sink_query_default (GstAudioDecoder * dec, GstQuery * query)
2830 {
2831   GstPad *pad = GST_AUDIO_DECODER_SINK_PAD (dec);
2832   gboolean res = FALSE;
2833 
2834   GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query);
2835 
2836   switch (GST_QUERY_TYPE (query)) {
2837     case GST_QUERY_FORMATS:
2838     {
2839       gst_query_set_formats (query, 2, GST_FORMAT_TIME, GST_FORMAT_BYTES);
2840       res = TRUE;
2841       break;
2842     }
2843     case GST_QUERY_CONVERT:
2844     {
2845       GstFormat src_fmt, dest_fmt;
2846       gint64 src_val, dest_val;
2847 
2848       gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
2849       GST_OBJECT_LOCK (dec);
2850       res = __gst_audio_encoded_audio_convert (&dec->priv->ctx.info,
2851           dec->priv->bytes_in, dec->priv->samples_out,
2852           src_fmt, src_val, &dest_fmt, &dest_val);
2853       GST_OBJECT_UNLOCK (dec);
2854       if (!res)
2855         goto error;
2856       gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
2857       break;
2858     }
2859     case GST_QUERY_ALLOCATION:
2860     {
2861       GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (dec);
2862 
2863       if (klass->propose_allocation)
2864         res = klass->propose_allocation (dec, query);
2865       break;
2866     }
2867     case GST_QUERY_CAPS:{
2868       GstCaps *filter, *caps;
2869 
2870       gst_query_parse_caps (query, &filter);
2871       caps = gst_audio_decoder_sink_getcaps (dec, filter);
2872       gst_query_set_caps_result (query, caps);
2873       gst_caps_unref (caps);
2874       res = TRUE;
2875       break;
2876     }
2877     case GST_QUERY_ACCEPT_CAPS:{
2878       if (dec->priv->use_default_pad_acceptcaps) {
2879         res =
2880             gst_pad_query_default (GST_AUDIO_DECODER_SINK_PAD (dec),
2881             GST_OBJECT_CAST (dec), query);
2882       } else {
2883         GstCaps *caps;
2884         GstCaps *allowed_caps;
2885         GstCaps *template_caps;
2886         gboolean accept;
2887 
2888         gst_query_parse_accept_caps (query, &caps);
2889 
2890         template_caps = gst_pad_get_pad_template_caps (pad);
2891         accept = gst_caps_is_subset (caps, template_caps);
2892         gst_caps_unref (template_caps);
2893 
2894         if (accept) {
2895           allowed_caps = gst_pad_query_caps (GST_AUDIO_DECODER_SINK_PAD (dec),
2896               caps);
2897 
2898           accept = gst_caps_can_intersect (caps, allowed_caps);
2899 
2900           gst_caps_unref (allowed_caps);
2901         }
2902 
2903         gst_query_set_accept_caps_result (query, accept);
2904         res = TRUE;
2905       }
2906       break;
2907     }
2908     case GST_QUERY_SEEKING:
2909     {
2910       GstFormat format;
2911 
2912       /* non-TIME segments are discarded, so we won't seek that way either */
2913       gst_query_parse_seeking (query, &format, NULL, NULL, NULL);
2914       if (format != GST_FORMAT_TIME) {
2915         GST_DEBUG_OBJECT (dec, "discarding non-TIME SEEKING query");
2916         res = FALSE;
2917         break;
2918       }
2919       /* fall-through */
2920     }
2921     default:
2922       res = gst_pad_query_default (pad, GST_OBJECT_CAST (dec), query);
2923       break;
2924   }
2925 
2926 error:
2927   return res;
2928 }
2929 
2930 static gboolean
2931 gst_audio_decoder_sink_query (GstPad * pad, GstObject * parent,
2932     GstQuery * query)
2933 {
2934   GstAudioDecoderClass *dec_class;
2935   GstAudioDecoder *dec;
2936   gboolean ret = FALSE;
2937 
2938   dec = GST_AUDIO_DECODER (parent);
2939   dec_class = GST_AUDIO_DECODER_GET_CLASS (dec);
2940 
2941   GST_DEBUG_OBJECT (pad, "received query %" GST_PTR_FORMAT, query);
2942 
2943   if (dec_class->sink_query)
2944     ret = dec_class->sink_query (dec, query);
2945 
2946   return ret;
2947 }
2948 
2949 /* FIXME ? are any of these queries (other than latency) a decoder's business ??
2950  * also, the conversion stuff might seem to make sense, but seems to not mind
2951  * segment stuff etc at all
2952  * Supposedly that's backward compatibility ... */
2953 static gboolean
2954 gst_audio_decoder_src_query_default (GstAudioDecoder * dec, GstQuery * query)
2955 {
2956   GstPad *pad = GST_AUDIO_DECODER_SRC_PAD (dec);
2957   gboolean res = FALSE;
2958 
2959   GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query);
2960 
2961   switch (GST_QUERY_TYPE (query)) {
2962     case GST_QUERY_DURATION:
2963     {
2964       GstFormat format;
2965 
2966       /* upstream in any case */
2967       if ((res = gst_pad_query_default (pad, GST_OBJECT_CAST (dec), query)))
2968         break;
2969 
2970       gst_query_parse_duration (query, &format, NULL);
2971       /* try answering TIME by converting from BYTE if subclass allows  */
2972       if (format == GST_FORMAT_TIME && gst_audio_decoder_do_byte (dec)) {
2973         gint64 value;
2974 
2975         if (gst_pad_peer_query_duration (dec->sinkpad, GST_FORMAT_BYTES,
2976                 &value)) {
2977           GST_LOG_OBJECT (dec, "upstream size %" G_GINT64_FORMAT, value);
2978           if (gst_pad_query_convert (dec->sinkpad, GST_FORMAT_BYTES, value,
2979                   GST_FORMAT_TIME, &value)) {
2980             gst_query_set_duration (query, GST_FORMAT_TIME, value);
2981             res = TRUE;
2982           }
2983         }
2984       }
2985       break;
2986     }
2987     case GST_QUERY_POSITION:
2988     {
2989       GstFormat format;
2990       gint64 time, value;
2991 
2992       if ((res = gst_pad_peer_query (dec->sinkpad, query))) {
2993         GST_LOG_OBJECT (dec, "returning peer response");
2994         break;
2995       }
2996 
2997       /* Refuse BYTES format queries. If it made sense to
2998        * answer them, upstream would have already */
2999       gst_query_parse_position (query, &format, NULL);
3000 
3001       if (format == GST_FORMAT_BYTES) {
3002         GST_LOG_OBJECT (dec, "Ignoring BYTES position query");
3003         break;
3004       }
3005 
3006       /* we start from the last seen time */
3007       time = dec->output_segment.position;
3008       /* correct for the segment values */
3009       time =
3010           gst_segment_to_stream_time (&dec->output_segment, GST_FORMAT_TIME,
3011           time);
3012 
3013       GST_LOG_OBJECT (dec,
3014           "query %p: our time: %" GST_TIME_FORMAT, query, GST_TIME_ARGS (time));
3015 
3016       /* and convert to the final format */
3017       if (!(res = gst_pad_query_convert (pad, GST_FORMAT_TIME, time,
3018                   format, &value)))
3019         break;
3020 
3021       gst_query_set_position (query, format, value);
3022 
3023       GST_LOG_OBJECT (dec,
3024           "query %p: we return %" G_GINT64_FORMAT " (format %u)", query, value,
3025           format);
3026       break;
3027     }
3028     case GST_QUERY_FORMATS:
3029     {
3030       gst_query_set_formats (query, 3,
3031           GST_FORMAT_TIME, GST_FORMAT_BYTES, GST_FORMAT_DEFAULT);
3032       res = TRUE;
3033       break;
3034     }
3035     case GST_QUERY_CONVERT:
3036     {
3037       GstFormat src_fmt, dest_fmt;
3038       gint64 src_val, dest_val;
3039 
3040       gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
3041       GST_OBJECT_LOCK (dec);
3042       res = gst_audio_info_convert (&dec->priv->ctx.info,
3043           src_fmt, src_val, dest_fmt, &dest_val);
3044       GST_OBJECT_UNLOCK (dec);
3045       if (!res)
3046         break;
3047       gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
3048       break;
3049     }
3050     case GST_QUERY_LATENCY:
3051     {
3052       if ((res = gst_pad_peer_query (dec->sinkpad, query))) {
3053         gboolean live;
3054         GstClockTime min_latency, max_latency;
3055 
3056         gst_query_parse_latency (query, &live, &min_latency, &max_latency);
3057         GST_DEBUG_OBJECT (dec, "Peer latency: live %d, min %"
3058             GST_TIME_FORMAT " max %" GST_TIME_FORMAT, live,
3059             GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));
3060 
3061         GST_OBJECT_LOCK (dec);
3062         /* add our latency */
3063         min_latency += dec->priv->ctx.min_latency;
3064         if (max_latency == -1 || dec->priv->ctx.max_latency == -1)
3065           max_latency = -1;
3066         else
3067           max_latency += dec->priv->ctx.max_latency;
3068         GST_OBJECT_UNLOCK (dec);
3069 
3070         gst_query_set_latency (query, live, min_latency, max_latency);
3071       }
3072       break;
3073     }
3074     default:
3075       res = gst_pad_query_default (pad, GST_OBJECT_CAST (dec), query);
3076       break;
3077   }
3078 
3079   return res;
3080 }
3081 
3082 static gboolean
3083 gst_audio_decoder_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
3084 {
3085   GstAudioDecoder *dec;
3086   GstAudioDecoderClass *dec_class;
3087   gboolean ret = FALSE;
3088 
3089   dec = GST_AUDIO_DECODER (parent);
3090   dec_class = GST_AUDIO_DECODER_GET_CLASS (dec);
3091 
3092   GST_DEBUG_OBJECT (pad, "received query %" GST_PTR_FORMAT, query);
3093 
3094   if (dec_class->src_query)
3095     ret = dec_class->src_query (dec, query);
3096 
3097   return ret;
3098 }
3099 
3100 static gboolean
3101 gst_audio_decoder_stop (GstAudioDecoder * dec)
3102 {
3103   GstAudioDecoderClass *klass;
3104   gboolean ret = TRUE;
3105 
3106   GST_DEBUG_OBJECT (dec, "gst_audio_decoder_stop");
3107 
3108   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
3109 
3110   if (klass->stop) {
3111     ret = klass->stop (dec);
3112   }
3113 
3114   /* clean up */
3115   gst_audio_decoder_reset (dec, TRUE);
3116 
3117   if (ret)
3118     dec->priv->active = FALSE;
3119 
3120   return ret;
3121 }
3122 
3123 static gboolean
3124 gst_audio_decoder_start (GstAudioDecoder * dec)
3125 {
3126   GstAudioDecoderClass *klass;
3127   gboolean ret = TRUE;
3128 
3129   GST_DEBUG_OBJECT (dec, "gst_audio_decoder_start");
3130 
3131   klass = GST_AUDIO_DECODER_GET_CLASS (dec);
3132 
3133   /* arrange clean state */
3134   gst_audio_decoder_reset (dec, TRUE);
3135 
3136   if (klass->start) {
3137     ret = klass->start (dec);
3138   }
3139 
3140   if (ret)
3141     dec->priv->active = TRUE;
3142 
3143   return ret;
3144 }
3145 
3146 static void
3147 gst_audio_decoder_get_property (GObject * object, guint prop_id,
3148     GValue * value, GParamSpec * pspec)
3149 {
3150   GstAudioDecoder *dec;
3151 
3152   dec = GST_AUDIO_DECODER (object);
3153 
3154   switch (prop_id) {
3155     case PROP_LATENCY:
3156       g_value_set_int64 (value, dec->priv->latency);
3157       break;
3158     case PROP_TOLERANCE:
3159       g_value_set_int64 (value, dec->priv->tolerance);
3160       break;
3161     case PROP_PLC:
3162       g_value_set_boolean (value, dec->priv->plc);
3163       break;
3164     case PROP_MAX_ERRORS:
3165       g_value_set_int (value, gst_audio_decoder_get_max_errors (dec));
3166       break;
3167     default:
3168       G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
3169       break;
3170   }
3171 }
3172 
3173 static void
3174 gst_audio_decoder_set_property (GObject * object, guint prop_id,
3175     const GValue * value, GParamSpec * pspec)
3176 {
3177   GstAudioDecoder *dec;
3178 
3179   dec = GST_AUDIO_DECODER (object);
3180 
3181   switch (prop_id) {
3182     case PROP_LATENCY:
3183       dec->priv->latency = g_value_get_int64 (value);
3184       break;
3185     case PROP_TOLERANCE:
3186       dec->priv->tolerance = g_value_get_int64 (value);
3187       break;
3188     case PROP_PLC:
3189       dec->priv->plc = g_value_get_boolean (value);
3190       break;
3191     case PROP_MAX_ERRORS:
3192       gst_audio_decoder_set_max_errors (dec, g_value_get_int (value));
3193       break;
3194     default:
3195       G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
3196       break;
3197   }
3198 }
3199 
3200 static GstStateChangeReturn
3201 gst_audio_decoder_change_state (GstElement * element, GstStateChange transition)
3202 {
3203   GstAudioDecoder *codec;
3204   GstAudioDecoderClass *klass;
3205   GstStateChangeReturn ret;
3206 
3207   codec = GST_AUDIO_DECODER (element);
3208   klass = GST_AUDIO_DECODER_GET_CLASS (codec);
3209 
3210   switch (transition) {
3211     case GST_STATE_CHANGE_NULL_TO_READY:
3212       if (klass->open) {
3213         if (!klass->open (codec))
3214           goto open_failed;
3215       }
3216       break;
3217     case GST_STATE_CHANGE_READY_TO_PAUSED:
3218       if (!gst_audio_decoder_start (codec)) {
3219         goto start_failed;
3220       }
3221       break;
3222     case GST_STATE_CHANGE_PAUSED_TO_PLAYING:
3223       break;
3224     default:
3225       break;
3226   }
3227 
3228   ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
3229 
3230   switch (transition) {
3231     case GST_STATE_CHANGE_PLAYING_TO_PAUSED:
3232       break;
3233     case GST_STATE_CHANGE_PAUSED_TO_READY:
3234       if (!gst_audio_decoder_stop (codec)) {
3235         goto stop_failed;
3236       }
3237       break;
3238     case GST_STATE_CHANGE_READY_TO_NULL:
3239       if (klass->close) {
3240         if (!klass->close (codec))
3241           goto close_failed;
3242       }
3243       break;
3244     default:
3245       break;
3246   }
3247 
3248   return ret;
3249 
3250 start_failed:
3251   {
3252     GST_ELEMENT_ERROR (codec, LIBRARY, INIT, (NULL), ("Failed to start codec"));
3253     return GST_STATE_CHANGE_FAILURE;
3254   }
3255 stop_failed:
3256   {
3257     GST_ELEMENT_ERROR (codec, LIBRARY, INIT, (NULL), ("Failed to stop codec"));
3258     return GST_STATE_CHANGE_FAILURE;
3259   }
3260 open_failed:
3261   {
3262     GST_ELEMENT_ERROR (codec, LIBRARY, INIT, (NULL), ("Failed to open codec"));
3263     return GST_STATE_CHANGE_FAILURE;
3264   }
3265 close_failed:
3266   {
3267     GST_ELEMENT_ERROR (codec, LIBRARY, INIT, (NULL), ("Failed to close codec"));
3268     return GST_STATE_CHANGE_FAILURE;
3269   }
3270 }
3271 
3272 GstFlowReturn
3273 _gst_audio_decoder_error (GstAudioDecoder * dec, gint weight,
3274     GQuark domain, gint code, gchar * txt, gchar * dbg, const gchar * file,
3275     const gchar * function, gint line)
3276 {
3277   if (txt)
3278     GST_WARNING_OBJECT (dec, "error: %s", txt);
3279   if (dbg)
3280     GST_WARNING_OBJECT (dec, "error: %s", dbg);
3281   dec->priv->error_count += weight;
3282   dec->priv->discont = TRUE;
3283   if (dec->priv->max_errors >= 0
3284       && dec->priv->max_errors < dec->priv->error_count) {
3285     gst_element_message_full (GST_ELEMENT (dec), GST_MESSAGE_ERROR, domain,
3286         code, txt, dbg, file, function, line);
3287     return GST_FLOW_ERROR;
3288   } else {
3289     g_free (txt);
3290     g_free (dbg);
3291     return GST_FLOW_OK;
3292   }
3293 }
3294 
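/* A minimal illustrative sketch (not part of the base class): subclasses
 * normally report decoding problems through the GST_AUDIO_DECODER_ERROR
 * macro, which routes into _gst_audio_decoder_error() above.  The
 * my_dec_handle_frame() and my_codec_decode() names are hypothetical.
 *
 *   static GstFlowReturn
 *   my_dec_handle_frame (GstAudioDecoder * dec, GstBuffer * buffer)
 *   {
 *     GstFlowReturn ret = GST_FLOW_OK;
 *
 *     if (buffer && !my_codec_decode (dec, buffer))
 *       GST_AUDIO_DECODER_ERROR (dec, 1, STREAM, DECODE, (NULL),
 *           ("could not decode frame"), ret);
 *
 *     return ret;
 *   }
 *
 * While the accumulated weight stays within the configured maximum error
 * count, the macro only emits a warning and leaves ret at GST_FLOW_OK;
 * once the limit is exceeded it posts an error message and sets ret to
 * GST_FLOW_ERROR.
 */
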
3295 /**
3296  * gst_audio_decoder_get_audio_info:
3297  * @dec: a #GstAudioDecoder
3298  *
3299  * Returns: (transfer none): a #GstAudioInfo describing the input audio format
3300  */
3301 GstAudioInfo *
3302 gst_audio_decoder_get_audio_info (GstAudioDecoder * dec)
3303 {
3304   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), NULL);
3305 
3306   return &dec->priv->ctx.info;
3307 }
3308 
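/* Illustrative sketch (hypothetical subclass code): typical use of
 * gst_audio_decoder_get_audio_info() to pick up the negotiated sample
 * rate and channel count, assuming a format has already been configured.
 *
 *   GstAudioInfo *info = gst_audio_decoder_get_audio_info (dec);
 *
 *   if (GST_AUDIO_INFO_IS_VALID (info)) {
 *     gint rate = GST_AUDIO_INFO_RATE (info);
 *     gint channels = GST_AUDIO_INFO_CHANNELS (info);
 *     gint bpf = GST_AUDIO_INFO_BPF (info);
 *
 *     my_codec_configure (rate, channels, bpf);
 *   }
 *
 * my_codec_configure() is a placeholder for whatever the subclass does
 * with the negotiated parameters.
 */
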
3309 /**
3310  * gst_audio_decoder_set_plc_aware:
3311  * @dec: a #GstAudioDecoder
3312  * @plc: new plc state
3313  *
3314  * Indicates whether or not subclass handles packet loss concealment (plc).
3315  */
3316 void
3317 gst_audio_decoder_set_plc_aware (GstAudioDecoder * dec, gboolean plc)
3318 {
3319   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3320 
3321   dec->priv->ctx.do_plc = plc;
3322 }
3323 
3324 /**
3325  * gst_audio_decoder_get_plc_aware:
3326  * @dec: a #GstAudioDecoder
3327  *
3328  * Returns: currently configured plc handling
3329  */
3330 gint
3331 gst_audio_decoder_get_plc_aware (GstAudioDecoder * dec)
3332 {
3333   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0);
3334 
3335   return dec->priv->ctx.do_plc;
3336 }
3337 
3338 /**
3339  * gst_audio_decoder_set_estimate_rate:
3340  * @dec: a #GstAudioDecoder
3341  * @enabled: whether to enable byte to time conversion
3342  *
3343  * Allows the base class to perform estimated byte-to-time conversion.
3344  */
3345 void
3346 gst_audio_decoder_set_estimate_rate (GstAudioDecoder * dec, gboolean enabled)
3347 {
3348   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3349 
3350   dec->priv->ctx.do_estimate_rate = enabled;
3351 }
3352 
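/* Illustrative sketch (hypothetical subclass code): context flags such as
 * plc awareness and rate estimation are typically configured from the
 * subclass' start() or set_format() implementation.
 *
 *   static gboolean
 *   my_dec_start (GstAudioDecoder * dec)
 *   {
 *     gst_audio_decoder_set_plc_aware (dec, TRUE);
 *     gst_audio_decoder_set_estimate_rate (dec, TRUE);
 *     return TRUE;
 *   }
 */
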
3353 /**
3354  * gst_audio_decoder_get_estimate_rate:
3355  * @dec: a #GstAudioDecoder
3356  *
3357  * Returns: currently configured byte to time conversion setting
3358  */
3359 gint
3360 gst_audio_decoder_get_estimate_rate (GstAudioDecoder * dec)
3361 {
3362   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0);
3363 
3364   return dec->priv->ctx.do_estimate_rate;
3365 }
3366 
3367 /**
3368  * gst_audio_decoder_get_delay:
3369  * @dec: a #GstAudioDecoder
3370  *
3371  * Returns: currently configured decoder delay
3372  */
3373 gint
3374 gst_audio_decoder_get_delay (GstAudioDecoder * dec)
3375 {
3376   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0);
3377 
3378   return dec->priv->ctx.delay;
3379 }
3380 
3381 /**
3382  * gst_audio_decoder_set_max_errors:
3383  * @dec: a #GstAudioDecoder
3384  * @num: max tolerated errors
3385  *
3386  * Sets the number of tolerated decoder errors; a tolerated error is only
3387  * warned about, while exceeding the tolerated count leads to a fatal error.
3388  * Set -1 to never return fatal errors. The default is
3389  * GST_AUDIO_DECODER_MAX_ERRORS.
3390  */
3391 void
3392 gst_audio_decoder_set_max_errors (GstAudioDecoder * dec, gint num)
3393 {
3394   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3395 
3396   dec->priv->max_errors = num;
3397 }
3398 
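/* Illustrative sketch (hypothetical subclass code): a subclass that can
 * always recover from bad input might disable fatal error accumulation
 * altogether, typically from its start() implementation.
 *
 *   gst_audio_decoder_set_max_errors (dec, -1);
 *
 * With -1 errors are never turned into a fatal flow return; a positive
 * value such as 10 makes the element error out once more than that many
 * weighted errors have accumulated.
 */
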
3399 /**
3400  * gst_audio_decoder_get_max_errors:
3401  * @dec: a #GstAudioDecoder
3402  *
3403  * Returns: currently configured decoder tolerated error count.
3404  */
3405 gint
3406 gst_audio_decoder_get_max_errors (GstAudioDecoder * dec)
3407 {
3408   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0);
3409 
3410   return dec->priv->max_errors;
3411 }
3412 
3413 /**
3414  * gst_audio_decoder_set_latency:
3415  * @dec: a #GstAudioDecoder
3416  * @min: minimum latency
3417  * @max: maximum latency
3418  *
3419  * Sets decoder latency.
3420  */
3421 void
3422 gst_audio_decoder_set_latency (GstAudioDecoder * dec,
3423     GstClockTime min, GstClockTime max)
3424 {
3425   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3426   g_return_if_fail (GST_CLOCK_TIME_IS_VALID (min));
3427   g_return_if_fail (min <= max);
3428 
3429   GST_OBJECT_LOCK (dec);
3430   dec->priv->ctx.min_latency = min;
3431   dec->priv->ctx.max_latency = max;
3432   GST_OBJECT_UNLOCK (dec);
3433 
3434   /* post latency message on the bus */
3435   gst_element_post_message (GST_ELEMENT (dec),
3436       gst_message_new_latency (GST_OBJECT (dec)));
3437 }
3438 
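/* Illustrative sketch (hypothetical subclass code): a decoder whose codec
 * buffers one frame of frame_samples samples at rate Hz could report that
 * as its latency, e.g. from set_format(); frame_samples and rate are
 * placeholders taken from the codec setup.
 *
 *   GstClockTime latency =
 *       gst_util_uint64_scale (frame_samples, GST_SECOND, rate);
 *
 *   gst_audio_decoder_set_latency (dec, latency, latency);
 */
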
3439 /**
3440  * gst_audio_decoder_get_latency:
3441  * @dec: a #GstAudioDecoder
3442  * @min: (out) (allow-none): a pointer to storage to hold minimum latency
3443  * @max: (out) (allow-none): a pointer to storage to hold maximum latency
3444  *
3445  * Sets the variables pointed to by @min and @max to the currently configured
3446  * latency.
3447  */
3448 void
3449 gst_audio_decoder_get_latency (GstAudioDecoder * dec,
3450     GstClockTime * min, GstClockTime * max)
3451 {
3452   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3453 
3454   GST_OBJECT_LOCK (dec);
3455   if (min)
3456     *min = dec->priv->ctx.min_latency;
3457   if (max)
3458     *max = dec->priv->ctx.max_latency;
3459   GST_OBJECT_UNLOCK (dec);
3460 }
3461 
3462 /**
3463  * gst_audio_decoder_get_parse_state:
3464  * @dec: a #GstAudioDecoder
3465  * @sync: (out) (optional): a pointer to a variable to hold the current sync state
3466  * @eos: (out) (optional): a pointer to a variable to hold the current eos state
3467  *
3468  * Return current parsing (sync and eos) state.
3469  */
3470 void
3471 gst_audio_decoder_get_parse_state (GstAudioDecoder * dec,
3472     gboolean * sync, gboolean * eos)
3473 {
3474   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3475 
3476   if (sync)
3477     *sync = dec->priv->ctx.sync;
3478   if (eos)
3479     *eos = dec->priv->ctx.eos;
3480 }
3481 
3482 /**
3483  * gst_audio_decoder_set_allocation_caps:
3484  * @dec: a #GstAudioDecoder
3485  * @allocation_caps: (allow-none): a #GstCaps or %NULL
3486  *
3487  * Sets the caps to use in the allocation query when they differ from the
3488  * caps set on the pad. Use this function before calling
3489  * gst_audio_decoder_negotiate(). If set to %NULL, the allocation
3490  * query will use the caps from the pad.
3491  *
3492  * Since: 1.10
3493  */
3494 void
3495 gst_audio_decoder_set_allocation_caps (GstAudioDecoder * dec,
3496     GstCaps * allocation_caps)
3497 {
3498   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3499 
3500   gst_caps_replace (&dec->priv->ctx.allocation_caps, allocation_caps);
3501 }
3502 
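/* Illustrative sketch (hypothetical subclass code): when downstream
 * allocation should be negotiated with caps other than the output caps,
 * set them just before renegotiating; alloc_caps is a placeholder for
 * caps built by the subclass.
 *
 *   gst_audio_decoder_set_allocation_caps (dec, alloc_caps);
 *   if (!gst_audio_decoder_negotiate (dec))
 *     GST_WARNING_OBJECT (dec, "negotiation failed");
 */
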
3503 /**
3504  * gst_audio_decoder_set_plc:
3505  * @dec: a #GstAudioDecoder
3506  * @enabled: new state
3507  *
3508  * Enable or disable decoder packet loss concealment, provided subclass
3509  * and codec are capable and allow handling plc.
3510  *
3511  * MT safe.
3512  */
3513 void
3514 gst_audio_decoder_set_plc (GstAudioDecoder * dec, gboolean enabled)
3515 {
3516   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3517 
3518   GST_LOG_OBJECT (dec, "enabled: %d", enabled);
3519 
3520   GST_OBJECT_LOCK (dec);
3521   dec->priv->plc = enabled;
3522   GST_OBJECT_UNLOCK (dec);
3523 }
3524 
3525 /**
3526  * gst_audio_decoder_get_plc:
3527  * @dec: a #GstAudioDecoder
3528  *
3529  * Queries decoder packet loss concealment handling.
3530  *
3531  * Returns: TRUE if packet loss concealment is enabled.
3532  *
3533  * MT safe.
3534  */
3535 gboolean
3536 gst_audio_decoder_get_plc (GstAudioDecoder * dec)
3537 {
3538   gboolean result;
3539 
3540   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
3541 
3542   GST_OBJECT_LOCK (dec);
3543   result = dec->priv->plc;
3544   GST_OBJECT_UNLOCK (dec);
3545 
3546   return result;
3547 }
3548 
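/* Illustrative sketch (hypothetical subclass code): a plc-aware subclass
 * may consult this setting from handle_frame() when the base class hands
 * it a NULL buffer for a gap; my_codec_conceal() and my_codec_decode()
 * are hypothetical.
 *
 *   static GstFlowReturn
 *   my_dec_handle_frame (GstAudioDecoder * dec, GstBuffer * buffer)
 *   {
 *     if (buffer == NULL && gst_audio_decoder_get_plc (dec))
 *       return my_codec_conceal (dec);
 *
 *     return my_codec_decode (dec, buffer);
 *   }
 */
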
3549 /**
3550  * gst_audio_decoder_set_min_latency:
3551  * @dec: a #GstAudioDecoder
3552  * @num: new minimum latency
3553  *
3554  * Sets decoder minimum aggregation latency.
3555  *
3556  * MT safe.
3557  */
3558 void
3559 gst_audio_decoder_set_min_latency (GstAudioDecoder * dec, GstClockTime num)
3560 {
3561   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3562   g_return_if_fail (GST_CLOCK_TIME_IS_VALID (num));
3563 
3564   GST_OBJECT_LOCK (dec);
3565   dec->priv->latency = num;
3566   GST_OBJECT_UNLOCK (dec);
3567 }
3568 
3569 /**
3570  * gst_audio_decoder_get_min_latency:
3571  * @dec: a #GstAudioDecoder
3572  *
3573  * Queries decoder's latency aggregation.
3574  *
3575  * Returns: aggregation latency.
3576  *
3577  * MT safe.
3578  */
3579 GstClockTime
3580 gst_audio_decoder_get_min_latency (GstAudioDecoder * dec)
3581 {
3582   GstClockTime result;
3583 
3584   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
3585 
3586   GST_OBJECT_LOCK (dec);
3587   result = dec->priv->latency;
3588   GST_OBJECT_UNLOCK (dec);
3589 
3590   return result;
3591 }
3592 
3593 /**
3594  * gst_audio_decoder_set_tolerance:
3595  * @dec: a #GstAudioDecoder
3596  * @tolerance: new tolerance
3597  *
3598  * Configures decoder audio jitter tolerance threshold.
3599  *
3600  * MT safe.
3601  */
3602 void
3603 gst_audio_decoder_set_tolerance (GstAudioDecoder * dec, GstClockTime tolerance)
3604 {
3605   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3606   g_return_if_fail (GST_CLOCK_TIME_IS_VALID (tolerance));
3607 
3608   GST_OBJECT_LOCK (dec);
3609   dec->priv->tolerance = tolerance;
3610   GST_OBJECT_UNLOCK (dec);
3611 }
3612 
3613 /**
3614  * gst_audio_decoder_get_tolerance:
3615  * @dec: a #GstAudioDecoder
3616  *
3617  * Queries current audio jitter tolerance threshold.
3618  *
3619  * Returns: decoder audio jitter tolerance threshold.
3620  *
3621  * MT safe.
3622  */
3623 GstClockTime
3624 gst_audio_decoder_get_tolerance (GstAudioDecoder * dec)
3625 {
3626   GstClockTime result;
3627 
3628   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0);
3629 
3630   GST_OBJECT_LOCK (dec);
3631   result = dec->priv->tolerance;
3632   GST_OBJECT_UNLOCK (dec);
3633 
3634   return result;
3635 }
3636 
3637 /**
3638  * gst_audio_decoder_set_drainable:
3639  * @dec: a #GstAudioDecoder
3640  * @enabled: new state
3641  *
3642  * Configures decoder drain handling.  If drainable, subclass might
3643  * be handed a NULL buffer to have it return any leftover decoded data.
3644  * Otherwise, it is not considered so capable and will only ever be passed
3645  * real data.
3646  *
3647  * MT safe.
3648  */
3649 void
3650 gst_audio_decoder_set_drainable (GstAudioDecoder * dec, gboolean enabled)
3651 {
3652   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3653 
3654   GST_OBJECT_LOCK (dec);
3655   dec->priv->drainable = enabled;
3656   GST_OBJECT_UNLOCK (dec);
3657 }
3658 
3659 /**
3660  * gst_audio_decoder_get_drainable:
3661  * @dec: a #GstAudioDecoder
3662  *
3663  * Queries decoder drain handling.
3664  *
3665  * Returns: TRUE if drainable handling is enabled.
3666  *
3667  * MT safe.
3668  */
3669 gboolean
3670 gst_audio_decoder_get_drainable (GstAudioDecoder * dec)
3671 {
3672   gboolean result;
3673 
3674   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0);
3675 
3676   GST_OBJECT_LOCK (dec);
3677   result = dec->priv->drainable;
3678   GST_OBJECT_UNLOCK (dec);
3679 
3680   return result;
3681 }
3682 
3683 /**
3684  * gst_audio_decoder_set_needs_format:
3685  * @dec: a #GstAudioDecoder
3686  * @enabled: new state
3687  *
3688  * Configures decoder format needs.  If enabled, subclass needs to be
3689  * negotiated with format caps before it can process any data.  It will then
3690  * never be handed any data before it has been configured.
3691  * Otherwise, it might be handed data without having been configured and
3692  * is then expected to be able to handle it, either by default
3693  * or based on the input data.
3694  *
3695  * MT safe.
3696  */
3697 void
3698 gst_audio_decoder_set_needs_format (GstAudioDecoder * dec, gboolean enabled)
3699 {
3700   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3701 
3702   GST_OBJECT_LOCK (dec);
3703   dec->priv->needs_format = enabled;
3704   GST_OBJECT_UNLOCK (dec);
3705 }
3706 
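/* Illustrative sketch (hypothetical subclass code): a decoder that cannot
 * interpret input without caps-supplied configuration (e.g. codec headers)
 * would typically enable this from its instance init function; MyDec and
 * my_dec_init() are placeholder names.
 *
 *   static void
 *   my_dec_init (MyDec * self)
 *   {
 *     gst_audio_decoder_set_needs_format (GST_AUDIO_DECODER (self), TRUE);
 *   }
 */
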
3707 /**
3708  * gst_audio_decoder_get_needs_format:
3709  * @dec: a #GstAudioDecoder
3710  *
3711  * Queries decoder required format handling.
3712  *
3713  * Returns: TRUE if required format handling is enabled.
3714  *
3715  * MT safe.
3716  */
3717 gboolean
3718 gst_audio_decoder_get_needs_format (GstAudioDecoder * dec)
3719 {
3720   gboolean result;
3721 
3722   g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
3723 
3724   GST_OBJECT_LOCK (dec);
3725   result = dec->priv->needs_format;
3726   GST_OBJECT_UNLOCK (dec);
3727 
3728   return result;
3729 }
3730 
3731 /**
3732  * gst_audio_decoder_merge_tags:
3733  * @dec: a #GstAudioDecoder
3734  * @tags: (allow-none): a #GstTagList to merge, or NULL
3735  * @mode: the #GstTagMergeMode to use, usually #GST_TAG_MERGE_REPLACE
3736  *
3737  * Sets the audio decoder tags and how they should be merged with any
3738  * upstream stream tags. This will override any tags previously-set
3739  * with gst_audio_decoder_merge_tags().
3740  *
3741  * Note that this is provided for convenience, and the subclass is
3742  * not required to use this and can still do tag handling on its own.
3743  */
3744 void
3745 gst_audio_decoder_merge_tags (GstAudioDecoder * dec,
3746     const GstTagList * tags, GstTagMergeMode mode)
3747 {
3748   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3749   g_return_if_fail (tags == NULL || GST_IS_TAG_LIST (tags));
3750   g_return_if_fail (mode != GST_TAG_MERGE_UNDEFINED);
3751 
3752   GST_AUDIO_DECODER_STREAM_LOCK (dec);
3753   if (dec->priv->taglist != tags) {
3754     if (dec->priv->taglist) {
3755       gst_tag_list_unref (dec->priv->taglist);
3756       dec->priv->taglist = NULL;
3757       dec->priv->decoder_tags_merge_mode = GST_TAG_MERGE_KEEP_ALL;
3758     }
3759     if (tags) {
3760       dec->priv->taglist = gst_tag_list_ref ((GstTagList *) tags);
3761       dec->priv->decoder_tags_merge_mode = mode;
3762     }
3763 
3764     GST_DEBUG_OBJECT (dec, "setting decoder tags to %" GST_PTR_FORMAT, tags);
3765     dec->priv->taglist_changed = TRUE;
3766   }
3767   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
3768 }
3769 
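/* Illustrative sketch (hypothetical subclass code): publishing codec tags
 * once the stream setup is known; the tag values are placeholders.
 *
 *   GstTagList *tags = gst_tag_list_new (GST_TAG_AUDIO_CODEC, "My Codec",
 *       GST_TAG_BITRATE, (guint) 128000, NULL);
 *
 *   gst_audio_decoder_merge_tags (dec, tags, GST_TAG_MERGE_REPLACE);
 *   gst_tag_list_unref (tags);
 *
 * gst_audio_decoder_merge_tags() takes its own reference, so the caller
 * drops the local one afterwards.
 */
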
3770 /**
3771  * gst_audio_decoder_allocate_output_buffer:
3772  * @dec: a #GstAudioDecoder
3773  * @size: size of the buffer
3774  *
3775  * Helper function that allocates a buffer to hold an audio frame
3776  * for @dec's current output format.
3777  *
3778  * Returns: (transfer full): allocated buffer
3779  */
3780 GstBuffer *
3781 gst_audio_decoder_allocate_output_buffer (GstAudioDecoder * dec, gsize size)
3782 {
3783   GstBuffer *buffer = NULL;
3784   gboolean needs_reconfigure = FALSE;
3785 
3786   g_return_val_if_fail (size > 0, NULL);
3787 
3788   GST_DEBUG ("alloc src buffer");
3789 
3790   GST_AUDIO_DECODER_STREAM_LOCK (dec);
3791 
3792   needs_reconfigure = gst_pad_check_reconfigure (dec->srcpad);
3793   if (G_UNLIKELY (dec->priv->ctx.output_format_changed ||
3794           (GST_AUDIO_INFO_IS_VALID (&dec->priv->ctx.info)
3795               && needs_reconfigure))) {
3796     if (!gst_audio_decoder_negotiate_unlocked (dec)) {
3797       GST_INFO_OBJECT (dec, "Failed to negotiate, fallback allocation");
3798       gst_pad_mark_reconfigure (dec->srcpad);
3799       goto fallback;
3800     }
3801   }
3802 
3803   buffer =
3804       gst_buffer_new_allocate (dec->priv->ctx.allocator, size,
3805       &dec->priv->ctx.params);
3806   if (!buffer) {
3807     GST_INFO_OBJECT (dec, "couldn't allocate output buffer");
3808     goto fallback;
3809   }
3810 
3811   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
3812 
3813   return buffer;
3814 fallback:
3815   buffer = gst_buffer_new_allocate (NULL, size, NULL);
3816   GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
3817 
3818   return buffer;
3819 }
3820 
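/* Illustrative sketch (hypothetical subclass code): decoded samples are
 * typically copied into a buffer obtained here and handed back with
 * gst_audio_decoder_finish_frame(); decoded_data, samples and bpf (bytes
 * per frame) are placeholders.
 *
 *   GstBuffer *out;
 *   GstFlowReturn ret;
 *
 *   out = gst_audio_decoder_allocate_output_buffer (dec, samples * bpf);
 *   gst_buffer_fill (out, 0, decoded_data, samples * bpf);
 *   ret = gst_audio_decoder_finish_frame (dec, out, 1);
 */
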
3821 /**
3822  * gst_audio_decoder_get_allocator:
3823  * @dec: a #GstAudioDecoder
3824  * @allocator: (out) (allow-none) (transfer full): the #GstAllocator
3825  * used
3826  * @params: (out) (allow-none) (transfer full): the
3827  * #GstAllocationParams of @allocator
3828  *
3829  * Lets #GstAudioDecoder sub-classes know the memory @allocator
3830  * used by the base class and its @params.
3831  *
3832  * Unref the @allocator after use.
3833  */
3834 void
3835 gst_audio_decoder_get_allocator (GstAudioDecoder * dec,
3836     GstAllocator ** allocator, GstAllocationParams * params)
3837 {
3838   g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3839 
3840   if (allocator)
3841     *allocator = dec->priv->ctx.allocator ?
3842         gst_object_ref (dec->priv->ctx.allocator) : NULL;
3843 
3844   if (params)
3845     *params = dec->priv->ctx.params;
3846 }
3847 
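/* Illustrative sketch (hypothetical subclass code): querying the
 * negotiated allocator, for instance to allocate scratch buffers with the
 * same parameters as the base class; scratch and size are placeholders.
 *
 *   GstAllocator *allocator = NULL;
 *   GstAllocationParams params;
 *
 *   gst_audio_decoder_get_allocator (dec, &allocator, &params);
 *   scratch = gst_buffer_new_allocate (allocator, size, &params);
 *   if (allocator)
 *     gst_object_unref (allocator);
 *
 * gst_buffer_new_allocate() accepts a NULL allocator and falls back to the
 * default one, so the code also works before negotiation has happened.
 */
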
3848 /**
3849  * gst_audio_decoder_set_use_default_pad_acceptcaps:
3850  * @decoder: a #GstAudioDecoder
3851  * @use: if the default pad accept-caps query handling should be used
3852  *
3853  * Lets #GstAudioDecoder sub-classes decide if they want the sink pad
3854  * to use the default pad query handler to reply to accept-caps queries.
3855  *
3856  * By setting this to true it is possible to further customize the default
3857  * handler with %GST_PAD_SET_ACCEPT_INTERSECT and
3858  * %GST_PAD_SET_ACCEPT_TEMPLATE.
3859  *
3860  * Since: 1.6
3861  */
3862 void
3863 gst_audio_decoder_set_use_default_pad_acceptcaps (GstAudioDecoder * decoder,
3864     gboolean use)
3865 {
3866   decoder->priv->use_default_pad_acceptcaps = use;
3867 }
3868
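/* Illustrative sketch (hypothetical subclass code): enabling the default
 * accept-caps handling and restricting it to the sink pad template caps.
 *
 *   gst_audio_decoder_set_use_default_pad_acceptcaps (dec, TRUE);
 *   GST_PAD_SET_ACCEPT_TEMPLATE (GST_AUDIO_DECODER_SINK_PAD (dec));
 */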