1 /* GStreamer
2 * Copyright (C) 2009 Igalia S.L.
3 * Author: Iago Toral Quiroga <itoral@igalia.com>
4 * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
5 * Copyright (C) 2011 Nokia Corporation. All rights reserved.
6 * Contact: Stefan Kost <stefan.kost@nokia.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Library General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Library General Public License for more details.
17 *
18 * You should have received a copy of the GNU Library General Public
19 * License along with this library; if not, write to the
20 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
21 * Boston, MA 02110-1301, USA.
22 */
23
24 /**
25 * SECTION:gstaudiodecoder
26 * @title: GstAudioDecoder
27 * @short_description: Base class for audio decoders
28 * @see_also: #GstBaseTransform
29 *
30 * This base class is for audio decoders turning encoded data into
31 * raw audio samples.
32 *
33 * GstAudioDecoder and subclass should cooperate as follows.
34 *
35 * ## Configuration
36 *
37 * * Initially, GstAudioDecoder calls @start when the decoder element
38 * is activated, which allows subclass to perform any global setup.
39 * Base class (context) parameters can already be set according to subclass
40 * capabilities (or possibly upon receiving more information in a subsequent
41 * @set_format).
42 * * GstAudioDecoder calls @set_format to inform subclass of the format
43 * of input audio data that it is about to receive.
44 * While unlikely, it might be called more than once, if changing input
45 * parameters requires reconfiguration.
46 * * GstAudioDecoder calls @stop at end of all processing.
47 *
48 * As of configuration stage, and throughout processing, GstAudioDecoder
49 * provides various (context) parameters, e.g. describing the format of
50 * output audio data (valid when output caps have been set) or current parsing state.
51 * Conversely, subclass can and should configure context to inform
52 * base class of its expectation w.r.t. buffer handling.
53 *
54 * ## Data processing
55 * * Base class gathers input data, and optionally allows subclass
56 * to parse this into subsequently manageable (as defined by subclass)
57 * chunks. Such chunks are subsequently referred to as 'frames',
58 * though they may or may not correspond to one (or more) audio format frames.
59 * * Input frame is provided to subclass' @handle_frame.
60 * * If codec processing results in decoded data, subclass should call
61 * @gst_audio_decoder_finish_frame to have decoded data pushed
62 * downstream.
63 * * Just prior to actually pushing a buffer downstream,
64 * it is passed to @pre_push. Subclass should either use this callback
65 * to arrange for additional downstream pushing or otherwise ensure that any
66 * such custom pushing only happens after the source pad caps have been set
67 * (i.e. after a call that configured them has finished).
68 * * During the parsing process GstAudioDecoderClass will handle both
69 * srcpad and sinkpad events. Sink events will be passed to subclass
70 * if @event callback has been provided.
71 *
72 * ## Shutdown phase
73 *
74 * * GstAudioDecoder class calls @stop to inform the subclass that data
75 * parsing will be stopped.
76 *
77 * Subclass is responsible for providing pad template caps for
78 * source and sink pads. The pads need to be named "sink" and "src". It also
79 * needs to set the fixed caps on the srcpad when the format is known. This
80 * is typically when base class calls subclass' @set_format function, though
81 * it might be delayed until calling @gst_audio_decoder_finish_frame.
82 *
83 * In summary, above process should have subclass concentrating on
84 * codec data processing while leaving other matters to base class,
85 * such as most notably timestamp handling. While it may exert more control
86 * in this area (see e.g. @pre_push), it is very much not recommended.
87 *
88 * In particular, base class will try to arrange for perfect output timestamps
89 * as much as possible while tracking upstream timestamps.
90 * To this end, if deviation between the next ideal expected perfect timestamp
91 * and upstream exceeds #GstAudioDecoder:tolerance, then resync to upstream
92 * occurs (which would happen always if the tolerance mechanism is disabled).
93 *
94 * In non-live pipelines, baseclass can also (configurably) arrange for
95 * output buffer aggregation, which may help to reduce large(r) numbers of
96 * small(er) buffers being pushed and processed downstream. Note that this
97 * feature is only available if the buffer layout is interleaved. For planar
98 * buffers, the decoder implementation is fully responsible for the output
99 * buffer size.
100 *
101 * On the other hand, it should be noted that baseclass only provides limited
102 * seeking support (upon explicit subclass request), as full-fledged support
103 * should rather be left to upstream demuxer, parser or the like. This simple
104 * approach caters for seeking and duration reporting using estimated input
105 * bitrates.
106 *
107 * Things that the subclass needs to take care of:
108 *
109 * * Provide pad templates
110 * * Set source pad caps when appropriate
111 * * Set user-configurable properties to sane defaults for format and
112 * implementing codec at hand, and convey some subclass capabilities and
113 * expectations in context.
114 *
115 * * Accept data in @handle_frame and provide decoded results to
116 * @gst_audio_decoder_finish_frame. If it is prepared to perform
117 * PLC, it should also accept NULL data in @handle_frame and provide
118 * data for the indicated duration (see the sketch below).
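 *
 * As a rough illustration, a subclass' @handle_frame could look like the
 * sketch below. MY_DEC() and my_codec_decode() are hypothetical helpers,
 * draining/PLC handling is omitted, and output caps are assumed to have
 * been configured from @set_format already:
 *
 * |[<!-- language="C" -->
 * static GstFlowReturn
 * my_dec_handle_frame (GstAudioDecoder * dec, GstBuffer * buffer)
 * {
 *   GstBuffer *outbuf;
 *
 *   if (buffer == NULL)
 *     return GST_FLOW_OK;
 *
 *   outbuf = my_codec_decode (MY_DEC (dec), buffer);
 *   if (outbuf == NULL)
 *     return gst_audio_decoder_finish_frame (dec, NULL, 1);
 *
 *   return gst_audio_decoder_finish_frame (dec, outbuf, 1);
 * }
 * ]|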
119 *
120 */
121
122 #ifdef HAVE_CONFIG_H
123 #include "config.h"
124 #endif
125
126 #include "gstaudiodecoder.h"
127 #include "gstaudioutilsprivate.h"
128 #include <gst/pbutils/descriptions.h>
129
130 #include <string.h>
131
132 GST_DEBUG_CATEGORY (audiodecoder_debug);
133 #define GST_CAT_DEFAULT audiodecoder_debug
134
135 enum
136 {
137 LAST_SIGNAL
138 };
139
140 enum
141 {
142 PROP_0,
143 PROP_LATENCY,
144 PROP_TOLERANCE,
145 PROP_PLC
146 };
147
148 #define DEFAULT_LATENCY 0
149 #define DEFAULT_TOLERANCE 0
150 #define DEFAULT_PLC FALSE
151 #define DEFAULT_DRAINABLE TRUE
152 #define DEFAULT_NEEDS_FORMAT FALSE
153
154 typedef struct _GstAudioDecoderContext
155 {
156 /* last negotiated input caps */
157 GstCaps *input_caps;
158
159 /* (output) audio format */
160 GstAudioInfo info;
161 GstCaps *caps;
162 gboolean output_format_changed;
163
164 /* parsing state */
165 gboolean eos;
166 gboolean sync;
167
168 gboolean had_output_data;
169 gboolean had_input_data;
170
171 /* misc */
172 gint delay;
173
174 /* output */
175 gboolean do_plc;
176 gboolean do_estimate_rate;
177 gint max_errors;
178 GstCaps *allocation_caps;
179 /* MT-protected (with LOCK) */
180 GstClockTime min_latency;
181 GstClockTime max_latency;
182
183 GstAllocator *allocator;
184 GstAllocationParams params;
185 } GstAudioDecoderContext;
186
187 struct _GstAudioDecoderPrivate
188 {
189 /* activation status */
190 gboolean active;
191
192 /* input base/first ts as basis for output ts */
193 GstClockTime base_ts;
194 /* input samples processed and sent downstream so far (w.r.t. base_ts) */
195 guint64 samples;
196
197 /* collected input data */
198 GstAdapter *adapter;
199 /* tracking input ts for changes */
200 GstClockTime prev_ts;
201 guint64 prev_distance;
202 /* frames obtained from input */
203 GQueue frames;
204 /* collected output data */
205 GstAdapter *adapter_out;
206 /* ts and duration for output data collected above */
207 GstClockTime out_ts, out_dur;
208 /* mark outgoing discont */
209 gboolean discont;
210
211 /* subclass gave all it could already */
212 gboolean drained;
213 /* subclass currently being forcibly drained */
214 gboolean force;
215 /* input_segment and output_segment are identical */
216 gboolean in_out_segment_sync;
217 /* expecting the buffer with DISCONT flag */
218 gboolean expecting_discont_buf;
219
220 /* number of samples pushed out via _finish_subframe(), resets on _finish_frame() */
221 guint subframe_samples;
222
223 /* input bps estimation */
224 /* global in bytes seen */
225 guint64 bytes_in;
226 /* global samples sent out */
227 guint64 samples_out;
228 /* bytes flushed during parsing */
229 guint sync_flush;
230 /* error count */
231 gint error_count;
232
233 /* upstream stream tags (global tags are passed through as-is) */
234 GstTagList *upstream_tags;
235
236 /* subclass tags */
237 GstTagList *taglist; /* FIXME: rename to decoder_tags */
238 GstTagMergeMode decoder_tags_merge_mode;
239
240 gboolean taglist_changed; /* FIXME: rename to tags_changed */
241
242 /* whether circumstances allow output aggregation */
243 gint agg;
244
245 /* reverse playback queues */
246 /* collect input */
247 GList *gather;
248 /* to-be-decoded */
249 GList *decode;
250 /* reversed output */
251 GList *queued;
252
253 /* context storage */
254 GstAudioDecoderContext ctx;
255
256 /* properties */
257 GstClockTime latency;
258 GstClockTime tolerance;
259 gboolean plc;
260 gboolean drainable;
261 gboolean needs_format;
262
263 /* pending serialized sink events, will be sent from finish_frame() */
264 GList *pending_events;
265
266 /* flags */
267 gboolean use_default_pad_acceptcaps;
268 #ifdef OHOS_OPT_PERFORMANCE // ohos.opt.performance.0001: first frame decoded cost time
269 gboolean has_recv_first_frame;
270 gboolean has_push_first_frame;
271 #endif
272 };
273
274 static void gst_audio_decoder_finalize (GObject * object);
275 static void gst_audio_decoder_set_property (GObject * object,
276 guint prop_id, const GValue * value, GParamSpec * pspec);
277 static void gst_audio_decoder_get_property (GObject * object,
278 guint prop_id, GValue * value, GParamSpec * pspec);
279
280 static void gst_audio_decoder_clear_queues (GstAudioDecoder * dec);
281 static GstFlowReturn gst_audio_decoder_chain_reverse (GstAudioDecoder *
282 dec, GstBuffer * buf);
283
284 static GstStateChangeReturn gst_audio_decoder_change_state (GstElement *
285 element, GstStateChange transition);
286 static gboolean gst_audio_decoder_sink_eventfunc (GstAudioDecoder * dec,
287 GstEvent * event);
288 static gboolean gst_audio_decoder_src_eventfunc (GstAudioDecoder * dec,
289 GstEvent * event);
290 static gboolean gst_audio_decoder_sink_event (GstPad * pad, GstObject * parent,
291 GstEvent * event);
292 static gboolean gst_audio_decoder_src_event (GstPad * pad, GstObject * parent,
293 GstEvent * event);
294 static gboolean gst_audio_decoder_sink_setcaps (GstAudioDecoder * dec,
295 GstCaps * caps);
296 static GstFlowReturn gst_audio_decoder_chain (GstPad * pad, GstObject * parent,
297 GstBuffer * buf);
298 static gboolean gst_audio_decoder_src_query (GstPad * pad, GstObject * parent,
299 GstQuery * query);
300 static gboolean gst_audio_decoder_sink_query (GstPad * pad, GstObject * parent,
301 GstQuery * query);
302 static void gst_audio_decoder_reset (GstAudioDecoder * dec, gboolean full);
303
304 static gboolean gst_audio_decoder_decide_allocation_default (GstAudioDecoder *
305 dec, GstQuery * query);
306 static gboolean gst_audio_decoder_propose_allocation_default (GstAudioDecoder *
307 dec, GstQuery * query);
308 static gboolean gst_audio_decoder_negotiate_default (GstAudioDecoder * dec);
309 static gboolean gst_audio_decoder_negotiate_unlocked (GstAudioDecoder * dec);
310 static gboolean gst_audio_decoder_handle_gap (GstAudioDecoder * dec,
311 GstEvent * event);
312 static gboolean gst_audio_decoder_sink_query_default (GstAudioDecoder * dec,
313 GstQuery * query);
314 static gboolean gst_audio_decoder_src_query_default (GstAudioDecoder * dec,
315 GstQuery * query);
316
317 static gboolean gst_audio_decoder_transform_meta_default (GstAudioDecoder *
318 decoder, GstBuffer * outbuf, GstMeta * meta, GstBuffer * inbuf);
319
320 static GstFlowReturn
321 gst_audio_decoder_finish_frame_or_subframe (GstAudioDecoder * dec,
322 GstBuffer * buf, gint frames);
323
324 static GstElementClass *parent_class = NULL;
325 static gint private_offset = 0;
326
327 static void gst_audio_decoder_class_init (GstAudioDecoderClass * klass);
328 static void gst_audio_decoder_init (GstAudioDecoder * dec,
329 GstAudioDecoderClass * klass);
330
331 GType
332 gst_audio_decoder_get_type (void)
333 {
334 static volatile gsize audio_decoder_type = 0;
335
336 if (g_once_init_enter (&audio_decoder_type)) {
337 GType _type;
338 static const GTypeInfo audio_decoder_info = {
339 sizeof (GstAudioDecoderClass),
340 NULL,
341 NULL,
342 (GClassInitFunc) gst_audio_decoder_class_init,
343 NULL,
344 NULL,
345 sizeof (GstAudioDecoder),
346 0,
347 (GInstanceInitFunc) gst_audio_decoder_init,
348 };
349
350 _type = g_type_register_static (GST_TYPE_ELEMENT,
351 "GstAudioDecoder", &audio_decoder_info, G_TYPE_FLAG_ABSTRACT);
352
353 private_offset =
354 g_type_add_instance_private (_type, sizeof (GstAudioDecoderPrivate));
355
356 g_once_init_leave (&audio_decoder_type, _type);
357 }
358 return audio_decoder_type;
359 }
360
361 static inline GstAudioDecoderPrivate *
362 gst_audio_decoder_get_instance_private (GstAudioDecoder * self)
363 {
364 return (G_STRUCT_MEMBER_P (self, private_offset));
365 }
366
367 static void
368 gst_audio_decoder_class_init (GstAudioDecoderClass * klass)
369 {
370 GObjectClass *gobject_class;
371 GstElementClass *element_class;
372 GstAudioDecoderClass *audiodecoder_class;
373
374 gobject_class = G_OBJECT_CLASS (klass);
375 element_class = GST_ELEMENT_CLASS (klass);
376 audiodecoder_class = GST_AUDIO_DECODER_CLASS (klass);
377
378 parent_class = g_type_class_peek_parent (klass);
379
380 if (private_offset != 0)
381 g_type_class_adjust_private_offset (klass, &private_offset);
382
383 GST_DEBUG_CATEGORY_INIT (audiodecoder_debug, "audiodecoder", 0,
384 "audio decoder base class");
385
386 gobject_class->set_property = gst_audio_decoder_set_property;
387 gobject_class->get_property = gst_audio_decoder_get_property;
388 gobject_class->finalize = gst_audio_decoder_finalize;
389
390 element_class->change_state =
391 GST_DEBUG_FUNCPTR (gst_audio_decoder_change_state);
392
393 /* Properties */
394 g_object_class_install_property (gobject_class, PROP_LATENCY,
395 g_param_spec_int64 ("min-latency", "Minimum Latency",
396 "Aggregate output data to a minimum of latency time (ns)",
397 0, G_MAXINT64, DEFAULT_LATENCY,
398 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
399
400 g_object_class_install_property (gobject_class, PROP_TOLERANCE,
401 g_param_spec_int64 ("tolerance", "Tolerance",
402 "Perfect ts while timestamp jitter/imperfection within tolerance (ns)",
403 0, G_MAXINT64, DEFAULT_TOLERANCE,
404 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
405
406 g_object_class_install_property (gobject_class, PROP_PLC,
407 g_param_spec_boolean ("plc", "Packet Loss Concealment",
408 "Perform packet loss concealment (if supported)",
409 DEFAULT_PLC, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
410
411 audiodecoder_class->sink_event =
412 GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_eventfunc);
413 audiodecoder_class->src_event =
414 GST_DEBUG_FUNCPTR (gst_audio_decoder_src_eventfunc);
415 audiodecoder_class->propose_allocation =
416 GST_DEBUG_FUNCPTR (gst_audio_decoder_propose_allocation_default);
417 audiodecoder_class->decide_allocation =
418 GST_DEBUG_FUNCPTR (gst_audio_decoder_decide_allocation_default);
419 audiodecoder_class->negotiate =
420 GST_DEBUG_FUNCPTR (gst_audio_decoder_negotiate_default);
421 audiodecoder_class->sink_query =
422 GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_query_default);
423 audiodecoder_class->src_query =
424 GST_DEBUG_FUNCPTR (gst_audio_decoder_src_query_default);
425 audiodecoder_class->transform_meta =
426 GST_DEBUG_FUNCPTR (gst_audio_decoder_transform_meta_default);
427 }
428
429 static void
430 gst_audio_decoder_init (GstAudioDecoder * dec, GstAudioDecoderClass * klass)
431 {
432 GstPadTemplate *pad_template;
433
434 GST_DEBUG_OBJECT (dec, "gst_audio_decoder_init");
435
436 dec->priv = gst_audio_decoder_get_instance_private (dec);
437
438 /* Setup sink pad */
439 pad_template =
440 gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "sink");
441 g_return_if_fail (pad_template != NULL);
442
443 dec->sinkpad = gst_pad_new_from_template (pad_template, "sink");
444 gst_pad_set_event_function (dec->sinkpad,
445 GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_event));
446 gst_pad_set_chain_function (dec->sinkpad,
447 GST_DEBUG_FUNCPTR (gst_audio_decoder_chain));
448 gst_pad_set_query_function (dec->sinkpad,
449 GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_query));
450 gst_element_add_pad (GST_ELEMENT (dec), dec->sinkpad);
451 GST_DEBUG_OBJECT (dec, "sinkpad created");
452
453 /* Setup source pad */
454 pad_template =
455 gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "src");
456 g_return_if_fail (pad_template != NULL);
457
458 dec->srcpad = gst_pad_new_from_template (pad_template, "src");
459 gst_pad_set_event_function (dec->srcpad,
460 GST_DEBUG_FUNCPTR (gst_audio_decoder_src_event));
461 gst_pad_set_query_function (dec->srcpad,
462 GST_DEBUG_FUNCPTR (gst_audio_decoder_src_query));
463 gst_element_add_pad (GST_ELEMENT (dec), dec->srcpad);
464 GST_DEBUG_OBJECT (dec, "srcpad created");
465
466 dec->priv->adapter = gst_adapter_new ();
467 dec->priv->adapter_out = gst_adapter_new ();
468 g_queue_init (&dec->priv->frames);
469
470 g_rec_mutex_init (&dec->stream_lock);
471
472 /* property default */
473 dec->priv->latency = DEFAULT_LATENCY;
474 dec->priv->tolerance = DEFAULT_TOLERANCE;
475 dec->priv->plc = DEFAULT_PLC;
476 dec->priv->drainable = DEFAULT_DRAINABLE;
477 dec->priv->needs_format = DEFAULT_NEEDS_FORMAT;
478
479 /* init state */
480 dec->priv->ctx.min_latency = 0;
481 dec->priv->ctx.max_latency = 0;
482 gst_audio_decoder_reset (dec, TRUE);
483 GST_DEBUG_OBJECT (dec, "init ok");
484 }
485
486 static void
487 gst_audio_decoder_reset (GstAudioDecoder * dec, gboolean full)
488 {
489 GST_DEBUG_OBJECT (dec, "gst_audio_decoder_reset");
490
491 GST_AUDIO_DECODER_STREAM_LOCK (dec);
492
493 if (full) {
494 dec->priv->active = FALSE;
495 GST_OBJECT_LOCK (dec);
496 dec->priv->bytes_in = 0;
497 dec->priv->samples_out = 0;
498 GST_OBJECT_UNLOCK (dec);
499 dec->priv->agg = -1;
500 dec->priv->error_count = 0;
501 gst_audio_decoder_clear_queues (dec);
502
503 if (dec->priv->taglist) {
504 gst_tag_list_unref (dec->priv->taglist);
505 dec->priv->taglist = NULL;
506 }
507 dec->priv->decoder_tags_merge_mode = GST_TAG_MERGE_KEEP_ALL;
508 if (dec->priv->upstream_tags) {
509 gst_tag_list_unref (dec->priv->upstream_tags);
510 dec->priv->upstream_tags = NULL;
511 }
512 dec->priv->taglist_changed = FALSE;
513
514 gst_segment_init (&dec->input_segment, GST_FORMAT_TIME);
515 gst_segment_init (&dec->output_segment, GST_FORMAT_TIME);
516 dec->priv->in_out_segment_sync = TRUE;
517
518 g_list_foreach (dec->priv->pending_events, (GFunc) gst_event_unref, NULL);
519 g_list_free (dec->priv->pending_events);
520 dec->priv->pending_events = NULL;
521
522 if (dec->priv->ctx.allocator)
523 gst_object_unref (dec->priv->ctx.allocator);
524
525 GST_OBJECT_LOCK (dec);
526 gst_caps_replace (&dec->priv->ctx.input_caps, NULL);
527 gst_caps_replace (&dec->priv->ctx.caps, NULL);
528 gst_caps_replace (&dec->priv->ctx.allocation_caps, NULL);
529
530 memset (&dec->priv->ctx, 0, sizeof (dec->priv->ctx));
531
532 gst_audio_info_init (&dec->priv->ctx.info);
533 GST_OBJECT_UNLOCK (dec);
534 dec->priv->ctx.max_errors = GST_AUDIO_DECODER_MAX_ERRORS;
535 dec->priv->ctx.had_output_data = FALSE;
536 dec->priv->ctx.had_input_data = FALSE;
537 }
538
539 g_queue_foreach (&dec->priv->frames, (GFunc) gst_buffer_unref, NULL);
540 g_queue_clear (&dec->priv->frames);
541 gst_adapter_clear (dec->priv->adapter);
542 gst_adapter_clear (dec->priv->adapter_out);
543 dec->priv->out_ts = GST_CLOCK_TIME_NONE;
544 dec->priv->out_dur = 0;
545 dec->priv->prev_ts = GST_CLOCK_TIME_NONE;
546 dec->priv->prev_distance = 0;
547 dec->priv->drained = TRUE;
548 dec->priv->base_ts = GST_CLOCK_TIME_NONE;
549 dec->priv->samples = 0;
550 dec->priv->discont = TRUE;
551 dec->priv->sync_flush = FALSE;
552
553 GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
554 }
555
556 static void
557 gst_audio_decoder_finalize (GObject * object)
558 {
559 GstAudioDecoder *dec;
560
561 g_return_if_fail (GST_IS_AUDIO_DECODER (object));
562 dec = GST_AUDIO_DECODER (object);
563
564 if (dec->priv->adapter) {
565 g_object_unref (dec->priv->adapter);
566 }
567 if (dec->priv->adapter_out) {
568 g_object_unref (dec->priv->adapter_out);
569 }
570
571 g_rec_mutex_clear (&dec->stream_lock);
572
573 G_OBJECT_CLASS (parent_class)->finalize (object);
574 }
575
576 static GstEvent *
577 gst_audio_decoder_create_merged_tags_event (GstAudioDecoder * dec)
578 {
579 GstTagList *merged_tags;
580
581 GST_LOG_OBJECT (dec, "upstream : %" GST_PTR_FORMAT, dec->priv->upstream_tags);
582 GST_LOG_OBJECT (dec, "decoder : %" GST_PTR_FORMAT, dec->priv->taglist);
583 GST_LOG_OBJECT (dec, "mode : %d", dec->priv->decoder_tags_merge_mode);
584
585 merged_tags =
586 gst_tag_list_merge (dec->priv->upstream_tags,
587 dec->priv->taglist, dec->priv->decoder_tags_merge_mode);
588
589 GST_DEBUG_OBJECT (dec, "merged : %" GST_PTR_FORMAT, merged_tags);
590
591 if (merged_tags == NULL)
592 return NULL;
593
594 if (gst_tag_list_is_empty (merged_tags)) {
595 gst_tag_list_unref (merged_tags);
596 return NULL;
597 }
598
599 return gst_event_new_tag (merged_tags);
600 }
601
602 static gboolean
603 gst_audio_decoder_push_event (GstAudioDecoder * dec, GstEvent * event)
604 {
605 switch (GST_EVENT_TYPE (event)) {
606 case GST_EVENT_SEGMENT:{
607 GstSegment seg;
608
609 GST_AUDIO_DECODER_STREAM_LOCK (dec);
610 gst_event_copy_segment (event, &seg);
611
612 GST_DEBUG_OBJECT (dec, "starting segment %" GST_SEGMENT_FORMAT, &seg);
613
614 dec->output_segment = seg;
615 dec->priv->in_out_segment_sync =
616 gst_segment_is_equal (&dec->input_segment, &seg);
617 GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
618 break;
619 }
620 default:
621 break;
622 }
623
624 return gst_pad_push_event (dec->srcpad, event);
625 }
626
627 static gboolean
628 gst_audio_decoder_negotiate_default (GstAudioDecoder * dec)
629 {
630 GstAudioDecoderClass *klass;
631 gboolean res = TRUE;
632 GstCaps *caps;
633 GstCaps *prevcaps;
634 GstQuery *query = NULL;
635 GstAllocator *allocator;
636 GstAllocationParams params;
637
638 g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
639 g_return_val_if_fail (GST_AUDIO_INFO_IS_VALID (&dec->priv->ctx.info), FALSE);
640 g_return_val_if_fail (GST_IS_CAPS (dec->priv->ctx.caps), FALSE);
641
642 klass = GST_AUDIO_DECODER_GET_CLASS (dec);
643
644 caps = dec->priv->ctx.caps;
645 if (dec->priv->ctx.allocation_caps == NULL)
646 dec->priv->ctx.allocation_caps = gst_caps_ref (caps);
647
648 GST_DEBUG_OBJECT (dec, "setting src caps %" GST_PTR_FORMAT, caps);
649
650 if (dec->priv->pending_events) {
651 GList **pending_events, *l;
652
653 pending_events = &dec->priv->pending_events;
654
655 GST_DEBUG_OBJECT (dec, "Pushing pending events");
656 for (l = *pending_events; l;) {
657 GstEvent *event = GST_EVENT (l->data);
658 GList *tmp;
659
660 if (GST_EVENT_TYPE (event) < GST_EVENT_CAPS) {
661 gst_audio_decoder_push_event (dec, l->data);
662 tmp = l;
663 l = l->next;
664 *pending_events = g_list_delete_link (*pending_events, tmp);
665 } else {
666 l = l->next;
667 }
668 }
669 }
670
671 prevcaps = gst_pad_get_current_caps (dec->srcpad);
672 if (!prevcaps || !gst_caps_is_equal (prevcaps, caps))
673 res = gst_pad_set_caps (dec->srcpad, caps);
674 if (prevcaps)
675 gst_caps_unref (prevcaps);
676
677 if (!res)
678 goto done;
679 dec->priv->ctx.output_format_changed = FALSE;
680
681 query = gst_query_new_allocation (dec->priv->ctx.allocation_caps, TRUE);
682 if (!gst_pad_peer_query (dec->srcpad, query)) {
683 GST_DEBUG_OBJECT (dec, "didn't get downstream ALLOCATION hints");
684 }
685
686 g_assert (klass->decide_allocation != NULL);
687 res = klass->decide_allocation (dec, query);
688
689 GST_DEBUG_OBJECT (dec, "ALLOCATION (%d) params: %" GST_PTR_FORMAT, res,
690 query);
691
692 if (!res)
693 goto no_decide_allocation;
694
695 /* we got configuration from our peer or the decide_allocation method,
696 * parse them */
697 if (gst_query_get_n_allocation_params (query) > 0) {
698 gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
699 } else {
700 allocator = NULL;
701 gst_allocation_params_init (&params);
702 }
703
704 if (dec->priv->ctx.allocator)
705 gst_object_unref (dec->priv->ctx.allocator);
706 dec->priv->ctx.allocator = allocator;
707 dec->priv->ctx.params = params;
708
709 done:
710
711 if (query)
712 gst_query_unref (query);
713
714 return res;
715
716 /* ERRORS */
717 no_decide_allocation:
718 {
719 GST_WARNING_OBJECT (dec, "Subclass failed to decide allocation");
720 goto done;
721 }
722 }
723
724 static gboolean
725 gst_audio_decoder_negotiate_unlocked (GstAudioDecoder * dec)
726 {
727 GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (dec);
728 gboolean ret = TRUE;
729
730 if (G_LIKELY (klass->negotiate))
731 ret = klass->negotiate (dec);
732
733 return ret;
734 }
735
736 /**
737 * gst_audio_decoder_negotiate:
738 * @dec: a #GstAudioDecoder
739 *
740 * Negotiate the currently configured #GstAudioInfo with downstream elements.
741 * Unmarks GST_PAD_FLAG_NEED_RECONFIGURE in any case, but marks it again
742 * if negotiation fails.
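 *
 * As a rough sketch, a subclass changing its output format mid-stream
 * (assumed here to run from its @handle_frame implementation) could
 * renegotiate explicitly as follows, where new_info is a hypothetical,
 * already filled-in #GstAudioInfo:
 *
 * |[<!-- language="C" -->
 *   gst_audio_decoder_set_output_format (dec, &new_info);
 *   if (!gst_audio_decoder_negotiate (dec))
 *     return GST_FLOW_NOT_NEGOTIATED;
 * ]|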
743 *
744 * Returns: %TRUE if the negotiation succeeded, else %FALSE.
745 */
746 gboolean
747 gst_audio_decoder_negotiate (GstAudioDecoder * dec)
748 {
749 GstAudioDecoderClass *klass;
750 gboolean res = TRUE;
751
752 g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
753
754 klass = GST_AUDIO_DECODER_GET_CLASS (dec);
755
756 GST_AUDIO_DECODER_STREAM_LOCK (dec);
757 gst_pad_check_reconfigure (dec->srcpad);
758 if (klass->negotiate) {
759 res = klass->negotiate (dec);
760 if (!res)
761 gst_pad_mark_reconfigure (dec->srcpad);
762 }
763 GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
764
765 return res;
766 }
767
768 /**
769 * gst_audio_decoder_set_output_format:
770 * @dec: a #GstAudioDecoder
771 * @info: #GstAudioInfo
772 *
773 * Configure output info on the srcpad of @dec.
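 *
 * A typical call from a subclass' @set_format handler could look like the
 * following sketch; the sample format, rate and channel count are just
 * placeholders:
 *
 * |[<!-- language="C" -->
 *   GstAudioInfo info;
 *
 *   gst_audio_info_init (&info);
 *   gst_audio_info_set_format (&info, GST_AUDIO_FORMAT_S16, 48000, 2, NULL);
 *   if (!gst_audio_decoder_set_output_format (dec, &info))
 *     return FALSE;
 * ]|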
774 *
775 * Returns: %TRUE on success.
776 **/
777 gboolean
778 gst_audio_decoder_set_output_format (GstAudioDecoder * dec,
779 const GstAudioInfo * info)
780 {
781 gboolean res = TRUE;
782 GstCaps *caps = NULL;
783
784 g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
785 g_return_val_if_fail (GST_AUDIO_INFO_IS_VALID (info), FALSE);
786
787 /* If the audio info can't be converted to caps,
788 * it was invalid */
789 caps = gst_audio_info_to_caps (info);
790 if (!caps) {
791 GST_WARNING_OBJECT (dec, "invalid output format");
792 return FALSE;
793 }
794
795 res = gst_audio_decoder_set_output_caps (dec, caps);
796 gst_caps_unref (caps);
797
798 return res;
799 }
800
801 /**
802 * gst_audio_decoder_set_output_caps:
803 * @dec: a #GstAudioDecoder
804 * @caps: (transfer none): (fixed) #GstCaps
805 *
806 * Configure output caps on the srcpad of @dec. Similar to
807 * gst_audio_decoder_set_output_format(), but allows subclasses to specify
808 * output caps that can't be expressed via #GstAudioInfo e.g. caps that have
809 * caps features.
810 *
811 * Returns: %TRUE on success.
812 *
813 * Since: 1.16
814 **/
815 gboolean
816 gst_audio_decoder_set_output_caps (GstAudioDecoder * dec, GstCaps * caps)
817 {
818 gboolean res = TRUE;
819 guint old_rate;
820 GstCaps *templ_caps;
821 GstAudioInfo info;
822
823 g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
824
825 GST_DEBUG_OBJECT (dec, "Setting srcpad caps %" GST_PTR_FORMAT, caps);
826
827 GST_AUDIO_DECODER_STREAM_LOCK (dec);
828
829 if (!gst_caps_is_fixed (caps))
830 goto refuse_caps;
831
832 /* check if caps can be parsed */
833 if (!gst_audio_info_from_caps (&info, caps))
834 goto refuse_caps;
835
836 /* Only allow caps that are a subset of the template caps */
837 templ_caps = gst_pad_get_pad_template_caps (dec->srcpad);
838 if (!gst_caps_is_subset (caps, templ_caps)) {
839 GST_WARNING_OBJECT (dec, "Requested output format %" GST_PTR_FORMAT
840 " do not match template %" GST_PTR_FORMAT, caps, templ_caps);
841 gst_caps_unref (templ_caps);
842 goto refuse_caps;
843 }
844 gst_caps_unref (templ_caps);
845
846 /* adjust ts tracking to new sample rate */
847 old_rate = GST_AUDIO_INFO_RATE (&dec->priv->ctx.info);
848 if (GST_CLOCK_TIME_IS_VALID (dec->priv->base_ts) && old_rate) {
849 dec->priv->base_ts +=
850 GST_FRAMES_TO_CLOCK_TIME (dec->priv->samples, old_rate);
851 dec->priv->samples = 0;
852 }
853
854 /* copy the GstAudioInfo */
855 GST_OBJECT_LOCK (dec);
856 dec->priv->ctx.info = info;
857 GST_OBJECT_UNLOCK (dec);
858
859 gst_caps_replace (&dec->priv->ctx.caps, caps);
860 dec->priv->ctx.output_format_changed = TRUE;
861
862 done:
863 GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
864
865 return res;
866
867 /* ERRORS */
868 refuse_caps:
869 {
870 GST_WARNING_OBJECT (dec, "invalid output format");
871 res = FALSE;
872 goto done;
873 }
874 }
875
876 static gboolean
877 gst_audio_decoder_sink_setcaps (GstAudioDecoder * dec, GstCaps * caps)
878 {
879 GstAudioDecoderClass *klass;
880 gboolean res = TRUE;
881
882 klass = GST_AUDIO_DECODER_GET_CLASS (dec);
883
884 GST_DEBUG_OBJECT (dec, "caps: %" GST_PTR_FORMAT, caps);
885
886 GST_AUDIO_DECODER_STREAM_LOCK (dec);
887
888 if (dec->priv->ctx.input_caps
889 && gst_caps_is_equal (dec->priv->ctx.input_caps, caps)) {
890 GST_DEBUG_OBJECT (dec, "Caps did not change, not setting again");
891 goto done;
892 }
893
894 /* NOTE pbutils only needed here */
895 /* TODO maybe (only) upstream demuxer/parser etc should handle this ? */
896 #if 0
897 if (!dec->priv->taglist)
898 dec->priv->taglist = gst_tag_list_new ();
899 dec->priv->taglist = gst_tag_list_make_writable (dec->priv->taglist);
900 gst_pb_utils_add_codec_description_to_tag_list (dec->priv->taglist,
901 GST_TAG_AUDIO_CODEC, caps);
902 dec->priv->taglist_changed = TRUE;
903 #endif
904
905 if (klass->set_format)
906 res = klass->set_format (dec, caps);
907
908 if (res)
909 gst_caps_replace (&dec->priv->ctx.input_caps, caps);
910
911 done:
912 GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
913
914 return res;
915 }
916
917 static void
918 gst_audio_decoder_setup (GstAudioDecoder * dec)
919 {
920 GstQuery *query;
921 gboolean res;
922
923 /* check if in a live pipeline; if so, latency messing is a no-no */
924 query = gst_query_new_latency ();
925 res = gst_pad_peer_query (dec->sinkpad, query);
926 if (res) {
927 gst_query_parse_latency (query, &res, NULL, NULL);
928 res = !res;
929 }
930 gst_query_unref (query);
931
932 /* normalize to bool */
933 dec->priv->agg = ! !res;
934 }
935
936 static GstFlowReturn
937 gst_audio_decoder_push_forward (GstAudioDecoder * dec, GstBuffer * buf)
938 {
939 GstAudioDecoderClass *klass;
940 GstAudioDecoderPrivate *priv;
941 GstAudioDecoderContext *ctx;
942 GstFlowReturn ret = GST_FLOW_OK;
943 GstClockTime ts;
944
945 klass = GST_AUDIO_DECODER_GET_CLASS (dec);
946 priv = dec->priv;
947 ctx = &dec->priv->ctx;
948
949 g_return_val_if_fail (ctx->info.bpf != 0, GST_FLOW_ERROR);
950
951 if (G_UNLIKELY (!buf)) {
952 g_assert_not_reached ();
953 return GST_FLOW_OK;
954 }
955
956 ctx->had_output_data = TRUE;
957 ts = GST_BUFFER_TIMESTAMP (buf);
958
959 GST_LOG_OBJECT (dec,
960 "clipping buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT
961 ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buf),
962 GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
963 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
964
965 /* clip buffer */
966 buf = gst_audio_buffer_clip (buf, &dec->output_segment, ctx->info.rate,
967 ctx->info.bpf);
968 if (G_UNLIKELY (!buf)) {
969 GST_DEBUG_OBJECT (dec, "no data after clipping to segment");
970 /* only check and return EOS if upstream still
971 * in the same segment and interested as such */
972 if (dec->priv->in_out_segment_sync) {
973 if (dec->output_segment.rate >= 0) {
974 if (ts >= dec->output_segment.stop)
975 ret = GST_FLOW_EOS;
976 } else if (ts < dec->output_segment.start) {
977 ret = GST_FLOW_EOS;
978 }
979 }
980 goto exit;
981 }
982
983 /* decorate */
984 if (G_UNLIKELY (priv->discont)) {
985 GST_LOG_OBJECT (dec, "marking discont");
986 GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
987 priv->discont = FALSE;
988 }
989
990 /* track where we are */
991 if (G_LIKELY (GST_BUFFER_TIMESTAMP_IS_VALID (buf))) {
992 /* duration should always be valid for raw audio */
993 g_assert (GST_BUFFER_DURATION_IS_VALID (buf));
994 dec->output_segment.position =
995 GST_BUFFER_TIMESTAMP (buf) + GST_BUFFER_DURATION (buf);
996 }
997
998 if (klass->pre_push) {
999 /* last chance for subclass to do some dirty stuff */
1000 ret = klass->pre_push (dec, &buf);
1001 if (ret != GST_FLOW_OK || !buf) {
1002 GST_DEBUG_OBJECT (dec, "subclass returned %s, buf %p",
1003 gst_flow_get_name (ret), buf);
1004 if (buf)
1005 gst_buffer_unref (buf);
1006 goto exit;
1007 }
1008 }
1009
1010 GST_LOG_OBJECT (dec,
1011 "pushing buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT
1012 ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buf),
1013 GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
1014 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
1015
1016 #ifdef OHOS_OPT_PERFORMANCE // ohos.opt.performance.0001: first frame decoded cost time
1017 if (!priv->has_push_first_frame) {
1018 priv->has_push_first_frame = TRUE;
1019 GST_WARNING_OBJECT (dec, "KPI-TRACE: audiodecoder push first frame");
1020 }
1021 #endif
1022 ret = gst_pad_push (dec->srcpad, buf);
1023
1024 exit:
1025 return ret;
1026 }
1027
1028 /* mini aggregator combining output buffers into fewer larger ones,
1029 * if so allowed/configured */
1030 static GstFlowReturn
1031 gst_audio_decoder_output (GstAudioDecoder * dec, GstBuffer * buf)
1032 {
1033 GstAudioDecoderPrivate *priv;
1034 GstFlowReturn ret = GST_FLOW_OK;
1035 GstBuffer *inbuf = NULL;
1036
1037 priv = dec->priv;
1038
1039 if (G_UNLIKELY (priv->agg < 0))
1040 gst_audio_decoder_setup (dec);
1041
1042 if (G_LIKELY (buf)) {
1043 GST_LOG_OBJECT (dec,
1044 "output buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT
1045 ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buf),
1046 GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
1047 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
1048 }
1049
1050 again:
1051 inbuf = NULL;
1052 if (priv->agg && dec->priv->latency > 0 &&
1053 priv->ctx.info.layout == GST_AUDIO_LAYOUT_INTERLEAVED) {
1054 gint av;
1055 gboolean assemble = FALSE;
1056 const GstClockTimeDiff tol = 10 * GST_MSECOND;
1057 GstClockTimeDiff diff = -100 * GST_MSECOND;
1058
1059 av = gst_adapter_available (priv->adapter_out);
1060 if (G_UNLIKELY (!buf)) {
1061 /* forcibly send current */
1062 assemble = TRUE;
1063 GST_LOG_OBJECT (dec, "forcing fragment flush");
1064 } else if (av && (!GST_BUFFER_TIMESTAMP_IS_VALID (buf) ||
1065 !GST_CLOCK_TIME_IS_VALID (priv->out_ts) ||
1066 ((diff = GST_CLOCK_DIFF (GST_BUFFER_TIMESTAMP (buf),
1067 priv->out_ts + priv->out_dur)) > tol) || diff < -tol)) {
1068 assemble = TRUE;
1069 GST_LOG_OBJECT (dec, "buffer %d ms apart from current fragment",
1070 (gint) (diff / GST_MSECOND));
1071 } else {
1072 /* add or start collecting */
1073 if (!av) {
1074 GST_LOG_OBJECT (dec, "starting new fragment");
1075 priv->out_ts = GST_BUFFER_TIMESTAMP (buf);
1076 } else {
1077 GST_LOG_OBJECT (dec, "adding to fragment");
1078 }
1079 gst_adapter_push (priv->adapter_out, buf);
1080 priv->out_dur += GST_BUFFER_DURATION (buf);
1081 av += gst_buffer_get_size (buf);
1082 buf = NULL;
1083 }
1084 if (priv->out_dur > dec->priv->latency)
1085 assemble = TRUE;
1086 if (av && assemble) {
1087 GST_LOG_OBJECT (dec, "assembling fragment");
1088 inbuf = buf;
1089 buf = gst_adapter_take_buffer (priv->adapter_out, av);
1090 GST_BUFFER_TIMESTAMP (buf) = priv->out_ts;
1091 GST_BUFFER_DURATION (buf) = priv->out_dur;
1092 priv->out_ts = GST_CLOCK_TIME_NONE;
1093 priv->out_dur = 0;
1094 }
1095 }
1096
1097 if (G_LIKELY (buf)) {
1098 if (dec->output_segment.rate > 0.0) {
1099 ret = gst_audio_decoder_push_forward (dec, buf);
1100 GST_LOG_OBJECT (dec, "buffer pushed: %s", gst_flow_get_name (ret));
1101 } else {
1102 ret = GST_FLOW_OK;
1103 priv->queued = g_list_prepend (priv->queued, buf);
1104 GST_LOG_OBJECT (dec, "buffer queued");
1105 }
1106
1107 if (inbuf) {
1108 buf = inbuf;
1109 goto again;
1110 }
1111 }
1112
1113 return ret;
1114 }
1115
1116 static void
1117 send_pending_events (GstAudioDecoder * dec)
1118 {
1119 GstAudioDecoderPrivate *priv = dec->priv;
1120 GList *pending_events, *l;
1121
1122 pending_events = priv->pending_events;
1123 priv->pending_events = NULL;
1124
1125 GST_DEBUG_OBJECT (dec, "Pushing pending events");
1126 for (l = pending_events; l; l = l->next)
1127 gst_audio_decoder_push_event (dec, l->data);
1128 g_list_free (pending_events);
1129 }
1130
1131 /* Iterate the list of pending events, and ensure
1132 * the current output segment is up to date for
1133 * decoding */
1134 static void
1135 apply_pending_events (GstAudioDecoder * dec)
1136 {
1137 GstAudioDecoderPrivate *priv = dec->priv;
1138 GList *l;
1139
1140 GST_DEBUG_OBJECT (dec, "Applying pending segments");
1141 for (l = priv->pending_events; l; l = l->next) {
1142 GstEvent *event = GST_EVENT (l->data);
1143 switch (GST_EVENT_TYPE (event)) {
1144 case GST_EVENT_SEGMENT:{
1145 GstSegment seg;
1146
1147 GST_AUDIO_DECODER_STREAM_LOCK (dec);
1148 gst_event_copy_segment (event, &seg);
1149
1150 GST_DEBUG_OBJECT (dec, "starting segment %" GST_SEGMENT_FORMAT, &seg);
1151
1152 dec->output_segment = seg;
1153 dec->priv->in_out_segment_sync =
1154 gst_segment_is_equal (&dec->input_segment, &seg);
1155 GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
1156 break;
1157 }
1158 default:
1159 break;
1160 }
1161 }
1162 }
1163
1164 static GstFlowReturn
1165 check_pending_reconfigure (GstAudioDecoder * dec)
1166 {
1167 GstFlowReturn ret = GST_FLOW_OK;
1168 GstAudioDecoderContext *ctx;
1169 gboolean needs_reconfigure;
1170
1171 ctx = &dec->priv->ctx;
1172
1173 needs_reconfigure = gst_pad_check_reconfigure (dec->srcpad);
1174 if (G_UNLIKELY (ctx->output_format_changed ||
1175 (GST_AUDIO_INFO_IS_VALID (&ctx->info)
1176 && needs_reconfigure))) {
1177 if (!gst_audio_decoder_negotiate_unlocked (dec)) {
1178 gst_pad_mark_reconfigure (dec->srcpad);
1179 if (GST_PAD_IS_FLUSHING (dec->srcpad))
1180 ret = GST_FLOW_FLUSHING;
1181 else
1182 ret = GST_FLOW_NOT_NEGOTIATED;
1183 }
1184 }
1185 return ret;
1186 }
1187
1188 static gboolean
1189 gst_audio_decoder_transform_meta_default (GstAudioDecoder *
1190 decoder, GstBuffer * outbuf, GstMeta * meta, GstBuffer * inbuf)
1191 {
1192 const GstMetaInfo *info = meta->info;
1193 const gchar *const *tags;
1194
1195 tags = gst_meta_api_type_get_tags (info->api);
1196
1197 if (!tags || (g_strv_length ((gchar **) tags) == 1
1198 && gst_meta_api_type_has_tag (info->api,
1199 g_quark_from_string (GST_META_TAG_AUDIO_STR))))
1200 return TRUE;
1201
1202 return FALSE;
1203 }
1204
1205 typedef struct
1206 {
1207 GstAudioDecoder *decoder;
1208 GstBuffer *outbuf;
1209 } CopyMetaData;
1210
1211 static gboolean
1212 foreach_metadata (GstBuffer * inbuf, GstMeta ** meta, gpointer user_data)
1213 {
1214 CopyMetaData *data = user_data;
1215 GstAudioDecoder *decoder = data->decoder;
1216 GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (decoder);
1217 GstBuffer *outbuf = data->outbuf;
1218 const GstMetaInfo *info = (*meta)->info;
1219 gboolean do_copy = FALSE;
1220
1221 if (gst_meta_api_type_has_tag (info->api, _gst_meta_tag_memory)) {
1222 /* never call the transform_meta with memory specific metadata */
1223 GST_DEBUG_OBJECT (decoder, "not copying memory specific metadata %s",
1224 g_type_name (info->api));
1225 do_copy = FALSE;
1226 } else if (klass->transform_meta) {
1227 do_copy = klass->transform_meta (decoder, outbuf, *meta, inbuf);
1228 GST_DEBUG_OBJECT (decoder, "transformed metadata %s: copy: %d",
1229 g_type_name (info->api), do_copy);
1230 }
1231
1232 /* we only copy metadata when the subclass implemented a transform_meta
1233 * function and when it returns %TRUE */
1234 if (do_copy && info->transform_func) {
1235 GstMetaTransformCopy copy_data = { FALSE, 0, -1 };
1236 GST_DEBUG_OBJECT (decoder, "copy metadata %s", g_type_name (info->api));
1237 /* simply copy then */
1238 info->transform_func (outbuf, *meta, inbuf,
1239 _gst_meta_transform_copy, &copy_data);
1240 }
1241 return TRUE;
1242 }
1243
1244 /**
1245 * gst_audio_decoder_finish_subframe:
1246 * @dec: a #GstAudioDecoder
1247 * @buf: decoded data
1248 *
1249 * Collects decoded data and pushes it downstream. This function may be called
1250 * multiple times for a given input frame.
1251 *
1252 * @buf may be NULL in which case it is assumed that the current input frame is
1253 * finished. This is equivalent to calling gst_audio_decoder_finish_frame()
1254 * with a NULL buffer and frames=1 after having pushed out all decoded audio
1255 * subframes using this function.
1256 *
1257 * When called with valid data in @buf the source pad caps must have been set
1258 * already.
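 *
 * As a rough sketch, a subclass splitting one input frame into several
 * decoded chunks could push each chunk as it becomes available and then
 * close the input frame; my_codec_next_chunk(), self and ret are
 * hypothetical names assumed here:
 *
 * |[<!-- language="C" -->
 *   GstFlowReturn ret;
 *   GstBuffer *chunk;
 *
 *   while ((chunk = my_codec_next_chunk (self)) != NULL) {
 *     ret = gst_audio_decoder_finish_subframe (dec, chunk);
 *     if (ret != GST_FLOW_OK)
 *       return ret;
 *   }
 *   return gst_audio_decoder_finish_subframe (dec, NULL);
 * ]|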
1259 *
1260 * Note that a frame received in #GstAudioDecoderClass.handle_frame() may be
1261 * invalidated by a call to this function.
1262 *
1263 * Returns: a #GstFlowReturn that should be escalated to caller (of caller)
1264 *
1265 * Since: 1.16
1266 */
1267 GstFlowReturn
1268 gst_audio_decoder_finish_subframe (GstAudioDecoder * dec, GstBuffer * buf)
1269 {
1270 g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), GST_FLOW_ERROR);
1271
1272 if (buf == NULL)
1273 return gst_audio_decoder_finish_frame_or_subframe (dec, NULL, 1);
1274 else
1275 return gst_audio_decoder_finish_frame_or_subframe (dec, buf, 0);
1276 }
1277
1278 /**
1279 * gst_audio_decoder_finish_frame:
1280 * @dec: a #GstAudioDecoder
1281 * @buf: decoded data
1282 * @frames: number of decoded frames represented by decoded data
1283 *
1284 * Collects decoded data and pushes it downstream.
1285 *
1286 * @buf may be NULL in which case the indicated number of frames
1287 * are discarded and considered to have produced no output
1288 * (e.g. lead-in or setup frames).
1289 * Otherwise, source pad caps must be set when it is called with valid
1290 * data in @buf.
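 *
 * For example, a subclass that consumed one pending input frame as codec
 * setup data and produced no audio output could simply call:
 *
 * |[<!-- language="C" -->
 *   return gst_audio_decoder_finish_frame (dec, NULL, 1);
 * ]|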
1291 *
1292 * Note that a frame received in #GstAudioDecoderClass.handle_frame() may be
1293 * invalidated by a call to this function.
1294 *
1295 * Returns: a #GstFlowReturn that should be escalated to caller (of caller)
1296 */
1297 GstFlowReturn
1298 gst_audio_decoder_finish_frame (GstAudioDecoder * dec, GstBuffer * buf,
1299 gint frames)
1300 {
1301 g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), GST_FLOW_ERROR);
1302
1303 /* no dummy calls please */
1304 g_return_val_if_fail (frames != 0, GST_FLOW_ERROR);
1305
1306 return gst_audio_decoder_finish_frame_or_subframe (dec, buf, frames);
1307 }
1308
1309 /* frames == 0 indicates that this is a sub-frame and further sub-frames may
1310 * follow for the current input frame. */
1311 static GstFlowReturn
1312 gst_audio_decoder_finish_frame_or_subframe (GstAudioDecoder * dec,
1313 GstBuffer * buf, gint frames)
1314 {
1315 GstAudioDecoderPrivate *priv;
1316 GstAudioDecoderContext *ctx;
1317 GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (dec);
1318 GstAudioMeta *meta;
1319 GstClockTime ts, next_ts;
1320 gsize size, samples = 0;
1321 GstFlowReturn ret = GST_FLOW_OK;
1322 GQueue inbufs = G_QUEUE_INIT;
1323 gboolean is_subframe = (frames == 0);
1324 gboolean do_check_resync;
1325
1326 /* subclass should not hand us no data */
1327 g_return_val_if_fail (buf == NULL || gst_buffer_get_size (buf) > 0,
1328 GST_FLOW_ERROR);
1329
1330 /* if it's a subframe (frames == 0) we must have a valid buffer */
1331 g_assert (!is_subframe || buf != NULL);
1332
1333 priv = dec->priv;
1334 ctx = &dec->priv->ctx;
1335 meta = buf ? gst_buffer_get_audio_meta (buf) : NULL;
1336 size = buf ? gst_buffer_get_size (buf) : 0;
1337 samples = buf ? (meta ? meta->samples : size / ctx->info.bpf) : 0;
1338
1339 /* must know the output format by now */
1340 g_return_val_if_fail (buf == NULL || GST_AUDIO_INFO_IS_VALID (&ctx->info),
1341 GST_FLOW_ERROR);
1342
1343 GST_LOG_OBJECT (dec,
1344 "accepting %" G_GSIZE_FORMAT " bytes == %" G_GSIZE_FORMAT
1345 " samples for %d frames", buf ? size : 0, samples, frames);
1346
1347 GST_AUDIO_DECODER_STREAM_LOCK (dec);
1348
1349 if (buf != NULL && priv->subframe_samples == 0) {
1350 ret = check_pending_reconfigure (dec);
1351 if (ret == GST_FLOW_FLUSHING || ret == GST_FLOW_NOT_NEGOTIATED) {
1352 gst_buffer_unref (buf);
1353 goto exit;
1354 }
1355
1356 if (priv->pending_events)
1357 send_pending_events (dec);
1358 }
1359
1360 /* sanity checking */
1361 if (G_LIKELY (buf && ctx->info.bpf)) {
1362 if (!meta || meta->info.layout == GST_AUDIO_LAYOUT_INTERLEAVED) {
1363 /* output should be a whole number of sample frames */
1364 if (size % ctx->info.bpf)
1365 goto wrong_buffer;
1366 /* output should have no additional padding */
1367 if (samples != size / ctx->info.bpf)
1368 goto wrong_samples;
1369 } else {
1370 /* can't have more samples than what the buffer fits */
1371 if (samples > size / ctx->info.bpf)
1372 goto wrong_samples;
1373 }
1374 }
1375
1376 /* frame and ts book-keeping */
1377 if (G_UNLIKELY (frames < 0)) {
1378 if (G_UNLIKELY (-frames - 1 > priv->frames.length)) {
1379 GST_ELEMENT_WARNING (dec, STREAM, DECODE,
1380 ("received more decoded frames %d than provided %d", frames,
1381 priv->frames.length), (NULL));
1382 frames = 0;
1383 } else {
1384 frames = priv->frames.length + frames + 1;
1385 }
1386 } else if (G_UNLIKELY (frames > priv->frames.length)) {
1387 if (G_LIKELY (!priv->force)) {
1388 GST_ELEMENT_WARNING (dec, STREAM, DECODE,
1389 ("received more decoded frames %d than provided %d", frames,
1390 priv->frames.length), (NULL));
1391 }
1392 frames = priv->frames.length;
1393 }
1394
1395 if (G_LIKELY (priv->frames.length))
1396 ts = GST_BUFFER_TIMESTAMP (priv->frames.head->data);
1397 else
1398 ts = GST_CLOCK_TIME_NONE;
1399
1400 GST_DEBUG_OBJECT (dec, "leading frame ts %" GST_TIME_FORMAT,
1401 GST_TIME_ARGS (ts));
1402
1403 if (is_subframe && priv->frames.length == 0)
1404 goto subframe_without_pending_input_frame;
1405
1406 /* this will be skipped in the is_subframe case because frames will be 0 */
1407 while (priv->frames.length && frames) {
1408 g_queue_push_tail (&inbufs, g_queue_pop_head (&priv->frames));
1409 dec->priv->ctx.delay = dec->priv->frames.length;
1410 frames--;
1411 }
1412
1413 if (G_UNLIKELY (!buf))
1414 goto exit;
1415
1416 /* lock on */
1417 if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (priv->base_ts))) {
1418 priv->base_ts = ts;
1419 GST_DEBUG_OBJECT (dec, "base_ts now %" GST_TIME_FORMAT, GST_TIME_ARGS (ts));
1420 }
1421
1422 /* still no valid ts, track the segment one */
1423 if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (priv->base_ts)) &&
1424 dec->output_segment.rate > 0.0) {
1425 priv->base_ts = dec->output_segment.start;
1426 }
1427
1428 /* only check for resync at the beginning of an input/output frame */
1429 do_check_resync = !is_subframe || priv->subframe_samples == 0;
1430
1431 /* slightly convoluted approach caters for perfect ts if subclass desires. */
1432 if (do_check_resync && GST_CLOCK_TIME_IS_VALID (ts)) {
1433 if (dec->priv->tolerance > 0) {
1434 GstClockTimeDiff diff;
1435
1436 g_assert (GST_CLOCK_TIME_IS_VALID (priv->base_ts));
1437 next_ts = priv->base_ts +
1438 gst_util_uint64_scale (priv->samples, GST_SECOND, ctx->info.rate);
1439 GST_LOG_OBJECT (dec,
1440 "buffer is %" G_GUINT64_FORMAT " samples past base_ts %"
1441 GST_TIME_FORMAT ", expected ts %" GST_TIME_FORMAT, priv->samples,
1442 GST_TIME_ARGS (priv->base_ts), GST_TIME_ARGS (next_ts));
1443 diff = GST_CLOCK_DIFF (next_ts, ts);
1444 GST_LOG_OBJECT (dec, "ts diff %d ms", (gint) (diff / GST_MSECOND));
1445 /* if within tolerance,
1446 * discard buffer ts and carry on producing perfect stream,
1447 * otherwise resync to ts */
1448 if (G_UNLIKELY (diff < (gint64) - dec->priv->tolerance ||
1449 diff > (gint64) dec->priv->tolerance)) {
1450 GST_DEBUG_OBJECT (dec, "base_ts resync");
1451 priv->base_ts = ts;
1452 priv->samples = 0;
1453 }
1454 } else {
1455 GST_DEBUG_OBJECT (dec, "base_ts resync");
1456 priv->base_ts = ts;
1457 priv->samples = 0;
1458 }
1459 }
1460
1461 /* delayed one-shot stuff until confirmed data */
1462 if (priv->taglist && priv->taglist_changed) {
1463 GstEvent *tags_event;
1464
1465 tags_event = gst_audio_decoder_create_merged_tags_event (dec);
1466
1467 if (tags_event != NULL)
1468 gst_audio_decoder_push_event (dec, tags_event);
1469
1470 priv->taglist_changed = FALSE;
1471 }
1472
1473 buf = gst_buffer_make_writable (buf);
1474 if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (priv->base_ts))) {
1475 GST_BUFFER_TIMESTAMP (buf) =
1476 priv->base_ts +
1477 GST_FRAMES_TO_CLOCK_TIME (priv->samples, ctx->info.rate);
1478 GST_BUFFER_DURATION (buf) = priv->base_ts +
1479 GST_FRAMES_TO_CLOCK_TIME (priv->samples + samples, ctx->info.rate) -
1480 GST_BUFFER_TIMESTAMP (buf);
1481 } else {
1482 GST_BUFFER_TIMESTAMP (buf) = GST_CLOCK_TIME_NONE;
1483 GST_BUFFER_DURATION (buf) =
1484 GST_FRAMES_TO_CLOCK_TIME (samples, ctx->info.rate);
1485 }
1486
1487 if (klass->transform_meta) {
1488 if (inbufs.length) {
1489 GList *l;
1490 for (l = inbufs.head; l; l = l->next) {
1491 CopyMetaData data;
1492
1493 data.decoder = dec;
1494 data.outbuf = buf;
1495 gst_buffer_foreach_meta (l->data, foreach_metadata, &data);
1496 }
1497 } else if (is_subframe) {
1498 CopyMetaData data;
1499 GstBuffer *in_buf;
1500
1501 /* For subframes we assume a 1:N relationship for now, so we just take
1502 * metas from the first pending input buf */
1503 in_buf = g_queue_peek_head (&priv->frames);
1504 data.decoder = dec;
1505 data.outbuf = buf;
1506 gst_buffer_foreach_meta (in_buf, foreach_metadata, &data);
1507 } else {
1508 GST_WARNING_OBJECT (dec,
1509 "Can't copy metadata because input buffers disappeared");
1510 }
1511 }
1512
1513 GST_OBJECT_LOCK (dec);
1514 priv->samples += samples;
1515 priv->samples_out += samples;
1516 GST_OBJECT_UNLOCK (dec);
1517
1518 /* we got data, so note things are looking up */
1519 if (G_UNLIKELY (dec->priv->error_count))
1520 dec->priv->error_count = 0;
1521
1522 ret = gst_audio_decoder_output (dec, buf);
1523
1524 exit:
1525 g_queue_foreach (&inbufs, (GFunc) gst_buffer_unref, NULL);
1526 g_queue_clear (&inbufs);
1527
1528 if (is_subframe)
1529 dec->priv->subframe_samples += samples;
1530 else
1531 dec->priv->subframe_samples = 0;
1532
1533 GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
1534
1535 return ret;
1536
1537 /* ERRORS */
1538 wrong_buffer:
1539 {
1540 /* arguably more of a programming error? */
1541 GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL),
1542 ("buffer size %" G_GSIZE_FORMAT " not a multiple of %d", size,
1543 ctx->info.bpf));
1544 gst_buffer_unref (buf);
1545 ret = GST_FLOW_ERROR;
1546 goto exit;
1547 }
1548 wrong_samples:
1549 {
1550 /* arguably more of a programming error? */
1551 GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL),
1552 ("GstAudioMeta samples (%" G_GSIZE_FORMAT ") are inconsistent with "
1553 "the buffer size and layout (size/bpf = %" G_GSIZE_FORMAT ")",
1554 meta->samples, size / ctx->info.bpf));
1555 gst_buffer_unref (buf);
1556 ret = GST_FLOW_ERROR;
1557 goto exit;
1558 }
1559 subframe_without_pending_input_frame:
1560 {
1561 /* arguably more of a programming error? */
1562 GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL),
1563 ("Received decoded subframe, but no pending frame"));
1564 gst_buffer_unref (buf);
1565 ret = GST_FLOW_ERROR;
1566 goto exit;
1567 }
1568 }
1569
1570 static GstFlowReturn
1571 gst_audio_decoder_handle_frame (GstAudioDecoder * dec,
1572 GstAudioDecoderClass * klass, GstBuffer * buffer)
1573 {
1574 /* Skip decoding and send a GAP instead if
1575 * GST_SEGMENT_FLAG_TRICKMODE_NO_AUDIO is set and we have timestamps
1576 * FIXME: We only do this for forward playback atm, because reverse
1577 * playback would require accumulating GAP events and pushing them
1578 * out in reverse order as for normal audio samples
1579 */
1580 if (G_UNLIKELY (dec->input_segment.rate > 0.0
1581 && dec->input_segment.flags & GST_SEGMENT_FLAG_TRICKMODE_NO_AUDIO)) {
1582 if (buffer) {
1583 GstClockTime ts = GST_BUFFER_PTS (buffer);
1584 if (GST_CLOCK_TIME_IS_VALID (ts)) {
1585 GstEvent *event = gst_event_new_gap (ts, GST_BUFFER_DURATION (buffer));
1586
1587 gst_buffer_unref (buffer);
1588 GST_LOG_OBJECT (dec, "Skipping decode in trickmode and sending gap");
1589 gst_audio_decoder_handle_gap (dec, event);
1590 return GST_FLOW_OK;
1591 }
1592 }
1593 }
1594
1595 if (G_LIKELY (buffer)) {
1596 gsize size = gst_buffer_get_size (buffer);
1597 /* keep around for admin */
1598 GST_LOG_OBJECT (dec,
1599 "tracking frame size %" G_GSIZE_FORMAT ", ts %" GST_TIME_FORMAT, size,
1600 GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)));
1601 g_queue_push_tail (&dec->priv->frames, buffer);
1602 dec->priv->ctx.delay = dec->priv->frames.length;
1603 GST_OBJECT_LOCK (dec);
1604 dec->priv->bytes_in += size;
1605 GST_OBJECT_UNLOCK (dec);
1606 } else {
1607 GST_LOG_OBJECT (dec, "providing subclass with NULL frame");
1608 }
1609
1610 #ifdef OHOS_OPT_PERFORMANCE // ohos.opt.performance.0001: first frame decoded cost time
1611 if (!dec->priv->has_recv_first_frame) {
1612 dec->priv->has_recv_first_frame = TRUE;
1613 GST_WARNING_OBJECT (dec, "KPI-TRACE: audiodecoder recv first frame");
1614 }
1615 #endif
1616
1617 return klass->handle_frame (dec, buffer);
1618 }
1619
1620 /* maybe subclass configurable instead, but this allows for a whole lot of
1621 * raw samples, so at least quite some encoded ... */
1622 #define GST_AUDIO_DECODER_MAX_SYNC 10 * 8 * 2 * 1024
1623
1624 static GstFlowReturn
1625 gst_audio_decoder_push_buffers (GstAudioDecoder * dec, gboolean force)
1626 {
1627 GstAudioDecoderClass *klass;
1628 GstAudioDecoderPrivate *priv;
1629 GstAudioDecoderContext *ctx;
1630 GstFlowReturn ret = GST_FLOW_OK;
1631 GstBuffer *buffer;
1632 gint av, flush;
1633
1634 klass = GST_AUDIO_DECODER_GET_CLASS (dec);
1635 priv = dec->priv;
1636 ctx = &dec->priv->ctx;
1637
1638 g_return_val_if_fail (klass->handle_frame != NULL, GST_FLOW_ERROR);
1639
1640 av = gst_adapter_available (priv->adapter);
1641 GST_DEBUG_OBJECT (dec, "available: %d", av);
1642
1643 while (ret == GST_FLOW_OK) {
1644
1645 flush = 0;
1646 ctx->eos = force;
1647
1648 if (G_LIKELY (av)) {
1649 gint len;
1650 GstClockTime ts;
1651 guint64 distance;
1652
1653 /* parse if needed */
1654 if (klass->parse) {
1655 gint offset = 0;
1656
1657 /* limited (legacy) parsing; avoid whole of baseparse */
1658 GST_DEBUG_OBJECT (dec, "parsing available: %d", av);
1659 /* piggyback sync state on discont */
1660 ctx->sync = !priv->discont;
1661 ret = klass->parse (dec, priv->adapter, &offset, &len);
1662
1663 g_assert (offset <= av);
1664 if (offset) {
1665 /* jumped a bit */
1666 GST_DEBUG_OBJECT (dec, "skipped %d; setting DISCONT", offset);
1667 gst_adapter_flush (priv->adapter, offset);
1668 flush = offset;
1669 /* avoid parsing indefinitely */
1670 priv->sync_flush += offset;
1671 if (priv->sync_flush > GST_AUDIO_DECODER_MAX_SYNC)
1672 goto parse_failed;
1673 }
1674
1675 if (ret == GST_FLOW_EOS) {
1676 GST_LOG_OBJECT (dec, "no frame yet");
1677 ret = GST_FLOW_OK;
1678 break;
1679 } else if (ret == GST_FLOW_OK) {
1680 GST_LOG_OBJECT (dec, "frame at offset %d of length %d", offset, len);
1681 g_assert (len);
1682 g_assert (offset + len <= av);
1683 priv->sync_flush = 0;
1684 } else {
1685 break;
1686 }
1687 } else {
1688 len = av;
1689 }
1690 /* track upstream ts, but do not get stuck if nothing new upstream */
1691 ts = gst_adapter_prev_pts (priv->adapter, &distance);
1692 if (ts != priv->prev_ts || distance <= priv->prev_distance) {
1693 priv->prev_ts = ts;
1694 priv->prev_distance = distance;
1695 } else {
1696 GST_LOG_OBJECT (dec, "ts == prev_ts; discarding");
1697 ts = GST_CLOCK_TIME_NONE;
1698 }
1699 buffer = gst_adapter_take_buffer (priv->adapter, len);
1700 buffer = gst_buffer_make_writable (buffer);
1701 GST_BUFFER_TIMESTAMP (buffer) = ts;
1702 flush += len;
1703 priv->force = FALSE;
1704 } else {
1705 if (!force)
1706 break;
1707 if (!priv->drainable) {
1708 priv->drained = TRUE;
1709 break;
1710 }
1711 buffer = NULL;
1712 priv->force = TRUE;
1713 }
1714
1715 ret = gst_audio_decoder_handle_frame (dec, klass, buffer);
1716
1717 /* do not keep pushing it ... */
1718 if (G_UNLIKELY (!av)) {
1719 priv->drained = TRUE;
1720 break;
1721 }
1722
1723 av -= flush;
1724 g_assert (av >= 0);
1725 }
1726
1727 GST_LOG_OBJECT (dec, "done pushing to subclass");
1728 return ret;
1729
1730 /* ERRORS */
1731 parse_failed:
1732 {
1733 GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL), ("failed to parse stream"));
1734 return GST_FLOW_ERROR;
1735 }
1736 }
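
/* Example (editorial sketch): the contract the loop above expects from a
 * subclass @parse implementation.  It should set *offset to the number of
 * bytes to skip and *length to the size of one complete frame, returning
 * GST_FLOW_EOS while more data is needed.  MY_DEC_HEADER_SIZE,
 * my_dec_find_sync() and my_dec_frame_size() are hypothetical helpers.
 *
 *   static GstFlowReturn
 *   my_dec_parse (GstAudioDecoder * dec, GstAdapter * adapter,
 *       gint * offset, gint * length)
 *   {
 *     gsize avail = gst_adapter_available (adapter);
 *     const guint8 *data;
 *     gint skip, frame_size;
 *
 *     if (avail < MY_DEC_HEADER_SIZE)
 *       return GST_FLOW_EOS;
 *
 *     data = gst_adapter_map (adapter, avail);
 *     skip = my_dec_find_sync (data, avail);
 *     if (avail - skip >= MY_DEC_HEADER_SIZE)
 *       frame_size = my_dec_frame_size (data + skip);
 *     else
 *       frame_size = -1;
 *     gst_adapter_unmap (adapter);
 *
 *     *offset = skip;
 *     if (frame_size < 0 || skip + frame_size > avail)
 *       return GST_FLOW_EOS;
 *
 *     *length = frame_size;
 *     return GST_FLOW_OK;
 *   }
 */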
1737
1738 static GstFlowReturn
1739 gst_audio_decoder_drain (GstAudioDecoder * dec)
1740 {
1741 GstFlowReturn ret;
1742
1743 if (dec->priv->drained && !dec->priv->gather)
1744 return GST_FLOW_OK;
1745
1746 /* Apply any pending events before draining, as that
1747 * may update the pending segment info */
1748 apply_pending_events (dec);
1749
1750 /* dispatch reverse pending buffers */
1751 /* chain eventually calls upon drain as well, but by that time
1752 * gather list should be clear, so ok ... */
1753 if (dec->output_segment.rate < 0.0 && dec->priv->gather)
1754 gst_audio_decoder_chain_reverse (dec, NULL);
1755 /* have subclass give all it can */
1756 ret = gst_audio_decoder_push_buffers (dec, TRUE);
1757 if (ret != GST_FLOW_OK) {
1758 GST_WARNING_OBJECT (dec, "audio decoder push buffers failed");
1759 goto drain_failed;
1760 }
1761 /* ensure all output sent */
1762 ret = gst_audio_decoder_output (dec, NULL);
1763 if (ret != GST_FLOW_OK)
1764 GST_WARNING_OBJECT (dec, "audio decoder output failed");
1765
1766 drain_failed:
1767 /* everything should be away now */
1768 if (dec->priv->frames.length) {
1769 /* not fatal/impossible though if subclass/codec eats stuff */
1770 GST_WARNING_OBJECT (dec, "still %d frames left after draining",
1771 dec->priv->frames.length);
1772 g_queue_foreach (&dec->priv->frames, (GFunc) gst_buffer_unref, NULL);
1773 g_queue_clear (&dec->priv->frames);
1774 }
1775
1776 /* discard (unparsed) leftover */
1777 gst_adapter_clear (dec->priv->adapter);
1778 return ret;
1779 }
1780
1781 /* hard == FLUSH, otherwise discont */
1782 static GstFlowReturn
1783 gst_audio_decoder_flush (GstAudioDecoder * dec, gboolean hard)
1784 {
1785 GstAudioDecoderClass *klass;
1786 GstFlowReturn ret = GST_FLOW_OK;
1787
1788 klass = GST_AUDIO_DECODER_GET_CLASS (dec);
1789
1790 GST_LOG_OBJECT (dec, "flush hard %d", hard);
1791
1792 if (!hard) {
1793 ret = gst_audio_decoder_drain (dec);
1794 } else {
1795 gst_audio_decoder_clear_queues (dec);
1796 gst_segment_init (&dec->input_segment, GST_FORMAT_TIME);
1797 gst_segment_init (&dec->output_segment, GST_FORMAT_TIME);
1798 dec->priv->error_count = 0;
1799 }
1800 /* only bother subclass with flushing if known it is already alive
1801 * and kicking out stuff */
1802 if (klass->flush && dec->priv->samples_out > 0)
1803 klass->flush (dec, hard);
1804 /* and get (re)set for the sequel */
1805 gst_audio_decoder_reset (dec, FALSE);
1806
1807 return ret;
1808 }
1809
1810 static GstFlowReturn
1811 gst_audio_decoder_chain_forward (GstAudioDecoder * dec, GstBuffer * buffer)
1812 {
1813 GstFlowReturn ret = GST_FLOW_OK;
1814
1815 /* discard silly case, though maybe ts may be of value ?? */
1816 if (G_UNLIKELY (gst_buffer_get_size (buffer) == 0)) {
1817 GST_DEBUG_OBJECT (dec, "discarding empty buffer");
1818 gst_buffer_unref (buffer);
1819 goto exit;
1820 }
1821
1822 /* grab buffer */
1823 gst_adapter_push (dec->priv->adapter, buffer);
1824 buffer = NULL;
1825 /* new stuff, so we can push subclass again */
1826 dec->priv->drained = FALSE;
1827
1828 /* hand to subclass */
1829 ret = gst_audio_decoder_push_buffers (dec, FALSE);
1830
1831 exit:
1832 GST_LOG_OBJECT (dec, "chain-done");
1833 return ret;
1834 }
1835
1836 static void
1837 gst_audio_decoder_clear_queues (GstAudioDecoder * dec)
1838 {
1839 GstAudioDecoderPrivate *priv = dec->priv;
1840
1841 g_list_foreach (priv->queued, (GFunc) gst_mini_object_unref, NULL);
1842 g_list_free (priv->queued);
1843 priv->queued = NULL;
1844 g_list_foreach (priv->gather, (GFunc) gst_mini_object_unref, NULL);
1845 g_list_free (priv->gather);
1846 priv->gather = NULL;
1847 g_list_foreach (priv->decode, (GFunc) gst_mini_object_unref, NULL);
1848 g_list_free (priv->decode);
1849 priv->decode = NULL;
1850 }
1851
1852 /*
1853 * Input:
1854 * Buffer decoding order: 7 8 9 4 5 6 3 1 2 EOS
1855 * Discont flag: D D D D
1856 *
1857 * - Each Discont marks a discont in the decoding order.
1858 *
1859 * for vorbis, each buffer is a keyframe when we have the previous
1860 * buffer. This means that to decode buffer 7, we need buffer 6, which
1861 * arrives out of order.
1862 *
1863 * we first gather buffers in the gather queue until we get a DISCONT. We
1864 * prepend each incoming buffer so that they are in reverse order.
1865 *
1866 * gather queue: 9 8 7
1867 * decode queue:
1868 * output queue:
1869 *
1870 * When a DISCONT is received (buffer 4), we move the gather queue to the
1871 * decode queue. This is simply done by taking the head of the gather queue
1872 * and prepending it to the decode queue. This yields:
1873 *
1874 * gather queue:
1875 * decode queue: 7 8 9
1876 * output queue:
1877 *
1878 * Then we decode each buffer in the decode queue in order and put the output
1879 * buffer in the output queue. The first buffer (7) will not produce any output
1880 * because it needs the previous buffer (6) which did not arrive yet. This
1881 * yields:
1882 *
1883 * gather queue:
1884 * decode queue: 7 8 9
1885 * output queue: 9 8
1886 *
1887 * Then we remove the consumed buffers from the decode queue. Buffer 7 is not
1888 * completely consumed, we need to keep it around for when we receive buffer
1889 * 6. This yields:
1890 *
1891 * gather queue:
1892 * decode queue: 7
1893 * output queue: 9 8
1894 *
1895 * Then we accumulate more buffers:
1896 *
1897 * gather queue: 6 5 4
1898 * decode queue: 7
1899 * output queue:
1900 *
1901 * prepending to the decode queue on DISCONT yields:
1902 *
1903 * gather queue:
1904 * decode queue: 4 5 6 7
1905 * output queue:
1906 *
1907 * after decoding and keeping buffer 4:
1908 *
1909 * gather queue:
1910 * decode queue: 4
1911 * output queue: 7 6 5
1912 *
1913 * Etc..
1914 */
1915 static GstFlowReturn
1916 gst_audio_decoder_flush_decode (GstAudioDecoder * dec)
1917 {
1918 GstAudioDecoderPrivate *priv = dec->priv;
1919 GstFlowReturn res = GST_FLOW_OK;
1920 GstClockTime timestamp;
1921 GList *walk;
1922
1923 walk = priv->decode;
1924
1925 GST_DEBUG_OBJECT (dec, "flushing buffers to decoder");
1926
1927 /* clear buffer and decoder state */
1928 gst_audio_decoder_flush (dec, FALSE);
1929
1930 while (walk) {
1931 GList *next;
1932 GstBuffer *buf = GST_BUFFER_CAST (walk->data);
1933
1934 GST_DEBUG_OBJECT (dec, "decoding buffer %p, ts %" GST_TIME_FORMAT,
1935 buf, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)));
1936
1937 next = g_list_next (walk);
1938 /* decode buffer, resulting data prepended to output queue */
1939 gst_buffer_ref (buf);
1940 res = gst_audio_decoder_chain_forward (dec, buf);
1941
1942 /* if we generated output, we can discard the buffer, else we
1943 * keep it in the queue */
1944 if (priv->queued) {
1945 GST_DEBUG_OBJECT (dec, "decoded buffer to %p", priv->queued->data);
1946 priv->decode = g_list_delete_link (priv->decode, walk);
1947 gst_buffer_unref (buf);
1948 } else {
1949 GST_DEBUG_OBJECT (dec, "buffer did not decode, keeping");
1950 }
1951 walk = next;
1952 }
1953
1954 /* drain any aggregation (or otherwise) leftover */
1955 gst_audio_decoder_drain (dec);
1956
1957 /* now send queued data downstream */
1958 timestamp = GST_CLOCK_TIME_NONE;
1959 while (priv->queued) {
1960 GstBuffer *buf = GST_BUFFER_CAST (priv->queued->data);
1961 GstClockTime duration;
1962
1963 duration = GST_BUFFER_DURATION (buf);
1964
1965 /* duration should always be valid for raw audio */
1966 g_assert (GST_CLOCK_TIME_IS_VALID (duration));
1967
1968 /* interpolate (backward) if needed */
1969 if (G_LIKELY (timestamp != -1)) {
1970 if (timestamp > duration)
1971 timestamp -= duration;
1972 else
1973 timestamp = 0;
1974 }
1975
1976 if (!GST_BUFFER_TIMESTAMP_IS_VALID (buf)) {
1977 GST_LOG_OBJECT (dec, "applying reverse interpolated ts %"
1978 GST_TIME_FORMAT, GST_TIME_ARGS (timestamp));
1979 GST_BUFFER_TIMESTAMP (buf) = timestamp;
1980 } else {
1981 /* track otherwise */
1982 timestamp = GST_BUFFER_TIMESTAMP (buf);
1983 GST_LOG_OBJECT (dec, "tracking ts %" GST_TIME_FORMAT,
1984 GST_TIME_ARGS (timestamp));
1985 }
1986
1987 if (G_LIKELY (res == GST_FLOW_OK)) {
1988 GST_DEBUG_OBJECT (dec, "pushing buffer %p of size %" G_GSIZE_FORMAT ", "
1989 "time %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf,
1990 gst_buffer_get_size (buf), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
1991 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
1992 /* should be already, but let's be sure */
1993 buf = gst_buffer_make_writable (buf);
1994 /* avoid stray DISCONT from forward processing,
1995 * which have no meaning in reverse pushing */
1996 GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DISCONT);
1997 res = gst_audio_decoder_push_forward (dec, buf);
1998 } else {
1999 gst_buffer_unref (buf);
2000 }
2001
2002 priv->queued = g_list_delete_link (priv->queued, priv->queued);
2003 }
2004
2005 return res;
2006 }
2007
2008 static GstFlowReturn
2009 gst_audio_decoder_chain_reverse (GstAudioDecoder * dec, GstBuffer * buf)
2010 {
2011 GstAudioDecoderPrivate *priv = dec->priv;
2012 GstFlowReturn result = GST_FLOW_OK;
2013
2014 /* if we have a discont, move buffers to the decode list */
2015 if (!buf || GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DISCONT)) {
2016 GST_DEBUG_OBJECT (dec, "received discont");
2017 while (priv->gather) {
2018 GstBuffer *gbuf;
2019
2020 gbuf = GST_BUFFER_CAST (priv->gather->data);
2021 /* remove from the gather list */
2022 priv->gather = g_list_delete_link (priv->gather, priv->gather);
2023 /* copy to decode queue */
2024 priv->decode = g_list_prepend (priv->decode, gbuf);
2025 }
2026 /* decode stuff in the decode queue */
2027 gst_audio_decoder_flush_decode (dec);
2028 }
2029
2030 if (G_LIKELY (buf)) {
2031 GST_DEBUG_OBJECT (dec, "gathering buffer %p of size %" G_GSIZE_FORMAT ", "
2032 "time %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf,
2033 gst_buffer_get_size (buf), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
2034 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
2035
2036 /* add buffer to gather queue */
2037 priv->gather = g_list_prepend (priv->gather, buf);
2038 }
2039
2040 return result;
2041 }
2042
2043 static GstFlowReturn
2044 gst_audio_decoder_chain (GstPad * pad, GstObject * parent, GstBuffer * buffer)
2045 {
2046 GstAudioDecoder *dec;
2047 GstFlowReturn ret;
2048
2049 dec = GST_AUDIO_DECODER (parent);
2050
2051 GST_LOG_OBJECT (dec,
2052 "received buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT
2053 ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buffer),
2054 GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)),
2055 GST_TIME_ARGS (GST_BUFFER_DURATION (buffer)));
2056
2057 GST_AUDIO_DECODER_STREAM_LOCK (dec);
2058
2059 if (G_UNLIKELY (dec->priv->ctx.input_caps == NULL && dec->priv->needs_format))
2060 goto not_negotiated;
2061
2062 dec->priv->ctx.had_input_data = TRUE;
2063
2064 if (!dec->priv->expecting_discont_buf &&
2065 GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DISCONT)) {
2066 gint64 samples, ts;
2067
2068 /* track present position */
2069 ts = dec->priv->base_ts;
2070 samples = dec->priv->samples;
2071
2072 GST_DEBUG_OBJECT (dec, "handling discont");
2073 gst_audio_decoder_flush (dec, FALSE);
2074 dec->priv->discont = TRUE;
2075
2076 /* buffer may claim DISCONT loudly, if it can't tell us where we are now,
2077 * we'll stick to where we were ...
2078 * Particularly useful/needed for upstream BYTE based */
2079 if (dec->input_segment.rate > 0.0
2080 && !GST_BUFFER_TIMESTAMP_IS_VALID (buffer)) {
2081 GST_DEBUG_OBJECT (dec, "... but restoring previous ts tracking");
2082 dec->priv->base_ts = ts;
2083 dec->priv->samples = samples;
2084 }
2085 }
2086 dec->priv->expecting_discont_buf = FALSE;
2087
2088 if (dec->input_segment.rate > 0.0)
2089 ret = gst_audio_decoder_chain_forward (dec, buffer);
2090 else
2091 ret = gst_audio_decoder_chain_reverse (dec, buffer);
2092
2093 GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2094
2095 return ret;
2096
2097 /* ERRORS */
2098 not_negotiated:
2099 {
2100 GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2101 GST_ELEMENT_ERROR (dec, CORE, NEGOTIATION, (NULL),
2102 ("decoder not initialized"));
2103 gst_buffer_unref (buffer);
2104 return GST_FLOW_NOT_NEGOTIATED;
2105 }
2106 }
2107
2108 /* perform upstream byte <-> time conversion (duration, seeking)
2109 * if subclass allows and if enough data for moderately decent conversion */
2110 static inline gboolean
2111 gst_audio_decoder_do_byte (GstAudioDecoder * dec)
2112 {
2113 gboolean ret;
2114
2115 GST_OBJECT_LOCK (dec);
2116 ret = dec->priv->ctx.do_estimate_rate && dec->priv->ctx.info.bpf &&
2117 dec->priv->ctx.info.rate <= dec->priv->samples_out;
2118 GST_OBJECT_UNLOCK (dec);
2119
2120 return ret;
2121 }
2122
2123 /* Must be called holding the GST_AUDIO_DECODER_STREAM_LOCK */
2124 static gboolean
2125 gst_audio_decoder_negotiate_default_caps (GstAudioDecoder * dec)
2126 {
2127 GstCaps *caps, *templcaps;
2128 gint i;
2129 gint channels = 0;
2130 gint rate;
2131 guint64 channel_mask = 0;
2132 gint caps_size;
2133 GstStructure *structure;
2134 GstAudioInfo info;
2135
2136 templcaps = gst_pad_get_pad_template_caps (dec->srcpad);
2137 caps = gst_pad_peer_query_caps (dec->srcpad, templcaps);
2138 if (caps)
2139 gst_caps_unref (templcaps);
2140 else
2141 caps = templcaps;
2142 templcaps = NULL;
2143
2144 if (!caps || gst_caps_is_empty (caps) || gst_caps_is_any (caps))
2145 goto caps_error;
2146
2147 GST_LOG_OBJECT (dec, "peer caps %" GST_PTR_FORMAT, caps);
2148
2149 /* before fixating, try to use whatever upstream provided */
2150 caps = gst_caps_make_writable (caps);
2151 caps_size = gst_caps_get_size (caps);
2152 if (dec->priv->ctx.input_caps) {
2153 GstCaps *sinkcaps = dec->priv->ctx.input_caps;
2154 GstStructure *structure = gst_caps_get_structure (sinkcaps, 0);
2155
2156 if (gst_structure_get_int (structure, "rate", &rate)) {
2157 for (i = 0; i < caps_size; i++) {
2158 gst_structure_set (gst_caps_get_structure (caps, i), "rate",
2159 G_TYPE_INT, rate, NULL);
2160 }
2161 }
2162
2163 if (gst_structure_get_int (structure, "channels", &channels)) {
2164 for (i = 0; i < caps_size; i++) {
2165 gst_structure_set (gst_caps_get_structure (caps, i), "channels",
2166 G_TYPE_INT, channels, NULL);
2167 }
2168 }
2169
2170 if (gst_structure_get (structure, "channel-mask", GST_TYPE_BITMASK,
2171 &channel_mask, NULL)) {
2172 for (i = 0; i < caps_size; i++) {
2173 gst_structure_set (gst_caps_get_structure (caps, i), "channel-mask",
2174 GST_TYPE_BITMASK, channel_mask, NULL);
2175 }
2176 }
2177 }
2178
2179 for (i = 0; i < caps_size; i++) {
2180 structure = gst_caps_get_structure (caps, i);
2181 if (gst_structure_has_field (structure, "channels"))
2182 gst_structure_fixate_field_nearest_int (structure,
2183 "channels", GST_AUDIO_DEF_CHANNELS);
2184 else
2185 gst_structure_set (structure, "channels", G_TYPE_INT,
2186 GST_AUDIO_DEF_CHANNELS, NULL);
2187 if (gst_structure_has_field (structure, "rate"))
2188 gst_structure_fixate_field_nearest_int (structure,
2189 "rate", GST_AUDIO_DEF_RATE);
2190 else
2191 gst_structure_set (structure, "rate", G_TYPE_INT, GST_AUDIO_DEF_RATE,
2192 NULL);
2193 }
2194 caps = gst_caps_fixate (caps);
2195 structure = gst_caps_get_structure (caps, 0);
2196
2197 /* Need to add a channel-mask if channels > 2 */
2198 gst_structure_get_int (structure, "channels", &channels);
2199 if (channels > 2 && !gst_structure_has_field (structure, "channel-mask")) {
2200 channel_mask = gst_audio_channel_get_fallback_mask (channels);
2201 if (channel_mask != 0) {
2202 gst_structure_set (structure, "channel-mask",
2203 GST_TYPE_BITMASK, channel_mask, NULL);
2204 } else {
2205 GST_WARNING_OBJECT (dec, "No default channel-mask for %d channels",
2206 channels);
2207 }
2208 }
2209
2210 if (!caps || !gst_audio_info_from_caps (&info, caps))
2211 goto caps_error;
2212
2213 GST_OBJECT_LOCK (dec);
2214 dec->priv->ctx.info = info;
2215 dec->priv->ctx.caps = caps;
2216 GST_OBJECT_UNLOCK (dec);
2217
2218 GST_INFO_OBJECT (dec,
2219 "Chose default caps %" GST_PTR_FORMAT " for initial gap", caps);
2220
2221 return TRUE;
2222
2223 caps_error:
2224 {
2225 if (caps)
2226 gst_caps_unref (caps);
2227 return FALSE;
2228 }
2229 }
2230
2231 static gboolean
2232 gst_audio_decoder_handle_gap (GstAudioDecoder * dec, GstEvent * event)
2233 {
2234 gboolean ret;
2235 GstClockTime timestamp, duration;
2236 gboolean needs_reconfigure = FALSE;
2237
2238 /* Ensure we have caps first */
2239 GST_AUDIO_DECODER_STREAM_LOCK (dec);
2240 if (!GST_AUDIO_INFO_IS_VALID (&dec->priv->ctx.info)) {
2241 if (!gst_audio_decoder_negotiate_default_caps (dec)) {
2242 GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2243 GST_ELEMENT_ERROR (dec, STREAM, FORMAT, (NULL),
2244 ("Decoder output not negotiated before GAP event."));
2245 gst_event_unref (event);
2246 return FALSE;
2247 }
2248 needs_reconfigure = TRUE;
2249 }
2250 needs_reconfigure = gst_pad_check_reconfigure (dec->srcpad)
2251 || needs_reconfigure;
2252 if (G_UNLIKELY (dec->priv->ctx.output_format_changed || needs_reconfigure)) {
2253 if (!gst_audio_decoder_negotiate_unlocked (dec)) {
2254 GST_WARNING_OBJECT (dec, "Failed to negotiate with downstream");
2255 gst_pad_mark_reconfigure (dec->srcpad);
2256 }
2257 }
2258 GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2259
2260 gst_event_parse_gap (event, &timestamp, &duration);
2261
2262 /* time progressed without data, see if we can fill the gap with
2263 * some concealment data */
2264 GST_DEBUG_OBJECT (dec,
2265 "gap event: plc %d, do_plc %d, position %" GST_TIME_FORMAT
2266 " duration %" GST_TIME_FORMAT,
2267 dec->priv->plc, dec->priv->ctx.do_plc,
2268 GST_TIME_ARGS (timestamp), GST_TIME_ARGS (duration));
2269
2270 if (dec->priv->plc && dec->priv->ctx.do_plc && dec->input_segment.rate > 0.0) {
2271 GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (dec);
2272 GstBuffer *buf;
2273
2274 /* hand subclass empty frame with duration that needs covering */
2275 buf = gst_buffer_new ();
2276 GST_BUFFER_TIMESTAMP (buf) = timestamp;
2277 GST_BUFFER_DURATION (buf) = duration;
2278 /* best effort, not much error handling */
2279 gst_audio_decoder_handle_frame (dec, klass, buf);
2280 ret = TRUE;
2281 dec->priv->expecting_discont_buf = TRUE;
2282 gst_event_unref (event);
2283 } else {
2284 GstFlowReturn flowret;
2285
2286 /* sub-class doesn't know how to handle empty buffers,
2287 * so just try sending GAP downstream */
2288 flowret = check_pending_reconfigure (dec);
2289 if (flowret == GST_FLOW_OK) {
2290 send_pending_events (dec);
2291 ret = gst_audio_decoder_push_event (dec, event);
2292 } else {
2293 ret = FALSE;
2294 gst_event_unref (event);
2295 }
2296 }
2297 return ret;
2298 }
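
/* Example (editorial sketch): when the subclass has declared PLC support
 * (gst_audio_decoder_set_plc_aware()) and the "plc" property is enabled, the
 * code above hands @handle_frame an empty buffer whose duration covers the
 * gap.  A subclass might conceal it roughly as follows; rate, bpf and
 * my_dec_conceal() are hypothetical and codec specific.
 *
 *   if (gst_buffer_get_size (buffer) == 0 &&
 *       GST_BUFFER_DURATION_IS_VALID (buffer)) {
 *     guint samples = gst_util_uint64_scale (GST_BUFFER_DURATION (buffer),
 *         rate, GST_SECOND);
 *     GstBuffer *outbuf =
 *         gst_audio_decoder_allocate_output_buffer (dec, samples * bpf);
 *
 *     my_dec_conceal (outbuf);
 *     return gst_audio_decoder_finish_frame (dec, outbuf, 1);
 *   }
 */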
2299
2300 static GList *
2301 _flush_events (GstPad * pad, GList * events)
2302 {
2303 GList *tmp;
2304
2305 for (tmp = events; tmp; tmp = tmp->next) {
2306 if (GST_EVENT_TYPE (tmp->data) != GST_EVENT_EOS &&
2307 GST_EVENT_TYPE (tmp->data) != GST_EVENT_SEGMENT &&
2308 GST_EVENT_IS_STICKY (tmp->data)) {
2309 gst_pad_store_sticky_event (pad, GST_EVENT_CAST (tmp->data));
2310 }
2311 gst_event_unref (tmp->data);
2312 }
2313 g_list_free (events);
2314
2315 return NULL;
2316 }
2317
2318 static gboolean
2319 gst_audio_decoder_sink_eventfunc (GstAudioDecoder * dec, GstEvent * event)
2320 {
2321 gboolean ret;
2322
2323 switch (GST_EVENT_TYPE (event)) {
2324 case GST_EVENT_STREAM_START:
2325 GST_AUDIO_DECODER_STREAM_LOCK (dec);
2326 /* finish any data in current segment and clear the decoder
2327 * to be ready for new stream data */
2328 gst_audio_decoder_drain (dec);
2329 gst_audio_decoder_flush (dec, FALSE);
2330
2331 GST_DEBUG_OBJECT (dec, "received STREAM_START. Clearing taglist");
2332 /* Flush upstream tags after a STREAM_START */
2333 if (dec->priv->upstream_tags) {
2334 gst_tag_list_unref (dec->priv->upstream_tags);
2335 dec->priv->upstream_tags = NULL;
2336 dec->priv->taglist_changed = TRUE;
2337 }
2338 GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2339
2340 ret = gst_audio_decoder_push_event (dec, event);
2341 break;
2342 case GST_EVENT_SEGMENT:
2343 {
2344 GstSegment seg;
2345 GstFormat format;
2346
2347 GST_AUDIO_DECODER_STREAM_LOCK (dec);
2348 gst_event_copy_segment (event, &seg);
2349
2350 format = seg.format;
2351 if (format == GST_FORMAT_TIME) {
2352 GST_DEBUG_OBJECT (dec, "received TIME SEGMENT %" GST_SEGMENT_FORMAT,
2353 &seg);
2354 } else {
2355 gint64 nstart;
2356 GST_DEBUG_OBJECT (dec, "received SEGMENT %" GST_SEGMENT_FORMAT, &seg);
2357 /* handle newsegment resulting from legacy simple seeking */
2358 /* note that we need to convert this whether or not there is enough data
2359 * to handle the initial newsegment */
2360 if (dec->priv->ctx.do_estimate_rate &&
2361 gst_pad_query_convert (dec->sinkpad, GST_FORMAT_BYTES, seg.start,
2362 GST_FORMAT_TIME, &nstart)) {
2363 /* best attempt convert */
2364 /* as these are only estimates, stop is kept open-ended to avoid
2365 * premature cutting */
2366 GST_DEBUG_OBJECT (dec, "converted to TIME start %" GST_TIME_FORMAT,
2367 GST_TIME_ARGS (nstart));
2368 seg.format = GST_FORMAT_TIME;
2369 seg.start = nstart;
2370 seg.time = nstart;
2371 seg.stop = GST_CLOCK_TIME_NONE;
2372 /* replace event */
2373 gst_event_unref (event);
2374 event = gst_event_new_segment (&seg);
2375 } else {
2376 GST_DEBUG_OBJECT (dec, "unsupported format; ignoring");
2377 GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2378 gst_event_unref (event);
2379 ret = FALSE;
2380 break;
2381 }
2382 }
2383
2384 /* prepare for next segment */
2385 /* Use the segment start as a base timestamp
2386 * in case upstream does not come up with anything better
2387 * (e.g. upstream BYTE) */
2388 if (format != GST_FORMAT_TIME) {
2389 dec->priv->base_ts = seg.start;
2390 dec->priv->samples = 0;
2391 }
2392
2393 /* and follow along with segment */
2394 dec->priv->in_out_segment_sync = FALSE;
2395 dec->input_segment = seg;
2396 dec->priv->pending_events =
2397 g_list_append (dec->priv->pending_events, event);
2398 GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2399
2400 ret = TRUE;
2401 break;
2402 }
2403 case GST_EVENT_GAP:
2404 ret = gst_audio_decoder_handle_gap (dec, event);
2405 break;
2406 case GST_EVENT_FLUSH_STOP:
2407 GST_AUDIO_DECODER_STREAM_LOCK (dec);
2408 /* prepare for fresh start */
2409 gst_audio_decoder_flush (dec, TRUE);
2410
2411 dec->priv->pending_events = _flush_events (dec->srcpad,
2412 dec->priv->pending_events);
2413 GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2414
2415 /* Forward FLUSH_STOP, it is expected to be forwarded immediately
2416 * and no buffers are queued anyway. */
2417 ret = gst_audio_decoder_push_event (dec, event);
2418 break;
2419
2420 case GST_EVENT_SEGMENT_DONE:
2421 GST_AUDIO_DECODER_STREAM_LOCK (dec);
2422 gst_audio_decoder_drain (dec);
2423 GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2424
2425 /* Forward SEGMENT_DONE because no buffer or serialized event might come after
2426 * SEGMENT_DONE and nothing could trigger another _finish_frame() call. */
2427 if (dec->priv->pending_events)
2428 send_pending_events (dec);
2429 ret = gst_audio_decoder_push_event (dec, event);
2430 break;
2431
2432 case GST_EVENT_EOS:
2433 GST_AUDIO_DECODER_STREAM_LOCK (dec);
2434 gst_audio_decoder_drain (dec);
2435 GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2436
2437 if (dec->priv->ctx.had_input_data && !dec->priv->ctx.had_output_data) {
2438 GST_ELEMENT_ERROR (dec, STREAM, DECODE,
2439 ("No valid frames decoded before end of stream"),
2440 ("no valid frames found"));
2441 }
2442
2443 /* Forward EOS because no buffer or serialized event will come after
2444 * EOS and nothing could trigger another _finish_frame() call. */
2445 if (dec->priv->pending_events)
2446 send_pending_events (dec);
2447 ret = gst_audio_decoder_push_event (dec, event);
2448 break;
2449
2450 case GST_EVENT_CAPS:
2451 {
2452 GstCaps *caps;
2453
2454 gst_event_parse_caps (event, &caps);
2455 ret = gst_audio_decoder_sink_setcaps (dec, caps);
2456 gst_event_unref (event);
2457 break;
2458 }
2459 case GST_EVENT_TAG:
2460 {
2461 GstTagList *tags;
2462
2463 gst_event_parse_tag (event, &tags);
2464
2465 if (gst_tag_list_get_scope (tags) == GST_TAG_SCOPE_STREAM) {
2466 GST_AUDIO_DECODER_STREAM_LOCK (dec);
2467 if (dec->priv->upstream_tags != tags) {
2468 if (dec->priv->upstream_tags)
2469 gst_tag_list_unref (dec->priv->upstream_tags);
2470 dec->priv->upstream_tags = gst_tag_list_ref (tags);
2471 GST_INFO_OBJECT (dec, "upstream stream tags: %" GST_PTR_FORMAT, tags);
2472 }
2473 gst_event_unref (event);
2474 event = gst_audio_decoder_create_merged_tags_event (dec);
2475 dec->priv->taglist_changed = FALSE;
2476 GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2477
2478 /* No tags, go out of here instead of fall through */
2479 if (!event) {
2480 ret = TRUE;
2481 break;
2482 }
2483 }
2484
2485 /* fall through */
2486 }
2487 default:
2488 if (!GST_EVENT_IS_SERIALIZED (event)) {
2489 ret =
2490 gst_pad_event_default (dec->sinkpad, GST_OBJECT_CAST (dec), event);
2491 } else {
2492 GST_DEBUG_OBJECT (dec, "Enqueuing event %d, %s", GST_EVENT_TYPE (event),
2493 GST_EVENT_TYPE_NAME (event));
2494 GST_AUDIO_DECODER_STREAM_LOCK (dec);
2495 dec->priv->pending_events =
2496 g_list_append (dec->priv->pending_events, event);
2497 GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
2498 ret = TRUE;
2499 }
2500 break;
2501 }
2502 return ret;
2503 }
2504
2505 static gboolean
2506 gst_audio_decoder_sink_event (GstPad * pad, GstObject * parent,
2507 GstEvent * event)
2508 {
2509 GstAudioDecoder *dec;
2510 GstAudioDecoderClass *klass;
2511 gboolean ret;
2512
2513 dec = GST_AUDIO_DECODER (parent);
2514 klass = GST_AUDIO_DECODER_GET_CLASS (dec);
2515
2516 GST_DEBUG_OBJECT (dec, "received event %d, %s", GST_EVENT_TYPE (event),
2517 GST_EVENT_TYPE_NAME (event));
2518
2519 if (klass->sink_event)
2520 ret = klass->sink_event (dec, event);
2521 else {
2522 gst_event_unref (event);
2523 ret = FALSE;
2524 }
2525 return ret;
2526 }
2527
2528 static gboolean
2529 gst_audio_decoder_do_seek (GstAudioDecoder * dec, GstEvent * event)
2530 {
2531 GstSeekFlags flags;
2532 GstSeekType start_type, end_type;
2533 GstFormat format;
2534 gdouble rate;
2535 gint64 start, start_time, end_time;
2536 GstSegment seek_segment;
2537 guint32 seqnum;
2538
2539 gst_event_parse_seek (event, &rate, &format, &flags, &start_type,
2540 &start_time, &end_type, &end_time);
2541
2542 /* we'll handle plain open-ended flushing seeks with the simple approach */
2543 if (rate != 1.0) {
2544 GST_DEBUG_OBJECT (dec, "unsupported seek: rate");
2545 return FALSE;
2546 }
2547
2548 if (start_type != GST_SEEK_TYPE_SET) {
2549 GST_DEBUG_OBJECT (dec, "unsupported seek: start time");
2550 return FALSE;
2551 }
2552
2553 if ((end_type != GST_SEEK_TYPE_SET && end_type != GST_SEEK_TYPE_NONE) ||
2554 (end_type == GST_SEEK_TYPE_SET && end_time != GST_CLOCK_TIME_NONE)) {
2555 GST_DEBUG_OBJECT (dec, "unsupported seek: end time");
2556 return FALSE;
2557 }
2558
2559 if (!(flags & GST_SEEK_FLAG_FLUSH)) {
2560 GST_DEBUG_OBJECT (dec, "unsupported seek: not flushing");
2561 return FALSE;
2562 }
2563
2564 memcpy (&seek_segment, &dec->output_segment, sizeof (seek_segment));
2565 gst_segment_do_seek (&seek_segment, rate, format, flags, start_type,
2566 start_time, end_type, end_time, NULL);
2567 start_time = seek_segment.position;
2568
2569 if (!gst_pad_query_convert (dec->sinkpad, GST_FORMAT_TIME, start_time,
2570 GST_FORMAT_BYTES, &start)) {
2571 GST_DEBUG_OBJECT (dec, "conversion failed");
2572 return FALSE;
2573 }
2574
2575 seqnum = gst_event_get_seqnum (event);
2576 event = gst_event_new_seek (1.0, GST_FORMAT_BYTES, flags,
2577 GST_SEEK_TYPE_SET, start, GST_SEEK_TYPE_NONE, -1);
2578 gst_event_set_seqnum (event, seqnum);
2579
2580 GST_DEBUG_OBJECT (dec, "seeking to %" GST_TIME_FORMAT " at byte offset %"
2581 G_GINT64_FORMAT, GST_TIME_ARGS (start_time), start);
2582
2583 return gst_pad_push_event (dec->sinkpad, event);
2584 }
2585
2586 static gboolean
2587 gst_audio_decoder_src_eventfunc (GstAudioDecoder * dec, GstEvent * event)
2588 {
2589 gboolean res;
2590
2591 switch (GST_EVENT_TYPE (event)) {
2592 case GST_EVENT_SEEK:
2593 {
2594 GstFormat format;
2595 gdouble rate;
2596 GstSeekFlags flags;
2597 GstSeekType start_type, stop_type;
2598 gint64 start, stop;
2599 gint64 tstart, tstop;
2600 guint32 seqnum;
2601
2602 gst_event_parse_seek (event, &rate, &format, &flags, &start_type, &start,
2603 &stop_type, &stop);
2604 seqnum = gst_event_get_seqnum (event);
2605
2606 /* upstream gets a chance first */
2607 if ((res = gst_pad_push_event (dec->sinkpad, event)))
2608 break;
2609
2610 /* if upstream fails for a time seek, maybe we can help if allowed */
2611 if (format == GST_FORMAT_TIME) {
2612 if (gst_audio_decoder_do_byte (dec))
2613 res = gst_audio_decoder_do_seek (dec, event);
2614 break;
2615 }
2616
2617 /* ... though a non-time seek can be aided as well */
2618 /* First bring the requested format to time */
2619 if (!(res =
2620 gst_pad_query_convert (dec->srcpad, format, start,
2621 GST_FORMAT_TIME, &tstart)))
2622 goto convert_error;
2623 if (!(res =
2624 gst_pad_query_convert (dec->srcpad, format, stop, GST_FORMAT_TIME,
2625 &tstop)))
2626 goto convert_error;
2627
2628 /* then seek with time on the peer */
2629 event = gst_event_new_seek (rate, GST_FORMAT_TIME,
2630 flags, start_type, tstart, stop_type, tstop);
2631 gst_event_set_seqnum (event, seqnum);
2632
2633 res = gst_pad_push_event (dec->sinkpad, event);
2634 break;
2635 }
2636 default:
2637 res = gst_pad_event_default (dec->srcpad, GST_OBJECT_CAST (dec), event);
2638 break;
2639 }
2640 done:
2641 return res;
2642
2643 /* ERRORS */
2644 convert_error:
2645 {
2646 GST_DEBUG_OBJECT (dec, "cannot convert start/stop for seek");
2647 goto done;
2648 }
2649 }
2650
2651 static gboolean
2652 gst_audio_decoder_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
2653 {
2654 GstAudioDecoder *dec;
2655 GstAudioDecoderClass *klass;
2656 gboolean ret;
2657
2658 dec = GST_AUDIO_DECODER (parent);
2659 klass = GST_AUDIO_DECODER_GET_CLASS (dec);
2660
2661 GST_DEBUG_OBJECT (dec, "received event %d, %s", GST_EVENT_TYPE (event),
2662 GST_EVENT_TYPE_NAME (event));
2663
2664 if (klass->src_event)
2665 ret = klass->src_event (dec, event);
2666 else {
2667 gst_event_unref (event);
2668 ret = FALSE;
2669 }
2670
2671 return ret;
2672 }
2673
2674 static gboolean
2675 gst_audio_decoder_decide_allocation_default (GstAudioDecoder * dec,
2676 GstQuery * query)
2677 {
2678 GstAllocator *allocator = NULL;
2679 GstAllocationParams params;
2680 gboolean update_allocator;
2681
2682 /* we got configuration from our peer or the decide_allocation method,
2683 * parse them */
2684 if (gst_query_get_n_allocation_params (query) > 0) {
2685 /* try the allocator */
2686 gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
2687 update_allocator = TRUE;
2688 } else {
2689 allocator = NULL;
2690 gst_allocation_params_init (&params);
2691 update_allocator = FALSE;
2692 }
2693
2694 if (update_allocator)
2695 gst_query_set_nth_allocation_param (query, 0, allocator, &params);
2696 else
2697 gst_query_add_allocation_param (query, allocator, &params);
2698 if (allocator)
2699 gst_object_unref (allocator);
2700
2701 return TRUE;
2702 }
2703
2704 static gboolean
2705 gst_audio_decoder_propose_allocation_default (GstAudioDecoder * dec,
2706 GstQuery * query)
2707 {
2708 return TRUE;
2709 }
2710
2711 /**
2712 * gst_audio_decoder_proxy_getcaps:
2713 * @decoder: a #GstAudioDecoder
2714 * @caps: (allow-none): initial caps
2715 * @filter: (allow-none): filter caps
2716 *
2717 * Returns caps that express @caps (or sink template caps if @caps == NULL)
2718 * restricted to rate/channels/... combinations supported by downstream
2719 * elements.
2720 *
2721 * Returns: (transfer full): a #GstCaps owned by caller
2722 *
2723 * Since: 1.6
2724 */
2725 GstCaps *
2726 gst_audio_decoder_proxy_getcaps (GstAudioDecoder * decoder, GstCaps * caps,
2727 GstCaps * filter)
2728 {
2729 return __gst_audio_element_proxy_getcaps (GST_ELEMENT_CAST (decoder),
2730 GST_AUDIO_DECODER_SINK_PAD (decoder),
2731 GST_AUDIO_DECODER_SRC_PAD (decoder), caps, filter);
2732 }
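
/* Example (editorial sketch): a subclass with codec-specific input
 * restrictions can funnel them through the proxy above from its @getcaps
 * vmethod.  MY_DEC_SINK_CAPS and my_dec_getcaps() are hypothetical names.
 *
 *   static GstCaps *
 *   my_dec_getcaps (GstAudioDecoder * dec, GstCaps * filter)
 *   {
 *     GstCaps *codec_caps = gst_caps_from_string (MY_DEC_SINK_CAPS);
 *     GstCaps *res = gst_audio_decoder_proxy_getcaps (dec, codec_caps, filter);
 *
 *     gst_caps_unref (codec_caps);
 *     return res;
 *   }
 */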
2733
2734 static GstCaps *
2735 gst_audio_decoder_sink_getcaps (GstAudioDecoder * decoder, GstCaps * filter)
2736 {
2737 GstAudioDecoderClass *klass;
2738 GstCaps *caps;
2739
2740 klass = GST_AUDIO_DECODER_GET_CLASS (decoder);
2741
2742 if (klass->getcaps)
2743 caps = klass->getcaps (decoder, filter);
2744 else
2745 caps = gst_audio_decoder_proxy_getcaps (decoder, NULL, filter);
2746
2747 GST_LOG_OBJECT (decoder, "Returning caps %" GST_PTR_FORMAT, caps);
2748
2749 return caps;
2750 }
2751
2752 static gboolean
2753 gst_audio_decoder_sink_query_default (GstAudioDecoder * dec, GstQuery * query)
2754 {
2755 GstPad *pad = GST_AUDIO_DECODER_SINK_PAD (dec);
2756 gboolean res = FALSE;
2757
2758 GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query);
2759
2760 switch (GST_QUERY_TYPE (query)) {
2761 case GST_QUERY_FORMATS:
2762 {
2763 gst_query_set_formats (query, 2, GST_FORMAT_TIME, GST_FORMAT_BYTES);
2764 res = TRUE;
2765 break;
2766 }
2767 case GST_QUERY_CONVERT:
2768 {
2769 GstFormat src_fmt, dest_fmt;
2770 gint64 src_val, dest_val;
2771
2772 gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
2773 GST_OBJECT_LOCK (dec);
2774 res = __gst_audio_encoded_audio_convert (&dec->priv->ctx.info,
2775 dec->priv->bytes_in, dec->priv->samples_out,
2776 src_fmt, src_val, &dest_fmt, &dest_val);
2777 GST_OBJECT_UNLOCK (dec);
2778 if (!res)
2779 goto error;
2780 gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
2781 break;
2782 }
2783 case GST_QUERY_ALLOCATION:
2784 {
2785 GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (dec);
2786
2787 if (klass->propose_allocation)
2788 res = klass->propose_allocation (dec, query);
2789 break;
2790 }
2791 case GST_QUERY_CAPS:{
2792 GstCaps *filter, *caps;
2793
2794 gst_query_parse_caps (query, &filter);
2795 caps = gst_audio_decoder_sink_getcaps (dec, filter);
2796 gst_query_set_caps_result (query, caps);
2797 gst_caps_unref (caps);
2798 res = TRUE;
2799 break;
2800 }
2801 case GST_QUERY_ACCEPT_CAPS:{
2802 if (dec->priv->use_default_pad_acceptcaps) {
2803 res =
2804 gst_pad_query_default (GST_AUDIO_DECODER_SINK_PAD (dec),
2805 GST_OBJECT_CAST (dec), query);
2806 } else {
2807 GstCaps *caps;
2808 GstCaps *allowed_caps;
2809 GstCaps *template_caps;
2810 gboolean accept;
2811
2812 gst_query_parse_accept_caps (query, &caps);
2813
2814 template_caps = gst_pad_get_pad_template_caps (pad);
2815 accept = gst_caps_is_subset (caps, template_caps);
2816 gst_caps_unref (template_caps);
2817
2818 if (accept) {
2819 allowed_caps = gst_pad_query_caps (GST_AUDIO_DECODER_SINK_PAD (dec),
2820 caps);
2821
2822 accept = gst_caps_can_intersect (caps, allowed_caps);
2823
2824 gst_caps_unref (allowed_caps);
2825 }
2826
2827 gst_query_set_accept_caps_result (query, accept);
2828 res = TRUE;
2829 }
2830 break;
2831 }
2832 case GST_QUERY_SEEKING:
2833 {
2834 GstFormat format;
2835
2836 /* non-TIME segments are discarded, so we won't seek that way either */
2837 gst_query_parse_seeking (query, &format, NULL, NULL, NULL);
2838 if (format != GST_FORMAT_TIME) {
2839 GST_DEBUG_OBJECT (dec, "discarding non-TIME SEEKING query");
2840 res = FALSE;
2841 break;
2842 }
2843 /* fall-through */
2844 }
2845 default:
2846 res = gst_pad_query_default (pad, GST_OBJECT_CAST (dec), query);
2847 break;
2848 }
2849
2850 error:
2851 return res;
2852 }
2853
2854 static gboolean
2855 gst_audio_decoder_sink_query (GstPad * pad, GstObject * parent,
2856 GstQuery * query)
2857 {
2858 GstAudioDecoderClass *dec_class;
2859 GstAudioDecoder *dec;
2860 gboolean ret = FALSE;
2861
2862 dec = GST_AUDIO_DECODER (parent);
2863 dec_class = GST_AUDIO_DECODER_GET_CLASS (dec);
2864
2865 GST_DEBUG_OBJECT (pad, "received query %" GST_PTR_FORMAT, query);
2866
2867 if (dec_class->sink_query)
2868 ret = dec_class->sink_query (dec, query);
2869
2870 return ret;
2871 }
2872
2873 /* FIXME ? are any of these queries (other than latency) a decoder's business ??
2874 * also, the conversion stuff might seem to make sense, but seems to not mind
2875 * segment stuff etc at all
2876 * Supposedly that's backward compatibility ... */
2877 static gboolean
2878 gst_audio_decoder_src_query_default (GstAudioDecoder * dec, GstQuery * query)
2879 {
2880 GstPad *pad = GST_AUDIO_DECODER_SRC_PAD (dec);
2881 gboolean res = FALSE;
2882
2883 GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query);
2884
2885 switch (GST_QUERY_TYPE (query)) {
2886 case GST_QUERY_DURATION:
2887 {
2888 GstFormat format;
2889
2890 /* upstream in any case */
2891 if ((res = gst_pad_query_default (pad, GST_OBJECT_CAST (dec), query)))
2892 break;
2893
2894 gst_query_parse_duration (query, &format, NULL);
2895 /* try answering TIME by converting from BYTE if subclass allows */
2896 if (format == GST_FORMAT_TIME && gst_audio_decoder_do_byte (dec)) {
2897 gint64 value;
2898
2899 if (gst_pad_peer_query_duration (dec->sinkpad, GST_FORMAT_BYTES,
2900 &value)) {
2901 GST_LOG_OBJECT (dec, "upstream size %" G_GINT64_FORMAT, value);
2902 if (gst_pad_query_convert (dec->sinkpad, GST_FORMAT_BYTES, value,
2903 GST_FORMAT_TIME, &value)) {
2904 gst_query_set_duration (query, GST_FORMAT_TIME, value);
2905 res = TRUE;
2906 }
2907 }
2908 }
2909 break;
2910 }
2911 case GST_QUERY_POSITION:
2912 {
2913 GstFormat format;
2914 gint64 time, value;
2915
2916 if ((res = gst_pad_peer_query (dec->sinkpad, query))) {
2917 GST_LOG_OBJECT (dec, "returning peer response");
2918 break;
2919 }
2920
2921 /* Refuse BYTES format queries. If it made sense to
2922 * answer them, upstream would have already */
2923 gst_query_parse_position (query, &format, NULL);
2924
2925 if (format == GST_FORMAT_BYTES) {
2926 GST_LOG_OBJECT (dec, "Ignoring BYTES position query");
2927 break;
2928 }
2929
2930 /* we start from the last seen time */
2931 time = dec->output_segment.position;
2932 /* correct for the segment values */
2933 time =
2934 gst_segment_to_stream_time (&dec->output_segment, GST_FORMAT_TIME,
2935 time);
2936
2937 GST_LOG_OBJECT (dec,
2938 "query %p: our time: %" GST_TIME_FORMAT, query, GST_TIME_ARGS (time));
2939
2940 /* and convert to the final format */
2941 if (!(res = gst_pad_query_convert (pad, GST_FORMAT_TIME, time,
2942 format, &value)))
2943 break;
2944
2945 gst_query_set_position (query, format, value);
2946
2947 GST_LOG_OBJECT (dec,
2948 "query %p: we return %" G_GINT64_FORMAT " (format %u)", query, value,
2949 format);
2950 break;
2951 }
2952 case GST_QUERY_FORMATS:
2953 {
2954 gst_query_set_formats (query, 3,
2955 GST_FORMAT_TIME, GST_FORMAT_BYTES, GST_FORMAT_DEFAULT);
2956 res = TRUE;
2957 break;
2958 }
2959 case GST_QUERY_CONVERT:
2960 {
2961 GstFormat src_fmt, dest_fmt;
2962 gint64 src_val, dest_val;
2963
2964 gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
2965 GST_OBJECT_LOCK (dec);
2966 res = gst_audio_info_convert (&dec->priv->ctx.info,
2967 src_fmt, src_val, dest_fmt, &dest_val);
2968 GST_OBJECT_UNLOCK (dec);
2969 if (!res)
2970 break;
2971 gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
2972 break;
2973 }
2974 case GST_QUERY_LATENCY:
2975 {
2976 if ((res = gst_pad_peer_query (dec->sinkpad, query))) {
2977 gboolean live;
2978 GstClockTime min_latency, max_latency;
2979
2980 gst_query_parse_latency (query, &live, &min_latency, &max_latency);
2981 GST_DEBUG_OBJECT (dec, "Peer latency: live %d, min %"
2982 GST_TIME_FORMAT " max %" GST_TIME_FORMAT, live,
2983 GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));
2984
2985 GST_OBJECT_LOCK (dec);
2986 /* add our latency */
2987 min_latency += dec->priv->ctx.min_latency;
2988 if (max_latency == -1 || dec->priv->ctx.max_latency == -1)
2989 max_latency = -1;
2990 else
2991 max_latency += dec->priv->ctx.max_latency;
2992 GST_OBJECT_UNLOCK (dec);
2993
2994 gst_query_set_latency (query, live, min_latency, max_latency);
2995 }
2996 break;
2997 }
2998 default:
2999 res = gst_pad_query_default (pad, GST_OBJECT_CAST (dec), query);
3000 break;
3001 }
3002
3003 return res;
3004 }
3005
3006 static gboolean
3007 gst_audio_decoder_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
3008 {
3009 GstAudioDecoder *dec;
3010 GstAudioDecoderClass *dec_class;
3011 gboolean ret = FALSE;
3012
3013 dec = GST_AUDIO_DECODER (parent);
3014 dec_class = GST_AUDIO_DECODER_GET_CLASS (dec);
3015
3016 GST_DEBUG_OBJECT (pad, "received query %" GST_PTR_FORMAT, query);
3017
3018 if (dec_class->src_query)
3019 ret = dec_class->src_query (dec, query);
3020
3021 return ret;
3022 }
3023
3024 static gboolean
3025 gst_audio_decoder_stop (GstAudioDecoder * dec)
3026 {
3027 GstAudioDecoderClass *klass;
3028 gboolean ret = TRUE;
3029
3030 GST_DEBUG_OBJECT (dec, "gst_audio_decoder_stop");
3031
3032 klass = GST_AUDIO_DECODER_GET_CLASS (dec);
3033
3034 if (klass->stop) {
3035 ret = klass->stop (dec);
3036 }
3037
3038 /* clean up */
3039 gst_audio_decoder_reset (dec, TRUE);
3040
3041 if (ret)
3042 dec->priv->active = FALSE;
3043
3044 return ret;
3045 }
3046
3047 static gboolean
3048 gst_audio_decoder_start (GstAudioDecoder * dec)
3049 {
3050 GstAudioDecoderClass *klass;
3051 gboolean ret = TRUE;
3052
3053 GST_DEBUG_OBJECT (dec, "gst_audio_decoder_start");
3054
3055 klass = GST_AUDIO_DECODER_GET_CLASS (dec);
3056
3057 /* arrange clean state */
3058 gst_audio_decoder_reset (dec, TRUE);
3059
3060 if (klass->start) {
3061 ret = klass->start (dec);
3062 }
3063
3064 if (ret)
3065 dec->priv->active = TRUE;
3066
3067 return ret;
3068 }
3069
3070 static void
3071 gst_audio_decoder_get_property (GObject * object, guint prop_id,
3072 GValue * value, GParamSpec * pspec)
3073 {
3074 GstAudioDecoder *dec;
3075
3076 dec = GST_AUDIO_DECODER (object);
3077
3078 switch (prop_id) {
3079 case PROP_LATENCY:
3080 g_value_set_int64 (value, dec->priv->latency);
3081 break;
3082 case PROP_TOLERANCE:
3083 g_value_set_int64 (value, dec->priv->tolerance);
3084 break;
3085 case PROP_PLC:
3086 g_value_set_boolean (value, dec->priv->plc);
3087 break;
3088 default:
3089 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
3090 break;
3091 }
3092 }
3093
3094 static void
3095 gst_audio_decoder_set_property (GObject * object, guint prop_id,
3096 const GValue * value, GParamSpec * pspec)
3097 {
3098 GstAudioDecoder *dec;
3099
3100 dec = GST_AUDIO_DECODER (object);
3101
3102 switch (prop_id) {
3103 case PROP_LATENCY:
3104 dec->priv->latency = g_value_get_int64 (value);
3105 break;
3106 case PROP_TOLERANCE:
3107 dec->priv->tolerance = g_value_get_int64 (value);
3108 break;
3109 case PROP_PLC:
3110 dec->priv->plc = g_value_get_boolean (value);
3111 break;
3112 default:
3113 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
3114 break;
3115 }
3116 }
3117
3118 static GstStateChangeReturn
3119 gst_audio_decoder_change_state (GstElement * element, GstStateChange transition)
3120 {
3121 GstAudioDecoder *codec;
3122 GstAudioDecoderClass *klass;
3123 GstStateChangeReturn ret;
3124
3125 codec = GST_AUDIO_DECODER (element);
3126 klass = GST_AUDIO_DECODER_GET_CLASS (codec);
3127
3128 switch (transition) {
3129 case GST_STATE_CHANGE_NULL_TO_READY:
3130 if (klass->open) {
3131 if (!klass->open (codec))
3132 goto open_failed;
3133 }
3134 break;
3135 case GST_STATE_CHANGE_READY_TO_PAUSED:
3136 if (!gst_audio_decoder_start (codec)) {
3137 goto start_failed;
3138 }
3139 break;
3140 case GST_STATE_CHANGE_PAUSED_TO_PLAYING:
3141 break;
3142 default:
3143 break;
3144 }
3145
3146 ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
3147
3148 switch (transition) {
3149 case GST_STATE_CHANGE_PLAYING_TO_PAUSED:
3150 break;
3151 case GST_STATE_CHANGE_PAUSED_TO_READY:
3152 if (!gst_audio_decoder_stop (codec)) {
3153 goto stop_failed;
3154 }
3155 break;
3156 case GST_STATE_CHANGE_READY_TO_NULL:
3157 if (klass->close) {
3158 if (!klass->close (codec))
3159 goto close_failed;
3160 }
3161 break;
3162 default:
3163 break;
3164 }
3165
3166 return ret;
3167
3168 start_failed:
3169 {
3170 GST_ELEMENT_ERROR (codec, LIBRARY, INIT, (NULL), ("Failed to start codec"));
3171 return GST_STATE_CHANGE_FAILURE;
3172 }
3173 stop_failed:
3174 {
3175 GST_ELEMENT_ERROR (codec, LIBRARY, INIT, (NULL), ("Failed to stop codec"));
3176 return GST_STATE_CHANGE_FAILURE;
3177 }
3178 open_failed:
3179 {
3180 GST_ELEMENT_ERROR (codec, LIBRARY, INIT, (NULL), ("Failed to open codec"));
3181 return GST_STATE_CHANGE_FAILURE;
3182 }
3183 close_failed:
3184 {
3185 GST_ELEMENT_ERROR (codec, LIBRARY, INIT, (NULL), ("Failed to close codec"));
3186 return GST_STATE_CHANGE_FAILURE;
3187 }
3188 }
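
/* Example (editorial sketch): the NULL<->READY transitions above invoke the
 * optional @open and @close vmethods, intended for resources that do not
 * depend on the actual stream format.  my_codec_library_init()/deinit() are
 * hypothetical library calls.
 *
 *   static gboolean
 *   my_dec_open (GstAudioDecoder * dec)
 *   {
 *     return my_codec_library_init () == 0;
 *   }
 *
 *   static gboolean
 *   my_dec_close (GstAudioDecoder * dec)
 *   {
 *     my_codec_library_deinit ();
 *     return TRUE;
 *   }
 */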
3189
3190 GstFlowReturn
3191 _gst_audio_decoder_error (GstAudioDecoder * dec, gint weight,
3192 GQuark domain, gint code, gchar * txt, gchar * dbg, const gchar * file,
3193 const gchar * function, gint line)
3194 {
3195 if (txt)
3196 GST_WARNING_OBJECT (dec, "error: %s", txt);
3197 if (dbg)
3198 GST_WARNING_OBJECT (dec, "error: %s", dbg);
3199 dec->priv->error_count += weight;
3200 dec->priv->discont = TRUE;
3201 if (dec->priv->ctx.max_errors >= 0
3202 && dec->priv->ctx.max_errors < dec->priv->error_count) {
3203 gst_element_message_full (GST_ELEMENT (dec), GST_MESSAGE_ERROR, domain,
3204 code, txt, dbg, file, function, line);
3205 return GST_FLOW_ERROR;
3206 } else {
3207 g_free (txt);
3208 g_free (dbg);
3209 return GST_FLOW_OK;
3210 }
3211 }
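
/* Example (editorial sketch): subclasses normally report decode errors via
 * the GST_AUDIO_DECODER_ERROR macro from the header rather than calling
 * _gst_audio_decoder_error() directly; the macro fills in file/function/line
 * and stores the resulting flow return.  Typical use inside @handle_frame
 * when a single frame fails to decode (the exact recovery policy is up to
 * the subclass):
 *
 *   GstFlowReturn ret;
 *
 *   GST_AUDIO_DECODER_ERROR (dec, 1, STREAM, DECODE, (NULL),
 *       ("failed to decode frame"), ret);
 *   if (ret != GST_FLOW_OK)
 *     return ret;
 *   return gst_audio_decoder_finish_frame (dec, NULL, 1);
 */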
3212
3213 /**
3214 * gst_audio_decoder_get_audio_info:
3215 * @dec: a #GstAudioDecoder
3216 *
3217 * Returns: a #GstAudioInfo describing the decoder's current output audio format
3218 */
3219 GstAudioInfo *
3220 gst_audio_decoder_get_audio_info (GstAudioDecoder * dec)
3221 {
3222 g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), NULL);
3223
3224 return &dec->priv->ctx.info;
3225 }
3226
3227 /**
3228 * gst_audio_decoder_set_plc_aware:
3229 * @dec: a #GstAudioDecoder
3230 * @plc: new plc state
3231 *
3232 * Indicates whether or not subclass handles packet loss concealment (plc).
3233 */
3234 void
3235 gst_audio_decoder_set_plc_aware (GstAudioDecoder * dec, gboolean plc)
3236 {
3237 g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3238
3239 dec->priv->ctx.do_plc = plc;
3240 }
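
/* Example (editorial sketch): context flags such as PLC awareness are
 * usually configured once, e.g. from the subclass instance init or @start
 * vmethod.  Which of these a given codec can honour is codec specific.
 *
 *   static void
 *   my_dec_init (MyDec * self)
 *   {
 *     GstAudioDecoder *dec = GST_AUDIO_DECODER (self);
 *
 *     gst_audio_decoder_set_plc_aware (dec, TRUE);
 *     gst_audio_decoder_set_estimate_rate (dec, TRUE);
 *     gst_audio_decoder_set_drainable (dec, TRUE);
 *   }
 */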
3241
3242 /**
3243 * gst_audio_decoder_get_plc_aware:
3244 * @dec: a #GstAudioDecoder
3245 *
3246 * Returns: currently configured plc handling
3247 */
3248 gint
3249 gst_audio_decoder_get_plc_aware (GstAudioDecoder * dec)
3250 {
3251 g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0);
3252
3253 return dec->priv->ctx.do_plc;
3254 }
3255
3256 /**
3257 * gst_audio_decoder_set_estimate_rate:
3258 * @dec: a #GstAudioDecoder
3259 * @enabled: whether to enable byte to time conversion
3260 *
3261 * Allows the base class to perform estimated byte-to-time conversion.
3262 */
3263 void
3264 gst_audio_decoder_set_estimate_rate (GstAudioDecoder * dec, gboolean enabled)
3265 {
3266 g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3267
3268 dec->priv->ctx.do_estimate_rate = enabled;
3269 }
3270
3271 /**
3272 * gst_audio_decoder_get_estimate_rate:
3273 * @dec: a #GstAudioDecoder
3274 *
3275 * Returns: currently configured byte to time conversion setting
3276 */
3277 gint
3278 gst_audio_decoder_get_estimate_rate (GstAudioDecoder * dec)
3279 {
3280 g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0);
3281
3282 return dec->priv->ctx.do_estimate_rate;
3283 }
3284
3285 /**
3286 * gst_audio_decoder_get_delay:
3287 * @dec: a #GstAudioDecoder
3288 *
3289 * Returns: currently configured decoder delay
3290 */
3291 gint
3292 gst_audio_decoder_get_delay (GstAudioDecoder * dec)
3293 {
3294 g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0);
3295
3296 return dec->priv->ctx.delay;
3297 }
3298
3299 /**
3300 * gst_audio_decoder_set_max_errors:
3301 * @dec: a #GstAudioDecoder
3302 * @num: max tolerated errors
3303 *
3304 * Sets the number of tolerated decoder errors. A tolerated error is only
3305 * warned about, but exceeding the configured count leads to a fatal error.
3306 * You can set -1 to never return a fatal error. Default is
3307 * GST_AUDIO_DECODER_MAX_ERRORS.
3308 */
3309 void
3310 gst_audio_decoder_set_max_errors (GstAudioDecoder * dec, gint num)
3311 {
3312 g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3313
3314 dec->priv->ctx.max_errors = num;
3315 }
3316
3317 /**
3318 * gst_audio_decoder_get_max_errors:
3319 * @dec: a #GstAudioDecoder
3320 *
3321 * Returns: currently configured decoder tolerated error count.
3322 */
3323 gint
3324 gst_audio_decoder_get_max_errors (GstAudioDecoder * dec)
3325 {
3326 g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0);
3327
3328 return dec->priv->ctx.max_errors;
3329 }
3330
3331 /**
3332 * gst_audio_decoder_set_latency:
3333 * @dec: a #GstAudioDecoder
3334 * @min: minimum latency
3335 * @max: maximum latency
3336 *
3337 * Sets decoder latency.
3338 */
3339 void
3340 gst_audio_decoder_set_latency (GstAudioDecoder * dec,
3341 GstClockTime min, GstClockTime max)
3342 {
3343 g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3344 g_return_if_fail (GST_CLOCK_TIME_IS_VALID (min));
3345 g_return_if_fail (min <= max);
3346
3347 GST_OBJECT_LOCK (dec);
3348 dec->priv->ctx.min_latency = min;
3349 dec->priv->ctx.max_latency = max;
3350 GST_OBJECT_UNLOCK (dec);
3351
3352 /* post latency message on the bus */
3353 gst_element_post_message (GST_ELEMENT (dec),
3354 gst_message_new_latency (GST_OBJECT (dec)));
3355 }
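
/* Example (editorial sketch): decoders with algorithmic delay typically
 * report it from @set_format once the sample rate is known; delay_samples
 * and rate are hypothetical values taken from the codec setup.
 *
 *   GstClockTime latency =
 *       gst_util_uint64_scale (delay_samples, GST_SECOND, rate);
 *
 *   gst_audio_decoder_set_latency (dec, latency, latency);
 */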
3356
3357 /**
3358 * gst_audio_decoder_get_latency:
3359 * @dec: a #GstAudioDecoder
3360 * @min: (out) (allow-none): a pointer to storage to hold minimum latency
3361 * @max: (out) (allow-none): a pointer to storage to hold maximum latency
3362 *
3363 * Sets the variables pointed to by @min and @max to the currently configured
3364 * latency.
3365 */
3366 void
3367 gst_audio_decoder_get_latency (GstAudioDecoder * dec,
3368 GstClockTime * min, GstClockTime * max)
3369 {
3370 g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3371
3372 GST_OBJECT_LOCK (dec);
3373 if (min)
3374 *min = dec->priv->ctx.min_latency;
3375 if (max)
3376 *max = dec->priv->ctx.max_latency;
3377 GST_OBJECT_UNLOCK (dec);
3378 }
3379
3380 /**
3381 * gst_audio_decoder_get_parse_state:
3382 * @dec: a #GstAudioDecoder
3383 * @sync: (out) (optional): a pointer to a variable to hold the current sync state
3384 * @eos: (out) (optional): a pointer to a variable to hold the current eos state
3385 *
3386 * Return current parsing (sync and eos) state.
3387 */
3388 void
3389 gst_audio_decoder_get_parse_state (GstAudioDecoder * dec,
3390 gboolean * sync, gboolean * eos)
3391 {
3392 g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3393
3394 if (sync)
3395 *sync = dec->priv->ctx.sync;
3396 if (eos)
3397 *eos = dec->priv->ctx.eos;
3398 }
3399
3400 /**
3401 * gst_audio_decoder_set_allocation_caps:
3402 * @dec: a #GstAudioDecoder
3403 * @allocation_caps: (allow-none): a #GstCaps or %NULL
3404 *
3405 * Sets the caps to use in the allocation query, which may differ from the
3406 * pad's caps. Use this function before calling
3407 * gst_audio_decoder_negotiate(). If set to %NULL, the allocation
3408 * query will use the caps from the pad.
3409 *
3410 * Since: 1.10
3411 */
3412 void
3413 gst_audio_decoder_set_allocation_caps (GstAudioDecoder * dec,
3414 GstCaps * allocation_caps)
3415 {
3416 g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3417
3418 gst_caps_replace (&dec->priv->ctx.allocation_caps, allocation_caps);
3419 }
3420
3421 /**
3422 * gst_audio_decoder_set_plc:
3423 * @dec: a #GstAudioDecoder
3424 * @enabled: new state
3425 *
3426 * Enable or disable decoder packet loss concealment, provided subclass
3427 * and codec are capable and allow handling plc.
3428 *
3429 * MT safe.
3430 */
3431 void
3432 gst_audio_decoder_set_plc (GstAudioDecoder * dec, gboolean enabled)
3433 {
3434 g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
3435
3436 GST_LOG_OBJECT (dec, "enabled: %d", enabled);
3437
3438 GST_OBJECT_LOCK (dec);
3439 dec->priv->plc = enabled;
3440 GST_OBJECT_UNLOCK (dec);
3441 }
3442
3443 /**
3444 * gst_audio_decoder_get_plc:
3445 * @dec: a #GstAudioDecoder
3446 *
3447 * Queries decoder packet loss concealment handling.
3448 *
3449 * Returns: TRUE if packet loss concealment is enabled.
3450 *
3451 * MT safe.
3452 */
3453 gboolean
3454 gst_audio_decoder_get_plc (GstAudioDecoder * dec)
3455 {
3456 gboolean result;
3457
3458 g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
3459
3460 GST_OBJECT_LOCK (dec);
3461 result = dec->priv->plc;
3462 GST_OBJECT_UNLOCK (dec);
3463
3464 return result;
3465 }
3466
/**
 * gst_audio_decoder_set_min_latency:
 * @dec: a #GstAudioDecoder
 * @num: new minimum latency
 *
 * Sets decoder minimum aggregation latency.
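 *
 * For example (sketch; the value is illustrative):
 *
 * |[<!-- language="C" -->
 * // aggregate output into chunks of at least 50 ms before pushing
 * gst_audio_decoder_set_min_latency (dec, 50 * GST_MSECOND);
 * ]|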
 *
 * MT safe.
 */
void
gst_audio_decoder_set_min_latency (GstAudioDecoder * dec, GstClockTime num)
{
  g_return_if_fail (GST_IS_AUDIO_DECODER (dec));

  GST_OBJECT_LOCK (dec);
  dec->priv->latency = num;
  GST_OBJECT_UNLOCK (dec);
}

/**
 * gst_audio_decoder_get_min_latency:
 * @dec: a #GstAudioDecoder
 *
 * Queries decoder's latency aggregation.
 *
 * Returns: aggregation latency.
 *
 * MT safe.
 */
GstClockTime
gst_audio_decoder_get_min_latency (GstAudioDecoder * dec)
{
  GstClockTime result;

  g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0);

  GST_OBJECT_LOCK (dec);
  result = dec->priv->latency;
  GST_OBJECT_UNLOCK (dec);

  return result;
}

/**
 * gst_audio_decoder_set_tolerance:
 * @dec: a #GstAudioDecoder
 * @tolerance: new tolerance
 *
 * Configures decoder audio jitter tolerance threshold.
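 *
 * For example (sketch; the value is illustrative):
 *
 * |[<!-- language="C" -->
 * // keep producing perfect timestamps while jitter stays below 20 ms
 * gst_audio_decoder_set_tolerance (dec, 20 * GST_MSECOND);
 * ]|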
 *
 * MT safe.
 */
void
gst_audio_decoder_set_tolerance (GstAudioDecoder * dec, GstClockTime tolerance)
{
  g_return_if_fail (GST_IS_AUDIO_DECODER (dec));

  GST_OBJECT_LOCK (dec);
  dec->priv->tolerance = tolerance;
  GST_OBJECT_UNLOCK (dec);
}

/**
 * gst_audio_decoder_get_tolerance:
 * @dec: a #GstAudioDecoder
 *
 * Queries current audio jitter tolerance threshold.
 *
 * Returns: decoder audio jitter tolerance threshold.
 *
 * MT safe.
 */
GstClockTime
gst_audio_decoder_get_tolerance (GstAudioDecoder * dec)
{
  GstClockTime result;

  g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0);

  GST_OBJECT_LOCK (dec);
  result = dec->priv->tolerance;
  GST_OBJECT_UNLOCK (dec);

  return result;
}

/**
 * gst_audio_decoder_set_drainable:
 * @dec: a #GstAudioDecoder
 * @enabled: new state
 *
 * Configures decoder drain handling. If drainable, subclass might
 * be handed a NULL buffer to have it return any leftover decoded data.
 * Otherwise, it is not considered so capable and will only ever be passed
 * real data.
 *
 * MT safe.
 */
void
gst_audio_decoder_set_drainable (GstAudioDecoder * dec, gboolean enabled)
{
  g_return_if_fail (GST_IS_AUDIO_DECODER (dec));

  GST_OBJECT_LOCK (dec);
  dec->priv->drainable = enabled;
  GST_OBJECT_UNLOCK (dec);
}

/**
 * gst_audio_decoder_get_drainable:
 * @dec: a #GstAudioDecoder
 *
 * Queries decoder drain handling.
 *
 * Returns: TRUE if drainable handling is enabled.
 *
 * MT safe.
 */
gboolean
gst_audio_decoder_get_drainable (GstAudioDecoder * dec)
{
  gboolean result;

  g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);

  GST_OBJECT_LOCK (dec);
  result = dec->priv->drainable;
  GST_OBJECT_UNLOCK (dec);

  return result;
}

/**
 * gst_audio_decoder_set_needs_format:
 * @dec: a #GstAudioDecoder
 * @enabled: new state
 *
 * Configures decoder format needs. If enabled, subclass needs to be
 * negotiated with format caps before it can process any data. It will then
 * never be handed any data before it has been configured.
 * Otherwise, it might be handed data without having been configured and
 * is then expected to be able to decode it either by default
 * or based on the input data.
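 *
 * A hypothetical subclass that cannot decode anything until caps carrying
 * the codec headers have been received might configure itself like this in
 * its instance init function (illustrative sketch; the my_dec names are not
 * part of the library):
 *
 * |[<!-- language="C" -->
 * static void
 * my_dec_init (MyDec * self)
 * {
 *   GstAudioDecoder *dec = GST_AUDIO_DECODER (self);
 *
 *   gst_audio_decoder_set_needs_format (dec, TRUE);
 *   // this codec also cannot be drained with a NULL frame
 *   gst_audio_decoder_set_drainable (dec, FALSE);
 * }
 * ]|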
 *
 * MT safe.
 */
void
gst_audio_decoder_set_needs_format (GstAudioDecoder * dec, gboolean enabled)
{
  g_return_if_fail (GST_IS_AUDIO_DECODER (dec));

  GST_OBJECT_LOCK (dec);
  dec->priv->needs_format = enabled;
  GST_OBJECT_UNLOCK (dec);
}

/**
 * gst_audio_decoder_get_needs_format:
 * @dec: a #GstAudioDecoder
 *
 * Queries decoder required format handling.
 *
 * Returns: TRUE if required format handling is enabled.
 *
 * MT safe.
 */
gboolean
gst_audio_decoder_get_needs_format (GstAudioDecoder * dec)
{
  gboolean result;

  g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);

  GST_OBJECT_LOCK (dec);
  result = dec->priv->needs_format;
  GST_OBJECT_UNLOCK (dec);

  return result;
}

/**
 * gst_audio_decoder_merge_tags:
 * @dec: a #GstAudioDecoder
 * @tags: (allow-none): a #GstTagList to merge, or NULL
 * @mode: the #GstTagMergeMode to use, usually #GST_TAG_MERGE_REPLACE
 *
 * Sets the audio decoder tags and how they should be merged with any
 * upstream stream tags. This will override any tags previously set
 * with gst_audio_decoder_merge_tags().
 *
 * Note that this is provided for convenience, and the subclass is
 * not required to use this and can still do tag handling on its own.
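 *
 * For instance, a subclass that has determined the stream bitrate might
 * report it like this (sketch; bitrate is assumed to be a guint computed
 * by the subclass):
 *
 * |[<!-- language="C" -->
 * GstTagList *tags = gst_tag_list_new (GST_TAG_BITRATE, bitrate, NULL);
 *
 * gst_audio_decoder_merge_tags (dec, tags, GST_TAG_MERGE_REPLACE);
 * gst_tag_list_unref (tags);
 * ]|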
 */
void
gst_audio_decoder_merge_tags (GstAudioDecoder * dec,
    const GstTagList * tags, GstTagMergeMode mode)
{
  g_return_if_fail (GST_IS_AUDIO_DECODER (dec));
  g_return_if_fail (tags == NULL || GST_IS_TAG_LIST (tags));
  g_return_if_fail (mode != GST_TAG_MERGE_UNDEFINED);

  GST_AUDIO_DECODER_STREAM_LOCK (dec);
  if (dec->priv->taglist != tags) {
    if (dec->priv->taglist) {
      gst_tag_list_unref (dec->priv->taglist);
      dec->priv->taglist = NULL;
      dec->priv->decoder_tags_merge_mode = GST_TAG_MERGE_KEEP_ALL;
    }
    if (tags) {
      dec->priv->taglist = gst_tag_list_ref ((GstTagList *) tags);
      dec->priv->decoder_tags_merge_mode = mode;
    }

    GST_DEBUG_OBJECT (dec, "setting decoder tags to %" GST_PTR_FORMAT, tags);
    dec->priv->taglist_changed = TRUE;
  }
  GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
}

/**
 * gst_audio_decoder_allocate_output_buffer:
 * @dec: a #GstAudioDecoder
 * @size: size of the buffer
 *
 * Helper function that allocates a buffer to hold an audio frame
 * for @dec's current output format.
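 *
 * A @handle_frame implementation might use it along these lines
 * (illustrative sketch; frame_size stands for the number of bytes the
 * subclass is about to produce):
 *
 * |[<!-- language="C" -->
 * GstBuffer *outbuf;
 * GstMapInfo map;
 *
 * outbuf = gst_audio_decoder_allocate_output_buffer (dec, frame_size);
 * gst_buffer_map (outbuf, &map, GST_MAP_WRITE);
 * // ... decode frame_size bytes of samples into map.data ...
 * gst_buffer_unmap (outbuf, &map);
 *
 * return gst_audio_decoder_finish_frame (dec, outbuf, 1);
 * ]|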
 *
 * Returns: (transfer full): allocated buffer
 */
GstBuffer *
gst_audio_decoder_allocate_output_buffer (GstAudioDecoder * dec, gsize size)
{
  GstBuffer *buffer = NULL;
  gboolean needs_reconfigure = FALSE;

  g_return_val_if_fail (size > 0, NULL);

  GST_DEBUG ("alloc src buffer");

  GST_AUDIO_DECODER_STREAM_LOCK (dec);

  needs_reconfigure = gst_pad_check_reconfigure (dec->srcpad);
  if (G_UNLIKELY (dec->priv->ctx.output_format_changed ||
          (GST_AUDIO_INFO_IS_VALID (&dec->priv->ctx.info)
              && needs_reconfigure))) {
    if (!gst_audio_decoder_negotiate_unlocked (dec)) {
      GST_INFO_OBJECT (dec, "Failed to negotiate, fallback allocation");
      gst_pad_mark_reconfigure (dec->srcpad);
      goto fallback;
    }
  }

  buffer =
      gst_buffer_new_allocate (dec->priv->ctx.allocator, size,
      &dec->priv->ctx.params);
  if (!buffer) {
    GST_INFO_OBJECT (dec, "couldn't allocate output buffer");
    goto fallback;
  }

  GST_AUDIO_DECODER_STREAM_UNLOCK (dec);

  return buffer;
fallback:
  buffer = gst_buffer_new_allocate (NULL, size, NULL);
  GST_AUDIO_DECODER_STREAM_UNLOCK (dec);

  return buffer;
}

/**
 * gst_audio_decoder_get_allocator:
 * @dec: a #GstAudioDecoder
 * @allocator: (out) (allow-none) (transfer full): the #GstAllocator
 * used
 * @params: (out) (allow-none) (transfer full): the
 * #GstAllocationParams of @allocator
 *
 * Lets #GstAudioDecoder sub-classes know the memory @allocator
 * used by the base class and its @params.
 *
 * Unref the @allocator after use.
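 *
 * For example (minimal sketch):
 *
 * |[<!-- language="C" -->
 * GstAllocator *allocator;
 * GstAllocationParams params;
 *
 * gst_audio_decoder_get_allocator (dec, &allocator, &params);
 * // ... use allocator and params for custom allocations ...
 * if (allocator)
 *   gst_object_unref (allocator);
 * ]|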
 */
void
gst_audio_decoder_get_allocator (GstAudioDecoder * dec,
    GstAllocator ** allocator, GstAllocationParams * params)
{
  g_return_if_fail (GST_IS_AUDIO_DECODER (dec));

  if (allocator)
    *allocator = dec->priv->ctx.allocator ?
        gst_object_ref (dec->priv->ctx.allocator) : NULL;

  if (params)
    *params = dec->priv->ctx.params;
}

/**
 * gst_audio_decoder_set_use_default_pad_acceptcaps:
 * @decoder: a #GstAudioDecoder
 * @use: if the default pad accept-caps query handling should be used
 *
 * Lets #GstAudioDecoder sub-classes decide if they want the sink pad
 * to use the default pad query handler to reply to accept-caps queries.
 *
 * By setting this to %TRUE it is possible to further customize the default
 * handler with %GST_PAD_SET_ACCEPT_INTERSECT and
 * %GST_PAD_SET_ACCEPT_TEMPLATE.
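 *
 * For instance, a subclass could do the following in its instance init
 * function (illustrative sketch):
 *
 * |[<!-- language="C" -->
 * gst_audio_decoder_set_use_default_pad_acceptcaps (decoder, TRUE);
 * GST_PAD_SET_ACCEPT_TEMPLATE (GST_AUDIO_DECODER_SINK_PAD (decoder));
 * ]|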
 *
 * Since: 1.6
 */
void
gst_audio_decoder_set_use_default_pad_acceptcaps (GstAudioDecoder * decoder,
    gboolean use)
{
  decoder->priv->use_default_pad_acceptcaps = use;
}
