1 /* GStreamer
2  * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
3  * Copyright (C) 2011 Nokia Corporation. All rights reserved.
4  *   Contact: Stefan Kost <stefan.kost@nokia.com>
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Library General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Library General Public License for more details.
15  *
16  * You should have received a copy of the GNU Library General Public
17  * License along with this library; if not, write to the
18  * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
19  * Boston, MA 02110-1301, USA.
20  */
21 
22 /**
23  * SECTION:gstaudioencoder
24  * @title: GstAudioEncoder
25  * @short_description: Base class for audio encoders
26  * @see_also: #GstBaseTransform
27  *
28  * This base class is for audio encoders turning raw audio samples into
29  * encoded audio data.
30  *
31  * GstAudioEncoder and subclass should cooperate as follows.
32  *
33  * ## Configuration
34  *
35  *   * Initially, GstAudioEncoder calls @start when the encoder element
36  *     is activated, which allows subclass to perform any global setup.
37  *
38  *   * GstAudioEncoder calls @set_format to inform subclass of the format
39  *     of input audio data that it is about to receive.  Subclass should
40  *     set up for encoding and configure various base class parameters
41  *     appropriately, notably those directing desired input data handling.
42  *     While unlikely, it might be called more than once, if a change in input
43  *     parameters requires reconfiguration.
44  *
45  *   * GstAudioEncoder calls @stop at end of all processing.
46  *
47  * From the configuration stage onward, and throughout processing, GstAudioEncoder
48  * maintains various parameters that provide required context,
49  * e.g. describing the format of input audio data.
50  * Conversely, subclass can and should configure these context parameters
51  * to inform base class of its expectation w.r.t. buffer handling.
52  *
53  * ## Data processing
54  *
55  *     * Base class gathers input sample data (as directed by the context's
56  *       frame_samples and frame_max) and provides this to subclass' @handle_frame.
57  *     * If codec processing results in encoded data, subclass should call
58  *       gst_audio_encoder_finish_frame() to have encoded data pushed
59  *       downstream. Alternatively, it might also call
60  *       gst_audio_encoder_finish_frame() (with a NULL buffer and some number of
61  *       dropped samples) to indicate dropped (non-encoded) samples.
62  *     * Just prior to actually pushing a buffer downstream,
63  *       it is passed to @pre_push.
64  *     * During processing, the base class will handle both srcpad and
65  *       sinkpad events. Sink events will be passed to the subclass if the
66  *       @event callback has been provided.
67  *
68  * ## Shutdown phase
69  *
70  *   * GstAudioEncoder calls @stop to inform the subclass that data
71  *     processing will be stopped.
72  *
73  * Subclass is responsible for providing pad template caps for
74  * source and sink pads. The pads need to be named "sink" and "src". It also
75  * needs to set fixed caps on the srcpad once the output format is known.
76  * This is typically done when the base class calls the subclass' @set_format
77  * function, though it may be delayed until calling @gst_audio_encoder_finish_frame.
78  *
79  * In summary, the above process should have the subclass concentrating on
80  * codec data processing while leaving other matters, most notably timestamp
81  * handling, to the base class.  While the subclass may exert more control
82  * in this area (see e.g. @pre_push), doing so is not recommended.
83  *
84  * In particular, base class will either favor tracking upstream timestamps
85  * (at the possible expense of jitter) or aim to arrange for a perfect stream of
86  * output timestamps, depending on #GstAudioEncoder:perfect-timestamp.
87  * However, in the latter case, the input may not be so perfect or ideal, which
88  * is handled as follows.  An input timestamp is compared with the expected
89  * timestamp as dictated by input sample stream and if the deviation is less
90  * than #GstAudioEncoder:tolerance, the deviation is discarded.
91  * Otherwise, it is considered a discontinuity and the subsequent output
92  * timestamp is resynced to the new position after performing the configured
93  * discontinuity processing.  In the non-perfect-timestamp case, an upstream
94  * variation exceeding tolerance only leads to marking DISCONT on the next
95  * outgoing buffer (timestamps are adjusted to upstream regardless of variation).
96  * While DISCONT is also marked in the perfect-timestamp case, this one
97  * optionally (see #GstAudioEncoder:hard-resync)
98  * performs some additional steps, such as clipping of (early) input samples
99  * or draining all currently remaining input data, depending on the direction
100  * of the discontinuity.
101  *
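 * For example, an application that prefers a perfectly spaced output timeline
 * with a tighter jitter bound could configure the element roughly as follows
 * (a sketch only; encoder stands for a pointer to some GstAudioEncoder
 * subclass instance, and the values are arbitrary):
 *
 * |[<!-- language="C" -->
 * g_object_set (encoder,
 *     "perfect-timestamp", TRUE,
 *     "hard-resync", FALSE,
 *     "tolerance", (gint64) (20 * GST_MSECOND),
 *     NULL);
 * ]|
 *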
102  * If perfect timestamps are arranged, it is also possible to request the base
103  * class (usually done by the subclass) to provide additional buffer metadata
104  * (in the OFFSET and OFFSET_END fields) according to granule-defined semantics
105  * currently needed by oggmux.  Specifically, OFFSET_END is set to the granulepos
106  * (= sample count including buffer) and OFFSET to the corresponding timestamp
107  * (as determined by the same sample count and sample rate).
108  *
109  * Things that the subclass needs to take care of:
110  *
111  *   * Provide pad templates
112  *   * Set source pad caps when appropriate
113  *   * Inform base class of buffer processing needs using context's
114  *      frame_samples and frame_max.
115  *   * Set user-configurable properties to sane defaults for format and
116  *      implementing codec at hand, e.g. those controlling timestamp behaviour
117  *      and discontinuity processing.
118  *   * Accept data in @handle_frame and provide encoded results to
119  *      gst_audio_encoder_finish_frame().
120  *
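 * The following condensed sketch shows how a subclass might wire these pieces
 * together.  It is illustrative only: the my_enc_* functions, the
 * "audio/x-example" media type and the FRAME_SAMPLES constant are made up,
 * and the actual codec calls are elided.
 *
 * |[<!-- language="C" -->
 * #define FRAME_SAMPLES 1024
 *
 * static gboolean
 * my_enc_set_format (GstAudioEncoder * enc, GstAudioInfo * info)
 * {
 *   GstCaps *caps;
 *   gboolean res;
 *
 *   // direct the base class to hand over fixed-size blocks of samples
 *   gst_audio_encoder_set_frame_samples_min (enc, FRAME_SAMPLES);
 *   gst_audio_encoder_set_frame_samples_max (enc, FRAME_SAMPLES);
 *   gst_audio_encoder_set_frame_max (enc, 1);
 *
 *   // fixed source caps must be configured before finish_frame() is called
 *   caps = gst_caps_new_simple ("audio/x-example",
 *       "rate", G_TYPE_INT, GST_AUDIO_INFO_RATE (info),
 *       "channels", G_TYPE_INT, GST_AUDIO_INFO_CHANNELS (info), NULL);
 *   res = gst_audio_encoder_set_output_format (enc, caps);
 *   gst_caps_unref (caps);
 *
 *   return res;
 * }
 *
 * static GstFlowReturn
 * my_enc_handle_frame (GstAudioEncoder * enc, GstBuffer * inbuf)
 * {
 *   GstBuffer *outbuf;
 *
 *   // a NULL buffer requests draining; this sketch buffers nothing itself
 *   if (inbuf == NULL)
 *     return GST_FLOW_OK;
 *
 *   // encode the FRAME_SAMPLES samples in inbuf into outbuf here;
 *   // the output size is codec specific, 4096 is just a placeholder
 *   outbuf = gst_audio_encoder_allocate_output_buffer (enc, 4096);
 *
 *   // hand over the result together with the samples it represents
 *   return gst_audio_encoder_finish_frame (enc, outbuf, FRAME_SAMPLES);
 * }
 * ]|
 *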
121  */
122 
123 #ifdef HAVE_CONFIG_H
124 #  include "config.h"
125 #endif
126 
127 #include "gstaudioencoder.h"
128 #include "gstaudioutilsprivate.h"
129 #include <gst/base/gstadapter.h>
130 #include <gst/audio/audio.h>
131 #include <gst/pbutils/descriptions.h>
132 
133 #include <stdlib.h>
134 #include <string.h>
135 
136 
137 GST_DEBUG_CATEGORY_STATIC (gst_audio_encoder_debug);
138 #define GST_CAT_DEFAULT gst_audio_encoder_debug
139 
140 enum
141 {
142   PROP_0,
143   PROP_PERFECT_TS,
144   PROP_GRANULE,
145   PROP_HARD_RESYNC,
146   PROP_TOLERANCE
147 };
148 
149 #define DEFAULT_PERFECT_TS   FALSE
150 #define DEFAULT_GRANULE      FALSE
151 #define DEFAULT_HARD_RESYNC  FALSE
152 #define DEFAULT_TOLERANCE    40000000
153 #define DEFAULT_HARD_MIN     FALSE
154 #define DEFAULT_DRAINABLE    TRUE
155 
156 typedef struct _GstAudioEncoderContext
157 {
158   /* input */
159   /* last negotiated input caps */
160   GstCaps *input_caps;
161   /* last negotiated input info */
162   GstAudioInfo info;
163 
164   /* output */
165   GstCaps *caps;
166   GstCaps *allocation_caps;
167   gboolean output_caps_changed;
168   gint frame_samples_min, frame_samples_max;
169   gint frame_max;
170   gint lookahead;
171   /* MT-protected (with LOCK) */
172   GstClockTime min_latency;
173   GstClockTime max_latency;
174 
175   GList *headers;
176   gboolean new_headers;
177 
178   GstAllocator *allocator;
179   GstAllocationParams params;
180 } GstAudioEncoderContext;
181 
182 struct _GstAudioEncoderPrivate
183 {
184   /* activation status */
185   gboolean active;
186 
187   /* input base/first ts as basis for output ts;
188    * kept nearly constant for perfect_ts,
189    * otherwise resyncs to upstream ts */
190   GstClockTime base_ts;
191   /* corresponding base granulepos */
192   gint64 base_gp;
193   /* input samples processed and sent downstream so far (w.r.t. base_ts) */
194   guint64 samples;
195 
196   /* currently collected sample data */
197   GstAdapter *adapter;
198   /* offset in adapter up to which already supplied to encoder */
199   gint offset;
200   /* mark outgoing discont */
201   gboolean discont;
202   /* to guess duration of drained data */
203   GstClockTime last_duration;
204 
205   /* subclass provided data in processing round */
206   gboolean got_data;
207   /* subclass gave all it could already */
208   gboolean drained;
209   /* subclass currently being forcibly drained */
210   gboolean force;
211   /* need to handle changed input caps */
212   gboolean do_caps;
213 
214   /* output bps estimation */
215   /* global in samples seen */
216   guint64 samples_in;
217   /* global bytes sent out */
218   guint64 bytes_out;
219 
220   /* context storage */
221   GstAudioEncoderContext ctx;
222 
223   /* properties */
224   gint64 tolerance;
225   gboolean perfect_ts;
226   gboolean hard_resync;
227   gboolean granule;
228   gboolean hard_min;
229   gboolean drainable;
230 
231   /* upstream stream tags (global tags are passed through as-is) */
232   GstTagList *upstream_tags;
233 
234   /* subclass tags */
235   GstTagList *tags;
236   GstTagMergeMode tags_merge_mode;
237 
238   gboolean tags_changed;
239 
240   /* pending serialized sink events, will be sent from finish_frame() */
241   GList *pending_events;
242 };
243 
244 
245 static GstElementClass *parent_class = NULL;
246 static gint private_offset = 0;
247 
248 static void gst_audio_encoder_class_init (GstAudioEncoderClass * klass);
249 static void gst_audio_encoder_init (GstAudioEncoder * parse,
250     GstAudioEncoderClass * klass);
251 
252 GType
253 gst_audio_encoder_get_type (void)
254 {
255   static GType audio_encoder_type = 0;
256 
257   if (!audio_encoder_type) {
258     static const GTypeInfo audio_encoder_info = {
259       sizeof (GstAudioEncoderClass),
260       (GBaseInitFunc) NULL,
261       (GBaseFinalizeFunc) NULL,
262       (GClassInitFunc) gst_audio_encoder_class_init,
263       NULL,
264       NULL,
265       sizeof (GstAudioEncoder),
266       0,
267       (GInstanceInitFunc) gst_audio_encoder_init,
268     };
269     const GInterfaceInfo preset_interface_info = {
270       NULL,                     /* interface_init */
271       NULL,                     /* interface_finalize */
272       NULL                      /* interface_data */
273     };
274 
275     audio_encoder_type = g_type_register_static (GST_TYPE_ELEMENT,
276         "GstAudioEncoder", &audio_encoder_info, G_TYPE_FLAG_ABSTRACT);
277 
278     private_offset =
279         g_type_add_instance_private (audio_encoder_type,
280         sizeof (GstAudioEncoderPrivate));
281 
282     g_type_add_interface_static (audio_encoder_type, GST_TYPE_PRESET,
283         &preset_interface_info);
284   }
285   return audio_encoder_type;
286 }
287 
288 static inline GstAudioEncoderPrivate *
289 gst_audio_encoder_get_instance_private (GstAudioEncoder * self)
290 {
291   return (G_STRUCT_MEMBER_P (self, private_offset));
292 }
293 
294 static void gst_audio_encoder_finalize (GObject * object);
295 static void gst_audio_encoder_reset (GstAudioEncoder * enc, gboolean full);
296 
297 static void gst_audio_encoder_set_property (GObject * object,
298     guint prop_id, const GValue * value, GParamSpec * pspec);
299 static void gst_audio_encoder_get_property (GObject * object,
300     guint prop_id, GValue * value, GParamSpec * pspec);
301 
302 static gboolean gst_audio_encoder_sink_activate_mode (GstPad * pad,
303     GstObject * parent, GstPadMode mode, gboolean active);
304 
305 static GstCaps *gst_audio_encoder_getcaps_default (GstAudioEncoder * enc,
306     GstCaps * filter);
307 
308 static gboolean gst_audio_encoder_sink_event_default (GstAudioEncoder * enc,
309     GstEvent * event);
310 static gboolean gst_audio_encoder_src_event_default (GstAudioEncoder * enc,
311     GstEvent * event);
312 static gboolean gst_audio_encoder_sink_event (GstPad * pad, GstObject * parent,
313     GstEvent * event);
314 static gboolean gst_audio_encoder_src_event (GstPad * pad, GstObject * parent,
315     GstEvent * event);
316 static gboolean gst_audio_encoder_sink_setcaps (GstAudioEncoder * enc,
317     GstCaps * caps);
318 static GstFlowReturn gst_audio_encoder_chain (GstPad * pad, GstObject * parent,
319     GstBuffer * buffer);
320 static gboolean gst_audio_encoder_src_query (GstPad * pad, GstObject * parent,
321     GstQuery * query);
322 static gboolean gst_audio_encoder_sink_query (GstPad * pad, GstObject * parent,
323     GstQuery * query);
324 static GstStateChangeReturn gst_audio_encoder_change_state (GstElement *
325     element, GstStateChange transition);
326 
327 static gboolean gst_audio_encoder_decide_allocation_default (GstAudioEncoder *
328     enc, GstQuery * query);
329 static gboolean gst_audio_encoder_propose_allocation_default (GstAudioEncoder *
330     enc, GstQuery * query);
331 static gboolean gst_audio_encoder_negotiate_default (GstAudioEncoder * enc);
332 static gboolean gst_audio_encoder_negotiate_unlocked (GstAudioEncoder * enc);
333 
334 static gboolean gst_audio_encoder_transform_meta_default (GstAudioEncoder *
335     encoder, GstBuffer * outbuf, GstMeta * meta, GstBuffer * inbuf);
336 
337 static gboolean gst_audio_encoder_sink_query_default (GstAudioEncoder * encoder,
338     GstQuery * query);
339 static gboolean gst_audio_encoder_src_query_default (GstAudioEncoder * encoder,
340     GstQuery * query);
341 
342 static void
343 gst_audio_encoder_class_init (GstAudioEncoderClass * klass)
344 {
345   GObjectClass *gobject_class;
346   GstElementClass *gstelement_class;
347 
348   gobject_class = G_OBJECT_CLASS (klass);
349   gstelement_class = GST_ELEMENT_CLASS (klass);
350   parent_class = g_type_class_peek_parent (klass);
351 
352   GST_DEBUG_CATEGORY_INIT (gst_audio_encoder_debug, "audioencoder", 0,
353       "audio encoder base class");
354 
355   if (private_offset != 0)
356     g_type_class_adjust_private_offset (klass, &private_offset);
357 
358   gobject_class->set_property = gst_audio_encoder_set_property;
359   gobject_class->get_property = gst_audio_encoder_get_property;
360 
361   gobject_class->finalize = GST_DEBUG_FUNCPTR (gst_audio_encoder_finalize);
362 
363   /* properties */
364   g_object_class_install_property (gobject_class, PROP_PERFECT_TS,
365       g_param_spec_boolean ("perfect-timestamp", "Perfect Timestamps",
366           "Favour perfect timestamps over tracking upstream timestamps",
367           DEFAULT_PERFECT_TS, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
368   g_object_class_install_property (gobject_class, PROP_GRANULE,
369       g_param_spec_boolean ("mark-granule", "Granule Marking",
370           "Apply granule semantics to buffer metadata (implies perfect-timestamp)",
371           DEFAULT_GRANULE, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
372   g_object_class_install_property (gobject_class, PROP_HARD_RESYNC,
373       g_param_spec_boolean ("hard-resync", "Hard Resync",
374           "Perform clipping and sample flushing upon discontinuity",
375           DEFAULT_HARD_RESYNC, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
376   g_object_class_install_property (gobject_class, PROP_TOLERANCE,
377       g_param_spec_int64 ("tolerance", "Tolerance",
378           "Consider discontinuity if timestamp jitter/imperfection exceeds tolerance (ns)",
379           0, G_MAXINT64, DEFAULT_TOLERANCE,
380           G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
381 
382   gstelement_class->change_state =
383       GST_DEBUG_FUNCPTR (gst_audio_encoder_change_state);
384 
385   klass->getcaps = gst_audio_encoder_getcaps_default;
386   klass->sink_event = gst_audio_encoder_sink_event_default;
387   klass->src_event = gst_audio_encoder_src_event_default;
388   klass->sink_query = gst_audio_encoder_sink_query_default;
389   klass->src_query = gst_audio_encoder_src_query_default;
390   klass->propose_allocation = gst_audio_encoder_propose_allocation_default;
391   klass->decide_allocation = gst_audio_encoder_decide_allocation_default;
392   klass->negotiate = gst_audio_encoder_negotiate_default;
393   klass->transform_meta = gst_audio_encoder_transform_meta_default;
394 }
395 
396 static void
397 gst_audio_encoder_init (GstAudioEncoder * enc, GstAudioEncoderClass * bclass)
398 {
399   GstPadTemplate *pad_template;
400 
401   GST_DEBUG_OBJECT (enc, "gst_audio_encoder_init");
402 
403   enc->priv = gst_audio_encoder_get_instance_private (enc);
404 
405   /* only push mode supported */
406   pad_template =
407       gst_element_class_get_pad_template (GST_ELEMENT_CLASS (bclass), "sink");
408   g_return_if_fail (pad_template != NULL);
409   enc->sinkpad = gst_pad_new_from_template (pad_template, "sink");
410   gst_pad_set_event_function (enc->sinkpad,
411       GST_DEBUG_FUNCPTR (gst_audio_encoder_sink_event));
412   gst_pad_set_query_function (enc->sinkpad,
413       GST_DEBUG_FUNCPTR (gst_audio_encoder_sink_query));
414   gst_pad_set_chain_function (enc->sinkpad,
415       GST_DEBUG_FUNCPTR (gst_audio_encoder_chain));
416   gst_pad_set_activatemode_function (enc->sinkpad,
417       GST_DEBUG_FUNCPTR (gst_audio_encoder_sink_activate_mode));
418   gst_element_add_pad (GST_ELEMENT (enc), enc->sinkpad);
419 
420   GST_DEBUG_OBJECT (enc, "sinkpad created");
421 
422   /* and we don't mind upstream traveling stuff that much ... */
423   pad_template =
424       gst_element_class_get_pad_template (GST_ELEMENT_CLASS (bclass), "src");
425   g_return_if_fail (pad_template != NULL);
426   enc->srcpad = gst_pad_new_from_template (pad_template, "src");
427   gst_pad_set_event_function (enc->srcpad,
428       GST_DEBUG_FUNCPTR (gst_audio_encoder_src_event));
429   gst_pad_set_query_function (enc->srcpad,
430       GST_DEBUG_FUNCPTR (gst_audio_encoder_src_query));
431   gst_pad_use_fixed_caps (enc->srcpad);
432   gst_element_add_pad (GST_ELEMENT (enc), enc->srcpad);
433   GST_DEBUG_OBJECT (enc, "src created");
434 
435   enc->priv->adapter = gst_adapter_new ();
436 
437   g_rec_mutex_init (&enc->stream_lock);
438 
439   /* property default */
440   enc->priv->granule = DEFAULT_GRANULE;
441   enc->priv->perfect_ts = DEFAULT_PERFECT_TS;
442   enc->priv->hard_resync = DEFAULT_HARD_RESYNC;
443   enc->priv->tolerance = DEFAULT_TOLERANCE;
444   enc->priv->hard_min = DEFAULT_HARD_MIN;
445   enc->priv->drainable = DEFAULT_DRAINABLE;
446 
447   /* init state */
448   enc->priv->ctx.min_latency = 0;
449   enc->priv->ctx.max_latency = 0;
450   gst_audio_encoder_reset (enc, TRUE);
451   GST_DEBUG_OBJECT (enc, "init ok");
452 }
453 
454 static void
455 gst_audio_encoder_reset (GstAudioEncoder * enc, gboolean full)
456 {
457   GST_AUDIO_ENCODER_STREAM_LOCK (enc);
458 
459   GST_LOG_OBJECT (enc, "reset full %d", full);
460 
461   if (full) {
462     enc->priv->active = FALSE;
463     GST_OBJECT_LOCK (enc);
464     enc->priv->samples_in = 0;
465     enc->priv->bytes_out = 0;
466     GST_OBJECT_UNLOCK (enc);
467 
468     g_list_foreach (enc->priv->ctx.headers, (GFunc) gst_buffer_unref, NULL);
469     g_list_free (enc->priv->ctx.headers);
470     enc->priv->ctx.headers = NULL;
471     enc->priv->ctx.new_headers = FALSE;
472 
473     if (enc->priv->ctx.allocator)
474       gst_object_unref (enc->priv->ctx.allocator);
475     enc->priv->ctx.allocator = NULL;
476 
477     GST_OBJECT_LOCK (enc);
478     gst_caps_replace (&enc->priv->ctx.input_caps, NULL);
479     gst_caps_replace (&enc->priv->ctx.caps, NULL);
480     gst_caps_replace (&enc->priv->ctx.allocation_caps, NULL);
481 
482     memset (&enc->priv->ctx, 0, sizeof (enc->priv->ctx));
483     gst_audio_info_init (&enc->priv->ctx.info);
484     GST_OBJECT_UNLOCK (enc);
485 
486     if (enc->priv->upstream_tags) {
487       gst_tag_list_unref (enc->priv->upstream_tags);
488       enc->priv->upstream_tags = NULL;
489     }
490     if (enc->priv->tags)
491       gst_tag_list_unref (enc->priv->tags);
492     enc->priv->tags = NULL;
493     enc->priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
494     enc->priv->tags_changed = FALSE;
495 
496     g_list_foreach (enc->priv->pending_events, (GFunc) gst_event_unref, NULL);
497     g_list_free (enc->priv->pending_events);
498     enc->priv->pending_events = NULL;
499   }
500 
501   gst_segment_init (&enc->input_segment, GST_FORMAT_TIME);
502   gst_segment_init (&enc->output_segment, GST_FORMAT_TIME);
503 
504   gst_adapter_clear (enc->priv->adapter);
505   enc->priv->got_data = FALSE;
506   enc->priv->drained = TRUE;
507   enc->priv->offset = 0;
508   enc->priv->base_ts = GST_CLOCK_TIME_NONE;
509   enc->priv->base_gp = -1;
510   enc->priv->samples = 0;
511   enc->priv->discont = FALSE;
512 
513   GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
514 }
515 
516 static void
517 gst_audio_encoder_finalize (GObject * object)
518 {
519   GstAudioEncoder *enc = GST_AUDIO_ENCODER (object);
520 
521   g_object_unref (enc->priv->adapter);
522 
523   g_rec_mutex_clear (&enc->stream_lock);
524 
525   G_OBJECT_CLASS (parent_class)->finalize (object);
526 }
527 
528 static GstStateChangeReturn
529 gst_audio_encoder_change_state (GstElement * element, GstStateChange transition)
530 {
531   GstStateChangeReturn ret = GST_STATE_CHANGE_SUCCESS;
532   GstAudioEncoder *enc = GST_AUDIO_ENCODER (element);
533   GstAudioEncoderClass *klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
534 
535   switch (transition) {
536     case GST_STATE_CHANGE_NULL_TO_READY:
537       if (klass->open) {
538         if (!klass->open (enc))
539           goto open_failed;
540       }
541     default:
542       break;
543   }
544 
545   ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
546 
547   switch (transition) {
548     case GST_STATE_CHANGE_READY_TO_NULL:
549       if (klass->close) {
550         if (!klass->close (enc))
551           goto close_failed;
552       }
553     default:
554       break;
555   }
556 
557   return ret;
558 
559 open_failed:
560   {
561     GST_ELEMENT_ERROR (enc, LIBRARY, INIT, (NULL), ("Failed to open codec"));
562     return GST_STATE_CHANGE_FAILURE;
563   }
564 close_failed:
565   {
566     GST_ELEMENT_ERROR (enc, LIBRARY, INIT, (NULL), ("Failed to close codec"));
567     return GST_STATE_CHANGE_FAILURE;
568   }
569 }
570 
571 static gboolean
572 gst_audio_encoder_push_event (GstAudioEncoder * enc, GstEvent * event)
573 {
574   switch (GST_EVENT_TYPE (event)) {
575     case GST_EVENT_SEGMENT:{
576       GstSegment seg;
577 
578       GST_AUDIO_ENCODER_STREAM_LOCK (enc);
579       gst_event_copy_segment (event, &seg);
580 
581       GST_DEBUG_OBJECT (enc, "starting segment %" GST_SEGMENT_FORMAT, &seg);
582 
583       enc->output_segment = seg;
584       GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
585       break;
586     }
587     default:
588       break;
589   }
590 
591   return gst_pad_push_event (enc->srcpad, event);
592 }
593 
594 static inline void
595 gst_audio_encoder_push_pending_events (GstAudioEncoder * enc)
596 {
597   GstAudioEncoderPrivate *priv = enc->priv;
598 
599   if (priv->pending_events) {
600     GList *pending_events, *l;
601 
602     pending_events = priv->pending_events;
603     priv->pending_events = NULL;
604 
605     GST_DEBUG_OBJECT (enc, "Pushing pending events");
606     for (l = pending_events; l; l = l->next)
607       gst_audio_encoder_push_event (enc, l->data);
608     g_list_free (pending_events);
609   }
610 }
611 
612 static GstEvent *
613 gst_audio_encoder_create_merged_tags_event (GstAudioEncoder * enc)
614 {
615   GstTagList *merged_tags;
616 
617   GST_LOG_OBJECT (enc, "upstream : %" GST_PTR_FORMAT, enc->priv->upstream_tags);
618   GST_LOG_OBJECT (enc, "encoder  : %" GST_PTR_FORMAT, enc->priv->tags);
619   GST_LOG_OBJECT (enc, "mode     : %d", enc->priv->tags_merge_mode);
620 
621   merged_tags =
622       gst_tag_list_merge (enc->priv->upstream_tags, enc->priv->tags,
623       enc->priv->tags_merge_mode);
624 
625   GST_DEBUG_OBJECT (enc, "merged   : %" GST_PTR_FORMAT, merged_tags);
626 
627   if (merged_tags == NULL)
628     return NULL;
629 
630   if (gst_tag_list_is_empty (merged_tags)) {
631     gst_tag_list_unref (merged_tags);
632     return NULL;
633   }
634 
635   /* add codec info to pending tags */
636 #if 0
637   caps = gst_pad_get_current_caps (enc->srcpad);
638   gst_pb_utils_add_codec_description_to_tag_list (merged_tags,
639       GST_TAG_AUDIO_CODEC, caps);
640 #endif
641 
642   return gst_event_new_tag (merged_tags);
643 }
644 
645 static void
646 gst_audio_encoder_check_and_push_pending_tags (GstAudioEncoder * enc)
647 {
648   if (enc->priv->tags_changed) {
649     GstEvent *tags_event;
650 
651     tags_event = gst_audio_encoder_create_merged_tags_event (enc);
652 
653     if (tags_event != NULL)
654       gst_audio_encoder_push_event (enc, tags_event);
655 
656     enc->priv->tags_changed = FALSE;
657   }
658 }
659 
660 
661 static gboolean
662 gst_audio_encoder_transform_meta_default (GstAudioEncoder *
663     encoder, GstBuffer * outbuf, GstMeta * meta, GstBuffer * inbuf)
664 {
665   const GstMetaInfo *info = meta->info;
666   const gchar *const *tags;
667 
668   tags = gst_meta_api_type_get_tags (info->api);
669 
670   if (!tags || (g_strv_length ((gchar **) tags) == 1
671           && gst_meta_api_type_has_tag (info->api,
672               g_quark_from_string (GST_META_TAG_AUDIO_STR))))
673     return TRUE;
674 
675   return FALSE;
676 }
677 
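/* A subclass that wants additional metadata carried over to its output
 * buffers can override GstAudioEncoderClass::transform_meta.  A sketch only;
 * MY_META_API_TYPE stands for some subclass-specific meta API type and
 * my_enc_parent_class for the pointer generated by G_DEFINE_TYPE():
 *
 *   static gboolean
 *   my_enc_transform_meta (GstAudioEncoder * enc, GstBuffer * outbuf,
 *       GstMeta * meta, GstBuffer * inbuf)
 *   {
 *     if (meta->info->api == MY_META_API_TYPE)
 *       return TRUE;
 *     return GST_AUDIO_ENCODER_CLASS (my_enc_parent_class)->transform_meta
 *         (enc, outbuf, meta, inbuf);
 *   }
 */
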
678 typedef struct
679 {
680   GstAudioEncoder *encoder;
681   GstBuffer *outbuf;
682 } CopyMetaData;
683 
684 static gboolean
685 foreach_metadata (GstBuffer * inbuf, GstMeta ** meta, gpointer user_data)
686 {
687   CopyMetaData *data = user_data;
688   GstAudioEncoder *encoder = data->encoder;
689   GstAudioEncoderClass *klass = GST_AUDIO_ENCODER_GET_CLASS (encoder);
690   GstBuffer *outbuf = data->outbuf;
691   const GstMetaInfo *info = (*meta)->info;
692   gboolean do_copy = FALSE;
693 
694   if (gst_meta_api_type_has_tag (info->api, _gst_meta_tag_memory)) {
695     /* never call the transform_meta with memory specific metadata */
696     GST_DEBUG_OBJECT (encoder, "not copying memory specific metadata %s",
697         g_type_name (info->api));
698     do_copy = FALSE;
699   } else if (klass->transform_meta) {
700     do_copy = klass->transform_meta (encoder, outbuf, *meta, inbuf);
701     GST_DEBUG_OBJECT (encoder, "transformed metadata %s: copy: %d",
702         g_type_name (info->api), do_copy);
703   }
704 
705   /* we only copy metadata when the subclass implemented a transform_meta
706    * function and when it returns %TRUE */
707   if (do_copy && info->transform_func) {
708     GstMetaTransformCopy copy_data = { FALSE, 0, -1 };
709     GST_DEBUG_OBJECT (encoder, "copy metadata %s", g_type_name (info->api));
710     /* simply copy then */
711     info->transform_func (outbuf, *meta, inbuf,
712         _gst_meta_transform_copy, &copy_data);
713   }
714   return TRUE;
715 }
716 
717 /**
718  * gst_audio_encoder_finish_frame:
719  * @enc: a #GstAudioEncoder
720  * @buf: encoded data
721  * @samples: number of samples (per channel) represented by encoded data
722  *
723  * Collects encoded data and pushes encoded data downstream.
724  * Source pad caps must be set when this is called.
725  *
726  * If @samples < 0, the best estimate is all samples provided to the encoder
727  * (subclass) so far.  @buf may be NULL, in which case the next @samples
728  * samples are considered discarded, e.g. as a result of discontinuous
729  * transmission, and a discontinuity is marked.
730  *
731  * Note that samples received in #GstAudioEncoderClass.handle_frame()
732  * may be invalidated by a call to this function.
733  *
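 * An illustrative sketch of both cases (FRAME_SAMPLES, frame_unusable and
 * encoded_buf are subclass-specific placeholders, not part of this API):
 *
 * |[<!-- language="C" -->
 * if (frame_unusable)
 *   ret = gst_audio_encoder_finish_frame (enc, NULL, FRAME_SAMPLES);
 * else
 *   ret = gst_audio_encoder_finish_frame (enc, encoded_buf, FRAME_SAMPLES);
 * ]|
 *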
734  * Returns: a #GstFlowReturn that should be escalated to caller (of caller)
735  */
736 GstFlowReturn
737 gst_audio_encoder_finish_frame (GstAudioEncoder * enc, GstBuffer * buf,
738     gint samples)
739 {
740   GstAudioEncoderClass *klass;
741   GstAudioEncoderPrivate *priv;
742   GstAudioEncoderContext *ctx;
743   GstFlowReturn ret = GST_FLOW_OK;
744   gboolean needs_reconfigure = FALSE;
745   GstBuffer *inbuf = NULL;
746 
747   klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
748   priv = enc->priv;
749   ctx = &enc->priv->ctx;
750 
751   /* subclass should not hand us an empty buffer */
752   g_return_val_if_fail (buf == NULL || gst_buffer_get_size (buf) > 0,
753       GST_FLOW_ERROR);
754 
755   /* subclass should know what it is producing by now */
756   if (!ctx->caps)
757     goto no_caps;
758 
759   GST_AUDIO_ENCODER_STREAM_LOCK (enc);
760 
761   GST_LOG_OBJECT (enc,
762       "accepting %" G_GSIZE_FORMAT " bytes encoded data as %d samples",
763       buf ? gst_buffer_get_size (buf) : -1, samples);
764 
765   needs_reconfigure = gst_pad_check_reconfigure (enc->srcpad);
766   if (G_UNLIKELY (ctx->output_caps_changed || needs_reconfigure)) {
767     if (!gst_audio_encoder_negotiate_unlocked (enc)) {
768       gst_pad_mark_reconfigure (enc->srcpad);
769       if (GST_PAD_IS_FLUSHING (enc->srcpad))
770         ret = GST_FLOW_FLUSHING;
771       else
772         ret = GST_FLOW_NOT_NEGOTIATED;
773       if (buf)
774         gst_buffer_unref (buf);
775       goto exit;
776     }
777   }
778 
779   /* mark subclass still alive and providing */
780   if (G_LIKELY (buf))
781     priv->got_data = TRUE;
782 
783   gst_audio_encoder_push_pending_events (enc);
784 
785   /* send after pending events, which likely includes segment event */
786   gst_audio_encoder_check_and_push_pending_tags (enc);
787 
788   /* remove corresponding samples from input */
789   if (samples < 0)
790     samples = (enc->priv->offset / ctx->info.bpf);
791 
792   if (G_LIKELY (samples)) {
793     /* track upstream ts if so configured */
794     if (!enc->priv->perfect_ts) {
795       guint64 ts, distance;
796 
797       ts = gst_adapter_prev_pts (priv->adapter, &distance);
798       g_assert (distance % ctx->info.bpf == 0);
799       distance /= ctx->info.bpf;
800       GST_LOG_OBJECT (enc, "%" G_GUINT64_FORMAT " samples past prev_ts %"
801           GST_TIME_FORMAT, distance, GST_TIME_ARGS (ts));
802       GST_LOG_OBJECT (enc, "%" G_GUINT64_FORMAT " samples past base_ts %"
803           GST_TIME_FORMAT, priv->samples, GST_TIME_ARGS (priv->base_ts));
804       /* when draining adapter might be empty and no ts to offer */
805       if (GST_CLOCK_TIME_IS_VALID (ts) && ts != priv->base_ts) {
806         GstClockTimeDiff diff;
807         GstClockTime old_ts, next_ts;
808 
809         /* passed into another buffer;
810          * mild check for discontinuity and only mark if so */
811         next_ts = ts +
812             gst_util_uint64_scale (distance, GST_SECOND, ctx->info.rate);
813         old_ts = priv->base_ts +
814             gst_util_uint64_scale (priv->samples, GST_SECOND, ctx->info.rate);
815         diff = GST_CLOCK_DIFF (next_ts, old_ts);
816         GST_LOG_OBJECT (enc, "ts diff %d ms", (gint) (diff / GST_MSECOND));
817         /* only mark discontinuity if beyond tolerance */
818         if (G_UNLIKELY (diff < -enc->priv->tolerance ||
819                 diff > enc->priv->tolerance)) {
820           GST_DEBUG_OBJECT (enc, "marked discont");
821           priv->discont = TRUE;
822         }
823         if (diff > GST_SECOND / ctx->info.rate / 2 ||
824             diff < -GST_SECOND / ctx->info.rate / 2) {
825           GST_LOG_OBJECT (enc, "new upstream ts %" GST_TIME_FORMAT
826               " at distance %" G_GUINT64_FORMAT, GST_TIME_ARGS (ts), distance);
827           /* re-sync to upstream ts */
828           priv->base_ts = ts;
829           priv->samples = distance;
830         } else {
831           GST_LOG_OBJECT (enc, "new upstream ts only introduces jitter");
832         }
833       }
834     }
835     /* advance sample view */
836     if (G_UNLIKELY (samples * ctx->info.bpf > priv->offset)) {
837       guint avail = gst_adapter_available (priv->adapter);
838 
839       if (G_LIKELY (!priv->force)) {
840         /* we should have received EOS to enable force */
841         goto overflow;
842       } else {
843         priv->offset = 0;
844         if (avail > 0 && samples * ctx->info.bpf >= avail) {
845           inbuf = gst_adapter_take_buffer_fast (priv->adapter, avail);
846           gst_adapter_clear (priv->adapter);
847         } else if (avail > 0) {
848           inbuf =
849               gst_adapter_take_buffer_fast (priv->adapter,
850               samples * ctx->info.bpf);
851         }
852       }
853     } else {
854       guint avail = gst_adapter_available (priv->adapter);
855 
856       if (avail > 0) {
857         inbuf =
858             gst_adapter_take_buffer_fast (priv->adapter,
859             samples * ctx->info.bpf);
860       }
861       priv->offset -= samples * ctx->info.bpf;
862       /* avoid subsequent stray prev_ts */
863       if (G_UNLIKELY (gst_adapter_available (priv->adapter) == 0))
864         gst_adapter_clear (priv->adapter);
865     }
866     /* sample count advanced below after buffer handling */
867   }
868 
869   /* collect output */
870   if (G_LIKELY (buf)) {
871     gsize size;
872 
873     /* Pushing headers first */
874     if (G_UNLIKELY (priv->ctx.new_headers)) {
875       GList *tmp;
876 
877       GST_DEBUG_OBJECT (enc, "Sending headers");
878 
879       for (tmp = priv->ctx.headers; tmp; tmp = tmp->next) {
880         GstBuffer *tmpbuf = gst_buffer_ref (tmp->data);
881 
882         tmpbuf = gst_buffer_make_writable (tmpbuf);
883         size = gst_buffer_get_size (tmpbuf);
884 
885         if (G_UNLIKELY (priv->discont)) {
886           GST_LOG_OBJECT (enc, "marking discont");
887           GST_BUFFER_FLAG_SET (tmpbuf, GST_BUFFER_FLAG_DISCONT);
888           priv->discont = FALSE;
889         }
890 
891         /* Ogg codecs like Vorbis use offset/offset-end in a special
892          * way and both should be 0 for these codecs */
893         if (priv->base_gp >= 0) {
894           GST_BUFFER_OFFSET (tmpbuf) = 0;
895           GST_BUFFER_OFFSET_END (tmpbuf) = 0;
896         } else {
897           GST_BUFFER_OFFSET (tmpbuf) = priv->bytes_out;
898           GST_BUFFER_OFFSET_END (tmpbuf) = priv->bytes_out + size;
899         }
900 
901         GST_OBJECT_LOCK (enc);
902         priv->bytes_out += size;
903         GST_OBJECT_UNLOCK (enc);
904 
905         ret = gst_pad_push (enc->srcpad, tmpbuf);
906         if (ret != GST_FLOW_OK) {
907           GST_WARNING_OBJECT (enc, "pushing header returned %s",
908               gst_flow_get_name (ret));
909           goto exit;
910         }
911       }
912       priv->ctx.new_headers = FALSE;
913     }
914 
915     size = gst_buffer_get_size (buf);
916 
917     GST_LOG_OBJECT (enc, "taking %" G_GSIZE_FORMAT " bytes for output", size);
918     buf = gst_buffer_make_writable (buf);
919 
920     /* decorate */
921     if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (priv->base_ts))) {
922       /* FIXME ? lookahead could lead to weird ts and duration ?
923        * (particularly if not in perfect mode) */
924       /* mind sample rounding and produce perfect output */
925       GST_BUFFER_TIMESTAMP (buf) = priv->base_ts +
926           gst_util_uint64_scale (priv->samples - ctx->lookahead, GST_SECOND,
927           ctx->info.rate);
928       GST_BUFFER_DTS (buf) = GST_BUFFER_TIMESTAMP (buf);
929       GST_DEBUG_OBJECT (enc, "out samples %d", samples);
930       if (G_LIKELY (samples > 0)) {
931         priv->samples += samples;
932         GST_BUFFER_DURATION (buf) = priv->base_ts +
933             gst_util_uint64_scale (priv->samples - ctx->lookahead, GST_SECOND,
934             ctx->info.rate) - GST_BUFFER_TIMESTAMP (buf);
935         priv->last_duration = GST_BUFFER_DURATION (buf);
936       } else {
937         /* duration forecast in case of handling remainder;
938          * the last one is probably like the previous one ... */
939         GST_BUFFER_DURATION (buf) = priv->last_duration;
940       }
941       if (priv->base_gp >= 0) {
942         /* pamper oggmux */
943         /* FIXME: in longer run, muxer should take care of this ... */
944         /* offset_end = granulepos for ogg muxer */
945         GST_BUFFER_OFFSET_END (buf) = priv->base_gp + priv->samples -
946             enc->priv->ctx.lookahead;
947         /* offset = timestamp corresponding to granulepos for ogg muxer */
948         GST_BUFFER_OFFSET (buf) =
949             GST_FRAMES_TO_CLOCK_TIME (GST_BUFFER_OFFSET_END (buf),
950             ctx->info.rate);
951       } else {
952         GST_BUFFER_OFFSET (buf) = priv->bytes_out;
953         GST_BUFFER_OFFSET_END (buf) = priv->bytes_out + size;
954       }
955     }
956 
957     if (klass->transform_meta) {
958       if (G_LIKELY (inbuf)) {
959         CopyMetaData data;
960 
961         data.encoder = enc;
962         data.outbuf = buf;
963         gst_buffer_foreach_meta (inbuf, foreach_metadata, &data);
964       } else {
965         GST_WARNING_OBJECT (enc,
966             "Can't copy metadata because input buffer disappeared");
967       }
968     }
969 
970     GST_OBJECT_LOCK (enc);
971     priv->bytes_out += size;
972     GST_OBJECT_UNLOCK (enc);
973 
974     if (G_UNLIKELY (priv->discont)) {
975       GST_LOG_OBJECT (enc, "marking discont");
976       GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
977       priv->discont = FALSE;
978     }
979 
980     if (klass->pre_push) {
981       /* last chance for subclass to do some dirty stuff */
982       ret = klass->pre_push (enc, &buf);
983       if (ret != GST_FLOW_OK || !buf) {
984         GST_DEBUG_OBJECT (enc, "subclass returned %s, buf %p",
985             gst_flow_get_name (ret), buf);
986 
987         if (buf)
988           gst_buffer_unref (buf);
989         goto exit;
990       }
991     }
992 
993     GST_LOG_OBJECT (enc,
994         "pushing buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT
995         ", duration %" GST_TIME_FORMAT, size,
996         GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
997         GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
998 
999     ret = gst_pad_push (enc->srcpad, buf);
1000     GST_LOG_OBJECT (enc, "buffer pushed: %s", gst_flow_get_name (ret));
1001   } else {
1002     /* merely advance samples, most work for that already done above */
1003     priv->samples += samples;
1004   }
1005 
1006 exit:
1007   if (inbuf)
1008     gst_buffer_unref (inbuf);
1009 
1010   GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
1011 
1012   return ret;
1013 
1014   /* ERRORS */
1015 no_caps:
1016   {
1017     GST_ELEMENT_ERROR (enc, STREAM, ENCODE, ("no caps set"), (NULL));
1018     if (buf)
1019       gst_buffer_unref (buf);
1020     return GST_FLOW_ERROR;
1021   }
1022 overflow:
1023   {
1024     GST_ELEMENT_ERROR (enc, STREAM, ENCODE,
1025         ("received more encoded samples %d than provided %d as inputs",
1026             samples, priv->offset / ctx->info.bpf), (NULL));
1027     if (buf)
1028       gst_buffer_unref (buf);
1029     ret = GST_FLOW_ERROR;
1030     /* no way we can let this pass */
1031     g_assert_not_reached ();
1032     /* really no way */
1033     goto exit;
1034   }
1035 }
1036 
1037  /* adapter tracking idea:
1038   * - start of adapter corresponds with what has already been encoded
1039   * (i.e. really returned by encoder subclass)
1040   * - start + offset is what needs to be fed to subclass next */
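 /* Worked example (made-up numbers): with bpf == 4 and frame_samples_min ==
  * frame_samples_max == 1024, push_buffers() below wraps the 4096 bytes at
  * adapter start + offset, hands them to handle_frame() and immediately bumps
  * offset by 4096; a later gst_audio_encoder_finish_frame (enc, buf, 1024)
  * then flushes those 4096 bytes from the adapter start and lowers offset
  * again, preserving both invariants above. */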
1041 static GstFlowReturn
1042 gst_audio_encoder_push_buffers (GstAudioEncoder * enc, gboolean force)
1043 {
1044   GstAudioEncoderClass *klass;
1045   GstAudioEncoderPrivate *priv;
1046   GstAudioEncoderContext *ctx;
1047   gint av, need;
1048   GstBuffer *buf;
1049   GstFlowReturn ret = GST_FLOW_OK;
1050 
1051   klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
1052 
1053   g_return_val_if_fail (klass->handle_frame != NULL, GST_FLOW_ERROR);
1054 
1055   priv = enc->priv;
1056   ctx = &enc->priv->ctx;
1057 
1058   while (ret == GST_FLOW_OK) {
1059 
1060     buf = NULL;
1061     av = gst_adapter_available (priv->adapter);
1062 
1063     g_assert (priv->offset <= av);
1064     av -= priv->offset;
1065 
1066     need =
1067         ctx->frame_samples_min >
1068         0 ? ctx->frame_samples_min * ctx->info.bpf : av;
1069     GST_LOG_OBJECT (enc, "available: %d, needed: %d, force: %d", av, need,
1070         force);
1071 
1072     if ((need > av) || !av) {
1073       if (G_UNLIKELY (force)) {
1074         priv->force = TRUE;
1075         need = av;
1076       } else {
1077         break;
1078       }
1079     } else {
1080       priv->force = FALSE;
1081     }
1082 
1083     if (ctx->frame_samples_max > 0)
1084       need = MIN (av, ctx->frame_samples_max * ctx->info.bpf);
1085 
1086     if (ctx->frame_samples_min == ctx->frame_samples_max) {
1087       /* if we have some extra metadata,
1088        * provide for integer multiple of frames to allow for better granularity
1089        * of processing */
1090       if (ctx->frame_samples_min > 0 && need) {
1091         if (ctx->frame_max > 1)
1092           need = need * MIN ((av / need), ctx->frame_max);
1093         else if (ctx->frame_max == 0)
1094           need = need * (av / need);
1095       }
1096     }
1097 
1098     priv->got_data = FALSE;
1099     if (G_LIKELY (need)) {
1100       const guint8 *data;
1101 
1102       data = gst_adapter_map (priv->adapter, priv->offset + need);
1103       buf =
1104           gst_buffer_new_wrapped_full (GST_MEMORY_FLAG_READONLY,
1105           (gpointer) data, priv->offset + need, priv->offset, need, NULL, NULL);
1106     } else if (!priv->drainable) {
1107       GST_DEBUG_OBJECT (enc, "non-drainable and no more data");
1108       goto finish;
1109     }
1110 
1111     GST_LOG_OBJECT (enc, "providing subclass with %d bytes at offset %d",
1112         need, priv->offset);
1113 
1114     /* mark this already as consumed,
1115      * which it should be when subclass gives us data in exchange for samples */
1116     priv->offset += need;
1117     GST_OBJECT_LOCK (enc);
1118     priv->samples_in += need / ctx->info.bpf;
1119     GST_OBJECT_UNLOCK (enc);
1120 
1121     /* subclass might not want to be bothered with leftover data,
1122      * so take care of that here if so, otherwise pass along */
1123     if (G_UNLIKELY (priv->force && priv->hard_min && buf)) {
1124       GST_DEBUG_OBJECT (enc, "bypassing subclass with leftover");
1125       ret = gst_audio_encoder_finish_frame (enc, NULL, -1);
1126     } else {
1127       ret = klass->handle_frame (enc, buf);
1128     }
1129 
1130     if (G_LIKELY (buf)) {
1131       gst_buffer_unref (buf);
1132       gst_adapter_unmap (priv->adapter);
1133     }
1134 
1135   finish:
1136     /* no data to feed, no leftover provided, then bail out */
1137     if (G_UNLIKELY (!buf && !priv->got_data)) {
1138       priv->drained = TRUE;
1139       GST_LOG_OBJECT (enc, "no more data drained from subclass");
1140       break;
1141     }
1142   }
1143 
1144 /* ohos.ext.func.0003: The media recorder service must support bypassing the abnormal streams to continue
1145  * recording normal streams. However, the gstpipeline cannot work properly if an error message is reported.
1146  * Some error messages are changed to warning messages. Then the media recording service can detects abnormal
1147  * streams by matching expected warning messages.
1148  */
1149 #ifdef OHOS_EXT_FUNC
1150   if ((ret != GST_FLOW_OK) && (ret != GST_FLOW_EOS)) {
1151     GST_ELEMENT_WARNING (enc, STREAM, ENCODE, (NULL),
1152         ("stream encode or push failed"));
1153     ret = GST_FLOW_ERROR;
1154   }
1155 #endif
1156 
1157   return ret;
1158 }
1159 
1160 static GstFlowReturn
1161 gst_audio_encoder_drain (GstAudioEncoder * enc)
1162 {
1163   GST_DEBUG_OBJECT (enc, "draining");
1164   if (enc->priv->drained)
1165     return GST_FLOW_OK;
1166   else {
1167     GST_DEBUG_OBJECT (enc, "... really");
1168     return gst_audio_encoder_push_buffers (enc, TRUE);
1169   }
1170 }
1171 
1172 static void
1173 gst_audio_encoder_set_base_gp (GstAudioEncoder * enc)
1174 {
1175   GstClockTime ts;
1176 
1177   if (!enc->priv->granule)
1178     return;
1179 
1180   /* use running time for granule */
1181   /* incoming data is clipped, so a valid input should yield a valid output */
1182   ts = gst_segment_to_running_time (&enc->input_segment, GST_FORMAT_TIME,
1183       enc->priv->base_ts);
1184   if (GST_CLOCK_TIME_IS_VALID (ts)) {
1185     enc->priv->base_gp =
1186         GST_CLOCK_TIME_TO_FRAMES (enc->priv->base_ts, enc->priv->ctx.info.rate);
1187     GST_DEBUG_OBJECT (enc, "new base gp %" G_GINT64_FORMAT, enc->priv->base_gp);
1188   } else {
1189     /* should reasonably have a valid base,
1190      * otherwise start at 0 if we did not already start there earlier */
1191     if (enc->priv->base_gp < 0) {
1192       enc->priv->base_gp = 0;
1193       GST_DEBUG_OBJECT (enc, "new base gp %" G_GINT64_FORMAT,
1194           enc->priv->base_gp);
1195     }
1196   }
1197 }
1198 
1199 static GstFlowReturn
1200 gst_audio_encoder_chain (GstPad * pad, GstObject * parent, GstBuffer * buffer)
1201 {
1202   GstAudioEncoder *enc;
1203   GstAudioEncoderPrivate *priv;
1204   GstAudioEncoderContext *ctx;
1205   GstFlowReturn ret = GST_FLOW_OK;
1206   gboolean discont;
1207   gsize size;
1208 
1209   enc = GST_AUDIO_ENCODER (parent);
1210 
1211   priv = enc->priv;
1212   ctx = &enc->priv->ctx;
1213 
1214   GST_AUDIO_ENCODER_STREAM_LOCK (enc);
1215 
1216   if (G_UNLIKELY (priv->do_caps)) {
1217     GstCaps *caps = gst_pad_get_current_caps (enc->sinkpad);
1218     if (!caps)
1219       goto not_negotiated;
1220     if (!gst_audio_encoder_sink_setcaps (enc, caps)) {
1221       gst_caps_unref (caps);
1222       goto not_negotiated;
1223     }
1224     gst_caps_unref (caps);
1225     priv->do_caps = FALSE;
1226   }
1227 
1228   /* should know what is coming by now */
1229   if (!ctx->info.bpf)
1230     goto not_negotiated;
1231 
1232   size = gst_buffer_get_size (buffer);
1233 
1234   GST_LOG_OBJECT (enc,
1235       "received buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT
1236       ", duration %" GST_TIME_FORMAT, size,
1237       GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)),
1238       GST_TIME_ARGS (GST_BUFFER_DURATION (buffer)));
1239 
1240   /* input should be a whole number of sample frames */
1241   if (size % ctx->info.bpf)
1242     goto wrong_buffer;
1243 
1244 #ifndef GST_DISABLE_GST_DEBUG
1245   {
1246     GstClockTime duration;
1247     GstClockTimeDiff diff;
1248 
1249     /* verify buffer duration */
1250     duration = gst_util_uint64_scale (size, GST_SECOND,
1251         ctx->info.rate * ctx->info.bpf);
1252     diff = GST_CLOCK_DIFF (duration, GST_BUFFER_DURATION (buffer));
1253     if (GST_BUFFER_DURATION (buffer) != GST_CLOCK_TIME_NONE &&
1254         (diff > GST_SECOND / ctx->info.rate / 2 ||
1255             diff < -GST_SECOND / ctx->info.rate / 2)) {
1256       GST_DEBUG_OBJECT (enc, "incoming buffer had incorrect duration %"
1257           GST_TIME_FORMAT ", expected duration %" GST_TIME_FORMAT,
1258           GST_TIME_ARGS (GST_BUFFER_DURATION (buffer)),
1259           GST_TIME_ARGS (duration));
1260     }
1261   }
1262 #endif
1263 
1264   discont = GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DISCONT);
1265   if (G_UNLIKELY (discont)) {
1266     GST_LOG_OBJECT (enc, "marked discont");
1267     enc->priv->discont = discont;
1268   }
1269 
1270   /* clip to segment */
1271   buffer = gst_audio_buffer_clip (buffer, &enc->input_segment, ctx->info.rate,
1272       ctx->info.bpf);
1273   if (G_UNLIKELY (!buffer)) {
1274     GST_DEBUG_OBJECT (enc, "no data after clipping to segment");
1275     goto done;
1276   }
1277 
1278   size = gst_buffer_get_size (buffer);
1279 
1280   GST_LOG_OBJECT (enc,
1281       "buffer after segment clipping has size %" G_GSIZE_FORMAT " with ts %"
1282       GST_TIME_FORMAT ", duration %" GST_TIME_FORMAT, size,
1283       GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)),
1284       GST_TIME_ARGS (GST_BUFFER_DURATION (buffer)));
1285 
1286   if (!GST_CLOCK_TIME_IS_VALID (priv->base_ts)) {
1287     priv->base_ts = GST_BUFFER_TIMESTAMP (buffer);
1288     GST_DEBUG_OBJECT (enc, "new base ts %" GST_TIME_FORMAT,
1289         GST_TIME_ARGS (priv->base_ts));
1290     gst_audio_encoder_set_base_gp (enc);
1291   }
1292 
1293   /* check for continuity;
1294    * checked elsewhere in non-perfect case */
1295   if (enc->priv->perfect_ts) {
1296     GstClockTimeDiff diff = 0;
1297     GstClockTime next_ts = 0;
1298 
1299     if (GST_BUFFER_TIMESTAMP_IS_VALID (buffer) &&
1300         GST_CLOCK_TIME_IS_VALID (priv->base_ts)) {
1301       guint64 samples;
1302 
1303       samples = priv->samples +
1304           gst_adapter_available (priv->adapter) / ctx->info.bpf;
1305       next_ts = priv->base_ts +
1306           gst_util_uint64_scale (samples, GST_SECOND, ctx->info.rate);
1307       GST_LOG_OBJECT (enc, "buffer is %" G_GUINT64_FORMAT
1308           " samples past base_ts %" GST_TIME_FORMAT
1309           ", expected ts %" GST_TIME_FORMAT, samples,
1310           GST_TIME_ARGS (priv->base_ts), GST_TIME_ARGS (next_ts));
1311       diff = GST_CLOCK_DIFF (next_ts, GST_BUFFER_TIMESTAMP (buffer));
1312       GST_LOG_OBJECT (enc, "ts diff %d ms", (gint) (diff / GST_MSECOND));
1313       /* if within tolerance,
1314        * discard buffer ts and carry on producing perfect stream,
1315        * otherwise clip or resync to ts */
1316       if (G_UNLIKELY (diff < -enc->priv->tolerance ||
1317               diff > enc->priv->tolerance)) {
1318         GST_DEBUG_OBJECT (enc, "marked discont");
1319         discont = TRUE;
1320       }
1321     }
1322 
1323     /* do some fancy tweaking in hard resync case */
1324     if (discont && enc->priv->hard_resync) {
1325       if (diff < 0) {
1326         guint64 diff_bytes;
1327 
1328         GST_WARNING_OBJECT (enc, "Buffer is older than expected ts %"
1329             GST_TIME_FORMAT ".  Clipping buffer", GST_TIME_ARGS (next_ts));
1330 
1331         diff_bytes =
1332             GST_CLOCK_TIME_TO_FRAMES (-diff, ctx->info.rate) * ctx->info.bpf;
1333         if (diff_bytes >= size) {
1334           gst_buffer_unref (buffer);
1335           goto done;
1336         }
1337         buffer = gst_buffer_make_writable (buffer);
1338         gst_buffer_resize (buffer, diff_bytes, size - diff_bytes);
1339 
1340         GST_BUFFER_TIMESTAMP (buffer) += diff;
1341         /* care even less about duration after this */
1342       } else {
1343         /* drain stuff prior to resync */
1344         gst_audio_encoder_drain (enc);
1345       }
1346     }
1347     if (discont) {
1348       /* now re-sync ts */
1349       GstClockTime shift =
1350           gst_util_uint64_scale (gst_adapter_available (priv->adapter),
1351           GST_SECOND, ctx->info.rate * ctx->info.bpf);
1352 
1353       if (G_UNLIKELY (shift > GST_BUFFER_TIMESTAMP (buffer))) {
1354         /* ERROR */
1355         goto wrong_time;
1356       }
1357       /* arrange for newly added samples to come out with the ts
1358        * of the incoming buffer that adds these */
1359       priv->base_ts = GST_BUFFER_TIMESTAMP (buffer) - shift;
1360       priv->samples = 0;
1361       gst_audio_encoder_set_base_gp (enc);
1362       priv->discont |= discont;
1363     }
1364   }
1365 
1366   gst_adapter_push (enc->priv->adapter, buffer);
1367   /* new stuff, so we can push subclass again */
1368   enc->priv->drained = FALSE;
1369 
1370   ret = gst_audio_encoder_push_buffers (enc, FALSE);
1371 
1372 done:
1373   GST_LOG_OBJECT (enc, "chain leaving");
1374 
1375   GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
1376 
1377   return ret;
1378 
1379   /* ERRORS */
1380 not_negotiated:
1381   {
1382     GST_ELEMENT_WARNING (enc, CORE, NEGOTIATION, (NULL),
1383         ("encoder not initialized"));
1384     gst_buffer_unref (buffer);
1385     ret = GST_FLOW_NOT_NEGOTIATED;
1386     goto done;
1387   }
1388 wrong_buffer:
1389   {
1390     GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
1391         ("buffer size %" G_GSIZE_FORMAT " not a multiple of %d",
1392             gst_buffer_get_size (buffer), ctx->info.bpf));
1393     gst_buffer_unref (buffer);
1394     ret = GST_FLOW_ERROR;
1395     goto done;
1396   }
1397 wrong_time:
1398   {
1399     GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
1400         ("buffer going too far back in time"));
1401     gst_buffer_unref (buffer);
1402     ret = GST_FLOW_ERROR;
1403     goto done;
1404   }
1405 }
1406 
1407 static gboolean
1408 gst_audio_encoder_sink_setcaps (GstAudioEncoder * enc, GstCaps * caps)
1409 {
1410   GstAudioEncoderClass *klass;
1411   GstAudioEncoderContext *ctx;
1412   GstAudioInfo state;
1413   gboolean res = TRUE;
1414   guint old_rate;
1415 
1416   klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
1417 
1418   /* subclass must do something here ... */
1419   g_return_val_if_fail (klass->set_format != NULL, FALSE);
1420 
1421   ctx = &enc->priv->ctx;
1422 
1423   GST_AUDIO_ENCODER_STREAM_LOCK (enc);
1424 
1425   GST_DEBUG_OBJECT (enc, "caps: %" GST_PTR_FORMAT, caps);
1426 
1427   if (!gst_caps_is_fixed (caps))
1428     goto refuse_caps;
1429 
1430   if (enc->priv->ctx.input_caps
1431       && gst_caps_is_equal (enc->priv->ctx.input_caps, caps))
1432     goto same_caps;
1433 
1434   if (!gst_audio_info_from_caps (&state, caps))
1435     goto refuse_caps;
1436 
1437   if (enc->priv->ctx.input_caps && gst_audio_info_is_equal (&state, &ctx->info))
1438     goto same_caps;
1439 
1440   /* adjust ts tracking to new sample rate */
1441   old_rate = GST_AUDIO_INFO_RATE (&ctx->info);
1442   if (GST_CLOCK_TIME_IS_VALID (enc->priv->base_ts) && old_rate) {
1443     enc->priv->base_ts +=
1444         GST_FRAMES_TO_CLOCK_TIME (enc->priv->samples, old_rate);
1445     enc->priv->samples = 0;
1446   }
1447 
1448   /* drain any pending old data stuff */
1449   gst_audio_encoder_drain (enc);
1450 
1451   /* context defaults */
1452   /* FIXME 2.0: This is quite unexpected behaviour. We should never
1453    * just reset *settings* of a subclass inside the base class */
1454   enc->priv->ctx.frame_samples_min = 0;
1455   enc->priv->ctx.frame_samples_max = 0;
1456   enc->priv->ctx.frame_max = 0;
1457   enc->priv->ctx.lookahead = 0;
1458 
1459   if (klass->set_format)
1460     res = klass->set_format (enc, &state);
1461 
1462   if (res) {
1463     GST_OBJECT_LOCK (enc);
1464     ctx->info = state;
1465     gst_caps_replace (&enc->priv->ctx.input_caps, caps);
1466     GST_OBJECT_UNLOCK (enc);
1467   } else {
1468     /* invalidate state to ensure no casual carrying on */
1469     GST_DEBUG_OBJECT (enc, "subclass did not accept format");
1470     gst_audio_info_init (&state);
1471     goto exit;
1472   }
1473 
1474 exit:
1475 
1476   GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
1477 
1478   return res;
1479 
1480 same_caps:
1481   {
1482     GST_DEBUG_OBJECT (enc, "new audio format identical to configured format");
1483     goto exit;
1484   }
1485 
1486   /* ERRORS */
1487 refuse_caps:
1488   {
1489     GST_WARNING_OBJECT (enc, "rejected caps %" GST_PTR_FORMAT, caps);
1490     goto exit;
1491   }
1492 }
1493 
1494 
1495 /**
1496  * gst_audio_encoder_proxy_getcaps:
1497  * @enc: a #GstAudioEncoder
1498  * @caps: (allow-none): initial caps
1499  * @filter: (allow-none): filter caps
1500  *
1501  * Returns caps that express @caps (or sink template caps if @caps == NULL)
1502  * restricted to channel/rate combinations supported by downstream elements
1503  * (e.g. muxers).
1504  *
1505  * Returns: (transfer full): a #GstCaps owned by caller
1506  */
1507 GstCaps *
1508 gst_audio_encoder_proxy_getcaps (GstAudioEncoder * enc, GstCaps * caps,
1509     GstCaps * filter)
1510 {
1511   return __gst_audio_element_proxy_getcaps (GST_ELEMENT_CAST (enc),
1512       GST_AUDIO_ENCODER_SINK_PAD (enc), GST_AUDIO_ENCODER_SRC_PAD (enc),
1513       caps, filter);
1514 }
1515 
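/* Illustrative sketch (not part of the original code): a subclass ::getcaps
 * implementation could proxy its own restricted raw caps rather than the full
 * sink template caps.  The caps string and the "my_enc" prefix below are
 * hypothetical.
 *
 *   static GstCaps *
 *   my_enc_getcaps (GstAudioEncoder * enc, GstCaps * filter)
 *   {
 *     GstCaps *own, *res;
 *
 *     own = gst_caps_from_string ("audio/x-raw, rate = (int) { 44100, 48000 }");
 *     res = gst_audio_encoder_proxy_getcaps (enc, own, filter);
 *     gst_caps_unref (own);
 *
 *     return res;
 *   }
 */
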
1516 static GstCaps *
1517 gst_audio_encoder_getcaps_default (GstAudioEncoder * enc, GstCaps * filter)
1518 {
1519   GstCaps *caps;
1520 
1521   caps = gst_audio_encoder_proxy_getcaps (enc, NULL, filter);
1522   GST_LOG_OBJECT (enc, "returning caps %" GST_PTR_FORMAT, caps);
1523 
1524   return caps;
1525 }
1526 
1527 static GList *
1528 _flush_events (GstPad * pad, GList * events)
1529 {
1530   GList *tmp;
1531 
1532   for (tmp = events; tmp; tmp = tmp->next) {
1533     if (GST_EVENT_TYPE (tmp->data) != GST_EVENT_EOS &&
1534         GST_EVENT_TYPE (tmp->data) != GST_EVENT_SEGMENT &&
1535         GST_EVENT_IS_STICKY (tmp->data)) {
1536       gst_pad_store_sticky_event (pad, GST_EVENT_CAST (tmp->data));
1537     }
1538     gst_event_unref (tmp->data);
1539   }
1540   g_list_free (events);
1541 
1542   return NULL;
1543 }
1544 
1545 static gboolean
1546 gst_audio_encoder_sink_event_default (GstAudioEncoder * enc, GstEvent * event)
1547 {
1548   GstAudioEncoderClass *klass;
1549   gboolean res;
1550 
1551   klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
1552 
1553   switch (GST_EVENT_TYPE (event)) {
1554     case GST_EVENT_SEGMENT:
1555     {
1556       GstSegment seg;
1557 
1558       gst_event_copy_segment (event, &seg);
1559 
1560       if (seg.format == GST_FORMAT_TIME) {
1561         GST_DEBUG_OBJECT (enc, "received TIME SEGMENT %" GST_SEGMENT_FORMAT,
1562             &seg);
1563       } else {
1564         GST_DEBUG_OBJECT (enc, "received SEGMENT %" GST_SEGMENT_FORMAT, &seg);
1565         GST_DEBUG_OBJECT (enc, "unsupported format; ignoring");
1566         res = TRUE;
1567         break;
1568       }
1569 
1570       GST_AUDIO_ENCODER_STREAM_LOCK (enc);
1571       /* finish current segment */
1572       gst_audio_encoder_drain (enc);
1573       /* reset partially for new segment */
1574       gst_audio_encoder_reset (enc, FALSE);
1575       /* and follow along with segment */
1576       enc->input_segment = seg;
1577 
1578       enc->priv->pending_events =
1579           g_list_append (enc->priv->pending_events, event);
1580       GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
1581 
1582       res = TRUE;
1583       break;
1584     }
1585 
1586     case GST_EVENT_FLUSH_START:
1587       res = gst_audio_encoder_push_event (enc, event);
1588       break;
1589 
1590     case GST_EVENT_FLUSH_STOP:
1591       GST_AUDIO_ENCODER_STREAM_LOCK (enc);
1592       /* discard any pending stuff */
1593       /* TODO route through drain ?? */
1594       if (!enc->priv->drained && klass->flush)
1595         klass->flush (enc);
1596       /* and get (re)set for the sequel */
1597       gst_audio_encoder_reset (enc, FALSE);
1598 
1599       enc->priv->pending_events = _flush_events (enc->srcpad,
1600           enc->priv->pending_events);
1601       GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
1602 
1603       res = gst_audio_encoder_push_event (enc, event);
1604       break;
1605 
1606     case GST_EVENT_EOS:
1607       GST_AUDIO_ENCODER_STREAM_LOCK (enc);
1608       gst_audio_encoder_drain (enc);
1609 
1610       /* check for pending events and tags */
1611       gst_audio_encoder_push_pending_events (enc);
1612       gst_audio_encoder_check_and_push_pending_tags (enc);
1613 
1614       GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
1615 
1616       /* forward immediately because no buffer or serialized event
1617        * will come after EOS and nothing could trigger another
1618        * _finish_frame() call. */
1619       res = gst_audio_encoder_push_event (enc, event);
1620       break;
1621 
1622     case GST_EVENT_CAPS:
1623     {
1624       GstCaps *caps;
1625 
1626       gst_event_parse_caps (event, &caps);
1627       enc->priv->do_caps = TRUE;
1628       res = TRUE;
1629       gst_event_unref (event);
1630       break;
1631     }
1632 
1633     case GST_EVENT_STREAM_START:
1634     {
1635       GST_AUDIO_ENCODER_STREAM_LOCK (enc);
1636       /* Flush upstream tags after a STREAM_START */
1637       GST_DEBUG_OBJECT (enc, "received STREAM_START. Clearing taglist");
1638       if (enc->priv->upstream_tags) {
1639         gst_tag_list_unref (enc->priv->upstream_tags);
1640         enc->priv->upstream_tags = NULL;
1641         enc->priv->tags_changed = TRUE;
1642       }
1643       GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
1644       res = gst_audio_encoder_push_event (enc, event);
1645       break;
1646     }
1647 
1648     case GST_EVENT_TAG:
1649     {
1650       GstTagList *tags;
1651 
1652       gst_event_parse_tag (event, &tags);
1653 
1654       if (gst_tag_list_get_scope (tags) == GST_TAG_SCOPE_STREAM) {
1655         GST_AUDIO_ENCODER_STREAM_LOCK (enc);
1656         if (enc->priv->upstream_tags != tags) {
1657           tags = gst_tag_list_copy (tags);
1658 
1659           /* FIXME: make generic based on GST_TAG_FLAG_ENCODED */
1660           gst_tag_list_remove_tag (tags, GST_TAG_CODEC);
1661           gst_tag_list_remove_tag (tags, GST_TAG_AUDIO_CODEC);
1662           gst_tag_list_remove_tag (tags, GST_TAG_VIDEO_CODEC);
1663           gst_tag_list_remove_tag (tags, GST_TAG_SUBTITLE_CODEC);
1664           gst_tag_list_remove_tag (tags, GST_TAG_CONTAINER_FORMAT);
1665           gst_tag_list_remove_tag (tags, GST_TAG_BITRATE);
1666           gst_tag_list_remove_tag (tags, GST_TAG_NOMINAL_BITRATE);
1667           gst_tag_list_remove_tag (tags, GST_TAG_MAXIMUM_BITRATE);
1668           gst_tag_list_remove_tag (tags, GST_TAG_MINIMUM_BITRATE);
1669           gst_tag_list_remove_tag (tags, GST_TAG_ENCODER);
1670           gst_tag_list_remove_tag (tags, GST_TAG_ENCODER_VERSION);
1671 
1672           if (enc->priv->upstream_tags)
1673             gst_tag_list_unref (enc->priv->upstream_tags);
1674           enc->priv->upstream_tags = tags;
1675           GST_INFO_OBJECT (enc, "upstream stream tags: %" GST_PTR_FORMAT, tags);
1676         }
1677         gst_event_unref (event);
1678         event = gst_audio_encoder_create_merged_tags_event (enc);
1679         GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
1680 
1681         /* No tags, go out of here instead of falling through */
1682         if (!event) {
1683           res = TRUE;
1684           break;
1685         }
1686       }
1687       /* fall through */
1688     }
1689     default:
1690       /* Forward non-serialized events immediately. */
1691       if (!GST_EVENT_IS_SERIALIZED (event)) {
1692         res =
1693             gst_pad_event_default (enc->sinkpad, GST_OBJECT_CAST (enc), event);
1694       } else {
1695         GST_AUDIO_ENCODER_STREAM_LOCK (enc);
1696         enc->priv->pending_events =
1697             g_list_append (enc->priv->pending_events, event);
1698         GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
1699         res = TRUE;
1700       }
1701       break;
1702   }
1703   return res;
1704 }
1705 
1706 static gboolean
1707 gst_audio_encoder_sink_event (GstPad * pad, GstObject * parent,
1708     GstEvent * event)
1709 {
1710   GstAudioEncoder *enc;
1711   GstAudioEncoderClass *klass;
1712   gboolean ret;
1713 
1714   enc = GST_AUDIO_ENCODER (parent);
1715   klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
1716 
1717   GST_DEBUG_OBJECT (enc, "received event %d, %s", GST_EVENT_TYPE (event),
1718       GST_EVENT_TYPE_NAME (event));
1719 
1720   if (klass->sink_event)
1721     ret = klass->sink_event (enc, event);
1722   else {
1723     gst_event_unref (event);
1724     ret = FALSE;
1725   }
1726 
1727   GST_DEBUG_OBJECT (enc, "event result %d", ret);
1728 
1729   return ret;
1730 }
1731 
1732 static gboolean
1733 gst_audio_encoder_sink_query_default (GstAudioEncoder * enc, GstQuery * query)
1734 {
1735   GstPad *pad = GST_AUDIO_ENCODER_SINK_PAD (enc);
1736   gboolean res = FALSE;
1737 
1738   switch (GST_QUERY_TYPE (query)) {
1739     case GST_QUERY_FORMATS:
1740     {
1741       gst_query_set_formats (query, 3,
1742           GST_FORMAT_TIME, GST_FORMAT_BYTES, GST_FORMAT_DEFAULT);
1743       res = TRUE;
1744       break;
1745     }
1746     case GST_QUERY_CONVERT:
1747     {
1748       GstFormat src_fmt, dest_fmt;
1749       gint64 src_val, dest_val;
1750 
1751       gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
1752       GST_OBJECT_LOCK (enc);
1753       res = gst_audio_info_convert (&enc->priv->ctx.info,
1754           src_fmt, src_val, dest_fmt, &dest_val);
1755       GST_OBJECT_UNLOCK (enc);
1756       if (!res)
1757         goto error;
1758       gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
1759       res = TRUE;
1760       break;
1761     }
1762     case GST_QUERY_CAPS:
1763     {
1764       GstCaps *filter, *caps;
1765       GstAudioEncoderClass *klass;
1766 
1767       gst_query_parse_caps (query, &filter);
1768 
1769       klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
1770       if (klass->getcaps) {
1771         caps = klass->getcaps (enc, filter);
1772         gst_query_set_caps_result (query, caps);
1773         gst_caps_unref (caps);
1774         res = TRUE;
1775       }
1776       break;
1777     }
1778     case GST_QUERY_ALLOCATION:
1779     {
1780       GstAudioEncoderClass *klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
1781 
1782       if (klass->propose_allocation)
1783         res = klass->propose_allocation (enc, query);
1784       break;
1785     }
1786     default:
1787       res = gst_pad_query_default (pad, GST_OBJECT (enc), query);
1788       break;
1789   }
1790 
1791 error:
1792   return res;
1793 }
1794 
1795 static gboolean
1796 gst_audio_encoder_sink_query (GstPad * pad, GstObject * parent,
1797     GstQuery * query)
1798 {
1799   GstAudioEncoder *encoder;
1800   GstAudioEncoderClass *encoder_class;
1801   gboolean ret = FALSE;
1802 
1803   encoder = GST_AUDIO_ENCODER (parent);
1804   encoder_class = GST_AUDIO_ENCODER_GET_CLASS (encoder);
1805 
1806   GST_DEBUG_OBJECT (encoder, "received query %d, %s", GST_QUERY_TYPE (query),
1807       GST_QUERY_TYPE_NAME (query));
1808 
1809   if (encoder_class->sink_query)
1810     ret = encoder_class->sink_query (encoder, query);
1811 
1812   return ret;
1813 }
1814 
1815 static gboolean
1816 gst_audio_encoder_src_event_default (GstAudioEncoder * enc, GstEvent * event)
1817 {
1818   gboolean res;
1819 
1820   switch (GST_EVENT_TYPE (event)) {
1821     default:
1822       res = gst_pad_event_default (enc->srcpad, GST_OBJECT_CAST (enc), event);
1823       break;
1824   }
1825   return res;
1826 }
1827 
1828 static gboolean
1829 gst_audio_encoder_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
1830 {
1831   GstAudioEncoder *enc;
1832   GstAudioEncoderClass *klass;
1833   gboolean ret;
1834 
1835   enc = GST_AUDIO_ENCODER (parent);
1836   klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
1837 
1838   GST_DEBUG_OBJECT (enc, "received event %d, %s", GST_EVENT_TYPE (event),
1839       GST_EVENT_TYPE_NAME (event));
1840 
1841   if (klass->src_event)
1842     ret = klass->src_event (enc, event);
1843   else {
1844     gst_event_unref (event);
1845     ret = FALSE;
1846   }
1847 
1848   return ret;
1849 }
1850 
1851 static gboolean
1852 gst_audio_encoder_decide_allocation_default (GstAudioEncoder * enc,
1853     GstQuery * query)
1854 {
1855   GstAllocator *allocator = NULL;
1856   GstAllocationParams params;
1857   gboolean update_allocator;
1858 
1859   /* we got configuration from our peer or the decide_allocation method,
1860    * parse them */
1861   if (gst_query_get_n_allocation_params (query) > 0) {
1862     /* try the allocator */
1863     gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
1864     update_allocator = TRUE;
1865   } else {
1866     allocator = NULL;
1867     gst_allocation_params_init (&params);
1868     update_allocator = FALSE;
1869   }
1870 
1871   if (update_allocator)
1872     gst_query_set_nth_allocation_param (query, 0, allocator, &params);
1873   else
1874     gst_query_add_allocation_param (query, allocator, &params);
1875   if (allocator)
1876     gst_object_unref (allocator);
1877 
1878   return TRUE;
1879 }
1880 
1881 static gboolean
1882 gst_audio_encoder_propose_allocation_default (GstAudioEncoder * enc,
1883     GstQuery * query)
1884 {
1885   return TRUE;
1886 }
1887 
1888 /* FIXME ? are any of these queries (other than latency) an encoder's business?
1889  * Also, the conversion stuff might seem to make sense, but it seems not to mind
1890  * segment stuff etc at all.
1891  * Supposedly that's backward compatibility ... */
1892 static gboolean
1893 gst_audio_encoder_src_query_default (GstAudioEncoder * enc, GstQuery * query)
1894 {
1895   GstPad *pad = GST_AUDIO_ENCODER_SRC_PAD (enc);
1896   gboolean res = FALSE;
1897 
1898   GST_LOG_OBJECT (enc, "handling query: %" GST_PTR_FORMAT, query);
1899 
1900   switch (GST_QUERY_TYPE (query)) {
1901     case GST_QUERY_POSITION:
1902     {
1903       GstFormat fmt, req_fmt;
1904       gint64 pos, val;
1905 
1906       if ((res = gst_pad_peer_query (enc->sinkpad, query))) {
1907         GST_LOG_OBJECT (enc, "returning peer response");
1908         break;
1909       }
1910 
1911       gst_query_parse_position (query, &req_fmt, NULL);
1912 
1913       /* Refuse BYTES format queries. If it made sense to
1914        * answer them, upstream would have already */
1915       if (req_fmt == GST_FORMAT_BYTES) {
1916         GST_LOG_OBJECT (enc, "Ignoring BYTES position query");
1917         break;
1918       }
1919 
1920       fmt = GST_FORMAT_TIME;
1921       if (!(res = gst_pad_peer_query_position (enc->sinkpad, fmt, &pos)))
1922         break;
1923 
1924       if ((res =
1925               gst_pad_peer_query_convert (enc->sinkpad, fmt, pos, req_fmt,
1926                   &val))) {
1927         gst_query_set_position (query, req_fmt, val);
1928       }
1929       break;
1930     }
1931     case GST_QUERY_DURATION:
1932     {
1933       GstFormat fmt, req_fmt;
1934       gint64 dur, val;
1935 
1936       if ((res = gst_pad_peer_query (enc->sinkpad, query))) {
1937         GST_LOG_OBJECT (enc, "returning peer response");
1938         break;
1939       }
1940 
1941       gst_query_parse_duration (query, &req_fmt, NULL);
1942 
1943       /* Refuse BYTES format queries. If it made sense to
1944        * answer them, upstream would have already */
1945       if (req_fmt == GST_FORMAT_BYTES) {
1946         GST_LOG_OBJECT (enc, "Ignoring BYTES duration query");
1947         break;
1948       }
1949 
1950       fmt = GST_FORMAT_TIME;
1951       if (!(res = gst_pad_peer_query_duration (enc->sinkpad, fmt, &dur)))
1952         break;
1953 
1954       if ((res =
1955               gst_pad_peer_query_convert (enc->sinkpad, fmt, dur, req_fmt,
1956                   &val))) {
1957         gst_query_set_duration (query, req_fmt, val);
1958       }
1959       break;
1960     }
1961     case GST_QUERY_FORMATS:
1962     {
1963       gst_query_set_formats (query, 2, GST_FORMAT_TIME, GST_FORMAT_BYTES);
1964       res = TRUE;
1965       break;
1966     }
1967     case GST_QUERY_CONVERT:
1968     {
1969       GstFormat src_fmt, dest_fmt;
1970       gint64 src_val, dest_val;
1971 
1972       gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
1973       GST_OBJECT_LOCK (enc);
1974       res = __gst_audio_encoded_audio_convert (&enc->priv->ctx.info,
1975           enc->priv->bytes_out, enc->priv->samples_in, src_fmt, src_val,
1976           &dest_fmt, &dest_val);
1977       GST_OBJECT_UNLOCK (enc);
1978       if (!res)
1979         break;
1980       gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
1981       break;
1982     }
1983     case GST_QUERY_LATENCY:
1984     {
1985       if ((res = gst_pad_peer_query (enc->sinkpad, query))) {
1986         gboolean live;
1987         GstClockTime min_latency, max_latency;
1988 
1989         gst_query_parse_latency (query, &live, &min_latency, &max_latency);
1990         GST_DEBUG_OBJECT (enc, "Peer latency: live %d, min %"
1991             GST_TIME_FORMAT " max %" GST_TIME_FORMAT, live,
1992             GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));
1993 
1994         GST_OBJECT_LOCK (enc);
1995         /* add our latency */
1996         min_latency += enc->priv->ctx.min_latency;
1997         if (max_latency == -1 || enc->priv->ctx.max_latency == -1)
1998           max_latency = -1;
1999         else
2000           max_latency += enc->priv->ctx.max_latency;
2001         GST_OBJECT_UNLOCK (enc);
2002 
2003         gst_query_set_latency (query, live, min_latency, max_latency);
2004       }
2005       break;
2006     }
2007     default:
2008       res = gst_pad_query_default (pad, GST_OBJECT (enc), query);
2009       break;
2010   }
2011 
2012   return res;
2013 }
2014 
2015 static gboolean
2016 gst_audio_encoder_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
2017 {
2018   GstAudioEncoder *encoder;
2019   GstAudioEncoderClass *encoder_class;
2020   gboolean ret = FALSE;
2021 
2022   encoder = GST_AUDIO_ENCODER (parent);
2023   encoder_class = GST_AUDIO_ENCODER_GET_CLASS (encoder);
2024 
2025   GST_DEBUG_OBJECT (encoder, "received query %d, %s", GST_QUERY_TYPE (query),
2026       GST_QUERY_TYPE_NAME (query));
2027 
2028   if (encoder_class->src_query)
2029     ret = encoder_class->src_query (encoder, query);
2030 
2031   return ret;
2032 }
2033 
2034 
2035 static void
2036 gst_audio_encoder_set_property (GObject * object, guint prop_id,
2037     const GValue * value, GParamSpec * pspec)
2038 {
2039   GstAudioEncoder *enc;
2040 
2041   enc = GST_AUDIO_ENCODER (object);
2042 
2043   switch (prop_id) {
2044     case PROP_PERFECT_TS:
2045       if (enc->priv->granule && !g_value_get_boolean (value))
2046         GST_WARNING_OBJECT (enc, "perfect-timestamp can not be set FALSE "
2047             "while granule handling is enabled");
2048       else
2049         enc->priv->perfect_ts = g_value_get_boolean (value);
2050       break;
2051     case PROP_HARD_RESYNC:
2052       enc->priv->hard_resync = g_value_get_boolean (value);
2053       break;
2054     case PROP_TOLERANCE:
2055       enc->priv->tolerance = g_value_get_int64 (value);
2056       break;
2057     default:
2058       G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
2059       break;
2060   }
2061 }
2062 
2063 static void
2064 gst_audio_encoder_get_property (GObject * object, guint prop_id,
2065     GValue * value, GParamSpec * pspec)
2066 {
2067   GstAudioEncoder *enc;
2068 
2069   enc = GST_AUDIO_ENCODER (object);
2070 
2071   switch (prop_id) {
2072     case PROP_PERFECT_TS:
2073       g_value_set_boolean (value, enc->priv->perfect_ts);
2074       break;
2075     case PROP_GRANULE:
2076       g_value_set_boolean (value, enc->priv->granule);
2077       break;
2078     case PROP_HARD_RESYNC:
2079       g_value_set_boolean (value, enc->priv->hard_resync);
2080       break;
2081     case PROP_TOLERANCE:
2082       g_value_set_int64 (value, enc->priv->tolerance);
2083       break;
2084     default:
2085       G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
2086       break;
2087   }
2088 }
2089 
2090 static gboolean
2091 gst_audio_encoder_activate (GstAudioEncoder * enc, gboolean active)
2092 {
2093   GstAudioEncoderClass *klass;
2094   gboolean result = TRUE;
2095 
2096   klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
2097 
2098   g_return_val_if_fail (!enc->priv->granule || enc->priv->perfect_ts, FALSE);
2099 
2100   GST_DEBUG_OBJECT (enc, "activate %d", active);
2101 
2102   if (active) {
2103     /* arrange clean state */
2104     gst_audio_encoder_reset (enc, TRUE);
2105 
2106     if (!enc->priv->active && klass->start)
2107       result = klass->start (enc);
2108   } else {
2109     /* We must make sure streaming has finished before resetting things
2110      * and calling the ::stop vfunc */
2111     GST_PAD_STREAM_LOCK (enc->sinkpad);
2112     GST_PAD_STREAM_UNLOCK (enc->sinkpad);
2113 
2114     if (enc->priv->active && klass->stop)
2115       result = klass->stop (enc);
2116 
2117     /* clean up */
2118     gst_audio_encoder_reset (enc, TRUE);
2119   }
2120   GST_DEBUG_OBJECT (enc, "activate return: %d", result);
2121   return result;
2122 }
2123 
2124 
2125 static gboolean
2126 gst_audio_encoder_sink_activate_mode (GstPad * pad, GstObject * parent,
2127     GstPadMode mode, gboolean active)
2128 {
2129   gboolean result = TRUE;
2130   GstAudioEncoder *enc;
2131 
2132   enc = GST_AUDIO_ENCODER (parent);
2133 
2134   GST_DEBUG_OBJECT (enc, "sink activate push %d", active);
2135 
2136   result = gst_audio_encoder_activate (enc, active);
2137 
2138   if (result)
2139     enc->priv->active = active;
2140 
2141   GST_DEBUG_OBJECT (enc, "sink activate push return: %d", result);
2142 
2143   return result;
2144 }
2145 
2146 /**
2147  * gst_audio_encoder_get_audio_info:
2148  * @enc: a #GstAudioEncoder
2149  *
2150  * Returns: a #GstAudioInfo describing the input audio format
2151  */
2152 GstAudioInfo *
2153 gst_audio_encoder_get_audio_info (GstAudioEncoder * enc)
2154 {
2155   g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), NULL);
2156 
2157   return &enc->priv->ctx.info;
2158 }
2159 
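/* Illustrative sketch: a subclass typically reads the negotiated input format
 * from here inside its ::set_format or ::handle_frame, e.g. to pick up the
 * sample rate and channel count.
 *
 *   GstAudioInfo *info = gst_audio_encoder_get_audio_info (enc);
 *   gint rate = GST_AUDIO_INFO_RATE (info);
 *   gint channels = GST_AUDIO_INFO_CHANNELS (info);
 */
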
2160 /**
2161  * gst_audio_encoder_set_frame_samples_min:
2162  * @enc: a #GstAudioEncoder
2163  * @num: number of samples per frame
2164  *
2165  * Sets the minimum number of samples (per channel) the subclass needs
2166  * to be handed at once; if 0, it will be handed all available data.
2167  *
2168  * If an exact number of samples is required, gst_audio_encoder_set_frame_samples_max()
2169  * must be called with the same number.
2170  *
2171  * Note: This value will be reset to 0 every time before
2172  * #GstAudioEncoderClass.set_format() is called.
2173  */
2174 void
2175 gst_audio_encoder_set_frame_samples_min (GstAudioEncoder * enc, gint num)
2176 {
2177   g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2178 
2179   enc->priv->ctx.frame_samples_min = num;
2180   GST_LOG_OBJECT (enc, "set to %d", num);
2181 }
2182 
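/* Illustrative sketch: a fixed-frame-size codec would typically pin both the
 * minimum and the maximum to the same value from its ::set_format, as the
 * documentation above requires; the 1024 samples per frame below is only an
 * example value.
 *
 *   gst_audio_encoder_set_frame_samples_min (enc, 1024);
 *   gst_audio_encoder_set_frame_samples_max (enc, 1024);
 *   gst_audio_encoder_set_frame_max (enc, 1);
 */
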
2183 /**
2184  * gst_audio_encoder_get_frame_samples_min:
2185  * @enc: a #GstAudioEncoder
2186  *
2187  * Returns: currently minimum requested samples per frame
2188  */
2189 gint
2190 gst_audio_encoder_get_frame_samples_min (GstAudioEncoder * enc)
2191 {
2192   g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), 0);
2193 
2194   return enc->priv->ctx.frame_samples_min;
2195 }
2196 
2197 /**
2198  * gst_audio_encoder_set_frame_samples_max:
2199  * @enc: a #GstAudioEncoder
2200  * @num: number of samples per frame
2201  *
2202  * Sets the maximum number of samples (per channel) the subclass will
2203  * be handed at once; if 0, it will be handed all available data.
2204  *
2205  * If an exact number of samples is required, gst_audio_encoder_set_frame_samples_min()
2206  * must be called with the same number.
2207  *
2208  * Note: This value will be reset to 0 every time before
2209  * #GstAudioEncoderClass.set_format() is called.
2210  */
2211 void
2212 gst_audio_encoder_set_frame_samples_max (GstAudioEncoder * enc, gint num)
2213 {
2214   g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2215 
2216   enc->priv->ctx.frame_samples_max = num;
2217   GST_LOG_OBJECT (enc, "set to %d", num);
2218 }
2219 
2220 /**
2221  * gst_audio_encoder_get_frame_samples_max:
2222  * @enc: a #GstAudioEncoder
2223  *
2224  * Returns: currently maximum requested samples per frame
2225  */
2226 gint
2227 gst_audio_encoder_get_frame_samples_max (GstAudioEncoder * enc)
2228 {
2229   g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), 0);
2230 
2231   return enc->priv->ctx.frame_samples_max;
2232 }
2233 
2234 /**
2235  * gst_audio_encoder_set_frame_max:
2236  * @enc: a #GstAudioEncoder
2237  * @num: number of frames
2238  *
2239  * Sets the maximum number of frames accepted at once (assumed to be at least 1).
2240  * Requires @frame_samples_min and @frame_samples_max to be equal.
2241  *
2242  * Note: This value will be reset to 0 every time before
2243  * #GstAudioEncoderClass.set_format() is called.
2244  */
2245 void
2246 gst_audio_encoder_set_frame_max (GstAudioEncoder * enc, gint num)
2247 {
2248   g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2249 
2250   enc->priv->ctx.frame_max = num;
2251   GST_LOG_OBJECT (enc, "set to %d", num);
2252 }
2253 
2254 /**
2255  * gst_audio_encoder_get_frame_max:
2256  * @enc: a #GstAudioEncoder
2257  *
2258  * Returns: currently configured maximum handled frames
2259  */
2260 gint
2261 gst_audio_encoder_get_frame_max (GstAudioEncoder * enc)
2262 {
2263   g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), 0);
2264 
2265   return enc->priv->ctx.frame_max;
2266 }
2267 
2268 /**
2269  * gst_audio_encoder_set_lookahead:
2270  * @enc: a #GstAudioEncoder
2271  * @num: lookahead
2272  *
2273  * Sets encoder lookahead (in units of input rate samples).
2274  *
2275  * Note: This value will be reset to 0 every time before
2276  * #GstAudioEncoderClass.set_format() is called.
2277  */
2278 void
2279 gst_audio_encoder_set_lookahead (GstAudioEncoder * enc, gint num)
2280 {
2281   g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2282 
2283   enc->priv->ctx.lookahead = num;
2284   GST_LOG_OBJECT (enc, "set to %d", num);
2285 }
2286 
2287 /**
2288  * gst_audio_encoder_get_lookahead:
2289  * @enc: a #GstAudioEncoder
2290  *
2291  * Returns: currently configured encoder lookahead
2292  */
2293 gint
2294 gst_audio_encoder_get_lookahead (GstAudioEncoder * enc)
2295 {
2296   g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), 0);
2297 
2298   return enc->priv->ctx.lookahead;
2299 }
2300 
2301 /**
2302  * gst_audio_encoder_set_latency:
2303  * @enc: a #GstAudioEncoder
2304  * @min: minimum latency
2305  * @max: maximum latency
2306  *
2307  * Sets encoder latency.
2308  */
2309 void
2310 gst_audio_encoder_set_latency (GstAudioEncoder * enc,
2311     GstClockTime min, GstClockTime max)
2312 {
2313   g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2314   g_return_if_fail (GST_CLOCK_TIME_IS_VALID (min));
2315   g_return_if_fail (min <= max);
2316 
2317   GST_OBJECT_LOCK (enc);
2318   enc->priv->ctx.min_latency = min;
2319   enc->priv->ctx.max_latency = max;
2320   GST_OBJECT_UNLOCK (enc);
2321 
2322   GST_LOG_OBJECT (enc, "set to %" GST_TIME_FORMAT "-%" GST_TIME_FORMAT,
2323       GST_TIME_ARGS (min), GST_TIME_ARGS (max));
2324 
2325   /* post latency message on the bus */
2326   gst_element_post_message (GST_ELEMENT (enc),
2327       gst_message_new_latency (GST_OBJECT (enc)));
2328 }
2329 
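/* Illustrative sketch: encoders commonly derive their latency from the
 * configured frame size and the negotiated sample rate; the 1024-sample frame
 * below is only an example value.
 *
 *   GstAudioInfo *info = gst_audio_encoder_get_audio_info (enc);
 *   GstClockTime latency =
 *       gst_util_uint64_scale (1024, GST_SECOND, GST_AUDIO_INFO_RATE (info));
 *   gst_audio_encoder_set_latency (enc, latency, latency);
 */
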
2330 /**
2331  * gst_audio_encoder_get_latency:
2332  * @enc: a #GstAudioEncoder
2333  * @min: (out) (allow-none): a pointer to storage to hold minimum latency
2334  * @max: (out) (allow-none): a pointer to storage to hold maximum latency
2335  *
2336  * Sets the variables pointed to by @min and @max to the currently configured
2337  * latency.
2338  */
2339 void
2340 gst_audio_encoder_get_latency (GstAudioEncoder * enc,
2341     GstClockTime * min, GstClockTime * max)
2342 {
2343   g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2344 
2345   GST_OBJECT_LOCK (enc);
2346   if (min)
2347     *min = enc->priv->ctx.min_latency;
2348   if (max)
2349     *max = enc->priv->ctx.max_latency;
2350   GST_OBJECT_UNLOCK (enc);
2351 }
2352 
2353 /**
2354  * gst_audio_encoder_set_headers:
2355  * @enc: a #GstAudioEncoder
2356  * @headers: (transfer full) (element-type Gst.Buffer): a list of
2357  *   #GstBuffer containing the codec header
2358  *
2359  * Set the codec headers to be sent downstream whenever requested.
2360  */
2361 void
2362 gst_audio_encoder_set_headers (GstAudioEncoder * enc, GList * headers)
2363 {
2364   GST_DEBUG_OBJECT (enc, "new headers %p", headers);
2365 
2366   if (enc->priv->ctx.headers) {
2367     g_list_foreach (enc->priv->ctx.headers, (GFunc) gst_buffer_unref, NULL);
2368     g_list_free (enc->priv->ctx.headers);
2369   }
2370   enc->priv->ctx.headers = headers;
2371   enc->priv->ctx.new_headers = TRUE;
2372 }
2373 
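/* Illustrative sketch: a subclass with in-band codec headers would hand them
 * over once known, typically from ::set_format; header1 and header2 stand in
 * for #GstBuffer instances produced by the actual codec library.
 *
 *   GList *headers = NULL;
 *
 *   headers = g_list_append (headers, header1);
 *   headers = g_list_append (headers, header2);
 *   gst_audio_encoder_set_headers (enc, headers);
 */
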
2374 /**
2375  * gst_audio_encoder_set_allocation_caps:
2376  * @enc: a #GstAudioEncoder
2377  * @allocation_caps: (allow-none): a #GstCaps or %NULL
2378  *
2379  * Sets the caps to use in the allocation query, if they differ from the
2380  * caps set on the src pad. Use this function before calling
2381  * gst_audio_encoder_negotiate(). If set to %NULL, the allocation
2382  * query will use the caps from the pad.
2383  *
2384  * Since: 1.10
2385  */
2386 void
2387 gst_audio_encoder_set_allocation_caps (GstAudioEncoder * enc,
2388     GstCaps * allocation_caps)
2389 {
2390   g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2391 
2392   gst_caps_replace (&enc->priv->ctx.allocation_caps, allocation_caps);
2393 }
2394 
2395 /**
2396  * gst_audio_encoder_set_mark_granule:
2397  * @enc: a #GstAudioEncoder
2398  * @enabled: new state
2399  *
2400  * Enable or disable encoder granule handling.
2401  *
2402  * MT safe.
2403  */
2404 void
2405 gst_audio_encoder_set_mark_granule (GstAudioEncoder * enc, gboolean enabled)
2406 {
2407   g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2408 
2409   GST_LOG_OBJECT (enc, "enabled: %d", enabled);
2410 
2411   GST_OBJECT_LOCK (enc);
2412   enc->priv->granule = enabled;
2413   GST_OBJECT_UNLOCK (enc);
2414 }
2415 
2416 /**
2417  * gst_audio_encoder_get_mark_granule:
2418  * @enc: a #GstAudioEncoder
2419  *
2420  * Queries if the encoder will handle granule marking.
2421  *
2422  * Returns: TRUE if granule marking is enabled.
2423  *
2424  * MT safe.
2425  */
2426 gboolean
2427 gst_audio_encoder_get_mark_granule (GstAudioEncoder * enc)
2428 {
2429   gboolean result;
2430 
2431   g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), FALSE);
2432 
2433   GST_OBJECT_LOCK (enc);
2434   result = enc->priv->granule;
2435   GST_OBJECT_UNLOCK (enc);
2436 
2437   return result;
2438 }
2439 
2440 /**
2441  * gst_audio_encoder_set_perfect_timestamp:
2442  * @enc: a #GstAudioEncoder
2443  * @enabled: new state
2444  *
2445  * Enable or disable encoder perfect output timestamp preference.
2446  *
2447  * MT safe.
2448  */
2449 void
2450 gst_audio_encoder_set_perfect_timestamp (GstAudioEncoder * enc,
2451     gboolean enabled)
2452 {
2453   g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2454 
2455   GST_LOG_OBJECT (enc, "enabled: %d", enabled);
2456 
2457   GST_OBJECT_LOCK (enc);
2458   enc->priv->perfect_ts = enabled;
2459   GST_OBJECT_UNLOCK (enc);
2460 }
2461 
2462 /**
2463  * gst_audio_encoder_get_perfect_timestamp:
2464  * @enc: a #GstAudioEncoder
2465  *
2466  * Queries encoder perfect timestamp behaviour.
2467  *
2468  * Returns: TRUE if perfect timestamp setting enabled.
2469  *
2470  * MT safe.
2471  */
2472 gboolean
2473 gst_audio_encoder_get_perfect_timestamp (GstAudioEncoder * enc)
2474 {
2475   gboolean result;
2476 
2477   g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), FALSE);
2478 
2479   GST_OBJECT_LOCK (enc);
2480   result = enc->priv->perfect_ts;
2481   GST_OBJECT_UNLOCK (enc);
2482 
2483   return result;
2484 }
2485 
2486 /**
2487  * gst_audio_encoder_set_hard_resync:
2488  * @enc: a #GstAudioEncoder
2489  * @enabled: new state
2490  *
2491  * Sets encoder hard resync handling.
2492  *
2493  * MT safe.
2494  */
2495 void
2496 gst_audio_encoder_set_hard_resync (GstAudioEncoder * enc, gboolean enabled)
2497 {
2498   g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2499 
2500   GST_LOG_OBJECT (enc, "enabled: %d", enabled);
2501 
2502   GST_OBJECT_LOCK (enc);
2503   enc->priv->hard_resync = enabled;
2504   GST_OBJECT_UNLOCK (enc);
2505 }
2506 
2507 /**
2508  * gst_audio_encoder_get_hard_resync:
2509  * @enc: a #GstAudioEncoder
2510  *
2511  * Queries encoder's hard resync setting.
2512  *
2513  * Returns: TRUE if hard resync is enabled.
2514  *
2515  * MT safe.
2516  */
2517 gboolean
2518 gst_audio_encoder_get_hard_resync (GstAudioEncoder * enc)
2519 {
2520   gboolean result;
2521 
2522   g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), FALSE);
2523 
2524   GST_OBJECT_LOCK (enc);
2525   result = enc->priv->hard_resync;
2526   GST_OBJECT_UNLOCK (enc);
2527 
2528   return result;
2529 }
2530 
2531 /**
2532  * gst_audio_encoder_set_tolerance:
2533  * @enc: a #GstAudioEncoder
2534  * @tolerance: new tolerance
2535  *
2536  * Configures encoder audio jitter tolerance threshold.
2537  *
2538  * MT safe.
2539  */
2540 void
2541 gst_audio_encoder_set_tolerance (GstAudioEncoder * enc, GstClockTime tolerance)
2542 {
2543   g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2544 
2545   GST_OBJECT_LOCK (enc);
2546   enc->priv->tolerance = tolerance;
2547   GST_OBJECT_UNLOCK (enc);
2548 
2549   GST_LOG_OBJECT (enc, "set to %" GST_TIME_FORMAT, GST_TIME_ARGS (tolerance));
2550 }
2551 
2552 /**
2553  * gst_audio_encoder_get_tolerance:
2554  * @enc: a #GstAudioEncoder
2555  *
2556  * Queries current audio jitter tolerance threshold.
2557  *
2558  * Returns: encoder audio jitter tolerance threshold.
2559  *
2560  * MT safe.
2561  */
2562 GstClockTime
2563 gst_audio_encoder_get_tolerance (GstAudioEncoder * enc)
2564 {
2565   GstClockTime result;
2566 
2567   g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), 0);
2568 
2569   GST_OBJECT_LOCK (enc);
2570   result = enc->priv->tolerance;
2571   GST_OBJECT_UNLOCK (enc);
2572 
2573   return result;
2574 }
2575 
2576 /**
2577  * gst_audio_encoder_set_hard_min:
2578  * @enc: a #GstAudioEncoder
2579  * @enabled: new state
2580  *
2581  * Configures encoder hard minimum handling.  If enabled, the subclass
2582  * will never be handed fewer samples than it configured, which otherwise
2583  * might occur near end-of-data handling.  Instead, the leftover samples
2584  * will simply be discarded.
2585  *
2586  * MT safe.
2587  */
2588 void
2589 gst_audio_encoder_set_hard_min (GstAudioEncoder * enc, gboolean enabled)
2590 {
2591   g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2592 
2593   GST_OBJECT_LOCK (enc);
2594   enc->priv->hard_min = enabled;
2595   GST_OBJECT_UNLOCK (enc);
2596 }
2597 
2598 /**
2599  * gst_audio_encoder_get_hard_min:
2600  * @enc: a #GstAudioEncoder
2601  *
2602  * Queries encoder hard minimum handling.
2603  *
2604  * Returns: TRUE if hard minimum handling is enabled.
2605  *
2606  * MT safe.
2607  */
2608 gboolean
2609 gst_audio_encoder_get_hard_min (GstAudioEncoder * enc)
2610 {
2611   gboolean result;
2612 
2613   g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), 0);
2614 
2615   GST_OBJECT_LOCK (enc);
2616   result = enc->priv->hard_min;
2617   GST_OBJECT_UNLOCK (enc);
2618 
2619   return result;
2620 }
2621 
2622 /**
2623  * gst_audio_encoder_set_drainable:
2624  * @enc: a #GstAudioEncoder
2625  * @enabled: new state
2626  *
2627  * Configures encoder drain handling.  If drainable, subclass might
2628  * be handed a NULL buffer to have it return any leftover encoded data.
2629  * Otherwise, it is not considered so capable and will only ever be passed
2630  * real data.
2631  *
2632  * MT safe.
2633  */
2634 void
2635 gst_audio_encoder_set_drainable (GstAudioEncoder * enc, gboolean enabled)
2636 {
2637   g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2638 
2639   GST_OBJECT_LOCK (enc);
2640   enc->priv->drainable = enabled;
2641   GST_OBJECT_UNLOCK (enc);
2642 }
2643 
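/* Illustrative sketch: a subclass that can neither flush trailing data nor
 * process partial frames might configure both behaviours from its ::start,
 * e.g.:
 *
 *   gst_audio_encoder_set_drainable (enc, FALSE);
 *   gst_audio_encoder_set_hard_min (enc, TRUE);
 */
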
2644 /**
2645  * gst_audio_encoder_get_drainable:
2646  * @enc: a #GstAudioEncoder
2647  *
2648  * Queries encoder drain handling.
2649  *
2650  * Returns: TRUE if drainable handling is enabled.
2651  *
2652  * MT safe.
2653  */
2654 gboolean
2655 gst_audio_encoder_get_drainable (GstAudioEncoder * enc)
2656 {
2657   gboolean result;
2658 
2659   g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), 0);
2660 
2661   GST_OBJECT_LOCK (enc);
2662   result = enc->priv->drainable;
2663   GST_OBJECT_UNLOCK (enc);
2664 
2665   return result;
2666 }
2667 
2668 /**
2669  * gst_audio_encoder_merge_tags:
2670  * @enc: a #GstAudioEncoder
2671  * @tags: (allow-none): a #GstTagList to merge, or NULL to unset
2672  *     previously-set tags
2673  * @mode: the #GstTagMergeMode to use, usually #GST_TAG_MERGE_REPLACE
2674  *
2675  * Sets the audio encoder tags and how they should be merged with any
2676  * upstream stream tags. This will override any tags previously-set
2677  * with gst_audio_encoder_merge_tags().
2678  *
2679  * Note that this is provided for convenience, and the subclass is
2680  * not required to use this and can still do tag handling on its own.
2681  *
2682  * MT safe.
2683  */
2684 void
2685 gst_audio_encoder_merge_tags (GstAudioEncoder * enc,
2686     const GstTagList * tags, GstTagMergeMode mode)
2687 {
2688   g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2689   g_return_if_fail (tags == NULL || GST_IS_TAG_LIST (tags));
2690   g_return_if_fail (tags == NULL || mode != GST_TAG_MERGE_UNDEFINED);
2691 
2692   GST_AUDIO_ENCODER_STREAM_LOCK (enc);
2693   if (enc->priv->tags != tags) {
2694     if (enc->priv->tags) {
2695       gst_tag_list_unref (enc->priv->tags);
2696       enc->priv->tags = NULL;
2697       enc->priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
2698     }
2699     if (tags) {
2700       enc->priv->tags = gst_tag_list_ref ((GstTagList *) tags);
2701       enc->priv->tags_merge_mode = mode;
2702     }
2703 
2704     GST_DEBUG_OBJECT (enc, "setting encoder tags to %" GST_PTR_FORMAT, tags);
2705     enc->priv->tags_changed = TRUE;
2706   }
2707   GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
2708 }
2709 
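/* Illustrative sketch: a subclass would typically advertise e.g. its bitrate
 * via the tag machinery above; the 128000 value below is only an example.
 *
 *   GstTagList *tags = gst_tag_list_new_empty ();
 *
 *   gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE,
 *       GST_TAG_BITRATE, 128000, NULL);
 *   gst_audio_encoder_merge_tags (enc, tags, GST_TAG_MERGE_REPLACE);
 *   gst_tag_list_unref (tags);
 */
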
2710 static gboolean
2711 gst_audio_encoder_negotiate_default (GstAudioEncoder * enc)
2712 {
2713   GstAudioEncoderClass *klass;
2714   gboolean res = TRUE;
2715   GstQuery *query = NULL;
2716   GstAllocator *allocator;
2717   GstAllocationParams params;
2718   GstCaps *caps, *prevcaps;
2719 
2720   g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), FALSE);
2721   g_return_val_if_fail (GST_IS_CAPS (enc->priv->ctx.caps), FALSE);
2722 
2723   klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
2724 
2725   caps = enc->priv->ctx.caps;
2726   if (enc->priv->ctx.allocation_caps == NULL)
2727     enc->priv->ctx.allocation_caps = gst_caps_ref (caps);
2728 
2729   GST_DEBUG_OBJECT (enc, "Setting srcpad caps %" GST_PTR_FORMAT, caps);
2730 
2731   if (enc->priv->pending_events) {
2732     GList **pending_events, *l;
2733 
2734     pending_events = &enc->priv->pending_events;
2735 
2736     GST_DEBUG_OBJECT (enc, "Pushing pending events");
2737     for (l = *pending_events; l;) {
2738       GstEvent *event = GST_EVENT (l->data);
2739       GList *tmp;
2740 
2741       if (GST_EVENT_TYPE (event) < GST_EVENT_CAPS) {
2742         gst_audio_encoder_push_event (enc, l->data);
2743         tmp = l;
2744         l = l->next;
2745         *pending_events = g_list_delete_link (*pending_events, tmp);
2746       } else {
2747         l = l->next;
2748       }
2749     }
2750   }
2751 
2752   prevcaps = gst_pad_get_current_caps (enc->srcpad);
2753   if (!prevcaps || !gst_caps_is_equal (prevcaps, caps))
2754     res = gst_pad_set_caps (enc->srcpad, caps);
2755   if (prevcaps)
2756     gst_caps_unref (prevcaps);
2757 
2758   if (!res)
2759     goto done;
2760   enc->priv->ctx.output_caps_changed = FALSE;
2761 
2762   query = gst_query_new_allocation (enc->priv->ctx.allocation_caps, TRUE);
2763   if (!gst_pad_peer_query (enc->srcpad, query)) {
2764     GST_DEBUG_OBJECT (enc, "didn't get downstream ALLOCATION hints");
2765   }
2766 
2767   g_assert (klass->decide_allocation != NULL);
2768   res = klass->decide_allocation (enc, query);
2769 
2770   GST_DEBUG_OBJECT (enc, "ALLOCATION (%d) params: %" GST_PTR_FORMAT, res,
2771       query);
2772 
2773   if (!res)
2774     goto no_decide_allocation;
2775 
2776   /* we got configuration from our peer or the decide_allocation method,
2777    * parse them */
2778   if (gst_query_get_n_allocation_params (query) > 0) {
2779     gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
2780   } else {
2781     allocator = NULL;
2782     gst_allocation_params_init (&params);
2783   }
2784 
2785   if (enc->priv->ctx.allocator)
2786     gst_object_unref (enc->priv->ctx.allocator);
2787   enc->priv->ctx.allocator = allocator;
2788   enc->priv->ctx.params = params;
2789 
2790 done:
2791   if (query)
2792     gst_query_unref (query);
2793 
2794   return res;
2795 
2796   /* ERRORS */
2797 no_decide_allocation:
2798   {
2799     GST_WARNING_OBJECT (enc, "Subclass failed to decide allocation");
2800     goto done;
2801   }
2802 }
2803 
2804 static gboolean
2805 gst_audio_encoder_negotiate_unlocked (GstAudioEncoder * enc)
2806 {
2807   GstAudioEncoderClass *klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
2808   gboolean ret = TRUE;
2809 
2810   if (G_LIKELY (klass->negotiate))
2811     ret = klass->negotiate (enc);
2812 
2813   return ret;
2814 }
2815 
2816 /**
2817  * gst_audio_encoder_negotiate:
2818  * @enc: a #GstAudioEncoder
2819  *
2820  * Negotiates with downstream elements to the currently configured #GstCaps.
2821  * Unmarks GST_PAD_FLAG_NEED_RECONFIGURE in any case, but marks it again
2822  * if negotiation fails.
2823  *
2824  * Returns: %TRUE if the negotiation succeeded, else %FALSE.
2825  */
2826 gboolean
2827 gst_audio_encoder_negotiate (GstAudioEncoder * enc)
2828 {
2829   GstAudioEncoderClass *klass;
2830   gboolean ret = TRUE;
2831 
2832   g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), FALSE);
2833 
2834   klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
2835 
2836   GST_AUDIO_ENCODER_STREAM_LOCK (enc);
2837   gst_pad_check_reconfigure (enc->srcpad);
2838   if (klass->negotiate) {
2839     ret = klass->negotiate (enc);
2840     if (!ret)
2841       gst_pad_mark_reconfigure (enc->srcpad);
2842   }
2843   GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
2844 
2845   return ret;
2846 }
2847 
2848 /**
2849  * gst_audio_encoder_set_output_format:
2850  * @enc: a #GstAudioEncoder
2851  * @caps: (transfer none): #GstCaps
2852  *
2853  * Configure output caps on the srcpad of @enc.
2854  *
2855  * Returns: %TRUE on success.
2856  */
2857 gboolean
2858 gst_audio_encoder_set_output_format (GstAudioEncoder * enc, GstCaps * caps)
2859 {
2860   gboolean res = TRUE;
2861   GstCaps *templ_caps;
2862 
2863   GST_DEBUG_OBJECT (enc, "Setting srcpad caps %" GST_PTR_FORMAT, caps);
2864 
2865   GST_AUDIO_ENCODER_STREAM_LOCK (enc);
2866   if (!gst_caps_is_fixed (caps))
2867     goto refuse_caps;
2868 
2869   /* Only allow caps that are a subset of the template caps */
2870   templ_caps = gst_pad_get_pad_template_caps (enc->srcpad);
2871   if (!gst_caps_is_subset (caps, templ_caps)) {
2872     gst_caps_unref (templ_caps);
2873     goto refuse_caps;
2874   }
2875   gst_caps_unref (templ_caps);
2876 
2877   gst_caps_replace (&enc->priv->ctx.caps, caps);
2878   enc->priv->ctx.output_caps_changed = TRUE;
2879 
2880 done:
2881   GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
2882 
2883   return res;
2884 
2885   /* ERRORS */
2886 refuse_caps:
2887   {
2888     GST_WARNING_OBJECT (enc, "refused caps %" GST_PTR_FORMAT, caps);
2889     res = FALSE;
2890     goto done;
2891   }
2892 }
2893 
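/* Illustrative sketch: a subclass usually fixes its output caps towards the
 * end of ::set_format; the "audio/x-my-codec" media type is hypothetical and
 * "info" stands for the #GstAudioInfo handed to ::set_format.
 *
 *   GstCaps *caps = gst_caps_new_simple ("audio/x-my-codec",
 *       "rate", G_TYPE_INT, GST_AUDIO_INFO_RATE (info),
 *       "channels", G_TYPE_INT, GST_AUDIO_INFO_CHANNELS (info), NULL);
 *
 *   gst_audio_encoder_set_output_format (enc, caps);
 *   gst_caps_unref (caps);
 */
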
2894 /**
2895  * gst_audio_encoder_allocate_output_buffer:
2896  * @enc: a #GstAudioEncoder
2897  * @size: size of the buffer
2898  *
2899  * Helper function that allocates a buffer to hold an encoded audio frame
2900  * for @enc's current output format.
2901  *
2902  * Returns: (transfer full): allocated buffer
2903  */
2904 GstBuffer *
2905 gst_audio_encoder_allocate_output_buffer (GstAudioEncoder * enc, gsize size)
2906 {
2907   GstBuffer *buffer = NULL;
2908   gboolean needs_reconfigure = FALSE;
2909 
2910   g_return_val_if_fail (size > 0, NULL);
2911 
2912   GST_DEBUG ("alloc src buffer");
2913 
2914   GST_AUDIO_ENCODER_STREAM_LOCK (enc);
2915 
2916   needs_reconfigure = gst_pad_check_reconfigure (enc->srcpad);
2917   if (G_UNLIKELY (enc->priv->ctx.output_caps_changed || (enc->priv->ctx.caps
2918               && needs_reconfigure))) {
2919     if (!gst_audio_encoder_negotiate_unlocked (enc)) {
2920       GST_INFO_OBJECT (enc, "Failed to negotiate, fallback allocation");
2921       gst_pad_mark_reconfigure (enc->srcpad);
2922       goto fallback;
2923     }
2924   }
2925 
2926   buffer =
2927       gst_buffer_new_allocate (enc->priv->ctx.allocator, size,
2928       &enc->priv->ctx.params);
2929   if (!buffer) {
2930     GST_INFO_OBJECT (enc, "couldn't allocate output buffer");
2931     goto fallback;
2932   }
2933 
2934   GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
2935 
2936   return buffer;
2937 
2938 fallback:
2939   buffer = gst_buffer_new_allocate (NULL, size, NULL);
2940   GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
2941 
2942   return buffer;
2943 }
2944 
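/* Illustrative sketch: a ::handle_frame implementation typically allocates
 * its output through this helper, fills it, and hands it to
 * gst_audio_encoder_finish_frame(); "encoded_size", "samples_consumed" and
 * the fill step are placeholders for the actual codec.
 *
 *   GstBuffer *outbuf;
 *   GstMapInfo map;
 *
 *   outbuf = gst_audio_encoder_allocate_output_buffer (enc, encoded_size);
 *   gst_buffer_map (outbuf, &map, GST_MAP_WRITE);
 *   // ... write encoded bytes into map.data ...
 *   gst_buffer_unmap (outbuf, &map);
 *
 *   return gst_audio_encoder_finish_frame (enc, outbuf, samples_consumed);
 */
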
2945 /**
2946  * gst_audio_encoder_get_allocator:
2947  * @enc: a #GstAudioEncoder
2948  * @allocator: (out) (allow-none) (transfer full): the #GstAllocator
2949  * used
2950  * @params: (out) (allow-none) (transfer full): the
2951  * #GstAllocationParams of @allocator
2952  *
2953  * Lets #GstAudioEncoder sub-classes know the memory @allocator
2954  * used by the base class and its @params.
2955  *
2956  * Unref the @allocator after use.
2957  */
2958 void
2959 gst_audio_encoder_get_allocator (GstAudioEncoder * enc,
2960     GstAllocator ** allocator, GstAllocationParams * params)
2961 {
2962   g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2963 
2964   if (allocator)
2965     *allocator = enc->priv->ctx.allocator ?
2966         gst_object_ref (enc->priv->ctx.allocator) : NULL;
2967 
2968   if (params)
2969     *params = enc->priv->ctx.params;
2970 }
2971
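/* Illustrative sketch: subclasses doing their own buffer allocation can reuse
 * the negotiated allocator and parameters obtained above; "size" stands for
 * the required output size.
 *
 *   GstAllocator *allocator = NULL;
 *   GstAllocationParams params;
 *   GstBuffer *buf;
 *
 *   gst_audio_encoder_get_allocator (enc, &allocator, &params);
 *   buf = gst_buffer_new_allocate (allocator, size, &params);
 *   if (allocator)
 *     gst_object_unref (allocator);
 */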