1 /* GStreamer
2 * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
3 * Copyright (C) 2011 Nokia Corporation. All rights reserved.
4 * Contact: Stefan Kost <stefan.kost@nokia.com>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Library General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Library General Public License for more details.
15 *
16 * You should have received a copy of the GNU Library General Public
17 * License along with this library; if not, write to the
18 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
19 * Boston, MA 02110-1301, USA.
20 */
21
22 /**
23 * SECTION:gstaudioencoder
24 * @title: GstAudioEncoder
25 * @short_description: Base class for audio encoders
26 * @see_also: #GstBaseTransform
27 *
28 * This base class is for audio encoders turning raw audio samples into
29 * encoded audio data.
30 *
31 * GstAudioEncoder and subclass should cooperate as follows.
32 *
33 * ## Configuration
34 *
35 * * Initially, GstAudioEncoder calls @start when the encoder element
36 * is activated, which allows subclass to perform any global setup.
37 *
38 * * GstAudioEncoder calls @set_format to inform subclass of the format
39 * of input audio data that it is about to receive. Subclass should
40 * setup for encoding and configure various base class parameters
41 * appropriately, notably those directing desired input data handling.
42 * While unlikely, it might be called more than once, if changed input
43 * parameters require reconfiguration.
44 *
45 * * GstAudioEncoder calls @stop at end of all processing.
46 *
47 * As of configuration stage, and throughout processing, GstAudioEncoder
48 * maintains various parameters that provide required context,
49 * e.g. describing the format of input audio data.
50 * Conversely, subclass can and should configure these context parameters
51 * to inform base class of its expectation w.r.t. buffer handling.
52 *
53 * ## Data processing
54 *
55 * * Base class gathers input sample data (as directed by the context's
56 * frame_samples and frame_max) and provides this to subclass' @handle_frame.
57 * * If codec processing results in encoded data, subclass should call
58 * gst_audio_encoder_finish_frame() to have encoded data pushed
59 * downstream. Alternatively, it might also call
60 * gst_audio_encoder_finish_frame() (with a NULL buffer and some number of
61 * dropped samples) to indicate dropped (non-encoded) samples.
62 * * Just prior to actually pushing a buffer downstream,
63 * it is passed to @pre_push.
64 * * During the encoding process GstAudioEncoder will handle both
65 * srcpad and sinkpad events. Sink events will be passed to subclass
66 * if @sink_event callback has been provided.
67 *
68 * ## Shutdown phase
69 *
70 * * GstAudioEncoder class calls @stop to inform the subclass that data
71 * processing will be stopped.
72 *
73 * Subclass is responsible for providing pad template caps for
74 * source and sink pads. The pads need to be named "sink" and "src". It also
75 * needs to set the fixed caps on the srcpad once the output format is known.
76 * This is typically done when base class calls subclass' @set_format function,
77 * though it might be delayed until calling gst_audio_encoder_finish_frame().
78 *
79 * In summary, the above process should have the subclass concentrating on
80 * codec data processing while leaving other matters, most notably timestamp
81 * handling, to the base class. While the subclass may exert more control
82 * in this area (see e.g. @pre_push), doing so is very much not recommended.
83 *
84 * In particular, base class will either favor tracking upstream timestamps
85 * (at the possible expense of jitter) or aim to arrange for a perfect stream of
86 * output timestamps, depending on #GstAudioEncoder:perfect-timestamp.
87 * However, in the latter case, the input may not be so perfect or ideal, which
88 * is handled as follows. An input timestamp is compared with the expected
89 * timestamp as dictated by input sample stream and if the deviation is less
90 * than #GstAudioEncoder:tolerance, the deviation is discarded.
91 * Otherwise, it is considered a discontinuity and the subsequent output
92 * timestamp is resynced to the new position after performing the configured
93 * discontinuity processing. In the non-perfect-timestamp case, an upstream
94 * variation exceeding tolerance only leads to marking DISCONT on the subsequent
95 * outgoing buffer (while timestamps are adjusted to upstream regardless of variation).
96 * While DISCONT is also marked in the perfect-timestamp case, this one
97 * optionally (see #GstAudioEncoder:hard-resync)
98 * performs some additional steps, such as clipping of (early) input samples
99 * or draining all currently remaining input data, depending on the direction
100 * of the discontinuity.
101 *
102 * If perfect timestamps are arranged, it is also possible to request the base
103 * class (usually set up by the subclass) to provide additional buffer metadata
104 * (in the OFFSET and OFFSET_END fields) according to granule-defined semantics
105 * currently needed by oggmux. Specifically, OFFSET_END is set to the granulepos
106 * (= sample count including this buffer) and OFFSET to the corresponding
107 * timestamp (as determined by the same sample count and sample rate).
108 *
109 * Things that the subclass needs to take care of:
110 *
111 * * Provide pad templates
112 * * Set source pad caps when appropriate
113 * * Inform base class of buffer processing needs using context's
114 * frame_samples and frame_max.
115 * * Set user-configurable properties to sane defaults for format and
116 * implementing codec at hand, e.g. those controlling timestamp behaviour
117 * and discontinuity processing.
118 * * Accept data in @handle_frame and provide encoded results to
119 * gst_audio_encoder_finish_frame().
120 *
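 * The following is a minimal, non-normative sketch of such a subclass. All
 * my_enc_* and my_codec_* names, MY_ENC, the "audio/x-mycodec" caps and the
 * fixed frame size of 1024 samples are hypothetical placeholders, not part of
 * the GstAudioEncoder API:
 *
 * |[<!-- language="C" -->
 * static gboolean
 * my_enc_set_format (GstAudioEncoder * enc, GstAudioInfo * info)
 * {
 *   GstCaps *caps;
 *
 *   // direct base class input handling: gather 1024 samples per frame
 *   gst_audio_encoder_set_frame_samples_min (enc, 1024);
 *   gst_audio_encoder_set_frame_samples_max (enc, 1024);
 *   gst_audio_encoder_set_frame_max (enc, 1);
 *
 *   // the output format is known as soon as the input format is known
 *   caps = gst_caps_new_simple ("audio/x-mycodec",
 *       "rate", G_TYPE_INT, GST_AUDIO_INFO_RATE (info),
 *       "channels", G_TYPE_INT, GST_AUDIO_INFO_CHANNELS (info), NULL);
 *   gst_audio_encoder_set_output_format (enc, caps);
 *   gst_caps_unref (caps);
 *
 *   return TRUE;
 * }
 *
 * static GstFlowReturn
 * my_enc_handle_frame (GstAudioEncoder * enc, GstBuffer * inbuf)
 * {
 *   GstBuffer *outbuf;
 *
 *   if (inbuf == NULL)
 *     return GST_FLOW_OK;   // draining and nothing left to encode
 *
 *   // hypothetical codec call producing one packet per 1024 samples
 *   outbuf = my_codec_encode (MY_ENC (enc), inbuf);
 *   return gst_audio_encoder_finish_frame (enc, outbuf, 1024);
 * }
 * ]|
 *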
121 */
122
123 #ifdef HAVE_CONFIG_H
124 # include "config.h"
125 #endif
126
127 #include "gstaudioencoder.h"
128 #include "gstaudioutilsprivate.h"
129 #include <gst/base/gstadapter.h>
130 #include <gst/audio/audio.h>
131 #include <gst/pbutils/descriptions.h>
132
133 #include <stdlib.h>
134 #include <string.h>
135
136
137 GST_DEBUG_CATEGORY_STATIC (gst_audio_encoder_debug);
138 #define GST_CAT_DEFAULT gst_audio_encoder_debug
139
140 enum
141 {
142 PROP_0,
143 PROP_PERFECT_TS,
144 PROP_GRANULE,
145 PROP_HARD_RESYNC,
146 PROP_TOLERANCE
147 };
148
149 #define DEFAULT_PERFECT_TS FALSE
150 #define DEFAULT_GRANULE FALSE
151 #define DEFAULT_HARD_RESYNC FALSE
152 #define DEFAULT_TOLERANCE 40000000
153 #define DEFAULT_HARD_MIN FALSE
154 #define DEFAULT_DRAINABLE TRUE
155
156 typedef struct _GstAudioEncoderContext
157 {
158 /* input */
159 /* last negotiated input caps */
160 GstCaps *input_caps;
161 /* last negotiated input info */
162 GstAudioInfo info;
163
164 /* output */
165 GstCaps *caps;
166 GstCaps *allocation_caps;
167 gboolean output_caps_changed;
168 gint frame_samples_min, frame_samples_max;
169 gint frame_max;
170 gint lookahead;
171 /* MT-protected (with LOCK) */
172 GstClockTime min_latency;
173 GstClockTime max_latency;
174
175 GList *headers;
176 gboolean new_headers;
177
178 GstAllocator *allocator;
179 GstAllocationParams params;
180 } GstAudioEncoderContext;
181
182 struct _GstAudioEncoderPrivate
183 {
184 /* activation status */
185 gboolean active;
186
187 /* input base/first ts as basis for output ts;
188 * kept nearly constant for perfect_ts,
189 * otherwise resyncs to upstream ts */
190 GstClockTime base_ts;
191 /* corresponding base granulepos */
192 gint64 base_gp;
193 /* input samples processed and sent downstream so far (w.r.t. base_ts) */
194 guint64 samples;
195
196 /* currently collected sample data */
197 GstAdapter *adapter;
198 /* offset in adapter up to which already supplied to encoder */
199 gint offset;
200 /* mark outgoing discont */
201 gboolean discont;
202 /* to guess duration of drained data */
203 GstClockTime last_duration;
204
205 /* subclass provided data in processing round */
206 gboolean got_data;
207 /* subclass gave all it could already */
208 gboolean drained;
209 /* subclass currently being forcibly drained */
210 gboolean force;
211 /* need to handle changed input caps */
212 gboolean do_caps;
213
214 /* output bps estimation */
215 /* global in samples seen */
216 guint64 samples_in;
217 /* global bytes sent out */
218 guint64 bytes_out;
219
220 /* context storage */
221 GstAudioEncoderContext ctx;
222
223 /* properties */
224 gint64 tolerance;
225 gboolean perfect_ts;
226 gboolean hard_resync;
227 gboolean granule;
228 gboolean hard_min;
229 gboolean drainable;
230
231 /* upstream stream tags (global tags are passed through as-is) */
232 GstTagList *upstream_tags;
233
234 /* subclass tags */
235 GstTagList *tags;
236 GstTagMergeMode tags_merge_mode;
237
238 gboolean tags_changed;
239
240 /* pending serialized sink events, will be sent from finish_frame() */
241 GList *pending_events;
242 };
243
244
245 static GstElementClass *parent_class = NULL;
246 static gint private_offset = 0;
247
248 /* cached quark to avoid contention on the global quark table lock */
249 #define META_TAG_AUDIO meta_tag_audio_quark
250 static GQuark meta_tag_audio_quark;
251
252 static void gst_audio_encoder_class_init (GstAudioEncoderClass * klass);
253 static void gst_audio_encoder_init (GstAudioEncoder * parse,
254 GstAudioEncoderClass * klass);
255
256 GType
257 gst_audio_encoder_get_type (void)
258 {
259 static GType audio_encoder_type = 0;
260
261 if (!audio_encoder_type) {
262 static const GTypeInfo audio_encoder_info = {
263 sizeof (GstAudioEncoderClass),
264 (GBaseInitFunc) NULL,
265 (GBaseFinalizeFunc) NULL,
266 (GClassInitFunc) gst_audio_encoder_class_init,
267 NULL,
268 NULL,
269 sizeof (GstAudioEncoder),
270 0,
271 (GInstanceInitFunc) gst_audio_encoder_init,
272 };
273 const GInterfaceInfo preset_interface_info = {
274 NULL, /* interface_init */
275 NULL, /* interface_finalize */
276 NULL /* interface_data */
277 };
278
279 audio_encoder_type = g_type_register_static (GST_TYPE_ELEMENT,
280 "GstAudioEncoder", &audio_encoder_info, G_TYPE_FLAG_ABSTRACT);
281
282 private_offset =
283 g_type_add_instance_private (audio_encoder_type,
284 sizeof (GstAudioEncoderPrivate));
285
286 g_type_add_interface_static (audio_encoder_type, GST_TYPE_PRESET,
287 &preset_interface_info);
288 }
289 return audio_encoder_type;
290 }
291
292 static inline GstAudioEncoderPrivate *
293 gst_audio_encoder_get_instance_private (GstAudioEncoder * self)
294 {
295 return (G_STRUCT_MEMBER_P (self, private_offset));
296 }
297
298 static void gst_audio_encoder_finalize (GObject * object);
299 static void gst_audio_encoder_reset (GstAudioEncoder * enc, gboolean full);
300
301 static void gst_audio_encoder_set_property (GObject * object,
302 guint prop_id, const GValue * value, GParamSpec * pspec);
303 static void gst_audio_encoder_get_property (GObject * object,
304 guint prop_id, GValue * value, GParamSpec * pspec);
305
306 static gboolean gst_audio_encoder_sink_activate_mode (GstPad * pad,
307 GstObject * parent, GstPadMode mode, gboolean active);
308
309 static GstCaps *gst_audio_encoder_getcaps_default (GstAudioEncoder * enc,
310 GstCaps * filter);
311
312 static gboolean gst_audio_encoder_sink_event_default (GstAudioEncoder * enc,
313 GstEvent * event);
314 static gboolean gst_audio_encoder_src_event_default (GstAudioEncoder * enc,
315 GstEvent * event);
316 static gboolean gst_audio_encoder_sink_event (GstPad * pad, GstObject * parent,
317 GstEvent * event);
318 static gboolean gst_audio_encoder_src_event (GstPad * pad, GstObject * parent,
319 GstEvent * event);
320 static gboolean gst_audio_encoder_sink_setcaps (GstAudioEncoder * enc,
321 GstCaps * caps);
322 static GstFlowReturn gst_audio_encoder_chain (GstPad * pad, GstObject * parent,
323 GstBuffer * buffer);
324 static gboolean gst_audio_encoder_src_query (GstPad * pad, GstObject * parent,
325 GstQuery * query);
326 static gboolean gst_audio_encoder_sink_query (GstPad * pad, GstObject * parent,
327 GstQuery * query);
328 static GstStateChangeReturn gst_audio_encoder_change_state (GstElement *
329 element, GstStateChange transition);
330
331 static gboolean gst_audio_encoder_decide_allocation_default (GstAudioEncoder *
332 enc, GstQuery * query);
333 static gboolean gst_audio_encoder_propose_allocation_default (GstAudioEncoder *
334 enc, GstQuery * query);
335 static gboolean gst_audio_encoder_negotiate_default (GstAudioEncoder * enc);
336 static gboolean gst_audio_encoder_negotiate_unlocked (GstAudioEncoder * enc);
337
338 static gboolean gst_audio_encoder_transform_meta_default (GstAudioEncoder *
339 encoder, GstBuffer * outbuf, GstMeta * meta, GstBuffer * inbuf);
340
341 static gboolean gst_audio_encoder_sink_query_default (GstAudioEncoder * encoder,
342 GstQuery * query);
343 static gboolean gst_audio_encoder_src_query_default (GstAudioEncoder * encoder,
344 GstQuery * query);
345
346 static void
347 gst_audio_encoder_class_init (GstAudioEncoderClass * klass)
348 {
349 GObjectClass *gobject_class;
350 GstElementClass *gstelement_class;
351
352 gobject_class = G_OBJECT_CLASS (klass);
353 gstelement_class = GST_ELEMENT_CLASS (klass);
354 parent_class = g_type_class_peek_parent (klass);
355
356 GST_DEBUG_CATEGORY_INIT (gst_audio_encoder_debug, "audioencoder", 0,
357 "audio encoder base class");
358
359 if (private_offset != 0)
360 g_type_class_adjust_private_offset (klass, &private_offset);
361
362 gobject_class->set_property = gst_audio_encoder_set_property;
363 gobject_class->get_property = gst_audio_encoder_get_property;
364
365 gobject_class->finalize = GST_DEBUG_FUNCPTR (gst_audio_encoder_finalize);
366
367 /* properties */
368 g_object_class_install_property (gobject_class, PROP_PERFECT_TS,
369 g_param_spec_boolean ("perfect-timestamp", "Perfect Timestamps",
370 "Favour perfect timestamps over tracking upstream timestamps",
371 DEFAULT_PERFECT_TS, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
372 g_object_class_install_property (gobject_class, PROP_GRANULE,
373 g_param_spec_boolean ("mark-granule", "Granule Marking",
374 "Apply granule semantics to buffer metadata (implies perfect-timestamp)",
375 DEFAULT_GRANULE, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
376 g_object_class_install_property (gobject_class, PROP_HARD_RESYNC,
377 g_param_spec_boolean ("hard-resync", "Hard Resync",
378 "Perform clipping and sample flushing upon discontinuity",
379 DEFAULT_HARD_RESYNC, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
380 g_object_class_install_property (gobject_class, PROP_TOLERANCE,
381 g_param_spec_int64 ("tolerance", "Tolerance",
382 "Consider discontinuity if timestamp jitter/imperfection exceeds tolerance (ns)",
383 0, G_MAXINT64, DEFAULT_TOLERANCE,
384 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
385
386 gstelement_class->change_state =
387 GST_DEBUG_FUNCPTR (gst_audio_encoder_change_state);
388
389 klass->getcaps = gst_audio_encoder_getcaps_default;
390 klass->sink_event = gst_audio_encoder_sink_event_default;
391 klass->src_event = gst_audio_encoder_src_event_default;
392 klass->sink_query = gst_audio_encoder_sink_query_default;
393 klass->src_query = gst_audio_encoder_src_query_default;
394 klass->propose_allocation = gst_audio_encoder_propose_allocation_default;
395 klass->decide_allocation = gst_audio_encoder_decide_allocation_default;
396 klass->negotiate = gst_audio_encoder_negotiate_default;
397 klass->transform_meta = gst_audio_encoder_transform_meta_default;
398
399 meta_tag_audio_quark = g_quark_from_static_string (GST_META_TAG_AUDIO_STR);
400 }
401
402 static void
403 gst_audio_encoder_init (GstAudioEncoder * enc, GstAudioEncoderClass * bclass)
404 {
405 GstPadTemplate *pad_template;
406
407 GST_DEBUG_OBJECT (enc, "gst_audio_encoder_init");
408
409 enc->priv = gst_audio_encoder_get_instance_private (enc);
410
411 /* only push mode supported */
412 pad_template =
413 gst_element_class_get_pad_template (GST_ELEMENT_CLASS (bclass), "sink");
414 g_return_if_fail (pad_template != NULL);
415 enc->sinkpad = gst_pad_new_from_template (pad_template, "sink");
416 gst_pad_set_event_function (enc->sinkpad,
417 GST_DEBUG_FUNCPTR (gst_audio_encoder_sink_event));
418 gst_pad_set_query_function (enc->sinkpad,
419 GST_DEBUG_FUNCPTR (gst_audio_encoder_sink_query));
420 gst_pad_set_chain_function (enc->sinkpad,
421 GST_DEBUG_FUNCPTR (gst_audio_encoder_chain));
422 gst_pad_set_activatemode_function (enc->sinkpad,
423 GST_DEBUG_FUNCPTR (gst_audio_encoder_sink_activate_mode));
424 gst_element_add_pad (GST_ELEMENT (enc), enc->sinkpad);
425
426 GST_DEBUG_OBJECT (enc, "sinkpad created");
427
428 /* and we don't mind upstream traveling stuff that much ... */
429 pad_template =
430 gst_element_class_get_pad_template (GST_ELEMENT_CLASS (bclass), "src");
431 g_return_if_fail (pad_template != NULL);
432 enc->srcpad = gst_pad_new_from_template (pad_template, "src");
433 gst_pad_set_event_function (enc->srcpad,
434 GST_DEBUG_FUNCPTR (gst_audio_encoder_src_event));
435 gst_pad_set_query_function (enc->srcpad,
436 GST_DEBUG_FUNCPTR (gst_audio_encoder_src_query));
437 gst_pad_use_fixed_caps (enc->srcpad);
438 gst_element_add_pad (GST_ELEMENT (enc), enc->srcpad);
439 GST_DEBUG_OBJECT (enc, "src created");
440
441 enc->priv->adapter = gst_adapter_new ();
442
443 g_rec_mutex_init (&enc->stream_lock);
444
445 /* property default */
446 enc->priv->granule = DEFAULT_GRANULE;
447 enc->priv->perfect_ts = DEFAULT_PERFECT_TS;
448 enc->priv->hard_resync = DEFAULT_HARD_RESYNC;
449 enc->priv->tolerance = DEFAULT_TOLERANCE;
450 enc->priv->hard_min = DEFAULT_HARD_MIN;
451 enc->priv->drainable = DEFAULT_DRAINABLE;
452
453 /* init state */
454 enc->priv->ctx.min_latency = 0;
455 enc->priv->ctx.max_latency = 0;
456 gst_audio_encoder_reset (enc, TRUE);
457 GST_DEBUG_OBJECT (enc, "init ok");
458 }
459
460 static void
461 gst_audio_encoder_reset (GstAudioEncoder * enc, gboolean full)
462 {
463 GST_AUDIO_ENCODER_STREAM_LOCK (enc);
464
465 GST_LOG_OBJECT (enc, "reset full %d", full);
466
467 if (full) {
468 enc->priv->active = FALSE;
469 GST_OBJECT_LOCK (enc);
470 enc->priv->samples_in = 0;
471 enc->priv->bytes_out = 0;
472 GST_OBJECT_UNLOCK (enc);
473
474 g_list_foreach (enc->priv->ctx.headers, (GFunc) gst_buffer_unref, NULL);
475 g_list_free (enc->priv->ctx.headers);
476 enc->priv->ctx.headers = NULL;
477 enc->priv->ctx.new_headers = FALSE;
478
479 if (enc->priv->ctx.allocator)
480 gst_object_unref (enc->priv->ctx.allocator);
481 enc->priv->ctx.allocator = NULL;
482
483 GST_OBJECT_LOCK (enc);
484 gst_caps_replace (&enc->priv->ctx.input_caps, NULL);
485 gst_caps_replace (&enc->priv->ctx.caps, NULL);
486 gst_caps_replace (&enc->priv->ctx.allocation_caps, NULL);
487
488 memset (&enc->priv->ctx, 0, sizeof (enc->priv->ctx));
489 gst_audio_info_init (&enc->priv->ctx.info);
490 GST_OBJECT_UNLOCK (enc);
491
492 if (enc->priv->upstream_tags) {
493 gst_tag_list_unref (enc->priv->upstream_tags);
494 enc->priv->upstream_tags = NULL;
495 }
496 if (enc->priv->tags)
497 gst_tag_list_unref (enc->priv->tags);
498 enc->priv->tags = NULL;
499 enc->priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
500 enc->priv->tags_changed = FALSE;
501
502 g_list_foreach (enc->priv->pending_events, (GFunc) gst_event_unref, NULL);
503 g_list_free (enc->priv->pending_events);
504 enc->priv->pending_events = NULL;
505 }
506
507 gst_segment_init (&enc->input_segment, GST_FORMAT_TIME);
508 gst_segment_init (&enc->output_segment, GST_FORMAT_TIME);
509
510 gst_adapter_clear (enc->priv->adapter);
511 enc->priv->got_data = FALSE;
512 enc->priv->drained = TRUE;
513 enc->priv->offset = 0;
514 enc->priv->base_ts = GST_CLOCK_TIME_NONE;
515 enc->priv->base_gp = -1;
516 enc->priv->samples = 0;
517 enc->priv->discont = FALSE;
518
519 GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
520 }
521
522 static void
523 gst_audio_encoder_finalize (GObject * object)
524 {
525 GstAudioEncoder *enc = GST_AUDIO_ENCODER (object);
526
527 g_object_unref (enc->priv->adapter);
528
529 g_rec_mutex_clear (&enc->stream_lock);
530
531 G_OBJECT_CLASS (parent_class)->finalize (object);
532 }
533
534 static GstStateChangeReturn
535 gst_audio_encoder_change_state (GstElement * element, GstStateChange transition)
536 {
537 GstStateChangeReturn ret = GST_STATE_CHANGE_SUCCESS;
538 GstAudioEncoder *enc = GST_AUDIO_ENCODER (element);
539 GstAudioEncoderClass *klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
540
541 switch (transition) {
542 case GST_STATE_CHANGE_NULL_TO_READY:
543 if (klass->open) {
544 if (!klass->open (enc))
545 goto open_failed;
546 }
547 default:
548 break;
549 }
550
551 ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
552
553 switch (transition) {
554 case GST_STATE_CHANGE_READY_TO_NULL:
555 if (klass->close) {
556 if (!klass->close (enc))
557 goto close_failed;
558 }
559 default:
560 break;
561 }
562
563 return ret;
564
565 open_failed:
566 {
567 GST_ELEMENT_ERROR (enc, LIBRARY, INIT, (NULL), ("Failed to open codec"));
568 return GST_STATE_CHANGE_FAILURE;
569 }
570 close_failed:
571 {
572 GST_ELEMENT_ERROR (enc, LIBRARY, INIT, (NULL), ("Failed to close codec"));
573 return GST_STATE_CHANGE_FAILURE;
574 }
575 }
576
577 static gboolean
578 gst_audio_encoder_push_event (GstAudioEncoder * enc, GstEvent * event)
579 {
580 switch (GST_EVENT_TYPE (event)) {
581 case GST_EVENT_SEGMENT:{
582 GstSegment seg;
583
584 GST_AUDIO_ENCODER_STREAM_LOCK (enc);
585 gst_event_copy_segment (event, &seg);
586
587 GST_DEBUG_OBJECT (enc, "starting segment %" GST_SEGMENT_FORMAT, &seg);
588
589 enc->output_segment = seg;
590 GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
591 break;
592 }
593 default:
594 break;
595 }
596
597 return gst_pad_push_event (enc->srcpad, event);
598 }
599
600 static inline void
601 gst_audio_encoder_push_pending_events (GstAudioEncoder * enc)
602 {
603 GstAudioEncoderPrivate *priv = enc->priv;
604
605 if (priv->pending_events) {
606 GList *pending_events, *l;
607
608 pending_events = priv->pending_events;
609 priv->pending_events = NULL;
610
611 GST_DEBUG_OBJECT (enc, "Pushing pending events");
612 for (l = pending_events; l; l = l->next)
613 gst_audio_encoder_push_event (enc, l->data);
614 g_list_free (pending_events);
615 }
616 }
617
618 static GstEvent *
619 gst_audio_encoder_create_merged_tags_event (GstAudioEncoder * enc)
620 {
621 GstTagList *merged_tags;
622
623 GST_LOG_OBJECT (enc, "upstream : %" GST_PTR_FORMAT, enc->priv->upstream_tags);
624 GST_LOG_OBJECT (enc, "encoder : %" GST_PTR_FORMAT, enc->priv->tags);
625 GST_LOG_OBJECT (enc, "mode : %d", enc->priv->tags_merge_mode);
626
627 merged_tags =
628 gst_tag_list_merge (enc->priv->upstream_tags, enc->priv->tags,
629 enc->priv->tags_merge_mode);
630
631 GST_DEBUG_OBJECT (enc, "merged : %" GST_PTR_FORMAT, merged_tags);
632
633 if (merged_tags == NULL)
634 return NULL;
635
636 if (gst_tag_list_is_empty (merged_tags)) {
637 gst_tag_list_unref (merged_tags);
638 return NULL;
639 }
640
641 /* add codec info to pending tags */
642 #if 0
643 caps = gst_pad_get_current_caps (enc->srcpad);
644 gst_pb_utils_add_codec_description_to_tag_list (merged_tags,
645 GST_TAG_AUDIO_CODEC, caps);
646 #endif
647
648 return gst_event_new_tag (merged_tags);
649 }
650
651 static void
652 gst_audio_encoder_check_and_push_pending_tags (GstAudioEncoder * enc)
653 {
654 if (enc->priv->tags_changed) {
655 GstEvent *tags_event;
656
657 tags_event = gst_audio_encoder_create_merged_tags_event (enc);
658
659 if (tags_event != NULL)
660 gst_audio_encoder_push_event (enc, tags_event);
661
662 enc->priv->tags_changed = FALSE;
663 }
664 }
665
666
667 static gboolean
668 gst_audio_encoder_transform_meta_default (GstAudioEncoder *
669 encoder, GstBuffer * outbuf, GstMeta * meta, GstBuffer * inbuf)
670 {
671 const GstMetaInfo *info = meta->info;
672 const gchar *const *tags;
673 const gchar *const supported_tags[] = {
674 GST_META_TAG_AUDIO_STR,
675 GST_META_TAG_AUDIO_CHANNELS_STR,
676 NULL,
677 };
678
679 tags = gst_meta_api_type_get_tags (info->api);
680
681 if (!tags)
682 return TRUE;
683
684 while (*tags) {
685 if (!g_strv_contains (supported_tags, *tags))
686 return FALSE;
687 tags++;
688 }
689
690 return TRUE;
691 }
692
693 typedef struct
694 {
695 GstAudioEncoder *encoder;
696 GstBuffer *outbuf;
697 } CopyMetaData;
698
699 static gboolean
700 foreach_metadata (GstBuffer * inbuf, GstMeta ** meta, gpointer user_data)
701 {
702 CopyMetaData *data = user_data;
703 GstAudioEncoder *encoder = data->encoder;
704 GstAudioEncoderClass *klass = GST_AUDIO_ENCODER_GET_CLASS (encoder);
705 GstBuffer *outbuf = data->outbuf;
706 const GstMetaInfo *info = (*meta)->info;
707 gboolean do_copy = FALSE;
708
709 if (gst_meta_api_type_has_tag (info->api, _gst_meta_tag_memory)) {
710 /* never call the transform_meta with memory specific metadata */
711 GST_DEBUG_OBJECT (encoder, "not copying memory specific metadata %s",
712 g_type_name (info->api));
713 do_copy = FALSE;
714 } else if (klass->transform_meta) {
715 do_copy = klass->transform_meta (encoder, outbuf, *meta, inbuf);
716 GST_DEBUG_OBJECT (encoder, "transformed metadata %s: copy: %d",
717 g_type_name (info->api), do_copy);
718 }
719
720 /* we only copy metadata when the subclass implemented a transform_meta
721 * function and when it returns %TRUE */
722 if (do_copy && info->transform_func) {
723 GstMetaTransformCopy copy_data = { FALSE, 0, -1 };
724 GST_DEBUG_OBJECT (encoder, "copy metadata %s", g_type_name (info->api));
725 /* simply copy then */
726 info->transform_func (outbuf, *meta, inbuf,
727 _gst_meta_transform_copy, &copy_data);
728 }
729 return TRUE;
730 }
731
732 /**
733 * gst_audio_encoder_finish_frame:
734 * @enc: a #GstAudioEncoder
735 * @buf: (transfer full) (allow-none): encoded data
736 * @samples: number of samples (per channel) represented by encoded data
737 *
738 * Collects encoded data and pushes it downstream.
739 * Source pad caps must be set when this is called.
740 *
741 * If @samples < 0, then the best estimate is all samples provided to the
742 * encoder (subclass) so far. @buf may be NULL, in which case the next
743 * @samples samples are considered discarded, e.g. as a result of
744 * discontinuous transmission, and a discontinuity is marked.
745 *
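 * For example, a hypothetical DTX-capable subclass might, from its
 * #GstAudioEncoderClass.handle_frame() implementation, account for a silent
 * frame it chose not to transmit (frame_is_silence and frame_samples are
 * placeholders, not part of this API):
 * |[<!-- language="C" -->
 * if (frame_is_silence)
 *   return gst_audio_encoder_finish_frame (enc, NULL, frame_samples);
 * ]|
 *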
746 * Note that samples received in #GstAudioEncoderClass.handle_frame()
747 * may be invalidated by a call to this function.
748 *
749 * Returns: a #GstFlowReturn that should be escalated to caller (of caller)
750 */
751 GstFlowReturn
752 gst_audio_encoder_finish_frame (GstAudioEncoder * enc, GstBuffer * buf,
753 gint samples)
754 {
755 GstAudioEncoderClass *klass;
756 GstAudioEncoderPrivate *priv;
757 GstAudioEncoderContext *ctx;
758 GstFlowReturn ret = GST_FLOW_OK;
759 gboolean needs_reconfigure = FALSE;
760 GstBuffer *inbuf = NULL;
761
762 klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
763 priv = enc->priv;
764 ctx = &enc->priv->ctx;
765
766 /* subclass should not hand us no data */
767 g_return_val_if_fail (buf == NULL || gst_buffer_get_size (buf) > 0,
768 GST_FLOW_ERROR);
769
770 /* subclass should know what it is producing by now */
771 if (!ctx->caps)
772 goto no_caps;
773
774 GST_AUDIO_ENCODER_STREAM_LOCK (enc);
775
776 GST_LOG_OBJECT (enc,
777 "accepting %" G_GSIZE_FORMAT " bytes encoded data as %d samples",
778 buf ? gst_buffer_get_size (buf) : -1, samples);
779
780 needs_reconfigure = gst_pad_check_reconfigure (enc->srcpad);
781 if (G_UNLIKELY (ctx->output_caps_changed || needs_reconfigure)) {
782 if (!gst_audio_encoder_negotiate_unlocked (enc)) {
783 gst_pad_mark_reconfigure (enc->srcpad);
784 if (GST_PAD_IS_FLUSHING (enc->srcpad))
785 ret = GST_FLOW_FLUSHING;
786 else
787 ret = GST_FLOW_NOT_NEGOTIATED;
788 if (buf)
789 gst_buffer_unref (buf);
790 goto exit;
791 }
792 }
793
794 /* mark subclass still alive and providing */
795 if (G_LIKELY (buf))
796 priv->got_data = TRUE;
797
798 gst_audio_encoder_push_pending_events (enc);
799
800 /* send after pending events, which likely includes segment event */
801 gst_audio_encoder_check_and_push_pending_tags (enc);
802
803 /* remove corresponding samples from input */
804 if (samples < 0)
805 samples = (enc->priv->offset / ctx->info.bpf);
806
807 if (G_LIKELY (samples)) {
808 /* track upstream ts if so configured */
809 if (!enc->priv->perfect_ts) {
810 guint64 ts, distance;
811
812 ts = gst_adapter_prev_pts (priv->adapter, &distance);
813 g_assert (distance % ctx->info.bpf == 0);
814 distance /= ctx->info.bpf;
815 GST_LOG_OBJECT (enc, "%" G_GUINT64_FORMAT " samples past prev_ts %"
816 GST_TIME_FORMAT, distance, GST_TIME_ARGS (ts));
817 GST_LOG_OBJECT (enc, "%" G_GUINT64_FORMAT " samples past base_ts %"
818 GST_TIME_FORMAT, priv->samples, GST_TIME_ARGS (priv->base_ts));
819 /* when draining adapter might be empty and no ts to offer */
820 if (GST_CLOCK_TIME_IS_VALID (ts) && ts != priv->base_ts) {
821 GstClockTimeDiff diff;
822 GstClockTime old_ts, next_ts;
823
824 /* passed into another buffer;
825 * mild check for discontinuity and only mark if so */
826 next_ts = ts +
827 gst_util_uint64_scale (distance, GST_SECOND, ctx->info.rate);
828 old_ts = priv->base_ts +
829 gst_util_uint64_scale (priv->samples, GST_SECOND, ctx->info.rate);
830 diff = GST_CLOCK_DIFF (next_ts, old_ts);
831 GST_LOG_OBJECT (enc, "ts diff %d ms", (gint) (diff / GST_MSECOND));
832 /* only mark discontinuity if beyond tolerance */
833 if (G_UNLIKELY (diff < -enc->priv->tolerance ||
834 diff > enc->priv->tolerance)) {
835 GST_DEBUG_OBJECT (enc, "marked discont");
836 priv->discont = TRUE;
837 }
838 if (diff > GST_SECOND / ctx->info.rate / 2 ||
839 diff < -GST_SECOND / ctx->info.rate / 2) {
840 GST_LOG_OBJECT (enc, "new upstream ts %" GST_TIME_FORMAT
841 " at distance %" G_GUINT64_FORMAT, GST_TIME_ARGS (ts), distance);
842 /* re-sync to upstream ts */
843 priv->base_ts = ts;
844 priv->samples = distance;
845 } else {
846 GST_LOG_OBJECT (enc, "new upstream ts only introduces jitter");
847 }
848 }
849 }
850 /* advance sample view */
851 if (G_UNLIKELY (samples * ctx->info.bpf > priv->offset)) {
852 guint avail = gst_adapter_available (priv->adapter);
853
854 if (G_LIKELY (!priv->force)) {
855 /* we should have received EOS to enable force */
856 goto overflow;
857 } else {
858 priv->offset = 0;
859 if (avail > 0 && samples * ctx->info.bpf >= avail) {
860 inbuf = gst_adapter_take_buffer_fast (priv->adapter, avail);
861 gst_adapter_clear (priv->adapter);
862 } else if (avail > 0) {
863 inbuf =
864 gst_adapter_take_buffer_fast (priv->adapter,
865 samples * ctx->info.bpf);
866 }
867 }
868 } else {
869 guint avail = gst_adapter_available (priv->adapter);
870
871 if (avail > 0) {
872 inbuf =
873 gst_adapter_take_buffer_fast (priv->adapter,
874 samples * ctx->info.bpf);
875 }
876 priv->offset -= samples * ctx->info.bpf;
877 /* avoid subsequent stray prev_ts */
878 if (G_UNLIKELY (gst_adapter_available (priv->adapter) == 0))
879 gst_adapter_clear (priv->adapter);
880 }
881 /* sample count advanced below after buffer handling */
882 }
883
884 /* collect output */
885 if (G_LIKELY (buf)) {
886 gsize size;
887
888 /* Pushing headers first */
889 if (G_UNLIKELY (priv->ctx.new_headers)) {
890 GList *tmp;
891
892 GST_DEBUG_OBJECT (enc, "Sending headers");
893
894 for (tmp = priv->ctx.headers; tmp; tmp = tmp->next) {
895 GstBuffer *tmpbuf = gst_buffer_ref (tmp->data);
896
897 tmpbuf = gst_buffer_make_writable (tmpbuf);
898 size = gst_buffer_get_size (tmpbuf);
899
900 if (G_UNLIKELY (priv->discont)) {
901 GST_LOG_OBJECT (enc, "marking discont");
902 GST_BUFFER_FLAG_SET (tmpbuf, GST_BUFFER_FLAG_DISCONT);
903 priv->discont = FALSE;
904 }
905
906 /* Ogg codecs like Vorbis use offset/offset-end in a special
907 * way and both should be 0 for these codecs */
908 if (priv->base_gp >= 0) {
909 GST_BUFFER_OFFSET (tmpbuf) = 0;
910 GST_BUFFER_OFFSET_END (tmpbuf) = 0;
911 } else {
912 GST_BUFFER_OFFSET (tmpbuf) = priv->bytes_out;
913 GST_BUFFER_OFFSET_END (tmpbuf) = priv->bytes_out + size;
914 }
915
916 GST_OBJECT_LOCK (enc);
917 priv->bytes_out += size;
918 GST_OBJECT_UNLOCK (enc);
919
920 ret = gst_pad_push (enc->srcpad, tmpbuf);
921 if (ret != GST_FLOW_OK) {
922 GST_WARNING_OBJECT (enc, "pushing header returned %s",
923 gst_flow_get_name (ret));
924 goto exit;
925 }
926 }
927 priv->ctx.new_headers = FALSE;
928 }
929
930 size = gst_buffer_get_size (buf);
931
932 GST_LOG_OBJECT (enc, "taking %" G_GSIZE_FORMAT " bytes for output", size);
933 buf = gst_buffer_make_writable (buf);
934
935 /* decorate */
936 if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (priv->base_ts))) {
937 /* FIXME ? lookahead could lead to weird ts and duration ?
938 * (particularly if not in perfect mode) */
939 /* mind sample rounding and produce perfect output */
940 GST_BUFFER_PTS (buf) = priv->base_ts +
941 gst_util_uint64_scale (priv->samples - ctx->lookahead, GST_SECOND,
942 ctx->info.rate);
943 GST_BUFFER_DTS (buf) = GST_BUFFER_PTS (buf);
944 GST_DEBUG_OBJECT (enc, "out samples %d", samples);
945 if (G_LIKELY (samples > 0)) {
946 priv->samples += samples;
947 GST_BUFFER_DURATION (buf) = priv->base_ts +
948 gst_util_uint64_scale (priv->samples - ctx->lookahead, GST_SECOND,
949 ctx->info.rate) - GST_BUFFER_PTS (buf);
950 priv->last_duration = GST_BUFFER_DURATION (buf);
951 } else {
952 /* duration forecast in case of handling remainder;
953 * the last one is probably like the previous one ... */
954 GST_BUFFER_DURATION (buf) = priv->last_duration;
955 }
956 if (priv->base_gp >= 0) {
957 /* pamper oggmux */
958 /* FIXME: in longer run, muxer should take care of this ... */
959 /* offset_end = granulepos for ogg muxer */
960 GST_BUFFER_OFFSET_END (buf) = priv->base_gp + priv->samples -
961 enc->priv->ctx.lookahead;
962 /* offset = timestamp corresponding to granulepos for ogg muxer */
963 GST_BUFFER_OFFSET (buf) =
964 GST_FRAMES_TO_CLOCK_TIME (GST_BUFFER_OFFSET_END (buf),
965 ctx->info.rate);
966 } else {
967 GST_BUFFER_OFFSET (buf) = priv->bytes_out;
968 GST_BUFFER_OFFSET_END (buf) = priv->bytes_out + size;
969 }
970 }
971
972 if (klass->transform_meta) {
973 if (G_LIKELY (inbuf)) {
974 CopyMetaData data;
975
976 data.encoder = enc;
977 data.outbuf = buf;
978 gst_buffer_foreach_meta (inbuf, foreach_metadata, &data);
979 } else {
980 GST_WARNING_OBJECT (enc,
981 "Can't copy metadata because input buffer disappeared");
982 }
983 }
984
985 GST_OBJECT_LOCK (enc);
986 priv->bytes_out += size;
987 GST_OBJECT_UNLOCK (enc);
988
989 if (G_UNLIKELY (priv->discont)) {
990 GST_LOG_OBJECT (enc, "marking discont");
991 GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
992 priv->discont = FALSE;
993 }
994
995 if (klass->pre_push) {
996 /* last chance for subclass to do some dirty stuff */
997 ret = klass->pre_push (enc, &buf);
998 if (ret != GST_FLOW_OK || !buf) {
999 GST_DEBUG_OBJECT (enc, "subclass returned %s, buf %p",
1000 gst_flow_get_name (ret), buf);
1001
1002 if (buf)
1003 gst_buffer_unref (buf);
1004 goto exit;
1005 }
1006 }
1007
1008 GST_LOG_OBJECT (enc,
1009 "pushing buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT
1010 ", duration %" GST_TIME_FORMAT, size,
1011 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
1012 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
1013
1014 ret = gst_pad_push (enc->srcpad, buf);
1015 GST_LOG_OBJECT (enc, "buffer pushed: %s", gst_flow_get_name (ret));
1016 } else {
1017 /* merely advance samples, most work for that already done above */
1018 priv->samples += samples;
1019 }
1020
1021 exit:
1022 if (inbuf)
1023 gst_buffer_unref (inbuf);
1024
1025 GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
1026
1027 return ret;
1028
1029 /* ERRORS */
1030 no_caps:
1031 {
1032 GST_ELEMENT_ERROR (enc, STREAM, ENCODE, ("no caps set"), (NULL));
1033 if (buf)
1034 gst_buffer_unref (buf);
1035 return GST_FLOW_ERROR;
1036 }
1037 overflow:
1038 {
1039 GST_ELEMENT_ERROR (enc, STREAM, ENCODE,
1040 ("received more encoded samples %d than provided %d as inputs",
1041 samples, priv->offset / ctx->info.bpf), (NULL));
1042 if (buf)
1043 gst_buffer_unref (buf);
1044 ret = GST_FLOW_ERROR;
1045 /* no way we can let this pass */
1046 g_assert_not_reached ();
1047 /* really no way */
1048 goto exit;
1049 }
1050 }
1051
1052 /* adapter tracking idea:
1053 * - start of adapter corresponds with what has already been encoded
1054 * (i.e. really returned by encoder subclass)
1055 * - start + offset is what needs to be fed to subclass next */
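/* illustratively (positions within the adapter):
 *
 *   [ supplied to subclass, not yet finished | not yet supplied ]
 *   ^ adapter start                          ^ adapter start + priv->offset
 */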
1056 static GstFlowReturn
1057 gst_audio_encoder_push_buffers (GstAudioEncoder * enc, gboolean force)
1058 {
1059 GstAudioEncoderClass *klass;
1060 GstAudioEncoderPrivate *priv;
1061 GstAudioEncoderContext *ctx;
1062 gint av, need;
1063 GstBuffer *buf;
1064 GstFlowReturn ret = GST_FLOW_OK;
1065
1066 klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
1067
1068 g_return_val_if_fail (klass->handle_frame != NULL, GST_FLOW_ERROR);
1069
1070 priv = enc->priv;
1071 ctx = &enc->priv->ctx;
1072
1073 while (ret == GST_FLOW_OK) {
1074
1075 buf = NULL;
1076 av = gst_adapter_available (priv->adapter);
1077
1078 g_assert (priv->offset <= av);
1079 av -= priv->offset;
1080
1081 need =
1082 ctx->frame_samples_min >
1083 0 ? ctx->frame_samples_min * ctx->info.bpf : av;
1084 GST_LOG_OBJECT (enc, "available: %d, needed: %d, force: %d", av, need,
1085 force);
1086
1087 if ((need > av) || !av) {
1088 if (G_UNLIKELY (force)) {
1089 priv->force = TRUE;
1090 need = av;
1091 } else {
1092 break;
1093 }
1094 } else {
1095 priv->force = FALSE;
1096 }
1097
1098 if (ctx->frame_samples_max > 0)
1099 need = MIN (av, ctx->frame_samples_max * ctx->info.bpf);
1100
1101 if (ctx->frame_samples_min == ctx->frame_samples_max) {
1102 /* if we have some extra metadata,
1103 * provide for integer multiple of frames to allow for better granularity
1104 * of processing */
1105 if (ctx->frame_samples_min > 0 && need) {
1106 if (ctx->frame_max > 1)
1107 need = need * MIN ((av / need), ctx->frame_max);
1108 else if (ctx->frame_max == 0)
1109 need = need * (av / need);
1110 }
1111 }
1112
1113 priv->got_data = FALSE;
1114 if (G_LIKELY (need)) {
1115 const guint8 *data;
1116
1117 data = gst_adapter_map (priv->adapter, priv->offset + need);
1118 buf =
1119 gst_buffer_new_wrapped_full (GST_MEMORY_FLAG_READONLY,
1120 (gpointer) data, priv->offset + need, priv->offset, need, NULL, NULL);
1121 } else if (!priv->drainable) {
1122 GST_DEBUG_OBJECT (enc, "non-drainable and no more data");
1123 goto finish;
1124 }
1125
1126 GST_LOG_OBJECT (enc, "providing subclass with %d bytes at offset %d",
1127 need, priv->offset);
1128
1129 /* mark this already as consumed,
1130 * which it should be when subclass gives us data in exchange for samples */
1131 priv->offset += need;
1132 GST_OBJECT_LOCK (enc);
1133 priv->samples_in += need / ctx->info.bpf;
1134 GST_OBJECT_UNLOCK (enc);
1135
1136 /* subclass might not want to be bothered with leftover data,
1137 * so take care of that here if so, otherwise pass along */
1138 if (G_UNLIKELY (priv->force && priv->hard_min && buf)) {
1139 GST_DEBUG_OBJECT (enc, "bypassing subclass with leftover");
1140 ret = gst_audio_encoder_finish_frame (enc, NULL, -1);
1141 } else {
1142 ret = klass->handle_frame (enc, buf);
1143 }
1144
1145 if (G_LIKELY (buf)) {
1146 gst_buffer_unref (buf);
1147 gst_adapter_unmap (priv->adapter);
1148 }
1149
1150 finish:
1151 /* no data to feed, no leftover provided, then bail out */
1152 if (G_UNLIKELY (!buf && !priv->got_data)) {
1153 priv->drained = TRUE;
1154 GST_LOG_OBJECT (enc, "no more data drained from subclass");
1155 break;
1156 }
1157 }
1158
1159 /* ohos.ext.func.0003: The media recorder service must support bypassing abnormal streams so that it can
1160 * continue recording normal streams. However, the gstpipeline cannot work properly if an error message is
1161 * reported. Some error messages are therefore changed to warning messages, so that the media recording service
1162 * can detect abnormal streams by matching the expected warning messages.
1163 */
1164 #ifdef OHOS_EXT_FUNC
1165 if ((ret != GST_FLOW_OK) && (ret != GST_FLOW_EOS)) {
1166 GST_ELEMENT_WARNING (enc, STREAM, ENCODE, (NULL),
1167 ("stream encode or push failed"));
1168 ret = GST_FLOW_ERROR;
1169 }
1170 #endif
1171
1172 return ret;
1173 }
1174
1175 static GstFlowReturn
1176 gst_audio_encoder_drain (GstAudioEncoder * enc)
1177 {
1178 GST_DEBUG_OBJECT (enc, "draining");
1179 if (enc->priv->drained)
1180 return GST_FLOW_OK;
1181 else {
1182 GST_DEBUG_OBJECT (enc, "... really");
1183 return gst_audio_encoder_push_buffers (enc, TRUE);
1184 }
1185 }
1186
1187 static void
1188 gst_audio_encoder_set_base_gp (GstAudioEncoder * enc)
1189 {
1190 GstClockTime ts;
1191
1192 if (!enc->priv->granule)
1193 return;
1194
1195 /* use running time for granule */
1196 /* incoming data is clipped, so a valid input should yield a valid output */
1197 ts = gst_segment_to_running_time (&enc->input_segment, GST_FORMAT_TIME,
1198 enc->priv->base_ts);
1199 if (GST_CLOCK_TIME_IS_VALID (ts)) {
1200 enc->priv->base_gp =
1201 GST_CLOCK_TIME_TO_FRAMES (enc->priv->base_ts, enc->priv->ctx.info.rate);
1202 GST_DEBUG_OBJECT (enc, "new base gp %" G_GINT64_FORMAT, enc->priv->base_gp);
1203 } else {
1204 /* should reasonably have a valid base,
1205 * otherwise start at 0 if we did not already start there earlier */
1206 if (enc->priv->base_gp < 0) {
1207 enc->priv->base_gp = 0;
1208 GST_DEBUG_OBJECT (enc, "new base gp %" G_GINT64_FORMAT,
1209 enc->priv->base_gp);
1210 }
1211 }
1212 }
1213
1214 static GstFlowReturn
1215 gst_audio_encoder_chain (GstPad * pad, GstObject * parent, GstBuffer * buffer)
1216 {
1217 GstAudioEncoder *enc;
1218 GstAudioEncoderPrivate *priv;
1219 GstAudioEncoderContext *ctx;
1220 GstFlowReturn ret = GST_FLOW_OK;
1221 gboolean discont;
1222 gsize size;
1223
1224 enc = GST_AUDIO_ENCODER (parent);
1225
1226 priv = enc->priv;
1227 ctx = &enc->priv->ctx;
1228
1229 GST_AUDIO_ENCODER_STREAM_LOCK (enc);
1230
1231 if (G_UNLIKELY (priv->do_caps)) {
1232 GstCaps *caps = gst_pad_get_current_caps (enc->sinkpad);
1233 if (!caps)
1234 goto not_negotiated;
1235 if (!gst_audio_encoder_sink_setcaps (enc, caps)) {
1236 gst_caps_unref (caps);
1237 goto not_negotiated;
1238 }
1239 gst_caps_unref (caps);
1240 priv->do_caps = FALSE;
1241 }
1242
1243 /* should know what is coming by now */
1244 if (!ctx->info.bpf)
1245 goto not_negotiated;
1246
1247 size = gst_buffer_get_size (buffer);
1248
1249 GST_LOG_OBJECT (enc,
1250 "received buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT
1251 ", duration %" GST_TIME_FORMAT, size,
1252 GST_TIME_ARGS (GST_BUFFER_PTS (buffer)),
1253 GST_TIME_ARGS (GST_BUFFER_DURATION (buffer)));
1254
1255 /* input should be whole number of sample frames */
1256 if (size % ctx->info.bpf)
1257 goto wrong_buffer;
1258
1259 #ifndef GST_DISABLE_GST_DEBUG
1260 {
1261 GstClockTime duration;
1262 GstClockTimeDiff diff;
1263
1264 /* verify buffer duration */
1265 duration = gst_util_uint64_scale (size, GST_SECOND,
1266 ctx->info.rate * ctx->info.bpf);
1267 diff = GST_CLOCK_DIFF (duration, GST_BUFFER_DURATION (buffer));
1268 if (GST_BUFFER_DURATION (buffer) != GST_CLOCK_TIME_NONE &&
1269 (diff > GST_SECOND / ctx->info.rate / 2 ||
1270 diff < -GST_SECOND / ctx->info.rate / 2)) {
1271 GST_DEBUG_OBJECT (enc, "incoming buffer had incorrect duration %"
1272 GST_TIME_FORMAT ", expected duration %" GST_TIME_FORMAT,
1273 GST_TIME_ARGS (GST_BUFFER_DURATION (buffer)),
1274 GST_TIME_ARGS (duration));
1275 }
1276 }
1277 #endif
1278
1279 discont = GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DISCONT);
1280 if (G_UNLIKELY (discont)) {
1281 GST_LOG_OBJECT (enc, "marked discont");
1282 enc->priv->discont = discont;
1283 }
1284
1285 /* clip to segment */
1286 buffer = gst_audio_buffer_clip (buffer, &enc->input_segment, ctx->info.rate,
1287 ctx->info.bpf);
1288 if (G_UNLIKELY (!buffer)) {
1289 GST_DEBUG_OBJECT (enc, "no data after clipping to segment");
1290 goto done;
1291 }
1292
1293 size = gst_buffer_get_size (buffer);
1294
1295 GST_LOG_OBJECT (enc,
1296 "buffer after segment clipping has size %" G_GSIZE_FORMAT " with ts %"
1297 GST_TIME_FORMAT ", duration %" GST_TIME_FORMAT, size,
1298 GST_TIME_ARGS (GST_BUFFER_PTS (buffer)),
1299 GST_TIME_ARGS (GST_BUFFER_DURATION (buffer)));
1300
1301 if (!GST_CLOCK_TIME_IS_VALID (priv->base_ts)) {
1302 priv->base_ts = GST_BUFFER_PTS (buffer);
1303 GST_DEBUG_OBJECT (enc, "new base ts %" GST_TIME_FORMAT,
1304 GST_TIME_ARGS (priv->base_ts));
1305 gst_audio_encoder_set_base_gp (enc);
1306 }
1307
1308 /* check for continuity;
1309 * checked elsewhere in non-perfect case */
1310 if (enc->priv->perfect_ts) {
1311 GstClockTimeDiff diff = 0;
1312 GstClockTime next_ts = 0;
1313
1314 if (GST_BUFFER_PTS_IS_VALID (buffer) &&
1315 GST_CLOCK_TIME_IS_VALID (priv->base_ts)) {
1316 guint64 samples;
1317
1318 samples = priv->samples +
1319 gst_adapter_available (priv->adapter) / ctx->info.bpf;
1320 next_ts = priv->base_ts +
1321 gst_util_uint64_scale (samples, GST_SECOND, ctx->info.rate);
1322 GST_LOG_OBJECT (enc, "buffer is %" G_GUINT64_FORMAT
1323 " samples past base_ts %" GST_TIME_FORMAT
1324 ", expected ts %" GST_TIME_FORMAT, samples,
1325 GST_TIME_ARGS (priv->base_ts), GST_TIME_ARGS (next_ts));
1326 diff = GST_CLOCK_DIFF (next_ts, GST_BUFFER_PTS (buffer));
1327 GST_LOG_OBJECT (enc, "ts diff %d ms", (gint) (diff / GST_MSECOND));
1328 /* if within tolerance,
1329 * discard buffer ts and carry on producing perfect stream,
1330 * otherwise clip or resync to ts */
1331 if (G_UNLIKELY (diff < -enc->priv->tolerance ||
1332 diff > enc->priv->tolerance)) {
1333 GST_DEBUG_OBJECT (enc, "marked discont");
1334 discont = TRUE;
1335 }
1336 }
1337
1338 /* do some fancy tweaking in hard resync case */
1339 if (discont && enc->priv->hard_resync) {
1340 if (diff < 0) {
1341 guint64 diff_bytes;
1342
1343 GST_WARNING_OBJECT (enc, "Buffer is older than expected ts %"
1344 GST_TIME_FORMAT ". Clipping buffer", GST_TIME_ARGS (next_ts));
1345
1346 diff_bytes =
1347 GST_CLOCK_TIME_TO_FRAMES (-diff, ctx->info.rate) * ctx->info.bpf;
1348 if (diff_bytes >= size) {
1349 gst_buffer_unref (buffer);
1350 goto done;
1351 }
1352 buffer = gst_buffer_make_writable (buffer);
1353 gst_buffer_resize (buffer, diff_bytes, size - diff_bytes);
1354
1355 GST_BUFFER_PTS (buffer) += diff;
1356 /* care even less about duration after this */
1357 } else {
1358 /* drain stuff prior to resync */
1359 gst_audio_encoder_drain (enc);
1360 }
1361 }
1362 if (discont) {
1363 /* now re-sync ts */
1364 GstClockTime shift =
1365 gst_util_uint64_scale (gst_adapter_available (priv->adapter),
1366 GST_SECOND, ctx->info.rate * ctx->info.bpf);
1367
1368 if (G_UNLIKELY (shift > GST_BUFFER_PTS (buffer))) {
1369 /* ERROR */
1370 goto wrong_time;
1371 }
1372 /* arrange for newly added samples to come out with the ts
1373 * of the incoming buffer that adds these */
1374 priv->base_ts = GST_BUFFER_PTS (buffer) - shift;
1375 priv->samples = 0;
1376 gst_audio_encoder_set_base_gp (enc);
1377 priv->discont |= discont;
1378 }
1379 }
1380
1381 gst_adapter_push (enc->priv->adapter, buffer);
1382 /* new stuff, so we can push subclass again */
1383 enc->priv->drained = FALSE;
1384
1385 ret = gst_audio_encoder_push_buffers (enc, FALSE);
1386
1387 done:
1388 GST_LOG_OBJECT (enc, "chain leaving");
1389
1390 GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
1391
1392 return ret;
1393
1394 /* ERRORS */
1395 not_negotiated:
1396 {
1397 #ifdef OHOS_OPT_COMPAT
1398 GST_ELEMENT_WARNING (enc, CORE, NEGOTIATION, (NULL),
1399 ("encoder not initialized"));
1400 #else
1401 GST_ELEMENT_ERROR (enc, CORE, NEGOTIATION, (NULL),
1402 ("encoder not initialized"));
1403 #endif
1404 gst_buffer_unref (buffer);
1405 ret = GST_FLOW_NOT_NEGOTIATED;
1406 goto done;
1407 }
1408 wrong_buffer:
1409 {
1410 GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
1411 ("buffer size %" G_GSIZE_FORMAT " not a multiple of %d",
1412 gst_buffer_get_size (buffer), ctx->info.bpf));
1413 gst_buffer_unref (buffer);
1414 ret = GST_FLOW_ERROR;
1415 goto done;
1416 }
1417 wrong_time:
1418 {
1419 GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
1420 ("buffer going too far back in time"));
1421 gst_buffer_unref (buffer);
1422 ret = GST_FLOW_ERROR;
1423 goto done;
1424 }
1425 }
1426
1427 static gboolean
1428 gst_audio_encoder_sink_setcaps (GstAudioEncoder * enc, GstCaps * caps)
1429 {
1430 GstAudioEncoderClass *klass;
1431 GstAudioEncoderContext *ctx;
1432 GstAudioInfo state;
1433 gboolean res = TRUE;
1434 guint old_rate;
1435
1436 klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
1437
1438 /* subclass must do something here ... */
1439 g_return_val_if_fail (klass->set_format != NULL, FALSE);
1440
1441 ctx = &enc->priv->ctx;
1442
1443 GST_AUDIO_ENCODER_STREAM_LOCK (enc);
1444
1445 GST_DEBUG_OBJECT (enc, "caps: %" GST_PTR_FORMAT, caps);
1446
1447 if (!gst_caps_is_fixed (caps))
1448 goto refuse_caps;
1449
1450 if (enc->priv->ctx.input_caps
1451 && gst_caps_is_equal (enc->priv->ctx.input_caps, caps))
1452 goto same_caps;
1453
1454 if (!gst_audio_info_from_caps (&state, caps))
1455 goto refuse_caps;
1456
1457 if (enc->priv->ctx.input_caps && gst_audio_info_is_equal (&state, &ctx->info))
1458 goto same_caps;
1459
1460 /* adjust ts tracking to new sample rate */
1461 old_rate = GST_AUDIO_INFO_RATE (&ctx->info);
1462 if (GST_CLOCK_TIME_IS_VALID (enc->priv->base_ts) && old_rate) {
1463 enc->priv->base_ts +=
1464 GST_FRAMES_TO_CLOCK_TIME (enc->priv->samples, old_rate);
1465 enc->priv->samples = 0;
1466 }
1467
1468 /* drain any pending old data stuff */
1469 gst_audio_encoder_drain (enc);
1470
1471 /* context defaults */
1472 /* FIXME 2.0: This is quite unexpected behaviour. We should never
1473 * just reset *settings* of a subclass inside the base class */
1474 enc->priv->ctx.frame_samples_min = 0;
1475 enc->priv->ctx.frame_samples_max = 0;
1476 enc->priv->ctx.frame_max = 0;
1477 enc->priv->ctx.lookahead = 0;
1478
1479 if (klass->set_format)
1480 res = klass->set_format (enc, &state);
1481
1482 if (res) {
1483 GST_OBJECT_LOCK (enc);
1484 ctx->info = state;
1485 gst_caps_replace (&enc->priv->ctx.input_caps, caps);
1486 GST_OBJECT_UNLOCK (enc);
1487 } else {
1488 /* invalidate state to ensure no casual carrying on */
1489 GST_DEBUG_OBJECT (enc, "subclass did not accept format");
1490 gst_audio_info_init (&state);
1491 goto exit;
1492 }
1493
1494 exit:
1495
1496 GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
1497
1498 return res;
1499
1500 same_caps:
1501 {
1502 GST_DEBUG_OBJECT (enc, "new audio format identical to configured format");
1503 goto exit;
1504 }
1505
1506 /* ERRORS */
1507 refuse_caps:
1508 {
1509 GST_WARNING_OBJECT (enc, "rejected caps %" GST_PTR_FORMAT, caps);
1510 goto exit;
1511 }
1512 }
1513
1514
1515 /**
1516 * gst_audio_encoder_proxy_getcaps:
1517 * @enc: a #GstAudioEncoder
1518 * @caps: (allow-none): initial caps
1519 * @filter: (allow-none): filter caps
1520 *
1521 * Returns caps that express @caps (or sink template caps if @caps == NULL)
1522 * restricted to channel/rate combinations supported by downstream elements
1523 * (e.g. muxers).
1524 *
1525 * Returns: (transfer full): a #GstCaps owned by caller
1526 */
1527 GstCaps *
1528 gst_audio_encoder_proxy_getcaps (GstAudioEncoder * enc, GstCaps * caps,
1529 GstCaps * filter)
1530 {
1531 return __gst_audio_element_proxy_getcaps (GST_ELEMENT_CAST (enc),
1532 GST_AUDIO_ENCODER_SINK_PAD (enc), GST_AUDIO_ENCODER_SRC_PAD (enc),
1533 caps, filter);
1534 }
1535
1536 static GstCaps *
1537 gst_audio_encoder_getcaps_default (GstAudioEncoder * enc, GstCaps * filter)
1538 {
1539 GstCaps *caps;
1540
1541 caps = gst_audio_encoder_proxy_getcaps (enc, NULL, filter);
1542 GST_LOG_OBJECT (enc, "returning caps %" GST_PTR_FORMAT, caps);
1543
1544 return caps;
1545 }
1546
1547 static GList *
1548 _flush_events (GstPad * pad, GList * events)
1549 {
1550 GList *tmp;
1551
1552 for (tmp = events; tmp; tmp = tmp->next) {
1553 if (GST_EVENT_TYPE (tmp->data) != GST_EVENT_EOS &&
1554 GST_EVENT_TYPE (tmp->data) != GST_EVENT_SEGMENT &&
1555 GST_EVENT_IS_STICKY (tmp->data)) {
1556 gst_pad_store_sticky_event (pad, GST_EVENT_CAST (tmp->data));
1557 }
1558 gst_event_unref (tmp->data);
1559 }
1560 g_list_free (events);
1561
1562 return NULL;
1563 }
1564
1565 static gboolean
1566 gst_audio_encoder_sink_event_default (GstAudioEncoder * enc, GstEvent * event)
1567 {
1568 GstAudioEncoderClass *klass;
1569 gboolean res;
1570
1571 klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
1572
1573 switch (GST_EVENT_TYPE (event)) {
1574 case GST_EVENT_SEGMENT:
1575 {
1576 GstSegment seg;
1577
1578 gst_event_copy_segment (event, &seg);
1579
1580 if (seg.format == GST_FORMAT_TIME) {
1581 GST_DEBUG_OBJECT (enc, "received TIME SEGMENT %" GST_SEGMENT_FORMAT,
1582 &seg);
1583 } else {
1584 GST_DEBUG_OBJECT (enc, "received SEGMENT %" GST_SEGMENT_FORMAT, &seg);
1585 GST_DEBUG_OBJECT (enc, "unsupported format; ignoring");
1586 res = TRUE;
1587 gst_event_unref (event);
1588 break;
1589 }
1590
1591 GST_AUDIO_ENCODER_STREAM_LOCK (enc);
1592 /* finish current segment */
1593 gst_audio_encoder_drain (enc);
1594 /* reset partially for new segment */
1595 gst_audio_encoder_reset (enc, FALSE);
1596 /* and follow along with segment */
1597 enc->input_segment = seg;
1598
1599 enc->priv->pending_events =
1600 g_list_append (enc->priv->pending_events, event);
1601 GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
1602
1603 res = TRUE;
1604 break;
1605 }
1606
1607 case GST_EVENT_FLUSH_START:
1608 res = gst_audio_encoder_push_event (enc, event);
1609 break;
1610
1611 case GST_EVENT_FLUSH_STOP:
1612 GST_AUDIO_ENCODER_STREAM_LOCK (enc);
1613 /* discard any pending stuff */
1614 /* TODO route through drain ?? */
1615 if (!enc->priv->drained && klass->flush)
1616 klass->flush (enc);
1617 /* and get (re)set for the sequel */
1618 gst_audio_encoder_reset (enc, FALSE);
1619
1620 enc->priv->pending_events = _flush_events (enc->srcpad,
1621 enc->priv->pending_events);
1622 GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
1623
1624 res = gst_audio_encoder_push_event (enc, event);
1625 break;
1626
1627 case GST_EVENT_EOS:
1628 GST_AUDIO_ENCODER_STREAM_LOCK (enc);
1629 gst_audio_encoder_drain (enc);
1630
1631 /* check for pending events and tags */
1632 gst_audio_encoder_push_pending_events (enc);
1633 gst_audio_encoder_check_and_push_pending_tags (enc);
1634
1635 GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
1636
1637 /* forward immediately because no buffer or serialized event
1638 * will come after EOS and nothing could trigger another
1639 * _finish_frame() call. */
1640 res = gst_audio_encoder_push_event (enc, event);
1641 break;
1642
1643 case GST_EVENT_CAPS:
1644 {
1645 GstCaps *caps;
1646
1647 gst_event_parse_caps (event, &caps);
1648 enc->priv->do_caps = TRUE;
1649 res = TRUE;
1650 gst_event_unref (event);
1651 break;
1652 }
1653
1654 case GST_EVENT_STREAM_START:
1655 {
1656 GST_AUDIO_ENCODER_STREAM_LOCK (enc);
1657 /* Flush upstream tags after a STREAM_START */
1658 GST_DEBUG_OBJECT (enc, "received STREAM_START. Clearing taglist");
1659 if (enc->priv->upstream_tags) {
1660 gst_tag_list_unref (enc->priv->upstream_tags);
1661 enc->priv->upstream_tags = NULL;
1662 enc->priv->tags_changed = TRUE;
1663 }
1664 GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
1665 res = gst_audio_encoder_push_event (enc, event);
1666 break;
1667 }
1668
1669 case GST_EVENT_TAG:
1670 {
1671 GstTagList *tags;
1672
1673 gst_event_parse_tag (event, &tags);
1674
1675 if (gst_tag_list_get_scope (tags) == GST_TAG_SCOPE_STREAM) {
1676 GST_AUDIO_ENCODER_STREAM_LOCK (enc);
1677 if (enc->priv->upstream_tags != tags) {
1678 tags = gst_tag_list_copy (tags);
1679
1680 /* FIXME: make generic based on GST_TAG_FLAG_ENCODED */
1681 gst_tag_list_remove_tag (tags, GST_TAG_CODEC);
1682 gst_tag_list_remove_tag (tags, GST_TAG_AUDIO_CODEC);
1683 gst_tag_list_remove_tag (tags, GST_TAG_VIDEO_CODEC);
1684 gst_tag_list_remove_tag (tags, GST_TAG_SUBTITLE_CODEC);
1685 gst_tag_list_remove_tag (tags, GST_TAG_CONTAINER_FORMAT);
1686 gst_tag_list_remove_tag (tags, GST_TAG_BITRATE);
1687 gst_tag_list_remove_tag (tags, GST_TAG_NOMINAL_BITRATE);
1688 gst_tag_list_remove_tag (tags, GST_TAG_MAXIMUM_BITRATE);
1689 gst_tag_list_remove_tag (tags, GST_TAG_MINIMUM_BITRATE);
1690 gst_tag_list_remove_tag (tags, GST_TAG_ENCODER);
1691 gst_tag_list_remove_tag (tags, GST_TAG_ENCODER_VERSION);
1692
1693 if (enc->priv->upstream_tags)
1694 gst_tag_list_unref (enc->priv->upstream_tags);
1695 enc->priv->upstream_tags = tags;
1696 GST_INFO_OBJECT (enc, "upstream stream tags: %" GST_PTR_FORMAT, tags);
1697 }
1698 gst_event_unref (event);
1699 event = gst_audio_encoder_create_merged_tags_event (enc);
1700 GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
1701
1702 /* No tags, go out of here instead of fall through */
1703 if (!event) {
1704 res = TRUE;
1705 break;
1706 }
1707 }
1708 /* fall through */
1709 }
1710 default:
1711 /* Forward non-serialized events immediately. */
1712 if (!GST_EVENT_IS_SERIALIZED (event)) {
1713 res =
1714 gst_pad_event_default (enc->sinkpad, GST_OBJECT_CAST (enc), event);
1715 } else {
1716 GST_AUDIO_ENCODER_STREAM_LOCK (enc);
1717 enc->priv->pending_events =
1718 g_list_append (enc->priv->pending_events, event);
1719 GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
1720 res = TRUE;
1721 }
1722 break;
1723 }
1724 return res;
1725 }
1726
1727 static gboolean
1728 gst_audio_encoder_sink_event (GstPad * pad, GstObject * parent,
1729 GstEvent * event)
1730 {
1731 GstAudioEncoder *enc;
1732 GstAudioEncoderClass *klass;
1733 gboolean ret;
1734
1735 enc = GST_AUDIO_ENCODER (parent);
1736 klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
1737
1738 GST_DEBUG_OBJECT (enc, "received event %d, %s", GST_EVENT_TYPE (event),
1739 GST_EVENT_TYPE_NAME (event));
1740
1741 if (klass->sink_event)
1742 ret = klass->sink_event (enc, event);
1743 else {
1744 gst_event_unref (event);
1745 ret = FALSE;
1746 }
1747
1748 GST_DEBUG_OBJECT (enc, "event result %d", ret);
1749
1750 return ret;
1751 }
1752
1753 static gboolean
1754 gst_audio_encoder_sink_query_default (GstAudioEncoder * enc, GstQuery * query)
1755 {
1756 GstPad *pad = GST_AUDIO_ENCODER_SINK_PAD (enc);
1757 gboolean res = FALSE;
1758
1759 switch (GST_QUERY_TYPE (query)) {
1760 case GST_QUERY_FORMATS:
1761 {
1762 gst_query_set_formats (query, 3,
1763 GST_FORMAT_TIME, GST_FORMAT_BYTES, GST_FORMAT_DEFAULT);
1764 res = TRUE;
1765 break;
1766 }
1767 case GST_QUERY_CONVERT:
1768 {
1769 GstFormat src_fmt, dest_fmt;
1770 gint64 src_val, dest_val;
1771
1772 gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
1773 GST_OBJECT_LOCK (enc);
1774 res = gst_audio_info_convert (&enc->priv->ctx.info,
1775 src_fmt, src_val, dest_fmt, &dest_val);
1776 GST_OBJECT_UNLOCK (enc);
1777 if (!res)
1778 goto error;
1779 gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
1780 res = TRUE;
1781 break;
1782 }
1783 case GST_QUERY_CAPS:
1784 {
1785 GstCaps *filter, *caps;
1786 GstAudioEncoderClass *klass;
1787
1788 gst_query_parse_caps (query, &filter);
1789
1790 klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
1791 if (klass->getcaps) {
1792 caps = klass->getcaps (enc, filter);
1793 gst_query_set_caps_result (query, caps);
1794 gst_caps_unref (caps);
1795 res = TRUE;
1796 }
1797 break;
1798 }
1799 case GST_QUERY_ALLOCATION:
1800 {
1801 GstAudioEncoderClass *klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
1802
1803 if (klass->propose_allocation)
1804 res = klass->propose_allocation (enc, query);
1805 break;
1806 }
1807 default:
1808 res = gst_pad_query_default (pad, GST_OBJECT (enc), query);
1809 break;
1810 }
1811
1812 error:
1813 return res;
1814 }
1815
1816 static gboolean
1817 gst_audio_encoder_sink_query (GstPad * pad, GstObject * parent,
1818 GstQuery * query)
1819 {
1820 GstAudioEncoder *encoder;
1821 GstAudioEncoderClass *encoder_class;
1822 gboolean ret = FALSE;
1823
1824 encoder = GST_AUDIO_ENCODER (parent);
1825 encoder_class = GST_AUDIO_ENCODER_GET_CLASS (encoder);
1826
1827 GST_DEBUG_OBJECT (encoder, "received query %d, %s", GST_QUERY_TYPE (query),
1828 GST_QUERY_TYPE_NAME (query));
1829
1830 if (encoder_class->sink_query)
1831 ret = encoder_class->sink_query (encoder, query);
1832
1833 return ret;
1834 }
1835
1836 static gboolean
1837 gst_audio_encoder_src_event_default (GstAudioEncoder * enc, GstEvent * event)
1838 {
1839 gboolean res;
1840
1841 switch (GST_EVENT_TYPE (event)) {
1842 default:
1843 res = gst_pad_event_default (enc->srcpad, GST_OBJECT_CAST (enc), event);
1844 break;
1845 }
1846 return res;
1847 }
1848
1849 static gboolean
1850 gst_audio_encoder_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
1851 {
1852 GstAudioEncoder *enc;
1853 GstAudioEncoderClass *klass;
1854 gboolean ret;
1855
1856 enc = GST_AUDIO_ENCODER (parent);
1857 klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
1858
1859 GST_DEBUG_OBJECT (enc, "received event %d, %s", GST_EVENT_TYPE (event),
1860 GST_EVENT_TYPE_NAME (event));
1861
1862 if (klass->src_event)
1863 ret = klass->src_event (enc, event);
1864 else {
1865 gst_event_unref (event);
1866 ret = FALSE;
1867 }
1868
1869 return ret;
1870 }
1871
1872 static gboolean
1873 gst_audio_encoder_decide_allocation_default (GstAudioEncoder * enc,
1874 GstQuery * query)
1875 {
1876 GstAllocator *allocator = NULL;
1877 GstAllocationParams params;
1878 gboolean update_allocator;
1879
1880 /* we got configuration from our peer or the decide_allocation method,
1881 * parse them */
1882 if (gst_query_get_n_allocation_params (query) > 0) {
1883 /* try the allocator */
1884 gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
1885 update_allocator = TRUE;
1886 } else {
1887 allocator = NULL;
1888 gst_allocation_params_init (&params);
1889 update_allocator = FALSE;
1890 }
1891
1892 if (update_allocator)
1893 gst_query_set_nth_allocation_param (query, 0, allocator, &params);
1894 else
1895 gst_query_add_allocation_param (query, allocator, &params);
1896 if (allocator)
1897 gst_object_unref (allocator);
1898
1899 return TRUE;
1900 }
1901
1902 static gboolean
1903 gst_audio_encoder_propose_allocation_default (GstAudioEncoder * enc,
1904 GstQuery * query)
1905 {
1906 return TRUE;
1907 }
1908
1909 /* FIXME ? are any of these queries (other than latency) an encoder's business
1910 * also, the conversion stuff might seem to make sense, but seems to not mind
1911 * segment stuff etc at all
1912 * Supposedly that's backward compatibility ... */
1913 static gboolean
1914 gst_audio_encoder_src_query_default (GstAudioEncoder * enc, GstQuery * query)
1915 {
1916 GstPad *pad = GST_AUDIO_ENCODER_SRC_PAD (enc);
1917 gboolean res = FALSE;
1918
1919 GST_LOG_OBJECT (enc, "handling query: %" GST_PTR_FORMAT, query);
1920
1921 switch (GST_QUERY_TYPE (query)) {
1922 case GST_QUERY_POSITION:
1923 {
1924 GstFormat fmt, req_fmt;
1925 gint64 pos, val;
1926
1927 if ((res = gst_pad_peer_query (enc->sinkpad, query))) {
1928 GST_LOG_OBJECT (enc, "returning peer response");
1929 break;
1930 }
1931
1932 gst_query_parse_position (query, &req_fmt, NULL);
1933
1934 /* Refuse BYTES format queries. If it made sense to
1935 * answer them, upstream would have already */
1936 if (req_fmt == GST_FORMAT_BYTES) {
1937 GST_LOG_OBJECT (enc, "Ignoring BYTES position query");
1938 break;
1939 }
1940
1941 fmt = GST_FORMAT_TIME;
1942 if (!(res = gst_pad_peer_query_position (enc->sinkpad, fmt, &pos)))
1943 break;
1944
1945 if ((res =
1946 gst_pad_peer_query_convert (enc->sinkpad, fmt, pos, req_fmt,
1947 &val))) {
1948 gst_query_set_position (query, req_fmt, val);
1949 }
1950 break;
1951 }
1952 case GST_QUERY_DURATION:
1953 {
1954 GstFormat fmt, req_fmt;
1955 gint64 dur, val;
1956
1957 if ((res = gst_pad_peer_query (enc->sinkpad, query))) {
1958 GST_LOG_OBJECT (enc, "returning peer response");
1959 break;
1960 }
1961
1962 gst_query_parse_duration (query, &req_fmt, NULL);
1963
1964 /* Refuse BYTES format queries. If it made sense to
1965 * answer them, upstream would have already */
1966 if (req_fmt == GST_FORMAT_BYTES) {
1967 GST_LOG_OBJECT (enc, "Ignoring BYTES duration query");
1968 break;
1969 }
1970
1971 fmt = GST_FORMAT_TIME;
1972 if (!(res = gst_pad_peer_query_duration (enc->sinkpad, fmt, &dur)))
1973 break;
1974
1975 if ((res =
1976 gst_pad_peer_query_convert (enc->sinkpad, fmt, dur, req_fmt,
1977 &val))) {
1978 gst_query_set_duration (query, req_fmt, val);
1979 }
1980 break;
1981 }
1982 case GST_QUERY_FORMATS:
1983 {
1984 gst_query_set_formats (query, 2, GST_FORMAT_TIME, GST_FORMAT_BYTES);
1985 res = TRUE;
1986 break;
1987 }
1988 case GST_QUERY_CONVERT:
1989 {
1990 GstFormat src_fmt, dest_fmt;
1991 gint64 src_val, dest_val;
1992
1993 gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
1994 GST_OBJECT_LOCK (enc);
1995 res = __gst_audio_encoded_audio_convert (&enc->priv->ctx.info,
1996 enc->priv->bytes_out, enc->priv->samples_in, src_fmt, src_val,
1997 &dest_fmt, &dest_val);
1998 GST_OBJECT_UNLOCK (enc);
1999 if (!res)
2000 break;
2001 gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
2002 break;
2003 }
2004 case GST_QUERY_LATENCY:
2005 {
2006 if ((res = gst_pad_peer_query (enc->sinkpad, query))) {
2007 gboolean live;
2008 GstClockTime min_latency, max_latency;
2009
2010 gst_query_parse_latency (query, &live, &min_latency, &max_latency);
2011 GST_DEBUG_OBJECT (enc, "Peer latency: live %d, min %"
2012 GST_TIME_FORMAT " max %" GST_TIME_FORMAT, live,
2013 GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));
2014
2015 GST_OBJECT_LOCK (enc);
2016 /* add our latency */
2017 min_latency += enc->priv->ctx.min_latency;
2018 if (max_latency == -1 || enc->priv->ctx.max_latency == -1)
2019 max_latency = -1;
2020 else
2021 max_latency += enc->priv->ctx.max_latency;
2022 GST_OBJECT_UNLOCK (enc);
2023
2024 gst_query_set_latency (query, live, min_latency, max_latency);
2025 }
2026 break;
2027 }
2028 default:
2029 res = gst_pad_query_default (pad, GST_OBJECT (enc), query);
2030 break;
2031 }
2032
2033 return res;
2034 }
2035
2036 static gboolean
2037 gst_audio_encoder_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
2038 {
2039 GstAudioEncoder *encoder;
2040 GstAudioEncoderClass *encoder_class;
2041 gboolean ret = FALSE;
2042
2043 encoder = GST_AUDIO_ENCODER (parent);
2044 encoder_class = GST_AUDIO_ENCODER_GET_CLASS (encoder);
2045
2046 GST_DEBUG_OBJECT (encoder, "received query %d, %s", GST_QUERY_TYPE (query),
2047 GST_QUERY_TYPE_NAME (query));
2048
2049 if (encoder_class->src_query)
2050 ret = encoder_class->src_query (encoder, query);
2051
2052 return ret;
2053 }
2054
2055
2056 static void
2057 gst_audio_encoder_set_property (GObject * object, guint prop_id,
2058 const GValue * value, GParamSpec * pspec)
2059 {
2060 GstAudioEncoder *enc;
2061
2062 enc = GST_AUDIO_ENCODER (object);
2063
2064 switch (prop_id) {
2065 case PROP_PERFECT_TS:
2066 if (enc->priv->granule && !g_value_get_boolean (value))
2067 GST_WARNING_OBJECT (enc, "perfect-timestamp can not be set FALSE "
2068 "while granule handling is enabled");
2069 else
2070 enc->priv->perfect_ts = g_value_get_boolean (value);
2071 break;
2072 case PROP_HARD_RESYNC:
2073 enc->priv->hard_resync = g_value_get_boolean (value);
2074 break;
2075 case PROP_TOLERANCE:
2076 enc->priv->tolerance = g_value_get_int64 (value);
2077 break;
2078 default:
2079 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
2080 break;
2081 }
2082 }
2083
2084 static void
2085 gst_audio_encoder_get_property (GObject * object, guint prop_id,
2086 GValue * value, GParamSpec * pspec)
2087 {
2088 GstAudioEncoder *enc;
2089
2090 enc = GST_AUDIO_ENCODER (object);
2091
2092 switch (prop_id) {
2093 case PROP_PERFECT_TS:
2094 g_value_set_boolean (value, enc->priv->perfect_ts);
2095 break;
2096 case PROP_GRANULE:
2097 g_value_set_boolean (value, enc->priv->granule);
2098 break;
2099 case PROP_HARD_RESYNC:
2100 g_value_set_boolean (value, enc->priv->hard_resync);
2101 break;
2102 case PROP_TOLERANCE:
2103 g_value_set_int64 (value, enc->priv->tolerance);
2104 break;
2105 default:
2106 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
2107 break;
2108 }
2109 }
2110
2111 static gboolean
2112 gst_audio_encoder_activate (GstAudioEncoder * enc, gboolean active)
2113 {
2114 GstAudioEncoderClass *klass;
2115 gboolean result = TRUE;
2116
2117 klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
2118
2119 g_return_val_if_fail (!enc->priv->granule || enc->priv->perfect_ts, FALSE);
2120
2121 GST_DEBUG_OBJECT (enc, "activate %d", active);
2122
2123 if (active) {
2124 /* arrange clean state */
2125 gst_audio_encoder_reset (enc, TRUE);
2126
2127 if (!enc->priv->active && klass->start)
2128 result = klass->start (enc);
2129 } else {
2130 /* We must make sure streaming has finished before resetting things
2131 * and calling the ::stop vfunc */
2132 GST_PAD_STREAM_LOCK (enc->sinkpad);
2133 GST_PAD_STREAM_UNLOCK (enc->sinkpad);
2134
2135 if (enc->priv->active && klass->stop)
2136 result = klass->stop (enc);
2137
2138 /* clean up */
2139 gst_audio_encoder_reset (enc, TRUE);
2140 }
2141 GST_DEBUG_OBJECT (enc, "activate return: %d", result);
2142 return result;
2143 }
2144
2145
2146 static gboolean
2147 gst_audio_encoder_sink_activate_mode (GstPad * pad, GstObject * parent,
2148 GstPadMode mode, gboolean active)
2149 {
2150 gboolean result = TRUE;
2151 GstAudioEncoder *enc;
2152
2153 enc = GST_AUDIO_ENCODER (parent);
2154
2155 GST_DEBUG_OBJECT (enc, "sink activate push %d", active);
2156
2157 result = gst_audio_encoder_activate (enc, active);
2158
2159 if (result)
2160 enc->priv->active = active;
2161
2162 GST_DEBUG_OBJECT (enc, "sink activate push return: %d", result);
2163
2164 return result;
2165 }
2166
2167 /**
2168 * gst_audio_encoder_get_audio_info:
2169 * @enc: a #GstAudioEncoder
2170 *
2171 * Returns: (transfer none): a #GstAudioInfo describing the input audio format
2172 */
2173 GstAudioInfo *
2174 gst_audio_encoder_get_audio_info (GstAudioEncoder * enc)
2175 {
2176 g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), NULL);
2177
2178 return &enc->priv->ctx.info;
2179 }
2180
2181 /**
2182 * gst_audio_encoder_set_frame_samples_min:
2183 * @enc: a #GstAudioEncoder
2184 * @num: number of samples per frame
2185 *
2186 * Sets the minimum number of samples (per channel) that the subclass needs
2187 * to be handed at a time; if 0, it will be handed all available samples.
2188 *
2189 * If an exact number of samples is required, gst_audio_encoder_set_frame_samples_max()
2190 * must be called with the same number.
2191 *
2192 * Note: This value will be reset to 0 every time before
2193 * #GstAudioEncoderClass.set_format() is called.
2194 */
2195 void
2196 gst_audio_encoder_set_frame_samples_min (GstAudioEncoder * enc, gint num)
2197 {
2198 g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2199
2200 enc->priv->ctx.frame_samples_min = num;
2201 GST_LOG_OBJECT (enc, "set to %d", num);
2202 }
2203
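/* Illustrative sketch (not part of the original file): a hypothetical
 * subclass whose codec consumes a fixed frame of, say, 1152 samples per
 * channel might configure the base class from its ::set_format vfunc as
 * follows; the function name and the frame size are assumptions made for
 * the example only.
 *
 *   static gboolean
 *   my_audio_enc_set_format (GstAudioEncoder * enc, GstAudioInfo * info)
 *   {
 *     // exact frame size: min == max, and at most one frame per call
 *     gst_audio_encoder_set_frame_samples_min (enc, 1152);
 *     gst_audio_encoder_set_frame_samples_max (enc, 1152);
 *     gst_audio_encoder_set_frame_max (enc, 1);
 *     return TRUE;
 *   }
 */
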
2204 /**
2205 * gst_audio_encoder_get_frame_samples_min:
2206 * @enc: a #GstAudioEncoder
2207 *
2208 * Returns: currently minimum requested samples per frame
2209 */
2210 gint
2211 gst_audio_encoder_get_frame_samples_min (GstAudioEncoder * enc)
2212 {
2213 g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), 0);
2214
2215 return enc->priv->ctx.frame_samples_min;
2216 }
2217
2218 /**
2219 * gst_audio_encoder_set_frame_samples_max:
2220 * @enc: a #GstAudioEncoder
2221 * @num: number of samples per frame
2222 *
2223 * Sets the maximum number of samples (per channel) that the subclass can
2224 * be handed at a time; if 0, it will be handed all available samples.
2225 *
2226 * If an exact number of samples is required, gst_audio_encoder_set_frame_samples_min()
2227 * must be called with the same number.
2228 *
2229 * Note: This value will be reset to 0 every time before
2230 * #GstAudioEncoderClass.set_format() is called.
2231 */
2232 void
2233 gst_audio_encoder_set_frame_samples_max (GstAudioEncoder * enc, gint num)
2234 {
2235 g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2236
2237 enc->priv->ctx.frame_samples_max = num;
2238 GST_LOG_OBJECT (enc, "set to %d", num);
2239 }
2240
2241 /**
2242 * gst_audio_encoder_get_frame_samples_max:
2243 * @enc: a #GstAudioEncoder
2244 *
2245 * Returns: currently maximum requested samples per frame
2246 */
2247 gint
2248 gst_audio_encoder_get_frame_samples_max (GstAudioEncoder * enc)
2249 {
2250 g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), 0);
2251
2252 return enc->priv->ctx.frame_samples_max;
2253 }
2254
2255 /**
2256 * gst_audio_encoder_set_frame_max:
2257 * @enc: a #GstAudioEncoder
2258 * @num: number of frames
2259 *
2260 * Sets the maximum number of frames accepted at once (assumed to be at least 1).
2261 * Requires @frame_samples_min and @frame_samples_max to be equal.
2262 *
2263 * Note: This value will be reset to 0 every time before
2264 * #GstAudioEncoderClass.set_format() is called.
2265 */
2266 void
2267 gst_audio_encoder_set_frame_max (GstAudioEncoder * enc, gint num)
2268 {
2269 g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2270
2271 enc->priv->ctx.frame_max = num;
2272 GST_LOG_OBJECT (enc, "set to %d", num);
2273 }
2274
2275 /**
2276 * gst_audio_encoder_get_frame_max:
2277 * @enc: a #GstAudioEncoder
2278 *
2279 * Returns: currently configured maximum handled frames
2280 */
2281 gint
2282 gst_audio_encoder_get_frame_max (GstAudioEncoder * enc)
2283 {
2284 g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), 0);
2285
2286 return enc->priv->ctx.frame_max;
2287 }
2288
2289 /**
2290 * gst_audio_encoder_set_lookahead:
2291 * @enc: a #GstAudioEncoder
2292 * @num: lookahead
2293 *
2294 * Sets encoder lookahead (in units of input rate samples).
2295 *
2296 * Note: This value will be reset to 0 every time before
2297 * #GstAudioEncoderClass.set_format() is called.
2298 */
2299 void
2300 gst_audio_encoder_set_lookahead (GstAudioEncoder * enc, gint num)
2301 {
2302 g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2303
2304 enc->priv->ctx.lookahead = num;
2305 GST_LOG_OBJECT (enc, "set to %d", num);
2306 }
2307
2308 /**
2309 * gst_audio_encoder_get_lookahead:
2310 * @enc: a #GstAudioEncoder
2311 *
2312 * Returns: currently configured encoder lookahead
2313 */
2314 gint
2315 gst_audio_encoder_get_lookahead (GstAudioEncoder * enc)
2316 {
2317 g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), 0);
2318
2319 return enc->priv->ctx.lookahead;
2320 }
2321
2322 /**
2323 * gst_audio_encoder_set_latency:
2324 * @enc: a #GstAudioEncoder
2325 * @min: minimum latency
2326 * @max: maximum latency
2327 *
2328 * Sets encoder latency.
2329 */
2330 void
2331 gst_audio_encoder_set_latency (GstAudioEncoder * enc,
2332 GstClockTime min, GstClockTime max)
2333 {
2334 g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2335 g_return_if_fail (GST_CLOCK_TIME_IS_VALID (min));
2336 g_return_if_fail (min <= max);
2337
2338 GST_OBJECT_LOCK (enc);
2339 enc->priv->ctx.min_latency = min;
2340 enc->priv->ctx.max_latency = max;
2341 GST_OBJECT_UNLOCK (enc);
2342
2343 GST_LOG_OBJECT (enc, "set to %" GST_TIME_FORMAT "-%" GST_TIME_FORMAT,
2344 GST_TIME_ARGS (min), GST_TIME_ARGS (max));
2345
2346 /* post latency message on the bus */
2347 gst_element_post_message (GST_ELEMENT (enc),
2348 gst_message_new_latency (GST_OBJECT (enc)));
2349 }
2350
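/* Illustrative sketch (values are assumptions, not from this file): a
 * subclass that buffers one codec frame of 1152 samples could report the
 * corresponding latency from its ::set_format vfunc, e.g.:
 *
 *   GstClockTime frame_dur =
 *       gst_util_uint64_scale (1152, GST_SECOND, GST_AUDIO_INFO_RATE (info));
 *   gst_audio_encoder_set_latency (enc, frame_dur, frame_dur);
 */
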
2351 /**
2352 * gst_audio_encoder_get_latency:
2353 * @enc: a #GstAudioEncoder
2354 * @min: (out) (allow-none): a pointer to storage to hold minimum latency
2355 * @max: (out) (allow-none): a pointer to storage to hold maximum latency
2356 *
2357 * Sets the variables pointed to by @min and @max to the currently configured
2358 * latency.
2359 */
2360 void
2361 gst_audio_encoder_get_latency (GstAudioEncoder * enc,
2362 GstClockTime * min, GstClockTime * max)
2363 {
2364 g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2365
2366 GST_OBJECT_LOCK (enc);
2367 if (min)
2368 *min = enc->priv->ctx.min_latency;
2369 if (max)
2370 *max = enc->priv->ctx.max_latency;
2371 GST_OBJECT_UNLOCK (enc);
2372 }
2373
2374 /**
2375 * gst_audio_encoder_set_headers:
2376 * @enc: a #GstAudioEncoder
2377 * @headers: (transfer full) (element-type Gst.Buffer): a list of
2378 * #GstBuffer containing the codec header
2379 *
2380 * Set the codec headers to be sent downstream whenever requested.
2381 */
2382 void
2383 gst_audio_encoder_set_headers (GstAudioEncoder * enc, GList * headers)
2384 {
2385 GST_DEBUG_OBJECT (enc, "new headers %p", headers);
2386
2387 if (enc->priv->ctx.headers) {
2388 g_list_foreach (enc->priv->ctx.headers, (GFunc) gst_buffer_unref, NULL);
2389 g_list_free (enc->priv->ctx.headers);
2390 }
2391 enc->priv->ctx.headers = headers;
2392 enc->priv->ctx.new_headers = TRUE;
2393 }
2394
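/* Illustrative sketch (ident_buf and setup_buf are hypothetical header
 * buffers): a subclass producing in-band codec headers could hand them to
 * the base class like this; the call takes ownership of the list and of
 * the buffers it contains.
 *
 *   GList *headers = NULL;
 *
 *   headers = g_list_append (headers, ident_buf);
 *   headers = g_list_append (headers, setup_buf);
 *   gst_audio_encoder_set_headers (enc, headers);
 */
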
2395 /**
2396 * gst_audio_encoder_set_allocation_caps:
2397 * @enc: a #GstAudioEncoder
2398 * @allocation_caps: (allow-none): a #GstCaps or %NULL
2399 *
2400 * Sets the caps to use in the allocation query, in case they differ from
2401 * the caps set on the pad. Use this function before calling
2402 * gst_audio_encoder_negotiate(). If set to %NULL, the allocation
2403 * query will use the caps from the pad.
2404 *
2405 * Since: 1.10
2406 */
2407 void
2408 gst_audio_encoder_set_allocation_caps (GstAudioEncoder * enc,
2409 GstCaps * allocation_caps)
2410 {
2411 g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2412
2413 gst_caps_replace (&enc->priv->ctx.allocation_caps, allocation_caps);
2414 }
2415
2416 /**
2417 * gst_audio_encoder_set_mark_granule:
2418 * @enc: a #GstAudioEncoder
2419 * @enabled: new state
2420 *
2421 * Enable or disable encoder granule handling.
2422 *
2423 * MT safe.
2424 */
2425 void
2426 gst_audio_encoder_set_mark_granule (GstAudioEncoder * enc, gboolean enabled)
2427 {
2428 g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2429
2430 GST_LOG_OBJECT (enc, "enabled: %d", enabled);
2431
2432 GST_OBJECT_LOCK (enc);
2433 enc->priv->granule = enabled;
2434 GST_OBJECT_UNLOCK (enc);
2435 }
2436
2437 /**
2438 * gst_audio_encoder_get_mark_granule:
2439 * @enc: a #GstAudioEncoder
2440 *
2441 * Queries if the encoder will handle granule marking.
2442 *
2443 * Returns: TRUE if granule marking is enabled.
2444 *
2445 * MT safe.
2446 */
2447 gboolean
2448 gst_audio_encoder_get_mark_granule (GstAudioEncoder * enc)
2449 {
2450 gboolean result;
2451
2452 g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), FALSE);
2453
2454 GST_OBJECT_LOCK (enc);
2455 result = enc->priv->granule;
2456 GST_OBJECT_UNLOCK (enc);
2457
2458 return result;
2459 }
2460
2461 /**
2462 * gst_audio_encoder_set_perfect_timestamp:
2463 * @enc: a #GstAudioEncoder
2464 * @enabled: new state
2465 *
2466 * Enable or disable encoder perfect output timestamp preference.
2467 *
2468 * MT safe.
2469 */
2470 void
2471 gst_audio_encoder_set_perfect_timestamp (GstAudioEncoder * enc,
2472 gboolean enabled)
2473 {
2474 g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2475
2476 GST_LOG_OBJECT (enc, "enabled: %d", enabled);
2477
2478 GST_OBJECT_LOCK (enc);
2479 enc->priv->perfect_ts = enabled;
2480 GST_OBJECT_UNLOCK (enc);
2481 }
2482
2483 /**
2484 * gst_audio_encoder_get_perfect_timestamp:
2485 * @enc: a #GstAudioEncoder
2486 *
2487 * Queries encoder perfect timestamp behaviour.
2488 *
2489 * Returns: TRUE if perfect timestamp setting enabled.
2490 *
2491 * MT safe.
2492 */
2493 gboolean
2494 gst_audio_encoder_get_perfect_timestamp (GstAudioEncoder * enc)
2495 {
2496 gboolean result;
2497
2498 g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), FALSE);
2499
2500 GST_OBJECT_LOCK (enc);
2501 result = enc->priv->perfect_ts;
2502 GST_OBJECT_UNLOCK (enc);
2503
2504 return result;
2505 }
2506
2507 /**
2508 * gst_audio_encoder_set_hard_sync:
2509 * @enc: a #GstAudioEncoder
2510 * @enabled: new state
2511 *
2512 * Sets encoder hard resync handling.
2513 *
2514 * MT safe.
2515 */
2516 void
2517 gst_audio_encoder_set_hard_resync (GstAudioEncoder * enc, gboolean enabled)
2518 {
2519 g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2520
2521 GST_LOG_OBJECT (enc, "enabled: %d", enabled);
2522
2523 GST_OBJECT_LOCK (enc);
2524 enc->priv->hard_resync = enabled;
2525 GST_OBJECT_UNLOCK (enc);
2526 }
2527
2528 /**
2529 * gst_audio_encoder_get_hard_sync:
2530 * @enc: a #GstAudioEncoder
2531 *
2532 * Queries encoder's hard resync setting.
2533 *
2534 * Returns: TRUE if hard resync is enabled.
2535 *
2536 * MT safe.
2537 */
2538 gboolean
2539 gst_audio_encoder_get_hard_resync (GstAudioEncoder * enc)
2540 {
2541 gboolean result;
2542
2543 g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), FALSE);
2544
2545 GST_OBJECT_LOCK (enc);
2546 result = enc->priv->hard_resync;
2547 GST_OBJECT_UNLOCK (enc);
2548
2549 return result;
2550 }
2551
2552 /**
2553 * gst_audio_encoder_set_tolerance:
2554 * @enc: a #GstAudioEncoder
2555 * @tolerance: new tolerance
2556 *
2557 * Configures encoder audio jitter tolerance threshold.
2558 *
2559 * MT safe.
2560 */
2561 void
2562 gst_audio_encoder_set_tolerance (GstAudioEncoder * enc, GstClockTime tolerance)
2563 {
2564 g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2565 g_return_if_fail (GST_CLOCK_TIME_IS_VALID (tolerance));
2566
2567 GST_OBJECT_LOCK (enc);
2568 enc->priv->tolerance = tolerance;
2569 GST_OBJECT_UNLOCK (enc);
2570
2571 GST_LOG_OBJECT (enc, "set to %" GST_TIME_FORMAT, GST_TIME_ARGS (tolerance));
2572 }
2573
2574 /**
2575 * gst_audio_encoder_get_tolerance:
2576 * @enc: a #GstAudioEncoder
2577 *
2578 * Queries current audio jitter tolerance threshold.
2579 *
2580 * Returns: encoder audio jitter tolerance threshold.
2581 *
2582 * MT safe.
2583 */
2584 GstClockTime
2585 gst_audio_encoder_get_tolerance (GstAudioEncoder * enc)
2586 {
2587 GstClockTime result;
2588
2589 g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), 0);
2590
2591 GST_OBJECT_LOCK (enc);
2592 result = enc->priv->tolerance;
2593 GST_OBJECT_UNLOCK (enc);
2594
2595 return result;
2596 }
2597
2598 /**
2599 * gst_audio_encoder_set_hard_min:
2600 * @enc: a #GstAudioEncoder
2601 * @enabled: new state
2602 *
2603 * Configures encoder hard minimum handling. If enabled, the subclass
2604 * will never be handed fewer samples than it configured, which otherwise
2605 * might occur near end-of-data handling. Instead, the leftover samples
2606 * will simply be discarded.
2607 *
2608 * MT safe.
2609 */
2610 void
2611 gst_audio_encoder_set_hard_min (GstAudioEncoder * enc, gboolean enabled)
2612 {
2613 g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2614
2615 GST_OBJECT_LOCK (enc);
2616 enc->priv->hard_min = enabled;
2617 GST_OBJECT_UNLOCK (enc);
2618 }
2619
2620 /**
2621 * gst_audio_encoder_get_hard_min:
2622 * @enc: a #GstAudioEncoder
2623 *
2624 * Queries encoder hard minimum handling.
2625 *
2626 * Returns: TRUE if hard minimum handling is enabled.
2627 *
2628 * MT safe.
2629 */
2630 gboolean
2631 gst_audio_encoder_get_hard_min (GstAudioEncoder * enc)
2632 {
2633 gboolean result;
2634
2635 g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), 0);
2636
2637 GST_OBJECT_LOCK (enc);
2638 result = enc->priv->hard_min;
2639 GST_OBJECT_UNLOCK (enc);
2640
2641 return result;
2642 }
2643
2644 /**
2645 * gst_audio_encoder_set_drainable:
2646 * @enc: a #GstAudioEncoder
2647 * @enabled: new state
2648 *
2649 * Configures encoder drain handling. If drainable, subclass might
2650 * be handed a NULL buffer to have it return any leftover encoded data.
2651 * Otherwise, it is not considered so capable and will only ever be passed
2652 * real data.
2653 *
2654 * MT safe.
2655 */
2656 void
2657 gst_audio_encoder_set_drainable (GstAudioEncoder * enc, gboolean enabled)
2658 {
2659 g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2660
2661 GST_OBJECT_LOCK (enc);
2662 enc->priv->drainable = enabled;
2663 GST_OBJECT_UNLOCK (enc);
2664 }
2665
2666 /**
2667 * gst_audio_encoder_get_drainable:
2668 * @enc: a #GstAudioEncoder
2669 *
2670 * Queries encoder drain handling.
2671 *
2672 * Returns: TRUE if drainable handling is enabled.
2673 *
2674 * MT safe.
2675 */
2676 gboolean
2677 gst_audio_encoder_get_drainable (GstAudioEncoder * enc)
2678 {
2679 gboolean result;
2680
2681 g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), 0);
2682
2683 GST_OBJECT_LOCK (enc);
2684 result = enc->priv->drainable;
2685 GST_OBJECT_UNLOCK (enc);
2686
2687 return result;
2688 }
2689
2690 /**
2691 * gst_audio_encoder_merge_tags:
2692 * @enc: a #GstAudioEncoder
2693 * @tags: (allow-none): a #GstTagList to merge, or NULL to unset
2694 * previously-set tags
2695 * @mode: the #GstTagMergeMode to use, usually #GST_TAG_MERGE_REPLACE
2696 *
2697 * Sets the audio encoder tags and how they should be merged with any
2698 * upstream stream tags. This will override any tags previously-set
2699 * with gst_audio_encoder_merge_tags().
2700 *
2701 * Note that this is provided for convenience, and the subclass is
2702 * not required to use this and can still do tag handling on its own.
2703 *
2704 * MT safe.
2705 */
2706 void
2707 gst_audio_encoder_merge_tags (GstAudioEncoder * enc,
2708 const GstTagList * tags, GstTagMergeMode mode)
2709 {
2710 g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2711 g_return_if_fail (tags == NULL || GST_IS_TAG_LIST (tags));
2712 g_return_if_fail (tags == NULL || mode != GST_TAG_MERGE_UNDEFINED);
2713
2714 GST_AUDIO_ENCODER_STREAM_LOCK (enc);
2715 if (enc->priv->tags != tags) {
2716 if (enc->priv->tags) {
2717 gst_tag_list_unref (enc->priv->tags);
2718 enc->priv->tags = NULL;
2719 enc->priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
2720 }
2721 if (tags) {
2722 enc->priv->tags = gst_tag_list_ref ((GstTagList *) tags);
2723 enc->priv->tags_merge_mode = mode;
2724 }
2725
2726 GST_DEBUG_OBJECT (enc, "setting encoder tags to %" GST_PTR_FORMAT, tags);
2727 enc->priv->tags_changed = TRUE;
2728 }
2729 GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
2730 }
2731
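/* Illustrative sketch (tag values are assumptions): a subclass could set
 * its codec tags once, typically from ::set_format, letting the base class
 * merge them with upstream stream tags:
 *
 *   GstTagList *tags = gst_tag_list_new_empty ();
 *
 *   gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE,
 *       GST_TAG_AUDIO_CODEC, "My Codec", GST_TAG_BITRATE, 128000, NULL);
 *   gst_audio_encoder_merge_tags (enc, tags, GST_TAG_MERGE_REPLACE);
 *   gst_tag_list_unref (tags);
 */
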
2732 static gboolean
2733 gst_audio_encoder_negotiate_default (GstAudioEncoder * enc)
2734 {
2735 GstAudioEncoderClass *klass;
2736 gboolean res = TRUE;
2737 GstQuery *query = NULL;
2738 GstAllocator *allocator;
2739 GstAllocationParams params;
2740 GstCaps *caps, *prevcaps;
2741
2742 g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), FALSE);
2743 g_return_val_if_fail (GST_IS_CAPS (enc->priv->ctx.caps), FALSE);
2744
2745 klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
2746
2747 caps = enc->priv->ctx.caps;
2748 if (enc->priv->ctx.allocation_caps == NULL)
2749 enc->priv->ctx.allocation_caps = gst_caps_ref (caps);
2750
2751 GST_DEBUG_OBJECT (enc, "Setting srcpad caps %" GST_PTR_FORMAT, caps);
2752
2753 if (enc->priv->pending_events) {
2754 GList **pending_events, *l;
2755
2756 pending_events = &enc->priv->pending_events;
2757
2758 GST_DEBUG_OBJECT (enc, "Pushing pending events");
2759 for (l = *pending_events; l;) {
2760 GstEvent *event = GST_EVENT (l->data);
2761 GList *tmp;
2762
2763 if (GST_EVENT_TYPE (event) < GST_EVENT_CAPS) {
2764 gst_audio_encoder_push_event (enc, l->data);
2765 tmp = l;
2766 l = l->next;
2767 *pending_events = g_list_delete_link (*pending_events, tmp);
2768 } else {
2769 l = l->next;
2770 }
2771 }
2772 }
2773
2774 prevcaps = gst_pad_get_current_caps (enc->srcpad);
2775 if (!prevcaps || !gst_caps_is_equal (prevcaps, caps))
2776 res = gst_pad_set_caps (enc->srcpad, caps);
2777 if (prevcaps)
2778 gst_caps_unref (prevcaps);
2779
2780 if (!res)
2781 goto done;
2782 enc->priv->ctx.output_caps_changed = FALSE;
2783
2784 query = gst_query_new_allocation (enc->priv->ctx.allocation_caps, TRUE);
2785 if (!gst_pad_peer_query (enc->srcpad, query)) {
2786 GST_DEBUG_OBJECT (enc, "didn't get downstream ALLOCATION hints");
2787 }
2788
2789 g_assert (klass->decide_allocation != NULL);
2790 res = klass->decide_allocation (enc, query);
2791
2792 GST_DEBUG_OBJECT (enc, "ALLOCATION (%d) params: %" GST_PTR_FORMAT, res,
2793 query);
2794
2795 if (!res)
2796 goto no_decide_allocation;
2797
2798 /* we got configuration from our peer or the decide_allocation method,
2799 * parse them */
2800 if (gst_query_get_n_allocation_params (query) > 0) {
2801 gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
2802 } else {
2803 allocator = NULL;
2804 gst_allocation_params_init (&params);
2805 }
2806
2807 if (enc->priv->ctx.allocator)
2808 gst_object_unref (enc->priv->ctx.allocator);
2809 enc->priv->ctx.allocator = allocator;
2810 enc->priv->ctx.params = params;
2811
2812 done:
2813 if (query)
2814 gst_query_unref (query);
2815
2816 return res;
2817
2818 /* ERRORS */
2819 no_decide_allocation:
2820 {
2821 GST_WARNING_OBJECT (enc, "Subclass failed to decide allocation");
2822 goto done;
2823 }
2824 }
2825
2826 static gboolean
2827 gst_audio_encoder_negotiate_unlocked (GstAudioEncoder * enc)
2828 {
2829 GstAudioEncoderClass *klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
2830 gboolean ret = TRUE;
2831
2832 if (G_LIKELY (klass->negotiate))
2833 ret = klass->negotiate (enc);
2834
2835 return ret;
2836 }
2837
2838 /**
2839 * gst_audio_encoder_negotiate:
2840 * @enc: a #GstAudioEncoder
2841 *
2842 * Negotiates with downstream elements according to the currently configured #GstCaps.
2843 * Unmarks GST_PAD_FLAG_NEED_RECONFIGURE in any case, but marks it again
2844 * if negotiation fails.
2845 *
2846 * Returns: %TRUE if the negotiation succeeded, else %FALSE.
2847 */
2848 gboolean
2849 gst_audio_encoder_negotiate (GstAudioEncoder * enc)
2850 {
2851 GstAudioEncoderClass *klass;
2852 gboolean ret = TRUE;
2853
2854 g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), FALSE);
2855
2856 klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
2857
2858 GST_AUDIO_ENCODER_STREAM_LOCK (enc);
2859 gst_pad_check_reconfigure (enc->srcpad);
2860 if (klass->negotiate) {
2861 ret = klass->negotiate (enc);
2862 if (!ret)
2863 gst_pad_mark_reconfigure (enc->srcpad);
2864 }
2865 GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
2866
2867 return ret;
2868 }
2869
2870 /**
2871 * gst_audio_encoder_set_output_format:
2872 * @enc: a #GstAudioEncoder
2873 * @caps: (transfer none): #GstCaps
2874 *
2875 * Configure output caps on the srcpad of @enc.
2876 *
2877 * Returns: %TRUE on success.
2878 */
2879 gboolean
2880 gst_audio_encoder_set_output_format (GstAudioEncoder * enc, GstCaps * caps)
2881 {
2882 gboolean res = TRUE;
2883 GstCaps *templ_caps;
2884
2885 GST_DEBUG_OBJECT (enc, "Setting srcpad caps %" GST_PTR_FORMAT, caps);
2886
2887 GST_AUDIO_ENCODER_STREAM_LOCK (enc);
2888 if (!gst_caps_is_fixed (caps))
2889 goto refuse_caps;
2890
2891 /* Only allow caps that are a subset of the template caps */
2892 templ_caps = gst_pad_get_pad_template_caps (enc->srcpad);
2893 if (!gst_caps_is_subset (caps, templ_caps)) {
2894 gst_caps_unref (templ_caps);
2895 goto refuse_caps;
2896 }
2897 gst_caps_unref (templ_caps);
2898
2899 gst_caps_replace (&enc->priv->ctx.caps, caps);
2900 enc->priv->ctx.output_caps_changed = TRUE;
2901
2902 done:
2903 GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
2904
2905 return res;
2906
2907 /* ERRORS */
2908 refuse_caps:
2909 {
2910 GST_WARNING_OBJECT (enc, "refused caps %" GST_PTR_FORMAT, caps);
2911 res = FALSE;
2912 goto done;
2913 }
2914 }
2915
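/* Illustrative sketch (the media type and its fields are assumptions): a
 * subclass would typically set fixed output caps from its ::set_format
 * vfunc once the stream parameters are known, e.g.:
 *
 *   GstCaps *caps = gst_caps_new_simple ("audio/x-mycodec",
 *       "rate", G_TYPE_INT, GST_AUDIO_INFO_RATE (info),
 *       "channels", G_TYPE_INT, GST_AUDIO_INFO_CHANNELS (info), NULL);
 *   gst_audio_encoder_set_output_format (enc, caps);
 *   gst_caps_unref (caps);
 */
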
2916 /**
2917 * gst_audio_encoder_allocate_output_buffer:
2918 * @enc: a #GstAudioEncoder
2919 * @size: size of the buffer
2920 *
2921 * Helper function that allocates a buffer to hold an encoded audio frame
2922 * for @enc's current output format.
2923 *
2924 * Returns: (transfer full): allocated buffer
2925 */
2926 GstBuffer *
2927 gst_audio_encoder_allocate_output_buffer (GstAudioEncoder * enc, gsize size)
2928 {
2929 GstBuffer *buffer = NULL;
2930 gboolean needs_reconfigure = FALSE;
2931
2932 g_return_val_if_fail (size > 0, NULL);
2933
2934 GST_DEBUG ("alloc src buffer");
2935
2936 GST_AUDIO_ENCODER_STREAM_LOCK (enc);
2937
2938 needs_reconfigure = gst_pad_check_reconfigure (enc->srcpad);
2939 if (G_UNLIKELY (enc->priv->ctx.output_caps_changed || (enc->priv->ctx.caps
2940 && needs_reconfigure))) {
2941 if (!gst_audio_encoder_negotiate_unlocked (enc)) {
2942 GST_INFO_OBJECT (enc, "Failed to negotiate, fallback allocation");
2943 gst_pad_mark_reconfigure (enc->srcpad);
2944 goto fallback;
2945 }
2946 }
2947
2948 buffer =
2949 gst_buffer_new_allocate (enc->priv->ctx.allocator, size,
2950 &enc->priv->ctx.params);
2951 if (!buffer) {
2952 GST_INFO_OBJECT (enc, "couldn't allocate output buffer");
2953 goto fallback;
2954 }
2955
2956 GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
2957
2958 return buffer;
2959
2960 fallback:
2961 buffer = gst_buffer_new_allocate (NULL, size, NULL);
2962 GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
2963
2964 return buffer;
2965 }
2966
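/* Illustrative sketch (max_frame_size, the codec call and samples_per_frame
 * are hypothetical): typical use from a subclass ::handle_frame
 * implementation:
 *
 *   GstBuffer *outbuf;
 *   GstMapInfo map;
 *
 *   outbuf = gst_audio_encoder_allocate_output_buffer (enc, max_frame_size);
 *   gst_buffer_map (outbuf, &map, GST_MAP_WRITE);
 *   // ... run the codec, writing encoded data into map.data ...
 *   gst_buffer_unmap (outbuf, &map);
 *   return gst_audio_encoder_finish_frame (enc, outbuf, samples_per_frame);
 */
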
2967 /**
2968 * gst_audio_encoder_get_allocator:
2969 * @enc: a #GstAudioEncoder
2970 * @allocator: (out) (allow-none) (transfer full): the #GstAllocator
2971 * used
2972 * @params: (out) (allow-none) (transfer full): the
2973 * #GstAllocationParams of @allocator
2974 *
2975 * Lets #GstAudioEncoder sub-classes know the memory @allocator
2976 * used by the base class and its @params.
2977 *
2978 * Unref the @allocator after use.
2979 */
2980 void
2981 gst_audio_encoder_get_allocator (GstAudioEncoder * enc,
2982 GstAllocator ** allocator, GstAllocationParams * params)
2983 {
2984 g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
2985
2986 if (allocator)
2987 *allocator = enc->priv->ctx.allocator ?
2988 gst_object_ref (enc->priv->ctx.allocator) : NULL;
2989
2990 if (params)
2991 *params = enc->priv->ctx.params;
2992 }
2993