1 /* GStreamer
2 * Copyright (C) <2006> Philippe Khalaf <philippe.kalaf@collabora.co.uk>
3 *
4 * This library is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Library General Public
6 * License as published by the Free Software Foundation; either
7 * version 2 of the License, or (at your option) any later version.
8 *
9 * This library is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Library General Public License for more details.
13 *
14 * You should have received a copy of the GNU Library General Public
15 * License along with this library; if not, write to the
16 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
17 * Boston, MA 02110-1301, USA.
18 */
19
20 /**
21 * SECTION:gstrtpbaseaudiopayload
22 * @title: GstRTPBaseAudioPayload
23 * @short_description: Base class for audio RTP payloader
24 *
25 * Provides a base class for audio RTP payloaders for frame or sample based
26 * audio codecs (constant bitrate)
27 *
28 * This class derives from GstRTPBasePayload. It can be used for payloading
29 * audio codecs. It will only work with constant bitrate codecs. It supports
30 * both frame based and sample based codecs. It takes care of packing up the
31 * audio data into RTP packets and filling up the headers accordingly. The
32 * payloading is done based on the maximum MTU (mtu) and the maximum time per
33 * packet (max-ptime). The general idea is to divide large data buffers into
34 * smaller RTP packets. The RTP packet size is the minimum of either the MTU,
35 * max-ptime (if set) or available data. The RTP packet size is always larger or
36 * equal to min-ptime (if set). If min-ptime is not set, any residual data is
37 * sent in a last RTP packet. In the case of frame based codecs, the resulting
38 * RTP packets always contain full frames.
39 *
40 * ## Usage
41 *
42 * To use this base class, your child element needs to call either
43 * gst_rtp_base_audio_payload_set_frame_based() or
44 * gst_rtp_base_audio_payload_set_sample_based(). This is usually done in the
45 * element's `_init()` function. Then, the child element must call either
46 * gst_rtp_base_audio_payload_set_frame_options(),
47 * gst_rtp_base_audio_payload_set_sample_options() or
 * gst_rtp_base_audio_payload_set_samplebits_options(). Since
49 * GstRTPBaseAudioPayload derives from GstRTPBasePayload, the child element
50 * must set any variables or call/override any functions required by that base
51 * class. The child element does not need to override any other functions
52 * specific to GstRTPBaseAudioPayload.
53 *
54 */
55
56 #ifdef HAVE_CONFIG_H
57 #include "config.h"
58 #endif
59
60 #include <stdlib.h>
61 #include <string.h>
62 #include <gst/rtp/gstrtpbuffer.h>
63 #include <gst/base/gstadapter.h>
64 #include <gst/audio/audio.h>
65
66 #include "gstrtpbaseaudiopayload.h"
67
68 GST_DEBUG_CATEGORY_STATIC (rtpbaseaudiopayload_debug);
69 #define GST_CAT_DEFAULT (rtpbaseaudiopayload_debug)
70
/* default for the "buffer-list" property */
#define DEFAULT_BUFFER_LIST FALSE

/* GObject property ids */
enum
{
  PROP_0,
  PROP_BUFFER_LIST,
  PROP_LAST
};
79
80 /* function to convert bytes to a time */
81 typedef GstClockTime (*GetBytesToTimeFunc) (GstRTPBaseAudioPayload * payload,
82 guint64 bytes);
83 /* function to convert bytes to a RTP time */
84 typedef guint32 (*GetBytesToRTPTimeFunc) (GstRTPBaseAudioPayload * payload,
85 guint64 bytes);
86 /* function to convert time to bytes */
87 typedef guint64 (*GetTimeToBytesFunc) (GstRTPBaseAudioPayload * payload,
88 GstClockTime time);
89
/* instance-private state shared by the frame based and sample based modes */
struct _GstRTPBaseAudioPayloadPrivate
{
  /* conversion vtable, installed by _set_frame_based() / _set_sample_based() */
  GetBytesToTimeFunc bytes_to_time;
  GetBytesToRTPTimeFunc bytes_to_rtptime;
  GetTimeToBytesFunc time_to_bytes;

  GstAdapter *adapter;          /* queues input until a packet's worth is available */
  guint fragment_size;          /* sample mode: smallest whole-byte sample group, in bytes */
  GstClockTime frame_duration_ns;       /* frame mode: frame duration in nanoseconds */
  gboolean discont;             /* set on DISCONT; next packet gets the RTP marker bit */
  guint64 offset;               /* running payload byte offset, basis for RTP timestamps */
  GstClockTime last_timestamp;  /* PTS of the last packet we produced */
  guint32 last_rtptime;         /* RTP time corresponding to last_timestamp */
  guint align;                  /* payload sizes are rounded down to this many bytes */

  /* cached results of gst_rtp_base_audio_payload_get_lengths() */
  guint cached_mtu;
  guint cached_min_ptime;
  guint cached_max_ptime;
  guint cached_ptime;
  guint cached_min_length;
  guint cached_max_length;
  guint cached_ptime_multiple;
  guint cached_align;
  guint cached_csrc_count;

  gboolean buffer_list;         /* push buffer lists instead of single buffers (disabled) */
};
117
118 static void gst_rtp_base_audio_payload_finalize (GObject * object);
119
120 static void gst_rtp_base_audio_payload_set_property (GObject * object,
121 guint prop_id, const GValue * value, GParamSpec * pspec);
122 static void gst_rtp_base_audio_payload_get_property (GObject * object,
123 guint prop_id, GValue * value, GParamSpec * pspec);
124
125 /* bytes to time functions */
126 static GstClockTime
127 gst_rtp_base_audio_payload_frame_bytes_to_time (GstRTPBaseAudioPayload *
128 payload, guint64 bytes);
129 static GstClockTime
130 gst_rtp_base_audio_payload_sample_bytes_to_time (GstRTPBaseAudioPayload *
131 payload, guint64 bytes);
132
133 /* bytes to RTP time functions */
134 static guint32
135 gst_rtp_base_audio_payload_frame_bytes_to_rtptime (GstRTPBaseAudioPayload *
136 payload, guint64 bytes);
137 static guint32
138 gst_rtp_base_audio_payload_sample_bytes_to_rtptime (GstRTPBaseAudioPayload *
139 payload, guint64 bytes);
140
141 /* time to bytes functions */
142 static guint64
143 gst_rtp_base_audio_payload_frame_time_to_bytes (GstRTPBaseAudioPayload *
144 payload, GstClockTime time);
145 static guint64
146 gst_rtp_base_audio_payload_sample_time_to_bytes (GstRTPBaseAudioPayload *
147 payload, GstClockTime time);
148
149 static GstFlowReturn gst_rtp_base_audio_payload_handle_buffer (GstRTPBasePayload
150 * payload, GstBuffer * buffer);
151 static GstStateChangeReturn gst_rtp_base_payload_audio_change_state (GstElement
152 * element, GstStateChange transition);
153 static gboolean gst_rtp_base_payload_audio_sink_event (GstRTPBasePayload
154 * payload, GstEvent * event);
155
156 /* cached quark to avoid contention on the global quark table lock */
157 #define META_TAG_AUDIO meta_tag_audio_quark
158 static GQuark meta_tag_audio_quark;
159
160 #define gst_rtp_base_audio_payload_parent_class parent_class
161 G_DEFINE_TYPE_WITH_PRIVATE (GstRTPBaseAudioPayload, gst_rtp_base_audio_payload,
162 GST_TYPE_RTP_BASE_PAYLOAD);
163
/* class initialisation: wire up the GObject vtable, install the
 * "buffer-list" property and override the element/payloader virtual methods */
static void
gst_rtp_base_audio_payload_class_init (GstRTPBaseAudioPayloadClass * klass)
{
  GObjectClass *gobject_class;
  GstElementClass *gstelement_class;
  GstRTPBasePayloadClass *gstrtpbasepayload_class;

  /* resolve the quark once here to avoid contention on the global quark
   * table lock in the per-buffer meta-copy path */
  meta_tag_audio_quark = g_quark_from_static_string (GST_META_TAG_AUDIO_STR);

  gobject_class = (GObjectClass *) klass;
  gstelement_class = (GstElementClass *) klass;
  gstrtpbasepayload_class = (GstRTPBasePayloadClass *) klass;

  gobject_class->finalize = gst_rtp_base_audio_payload_finalize;
  gobject_class->set_property = gst_rtp_base_audio_payload_set_property;
  gobject_class->get_property = gst_rtp_base_audio_payload_get_property;

  g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_BUFFER_LIST,
      g_param_spec_boolean ("buffer-list", "Buffer List",
          "Use Buffer Lists",
          DEFAULT_BUFFER_LIST, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  gstelement_class->change_state =
      GST_DEBUG_FUNCPTR (gst_rtp_base_payload_audio_change_state);

  gstrtpbasepayload_class->handle_buffer =
      GST_DEBUG_FUNCPTR (gst_rtp_base_audio_payload_handle_buffer);
  gstrtpbasepayload_class->sink_event =
      GST_DEBUG_FUNCPTR (gst_rtp_base_payload_audio_sink_event);

  GST_DEBUG_CATEGORY_INIT (rtpbaseaudiopayload_debug, "rtpbaseaudiopayload", 0,
      "base audio RTP payloader");
}
197
198 static void
gst_rtp_base_audio_payload_init(GstRTPBaseAudioPayload * payload)199 gst_rtp_base_audio_payload_init (GstRTPBaseAudioPayload * payload)
200 {
201 payload->priv = gst_rtp_base_audio_payload_get_instance_private (payload);
202
203 /* these need to be set by child object if frame based */
204 payload->frame_size = 0;
205 payload->frame_duration = 0;
206
207 /* these need to be set by child object if sample based */
208 payload->sample_size = 0;
209
210 payload->priv->adapter = gst_adapter_new ();
211
212 payload->priv->buffer_list = DEFAULT_BUFFER_LIST;
213 }
214
215 static void
gst_rtp_base_audio_payload_finalize(GObject * object)216 gst_rtp_base_audio_payload_finalize (GObject * object)
217 {
218 GstRTPBaseAudioPayload *payload;
219
220 payload = GST_RTP_BASE_AUDIO_PAYLOAD (object);
221
222 g_object_unref (payload->priv->adapter);
223
224 GST_CALL_PARENT (G_OBJECT_CLASS, finalize, (object));
225 }
226
/* property setter. The buffer-list code path is not implemented, so the
 * requested "buffer-list" value is deliberately ignored and FALSE is stored
 * instead (the real assignment is kept under #if 0) */
static void
gst_rtp_base_audio_payload_set_property (GObject * object,
    guint prop_id, const GValue * value, GParamSpec * pspec)
{
  GstRTPBaseAudioPayload *payload;

  payload = GST_RTP_BASE_AUDIO_PAYLOAD (object);

  switch (prop_id) {
    case PROP_BUFFER_LIST:
#if 0
      payload->priv->buffer_list = g_value_get_boolean (value);
#endif
      payload->priv->buffer_list = FALSE;
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
}
247
248 static void
gst_rtp_base_audio_payload_get_property(GObject * object,guint prop_id,GValue * value,GParamSpec * pspec)249 gst_rtp_base_audio_payload_get_property (GObject * object,
250 guint prop_id, GValue * value, GParamSpec * pspec)
251 {
252 GstRTPBaseAudioPayload *payload;
253
254 payload = GST_RTP_BASE_AUDIO_PAYLOAD (object);
255
256 switch (prop_id) {
257 case PROP_BUFFER_LIST:
258 g_value_set_boolean (value, payload->priv->buffer_list);
259 break;
260 default:
261 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
262 break;
263 }
264 }
265
266 /**
267 * gst_rtp_base_audio_payload_set_frame_based:
268 * @rtpbaseaudiopayload: a pointer to the element.
269 *
270 * Tells #GstRTPBaseAudioPayload that the child element is for a frame based
271 * audio codec
272 */
273 void
gst_rtp_base_audio_payload_set_frame_based(GstRTPBaseAudioPayload * rtpbaseaudiopayload)274 gst_rtp_base_audio_payload_set_frame_based (GstRTPBaseAudioPayload *
275 rtpbaseaudiopayload)
276 {
277 g_return_if_fail (rtpbaseaudiopayload != NULL);
278 g_return_if_fail (rtpbaseaudiopayload->priv->time_to_bytes == NULL);
279 g_return_if_fail (rtpbaseaudiopayload->priv->bytes_to_time == NULL);
280 g_return_if_fail (rtpbaseaudiopayload->priv->bytes_to_rtptime == NULL);
281
282 rtpbaseaudiopayload->priv->bytes_to_time =
283 gst_rtp_base_audio_payload_frame_bytes_to_time;
284 rtpbaseaudiopayload->priv->bytes_to_rtptime =
285 gst_rtp_base_audio_payload_frame_bytes_to_rtptime;
286 rtpbaseaudiopayload->priv->time_to_bytes =
287 gst_rtp_base_audio_payload_frame_time_to_bytes;
288 }
289
290 /**
291 * gst_rtp_base_audio_payload_set_sample_based:
292 * @rtpbaseaudiopayload: a pointer to the element.
293 *
294 * Tells #GstRTPBaseAudioPayload that the child element is for a sample based
295 * audio codec
296 */
297 void
gst_rtp_base_audio_payload_set_sample_based(GstRTPBaseAudioPayload * rtpbaseaudiopayload)298 gst_rtp_base_audio_payload_set_sample_based (GstRTPBaseAudioPayload *
299 rtpbaseaudiopayload)
300 {
301 g_return_if_fail (rtpbaseaudiopayload != NULL);
302 g_return_if_fail (rtpbaseaudiopayload->priv->time_to_bytes == NULL);
303 g_return_if_fail (rtpbaseaudiopayload->priv->bytes_to_time == NULL);
304 g_return_if_fail (rtpbaseaudiopayload->priv->bytes_to_rtptime == NULL);
305
306 rtpbaseaudiopayload->priv->bytes_to_time =
307 gst_rtp_base_audio_payload_sample_bytes_to_time;
308 rtpbaseaudiopayload->priv->bytes_to_rtptime =
309 gst_rtp_base_audio_payload_sample_bytes_to_rtptime;
310 rtpbaseaudiopayload->priv->time_to_bytes =
311 gst_rtp_base_audio_payload_sample_time_to_bytes;
312 }
313
/**
 * gst_rtp_base_audio_payload_set_frame_options:
 * @rtpbaseaudiopayload: a pointer to the element.
 * @frame_duration: The duration of an audio frame in milliseconds.
 * @frame_size: The size of an audio frame in bytes.
 *
 * Sets the options for frame based audio codecs.
 *
 */
void
gst_rtp_base_audio_payload_set_frame_options (GstRTPBaseAudioPayload
    * rtpbaseaudiopayload, gint frame_duration, gint frame_size)
{
  GstRTPBaseAudioPayloadPrivate *priv;

  g_return_if_fail (rtpbaseaudiopayload != NULL);

  priv = rtpbaseaudiopayload->priv;

  rtpbaseaudiopayload->frame_duration = frame_duration;
  /* keep a nanosecond version around for the bytes<->time conversions */
  priv->frame_duration_ns = frame_duration * GST_MSECOND;
  rtpbaseaudiopayload->frame_size = frame_size;
  /* packets must contain whole frames, so align payload sizes on it */
  priv->align = frame_size;

  /* changing the framing invalidates any data queued so far */
  gst_adapter_clear (priv->adapter);

  GST_DEBUG_OBJECT (rtpbaseaudiopayload, "frame set to %d ms and size %d",
      frame_duration, frame_size);
}
343
344 /**
345 * gst_rtp_base_audio_payload_set_sample_options:
346 * @rtpbaseaudiopayload: a pointer to the element.
347 * @sample_size: Size per sample in bytes.
348 *
349 * Sets the options for sample based audio codecs.
350 */
351 void
gst_rtp_base_audio_payload_set_sample_options(GstRTPBaseAudioPayload * rtpbaseaudiopayload,gint sample_size)352 gst_rtp_base_audio_payload_set_sample_options (GstRTPBaseAudioPayload
353 * rtpbaseaudiopayload, gint sample_size)
354 {
355 g_return_if_fail (rtpbaseaudiopayload != NULL);
356
357 /* sample_size is in bits internally */
358 gst_rtp_base_audio_payload_set_samplebits_options (rtpbaseaudiopayload,
359 sample_size * 8);
360 }
361
/**
 * gst_rtp_base_audio_payload_set_samplebits_options:
 * @rtpbaseaudiopayload: a pointer to the element.
 * @sample_size: Size per sample in bits.
 *
 * Sets the options for sample based audio codecs.
 */
void
gst_rtp_base_audio_payload_set_samplebits_options (GstRTPBaseAudioPayload
    * rtpbaseaudiopayload, gint sample_size)
{
  guint fragment_size;
  GstRTPBaseAudioPayloadPrivate *priv;

  g_return_if_fail (rtpbaseaudiopayload != NULL);

  priv = rtpbaseaudiopayload->priv;

  rtpbaseaudiopayload->sample_size = sample_size;

  /* sample_size is in bits; find the smallest group of samples that ends on
   * a whole byte boundary by doubling the bit count until it divides by 8.
   * This yields lcm(sample_size, 8) bits, i.e. the minimal byte-aligned
   * fragment, which then also serves as the payload alignment */
  fragment_size = sample_size;
  while ((fragment_size % 8) != 0)
    fragment_size += fragment_size;
  priv->fragment_size = fragment_size / 8;
  priv->align = priv->fragment_size;

  /* changing the sample layout invalidates any data queued so far */
  gst_adapter_clear (priv->adapter);

  GST_DEBUG_OBJECT (rtpbaseaudiopayload,
      "Samplebits set to sample size %d bits", sample_size);
}
394
/* fill in the RTP header fields and GStreamer metadata of @buffer: payload
 * type, marker bit on the first packet after a discont, PTS, the RTP time
 * offset derived from the running byte offset, and the duration derived
 * from @payload_len */
static void
gst_rtp_base_audio_payload_set_meta (GstRTPBaseAudioPayload * payload,
    GstBuffer * buffer, guint payload_len, GstClockTime timestamp)
{
  GstRTPBasePayload *basepayload;
  GstRTPBaseAudioPayloadPrivate *priv;
  GstRTPBuffer rtp = { NULL };

  basepayload = GST_RTP_BASE_PAYLOAD_CAST (payload);
  priv = payload->priv;

  /* set payload type */
  gst_rtp_buffer_map (buffer, GST_MAP_WRITE, &rtp);
  gst_rtp_buffer_set_payload_type (&rtp, basepayload->pt);
  /* set marker bit for disconts */
  if (priv->discont) {
    GST_DEBUG_OBJECT (payload, "Setting marker and DISCONT");
    gst_rtp_buffer_set_marker (&rtp, TRUE);
    GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_DISCONT);
    priv->discont = FALSE;
  }
  gst_rtp_buffer_unmap (&rtp);

  GST_BUFFER_PTS (buffer) = timestamp;

  /* get the offset in RTP time */
  GST_BUFFER_OFFSET (buffer) = priv->bytes_to_rtptime (payload, priv->offset);

  /* advance the running byte offset for the next packet */
  priv->offset += payload_len;

  /* Set the duration from the size */
  GST_BUFFER_DURATION (buffer) = priv->bytes_to_time (payload, payload_len);

  /* remember the last rtptime/timestamp pair. We will use this to realign our
   * RTP timestamp after a buffer discont */
  priv->last_rtptime = GST_BUFFER_OFFSET (buffer);
  priv->last_timestamp = timestamp;
}
433
434 /**
435 * gst_rtp_base_audio_payload_push:
436 * @baseaudiopayload: a #GstRTPBasePayload
437 * @data: (array length=payload_len): data to set as payload
438 * @payload_len: length of payload
439 * @timestamp: a #GstClockTime
440 *
441 * Create an RTP buffer and store @payload_len bytes of @data as the
442 * payload. Set the timestamp on the new buffer to @timestamp before pushing
443 * the buffer downstream.
444 *
445 * Returns: a #GstFlowReturn
446 */
447 GstFlowReturn
gst_rtp_base_audio_payload_push(GstRTPBaseAudioPayload * baseaudiopayload,const guint8 * data,guint payload_len,GstClockTime timestamp)448 gst_rtp_base_audio_payload_push (GstRTPBaseAudioPayload * baseaudiopayload,
449 const guint8 * data, guint payload_len, GstClockTime timestamp)
450 {
451 GstRTPBasePayload *basepayload;
452 GstBuffer *outbuf;
453 guint8 *payload;
454 GstFlowReturn ret;
455 GstRTPBuffer rtp = { NULL };
456
457 basepayload = GST_RTP_BASE_PAYLOAD (baseaudiopayload);
458
459 GST_DEBUG_OBJECT (baseaudiopayload, "Pushing %d bytes ts %" GST_TIME_FORMAT,
460 payload_len, GST_TIME_ARGS (timestamp));
461
462 /* create buffer to hold the payload */
463 outbuf = gst_rtp_base_payload_allocate_output_buffer (basepayload,
464 payload_len, 0, 0);
465
466 /* copy payload */
467 gst_rtp_buffer_map (outbuf, GST_MAP_WRITE, &rtp);
468 payload = gst_rtp_buffer_get_payload (&rtp);
469 memcpy (payload, data, payload_len);
470 gst_rtp_buffer_unmap (&rtp);
471
472 /* set metadata */
473 gst_rtp_base_audio_payload_set_meta (baseaudiopayload, outbuf, payload_len,
474 timestamp);
475
476 ret = gst_rtp_base_payload_push (basepayload, outbuf);
477
478 return ret;
479 }
480
/* closure for foreach_metadata(): the payloader (for logging) and the
 * destination buffer that qualifying metas are copied to */
typedef struct
{
  GstRTPBaseAudioPayload *pay;
  GstBuffer *outbuf;
} CopyMetaData;
486
/* gst_buffer_foreach_meta() callback: copy a meta from @inbuf to
 * data->outbuf, but only if it has a transform function and is either
 * untagged or tagged with nothing but the audio tag; metas carrying any
 * other tag may not survive repacketisation, so they are skipped */
static gboolean
foreach_metadata (GstBuffer * inbuf, GstMeta ** meta, gpointer user_data)
{
  CopyMetaData *data = user_data;
  GstRTPBaseAudioPayload *pay = data->pay;
  GstBuffer *outbuf = data->outbuf;
  const GstMetaInfo *info = (*meta)->info;
  const gchar *const *tags = gst_meta_api_type_get_tags (info->api);

  if (info->transform_func && (!tags || (g_strv_length ((gchar **) tags) == 1
              && gst_meta_api_type_has_tag (info->api, META_TAG_AUDIO)))) {
    GstMetaTransformCopy copy_data = { FALSE, 0, -1 };
    GST_DEBUG_OBJECT (pay, "copy metadata %s", g_type_name (info->api));
    /* simply copy then */
    info->transform_func (outbuf, *meta, inbuf,
        _gst_meta_transform_copy, &copy_data);
  } else {
    GST_DEBUG_OBJECT (pay, "not copying metadata %s", g_type_name (info->api));
  }

  /* always keep iterating over the remaining metas */
  return TRUE;
}
509
/* push @buffer downstream as the payload of a new RTP packet with PTS
 * @timestamp. The RTP header is allocated separately and @buffer is appended
 * to it, so the payload bytes are not copied */
static GstFlowReturn
gst_rtp_base_audio_payload_push_buffer (GstRTPBaseAudioPayload *
    baseaudiopayload, GstBuffer * buffer, GstClockTime timestamp)
{
  GstRTPBasePayload *basepayload;
  GstRTPBaseAudioPayloadPrivate *priv;
  GstBuffer *outbuf;
  guint payload_len;
  GstFlowReturn ret;

  priv = baseaudiopayload->priv;
  basepayload = GST_RTP_BASE_PAYLOAD (baseaudiopayload);

  payload_len = gst_buffer_get_size (buffer);

  GST_DEBUG_OBJECT (baseaudiopayload, "Pushing %d bytes ts %" GST_TIME_FORMAT,
      payload_len, GST_TIME_ARGS (timestamp));

  /* create just the RTP header buffer */
  outbuf = gst_rtp_base_payload_allocate_output_buffer (basepayload, 0, 0, 0);

  /* set metadata */
  gst_rtp_base_audio_payload_set_meta (baseaudiopayload, outbuf, payload_len,
      timestamp);

  if (priv->buffer_list) {
    GstBufferList *list;
    guint i, len;

    list = gst_buffer_list_new ();
    /* NOTE(review): len is the length of the freshly created, empty list, so
     * the loop below never executes and an empty list is pushed. The
     * buffer-list path is effectively unimplemented (see the FIXME) and the
     * "buffer-list" property is forced to FALSE in the setter, making this
     * branch unreachable in practice */
    len = gst_buffer_list_length (list);

    for (i = 0; i < len; i++) {
      /* FIXME */
      g_warning ("bufferlist not implemented");
      gst_buffer_list_add (list, outbuf);
      gst_buffer_list_add (list, buffer);
    }

    GST_DEBUG_OBJECT (baseaudiopayload, "Pushing list %p", list);
    ret = gst_rtp_base_payload_push_list (basepayload, list);
  } else {
    CopyMetaData data;

    /* copy payload */
    data.pay = baseaudiopayload;
    data.outbuf = outbuf;
    gst_buffer_foreach_meta (buffer, foreach_metadata, &data);
    outbuf = gst_buffer_append (outbuf, buffer);

    GST_DEBUG_OBJECT (baseaudiopayload, "Pushing buffer %p", outbuf);
    ret = gst_rtp_base_payload_push (basepayload, outbuf);
  }

  return ret;
}
566
/**
 * gst_rtp_base_audio_payload_flush:
 * @baseaudiopayload: a #GstRTPBasePayload
 * @payload_len: length of payload
 * @timestamp: a #GstClockTime
 *
 * Create an RTP buffer and store @payload_len bytes of the adapter as the
 * payload. Set the timestamp on the new buffer to @timestamp before pushing
 * the buffer downstream.
 *
 * If @payload_len is -1, all pending bytes will be flushed. If @timestamp is
 * -1, the timestamp will be calculated automatically.
 *
 * Returns: a #GstFlowReturn
 */
GstFlowReturn
gst_rtp_base_audio_payload_flush (GstRTPBaseAudioPayload * baseaudiopayload,
    guint payload_len, GstClockTime timestamp)
{
  GstRTPBasePayload *basepayload;
  GstRTPBaseAudioPayloadPrivate *priv;
  GstBuffer *outbuf;
  GstFlowReturn ret;
  GstAdapter *adapter;
  guint64 distance;

  priv = baseaudiopayload->priv;
  adapter = priv->adapter;

  basepayload = GST_RTP_BASE_PAYLOAD (baseaudiopayload);

  /* -1 (wrapped to G_MAXUINT since payload_len is unsigned) means
   * "flush everything that is queued" */
  if (payload_len == -1)
    payload_len = gst_adapter_available (adapter);

  /* nothing to do, just return */
  if (payload_len == 0)
    return GST_FLOW_OK;

  if (timestamp == -1) {
    /* calculate the timestamp: start from the PTS of the buffer the adapter
     * last saw and add the duration of the bytes consumed since then */
    timestamp = gst_adapter_prev_pts (adapter, &distance);

    GST_LOG_OBJECT (baseaudiopayload,
        "last timestamp %" GST_TIME_FORMAT ", distance %" G_GUINT64_FORMAT,
        GST_TIME_ARGS (timestamp), distance);

    if (GST_CLOCK_TIME_IS_VALID (timestamp) && distance > 0) {
      /* convert the number of bytes since the last timestamp to time and add to
       * the last seen timestamp */
      timestamp += priv->bytes_to_time (baseaudiopayload, distance);
    }
  }

  GST_DEBUG_OBJECT (baseaudiopayload, "Pushing %d bytes ts %" GST_TIME_FORMAT,
      payload_len, GST_TIME_ARGS (timestamp));

  if (priv->buffer_list && gst_adapter_available_fast (adapter) >= payload_len) {
    GstBuffer *buffer;
    /* we can quickly take a buffer out of the adapter without having to copy
     * anything. */
    buffer = gst_adapter_take_buffer (adapter, payload_len);

    ret =
        gst_rtp_base_audio_payload_push_buffer (baseaudiopayload, buffer,
        timestamp);
  } else {
    GstBuffer *paybuf;
    CopyMetaData data;


    /* create buffer to hold the payload */
    outbuf = gst_rtp_base_payload_allocate_output_buffer (basepayload, 0, 0, 0);

    /* take the payload bytes out of the adapter, copy audio-tagged metas
     * over to the header buffer and append the payload to it */
    paybuf = gst_adapter_take_buffer_fast (adapter, payload_len);

    data.pay = baseaudiopayload;
    data.outbuf = outbuf;
    gst_buffer_foreach_meta (paybuf, foreach_metadata, &data);
    outbuf = gst_buffer_append (outbuf, paybuf);

    /* set metadata */
    gst_rtp_base_audio_payload_set_meta (baseaudiopayload, outbuf, payload_len,
        timestamp);

    ret = gst_rtp_base_payload_push (basepayload, outbuf);
  }

  return ret;
}
656
/* round val down to the nearest multiple of len (len must be non-zero) */
#define ALIGN_DOWN(val,len) ((val) - ((val) % (len)))
658
/* calculate the min and max length of a packet. This depends on the configured
 * mtu and min/max_ptime values. We cache those so that we don't have to redo
 * all the calculations. Returns FALSE if the child element never configured
 * its frame/sample options (align == 0) */
static gboolean
gst_rtp_base_audio_payload_get_lengths (GstRTPBasePayload * basepayload,
    guint csrc_count, guint * min_payload_len, guint * max_payload_len,
    guint * align)
{
  GstRTPBaseAudioPayload *payload;
  GstRTPBaseAudioPayloadPrivate *priv;
  guint max_mtu, mtu;
  guint maxptime_octets;
  guint minptime_octets;
  guint ptime_mult_octets;

  payload = GST_RTP_BASE_AUDIO_PAYLOAD_CAST (basepayload);
  priv = payload->priv;

  /* align == 0 means _set_frame_options()/_set_samplebits_options() was
   * never called; we cannot compute packet sizes */
  if (priv->align == 0)
    return FALSE;

  mtu = GST_RTP_BASE_PAYLOAD_MTU (payload);

  /* check cached values. Since csrc_count may vary for each packet, we only
   * check whether the new value exceeds the cached value and thus result in
   * smaller payload. */
  if (G_LIKELY (priv->cached_mtu == mtu
          && priv->cached_ptime_multiple ==
          basepayload->ptime_multiple
          && priv->cached_ptime == basepayload->ptime
          && priv->cached_max_ptime == basepayload->max_ptime
          && priv->cached_min_ptime == basepayload->min_ptime
          && priv->cached_csrc_count >= csrc_count)) {
    /* if nothing changed, return cached values */
    *min_payload_len = priv->cached_min_length;
    *max_payload_len = priv->cached_max_length;
    *align = priv->cached_align;
    return TRUE;
  }

  /* the alignment must honour both the codec unit (priv->align) and the
   * configured ptime-multiple, rounded down to a whole number of units */
  ptime_mult_octets = priv->time_to_bytes (payload,
      basepayload->ptime_multiple);
  *align = ALIGN_DOWN (MAX (priv->align, ptime_mult_octets), priv->align);

  /* ptime max */
  if (basepayload->max_ptime != -1) {
    maxptime_octets = priv->time_to_bytes (payload, basepayload->max_ptime);
  } else {
    maxptime_octets = G_MAXUINT;
  }
  /* MTU max */
  max_mtu = gst_rtp_buffer_calc_payload_len (mtu, 0, csrc_count);
  /* round down to alignment */
  max_mtu = ALIGN_DOWN (max_mtu, *align);

  /* combine max ptime and max payload length */
  *max_payload_len = MIN (max_mtu, maxptime_octets);

  /* min number of bytes based on a given ptime */
  minptime_octets = priv->time_to_bytes (payload, basepayload->min_ptime);
  /* must be at least one frame size */
  *min_payload_len = MAX (minptime_octets, *align);

  /* never let the minimum exceed the maximum */
  if (*min_payload_len > *max_payload_len)
    *min_payload_len = *max_payload_len;

  /* If the ptime is specified in the caps, tried to adhere to it exactly */
  if (basepayload->ptime) {
    guint ptime_in_bytes = priv->time_to_bytes (payload,
        basepayload->ptime);

    /* clip to computed min and max lengths */
    ptime_in_bytes = MAX (*min_payload_len, ptime_in_bytes);
    ptime_in_bytes = MIN (*max_payload_len, ptime_in_bytes);

    *min_payload_len = *max_payload_len = ptime_in_bytes;
  }

  /* cache values */
  priv->cached_mtu = mtu;
  priv->cached_ptime = basepayload->ptime;
  priv->cached_min_ptime = basepayload->min_ptime;
  priv->cached_max_ptime = basepayload->max_ptime;
  priv->cached_ptime_multiple = basepayload->ptime_multiple;
  priv->cached_min_length = *min_payload_len;
  priv->cached_max_length = *max_payload_len;
  priv->cached_align = *align;
  priv->cached_csrc_count = csrc_count;

  return TRUE;
}
750
751 /* frame conversions functions */
752 static GstClockTime
gst_rtp_base_audio_payload_frame_bytes_to_time(GstRTPBaseAudioPayload * payload,guint64 bytes)753 gst_rtp_base_audio_payload_frame_bytes_to_time (GstRTPBaseAudioPayload *
754 payload, guint64 bytes)
755 {
756 guint64 framecount;
757
758 framecount = bytes / payload->frame_size;
759 if (G_UNLIKELY (bytes % payload->frame_size))
760 framecount++;
761
762 return framecount * payload->priv->frame_duration_ns;
763 }
764
765 static guint32
gst_rtp_base_audio_payload_frame_bytes_to_rtptime(GstRTPBaseAudioPayload * payload,guint64 bytes)766 gst_rtp_base_audio_payload_frame_bytes_to_rtptime (GstRTPBaseAudioPayload *
767 payload, guint64 bytes)
768 {
769 guint64 framecount;
770 guint64 time;
771
772 framecount = bytes / payload->frame_size;
773 if (G_UNLIKELY (bytes % payload->frame_size))
774 framecount++;
775
776 time = framecount * payload->priv->frame_duration_ns;
777
778 return gst_util_uint64_scale_int (time,
779 GST_RTP_BASE_PAYLOAD (payload)->clock_rate, GST_SECOND);
780 }
781
/* frame mode: convert a duration to a byte count by scaling with
 * frame_size / frame_duration_ns */
static guint64
gst_rtp_base_audio_payload_frame_time_to_bytes (GstRTPBaseAudioPayload *
    payload, GstClockTime time)
{
  return gst_util_uint64_scale (time, payload->frame_size,
      payload->priv->frame_duration_ns);
}
789
790 /* sample conversion functions */
791 static GstClockTime
gst_rtp_base_audio_payload_sample_bytes_to_time(GstRTPBaseAudioPayload * payload,guint64 bytes)792 gst_rtp_base_audio_payload_sample_bytes_to_time (GstRTPBaseAudioPayload *
793 payload, guint64 bytes)
794 {
795 guint64 rtptime;
796
797 /* avoid division when we can */
798 if (G_LIKELY (payload->sample_size != 8))
799 rtptime = gst_util_uint64_scale_int (bytes, 8, payload->sample_size);
800 else
801 rtptime = bytes;
802
803 return gst_util_uint64_scale_int (rtptime, GST_SECOND,
804 GST_RTP_BASE_PAYLOAD (payload)->clock_rate);
805 }
806
807 static guint32
gst_rtp_base_audio_payload_sample_bytes_to_rtptime(GstRTPBaseAudioPayload * payload,guint64 bytes)808 gst_rtp_base_audio_payload_sample_bytes_to_rtptime (GstRTPBaseAudioPayload *
809 payload, guint64 bytes)
810 {
811 /* avoid division when we can */
812 if (G_LIKELY (payload->sample_size != 8))
813 return gst_util_uint64_scale_int (bytes, 8, payload->sample_size);
814 else
815 return bytes;
816 }
817
818 static guint64
gst_rtp_base_audio_payload_sample_time_to_bytes(GstRTPBaseAudioPayload * payload,guint64 time)819 gst_rtp_base_audio_payload_sample_time_to_bytes (GstRTPBaseAudioPayload *
820 payload, guint64 time)
821 {
822 guint64 samples;
823
824 samples = gst_util_uint64_scale_int (time,
825 GST_RTP_BASE_PAYLOAD (payload)->clock_rate, GST_SECOND);
826
827 /* avoid multiplication when we can */
828 if (G_LIKELY (payload->sample_size != 8))
829 return gst_util_uint64_scale_int (samples, payload->sample_size, 8);
830 else
831 return samples;
832 }
833
/* Chain handler: turns incoming audio buffers into RTP packets.
 *
 * On DISCONT the adapter is flushed and the RTP timestamp offset is advanced
 * by the observed GST timestamp gap so the output stream carries the same
 * gap.  A buffer that already fits the computed payload bounds (and the
 * frame/sample alignment) is pushed directly; otherwise it is accumulated in
 * the adapter and flushed out in aligned, min/max-bounded payload chunks.
 *
 * Takes ownership of @buffer; returns a GstFlowReturn for the push chain. */
static GstFlowReturn
gst_rtp_base_audio_payload_handle_buffer (GstRTPBasePayload *
    basepayload, GstBuffer * buffer)
{
  GstRTPBaseAudioPayload *payload;
  GstRTPBaseAudioPayloadPrivate *priv;
  guint payload_len;
  GstFlowReturn ret;
  guint available;
  guint min_payload_len;
  guint max_payload_len;
  guint align;
  guint size;
  gboolean discont;
  GstClockTime timestamp;

  ret = GST_FLOW_OK;

  payload = GST_RTP_BASE_AUDIO_PAYLOAD_CAST (basepayload);
  priv = payload->priv;

  timestamp = GST_BUFFER_PTS (buffer);
  discont = GST_BUFFER_IS_DISCONT (buffer);
  if (discont) {

    GST_DEBUG_OBJECT (payload, "Got DISCONT");
    /* flush everything out of the adapter, mark DISCONT */
    ret = gst_rtp_base_audio_payload_flush (payload, -1, -1);
    priv->discont = TRUE;

    /* get the distance between the timestamp gap and produce the same gap in
     * the RTP timestamps */
    if (priv->last_timestamp != -1 && timestamp != -1) {
      /* we had a last timestamp, compare it to the new timestamp and update the
       * offset counter for RTP timestamps. The effect is that we will produce
       * output buffers containing the same RTP timestamp gap as the gap
       * between the GST timestamps. */
      if (timestamp > priv->last_timestamp) {
        GstClockTime diff;
        guint64 bytes;
        /* we're only going to apply a positive gap, otherwise we let the marker
         * bit do its thing. simply convert to bytes and add the current
         * offset */
        diff = timestamp - priv->last_timestamp;
        bytes = priv->time_to_bytes (payload, diff);
        priv->offset += bytes;

        GST_DEBUG_OBJECT (payload,
            "elapsed time %" GST_TIME_FORMAT ", bytes %" G_GUINT64_FORMAT
            ", new offset %" G_GUINT64_FORMAT, GST_TIME_ARGS (diff), bytes,
            priv->offset);
      }
    }
  }

  /* payload bounds depend on the current MTU/ptime configuration; a failure
   * here means the subclass never configured frame/sample parameters */
  if (!gst_rtp_base_audio_payload_get_lengths (basepayload,
          gst_rtp_base_payload_get_source_count (basepayload, buffer),
          &min_payload_len, &max_payload_len, &align))
    goto config_error;

  GST_DEBUG_OBJECT (payload,
      "Calculated min_payload_len %u and max_payload_len %u",
      min_payload_len, max_payload_len);

  size = gst_buffer_get_size (buffer);

  /* shortcut, we don't need to use the adapter when the packet can be pushed
   * through directly. */
  available = gst_adapter_available (priv->adapter);

  GST_DEBUG_OBJECT (payload, "got buffer size %u, available %u",
      size, available);

  if (available == 0 && (size >= min_payload_len && size <= max_payload_len) &&
      (size % align == 0)) {
    /* If buffer fits on an RTP packet, let's just push it through
     * this will check against max_ptime and max_mtu */
    GST_DEBUG_OBJECT (payload, "Fast packet push");
    ret = gst_rtp_base_audio_payload_push_buffer (payload, buffer, timestamp);
  } else {
    /* push the buffer in the adapter */
    gst_adapter_push (priv->adapter, buffer);
    available += size;

    GST_DEBUG_OBJECT (payload, "available now %u", available);

    /* as long as we have full frames */
    /* TODO: Use buffer lists here */
    while (available >= min_payload_len) {
      /* get multiple of alignment */
      payload_len = MIN (max_payload_len, available);
      payload_len = ALIGN_DOWN (payload_len, align);

      /* and flush out the bytes from the adapter, automatically set the
       * timestamp. */
      ret = gst_rtp_base_audio_payload_flush (payload, payload_len, -1);

      available -= payload_len;
      GST_DEBUG_OBJECT (payload, "available after push %u", available);
    }
  }
  return ret;

  /* ERRORS */
config_error:
  {
    GST_ELEMENT_ERROR (payload, STREAM, NOT_IMPLEMENTED, (NULL),
        ("subclass did not configure us properly"));
    gst_buffer_unref (buffer);
    return GST_FLOW_ERROR;
  }
}
946
947 static GstStateChangeReturn
gst_rtp_base_payload_audio_change_state(GstElement * element,GstStateChange transition)948 gst_rtp_base_payload_audio_change_state (GstElement * element,
949 GstStateChange transition)
950 {
951 GstRTPBaseAudioPayload *rtpbasepayload;
952 GstStateChangeReturn ret;
953
954 rtpbasepayload = GST_RTP_BASE_AUDIO_PAYLOAD (element);
955
956 switch (transition) {
957 case GST_STATE_CHANGE_READY_TO_PAUSED:
958 rtpbasepayload->priv->cached_mtu = -1;
959 rtpbasepayload->priv->last_rtptime = -1;
960 rtpbasepayload->priv->last_timestamp = -1;
961 break;
962 default:
963 break;
964 }
965
966 ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
967
968 switch (transition) {
969 case GST_STATE_CHANGE_PAUSED_TO_READY:
970 gst_adapter_clear (rtpbasepayload->priv->adapter);
971 break;
972 default:
973 break;
974 }
975
976 return ret;
977 }
978
979 static gboolean
gst_rtp_base_payload_audio_sink_event(GstRTPBasePayload * basep,GstEvent * event)980 gst_rtp_base_payload_audio_sink_event (GstRTPBasePayload * basep,
981 GstEvent * event)
982 {
983 GstRTPBaseAudioPayload *payload;
984 gboolean res = FALSE;
985
986 payload = GST_RTP_BASE_AUDIO_PAYLOAD (basep);
987
988 switch (GST_EVENT_TYPE (event)) {
989 case GST_EVENT_EOS:
990 /* flush remaining bytes in the adapter */
991 gst_rtp_base_audio_payload_flush (payload, -1, -1);
992 break;
993 case GST_EVENT_FLUSH_STOP:
994 gst_adapter_clear (payload->priv->adapter);
995 break;
996 default:
997 break;
998 }
999
1000 /* let parent handle the remainder of the event */
1001 res = GST_RTP_BASE_PAYLOAD_CLASS (parent_class)->sink_event (basep, event);
1002
1003 return res;
1004 }
1005
1006 /**
1007 * gst_rtp_base_audio_payload_get_adapter:
1008 * @rtpbaseaudiopayload: a #GstRTPBaseAudioPayload
1009 *
1010 * Gets the internal adapter used by the depayloader.
1011 *
1012 * Returns: (transfer full): a #GstAdapter.
1013 */
1014 GstAdapter *
gst_rtp_base_audio_payload_get_adapter(GstRTPBaseAudioPayload * rtpbaseaudiopayload)1015 gst_rtp_base_audio_payload_get_adapter (GstRTPBaseAudioPayload
1016 * rtpbaseaudiopayload)
1017 {
1018 GstAdapter *adapter;
1019
1020 if ((adapter = rtpbaseaudiopayload->priv->adapter))
1021 g_object_ref (adapter);
1022
1023 return adapter;
1024 }
1025