1 /* GStreamer
2 * Copyright (C) <2006> Philippe Khalaf <philippe.kalaf@collabora.co.uk>
3 *
4 * This library is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Library General Public
6 * License as published by the Free Software Foundation; either
7 * version 2 of the License, or (at your option) any later version.
8 *
9 * This library is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Library General Public License for more details.
13 *
14 * You should have received a copy of the GNU Library General Public
15 * License along with this library; if not, write to the
16 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
17 * Boston, MA 02110-1301, USA.
18 */
19
20 /**
21 * SECTION:gstrtpbaseaudiopayload
22 * @title: GstRTPBaseAudioPayload
23 * @short_description: Base class for audio RTP payloader
24 *
25 * Provides a base class for audio RTP payloaders for frame or sample based
26 * audio codecs (constant bitrate)
27 *
28 * This class derives from GstRTPBasePayload. It can be used for payloading
29 * audio codecs. It will only work with constant bitrate codecs. It supports
30 * both frame based and sample based codecs. It takes care of packing up the
31 * audio data into RTP packets and filling up the headers accordingly. The
32 * payloading is done based on the maximum MTU (mtu) and the maximum time per
33 * packet (max-ptime). The general idea is to divide large data buffers into
34 * smaller RTP packets. The RTP packet size is the minimum of either the MTU,
35 * max-ptime (if set) or available data. The RTP packet size is always larger or
36 * equal to min-ptime (if set). If min-ptime is not set, any residual data is
37 * sent in a last RTP packet. In the case of frame based codecs, the resulting
38 * RTP packets always contain full frames.
39 *
40 * ## Usage
41 *
42 * To use this base class, your child element needs to call either
43 * gst_rtp_base_audio_payload_set_frame_based() or
44 * gst_rtp_base_audio_payload_set_sample_based(). This is usually done in the
45 * element's _init() function. Then, the child element must call either
46 * gst_rtp_base_audio_payload_set_frame_options(),
47 * gst_rtp_base_audio_payload_set_sample_options() or
 * gst_rtp_base_audio_payload_set_samplebits_options(). Since
49 * GstRTPBaseAudioPayload derives from GstRTPBasePayload, the child element
50 * must set any variables or call/override any functions required by that base
51 * class. The child element does not need to override any other functions
52 * specific to GstRTPBaseAudioPayload.
53 *
54 */
55
56 #ifdef HAVE_CONFIG_H
57 #include "config.h"
58 #endif
59
60 #include <stdlib.h>
61 #include <string.h>
62 #include <gst/rtp/gstrtpbuffer.h>
63 #include <gst/base/gstadapter.h>
64 #include <gst/audio/audio.h>
65
66 #include "gstrtpbaseaudiopayload.h"
67
68 GST_DEBUG_CATEGORY_STATIC (rtpbaseaudiopayload_debug);
69 #define GST_CAT_DEFAULT (rtpbaseaudiopayload_debug)
70
71 #define DEFAULT_BUFFER_LIST FALSE
72
73 enum
74 {
75 PROP_0,
76 PROP_BUFFER_LIST,
77 PROP_LAST
78 };
79
80 /* function to convert bytes to a time */
81 typedef GstClockTime (*GetBytesToTimeFunc) (GstRTPBaseAudioPayload * payload,
82 guint64 bytes);
83 /* function to convert bytes to a RTP time */
84 typedef guint32 (*GetBytesToRTPTimeFunc) (GstRTPBaseAudioPayload * payload,
85 guint64 bytes);
86 /* function to convert time to bytes */
87 typedef guint64 (*GetTimeToBytesFunc) (GstRTPBaseAudioPayload * payload,
88 GstClockTime time);
89
/* Private state of GstRTPBaseAudioPayload.
 *
 * The three conversion function pointers are selected once by
 * gst_rtp_base_audio_payload_set_frame_based() or
 * gst_rtp_base_audio_payload_set_sample_based() and differ between frame
 * based and sample based codecs. */
struct _GstRTPBaseAudioPayloadPrivate
{
  GetBytesToTimeFunc bytes_to_time;     /* payload bytes -> GstClockTime */
  GetBytesToRTPTimeFunc bytes_to_rtptime;       /* payload bytes -> RTP time units */
  GetTimeToBytesFunc time_to_bytes;     /* GstClockTime -> payload bytes */

  GstAdapter *adapter;          /* accumulates input until a packet is full */
  guint fragment_size;          /* sample based: bytes per byte-aligned fragment */
  GstClockTime frame_duration_ns;       /* frame based: frame duration in ns */
  gboolean discont;             /* TRUE: set marker bit on the next packet */
  guint64 offset;               /* bytes payloaded so far; basis for RTP offsets */
  GstClockTime last_timestamp;  /* timestamp of the last pushed packet */
  guint32 last_rtptime;         /* RTP offset of the last pushed packet */
  guint align;                  /* payload alignment in bytes (0 = unconfigured) */

  /* cached results of gst_rtp_base_audio_payload_get_lengths(); valid while
   * the cached_* inputs below still match the current configuration */
  guint cached_mtu;
  guint cached_min_ptime;
  guint cached_max_ptime;
  guint cached_ptime;
  guint cached_min_length;
  guint cached_max_length;
  guint cached_ptime_multiple;
  guint cached_align;
  guint cached_csrc_count;

  gboolean buffer_list;         /* push buffer lists (forced FALSE in set_property) */
};
117
118 static void gst_rtp_base_audio_payload_finalize (GObject * object);
119
120 static void gst_rtp_base_audio_payload_set_property (GObject * object,
121 guint prop_id, const GValue * value, GParamSpec * pspec);
122 static void gst_rtp_base_audio_payload_get_property (GObject * object,
123 guint prop_id, GValue * value, GParamSpec * pspec);
124
125 /* bytes to time functions */
126 static GstClockTime
127 gst_rtp_base_audio_payload_frame_bytes_to_time (GstRTPBaseAudioPayload *
128 payload, guint64 bytes);
129 static GstClockTime
130 gst_rtp_base_audio_payload_sample_bytes_to_time (GstRTPBaseAudioPayload *
131 payload, guint64 bytes);
132
133 /* bytes to RTP time functions */
134 static guint32
135 gst_rtp_base_audio_payload_frame_bytes_to_rtptime (GstRTPBaseAudioPayload *
136 payload, guint64 bytes);
137 static guint32
138 gst_rtp_base_audio_payload_sample_bytes_to_rtptime (GstRTPBaseAudioPayload *
139 payload, guint64 bytes);
140
141 /* time to bytes functions */
142 static guint64
143 gst_rtp_base_audio_payload_frame_time_to_bytes (GstRTPBaseAudioPayload *
144 payload, GstClockTime time);
145 static guint64
146 gst_rtp_base_audio_payload_sample_time_to_bytes (GstRTPBaseAudioPayload *
147 payload, GstClockTime time);
148
149 static GstFlowReturn gst_rtp_base_audio_payload_handle_buffer (GstRTPBasePayload
150 * payload, GstBuffer * buffer);
151 static GstStateChangeReturn gst_rtp_base_payload_audio_change_state (GstElement
152 * element, GstStateChange transition);
153 static gboolean gst_rtp_base_payload_audio_sink_event (GstRTPBasePayload
154 * payload, GstEvent * event);
155
156 #define gst_rtp_base_audio_payload_parent_class parent_class
157 G_DEFINE_TYPE_WITH_PRIVATE (GstRTPBaseAudioPayload, gst_rtp_base_audio_payload,
158 GST_TYPE_RTP_BASE_PAYLOAD);
159
160 static void
gst_rtp_base_audio_payload_class_init(GstRTPBaseAudioPayloadClass * klass)161 gst_rtp_base_audio_payload_class_init (GstRTPBaseAudioPayloadClass * klass)
162 {
163 GObjectClass *gobject_class;
164 GstElementClass *gstelement_class;
165 GstRTPBasePayloadClass *gstrtpbasepayload_class;
166
167 gobject_class = (GObjectClass *) klass;
168 gstelement_class = (GstElementClass *) klass;
169 gstrtpbasepayload_class = (GstRTPBasePayloadClass *) klass;
170
171 gobject_class->finalize = gst_rtp_base_audio_payload_finalize;
172 gobject_class->set_property = gst_rtp_base_audio_payload_set_property;
173 gobject_class->get_property = gst_rtp_base_audio_payload_get_property;
174
175 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_BUFFER_LIST,
176 g_param_spec_boolean ("buffer-list", "Buffer List",
177 "Use Buffer Lists",
178 DEFAULT_BUFFER_LIST, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
179
180 gstelement_class->change_state =
181 GST_DEBUG_FUNCPTR (gst_rtp_base_payload_audio_change_state);
182
183 gstrtpbasepayload_class->handle_buffer =
184 GST_DEBUG_FUNCPTR (gst_rtp_base_audio_payload_handle_buffer);
185 gstrtpbasepayload_class->sink_event =
186 GST_DEBUG_FUNCPTR (gst_rtp_base_payload_audio_sink_event);
187
188 GST_DEBUG_CATEGORY_INIT (rtpbaseaudiopayload_debug, "rtpbaseaudiopayload", 0,
189 "base audio RTP payloader");
190 }
191
192 static void
gst_rtp_base_audio_payload_init(GstRTPBaseAudioPayload * payload)193 gst_rtp_base_audio_payload_init (GstRTPBaseAudioPayload * payload)
194 {
195 payload->priv = gst_rtp_base_audio_payload_get_instance_private (payload);
196
197 /* these need to be set by child object if frame based */
198 payload->frame_size = 0;
199 payload->frame_duration = 0;
200
201 /* these need to be set by child object if sample based */
202 payload->sample_size = 0;
203
204 payload->priv->adapter = gst_adapter_new ();
205
206 payload->priv->buffer_list = DEFAULT_BUFFER_LIST;
207 }
208
209 static void
gst_rtp_base_audio_payload_finalize(GObject * object)210 gst_rtp_base_audio_payload_finalize (GObject * object)
211 {
212 GstRTPBaseAudioPayload *payload;
213
214 payload = GST_RTP_BASE_AUDIO_PAYLOAD (object);
215
216 g_object_unref (payload->priv->adapter);
217
218 GST_CALL_PARENT (G_OBJECT_CLASS, finalize, (object));
219 }
220
/* GObject property setter.
 *
 * Note that the "buffer-list" property is deliberately neutered: the
 * requested value is ignored and the flag is forced to FALSE. The #if 0
 * block preserves the original assignment for when bufferlist support is
 * finished (see the FIXME in gst_rtp_base_audio_payload_push_buffer). */
static void
gst_rtp_base_audio_payload_set_property (GObject * object,
    guint prop_id, const GValue * value, GParamSpec * pspec)
{
  GstRTPBaseAudioPayload *payload;

  payload = GST_RTP_BASE_AUDIO_PAYLOAD (object);

  switch (prop_id) {
    case PROP_BUFFER_LIST:
#if 0
      payload->priv->buffer_list = g_value_get_boolean (value);
#endif
      /* bufferlist output is disabled; always store FALSE */
      payload->priv->buffer_list = FALSE;
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
}
241
242 static void
gst_rtp_base_audio_payload_get_property(GObject * object,guint prop_id,GValue * value,GParamSpec * pspec)243 gst_rtp_base_audio_payload_get_property (GObject * object,
244 guint prop_id, GValue * value, GParamSpec * pspec)
245 {
246 GstRTPBaseAudioPayload *payload;
247
248 payload = GST_RTP_BASE_AUDIO_PAYLOAD (object);
249
250 switch (prop_id) {
251 case PROP_BUFFER_LIST:
252 g_value_set_boolean (value, payload->priv->buffer_list);
253 break;
254 default:
255 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
256 break;
257 }
258 }
259
260 /**
261 * gst_rtp_base_audio_payload_set_frame_based:
262 * @rtpbaseaudiopayload: a pointer to the element.
263 *
264 * Tells #GstRTPBaseAudioPayload that the child element is for a frame based
265 * audio codec
266 */
267 void
gst_rtp_base_audio_payload_set_frame_based(GstRTPBaseAudioPayload * rtpbaseaudiopayload)268 gst_rtp_base_audio_payload_set_frame_based (GstRTPBaseAudioPayload *
269 rtpbaseaudiopayload)
270 {
271 g_return_if_fail (rtpbaseaudiopayload != NULL);
272 g_return_if_fail (rtpbaseaudiopayload->priv->time_to_bytes == NULL);
273 g_return_if_fail (rtpbaseaudiopayload->priv->bytes_to_time == NULL);
274 g_return_if_fail (rtpbaseaudiopayload->priv->bytes_to_rtptime == NULL);
275
276 rtpbaseaudiopayload->priv->bytes_to_time =
277 gst_rtp_base_audio_payload_frame_bytes_to_time;
278 rtpbaseaudiopayload->priv->bytes_to_rtptime =
279 gst_rtp_base_audio_payload_frame_bytes_to_rtptime;
280 rtpbaseaudiopayload->priv->time_to_bytes =
281 gst_rtp_base_audio_payload_frame_time_to_bytes;
282 }
283
284 /**
285 * gst_rtp_base_audio_payload_set_sample_based:
286 * @rtpbaseaudiopayload: a pointer to the element.
287 *
288 * Tells #GstRTPBaseAudioPayload that the child element is for a sample based
289 * audio codec
290 */
291 void
gst_rtp_base_audio_payload_set_sample_based(GstRTPBaseAudioPayload * rtpbaseaudiopayload)292 gst_rtp_base_audio_payload_set_sample_based (GstRTPBaseAudioPayload *
293 rtpbaseaudiopayload)
294 {
295 g_return_if_fail (rtpbaseaudiopayload != NULL);
296 g_return_if_fail (rtpbaseaudiopayload->priv->time_to_bytes == NULL);
297 g_return_if_fail (rtpbaseaudiopayload->priv->bytes_to_time == NULL);
298 g_return_if_fail (rtpbaseaudiopayload->priv->bytes_to_rtptime == NULL);
299
300 rtpbaseaudiopayload->priv->bytes_to_time =
301 gst_rtp_base_audio_payload_sample_bytes_to_time;
302 rtpbaseaudiopayload->priv->bytes_to_rtptime =
303 gst_rtp_base_audio_payload_sample_bytes_to_rtptime;
304 rtpbaseaudiopayload->priv->time_to_bytes =
305 gst_rtp_base_audio_payload_sample_time_to_bytes;
306 }
307
308 /**
309 * gst_rtp_base_audio_payload_set_frame_options:
310 * @rtpbaseaudiopayload: a pointer to the element.
 * @frame_duration: The duration of an audio frame in milliseconds.
312 * @frame_size: The size of an audio frame in bytes.
313 *
314 * Sets the options for frame based audio codecs.
315 *
316 */
317 void
gst_rtp_base_audio_payload_set_frame_options(GstRTPBaseAudioPayload * rtpbaseaudiopayload,gint frame_duration,gint frame_size)318 gst_rtp_base_audio_payload_set_frame_options (GstRTPBaseAudioPayload
319 * rtpbaseaudiopayload, gint frame_duration, gint frame_size)
320 {
321 GstRTPBaseAudioPayloadPrivate *priv;
322
323 g_return_if_fail (rtpbaseaudiopayload != NULL);
324
325 priv = rtpbaseaudiopayload->priv;
326
327 rtpbaseaudiopayload->frame_duration = frame_duration;
328 priv->frame_duration_ns = frame_duration * GST_MSECOND;
329 rtpbaseaudiopayload->frame_size = frame_size;
330 priv->align = frame_size;
331
332 gst_adapter_clear (priv->adapter);
333
334 GST_DEBUG_OBJECT (rtpbaseaudiopayload, "frame set to %d ms and size %d",
335 frame_duration, frame_size);
336 }
337
338 /**
339 * gst_rtp_base_audio_payload_set_sample_options:
340 * @rtpbaseaudiopayload: a pointer to the element.
341 * @sample_size: Size per sample in bytes.
342 *
343 * Sets the options for sample based audio codecs.
344 */
345 void
gst_rtp_base_audio_payload_set_sample_options(GstRTPBaseAudioPayload * rtpbaseaudiopayload,gint sample_size)346 gst_rtp_base_audio_payload_set_sample_options (GstRTPBaseAudioPayload
347 * rtpbaseaudiopayload, gint sample_size)
348 {
349 g_return_if_fail (rtpbaseaudiopayload != NULL);
350
351 /* sample_size is in bits internally */
352 gst_rtp_base_audio_payload_set_samplebits_options (rtpbaseaudiopayload,
353 sample_size * 8);
354 }
355
356 /**
357 * gst_rtp_base_audio_payload_set_samplebits_options:
358 * @rtpbaseaudiopayload: a pointer to the element.
359 * @sample_size: Size per sample in bits.
360 *
361 * Sets the options for sample based audio codecs.
362 */
void
gst_rtp_base_audio_payload_set_samplebits_options (GstRTPBaseAudioPayload
    * rtpbaseaudiopayload, gint sample_size)
{
  guint fragment_size;
  GstRTPBaseAudioPayloadPrivate *priv;

  g_return_if_fail (rtpbaseaudiopayload != NULL);

  priv = rtpbaseaudiopayload->priv;

  rtpbaseaudiopayload->sample_size = sample_size;

  /* sample_size is in bits and is converted into multiple bytes */
  /* repeatedly double the bit count until it is a multiple of 8; doubling
   * reaches byte alignment in at most three steps for any sample_size */
  fragment_size = sample_size;
  while ((fragment_size % 8) != 0)
    fragment_size += fragment_size;
  priv->fragment_size = fragment_size / 8;
  /* packets are aligned on whole byte-aligned fragments */
  priv->align = priv->fragment_size;

  /* pending adapter data belonged to the previous configuration */
  gst_adapter_clear (priv->adapter);

  GST_DEBUG_OBJECT (rtpbaseaudiopayload,
      "Samplebits set to sample size %d bits", sample_size);
}
388
/* Write the per-packet metadata on an outgoing RTP buffer: payload type,
 * marker bit (on the first packet after a discont), PTS, RTP offset and
 * duration. Also advances priv->offset by payload_len and records the
 * rtptime/timestamp pair used to realign after a discont. */
static void
gst_rtp_base_audio_payload_set_meta (GstRTPBaseAudioPayload * payload,
    GstBuffer * buffer, guint payload_len, GstClockTime timestamp)
{
  GstRTPBasePayload *basepayload;
  GstRTPBaseAudioPayloadPrivate *priv;
  GstRTPBuffer rtp = { NULL };

  basepayload = GST_RTP_BASE_PAYLOAD_CAST (payload);
  priv = payload->priv;

  /* set payload type */
  gst_rtp_buffer_map (buffer, GST_MAP_WRITE, &rtp);
  gst_rtp_buffer_set_payload_type (&rtp, basepayload->pt);
  /* set marker bit for disconts */
  if (priv->discont) {
    GST_DEBUG_OBJECT (payload, "Setting marker and DISCONT");
    gst_rtp_buffer_set_marker (&rtp, TRUE);
    GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_DISCONT);
    /* only the first packet after the discont carries the marker */
    priv->discont = FALSE;
  }
  gst_rtp_buffer_unmap (&rtp);

  GST_BUFFER_PTS (buffer) = timestamp;

  /* get the offset in RTP time; note this uses the offset BEFORE adding
   * payload_len, i.e. the start position of this packet's data */
  GST_BUFFER_OFFSET (buffer) = priv->bytes_to_rtptime (payload, priv->offset);

  priv->offset += payload_len;

  /* Set the duration from the size */
  GST_BUFFER_DURATION (buffer) = priv->bytes_to_time (payload, payload_len);

  /* remember the last rtptime/timestamp pair. We will use this to realign our
   * RTP timestamp after a buffer discont */
  priv->last_rtptime = GST_BUFFER_OFFSET (buffer);
  priv->last_timestamp = timestamp;
}
427
428 /**
429 * gst_rtp_base_audio_payload_push:
430 * @baseaudiopayload: a #GstRTPBasePayload
431 * @data: (array length=payload_len): data to set as payload
432 * @payload_len: length of payload
433 * @timestamp: a #GstClockTime
434 *
435 * Create an RTP buffer and store @payload_len bytes of @data as the
436 * payload. Set the timestamp on the new buffer to @timestamp before pushing
437 * the buffer downstream.
438 *
439 * Returns: a #GstFlowReturn
440 */
441 GstFlowReturn
gst_rtp_base_audio_payload_push(GstRTPBaseAudioPayload * baseaudiopayload,const guint8 * data,guint payload_len,GstClockTime timestamp)442 gst_rtp_base_audio_payload_push (GstRTPBaseAudioPayload * baseaudiopayload,
443 const guint8 * data, guint payload_len, GstClockTime timestamp)
444 {
445 GstRTPBasePayload *basepayload;
446 GstBuffer *outbuf;
447 guint8 *payload;
448 GstFlowReturn ret;
449 GstRTPBuffer rtp = { NULL };
450
451 basepayload = GST_RTP_BASE_PAYLOAD (baseaudiopayload);
452
453 GST_DEBUG_OBJECT (baseaudiopayload, "Pushing %d bytes ts %" GST_TIME_FORMAT,
454 payload_len, GST_TIME_ARGS (timestamp));
455
456 /* create buffer to hold the payload */
457 outbuf = gst_rtp_base_payload_allocate_output_buffer (basepayload,
458 payload_len, 0, 0);
459
460 /* copy payload */
461 gst_rtp_buffer_map (outbuf, GST_MAP_WRITE, &rtp);
462 payload = gst_rtp_buffer_get_payload (&rtp);
463 memcpy (payload, data, payload_len);
464 gst_rtp_buffer_unmap (&rtp);
465
466 /* set metadata */
467 gst_rtp_base_audio_payload_set_meta (baseaudiopayload, outbuf, payload_len,
468 timestamp);
469
470 ret = gst_rtp_base_payload_push (basepayload, outbuf);
471
472 return ret;
473 }
474
/* Closure passed to foreach_metadata() when copying buffer metas from an
 * input buffer onto a freshly created RTP output buffer. */
typedef struct
{
  GstRTPBaseAudioPayload *pay;  /* payloader instance, used for debug output */
  GstBuffer *outbuf;            /* destination buffer receiving copied metas */
} CopyMetaData;
480
/* gst_buffer_foreach_meta() callback: copy one meta from @inbuf to the
 * output buffer, but only when it is copyable (has a transform function)
 * and is either untagged or carries exactly one tag, the audio tag.
 * Always returns TRUE so iteration continues over the remaining metas. */
static gboolean
foreach_metadata (GstBuffer * inbuf, GstMeta ** meta, gpointer user_data)
{
  CopyMetaData *data = user_data;
  GstRTPBaseAudioPayload *pay = data->pay;
  GstBuffer *outbuf = data->outbuf;
  const GstMetaInfo *info = (*meta)->info;
  const gchar *const *tags = gst_meta_api_type_get_tags (info->api);

  if (info->transform_func && (!tags || (g_strv_length ((gchar **) tags) == 1
              && gst_meta_api_type_has_tag (info->api,
                  g_quark_from_string (GST_META_TAG_AUDIO_STR))))) {
    /* { FALSE, 0, -1 }: plain copy of the whole region */
    GstMetaTransformCopy copy_data = { FALSE, 0, -1 };
    GST_DEBUG_OBJECT (pay, "copy metadata %s", g_type_name (info->api));
    /* simply copy then */
    info->transform_func (outbuf, *meta, inbuf,
        _gst_meta_transform_copy, &copy_data);
  } else {
    GST_DEBUG_OBJECT (pay, "not copying metadata %s", g_type_name (info->api));
  }

  return TRUE;
}
504
/* Push @buffer as the payload of a single RTP packet with @timestamp.
 * A header-only RTP buffer is allocated and the payload buffer is appended
 * to it, avoiding a copy of the payload data. */
static GstFlowReturn
gst_rtp_base_audio_payload_push_buffer (GstRTPBaseAudioPayload *
    baseaudiopayload, GstBuffer * buffer, GstClockTime timestamp)
{
  GstRTPBasePayload *basepayload;
  GstRTPBaseAudioPayloadPrivate *priv;
  GstBuffer *outbuf;
  guint payload_len;
  GstFlowReturn ret;

  priv = baseaudiopayload->priv;
  basepayload = GST_RTP_BASE_PAYLOAD (baseaudiopayload);

  payload_len = gst_buffer_get_size (buffer);

  GST_DEBUG_OBJECT (baseaudiopayload, "Pushing %d bytes ts %" GST_TIME_FORMAT,
      payload_len, GST_TIME_ARGS (timestamp));

  /* create just the RTP header buffer */
  outbuf = gst_rtp_base_payload_allocate_output_buffer (basepayload, 0, 0, 0);

  /* set metadata */
  gst_rtp_base_audio_payload_set_meta (baseaudiopayload, outbuf, payload_len,
      timestamp);

  if (priv->buffer_list) {
    /* NOTE(review): unreachable in practice — set_property forces
     * priv->buffer_list to FALSE. The loop below iterates over a freshly
     * created (empty) list, so its body never runs and an empty list is
     * pushed; the FIXME marks bufferlist support as unfinished. */
    GstBufferList *list;
    guint i, len;

    list = gst_buffer_list_new ();
    len = gst_buffer_list_length (list);

    for (i = 0; i < len; i++) {
      /* FIXME */
      g_warning ("bufferlist not implemented");
      gst_buffer_list_add (list, outbuf);
      gst_buffer_list_add (list, buffer);
    }

    GST_DEBUG_OBJECT (baseaudiopayload, "Pushing list %p", list);
    ret = gst_rtp_base_payload_push_list (basepayload, list);
  } else {
    CopyMetaData data;

    /* copy payload: transfer eligible metas, then append the payload buffer
     * to the header buffer (zero-copy) */
    data.pay = baseaudiopayload;
    data.outbuf = outbuf;
    gst_buffer_foreach_meta (buffer, foreach_metadata, &data);
    outbuf = gst_buffer_append (outbuf, buffer);

    GST_DEBUG_OBJECT (baseaudiopayload, "Pushing buffer %p", outbuf);
    ret = gst_rtp_base_payload_push (basepayload, outbuf);
  }

  return ret;
}
561
562 /**
563 * gst_rtp_base_audio_payload_flush:
564 * @baseaudiopayload: a #GstRTPBasePayload
565 * @payload_len: length of payload
566 * @timestamp: a #GstClockTime
567 *
568 * Create an RTP buffer and store @payload_len bytes of the adapter as the
569 * payload. Set the timestamp on the new buffer to @timestamp before pushing
570 * the buffer downstream.
571 *
572 * If @payload_len is -1, all pending bytes will be flushed. If @timestamp is
573 * -1, the timestamp will be calculated automatically.
574 *
575 * Returns: a #GstFlowReturn
576 */
GstFlowReturn
gst_rtp_base_audio_payload_flush (GstRTPBaseAudioPayload * baseaudiopayload,
    guint payload_len, GstClockTime timestamp)
{
  GstRTPBasePayload *basepayload;
  GstRTPBaseAudioPayloadPrivate *priv;
  GstBuffer *outbuf;
  GstFlowReturn ret;
  GstAdapter *adapter;
  guint64 distance;

  priv = baseaudiopayload->priv;
  adapter = priv->adapter;

  basepayload = GST_RTP_BASE_PAYLOAD (baseaudiopayload);

  /* -1 (wrapped to G_MAXUINT in this guint) means: flush everything pending */
  if (payload_len == -1)
    payload_len = gst_adapter_available (adapter);

  /* nothing to do, just return */
  if (payload_len == 0)
    return GST_FLOW_OK;

  if (timestamp == -1) {
    /* calculate the timestamp: start from the PTS of the buffer the adapter
     * data came from and add the duration of the bytes consumed since then */
    timestamp = gst_adapter_prev_pts (adapter, &distance);

    GST_LOG_OBJECT (baseaudiopayload,
        "last timestamp %" GST_TIME_FORMAT ", distance %" G_GUINT64_FORMAT,
        GST_TIME_ARGS (timestamp), distance);

    if (GST_CLOCK_TIME_IS_VALID (timestamp) && distance > 0) {
      /* convert the number of bytes since the last timestamp to time and add to
       * the last seen timestamp */
      timestamp += priv->bytes_to_time (baseaudiopayload, distance);
    }
  }

  GST_DEBUG_OBJECT (baseaudiopayload, "Pushing %d bytes ts %" GST_TIME_FORMAT,
      payload_len, GST_TIME_ARGS (timestamp));

  if (priv->buffer_list && gst_adapter_available_fast (adapter) >= payload_len) {
    GstBuffer *buffer;
    /* we can quickly take a buffer out of the adapter without having to copy
     * anything. */
    buffer = gst_adapter_take_buffer (adapter, payload_len);

    ret =
        gst_rtp_base_audio_payload_push_buffer (baseaudiopayload, buffer,
        timestamp);
  } else {
    GstBuffer *paybuf;
    CopyMetaData data;


    /* create buffer to hold the payload */
    outbuf = gst_rtp_base_payload_allocate_output_buffer (basepayload, 0, 0, 0);

    /* take the payload bytes out of the adapter, transfer eligible metas to
     * the header buffer, then append the payload (zero-copy) */
    paybuf = gst_adapter_take_buffer_fast (adapter, payload_len);

    data.pay = baseaudiopayload;
    data.outbuf = outbuf;
    gst_buffer_foreach_meta (paybuf, foreach_metadata, &data);
    outbuf = gst_buffer_append (outbuf, paybuf);

    /* set metadata */
    gst_rtp_base_audio_payload_set_meta (baseaudiopayload, outbuf, payload_len,
        timestamp);

    ret = gst_rtp_base_payload_push (basepayload, outbuf);
  }

  return ret;
}
651
/* round val down to the nearest multiple of len (len must be non-zero);
 * note: the macro evaluates its arguments more than once */
#define ALIGN_DOWN(val,len) ((val) - ((val) % (len)))
653
654 /* calculate the min and max length of a packet. This depends on the configured
655 * mtu and min/max_ptime values. We cache those so that we don't have to redo
656 * all the calculations */
static gboolean
gst_rtp_base_audio_payload_get_lengths (GstRTPBasePayload * basepayload,
    guint csrc_count, guint * min_payload_len, guint * max_payload_len,
    guint * align)
{
  GstRTPBaseAudioPayload *payload;
  GstRTPBaseAudioPayloadPrivate *priv;
  guint max_mtu, mtu;
  guint maxptime_octets;
  guint minptime_octets;
  guint ptime_mult_octets;

  payload = GST_RTP_BASE_AUDIO_PAYLOAD_CAST (basepayload);
  priv = payload->priv;

  /* align == 0 means the subclass never called set_frame_options() or
   * set_samplebits_options(); we cannot compute packet sizes */
  if (priv->align == 0)
    return FALSE;

  mtu = GST_RTP_BASE_PAYLOAD_MTU (payload);

  /* check cached values. Since csrc_count may vary for each packet, we only
   * check whether the new value exceeds the cached value and thus result in
   * smaller payload. */
  if (G_LIKELY (priv->cached_mtu == mtu
          && priv->cached_ptime_multiple ==
          basepayload->ptime_multiple
          && priv->cached_ptime == basepayload->ptime
          && priv->cached_max_ptime == basepayload->max_ptime
          && priv->cached_min_ptime == basepayload->min_ptime
          && priv->cached_csrc_count >= csrc_count)) {
    /* if nothing changed, return cached values */
    *min_payload_len = priv->cached_min_length;
    *max_payload_len = priv->cached_max_length;
    *align = priv->cached_align;
    return TRUE;
  }

  /* alignment is the larger of the codec alignment and the ptime-multiple
   * in octets, rounded down to a whole number of codec-alignment units */
  ptime_mult_octets = priv->time_to_bytes (payload,
      basepayload->ptime_multiple);
  *align = ALIGN_DOWN (MAX (priv->align, ptime_mult_octets), priv->align);

  /* ptime max */
  if (basepayload->max_ptime != -1) {
    maxptime_octets = priv->time_to_bytes (payload, basepayload->max_ptime);
  } else {
    maxptime_octets = G_MAXUINT;
  }
  /* MTU max */
  max_mtu = gst_rtp_buffer_calc_payload_len (mtu, 0, csrc_count);
  /* round down to alignment */
  max_mtu = ALIGN_DOWN (max_mtu, *align);

  /* combine max ptime and max payload length */
  *max_payload_len = MIN (max_mtu, maxptime_octets);

  /* min number of bytes based on a given ptime */
  minptime_octets = priv->time_to_bytes (payload, basepayload->min_ptime);
  /* must be at least one frame size */
  *min_payload_len = MAX (minptime_octets, *align);

  /* the maximum always wins over a larger configured minimum */
  if (*min_payload_len > *max_payload_len)
    *min_payload_len = *max_payload_len;

  /* If the ptime is specified in the caps, try to adhere to it exactly */
  if (basepayload->ptime) {
    guint ptime_in_bytes = priv->time_to_bytes (payload,
        basepayload->ptime);

    /* clip to computed min and max lengths */
    ptime_in_bytes = MAX (*min_payload_len, ptime_in_bytes);
    ptime_in_bytes = MIN (*max_payload_len, ptime_in_bytes);

    *min_payload_len = *max_payload_len = ptime_in_bytes;
  }

  /* cache values */
  priv->cached_mtu = mtu;
  priv->cached_ptime = basepayload->ptime;
  priv->cached_min_ptime = basepayload->min_ptime;
  priv->cached_max_ptime = basepayload->max_ptime;
  priv->cached_ptime_multiple = basepayload->ptime_multiple;
  priv->cached_min_length = *min_payload_len;
  priv->cached_max_length = *max_payload_len;
  priv->cached_align = *align;
  priv->cached_csrc_count = csrc_count;

  return TRUE;
}
745
746 /* frame conversions functions */
747 static GstClockTime
gst_rtp_base_audio_payload_frame_bytes_to_time(GstRTPBaseAudioPayload * payload,guint64 bytes)748 gst_rtp_base_audio_payload_frame_bytes_to_time (GstRTPBaseAudioPayload *
749 payload, guint64 bytes)
750 {
751 guint64 framecount;
752
753 framecount = bytes / payload->frame_size;
754 if (G_UNLIKELY (bytes % payload->frame_size))
755 framecount++;
756
757 return framecount * payload->priv->frame_duration_ns;
758 }
759
760 static guint32
gst_rtp_base_audio_payload_frame_bytes_to_rtptime(GstRTPBaseAudioPayload * payload,guint64 bytes)761 gst_rtp_base_audio_payload_frame_bytes_to_rtptime (GstRTPBaseAudioPayload *
762 payload, guint64 bytes)
763 {
764 guint64 framecount;
765 guint64 time;
766
767 framecount = bytes / payload->frame_size;
768 if (G_UNLIKELY (bytes % payload->frame_size))
769 framecount++;
770
771 time = framecount * payload->priv->frame_duration_ns;
772
773 return gst_util_uint64_scale_int (time,
774 GST_RTP_BASE_PAYLOAD (payload)->clock_rate, GST_SECOND);
775 }
776
777 static guint64
gst_rtp_base_audio_payload_frame_time_to_bytes(GstRTPBaseAudioPayload * payload,GstClockTime time)778 gst_rtp_base_audio_payload_frame_time_to_bytes (GstRTPBaseAudioPayload *
779 payload, GstClockTime time)
780 {
781 return gst_util_uint64_scale (time, payload->frame_size,
782 payload->priv->frame_duration_ns);
783 }
784
785 /* sample conversion functions */
786 static GstClockTime
gst_rtp_base_audio_payload_sample_bytes_to_time(GstRTPBaseAudioPayload * payload,guint64 bytes)787 gst_rtp_base_audio_payload_sample_bytes_to_time (GstRTPBaseAudioPayload *
788 payload, guint64 bytes)
789 {
790 guint64 rtptime;
791
792 /* avoid division when we can */
793 if (G_LIKELY (payload->sample_size != 8))
794 rtptime = gst_util_uint64_scale_int (bytes, 8, payload->sample_size);
795 else
796 rtptime = bytes;
797
798 return gst_util_uint64_scale_int (rtptime, GST_SECOND,
799 GST_RTP_BASE_PAYLOAD (payload)->clock_rate);
800 }
801
802 static guint32
gst_rtp_base_audio_payload_sample_bytes_to_rtptime(GstRTPBaseAudioPayload * payload,guint64 bytes)803 gst_rtp_base_audio_payload_sample_bytes_to_rtptime (GstRTPBaseAudioPayload *
804 payload, guint64 bytes)
805 {
806 /* avoid division when we can */
807 if (G_LIKELY (payload->sample_size != 8))
808 return gst_util_uint64_scale_int (bytes, 8, payload->sample_size);
809 else
810 return bytes;
811 }
812
813 static guint64
gst_rtp_base_audio_payload_sample_time_to_bytes(GstRTPBaseAudioPayload * payload,guint64 time)814 gst_rtp_base_audio_payload_sample_time_to_bytes (GstRTPBaseAudioPayload *
815 payload, guint64 time)
816 {
817 guint64 samples;
818
819 samples = gst_util_uint64_scale_int (time,
820 GST_RTP_BASE_PAYLOAD (payload)->clock_rate, GST_SECOND);
821
822 /* avoid multiplication when we can */
823 if (G_LIKELY (payload->sample_size != 8))
824 return gst_util_uint64_scale_int (samples, payload->sample_size, 8);
825 else
826 return samples;
827 }
828
/* Chain function for incoming audio buffers: splits/combines data into RTP
 * packets whose payload size is bounded by the MTU / max-ptime derived
 * max_payload_len and min_payload_len (via _get_lengths), aligned to the
 * frame or sample alignment.  A buffer that already fits the constraints
 * with an empty adapter is pushed directly (fast path); otherwise data is
 * accumulated in the adapter and flushed in aligned chunks. */
static GstFlowReturn
gst_rtp_base_audio_payload_handle_buffer (GstRTPBasePayload *
    basepayload, GstBuffer * buffer)
{
  GstRTPBaseAudioPayload *payload;
  GstRTPBaseAudioPayloadPrivate *priv;
  guint payload_len;
  GstFlowReturn ret;
  guint available;
  guint min_payload_len;
  guint max_payload_len;
  guint align;
  guint size;
  gboolean discont;
  GstClockTime timestamp;

  ret = GST_FLOW_OK;

  payload = GST_RTP_BASE_AUDIO_PAYLOAD_CAST (basepayload);
  priv = payload->priv;

  timestamp = GST_BUFFER_PTS (buffer);
  discont = GST_BUFFER_IS_DISCONT (buffer);
  if (discont) {

    GST_DEBUG_OBJECT (payload, "Got DISCONT");
    /* flush everything out of the adapter, mark DISCONT */
    ret = gst_rtp_base_audio_payload_flush (payload, -1, -1);
    priv->discont = TRUE;

    /* get the distance between the timestamp gap and produce the same gap in
     * the RTP timestamps */
    if (priv->last_timestamp != -1 && timestamp != -1) {
      /* we had a last timestamp, compare it to the new timestamp and update the
       * offset counter for RTP timestamps. The effect is that we will produce
       * output buffers containing the same RTP timestamp gap as the gap
       * between the GST timestamps. */
      if (timestamp > priv->last_timestamp) {
        GstClockTime diff;
        guint64 bytes;
        /* we're only going to apply a positive gap, otherwise we let the marker
         * bit do its thing. simply convert to bytes and add the current
         * offset */
        diff = timestamp - priv->last_timestamp;
        bytes = priv->time_to_bytes (payload, diff);
        priv->offset += bytes;

        GST_DEBUG_OBJECT (payload,
            "elapsed time %" GST_TIME_FORMAT ", bytes %" G_GUINT64_FORMAT
            ", new offset %" G_GUINT64_FORMAT, GST_TIME_ARGS (diff), bytes,
            priv->offset);
      }
    }
  }

  /* recompute packet-size limits; fails if the subclass never configured
   * frame/sample parameters */
  if (!gst_rtp_base_audio_payload_get_lengths (basepayload,
          gst_rtp_base_payload_get_source_count (basepayload, buffer),
          &min_payload_len, &max_payload_len, &align))
    goto config_error;

  GST_DEBUG_OBJECT (payload,
      "Calculated min_payload_len %u and max_payload_len %u",
      min_payload_len, max_payload_len);

  size = gst_buffer_get_size (buffer);

  /* shortcut, we don't need to use the adapter when the packet can be pushed
   * through directly. */
  available = gst_adapter_available (priv->adapter);

  GST_DEBUG_OBJECT (payload, "got buffer size %u, available %u",
      size, available);

  if (available == 0 && (size >= min_payload_len && size <= max_payload_len) &&
      (size % align == 0)) {
    /* If buffer fits on an RTP packet, let's just push it through
     * this will check against max_ptime and max_mtu */
    GST_DEBUG_OBJECT (payload, "Fast packet push");
    ret = gst_rtp_base_audio_payload_push_buffer (payload, buffer, timestamp);
  } else {
    /* push the buffer in the adapter */
    gst_adapter_push (priv->adapter, buffer);
    available += size;

    GST_DEBUG_OBJECT (payload, "available now %u", available);

    /* as long as we have full frames */
    /* TODO: Use buffer lists here */
    while (available >= min_payload_len) {
      /* get multiple of alignment */
      payload_len = MIN (max_payload_len, available);
      payload_len = ALIGN_DOWN (payload_len, align);

      /* and flush out the bytes from the adapter, automatically set the
       * timestamp. */
      ret = gst_rtp_base_audio_payload_flush (payload, payload_len, -1);

      available -= payload_len;
      GST_DEBUG_OBJECT (payload, "available after push %u", available);
    }
  }
  return ret;

  /* ERRORS */
config_error:
  {
    GST_ELEMENT_ERROR (payload, STREAM, NOT_IMPLEMENTED, (NULL),
        ("subclass did not configure us properly"));
    gst_buffer_unref (buffer);
    return GST_FLOW_ERROR;
  }
}
941
942 static GstStateChangeReturn
gst_rtp_base_payload_audio_change_state(GstElement * element,GstStateChange transition)943 gst_rtp_base_payload_audio_change_state (GstElement * element,
944 GstStateChange transition)
945 {
946 GstRTPBaseAudioPayload *rtpbasepayload;
947 GstStateChangeReturn ret;
948
949 rtpbasepayload = GST_RTP_BASE_AUDIO_PAYLOAD (element);
950
951 switch (transition) {
952 case GST_STATE_CHANGE_READY_TO_PAUSED:
953 rtpbasepayload->priv->cached_mtu = -1;
954 rtpbasepayload->priv->last_rtptime = -1;
955 rtpbasepayload->priv->last_timestamp = -1;
956 break;
957 default:
958 break;
959 }
960
961 ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
962
963 switch (transition) {
964 case GST_STATE_CHANGE_PAUSED_TO_READY:
965 gst_adapter_clear (rtpbasepayload->priv->adapter);
966 break;
967 default:
968 break;
969 }
970
971 return ret;
972 }
973
974 static gboolean
gst_rtp_base_payload_audio_sink_event(GstRTPBasePayload * basep,GstEvent * event)975 gst_rtp_base_payload_audio_sink_event (GstRTPBasePayload * basep,
976 GstEvent * event)
977 {
978 GstRTPBaseAudioPayload *payload;
979 gboolean res = FALSE;
980
981 payload = GST_RTP_BASE_AUDIO_PAYLOAD (basep);
982
983 switch (GST_EVENT_TYPE (event)) {
984 case GST_EVENT_EOS:
985 /* flush remaining bytes in the adapter */
986 gst_rtp_base_audio_payload_flush (payload, -1, -1);
987 break;
988 case GST_EVENT_FLUSH_STOP:
989 gst_adapter_clear (payload->priv->adapter);
990 break;
991 default:
992 break;
993 }
994
995 /* let parent handle the remainder of the event */
996 res = GST_RTP_BASE_PAYLOAD_CLASS (parent_class)->sink_event (basep, event);
997
998 return res;
999 }
1000
1001 /**
1002 * gst_rtp_base_audio_payload_get_adapter:
1003 * @rtpbaseaudiopayload: a #GstRTPBaseAudioPayload
1004 *
1005 * Gets the internal adapter used by the depayloader.
1006 *
1007 * Returns: (transfer full): a #GstAdapter.
1008 */
1009 GstAdapter *
gst_rtp_base_audio_payload_get_adapter(GstRTPBaseAudioPayload * rtpbaseaudiopayload)1010 gst_rtp_base_audio_payload_get_adapter (GstRTPBaseAudioPayload
1011 * rtpbaseaudiopayload)
1012 {
1013 GstAdapter *adapter;
1014
1015 if ((adapter = rtpbaseaudiopayload->priv->adapter))
1016 g_object_ref (adapter);
1017
1018 return adapter;
1019 }
1020