1 /*
2 * GStreamer AVTP Plugin
3 * Copyright (C) 2019 Intel Corporation
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2.1 of the License, or (at your option) any later
9 * version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the
18 * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
19 * Boston, MA 02110-1301 USA
20 */
21
22 /**
23 * SECTION:element-avtpcvfpay
24 * @see_also: avtpcvfdepay
25 *
26 * Payload compressed video (currently, only H.264) into AVTPDUs according
27 * to IEEE 1722-2016. For detailed information see
28 * https://standards.ieee.org/standard/1722-2016.html.
29 *
30 * <refsect2>
31 * <title>Example pipeline</title>
32 * |[
33 * gst-launch-1.0 videotestsrc ! x264enc ! avtpcvfpay ! avtpsink
34 * ]| This example pipeline will payload H.264 video. Refer to the avtpcvfdepay
35 * example to depayload and play the AVTP stream.
36 * </refsect2>
37 */
38
39 #include <avtp.h>
40 #include <avtp_cvf.h>
41
42 #include "gstavtpcvfpay.h"
43
/* Debug category for this element; GST_CAT_DEFAULT routes all GST_*_OBJECT
 * logging in this file to it. */
GST_DEBUG_CATEGORY_STATIC (avtpcvfpay_debug);
#define GST_CAT_DEFAULT avtpcvfpay_debug

/* prototypes */

/* Chain/event handlers installed on the base payload class. */
static GstFlowReturn gst_avtp_cvf_pay_chain (GstPad * pad, GstObject * parent,
    GstBuffer * buffer);
static gboolean gst_avtp_cvf_pay_sink_event (GstPad * pad, GstObject * parent,
    GstEvent * event);

/* GObject property accessors. */
static void gst_avtp_cvf_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec);
static void gst_avtp_cvf_get_property (GObject * object, guint prop_id,
    GValue * value, GParamSpec * pspec);

/* Allocates/releases the template AVTP header on NULL<->READY transitions. */
static GstStateChangeReturn gst_avtp_cvf_change_state (GstElement *
    element, GstStateChange transition);
61
/* GObject property IDs (PROP_0 is the GObject-reserved sentinel). */
enum
{
  PROP_0,
  PROP_MTU,
  PROP_MEASUREMENT_INTERVAL,
  PROP_MAX_INTERVAL_FRAME
};

/* Property defaults: 1500-byte Ethernet MTU, 250us measurement interval,
 * one network frame per interval. */
#define DEFAULT_MTU 1500
#define DEFAULT_MEASUREMENT_INTERVAL 250000
#define DEFAULT_MAX_INTERVAL_FRAMES 1

/* Size of the AVTP CVF H.264 header: the common AVTP stream PDU header plus
 * the 32-bit h264_timestamp field that precedes the payload proper. */
#define AVTP_CVF_H264_HEADER_SIZE (sizeof(struct avtp_stream_pdu) + sizeof(guint32))
/* FU-A fragmentation unit (NAL type 28, as in RFC 6184): one FU indicator
 * byte followed by one FU header byte. */
#define FU_A_TYPE 28
#define FU_A_HEADER_SIZE (sizeof(guint16))

/* Masks/shifts to pick apart the H.264 NAL unit header (nal_ref_idc, type)
 * and to assemble the FU indicator/header bytes. */
#define NRI_MASK 0x60
#define NRI_SHIFT 5
#define START_SHIFT 7
#define END_SHIFT 6
#define NAL_TYPE_MASK 0x1f
/* NAL types 1..5 are VCL (picture data) NALs. */
#define FIRST_NAL_VCL_TYPE 0x01
#define LAST_NAL_VCL_TYPE 0x05
/* Low two bits of avcC byte 4 encode (NAL length prefix size - 1). */
#define NAL_LEN_SIZE_MASK 0x03

/* pad templates */

/* Only length-prefixed (avc) access-unit-aligned H.264 is accepted; the
 * NAL length prefix size comes from the codec_data caps field. */
static GstStaticPadTemplate sink_template = GST_STATIC_PAD_TEMPLATE ("sink",
    GST_PAD_SINK,
    GST_PAD_ALWAYS,
    GST_STATIC_CAPS ("video/x-h264, "
        "stream-format = (string) avc, alignment = (string) au")
    );
95
96 /* class initialization */
97
/* GObject type boilerplate: GstAvtpCvfPay is a GstAvtpBasePayload subclass,
 * registered as the "avtpcvfpay" element. */
#define gst_avtp_cvf_pay_parent_class parent_class
G_DEFINE_TYPE (GstAvtpCvfPay, gst_avtp_cvf_pay, GST_TYPE_AVTP_BASE_PAYLOAD);
GST_ELEMENT_REGISTER_DEFINE (avtpcvfpay, "avtpcvfpay", GST_RANK_NONE,
    GST_TYPE_AVTP_CVF_PAY);
102
103 static void
gst_avtp_cvf_pay_class_init(GstAvtpCvfPayClass * klass)104 gst_avtp_cvf_pay_class_init (GstAvtpCvfPayClass * klass)
105 {
106 GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
107 GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
108 GstAvtpBasePayloadClass *avtpbasepayload_class =
109 GST_AVTP_BASE_PAYLOAD_CLASS (klass);
110
111 gst_element_class_add_static_pad_template (gstelement_class, &sink_template);
112
113 gst_element_class_set_static_metadata (gstelement_class,
114 "AVTP Compressed Video Format (CVF) payloader",
115 "Codec/Payloader/Network/AVTP",
116 "Payload-encode compressed video into CVF AVTPDU (IEEE 1722)",
117 "Ederson de Souza <ederson.desouza@intel.com>");
118
119 gobject_class->set_property = GST_DEBUG_FUNCPTR (gst_avtp_cvf_set_property);
120 gobject_class->get_property = GST_DEBUG_FUNCPTR (gst_avtp_cvf_get_property);
121
122 gstelement_class->change_state =
123 GST_DEBUG_FUNCPTR (gst_avtp_cvf_change_state);
124
125 avtpbasepayload_class->chain = GST_DEBUG_FUNCPTR (gst_avtp_cvf_pay_chain);
126 avtpbasepayload_class->sink_event =
127 GST_DEBUG_FUNCPTR (gst_avtp_cvf_pay_sink_event);
128
129 g_object_class_install_property (gobject_class, PROP_MTU,
130 g_param_spec_uint ("mtu", "Maximum Transit Unit",
131 "Maximum Transit Unit (MTU) of underlying network in bytes", 0,
132 G_MAXUINT, DEFAULT_MTU, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
133
134 g_object_class_install_property (gobject_class, PROP_MEASUREMENT_INTERVAL,
135 g_param_spec_uint64 ("measurement-interval", "Measurement Interval",
136 "Measurement interval of stream in nanoseconds", 0,
137 G_MAXUINT64, DEFAULT_MEASUREMENT_INTERVAL,
138 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
139
140 g_object_class_install_property (gobject_class, PROP_MAX_INTERVAL_FRAME,
141 g_param_spec_uint ("max-interval-frames", "Maximum Interval Frames",
142 "Maximum number of network frames to be sent on each Measurement Interval",
143 1, G_MAXUINT, DEFAULT_MAX_INTERVAL_FRAMES,
144 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
145
146 GST_DEBUG_CATEGORY_INIT (avtpcvfpay_debug, "avtpcvfpay",
147 0, "debug category for avtpcvfpay element");
148 }
149
150 static void
gst_avtp_cvf_pay_init(GstAvtpCvfPay * avtpcvfpay)151 gst_avtp_cvf_pay_init (GstAvtpCvfPay * avtpcvfpay)
152 {
153 avtpcvfpay->mtu = DEFAULT_MTU;
154 avtpcvfpay->header = NULL;
155 avtpcvfpay->nal_length_size = 0;
156 avtpcvfpay->measurement_interval = DEFAULT_MEASUREMENT_INTERVAL;
157 avtpcvfpay->max_interval_frames = DEFAULT_MAX_INTERVAL_FRAMES;
158 avtpcvfpay->last_interval_ct = 0;
159 }
160
161 static void
gst_avtp_cvf_set_property(GObject * object,guint prop_id,const GValue * value,GParamSpec * pspec)162 gst_avtp_cvf_set_property (GObject * object, guint prop_id,
163 const GValue * value, GParamSpec * pspec)
164 {
165 GstAvtpCvfPay *avtpcvfpay = GST_AVTP_CVF_PAY (object);
166
167 GST_DEBUG_OBJECT (avtpcvfpay, "prop_id: %u", prop_id);
168
169 switch (prop_id) {
170 case PROP_MTU:
171 avtpcvfpay->mtu = g_value_get_uint (value);
172 break;
173 case PROP_MEASUREMENT_INTERVAL:
174 avtpcvfpay->measurement_interval = g_value_get_uint64 (value);
175 break;
176 case PROP_MAX_INTERVAL_FRAME:
177 avtpcvfpay->max_interval_frames = g_value_get_uint (value);
178 break;
179 default:
180 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
181 break;
182 }
183 }
184
185 static void
gst_avtp_cvf_get_property(GObject * object,guint prop_id,GValue * value,GParamSpec * pspec)186 gst_avtp_cvf_get_property (GObject * object, guint prop_id,
187 GValue * value, GParamSpec * pspec)
188 {
189 GstAvtpCvfPay *avtpcvfpay = GST_AVTP_CVF_PAY (object);
190
191 GST_DEBUG_OBJECT (avtpcvfpay, "prop_id: %u", prop_id);
192
193 switch (prop_id) {
194 case PROP_MTU:
195 g_value_set_uint (value, avtpcvfpay->mtu);
196 break;
197 case PROP_MEASUREMENT_INTERVAL:
198 g_value_set_uint64 (value, avtpcvfpay->measurement_interval);
199 break;
200 case PROP_MAX_INTERVAL_FRAME:
201 g_value_set_uint (value, avtpcvfpay->max_interval_frames);
202 break;
203 default:
204 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
205 break;
206 }
207 }
208
209 static GstStateChangeReturn
gst_avtp_cvf_change_state(GstElement * element,GstStateChange transition)210 gst_avtp_cvf_change_state (GstElement * element, GstStateChange transition)
211 {
212 GstAvtpCvfPay *avtpcvfpay = GST_AVTP_CVF_PAY (element);
213 GstAvtpBasePayload *avtpbasepayload = GST_AVTP_BASE_PAYLOAD (avtpcvfpay);
214 GstStateChangeReturn ret;
215
216 if (transition == GST_STATE_CHANGE_NULL_TO_READY) {
217 GstMapInfo map;
218 struct avtp_stream_pdu *pdu;
219 int res;
220
221 avtpcvfpay->header = gst_buffer_new_allocate (NULL,
222 AVTP_CVF_H264_HEADER_SIZE, NULL);
223 if (avtpcvfpay->header == NULL) {
224 GST_ERROR_OBJECT (avtpcvfpay, "Could not allocate buffer");
225 return GST_STATE_CHANGE_FAILURE;
226 }
227
228 gst_buffer_map (avtpcvfpay->header, &map, GST_MAP_WRITE);
229 pdu = (struct avtp_stream_pdu *) map.data;
230
231 res = avtp_cvf_pdu_init (pdu, AVTP_CVF_FORMAT_SUBTYPE_H264);
232 g_assert (res == 0);
233
234 res =
235 avtp_cvf_pdu_set (pdu, AVTP_CVF_FIELD_STREAM_ID,
236 avtpbasepayload->streamid);
237 g_assert (res == 0);
238
239 gst_buffer_unmap (avtpcvfpay->header, &map);
240 }
241
242 ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
243 if (ret == GST_STATE_CHANGE_FAILURE) {
244 return ret;
245 }
246
247 if (transition == GST_STATE_CHANGE_READY_TO_NULL) {
248 gst_buffer_unref (avtpcvfpay->header);
249 }
250
251 return ret;
252 }
253
/* Split an avc-format access unit into its individual NAL units.
 *
 * @buffer: incoming AU; ownership is taken (always unref'd here).
 * @nals: output array; one GstBuffer per extracted NAL is appended.
 *
 * Each NAL in the AU is preceded by a big-endian length prefix of
 * nal_length_size bytes (learned from codec_data caps). On any error the
 * function just stops, leaving whatever NALs were extracted so far. */
static void
gst_avtp_cvf_pay_extract_nals (GstAvtpCvfPay * avtpcvfpay,
    GstBuffer * buffer, GPtrArray * nals)
{
  /* The buffer may have more than one NAL. They are grouped together, and before
   * each NAL there are some bytes that indicate how big is the NAL */

  gsize size, offset = 0;
  GstMapInfo map;
  guint8 *data;
  gboolean res;

  if (G_UNLIKELY (avtpcvfpay->nal_length_size == 0)) {
    GST_ERROR_OBJECT (avtpcvfpay,
        "Can't extract NAL units without nal length size. Missing codec_data caps?");
    goto end;
  }

  res = gst_buffer_map (buffer, &map, GST_MAP_READ);
  if (!res) {
    GST_ERROR_OBJECT (avtpcvfpay, "Could not map buffer");
    goto end;
  }

  size = map.size;
  data = map.data;

  /* Keep going while there is at least one length prefix plus some payload
   * left to read. */
  while (size > avtpcvfpay->nal_length_size) {
    gint i;
    guint nal_len = 0;
    GstBuffer *nal;

    /* Gets NAL length (big-endian, nal_length_size bytes) */
    for (i = 0; i < avtpcvfpay->nal_length_size; i++) {
      nal_len = (nal_len << 8) + data[i];
    }

    if (nal_len == 0) {
      GST_WARNING_OBJECT (avtpcvfpay, "Invalid NAL unit size: 0");
      break;
    }

    /* Skip past the length prefix. */
    offset += avtpcvfpay->nal_length_size;
    data += avtpcvfpay->nal_length_size;
    size -= avtpcvfpay->nal_length_size;

    /* Truncated AU: clamp to what is actually available. */
    if (G_UNLIKELY (size < nal_len)) {
      GST_WARNING_OBJECT (avtpcvfpay,
          "Got incomplete NAL: NAL len %u, buffer len %zu", nal_len, size);
      nal_len = size;
    }

    /* Reference the NAL bytes out of the parent buffer (no deep copy of the
     * data itself) and carry the AU timestamps along. */
    nal = gst_buffer_copy_region (buffer, GST_BUFFER_COPY_ALL, offset, nal_len);
    GST_BUFFER_PTS (nal) = GST_BUFFER_PTS (buffer);
    GST_BUFFER_DTS (nal) = GST_BUFFER_DTS (buffer);
    g_ptr_array_add (nals, nal);

    offset += nal_len;
    data += nal_len;
    size -= nal_len;
  }

  gst_buffer_unmap (buffer, &map);

end:
  /* This function consumes the buffer, and all references to it are in the
   * extracted nals, so we can release the reference to the buffer itself */
  gst_buffer_unref (buffer);

  GST_LOG_OBJECT (avtpcvfpay, "Extracted %u NALu's from buffer", nals->len);
}
325
326 static gboolean
gst_avtp_cvf_pay_is_nal_vcl(GstAvtpCvfPay * avtpcvfpay,GstBuffer * nal)327 gst_avtp_cvf_pay_is_nal_vcl (GstAvtpCvfPay * avtpcvfpay, GstBuffer * nal)
328 {
329 guint8 nal_header, nal_type;
330
331 gst_buffer_extract (nal, 0, &nal_header, 1);
332 nal_type = nal_header & NAL_TYPE_MASK;
333
334 return nal_type >= FIRST_NAL_VCL_TYPE && nal_type <= LAST_NAL_VCL_TYPE;
335 }
336
/* Produce the next fragment of @nal that fits the configured MTU.
 *
 * @offset: in/out cursor into the NAL; caller starts it at 0 and passes it
 *   back unchanged on each call. When the whole NAL fits in one PDU it is
 *   set to nal_size so the next call returns NULL.
 * @last_fragment: out; TRUE when the returned buffer is the final fragment.
 *
 * Returns a new buffer reference (a ref of @nal itself in the unfragmented
 * case, or FU-A header + payload slice otherwise), or NULL when the NAL is
 * exhausted (or FU header allocation fails). Fragmentation follows the
 * RFC 6184 FU-A scheme: the original NAL header byte is not transmitted,
 * its fields being spread over the FU indicator/header bytes instead. */
static GstBuffer *
gst_avtpcvpay_fragment_nal (GstAvtpCvfPay * avtpcvfpay, GstBuffer * nal,
    gsize * offset, gboolean * last_fragment)
{
  GstBuffer *fragment_header, *fragment;
  guint8 nal_header, nal_type, nal_nri, fu_indicator, fu_header;
  gsize available, nal_size, fragment_size, remaining;
  GstMapInfo map;

  nal_size = gst_buffer_get_size (nal);

  /* If NAL + header will be smaller than MTU, nothing to fragment */
  if (*offset == 0 && (nal_size + AVTP_CVF_H264_HEADER_SIZE) <= avtpcvfpay->mtu) {
    *last_fragment = TRUE;
    *offset = nal_size;
    GST_DEBUG_OBJECT (avtpcvfpay,
        "Generated fragment with size %" G_GSIZE_FORMAT, nal_size);
    return gst_buffer_ref (nal);
  }

  /* We're done with this buffer */
  if (*offset == nal_size) {
    return NULL;
  }

  *last_fragment = FALSE;

  /* Remaining size is smaller than MTU, so this is the last fragment */
  remaining = nal_size - *offset + AVTP_CVF_H264_HEADER_SIZE + FU_A_HEADER_SIZE;
  if (remaining <= avtpcvfpay->mtu) {
    *last_fragment = TRUE;
  }

  fragment_header = gst_buffer_new_allocate (NULL, FU_A_HEADER_SIZE, NULL);
  if (G_UNLIKELY (fragment_header == NULL)) {
    GST_ERROR_OBJECT (avtpcvfpay, "Could not allocate memory for buffer");
    return NULL;
  }

  /* NAL header info is spread to all FUs */
  gst_buffer_extract (nal, 0, &nal_header, 1);
  nal_type = nal_header & NAL_TYPE_MASK;
  nal_nri = (nal_header & NRI_MASK) >> NRI_SHIFT;

  /* FU indicator carries the NRI bits plus the FU-A type; FU header carries
   * start/end flags and the original NAL type. */
  fu_indicator = (nal_nri << NRI_SHIFT) | FU_A_TYPE;
  fu_header = ((*offset == 0) << START_SHIFT) |
      ((*last_fragment == TRUE) << END_SHIFT) | nal_type;

  gst_buffer_map (fragment_header, &map, GST_MAP_WRITE);
  map.data[0] = fu_indicator;
  map.data[1] = fu_header;
  gst_buffer_unmap (fragment_header, &map);

  /* Payload budget after AVTP and FU-A headers. NOTE(review): assumes
   * mtu >= AVTP_CVF_H264_HEADER_SIZE + FU_A_HEADER_SIZE; a smaller MTU
   * property value would underflow here - confirm against property range. */
  available =
      avtpcvfpay->mtu - AVTP_CVF_H264_HEADER_SIZE -
      gst_buffer_get_size (fragment_header);

  /* NAL unit header is not sent, but spread into FU indicator and header,
   * and reconstructed on depayloader */
  if (*offset == 0)
    *offset = 1;

  fragment_size =
      available < (nal_size - *offset) ? available : (nal_size - *offset);

  /* FU-A header followed by the payload slice taken from the NAL. */
  fragment =
      gst_buffer_append (fragment_header, gst_buffer_copy_region (nal,
          GST_BUFFER_COPY_MEMORY, *offset, fragment_size));

  *offset += fragment_size;

  GST_DEBUG_OBJECT (avtpcvfpay,
      "Generated fragment with size %" G_GSIZE_FORMAT, fragment_size);

  return fragment;
}
413
/* Spread packet DTSs across the stream measurement interval.
 * See the long comment below for the full rationale. */
static void
gst_avtp_cvf_pay_spread_ts (GstAvtpCvfPay * avtpcvfpay,
    GPtrArray * avtp_packets)
{
  /* A bit of the idea of what this function do:
   *
   * After fragmenting the NAL unit, we have a series of AVTPDUs (AVTP Data Units)
   * that should be transmitted. They are going to be transmitted according to GstBuffer
   * DTS (or PTS in case there's no DTS), but all of them have the same DTS, as they
   * came from the same original NAL unit.
   *
   * However, TSN streams should send their data according to a "measurement interval",
   * which is an arbitrary interval defined for the stream. For instance, a class A
   * stream has measurement interval of 125us. Also, there's a MaxIntervalFrames
   * parameter, that defines how many network frames can be sent on a given measurement
   * interval. We also spread MaxIntervalFrames per measurement interval.
   *
   * To that end, this function will spread the DTS so that fragments follow measurement
   * interval and MaxIntervalFrames, adjusting them to end before the actual DTS of the
   * original NAL unit.
   *
   * Roughly, this function does:
   *
   * DTSn = DTSbase - (measurement_interval/MaxIntervalFrames) * (total - n - 1)
   *
   * Where:
   * DTSn = DTS of nth fragment
   * DTSbase = DTS of original NAL unit
   * total = # of fragments
   *
   * Another issue that this function takes care of is avoiding DTSs that overlap between
   * two different set of fragments. Assuming DTSlast is the DTS of the last fragment
   * generated on previous call to this function, we don't want any DTSn for the current
   * call to be smaller than DTSlast + (measurement_interval / MaxIntervalFrames). If
   * that's the case, we adjust DTSbase to preserve this difference (so we don't schedule
   * packets transmission times that violate stream spec). This will cause the last
   * fragment DTS to be bigger than DTSbase - we emit a warning, as this may be a sign
   * of a bad pipeline setup or inappropriate stream spec.
   *
   * Finally, we also avoid underflows - which would occur when DTSbase is zero or small
   * enough. In this case, we'll again make last fragment DTS > DTSbase, so we log it.
   *
   */

  GstAvtpBasePayload *avtpbasepayload = GST_AVTP_BASE_PAYLOAD (avtpcvfpay);

  gint i, ret;
  guint len;
  guint64 tx_interval, total_interval;
  GstClockTime base_time, base_dts, rt;
  GstBuffer *packet;

  base_time = gst_element_get_base_time (GST_ELEMENT (avtpcvfpay));
  /* All packets carry the same DTS here (set from the original NAL), so the
   * first one serves as DTSbase. */
  base_dts = GST_BUFFER_DTS (g_ptr_array_index (avtp_packets, 0));

  /* Per-packet transmit spacing and the total span needed for all packets. */
  tx_interval =
      avtpcvfpay->measurement_interval / avtpcvfpay->max_interval_frames;
  len = avtp_packets->len;
  total_interval = tx_interval * (len - 1);

  /* We don't want packets transmission time to overlap, so let's ensure
   * packets are scheduled after last interval used */
  if (avtpcvfpay->last_interval_ct != 0) {
    GstClockTime dts_ct, dts_rt;

    /* ret == -1 flags a negative running time; negate to get its
     * magnitude back. */
    ret =
        gst_segment_to_running_time_full (&avtpbasepayload->segment,
        GST_FORMAT_TIME, base_dts, &dts_rt);
    if (ret == -1)
      dts_rt = -dts_rt;

    /* Clock time of DTSbase. */
    dts_ct = base_time + dts_rt;

    if (dts_ct < avtpcvfpay->last_interval_ct + total_interval + tx_interval) {
      base_dts +=
          avtpcvfpay->last_interval_ct + total_interval + tx_interval - dts_ct;

      GST_WARNING_OBJECT (avtpcvfpay,
          "Not enough measurements intervals between frames to transmit fragments"
          ". Check stream transmission spec.");
    }
  }

  /* Not enough room to spread tx before DTS (or we would underflow),
   * add offset */
  if (total_interval > base_dts) {
    base_dts += total_interval - base_dts;

    GST_INFO_OBJECT (avtpcvfpay,
        "Not enough measurements intervals to transmit fragments before base "
        "DTS. Check pipeline settings. Are we live?");
  }

  /* DTSn = DTSbase - tx_interval * (total - n - 1); last packet lands
   * exactly on (the possibly adjusted) DTSbase. */
  for (i = 0; i < len; i++) {
    packet = g_ptr_array_index (avtp_packets, i);
    GST_BUFFER_DTS (packet) = base_dts - tx_interval * (len - i - 1);
  }

  /* Remember last interval used, in clock time */
  ret =
      gst_segment_to_running_time_full (&avtpbasepayload->segment,
      GST_FORMAT_TIME, GST_BUFFER_DTS (g_ptr_array_index (avtp_packets,
              avtp_packets->len - 1)), &rt);
  if (ret == -1)
    rt = -rt;
  avtpcvfpay->last_interval_ct = base_time + rt;
}
521
/* Build the AVTPDUs for a list of NAL units.
 *
 * @nals: NAL buffers from gst_avtp_cvf_pay_extract_nals; each entry is
 *   consumed (unref'd) here.
 * @avtp_packets: output array; one finished AVTPDU buffer per fragment is
 *   appended, ready to be pushed downstream.
 *
 * Each NAL is fragmented to fit the MTU, prefixed with a copy of the
 * template AVTP header with per-packet fields (seq num, timestamps, M bit)
 * filled in, and finally the whole batch gets its DTSs spread over the
 * measurement interval. Always returns TRUE. */
static gboolean
gst_avtp_cvf_pay_prepare_avtp_packets (GstAvtpCvfPay * avtpcvfpay,
    GPtrArray * nals, GPtrArray * avtp_packets)
{
  GstAvtpBasePayload *avtpbasepayload = GST_AVTP_BASE_PAYLOAD (avtpcvfpay);
  GstBuffer *header, *nal;
  GstMapInfo map;
  gint i;

  for (i = 0; i < nals->len; i++) {
    guint64 avtp_time, h264_time;
    gboolean last_fragment;
    GstBuffer *fragment;
    gsize offset;

    nal = g_ptr_array_index (nals, i);
    GST_LOG_OBJECT (avtpcvfpay,
        "Preparing AVTP packets for NAL whose size is %" G_GSIZE_FORMAT,
        gst_buffer_get_size (nal));

    /* Calculate timestamps. Note that we do it twice, one using DTS as base,
     * the other using PTS - using code inherited from avtpbasepayload.
     * Also worth noting: `avtpbasepayload->latency` is updated after
     * first call to gst_avtp_base_payload_calc_ptime, so we MUST call
     * it before using the latency value */
    h264_time = gst_avtp_base_payload_calc_ptime (avtpbasepayload, nal);

    avtp_time =
        gst_element_get_base_time (GST_ELEMENT (avtpcvfpay)) +
        gst_segment_to_running_time (&avtpbasepayload->segment, GST_FORMAT_TIME,
        GST_BUFFER_DTS_OR_PTS (nal)) + avtpbasepayload->mtt +
        avtpbasepayload->tu + avtpbasepayload->processing_deadline +
        avtpbasepayload->latency;

    /* Pull fragments off the NAL until it is exhausted. */
    offset = 0;
    while ((fragment =
            gst_avtpcvpay_fragment_nal (avtpcvfpay, nal, &offset,
                &last_fragment))) {
      GstBuffer *packet;
      struct avtp_stream_pdu *pdu;
      gint res;

      /* Copy header to reuse common fields and change what is needed */
      header = gst_buffer_copy (avtpcvfpay->header);
      gst_buffer_map (header, &map, GST_MAP_WRITE);
      pdu = (struct avtp_stream_pdu *) map.data;

      /* Stream data len includes AVTP H264 header len as this is part of
       * the payload too. It's just the uint32_t with the h264 timestamp*/
      res =
          avtp_cvf_pdu_set (pdu, AVTP_CVF_FIELD_STREAM_DATA_LEN,
          gst_buffer_get_size (fragment) + sizeof (uint32_t));
      g_assert (res == 0);

      /* Per-packet sequence number (wraps with the base payload counter). */
      res =
          avtp_cvf_pdu_set (pdu, AVTP_CVF_FIELD_SEQ_NUM,
          avtpbasepayload->seqnum++);
      g_assert (res == 0);

      /* Although AVTP_TIMESTAMP is only set on the very last fragment, IEEE 1722
       * doesn't mention such need for H264_TIMESTAMP. So, we set it for all
       * fragments */
      res = avtp_cvf_pdu_set (pdu, AVTP_CVF_FIELD_H264_TIMESTAMP, h264_time);
      g_assert (res == 0);
      res = avtp_cvf_pdu_set (pdu, AVTP_CVF_FIELD_H264_PTV, 1);
      g_assert (res == 0);

      /* Only last fragment should have M, AVTP_TS and TV fields set */
      if (last_fragment) {
        gboolean M;

        res = avtp_cvf_pdu_set (pdu, AVTP_CVF_FIELD_TV, 1);
        g_assert (res == 0);

        res = avtp_cvf_pdu_set (pdu, AVTP_CVF_FIELD_TIMESTAMP, avtp_time);
        g_assert (res == 0);

        /* Set M only if last NAL and it is a VCL NAL */
        M = (i == nals->len - 1)
            && gst_avtp_cvf_pay_is_nal_vcl (avtpcvfpay, nal);
        res = avtp_cvf_pdu_set (pdu, AVTP_CVF_FIELD_M, M);
        g_assert (res == 0);

        if (M) {
          GST_LOG_OBJECT (avtpcvfpay, "M packet sent, PTS: %" GST_TIME_FORMAT
              " DTS: %" GST_TIME_FORMAT " AVTP_TS: %" GST_TIME_FORMAT
              " H264_TS: %" GST_TIME_FORMAT "\navtp_time: %" G_GUINT64_FORMAT
              " h264_time: %" G_GUINT64_FORMAT, GST_TIME_ARGS (h264_time),
              GST_TIME_ARGS (avtp_time), GST_TIME_ARGS (avtp_time & 0xffffffff),
              GST_TIME_ARGS (h264_time & 0xffffffff), avtp_time, h264_time);
        }
      }

      /* header takes ownership of fragment; packet is the finished PDU. */
      packet = gst_buffer_append (header, fragment);

      /* Keep original timestamps */
      GST_BUFFER_PTS (packet) = GST_BUFFER_PTS (nal);
      GST_BUFFER_DTS (packet) = GST_BUFFER_DTS (nal);

      g_ptr_array_add (avtp_packets, packet);

      gst_buffer_unmap (header, &map);
    }

    /* NAL fully consumed; fragments hold any remaining references. */
    gst_buffer_unref (nal);
  }

  GST_LOG_OBJECT (avtpcvfpay, "Prepared %u AVTP packets", avtp_packets->len);

  /* Ensure DTS/PTS respect stream transmit spec, so PDUs are transmitted
   * according to measurement interval. */
  if (avtp_packets->len > 0)
    gst_avtp_cvf_pay_spread_ts (avtpcvfpay, avtp_packets);

  return TRUE;
}
638
639 static GstFlowReturn
gst_avtp_cvf_pay_push_packets(GstAvtpCvfPay * avtpcvfpay,GPtrArray * avtp_packets)640 gst_avtp_cvf_pay_push_packets (GstAvtpCvfPay * avtpcvfpay,
641 GPtrArray * avtp_packets)
642 {
643 int i;
644 GstFlowReturn ret;
645 GstAvtpBasePayload *avtpbasepayload = GST_AVTP_BASE_PAYLOAD (avtpcvfpay);
646
647 for (i = 0; i < avtp_packets->len; i++) {
648 GstBuffer *packet;
649
650 packet = g_ptr_array_index (avtp_packets, i);
651 ret = gst_pad_push (avtpbasepayload->srcpad, packet);
652 if (ret != GST_FLOW_OK)
653 return ret;
654 }
655
656 return GST_FLOW_OK;
657 }
658
659 static GstFlowReturn
gst_avtp_cvf_pay_chain(GstPad * pad,GstObject * parent,GstBuffer * buffer)660 gst_avtp_cvf_pay_chain (GstPad * pad, GstObject * parent, GstBuffer * buffer)
661 {
662 GstAvtpBasePayload *avtpbasepayload = GST_AVTP_BASE_PAYLOAD (parent);
663 GstAvtpCvfPay *avtpcvfpay = GST_AVTP_CVF_PAY (avtpbasepayload);
664 GPtrArray *nals, *avtp_packets;
665 GstFlowReturn ret = GST_FLOW_OK;
666
667 GST_LOG_OBJECT (avtpcvfpay,
668 "Incoming buffer size: %" G_GSIZE_FORMAT " PTS: %" GST_TIME_FORMAT
669 " DTS: %" GST_TIME_FORMAT, gst_buffer_get_size (buffer),
670 GST_TIME_ARGS (GST_BUFFER_PTS (buffer)),
671 GST_TIME_ARGS (GST_BUFFER_DTS (buffer)));
672
673 /* Get all NALs inside buffer */
674 nals = g_ptr_array_new ();
675 gst_avtp_cvf_pay_extract_nals (avtpcvfpay, buffer, nals);
676
677 /* Prepare a list of avtp_packets to send */
678 avtp_packets = g_ptr_array_new ();
679 gst_avtp_cvf_pay_prepare_avtp_packets (avtpcvfpay, nals, avtp_packets);
680
681 ret = gst_avtp_cvf_pay_push_packets (avtpcvfpay, avtp_packets);
682
683 /* Contents of both ptr_arrays should be unref'd or transferred
684 * to rightful owner by this point, no need to unref them again */
685 g_ptr_array_free (nals, TRUE);
686 g_ptr_array_free (avtp_packets, TRUE);
687
688 return ret;
689 }
690
691 static gboolean
gst_avtp_cvf_pay_new_caps(GstAvtpCvfPay * avtpcvfpay,GstCaps * caps)692 gst_avtp_cvf_pay_new_caps (GstAvtpCvfPay * avtpcvfpay, GstCaps * caps)
693 {
694 const GValue *value;
695 GstStructure *str;
696 GstBuffer *buffer;
697 GstMapInfo map;
698
699 str = gst_caps_get_structure (caps, 0);
700
701 if ((value = gst_structure_get_value (str, "codec_data"))) {
702 guint8 *data;
703 gsize size;
704
705 buffer = gst_value_get_buffer (value);
706 gst_buffer_map (buffer, &map, GST_MAP_READ);
707 data = map.data;
708 size = map.size;
709
710 if (G_UNLIKELY (size < 7)) {
711 GST_ERROR_OBJECT (avtpcvfpay, "avcC size %" G_GSIZE_FORMAT " < 7", size);
712 goto error;
713 }
714 if (G_UNLIKELY (data[0] != 1)) {
715 GST_ERROR_OBJECT (avtpcvfpay, "avcC version %u != 1", data[0]);
716 goto error;
717 }
718
719 /* Number of bytes in front of NAL units marking their size */
720 avtpcvfpay->nal_length_size = (data[4] & NAL_LEN_SIZE_MASK) + 1;
721 GST_DEBUG_OBJECT (avtpcvfpay, "Got NAL length from caps: %u",
722 avtpcvfpay->nal_length_size);
723
724 gst_buffer_unmap (buffer, &map);
725 }
726
727 return TRUE;
728
729 error:
730 gst_buffer_unmap (buffer, &map);
731 return FALSE;
732 }
733
734 static gboolean
gst_avtp_cvf_pay_sink_event(GstPad * pad,GstObject * parent,GstEvent * event)735 gst_avtp_cvf_pay_sink_event (GstPad * pad, GstObject * parent, GstEvent * event)
736 {
737 GstCaps *caps;
738 GstAvtpBasePayload *avtpbasepayload = GST_AVTP_BASE_PAYLOAD (parent);
739 GstAvtpCvfPay *avtpcvfpay = GST_AVTP_CVF_PAY (avtpbasepayload);
740 gboolean ret;
741
742 GST_DEBUG_OBJECT (avtpcvfpay, "Sink event %s", GST_EVENT_TYPE_NAME (event));
743
744 switch (GST_EVENT_TYPE (event)) {
745 case GST_EVENT_CAPS:
746 gst_event_parse_caps (event, &caps);
747 ret = gst_avtp_cvf_pay_new_caps (avtpcvfpay, caps);
748 gst_event_unref (event);
749 return ret;
750 case GST_EVENT_FLUSH_STOP:
751 if (GST_ELEMENT (avtpcvfpay)->current_state == GST_STATE_PLAYING) {
752 /* After a flush, the sink will reset pipeline base_time, but only
753 * after it gets the first buffer. So, here, we used the wrong
754 * base_time to calculate DTS. We'll just notice base_time changed
755 * when we get the next buffer. So, we'll basically mess with
756 * timestamps of two frames, which is bad. Known workaround is
757 * to pause the pipeline before a flushing seek - so that we'll
758 * be up to date to new pipeline base_time */
759 GST_WARNING_OBJECT (avtpcvfpay,
760 "Flushing seek performed while pipeline is PLAYING, "
761 "AVTP timestamps will be incorrect!");
762 }
763 break;
764 default:
765 break;
766 }
767
768 return GST_AVTP_BASE_PAYLOAD_CLASS (parent_class)->sink_event (pad, parent,
769 event);
770 }
771