1 /* GStreamer
2 * Copyright (C) 2018 Edward Hervey <edward@centricular.com>
3 * Copyright (C) 2018 Sebastian Dröge <sebastian@centricular.com>
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Library General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Library General Public License for more details.
14 *
15 * You should have received a copy of the GNU Library General Public
16 * License along with this library; if not, write to the
17 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
18 * Boston, MA 02110-1301, USA.
19 */
20
21 #ifdef HAVE_CONFIG_H
22 # include "config.h"
23 #endif
24
25 #include <string.h>
26 #include <gst/base/gstbytereader.h>
27 #include "video-anc.h"
28
29 /**
30 * SECTION:gstvideoanc
31 * @title: GstVideo Ancillary
32 * @short_description: Utilities for Ancillary data, VBI and Closed Caption
33 *
34 * A collection of objects and methods to assist with handling Ancillary Data
35 * present in Vertical Blanking Interval as well as Closed Caption.
36 */
37
38 #ifndef GST_DISABLE_GST_DEBUG
39 #define GST_CAT_DEFAULT ensure_debug_category()
static GstDebugCategory *
ensure_debug_category (void)
{
  /* Lazily create the "video-anc" debug category exactly once, in a
   * thread-safe way, using the g_once_init_enter/leave pair. */
  static gsize cat_gonce = 0;

  if (g_once_init_enter (&cat_gonce)) {
    gsize cat_done;

    /* Stash the category pointer in a gsize so the same variable can
     * double as the once-initialization flag */
    cat_done = (gsize) _gst_debug_category_new ("video-anc", 0,
        "Ancillary data, VBI and CC utilities");

    g_once_init_leave (&cat_gonce, cat_done);
  }

  return (GstDebugCategory *) cat_gonce;
}
56 #else
57 #define ensure_debug_category() /* NOOP */
58 #endif /* GST_DISABLE_GST_DEBUG */
59
struct _GstVideoVBIParser
{
  GstVideoInfo info;            /* format of the lines provided */
  guint8 *work_data;            /* Converted line in planar 16bit format */
  guint32 work_data_size;       /* Size of work_data in words: a word is
                                 * 1 byte in 8-bit mode and 2 bytes in
                                 * 16-bit mode (the allocation in
                                 * gst_video_vbi_parser_new() doubles it) */
  guint offset;                 /* Current offset (in words) in work_data */
  gboolean bit16;               /* Data is stored as 16bit if TRUE. Else 8bit(without parity) */
};
68
/* Register GstVideoVBIParser as a boxed GType (for language bindings),
 * mapping copy/free onto the functions below */
G_DEFINE_BOXED_TYPE (GstVideoVBIParser, gst_video_vbi_parser,
    (GBoxedCopyFunc) gst_video_vbi_parser_copy,
    (GBoxedFreeFunc) gst_video_vbi_parser_free);
72
73 GstVideoVBIParser *
gst_video_vbi_parser_copy(const GstVideoVBIParser * parser)74 gst_video_vbi_parser_copy (const GstVideoVBIParser * parser)
75 {
76 GstVideoVBIParser *res;
77
78 g_return_val_if_fail (parser != NULL, NULL);
79
80 res = gst_video_vbi_parser_new (GST_VIDEO_INFO_FORMAT (&parser->info),
81 parser->info.width);
82 if (res) {
83 memcpy (res->work_data, parser->work_data, parser->work_data_size);
84 }
85 return res;
86 }
87
/* Extract the next Ancillary packet from a line previously converted into
 * the planar 16-bit workspace (10 significant bits per word). See SMPTE
 * S291. Returns OK and fills @anc when a packet with a valid checksum is
 * found, DONE when the line is exhausted, ERROR when a packet claims more
 * data than the line holds. */
static GstVideoVBIParserResult
get_ancillary_16 (GstVideoVBIParser * parser, GstVideoAncillary * anc)
{
  gboolean found = FALSE;
  const guint16 *data = (const guint16 *) parser->work_data;

  g_return_val_if_fail (parser != NULL, GST_VIDEO_VBI_PARSER_RESULT_ERROR);
  g_return_val_if_fail (anc != NULL, GST_VIDEO_VBI_PARSER_RESULT_ERROR);

  /* 3 words are needed at least to detect what kind of packet we look at
   *
   * - ADF (SMPTE S291 3.2.1) in case of component ancillary format:
   *   0x000 0x3ff 0x3ff (followed by DID, SDID)
   * - ADF (SMPTE S291 3.2.2) in case of composite ancillary format:
   *   0x3fc DID SDID
   *
   * NOTE(review): offset and work_data_size are counted in 16-bit words
   * here, not bytes.
   */
  while (parser->offset + 3 < parser->work_data_size) {
    guint8 DID, SDID, DC;
    guint i = 0, j;
    guint checksum = 0;
    gboolean composite;

    /* Look for ADF */
    if (data[parser->offset] == 0x3fc) {
      /* composite */
      i += 1;
      composite = TRUE;
    } else if (data[parser->offset] == 0x000 &&
        data[parser->offset + 1] == 0x3ff &&
        data[parser->offset + 2] == 0x3ff) {
      /* component */
      i += 3;
      composite = FALSE;
    } else {
      /* No ADF at this position, try the next word */
      parser->offset += 1;
      continue;
    }

    /* TODO: Might want to check parity bits here but the checksum in
     * the end should really be enough */

    /* 4 words: DID, SDID, DC, [DATA], checksum */
    if (parser->offset + i + 4 >= parser->work_data_size)
      goto not_enough_data;

    /* We have a valid ADF. The payload lives in the low 8 bits of each
     * word; the top 2 bits carry parity. */
    DID = data[parser->offset + i] & 0xff;
    SDID = data[parser->offset + i + 1] & 0xff;
    DC = data[parser->offset + i + 2] & 0xff;
    i += 3;

    /* Check if we have enough room to get the User Data and checksum */
    if (parser->offset + i + DC + 1 >= parser->work_data_size)
      goto not_enough_data;

    /* We found a valid ANC \o/ */
    anc->DID = DID;
    anc->SDID_block_number = SDID;
    anc->data_count = DC;
    memset (anc->data, 0, 256);

    /* FIXME: We assume here the same data format for the user data as for the
     * DID/SDID: 10 bits with parity in the upper 2 bits. In theory some
     * standards could define this differently and even have full 10 bits of
     * user data but there does not seem to be a single such standard after
     * all these years.
     */

    /* i is at the beginning of the user data now */
    for (j = 0; j < anc->data_count; j++)
      anc->data[j] = data[parser->offset + i + j] & 0xff;
    i += DC;

    /* Checksum calculation SMPTE S291 3.2.1: sum the 9 LSBs of every
     * word after the ADF; bit 9 is the inverse of bit 8 */
    for (j = (composite ? 1 : 3); j < i; j++)
      checksum += data[parser->offset + j] & 0x1ff;
    checksum &= 0x1ff;
    checksum |= (!(checksum >> 8)) << 9;

    if (checksum != (data[parser->offset + i] & 0x3ff)) {
      GST_WARNING ("ADF checksum mismatch: expected 0x%03x, got 0x%03x",
          checksum, (data[parser->offset + i] & 0x3ff));
      /* Corrupt packet: resume scanning right after the ADF start */
      parser->offset += 1;
      continue;
    }

    i += 1;

    found = TRUE;
    parser->offset += i;
    break;
  }

  if (found)
    return GST_VIDEO_VBI_PARSER_RESULT_OK;

  return GST_VIDEO_VBI_PARSER_RESULT_DONE;

  /* ERRORS */
not_enough_data:
  {
    GST_WARNING ("ANC requires more User Data than available line size");
    /* Avoid further calls to go in the same error */
    parser->offset = parser->work_data_size;
    return GST_VIDEO_VBI_PARSER_RESULT_ERROR;
  }
}
196
/* See SMPTE S291. 8-bit variant of get_ancillary_16(): the workspace
 * holds plain bytes, the parity bits having already been dropped during
 * line conversion (hence the truncated ADF markers 0xfc / 0x00 0xff 0xff). */
static GstVideoVBIParserResult
get_ancillary_8 (GstVideoVBIParser * parser, GstVideoAncillary * anc)
{
  gboolean found = FALSE;
  const guint8 *data = parser->work_data;

  g_return_val_if_fail (parser != NULL, GST_VIDEO_VBI_PARSER_RESULT_ERROR);
  g_return_val_if_fail (anc != NULL, GST_VIDEO_VBI_PARSER_RESULT_ERROR);

  /* 3 words are needed at least to detect what kind of packet we look at
   *
   * - ADF (SMPTE S291 3.2.1) in case of component ancillary format:
   *   0x000 0x3ff 0x3ff (followed by DID, SDID)
   * - ADF (SMPTE S291 3.2.2) in case of composite ancillary format:
   *   0x3fc DID SDID
   */
  while (parser->offset + 3 < parser->work_data_size) {
    guint8 DID, SDID, DC;
    guint i = 0, j;
    gboolean composite;
    guint checksum = 0;

    /* Look for ADF */
    if (data[parser->offset] == 0xfc) {
      /* composite */
      composite = TRUE;
      i += 1;
    } else if (data[parser->offset] == 0x00 &&
        data[parser->offset + 1] == 0xff && data[parser->offset + 2] == 0xff) {
      /* component */
      composite = FALSE;
      i += 3;
    } else {
      /* No ADF at this position, try the next byte */
      parser->offset += 1;
      continue;
    }

    /* 4 words: DID, SDID, DC, [DATA], checksum */
    if (parser->offset + i + 4 >= parser->work_data_size)
      goto not_enough_data;

    /* We have a valid ADF */
    DID = data[parser->offset + i];
    SDID = data[parser->offset + i + 1];
    DC = data[parser->offset + i + 2];
    i += 3;

    /* Check if we have enough room to get the User Data and checksum */
    if (parser->offset + i + DC + 1 >= parser->work_data_size)
      goto not_enough_data;

    /* We found a valid ANC \o/ */
    anc->DID = DID;
    anc->SDID_block_number = SDID;
    anc->data_count = DC;
    memset (anc->data, 0, 256);

    /* i is at the beginning of the user data now */
    for (j = 0; j < anc->data_count; j++)
      anc->data[j] = data[parser->offset + i + j] & 0xff;
    i += DC;

    /* Checksum calculation SMPTE S291 3.2.1, truncated to the low 8 bits
     * since the parity/inverse bits were dropped with the upper bits */
    for (j = (composite ? 1 : 3); j < i; j++)
      checksum += data[parser->offset + j];
    checksum &= 0xff;

    if (checksum != data[parser->offset + i]) {
      GST_WARNING ("ADF checksum mismatch: expected 0x%02x, got 0x%02x",
          checksum, data[parser->offset + i]);
      /* Corrupt packet: resume scanning right after the ADF start */
      parser->offset += 1;
      continue;
    }

    i += 1;

    found = TRUE;
    parser->offset += i;
    break;
  }

  if (found)
    return GST_VIDEO_VBI_PARSER_RESULT_OK;

  return GST_VIDEO_VBI_PARSER_RESULT_DONE;

  /* ERRORS */
not_enough_data:
  {
    GST_WARNING ("ANC requires more User Data than available line size");
    /* Avoid further calls to go in the same error */
    parser->offset = parser->work_data_size;
    return GST_VIDEO_VBI_PARSER_RESULT_ERROR;
  }
}
293
294 /**
295 * gst_video_vbi_parser_get_ancillary:
296 * @parser: a #GstVideoVBIParser
 * @anc: (out caller-allocates): a #GstVideoAncillary to store the eventual ancillary data
298 *
299 * Parse the line provided previously by gst_video_vbi_parser_add_line().
300 *
301 * Since: 1.16
302 *
303 * Returns: %GST_VIDEO_VBI_PARSER_RESULT_OK if ancillary data was found and
304 * @anc was filled. %GST_VIDEO_VBI_PARSER_RESULT_DONE if there wasn't any
305 * data.
306 */
307
308 GstVideoVBIParserResult
gst_video_vbi_parser_get_ancillary(GstVideoVBIParser * parser,GstVideoAncillary * anc)309 gst_video_vbi_parser_get_ancillary (GstVideoVBIParser * parser,
310 GstVideoAncillary * anc)
311 {
312 g_return_val_if_fail (parser != NULL, GST_VIDEO_VBI_PARSER_RESULT_ERROR);
313 g_return_val_if_fail (anc != NULL, GST_VIDEO_VBI_PARSER_RESULT_ERROR);
314
315 if (parser->bit16)
316 return get_ancillary_16 (parser, anc);
317 return get_ancillary_8 (parser, anc);
318 }
319
320 /**
321 * gst_video_vbi_parser_new:
322 * @format: a #GstVideoFormat
323 * @pixel_width: The width in pixel to use
324 *
325 * Create a new #GstVideoVBIParser for the specified @format and @pixel_width.
326 *
327 * Since: 1.16
328 *
329 * Returns: The new #GstVideoVBIParser or %NULL if the @format and/or @pixel_width
330 * is not supported.
331 */
332 GstVideoVBIParser *
gst_video_vbi_parser_new(GstVideoFormat format,guint32 pixel_width)333 gst_video_vbi_parser_new (GstVideoFormat format, guint32 pixel_width)
334 {
335 GstVideoVBIParser *parser;
336
337 g_return_val_if_fail (pixel_width > 0, NULL);
338
339 switch (format) {
340 case GST_VIDEO_FORMAT_v210:
341 parser = g_new0 (GstVideoVBIParser, 1);
342 parser->bit16 = TRUE;
343 break;
344 case GST_VIDEO_FORMAT_UYVY:
345 parser = g_new0 (GstVideoVBIParser, 1);
346 parser->bit16 = FALSE;
347 break;
348 default:
349 GST_WARNING ("Format not supported by GstVideoVBIParser");
350 return NULL;
351 }
352
353 gst_video_info_init (&parser->info);
354 if (!gst_video_info_set_format (&parser->info, format, pixel_width, 1)) {
355 GST_ERROR ("Could not create GstVideoInfo");
356 g_free (parser);
357 return NULL;
358 }
359
360 /* Allocate the workspace which is going to be 2 * pixel_width big
361 * 2 : number of pixels per "component" (we only deal with 4:2:2)
362 * We use 1 or 2 bytes per pixel depending on whether we are internally
363 * working in 8 or 16bit */
364 parser->work_data_size = 2 * pixel_width;
365 if (parser->bit16)
366 parser->work_data = g_malloc0 (parser->work_data_size * 2);
367 else
368 parser->work_data = g_malloc0 (parser->work_data_size);
369 parser->offset = 0;
370
371 return parser;
372 }
373
374 /**
375 * gst_video_vbi_parser_free:
376 * @parser: a #GstVideoVBIParser
377 *
378 * Frees the @parser.
379 *
380 * Since: 1.16
381 */
382 void
gst_video_vbi_parser_free(GstVideoVBIParser * parser)383 gst_video_vbi_parser_free (GstVideoVBIParser * parser)
384 {
385 g_return_if_fail (parser != NULL);
386
387 g_free (parser->work_data);
388 g_free (parser);
389 }
390
391 static void
convert_line_from_uyvy(GstVideoVBIParser * parser,const guint8 * data)392 convert_line_from_uyvy (GstVideoVBIParser * parser, const guint8 * data)
393 {
394 guint i;
395 guint8 *y = parser->work_data;
396
397 /* Data is stored differently in SD, making no distinction between Y and UV */
398 if (parser->info.width < 1280) {
399 for (i = 0; i < parser->info.width - 3; i += 4) {
400 *y++ = data[(i / 4) * 4 + 0];
401 *y++ = data[(i / 4) * 4 + 1];
402 *y++ = data[(i / 4) * 4 + 2];
403 *y++ = data[(i / 4) * 4 + 3];
404 }
405 } else {
406 guint8 *uv = y + parser->info.width;
407
408 for (i = 0; i < parser->info.width - 3; i += 4) {
409 *uv++ = data[(i / 4) * 4 + 0];
410 *y++ = data[(i / 4) * 4 + 1];
411 *uv++ = data[(i / 4) * 4 + 2];
412 *y++ = data[(i / 4) * 4 + 3];
413 }
414 }
415 GST_MEMDUMP ("Converted line", parser->work_data, 128);
416 }
417
418 static void
gst_info_dump_mem16_line(gchar * linebuf,gsize linebuf_size,const guint16 * mem,gsize mem_offset,gsize mem_size)419 gst_info_dump_mem16_line (gchar * linebuf, gsize linebuf_size,
420 const guint16 * mem, gsize mem_offset, gsize mem_size)
421 {
422 gchar hexstr[50], digitstr[6];
423
424 if (mem_size > 8)
425 mem_size = 8;
426
427 hexstr[0] = '\0';
428
429 if (mem != NULL) {
430 guint i = 0;
431
432 mem += mem_offset;
433 while (i < mem_size) {
434 g_snprintf (digitstr, sizeof (digitstr), "%04x ", mem[i]);
435 g_strlcat (hexstr, digitstr, sizeof (hexstr));
436 ++i;
437 }
438 }
439
440 g_snprintf (linebuf, linebuf_size, "%08x: %-48.48s",
441 (guint) mem_offset, hexstr);
442 }
443
444 static void
convert_line_from_v210(GstVideoVBIParser * parser,const guint8 * data)445 convert_line_from_v210 (GstVideoVBIParser * parser, const guint8 * data)
446 {
447 guint i;
448 guint16 *y = (guint16 *) parser->work_data;
449 guint32 a, b, c, d;
450
451 /* Data is stored differently in SD, making no distinction between Y and UV */
452 if (parser->info.width < 1280) {
453 /* Convert the line */
454 for (i = 0; i < parser->info.width - 5; i += 6) {
455 a = GST_READ_UINT32_LE (data + (i / 6) * 16 + 0);
456 b = GST_READ_UINT32_LE (data + (i / 6) * 16 + 4);
457 c = GST_READ_UINT32_LE (data + (i / 6) * 16 + 8);
458 d = GST_READ_UINT32_LE (data + (i / 6) * 16 + 12);
459
460 *y++ = (a >> 0) & 0x3ff;
461 *y++ = (a >> 10) & 0x3ff;
462 *y++ = (a >> 20) & 0x3ff;
463 *y++ = (b >> 0) & 0x3ff;
464
465 *y++ = (b >> 10) & 0x3ff;
466 *y++ = (b >> 20) & 0x3ff;
467 *y++ = (c >> 0) & 0x3ff;
468 *y++ = (c >> 10) & 0x3ff;
469
470 *y++ = (c >> 20) & 0x3ff;
471 *y++ = (d >> 0) & 0x3ff;
472 *y++ = (d >> 10) & 0x3ff;
473 *y++ = (d >> 20) & 0x3ff;
474 }
475 } else {
476 guint16 *uv = y + parser->info.width;
477
478 /* Convert the line */
479 for (i = 0; i < parser->info.width - 5; i += 6) {
480 a = GST_READ_UINT32_LE (data + (i / 6) * 16 + 0);
481 b = GST_READ_UINT32_LE (data + (i / 6) * 16 + 4);
482 c = GST_READ_UINT32_LE (data + (i / 6) * 16 + 8);
483 d = GST_READ_UINT32_LE (data + (i / 6) * 16 + 12);
484
485 *uv++ = (a >> 0) & 0x3ff;
486 *y++ = (a >> 10) & 0x3ff;
487 *uv++ = (a >> 20) & 0x3ff;
488 *y++ = (b >> 0) & 0x3ff;
489
490 *uv++ = (b >> 10) & 0x3ff;
491 *y++ = (b >> 20) & 0x3ff;
492 *uv++ = (c >> 0) & 0x3ff;
493 *y++ = (c >> 10) & 0x3ff;
494
495 *uv++ = (c >> 20) & 0x3ff;
496 *y++ = (d >> 0) & 0x3ff;
497 *uv++ = (d >> 10) & 0x3ff;
498 *y++ = (d >> 20) & 0x3ff;
499 }
500 }
501
502 if (0) {
503 guint off = 0;
504 gsize length = parser->info.width * 2;
505
506 GST_TRACE ("--------"
507 "-------------------------------------------------------------------");
508
509 while (off < length) {
510 gchar buf[128];
511
512 /* gst_info_dump_mem_line will process 16 bytes (8 16bit chunks) at most */
513 gst_info_dump_mem16_line (buf, sizeof (buf),
514 (guint16 *) parser->work_data, off, length - off);
515 GST_TRACE ("%s", buf);
516 off += 8;
517 }
518 GST_TRACE ("--------"
519 "-------------------------------------------------------------------");
520 }
521 }
522
523 /**
524 * gst_video_vbi_parser_add_line:
525 * @parser: a #GstVideoVBIParser
526 * @data: (array) (transfer none): The line of data to parse
527 *
528 * Provide a new line of data to the @parser. Call gst_video_vbi_parser_get_ancillary()
529 * to get the Ancillary data that might be present on that line.
530 *
531 * Since: 1.16
532 */
533 void
gst_video_vbi_parser_add_line(GstVideoVBIParser * parser,const guint8 * data)534 gst_video_vbi_parser_add_line (GstVideoVBIParser * parser, const guint8 * data)
535 {
536 g_return_if_fail (parser != NULL);
537 g_return_if_fail (data != NULL);
538
539 /* Reset offset */
540 parser->offset = 0;
541
542 switch (GST_VIDEO_INFO_FORMAT (&parser->info)) {
543 case GST_VIDEO_FORMAT_v210:
544 convert_line_from_v210 (parser, data);
545 break;
546 case GST_VIDEO_FORMAT_UYVY:
547 convert_line_from_uyvy (parser, data);
548 break;
549 default:
550 GST_ERROR ("UNSUPPORTED FORMAT !");
551 g_assert_not_reached ();
552 break;
553 }
554 }
555
struct _GstVideoVBIEncoder
{
  GstVideoInfo info;            /* format of the lines provided */
  guint8 *work_data;            /* Converted line in planar 16bit format */
  guint32 work_data_size;       /* Size of work_data in words: a word is
                                 * 1 byte in 8-bit mode and 2 bytes in
                                 * 16-bit mode (the allocation in
                                 * gst_video_vbi_encoder_new() doubles it) */
  guint offset;                 /* Current offset (in words) in work_data */
  gboolean bit16;               /* Data is stored as 16bit if TRUE. Else 8bit(without parity) */
};
564
/* Register GstVideoVBIEncoder as a boxed GType (for language bindings),
 * mapping copy/free onto the functions below */
G_DEFINE_BOXED_TYPE (GstVideoVBIEncoder, gst_video_vbi_encoder,
    (GBoxedCopyFunc) gst_video_vbi_encoder_copy,
    (GBoxedFreeFunc) gst_video_vbi_encoder_free);
568
569 GstVideoVBIEncoder *
gst_video_vbi_encoder_copy(const GstVideoVBIEncoder * encoder)570 gst_video_vbi_encoder_copy (const GstVideoVBIEncoder * encoder)
571 {
572 GstVideoVBIEncoder *res;
573
574 g_return_val_if_fail (encoder != NULL, NULL);
575
576 res = gst_video_vbi_encoder_new (GST_VIDEO_INFO_FORMAT (&encoder->info),
577 encoder->info.width);
578 if (res) {
579 memcpy (res->work_data, encoder->work_data, encoder->work_data_size);
580 }
581 return res;
582 }
583
584 /**
585 * gst_video_vbi_encoder_free:
586 * @encoder: a #GstVideoVBIEncoder
587 *
588 * Frees the @encoder.
589 *
590 * Since: 1.16
591 */
592 void
gst_video_vbi_encoder_free(GstVideoVBIEncoder * encoder)593 gst_video_vbi_encoder_free (GstVideoVBIEncoder * encoder)
594 {
595 g_return_if_fail (encoder != NULL);
596
597 g_free (encoder->work_data);
598 g_free (encoder);
599 }
600
601 /**
602 * gst_video_vbi_encoder_new:
603 * @format: a #GstVideoFormat
604 * @pixel_width: The width in pixel to use
605 *
606 * Create a new #GstVideoVBIEncoder for the specified @format and @pixel_width.
607 *
608 * Since: 1.16
609 *
610 * Returns: The new #GstVideoVBIEncoder or %NULL if the @format and/or @pixel_width
611 * is not supported.
612 */
613 GstVideoVBIEncoder *
gst_video_vbi_encoder_new(GstVideoFormat format,guint32 pixel_width)614 gst_video_vbi_encoder_new (GstVideoFormat format, guint32 pixel_width)
615 {
616 GstVideoVBIEncoder *encoder;
617
618 g_return_val_if_fail (pixel_width > 0, NULL);
619
620 switch (format) {
621 case GST_VIDEO_FORMAT_v210:
622 encoder = g_new0 (GstVideoVBIEncoder, 1);
623 encoder->bit16 = TRUE;
624 break;
625 case GST_VIDEO_FORMAT_UYVY:
626 encoder = g_new0 (GstVideoVBIEncoder, 1);
627 encoder->bit16 = FALSE;
628 break;
629 default:
630 GST_WARNING ("Format not supported by GstVideoVBIEncoder");
631 return NULL;
632 }
633
634 gst_video_info_init (&encoder->info);
635 if (!gst_video_info_set_format (&encoder->info, format, pixel_width, 1)) {
636 GST_ERROR ("Could not create GstVideoInfo");
637 g_free (encoder);
638 return NULL;
639 }
640
641 /* Allocate the workspace which is going to be 2 * pixel_width big
642 * 2 : number of pixels per "component" (we only deal with 4:2:2)
643 * We use 1 or 2 bytes per pixel depending on whether we are internally
644 * working in 8 or 16bit */
645 encoder->work_data_size = 2 * pixel_width;
646 if (encoder->bit16)
647 encoder->work_data = g_malloc0 (encoder->work_data_size * 2);
648 else
649 encoder->work_data = g_malloc0 (encoder->work_data_size);
650 encoder->offset = 0;
651
652 return encoder;
653 }
654
#if G_GNUC_CHECK_VERSION(3,4)
/* Returns 1 if @x has an odd number of set bits, 0 otherwise
 * (GCC builtin, single instruction on most targets) */
static inline guint
parity (guint8 x)
{
  return __builtin_parity (x);
}
#else
/* Portable fallback: count the set bits and keep the lowest bit of
 * the count (1 = odd parity, 0 = even parity) */
static guint
parity (guint8 x)
{
  guint count = 0;

  while (x) {
    count += x & 1;
    x >>= 1;
  }

  return count & 1;
}
#endif
675
/* Store @val and its SMPTE S291 parity bits in the 10-bit word at @buf:
 * bit 8 makes bits 0-8 even parity, bit 9 is the complement of bit 8.
 * Note: no trailing semicolon after G_STMT_END -- the caller supplies it,
 * which keeps the do-while(0) idiom safe inside if/else bodies. */
#define SET_WITH_PARITY(buf, val) G_STMT_START { \
  *(buf) = val; \
  if (parity (val)) \
    *(buf) |= 0x100; \
  else \
    *(buf) |= 0x200; \
} G_STMT_END
684
685 /**
686 * gst_video_vbi_encoder_add_ancillary:
687 * @encoder: a #GstVideoVBIEncoder
688 * @composite: %TRUE if composite ADF should be created, component otherwise
689 * @DID: The Data Identifier
690 * @SDID_block_number: The Secondary Data Identifier (if type 2) or the Data
691 * Block Number (if type 1)
692 * @data_count: The amount of data (in bytes) in @data (max 255 bytes)
693 * @data: (array length=data_count): The user data content of the Ancillary packet.
694 * Does not contain the ADF, DID, SDID nor CS.
695 *
696 * Stores Video Ancillary data, according to SMPTE-291M specification.
697 *
698 * Note that the contents of the data are always read as 8bit data (i.e. do not contain
699 * the parity check bits).
700 *
701 * Since: 1.16
702 *
703 * Returns: %TRUE if enough space was left in the current line, %FALSE
704 * otherwise.
705 */
gboolean
gst_video_vbi_encoder_add_ancillary (GstVideoVBIEncoder * encoder,
    gboolean composite, guint8 DID, guint8 SDID_block_number,
    const guint8 * data, guint data_count)
{
  g_return_val_if_fail (encoder != NULL, FALSE);
  g_return_val_if_fail (data != NULL, FALSE);
  g_return_val_if_fail (data_count < 256, FALSE);

  /* Doesn't fit into this line anymore? Packet overhead is the ADF
   * (1 word composite / 3 words component) plus DID, SDID, DC and the
   * checksum. NOTE(review): offset and work_data_size count words, not
   * bytes, matching the indexing below. */
  if (encoder->offset + data_count + (composite ? 5 : 7) >
      encoder->work_data_size)
    return FALSE;

  if (encoder->bit16) {
    /* 10-bit storage: parity bits go into bits 8/9 of each word */
    guint16 *work_data = ((guint16 *) encoder->work_data) + encoder->offset;
    guint i = 0, j;
    guint checksum = 0;

    /* Write ADF */
    if (composite) {
      work_data[i] = 0x3fc;
      i += 1;
    } else {
      work_data[i] = 0x000;
      work_data[i + 1] = 0x3ff;
      work_data[i + 2] = 0x3ff;
      i += 3;
    }

    SET_WITH_PARITY (&work_data[i], DID);
    SET_WITH_PARITY (&work_data[i + 1], SDID_block_number);
    SET_WITH_PARITY (&work_data[i + 2], data_count);
    i += 3;

    for (j = 0; j < data_count; j++)
      SET_WITH_PARITY (&work_data[i + j], data[j]);
    i += data_count;

    /* Checksum over everything after the ADF (SMPTE S291 3.2.1):
     * sum of the 9 LSBs, bit 9 = inverse of bit 8 */
    for (j = (composite ? 1 : 3); j < i; j++)
      checksum += work_data[j];
    checksum &= 0x1ff;
    checksum |= (!(checksum >> 8)) << 9;

    work_data[i] = checksum;
    i += 1;

    encoder->offset += i;
  } else {
    /* 8-bit storage: parity bits are simply dropped */
    guint8 *work_data = ((guint8 *) encoder->work_data) + encoder->offset;
    guint i = 0, j;
    guint checksum = 0;

    /* Write ADF (truncated to 8 bits: 0xfc / 0x00 0xff 0xff) */
    if (composite) {
      work_data[i] = 0xfc;
      i += 1;
    } else {
      work_data[i] = 0x00;
      work_data[i + 1] = 0xff;
      work_data[i + 2] = 0xff;
      i += 3;
    }

    work_data[i] = DID;
    work_data[i + 1] = SDID_block_number;
    work_data[i + 2] = data_count;
    i += 3;

    for (j = 0; j < data_count; j++)
      work_data[i + j] = data[j];
    i += data_count;

    /* 8-bit truncated checksum over everything after the ADF */
    for (j = (composite ? 1 : 3); j < i; j++)
      checksum += work_data[j];
    checksum &= 0xff;

    work_data[i] = checksum;
    i += 1;

    encoder->offset += i;
  }

  return TRUE;
}
791
792 static void
convert_line_to_v210(GstVideoVBIEncoder * encoder,guint8 * data)793 convert_line_to_v210 (GstVideoVBIEncoder * encoder, guint8 * data)
794 {
795 guint i;
796 const guint16 *y = (const guint16 *) encoder->work_data;
797 guint32 a, b, c, d;
798
799 /* Data is stored differently in SD, making no distinction between Y and UV */
800 if (encoder->info.width < 1280) {
801 /* Convert the line */
802 for (i = 0; i < encoder->info.width - 5; i += 6) {
803 a = ((y[0] & 0x3ff) << 0)
804 | ((y[1] & 0x3ff) << 10)
805 | ((y[2] & 0x3ff) << 20);
806 y += 3;
807
808 b = ((y[0] & 0x3ff) << 0)
809 | ((y[1] & 0x3ff) << 10)
810 | ((y[2] & 0x3ff) << 20);
811 y += 3;
812
813 c = ((y[0] & 0x3ff) << 0)
814 | ((y[1] & 0x3ff) << 10)
815 | ((y[2] & 0x3ff) << 20);
816 y += 3;
817
818 d = ((y[0] & 0x3ff) << 0)
819 | ((y[1] & 0x3ff) << 10)
820 | ((y[2] & 0x3ff) << 20);
821 y += 3;
822
823 GST_WRITE_UINT32_LE (data + (i / 6) * 16 + 0, a);
824 GST_WRITE_UINT32_LE (data + (i / 6) * 16 + 4, b);
825 GST_WRITE_UINT32_LE (data + (i / 6) * 16 + 8, c);
826 GST_WRITE_UINT32_LE (data + (i / 6) * 16 + 12, d);
827 }
828 } else {
829 const guint16 *uv = y + encoder->info.width;
830
831 /* Convert the line */
832 for (i = 0; i < encoder->info.width - 5; i += 6) {
833 a = ((uv[0] & 0x3ff) << 0)
834 | ((y[0] & 0x3ff) << 10)
835 | ((uv[1] & 0x3ff) << 20);
836 uv += 2;
837 y++;
838
839 b = ((y[0] & 0x3ff) << 0)
840 | ((uv[0] & 0x3ff) << 10)
841 | ((y[1] & 0x3ff) << 20);
842 y += 2;
843 uv++;
844
845 c = ((uv[0] & 0x3ff) << 0)
846 | ((y[0] & 0x3ff) << 10)
847 | ((uv[1] & 0x3ff) << 20);
848 uv += 2;
849 y++;
850
851 d = ((y[0] & 0x3ff) << 0)
852 | ((uv[0] & 0x3ff) << 10)
853 | ((y[1] & 0x3ff) << 20);
854 y += 2;
855 uv++;
856
857 GST_WRITE_UINT32_LE (data + (i / 6) * 16 + 0, a);
858 GST_WRITE_UINT32_LE (data + (i / 6) * 16 + 4, b);
859 GST_WRITE_UINT32_LE (data + (i / 6) * 16 + 8, c);
860 GST_WRITE_UINT32_LE (data + (i / 6) * 16 + 12, d);
861 }
862 }
863 }
864
865 static void
convert_line_to_uyvy(GstVideoVBIEncoder * encoder,guint8 * data)866 convert_line_to_uyvy (GstVideoVBIEncoder * encoder, guint8 * data)
867 {
868 guint i;
869 const guint8 *y = encoder->work_data;
870
871 /* Data is stored differently in SD, making no distinction between Y and UV */
872 if (encoder->info.width < 1280) {
873 for (i = 0; i < encoder->info.width - 3; i += 4) {
874 data[(i / 4) * 4 + 0] = *y++;
875 data[(i / 4) * 4 + 1] = *y++;
876 data[(i / 4) * 4 + 2] = *y++;
877 data[(i / 4) * 4 + 3] = *y++;
878 }
879 } else {
880 const guint8 *uv = y + encoder->info.width;
881
882 for (i = 0; i < encoder->info.width - 3; i += 4) {
883 data[(i / 4) * 4 + 0] = *uv++;
884 data[(i / 4) * 4 + 1] = *y++;
885 data[(i / 4) * 4 + 2] = *uv++;
886 data[(i / 4) * 4 + 3] = *y++;
887 }
888 }
889 }
890
/* Render the ancillary packets accumulated via
 * gst_video_vbi_encoder_add_ancillary() into @data (one video line in the
 * encoder's configured format) and reset the encoder for the next line.
 * @data must be large enough for one full line; it is left untouched if
 * nothing was added. */
void
gst_video_vbi_encoder_write_line (GstVideoVBIEncoder * encoder, guint8 * data)
{
  g_return_if_fail (encoder != NULL);
  g_return_if_fail (data != NULL);

  /* nothing to write? just exit early */
  if (!encoder->offset)
    return;

  switch (GST_VIDEO_INFO_FORMAT (&encoder->info)) {
    case GST_VIDEO_FORMAT_v210:
      convert_line_to_v210 (encoder, data);
      break;
    case GST_VIDEO_FORMAT_UYVY:
      convert_line_to_uyvy (encoder, data);
      break;
    default:
      /* The constructor only accepts the two formats above */
      GST_ERROR ("UNSUPPORTED FORMAT !");
      g_assert_not_reached ();
      break;
  }

  /* Reset and clear the workspace (2 bytes per word in 16-bit mode) */
  encoder->offset = 0;
  memset (encoder->work_data, 0,
      encoder->work_data_size * (encoder->bit16 ? 2 : 1));
}
918
919 /* Closed Caption Meta implementation *******************************************/
920
/* Returns the GType of the caption meta API, registering it on first
 * use (thread-safe via g_once_init_*). No tags are associated with it. */
GType
gst_video_caption_meta_api_get_type (void)
{
  static GType type = 0;

  if (g_once_init_enter (&type)) {
    static const gchar *tags[] = { NULL };
    GType _type = gst_meta_api_type_register ("GstVideoCaptionMetaAPI", tags);
    GST_INFO ("registering");
    g_once_init_leave (&type, _type);
  }
  return type;
}
934
935
936 static gboolean
gst_video_caption_meta_transform(GstBuffer * dest,GstMeta * meta,GstBuffer * buffer,GQuark type,gpointer data)937 gst_video_caption_meta_transform (GstBuffer * dest, GstMeta * meta,
938 GstBuffer * buffer, GQuark type, gpointer data)
939 {
940 GstVideoCaptionMeta *dmeta, *smeta;
941
942 /* We always copy over the caption meta */
943 smeta = (GstVideoCaptionMeta *) meta;
944
945 GST_DEBUG ("copy caption metadata");
946 dmeta =
947 gst_buffer_add_video_caption_meta (dest, smeta->caption_type,
948 smeta->data, smeta->size);
949 if (!dmeta)
950 return FALSE;
951
952 return TRUE;
953 }
954
955 static gboolean
gst_video_caption_meta_init(GstMeta * meta,gpointer params,GstBuffer * buffer)956 gst_video_caption_meta_init (GstMeta * meta, gpointer params,
957 GstBuffer * buffer)
958 {
959 GstVideoCaptionMeta *emeta = (GstVideoCaptionMeta *) meta;
960
961 emeta->caption_type = GST_VIDEO_CAPTION_TYPE_UNKNOWN;
962 emeta->data = NULL;
963 emeta->size = 0;
964
965 return TRUE;
966 }
967
968 static void
gst_video_caption_meta_free(GstMeta * meta,GstBuffer * buffer)969 gst_video_caption_meta_free (GstMeta * meta, GstBuffer * buffer)
970 {
971 GstVideoCaptionMeta *emeta = (GstVideoCaptionMeta *) meta;
972
973 g_free (emeta->data);
974 }
975
/* Returns the GstMetaInfo for GstVideoCaptionMeta, registering it on
 * first use (thread-safe via g_once_init_*; the casts drop const for
 * the once-flag pointer only). */
const GstMetaInfo *
gst_video_caption_meta_get_info (void)
{
  static const GstMetaInfo *meta_info = NULL;

  if (g_once_init_enter ((GstMetaInfo **) & meta_info)) {
    const GstMetaInfo *mi = gst_meta_register (GST_VIDEO_CAPTION_META_API_TYPE,
        "GstVideoCaptionMeta",
        sizeof (GstVideoCaptionMeta),
        gst_video_caption_meta_init,
        gst_video_caption_meta_free,
        gst_video_caption_meta_transform);
    g_once_init_leave ((GstMetaInfo **) & meta_info, (GstMetaInfo *) mi);
  }
  return meta_info;
}
992
993 /**
994 * gst_buffer_add_video_caption_meta:
995 * @buffer: a #GstBuffer
996 * @caption_type: The type of Closed Caption to add
997 * @data: (array length=size) (transfer none): The Closed Caption data
998 * @size: The size of @data in bytes
999 *
1000 * Attaches #GstVideoCaptionMeta metadata to @buffer with the given
1001 * parameters.
1002 *
1003 * Returns: (transfer none): the #GstVideoCaptionMeta on @buffer.
1004 *
1005 * Since: 1.16
1006 */
GstVideoCaptionMeta *
gst_buffer_add_video_caption_meta (GstBuffer * buffer,
    GstVideoCaptionType caption_type, const guint8 * data, gsize size)
{
  GstVideoCaptionMeta *meta;

  g_return_val_if_fail (GST_IS_BUFFER (buffer), NULL);
  g_return_val_if_fail (data != NULL, NULL);
  g_return_val_if_fail (size > 0, NULL);

  /* Only the four well-known caption types may be attached */
  switch (caption_type) {
    case GST_VIDEO_CAPTION_TYPE_CEA608_RAW:
    case GST_VIDEO_CAPTION_TYPE_CEA608_S334_1A:
    case GST_VIDEO_CAPTION_TYPE_CEA708_RAW:
    case GST_VIDEO_CAPTION_TYPE_CEA708_CDP:
      break;
    default:
      GST_ERROR ("Unknown caption type !");
      return NULL;
  }
  /* FIXME : Add checks for content ? */

  meta = (GstVideoCaptionMeta *) gst_buffer_add_meta (buffer,
      GST_VIDEO_CAPTION_META_INFO, NULL);
  g_return_val_if_fail (meta != NULL, NULL);

  meta->caption_type = caption_type;
  /* The meta owns a private copy of @data; it is released by
   * gst_video_caption_meta_free() */
  meta->data = g_memdup2 (data, size);
  meta->size = size;

  return meta;
}
1039
1040 /**
1041 * gst_video_caption_type_from_caps:
1042 * @caps: Fixed #GstCaps to parse
1043 *
1044 * Parses fixed Closed Caption #GstCaps and returns the corresponding caption
1045 * type, or %GST_VIDEO_CAPTION_TYPE_UNKNOWN.
1046 *
1047 * Returns: #GstVideoCaptionType.
1048 *
1049 * Since: 1.16
1050 */
1051 GstVideoCaptionType
gst_video_caption_type_from_caps(const GstCaps * caps)1052 gst_video_caption_type_from_caps (const GstCaps * caps)
1053 {
1054 const GstStructure *s;
1055 const gchar *format;
1056
1057 g_return_val_if_fail (gst_caps_is_fixed (caps),
1058 GST_VIDEO_CAPTION_TYPE_UNKNOWN);
1059
1060 s = gst_caps_get_structure (caps, 0);
1061 g_return_val_if_fail (s != NULL, GST_VIDEO_CAPTION_TYPE_UNKNOWN);
1062
1063 format = gst_structure_get_string (s, "format");
1064 if (gst_structure_has_name (s, "closedcaption/x-cea-608")) {
1065 if (g_strcmp0 (format, "raw") == 0) {
1066 return GST_VIDEO_CAPTION_TYPE_CEA608_RAW;
1067 } else if (g_strcmp0 (format, "s334-1a") == 0) {
1068 return GST_VIDEO_CAPTION_TYPE_CEA608_S334_1A;
1069 }
1070 } else if (gst_structure_has_name (s, "closedcaption/x-cea-708")) {
1071 if (g_strcmp0 (format, "cc_data") == 0) {
1072 return GST_VIDEO_CAPTION_TYPE_CEA708_RAW;
1073 } else if (g_strcmp0 (format, "cdp") == 0) {
1074 return GST_VIDEO_CAPTION_TYPE_CEA708_CDP;
1075 }
1076 }
1077 return GST_VIDEO_CAPTION_TYPE_UNKNOWN;
1078 }
1079
1080 /**
1081 * gst_video_caption_type_to_caps:
1082 * @type: #GstVideoCaptionType
1083 *
1084 * Creates new caps corresponding to @type.
1085 *
1086 * Returns: (transfer full): new #GstCaps
1087 *
1088 * Since: 1.16
1089 */
1090 GstCaps *
gst_video_caption_type_to_caps(GstVideoCaptionType type)1091 gst_video_caption_type_to_caps (GstVideoCaptionType type)
1092 {
1093 GstCaps *caption_caps;
1094
1095 g_return_val_if_fail (type != GST_VIDEO_CAPTION_TYPE_UNKNOWN, NULL);
1096
1097 switch (type) {
1098 case GST_VIDEO_CAPTION_TYPE_CEA608_RAW:
1099 caption_caps = gst_caps_new_simple ("closedcaption/x-cea-608",
1100 "format", G_TYPE_STRING, "raw", NULL);
1101 break;
1102 case GST_VIDEO_CAPTION_TYPE_CEA608_S334_1A:
1103 caption_caps = gst_caps_new_simple ("closedcaption/x-cea-608",
1104 "format", G_TYPE_STRING, "s334-1a", NULL);
1105 break;
1106 case GST_VIDEO_CAPTION_TYPE_CEA708_RAW:
1107 caption_caps = gst_caps_new_simple ("closedcaption/x-cea-708",
1108 "format", G_TYPE_STRING, "cc_data", NULL);
1109 break;
1110 case GST_VIDEO_CAPTION_TYPE_CEA708_CDP:
1111 caption_caps = gst_caps_new_simple ("closedcaption/x-cea-708",
1112 "format", G_TYPE_STRING, "cdp", NULL);
1113 break;
1114 default:
1115 g_return_val_if_reached (NULL);
1116 break;
1117 }
1118
1119 return caption_caps;
1120 }
1121
/* Active Format Description (AFD) Meta implementation */
1123
1124 GType
gst_video_afd_meta_api_get_type(void)1125 gst_video_afd_meta_api_get_type (void)
1126 {
1127 static GType type = 0;
1128
1129 if (g_once_init_enter (&type)) {
1130 static const gchar *tags[] = {
1131 GST_META_TAG_VIDEO_SIZE_STR,
1132 GST_META_TAG_VIDEO_ORIENTATION_STR,
1133 GST_META_TAG_VIDEO_STR,
1134 NULL
1135 };
1136 GType _type = gst_meta_api_type_register ("GstVideoAFDMetaAPI", tags);
1137 g_once_init_leave (&type, _type);
1138 }
1139 return type;
1140 }
1141
1142 static gboolean
gst_video_afd_meta_init(GstMeta * meta,gpointer params,GstBuffer * buffer)1143 gst_video_afd_meta_init (GstMeta * meta, gpointer params, GstBuffer * buffer)
1144 {
1145 GstVideoAFDMeta *emeta = (GstVideoAFDMeta *) meta;
1146
1147 emeta->field = 0;
1148 emeta->spec = GST_VIDEO_AFD_SPEC_ATSC_A53;
1149 emeta->afd = GST_VIDEO_AFD_UNAVAILABLE;
1150
1151 return TRUE;
1152 }
1153
1154 static gboolean
gst_video_afd_meta_transform(GstBuffer * dest,GstMeta * meta,GstBuffer * buffer,GQuark type,gpointer data)1155 gst_video_afd_meta_transform (GstBuffer * dest, GstMeta * meta,
1156 GstBuffer * buffer, GQuark type, gpointer data)
1157 {
1158 GstVideoAFDMeta *smeta = (GstVideoAFDMeta *) meta;
1159
1160 if (GST_META_TRANSFORM_IS_COPY (type)) {
1161 GST_DEBUG ("copy AFD metadata");
1162 gst_buffer_add_video_afd_meta (dest, smeta->field, smeta->spec, smeta->afd);
1163 return TRUE;
1164 } else if (GST_VIDEO_META_TRANSFORM_IS_SCALE (type)) {
1165 GstVideoMetaTransform *trans = data;
1166 gdouble diff;
1167 gint ow, oh, nw, nh;
1168 gint opn, opd, npn, npd;
1169
1170 ow = GST_VIDEO_INFO_WIDTH (trans->in_info);
1171 nw = GST_VIDEO_INFO_WIDTH (trans->out_info);
1172 oh = GST_VIDEO_INFO_HEIGHT (trans->in_info);
1173 nh = GST_VIDEO_INFO_HEIGHT (trans->out_info);
1174 opn = GST_VIDEO_INFO_PAR_N (trans->in_info);
1175 opd = GST_VIDEO_INFO_PAR_D (trans->in_info);
1176 npn = GST_VIDEO_INFO_PAR_N (trans->out_info);
1177 npd = GST_VIDEO_INFO_PAR_D (trans->out_info);
1178
1179 /* if the aspect ratio stays the same we can copy the meta, otherwise
1180 * we can't know if the aspect ratio was changed or black borders were
1181 * introduced. Both would invalidate the AFD meta */
1182
1183 diff =
1184 ABS (((gdouble) ow / (gdouble) oh) * ((gdouble) opn / (gdouble) opd) -
1185 ((gdouble) nw / (gdouble) nh) * ((gdouble) npn / (gdouble) npd));
1186 if (diff < 0.0001) {
1187 GST_DEBUG ("copying AFD metadata, aspect ratio did not change");
1188 gst_buffer_add_video_afd_meta (dest, smeta->field, smeta->spec,
1189 smeta->afd);
1190 return TRUE;
1191 } else {
1192 return FALSE;
1193 }
1194 } else {
1195 /* return FALSE, if transform type is not supported */
1196 return FALSE;
1197 }
1198
1199 }
1200
1201 const GstMetaInfo *
gst_video_afd_meta_get_info(void)1202 gst_video_afd_meta_get_info (void)
1203 {
1204 static const GstMetaInfo *meta_info = NULL;
1205
1206 if (g_once_init_enter ((GstMetaInfo **) & meta_info)) {
1207 const GstMetaInfo *mi = gst_meta_register (GST_VIDEO_AFD_META_API_TYPE,
1208 "GstVideoAFDMeta",
1209 sizeof (GstVideoAFDMeta),
1210 gst_video_afd_meta_init,
1211 NULL,
1212 gst_video_afd_meta_transform);
1213 g_once_init_leave ((GstMetaInfo **) & meta_info, (GstMetaInfo *) mi);
1214 }
1215 return meta_info;
1216 }
1217
1218 /**
1219 * gst_buffer_add_video_afd_meta:
1220 * @buffer: a #GstBuffer
1221 * @field: 0 for progressive or field 1 and 1 for field 2
1222 * @spec: #GstVideoAFDSpec that applies to AFD value
1223 * @afd: #GstVideoAFDValue AFD enumeration
1224 *
1225 * Attaches #GstVideoAFDMeta metadata to @buffer with the given
1226 * parameters.
1227 *
1228 * Returns: (transfer none): the #GstVideoAFDMeta on @buffer.
1229 *
1230 * Since: 1.18
1231 */
1232 GstVideoAFDMeta *
gst_buffer_add_video_afd_meta(GstBuffer * buffer,guint8 field,GstVideoAFDSpec spec,GstVideoAFDValue afd)1233 gst_buffer_add_video_afd_meta (GstBuffer * buffer, guint8 field,
1234 GstVideoAFDSpec spec, GstVideoAFDValue afd)
1235 {
1236 GstVideoAFDMeta *meta;
1237 gint8 afd_data = (gint8) afd;
1238 g_return_val_if_fail (GST_IS_BUFFER (buffer), NULL);
1239 g_return_val_if_fail (field <= 1, NULL);
1240 g_return_val_if_fail ((guint8) spec <= 2, NULL);
1241 /* AFD is stored in a nybble */
1242 g_return_val_if_fail (afd_data <= 0xF, NULL);
1243 /* reserved values for all specifications */
1244 g_return_val_if_fail (afd_data != 1 && (afd_data < 5 || afd_data > 7)
1245 && afd_data != 12, NULL);
1246 /* reserved for DVB/ETSI */
1247 g_return_val_if_fail ((spec != GST_VIDEO_AFD_SPEC_DVB_ETSI)
1248 || (afd_data != 0), NULL);
1249
1250 meta = (GstVideoAFDMeta *) gst_buffer_add_meta (buffer,
1251 GST_VIDEO_AFD_META_INFO, NULL);
1252 g_assert (meta != NULL);
1253
1254 meta->field = field;
1255 meta->spec = spec;
1256 meta->afd = afd;
1257
1258 return meta;
1259 }
1260
/* Bar Meta implementation */
1262
1263 GType
gst_video_bar_meta_api_get_type(void)1264 gst_video_bar_meta_api_get_type (void)
1265 {
1266 static GType type = 0;
1267
1268 if (g_once_init_enter (&type)) {
1269 static const gchar *tags[] = {
1270 GST_META_TAG_VIDEO_SIZE_STR,
1271 GST_META_TAG_VIDEO_ORIENTATION_STR,
1272 GST_META_TAG_VIDEO_STR,
1273 NULL
1274 };
1275 GType _type = gst_meta_api_type_register ("GstVideoBarMetaAPI", tags);
1276 g_once_init_leave (&type, _type);
1277 }
1278 return type;
1279 }
1280
1281 static gboolean
gst_video_bar_meta_init(GstMeta * meta,gpointer params,GstBuffer * buffer)1282 gst_video_bar_meta_init (GstMeta * meta, gpointer params, GstBuffer * buffer)
1283 {
1284 GstVideoBarMeta *emeta = (GstVideoBarMeta *) meta;
1285
1286 emeta->field = 0;
1287 emeta->is_letterbox = FALSE;
1288 emeta->bar_data1 = 0;
1289 emeta->bar_data2 = 0;
1290
1291 return TRUE;
1292 }
1293
1294 static gboolean
gst_video_bar_meta_transform(GstBuffer * dest,GstMeta * meta,GstBuffer * buffer,GQuark type,gpointer data)1295 gst_video_bar_meta_transform (GstBuffer * dest, GstMeta * meta,
1296 GstBuffer * buffer, GQuark type, gpointer data)
1297 {
1298 GstVideoBarMeta *smeta = (GstVideoBarMeta *) meta;
1299
1300 if (GST_META_TRANSFORM_IS_COPY (type)) {
1301 GST_DEBUG ("copy Bar metadata");
1302 gst_buffer_add_video_bar_meta (dest, smeta->field, smeta->is_letterbox,
1303 smeta->bar_data1, smeta->bar_data2);
1304 return TRUE;
1305 } else {
1306 /* return FALSE, if transform type is not supported */
1307 return FALSE;
1308 }
1309 }
1310
1311 const GstMetaInfo *
gst_video_bar_meta_get_info(void)1312 gst_video_bar_meta_get_info (void)
1313 {
1314 static const GstMetaInfo *meta_info = NULL;
1315
1316 if (g_once_init_enter ((GstMetaInfo **) & meta_info)) {
1317 const GstMetaInfo *mi = gst_meta_register (GST_VIDEO_BAR_META_API_TYPE,
1318 "GstVideoBarMeta",
1319 sizeof (GstVideoBarMeta),
1320 gst_video_bar_meta_init,
1321 NULL,
1322 gst_video_bar_meta_transform);
1323 g_once_init_leave ((GstMetaInfo **) & meta_info, (GstMetaInfo *) mi);
1324 }
1325 return meta_info;
1326 }
1327
1328 /**
1329 * gst_buffer_add_video_bar_meta:
1330 * @buffer: a #GstBuffer
1331 * @field: 0 for progressive or field 1 and 1 for field 2
1332 * @is_letterbox: if true then bar data specifies letterbox, otherwise pillarbox
1333 * @bar_data1: If @is_letterbox is true, then the value specifies the
1334 * last line of a horizontal letterbox bar area at top of reconstructed frame.
1335 * Otherwise, it specifies the last horizontal luminance sample of a vertical pillarbox
1336 * bar area at the left side of the reconstructed frame
1337 * @bar_data2: If @is_letterbox is true, then the value specifies the
1338 * first line of a horizontal letterbox bar area at bottom of reconstructed frame.
1339 * Otherwise, it specifies the first horizontal
1340 * luminance sample of a vertical pillarbox bar area at the right side of the reconstructed frame.
1341 *
1342 * Attaches #GstVideoBarMeta metadata to @buffer with the given
1343 * parameters.
1344 *
1345 * Returns: (transfer none): the #GstVideoBarMeta on @buffer.
1346 *
1347 * See Table 6.11 Bar Data Syntax
1348 *
1349 * https://www.atsc.org/wp-content/uploads/2015/03/a_53-Part-4-2009.pdf
1350 *
1351 * Since: 1.18
1352 */
1353 GstVideoBarMeta *
gst_buffer_add_video_bar_meta(GstBuffer * buffer,guint8 field,gboolean is_letterbox,guint bar_data1,guint bar_data2)1354 gst_buffer_add_video_bar_meta (GstBuffer * buffer, guint8 field,
1355 gboolean is_letterbox, guint bar_data1, guint bar_data2)
1356 {
1357 GstVideoBarMeta *meta;
1358
1359 g_return_val_if_fail (GST_IS_BUFFER (buffer), NULL);
1360 g_return_val_if_fail (field <= 1, NULL);
1361
1362 meta = (GstVideoBarMeta *) gst_buffer_add_meta (buffer,
1363 GST_VIDEO_BAR_META_INFO, NULL);
1364 g_assert (meta != NULL);
1365
1366 meta->field = field;
1367 meta->is_letterbox = is_letterbox;
1368 meta->bar_data1 = bar_data1;
1369 meta->bar_data2 = bar_data2;
1370 return meta;
1371 }
1372