/* GStreamer
 * Copyright (C) 2013 David Schleef <ds@schleef.org>
 * Copyright (C) 2013 Rdio Inc <ingestions@rdio.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin Street, Suite 500,
 * Boston, MA 02110-1335, USA.
 */
/**
 * SECTION:element-gstivtc
 * @title: gstivtc
 *
 * The ivtc element is an inverse telecine filter.  It takes interlaced
 * video that was created from progressive content using a telecine
 * filter, and reconstructs the original progressive content.
 *
 * ## Example launch line
 * |[
 * gst-launch-1.0 -v videotestsrc pattern=ball ! video/x-raw,framerate=24/1 !
 *     interlace ! ivtc ! video/x-raw,framerate=24/1 ! fakesink
 * ]|
 *
 * This pipeline creates a progressive video stream at 24 fps, and
 * converts it to a 60 fields per second interlaced stream.  The stream
 * is then inverse telecined back to 24 fps, yielding approximately
 * the original videotestsrc content.
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <gst/gst.h>
#include <gst/base/gstbasetransform.h>
#include <gst/video/video.h>
#include "gstivtc.h"
#include <string.h>
#include <math.h>

/* only because element registration is in this file */
#include "gstcombdetect.h"

GST_DEBUG_CATEGORY_STATIC (gst_ivtc_debug_category);
#define GST_CAT_DEFAULT gst_ivtc_debug_category

/* prototypes */


static GstCaps *gst_ivtc_transform_caps (GstBaseTransform * trans,
    GstPadDirection direction, GstCaps * caps, GstCaps * filter);
static GstCaps *gst_ivtc_fixate_caps (GstBaseTransform * trans,
    GstPadDirection direction, GstCaps * caps, GstCaps * othercaps);
static gboolean gst_ivtc_set_caps (GstBaseTransform * trans, GstCaps * incaps,
    GstCaps * outcaps);
static gboolean gst_ivtc_sink_event (GstBaseTransform * trans,
    GstEvent * event);
static GstFlowReturn gst_ivtc_transform (GstBaseTransform * trans,
    GstBuffer * inbuf, GstBuffer * outbuf);
static void gst_ivtc_flush (GstIvtc * ivtc);
static void gst_ivtc_retire_fields (GstIvtc * ivtc, int n_fields);
static void gst_ivtc_construct_frame (GstIvtc * ivtc, GstBuffer * outbuf);

static int get_comb_score (GstVideoFrame * top, GstVideoFrame * bottom);

enum
{
  PROP_0
};

/* pad templates */

#define MAX_WIDTH 2048
#define VIDEO_CAPS \
  "video/x-raw, " \
  "format = (string) { I420, Y444, Y42B }, " \
  "width = [1, 2048], " \
  "height = " GST_VIDEO_SIZE_RANGE ", " \
  "framerate = " GST_VIDEO_FPS_RANGE

static GstStaticPadTemplate gst_ivtc_sink_template =
GST_STATIC_PAD_TEMPLATE ("sink",
    GST_PAD_SINK,
    GST_PAD_ALWAYS,
    GST_STATIC_CAPS (VIDEO_CAPS)
    );

static GstStaticPadTemplate gst_ivtc_src_template =
GST_STATIC_PAD_TEMPLATE ("src",
    GST_PAD_SRC,
    GST_PAD_ALWAYS,
    GST_STATIC_CAPS (VIDEO_CAPS)
    );


/* class initialization */

G_DEFINE_TYPE_WITH_CODE (GstIvtc, gst_ivtc, GST_TYPE_BASE_TRANSFORM,
    GST_DEBUG_CATEGORY_INIT (gst_ivtc_debug_category, "ivtc", 0,
        "debug category for ivtc element"));

static void
gst_ivtc_class_init (GstIvtcClass * klass)
{
  GstBaseTransformClass *base_transform_class =
      GST_BASE_TRANSFORM_CLASS (klass);

  /* Setting up pads and setting metadata should be moved to
     base_class_init if you intend to subclass this class. */
  gst_element_class_add_static_pad_template (GST_ELEMENT_CLASS (klass),
      &gst_ivtc_sink_template);
  gst_element_class_add_static_pad_template (GST_ELEMENT_CLASS (klass),
      &gst_ivtc_src_template);

  gst_element_class_set_static_metadata (GST_ELEMENT_CLASS (klass),
      "Inverse Telecine", "Video/Filter", "Inverse Telecine Filter",
      "David Schleef <ds@schleef.org>");

  base_transform_class->transform_caps =
      GST_DEBUG_FUNCPTR (gst_ivtc_transform_caps);
  base_transform_class->fixate_caps = GST_DEBUG_FUNCPTR (gst_ivtc_fixate_caps);
  base_transform_class->set_caps = GST_DEBUG_FUNCPTR (gst_ivtc_set_caps);
  base_transform_class->sink_event = GST_DEBUG_FUNCPTR (gst_ivtc_sink_event);
  base_transform_class->transform = GST_DEBUG_FUNCPTR (gst_ivtc_transform);
}

static void
gst_ivtc_init (GstIvtc * ivtc)
{
}

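/* Transforming caps from src to sink, the element accepts interleaved,
 * mixed or progressive input; from sink to src it always produces
 * progressive output.  The framerate is removed in both directions,
 * since inverse telecine changes it. */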
static GstCaps *
gst_ivtc_transform_caps (GstBaseTransform * trans,
    GstPadDirection direction, GstCaps * caps, GstCaps * filter)
{
  GstCaps *othercaps;
  int i;

  othercaps = gst_caps_copy (caps);

  if (direction == GST_PAD_SRC) {
    GValue value = G_VALUE_INIT;
    GValue v = G_VALUE_INIT;

    g_value_init (&value, GST_TYPE_LIST);
    g_value_init (&v, G_TYPE_STRING);

    g_value_set_string (&v, "interleaved");
    gst_value_list_append_value (&value, &v);
    g_value_set_string (&v, "mixed");
    gst_value_list_append_value (&value, &v);
    g_value_set_string (&v, "progressive");
    gst_value_list_append_value (&value, &v);

    for (i = 0; i < gst_caps_get_size (othercaps); i++) {
      GstStructure *structure = gst_caps_get_structure (othercaps, i);
      gst_structure_set_value (structure, "interlace-mode", &value);
      gst_structure_remove_field (structure, "framerate");
    }
    g_value_unset (&value);
    g_value_unset (&v);
  } else {
    for (i = 0; i < gst_caps_get_size (othercaps); i++) {
      GstStructure *structure = gst_caps_get_structure (othercaps, i);
      gst_structure_set (structure, "interlace-mode", G_TYPE_STRING,
          "progressive", NULL);
      gst_structure_remove_field (structure, "framerate");
    }
  }

  if (filter) {
    GstCaps *intersect;

    intersect = gst_caps_intersect (othercaps, filter);
    gst_caps_unref (othercaps);
    othercaps = intersect;
  }

  return othercaps;
}

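/* Fixate the output framerate: 30000/1001 (NTSC) input becomes 24000/1001,
 * anything else defaults to 24/1. */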
static GstCaps *
gst_ivtc_fixate_caps (GstBaseTransform * trans, GstPadDirection direction,
    GstCaps * caps, GstCaps * othercaps)
{
  GstCaps *result;

  GST_DEBUG_OBJECT (trans, "fixating caps %" GST_PTR_FORMAT, othercaps);

  result = gst_caps_make_writable (othercaps);
  if (direction == GST_PAD_SINK) {
    GstVideoInfo info;
    if (gst_video_info_from_caps (&info, caps)) {
      /* Smarter decision */
      GST_DEBUG_OBJECT (trans, "Input framerate is %d/%d", info.fps_n,
          info.fps_d);
      if (info.fps_n == 30000 && info.fps_d == 1001)
        gst_caps_set_simple (result, "framerate", GST_TYPE_FRACTION, 24000,
            1001, NULL);
      else
        gst_caps_set_simple (result, "framerate", GST_TYPE_FRACTION, 24, 1,
            NULL);
    } else {
      gst_caps_set_simple (result, "framerate", GST_TYPE_FRACTION, 24, 1, NULL);
    }
  }

  result = gst_caps_fixate (result);

  return result;
}

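/* Remember the negotiated input/output video info and derive the duration
 * of a single field (half an input frame) from the input framerate. */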
static gboolean
gst_ivtc_set_caps (GstBaseTransform * trans, GstCaps * incaps,
    GstCaps * outcaps)
{
  GstIvtc *ivtc = GST_IVTC (trans);

  gst_video_info_from_caps (&ivtc->sink_video_info, incaps);
  gst_video_info_from_caps (&ivtc->src_video_info, outcaps);

  ivtc->field_duration = gst_util_uint64_scale_int (GST_SECOND,
      ivtc->sink_video_info.fps_d, ivtc->sink_video_info.fps_n * 2);
  GST_DEBUG_OBJECT (trans, "field duration %" GST_TIME_FORMAT,
      GST_TIME_ARGS (ivtc->field_duration));

  return TRUE;
}

/* sink and src pad event handlers */
static gboolean
gst_ivtc_sink_event (GstBaseTransform * trans, GstEvent * event)
{
  GstIvtc *ivtc = GST_IVTC (trans);

  GST_DEBUG_OBJECT (ivtc, "sink_event");

  if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
    const GstSegment *seg;

    gst_ivtc_flush (ivtc);

    /* FIXME this should handle update events */

    gst_event_parse_segment (event, &seg);
    gst_segment_copy_into (seg, &ivtc->segment);
    ivtc->current_ts = ivtc->segment.start;
  }

  return GST_BASE_TRANSFORM_CLASS (gst_ivtc_parent_class)->sink_event (trans,
      event);
}

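/* Discard all queued fields.  Called on SEGMENT events; note the FIXME:
 * buffered fields are simply dropped instead of being pushed downstream. */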
static void
gst_ivtc_flush (GstIvtc * ivtc)
{
  if (ivtc->n_fields > 0) {
    GST_FIXME_OBJECT (ivtc, "not sending flushed fields to srcpad");
  }

  gst_ivtc_retire_fields (ivtc, ivtc->n_fields);
}

enum
{
  TOP_FIELD = 0,
  BOTTOM_FIELD = 1
};

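/* Append one field of the input buffer to the field queue.  The buffer is
 * referenced and mapped for reading, and the field timestamp is the buffer
 * PTS plus index * field_duration.  Fields that end before the segment
 * start are dropped. */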
static void
add_field (GstIvtc * ivtc, GstBuffer * buffer, int parity, int index)
{
  int i = ivtc->n_fields;
  GstClockTime ts;
  GstIvtcField *field = &ivtc->fields[i];

  g_return_if_fail (i < GST_IVTC_MAX_FIELDS);

  ts = GST_BUFFER_PTS (buffer) + index * ivtc->field_duration;
  if (ts + ivtc->field_duration < ivtc->segment.start) {
    /* drop, it's before our segment */
    return;
  }

  GST_DEBUG ("adding field %d", i);

  field->buffer = gst_buffer_ref (buffer);
  field->parity = parity;
  field->ts = ts;

  gst_video_frame_map (&ivtc->fields[i].frame, &ivtc->sink_video_info,
      buffer, GST_MAP_READ);

  ivtc->n_fields++;
}

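/* Comb score of fields i1 and i2 woven into one frame; whichever field has
 * top parity supplies the top lines.  A low score means the two fields
 * interleave cleanly, i.e. they likely belong to the same progressive
 * frame. */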
static int
similarity (GstIvtc * ivtc, int i1, int i2)
{
  GstIvtcField *f1, *f2;
  int score;

  g_return_val_if_fail (i1 >= 0 && i1 < ivtc->n_fields, 0);
  g_return_val_if_fail (i2 >= 0 && i2 < ivtc->n_fields, 0);

  f1 = &ivtc->fields[i1];
  f2 = &ivtc->fields[i2];

  if (f1->parity == TOP_FIELD) {
    score = get_comb_score (&f1->frame, &f2->frame);
  } else {
    score = get_comb_score (&f2->frame, &f1->frame);
  }

  GST_DEBUG ("score %d", score);

  return score;
}

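/* Note: both macros use the caller's loop variable 'k' as the plane index;
 * the 'comp' argument is only used to look up the stride. */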
#define GET_LINE(frame,comp,line) (((unsigned char *)(frame)->data[k]) + \
    (line) * GST_VIDEO_FRAME_COMP_STRIDE((frame), (comp)))
#define GET_LINE_IL(top,bottom,comp,line) \
  (((unsigned char *)(((line)&1)?(bottom):(top))->data[k]) + \
   (line) * GST_VIDEO_FRAME_COMP_STRIDE((top), (comp)))

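/* Weave fields i1 and i2 into a full progressive frame: whichever field has
 * top parity supplies the even lines and the other supplies the odd lines,
 * for all three planes. */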
static void
reconstruct (GstIvtc * ivtc, GstVideoFrame * dest_frame, int i1, int i2)
{
  GstVideoFrame *top, *bottom;
  int width, height;
  int j, k;

  g_return_if_fail (i1 >= 0 && i1 < ivtc->n_fields);
  g_return_if_fail (i2 >= 0 && i2 < ivtc->n_fields);

  if (ivtc->fields[i1].parity == TOP_FIELD) {
    top = &ivtc->fields[i1].frame;
    bottom = &ivtc->fields[i2].frame;
  } else {
    bottom = &ivtc->fields[i1].frame;
    top = &ivtc->fields[i2].frame;
  }

  for (k = 0; k < 3; k++) {
    height = GST_VIDEO_FRAME_COMP_HEIGHT (top, k);
    width = GST_VIDEO_FRAME_COMP_WIDTH (top, k);
    for (j = 0; j < height; j++) {
      guint8 *dest = GET_LINE (dest_frame, k, j);
      guint8 *src = GET_LINE_IL (top, bottom, k, j);

      memcpy (dest, src, width);
    }
  }

}

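/* Directional interpolation of one output pixel: an 8-tap filter takes four
 * pixels from the line above and four from the line below, offset along the
 * chosen edge direction.  The weights a, b, c, d sum to 16 per line (32 in
 * total), so the result is rounded with +16 and shifted right by 5. */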
static int
reconstruct_line (guint8 * line1, guint8 * line2, int i, int a, int b, int c,
    int d)
{
  int x;

  x = line1[i - 3] * a;
  x += line1[i - 2] * b;
  x += line1[i - 1] * c;
  x += line1[i - 0] * d;
  x += line2[i + 0] * d;
  x += line2[i + 1] * c;
  x += line2[i + 2] * b;
  x += line2[i + 3] * a;
  return (x + 16) >> 5;
}


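/* Deinterlace a single field.  Lines present in the field are copied and the
 * first/last missing line duplicates its neighbour.  Other missing luma
 * lines are filled with edge-directed interpolation: a local gradient
 * (dx, dy) selects one of the diagonal filters in reconstruct_line, falling
 * back to a plain average of the lines above and below; a MARGIN of pixels
 * at each side of the line is always averaged.  Missing chroma lines are
 * simply averaged. */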
static void
reconstruct_single (GstIvtc * ivtc, GstVideoFrame * dest_frame, int i1)
{
  int j;
  int k;
  int height;
  int width;
  GstIvtcField *field = &ivtc->fields[i1];

  for (k = 0; k < 1; k++) {
    height = GST_VIDEO_FRAME_COMP_HEIGHT (dest_frame, k);
    width = GST_VIDEO_FRAME_COMP_WIDTH (dest_frame, k);
    for (j = 0; j < height; j++) {
      if ((j & 1) == field->parity) {
        memcpy (GET_LINE (dest_frame, k, j),
            GET_LINE (&field->frame, k, j), width);
      } else {
        if (j == 0 || j == height - 1) {
          memcpy (GET_LINE (dest_frame, k, j),
              GET_LINE (&field->frame, k, (j ^ 1)), width);
        } else {
          guint8 *dest = GET_LINE (dest_frame, k, j);
          guint8 *line1 = GET_LINE (&field->frame, k, j - 1);
          guint8 *line2 = GET_LINE (&field->frame, k, j + 1);
          int i;

#define MARGIN 3
          for (i = MARGIN; i < width - MARGIN; i++) {
            int dx, dy;

            dx = -line1[i - 1] - line2[i - 1] + line1[i + 1] + line2[i + 1];
            dx *= 2;

            dy = -line1[i - 1] - 2 * line1[i] - line1[i + 1]
                + line2[i - 1] + 2 * line2[i] + line2[i + 1];
            if (dy < 0) {
              dy = -dy;
              dx = -dx;
            }

            if (dx == 0 && dy == 0) {
              dest[i] = (line1[i] + line2[i] + 1) >> 1;
            } else if (dx < 0) {
              if (dx < -2 * dy) {
                dest[i] = reconstruct_line (line1, line2, i, 0, 0, 0, 16);
              } else if (dx < -dy) {
                dest[i] = reconstruct_line (line1, line2, i, 0, 0, 8, 8);
              } else if (2 * dx < -dy) {
                dest[i] = reconstruct_line (line1, line2, i, 0, 4, 8, 4);
              } else if (3 * dx < -dy) {
                dest[i] = reconstruct_line (line1, line2, i, 1, 7, 7, 1);
              } else {
                dest[i] = reconstruct_line (line1, line2, i, 4, 8, 4, 0);
              }
            } else {
              if (dx > 2 * dy) {
                dest[i] = reconstruct_line (line2, line1, i, 0, 0, 0, 16);
              } else if (dx > dy) {
                dest[i] = reconstruct_line (line2, line1, i, 0, 0, 8, 8);
              } else if (2 * dx > dy) {
                dest[i] = reconstruct_line (line2, line1, i, 0, 4, 8, 4);
              } else if (3 * dx > dy) {
                dest[i] = reconstruct_line (line2, line1, i, 1, 7, 7, 1);
              } else {
                dest[i] = reconstruct_line (line2, line1, i, 4, 8, 4, 0);
              }
            }
          }

          for (i = 0; i < MARGIN; i++) {
            dest[i] = (line1[i] + line2[i] + 1) >> 1;
          }
          for (i = width - MARGIN; i < width; i++) {
            dest[i] = (line1[i] + line2[i] + 1) >> 1;
          }
        }
      }
    }
  }
  for (k = 1; k < 3; k++) {
    height = GST_VIDEO_FRAME_COMP_HEIGHT (dest_frame, k);
    width = GST_VIDEO_FRAME_COMP_WIDTH (dest_frame, k);
    for (j = 0; j < height; j++) {
      if ((j & 1) == field->parity) {
        memcpy (GET_LINE (dest_frame, k, j),
            GET_LINE (&field->frame, k, j), width);
      } else {
        if (j == 0 || j == height - 1) {
          memcpy (GET_LINE (dest_frame, k, j),
              GET_LINE (&field->frame, k, (j ^ 1)), width);
        } else {
          guint8 *dest = GET_LINE (dest_frame, k, j);
          guint8 *line1 = GET_LINE (&field->frame, k, j - 1);
          guint8 *line2 = GET_LINE (&field->frame, k, j + 1);
          int i;
          for (i = 0; i < width; i++) {
            dest[i] = (line1[i] + line2[i] + 1) >> 1;
          }
        }
      }
    }
  }
}

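/* Unmap and unref the oldest n_fields entries and shift the remaining
 * queue down. */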
static void
gst_ivtc_retire_fields (GstIvtc * ivtc, int n_fields)
{
  int i;

  if (n_fields == 0)
    return;

  for (i = 0; i < n_fields; i++) {
    gst_video_frame_unmap (&ivtc->fields[i].frame);
    gst_buffer_unref (ivtc->fields[i].buffer);
  }

  memmove (ivtc->fields, ivtc->fields + n_fields,
      sizeof (GstIvtcField) * (ivtc->n_fields - n_fields));
  ivtc->n_fields -= n_fields;
}

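/* Split the input buffer into fields according to the TFF/ONEFIELD/RFF
 * flags, drop fields that lag more than 50 ms behind the running output
 * timestamp, and produce output once at least four fields are queued.  When
 * several frames can be built from the queue, the earlier ones are pushed
 * directly on the src pad and the last one is returned in outbuf. */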
static GstFlowReturn
gst_ivtc_transform (GstBaseTransform * trans, GstBuffer * inbuf,
    GstBuffer * outbuf)
{
  GstIvtc *ivtc = GST_IVTC (trans);
  GstFlowReturn ret;

  GST_DEBUG_OBJECT (ivtc, "transform");

  if (GST_BUFFER_FLAG_IS_SET (inbuf, GST_VIDEO_BUFFER_FLAG_TFF)) {
    add_field (ivtc, inbuf, TOP_FIELD, 0);
    if (!GST_BUFFER_FLAG_IS_SET (inbuf, GST_VIDEO_BUFFER_FLAG_ONEFIELD)) {
      add_field (ivtc, inbuf, BOTTOM_FIELD, 1);
      if (GST_BUFFER_FLAG_IS_SET (inbuf, GST_VIDEO_BUFFER_FLAG_RFF)) {
        add_field (ivtc, inbuf, TOP_FIELD, 2);
      }
    }
  } else {
    add_field (ivtc, inbuf, BOTTOM_FIELD, 0);
    if (!GST_BUFFER_FLAG_IS_SET (inbuf, GST_VIDEO_BUFFER_FLAG_ONEFIELD)) {
      add_field (ivtc, inbuf, TOP_FIELD, 1);
      if (GST_BUFFER_FLAG_IS_SET (inbuf, GST_VIDEO_BUFFER_FLAG_RFF)) {
        add_field (ivtc, inbuf, BOTTOM_FIELD, 2);
      }
    }
  }

  while (ivtc->n_fields > 0 &&
      ivtc->fields[0].ts + GST_MSECOND * 50 < ivtc->current_ts) {
    GST_DEBUG ("retiring early field");
    gst_ivtc_retire_fields (ivtc, 1);
  }

  GST_DEBUG ("n_fields %d", ivtc->n_fields);
  if (ivtc->n_fields < 4) {
    return GST_BASE_TRANSFORM_FLOW_DROPPED;
  }

  gst_ivtc_construct_frame (ivtc, outbuf);
  while (ivtc->n_fields >= 4) {
    GstBuffer *buf;
    buf = gst_buffer_copy (outbuf);
    GST_DEBUG ("pushing extra frame");
    ret = gst_pad_push (GST_BASE_TRANSFORM_SRC_PAD (trans), buf);
    if (ret != GST_FLOW_OK) {
      return ret;
    }

    gst_ivtc_construct_frame (ivtc, outbuf);
  }

  return GST_FLOW_OK;
}

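/* Build one output frame around the anchor field (index 1).  If the anchor
 * combines cleanly with the previous field (comb score below THRESHOLD),
 * weave with it, unless the next field combines even better and the running
 * timestamp allows moving forward; otherwise, if it combines cleanly with
 * the next field, weave with that; failing both, interpolate the anchor
 * field on its own.  The consumed fields are retired and the output buffer
 * gets the running timestamp, a frame duration and cleared interlacing
 * flags. */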
static void
gst_ivtc_construct_frame (GstIvtc * ivtc, GstBuffer * outbuf)
{
  int anchor_index;
  int prev_score, next_score;
  GstVideoFrame dest_frame;
  int n_retire;
  gboolean forward_ok;

  anchor_index = 1;
  if (ivtc->fields[anchor_index].ts < ivtc->current_ts) {
    forward_ok = TRUE;
  } else {
    forward_ok = FALSE;
  }

  prev_score = similarity (ivtc, anchor_index - 1, anchor_index);
  next_score = similarity (ivtc, anchor_index, anchor_index + 1);

  gst_video_frame_map (&dest_frame, &ivtc->src_video_info, outbuf,
      GST_MAP_WRITE);

#define THRESHOLD 100
  if (prev_score < THRESHOLD) {
    if (forward_ok && next_score < prev_score) {
      reconstruct (ivtc, &dest_frame, anchor_index, anchor_index + 1);
      n_retire = anchor_index + 2;
    } else {
      if (prev_score >= THRESHOLD / 2) {
        GST_INFO ("borderline prev (%d, %d)", prev_score, next_score);
      }
      reconstruct (ivtc, &dest_frame, anchor_index, anchor_index - 1);
      n_retire = anchor_index + 1;
    }
  } else if (next_score < THRESHOLD) {
    if (next_score >= THRESHOLD / 2) {
      GST_INFO ("borderline next (%d, %d)", prev_score, next_score);
    }
    reconstruct (ivtc, &dest_frame, anchor_index, anchor_index + 1);
    if (forward_ok) {
      n_retire = anchor_index + 2;
    } else {
      n_retire = anchor_index + 1;
    }
  } else {
    if (prev_score < THRESHOLD * 2 || next_score < THRESHOLD * 2) {
      GST_INFO ("borderline single (%d, %d)", prev_score, next_score);
    }
    reconstruct_single (ivtc, &dest_frame, anchor_index);
    n_retire = anchor_index + 1;
  }

  GST_DEBUG ("retiring %d", n_retire);
  gst_ivtc_retire_fields (ivtc, n_retire);

  gst_video_frame_unmap (&dest_frame);

  GST_BUFFER_PTS (outbuf) = ivtc->current_ts;
  GST_BUFFER_DTS (outbuf) = ivtc->current_ts;
  /* FIXME this is not how to produce durations */
  GST_BUFFER_DURATION (outbuf) = gst_util_uint64_scale (GST_SECOND,
      ivtc->src_video_info.fps_d, ivtc->src_video_info.fps_n);
  GST_BUFFER_FLAG_UNSET (outbuf, GST_VIDEO_BUFFER_FLAG_INTERLACED |
      GST_VIDEO_BUFFER_FLAG_TFF | GST_VIDEO_BUFFER_FLAG_RFF |
      GST_VIDEO_BUFFER_FLAG_ONEFIELD);
  ivtc->current_ts += GST_BUFFER_DURATION (outbuf);

}

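/* Measure combing in the frame woven from the top and bottom fields.  Only
 * the luma plane is examined and the two outermost lines at the top and
 * bottom are skipped.  A pixel counts as combed when it lies more than 5
 * below the minimum or more than 5 above the maximum of the pixels directly
 * above and below it (which come from the other field); combed pixels
 * accumulate the count of their left neighbour and keep their count across
 * rows until a non-combed pixel resets the column.  Columns whose count
 * exceeds 100 add to the score. */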
static int
get_comb_score (GstVideoFrame * top, GstVideoFrame * bottom)
{
  int j;
  int thisline[MAX_WIDTH];
  int score = 0;
  int height;
  int width;
  int k;

  height = GST_VIDEO_FRAME_COMP_HEIGHT (top, 0);
  width = GST_VIDEO_FRAME_COMP_WIDTH (top, 0);

  memset (thisline, 0, sizeof (thisline));

  k = 0;
  /* remove a few lines from top and bottom, as they sometimes contain
   * artifacts */
  for (j = 2; j < height - 2; j++) {
    guint8 *src1 = GET_LINE_IL (top, bottom, 0, j - 1);
    guint8 *src2 = GET_LINE_IL (top, bottom, 0, j);
    guint8 *src3 = GET_LINE_IL (top, bottom, 0, j + 1);
    int i;

    for (i = 0; i < width; i++) {
      if (src2[i] < MIN (src1[i], src3[i]) - 5 ||
          src2[i] > MAX (src1[i], src3[i]) + 5) {
        if (i > 0) {
          thisline[i] += thisline[i - 1];
        }
        thisline[i]++;
        if (thisline[i] > 1000)
          thisline[i] = 1000;
      } else {
        thisline[i] = 0;
      }
      if (thisline[i] > 100) {
        score++;
      }
    }
  }

  GST_DEBUG ("score %d", score);

  return score;
}


static gboolean
plugin_init (GstPlugin * plugin)
{
  gst_element_register (plugin, "ivtc", GST_RANK_NONE, GST_TYPE_IVTC);
  gst_element_register (plugin, "combdetect", GST_RANK_NONE,
      GST_TYPE_COMB_DETECT);

  return TRUE;
}

GST_PLUGIN_DEFINE (GST_VERSION_MAJOR,
    GST_VERSION_MINOR,
    ivtc,
    "Inverse Telecine",
    plugin_init, VERSION, "LGPL", PACKAGE_NAME, GST_PACKAGE_ORIGIN)