/*
 * Copyright (c) 2012 Rudolf Polzer
 * Copyright (c) 2013 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * telecine filter, heavily based on mpv-player:TOOLS/vf_dlopen/telecine.c by
 * Rudolf Polzer.
 */

#include "libavutil/avstring.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

typedef struct TelecineContext {
    const AVClass *class;
    int first_field;
    char *pattern;
    unsigned int pattern_pos;
    int64_t start_time;

    AVRational pts;
    AVRational ts_unit;
    int out_cnt;
    int occupied;

    int nb_planes;
    int planeheight[4];
    int stride[4];

    AVFrame *frame[5];
    AVFrame *temp;
} TelecineContext;

#define OFFSET(x) offsetof(TelecineContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption telecine_options[] = {
    {"first_field", "select first field", OFFSET(first_field), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "field"},
    {"top",    "select top field first",    0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field"},
    {"t",      "select top field first",    0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field"},
    {"bottom", "select bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field"},
    {"b",      "select bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field"},
    {"pattern", "pattern that describes for how many fields a frame is to be displayed", OFFSET(pattern), AV_OPT_TYPE_STRING, {.str="23"}, 0, 0, FLAGS},
    {NULL}
};

AVFILTER_DEFINE_CLASS(telecine);

static av_cold int init(AVFilterContext *ctx)
{
    TelecineContext *s = ctx->priv;
    const char *p;
    int max = 0;

    if (!strlen(s->pattern)) {
        av_log(ctx, AV_LOG_ERROR, "No pattern provided.\n");
        return AVERROR_INVALIDDATA;
    }

    for (p = s->pattern; *p; p++) {
        if (!av_isdigit(*p)) {
            av_log(ctx, AV_LOG_ERROR, "Provided pattern includes non-numeric characters.\n");
            return AVERROR_INVALIDDATA;
        }

        max = FFMAX(*p - '0', max);
        s->pts.num += 2;
        s->pts.den += *p - '0';
    }

    s->start_time = AV_NOPTS_VALUE;

    s->out_cnt = (max + 1) / 2;
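    /* Example: for the default pattern "23" the loop above yields
     * pts = 4/5 (2 input fields spread over 2 + 3 = 5 output fields), so
     * config_output() turns a 24000/1001 fps input into 30000/1001 fps,
     * and out_cnt = (3 + 1) / 2 = 2 output frames at most per input frame. */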
    av_log(ctx, AV_LOG_INFO, "Telecine pattern %s yields up to %d frames per frame, pts advance factor: %d/%d\n",
           s->pattern, s->out_cnt, s->pts.num, s->pts.den);

    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    int reject_flags = AV_PIX_FMT_FLAG_BITSTREAM |
                       AV_PIX_FMT_FLAG_HWACCEL   |
                       AV_PIX_FMT_FLAG_PAL;
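
    /* These format classes cannot be woven field by field here: bitstream
     * formats are not line-addressable, hardware frames have no CPU-visible
     * data, and (presumably) the palette plane of paletted formats would not
     * be handled correctly by the per-plane copies in filter_frame(). */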

    return ff_set_common_formats(ctx, ff_formats_pixdesc_filter(0, reject_flags));
}

static int config_input(AVFilterLink *inlink)
{
    TelecineContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int i, ret;

    s->temp = ff_get_video_buffer(inlink, inlink->w, inlink->h);
    if (!s->temp)
        return AVERROR(ENOMEM);
    for (i = 0; i < s->out_cnt; i++) {
        s->frame[i] = ff_get_video_buffer(inlink, inlink->w, inlink->h);
        if (!s->frame[i])
            return AVERROR(ENOMEM);
    }

    if ((ret = av_image_fill_linesizes(s->stride, inlink->format, inlink->w)) < 0)
        return ret;

    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    return 0;
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    TelecineContext *s = ctx->priv;
    const AVFilterLink *inlink = ctx->inputs[0];
    AVRational fps = inlink->frame_rate;

    if (!fps.num || !fps.den) {
        av_log(ctx, AV_LOG_ERROR, "The input needs a constant frame rate; "
               "current rate of %d/%d is invalid\n", fps.num, fps.den);
        return AVERROR(EINVAL);
    }
    fps = av_mul_q(fps, av_inv_q(s->pts));
    av_log(ctx, AV_LOG_VERBOSE, "FPS: %d/%d -> %d/%d\n",
           inlink->frame_rate.num, inlink->frame_rate.den, fps.num, fps.den);

    outlink->frame_rate = fps;
    outlink->time_base = av_mul_q(inlink->time_base, s->pts);
    av_log(ctx, AV_LOG_VERBOSE, "TB: %d/%d -> %d/%d\n",
           inlink->time_base.num, inlink->time_base.den, outlink->time_base.num, outlink->time_base.den);

    s->ts_unit = av_inv_q(av_mul_q(fps, outlink->time_base));
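    /* ts_unit is the duration of one output frame expressed in the output
     * time base; filter_frame() uses it to compute output pts as
     * start_time + output_frame_index * ts_unit. */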

    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    TelecineContext *s = ctx->priv;
    int i, len, ret = 0, nout = 0;

    if (s->start_time == AV_NOPTS_VALUE)
        s->start_time = inpicref->pts;

    len = s->pattern[s->pattern_pos] - '0';

    s->pattern_pos++;
    if (!s->pattern[s->pattern_pos])
        s->pattern_pos = 0;

    if (!len) { // do not output any field from this frame
        av_frame_free(&inpicref);
        return 0;
    }

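    /* A leftover half-frame from the previous call may be buffered in s->temp.
     * If so, weave it with the new picture: copying with a stride of
     * linesize * 2, starting at row first_field (or !first_field), selects
     * one field from each source frame. */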
    if (s->occupied) {
        av_frame_make_writable(s->frame[nout]);
        for (i = 0; i < s->nb_planes; i++) {
            // fill in the EARLIER field from the buffered pic
            av_image_copy_plane(s->frame[nout]->data[i] + s->frame[nout]->linesize[i] * s->first_field,
                                s->frame[nout]->linesize[i] * 2,
                                s->temp->data[i] + s->temp->linesize[i] * s->first_field,
                                s->temp->linesize[i] * 2,
                                s->stride[i],
                                (s->planeheight[i] - s->first_field + 1) / 2);
            // fill in the LATER field from the new pic
            av_image_copy_plane(s->frame[nout]->data[i] + s->frame[nout]->linesize[i] * !s->first_field,
                                s->frame[nout]->linesize[i] * 2,
                                inpicref->data[i] + inpicref->linesize[i] * !s->first_field,
                                inpicref->linesize[i] * 2,
                                s->stride[i],
                                (s->planeheight[i] - !s->first_field + 1) / 2);
        }
        s->frame[nout]->interlaced_frame = 1;
        s->frame[nout]->top_field_first = !s->first_field;
        nout++;
        len--;
        s->occupied = 0;
    }

    while (len >= 2) {
        // output THIS image as-is
        av_frame_make_writable(s->frame[nout]);
        for (i = 0; i < s->nb_planes; i++)
            av_image_copy_plane(s->frame[nout]->data[i], s->frame[nout]->linesize[i],
                                inpicref->data[i], inpicref->linesize[i],
                                s->stride[i],
                                s->planeheight[i]);
        s->frame[nout]->interlaced_frame = inpicref->interlaced_frame;
        s->frame[nout]->top_field_first = inpicref->top_field_first;
        nout++;
        len -= 2;
    }

    if (len >= 1) {
        // copy THIS image to the buffer, we need it later
        for (i = 0; i < s->nb_planes; i++)
            av_image_copy_plane(s->temp->data[i], s->temp->linesize[i],
                                inpicref->data[i], inpicref->linesize[i],
                                s->stride[i],
                                s->planeheight[i]);
        s->occupied = 1;
    }

    for (i = 0; i < nout; i++) {
        AVFrame *frame = av_frame_clone(s->frame[i]);
        int interlaced = frame ? frame->interlaced_frame : 0;
        int tff        = frame ? frame->top_field_first  : 0;

        if (!frame) {
            av_frame_free(&inpicref);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(frame, inpicref);
        frame->interlaced_frame = interlaced;
        frame->top_field_first  = tff;
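        /* Rebase timestamps onto the output rate: start_time plus one
         * ts_unit (the output frame duration in the output time base) for
         * every frame already sent on the outlink. */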
        frame->pts = ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time) +
                     av_rescale(outlink->frame_count_in, s->ts_unit.num,
                                s->ts_unit.den);
        ret = ff_filter_frame(outlink, frame);
    }
    av_frame_free(&inpicref);

    return ret;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    TelecineContext *s = ctx->priv;
    int i;

    av_frame_free(&s->temp);
    for (i = 0; i < s->out_cnt; i++)
        av_frame_free(&s->frame[i]);
}

static const AVFilterPad telecine_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
};

static const AVFilterPad telecine_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
};

const AVFilter ff_vf_telecine = {
    .name          = "telecine",
    .description   = NULL_IF_CONFIG_SMALL("Apply a telecine pattern."),
    .priv_size     = sizeof(TelecineContext),
    .priv_class    = &telecine_class,
    .init          = init,
    .uninit        = uninit,
    FILTER_INPUTS(telecine_inputs),
    FILTER_OUTPUTS(telecine_outputs),
    FILTER_QUERY_FUNC(query_formats),
};