/*
 * Copyright (c) 2013 Georg Martius <georg dot martius at web dot de>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define DEFAULT_RESULT_NAME "transforms.trf"

#include <vid.stab/libvidstab.h>

#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "avfilter.h"
#include "internal.h"

#include "vidstabutils.h"

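/**
 * Per-instance state: the libvidstab motion-detection context and its
 * configuration, plus the path and handle of the transform file written
 * during pass 1.
 */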
typedef struct StabData {
    const AVClass *class;

    VSMotionDetect md;
    VSMotionDetectConfig conf;

    char *result;
    FILE *f;
} StabData;


#define OFFSET(x) offsetof(StabData, x)
#define OFFSETC(x) (offsetof(StabData, conf)+offsetof(VSMotionDetectConfig, x))
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

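/* Options declared with OFFSETC() write straight into the embedded
 * VSMotionDetectConfig, so no manual copying of option values into the
 * libvidstab configuration is needed. */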
static const AVOption vidstabdetect_options[] = {
    {"result",      "path to the file used to write the transforms", OFFSET(result), AV_OPT_TYPE_STRING, {.str = DEFAULT_RESULT_NAME}, .flags = FLAGS},
    {"shakiness",   "how shaky is the video and how quick is the camera?"
                    " 1: little (fast) 10: very strong/quick (slow)", OFFSETC(shakiness), AV_OPT_TYPE_INT, {.i64 = 5}, 1, 10, FLAGS},
    {"accuracy",    "(>=shakiness) 1: low 15: high (slow)", OFFSETC(accuracy), AV_OPT_TYPE_INT, {.i64 = 15}, 1, 15, FLAGS},
    {"stepsize",    "region around minimum is scanned with 1 pixel resolution", OFFSETC(stepSize), AV_OPT_TYPE_INT, {.i64 = 6}, 1, 32, FLAGS},
    {"mincontrast", "below this contrast a field is discarded (0-1)", OFFSETC(contrastThreshold), AV_OPT_TYPE_DOUBLE, {.dbl = 0.25}, 0.0, 1.0, FLAGS},
    {"show",        "0: draw nothing; 1,2: show fields and transforms", OFFSETC(show), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 2, FLAGS},
56 {"tripod", "virtual tripod mode (if >0): motion is compared to a reference"
57 " reference frame (frame # is the value)", OFFSETC(virtualTripod), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS},
    {NULL}
};

AVFILTER_DEFINE_CLASS(vidstabdetect);

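/*
 * Typical two-pass usage (illustrative only; file names are placeholders):
 *
 *   pass 1 - detect motion and write the transform file:
 *     ffmpeg -i input.mp4 -vf vidstabdetect=shakiness=5:result=transforms.trf -f null -
 *
 *   pass 2 - apply the transforms with the vidstabtransform filter:
 *     ffmpeg -i input.mp4 -vf vidstabtransform=input=transforms.trf output.mp4
 */
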
static av_cold int init(AVFilterContext *ctx)
{
    StabData *s = ctx->priv;
    ff_vs_init();
    s->class = &vidstabdetect_class;
    av_log(ctx, AV_LOG_VERBOSE, "vidstabdetect filter: init %s\n", LIBVIDSTAB_VERSION);
    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    StabData *s = ctx->priv;
    VSMotionDetect *md = &(s->md);

    if (s->f) {
        fclose(s->f);
        s->f = NULL;
    }

    vsMotionDetectionCleanup(md);
}

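/* Advertise only the pixel formats that ff_av2vs_pixfmt() can translate
 * into libvidstab frame formats. */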
static int query_formats(AVFilterContext *ctx)
{
    // If you add something here also add it in vidstabutils.c
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVA420P,
        AV_PIX_FMT_YUV440P, AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, AV_PIX_FMT_RGBA,
        AV_PIX_FMT_NONE
    };

    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

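/**
 * Configure motion detection for the negotiated input: build a VSFrameInfo
 * for the link, sanity-check it against FFmpeg's pixel format descriptor,
 * initialize libvidstab and open the transform file for writing.
 */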
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    StabData *s = ctx->priv;

    VSMotionDetect *md = &(s->md);
    VSFrameInfo fi;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int is_planar = desc->flags & AV_PIX_FMT_FLAG_PLANAR;

    vsFrameInfoInit(&fi, inlink->w, inlink->h,
                    ff_av2vs_pixfmt(ctx, inlink->format));
    if (!is_planar && fi.bytesPerPixel != av_get_bits_per_pixel(desc)/8) {
        av_log(ctx, AV_LOG_ERROR, "pixel-format error: wrong bits per pixel, please report a BUG\n");
        return AVERROR(EINVAL);
    }
    if (fi.log2ChromaW != desc->log2_chroma_w) {
        av_log(ctx, AV_LOG_ERROR, "pixel-format error: log2_chroma_w, please report a BUG\n");
        return AVERROR(EINVAL);
    }

    if (fi.log2ChromaH != desc->log2_chroma_h) {
        av_log(ctx, AV_LOG_ERROR, "pixel-format error: log2_chroma_h, please report a BUG\n");
        return AVERROR(EINVAL);
    }

    // set values that are not initialized by the options
    s->conf.algo = 1;
    s->conf.modName = "vidstabdetect";
    if (vsMotionDetectInit(md, &s->conf, &fi) != VS_OK) {
        av_log(ctx, AV_LOG_ERROR, "initialization of motion detection failed, please report a BUG\n");
        return AVERROR(EINVAL);
    }

    vsMotionDetectGetConfig(&s->conf, md);
    av_log(ctx, AV_LOG_INFO, "Video stabilization settings (pass 1/2):\n");
    av_log(ctx, AV_LOG_INFO, "     shakiness = %d\n", s->conf.shakiness);
    av_log(ctx, AV_LOG_INFO, "      accuracy = %d\n", s->conf.accuracy);
    av_log(ctx, AV_LOG_INFO, "      stepsize = %d\n", s->conf.stepSize);
    av_log(ctx, AV_LOG_INFO, "   mincontrast = %f\n", s->conf.contrastThreshold);
    av_log(ctx, AV_LOG_INFO, "        tripod = %d\n", s->conf.virtualTripod);
    av_log(ctx, AV_LOG_INFO, "          show = %d\n", s->conf.show);
    av_log(ctx, AV_LOG_INFO, "        result = %s\n", s->result);

    s->f = fopen(s->result, "w");
    if (s->f == NULL) {
        av_log(ctx, AV_LOG_ERROR, "cannot open transform file %s\n", s->result);
        return AVERROR(EINVAL);
    } else {
        if (vsPrepareFile(md, s->f) != VS_OK) {
            av_log(ctx, AV_LOG_ERROR, "cannot write to transform file %s\n", s->result);
            return AVERROR(EINVAL);
        }
    }
    return 0;
}

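/**
 * Run motion detection on each incoming frame: wrap the AVFrame planes in a
 * VSFrame, let libvidstab estimate the local motions, append them to the
 * transform file and pass the frame on (with debug overlays if show > 0).
 */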
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    StabData *s = ctx->priv;
    VSMotionDetect *md = &(s->md);
    LocalMotions localmotions;

    AVFilterLink *outlink = inlink->dst->outputs[0];
    VSFrame frame;
    int plane;

    // with show > 0 libvidstab draws debug overlays into the frame,
    // so make sure we do not write into shared frame data
    if (s->conf.show > 0 && !av_frame_is_writable(in)) {
        int ret = av_frame_make_writable(in);
        if (ret < 0) {
            av_frame_free(&in);
            return ret;
        }
    }

    for (plane = 0; plane < md->fi.planes; plane++) {
        frame.data[plane] = in->data[plane];
        frame.linesize[plane] = in->linesize[plane];
    }
    if (vsMotionDetection(md, &localmotions, &frame) != VS_OK) {
        av_log(ctx, AV_LOG_ERROR, "motion detection failed\n");
        return AVERROR_EXTERNAL;
    } else {
        if (vsWriteToFile(md, s->f, &localmotions) != VS_OK) {
            int ret = AVERROR(errno);
            av_log(ctx, AV_LOG_ERROR, "cannot write to transform file\n");
            return ret;
        }
        vs_vector_del(&localmotions);
    }

    return ff_filter_frame(outlink, in);
}

static const AVFilterPad avfilter_vf_vidstabdetect_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_vidstabdetect_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_vidstabdetect = {
    .name          = "vidstabdetect",
    .description   = NULL_IF_CONFIG_SMALL("Extract relative transformations, "
                                          "pass 1 of 2 for stabilization "
                                          "(see vidstabtransform for pass 2)."),
    .priv_size     = sizeof(StabData),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = avfilter_vf_vidstabdetect_inputs,
    .outputs       = avfilter_vf_vidstabdetect_outputs,
    .priv_class    = &vidstabdetect_class,
};