/*
 * Copyright (c) 2019 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/imgutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "framesync.h"

#define OFFSET(x) offsetof(MaskedMinMaxContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

typedef struct ThreadData {
    AVFrame *src, *f1, *f2, *dst;
} ThreadData;

typedef struct MaskedMinMaxContext {
    const AVClass *class;

    int planes;
    int maskedmin;

    int linesize[4];
    int planewidth[4], planeheight[4];
    int nb_planes;
    int depth;
    FFFrameSync fs;

    void (*maskedminmax)(const uint8_t *src, uint8_t *dst, const uint8_t *f1, const uint8_t *f2, int w);
} MaskedMinMaxContext;

static const AVOption maskedminmax_options[] = {
    { "planes", "set planes", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=0xF}, 0, 0xF, FLAGS },
    { NULL }
};

static av_cold int maskedmin_init(AVFilterContext *ctx)
{
    MaskedMinMaxContext *s = ctx->priv;

    s->maskedmin = 1;

    return 0;
}

static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
    AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
    AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
    AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
    AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
    AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
    AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
    AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
    AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
    AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
    AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA444P12,
    AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
    AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
    AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
    AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
    AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
    AV_PIX_FMT_GRAYF32, AV_PIX_FMT_GBRPF32, AV_PIX_FMT_GBRAPF32,
    AV_PIX_FMT_NONE
};

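/* Per-row kernels generated below: for each pixel, emit whichever filtered
 * value (f1 or f2) has the smaller (maskedmin) or larger (maskedmax)
 * absolute difference from the source sample. */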
#define MASKED(n, type, op)                                               \
static void masked##n(const uint8_t *ssrc, uint8_t *ddst,                 \
                      const uint8_t *ff1,                                 \
                      const uint8_t *ff2, int w)                          \
{                                                                         \
    const type *src = (const type *)ssrc;                                 \
    const type *f1 = (const type *)ff1;                                   \
    const type *f2 = (const type *)ff2;                                   \
    type *dst = (type *)ddst;                                             \
                                                                          \
    for (int x = 0; x < w; x++)                                           \
        dst[x] = FFABS(src[x] - f2[x]) op FFABS(src[x] - f1[x]) ? f2[x] : f1[x]; \
}

MASKED(min8,  uint8_t,  <)
MASKED(max8,  uint8_t,  >)
MASKED(min16, uint16_t, <)
MASKED(max16, uint16_t, >)
MASKED(min32, float,    <)
MASKED(max32, float,    >)

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    MaskedMinMaxContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int vsub, hsub, ret;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
        return ret;

    hsub = desc->log2_chroma_w;
    vsub = desc->log2_chroma_h;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->planewidth[1]  = s->planewidth[2]  = AV_CEIL_RSHIFT(inlink->w, hsub);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;

    s->depth = desc->comp[0].depth;

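    /* Dispatch on sample format: 8-bit, 9-16 bit, or 32-bit float planes. */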
    if (desc->comp[0].depth == 8)
        s->maskedminmax = s->maskedmin ? maskedmin8 : maskedmax8;
    else if (desc->comp[0].depth <= 16)
        s->maskedminmax = s->maskedmin ? maskedmin16 : maskedmax16;
    else
        s->maskedminmax = s->maskedmin ? maskedmin32 : maskedmax32;

    return 0;
}

static int maskedminmax_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    MaskedMinMaxContext *s = ctx->priv;
    ThreadData *td = arg;

    for (int p = 0; p < s->nb_planes; p++) {
        const ptrdiff_t src_linesize = td->src->linesize[p];
        const ptrdiff_t f1_linesize  = td->f1->linesize[p];
        const ptrdiff_t f2_linesize  = td->f2->linesize[p];
        const ptrdiff_t dst_linesize = td->dst->linesize[p];
        const int w = s->planewidth[p];
        const int h = s->planeheight[p];
        const int slice_start = (h * jobnr) / nb_jobs;
        const int slice_end   = (h * (jobnr+1)) / nb_jobs;
        const uint8_t *src = td->src->data[p] + slice_start * src_linesize;
        const uint8_t *f1  = td->f1->data[p]  + slice_start * f1_linesize;
        const uint8_t *f2  = td->f2->data[p]  + slice_start * f2_linesize;
        uint8_t *dst = td->dst->data[p] + slice_start * dst_linesize;

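        /* Planes not selected via the "planes" option are copied through unchanged. */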
        if (!((1 << p) & s->planes)) {
            av_image_copy_plane(dst, dst_linesize, src, src_linesize,
                                s->linesize[p], slice_end - slice_start);
            continue;
        }

        for (int y = slice_start; y < slice_end; y++) {
            s->maskedminmax(src, dst, f1, f2, w);

            dst += dst_linesize;
            src += src_linesize;
            f1  += f1_linesize;
            f2  += f2_linesize;
        }
    }

    return 0;
}

static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    MaskedMinMaxContext *s = fs->opaque;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *src, *f1, *f2;
    int ret;

    if ((ret = ff_framesync_get_frame(&s->fs, 0, &src, 0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 1, &f1,  0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 2, &f2,  0)) < 0)
        return ret;

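    /* If the filter is disabled (timeline/enable), pass the source frame through untouched. */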
    if (ctx->is_disabled) {
        out = av_frame_clone(src);
        if (!out)
            return AVERROR(ENOMEM);
    } else {
        ThreadData td;

        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, src);

        td.src = src;
        td.f1  = f1;
        td.f2  = f2;
        td.dst = out;

        ff_filter_execute(ctx, maskedminmax_slice, &td, NULL,
                          FFMIN(s->planeheight[0], ff_filter_get_nb_threads(ctx)));
    }
    out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    return ff_filter_frame(outlink, out);
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MaskedMinMaxContext *s = ctx->priv;
    AVFilterLink *source = ctx->inputs[0];
    AVFilterLink *f1 = ctx->inputs[1];
    AVFilterLink *f2 = ctx->inputs[2];
    FFFrameSyncIn *in;
    int ret;

    if (source->w != f1->w || source->h != f1->h ||
        source->w != f2->w || source->h != f2->h) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
               "(size %dx%d) do not match the corresponding "
               "second input link %s parameters (%dx%d) "
               "and/or third input link %s parameters (size %dx%d)\n",
               ctx->input_pads[0].name, source->w, source->h,
               ctx->input_pads[1].name, f1->w, f1->h,
               ctx->input_pads[2].name, f2->w, f2->h);
        return AVERROR(EINVAL);
    }

    outlink->w = source->w;
    outlink->h = source->h;
    outlink->sample_aspect_ratio = source->sample_aspect_ratio;
    outlink->frame_rate = source->frame_rate;

    if ((ret = ff_framesync_init(&s->fs, ctx, 3)) < 0)
        return ret;

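    /* All three inputs are hard-synced: output starts once every input has a
     * frame (EXT_STOP) and an input that ends keeps repeating its last frame
     * (EXT_INFINITY). */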
    in = s->fs.in;
    in[0].time_base = source->time_base;
    in[1].time_base = f1->time_base;
    in[2].time_base = f2->time_base;
    in[0].sync   = 1;
    in[0].before = EXT_STOP;
    in[0].after  = EXT_INFINITY;
    in[1].sync   = 1;
    in[1].before = EXT_STOP;
    in[1].after  = EXT_INFINITY;
    in[2].sync   = 1;
    in[2].before = EXT_STOP;
    in[2].after  = EXT_INFINITY;
    s->fs.opaque   = s;
    s->fs.on_event = process_frame;

    ret = ff_framesync_configure(&s->fs);
    outlink->time_base = s->fs.time_base;

    return ret;
}

static int activate(AVFilterContext *ctx)
{
    MaskedMinMaxContext *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    MaskedMinMaxContext *s = ctx->priv;

    ff_framesync_uninit(&s->fs);
}

static const AVFilterPad maskedminmax_inputs[] = {
    {
        .name         = "source",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
    },
    {
        .name         = "filter1",
        .type         = AVMEDIA_TYPE_VIDEO,
    },
    {
        .name         = "filter2",
        .type         = AVMEDIA_TYPE_VIDEO,
    },
};

static const AVFilterPad maskedminmax_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
};

AVFILTER_DEFINE_CLASS_EXT(maskedminmax, "masked(min|max)", maskedminmax_options);

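/* maskedmin and maskedmax share the class, options and pads above; only
 * maskedmin sets .init, so maskedmax keeps the zero-initialized
 * maskedmin == 0 and picks the "max" kernels in config_input(). */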
const AVFilter ff_vf_maskedmin = {
    .name            = "maskedmin",
    .description     = NULL_IF_CONFIG_SMALL("Apply filtering with minimum difference of two streams."),
    .priv_class      = &maskedminmax_class,
    .priv_size       = sizeof(MaskedMinMaxContext),
    .init            = maskedmin_init,
    .uninit          = uninit,
    .activate        = activate,
    FILTER_INPUTS(maskedminmax_inputs),
    FILTER_OUTPUTS(maskedminmax_outputs),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
    .process_command = ff_filter_process_command,
};

const AVFilter ff_vf_maskedmax = {
    .name            = "maskedmax",
    .description     = NULL_IF_CONFIG_SMALL("Apply filtering with maximum difference of two streams."),
    .priv_class      = &maskedminmax_class,
    .priv_size       = sizeof(MaskedMinMaxContext),
    .uninit          = uninit,
    .activate        = activate,
    FILTER_INPUTS(maskedminmax_inputs),
    FILTER_OUTPUTS(maskedminmax_outputs),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
    .process_command = ff_filter_process_command,
};