/*
 * Copyright (c) 2003 Daniel Moreno <comac AT comac DOT darktech DOT org>
 * Copyright (c) 2010 Baptiste Coudurier
 * Copyright (c) 2012 Loren Merritt
 *
 * This file is part of FFmpeg, ported from MPlayer.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * high quality 3d video denoiser, ported from MPlayer
 * libmpcodecs/vf_hqdn3d.c.
 */

#include <float.h>

#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/pixdesc.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "vf_hqdn3d.h"

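/*
 * All arithmetic below is done on 16-bit samples regardless of the input bit
 * depth: LOAD() widens a source sample to 16 bits and adds a bias of roughly
 * half a quantization step so that the final right shift in STORE() rounds to
 * nearest instead of truncating, and STORE() narrows the filtered value back
 * to the original depth. LUT_BITS sets how coarsely pixel differences are
 * quantized when indexing the coefficient LUTs.
 */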
#define LUT_BITS (depth==16 ? 8 : 4)
#define LOAD(x) (((depth == 8 ? src[x] : AV_RN16A(src + (x) * 2)) << (16 - depth))\
                 + (((1 << (16 - depth)) - 1) >> 1))
#define STORE(x,val) (depth == 8 ? dst[x] = (val) >> (16 - depth) : \
                      AV_WN16A(dst + (x) * 2, (val) >> (16 - depth)))

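/*
 * One step of the recursive low-pass filter: the current sample is pulled
 * towards the reference sample by a difference-dependent amount, i.e. roughly
 * cur + weight(prev - cur) * (prev - cur), with the products precomputed per
 * quantized difference in the coef[] table.
 */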
av_always_inline
static uint32_t lowpass(int prev, int cur, int16_t *coef, int depth)
{
    int d = (prev - cur) >> (8 - LUT_BITS);
    return cur + coef[d];
}

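/*
 * Temporal-only path (used when the spatial strength is zero): each pixel is
 * low-passed against the corresponding pixel of the previously filtered
 * frame, which is kept in frame_ant and updated in place.
 */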
av_always_inline
static void denoise_temporal(uint8_t *src, uint8_t *dst,
                             uint16_t *frame_ant,
                             int w, int h, int sstride, int dstride,
                             int16_t *temporal, int depth)
{
    long x, y;
    uint32_t tmp;

    temporal += 256 << LUT_BITS;

    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {
            frame_ant[x] = tmp = lowpass(frame_ant[x], LOAD(x), temporal, depth);
            STORE(x, tmp);
        }
        src += sstride;
        dst += dstride;
        frame_ant += w;
    }
}

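/*
 * Combined spatial + temporal pass. pixel_ant carries a running low-pass
 * along the current row (left neighbor), line_ant holds the result of the
 * previous row (top neighbor), and frame_ant finally blends in the previous
 * output frame. If an architecture-specific row kernel is available
 * (s->denoise_row), it replaces the scalar inner loop for rows after the
 * first.
 */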
av_always_inline
static void denoise_spatial(HQDN3DContext *s,
                            uint8_t *src, uint8_t *dst,
                            uint16_t *line_ant, uint16_t *frame_ant,
                            int w, int h, int sstride, int dstride,
                            int16_t *spatial, int16_t *temporal, int depth)
{
    long x, y;
    uint32_t pixel_ant;
    uint32_t tmp;

    spatial  += 256 << LUT_BITS;
    temporal += 256 << LUT_BITS;

    /* The first line has no top neighbor; only the left neighbor and the
     * previous frame are available. */
    pixel_ant = LOAD(0);
    for (x = 0; x < w; x++) {
        line_ant[x] = tmp = pixel_ant = lowpass(pixel_ant, LOAD(x), spatial, depth);
        frame_ant[x] = tmp = lowpass(frame_ant[x], tmp, temporal, depth);
        STORE(x, tmp);
    }

    for (y = 1; y < h; y++) {
        src += sstride;
        dst += dstride;
        frame_ant += w;
        if (s->denoise_row[depth]) {
            s->denoise_row[depth](src, dst, line_ant, frame_ant, w, spatial, temporal);
            continue;
        }
        pixel_ant = LOAD(0);
        for (x = 0; x < w-1; x++) {
            line_ant[x] = tmp = lowpass(line_ant[x], pixel_ant, spatial, depth);
            pixel_ant = lowpass(pixel_ant, LOAD(x+1), spatial, depth);
            frame_ant[x] = tmp = lowpass(frame_ant[x], tmp, temporal, depth);
            STORE(x, tmp);
        }
        line_ant[x] = tmp = lowpass(line_ant[x], pixel_ant, spatial, depth);
        frame_ant[x] = tmp = lowpass(frame_ant[x], tmp, temporal, depth);
        STORE(x, tmp);
    }
}

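/*
 * Per-plane entry point. On the first frame the temporal reference buffer is
 * allocated and seeded with the unfiltered input; afterwards the plane is run
 * through either the spatial+temporal or the temporal-only routine, depending
 * on whether a spatial strength was configured.
 */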
av_always_inline
static int denoise_depth(HQDN3DContext *s,
                         uint8_t *src, uint8_t *dst,
                         uint16_t *line_ant, uint16_t **frame_ant_ptr,
                         int w, int h, int sstride, int dstride,
                         int16_t *spatial, int16_t *temporal, int depth)
{
    // FIXME: For 16-bit depth, frame_ant could be a pointer to the previous
    // filtered frame rather than a separate buffer.
    long x, y;
    uint16_t *frame_ant = *frame_ant_ptr;
    if (!frame_ant) {
        uint8_t *frame_src = src;
        *frame_ant_ptr = frame_ant = av_malloc_array(w, h*sizeof(uint16_t));
        if (!frame_ant)
            return AVERROR(ENOMEM);
        for (y = 0; y < h; y++, src += sstride, frame_ant += w)
            for (x = 0; x < w; x++)
                frame_ant[x] = LOAD(x);
        src = frame_src;
        frame_ant = *frame_ant_ptr;
    }

    if (spatial[0])
        denoise_spatial(s, src, dst, line_ant, frame_ant,
                        w, h, sstride, dstride, spatial, temporal, depth);
    else
        denoise_temporal(src, dst, frame_ant,
                         w, h, sstride, dstride, temporal, depth);
    emms_c();
    return 0;
}

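/*
 * Dispatch on the bit depth so denoise_depth() is instantiated with a
 * compile-time constant, letting LOAD/STORE/LUT_BITS fold away. On failure
 * the macro frees the output frame (and the input frame unless filtering in
 * place) and returns the error from the enclosing function.
 */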
#define denoise(...)                                                          \
    do {                                                                      \
        int ret = AVERROR_BUG;                                                \
        switch (s->depth) {                                                   \
            case  8: ret = denoise_depth(__VA_ARGS__,  8); break;             \
            case  9: ret = denoise_depth(__VA_ARGS__,  9); break;             \
            case 10: ret = denoise_depth(__VA_ARGS__, 10); break;             \
            case 12: ret = denoise_depth(__VA_ARGS__, 12); break;             \
            case 14: ret = denoise_depth(__VA_ARGS__, 14); break;             \
            case 16: ret = denoise_depth(__VA_ARGS__, 16); break;             \
        }                                                                     \
        if (ret < 0) {                                                        \
            av_frame_free(&out);                                              \
            if (!direct)                                                      \
                av_frame_free(&in);                                           \
            return ret;                                                       \
        }                                                                     \
    } while (0)

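/*
 * Build one coefficient table per strength value. Each entry approximates
 * weight * diff for one quantized pixel difference, where
 * weight = (1 - |diff|/255)^gamma (diff taken on the 8-bit scale) and gamma
 * is chosen so that the weight drops to 0.25 when the difference equals the
 * user-supplied strength, hence the parameter name "dist25". ct[0] doubles
 * as a flag telling denoise_depth() whether this pass is enabled at all.
 */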
static void precalc_coefs(double dist25, int depth, int16_t *ct)
{
    int i;
    double gamma, simil, C;

    gamma = log(0.25) / log(1.0 - FFMIN(dist25,252.0)/255.0 - 0.00001);

    for (i = -256<<LUT_BITS; i < 256<<LUT_BITS; i++) {
        double f = ((i<<(9-LUT_BITS)) + (1<<(8-LUT_BITS)) - 1) / 512.0; // midpoint of the bin
        simil = FFMAX(0, 1.0 - fabs(f) / 255.0);
        C = pow(simil, gamma) * 256.0 * f;
        ct[(256<<LUT_BITS)+i] = lrint(C);
    }

    ct[0] = !!dist25;
}

#define PARAM1_DEFAULT 4.0
#define PARAM2_DEFAULT 3.0
#define PARAM3_DEFAULT 6.0

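/*
 * Strengths left at 0 are derived from the ones that were given, following
 * the defaults of the original MPlayer filter: luma spatial defaults to 4,
 * chroma spatial and luma temporal scale with it, and chroma temporal scales
 * with both.
 */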
static av_cold int init(AVFilterContext *ctx)
{
    HQDN3DContext *s = ctx->priv;

    if (!s->strength[LUMA_SPATIAL])
        s->strength[LUMA_SPATIAL] = PARAM1_DEFAULT;
    if (!s->strength[CHROMA_SPATIAL])
        s->strength[CHROMA_SPATIAL] = PARAM2_DEFAULT * s->strength[LUMA_SPATIAL] / PARAM1_DEFAULT;
    if (!s->strength[LUMA_TMP])
        s->strength[LUMA_TMP] = PARAM3_DEFAULT * s->strength[LUMA_SPATIAL] / PARAM1_DEFAULT;
    if (!s->strength[CHROMA_TMP])
        s->strength[CHROMA_TMP] = s->strength[LUMA_TMP] * s->strength[CHROMA_SPATIAL] / s->strength[LUMA_SPATIAL];

    av_log(ctx, AV_LOG_VERBOSE, "ls:%f cs:%f lt:%f ct:%f\n",
           s->strength[LUMA_SPATIAL], s->strength[CHROMA_SPATIAL],
           s->strength[LUMA_TMP], s->strength[CHROMA_TMP]);

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    HQDN3DContext *s = ctx->priv;

    av_freep(&s->coefs[0]);
    av_freep(&s->coefs[1]);
    av_freep(&s->coefs[2]);
    av_freep(&s->coefs[3]);
    av_freep(&s->line[0]);
    av_freep(&s->line[1]);
    av_freep(&s->line[2]);
    av_freep(&s->frame_prev[0]);
    av_freep(&s->frame_prev[1]);
    av_freep(&s->frame_prev[2]);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV420P,  AV_PIX_FMT_YUV422P,  AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUV410P,  AV_PIX_FMT_YUV411P,  AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUV420P9,  AV_PIX_FMT_YUV422P9,  AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV440P10,
        AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12,
        AV_PIX_FMT_YUV440P12,
        AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
        AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

static void calc_coefs(AVFilterContext *ctx)
{
    HQDN3DContext *s = ctx->priv;

    for (int i = 0; i < 4; i++)
        precalc_coefs(s->strength[i], s->depth, s->coefs[i]);
}

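/*
 * (Re)allocate the per-format state: one 16-bit line buffer per plane and one
 * coefficient table of 512<<LUT_BITS entries per strength, then precompute
 * the tables and hook up the x86 SIMD row kernels where available.
 */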
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    HQDN3DContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int i, depth;

    uninit(inlink->dst);

    s->hsub  = desc->log2_chroma_w;
    s->vsub  = desc->log2_chroma_h;
    s->depth = depth = desc->comp[0].depth;

    for (i = 0; i < 3; i++) {
        s->line[i] = av_malloc_array(inlink->w, sizeof(*s->line[i]));
        if (!s->line[i])
            return AVERROR(ENOMEM);
    }

    for (i = 0; i < 4; i++) {
        s->coefs[i] = av_malloc((512<<LUT_BITS) * sizeof(int16_t));
        if (!s->coefs[i])
            return AVERROR(ENOMEM);
    }

    calc_coefs(ctx);

    if (ARCH_X86)
        ff_hqdn3d_init_x86(s);

    return 0;
}

typedef struct ThreadData {
    AVFrame *in, *out;
    int direct;
} ThreadData;

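/*
 * Slice-threading job, one per plane: job_nr selects the plane (0 = luma,
 * 1/2 = chroma), so chroma jobs use the subsampled dimensions, the chroma
 * coefficient tables and their own temporal reference in s->frame_prev[].
 */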
static int do_denoise(AVFilterContext *ctx, void *data, int job_nr, int n_jobs)
{
    HQDN3DContext *s = ctx->priv;
    const ThreadData *td = data;
    AVFrame *out = td->out;
    AVFrame *in = td->in;
    int direct = td->direct;

    denoise(s, in->data[job_nr], out->data[job_nr],
            s->line[job_nr], &s->frame_prev[job_nr],
            AV_CEIL_RSHIFT(in->width,  (!!job_nr * s->hsub)),
            AV_CEIL_RSHIFT(in->height, (!!job_nr * s->vsub)),
            in->linesize[job_nr], out->linesize[job_nr],
            s->coefs[job_nr ? CHROMA_SPATIAL : LUMA_SPATIAL],
            s->coefs[job_nr ? CHROMA_TMP : LUMA_TMP]);

    return 0;
}

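/*
 * Filter in place when the input frame is writable, otherwise into a fresh
 * output buffer. The planes are still filtered while the filter is
 * timeline-disabled and the result discarded, presumably to keep the temporal
 * reference frames in sync with the stream.
 */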
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx  = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];

    AVFrame *out;
    int direct = av_frame_is_writable(in) && !ctx->is_disabled;
    ThreadData td;

    if (direct) {
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(out, in);
    }

    td.in = in;
    td.out = out;
    td.direct = direct;
    /* one thread per plane */
    ctx->internal->execute(ctx, do_denoise, &td, NULL, 3);

    if (ctx->is_disabled) {
        av_frame_free(&out);
        return ff_filter_frame(outlink, in);
    }

    if (!direct)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}

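/*
 * Runtime option changes (AV_OPT_FLAG_RUNTIME_PARAM) only require the
 * coefficient tables to be rebuilt; the per-plane buffers are left untouched.
 */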
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    int ret;

    ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
    if (ret < 0)
        return ret;

    calc_coefs(ctx);

    return 0;
}

#define OFFSET(x) offsetof(HQDN3DContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_RUNTIME_PARAM
static const AVOption hqdn3d_options[] = {
    { "luma_spatial",   "spatial luma strength",    OFFSET(strength[LUMA_SPATIAL]),   AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
    { "chroma_spatial", "spatial chroma strength",  OFFSET(strength[CHROMA_SPATIAL]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
    { "luma_tmp",       "temporal luma strength",   OFFSET(strength[LUMA_TMP]),       AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
    { "chroma_tmp",     "temporal chroma strength", OFFSET(strength[CHROMA_TMP]),     AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(hqdn3d);

static const AVFilterPad avfilter_vf_hqdn3d_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_hqdn3d_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO
    },
    { NULL }
};

AVFilter ff_vf_hqdn3d = {
    .name            = "hqdn3d",
    .description     = NULL_IF_CONFIG_SMALL("Apply a High Quality 3D Denoiser."),
    .priv_size       = sizeof(HQDN3DContext),
    .priv_class      = &hqdn3d_class,
    .init            = init,
    .uninit          = uninit,
    .query_formats   = query_formats,
    .inputs          = avfilter_vf_hqdn3d_inputs,
    .outputs         = avfilter_vf_hqdn3d_outputs,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
    .process_command = process_command,
};