/*
 * Copyright (c) 2017 Ronald S. Bultje <rsbultje@gmail.com>
 * Copyright (c) 2017 Ashish Pratap Singh <ashk43712@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Calculate the VMAF between two input videos.
 */

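/*
 * Typical command-line usage (a sketch; assumes FFmpeg was configured with
 * --enable-libvmaf, with hypothetical file names, and the distorted clip
 * given first and the reference second, matching the pad order below):
 *
 *     ffmpeg -i distorted.mp4 -i reference.mp4 -lavfi libvmaf -f null -
 *
 * The pooled VMAF score is printed to the log once all frames are processed.
 */
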
#include <pthread.h>
#include <libvmaf.h>
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "framesync.h"
#include "internal.h"
#include "video.h"

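/*
 * Threading model: the filter thread produces frame pairs (gmain/gref) under
 * lock, while a dedicated worker thread runs libvmaf's compute_vmaf() and
 * consumes them through the read_frame callbacks below. cond/lock implement
 * the hand-off; eof tells the worker that no more frames will arrive.
 */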
typedef struct LIBVMAFContext {
    const AVClass *class;
    FFFrameSync fs;
    const AVPixFmtDescriptor *desc;
    int width;
    int height;
    double vmaf_score;
    int vmaf_thread_created;
    pthread_t vmaf_thread;
    pthread_mutex_t lock;
    pthread_cond_t cond;
    int eof;
    AVFrame *gmain;
    AVFrame *gref;
    int frame_set;
    char *model_path;
    char *log_path;
    char *log_fmt;
    int disable_clip;
    int disable_avx;
    int enable_transform;
    int phone_model;
    int psnr;
    int ssim;
    int ms_ssim;
    char *pool;
    int n_threads;
    int n_subsample;
    int enable_conf_interval;
    int error;
} LIBVMAFContext;

#define OFFSET(x) offsetof(LIBVMAFContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption libvmaf_options[] = {
    {"model_path", "Set the model to be used for computing vmaf.", OFFSET(model_path), AV_OPT_TYPE_STRING, {.str="/usr/local/share/model/vmaf_v0.6.1.pkl"}, 0, 1, FLAGS},
    {"log_path", "Set the file path to be used to store logs.", OFFSET(log_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
    {"log_fmt", "Set the format of the log (csv, json or xml).", OFFSET(log_fmt), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
    {"enable_transform", "Enables transform for computing vmaf.", OFFSET(enable_transform), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"phone_model", "Invokes the phone model that will generate higher VMAF scores.", OFFSET(phone_model), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"psnr", "Enables computing psnr along with vmaf.", OFFSET(psnr), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"ssim", "Enables computing ssim along with vmaf.", OFFSET(ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"ms_ssim", "Enables computing ms-ssim along with vmaf.", OFFSET(ms_ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"pool", "Set the pool method to be used for computing vmaf.", OFFSET(pool), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
    {"n_threads", "Set number of threads to be used when computing vmaf.", OFFSET(n_threads), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT_MAX, FLAGS},
    {"n_subsample", "Set interval for frame subsampling used when computing vmaf.", OFFSET(n_subsample), AV_OPT_TYPE_INT, {.i64=1}, 1, UINT_MAX, FLAGS},
    {"enable_conf_interval", "Enables confidence interval.", OFFSET(enable_conf_interval), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    { NULL }
};

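/*
 * Example filtergraph string using the options above (illustrative values;
 * the log path is hypothetical):
 *
 *     libvmaf=psnr=1:log_fmt=json:log_path=/tmp/vmaf.json
 */
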
FRAMESYNC_DEFINE_CLASS(libvmaf, LIBVMAFContext, fs);

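/*
 * Frame-read callbacks handed to libvmaf. They run on the VMAF worker thread:
 * each call blocks until the filter thread has published a frame pair (or
 * signalled EOF), converts the luma plane of both frames to float, clears the
 * pending pair, and returns 2 once there is nothing left to read.
 */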
#define read_frame_fn(type, bits) \
static int read_frame_##bits##bit(float *ref_data, float *main_data, \
                                  float *temp_data, int stride, void *ctx) \
{ \
    LIBVMAFContext *s = (LIBVMAFContext *) ctx; \
    int ret; \
\
    pthread_mutex_lock(&s->lock); \
\
    while (!s->frame_set && !s->eof) { \
        pthread_cond_wait(&s->cond, &s->lock); \
    } \
\
    if (s->frame_set) { \
        int ref_stride = s->gref->linesize[0]; \
        int main_stride = s->gmain->linesize[0]; \
\
        const type *ref_ptr = (const type *) s->gref->data[0]; \
        const type *main_ptr = (const type *) s->gmain->data[0]; \
\
        float *ptr = ref_data; \
        float factor = 1.f / (1 << (bits - 8)); \
\
        int h = s->height; \
        int w = s->width; \
\
        int i, j; \
\
        for (i = 0; i < h; i++) { \
            for (j = 0; j < w; j++) { \
                ptr[j] = ref_ptr[j] * factor; \
            } \
            ref_ptr += ref_stride / sizeof(*ref_ptr); \
            ptr += stride / sizeof(*ptr); \
        } \
\
        ptr = main_data; \
\
        for (i = 0; i < h; i++) { \
            for (j = 0; j < w; j++) { \
                ptr[j] = main_ptr[j] * factor; \
            } \
            main_ptr += main_stride / sizeof(*main_ptr); \
            ptr += stride / sizeof(*ptr); \
        } \
    } \
\
    ret = !s->frame_set; \
\
    av_frame_unref(s->gref); \
    av_frame_unref(s->gmain); \
    s->frame_set = 0; \
\
    pthread_cond_signal(&s->cond); \
    pthread_mutex_unlock(&s->lock); \
\
    if (ret) { \
        return 2; \
    } \
\
    return 0; \
}

read_frame_fn(uint8_t, 8);
read_frame_fn(uint16_t, 10);

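/*
 * Runs on the VMAF worker thread: picks the 8- or 10-bit reader to match the
 * input bit depth and calls libvmaf's compute_vmaf(), which pulls frames via
 * the callback until it reports EOF and then fills in the pooled score.
 */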
static void compute_vmaf_score(LIBVMAFContext *s)
{
    int (*read_frame)(float *ref_data, float *main_data, float *temp_data,
                      int stride, void *ctx);
    char *format;

    if (s->desc->comp[0].depth <= 8) {
        read_frame = read_frame_8bit;
    } else {
        read_frame = read_frame_10bit;
    }

    format = (char *) s->desc->name;

    s->error = compute_vmaf(&s->vmaf_score, format, s->width, s->height,
                            read_frame, s, s->model_path, s->log_path,
                            s->log_fmt, 0, 0, s->enable_transform,
                            s->phone_model, s->psnr, s->ssim,
                            s->ms_ssim, s->pool,
                            s->n_threads, s->n_subsample, s->enable_conf_interval);
}

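/*
 * Worker-thread entry point: computes the score, logs it on success, and on
 * failure wakes the filter thread so it can detect s->error and bail out.
 */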
static void *call_vmaf(void *ctx)
{
    LIBVMAFContext *s = (LIBVMAFContext *) ctx;
    compute_vmaf_score(s);
    if (!s->error) {
        av_log(ctx, AV_LOG_INFO, "VMAF score: %f\n", s->vmaf_score);
    } else {
        pthread_mutex_lock(&s->lock);
        pthread_cond_signal(&s->cond);
        pthread_mutex_unlock(&s->lock);
    }
    pthread_exit(NULL);
    return NULL;
}

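/*
 * framesync callback, invoked on the filter thread for each synchronized
 * frame pair: waits until the worker has consumed the previous pair, then
 * publishes references to both frames and passes the main frame downstream.
 */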
static int do_vmaf(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    LIBVMAFContext *s = ctx->priv;
    AVFrame *master, *ref;
    int ret;

    ret = ff_framesync_dualinput_get(fs, &master, &ref);
    if (ret < 0)
        return ret;
    if (!ref)
        return ff_filter_frame(ctx->outputs[0], master);

    pthread_mutex_lock(&s->lock);

    while (s->frame_set && !s->error) {
        pthread_cond_wait(&s->cond, &s->lock);
    }

    if (s->error) {
        av_log(ctx, AV_LOG_ERROR,
               "libvmaf encountered an error, check log for details\n");
        pthread_mutex_unlock(&s->lock);
        return AVERROR(EINVAL);
    }

    av_frame_ref(s->gref, ref);
    av_frame_ref(s->gmain, master);

    s->frame_set = 1;

    pthread_cond_signal(&s->cond);
    pthread_mutex_unlock(&s->lock);

    return ff_filter_frame(ctx->outputs[0], master);
}

static av_cold int init(AVFilterContext *ctx)
{
    LIBVMAFContext *s = ctx->priv;

    s->gref = av_frame_alloc();
    s->gmain = av_frame_alloc();
    if (!s->gref || !s->gmain)
        return AVERROR(ENOMEM);

    s->error = 0;

    s->vmaf_thread_created = 0;
    pthread_mutex_init(&s->lock, NULL);
    pthread_cond_init(&s->cond, NULL);

    s->fs.on_event = do_vmaf;
    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUV444P10LE, AV_PIX_FMT_YUV422P10LE, AV_PIX_FMT_YUV420P10LE,
        AV_PIX_FMT_NONE
    };

    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

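/*
 * Configuration of the second ("reference") input: both inputs must match in
 * size and pixel format; once that is known, the VMAF worker thread is
 * started.
 */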
static int config_input_ref(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LIBVMAFContext *s = ctx->priv;
    int th;

    if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
        ctx->inputs[0]->h != ctx->inputs[1]->h) {
        av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be the same.\n");
        return AVERROR(EINVAL);
    }
    if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
        av_log(ctx, AV_LOG_ERROR, "Inputs must have the same pixel format.\n");
        return AVERROR(EINVAL);
    }

    s->desc = av_pix_fmt_desc_get(inlink->format);
    s->width = ctx->inputs[0]->w;
    s->height = ctx->inputs[0]->h;

    th = pthread_create(&s->vmaf_thread, NULL, call_vmaf, (void *) s);
    if (th) {
        av_log(ctx, AV_LOG_ERROR, "Thread creation failed.\n");
        return AVERROR(EINVAL);
    }
    s->vmaf_thread_created = 1;

    return 0;
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LIBVMAFContext *s = ctx->priv;
    AVFilterLink *mainlink = ctx->inputs[0];
    int ret;

    ret = ff_framesync_init_dualinput(&s->fs, ctx);
    if (ret < 0)
        return ret;
    outlink->w = mainlink->w;
    outlink->h = mainlink->h;
    outlink->time_base = mainlink->time_base;
    outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
    outlink->frame_rate = mainlink->frame_rate;
    if ((ret = ff_framesync_configure(&s->fs)) < 0)
        return ret;

    return 0;
}

static int activate(AVFilterContext *ctx)
{
    LIBVMAFContext *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}

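/*
 * Teardown: signal EOF so a reader blocked in the callback wakes up and
 * returns, join the worker thread, then release frames and sync primitives.
 */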
static av_cold void uninit(AVFilterContext *ctx)
{
    LIBVMAFContext *s = ctx->priv;

    ff_framesync_uninit(&s->fs);

    pthread_mutex_lock(&s->lock);
    s->eof = 1;
    pthread_cond_signal(&s->cond);
    pthread_mutex_unlock(&s->lock);

    if (s->vmaf_thread_created) {
        pthread_join(s->vmaf_thread, NULL);
        s->vmaf_thread_created = 0;
    }

    av_frame_free(&s->gref);
    av_frame_free(&s->gmain);

    pthread_mutex_destroy(&s->lock);
    pthread_cond_destroy(&s->cond);
}

static const AVFilterPad libvmaf_inputs[] = {
    {
        .name         = "main",
        .type         = AVMEDIA_TYPE_VIDEO,
    },{
        .name         = "reference",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input_ref,
    },
    { NULL }
};

static const AVFilterPad libvmaf_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
    { NULL }
};

AVFilter ff_vf_libvmaf = {
    .name          = "libvmaf",
    .description   = NULL_IF_CONFIG_SMALL("Calculate the VMAF between two video streams."),
    .preinit       = libvmaf_framesync_preinit,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .activate      = activate,
    .priv_size     = sizeof(LIBVMAFContext),
    .priv_class    = &libvmaf_class,
    .inputs        = libvmaf_inputs,
    .outputs       = libvmaf_outputs,
};