1 /*
2 * Copyright (c) 2017 Ronald S. Bultje <rsbultje@gmail.com>
3 * Copyright (c) 2017 Ashish Pratap Singh <ashk43712@gmail.com>
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 /**
23 * @file
24 * Calculate the VMAF between two input videos.
25 */
26
27 #include <libvmaf.h>
28
29 #include "libavutil/avstring.h"
30 #include "libavutil/opt.h"
31 #include "libavutil/pixdesc.h"
32 #include "avfilter.h"
33 #include "drawutils.h"
34 #include "formats.h"
35 #include "framesync.h"
36 #include "internal.h"
37 #include "video.h"
38
/**
 * Private context for the libvmaf filter.
 */
typedef struct LIBVMAFContext {
    const AVClass *class;
    FFFrameSync fs;            ///< synchronizes the distorted (main) and reference inputs
    char *model_path;          ///< deprecated, use model='path=...'
    char *log_path;            ///< file path the per-frame log is written to
    char *log_fmt;             ///< log format: csv, json, xml or sub
    int enable_transform;      ///< deprecated, use model='enable_transform=true'
    int phone_model;           ///< deprecated, use model='enable_transform=true'
    int psnr;                  ///< deprecated, use feature='name=psnr'
    int ssim;                  ///< deprecated, use feature='name=ssim'
    int ms_ssim;               ///< deprecated, use feature='name=ms_ssim'
    char *pool;                ///< pooling method for the aggregate score (min/mean/harmonic_mean)
    int n_threads;             ///< number of libvmaf worker threads
    int n_subsample;           ///< frame subsampling interval passed to libvmaf
    int enable_conf_interval;  ///< deprecated, use model='enable_conf_interval=true'
    char *model_cfg;           ///< '|'-delimited model configuration string ("model" option)
    char *feature_cfg;         ///< '|'-delimited feature configuration string ("feature" option)
    VmafContext *vmaf;         ///< libvmaf context handle
    VmafModel **model;         ///< loaded models, model_cnt entries
    unsigned model_cnt;        ///< number of entries in model
    unsigned frame_cnt;        ///< number of frame pairs fed to libvmaf so far
    unsigned bpc;              ///< bits per component of the input pixel format
} LIBVMAFContext;
62
#define OFFSET(x) offsetof(LIBVMAFContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* Filter options. Entries carrying AV_OPT_FLAG_DEPRECATED are kept for
 * backwards compatibility; their help text points at the "model"/"feature"
 * string options that replace them. */
static const AVOption libvmaf_options[] = {
    {"model_path",  "use model='path=...'.",                                                     OFFSET(model_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
    {"log_path",  "Set the file path to be used to write log.",                                  OFFSET(log_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
    {"log_fmt",  "Set the format of the log (csv, json, xml, or sub).",                          OFFSET(log_fmt), AV_OPT_TYPE_STRING, {.str="xml"}, 0, 1, FLAGS},
    {"enable_transform",  "use model='enable_transform=true'.",                                  OFFSET(enable_transform), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
    {"phone_model",  "use model='enable_transform=true'.",                                       OFFSET(phone_model), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
    {"psnr",  "use feature='name=psnr'.",                                                        OFFSET(psnr), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
    {"ssim",  "use feature='name=ssim'.",                                                        OFFSET(ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
    {"ms_ssim",  "use feature='name=ms_ssim'.",                                                  OFFSET(ms_ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
    {"pool",  "Set the pool method to be used for computing vmaf.",                              OFFSET(pool), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
    {"n_threads", "Set number of threads to be used when computing vmaf.",                       OFFSET(n_threads), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT_MAX, FLAGS},
    {"n_subsample", "Set interval for frame subsampling used when computing vmaf.",              OFFSET(n_subsample), AV_OPT_TYPE_INT, {.i64=1}, 1, UINT_MAX, FLAGS},
    {"enable_conf_interval",  "model='enable_conf_interval=true'.",                              OFFSET(enable_conf_interval), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
    {"model",  "Set the model to be used for computing vmaf.",                                   OFFSET(model_cfg), AV_OPT_TYPE_STRING, {.str="version=vmaf_v0.6.1"}, 0, 1, FLAGS},
    {"feature",  "Set the feature to be used for computing vmaf.",                               OFFSET(feature_cfg), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
    { NULL }
};

FRAMESYNC_DEFINE_CLASS(libvmaf, LIBVMAFContext, fs);
85
pix_fmt_map(enum AVPixelFormat av_pix_fmt)86 static enum VmafPixelFormat pix_fmt_map(enum AVPixelFormat av_pix_fmt)
87 {
88 switch (av_pix_fmt) {
89 case AV_PIX_FMT_YUV420P:
90 case AV_PIX_FMT_YUV420P10LE:
91 case AV_PIX_FMT_YUV420P12LE:
92 case AV_PIX_FMT_YUV420P16LE:
93 return VMAF_PIX_FMT_YUV420P;
94 case AV_PIX_FMT_YUV422P:
95 case AV_PIX_FMT_YUV422P10LE:
96 case AV_PIX_FMT_YUV422P12LE:
97 case AV_PIX_FMT_YUV422P16LE:
98 return VMAF_PIX_FMT_YUV422P;
99 case AV_PIX_FMT_YUV444P:
100 case AV_PIX_FMT_YUV444P10LE:
101 case AV_PIX_FMT_YUV444P12LE:
102 case AV_PIX_FMT_YUV444P16LE:
103 return VMAF_PIX_FMT_YUV444P;
104 default:
105 return VMAF_PIX_FMT_UNKNOWN;
106 }
107 }
108
/**
 * Allocate a libvmaf picture and copy one AVFrame into it row by row.
 *
 * @param src source frame (planar YUV, bpc bits per component)
 * @param dst destination libvmaf picture; allocated here, ownership is
 *            handed to the caller (and ultimately to vmaf_read_pictures())
 * @param bpc bits per component of the source format
 * @return 0 on success, AVERROR(ENOMEM) if the picture allocation fails
 */
static int copy_picture_data(AVFrame *src, VmafPicture *dst, unsigned bpc)
{
    /* Formats above 8 bits store each sample in 2 bytes; the previous
     * sizeof(*dst_data) factor was always 1 and copied only half of each
     * row for 10/12/16-bit input. */
    const int bytes_per_value = bpc > 8 ? 2 : 1;
    int err = vmaf_picture_alloc(dst, pix_fmt_map(src->format), bpc,
                                 src->width, src->height);
    if (err)
        return AVERROR(ENOMEM);

    for (unsigned i = 0; i < 3; i++) {
        uint8_t *src_data = src->data[i];
        uint8_t *dst_data = dst->data[i];
        for (unsigned j = 0; j < dst->h[i]; j++) {
            memcpy(dst_data, src_data, bytes_per_value * dst->w[i]);
            src_data += src->linesize[i];
            dst_data += dst->stride[i];
        }
    }

    return 0;
}
128
/**
 * Frame-sync callback invoked with a (distorted, reference) frame pair.
 * Copies both frames into libvmaf pictures, feeds them to the libvmaf
 * context and forwards the distorted frame downstream unchanged.
 */
static int do_vmaf(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    LIBVMAFContext *s = ctx->priv;
    VmafPicture pic_ref, pic_dist;
    AVFrame *ref, *dist;
    int err = 0;

    int ret = ff_framesync_dualinput_get(fs, &dist, &ref);
    if (ret < 0)
        return ret;
    /* Pass the frame through untouched when the filter is disabled or no
     * reference frame is paired with it yet. */
    if (ctx->is_disabled || !ref)
        return ff_filter_frame(ctx->outputs[0], dist);

    err = copy_picture_data(ref, &pic_ref, s->bpc);
    if (err) {
        av_log(s, AV_LOG_ERROR, "problem during vmaf_picture_alloc.\n");
        return AVERROR(ENOMEM);
    }

    err = copy_picture_data(dist, &pic_dist, s->bpc);
    if (err) {
        av_log(s, AV_LOG_ERROR, "problem during vmaf_picture_alloc.\n");
        vmaf_picture_unref(&pic_ref);
        return AVERROR(ENOMEM);
    }

    /* NOTE(review): this assumes vmaf_read_pictures() takes ownership of
     * both pictures on success (no unref here); on failure the pictures
     * appear to leak — confirm against the libvmaf API docs. */
    err = vmaf_read_pictures(s->vmaf, &pic_ref, &pic_dist, s->frame_cnt++);
    if (err) {
        av_log(s, AV_LOG_ERROR, "problem during vmaf_read_pictures.\n");
        return AVERROR(EINVAL);
    }

    return ff_filter_frame(ctx->outputs[0], dist);
}
164
165
delimited_dict_parse(char * str,unsigned * cnt)166 static AVDictionary **delimited_dict_parse(char *str, unsigned *cnt)
167 {
168 AVDictionary **dict = NULL;
169 char *str_copy = NULL;
170 char *saveptr = NULL;
171 unsigned cnt2;
172 int err = 0;
173
174 if (!str)
175 return NULL;
176
177 cnt2 = 1;
178 for (char *p = str; *p; p++) {
179 if (*p == '|')
180 cnt2++;
181 }
182
183 dict = av_calloc(cnt2, sizeof(*dict));
184 if (!dict)
185 goto fail;
186
187 str_copy = av_strdup(str);
188 if (!str_copy)
189 goto fail;
190
191 *cnt = 0;
192 for (unsigned i = 0; i < cnt2; i++) {
193 char *s = av_strtok(i == 0 ? str_copy : NULL, "|", &saveptr);
194 if (!s)
195 continue;
196 err = av_dict_parse_string(&dict[(*cnt)++], s, "=", ":", 0);
197 if (err)
198 goto fail;
199 }
200
201 av_free(str_copy);
202 return dict;
203
204 fail:
205 if (dict) {
206 for (unsigned i = 0; i < *cnt; i++) {
207 if (dict[i])
208 av_dict_free(&dict[i]);
209 }
210 av_free(dict);
211 }
212
213 av_free(str_copy);
214 *cnt = 0;
215 return NULL;
216 }
217
/**
 * Parse the "feature" option string and register each requested feature
 * extractor (with its per-feature options) with the libvmaf context.
 */
static int parse_features(AVFilterContext *ctx)
{
    LIBVMAFContext *s = ctx->priv;
    AVDictionary **dicts = NULL;
    unsigned n_dicts;
    int err = 0;

    if (!s->feature_cfg)
        return 0;

    dicts = delimited_dict_parse(s->feature_cfg, &n_dicts);
    if (!dicts) {
        av_log(ctx, AV_LOG_ERROR,
               "could not parse feature config: %s\n", s->feature_cfg);
        return AVERROR(EINVAL);
    }

    for (unsigned i = 0; i < n_dicts; i++) {
        VmafFeatureDictionary *opts = NULL;
        AVDictionaryEntry *entry = NULL;
        char *name = NULL;

        /* "name" selects the feature; every other key becomes an option. */
        while ((entry = av_dict_get(dicts[i], "", entry,
                                    AV_DICT_IGNORE_SUFFIX))) {
            if (av_stristr(entry->key, "name")) {
                name = entry->value;
                continue;
            }

            err = vmaf_feature_dictionary_set(&opts, entry->key,
                                              entry->value);
            if (err) {
                av_log(ctx, AV_LOG_ERROR,
                       "could not set feature option: %s.%s=%s\n",
                       name, entry->key, entry->value);
                goto exit;
            }
        }

        err = vmaf_use_feature(s->vmaf, name, opts);
        if (err) {
            av_log(ctx, AV_LOG_ERROR,
                   "problem during vmaf_use_feature: %s\n", name);
            goto exit;
        }
    }

exit:
    /* av_dict_free() on an empty slot is a no-op. */
    for (unsigned i = 0; i < n_dicts; i++)
        av_dict_free(&dicts[i]);
    av_free(dicts);
    return err;
}
272
/**
 * Parse the "model" option string, load every configured model (by version
 * or by file path), apply per-model feature overloads of the form
 * "feature.option=value", and register the models' features with libvmaf.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int parse_models(AVFilterContext *ctx)
{
    LIBVMAFContext *s = ctx->priv;
    AVDictionary **dict;
    unsigned dict_cnt;
    int err = 0;

    if (!s->model_cfg) return 0;

    dict_cnt = 0;
    dict = delimited_dict_parse(s->model_cfg, &dict_cnt);
    if (!dict) {
        av_log(ctx, AV_LOG_ERROR,
               "could not parse model config: %s\n", s->model_cfg);
        return AVERROR(EINVAL);
    }

    s->model_cnt = dict_cnt;
    s->model = av_calloc(s->model_cnt, sizeof(*s->model));
    if (!s->model) {
        /* Previously returned directly, leaking dict. */
        err = AVERROR(ENOMEM);
        goto exit;
    }

    for (unsigned i = 0; i < dict_cnt; i++) {
        VmafModelConfig model_cfg = { 0 };
        AVDictionaryEntry *e = NULL;
        char *version = NULL;
        char *path = NULL;

        /* First pass: collect the generic model settings. */
        while ((e = av_dict_get(dict[i], "", e, AV_DICT_IGNORE_SUFFIX))) {
            if (av_stristr(e->key, "disable_clip")) {
                model_cfg.flags |= av_stristr(e->value, "true") ?
                    VMAF_MODEL_FLAG_DISABLE_CLIP : 0;
                continue;
            }

            if (av_stristr(e->key, "enable_transform")) {
                model_cfg.flags |= av_stristr(e->value, "true") ?
                    VMAF_MODEL_FLAG_ENABLE_TRANSFORM : 0;
                continue;
            }

            if (av_stristr(e->key, "name")) {
                model_cfg.name = e->value;
                continue;
            }

            if (av_stristr(e->key, "version")) {
                version = e->value;
                continue;
            }

            if (av_stristr(e->key, "path")) {
                path = e->value;
                continue;
            }
        }

        if (version) {
            err = vmaf_model_load(&s->model[i], &model_cfg, version);
            if (err) {
                av_log(ctx, AV_LOG_ERROR,
                       "could not load libvmaf model with version: %s\n",
                       version);
                /* Normalize libvmaf's error code to a proper AVERROR. */
                err = AVERROR(EINVAL);
                goto exit;
            }
        }

        if (path && !s->model[i]) {
            err = vmaf_model_load_from_path(&s->model[i], &model_cfg, path);
            if (err) {
                av_log(ctx, AV_LOG_ERROR,
                       "could not load libvmaf model with path: %s\n",
                       path);
                err = AVERROR(EINVAL);
                goto exit;
            }
        }

        if (!s->model[i]) {
            av_log(ctx, AV_LOG_ERROR,
                   "could not load libvmaf model with config: %s\n",
                   s->model_cfg);
            /* Previously fell through with err == 0, reporting success
             * despite the model having failed to load. */
            err = AVERROR(EINVAL);
            goto exit;
        }

        /* Second pass: "feature.option=value" entries overload options of
         * individual features for this model. */
        while ((e = av_dict_get(dict[i], "", e, AV_DICT_IGNORE_SUFFIX))) {
            VmafFeatureDictionary *feature_opts_dict = NULL;
            char *feature_opt = NULL;

            char *feature_name = av_strtok(e->key, ".", &feature_opt);
            if (!feature_opt)
                continue;

            err = vmaf_feature_dictionary_set(&feature_opts_dict,
                                              feature_opt, e->value);
            if (err) {
                av_log(ctx, AV_LOG_ERROR,
                       "could not set feature option: %s.%s=%s\n",
                       feature_name, feature_opt, e->value);
                err = AVERROR(EINVAL);
                goto exit;
            }

            err = vmaf_model_feature_overload(s->model[i], feature_name,
                                              feature_opts_dict);
            if (err) {
                av_log(ctx, AV_LOG_ERROR,
                       "could not overload feature: %s\n", feature_name);
                err = AVERROR(EINVAL);
                goto exit;
            }
        }
    }

    for (unsigned i = 0; i < s->model_cnt; i++) {
        err = vmaf_use_features_from_model(s->vmaf, s->model[i]);
        if (err) {
            av_log(ctx, AV_LOG_ERROR,
                   "problem during vmaf_use_features_from_model\n");
            err = AVERROR(EINVAL);
            goto exit;
        }
    }

exit:
    for (unsigned i = 0; i < dict_cnt; i++) {
        if (dict[i])
            av_dict_free(&dict[i]);
    }
    av_free(dict);
    return err;
}
404
log_level_map(int log_level)405 static enum VmafLogLevel log_level_map(int log_level)
406 {
407 switch (log_level) {
408 case AV_LOG_QUIET:
409 return VMAF_LOG_LEVEL_NONE;
410 case AV_LOG_ERROR:
411 return VMAF_LOG_LEVEL_ERROR;
412 case AV_LOG_WARNING:
413 return VMAF_LOG_LEVEL_WARNING;
414 case AV_LOG_INFO:
415 return VMAF_LOG_LEVEL_INFO;
416 case AV_LOG_DEBUG:
417 return VMAF_LOG_LEVEL_DEBUG;
418 default:
419 return VMAF_LOG_LEVEL_INFO;
420 }
421 }
422
/**
 * Honor the deprecated options (model_path, enable_transform, phone_model,
 * enable_conf_interval, psnr, ssim, ms_ssim) by translating them into the
 * equivalent libvmaf model/feature registrations.
 *
 * @return 0 on success, an error code otherwise
 */
static int parse_deprecated_options(AVFilterContext *ctx)
{
    LIBVMAFContext *s = ctx->priv;
    VmafModel *model = NULL;
    VmafModelCollection *model_collection = NULL;
    enum VmafModelFlags flags = VMAF_MODEL_FLAGS_DEFAULT;
    int err = 0;

    /* Compute the flags BEFORE they are captured by the model_cfg
     * initializer; the previous code ORed in ENABLE_TRANSFORM after the
     * initializer had already copied `flags`, silently dropping it. */
    if (s->enable_transform || s->phone_model)
        flags |= VMAF_MODEL_FLAG_ENABLE_TRANSFORM;

    VmafModelConfig model_cfg = {
        .name = "vmaf",
        .flags = flags,
    };

    if (!s->model_path)
        goto extra_metrics_only;

    if (s->enable_conf_interval) {
        /* Confidence intervals require the full model collection. */
        err = vmaf_model_collection_load_from_path(&model, &model_collection,
                                                   &model_cfg, s->model_path);
        if (err) {
            av_log(ctx, AV_LOG_ERROR,
                   "problem loading model file: %s\n", s->model_path);
            goto exit;
        }

        err = vmaf_use_features_from_model_collection(s->vmaf, model_collection);
        if (err) {
            av_log(ctx, AV_LOG_ERROR,
                   "problem loading feature extractors from model file: %s\n",
                   s->model_path);
            goto exit;
        }
    } else {
        err = vmaf_model_load_from_path(&model, &model_cfg, s->model_path);
        if (err) {
            av_log(ctx, AV_LOG_ERROR,
                   "problem loading model file: %s\n", s->model_path);
            goto exit;
        }
        err = vmaf_use_features_from_model(s->vmaf, model);
        if (err) {
            av_log(ctx, AV_LOG_ERROR,
                   "problem loading feature extractors from model file: %s\n",
                   s->model_path);
            goto exit;
        }
    }

extra_metrics_only:
    if (s->psnr) {
        VmafFeatureDictionary *d = NULL;
        vmaf_feature_dictionary_set(&d, "enable_chroma", "false");

        err = vmaf_use_feature(s->vmaf, "psnr", d);
        if (err) {
            av_log(ctx, AV_LOG_ERROR,
                   "problem loading feature extractor: psnr\n");
            goto exit;
        }
    }

    if (s->ssim) {
        err = vmaf_use_feature(s->vmaf, "float_ssim", NULL);
        if (err) {
            av_log(ctx, AV_LOG_ERROR,
                   "problem loading feature extractor: ssim\n");
            goto exit;
        }
    }

    if (s->ms_ssim) {
        err = vmaf_use_feature(s->vmaf, "float_ms_ssim", NULL);
        if (err) {
            av_log(ctx, AV_LOG_ERROR,
                   "problem loading feature extractor: ms_ssim\n");
            goto exit;
        }
    }

exit:
    return err;
}
508
init(AVFilterContext * ctx)509 static av_cold int init(AVFilterContext *ctx)
510 {
511 LIBVMAFContext *s = ctx->priv;
512 int err = 0;
513
514 VmafConfiguration cfg = {
515 .log_level = log_level_map(av_log_get_level()),
516 .n_subsample = s->n_subsample,
517 .n_threads = s->n_threads,
518 };
519
520 err = vmaf_init(&s->vmaf, cfg);
521 if (err)
522 return AVERROR(EINVAL);
523
524 err = parse_deprecated_options(ctx);
525 if (err)
526 return err;
527
528 err = parse_models(ctx);
529 if (err)
530 return err;
531
532 err = parse_features(ctx);
533 if (err)
534 return err;
535
536 s->fs.on_event = do_vmaf;
537 return 0;
538 }
539
/* Pixel formats accepted by the filter: 4:2:0/4:2:2/4:4:4 planar YUV at
 * 8 and 10 bits (the subset of pix_fmt_map()'s formats advertised here). */
static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_YUV444P10LE, AV_PIX_FMT_YUV422P10LE, AV_PIX_FMT_YUV420P10LE,
    AV_PIX_FMT_NONE
};
545
/**
 * Configure the reference input: require that both inputs agree in width,
 * height and pixel format, then record the bit depth for picture copies.
 * All mismatches are reported before failing.
 */
static int config_input_ref(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LIBVMAFContext *s = ctx->priv;
    const AVFilterLink *main_link = ctx->inputs[0];
    const AVFilterLink *ref_link = ctx->inputs[1];
    const AVPixFmtDescriptor *desc;
    int err = 0;

    if (main_link->w != ref_link->w) {
        av_log(ctx, AV_LOG_ERROR, "input width must match.\n");
        err |= AVERROR(EINVAL);
    }

    if (main_link->h != ref_link->h) {
        av_log(ctx, AV_LOG_ERROR, "input height must match.\n");
        err |= AVERROR(EINVAL);
    }

    if (main_link->format != ref_link->format) {
        av_log(ctx, AV_LOG_ERROR, "input pix_fmt must match.\n");
        err |= AVERROR(EINVAL);
    }

    if (err)
        return err;

    desc = av_pix_fmt_desc_get(inlink->format);
    s->bpc = desc->comp[0].depth;

    return 0;
}
576
/**
 * Configure the output link: initialize the dual-input frame sync and
 * mirror the main (distorted) input's properties on the output.
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LIBVMAFContext *s = ctx->priv;
    const AVFilterLink *main_link = ctx->inputs[0];
    int ret;

    ret = ff_framesync_init_dualinput(&s->fs, ctx);
    if (ret < 0)
        return ret;

    /* The output carries the main input's geometry and timing. */
    outlink->w = main_link->w;
    outlink->h = main_link->h;
    outlink->time_base = main_link->time_base;
    outlink->sample_aspect_ratio = main_link->sample_aspect_ratio;
    outlink->frame_rate = main_link->frame_rate;

    ret = ff_framesync_configure(&s->fs);
    if (ret < 0)
        return ret;

    return 0;
}
597
/* Activation callback: delegate all scheduling to the frame synchronizer,
 * which calls do_vmaf() whenever a distorted/reference pair is available. */
static int activate(AVFilterContext *ctx)
{
    LIBVMAFContext *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}
603
log_fmt_map(const char * log_fmt)604 static enum VmafOutputFormat log_fmt_map(const char *log_fmt)
605 {
606 if (log_fmt) {
607 if (av_stristr(log_fmt, "xml"))
608 return VMAF_OUTPUT_FORMAT_XML;
609 if (av_stristr(log_fmt, "json"))
610 return VMAF_OUTPUT_FORMAT_JSON;
611 if (av_stristr(log_fmt, "csv"))
612 return VMAF_OUTPUT_FORMAT_CSV;
613 if (av_stristr(log_fmt, "sub"))
614 return VMAF_OUTPUT_FORMAT_SUB;
615 }
616
617 return VMAF_OUTPUT_FORMAT_XML;
618 }
619
pool_method_map(const char * pool_method)620 static enum VmafPoolingMethod pool_method_map(const char *pool_method)
621 {
622 if (pool_method) {
623 if (av_stristr(pool_method, "min"))
624 return VMAF_POOL_METHOD_MIN;
625 if (av_stristr(pool_method, "mean"))
626 return VMAF_POOL_METHOD_MEAN;
627 if (av_stristr(pool_method, "harmonic_mean"))
628 return VMAF_POOL_METHOD_HARMONIC_MEAN;
629 }
630
631 return VMAF_POOL_METHOD_MEAN;
632 }
633
uninit(AVFilterContext * ctx)634 static av_cold void uninit(AVFilterContext *ctx)
635 {
636 LIBVMAFContext *s = ctx->priv;
637 int err = 0;
638
639 ff_framesync_uninit(&s->fs);
640
641 if (!s->frame_cnt)
642 goto clean_up;
643
644 err = vmaf_read_pictures(s->vmaf, NULL, NULL, 0);
645 if (err) {
646 av_log(ctx, AV_LOG_ERROR,
647 "problem flushing libvmaf context.\n");
648 }
649
650 for (unsigned i = 0; i < s->model_cnt; i++) {
651 double vmaf_score;
652 err = vmaf_score_pooled(s->vmaf, s->model[i], pool_method_map(s->pool),
653 &vmaf_score, 0, s->frame_cnt - 1);
654 if (err) {
655 av_log(ctx, AV_LOG_ERROR,
656 "problem getting pooled vmaf score.\n");
657 }
658
659 av_log(ctx, AV_LOG_INFO, "VMAF score: %f\n", vmaf_score);
660 }
661
662 if (s->vmaf) {
663 if (s->log_path && !err)
664 vmaf_write_output(s->vmaf, s->log_path, log_fmt_map(s->log_fmt));
665 }
666
667 clean_up:
668 if (s->model) {
669 for (unsigned i = 0; i < s->model_cnt; i++) {
670 if (s->model[i])
671 vmaf_model_destroy(s->model[i]);
672 }
673 av_free(s->model);
674 }
675
676 if (s->vmaf)
677 vmaf_close(s->vmaf);
678 }
679
/* Input pads: "main" carries the distorted stream; "reference" carries the
 * pristine stream and performs the cross-input validation. */
static const AVFilterPad libvmaf_inputs[] = {
    {
        .name         = "main",
        .type         = AVMEDIA_TYPE_VIDEO,
    },{
        .name         = "reference",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input_ref,
    },
};
690
/* Single output pad: passes through the distorted stream. */
static const AVFilterPad libvmaf_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
};
698
/* Filter definition; preinit comes from FRAMESYNC_DEFINE_CLASS above. */
const AVFilter ff_vf_libvmaf = {
    .name          = "libvmaf",
    .description   = NULL_IF_CONFIG_SMALL("Calculate the VMAF between two video streams."),
    .preinit       = libvmaf_framesync_preinit,
    .init          = init,
    .uninit        = uninit,
    .activate      = activate,
    .priv_size     = sizeof(LIBVMAFContext),
    .priv_class    = &libvmaf_class,
    FILTER_INPUTS(libvmaf_inputs),
    FILTER_OUTPUTS(libvmaf_outputs),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
};
712