1 /*
2 * filter layer
3 * Copyright (c) 2007 Bobby Bingham
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include "libavutil/avassert.h"
23 #include "libavutil/avstring.h"
24 #include "libavutil/buffer.h"
25 #include "libavutil/channel_layout.h"
26 #include "libavutil/common.h"
27 #include "libavutil/eval.h"
28 #include "libavutil/hwcontext.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/rational.h"
34 #include "libavutil/samplefmt.h"
35 #include "libavutil/thread.h"
36
37 #define FF_INTERNAL_FIELDS 1
38 #include "framequeue.h"
39
40 #include "audio.h"
41 #include "avfilter.h"
42 #include "filters.h"
43 #include "formats.h"
44 #include "internal.h"
45
46 #include "libavutil/ffversion.h"
47 const char av_filter_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
48
void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
50 {
51 av_unused char buf[16];
52 ff_tlog(ctx,
53 "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
54 ref, ref->buf, ref->data[0],
55 ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
56 ref->pts, ref->pkt_pos);
57
58 if (ref->width) {
59 ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
60 ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den,
61 ref->width, ref->height,
62 !ref->interlaced_frame ? 'P' : /* Progressive */
63 ref->top_field_first ? 'T' : 'B', /* Top / Bottom */
64 ref->key_frame,
65 av_get_picture_type_char(ref->pict_type));
66 }
67 if (ref->nb_samples) {
ff_tlog(ctx, " cl:%"PRId64" n:%d r:%d",
69 ref->channel_layout,
70 ref->nb_samples,
71 ref->sample_rate);
72 }
73
74 ff_tlog(ctx, "]%s", end ? "\n" : "");
75 }
76
unsigned avfilter_version(void)
78 {
79 av_assert0(LIBAVFILTER_VERSION_MICRO >= 100);
80 return LIBAVFILTER_VERSION_INT;
81 }
82
const char *avfilter_configuration(void)
84 {
85 return FFMPEG_CONFIGURATION;
86 }
87
const char *avfilter_license(void)
89 {
90 #define LICENSE_PREFIX "libavfilter license: "
91 return &LICENSE_PREFIX FFMPEG_LICENSE[sizeof(LICENSE_PREFIX) - 1];
92 }
93
void ff_command_queue_pop(AVFilterContext *filter)
95 {
96 AVFilterCommand *c= filter->command_queue;
97 av_freep(&c->arg);
98 av_freep(&c->command);
99 filter->command_queue= c->next;
100 av_free(c);
101 }
102
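/* Insert a new pad and a NULL link slot at index idx, then update the pad
 * index stored (at padidx_off) in any links that follow the insertion. */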
int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
104 AVFilterPad **pads, AVFilterLink ***links,
105 AVFilterPad *newpad)
106 {
107 AVFilterLink **newlinks;
108 AVFilterPad *newpads;
109 unsigned i;
110
111 idx = FFMIN(idx, *count);
112
113 newpads = av_realloc_array(*pads, *count + 1, sizeof(AVFilterPad));
114 newlinks = av_realloc_array(*links, *count + 1, sizeof(AVFilterLink*));
115 if (newpads)
116 *pads = newpads;
117 if (newlinks)
118 *links = newlinks;
119 if (!newpads || !newlinks)
120 return AVERROR(ENOMEM);
121
122 memmove(*pads + idx + 1, *pads + idx, sizeof(AVFilterPad) * (*count - idx));
123 memmove(*links + idx + 1, *links + idx, sizeof(AVFilterLink*) * (*count - idx));
124 memcpy(*pads + idx, newpad, sizeof(AVFilterPad));
125 (*links)[idx] = NULL;
126
127 (*count)++;
128 for (i = idx + 1; i < *count; i++)
129 if ((*links)[i])
130 (*(unsigned *)((uint8_t *) (*links)[i] + padidx_off))++;
131
132 return 0;
133 }
134
int avfilter_link(AVFilterContext *src, unsigned srcpad,
136 AVFilterContext *dst, unsigned dstpad)
137 {
138 AVFilterLink *link;
139
140 av_assert0(src->graph);
141 av_assert0(dst->graph);
142 av_assert0(src->graph == dst->graph);
143
144 if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad ||
145 src->outputs[srcpad] || dst->inputs[dstpad])
146 return AVERROR(EINVAL);
147
148 if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) {
149 av_log(src, AV_LOG_ERROR,
150 "Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n",
151 src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"),
152 dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?"));
153 return AVERROR(EINVAL);
154 }
155
156 link = av_mallocz(sizeof(*link));
157 if (!link)
158 return AVERROR(ENOMEM);
159
160 src->outputs[srcpad] = dst->inputs[dstpad] = link;
161
162 link->src = src;
163 link->dst = dst;
164 link->srcpad = &src->output_pads[srcpad];
165 link->dstpad = &dst->input_pads[dstpad];
166 link->type = src->output_pads[srcpad].type;
167 av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
168 link->format = -1;
169 ff_framequeue_init(&link->fifo, &src->graph->internal->frame_queues);
170
171 return 0;
172 }
173
void avfilter_link_free(AVFilterLink **link)
175 {
176 if (!*link)
177 return;
178
179 av_frame_free(&(*link)->partial_buf);
180 ff_framequeue_free(&(*link)->fifo);
181 ff_frame_pool_uninit((FFFramePool**)&(*link)->frame_pool);
182
183 av_freep(link);
184 }
185
186 #if FF_API_FILTER_GET_SET
int avfilter_link_get_channels(AVFilterLink *link)
188 {
189 return link->channels;
190 }
191 #endif
192
void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
194 {
195 filter->ready = FFMAX(filter->ready, priority);
196 }
197
198 /**
199 * Clear frame_blocked_in on all outputs.
200 * This is necessary whenever something changes on input.
201 */
static void filter_unblock(AVFilterContext *filter)
203 {
204 unsigned i;
205
206 for (i = 0; i < filter->nb_outputs; i++)
207 filter->outputs[i]->frame_blocked_in = 0;
208 }
209
210
void ff_avfilter_link_set_in_status(AVFilterLink *link, int status, int64_t pts)
212 {
213 if (link->status_in == status)
214 return;
215 av_assert0(!link->status_in);
216 link->status_in = status;
217 link->status_in_pts = pts;
218 link->frame_wanted_out = 0;
219 link->frame_blocked_in = 0;
220 filter_unblock(link->dst);
221 ff_filter_set_ready(link->dst, 200);
222 }
223
void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts)
225 {
226 av_assert0(!link->frame_wanted_out);
227 av_assert0(!link->status_out);
228 link->status_out = status;
229 if (pts != AV_NOPTS_VALUE)
230 ff_update_link_current_pts(link, pts);
231 filter_unblock(link->dst);
232 ff_filter_set_ready(link->src, 200);
233 }
234
void avfilter_link_set_closed(AVFilterLink *link, int closed)
236 {
237 ff_avfilter_link_set_out_status(link, closed ? AVERROR_EOF : 0, AV_NOPTS_VALUE);
238 }
239
int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
241 unsigned filt_srcpad_idx, unsigned filt_dstpad_idx)
242 {
243 int ret;
244 unsigned dstpad_idx = link->dstpad - link->dst->input_pads;
245
246 av_log(link->dst, AV_LOG_VERBOSE, "auto-inserting filter '%s' "
247 "between the filter '%s' and the filter '%s'\n",
248 filt->name, link->src->name, link->dst->name);
249
250 link->dst->inputs[dstpad_idx] = NULL;
251 if ((ret = avfilter_link(filt, filt_dstpad_idx, link->dst, dstpad_idx)) < 0) {
252 /* failed to link output filter to new filter */
253 link->dst->inputs[dstpad_idx] = link;
254 return ret;
255 }
256
257 /* re-hookup the link to the new destination filter we inserted */
258 link->dst = filt;
259 link->dstpad = &filt->input_pads[filt_srcpad_idx];
260 filt->inputs[filt_srcpad_idx] = link;
261
262 /* if any information on supported media formats already exists on the
263 * link, we need to preserve that */
264 if (link->out_formats)
265 ff_formats_changeref(&link->out_formats,
266 &filt->outputs[filt_dstpad_idx]->out_formats);
267 if (link->out_samplerates)
268 ff_formats_changeref(&link->out_samplerates,
269 &filt->outputs[filt_dstpad_idx]->out_samplerates);
270 if (link->out_channel_layouts)
271 ff_channel_layouts_changeref(&link->out_channel_layouts,
272 &filt->outputs[filt_dstpad_idx]->out_channel_layouts);
273
274 return 0;
275 }
276
int avfilter_config_links(AVFilterContext *filter)
278 {
279 int (*config_link)(AVFilterLink *);
280 unsigned i;
281 int ret;
282
283 for (i = 0; i < filter->nb_inputs; i ++) {
284 AVFilterLink *link = filter->inputs[i];
285 AVFilterLink *inlink;
286
287 if (!link) continue;
288 if (!link->src || !link->dst) {
289 av_log(filter, AV_LOG_ERROR,
"Not all inputs and outputs are properly linked (%d).\n", i);
291 return AVERROR(EINVAL);
292 }
293
294 inlink = link->src->nb_inputs ? link->src->inputs[0] : NULL;
295 link->current_pts =
296 link->current_pts_us = AV_NOPTS_VALUE;
297
298 switch (link->init_state) {
299 case AVLINK_INIT:
300 continue;
301 case AVLINK_STARTINIT:
302 av_log(filter, AV_LOG_INFO, "circular filter chain detected\n");
303 return 0;
304 case AVLINK_UNINIT:
305 link->init_state = AVLINK_STARTINIT;
306
307 if ((ret = avfilter_config_links(link->src)) < 0)
308 return ret;
309
310 if (!(config_link = link->srcpad->config_props)) {
311 if (link->src->nb_inputs != 1) {
312 av_log(link->src, AV_LOG_ERROR, "Source filters and filters "
313 "with more than one input "
314 "must set config_props() "
315 "callbacks on all outputs\n");
316 return AVERROR(EINVAL);
317 }
318 } else if ((ret = config_link(link)) < 0) {
319 av_log(link->src, AV_LOG_ERROR,
320 "Failed to configure output pad on %s\n",
321 link->src->name);
322 return ret;
323 }
324
325 switch (link->type) {
326 case AVMEDIA_TYPE_VIDEO:
327 if (!link->time_base.num && !link->time_base.den)
328 link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q;
329
330 if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den)
331 link->sample_aspect_ratio = inlink ?
332 inlink->sample_aspect_ratio : (AVRational){1,1};
333
334 if (inlink) {
335 if (!link->frame_rate.num && !link->frame_rate.den)
336 link->frame_rate = inlink->frame_rate;
337 if (!link->w)
338 link->w = inlink->w;
339 if (!link->h)
340 link->h = inlink->h;
341 } else if (!link->w || !link->h) {
342 av_log(link->src, AV_LOG_ERROR,
343 "Video source filters must set their output link's "
344 "width and height\n");
345 return AVERROR(EINVAL);
346 }
347 break;
348
349 case AVMEDIA_TYPE_AUDIO:
350 if (inlink) {
351 if (!link->time_base.num && !link->time_base.den)
352 link->time_base = inlink->time_base;
353 }
354
355 if (!link->time_base.num && !link->time_base.den)
356 link->time_base = (AVRational) {1, link->sample_rate};
357 }
358
359 if (link->src->nb_inputs && link->src->inputs[0]->hw_frames_ctx &&
360 !(link->src->filter->flags_internal & FF_FILTER_FLAG_HWFRAME_AWARE)) {
361 av_assert0(!link->hw_frames_ctx &&
362 "should not be set by non-hwframe-aware filter");
363 link->hw_frames_ctx = av_buffer_ref(link->src->inputs[0]->hw_frames_ctx);
364 if (!link->hw_frames_ctx)
365 return AVERROR(ENOMEM);
366 }
367
368 if ((config_link = link->dstpad->config_props))
369 if ((ret = config_link(link)) < 0) {
370 av_log(link->dst, AV_LOG_ERROR,
371 "Failed to configure input pad on %s\n",
372 link->dst->name);
373 return ret;
374 }
375
376 link->init_state = AVLINK_INIT;
377 }
378 }
379
380 return 0;
381 }
382
void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
384 {
385 if (link->type == AVMEDIA_TYPE_VIDEO) {
386 ff_tlog(ctx,
387 "link[%p s:%dx%d fmt:%s %s->%s]%s",
388 link, link->w, link->h,
389 av_get_pix_fmt_name(link->format),
390 link->src ? link->src->filter->name : "",
391 link->dst ? link->dst->filter->name : "",
392 end ? "\n" : "");
393 } else {
394 char buf[128];
395 av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout);
396
397 ff_tlog(ctx,
398 "link[%p r:%d cl:%s fmt:%s %s->%s]%s",
399 link, (int)link->sample_rate, buf,
400 av_get_sample_fmt_name(link->format),
401 link->src ? link->src->filter->name : "",
402 link->dst ? link->dst->filter->name : "",
403 end ? "\n" : "");
404 }
405 }
406
int ff_request_frame(AVFilterLink *link)
408 {
409 FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1);
410
411 av_assert1(!link->dst->filter->activate);
412 if (link->status_out)
413 return link->status_out;
414 if (link->status_in) {
415 if (ff_framequeue_queued_frames(&link->fifo)) {
416 av_assert1(!link->frame_wanted_out);
417 av_assert1(link->dst->ready >= 300);
418 return 0;
419 } else {
420 /* Acknowledge status change. Filters using ff_request_frame() will
421 handle the change automatically. Filters can also check the
422 status directly but none do yet. */
423 ff_avfilter_link_set_out_status(link, link->status_in, link->status_in_pts);
424 return link->status_out;
425 }
426 }
427 link->frame_wanted_out = 1;
428 ff_filter_set_ready(link->src, 100);
429 return 0;
430 }
431
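/* Guess the timestamp of a status change on an output link, in
 * link_time_base, from the current_pts of the inputs carrying the same
 * status, falling back to their status_in_pts. */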
static int64_t guess_status_pts(AVFilterContext *ctx, int status, AVRational link_time_base)
433 {
434 unsigned i;
435 int64_t r = INT64_MAX;
436
437 for (i = 0; i < ctx->nb_inputs; i++)
438 if (ctx->inputs[i]->status_out == status)
439 r = FFMIN(r, av_rescale_q(ctx->inputs[i]->current_pts, ctx->inputs[i]->time_base, link_time_base));
440 if (r < INT64_MAX)
441 return r;
442 av_log(ctx, AV_LOG_WARNING, "EOF timestamp not reliable\n");
443 for (i = 0; i < ctx->nb_inputs; i++)
444 r = FFMIN(r, av_rescale_q(ctx->inputs[i]->status_in_pts, ctx->inputs[i]->time_base, link_time_base));
445 if (r < INT64_MAX)
446 return r;
447 return AV_NOPTS_VALUE;
448 }
449
static int ff_request_frame_to_filter(AVFilterLink *link)
451 {
452 int ret = -1;
453
454 FF_TPRINTF_START(NULL, request_frame_to_filter); ff_tlog_link(NULL, link, 1);
455 /* Assume the filter is blocked, let the method clear it if not */
456 link->frame_blocked_in = 1;
457 if (link->srcpad->request_frame)
458 ret = link->srcpad->request_frame(link);
459 else if (link->src->inputs[0])
460 ret = ff_request_frame(link->src->inputs[0]);
461 if (ret < 0) {
462 if (ret != AVERROR(EAGAIN) && ret != link->status_in)
463 ff_avfilter_link_set_in_status(link, ret, guess_status_pts(link->src, ret, link->time_base));
464 if (ret == AVERROR_EOF)
465 ret = 0;
466 }
467 return ret;
468 }
469
470 static const char *const var_names[] = {
471 "t",
472 "n",
473 "pos",
474 "w",
475 "h",
476 NULL
477 };
478
479 enum {
480 VAR_T,
481 VAR_N,
482 VAR_POS,
483 VAR_W,
484 VAR_H,
485 VAR_VARS_NB
486 };
487
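/* Compile the timeline 'enable' expression and install it on the filter
 * context. For example, a user-supplied expression such as
 * "between(t,10,20)" enables the filter only for the 10-20 second range
 * of its input. */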
static int set_enable_expr(AVFilterContext *ctx, const char *expr)
489 {
490 int ret;
491 char *expr_dup;
492 AVExpr *old = ctx->enable;
493
494 if (!(ctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE)) {
495 av_log(ctx, AV_LOG_ERROR, "Timeline ('enable' option) not supported "
496 "with filter '%s'\n", ctx->filter->name);
497 return AVERROR_PATCHWELCOME;
498 }
499
500 expr_dup = av_strdup(expr);
501 if (!expr_dup)
502 return AVERROR(ENOMEM);
503
504 if (!ctx->var_values) {
505 ctx->var_values = av_calloc(VAR_VARS_NB, sizeof(*ctx->var_values));
506 if (!ctx->var_values) {
507 av_free(expr_dup);
508 return AVERROR(ENOMEM);
509 }
510 }
511
512 ret = av_expr_parse((AVExpr**)&ctx->enable, expr_dup, var_names,
513 NULL, NULL, NULL, NULL, 0, ctx->priv);
514 if (ret < 0) {
515 av_log(ctx->priv, AV_LOG_ERROR,
516 "Error when evaluating the expression '%s' for enable\n",
517 expr_dup);
518 av_free(expr_dup);
519 return ret;
520 }
521
522 av_expr_free(old);
523 av_free(ctx->enable_str);
524 ctx->enable_str = expr_dup;
525 return 0;
526 }
527
void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
529 {
530 if (pts == AV_NOPTS_VALUE)
531 return;
532 link->current_pts = pts;
533 link->current_pts_us = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q);
534 /* TODO use duration */
535 if (link->graph && link->age_index >= 0)
536 ff_avfilter_graph_update_heap(link->graph, link);
537 }
538
int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
540 {
541 if(!strcmp(cmd, "ping")){
542 char local_res[256] = {0};
543
544 if (!res) {
545 res = local_res;
546 res_len = sizeof(local_res);
547 }
548 av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name);
549 if (res == local_res)
550 av_log(filter, AV_LOG_INFO, "%s", res);
551 return 0;
552 }else if(!strcmp(cmd, "enable")) {
553 return set_enable_expr(filter, arg);
554 }else if(filter->filter->process_command) {
555 return filter->filter->process_command(filter, cmd, arg, res, res_len, flags);
556 }
557 return AVERROR(ENOSYS);
558 }
559
int avfilter_pad_count(const AVFilterPad *pads)
561 {
562 int count;
563
564 if (!pads)
565 return 0;
566
567 for (count = 0; pads->name; count++)
568 pads++;
569 return count;
570 }
571
static const char *default_filter_name(void *filter_ctx)
573 {
574 AVFilterContext *ctx = filter_ctx;
575 return ctx->name ? ctx->name : ctx->filter->name;
576 }
577
static void *filter_child_next(void *obj, void *prev)
579 {
580 AVFilterContext *ctx = obj;
581 if (!prev && ctx->filter && ctx->filter->priv_class && ctx->priv)
582 return ctx->priv;
583 return NULL;
584 }
585
static const AVClass *filter_child_class_next(const AVClass *prev)
587 {
588 void *opaque = NULL;
589 const AVFilter *f = NULL;
590
591 /* find the filter that corresponds to prev */
592 while (prev && (f = av_filter_iterate(&opaque)))
593 if (f->priv_class == prev)
594 break;
595
596 /* could not find filter corresponding to prev */
597 if (prev && !f)
598 return NULL;
599
600 /* find next filter with specific options */
601 while ((f = av_filter_iterate(&opaque)))
602 if (f->priv_class)
603 return f->priv_class;
604
605 return NULL;
606 }
607
608 #define OFFSET(x) offsetof(AVFilterContext, x)
609 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
610 static const AVOption avfilter_options[] = {
611 { "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
612 { .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
613 { "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .flags = FLAGS, .unit = "thread_type" },
614 { "enable", "set enable expression", OFFSET(enable_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
615 { "threads", "Allowed number of threads", OFFSET(nb_threads), AV_OPT_TYPE_INT,
616 { .i64 = 0 }, 0, INT_MAX, FLAGS },
617 { "extra_hw_frames", "Number of extra hardware frames to allocate for the user",
618 OFFSET(extra_hw_frames), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
619 { NULL },
620 };
621
622 static const AVClass avfilter_class = {
623 .class_name = "AVFilter",
624 .item_name = default_filter_name,
625 .version = LIBAVUTIL_VERSION_INT,
626 .category = AV_CLASS_CATEGORY_FILTER,
627 .child_next = filter_child_next,
628 .child_class_next = filter_child_class_next,
629 .option = avfilter_options,
630 };
631
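/* Default execute() callback: run all jobs sequentially in the calling
 * thread; used when slice threading is not enabled for the filter. */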
static int default_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg,
633 int *ret, int nb_jobs)
634 {
635 int i;
636
637 for (i = 0; i < nb_jobs; i++) {
638 int r = func(ctx, arg, i, nb_jobs);
639 if (ret)
640 ret[i] = r;
641 }
642 return 0;
643 }
644
AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
646 {
647 AVFilterContext *ret;
648 int preinited = 0;
649
650 if (!filter)
651 return NULL;
652
653 ret = av_mallocz(sizeof(AVFilterContext));
654 if (!ret)
655 return NULL;
656
657 ret->av_class = &avfilter_class;
658 ret->filter = filter;
659 ret->name = inst_name ? av_strdup(inst_name) : NULL;
660 if (filter->priv_size) {
661 ret->priv = av_mallocz(filter->priv_size);
662 if (!ret->priv)
663 goto err;
664 }
665 if (filter->preinit) {
666 if (filter->preinit(ret) < 0)
667 goto err;
668 preinited = 1;
669 }
670
671 av_opt_set_defaults(ret);
672 if (filter->priv_class) {
673 *(const AVClass**)ret->priv = filter->priv_class;
674 av_opt_set_defaults(ret->priv);
675 }
676
677 ret->internal = av_mallocz(sizeof(*ret->internal));
678 if (!ret->internal)
679 goto err;
680 ret->internal->execute = default_execute;
681
682 ret->nb_inputs = avfilter_pad_count(filter->inputs);
683 if (ret->nb_inputs ) {
684 ret->input_pads = av_malloc_array(ret->nb_inputs, sizeof(AVFilterPad));
685 if (!ret->input_pads)
686 goto err;
687 memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
688 ret->inputs = av_mallocz_array(ret->nb_inputs, sizeof(AVFilterLink*));
689 if (!ret->inputs)
690 goto err;
691 }
692
693 ret->nb_outputs = avfilter_pad_count(filter->outputs);
694 if (ret->nb_outputs) {
695 ret->output_pads = av_malloc_array(ret->nb_outputs, sizeof(AVFilterPad));
696 if (!ret->output_pads)
697 goto err;
698 memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
699 ret->outputs = av_mallocz_array(ret->nb_outputs, sizeof(AVFilterLink*));
700 if (!ret->outputs)
701 goto err;
702 }
703
704 return ret;
705
706 err:
707 if (preinited)
708 filter->uninit(ret);
709 av_freep(&ret->inputs);
710 av_freep(&ret->input_pads);
711 ret->nb_inputs = 0;
712 av_freep(&ret->outputs);
713 av_freep(&ret->output_pads);
714 ret->nb_outputs = 0;
715 av_freep(&ret->priv);
716 av_freep(&ret->internal);
717 av_free(ret);
718 return NULL;
719 }
720
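/* Detach a link from its source and destination filters and free it along
 * with its format/layout lists and hardware frames reference. */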
static void free_link(AVFilterLink *link)
722 {
723 if (!link)
724 return;
725
726 if (link->src)
727 link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
728 if (link->dst)
729 link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
730
731 av_buffer_unref(&link->hw_frames_ctx);
732
733 ff_formats_unref(&link->in_formats);
734 ff_formats_unref(&link->out_formats);
735 ff_formats_unref(&link->in_samplerates);
736 ff_formats_unref(&link->out_samplerates);
737 ff_channel_layouts_unref(&link->in_channel_layouts);
738 ff_channel_layouts_unref(&link->out_channel_layouts);
739 avfilter_link_free(&link);
740 }
741
void avfilter_free(AVFilterContext *filter)
743 {
744 int i;
745
746 if (!filter)
747 return;
748
749 if (filter->graph)
750 ff_filter_graph_remove_filter(filter->graph, filter);
751
752 if (filter->filter->uninit)
753 filter->filter->uninit(filter);
754
755 for (i = 0; i < filter->nb_inputs; i++) {
756 free_link(filter->inputs[i]);
757 }
758 for (i = 0; i < filter->nb_outputs; i++) {
759 free_link(filter->outputs[i]);
760 }
761
762 if (filter->filter->priv_class)
763 av_opt_free(filter->priv);
764
765 av_buffer_unref(&filter->hw_device_ctx);
766
767 av_freep(&filter->name);
768 av_freep(&filter->input_pads);
769 av_freep(&filter->output_pads);
770 av_freep(&filter->inputs);
771 av_freep(&filter->outputs);
772 av_freep(&filter->priv);
773 while(filter->command_queue){
774 ff_command_queue_pop(filter);
775 }
776 av_opt_free(filter);
777 av_expr_free(filter->enable);
778 filter->enable = NULL;
779 av_freep(&filter->var_values);
780 av_freep(&filter->internal);
781 av_free(filter);
782 }
783
int ff_filter_get_nb_threads(AVFilterContext *ctx)
785 {
786 if (ctx->nb_threads > 0)
787 return FFMIN(ctx->nb_threads, ctx->graph->nb_threads);
788 return ctx->graph->nb_threads;
789 }
790
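/* Parse a filter argument string of the form "v1:v2:key=val:...". Leading
 * positional values are matched to the filter's private options in
 * declaration order (shorthand); named options are applied to the filter
 * context or its private context, or collected into *options. */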
static int process_options(AVFilterContext *ctx, AVDictionary **options,
792 const char *args)
793 {
794 const AVOption *o = NULL;
795 int ret, count = 0;
796 char *av_uninit(parsed_key), *av_uninit(value);
797 const char *key;
798 int offset= -1;
799
800 if (!args)
801 return 0;
802
803 while (*args) {
804 const char *shorthand = NULL;
805
806 o = av_opt_next(ctx->priv, o);
807 if (o) {
808 if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
809 continue;
810 offset = o->offset;
811 shorthand = o->name;
812 }
813
814 ret = av_opt_get_key_value(&args, "=", ":",
815 shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
816 &parsed_key, &value);
817 if (ret < 0) {
818 if (ret == AVERROR(EINVAL))
819 av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
820 else
821 av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
822 av_err2str(ret));
823 return ret;
824 }
825 if (*args)
826 args++;
827 if (parsed_key) {
828 key = parsed_key;
829 while ((o = av_opt_next(ctx->priv, o))); /* discard all remaining shorthand */
830 } else {
831 key = shorthand;
832 }
833
834 av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);
835
836 if (av_opt_find(ctx, key, NULL, 0, 0)) {
837 ret = av_opt_set(ctx, key, value, 0);
838 if (ret < 0) {
839 av_free(value);
840 av_free(parsed_key);
841 return ret;
842 }
843 } else {
844 av_dict_set(options, key, value, 0);
845 if ((ret = av_opt_set(ctx->priv, key, value, AV_OPT_SEARCH_CHILDREN)) < 0) {
846 if (!av_opt_find(ctx->priv, key, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
847 if (ret == AVERROR_OPTION_NOT_FOUND)
848 av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
849 av_free(value);
850 av_free(parsed_key);
851 return ret;
852 }
853 }
854 }
855
856 av_free(value);
857 av_free(parsed_key);
858 count++;
859 }
860
861 if (ctx->enable_str) {
862 ret = set_enable_expr(ctx, ctx->enable_str);
863 if (ret < 0)
864 return ret;
865 }
866 return count;
867 }
868
int ff_filter_process_command(AVFilterContext *ctx, const char *cmd,
870 const char *arg, char *res, int res_len, int flags)
871 {
872 const AVOption *o;
873
874 if (!ctx->filter->priv_class)
875 return 0;
876 o = av_opt_find2(ctx->priv, cmd, NULL, AV_OPT_FLAG_RUNTIME_PARAM | AV_OPT_FLAG_FILTERING_PARAM, AV_OPT_SEARCH_CHILDREN, NULL);
877 if (!o)
878 return AVERROR(ENOSYS);
879 return av_opt_set(ctx->priv, cmd, arg, 0);
880 }
881
int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
883 {
884 int ret = 0;
885
886 ret = av_opt_set_dict(ctx, options);
887 if (ret < 0) {
888 av_log(ctx, AV_LOG_ERROR, "Error applying generic filter options.\n");
889 return ret;
890 }
891
892 if (ctx->filter->flags & AVFILTER_FLAG_SLICE_THREADS &&
893 ctx->thread_type & ctx->graph->thread_type & AVFILTER_THREAD_SLICE &&
894 ctx->graph->internal->thread_execute) {
895 ctx->thread_type = AVFILTER_THREAD_SLICE;
896 ctx->internal->execute = ctx->graph->internal->thread_execute;
897 } else {
898 ctx->thread_type = 0;
899 }
900
901 if (ctx->filter->priv_class) {
902 ret = av_opt_set_dict2(ctx->priv, options, AV_OPT_SEARCH_CHILDREN);
903 if (ret < 0) {
904 av_log(ctx, AV_LOG_ERROR, "Error applying options to the filter.\n");
905 return ret;
906 }
907 }
908
909 if (ctx->filter->init_opaque)
910 ret = ctx->filter->init_opaque(ctx, NULL);
911 else if (ctx->filter->init)
912 ret = ctx->filter->init(ctx);
913 else if (ctx->filter->init_dict)
914 ret = ctx->filter->init_dict(ctx, options);
915
916 return ret;
917 }
918
int avfilter_init_str(AVFilterContext *filter, const char *args)
920 {
921 AVDictionary *options = NULL;
922 AVDictionaryEntry *e;
923 int ret = 0;
924
925 if (args && *args) {
926 if (!filter->filter->priv_class) {
927 av_log(filter, AV_LOG_ERROR, "This filter does not take any "
928 "options, but options were provided: %s.\n", args);
929 return AVERROR(EINVAL);
930 }
931
932 #if FF_API_OLD_FILTER_OPTS_ERROR
933 if ( !strcmp(filter->filter->name, "format") ||
934 !strcmp(filter->filter->name, "noformat") ||
935 !strcmp(filter->filter->name, "frei0r") ||
936 !strcmp(filter->filter->name, "frei0r_src") ||
937 !strcmp(filter->filter->name, "ocv") ||
938 !strcmp(filter->filter->name, "pan") ||
939 !strcmp(filter->filter->name, "pp") ||
940 !strcmp(filter->filter->name, "aevalsrc")) {
941 /* a hack for compatibility with the old syntax
942 * replace colons with |s */
943 char *copy = av_strdup(args);
944 char *p = copy;
945 int nb_leading = 0; // number of leading colons to skip
946 int deprecated = 0;
947
948 if (!copy) {
949 ret = AVERROR(ENOMEM);
950 goto fail;
951 }
952
953 if (!strcmp(filter->filter->name, "frei0r") ||
954 !strcmp(filter->filter->name, "ocv"))
955 nb_leading = 1;
956 else if (!strcmp(filter->filter->name, "frei0r_src"))
957 nb_leading = 3;
958
959 while (nb_leading--) {
960 p = strchr(p, ':');
961 if (!p) {
962 p = copy + strlen(copy);
963 break;
964 }
965 p++;
966 }
967
968 deprecated = strchr(p, ':') != NULL;
969
970 if (!strcmp(filter->filter->name, "aevalsrc")) {
971 deprecated = 0;
972 while ((p = strchr(p, ':')) && p[1] != ':') {
973 const char *epos = strchr(p + 1, '=');
974 const char *spos = strchr(p + 1, ':');
975 const int next_token_is_opt = epos && (!spos || epos < spos);
976 if (next_token_is_opt) {
977 p++;
978 break;
979 }
980 /* next token does not contain a '=', assume a channel expression */
981 deprecated = 1;
982 *p++ = '|';
983 }
984 if (p && *p == ':') { // double sep '::' found
985 deprecated = 1;
986 memmove(p, p + 1, strlen(p));
987 }
988 } else
989 while ((p = strchr(p, ':')))
990 *p++ = '|';
991
992 if (deprecated) {
993 av_log(filter, AV_LOG_ERROR, "This syntax is deprecated. Use "
994 "'|' to separate the list items ('%s' instead of '%s')\n",
995 copy, args);
996 ret = AVERROR(EINVAL);
997 } else {
998 ret = process_options(filter, &options, copy);
999 }
av_freep(&copy);
1001
1002 if (ret < 0)
1003 goto fail;
1004 } else
1005 #endif
1006 {
1007 ret = process_options(filter, &options, args);
1008 if (ret < 0)
1009 goto fail;
1010 }
1011 }
1012
1013 ret = avfilter_init_dict(filter, &options);
1014 if (ret < 0)
1015 goto fail;
1016
1017 if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
1018 av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key);
1019 ret = AVERROR_OPTION_NOT_FOUND;
1020 goto fail;
1021 }
1022
1023 fail:
1024 av_dict_free(&options);
1025
1026 return ret;
1027 }
1028
const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
1030 {
1031 return pads[pad_idx].name;
1032 }
1033
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
1035 {
1036 return pads[pad_idx].type;
1037 }
1038
static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
1040 {
1041 return ff_filter_frame(link->dst->outputs[0], frame);
1042 }
1043
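/* Pass one frame to the destination pad's filter_frame() callback (or the
 * default pass-through), after making it writable if required, processing
 * queued commands and evaluating the timeline state. */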
static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
1045 {
1046 int (*filter_frame)(AVFilterLink *, AVFrame *);
1047 AVFilterContext *dstctx = link->dst;
1048 AVFilterPad *dst = link->dstpad;
1049 int ret;
1050
1051 if (!(filter_frame = dst->filter_frame))
1052 filter_frame = default_filter_frame;
1053
1054 if (dst->needs_writable) {
1055 ret = ff_inlink_make_frame_writable(link, &frame);
1056 if (ret < 0)
1057 goto fail;
1058 }
1059
1060 ff_inlink_process_commands(link, frame);
1061 dstctx->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1062
1063 if (dstctx->is_disabled &&
1064 (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
1065 filter_frame = default_filter_frame;
1066 ret = filter_frame(link, frame);
1067 link->frame_count_out++;
1068 return ret;
1069
1070 fail:
1071 av_frame_free(&frame);
1072 return ret;
1073 }
1074
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
1076 {
1077 int ret;
1078 FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);
1079
1080 /* Consistency checks */
1081 if (link->type == AVMEDIA_TYPE_VIDEO) {
1082 if (strcmp(link->dst->filter->name, "buffersink") &&
1083 strcmp(link->dst->filter->name, "format") &&
1084 strcmp(link->dst->filter->name, "idet") &&
1085 strcmp(link->dst->filter->name, "null") &&
1086 strcmp(link->dst->filter->name, "scale")) {
1087 av_assert1(frame->format == link->format);
1088 av_assert1(frame->width == link->w);
1089 av_assert1(frame->height == link->h);
1090 }
1091 } else {
1092 if (frame->format != link->format) {
1093 av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
1094 goto error;
1095 }
1096 if (frame->channels != link->channels) {
1097 av_log(link->dst, AV_LOG_ERROR, "Channel count change is not supported\n");
1098 goto error;
1099 }
1100 if (frame->channel_layout != link->channel_layout) {
1101 av_log(link->dst, AV_LOG_ERROR, "Channel layout change is not supported\n");
1102 goto error;
1103 }
1104 if (frame->sample_rate != link->sample_rate) {
1105 av_log(link->dst, AV_LOG_ERROR, "Sample rate change is not supported\n");
1106 goto error;
1107 }
1108 }
1109
1110 link->frame_blocked_in = link->frame_wanted_out = 0;
1111 link->frame_count_in++;
1112 filter_unblock(link->dst);
1113 ret = ff_framequeue_add(&link->fifo, frame);
1114 if (ret < 0) {
1115 av_frame_free(&frame);
1116 return ret;
1117 }
1118 ff_filter_set_ready(link->dst, 300);
1119 return 0;
1120
1121 error:
1122 av_frame_free(&frame);
1123 return AVERROR_PATCHWELCOME;
1124 }
1125
static int samples_ready(AVFilterLink *link, unsigned min)
1127 {
1128 return ff_framequeue_queued_frames(&link->fifo) &&
1129 (ff_framequeue_queued_samples(&link->fifo) >= min ||
1130 link->status_in);
1131 }
1132
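/* Extract between min and max samples from the link's FIFO into *rframe,
 * merging (and, if needed, partially consuming) queued frames. */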
static int take_samples(AVFilterLink *link, unsigned min, unsigned max,
1134 AVFrame **rframe)
1135 {
1136 AVFrame *frame0, *frame, *buf;
1137 unsigned nb_samples, nb_frames, i, p;
1138 int ret;
1139
1140 /* Note: this function relies on no format changes and must only be
1141 called with enough samples. */
1142 av_assert1(samples_ready(link, link->min_samples));
1143 frame0 = frame = ff_framequeue_peek(&link->fifo, 0);
1144 if (!link->fifo.samples_skipped && frame->nb_samples >= min && frame->nb_samples <= max) {
1145 *rframe = ff_framequeue_take(&link->fifo);
1146 return 0;
1147 }
1148 nb_frames = 0;
1149 nb_samples = 0;
1150 while (1) {
1151 if (nb_samples + frame->nb_samples > max) {
1152 if (nb_samples < min)
1153 nb_samples = max;
1154 break;
1155 }
1156 nb_samples += frame->nb_samples;
1157 nb_frames++;
1158 if (nb_frames == ff_framequeue_queued_frames(&link->fifo))
1159 break;
1160 frame = ff_framequeue_peek(&link->fifo, nb_frames);
1161 }
1162
1163 buf = ff_get_audio_buffer(link, nb_samples);
1164 if (!buf)
1165 return AVERROR(ENOMEM);
1166 ret = av_frame_copy_props(buf, frame0);
1167 if (ret < 0) {
1168 av_frame_free(&buf);
1169 return ret;
1170 }
1171 buf->pts = frame0->pts;
1172
1173 p = 0;
1174 for (i = 0; i < nb_frames; i++) {
1175 frame = ff_framequeue_take(&link->fifo);
1176 av_samples_copy(buf->extended_data, frame->extended_data, p, 0,
1177 frame->nb_samples, link->channels, link->format);
1178 p += frame->nb_samples;
1179 av_frame_free(&frame);
1180 }
1181 if (p < nb_samples) {
1182 unsigned n = nb_samples - p;
1183 frame = ff_framequeue_peek(&link->fifo, 0);
1184 av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n,
1185 link->channels, link->format);
1186 ff_framequeue_skip_samples(&link->fifo, n, link->time_base);
1187 }
1188
1189 *rframe = buf;
1190 return 0;
1191 }
1192
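/* Take one frame (or the requested amount of samples) from the link's FIFO
 * and feed it to the destination filter via the legacy filter_frame() path. */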
static int ff_filter_frame_to_filter(AVFilterLink *link)
1194 {
1195 AVFrame *frame = NULL;
1196 AVFilterContext *dst = link->dst;
1197 int ret;
1198
1199 av_assert1(ff_framequeue_queued_frames(&link->fifo));
1200 ret = link->min_samples ?
1201 ff_inlink_consume_samples(link, link->min_samples, link->max_samples, &frame) :
1202 ff_inlink_consume_frame(link, &frame);
1203 av_assert1(ret);
1204 if (ret < 0) {
1205 av_assert1(!frame);
1206 return ret;
1207 }
/* The filter will soon have received a new frame, which may allow it to
1209 produce one or more: unblock its outputs. */
1210 filter_unblock(dst);
/* AVFilterPad.filter_frame() expects frame_count_out to have the value
1212 before the frame; ff_filter_frame_framed() will re-increment it. */
1213 link->frame_count_out--;
1214 ret = ff_filter_frame_framed(link, frame);
1215 if (ret < 0 && ret != link->status_out) {
1216 ff_avfilter_link_set_out_status(link, ret, AV_NOPTS_VALUE);
1217 } else {
/* Run once again, in case several frames were available, the input
status has also changed, or for any other reason. */
1220 ff_filter_set_ready(dst, 300);
1221 }
1222 return ret;
1223 }
1224
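/* Forward an input status change (typically EOF) by requesting frames on the
 * filter's outputs until the change is acknowledged on the input's output
 * side, or all outputs are themselves closed. */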
static int forward_status_change(AVFilterContext *filter, AVFilterLink *in)
1226 {
1227 unsigned out = 0, progress = 0;
1228 int ret;
1229
1230 av_assert0(!in->status_out);
1231 if (!filter->nb_outputs) {
1232 /* not necessary with the current API and sinks */
1233 return 0;
1234 }
1235 while (!in->status_out) {
1236 if (!filter->outputs[out]->status_in) {
1237 progress++;
1238 ret = ff_request_frame_to_filter(filter->outputs[out]);
1239 if (ret < 0)
1240 return ret;
1241 }
1242 if (++out == filter->nb_outputs) {
1243 if (!progress) {
1244 /* Every output already closed: input no longer interesting
1245 (example: overlay in shortest mode, other input closed). */
1246 ff_avfilter_link_set_out_status(in, in->status_in, in->status_in_pts);
1247 return 0;
1248 }
1249 progress = 0;
1250 out = 0;
1251 }
1252 }
1253 ff_filter_set_ready(filter, 200);
1254 return 0;
1255 }
1256
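/* Default activation logic for filters implementing the legacy
 * filter_frame()/request_frame() callbacks; see the scheduling notes below. */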
static int ff_filter_activate_default(AVFilterContext *filter)
1258 {
1259 unsigned i;
1260
1261 for (i = 0; i < filter->nb_inputs; i++) {
1262 if (samples_ready(filter->inputs[i], filter->inputs[i]->min_samples)) {
1263 return ff_filter_frame_to_filter(filter->inputs[i]);
1264 }
1265 }
1266 for (i = 0; i < filter->nb_inputs; i++) {
1267 if (filter->inputs[i]->status_in && !filter->inputs[i]->status_out) {
1268 av_assert1(!ff_framequeue_queued_frames(&filter->inputs[i]->fifo));
1269 return forward_status_change(filter, filter->inputs[i]);
1270 }
1271 }
1272 for (i = 0; i < filter->nb_outputs; i++) {
1273 if (filter->outputs[i]->frame_wanted_out &&
1274 !filter->outputs[i]->frame_blocked_in) {
1275 return ff_request_frame_to_filter(filter->outputs[i]);
1276 }
1277 }
1278 return FFERROR_NOT_READY;
1279 }
1280
1281 /*
1282 Filter scheduling and activation
1283
1284 When a filter is activated, it must:
1285 - if possible, output a frame;
1286 - else, if relevant, forward the input status change;
1287 - else, check outputs for wanted frames and forward the requests.
1288
1289 The following AVFilterLink fields are used for activation:
1290
1291 - frame_wanted_out:
1292
1293 This field indicates if a frame is needed on this input of the
1294 destination filter. A positive value indicates that a frame is needed
1295 to process queued frames or internal data or to satisfy the
1296 application; a zero value indicates that a frame is not especially
1297 needed but could be processed anyway; a negative value indicates that a
1298 frame would just be queued.
1299
1300 It is set by filters using ff_request_frame() or ff_request_no_frame(),
1301 when requested by the application through a specific API or when it is
1302 set on one of the outputs.
1303
1304 It is cleared when a frame is sent from the source using
1305 ff_filter_frame().
1306
1307 It is also cleared when a status change is sent from the source using
1308 ff_avfilter_link_set_in_status().
1309
1310 - frame_blocked_in:
1311
1312 This field means that the source filter can not generate a frame as is.
1313 Its goal is to avoid repeatedly calling the request_frame() method on
1314 the same link.
1315
1316 It is set by the framework on all outputs of a filter before activating it.
1317
1318 It is automatically cleared by ff_filter_frame().
1319
1320 It is also automatically cleared by ff_avfilter_link_set_in_status().
1321
1322 It is also cleared on all outputs (using filter_unblock()) when
1323 something happens on an input: processing a frame or changing the
1324 status.
1325
1326 - fifo:
1327
1328 Contains the frames queued on a filter input. If it contains frames and
1329 frame_wanted_out is not set, then the filter can be activated. If that
results in the filter not being able to use these frames, the filter must set
1331 frame_wanted_out to ask for more frames.
1332
1333 - status_in and status_in_pts:
1334
1335 Status (EOF or error code) of the link and timestamp of the status
1336 change (in link time base, same as frames) as seen from the input of
the link. The status change is considered to happen after the frames
1338 queued in fifo.
1339
1340 It is set by the source filter using ff_avfilter_link_set_in_status().
1341
1342 - status_out:
1343
1344 Status of the link as seen from the output of the link. The status
change is considered to have already happened.
1346
1347 It is set by the destination filter using
1348 ff_avfilter_link_set_out_status().
1349
Filters are activated according to the ready field, set using
1351 ff_filter_set_ready(). Eventually, a priority queue will be used.
1352 ff_filter_set_ready() is called whenever anything could cause progress to
1353 be possible. Marking a filter ready when it is not is not a problem,
1354 except for the small overhead it causes.
1355
1356 Conditions that cause a filter to be marked ready are:
1357
1358 - frames added on an input link;
1359
1360 - changes in the input or output status of an input link;
1361
1362 - requests for a frame on an output link;
1363
1364 - after any actual processing using the legacy methods (filter_frame(),
1365 and request_frame() to acknowledge status changes), to run once more
1366 and check if enough input was present for several frames.
1367
1368 Examples of scenarios to consider:
1369
1370 - buffersrc: activate if frame_wanted_out to notify the application;
1371 activate when the application adds a frame to push it immediately.
1372
1373 - testsrc: activate only if frame_wanted_out to produce and push a frame.
1374
1375 - concat (not at stitch points): can process a frame on any output.
1376 Activate if frame_wanted_out on output to forward on the corresponding
1377 input. Activate when a frame is present on input to process it
1378 immediately.
1379
1380 - framesync: needs at least one frame on each input; extra frames on the
1381 wrong input will accumulate. When a frame is first added on one input,
1382 set frame_wanted_out<0 on it to avoid getting more (would trigger
1383 testsrc) and frame_wanted_out>0 on the other to allow processing it.
1384
1385 Activation of old filters:
1386
1387 In order to activate a filter implementing the legacy filter_frame() and
1388 request_frame() methods, perform the first possible of the following
1389 actions:
1390
1391 - If an input has frames in fifo and frame_wanted_out == 0, dequeue a
1392 frame and call filter_frame().
1393
1394 Rationale: filter frames as soon as possible instead of leaving them
1395 queued; frame_wanted_out < 0 is not possible since the old API does not
1396 set it nor provides any similar feedback; frame_wanted_out > 0 happens
1397 when min_samples > 0 and there are not enough samples queued.
1398
1399 - If an input has status_in set but not status_out, try to call
1400 request_frame() on one of the outputs in the hope that it will trigger
1401 request_frame() on the input with status_in and acknowledge it. This is
awkward and fragile; filters with several inputs or outputs should be
1403 updated to direct activation as soon as possible.
1404
1405 - If an output has frame_wanted_out > 0 and not frame_blocked_in, call
1406 request_frame().
1407
1408 Rationale: checking frame_blocked_in is necessary to avoid requesting
1409 repeatedly on a blocked input if another is not blocked (example:
1410 [buffersrc1][testsrc1][buffersrc2][testsrc2]concat=v=2).
1411
1412 TODO: respect needs_fifo and remove auto-inserted fifos.
1413
1414 */
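/*
  As a rough illustration of the activation model described above, here is a
  minimal sketch of an activate() callback for a hypothetical 1:1
  pass-through filter. It only relies on the ff_inlink_*() helpers in this
  file and the forwarding macros from filters.h; a real filter would do its
  own processing before forwarding the frame:

      static int activate(AVFilterContext *ctx)
      {
          AVFilterLink *inlink  = ctx->inputs[0];
          AVFilterLink *outlink = ctx->outputs[0];
          AVFrame *frame;
          int ret;

          // If the output is already closed, close the input too.
          FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

          // Process one queued frame, if any.
          ret = ff_inlink_consume_frame(inlink, &frame);
          if (ret < 0)
              return ret;
          if (ret > 0)
              return ff_filter_frame(outlink, frame);

          // Otherwise forward the status change or the request for a frame.
          FF_FILTER_FORWARD_STATUS(inlink, outlink);
          FF_FILTER_FORWARD_WANTED(outlink, inlink);

          return FFERROR_NOT_READY;
      }
*/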
1415
int ff_filter_activate(AVFilterContext *filter)
1417 {
1418 int ret;
1419
1420 /* Generic timeline support is not yet implemented but should be easy */
1421 av_assert1(!(filter->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC &&
1422 filter->filter->activate));
1423 filter->ready = 0;
1424 ret = filter->filter->activate ? filter->filter->activate(filter) :
1425 ff_filter_activate_default(filter);
1426 if (ret == FFERROR_NOT_READY)
1427 ret = 0;
1428 return ret;
1429 }
1430
int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
1432 {
1433 *rpts = link->current_pts;
1434 if (ff_framequeue_queued_frames(&link->fifo))
1435 return *rstatus = 0;
1436 if (link->status_out)
1437 return *rstatus = link->status_out;
1438 if (!link->status_in)
1439 return *rstatus = 0;
1440 *rstatus = link->status_out = link->status_in;
1441 ff_update_link_current_pts(link, link->status_in_pts);
1442 *rpts = link->current_pts;
1443 return 1;
1444 }
1445
size_t ff_inlink_queued_frames(AVFilterLink *link)
1447 {
1448 return ff_framequeue_queued_frames(&link->fifo);
1449 }
1450
int ff_inlink_check_available_frame(AVFilterLink *link)
1452 {
1453 return ff_framequeue_queued_frames(&link->fifo) > 0;
1454 }
1455
int ff_inlink_queued_samples(AVFilterLink *link)
1457 {
1458 return ff_framequeue_queued_samples(&link->fifo);
1459 }
1460
int ff_inlink_check_available_samples(AVFilterLink *link, unsigned min)
1462 {
1463 uint64_t samples = ff_framequeue_queued_samples(&link->fifo);
1464 av_assert1(min);
1465 return samples >= min || (link->status_in && samples);
1466 }
1467
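/* Bookkeeping shared by the frame consumption functions: update the link's
 * current timestamp, process queued commands, evaluate the timeline state
 * and count the frame as output by the link. */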
static void consume_update(AVFilterLink *link, const AVFrame *frame)
1469 {
1470 ff_update_link_current_pts(link, frame->pts);
1471 ff_inlink_process_commands(link, frame);
1472 link->dst->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1473 link->frame_count_out++;
1474 }
1475
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
1477 {
1478 AVFrame *frame;
1479
1480 *rframe = NULL;
1481 if (!ff_inlink_check_available_frame(link))
1482 return 0;
1483
1484 if (link->fifo.samples_skipped) {
1485 frame = ff_framequeue_peek(&link->fifo, 0);
1486 return ff_inlink_consume_samples(link, frame->nb_samples, frame->nb_samples, rframe);
1487 }
1488
1489 frame = ff_framequeue_take(&link->fifo);
1490 consume_update(link, frame);
1491 *rframe = frame;
1492 return 1;
1493 }
1494
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max,
1496 AVFrame **rframe)
1497 {
1498 AVFrame *frame;
1499 int ret;
1500
1501 av_assert1(min);
1502 *rframe = NULL;
1503 if (!ff_inlink_check_available_samples(link, min))
1504 return 0;
1505 if (link->status_in)
1506 min = FFMIN(min, ff_framequeue_queued_samples(&link->fifo));
1507 ret = take_samples(link, min, max, &frame);
1508 if (ret < 0)
1509 return ret;
1510 consume_update(link, frame);
1511 *rframe = frame;
1512 return 1;
1513 }
1514
AVFrame *ff_inlink_peek_frame(AVFilterLink *link, size_t idx)
1516 {
1517 return ff_framequeue_peek(&link->fifo, idx);
1518 }
1519
int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
1521 {
1522 AVFrame *frame = *rframe;
1523 AVFrame *out;
1524 int ret;
1525
1526 if (av_frame_is_writable(frame))
1527 return 0;
1528 av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");
1529
1530 switch (link->type) {
1531 case AVMEDIA_TYPE_VIDEO:
1532 out = ff_get_video_buffer(link, link->w, link->h);
1533 break;
1534 case AVMEDIA_TYPE_AUDIO:
1535 out = ff_get_audio_buffer(link, frame->nb_samples);
1536 break;
1537 default:
1538 return AVERROR(EINVAL);
1539 }
1540 if (!out)
1541 return AVERROR(ENOMEM);
1542
1543 ret = av_frame_copy_props(out, frame);
1544 if (ret < 0) {
1545 av_frame_free(&out);
1546 return ret;
1547 }
1548
1549 switch (link->type) {
1550 case AVMEDIA_TYPE_VIDEO:
1551 av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
1552 frame->format, frame->width, frame->height);
1553 break;
1554 case AVMEDIA_TYPE_AUDIO:
1555 av_samples_copy(out->extended_data, frame->extended_data,
1556 0, 0, frame->nb_samples,
1557 frame->channels,
1558 frame->format);
1559 break;
1560 default:
1561 av_assert0(!"reached");
1562 }
1563
1564 av_frame_free(&frame);
1565 *rframe = out;
1566 return 0;
1567 }
1568
int ff_inlink_process_commands(AVFilterLink *link, const AVFrame *frame)
1570 {
1571 AVFilterCommand *cmd = link->dst->command_queue;
1572
1573 while(cmd && cmd->time <= frame->pts * av_q2d(link->time_base)){
1574 av_log(link->dst, AV_LOG_DEBUG,
1575 "Processing command time:%f command:%s arg:%s\n",
1576 cmd->time, cmd->command, cmd->arg);
1577 avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
1578 ff_command_queue_pop(link->dst);
1579 cmd= link->dst->command_queue;
1580 }
1581 return 0;
1582 }
1583
int ff_inlink_evaluate_timeline_at_frame(AVFilterLink *link, const AVFrame *frame)
1585 {
1586 AVFilterContext *dstctx = link->dst;
1587 int64_t pts = frame->pts;
1588 int64_t pos = frame->pkt_pos;
1589
1590 if (!dstctx->enable_str)
1591 return 1;
1592
1593 dstctx->var_values[VAR_N] = link->frame_count_out;
1594 dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
1595 dstctx->var_values[VAR_W] = link->w;
1596 dstctx->var_values[VAR_H] = link->h;
1597 dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;
1598
1599 return fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) >= 0.5;
1600 }
1601
void ff_inlink_request_frame(AVFilterLink *link)
1603 {
1604 av_assert1(!link->status_in);
1605 av_assert1(!link->status_out);
1606 link->frame_wanted_out = 1;
1607 ff_filter_set_ready(link->src, 100);
1608 }
1609
void ff_inlink_set_status(AVFilterLink *link, int status)
1611 {
1612 if (link->status_out)
1613 return;
1614 link->frame_wanted_out = 0;
1615 link->frame_blocked_in = 0;
1616 ff_avfilter_link_set_out_status(link, status, AV_NOPTS_VALUE);
1617 while (ff_framequeue_queued_frames(&link->fifo)) {
1618 AVFrame *frame = ff_framequeue_take(&link->fifo);
1619 av_frame_free(&frame);
1620 }
1621 if (!link->status_in)
1622 link->status_in = status;
1623 }
1624
int ff_outlink_get_status(AVFilterLink *link)
1626 {
1627 return link->status_in;
1628 }
1629
const AVClass *avfilter_get_class(void)
1631 {
1632 return &avfilter_class;
1633 }
1634
int ff_filter_init_hw_frames(AVFilterContext *avctx, AVFilterLink *link,
1636 int default_pool_size)
1637 {
1638 AVHWFramesContext *frames;
1639
1640 // Must already be set by caller.
1641 av_assert0(link->hw_frames_ctx);
1642
1643 frames = (AVHWFramesContext*)link->hw_frames_ctx->data;
1644
1645 if (frames->initial_pool_size == 0) {
1646 // Dynamic allocation is necessarily supported.
1647 } else if (avctx->extra_hw_frames >= 0) {
1648 frames->initial_pool_size += avctx->extra_hw_frames;
1649 } else {
1650 frames->initial_pool_size = default_pool_size;
1651 }
1652
1653 return 0;
1654 }
1655