1 /*
2  * filter layer
3  * Copyright (c) 2007 Bobby Bingham
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include "libavutil/avassert.h"
23 #include "libavutil/avstring.h"
24 #include "libavutil/buffer.h"
25 #include "libavutil/channel_layout.h"
26 #include "libavutil/common.h"
27 #include "libavutil/eval.h"
28 #include "libavutil/hwcontext.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/rational.h"
34 #include "libavutil/samplefmt.h"
35 #include "libavutil/thread.h"
36 
37 #define FF_INTERNAL_FIELDS 1
38 #include "framequeue.h"
39 
40 #include "audio.h"
41 #include "avfilter.h"
42 #include "filters.h"
43 #include "formats.h"
44 #include "internal.h"
45 
46 #include "libavutil/ffversion.h"
47 const char av_filter_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
48 
49 void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
50 {
51     av_unused char buf[16];
52     ff_tlog(ctx,
53             "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
54             ref, ref->buf, ref->data[0],
55             ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
56             ref->pts, ref->pkt_pos);
57 
58     if (ref->width) {
59         ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
60                 ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den,
61                 ref->width, ref->height,
62                 !ref->interlaced_frame     ? 'P' :         /* Progressive  */
63                 ref->top_field_first ? 'T' : 'B',    /* Top / Bottom */
64                 ref->key_frame,
65                 av_get_picture_type_char(ref->pict_type));
66     }
67     if (ref->nb_samples) {
68         ff_tlog(ctx, " cl:%"PRId64"d n:%d r:%d",
69                 ref->channel_layout,
70                 ref->nb_samples,
71                 ref->sample_rate);
72     }
73 
74     ff_tlog(ctx, "]%s", end ? "\n" : "");
75 }
76 
77 unsigned avfilter_version(void)
78 {
79     av_assert0(LIBAVFILTER_VERSION_MICRO >= 100);
80     return LIBAVFILTER_VERSION_INT;
81 }
82 
83 const char *avfilter_configuration(void)
84 {
85     return FFMPEG_CONFIGURATION;
86 }
87 
88 const char *avfilter_license(void)
89 {
90 #define LICENSE_PREFIX "libavfilter license: "
91     return &LICENSE_PREFIX FFMPEG_LICENSE[sizeof(LICENSE_PREFIX) - 1];
92 }
93 
94 void ff_command_queue_pop(AVFilterContext *filter)
95 {
96     AVFilterCommand *c= filter->command_queue;
97     av_freep(&c->arg);
98     av_freep(&c->command);
99     filter->command_queue= c->next;
100     av_free(c);
101 }
102 
103 int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
104                    AVFilterPad **pads, AVFilterLink ***links,
105                    AVFilterPad *newpad)
106 {
107     AVFilterLink **newlinks;
108     AVFilterPad *newpads;
109     unsigned i;
110 
111     idx = FFMIN(idx, *count);
112 
113     newpads  = av_realloc_array(*pads,  *count + 1, sizeof(AVFilterPad));
114     newlinks = av_realloc_array(*links, *count + 1, sizeof(AVFilterLink*));
115     if (newpads)
116         *pads  = newpads;
117     if (newlinks)
118         *links = newlinks;
119     if (!newpads || !newlinks)
120         return AVERROR(ENOMEM);
121 
122     memmove(*pads  + idx + 1, *pads  + idx, sizeof(AVFilterPad)   * (*count - idx));
123     memmove(*links + idx + 1, *links + idx, sizeof(AVFilterLink*) * (*count - idx));
124     memcpy(*pads + idx, newpad, sizeof(AVFilterPad));
125     (*links)[idx] = NULL;
126 
127     (*count)++;
128     for (i = idx + 1; i < *count; i++)
129         if ((*links)[i])
130             (*(unsigned *)((uint8_t *) (*links)[i] + padidx_off))++;
131 
132     return 0;
133 }
134 
135 int avfilter_link(AVFilterContext *src, unsigned srcpad,
136                   AVFilterContext *dst, unsigned dstpad)
137 {
138     AVFilterLink *link;
139 
140     av_assert0(src->graph);
141     av_assert0(dst->graph);
142     av_assert0(src->graph == dst->graph);
143 
144     if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad ||
145         src->outputs[srcpad]      || dst->inputs[dstpad])
146         return AVERROR(EINVAL);
147 
148     if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) {
149         av_log(src, AV_LOG_ERROR,
150                "Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n",
151                src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"),
152                dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?"));
153         return AVERROR(EINVAL);
154     }
155 
156     link = av_mallocz(sizeof(*link));
157     if (!link)
158         return AVERROR(ENOMEM);
159 
160     src->outputs[srcpad] = dst->inputs[dstpad] = link;
161 
162     link->src     = src;
163     link->dst     = dst;
164     link->srcpad  = &src->output_pads[srcpad];
165     link->dstpad  = &dst->input_pads[dstpad];
166     link->type    = src->output_pads[srcpad].type;
167     av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
168     link->format  = -1;
169     ff_framequeue_init(&link->fifo, &src->graph->internal->frame_queues);
170 
171     return 0;
172 }
173 
174 void avfilter_link_free(AVFilterLink **link)
175 {
176     if (!*link)
177         return;
178 
179     av_frame_free(&(*link)->partial_buf);
180     ff_framequeue_free(&(*link)->fifo);
181     ff_frame_pool_uninit((FFFramePool**)&(*link)->frame_pool);
182 
183     av_freep(link);
184 }
185 
186 #if FF_API_FILTER_GET_SET
187 int avfilter_link_get_channels(AVFilterLink *link)
188 {
189     return link->channels;
190 }
191 #endif
192 
193 void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
194 {
195     filter->ready = FFMAX(filter->ready, priority);
196 }
197 
198 /**
199  * Clear frame_blocked_in on all outputs.
200  * This is necessary whenever something changes on input.
201  */
202 static void filter_unblock(AVFilterContext *filter)
203 {
204     unsigned i;
205 
206     for (i = 0; i < filter->nb_outputs; i++)
207         filter->outputs[i]->frame_blocked_in = 0;
208 }
209 
210 
211 void ff_avfilter_link_set_in_status(AVFilterLink *link, int status, int64_t pts)
212 {
213     if (link->status_in == status)
214         return;
215     av_assert0(!link->status_in);
216     link->status_in = status;
217     link->status_in_pts = pts;
218     link->frame_wanted_out = 0;
219     link->frame_blocked_in = 0;
220     filter_unblock(link->dst);
221     ff_filter_set_ready(link->dst, 200);
222 }
223 
224 void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts)
225 {
226     av_assert0(!link->frame_wanted_out);
227     av_assert0(!link->status_out);
228     link->status_out = status;
229     if (pts != AV_NOPTS_VALUE)
230         ff_update_link_current_pts(link, pts);
231     filter_unblock(link->dst);
232     ff_filter_set_ready(link->src, 200);
233 }
234 
235 #if FF_API_FILTER_LINK_SET_CLOSED
236 void avfilter_link_set_closed(AVFilterLink *link, int closed)
237 {
238     ff_avfilter_link_set_out_status(link, closed ? AVERROR_EOF : 0, AV_NOPTS_VALUE);
239 }
240 #endif
241 int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
242                            unsigned filt_srcpad_idx, unsigned filt_dstpad_idx)
243 {
244     int ret;
245     unsigned dstpad_idx = link->dstpad - link->dst->input_pads;
246 
247     av_log(link->dst, AV_LOG_VERBOSE, "auto-inserting filter '%s' "
248            "between the filter '%s' and the filter '%s'\n",
249            filt->name, link->src->name, link->dst->name);
250 
251     link->dst->inputs[dstpad_idx] = NULL;
252     if ((ret = avfilter_link(filt, filt_dstpad_idx, link->dst, dstpad_idx)) < 0) {
253         /* failed to link output filter to new filter */
254         link->dst->inputs[dstpad_idx] = link;
255         return ret;
256     }
257 
258     /* re-hookup the link to the new destination filter we inserted */
259     link->dst                     = filt;
260     link->dstpad                  = &filt->input_pads[filt_srcpad_idx];
261     filt->inputs[filt_srcpad_idx] = link;
262 
263     /* if any information on supported media formats already exists on the
264      * link, we need to preserve that */
265     if (link->outcfg.formats)
266         ff_formats_changeref(&link->outcfg.formats,
267                              &filt->outputs[filt_dstpad_idx]->outcfg.formats);
268     if (link->outcfg.samplerates)
269         ff_formats_changeref(&link->outcfg.samplerates,
270                              &filt->outputs[filt_dstpad_idx]->outcfg.samplerates);
271     if (link->outcfg.channel_layouts)
272         ff_channel_layouts_changeref(&link->outcfg.channel_layouts,
273                                      &filt->outputs[filt_dstpad_idx]->outcfg.channel_layouts);
274 
275     return 0;
276 }
277 
278 int avfilter_config_links(AVFilterContext *filter)
279 {
280     int (*config_link)(AVFilterLink *);
281     unsigned i;
282     int ret;
283 
284     for (i = 0; i < filter->nb_inputs; i ++) {
285         AVFilterLink *link = filter->inputs[i];
286         AVFilterLink *inlink;
287 
288         if (!link) continue;
289         if (!link->src || !link->dst) {
290             av_log(filter, AV_LOG_ERROR,
291                    "Not all input and output are properly linked (%d).\n", i);
292             return AVERROR(EINVAL);
293         }
294 
295         inlink = link->src->nb_inputs ? link->src->inputs[0] : NULL;
296         link->current_pts =
297         link->current_pts_us = AV_NOPTS_VALUE;
298 
299         switch (link->init_state) {
300         case AVLINK_INIT:
301             continue;
302         case AVLINK_STARTINIT:
303             av_log(filter, AV_LOG_INFO, "circular filter chain detected\n");
304             return 0;
305         case AVLINK_UNINIT:
306             link->init_state = AVLINK_STARTINIT;
307 
308             if ((ret = avfilter_config_links(link->src)) < 0)
309                 return ret;
310 
311             if (!(config_link = link->srcpad->config_props)) {
312                 if (link->src->nb_inputs != 1) {
313                     av_log(link->src, AV_LOG_ERROR, "Source filters and filters "
314                                                     "with more than one input "
315                                                     "must set config_props() "
316                                                     "callbacks on all outputs\n");
317                     return AVERROR(EINVAL);
318                 }
319             } else if ((ret = config_link(link)) < 0) {
320                 av_log(link->src, AV_LOG_ERROR,
321                        "Failed to configure output pad on %s\n",
322                        link->src->name);
323                 return ret;
324             }
325 
326             switch (link->type) {
327             case AVMEDIA_TYPE_VIDEO:
328                 if (!link->time_base.num && !link->time_base.den)
329                     link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q;
330 
331                 if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den)
332                     link->sample_aspect_ratio = inlink ?
333                         inlink->sample_aspect_ratio : (AVRational){1,1};
334 
335                 if (inlink) {
336                     if (!link->frame_rate.num && !link->frame_rate.den)
337                         link->frame_rate = inlink->frame_rate;
338                     if (!link->w)
339                         link->w = inlink->w;
340                     if (!link->h)
341                         link->h = inlink->h;
342                 } else if (!link->w || !link->h) {
343                     av_log(link->src, AV_LOG_ERROR,
344                            "Video source filters must set their output link's "
345                            "width and height\n");
346                     return AVERROR(EINVAL);
347                 }
348                 break;
349 
350             case AVMEDIA_TYPE_AUDIO:
351                 if (inlink) {
352                     if (!link->time_base.num && !link->time_base.den)
353                         link->time_base = inlink->time_base;
354                 }
355 
356                 if (!link->time_base.num && !link->time_base.den)
357                     link->time_base = (AVRational) {1, link->sample_rate};
358             }
359 
360             if (link->src->nb_inputs && link->src->inputs[0]->hw_frames_ctx &&
361                 !(link->src->filter->flags_internal & FF_FILTER_FLAG_HWFRAME_AWARE)) {
362                 av_assert0(!link->hw_frames_ctx &&
363                            "should not be set by non-hwframe-aware filter");
364                 link->hw_frames_ctx = av_buffer_ref(link->src->inputs[0]->hw_frames_ctx);
365                 if (!link->hw_frames_ctx)
366                     return AVERROR(ENOMEM);
367             }
368 
369             if ((config_link = link->dstpad->config_props))
370                 if ((ret = config_link(link)) < 0) {
371                     av_log(link->dst, AV_LOG_ERROR,
372                            "Failed to configure input pad on %s\n",
373                            link->dst->name);
374                     return ret;
375                 }
376 
377             link->init_state = AVLINK_INIT;
378         }
379     }
380 
381     return 0;
382 }
383 
384 void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
385 {
386     if (link->type == AVMEDIA_TYPE_VIDEO) {
387         ff_tlog(ctx,
388                 "link[%p s:%dx%d fmt:%s %s->%s]%s",
389                 link, link->w, link->h,
390                 av_get_pix_fmt_name(link->format),
391                 link->src ? link->src->filter->name : "",
392                 link->dst ? link->dst->filter->name : "",
393                 end ? "\n" : "");
394     } else {
395         char buf[128];
396         av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout);
397 
398         ff_tlog(ctx,
399                 "link[%p r:%d cl:%s fmt:%s %s->%s]%s",
400                 link, (int)link->sample_rate, buf,
401                 av_get_sample_fmt_name(link->format),
402                 link->src ? link->src->filter->name : "",
403                 link->dst ? link->dst->filter->name : "",
404                 end ? "\n" : "");
405     }
406 }
407 
408 int ff_request_frame(AVFilterLink *link)
409 {
410     FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1);
411 
412     av_assert1(!link->dst->filter->activate);
413     if (link->status_out)
414         return link->status_out;
415     if (link->status_in) {
416         if (ff_framequeue_queued_frames(&link->fifo)) {
417             av_assert1(!link->frame_wanted_out);
418             av_assert1(link->dst->ready >= 300);
419             return 0;
420         } else {
421             /* Acknowledge status change. Filters using ff_request_frame() will
422                handle the change automatically. Filters can also check the
423                status directly but none do yet. */
424             ff_avfilter_link_set_out_status(link, link->status_in, link->status_in_pts);
425             return link->status_out;
426         }
427     }
428     link->frame_wanted_out = 1;
429     ff_filter_set_ready(link->src, 100);
430     return 0;
431 }
432 
433 static int64_t guess_status_pts(AVFilterContext *ctx, int status, AVRational link_time_base)
434 {
435     unsigned i;
436     int64_t r = INT64_MAX;
437 
438     for (i = 0; i < ctx->nb_inputs; i++)
439         if (ctx->inputs[i]->status_out == status)
440             r = FFMIN(r, av_rescale_q(ctx->inputs[i]->current_pts, ctx->inputs[i]->time_base, link_time_base));
441     if (r < INT64_MAX)
442         return r;
443     av_log(ctx, AV_LOG_WARNING, "EOF timestamp not reliable\n");
444     for (i = 0; i < ctx->nb_inputs; i++)
445         r = FFMIN(r, av_rescale_q(ctx->inputs[i]->status_in_pts, ctx->inputs[i]->time_base, link_time_base));
446     if (r < INT64_MAX)
447         return r;
448     return AV_NOPTS_VALUE;
449 }
450 
451 static int ff_request_frame_to_filter(AVFilterLink *link)
452 {
453     int ret = -1;
454 
455     FF_TPRINTF_START(NULL, request_frame_to_filter); ff_tlog_link(NULL, link, 1);
456     /* Assume the filter is blocked, let the method clear it if not */
457     link->frame_blocked_in = 1;
458     if (link->srcpad->request_frame)
459         ret = link->srcpad->request_frame(link);
460     else if (link->src->inputs[0])
461         ret = ff_request_frame(link->src->inputs[0]);
462     if (ret < 0) {
463         if (ret != AVERROR(EAGAIN) && ret != link->status_in)
464             ff_avfilter_link_set_in_status(link, ret, guess_status_pts(link->src, ret, link->time_base));
465         if (ret == AVERROR_EOF)
466             ret = 0;
467     }
468     return ret;
469 }
470 
471 static const char *const var_names[] = {
472     "t",
473     "n",
474     "pos",
475     "w",
476     "h",
477     NULL
478 };
479 
480 enum {
481     VAR_T,
482     VAR_N,
483     VAR_POS,
484     VAR_W,
485     VAR_H,
486     VAR_VARS_NB
487 };
488 
489 static int set_enable_expr(AVFilterContext *ctx, const char *expr)
490 {
491     int ret;
492     char *expr_dup;
493     AVExpr *old = ctx->enable;
494 
495     if (!(ctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE)) {
496         av_log(ctx, AV_LOG_ERROR, "Timeline ('enable' option) not supported "
497                "with filter '%s'\n", ctx->filter->name);
498         return AVERROR_PATCHWELCOME;
499     }
500 
501     expr_dup = av_strdup(expr);
502     if (!expr_dup)
503         return AVERROR(ENOMEM);
504 
505     if (!ctx->var_values) {
506         ctx->var_values = av_calloc(VAR_VARS_NB, sizeof(*ctx->var_values));
507         if (!ctx->var_values) {
508             av_free(expr_dup);
509             return AVERROR(ENOMEM);
510         }
511     }
512 
513     ret = av_expr_parse((AVExpr**)&ctx->enable, expr_dup, var_names,
514                         NULL, NULL, NULL, NULL, 0, ctx->priv);
515     if (ret < 0) {
516         av_log(ctx->priv, AV_LOG_ERROR,
517                "Error when evaluating the expression '%s' for enable\n",
518                expr_dup);
519         av_free(expr_dup);
520         return ret;
521     }
522 
523     av_expr_free(old);
524     av_free(ctx->enable_str);
525     ctx->enable_str = expr_dup;
526     return 0;
527 }
528 
529 void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
530 {
531     if (pts == AV_NOPTS_VALUE)
532         return;
533     link->current_pts = pts;
534     link->current_pts_us = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q);
535     /* TODO use duration */
536     if (link->graph && link->age_index >= 0)
537         ff_avfilter_graph_update_heap(link->graph, link);
538 }
539 
540 int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
541 {
542     if(!strcmp(cmd, "ping")){
543         char local_res[256] = {0};
544 
545         if (!res) {
546             res = local_res;
547             res_len = sizeof(local_res);
548         }
549         av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name);
550         if (res == local_res)
551             av_log(filter, AV_LOG_INFO, "%s", res);
552         return 0;
553     }else if(!strcmp(cmd, "enable")) {
554         return set_enable_expr(filter, arg);
555     }else if(filter->filter->process_command) {
556         return filter->filter->process_command(filter, cmd, arg, res, res_len, flags);
557     }
558     return AVERROR(ENOSYS);
559 }
560 
561 int avfilter_pad_count(const AVFilterPad *pads)
562 {
563     int count;
564 
565     if (!pads)
566         return 0;
567 
568     for (count = 0; pads->name; count++)
569         pads++;
570     return count;
571 }
572 
573 static const char *default_filter_name(void *filter_ctx)
574 {
575     AVFilterContext *ctx = filter_ctx;
576     return ctx->name ? ctx->name : ctx->filter->name;
577 }
578 
579 static void *filter_child_next(void *obj, void *prev)
580 {
581     AVFilterContext *ctx = obj;
582     if (!prev && ctx->filter && ctx->filter->priv_class && ctx->priv)
583         return ctx->priv;
584     return NULL;
585 }
586 
587 #if FF_API_CHILD_CLASS_NEXT
588 static const AVClass *filter_child_class_next(const AVClass *prev)
589 {
590     void *opaque = NULL;
591     const AVFilter *f = NULL;
592 
593     /* find the filter that corresponds to prev */
594     while (prev && (f = av_filter_iterate(&opaque)))
595         if (f->priv_class == prev)
596             break;
597 
598     /* could not find filter corresponding to prev */
599     if (prev && !f)
600         return NULL;
601 
602     /* find next filter with specific options */
603     while ((f = av_filter_iterate(&opaque)))
604         if (f->priv_class)
605             return f->priv_class;
606 
607     return NULL;
608 }
609 #endif
610 
611 static const AVClass *filter_child_class_iterate(void **iter)
612 {
613     const AVFilter *f;
614 
615     while ((f = av_filter_iterate(iter)))
616         if (f->priv_class)
617             return f->priv_class;
618 
619     return NULL;
620 }
621 
622 #define OFFSET(x) offsetof(AVFilterContext, x)
623 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
624 #define TFLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
625 static const AVOption avfilter_options[] = {
626     { "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
627         { .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
628         { "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .flags = FLAGS, .unit = "thread_type" },
629     { "enable", "set enable expression", OFFSET(enable_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = TFLAGS },
630     { "threads", "Allowed number of threads", OFFSET(nb_threads), AV_OPT_TYPE_INT,
631         { .i64 = 0 }, 0, INT_MAX, FLAGS },
632     { "extra_hw_frames", "Number of extra hardware frames to allocate for the user",
633         OFFSET(extra_hw_frames), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
634     { NULL },
635 };
636 
637 static const AVClass avfilter_class = {
638     .class_name = "AVFilter",
639     .item_name  = default_filter_name,
640     .version    = LIBAVUTIL_VERSION_INT,
641     .category   = AV_CLASS_CATEGORY_FILTER,
642     .child_next = filter_child_next,
643 #if FF_API_CHILD_CLASS_NEXT
644     .child_class_next = filter_child_class_next,
645 #endif
646     .child_class_iterate = filter_child_class_iterate,
647     .option           = avfilter_options,
648 };
649 
650 static int default_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg,
651                            int *ret, int nb_jobs)
652 {
653     int i;
654 
655     for (i = 0; i < nb_jobs; i++) {
656         int r = func(ctx, arg, i, nb_jobs);
657         if (ret)
658             ret[i] = r;
659     }
660     return 0;
661 }
662 
663 AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
664 {
665     AVFilterContext *ret;
666     int preinited = 0;
667 
668     if (!filter)
669         return NULL;
670 
671     ret = av_mallocz(sizeof(AVFilterContext));
672     if (!ret)
673         return NULL;
674 
675     ret->av_class = &avfilter_class;
676     ret->filter   = filter;
677     ret->name     = inst_name ? av_strdup(inst_name) : NULL;
678     if (filter->priv_size) {
679         ret->priv     = av_mallocz(filter->priv_size);
680         if (!ret->priv)
681             goto err;
682     }
683     if (filter->preinit) {
684         if (filter->preinit(ret) < 0)
685             goto err;
686         preinited = 1;
687     }
688 
689     av_opt_set_defaults(ret);
690     if (filter->priv_class) {
691         *(const AVClass**)ret->priv = filter->priv_class;
692         av_opt_set_defaults(ret->priv);
693     }
694 
695     ret->internal = av_mallocz(sizeof(*ret->internal));
696     if (!ret->internal)
697         goto err;
698     ret->internal->execute = default_execute;
699 
700     ret->nb_inputs = avfilter_pad_count(filter->inputs);
701     if (ret->nb_inputs ) {
702         ret->input_pads   = av_malloc_array(ret->nb_inputs, sizeof(AVFilterPad));
703         if (!ret->input_pads)
704             goto err;
705         memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
706         ret->inputs       = av_mallocz_array(ret->nb_inputs, sizeof(AVFilterLink*));
707         if (!ret->inputs)
708             goto err;
709     }
710 
711     ret->nb_outputs = avfilter_pad_count(filter->outputs);
712     if (ret->nb_outputs) {
713         ret->output_pads  = av_malloc_array(ret->nb_outputs, sizeof(AVFilterPad));
714         if (!ret->output_pads)
715             goto err;
716         memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
717         ret->outputs      = av_mallocz_array(ret->nb_outputs, sizeof(AVFilterLink*));
718         if (!ret->outputs)
719             goto err;
720     }
721 
722     return ret;
723 
724 err:
725     if (preinited)
726         filter->uninit(ret);
727     av_freep(&ret->inputs);
728     av_freep(&ret->input_pads);
729     ret->nb_inputs = 0;
730     av_freep(&ret->outputs);
731     av_freep(&ret->output_pads);
732     ret->nb_outputs = 0;
733     av_freep(&ret->priv);
734     av_freep(&ret->internal);
735     av_free(ret);
736     return NULL;
737 }
738 
739 static void free_link(AVFilterLink *link)
740 {
741     if (!link)
742         return;
743 
744     if (link->src)
745         link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
746     if (link->dst)
747         link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
748 
749     av_buffer_unref(&link->hw_frames_ctx);
750 
751     ff_formats_unref(&link->incfg.formats);
752     ff_formats_unref(&link->outcfg.formats);
753     ff_formats_unref(&link->incfg.samplerates);
754     ff_formats_unref(&link->outcfg.samplerates);
755     ff_channel_layouts_unref(&link->incfg.channel_layouts);
756     ff_channel_layouts_unref(&link->outcfg.channel_layouts);
757     avfilter_link_free(&link);
758 }
759 
760 void avfilter_free(AVFilterContext *filter)
761 {
762     int i;
763 
764     if (!filter)
765         return;
766 
767     if (filter->graph)
768         ff_filter_graph_remove_filter(filter->graph, filter);
769 
770     if (filter->filter->uninit)
771         filter->filter->uninit(filter);
772 
773     for (i = 0; i < filter->nb_inputs; i++) {
774         free_link(filter->inputs[i]);
775     }
776     for (i = 0; i < filter->nb_outputs; i++) {
777         free_link(filter->outputs[i]);
778     }
779 
780     if (filter->filter->priv_class)
781         av_opt_free(filter->priv);
782 
783     av_buffer_unref(&filter->hw_device_ctx);
784 
785     av_freep(&filter->name);
786     av_freep(&filter->input_pads);
787     av_freep(&filter->output_pads);
788     av_freep(&filter->inputs);
789     av_freep(&filter->outputs);
790     av_freep(&filter->priv);
791     while(filter->command_queue){
792         ff_command_queue_pop(filter);
793     }
794     av_opt_free(filter);
795     av_expr_free(filter->enable);
796     filter->enable = NULL;
797     av_freep(&filter->var_values);
798     av_freep(&filter->internal);
799     av_free(filter);
800 }
801 
802 int ff_filter_get_nb_threads(AVFilterContext *ctx)
803 {
804     if (ctx->nb_threads > 0)
805         return FFMIN(ctx->nb_threads, ctx->graph->nb_threads);
806     return ctx->graph->nb_threads;
807 }
808 
809 static int process_options(AVFilterContext *ctx, AVDictionary **options,
810                            const char *args)
811 {
812     const AVOption *o = NULL;
813     int ret, count = 0;
814     char *av_uninit(parsed_key), *av_uninit(value);
815     const char *key;
816     int offset= -1;
817 
818     if (!args)
819         return 0;
820 
821     while (*args) {
822         const char *shorthand = NULL;
823 
824         o = av_opt_next(ctx->priv, o);
825         if (o) {
826             if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
827                 continue;
828             offset = o->offset;
829             shorthand = o->name;
830         }
831 
832         ret = av_opt_get_key_value(&args, "=", ":",
833                                    shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
834                                    &parsed_key, &value);
835         if (ret < 0) {
836             if (ret == AVERROR(EINVAL))
837                 av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
838             else
839                 av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
840                        av_err2str(ret));
841             return ret;
842         }
843         if (*args)
844             args++;
845         if (parsed_key) {
846             key = parsed_key;
847             while ((o = av_opt_next(ctx->priv, o))); /* discard all remaining shorthand */
848         } else {
849             key = shorthand;
850         }
851 
852         av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);
853 
854         if (av_opt_find(ctx, key, NULL, 0, 0)) {
855             ret = av_opt_set(ctx, key, value, 0);
856             if (ret < 0) {
857                 av_free(value);
858                 av_free(parsed_key);
859                 return ret;
860             }
861         } else {
862             av_dict_set(options, key, value, 0);
863             if ((ret = av_opt_set(ctx->priv, key, value, AV_OPT_SEARCH_CHILDREN)) < 0) {
864                 if (!av_opt_find(ctx->priv, key, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
865                     if (ret == AVERROR_OPTION_NOT_FOUND)
866                         av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
867                     av_free(value);
868                     av_free(parsed_key);
869                     return ret;
870                 }
871             }
872         }
873 
874         av_free(value);
875         av_free(parsed_key);
876         count++;
877     }
878 
879     return count;
880 }
881 
882 int ff_filter_process_command(AVFilterContext *ctx, const char *cmd,
883                               const char *arg, char *res, int res_len, int flags)
884 {
885     const AVOption *o;
886 
887     if (!ctx->filter->priv_class)
888         return 0;
889     o = av_opt_find2(ctx->priv, cmd, NULL, AV_OPT_FLAG_RUNTIME_PARAM | AV_OPT_FLAG_FILTERING_PARAM, AV_OPT_SEARCH_CHILDREN, NULL);
890     if (!o)
891         return AVERROR(ENOSYS);
892     return av_opt_set(ctx->priv, cmd, arg, 0);
893 }
894 
895 int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
896 {
897     int ret = 0;
898 
899     ret = av_opt_set_dict(ctx, options);
900     if (ret < 0) {
901         av_log(ctx, AV_LOG_ERROR, "Error applying generic filter options.\n");
902         return ret;
903     }
904 
905     if (ctx->filter->flags & AVFILTER_FLAG_SLICE_THREADS &&
906         ctx->thread_type & ctx->graph->thread_type & AVFILTER_THREAD_SLICE &&
907         ctx->graph->internal->thread_execute) {
908         ctx->thread_type       = AVFILTER_THREAD_SLICE;
909         ctx->internal->execute = ctx->graph->internal->thread_execute;
910     } else {
911         ctx->thread_type = 0;
912     }
913 
914     if (ctx->filter->priv_class) {
915         ret = av_opt_set_dict2(ctx->priv, options, AV_OPT_SEARCH_CHILDREN);
916         if (ret < 0) {
917             av_log(ctx, AV_LOG_ERROR, "Error applying options to the filter.\n");
918             return ret;
919         }
920     }
921 
922     if (ctx->filter->init_opaque)
923         ret = ctx->filter->init_opaque(ctx, NULL);
924     else if (ctx->filter->init)
925         ret = ctx->filter->init(ctx);
926     else if (ctx->filter->init_dict)
927         ret = ctx->filter->init_dict(ctx, options);
928 
929     if (ctx->enable_str) {
930         ret = set_enable_expr(ctx, ctx->enable_str);
931         if (ret < 0)
932             return ret;
933     }
934 
935     return ret;
936 }
937 
938 int avfilter_init_str(AVFilterContext *filter, const char *args)
939 {
940     AVDictionary *options = NULL;
941     AVDictionaryEntry *e;
942     int ret = 0;
943 
944     if (args && *args) {
945         if (!filter->filter->priv_class) {
946             av_log(filter, AV_LOG_ERROR, "This filter does not take any "
947                    "options, but options were provided: %s.\n", args);
948             return AVERROR(EINVAL);
949         }
950 
951 #if FF_API_OLD_FILTER_OPTS_ERROR
952             if (   !strcmp(filter->filter->name, "format")     ||
953                    !strcmp(filter->filter->name, "noformat")   ||
954                    !strcmp(filter->filter->name, "frei0r")     ||
955                    !strcmp(filter->filter->name, "frei0r_src") ||
956                    !strcmp(filter->filter->name, "ocv")        ||
957                    !strcmp(filter->filter->name, "pan")        ||
958                    !strcmp(filter->filter->name, "pp")         ||
959                    !strcmp(filter->filter->name, "aevalsrc")) {
960             /* a hack for compatibility with the old syntax
961              * replace colons with |s */
962             char *copy = av_strdup(args);
963             char *p    = copy;
964             int nb_leading = 0; // number of leading colons to skip
965             int deprecated = 0;
966 
967             if (!copy) {
968                 ret = AVERROR(ENOMEM);
969                 goto fail;
970             }
971 
972             if (!strcmp(filter->filter->name, "frei0r") ||
973                 !strcmp(filter->filter->name, "ocv"))
974                 nb_leading = 1;
975             else if (!strcmp(filter->filter->name, "frei0r_src"))
976                 nb_leading = 3;
977 
978             while (nb_leading--) {
979                 p = strchr(p, ':');
980                 if (!p) {
981                     p = copy + strlen(copy);
982                     break;
983                 }
984                 p++;
985             }
986 
987             deprecated = strchr(p, ':') != NULL;
988 
989             if (!strcmp(filter->filter->name, "aevalsrc")) {
990                 deprecated = 0;
991                 while ((p = strchr(p, ':')) && p[1] != ':') {
992                     const char *epos = strchr(p + 1, '=');
993                     const char *spos = strchr(p + 1, ':');
994                     const int next_token_is_opt = epos && (!spos || epos < spos);
995                     if (next_token_is_opt) {
996                         p++;
997                         break;
998                     }
999                     /* next token does not contain a '=', assume a channel expression */
1000                     deprecated = 1;
1001                     *p++ = '|';
1002                 }
1003                 if (p && *p == ':') { // double sep '::' found
1004                     deprecated = 1;
1005                     memmove(p, p + 1, strlen(p));
1006                 }
1007             } else
1008             while ((p = strchr(p, ':')))
1009                 *p++ = '|';
1010 
1011             if (deprecated) {
1012                 av_log(filter, AV_LOG_ERROR, "This syntax is deprecated. Use "
1013                        "'|' to separate the list items ('%s' instead of '%s')\n",
1014                        copy, args);
1015                 ret = AVERROR(EINVAL);
1016             } else {
1017                 ret = process_options(filter, &options, copy);
1018             }
1019             av_freep(&copy);
1020 
1021             if (ret < 0)
1022                 goto fail;
1023         } else
1024 #endif
1025         {
1026             ret = process_options(filter, &options, args);
1027             if (ret < 0)
1028                 goto fail;
1029         }
1030     }
1031 
1032     ret = avfilter_init_dict(filter, &options);
1033     if (ret < 0)
1034         goto fail;
1035 
1036     if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
1037         av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key);
1038         ret = AVERROR_OPTION_NOT_FOUND;
1039         goto fail;
1040     }
1041 
1042 fail:
1043     av_dict_free(&options);
1044 
1045     return ret;
1046 }
1047 
1048 const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
1049 {
1050     return pads[pad_idx].name;
1051 }
1052 
1053 enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
1054 {
1055     return pads[pad_idx].type;
1056 }
1057 
1058 static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
1059 {
1060     return ff_filter_frame(link->dst->outputs[0], frame);
1061 }
1062 
1063 static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
1064 {
1065     int (*filter_frame)(AVFilterLink *, AVFrame *);
1066     AVFilterContext *dstctx = link->dst;
1067     AVFilterPad *dst = link->dstpad;
1068     int ret;
1069 
1070     if (!(filter_frame = dst->filter_frame))
1071         filter_frame = default_filter_frame;
1072 
1073     if (dst->needs_writable) {
1074         ret = ff_inlink_make_frame_writable(link, &frame);
1075         if (ret < 0)
1076             goto fail;
1077     }
1078 
1079     ff_inlink_process_commands(link, frame);
1080     dstctx->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1081 
1082     if (dstctx->is_disabled &&
1083         (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
1084         filter_frame = default_filter_frame;
1085     ret = filter_frame(link, frame);
1086     link->frame_count_out++;
1087     return ret;
1088 
1089 fail:
1090     av_frame_free(&frame);
1091     return ret;
1092 }
1093 
1094 int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
1095 {
1096     int ret;
1097     FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);
1098 
1099     /* Consistency checks */
1100     if (link->type == AVMEDIA_TYPE_VIDEO) {
1101         if (strcmp(link->dst->filter->name, "buffersink") &&
1102             strcmp(link->dst->filter->name, "format") &&
1103             strcmp(link->dst->filter->name, "idet") &&
1104             strcmp(link->dst->filter->name, "null") &&
1105             strcmp(link->dst->filter->name, "scale")) {
1106             av_assert1(frame->format                 == link->format);
1107             av_assert1(frame->width               == link->w);
1108             av_assert1(frame->height               == link->h);
1109         }
1110     } else {
1111         if (frame->format != link->format) {
1112             av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
1113             goto error;
1114         }
1115         if (frame->channels != link->channels) {
1116             av_log(link->dst, AV_LOG_ERROR, "Channel count change is not supported\n");
1117             goto error;
1118         }
1119         if (frame->channel_layout != link->channel_layout) {
1120             av_log(link->dst, AV_LOG_ERROR, "Channel layout change is not supported\n");
1121             goto error;
1122         }
1123         if (frame->sample_rate != link->sample_rate) {
1124             av_log(link->dst, AV_LOG_ERROR, "Sample rate change is not supported\n");
1125             goto error;
1126         }
1127     }
1128 
1129     link->frame_blocked_in = link->frame_wanted_out = 0;
1130     link->frame_count_in++;
1131     filter_unblock(link->dst);
1132     ret = ff_framequeue_add(&link->fifo, frame);
1133     if (ret < 0) {
1134         av_frame_free(&frame);
1135         return ret;
1136     }
1137     ff_filter_set_ready(link->dst, 300);
1138     return 0;
1139 
1140 error:
1141     av_frame_free(&frame);
1142     return AVERROR_PATCHWELCOME;
1143 }
1144 
1145 static int samples_ready(AVFilterLink *link, unsigned min)
1146 {
1147     return ff_framequeue_queued_frames(&link->fifo) &&
1148            (ff_framequeue_queued_samples(&link->fifo) >= min ||
1149             link->status_in);
1150 }
1151 
1152 static int take_samples(AVFilterLink *link, unsigned min, unsigned max,
1153                         AVFrame **rframe)
1154 {
1155     AVFrame *frame0, *frame, *buf;
1156     unsigned nb_samples, nb_frames, i, p;
1157     int ret;
1158 
1159     /* Note: this function relies on no format changes and must only be
1160        called with enough samples. */
1161     av_assert1(samples_ready(link, link->min_samples));
1162     frame0 = frame = ff_framequeue_peek(&link->fifo, 0);
1163     if (!link->fifo.samples_skipped && frame->nb_samples >= min && frame->nb_samples <= max) {
1164         *rframe = ff_framequeue_take(&link->fifo);
1165         return 0;
1166     }
1167     nb_frames = 0;
1168     nb_samples = 0;
1169     while (1) {
1170         if (nb_samples + frame->nb_samples > max) {
1171             if (nb_samples < min)
1172                 nb_samples = max;
1173             break;
1174         }
1175         nb_samples += frame->nb_samples;
1176         nb_frames++;
1177         if (nb_frames == ff_framequeue_queued_frames(&link->fifo))
1178             break;
1179         frame = ff_framequeue_peek(&link->fifo, nb_frames);
1180     }
1181 
1182     buf = ff_get_audio_buffer(link, nb_samples);
1183     if (!buf)
1184         return AVERROR(ENOMEM);
1185     ret = av_frame_copy_props(buf, frame0);
1186     if (ret < 0) {
1187         av_frame_free(&buf);
1188         return ret;
1189     }
1190     buf->pts = frame0->pts;
1191 
1192     p = 0;
1193     for (i = 0; i < nb_frames; i++) {
1194         frame = ff_framequeue_take(&link->fifo);
1195         av_samples_copy(buf->extended_data, frame->extended_data, p, 0,
1196                         frame->nb_samples, link->channels, link->format);
1197         p += frame->nb_samples;
1198         av_frame_free(&frame);
1199     }
1200     if (p < nb_samples) {
1201         unsigned n = nb_samples - p;
1202         frame = ff_framequeue_peek(&link->fifo, 0);
1203         av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n,
1204                         link->channels, link->format);
1205         ff_framequeue_skip_samples(&link->fifo, n, link->time_base);
1206     }
1207 
1208     *rframe = buf;
1209     return 0;
1210 }
1211 
1212 static int ff_filter_frame_to_filter(AVFilterLink *link)
1213 {
1214     AVFrame *frame = NULL;
1215     AVFilterContext *dst = link->dst;
1216     int ret;
1217 
1218     av_assert1(ff_framequeue_queued_frames(&link->fifo));
1219     ret = link->min_samples ?
1220           ff_inlink_consume_samples(link, link->min_samples, link->max_samples, &frame) :
1221           ff_inlink_consume_frame(link, &frame);
1222     av_assert1(ret);
1223     if (ret < 0) {
1224         av_assert1(!frame);
1225         return ret;
1226     }
1227     /* The filter will soon have received a new frame, that may allow it to
1228        produce one or more: unblock its outputs. */
1229     filter_unblock(dst);
1230     /* AVFilterPad.filter_frame() expects frame_count_out to have the value
1231        it had before the frame; ff_filter_frame_framed() will re-increment it. */
1232     link->frame_count_out--;
1233     ret = ff_filter_frame_framed(link, frame);
1234     if (ret < 0 && ret != link->status_out) {
1235         ff_avfilter_link_set_out_status(link, ret, AV_NOPTS_VALUE);
1236     } else {
1237         /* Run once again, to see if several frames were available, or if
1238            the input status has also changed, or any other reason. */
1239         ff_filter_set_ready(dst, 300);
1240     }
1241     return ret;
1242 }
1243 
1244 static int forward_status_change(AVFilterContext *filter, AVFilterLink *in)
1245 {
1246     unsigned out = 0, progress = 0;
1247     int ret;
1248 
1249     av_assert0(!in->status_out);
1250     if (!filter->nb_outputs) {
1251         /* not necessary with the current API and sinks */
1252         return 0;
1253     }
1254     while (!in->status_out) {
1255         if (!filter->outputs[out]->status_in) {
1256             progress++;
1257             ret = ff_request_frame_to_filter(filter->outputs[out]);
1258             if (ret < 0)
1259                 return ret;
1260         }
1261         if (++out == filter->nb_outputs) {
1262             if (!progress) {
1263                 /* Every output already closed: input no longer interesting
1264                    (example: overlay in shortest mode, other input closed). */
1265                 ff_avfilter_link_set_out_status(in, in->status_in, in->status_in_pts);
1266                 return 0;
1267             }
1268             progress = 0;
1269             out = 0;
1270         }
1271     }
1272     ff_filter_set_ready(filter, 200);
1273     return 0;
1274 }
1275 
1276 static int ff_filter_activate_default(AVFilterContext *filter)
1277 {
1278     unsigned i;
1279 
1280     for (i = 0; i < filter->nb_inputs; i++) {
1281         if (samples_ready(filter->inputs[i], filter->inputs[i]->min_samples)) {
1282             return ff_filter_frame_to_filter(filter->inputs[i]);
1283         }
1284     }
1285     for (i = 0; i < filter->nb_inputs; i++) {
1286         if (filter->inputs[i]->status_in && !filter->inputs[i]->status_out) {
1287             av_assert1(!ff_framequeue_queued_frames(&filter->inputs[i]->fifo));
1288             return forward_status_change(filter, filter->inputs[i]);
1289         }
1290     }
1291     for (i = 0; i < filter->nb_outputs; i++) {
1292         if (filter->outputs[i]->frame_wanted_out &&
1293             !filter->outputs[i]->frame_blocked_in) {
1294             return ff_request_frame_to_filter(filter->outputs[i]);
1295         }
1296     }
1297     return FFERROR_NOT_READY;
1298 }
1299 
1300 /*
1301    Filter scheduling and activation
1302 
1303    When a filter is activated, it must:
1304    - if possible, output a frame;
1305    - else, if relevant, forward the input status change;
1306    - else, check outputs for wanted frames and forward the requests.
1307 
1308    The following AVFilterLink fields are used for activation:
1309 
1310    - frame_wanted_out:
1311 
1312      This field indicates if a frame is needed on this input of the
1313      destination filter. A positive value indicates that a frame is needed
1314      to process queued frames or internal data or to satisfy the
1315      application; a zero value indicates that a frame is not especially
1316      needed but could be processed anyway; a negative value indicates that a
1317      frame would just be queued.
1318 
1319      It is set by filters using ff_request_frame() or ff_request_no_frame(),
1320      when requested by the application through a specific API or when it is
1321      set on one of the outputs.
1322 
1323      It is cleared when a frame is sent from the source using
1324      ff_filter_frame().
1325 
1326      It is also cleared when a status change is sent from the source using
1327      ff_avfilter_link_set_in_status().
1328 
1329    - frame_blocked_in:
1330 
1331      This field means that the source filter can not generate a frame as is.
1332      Its goal is to avoid repeatedly calling the request_frame() method on
1333      the same link.
1334 
1335      It is set by the framework on all outputs of a filter before activating it.
1336 
1337      It is automatically cleared by ff_filter_frame().
1338 
1339      It is also automatically cleared by ff_avfilter_link_set_in_status().
1340 
1341      It is also cleared on all outputs (using filter_unblock()) when
1342      something happens on an input: processing a frame or changing the
1343      status.
1344 
1345    - fifo:
1346 
1347      Contains the frames queued on a filter input. If it contains frames and
1348      frame_wanted_out is not set, then the filter can be activated. If that
1349      results in the filter not being able to use these frames, the filter must set
1350      frame_wanted_out to ask for more frames.
1351 
1352    - status_in and status_in_pts:
1353 
1354      Status (EOF or error code) of the link and timestamp of the status
1355      change (in link time base, same as frames) as seen from the input of
1356      the link. The status change is considered happening after the frames
1357      queued in fifo.
1358 
1359      It is set by the source filter using ff_avfilter_link_set_in_status().
1360 
1361    - status_out:
1362 
1363      Status of the link as seen from the output of the link. The status
1364      change is considered having already happened.
1365 
1366      It is set by the destination filter using
1367      ff_avfilter_link_set_out_status().
1368 
1369    Filters are activated according to the ready field, set using the
1370    ff_filter_set_ready(). Eventually, a priority queue will be used.
1371    ff_filter_set_ready() is called whenever anything could cause progress to
1372    be possible. Marking a filter ready when it is not is not a problem,
1373    except for the small overhead it causes.
1374 
1375    Conditions that cause a filter to be marked ready are:
1376 
1377    - frames added on an input link;
1378 
1379    - changes in the input or output status of an input link;
1380 
1381    - requests for a frame on an output link;
1382 
1383    - after any actual processing using the legacy methods (filter_frame(),
1384      and request_frame() to acknowledge status changes), to run once more
1385      and check if enough input was present for several frames.
1386 
1387    Examples of scenarios to consider:
1388 
1389    - buffersrc: activate if frame_wanted_out to notify the application;
1390      activate when the application adds a frame to push it immediately.
1391 
1392    - testsrc: activate only if frame_wanted_out to produce and push a frame.
1393 
1394    - concat (not at stitch points): can process a frame on any output.
1395      Activate if frame_wanted_out on output to forward on the corresponding
1396      input. Activate when a frame is present on input to process it
1397      immediately.
1398 
1399    - framesync: needs at least one frame on each input; extra frames on the
1400      wrong input will accumulate. When a frame is first added on one input,
1401      set frame_wanted_out<0 on it to avoid getting more (would trigger
1402      testsrc) and frame_wanted_out>0 on the other to allow processing it.
1403 
1404    Activation of old filters:
1405 
1406    In order to activate a filter implementing the legacy filter_frame() and
1407    request_frame() methods, perform the first possible of the following
1408    actions:
1409 
1410    - If an input has frames in fifo and frame_wanted_out == 0, dequeue a
1411      frame and call filter_frame().
1412 
1413      Rationale: filter frames as soon as possible instead of leaving them
1414      queued; frame_wanted_out < 0 is not possible since the old API does not
1415      set it nor provides any similar feedback; frame_wanted_out > 0 happens
1416      when min_samples > 0 and there are not enough samples queued.
1417 
1418    - If an input has status_in set but not status_out, try to call
1419      request_frame() on one of the outputs in the hope that it will trigger
1420      request_frame() on the input with status_in and acknowledge it. This is
1421      awkward and fragile, filters with several inputs or outputs should be
1422      updated to direct activation as soon as possible.
1423 
1424    - If an output has frame_wanted_out > 0 and not frame_blocked_in, call
1425      request_frame().
1426 
1427      Rationale: checking frame_blocked_in is necessary to avoid requesting
1428      repeatedly on a blocked input if another is not blocked (example:
1429      [buffersrc1][testsrc1][buffersrc2][testsrc2]concat=v=2).
1430  */
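/*
   As a minimal sketch of the direct activation scheme described above, a
   simple one-input/one-output filter can implement its activate() callback
   with the inlink/outlink helpers declared in filters.h and in this file
   (ff_inlink_consume_frame(), ff_inlink_acknowledge_status(),
   ff_outlink_set_status(), FF_FILTER_FORWARD_STATUS_BACK(),
   FF_FILTER_FORWARD_WANTED()); a real filter's body will of course differ:

       static int activate(AVFilterContext *ctx)
       {
           AVFilterLink *inlink  = ctx->inputs[0];
           AVFilterLink *outlink = ctx->outputs[0];
           AVFrame *frame;
           int ret, status;
           int64_t pts;

           // If the output already reached EOF or an error, propagate that
           // status back to the input and stop.
           FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

           // Consume one queued frame from the input, if any, and forward it.
           ret = ff_inlink_consume_frame(inlink, &frame);
           if (ret < 0)
               return ret;
           if (ret > 0)
               return ff_filter_frame(outlink, frame);

           // No frame available: acknowledge a pending input status change
           // (EOF or error) and forward it to the output.
           if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
               ff_outlink_set_status(outlink, status, pts);
               return 0;
           }

           // Otherwise, relay the output's frame request to the input.
           FF_FILTER_FORWARD_WANTED(outlink, inlink);
           return FFERROR_NOT_READY;
       }
 */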
1431 
1432 int ff_filter_activate(AVFilterContext *filter)
1433 {
1434     int ret;
1435 
1436     /* Generic timeline support is not yet implemented but should be easy */
1437     av_assert1(!(filter->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC &&
1438                  filter->filter->activate));
1439     filter->ready = 0;
1440     ret = filter->filter->activate ? filter->filter->activate(filter) :
1441           ff_filter_activate_default(filter);
1442     if (ret == FFERROR_NOT_READY)
1443         ret = 0;
1444     return ret;
1445 }
1446 
1447 int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
1448 {
1449     *rpts = link->current_pts;
1450     if (ff_framequeue_queued_frames(&link->fifo))
1451         return *rstatus = 0;
1452     if (link->status_out)
1453         return *rstatus = link->status_out;
1454     if (!link->status_in)
1455         return *rstatus = 0;
1456     *rstatus = link->status_out = link->status_in;
1457     ff_update_link_current_pts(link, link->status_in_pts);
1458     *rpts = link->current_pts;
1459     return 1;
1460 }
1461 
1462 size_t ff_inlink_queued_frames(AVFilterLink *link)
1463 {
1464     return ff_framequeue_queued_frames(&link->fifo);
1465 }
1466 
ff_inlink_check_available_frame(AVFilterLink * link)1467 int ff_inlink_check_available_frame(AVFilterLink *link)
1468 {
1469     return ff_framequeue_queued_frames(&link->fifo) > 0;
1470 }
1471 
ff_inlink_queued_samples(AVFilterLink * link)1472 int ff_inlink_queued_samples(AVFilterLink *link)
1473 {
1474     return ff_framequeue_queued_samples(&link->fifo);
1475 }
1476 
ff_inlink_check_available_samples(AVFilterLink * link,unsigned min)1477 int ff_inlink_check_available_samples(AVFilterLink *link, unsigned min)
1478 {
1479     uint64_t samples = ff_framequeue_queued_samples(&link->fifo);
1480     av_assert1(min);
1481     return samples >= min || (link->status_in && samples);
1482 }
1483 
consume_update(AVFilterLink * link,const AVFrame * frame)1484 static void consume_update(AVFilterLink *link, const AVFrame *frame)
1485 {
1486     ff_update_link_current_pts(link, frame->pts);
1487     ff_inlink_process_commands(link, frame);
1488     link->dst->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1489     link->frame_count_out++;
1490 }
1491 
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
{
    AVFrame *frame;

    *rframe = NULL;
    if (!ff_inlink_check_available_frame(link))
        return 0;

    if (link->fifo.samples_skipped) {
        /* Samples were skipped from the head frame: consume it through the
           sample path instead of returning it as-is. */
        frame = ff_framequeue_peek(&link->fifo, 0);
        return ff_inlink_consume_samples(link, frame->nb_samples, frame->nb_samples, rframe);
    }

    frame = ff_framequeue_take(&link->fifo);
    consume_update(link, frame);
    *rframe = frame;
    return 1;
}

int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max,
                              AVFrame **rframe)
{
    AVFrame *frame;
    int ret;

    av_assert1(min);
    *rframe = NULL;
    if (!ff_inlink_check_available_samples(link, min))
        return 0;
    if (link->status_in)
        min = FFMIN(min, ff_framequeue_queued_samples(&link->fifo));
    ret = take_samples(link, min, max, &frame);
    if (ret < 0)
        return ret;
    consume_update(link, frame);
    *rframe = frame;
    return 1;
}
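
/* Usage sketch (hypothetical filter code, not part of this file): a filter
 * that needs fixed-size audio blocks of, say, 1024 samples would call this
 * from its activate() callback:
 *
 *     AVFrame *in;
 *     int ret = ff_inlink_consume_samples(inlink, 1024, 1024, &in);
 *     if (ret < 0)
 *         return ret;
 *     if (ret > 0) {
 *         // "in" now holds 1024 samples, or fewer for the last block once
 *         // status_in (EOF) is set on the link.
 *     }
 *     // ret == 0: not enough samples buffered yet.
 */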

AVFrame *ff_inlink_peek_frame(AVFilterLink *link, size_t idx)
{
    return ff_framequeue_peek(&link->fifo, idx);
}

int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
{
    AVFrame *frame = *rframe;
    AVFrame *out;
    int ret;

    if (av_frame_is_writable(frame))
        return 0;
    av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        out = ff_get_video_buffer(link, link->w, link->h);
        break;
    case AVMEDIA_TYPE_AUDIO:
        out = ff_get_audio_buffer(link, frame->nb_samples);
        break;
    default:
        return AVERROR(EINVAL);
    }
    if (!out)
        return AVERROR(ENOMEM);

    ret = av_frame_copy_props(out, frame);
    if (ret < 0) {
        av_frame_free(&out);
        return ret;
    }

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
                      frame->format, frame->width, frame->height);
        break;
    case AVMEDIA_TYPE_AUDIO:
        av_samples_copy(out->extended_data, frame->extended_data,
                        0, 0, frame->nb_samples,
                        frame->channels,
                        frame->format);
        break;
    default:
        av_assert0(!"reached");
    }

    av_frame_free(&frame);
    *rframe = out;
    return 0;
}
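
/* Usage sketch (hypothetical filter code): a filter that modifies frames in
 * place would typically do
 *
 *     ret = ff_inlink_make_frame_writable(inlink, &frame);
 *     if (ret < 0) {
 *         av_frame_free(&frame);
 *         return ret;
 *     }
 *     // "frame" (possibly a fresh copy) can now be modified safely.
 */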

int ff_inlink_process_commands(AVFilterLink *link, const AVFrame *frame)
{
    AVFilterCommand *cmd = link->dst->command_queue;

    while (cmd && cmd->time <= frame->pts * av_q2d(link->time_base)) {
        av_log(link->dst, AV_LOG_DEBUG,
               "Processing command time:%f command:%s arg:%s\n",
               cmd->time, cmd->command, cmd->arg);
        avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
        ff_command_queue_pop(link->dst);
        cmd = link->dst->command_queue;
    }
    return 0;
}

int ff_inlink_evaluate_timeline_at_frame(AVFilterLink *link, const AVFrame *frame)
{
    AVFilterContext *dstctx = link->dst;
    int64_t pts = frame->pts;
    int64_t pos = frame->pkt_pos;

    if (!dstctx->enable_str)
        return 1;

    dstctx->var_values[VAR_N] = link->frame_count_out;
    dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
    dstctx->var_values[VAR_W] = link->w;
    dstctx->var_values[VAR_H] = link->h;
    dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;

    return fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) >= 0.5;
}
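
/* Usage note: this backs the generic "enable" timeline option. For example,
 * an option string such as enable='between(t,10,20)' (a hypothetical command
 * line) makes the expression evaluate to 0 outside the 10-20 second window,
 * so consume_update() above marks the filter as disabled for those frames.
 */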

void ff_inlink_request_frame(AVFilterLink *link)
{
    av_assert1(!link->status_in);
    av_assert1(!link->status_out);
    link->frame_wanted_out = 1;
    ff_filter_set_ready(link->src, 100);
}

void ff_inlink_set_status(AVFilterLink *link, int status)
{
    if (link->status_out)
        return;
    link->frame_wanted_out = 0;
    link->frame_blocked_in = 0;
    ff_avfilter_link_set_out_status(link, status, AV_NOPTS_VALUE);
    while (ff_framequeue_queued_frames(&link->fifo)) {
        AVFrame *frame = ff_framequeue_take(&link->fifo);
        av_frame_free(&frame);
    }
    if (!link->status_in)
        link->status_in = status;
}

int ff_outlink_get_status(AVFilterLink *link)
{
    return link->status_in;
}

const AVClass *avfilter_get_class(void)
{
    return &avfilter_class;
}

int ff_filter_init_hw_frames(AVFilterContext *avctx, AVFilterLink *link,
                             int default_pool_size)
{
    AVHWFramesContext *frames;

    // Must already be set by caller.
    av_assert0(link->hw_frames_ctx);

    frames = (AVHWFramesContext*)link->hw_frames_ctx->data;

    if (frames->initial_pool_size == 0) {
        // Dynamic allocation is necessarily supported.
    } else if (avctx->extra_hw_frames >= 0) {
        frames->initial_pool_size += avctx->extra_hw_frames;
    } else {
        frames->initial_pool_size = default_pool_size;
    }

    return 0;
}
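
/* Usage sketch (hypothetical hardware filter, not part of this file): a
 * config_props callback would typically create the frames context, let this
 * helper size the pool, then initialize it. Names such as device_ref and the
 * pixel formats below are illustrative placeholders.
 *
 *     AVHWFramesContext *frames_ctx;
 *
 *     outlink->hw_frames_ctx = av_hwframe_ctx_alloc(device_ref);
 *     if (!outlink->hw_frames_ctx)
 *         return AVERROR(ENOMEM);
 *     frames_ctx = (AVHWFramesContext *)outlink->hw_frames_ctx->data;
 *     frames_ctx->format    = AV_PIX_FMT_VAAPI;
 *     frames_ctx->sw_format = AV_PIX_FMT_NV12;
 *     frames_ctx->width     = outlink->w;
 *     frames_ctx->height    = outlink->h;
 *     frames_ctx->initial_pool_size = 4;
 *
 *     ret = ff_filter_init_hw_frames(avctx, outlink, 10);
 *     if (ret < 0)
 *         return ret;
 *     ret = av_hwframe_ctx_init(outlink->hw_frames_ctx);
 *     if (ret < 0)
 *         return ret;
 */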