1 /*
2  * filter layer
3  * Copyright (c) 2007 Bobby Bingham
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include "libavutil/avassert.h"
23 #include "libavutil/avstring.h"
24 #include "libavutil/buffer.h"
25 #include "libavutil/channel_layout.h"
26 #include "libavutil/common.h"
27 #include "libavutil/eval.h"
28 #include "libavutil/frame.h"
29 #include "libavutil/hwcontext.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/rational.h"
34 #include "libavutil/samplefmt.h"
35 
36 #define FF_INTERNAL_FIELDS 1
37 #include "framequeue.h"
38 
39 #include "audio.h"
40 #include "avfilter.h"
41 #include "filters.h"
42 #include "formats.h"
43 #include "framepool.h"
44 #include "internal.h"
45 
46 static void tlog_ref(void *ctx, AVFrame *ref, int end)
47 {
48     ff_tlog(ctx,
49             "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
50             ref, ref->buf, ref->data[0],
51             ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
52             ref->pts, ref->pkt_pos);
53 
54     if (ref->width) {
55         ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
56                 ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den,
57                 ref->width, ref->height,
58                 !ref->interlaced_frame     ? 'P' :         /* Progressive  */
59                 ref->top_field_first ? 'T' : 'B',    /* Top / Bottom */
60                 ref->key_frame,
61                 av_get_picture_type_char(ref->pict_type));
62     }
63     if (ref->nb_samples) {
64         ff_tlog(ctx, " cl:%"PRId64"d n:%d r:%d",
65                 ref->channel_layout,
66                 ref->nb_samples,
67                 ref->sample_rate);
68     }
69 
70     ff_tlog(ctx, "]%s", end ? "\n" : "");
71 }
72 
73 void ff_command_queue_pop(AVFilterContext *filter)
74 {
75     AVFilterCommand *c= filter->command_queue;
76     av_freep(&c->arg);
77     av_freep(&c->command);
78     filter->command_queue= c->next;
79     av_free(c);
80 }
81 
82 /**
83  * Append a new pad.
84  *
85  * @param count  Pointer to the number of pads in the list
86  * @param pads   Pointer to the pointer to the beginning of the list of pads
87  * @param links  Pointer to the pointer to the beginning of the list of links
88  * @param newpad The new pad to add. A copy is made when adding.
89  * @return >= 0 in case of success, a negative AVERROR code on error
90  */
91 static int append_pad(unsigned *count, AVFilterPad **pads,
92                       AVFilterLink ***links, AVFilterPad *newpad)
93 {
94     AVFilterLink **newlinks;
95     AVFilterPad *newpads;
96     unsigned idx = *count;
97 
98     newpads  = av_realloc_array(*pads,  idx + 1, sizeof(*newpads));
99     newlinks = av_realloc_array(*links, idx + 1, sizeof(*newlinks));
100     if (newpads)
101         *pads  = newpads;
102     if (newlinks)
103         *links = newlinks;
104     if (!newpads || !newlinks) {
105         if (newpad->flags & AVFILTERPAD_FLAG_FREE_NAME)
106             av_freep(&newpad->name);
107         return AVERROR(ENOMEM);
108     }
109 
110     memcpy(*pads + idx, newpad, sizeof(AVFilterPad));
111     (*links)[idx] = NULL;
112 
113     (*count)++;
114 
115     return 0;
116 }
117 
118 int ff_append_inpad(AVFilterContext *f, AVFilterPad *p)
119 {
120     return append_pad(&f->nb_inputs, &f->input_pads, &f->inputs, p);
121 }
122 
123 int ff_append_inpad_free_name(AVFilterContext *f, AVFilterPad *p)
124 {
125     p->flags |= AVFILTERPAD_FLAG_FREE_NAME;
126     return ff_append_inpad(f, p);
127 }
128 
129 int ff_append_outpad(AVFilterContext *f, AVFilterPad *p)
130 {
131     return append_pad(&f->nb_outputs, &f->output_pads, &f->outputs, p);
132 }
133 
134 int ff_append_outpad_free_name(AVFilterContext *f, AVFilterPad *p)
135 {
136     p->flags |= AVFILTERPAD_FLAG_FREE_NAME;
137     return ff_append_outpad(f, p);
138 }
139 
140 int avfilter_link(AVFilterContext *src, unsigned srcpad,
141                   AVFilterContext *dst, unsigned dstpad)
142 {
143     AVFilterLink *link;
144 
145     av_assert0(src->graph);
146     av_assert0(dst->graph);
147     av_assert0(src->graph == dst->graph);
148 
149     if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad ||
150         src->outputs[srcpad]      || dst->inputs[dstpad])
151         return AVERROR(EINVAL);
152 
153     if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) {
154         av_log(src, AV_LOG_ERROR,
155                "Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n",
156                src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"),
157                dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?"));
158         return AVERROR(EINVAL);
159     }
160 
161     link = av_mallocz(sizeof(*link));
162     if (!link)
163         return AVERROR(ENOMEM);
164 
165     src->outputs[srcpad] = dst->inputs[dstpad] = link;
166 
167     link->src     = src;
168     link->dst     = dst;
169     link->srcpad  = &src->output_pads[srcpad];
170     link->dstpad  = &dst->input_pads[dstpad];
171     link->type    = src->output_pads[srcpad].type;
172     av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
173     link->format  = -1;
174     ff_framequeue_init(&link->fifo, &src->graph->internal->frame_queues);
175 
176     return 0;
177 }
178 
179 void avfilter_link_free(AVFilterLink **link)
180 {
181     if (!*link)
182         return;
183 
184     ff_framequeue_free(&(*link)->fifo);
185     ff_frame_pool_uninit((FFFramePool**)&(*link)->frame_pool);
186     av_channel_layout_uninit(&(*link)->ch_layout);
187 
188     av_freep(link);
189 }
190 
191 void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
192 {
193     filter->ready = FFMAX(filter->ready, priority);
194 }
195 
196 /**
197  * Clear frame_blocked_in on all outputs.
198  * This is necessary whenever something changes on input.
199  */
200 static void filter_unblock(AVFilterContext *filter)
201 {
202     unsigned i;
203 
204     for (i = 0; i < filter->nb_outputs; i++)
205         filter->outputs[i]->frame_blocked_in = 0;
206 }
207 
208 
209 void ff_avfilter_link_set_in_status(AVFilterLink *link, int status, int64_t pts)
210 {
211     if (link->status_in == status)
212         return;
213     av_assert0(!link->status_in);
214     link->status_in = status;
215     link->status_in_pts = pts;
216     link->frame_wanted_out = 0;
217     link->frame_blocked_in = 0;
218     filter_unblock(link->dst);
219     ff_filter_set_ready(link->dst, 200);
220 }
221 
222 void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts)
223 {
224     av_assert0(!link->frame_wanted_out);
225     av_assert0(!link->status_out);
226     link->status_out = status;
227     if (pts != AV_NOPTS_VALUE)
228         ff_update_link_current_pts(link, pts);
229     filter_unblock(link->dst);
230     ff_filter_set_ready(link->src, 200);
231 }
232 
233 int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
234                            unsigned filt_srcpad_idx, unsigned filt_dstpad_idx)
235 {
236     int ret;
237     unsigned dstpad_idx = link->dstpad - link->dst->input_pads;
238 
239     av_log(link->dst, AV_LOG_VERBOSE, "auto-inserting filter '%s' "
240            "between the filter '%s' and the filter '%s'\n",
241            filt->name, link->src->name, link->dst->name);
242 
243     link->dst->inputs[dstpad_idx] = NULL;
244     if ((ret = avfilter_link(filt, filt_dstpad_idx, link->dst, dstpad_idx)) < 0) {
245         /* failed to link output filter to new filter */
246         link->dst->inputs[dstpad_idx] = link;
247         return ret;
248     }
249 
250     /* re-hookup the link to the new destination filter we inserted */
251     link->dst                     = filt;
252     link->dstpad                  = &filt->input_pads[filt_srcpad_idx];
253     filt->inputs[filt_srcpad_idx] = link;
254 
255     /* if any information on supported media formats already exists on the
256      * link, we need to preserve that */
257     if (link->outcfg.formats)
258         ff_formats_changeref(&link->outcfg.formats,
259                              &filt->outputs[filt_dstpad_idx]->outcfg.formats);
260     if (link->outcfg.samplerates)
261         ff_formats_changeref(&link->outcfg.samplerates,
262                              &filt->outputs[filt_dstpad_idx]->outcfg.samplerates);
263     if (link->outcfg.channel_layouts)
264         ff_channel_layouts_changeref(&link->outcfg.channel_layouts,
265                                      &filt->outputs[filt_dstpad_idx]->outcfg.channel_layouts);
266 
267     return 0;
268 }
269 
270 int avfilter_config_links(AVFilterContext *filter)
271 {
272     int (*config_link)(AVFilterLink *);
273     unsigned i;
274     int ret;
275 
276     for (i = 0; i < filter->nb_inputs; i ++) {
277         AVFilterLink *link = filter->inputs[i];
278         AVFilterLink *inlink;
279 
280         if (!link) continue;
281         if (!link->src || !link->dst) {
282             av_log(filter, AV_LOG_ERROR,
283                    "Not all input and output are properly linked (%d).\n", i);
284             return AVERROR(EINVAL);
285         }
286 
287         inlink = link->src->nb_inputs ? link->src->inputs[0] : NULL;
288         link->current_pts =
289         link->current_pts_us = AV_NOPTS_VALUE;
290 
291         switch (link->init_state) {
292         case AVLINK_INIT:
293             continue;
294         case AVLINK_STARTINIT:
295             av_log(filter, AV_LOG_INFO, "circular filter chain detected\n");
296             return 0;
297         case AVLINK_UNINIT:
298             link->init_state = AVLINK_STARTINIT;
299 
300             if ((ret = avfilter_config_links(link->src)) < 0)
301                 return ret;
302 
303             if (!(config_link = link->srcpad->config_props)) {
304                 if (link->src->nb_inputs != 1) {
305                     av_log(link->src, AV_LOG_ERROR, "Source filters and filters "
306                                                     "with more than one input "
307                                                     "must set config_props() "
308                                                     "callbacks on all outputs\n");
309                     return AVERROR(EINVAL);
310                 }
311             } else if ((ret = config_link(link)) < 0) {
312                 av_log(link->src, AV_LOG_ERROR,
313                        "Failed to configure output pad on %s\n",
314                        link->src->name);
315                 return ret;
316             }
317 
318             switch (link->type) {
319             case AVMEDIA_TYPE_VIDEO:
320                 if (!link->time_base.num && !link->time_base.den)
321                     link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q;
322 
323                 if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den)
324                     link->sample_aspect_ratio = inlink ?
325                         inlink->sample_aspect_ratio : (AVRational){1,1};
326 
327                 if (inlink) {
328                     if (!link->frame_rate.num && !link->frame_rate.den)
329                         link->frame_rate = inlink->frame_rate;
330                     if (!link->w)
331                         link->w = inlink->w;
332                     if (!link->h)
333                         link->h = inlink->h;
334                 } else if (!link->w || !link->h) {
335                     av_log(link->src, AV_LOG_ERROR,
336                            "Video source filters must set their output link's "
337                            "width and height\n");
338                     return AVERROR(EINVAL);
339                 }
340                 break;
341 
342             case AVMEDIA_TYPE_AUDIO:
343                 if (inlink) {
344                     if (!link->time_base.num && !link->time_base.den)
345                         link->time_base = inlink->time_base;
346                 }
347 
348                 if (!link->time_base.num && !link->time_base.den)
349                     link->time_base = (AVRational) {1, link->sample_rate};
350             }
351 
352             if (link->src->nb_inputs && link->src->inputs[0]->hw_frames_ctx &&
353                 !(link->src->filter->flags_internal & FF_FILTER_FLAG_HWFRAME_AWARE)) {
354                 av_assert0(!link->hw_frames_ctx &&
355                            "should not be set by non-hwframe-aware filter");
356                 link->hw_frames_ctx = av_buffer_ref(link->src->inputs[0]->hw_frames_ctx);
357                 if (!link->hw_frames_ctx)
358                     return AVERROR(ENOMEM);
359             }
360 
361             if ((config_link = link->dstpad->config_props))
362                 if ((ret = config_link(link)) < 0) {
363                     av_log(link->dst, AV_LOG_ERROR,
364                            "Failed to configure input pad on %s\n",
365                            link->dst->name);
366                     return ret;
367                 }
368 
369             link->init_state = AVLINK_INIT;
370         }
371     }
372 
373     return 0;
374 }
375 
376 void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
377 {
378     if (link->type == AVMEDIA_TYPE_VIDEO) {
379         ff_tlog(ctx,
380                 "link[%p s:%dx%d fmt:%s %s->%s]%s",
381                 link, link->w, link->h,
382                 av_get_pix_fmt_name(link->format),
383                 link->src ? link->src->filter->name : "",
384                 link->dst ? link->dst->filter->name : "",
385                 end ? "\n" : "");
386     } else {
387         char buf[128];
388         av_channel_layout_describe(&link->ch_layout, buf, sizeof(buf));
389 
390         ff_tlog(ctx,
391                 "link[%p r:%d cl:%s fmt:%s %s->%s]%s",
392                 link, (int)link->sample_rate, buf,
393                 av_get_sample_fmt_name(link->format),
394                 link->src ? link->src->filter->name : "",
395                 link->dst ? link->dst->filter->name : "",
396                 end ? "\n" : "");
397     }
398 }
399 
400 int ff_request_frame(AVFilterLink *link)
401 {
402     FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1);
403 
404     av_assert1(!link->dst->filter->activate);
405     if (link->status_out)
406         return link->status_out;
407     if (link->status_in) {
408         if (ff_framequeue_queued_frames(&link->fifo)) {
409             av_assert1(!link->frame_wanted_out);
410             av_assert1(link->dst->ready >= 300);
411             return 0;
412         } else {
413             /* Acknowledge status change. Filters using ff_request_frame() will
414                handle the change automatically. Filters can also check the
415                status directly but none do yet. */
416             ff_avfilter_link_set_out_status(link, link->status_in, link->status_in_pts);
417             return link->status_out;
418         }
419     }
420     link->frame_wanted_out = 1;
421     ff_filter_set_ready(link->src, 100);
422     return 0;
423 }
424 
425 static int64_t guess_status_pts(AVFilterContext *ctx, int status, AVRational link_time_base)
426 {
427     unsigned i;
428     int64_t r = INT64_MAX;
429 
430     for (i = 0; i < ctx->nb_inputs; i++)
431         if (ctx->inputs[i]->status_out == status)
432             r = FFMIN(r, av_rescale_q(ctx->inputs[i]->current_pts, ctx->inputs[i]->time_base, link_time_base));
433     if (r < INT64_MAX)
434         return r;
435     av_log(ctx, AV_LOG_WARNING, "EOF timestamp not reliable\n");
436     for (i = 0; i < ctx->nb_inputs; i++)
437         r = FFMIN(r, av_rescale_q(ctx->inputs[i]->status_in_pts, ctx->inputs[i]->time_base, link_time_base));
438     if (r < INT64_MAX)
439         return r;
440     return AV_NOPTS_VALUE;
441 }
442 
443 static int ff_request_frame_to_filter(AVFilterLink *link)
444 {
445     int ret = -1;
446 
447     FF_TPRINTF_START(NULL, request_frame_to_filter); ff_tlog_link(NULL, link, 1);
448     /* Assume the filter is blocked, let the method clear it if not */
449     link->frame_blocked_in = 1;
450     if (link->srcpad->request_frame)
451         ret = link->srcpad->request_frame(link);
452     else if (link->src->inputs[0])
453         ret = ff_request_frame(link->src->inputs[0]);
454     if (ret < 0) {
455         if (ret != AVERROR(EAGAIN) && ret != link->status_in)
456             ff_avfilter_link_set_in_status(link, ret, guess_status_pts(link->src, ret, link->time_base));
457         if (ret == AVERROR_EOF)
458             ret = 0;
459     }
460     return ret;
461 }
462 
463 static const char *const var_names[] = {
464     "t",
465     "n",
466     "pos",
467     "w",
468     "h",
469     NULL
470 };
471 
472 enum {
473     VAR_T,
474     VAR_N,
475     VAR_POS,
476     VAR_W,
477     VAR_H,
478     VAR_VARS_NB
479 };
480 
481 static int set_enable_expr(AVFilterContext *ctx, const char *expr)
482 {
483     int ret;
484     char *expr_dup;
485     AVExpr *old = ctx->enable;
486 
487     if (!(ctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE)) {
488         av_log(ctx, AV_LOG_ERROR, "Timeline ('enable' option) not supported "
489                "with filter '%s'\n", ctx->filter->name);
490         return AVERROR_PATCHWELCOME;
491     }
492 
493     expr_dup = av_strdup(expr);
494     if (!expr_dup)
495         return AVERROR(ENOMEM);
496 
497     if (!ctx->var_values) {
498         ctx->var_values = av_calloc(VAR_VARS_NB, sizeof(*ctx->var_values));
499         if (!ctx->var_values) {
500             av_free(expr_dup);
501             return AVERROR(ENOMEM);
502         }
503     }
504 
505     ret = av_expr_parse((AVExpr**)&ctx->enable, expr_dup, var_names,
506                         NULL, NULL, NULL, NULL, 0, ctx->priv);
507     if (ret < 0) {
508         av_log(ctx->priv, AV_LOG_ERROR,
509                "Error when evaluating the expression '%s' for enable\n",
510                expr_dup);
511         av_free(expr_dup);
512         return ret;
513     }
514 
515     av_expr_free(old);
516     av_free(ctx->enable_str);
517     ctx->enable_str = expr_dup;
518     return 0;
519 }
520 
521 void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
522 {
523     if (pts == AV_NOPTS_VALUE)
524         return;
525     link->current_pts = pts;
526     link->current_pts_us = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q);
527     /* TODO use duration */
528     if (link->graph && link->age_index >= 0)
529         ff_avfilter_graph_update_heap(link->graph, link);
530 }
531 
532 int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
533 {
534     if(!strcmp(cmd, "ping")){
535         char local_res[256] = {0};
536 
537         if (!res) {
538             res = local_res;
539             res_len = sizeof(local_res);
540         }
541         av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name);
542         if (res == local_res)
543             av_log(filter, AV_LOG_INFO, "%s", res);
544         return 0;
545     }else if(!strcmp(cmd, "enable")) {
546         return set_enable_expr(filter, arg);
547     }else if(filter->filter->process_command) {
548         return filter->filter->process_command(filter, cmd, arg, res, res_len, flags);
549     }
550     return AVERROR(ENOSYS);
551 }
552 
553 #if FF_API_PAD_COUNT
554 int avfilter_pad_count(const AVFilterPad *pads)
555 {
556     const AVFilter *filter;
557     void *opaque = NULL;
558 
559     if (!pads)
560         return 0;
561 
562     while ((filter = av_filter_iterate(&opaque))) {
563         if (pads == filter->inputs)
564             return filter->nb_inputs;
565         if (pads == filter->outputs)
566             return filter->nb_outputs;
567     }
568 
569     av_assert0(!"AVFilterPad list not from a filter");
570     return AVERROR_BUG;
571 }
572 #endif
573 
574 unsigned avfilter_filter_pad_count(const AVFilter *filter, int is_output)
575 {
576     return is_output ? filter->nb_outputs : filter->nb_inputs;
577 }
578 
579 static const char *default_filter_name(void *filter_ctx)
580 {
581     AVFilterContext *ctx = filter_ctx;
582     return ctx->name ? ctx->name : ctx->filter->name;
583 }
584 
585 static void *filter_child_next(void *obj, void *prev)
586 {
587     AVFilterContext *ctx = obj;
588     if (!prev && ctx->filter && ctx->filter->priv_class && ctx->priv)
589         return ctx->priv;
590     return NULL;
591 }
592 
593 static const AVClass *filter_child_class_iterate(void **iter)
594 {
595     const AVFilter *f;
596 
597     while ((f = av_filter_iterate(iter)))
598         if (f->priv_class)
599             return f->priv_class;
600 
601     return NULL;
602 }
603 
604 #define OFFSET(x) offsetof(AVFilterContext, x)
605 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
606 #define TFLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
607 static const AVOption avfilter_options[] = {
608     { "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
609         { .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
610         { "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .flags = FLAGS, .unit = "thread_type" },
611     { "enable", "set enable expression", OFFSET(enable_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = TFLAGS },
612     { "threads", "Allowed number of threads", OFFSET(nb_threads), AV_OPT_TYPE_INT,
613         { .i64 = 0 }, 0, INT_MAX, FLAGS },
614     { "extra_hw_frames", "Number of extra hardware frames to allocate for the user",
615         OFFSET(extra_hw_frames), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
616     { NULL },
617 };
618 
619 static const AVClass avfilter_class = {
620     .class_name = "AVFilter",
621     .item_name  = default_filter_name,
622     .version    = LIBAVUTIL_VERSION_INT,
623     .category   = AV_CLASS_CATEGORY_FILTER,
624     .child_next = filter_child_next,
625     .child_class_iterate = filter_child_class_iterate,
626     .option           = avfilter_options,
627 };
628 
629 static int default_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg,
630                            int *ret, int nb_jobs)
631 {
632     int i;
633 
634     for (i = 0; i < nb_jobs; i++) {
635         int r = func(ctx, arg, i, nb_jobs);
636         if (ret)
637             ret[i] = r;
638     }
639     return 0;
640 }
641 
642 AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
643 {
644     AVFilterContext *ret;
645     int preinited = 0;
646 
647     if (!filter)
648         return NULL;
649 
650     ret = av_mallocz(sizeof(AVFilterContext));
651     if (!ret)
652         return NULL;
653 
654     ret->av_class = &avfilter_class;
655     ret->filter   = filter;
656     ret->name     = inst_name ? av_strdup(inst_name) : NULL;
657     if (filter->priv_size) {
658         ret->priv     = av_mallocz(filter->priv_size);
659         if (!ret->priv)
660             goto err;
661     }
662     if (filter->preinit) {
663         if (filter->preinit(ret) < 0)
664             goto err;
665         preinited = 1;
666     }
667 
668     av_opt_set_defaults(ret);
669     if (filter->priv_class) {
670         *(const AVClass**)ret->priv = filter->priv_class;
671         av_opt_set_defaults(ret->priv);
672     }
673 
674     ret->internal = av_mallocz(sizeof(*ret->internal));
675     if (!ret->internal)
676         goto err;
677     ret->internal->execute = default_execute;
678 
679     ret->nb_inputs  = filter->nb_inputs;
680     if (ret->nb_inputs ) {
681         ret->input_pads   = av_memdup(filter->inputs,  ret->nb_inputs  * sizeof(*filter->inputs));
682         if (!ret->input_pads)
683             goto err;
684         ret->inputs      = av_calloc(ret->nb_inputs, sizeof(*ret->inputs));
685         if (!ret->inputs)
686             goto err;
687     }
688 
689     ret->nb_outputs = filter->nb_outputs;
690     if (ret->nb_outputs) {
691         ret->output_pads  = av_memdup(filter->outputs, ret->nb_outputs * sizeof(*filter->outputs));
692         if (!ret->output_pads)
693             goto err;
694         ret->outputs     = av_calloc(ret->nb_outputs, sizeof(*ret->outputs));
695         if (!ret->outputs)
696             goto err;
697     }
698 
699     return ret;
700 
701 err:
702     if (preinited)
703         filter->uninit(ret);
704     av_freep(&ret->inputs);
705     av_freep(&ret->input_pads);
706     ret->nb_inputs = 0;
707     av_freep(&ret->outputs);
708     av_freep(&ret->output_pads);
709     ret->nb_outputs = 0;
710     av_freep(&ret->priv);
711     av_freep(&ret->internal);
712     av_free(ret);
713     return NULL;
714 }
715 
716 static void free_link(AVFilterLink *link)
717 {
718     if (!link)
719         return;
720 
721     if (link->src)
722         link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
723     if (link->dst)
724         link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
725 
726     av_buffer_unref(&link->hw_frames_ctx);
727 
728     ff_formats_unref(&link->incfg.formats);
729     ff_formats_unref(&link->outcfg.formats);
730     ff_formats_unref(&link->incfg.samplerates);
731     ff_formats_unref(&link->outcfg.samplerates);
732     ff_channel_layouts_unref(&link->incfg.channel_layouts);
733     ff_channel_layouts_unref(&link->outcfg.channel_layouts);
734     avfilter_link_free(&link);
735 }
736 
737 void avfilter_free(AVFilterContext *filter)
738 {
739     int i;
740 
741     if (!filter)
742         return;
743 
744     if (filter->graph)
745         ff_filter_graph_remove_filter(filter->graph, filter);
746 
747     if (filter->filter->uninit)
748         filter->filter->uninit(filter);
749 
750     for (i = 0; i < filter->nb_inputs; i++) {
751         free_link(filter->inputs[i]);
752         if (filter->input_pads[i].flags  & AVFILTERPAD_FLAG_FREE_NAME)
753             av_freep(&filter->input_pads[i].name);
754     }
755     for (i = 0; i < filter->nb_outputs; i++) {
756         free_link(filter->outputs[i]);
757         if (filter->output_pads[i].flags & AVFILTERPAD_FLAG_FREE_NAME)
758             av_freep(&filter->output_pads[i].name);
759     }
760 
761     if (filter->filter->priv_class)
762         av_opt_free(filter->priv);
763 
764     av_buffer_unref(&filter->hw_device_ctx);
765 
766     av_freep(&filter->name);
767     av_freep(&filter->input_pads);
768     av_freep(&filter->output_pads);
769     av_freep(&filter->inputs);
770     av_freep(&filter->outputs);
771     av_freep(&filter->priv);
772     while(filter->command_queue){
773         ff_command_queue_pop(filter);
774     }
775     av_opt_free(filter);
776     av_expr_free(filter->enable);
777     filter->enable = NULL;
778     av_freep(&filter->var_values);
779     av_freep(&filter->internal);
780     av_free(filter);
781 }
782 
783 int ff_filter_get_nb_threads(AVFilterContext *ctx)
784 {
785     if (ctx->nb_threads > 0)
786         return FFMIN(ctx->nb_threads, ctx->graph->nb_threads);
787     return ctx->graph->nb_threads;
788 }
789 
790 static int process_options(AVFilterContext *ctx, AVDictionary **options,
791                            const char *args)
792 {
793     const AVOption *o = NULL;
794     int ret;
795     char *av_uninit(parsed_key), *av_uninit(value);
796     const char *key;
797     int offset= -1;
798 
799     if (!args)
800         return 0;
801 
802     while (*args) {
803         const char *shorthand = NULL;
804 
805         o = av_opt_next(ctx->priv, o);
806         if (o) {
807             if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
808                 continue;
809             offset = o->offset;
810             shorthand = o->name;
811         }
812 
813         ret = av_opt_get_key_value(&args, "=", ":",
814                                    shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
815                                    &parsed_key, &value);
816         if (ret < 0) {
817             if (ret == AVERROR(EINVAL))
818                 av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
819             else
820                 av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
821                        av_err2str(ret));
822             return ret;
823         }
824         if (*args)
825             args++;
826         if (parsed_key) {
827             key = parsed_key;
828             while ((o = av_opt_next(ctx->priv, o))); /* discard all remaining shorthand */
829         } else {
830             key = shorthand;
831         }
832 
833         av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);
834 
835         if (av_opt_find(ctx, key, NULL, 0, 0)) {
836             ret = av_opt_set(ctx, key, value, 0);
837             if (ret < 0) {
838                 av_free(value);
839                 av_free(parsed_key);
840                 return ret;
841             }
842         } else {
843             o = av_opt_find(ctx->priv, key, NULL, 0,
844                             AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ);
845             if (!o) {
846                 av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
847                 av_free(value);
848                 av_free(parsed_key);
849                 return AVERROR_OPTION_NOT_FOUND;
850             }
851             av_dict_set(options, key, value,
852                         (o->type == AV_OPT_TYPE_FLAGS &&
853                          (value[0] == '-' || value[0] == '+')) ? AV_DICT_APPEND : 0);
854         }
855 
856         av_free(value);
857         av_free(parsed_key);
858     }
859 
860     return 0;
861 }
862 
863 int ff_filter_process_command(AVFilterContext *ctx, const char *cmd,
864                               const char *arg, char *res, int res_len, int flags)
865 {
866     const AVOption *o;
867 
868     if (!ctx->filter->priv_class)
869         return 0;
870     o = av_opt_find2(ctx->priv, cmd, NULL, AV_OPT_FLAG_RUNTIME_PARAM | AV_OPT_FLAG_FILTERING_PARAM, AV_OPT_SEARCH_CHILDREN, NULL);
871     if (!o)
872         return AVERROR(ENOSYS);
873     return av_opt_set(ctx->priv, cmd, arg, 0);
874 }
875 
876 int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
877 {
878     int ret = 0;
879 
880     ret = av_opt_set_dict(ctx, options);
881     if (ret < 0) {
882         av_log(ctx, AV_LOG_ERROR, "Error applying generic filter options.\n");
883         return ret;
884     }
885 
886     if (ctx->filter->flags & AVFILTER_FLAG_SLICE_THREADS &&
887         ctx->thread_type & ctx->graph->thread_type & AVFILTER_THREAD_SLICE &&
888         ctx->graph->internal->thread_execute) {
889         ctx->thread_type       = AVFILTER_THREAD_SLICE;
890         ctx->internal->execute = ctx->graph->internal->thread_execute;
891     } else {
892         ctx->thread_type = 0;
893     }
894 
895     if (ctx->filter->priv_class) {
896         ret = av_opt_set_dict2(ctx->priv, options, AV_OPT_SEARCH_CHILDREN);
897         if (ret < 0) {
898             av_log(ctx, AV_LOG_ERROR, "Error applying options to the filter.\n");
899             return ret;
900         }
901     }
902 
903     if (ctx->filter->init)
904         ret = ctx->filter->init(ctx);
905     else if (ctx->filter->init_dict)
906         ret = ctx->filter->init_dict(ctx, options);
907     if (ret < 0)
908         return ret;
909 
910     if (ctx->enable_str) {
911         ret = set_enable_expr(ctx, ctx->enable_str);
912         if (ret < 0)
913             return ret;
914     }
915 
916     return 0;
917 }
918 
919 int avfilter_init_str(AVFilterContext *filter, const char *args)
920 {
921     AVDictionary *options = NULL;
922     AVDictionaryEntry *e;
923     int ret = 0;
924 
925     if (args && *args) {
926         if (!filter->filter->priv_class) {
927             av_log(filter, AV_LOG_ERROR, "This filter does not take any "
928                    "options, but options were provided: %s.\n", args);
929             return AVERROR(EINVAL);
930         }
931 
932         ret = process_options(filter, &options, args);
933         if (ret < 0)
934             goto fail;
935     }
936 
937     ret = avfilter_init_dict(filter, &options);
938     if (ret < 0)
939         goto fail;
940 
941     if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
942         av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key);
943         ret = AVERROR_OPTION_NOT_FOUND;
944         goto fail;
945     }
946 
947 fail:
948     av_dict_free(&options);
949 
950     return ret;
951 }
952 
953 const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
954 {
955     return pads[pad_idx].name;
956 }
957 
958 enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
959 {
960     return pads[pad_idx].type;
961 }
962 
963 static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
964 {
965     return ff_filter_frame(link->dst->outputs[0], frame);
966 }
967 
968 static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
969 {
970     int (*filter_frame)(AVFilterLink *, AVFrame *);
971     AVFilterContext *dstctx = link->dst;
972     AVFilterPad *dst = link->dstpad;
973     int ret;
974 
975     if (!(filter_frame = dst->filter_frame))
976         filter_frame = default_filter_frame;
977 
978     if (dst->flags & AVFILTERPAD_FLAG_NEEDS_WRITABLE) {
979         ret = ff_inlink_make_frame_writable(link, &frame);
980         if (ret < 0)
981             goto fail;
982     }
983 
984     ff_inlink_process_commands(link, frame);
985     dstctx->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
986 
987     if (dstctx->is_disabled &&
988         (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
989         filter_frame = default_filter_frame;
990     ret = filter_frame(link, frame);
991     link->frame_count_out++;
992     return ret;
993 
994 fail:
995     av_frame_free(&frame);
996     return ret;
997 }
998 
999 int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
1000 {
1001     int ret;
1002     FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); tlog_ref(NULL, frame, 1);
1003 
1004     /* Consistency checks */
1005     if (link->type == AVMEDIA_TYPE_VIDEO) {
1006         if (strcmp(link->dst->filter->name, "buffersink") &&
1007             strcmp(link->dst->filter->name, "format") &&
1008             strcmp(link->dst->filter->name, "idet") &&
1009             strcmp(link->dst->filter->name, "null") &&
1010             strcmp(link->dst->filter->name, "scale")) {
1011             av_assert1(frame->format                 == link->format);
1012             av_assert1(frame->width               == link->w);
1013             av_assert1(frame->height               == link->h);
1014         }
1015     } else {
1016         if (frame->format != link->format) {
1017             av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
1018             goto error;
1019         }
1020         if (av_channel_layout_compare(&frame->ch_layout, &link->ch_layout)) {
1021             av_log(link->dst, AV_LOG_ERROR, "Channel layout change is not supported\n");
1022             goto error;
1023         }
1024         if (frame->sample_rate != link->sample_rate) {
1025             av_log(link->dst, AV_LOG_ERROR, "Sample rate change is not supported\n");
1026             goto error;
1027         }
1028     }
1029 
1030     link->frame_blocked_in = link->frame_wanted_out = 0;
1031     link->frame_count_in++;
1032     link->sample_count_in += frame->nb_samples;
1033     filter_unblock(link->dst);
1034     ret = ff_framequeue_add(&link->fifo, frame);
1035     if (ret < 0) {
1036         av_frame_free(&frame);
1037         return ret;
1038     }
1039     ff_filter_set_ready(link->dst, 300);
1040     return 0;
1041 
1042 error:
1043     av_frame_free(&frame);
1044     return AVERROR_PATCHWELCOME;
1045 }
1046 
1047 static int samples_ready(AVFilterLink *link, unsigned min)
1048 {
1049     return ff_framequeue_queued_frames(&link->fifo) &&
1050            (ff_framequeue_queued_samples(&link->fifo) >= min ||
1051             link->status_in);
1052 }
1053 
1054 static int take_samples(AVFilterLink *link, unsigned min, unsigned max,
1055                         AVFrame **rframe)
1056 {
1057     AVFrame *frame0, *frame, *buf;
1058     unsigned nb_samples, nb_frames, i, p;
1059     int ret;
1060 
1061     /* Note: this function relies on no format changes and must only be
1062        called with enough samples. */
1063     av_assert1(samples_ready(link, link->min_samples));
1064     frame0 = frame = ff_framequeue_peek(&link->fifo, 0);
1065     if (!link->fifo.samples_skipped && frame->nb_samples >= min && frame->nb_samples <= max) {
1066         *rframe = ff_framequeue_take(&link->fifo);
1067         return 0;
1068     }
1069     nb_frames = 0;
1070     nb_samples = 0;
1071     while (1) {
1072         if (nb_samples + frame->nb_samples > max) {
1073             if (nb_samples < min)
1074                 nb_samples = max;
1075             break;
1076         }
1077         nb_samples += frame->nb_samples;
1078         nb_frames++;
1079         if (nb_frames == ff_framequeue_queued_frames(&link->fifo))
1080             break;
1081         frame = ff_framequeue_peek(&link->fifo, nb_frames);
1082     }
1083 
1084     buf = ff_get_audio_buffer(link, nb_samples);
1085     if (!buf)
1086         return AVERROR(ENOMEM);
1087     ret = av_frame_copy_props(buf, frame0);
1088     if (ret < 0) {
1089         av_frame_free(&buf);
1090         return ret;
1091     }
1092 
1093     p = 0;
1094     for (i = 0; i < nb_frames; i++) {
1095         frame = ff_framequeue_take(&link->fifo);
1096         av_samples_copy(buf->extended_data, frame->extended_data, p, 0,
1097                         frame->nb_samples, link->ch_layout.nb_channels, link->format);
1098         p += frame->nb_samples;
1099         av_frame_free(&frame);
1100     }
1101     if (p < nb_samples) {
1102         unsigned n = nb_samples - p;
1103         frame = ff_framequeue_peek(&link->fifo, 0);
1104         av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n,
1105                         link->ch_layout.nb_channels, link->format);
1106         ff_framequeue_skip_samples(&link->fifo, n, link->time_base);
1107     }
1108 
1109     *rframe = buf;
1110     return 0;
1111 }
1112 
1113 static int ff_filter_frame_to_filter(AVFilterLink *link)
1114 {
1115     AVFrame *frame = NULL;
1116     AVFilterContext *dst = link->dst;
1117     int ret;
1118 
1119     av_assert1(ff_framequeue_queued_frames(&link->fifo));
1120     ret = link->min_samples ?
1121           ff_inlink_consume_samples(link, link->min_samples, link->max_samples, &frame) :
1122           ff_inlink_consume_frame(link, &frame);
1123     av_assert1(ret);
1124     if (ret < 0) {
1125         av_assert1(!frame);
1126         return ret;
1127     }
1128     /* The filter will soon have received a new frame, that may allow it to
1129        produce one or more: unblock its outputs. */
1130     filter_unblock(dst);
1131     /* AVFilterPad.filter_frame() expects frame_count_out to have the value
1132        before the frame; ff_filter_frame_framed() will re-increment it. */
1133     link->frame_count_out--;
1134     ret = ff_filter_frame_framed(link, frame);
1135     if (ret < 0 && ret != link->status_out) {
1136         ff_avfilter_link_set_out_status(link, ret, AV_NOPTS_VALUE);
1137     } else {
1138         /* Run once again, to see if several frames were available, or if
1139            the input status has also changed, or any other reason. */
1140         ff_filter_set_ready(dst, 300);
1141     }
1142     return ret;
1143 }
1144 
1145 static int forward_status_change(AVFilterContext *filter, AVFilterLink *in)
1146 {
1147     unsigned out = 0, progress = 0;
1148     int ret;
1149 
1150     av_assert0(!in->status_out);
1151     if (!filter->nb_outputs) {
1152         /* not necessary with the current API and sinks */
1153         return 0;
1154     }
1155     while (!in->status_out) {
1156         if (!filter->outputs[out]->status_in) {
1157             progress++;
1158             ret = ff_request_frame_to_filter(filter->outputs[out]);
1159             if (ret < 0)
1160                 return ret;
1161         }
1162         if (++out == filter->nb_outputs) {
1163             if (!progress) {
1164                 /* Every output already closed: input no longer interesting
1165                    (example: overlay in shortest mode, other input closed). */
1166                 ff_avfilter_link_set_out_status(in, in->status_in, in->status_in_pts);
1167                 return 0;
1168             }
1169             progress = 0;
1170             out = 0;
1171         }
1172     }
1173     ff_filter_set_ready(filter, 200);
1174     return 0;
1175 }
1176 
1177 static int ff_filter_activate_default(AVFilterContext *filter)
1178 {
1179     unsigned i;
1180 
1181     for (i = 0; i < filter->nb_inputs; i++) {
1182         if (samples_ready(filter->inputs[i], filter->inputs[i]->min_samples)) {
1183             return ff_filter_frame_to_filter(filter->inputs[i]);
1184         }
1185     }
1186     for (i = 0; i < filter->nb_inputs; i++) {
1187         if (filter->inputs[i]->status_in && !filter->inputs[i]->status_out) {
1188             av_assert1(!ff_framequeue_queued_frames(&filter->inputs[i]->fifo));
1189             return forward_status_change(filter, filter->inputs[i]);
1190         }
1191     }
1192     for (i = 0; i < filter->nb_outputs; i++) {
1193         if (filter->outputs[i]->frame_wanted_out &&
1194             !filter->outputs[i]->frame_blocked_in) {
1195             return ff_request_frame_to_filter(filter->outputs[i]);
1196         }
1197     }
1198     return FFERROR_NOT_READY;
1199 }
1200 
1201 /*
1202    Filter scheduling and activation
1203 
1204    When a filter is activated, it must:
1205    - if possible, output a frame;
1206    - else, if relevant, forward the input status change;
1207    - else, check outputs for wanted frames and forward the requests.
1208 
1209    The following AVFilterLink fields are used for activation:
1210 
1211    - frame_wanted_out:
1212 
1213      This field indicates if a frame is needed on this input of the
1214      destination filter. A positive value indicates that a frame is needed
1215      to process queued frames or internal data or to satisfy the
1216      application; a zero value indicates that a frame is not especially
1217      needed but could be processed anyway; a negative value indicates that a
1218      frame would just be queued.
1219 
1220      It is set by filters using ff_request_frame() or ff_request_no_frame(),
1221      when requested by the application through a specific API or when it is
1222      set on one of the outputs.
1223 
1224      It is cleared when a frame is sent from the source using
1225      ff_filter_frame().
1226 
1227      It is also cleared when a status change is sent from the source using
1228      ff_avfilter_link_set_in_status().
1229 
1230    - frame_blocked_in:
1231 
1232      This field means that the source filter can not generate a frame as is.
1233      Its goal is to avoid repeatedly calling the request_frame() method on
1234      the same link.
1235 
1236      It is set by the framework on all outputs of a filter before activating it.
1237 
1238      It is automatically cleared by ff_filter_frame().
1239 
1240      It is also automatically cleared by ff_avfilter_link_set_in_status().
1241 
1242      It is also cleared on all outputs (using filter_unblock()) when
1243      something happens on an input: processing a frame or changing the
1244      status.
1245 
1246    - fifo:
1247 
1248      Contains the frames queued on a filter input. If it contains frames and
1249      frame_wanted_out is not set, then the filter can be activated. If that
1250      results in the filter not being able to use these frames, the filter must set
1251      frame_wanted_out to ask for more frames.
1252 
1253    - status_in and status_in_pts:
1254 
1255      Status (EOF or error code) of the link and timestamp of the status
1256      change (in link time base, same as frames) as seen from the input of
1257      the link. The status change is considered to happen after the frames
1258      queued in fifo.
1259 
1260      It is set by the source filter using ff_avfilter_link_set_in_status().
1261 
1262    - status_out:
1263 
1264      Status of the link as seen from the output of the link. The status
1265      change is considered to have already happened.
1266 
1267      It is set by the destination filter using
1268      ff_avfilter_link_set_out_status().
1269 
1270    Filters are activated according to the ready field, set using the
1271    ff_filter_set_ready(). Eventually, a priority queue will be used.
1272    ff_filter_set_ready() is called whenever anything could cause progress to
1273    be possible. Marking a filter ready when it is not is not a problem,
1274    except for the small overhead it causes.
1275 
1276    Conditions that cause a filter to be marked ready are:
1277 
1278    - frames added on an input link;
1279 
1280    - changes in the input or output status of an input link;
1281 
1282    - requests for a frame on an output link;
1283 
1284    - after any actual processing using the legacy methods (filter_frame(),
1285      and request_frame() to acknowledge status changes), to run once more
1286      and check if enough input was present for several frames.
1287 
1288    Examples of scenarios to consider:
1289 
1290    - buffersrc: activate if frame_wanted_out to notify the application;
1291      activate when the application adds a frame to push it immediately.
1292 
1293    - testsrc: activate only if frame_wanted_out to produce and push a frame.
1294 
1295    - concat (not at stitch points): can process a frame on any output.
1296      Activate if frame_wanted_out on output to forward on the corresponding
1297      input. Activate when a frame is present on input to process it
1298      immediately.
1299 
1300    - framesync: needs at least one frame on each input; extra frames on the
1301      wrong input will accumulate. When a frame is first added on one input,
1302      set frame_wanted_out<0 on it to avoid getting more (would trigger
1303      testsrc) and frame_wanted_out>0 on the other to allow processing it.
1304 
1305    Activation of old filters:
1306 
1307    In order to activate a filter implementing the legacy filter_frame() and
1308    request_frame() methods, perform the first possible of the following
1309    actions:
1310 
1311    - If an input has frames in fifo and frame_wanted_out == 0, dequeue a
1312      frame and call filter_frame().
1313 
1314      Rationale: filter frames as soon as possible instead of leaving them
1315      queued; frame_wanted_out < 0 is not possible since the old API does not
1316      set it nor provides any similar feedback; frame_wanted_out > 0 happens
1317      when min_samples > 0 and there are not enough samples queued.
1318 
1319    - If an input has status_in set but not status_out, try to call
1320      request_frame() on one of the outputs in the hope that it will trigger
1321      request_frame() on the input with status_in and acknowledge it. This is
1322      awkward and fragile, filters with several inputs or outputs should be
1323      updated to direct activation as soon as possible.
1324 
1325    - If an output has frame_wanted_out > 0 and not frame_blocked_in, call
1326      request_frame().
1327 
1328      Rationale: checking frame_blocked_in is necessary to avoid requesting
1329      repeatedly on a blocked input if another is not blocked (example:
1330      [buffersrc1][testsrc1][buffersrc2][testsrc2]concat=v=2).
1331  */
1332 
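/*
 * Editorial illustration (not part of the original avfilter.c): a minimal
 * sketch of an activate() callback for a hypothetical single-input,
 * single-output pass-through filter, showing how the link fields described
 * above are typically driven through the ff_inlink_*() helpers defined in
 * this file and the ff_outlink_*() / FF_FILTER_FORWARD_STATUS_BACK helpers
 * from filters.h. The name sketch_activate and the pass-through behaviour
 * are assumptions made purely for illustration.
 */
static int sketch_activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *frame;
    int status, ret;
    int64_t pts;

    /* Propagate an EOF/error already seen on the output back to the input. */
    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* 1. If possible, output a frame: consume one queued frame and pass it on. */
    ret = ff_inlink_consume_frame(inlink, &frame);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return ff_filter_frame(outlink, frame);

    /* 2. Else, if relevant, forward the input status change to the output. */
    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        ff_outlink_set_status(outlink, status, pts);
        return 0;
    }

    /* 3. Else, forward the request for a frame from the output to the input. */
    if (ff_outlink_frame_wanted(outlink))
        ff_inlink_request_frame(inlink);

    return FFERROR_NOT_READY;
}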
1333 int ff_filter_activate(AVFilterContext *filter)
1334 {
1335     int ret;
1336 
1337     /* Generic timeline support is not yet implemented but should be easy */
1338     av_assert1(!(filter->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC &&
1339                  filter->filter->activate));
1340     filter->ready = 0;
1341     ret = filter->filter->activate ? filter->filter->activate(filter) :
1342           ff_filter_activate_default(filter);
1343     if (ret == FFERROR_NOT_READY)
1344         ret = 0;
1345     return ret;
1346 }
1347 
1348 int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
1349 {
1350     *rpts = link->current_pts;
1351     if (ff_framequeue_queued_frames(&link->fifo))
1352         return *rstatus = 0;
1353     if (link->status_out)
1354         return *rstatus = link->status_out;
1355     if (!link->status_in)
1356         return *rstatus = 0;
1357     *rstatus = link->status_out = link->status_in;
1358     ff_update_link_current_pts(link, link->status_in_pts);
1359     *rpts = link->current_pts;
1360     return 1;
1361 }
1362 
1363 size_t ff_inlink_queued_frames(AVFilterLink *link)
1364 {
1365     return ff_framequeue_queued_frames(&link->fifo);
1366 }
1367 
1368 int ff_inlink_check_available_frame(AVFilterLink *link)
1369 {
1370     return ff_framequeue_queued_frames(&link->fifo) > 0;
1371 }
1372 
1373 int ff_inlink_queued_samples(AVFilterLink *link)
1374 {
1375     return ff_framequeue_queued_samples(&link->fifo);
1376 }
1377 
1378 int ff_inlink_check_available_samples(AVFilterLink *link, unsigned min)
1379 {
1380     uint64_t samples = ff_framequeue_queued_samples(&link->fifo);
1381     av_assert1(min);
1382     return samples >= min || (link->status_in && samples);
1383 }
1384 
1385 static void consume_update(AVFilterLink *link, const AVFrame *frame)
1386 {
1387     ff_update_link_current_pts(link, frame->pts);
1388     ff_inlink_process_commands(link, frame);
1389     link->dst->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1390     link->frame_count_out++;
1391     link->sample_count_out += frame->nb_samples;
1392 }
1393 
1394 int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
1395 {
1396     AVFrame *frame;
1397 
1398     *rframe = NULL;
1399     if (!ff_inlink_check_available_frame(link))
1400         return 0;
1401 
1402     if (link->fifo.samples_skipped) {
1403         frame = ff_framequeue_peek(&link->fifo, 0);
1404         return ff_inlink_consume_samples(link, frame->nb_samples, frame->nb_samples, rframe);
1405     }
1406 
1407     frame = ff_framequeue_take(&link->fifo);
1408     consume_update(link, frame);
1409     *rframe = frame;
1410     return 1;
1411 }
1412 
1413 int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max,
1414                             AVFrame **rframe)
1415 {
1416     AVFrame *frame;
1417     int ret;
1418 
1419     av_assert1(min);
1420     *rframe = NULL;
1421     if (!ff_inlink_check_available_samples(link, min))
1422         return 0;
1423     if (link->status_in)
1424         min = FFMIN(min, ff_framequeue_queued_samples(&link->fifo));
1425     ret = take_samples(link, min, max, &frame);
1426     if (ret < 0)
1427         return ret;
1428     consume_update(link, frame);
1429     *rframe = frame;
1430     return 1;
1431 }
1432 
1433 AVFrame *ff_inlink_peek_frame(AVFilterLink *link, size_t idx)
1434 {
1435     return ff_framequeue_peek(&link->fifo, idx);
1436 }
1437 
1438 int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
1439 {
1440     AVFrame *frame = *rframe;
1441     AVFrame *out;
1442     int ret;
1443 
1444     if (av_frame_is_writable(frame))
1445         return 0;
1446     av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");
1447 
1448     switch (link->type) {
1449     case AVMEDIA_TYPE_VIDEO:
1450         out = ff_get_video_buffer(link, link->w, link->h);
1451         break;
1452     case AVMEDIA_TYPE_AUDIO:
1453         out = ff_get_audio_buffer(link, frame->nb_samples);
1454         break;
1455     default:
1456         return AVERROR(EINVAL);
1457     }
1458     if (!out)
1459         return AVERROR(ENOMEM);
1460 
1461     ret = av_frame_copy_props(out, frame);
1462     if (ret < 0) {
1463         av_frame_free(&out);
1464         return ret;
1465     }
1466 
1467     ret = av_frame_copy(out, frame);
1468     if (ret < 0) {
1469         av_frame_free(&out);
1470         return ret;
1471     }
1472 
1473     av_frame_free(&frame);
1474     *rframe = out;
1475     return 0;
1476 }
1477 
1478 int ff_inlink_process_commands(AVFilterLink *link, const AVFrame *frame)
1479 {
1480     AVFilterCommand *cmd = link->dst->command_queue;
1481 
1482     while(cmd && cmd->time <= frame->pts * av_q2d(link->time_base)){
1483         av_log(link->dst, AV_LOG_DEBUG,
1484                "Processing command time:%f command:%s arg:%s\n",
1485                cmd->time, cmd->command, cmd->arg);
1486         avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
1487         ff_command_queue_pop(link->dst);
1488         cmd= link->dst->command_queue;
1489     }
1490     return 0;
1491 }
1492 
1493 int ff_inlink_evaluate_timeline_at_frame(AVFilterLink *link, const AVFrame *frame)
1494 {
1495     AVFilterContext *dstctx = link->dst;
1496     int64_t pts = frame->pts;
1497     int64_t pos = frame->pkt_pos;
1498 
1499     if (!dstctx->enable_str)
1500         return 1;
1501 
1502     dstctx->var_values[VAR_N] = link->frame_count_out;
1503     dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
1504     dstctx->var_values[VAR_W] = link->w;
1505     dstctx->var_values[VAR_H] = link->h;
1506     dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;
1507 
1508     return fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) >= 0.5;
1509 }
1510 
1511 void ff_inlink_request_frame(AVFilterLink *link)
1512 {
1513     av_assert1(!link->status_in);
1514     av_assert1(!link->status_out);
1515     link->frame_wanted_out = 1;
1516     ff_filter_set_ready(link->src, 100);
1517 }
1518 
1519 void ff_inlink_set_status(AVFilterLink *link, int status)
1520 {
1521     if (link->status_out)
1522         return;
1523     link->frame_wanted_out = 0;
1524     link->frame_blocked_in = 0;
1525     ff_avfilter_link_set_out_status(link, status, AV_NOPTS_VALUE);
1526     while (ff_framequeue_queued_frames(&link->fifo)) {
1527            AVFrame *frame = ff_framequeue_take(&link->fifo);
1528            av_frame_free(&frame);
1529     }
1530     if (!link->status_in)
1531         link->status_in = status;
1532 }
1533 
1534 int ff_outlink_get_status(AVFilterLink *link)
1535 {
1536     return link->status_in;
1537 }
1538 
1539 int ff_inoutlink_check_flow(AVFilterLink *inlink, AVFilterLink *outlink)
1540 {
1541     return ff_outlink_frame_wanted(outlink) ||
1542            ff_inlink_check_available_frame(inlink) ||
1543            inlink->status_out;
1544 }
1545 
1546 
1547 const AVClass *avfilter_get_class(void)
1548 {
1549     return &avfilter_class;
1550 }
1551 
1552 int ff_filter_init_hw_frames(AVFilterContext *avctx, AVFilterLink *link,
1553                              int default_pool_size)
1554 {
1555     AVHWFramesContext *frames;
1556 
1557     // Must already be set by caller.
1558     av_assert0(link->hw_frames_ctx);
1559 
1560     frames = (AVHWFramesContext*)link->hw_frames_ctx->data;
1561 
1562     if (frames->initial_pool_size == 0) {
1563         // Dynamic allocation is necessarily supported.
1564     } else if (avctx->extra_hw_frames >= 0) {
1565         frames->initial_pool_size += avctx->extra_hw_frames;
1566     } else {
1567         frames->initial_pool_size = default_pool_size;
1568     }
1569 
1570     return 0;
1571 }
1572