/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * feedback video filter
 */

#include "libavutil/fifo.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/internal.h"
#include "avfilter.h"
#include "filters.h"
#include "internal.h"
#include "video.h"

typedef struct FeedbackContext {
    const AVClass *class;

    int x, y;
    int w, h;

    int max_step[4];
    int hsub, vsub;

    AVFrame *feed;

    AVFifo *fifo;
} FeedbackContext;

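/* Keep the crop rectangle inside the main input: if x/y would push the
 * w x h region past the frame edge, pull the offset back. */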
static void adjust_pos(AVFilterContext *ctx, FeedbackContext *s)
{
    if (s->x + s->w > ctx->inputs[0]->w)
        s->x = ctx->inputs[0]->w - s->w;
    if (s->y + s->h > ctx->inputs[0]->h)
        s->y = ctx->inputs[0]->h - s->h;
}

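/* Sanitize the user-supplied crop parameters: reset out-of-range offsets,
 * default an unset width/height to the remaining frame area, clamp the
 * size to the input dimensions, then re-clamp the position. */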
static void adjust_parameters(AVFilterContext *ctx, FeedbackContext *s)
{
    if (s->x >= ctx->inputs[0]->w)
        s->x = 0;
    if (s->y >= ctx->inputs[0]->h)
        s->y = 0;

    if (s->w <= 0)
        s->w = ctx->inputs[0]->w - s->x;
    if (s->h <= 0)
        s->h = ctx->inputs[0]->h - s->y;

    if (s->w > ctx->inputs[0]->w)
        s->w = ctx->inputs[0]->w;
    if (s->h > ctx->inputs[0]->h)
        s->h = ctx->inputs[0]->h;

    adjust_pos(ctx, s);
}

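/* Cache per-format layout information (chroma subsampling, maximum pixel
 * step per plane), sanitize the crop parameters and constrain the feedback
 * input link to the crop size. */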
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
    FeedbackContext *s = ctx->priv;

    s->hsub = pix_desc->log2_chroma_w;
    s->vsub = pix_desc->log2_chroma_h;

    av_image_fill_max_pixsteps(s->max_step, NULL, pix_desc);

    adjust_parameters(ctx, s);

    ctx->inputs[1]->w = s->w;
    ctx->inputs[1]->h = s->h;

    return 0;
}

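/* The main output mirrors the main input size; the feedback output carries
 * only the cropped w x h region. */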
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FeedbackContext *s = ctx->priv;

    adjust_parameters(ctx, s);

    ctx->outputs[0]->w = ctx->inputs[0]->w;
    ctx->outputs[0]->h = ctx->inputs[0]->h;
    ctx->outputs[1]->w = s->w;
    ctx->outputs[1]->h = s->h;

    return 0;
}

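/* Accept any pixel format that is not bitstream-packed, hardware-backed or
 * palettized, so the planes can be copied directly. */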
static int query_formats(AVFilterContext *ctx)
{
    return ff_set_common_formats(ctx, ff_formats_pixdesc_filter(0, AV_PIX_FMT_FLAG_BITSTREAM |
                                                                   AV_PIX_FMT_FLAG_HWACCEL |
                                                                   AV_PIX_FMT_FLAG_PAL));
}

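/* Scheduling callback: copy the next frame arriving on the feedback input
 * back into the oldest queued main frame and emit it on the main output;
 * otherwise queue a new main frame and send its cropped region to the
 * feedback output. EOF and frame requests are forwarded to both sides. */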
static int activate(AVFilterContext *ctx)
{
    FeedbackContext *s = ctx->priv;
    int status, ret;
    int64_t pts;

    adjust_pos(ctx, s);

    for (int i = 0; i < ctx->nb_outputs; i++)
        FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[i], ctx);

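    /* If no processed frame is pending, try to pull one from the feedback input. */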
    if (!s->feed) {
        ret = ff_inlink_consume_frame(ctx->inputs[1], &s->feed);
        if (ret < 0)
            return ret;
    }

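    /* A processed frame and its queued source frame are both available:
     * write the feedback region back into the queued frame and send it on
     * the main output. */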
    if (s->feed && av_fifo_can_read(s->fifo)) {
        AVFrame *src = s->feed;
        AVFrame *dst = NULL;

        av_fifo_read(s->fifo, &dst, 1);
        if (!dst)
            return AVERROR_BUG;

        if (!av_frame_is_writable(dst)) {
            AVFrame *tmp = ff_get_video_buffer(ctx->outputs[0], ctx->outputs[0]->w, ctx->outputs[0]->h);

            if (!tmp) {
                av_frame_free(&dst);
                return AVERROR(ENOMEM);
            }

            ret = av_frame_copy(tmp, dst);
            if (ret < 0) {
                av_frame_free(&dst);
                av_frame_free(&tmp);
                return ret;
            }

            av_frame_copy_props(tmp, dst);
            av_frame_free(&dst);
            dst = tmp;
        }

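        /* Copy the processed region plane by plane: the first (luma/packed)
         * plane at full resolution, the chroma planes shifted by the
         * subsampling factors, and the alpha plane at full resolution. */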
        for (int y = 0; y < src->height; y++) {
            memmove(dst->data[0] + (s->y + y) * dst->linesize[0] + s->x * s->max_step[0],
                    src->data[0] + y * src->linesize[0], src->width * s->max_step[0]);
        }

        for (int i = 1; i < 3; i ++) {
            if (dst->data[i]) {
                for (int y = 0; y < src->height; y++) {
                    memmove(dst->data[i] + ((s->y + y) >> s->vsub) * dst->linesize[i] + ((s->x * s->max_step[i]) >> s->hsub),
                            src->data[i] + (y >> s->vsub) * src->linesize[i], (src->width * s->max_step[i]) >> s->hsub);
                }
            }
        }

        if (dst->data[3]) {
            for (int y = 0; y < src->height; y++) {
                memmove(dst->data[3] + (s->y + y) * dst->linesize[3] + s->x * s->max_step[3],
                        src->data[3] + y * src->linesize[3], src->width * s->max_step[3]);
            }
        }

        ret = ff_filter_frame(ctx->outputs[0], dst);
        av_frame_free(&s->feed);
        return ret;
    }

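    /* No processed frame yet: queue the next main-input frame and hand its
     * cropped region (a clone with adjusted data pointers, no data copy) to
     * the feedback output. */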
    if (!s->feed) {
        AVFrame *in = NULL;

        ret = ff_inlink_consume_frame(ctx->inputs[0], &in);
        if (ret < 0)
            return ret;

        if (ret > 0) {
            AVFrame *frame;

            ret = av_fifo_write(s->fifo, &in, 1);
            if (ret < 0) {
                av_frame_free(&in);
                return ret;
            }

            frame = av_frame_clone(in);
            if (!frame)
                return AVERROR(ENOMEM);

            frame->width  = s->w;
            frame->height = s->h;

            frame->data[0] += s->y * frame->linesize[0];
            frame->data[0] += s->x * s->max_step[0];

            for (int i = 1; i < 3; i ++) {
                if (frame->data[i]) {
                    frame->data[i] += (s->y >> s->vsub) * frame->linesize[i];
                    frame->data[i] += (s->x * s->max_step[i]) >> s->hsub;
                }
            }

            if (frame->data[3]) {
                frame->data[3] += s->y * frame->linesize[3];
                frame->data[3] += s->x * s->max_step[3];
            }

            return ff_filter_frame(ctx->outputs[1], frame);
        }
    }

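    /* Propagate EOF from either input to both outputs. */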
    if (ff_inlink_acknowledge_status(ctx->inputs[0], &status, &pts)) {
        ff_outlink_set_status(ctx->outputs[0], status, pts);
        ff_outlink_set_status(ctx->outputs[1], status, pts);
        return 0;
    }

    if (ff_inlink_acknowledge_status(ctx->inputs[1], &status, &pts)) {
        ff_outlink_set_status(ctx->outputs[0], status, pts);
        ff_outlink_set_status(ctx->outputs[1], status, pts);
        return 0;
    }

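    /* Nothing to process yet: when the main output wants a frame, request
     * more data from both inputs. */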
    if (!s->feed) {
        if (ff_outlink_frame_wanted(ctx->outputs[0])) {
            ff_inlink_request_frame(ctx->inputs[0]);
            ff_inlink_request_frame(ctx->inputs[1]);
            return 0;
        }
    }

    return FFERROR_NOT_READY;
}

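/* Allocate the auto-growing FIFO that queues main-input frames while they
 * wait for their processed counterpart on the feedback input. */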
static av_cold int init(AVFilterContext *ctx)
{
    FeedbackContext *s = ctx->priv;

    s->fifo = av_fifo_alloc2(8, sizeof(AVFrame *), AV_FIFO_FLAG_AUTO_GROW);
    if (!s->fifo)
        return AVERROR(ENOMEM);

    return 0;
}

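/* Free any frames still queued in the FIFO, then the FIFO itself. */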
static av_cold void uninit(AVFilterContext *ctx)
{
    FeedbackContext *s = ctx->priv;
    if (s->fifo) {
        size_t size = av_fifo_can_read(s->fifo);

        for (size_t n = 0; n < size; n++) {
            AVFrame *frame = NULL;

            av_fifo_read(s->fifo, &frame, 1);

            av_frame_free(&frame);
        }

        av_fifo_freep2(&s->fifo);
    }
}

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
    },
    {
        .name         = "feedin",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
    },
};

static const AVFilterPad outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    {
        .name         = "feedout",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
};

#define OFFSET(x) offsetof(FeedbackContext, x)
#define FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM)
#define TFLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_RUNTIME_PARAM)

static const AVOption feedback_options[] = {
    { "x", "set top left crop position", OFFSET(x), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, TFLAGS },
    { "y", "set top left crop position", OFFSET(y), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, TFLAGS },
    { "w", "set crop size",              OFFSET(w), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
    { "h", "set crop size",              OFFSET(h), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(feedback);

const AVFilter ff_vf_feedback = {
    .name        = "feedback",
    .description = NULL_IF_CONFIG_SMALL("Apply feedback video filter."),
    .priv_class  = &feedback_class,
    .priv_size   = sizeof(FeedbackContext),
    .activate    = activate,
    .init        = init,
    .uninit      = uninit,
    FILTER_INPUTS(inputs),
    FILTER_OUTPUTS(outputs),
    FILTER_QUERY_FUNC(query_formats),
    .process_command = ff_filter_process_command,
};
334