1 /*
2 * Copyright (c) 2013 Vittorio Giovara
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 /**
22 * @file
23 * Generate a frame packed video, by combining two views in a single surface.
24 */
25
26 #include <string.h>
27
28 #include "libavutil/common.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/opt.h"
31 #include "libavutil/pixdesc.h"
32 #include "libavutil/rational.h"
33 #include "libavutil/stereo3d.h"
34
35 #include "avfilter.h"
36 #include "filters.h"
37 #include "formats.h"
38 #include "internal.h"
39 #include "video.h"
40
/* indices into FramepackContext.input_views and ctx->inputs[] */
#define LEFT  0
#define RIGHT 1

/** Private context of the framepack filter. */
typedef struct FramepackContext {
    const AVClass *class;

    int depth;                          ///< bit depth of component 0 of the agreed format
    const AVPixFmtDescriptor *pix_desc; ///< agreed pixel format

    enum AVStereo3DType format;         ///< frame pack type output

    AVFrame *input_views[2];            ///< input frames, one buffered per view until both arrive
} FramepackContext;
54
/* Planar gray/YUV/GBR formats, 8-16 bit, with and without alpha.
 * The same list is applied to both inputs and the output, so the two
 * views and the packed result always share one pixel format. */
static const enum AVPixelFormat formats_supported[] = {
    AV_PIX_FMT_GRAY8,     AV_PIX_FMT_GRAY9,
    AV_PIX_FMT_GRAY10,    AV_PIX_FMT_GRAY12,    AV_PIX_FMT_GRAY14,
    AV_PIX_FMT_GRAY16,
    AV_PIX_FMT_YUV410P,   AV_PIX_FMT_YUV411P,
    AV_PIX_FMT_YUV420P,   AV_PIX_FMT_YUV422P,
    AV_PIX_FMT_YUV440P,   AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUVJ420P,  AV_PIX_FMT_YUVJ422P,
    AV_PIX_FMT_YUVJ440P,  AV_PIX_FMT_YUVJ444P,
    AV_PIX_FMT_YUVJ411P,
    AV_PIX_FMT_YUV420P9,  AV_PIX_FMT_YUV422P9,  AV_PIX_FMT_YUV444P9,
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
    AV_PIX_FMT_YUV440P10,
    AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12,
    AV_PIX_FMT_YUV440P12,
    AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
    AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
    AV_PIX_FMT_GBRP,      AV_PIX_FMT_GBRP9,     AV_PIX_FMT_GBRP10,
    AV_PIX_FMT_GBRP12,    AV_PIX_FMT_GBRP14,    AV_PIX_FMT_GBRP16,
    AV_PIX_FMT_YUVA420P,  AV_PIX_FMT_YUVA422P,  AV_PIX_FMT_YUVA444P,
    AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_YUVA444P16,
    AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA422P16,
    AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16,
    AV_PIX_FMT_GBRAP,     AV_PIX_FMT_GBRAP10,   AV_PIX_FMT_GBRAP12,    AV_PIX_FMT_GBRAP16,
    AV_PIX_FMT_NONE
};
81
/* Advertise the supported formats; using one shared list ensures the
 * negotiated format is the same on both inputs and the output. */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = ff_make_format_list(formats_supported);

    return formats ? ff_set_common_formats(ctx, formats)
                   : AVERROR(ENOMEM);
}
90
framepack_uninit(AVFilterContext * ctx)91 static av_cold void framepack_uninit(AVFilterContext *ctx)
92 {
93 FramepackContext *s = ctx->priv;
94
95 // clean any leftover frame
96 av_frame_free(&s->input_views[LEFT]);
97 av_frame_free(&s->input_views[RIGHT]);
98 }
99
/**
 * Validate that both inputs agree on geometry and timing, then derive the
 * output properties from the selected packing mode.
 *
 * Frame-sequence doubles the frame rate (and time base resolution);
 * side-by-side/columns double the width; top-bottom/lines double the height.
 *
 * @return 0 on success, AVERROR_INVALIDDATA on mismatched inputs or an
 *         unknown packing mode, AVERROR_BUG if the format has no descriptor.
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx  = outlink->src;
    FramepackContext *s   = outlink->src->priv;

    int width             = ctx->inputs[LEFT]->w;
    int height            = ctx->inputs[LEFT]->h;
    AVRational time_base  = ctx->inputs[LEFT]->time_base;
    AVRational frame_rate = ctx->inputs[LEFT]->frame_rate;

    // check size and fps match on the other input
    if (width  != ctx->inputs[RIGHT]->w ||
        height != ctx->inputs[RIGHT]->h) {
        av_log(ctx, AV_LOG_ERROR,
               "Left and right sizes differ (%dx%d vs %dx%d).\n",
               width, height,
               ctx->inputs[RIGHT]->w, ctx->inputs[RIGHT]->h);
        return AVERROR_INVALIDDATA;
    } else if (av_cmp_q(time_base, ctx->inputs[RIGHT]->time_base) != 0) {
        av_log(ctx, AV_LOG_ERROR,
               "Left and right time bases differ (%d/%d vs %d/%d).\n",
               time_base.num, time_base.den,
               ctx->inputs[RIGHT]->time_base.num,
               ctx->inputs[RIGHT]->time_base.den);
        return AVERROR_INVALIDDATA;
    } else if (av_cmp_q(frame_rate, ctx->inputs[RIGHT]->frame_rate) != 0) {
        av_log(ctx, AV_LOG_ERROR,
               "Left and right framerates differ (%d/%d vs %d/%d).\n",
               frame_rate.num, frame_rate.den,
               ctx->inputs[RIGHT]->frame_rate.num,
               ctx->inputs[RIGHT]->frame_rate.den);
        return AVERROR_INVALIDDATA;
    }

    s->pix_desc = av_pix_fmt_desc_get(outlink->format);
    if (!s->pix_desc)
        return AVERROR_BUG;
    s->depth = s->pix_desc->comp[0].depth;

    // modify output properties as needed
    switch (s->format) {
    case AV_STEREO3D_FRAMESEQUENCE:
        time_base.den  *= 2;
        frame_rate.num *= 2;
        break;
    case AV_STEREO3D_COLUMNS:
    case AV_STEREO3D_SIDEBYSIDE:
        width *= 2;
        break;
    case AV_STEREO3D_LINES:
    case AV_STEREO3D_TOPBOTTOM:
        height *= 2;
        break;
    default:
        // fixed: log message was missing the trailing newline
        av_log(ctx, AV_LOG_ERROR, "Unknown packing mode.\n");
        return AVERROR_INVALIDDATA;
    }

    outlink->w          = width;
    outlink->h          = height;
    outlink->time_base  = time_base;
    outlink->frame_rate = frame_rate;

    return 0;
}
165
/**
 * Pack the two views horizontally into @p out.
 *
 * @param interleaved 0 = side-by-side (left view in the left half),
 *                    1 = column interleaving (alternating source columns;
 *                    chroma of subsampled formats is averaged between views).
 *
 * Fixes vs previous revision: the supported-format list contains 4-plane
 * alpha formats (YUVA*, GBRAP*), but plane 3 was never handled here — the
 * interleaved paths reused chroma dimensions for the full-resolution alpha
 * plane, and the side-by-side path passed uninitialized src[3]/dst[3] to
 * av_image_copy().
 */
static void horizontal_frame_pack(AVFilterLink *outlink,
                                  AVFrame *out,
                                  int interleaved)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s  = ctx->priv;
    int i, plane;

    if (interleaved && s->depth <= 8) {
        const uint8_t *leftp  = s->input_views[LEFT]->data[0];
        const uint8_t *rightp = s->input_views[RIGHT]->data[0];
        uint8_t *dstp         = out->data[0];
        int length = out->width / 2;
        int lines  = out->height;

        for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
            if (plane == 1 || plane == 2) {
                length = AV_CEIL_RSHIFT(out->width / 2, s->pix_desc->log2_chroma_w);
                lines  = AV_CEIL_RSHIFT(out->height,    s->pix_desc->log2_chroma_h);
            } else if (plane == 3) {
                // alpha plane is never subsampled: restore luma dimensions
                length = out->width / 2;
                lines  = out->height;
            }
            for (i = 0; i < lines; i++) {
                int j;
                leftp  = s->input_views[LEFT]->data[plane] +
                         s->input_views[LEFT]->linesize[plane] * i;
                rightp = s->input_views[RIGHT]->data[plane] +
                         s->input_views[RIGHT]->linesize[plane] * i;
                dstp   = out->data[plane] + out->linesize[plane] * i;
                for (j = 0; j < length; j++) {
                    // interpolate chroma as necessary
                    if ((s->pix_desc->log2_chroma_w ||
                         s->pix_desc->log2_chroma_h) &&
                        (plane == 1 || plane == 2)) {
                        *dstp++ = (*leftp + *rightp) / 2;
                        *dstp++ = (*leftp + *rightp) / 2;
                    } else {
                        *dstp++ = *leftp;
                        *dstp++ = *rightp;
                    }
                    leftp  += 1;
                    rightp += 1;
                }
            }
        }
    } else if (interleaved && s->depth > 8) {
        const uint16_t *leftp  = (const uint16_t *)s->input_views[LEFT]->data[0];
        const uint16_t *rightp = (const uint16_t *)s->input_views[RIGHT]->data[0];
        uint16_t *dstp         = (uint16_t *)out->data[0];
        int length = out->width / 2;
        int lines  = out->height;

        for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
            if (plane == 1 || plane == 2) {
                length = AV_CEIL_RSHIFT(out->width / 2, s->pix_desc->log2_chroma_w);
                lines  = AV_CEIL_RSHIFT(out->height,    s->pix_desc->log2_chroma_h);
            } else if (plane == 3) {
                // alpha plane is never subsampled: restore luma dimensions
                length = out->width / 2;
                lines  = out->height;
            }
            for (i = 0; i < lines; i++) {
                int j;
                // linesize is in bytes; /2 converts to uint16_t elements
                leftp  = (const uint16_t *)s->input_views[LEFT]->data[plane] +
                         s->input_views[LEFT]->linesize[plane] * i / 2;
                rightp = (const uint16_t *)s->input_views[RIGHT]->data[plane] +
                         s->input_views[RIGHT]->linesize[plane] * i / 2;
                dstp   = (uint16_t *)out->data[plane] + out->linesize[plane] * i / 2;
                for (j = 0; j < length; j++) {
                    // interpolate chroma as necessary
                    if ((s->pix_desc->log2_chroma_w ||
                         s->pix_desc->log2_chroma_h) &&
                        (plane == 1 || plane == 2)) {
                        *dstp++ = (*leftp + *rightp) / 2;
                        *dstp++ = (*leftp + *rightp) / 2;
                    } else {
                        *dstp++ = *leftp;
                        *dstp++ = *rightp;
                    }
                    leftp  += 1;
                    rightp += 1;
                }
            }
        }
    } else {
        // plain side-by-side: copy each view into its half of the surface
        for (i = 0; i < 2; i++) {
            const int psize = 1 + (s->depth > 8);
            const uint8_t *src[4] = { NULL };
            uint8_t *dst[4]       = { NULL };
            int sub_w = psize * s->input_views[i]->width >> s->pix_desc->log2_chroma_w;

            src[0] = s->input_views[i]->data[0];
            src[1] = s->input_views[i]->data[1];
            src[2] = s->input_views[i]->data[2];

            dst[0] = out->data[0] + i * s->input_views[i]->width * psize;
            dst[1] = out->data[1] + i * sub_w;
            dst[2] = out->data[2] + i * sub_w;

            // 4-plane formats: alpha plane has luma geometry
            if (s->pix_desc->nb_components > 3) {
                src[3] = s->input_views[i]->data[3];
                dst[3] = out->data[3] + i * s->input_views[i]->width * psize;
            }

            av_image_copy(dst, out->linesize, src, s->input_views[i]->linesize,
                          s->input_views[i]->format,
                          s->input_views[i]->width,
                          s->input_views[i]->height);
        }
    }
}
266
/**
 * Pack the two views vertically into @p out.
 *
 * @param interleaved 0 = top-bottom (right view offset by one full view
 *                    height), 1 = line interleaving (right view offset by
 *                    one row, destination stride doubled).
 *
 * Fix vs previous revision: the supported-format list contains 4-plane alpha
 * formats, but src[3]/dst[3]/linesizes[3] were left uninitialized before
 * being handed to av_image_copy().
 */
static void vertical_frame_pack(AVFilterLink *outlink,
                                AVFrame *out,
                                int interleaved)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s  = ctx->priv;
    int i;

    for (i = 0; i < 2; i++) {
        const uint8_t *src[4] = { NULL };
        uint8_t *dst[4]       = { NULL };
        int linesizes[4]      = { 0 };
        int sub_h = s->input_views[i]->height >> s->pix_desc->log2_chroma_h;

        src[0] = s->input_views[i]->data[0];
        src[1] = s->input_views[i]->data[1];
        src[2] = s->input_views[i]->data[2];

        // interleaved: view i starts at row i; stacked: view i starts at
        // row i * view_height (sub_h for subsampled chroma planes)
        dst[0] = out->data[0] + i * out->linesize[0] *
                 (interleaved + s->input_views[i]->height * (1 - interleaved));
        dst[1] = out->data[1] + i * out->linesize[1] *
                 (interleaved + sub_h * (1 - interleaved));
        dst[2] = out->data[2] + i * out->linesize[2] *
                 (interleaved + sub_h * (1 - interleaved));

        // double the destination stride when interleaving lines
        linesizes[0] = out->linesize[0] +
                       interleaved * out->linesize[0];
        linesizes[1] = out->linesize[1] +
                       interleaved * out->linesize[1];
        linesizes[2] = out->linesize[2] +
                       interleaved * out->linesize[2];

        // 4-plane formats: alpha plane has luma geometry
        if (s->pix_desc->nb_components > 3) {
            src[3] = s->input_views[i]->data[3];
            dst[3] = out->data[3] + i * out->linesize[3] *
                     (interleaved + s->input_views[i]->height * (1 - interleaved));
            linesizes[3] = out->linesize[3] +
                           interleaved * out->linesize[3];
        }

        av_image_copy(dst, linesizes, src, s->input_views[i]->linesize,
                      s->input_views[i]->format,
                      s->input_views[i]->width,
                      s->input_views[i]->height);
    }
}
305
spatial_frame_pack(AVFilterLink * outlink,AVFrame * dst)306 static av_always_inline void spatial_frame_pack(AVFilterLink *outlink,
307 AVFrame *dst)
308 {
309 AVFilterContext *ctx = outlink->src;
310 FramepackContext *s = ctx->priv;
311 switch (s->format) {
312 case AV_STEREO3D_SIDEBYSIDE:
313 horizontal_frame_pack(outlink, dst, 0);
314 break;
315 case AV_STEREO3D_COLUMNS:
316 horizontal_frame_pack(outlink, dst, 1);
317 break;
318 case AV_STEREO3D_TOPBOTTOM:
319 vertical_frame_pack(outlink, dst, 0);
320 break;
321 case AV_STEREO3D_LINES:
322 vertical_frame_pack(outlink, dst, 1);
323 break;
324 }
325 }
326
/**
 * Emit output once a frame has been buffered for both views.
 *
 * Frame-sequence mode forwards the two input frames back to back with
 * rescaled timestamps; the spatial modes allocate one combined surface and
 * fill it via spatial_frame_pack(). In all cases the buffered views are
 * consumed (ownership passes to ff_filter_frame() or they are freed).
 *
 * @return 0 when one view is still missing, the ff_filter_frame() result on
 *         success, or a negative AVERROR code on failure.
 */
static int try_push_frame(AVFilterContext *ctx)
{
    FramepackContext *s   = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVStereo3D *stereo;
    int ret, i;

    // nothing to do until both views are available
    if (!(s->input_views[0] && s->input_views[1]))
        return 0;
    if (s->format == AV_STEREO3D_FRAMESEQUENCE) {
        int64_t pts = s->input_views[0]->pts;

        for (i = 0; i < 2; i++) {
            // set correct timestamps: output runs at twice the input rate,
            // so the left view gets pts * 2 and the right view is offset by
            // one output frame duration in the output time base
            if (pts != AV_NOPTS_VALUE)
                s->input_views[i]->pts = i == 0 ? pts * 2 : pts * 2 + av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base);

            // set stereo3d side data
            stereo = av_stereo3d_create_side_data(s->input_views[i]);
            if (!stereo)
                return AVERROR(ENOMEM);
            stereo->type = s->format;
            stereo->view = i == LEFT ? AV_STEREO3D_VIEW_LEFT
                                     : AV_STEREO3D_VIEW_RIGHT;

            // filter the frame and immediately relinquish its pointer
            // (ff_filter_frame takes ownership even on failure)
            ret = ff_filter_frame(outlink, s->input_views[i]);
            s->input_views[i] = NULL;
            if (ret < 0)
                return ret;
        }
        return ret;
    } else {
        AVFrame *dst = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!dst)
            return AVERROR(ENOMEM);

        spatial_frame_pack(outlink, dst);

        // get any property from the original frame
        ret = av_frame_copy_props(dst, s->input_views[LEFT]);
        if (ret < 0) {
            av_frame_free(&dst);
            return ret;
        }

        // both views have been consumed into dst; release them
        for (i = 0; i < 2; i++)
            av_frame_free(&s->input_views[i]);

        // set stereo3d side data
        stereo = av_stereo3d_create_side_data(dst);
        if (!stereo) {
            av_frame_free(&dst);
            return AVERROR(ENOMEM);
        }
        stereo->type = s->format;

        return ff_filter_frame(outlink, dst);
    }
}
387
/**
 * Activate callback: pull one frame from each input, and push output as soon
 * as both views are buffered.
 *
 * NOTE(review): the FF_FILTER_FORWARD_* macros expand to early returns, so
 * the statement order below is significant — status is only forwarded after
 * trying to consume and push, and frames are only requested afterwards.
 */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    FramepackContext *s   = ctx->priv;
    int ret;

    // propagate downstream EOF/error back to both inputs
    FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);

    // buffer at most one pending frame per view
    if (!s->input_views[0]) {
        ret = ff_inlink_consume_frame(ctx->inputs[0], &s->input_views[0]);
        if (ret < 0)
            return ret;
    }

    if (!s->input_views[1]) {
        ret = ff_inlink_consume_frame(ctx->inputs[1], &s->input_views[1]);
        if (ret < 0)
            return ret;
    }

    if (s->input_views[0] && s->input_views[1])
        return try_push_frame(ctx);

    // forward input EOF/error to the output (returns early if set)
    FF_FILTER_FORWARD_STATUS(ctx->inputs[0], outlink);
    FF_FILTER_FORWARD_STATUS(ctx->inputs[1], outlink);

    // downstream wants a frame: request from whichever live input is missing
    if (ff_outlink_frame_wanted(ctx->outputs[0]) &&
        !ff_outlink_get_status(ctx->inputs[0]) &&
        !s->input_views[0]) {
        ff_inlink_request_frame(ctx->inputs[0]);
        return 0;
    }

    if (ff_outlink_frame_wanted(ctx->outputs[0]) &&
        !ff_outlink_get_status(ctx->inputs[1]) &&
        !s->input_views[1]) {
        ff_inlink_request_frame(ctx->inputs[1]);
        return 0;
    }

    return FFERROR_NOT_READY;
}
430
#define OFFSET(x) offsetof(FramepackContext, x)
#define VF AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
/* "format" selects the AVStereo3DType packing mode; default is side-by-side */
static const AVOption framepack_options[] = {
    { "format", "Frame pack output format", OFFSET(format), AV_OPT_TYPE_INT,
      { .i64 = AV_STEREO3D_SIDEBYSIDE }, 0, INT_MAX, .flags = VF, .unit = "format" },
    { "sbs", "Views are packed next to each other", 0, AV_OPT_TYPE_CONST,
      { .i64 = AV_STEREO3D_SIDEBYSIDE }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
    { "tab", "Views are packed on top of each other", 0, AV_OPT_TYPE_CONST,
      { .i64 = AV_STEREO3D_TOPBOTTOM }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
    { "frameseq", "Views are one after the other", 0, AV_OPT_TYPE_CONST,
      { .i64 = AV_STEREO3D_FRAMESEQUENCE }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
    { "lines", "Views are interleaved by lines", 0, AV_OPT_TYPE_CONST,
      { .i64 = AV_STEREO3D_LINES }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
    { "columns", "Views are interleaved by columns", 0, AV_OPT_TYPE_CONST,
      { .i64 = AV_STEREO3D_COLUMNS }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
    { NULL },
};
448
AVFILTER_DEFINE_CLASS(framepack);

/* two video inputs: pad 0 is the left view, pad 1 the right view */
static const AVFilterPad framepack_inputs[] = {
    {
        .name = "left",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    {
        .name = "right",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

/* single video output carrying the packed result */
static const AVFilterPad framepack_outputs[] = {
    {
        .name          = "packed",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
    { NULL }
};

AVFilter ff_vf_framepack = {
    .name           = "framepack",
    .description    = NULL_IF_CONFIG_SMALL("Generate a frame packed stereoscopic video."),
    .priv_size      = sizeof(FramepackContext),
    .priv_class     = &framepack_class,
    .query_formats  = query_formats,
    .inputs         = framepack_inputs,
    .outputs        = framepack_outputs,
    .activate       = activate,
    .uninit         = framepack_uninit,
};
483