/*
 * Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Audio merging filter
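 *
 * Interleaves the samples of several audio inputs into one packed
 * multi-channel output stream; all inputs are negotiated to a packed
 * sample format and a common sample rate.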
 */

#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "filters.h"
#include "audio.h"
#include "internal.h"

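/* Upper bound on the number of inputs and on the total number of output
 * channels; 64 matches the width of the uint64_t channel mask used for
 * routing. */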
#define SWR_CH_MAX 64

typedef struct AMergeContext {
    const AVClass *class;
    int nb_inputs;
    int route[SWR_CH_MAX]; /**< channels routing, see copy_samples */
    int bps;
    struct amerge_input {
        int nb_ch;         /**< number of channels for the input */
    } *in;
} AMergeContext;

#define OFFSET(x) offsetof(AMergeContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption amerge_options[] = {
    { "inputs", "specify the number of inputs", OFFSET(nb_inputs),
      AV_OPT_TYPE_INT, { .i64 = 2 }, 1, SWR_CH_MAX, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(amerge);

static av_cold void uninit(AVFilterContext *ctx)
{
    AMergeContext *s = ctx->priv;

    av_freep(&s->in);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat packed_sample_fmts[] = {
        AV_SAMPLE_FMT_U8,
        AV_SAMPLE_FMT_S16,
        AV_SAMPLE_FMT_S32,
        AV_SAMPLE_FMT_FLT,
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_NONE
    };
    AMergeContext *s = ctx->priv;
    AVChannelLayout *inlayout[SWR_CH_MAX] = { NULL }, outlayout = { 0 };
    uint64_t outmask = 0;
    AVFilterChannelLayouts *layouts;
    int i, ret, overlap = 0, nb_ch = 0;

    for (i = 0; i < s->nb_inputs; i++) {
        if (!ctx->inputs[i]->incfg.channel_layouts ||
            !ctx->inputs[i]->incfg.channel_layouts->nb_channel_layouts) {
            av_log(ctx, AV_LOG_WARNING,
                   "No channel layout for input %d\n", i + 1);
            return AVERROR(EAGAIN);
        }
        inlayout[i] = &ctx->inputs[i]->incfg.channel_layouts->channel_layouts[0];
        if (ctx->inputs[i]->incfg.channel_layouts->nb_channel_layouts > 1) {
            char buf[256];
            av_channel_layout_describe(inlayout[i], buf, sizeof(buf));
            av_log(ctx, AV_LOG_INFO, "Using \"%s\" for input %d\n", buf, i + 1);
        }
        s->in[i].nb_ch = FF_LAYOUT2COUNT(inlayout[i]);
        if (s->in[i].nb_ch) {
            overlap++;
        } else {
            s->in[i].nb_ch = inlayout[i]->nb_channels;
            if (av_channel_layout_subset(inlayout[i], outmask))
                overlap++;
            outmask |= inlayout[i]->order == AV_CHANNEL_ORDER_NATIVE ?
                       inlayout[i]->u.mask : 0;
        }
        nb_ch += s->in[i].nb_ch;
    }
    if (nb_ch > SWR_CH_MAX) {
        av_log(ctx, AV_LOG_ERROR, "Too many channels (max %d)\n", SWR_CH_MAX);
        return AVERROR(EINVAL);
    }
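    /* Two routing strategies: if any input layout is unknown or overlaps the
     * channels already collected, the inputs are simply concatenated in order
     * and a default layout is chosen for the total channel count; otherwise
     * each input channel is routed to the position of its channel id within
     * the combined output mask. */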
    if (overlap) {
        av_log(ctx, AV_LOG_WARNING,
               "Input channel layouts overlap: "
               "output layout will be determined by the number of distinct input channels\n");
        for (i = 0; i < nb_ch; i++)
            s->route[i] = i;
        av_channel_layout_default(&outlayout, nb_ch);
        if (!KNOWN(&outlayout) && nb_ch)
            av_channel_layout_from_mask(&outlayout, 0xFFFFFFFFFFFFFFFFULL >> (64 - nb_ch));
    } else {
        int *route[SWR_CH_MAX];
        int c, out_ch_number = 0;

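        /* Partition s->route into one sub-array per input, then scan the 64
         * possible channel ids in ascending order and hand out increasing
         * output indices to every input that contains that channel. */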
        av_channel_layout_from_mask(&outlayout, outmask);
        route[0] = s->route;
        for (i = 1; i < s->nb_inputs; i++)
            route[i] = route[i - 1] + s->in[i - 1].nb_ch;
        for (c = 0; c < 64; c++)
            for (i = 0; i < s->nb_inputs; i++)
                if (av_channel_layout_index_from_channel(inlayout[i], c) >= 0)
                    *(route[i]++) = out_ch_number++;
    }
    if ((ret = ff_set_common_formats_from_list(ctx, packed_sample_fmts)) < 0)
        return ret;
    for (i = 0; i < s->nb_inputs; i++) {
        layouts = NULL;
        if ((ret = ff_add_channel_layout(&layouts, inlayout[i])) < 0)
            return ret;
        if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[i]->outcfg.channel_layouts)) < 0)
            return ret;
    }
    layouts = NULL;
    if ((ret = ff_add_channel_layout(&layouts, &outlayout)) < 0)
        return ret;
    if ((ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->incfg.channel_layouts)) < 0)
        return ret;

    return ff_set_common_all_samplerates(ctx);
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AMergeContext *s = ctx->priv;
    AVBPrint bp;
    char buf[128];
    int i;

    s->bps = av_get_bytes_per_sample(ctx->outputs[0]->format);
    outlink->time_base = ctx->inputs[0]->time_base;

    av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC);
    for (i = 0; i < s->nb_inputs; i++) {
        av_bprintf(&bp, "%sin%d:", i ? " + " : "", i);
        av_channel_layout_describe(&ctx->inputs[i]->ch_layout, buf, sizeof(buf));
        av_bprintf(&bp, "%s", buf);
    }
    av_bprintf(&bp, " -> out:");
    av_channel_layout_describe(&ctx->outputs[0]->ch_layout, buf, sizeof(buf));
    av_bprintf(&bp, "%s", buf);
    av_log(ctx, AV_LOG_VERBOSE, "%s\n", bp.str);

    return 0;
}

/**
 * Copy samples from several input streams to one output stream.
 * @param nb_inputs number of inputs
 * @param in        inputs; used only for the nb_ch field
 * @param route     routing values;
 *                  input channel i goes to output channel route[i];
 *                  i <  in[0].nb_ch are the channels from the first input;
 *                  i >= in[0].nb_ch are the channels from the second input
 * @param ins       pointer to the samples of each input, in packed format;
 *                  will be left at the end of the copied samples
 * @param outs      pointer to the samples of the output, in packed format;
 *                  must point to a buffer big enough;
 *                  will be left at the end of the copied samples
 * @param ns        number of samples to copy
 * @param bps       bytes per sample
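 *
 * Illustrative example: two stereo inputs with the identity routing
 * route = { 0, 1, 2, 3 } give output sample periods laid out as
 * in0.L, in0.R, in1.L, in1.R.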
 */
static inline void copy_samples(int nb_inputs, struct amerge_input in[],
                                int *route, uint8_t *ins[],
                                uint8_t **outs, int ns, int bps)
{
    int *route_cur;
    int i, c, nb_ch = 0;

    for (i = 0; i < nb_inputs; i++)
        nb_ch += in[i].nb_ch;
    while (ns--) {
        route_cur = route;
        for (i = 0; i < nb_inputs; i++) {
            for (c = 0; c < in[i].nb_ch; c++) {
                memcpy((*outs) + bps * *(route_cur++), ins[i], bps);
                ins[i] += bps;
            }
        }
        *outs += nb_ch * bps;
    }
}

static void free_frames(int nb_inputs, AVFrame **input_frames)
{
    int i;
    for (i = 0; i < nb_inputs; i++)
        av_frame_free(&input_frames[i]);
}

static int try_push_frame(AVFilterContext *ctx, int nb_samples)
{
    AMergeContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int i, ret;
    AVFrame *outbuf, *inbuf[SWR_CH_MAX] = { NULL };
    uint8_t *outs, *ins[SWR_CH_MAX];

    for (i = 0; i < ctx->nb_inputs; i++) {
        ret = ff_inlink_consume_samples(ctx->inputs[i], nb_samples, nb_samples, &inbuf[i]);
        if (ret < 0) {
            free_frames(i, inbuf);
            return ret;
        }
        ins[i] = inbuf[i]->data[0];
    }

    outbuf = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
    if (!outbuf) {
        free_frames(s->nb_inputs, inbuf);
        return AVERROR(ENOMEM);
    }

    outs = outbuf->data[0];
    outbuf->pts = inbuf[0]->pts;

    outbuf->nb_samples = nb_samples;
    if ((ret = av_channel_layout_copy(&outbuf->ch_layout, &outlink->ch_layout)) < 0)
        return ret;
#if FF_API_OLD_CHANNEL_LAYOUT
FF_DISABLE_DEPRECATION_WARNINGS
    outbuf->channel_layout = outlink->channel_layout;
    outbuf->channels       = outlink->ch_layout.nb_channels;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    while (nb_samples) {
        /* Unroll the most common sample formats: speed +~350% for the loop,
           +~13% overall (including two common decoders) */
        switch (s->bps) {
        case 1:
            copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, nb_samples, 1);
            break;
        case 2:
            copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, nb_samples, 2);
            break;
        case 4:
            copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, nb_samples, 4);
            break;
        default:
            copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, nb_samples, s->bps);
            break;
        }

        nb_samples = 0;
    }

    free_frames(s->nb_inputs, inbuf);
    return ff_filter_frame(ctx->outputs[0], outbuf);
}

static int activate(AVFilterContext *ctx)
{
    int i, status;
    int ret, nb_samples;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);

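    /* A frame can be pushed only once every input has samples queued; its
     * size is the smallest number of queued samples across all inputs. */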
    nb_samples = ff_inlink_queued_samples(ctx->inputs[0]);
    for (i = 1; i < ctx->nb_inputs && nb_samples > 0; i++) {
        nb_samples = FFMIN(ff_inlink_queued_samples(ctx->inputs[i]), nb_samples);
    }

    if (nb_samples) {
        ret = try_push_frame(ctx, nb_samples);
        if (ret < 0)
            return ret;
    }

    for (i = 0; i < ctx->nb_inputs; i++) {
        if (ff_inlink_queued_samples(ctx->inputs[i]))
            continue;

        if (ff_inlink_acknowledge_status(ctx->inputs[i], &status, &pts)) {
            ff_outlink_set_status(ctx->outputs[0], status, pts);
            return 0;
        } else if (ff_outlink_frame_wanted(ctx->outputs[0])) {
            ff_inlink_request_frame(ctx->inputs[i]);
            return 0;
        }
    }

    return 0;
}

static av_cold int init(AVFilterContext *ctx)
{
    AMergeContext *s = ctx->priv;
    int i, ret;

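    /* Create one audio input pad per requested input, named "in0", "in1", ... */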
    s->in = av_calloc(s->nb_inputs, sizeof(*s->in));
    if (!s->in)
        return AVERROR(ENOMEM);
    for (i = 0; i < s->nb_inputs; i++) {
        char *name = av_asprintf("in%d", i);
        AVFilterPad pad = {
            .name = name,
            .type = AVMEDIA_TYPE_AUDIO,
        };
        if (!name)
            return AVERROR(ENOMEM);
        if ((ret = ff_append_inpad_free_name(ctx, &pad)) < 0)
            return ret;
    }
    return 0;
}

static const AVFilterPad amerge_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
};

const AVFilter ff_af_amerge = {
    .name          = "amerge",
    .description   = NULL_IF_CONFIG_SMALL("Merge two or more audio streams into "
                                          "a single multi-channel stream."),
    .priv_size     = sizeof(AMergeContext),
    .init          = init,
    .uninit        = uninit,
    .activate      = activate,
    .inputs        = NULL,
    FILTER_OUTPUTS(amerge_outputs),
    FILTER_QUERY_FUNC(query_formats),
    .priv_class    = &amerge_class,
    .flags         = AVFILTER_FLAG_DYNAMIC_INPUTS,
};