/*
 * Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Audio merging filter
 */
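
/*
 * Typical use (illustrative invocation, not part of the original file):
 * merge two mono inputs into one stereo stream with the filtergraph syntax,
 * e.g.
 *     ffmpeg -i left.wav -i right.wav -filter_complex "[0:a][1:a]amerge=inputs=2" out.wav
 * Exact stream labels depend on the inputs.
 */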

#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "filters.h"
#include "audio.h"
#include "internal.h"

#define SWR_CH_MAX 64

typedef struct AMergeContext {
    const AVClass *class;
    int nb_inputs;
    int route[SWR_CH_MAX]; /**< channels routing, see copy_samples */
    int bps;
    struct amerge_input {
        int nb_ch;         /**< number of channels for the input */
    } *in;
} AMergeContext;

#define OFFSET(x) offsetof(AMergeContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption amerge_options[] = {
    { "inputs", "specify the number of inputs", OFFSET(nb_inputs),
      AV_OPT_TYPE_INT, { .i64 = 2 }, 1, SWR_CH_MAX, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(amerge);

static av_cold void uninit(AVFilterContext *ctx)
{
    AMergeContext *s = ctx->priv;

    av_freep(&s->in);
    for (unsigned i = 0; i < ctx->nb_inputs; i++)
        av_freep(&ctx->input_pads[i].name);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat packed_sample_fmts[] = {
        AV_SAMPLE_FMT_U8,
        AV_SAMPLE_FMT_S16,
        AV_SAMPLE_FMT_S32,
        AV_SAMPLE_FMT_FLT,
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_NONE
    };
    AMergeContext *s = ctx->priv;
    int64_t inlayout[SWR_CH_MAX], outlayout = 0;
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    int i, ret, overlap = 0, nb_ch = 0;

    for (i = 0; i < s->nb_inputs; i++) {
        if (!ctx->inputs[i]->incfg.channel_layouts ||
            !ctx->inputs[i]->incfg.channel_layouts->nb_channel_layouts) {
            av_log(ctx, AV_LOG_WARNING,
                   "No channel layout for input %d\n", i + 1);
            return AVERROR(EAGAIN);
        }
        inlayout[i] = ctx->inputs[i]->incfg.channel_layouts->channel_layouts[0];
        if (ctx->inputs[i]->incfg.channel_layouts->nb_channel_layouts > 1) {
            char buf[256];
            av_get_channel_layout_string(buf, sizeof(buf), 0, inlayout[i]);
            av_log(ctx, AV_LOG_INFO, "Using \"%s\" for input %d\n", buf, i + 1);
        }
        s->in[i].nb_ch = FF_LAYOUT2COUNT(inlayout[i]);
        if (s->in[i].nb_ch) {
            overlap++;
        } else {
            s->in[i].nb_ch = av_get_channel_layout_nb_channels(inlayout[i]);
            if (outlayout & inlayout[i])
                overlap++;
            outlayout |= inlayout[i];
        }
        nb_ch += s->in[i].nb_ch;
    }
    if (nb_ch > SWR_CH_MAX) {
        av_log(ctx, AV_LOG_ERROR, "Too many channels (max %d)\n", SWR_CH_MAX);
        return AVERROR(EINVAL);
    }
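    /* Determine the output layout and fill the route[] table mapping each
     * input channel to its output position.  Illustrative example (not part
     * of the original comments): with in0 = FC and in1 = FL|FR, the bit-order
     * scan in the non-overlapping branch yields route = { 2, 0, 1 }, i.e.
     * in0's channel becomes output channel 2 and in1's channels become
     * output channels 0 and 1, for an output layout of FL|FR|FC. */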
    if (overlap) {
        av_log(ctx, AV_LOG_WARNING,
               "Input channel layouts overlap: "
               "output layout will be determined by the number of distinct input channels\n");
        for (i = 0; i < nb_ch; i++)
            s->route[i] = i;
        outlayout = av_get_default_channel_layout(nb_ch);
        if (!outlayout && nb_ch)
            outlayout = 0xFFFFFFFFFFFFFFFFULL >> (64 - nb_ch);
    } else {
        int *route[SWR_CH_MAX];
        int c, out_ch_number = 0;

        route[0] = s->route;
        for (i = 1; i < s->nb_inputs; i++)
            route[i] = route[i - 1] + s->in[i - 1].nb_ch;
        for (c = 0; c < 64; c++)
            for (i = 0; i < s->nb_inputs; i++)
                if ((inlayout[i] >> c) & 1)
                    *(route[i]++) = out_ch_number++;
    }
    formats = ff_make_format_list(packed_sample_fmts);
    if ((ret = ff_set_common_formats(ctx, formats)) < 0)
        return ret;
    for (i = 0; i < s->nb_inputs; i++) {
        layouts = NULL;
        if ((ret = ff_add_channel_layout(&layouts, inlayout[i])) < 0)
            return ret;
        if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[i]->outcfg.channel_layouts)) < 0)
            return ret;
    }
    layouts = NULL;
    if ((ret = ff_add_channel_layout(&layouts, outlayout)) < 0)
        return ret;
    if ((ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->incfg.channel_layouts)) < 0)
        return ret;

    return ff_set_common_samplerates(ctx, ff_all_samplerates());
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AMergeContext *s = ctx->priv;
    AVBPrint bp;
    int i;

    for (i = 1; i < s->nb_inputs; i++) {
        if (ctx->inputs[i]->sample_rate != ctx->inputs[0]->sample_rate) {
            av_log(ctx, AV_LOG_ERROR,
                   "Inputs must have the same sample rate "
                   "%d for in%d vs %d\n",
                   ctx->inputs[i]->sample_rate, i, ctx->inputs[0]->sample_rate);
            return AVERROR(EINVAL);
        }
    }
    s->bps = av_get_bytes_per_sample(ctx->outputs[0]->format);
    outlink->sample_rate = ctx->inputs[0]->sample_rate;
    outlink->time_base   = ctx->inputs[0]->time_base;

    av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC);
    for (i = 0; i < s->nb_inputs; i++) {
        av_bprintf(&bp, "%sin%d:", i ? " + " : "", i);
        av_bprint_channel_layout(&bp, -1, ctx->inputs[i]->channel_layout);
    }
    av_bprintf(&bp, " -> out:");
    av_bprint_channel_layout(&bp, -1, ctx->outputs[0]->channel_layout);
    av_log(ctx, AV_LOG_VERBOSE, "%s\n", bp.str);

    return 0;
}

/**
 * Copy samples from several input streams to one output stream.
 * @param nb_inputs number of inputs
 * @param in        inputs; used only for the nb_ch field
 * @param route     routing values;
 *                  input channel i goes to output channel route[i];
 *                  i <  in[0].nb_ch are the channels from the first input;
 *                  i >= in[0].nb_ch are the channels from the following inputs
 * @param ins       pointers to the samples of each input, in packed format;
 *                  left pointing just past the copied samples on return
 * @param outs      pointer to the samples of the output, in packed format;
 *                  must point to a buffer big enough;
 *                  left pointing just past the copied samples on return
 * @param ns        number of samples to copy
 * @param bps       bytes per sample
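 *
 * Example (illustrative, not part of the original documentation): with a
 * stereo first input, a mono second input and route = { 0, 1, 2 }, each
 * output sample is packed as L R C, so the output buffer reads
 * L0 R0 C0 L1 R1 C1 ...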
 */
static inline void copy_samples(int nb_inputs, struct amerge_input in[],
                                int *route, uint8_t *ins[],
                                uint8_t **outs, int ns, int bps)
{
    int *route_cur;
    int i, c, nb_ch = 0;

    for (i = 0; i < nb_inputs; i++)
        nb_ch += in[i].nb_ch;
    while (ns--) {
        route_cur = route;
        for (i = 0; i < nb_inputs; i++) {
            for (c = 0; c < in[i].nb_ch; c++) {
                memcpy((*outs) + bps * *(route_cur++), ins[i], bps);
                ins[i] += bps;
            }
        }
        *outs += nb_ch * bps;
    }
}

static void free_frames(int nb_inputs, AVFrame **input_frames)
{
    int i;
    for (i = 0; i < nb_inputs; i++)
        av_frame_free(&input_frames[i]);
}

static int try_push_frame(AVFilterContext *ctx, int nb_samples)
{
    AMergeContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int i, ret;
    AVFrame *outbuf, *inbuf[SWR_CH_MAX] = { NULL };
    uint8_t *outs, *ins[SWR_CH_MAX];

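    /* activate() only calls this once every input has at least nb_samples
     * queued, so each consume below is expected to return a full frame. */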
    for (i = 0; i < ctx->nb_inputs; i++) {
        ret = ff_inlink_consume_samples(ctx->inputs[i], nb_samples, nb_samples, &inbuf[i]);
        if (ret < 0) {
            free_frames(i, inbuf);
            return ret;
        }
        ins[i] = inbuf[i]->data[0];
    }

    outbuf = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
    if (!outbuf) {
        free_frames(s->nb_inputs, inbuf);
        return AVERROR(ENOMEM);
    }

    outs = outbuf->data[0];
    outbuf->pts = inbuf[0]->pts;

    outbuf->nb_samples     = nb_samples;
    outbuf->channel_layout = outlink->channel_layout;
    outbuf->channels       = outlink->channels;

    while (nb_samples) {
        /* Unroll the most common sample formats: speed +~350% for the loop,
           +~13% overall (including two common decoders) */
        switch (s->bps) {
        case 1:
            copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, nb_samples, 1);
            break;
        case 2:
            copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, nb_samples, 2);
            break;
        case 4:
            copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, nb_samples, 4);
            break;
        default:
            copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, nb_samples, s->bps);
            break;
        }

        nb_samples = 0;
    }

    free_frames(s->nb_inputs, inbuf);
    return ff_filter_frame(ctx->outputs[0], outbuf);
}

static int activate(AVFilterContext *ctx)
{
    int i, status;
    int ret, nb_samples;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);

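    /* The merged frame can only be as long as the shortest input queue:
     * take the minimum of the queued samples over all inputs (zero as soon
     * as one input has nothing queued). */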
    nb_samples = ff_inlink_queued_samples(ctx->inputs[0]);
    for (i = 1; i < ctx->nb_inputs && nb_samples > 0; i++) {
        nb_samples = FFMIN(ff_inlink_queued_samples(ctx->inputs[i]), nb_samples);
    }

    if (nb_samples) {
        ret = try_push_frame(ctx, nb_samples);
        if (ret < 0)
            return ret;
    }

    for (i = 0; i < ctx->nb_inputs; i++) {
        if (ff_inlink_queued_samples(ctx->inputs[i]))
            continue;

        if (ff_inlink_acknowledge_status(ctx->inputs[i], &status, &pts)) {
            ff_outlink_set_status(ctx->outputs[0], status, pts);
            return 0;
        } else if (ff_outlink_frame_wanted(ctx->outputs[0])) {
            ff_inlink_request_frame(ctx->inputs[i]);
            return 0;
        }
    }

    return 0;
}

static av_cold int init(AVFilterContext *ctx)
{
    AMergeContext *s = ctx->priv;
    int i, ret;

    s->in = av_calloc(s->nb_inputs, sizeof(*s->in));
    if (!s->in)
        return AVERROR(ENOMEM);
    for (i = 0; i < s->nb_inputs; i++) {
        char *name = av_asprintf("in%d", i);
        AVFilterPad pad = {
            .name = name,
            .type = AVMEDIA_TYPE_AUDIO,
        };
        if (!name)
            return AVERROR(ENOMEM);
        if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0) {
            av_freep(&pad.name);
            return ret;
        }
    }
    return 0;
}

static const AVFilterPad amerge_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_af_amerge = {
    .name          = "amerge",
    .description   = NULL_IF_CONFIG_SMALL("Merge two or more audio streams into "
                                          "a single multi-channel stream."),
    .priv_size     = sizeof(AMergeContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .activate      = activate,
    .inputs        = NULL,
    .outputs       = amerge_outputs,
    .priv_class    = &amerge_class,
    .flags         = AVFILTER_FLAG_DYNAMIC_INPUTS,
};