/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/channel_layout.h"
#include "libavutil/ffmath.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"

typedef struct ASubBoostContext {
    const AVClass *class;

    double dry_gain;
    double wet_gain;
    double feedback;
    double max_boost;
    double decay;
    double delay;
    double cutoff;
    double slope;

    double a0, a1, a2;
    double b0, b1, b2;

    char *ch_layout_str;
    AVChannelLayout ch_layout;

    int *write_pos;
    int buffer_samples;

    AVFrame *w;
    AVFrame *buffer;
} ASubBoostContext;

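/* Compute the coefficients of the second-order (biquad) low-pass that
 * isolates the sub band: the cutoff frequency comes from the "cutoff"
 * option and the bandwidth from "slope"; all coefficients are normalized
 * by a0. Also derive the delay-line length in samples from the "delay"
 * option given in milliseconds. */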
static int get_coeffs(AVFilterContext *ctx)
{
    ASubBoostContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    double w0 = 2 * M_PI * s->cutoff / inlink->sample_rate;
    double alpha = sin(w0) / 2 * sqrt(2. * (1. / s->slope - 1.) + 2.);

    s->a0 =  1 + alpha;
    s->a1 = -2 * cos(w0);
    s->a2 =  1 - alpha;
    s->b0 = (1 - cos(w0)) / 2;
    s->b1 =  1 - cos(w0);
    s->b2 = (1 - cos(w0)) / 2;

    s->a1 /= s->a0;
    s->a2 /= s->a0;
    s->b0 /= s->a0;
    s->b1 /= s->a0;
    s->b2 /= s->a0;

    s->buffer_samples = inlink->sample_rate * s->delay / 1000;

    return 0;
}

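/* Allocate per-channel state: a feedback delay line sized for 1/10 of a
 * second (enough for the 100 ms maximum delay), three doubles per channel
 * (two biquad states plus the smoothed boost envelope) and the delay-line
 * write positions, then compute the filter coefficients. */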
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ASubBoostContext *s = ctx->priv;

    s->buffer = ff_get_audio_buffer(inlink, inlink->sample_rate / 10);
    s->w = ff_get_audio_buffer(inlink, 3);
    s->write_pos = av_calloc(inlink->ch_layout.nb_channels, sizeof(*s->write_pos));
    if (!s->buffer || !s->w || !s->write_pos)
        return AVERROR(ENOMEM);

    return get_coeffs(ctx);
}

typedef struct ThreadData {
    AVFrame *in, *out;
} ThreadData;

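/* Slice-threaded worker: each job handles a contiguous range of channels.
 * Channels outside the selected layout are copied through unchanged. For the
 * rest, every sample is low-passed with a transposed direct form II biquad
 * (states w[0], w[1]), fed into a decaying feedback delay line, and the
 * delayed signal is mixed back with the dry input scaled by a smoothed boost
 * envelope (w[2]) capped at max_boost. When the filter is disabled via the
 * timeline, the gains fall back to unity and the wet path is muted. */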
static int filter_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ASubBoostContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *out = td->out;
    AVFrame *in = td->in;
    const double mix = ctx->is_disabled ? 0. : 1.;
    const double wet = ctx->is_disabled ? 1. : s->wet_gain;
    const double dry = ctx->is_disabled ? 1. : s->dry_gain;
    const double feedback = s->feedback, decay = s->decay;
    const double max_boost = s->max_boost;
    const double b0 = s->b0;
    const double b1 = s->b1;
    const double b2 = s->b2;
    const double a1 = -s->a1;
    const double a2 = -s->a2;
    const int start = (in->ch_layout.nb_channels * jobnr) / nb_jobs;
    const int end = (in->ch_layout.nb_channels * (jobnr+1)) / nb_jobs;
    const int buffer_samples = s->buffer_samples;

    for (int ch = start; ch < end; ch++) {
        const double *src = (const double *)in->extended_data[ch];
        double *dst = (double *)out->extended_data[ch];
        double *buffer = (double *)s->buffer->extended_data[ch];
        double *w = (double *)s->w->extended_data[ch];
        int write_pos = s->write_pos[ch];
        enum AVChannel channel = av_channel_layout_channel_from_index(&in->ch_layout, ch);
        const int bypass = av_channel_layout_index_from_channel(&s->ch_layout, channel) < 0;
        const double a = 0.00001;
        const double b = 1. - a;

        if (bypass) {
            if (in != out)
                memcpy(out->extended_data[ch], in->extended_data[ch],
                       in->nb_samples * sizeof(double));
            continue;
        }

        for (int n = 0; n < in->nb_samples; n++) {
            double out_sample, boost;

            out_sample = src[n] * b0 + w[0];
            w[0] = b1 * src[n] + w[1] + a1 * out_sample;
            w[1] = b2 * src[n] + a2 * out_sample;

            buffer[write_pos] = buffer[write_pos] * decay + out_sample * feedback;
            boost = av_clipd((1. -  (fabs(src[n] * dry))) / fabs(buffer[write_pos]), 0., max_boost);
            w[2] = boost > w[2] ? w[2] * b + a * boost : w[2] * a + b * boost;
            w[2] = av_clipd(w[2], 0., max_boost);
            dst[n] = (src[n] * dry + w[2] * buffer[write_pos] * mix) * wet;

            if (++write_pos >= buffer_samples)
                write_pos = 0;
        }

        s->write_pos[ch] = write_pos;
    }

    return 0;
}

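/* Per-frame entry point: refresh the set of channels to filter from the
 * "channels" option (all input channels unless a layout is given), obtain a
 * writable output frame (in place when possible) and run filter_channels
 * across the available threads. */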
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    ASubBoostContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    ThreadData td;
    AVFrame *out;
    int ret;

    ret = av_channel_layout_copy(&s->ch_layout, &inlink->ch_layout);
    if (ret < 0)
        return ret;
    if (strcmp(s->ch_layout_str, "all"))
        av_channel_layout_from_string(&s->ch_layout,
                                      s->ch_layout_str);

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    td.in = in; td.out = out;
    ff_filter_execute(ctx, filter_channels, &td, NULL,
                      FFMIN(inlink->ch_layout.nb_channels, ff_filter_get_nb_threads(ctx)));

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ASubBoostContext *s = ctx->priv;

    av_channel_layout_uninit(&s->ch_layout);
    av_frame_free(&s->buffer);
    av_frame_free(&s->w);
    av_freep(&s->write_pos);
}

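/* Apply a runtime option change and recompute the filter coefficients so
 * updates to "cutoff", "slope" and "delay" take effect immediately. */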
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    int ret;

    ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
    if (ret < 0)
        return ret;

    return get_coeffs(ctx);
}

#define OFFSET(x) offsetof(ASubBoostContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption asubboost_options[] = {
    { "dry",      "set dry gain", OFFSET(dry_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1.0},      0,   1, FLAGS },
    { "wet",      "set wet gain", OFFSET(wet_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1.0},      0,   1, FLAGS },
    { "boost",    "set max boost",OFFSET(max_boost),AV_OPT_TYPE_DOUBLE, {.dbl=2.0},      1,  12, FLAGS },
    { "decay",    "set decay",    OFFSET(decay),    AV_OPT_TYPE_DOUBLE, {.dbl=0.0},      0,   1, FLAGS },
    { "feedback", "set feedback", OFFSET(feedback), AV_OPT_TYPE_DOUBLE, {.dbl=0.9},      0,   1, FLAGS },
    { "cutoff",   "set cutoff",   OFFSET(cutoff),   AV_OPT_TYPE_DOUBLE, {.dbl=100},     50, 900, FLAGS },
    { "slope",    "set slope",    OFFSET(slope),    AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0.0001,   1, FLAGS },
    { "delay",    "set delay",    OFFSET(delay),    AV_OPT_TYPE_DOUBLE, {.dbl=20},       1, 100, FLAGS },
    { "channels", "set channels to filter", OFFSET(ch_layout_str), AV_OPT_TYPE_STRING, {.str="all"}, 0, 0, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(asubboost);

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
};

static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
};

const AVFilter ff_af_asubboost = {
    .name           = "asubboost",
    .description    = NULL_IF_CONFIG_SMALL("Boost subwoofer frequencies."),
    .priv_size      = sizeof(ASubBoostContext),
    .priv_class     = &asubboost_class,
    .uninit         = uninit,
    FILTER_INPUTS(inputs),
    FILTER_OUTPUTS(outputs),
    FILTER_SINGLE_SAMPLEFMT(AV_SAMPLE_FMT_DBLP),
    .process_command = process_command,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL |
                       AVFILTER_FLAG_SLICE_THREADS,
};