/*
 * Copyright (c) 2013-2020 Michael Barbour <barbour.michael.0@gmail.com>
 * Copyright (c) 2021 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/channel_layout.h"
#include "libavutil/ffmath.h"
#include "libavutil/lfg.h"
#include "libavutil/random_seed.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"

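/*
 * Tunable constants: MAX_STAGES bounds the per-channel all-pass cascade,
 * FILTER_FC is the corner frequency (Hz) of the shelf built in ap_init(),
 * and RT60_LF/RT60_HF are, presumably, target -60 dB decay times in seconds
 * below/above that frequency, from which ap_init() derives its gains.
 */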
#define MAX_STAGES 16
#define FILTER_FC  1100.0
#define RT60_LF    0.1
#define RT60_HF    0.008

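/*
 * State of one all-pass stage: circular input/output histories mx/my of
 * len = delay_samples + 1 entries, the current write position p, and the
 * coefficients computed in ap_init().
 */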
typedef struct APContext {
    int len, p;
    double *mx, *my;
    double b0, b1, a0, a1;
} APContext;

typedef struct ADecorrelateContext {
    const AVClass *class;

    int stages;
    int64_t seed;

    int nb_channels;
    APContext (*ap)[MAX_STAGES];

    AVLFG c;

    void (*filter_channel)(AVFilterContext *ctx,
                           int channel,
                           AVFrame *in, AVFrame *out);
} ADecorrelateContext;

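/*
 * Set up one stage for sample rate fs and a delay given in seconds.
 * gain_lf/gain_hf are the attenuations (dB) accumulated over delay_samples
 * by a signal that decays 60 dB in RT60_LF/RT60_HF seconds; the coefficients
 * appear to embed a first-order shelf at FILTER_FC (note the prewarped
 * t = tan(w0 / 2)) so that the two gains apply below/above that frequency.
 */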
static int ap_init(APContext *ap, int fs, double delay)
{
    const int delay_samples = lrint(round(delay * fs));
    const double gain_lf = -60.0 / (RT60_LF * fs) * delay_samples;
    const double gain_hf = -60.0 / (RT60_HF * fs) * delay_samples;
    const double w0 = 2.0 * M_PI * FILTER_FC / fs;
    const double t = tan(w0 / 2.0);
    const double g_hf = ff_exp10(gain_hf / 20.0);
    const double gd = ff_exp10((gain_lf-gain_hf) / 20.0);
    const double sgd = sqrt(gd);

    ap->len = delay_samples + 1;
    ap->p = 0;
    ap->mx = av_calloc(ap->len, sizeof(*ap->mx));
    ap->my = av_calloc(ap->len, sizeof(*ap->my));
    if (!ap->mx || !ap->my)
        return AVERROR(ENOMEM);

    ap->a0 = t + sgd;
    ap->a1 = (t - sgd) / ap->a0;
    ap->b0 = (gd*t - sgd) / ap->a0 * g_hf;
    ap->b1 = (gd*t + sgd) / ap->a0 * g_hf;
    ap->a0 = 1.0;

    return 0;
}

static void ap_free(APContext *ap)
{
    av_freep(&ap->mx);
    av_freep(&ap->my);
}

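/*
 * Advance one stage by a single sample. With D = len - 1, i0, i_n2 and i_n1
 * address the samples 1, D and D + 1 steps back in the circular buffers, so
 * the recursion is
 *   r[n] = b1*x[n] + b0*x[n-1] + a1*x[n-D] + a0*x[n-D-1]
 *        - a1*r[n-1] - b0*r[n-D] - b1*r[n-D-1]
 * and, with a0 forced to 1 in ap_init(), the numerator mirrors the
 * denominator, i.e. the stage acts as an all-pass.
 */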
static double ap_run(APContext *ap, double x)
{
    const int i0 = ((ap->p < 1) ? ap->len : ap->p)-1, i_n1 = ap->p, i_n2 = (ap->p+1 >= ap->len) ? 0 : ap->p+1;
    const double r = ap->b1*x + ap->b0*ap->mx[i0] + ap->a1*ap->mx[i_n2] + ap->a0*ap->mx[i_n1] -
                     ap->a1*ap->my[i0] - ap->b0*ap->my[i_n2] - ap->b1*ap->my[i_n1];

    ap->mx[ap->p] = x;
    ap->my[ap->p] = r;
    ap->p = (ap->p+1 >= ap->len) ? 0 : ap->p+1;

    return r;
}

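/*
 * Process one channel: the first stage reads from the input plane, the
 * remaining stages run in place on the output plane.
 */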
static void filter_channel_dbl(AVFilterContext *ctx, int ch,
                               AVFrame *in, AVFrame *out)
{
    ADecorrelateContext *s = ctx->priv;
    const double *src = (const double *)in->extended_data[ch];
    double *dst = (double *)out->extended_data[ch];
    const int nb_samples = in->nb_samples;
    const int stages = s->stages;
    APContext *ap0 = &s->ap[ch][0];

    for (int n = 0; n < nb_samples; n++) {
        dst[n] = ap_run(ap0, src[n]);
        for (int i = 1; i < stages; i++) {
            APContext *ap = &s->ap[ch][i];

            dst[n] = ap_run(ap, dst[n]);
        }
    }
}

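/*
 * Seed the LFG (randomly when seed == -1), allocate one row of MAX_STAGES
 * stage contexts per channel and initialize the active stages with
 * pseudo-random delays of roughly 0.833 ms to 3.125 ms.
 */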
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ADecorrelateContext *s = ctx->priv;
    int ret;

    if (s->seed == -1)
        s->seed = av_get_random_seed();
    av_lfg_init(&s->c, s->seed);

    s->nb_channels = inlink->ch_layout.nb_channels;
    s->ap = av_calloc(inlink->ch_layout.nb_channels, sizeof(*s->ap));
    if (!s->ap)
        return AVERROR(ENOMEM);

    for (int i = 0; i < inlink->ch_layout.nb_channels; i++) {
        for (int j = 0; j < s->stages; j++) {
            ret = ap_init(&s->ap[i][j], inlink->sample_rate,
                          (double)av_lfg_get(&s->c) / 0xffffffff * 2.2917e-3 + 0.83333e-3);
            if (ret < 0)
                return ret;
        }
    }

    s->filter_channel = filter_channel_dbl;

    return 0;
}

typedef struct ThreadData {
    AVFrame *in, *out;
} ThreadData;

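/* Slice-threading worker: job jobnr of nb_jobs filters the channel range [start, end). */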
static int filter_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ADecorrelateContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *out = td->out;
    AVFrame *in = td->in;
    const int start = (in->ch_layout.nb_channels * jobnr) / nb_jobs;
    const int end = (in->ch_layout.nb_channels * (jobnr+1)) / nb_jobs;

    for (int ch = start; ch < end; ch++)
        s->filter_channel(ctx, ch, in, out);

    return 0;
}

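/*
 * Filter a frame in place when it is writable, otherwise into a new buffer,
 * spreading the channels over at most
 * FFMIN(nb_channels, ff_filter_get_nb_threads()) jobs.
 */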
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    ThreadData td;

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    td.in = in; td.out = out;
    ff_filter_execute(ctx, filter_channels, &td, NULL,
                      FFMIN(inlink->ch_layout.nb_channels, ff_filter_get_nb_threads(ctx)));

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

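/* Free the delay lines of every initialized stage and the per-channel array. */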
static av_cold void uninit(AVFilterContext *ctx)
{
    ADecorrelateContext *s = ctx->priv;

    if (s->ap) {
        for (int ch = 0; ch < s->nb_channels; ch++) {
            for (int stage = 0; stage < s->stages; stage++)
                ap_free(&s->ap[ch][stage]);
        }
    }

    av_freep(&s->ap);
}

#define OFFSET(x) offsetof(ADecorrelateContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption adecorrelate_options[] = {
    { "stages", "set filtering stages", OFFSET(stages), AV_OPT_TYPE_INT,    {.i64=6},   1, MAX_STAGES, FLAGS },
    { "seed",   "set random seed",      OFFSET(seed),   AV_OPT_TYPE_INT64,  {.i64=-1}, -1,   UINT_MAX, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(adecorrelate);

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
};

static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
};

const AVFilter ff_af_adecorrelate = {
    .name            = "adecorrelate",
    .description     = NULL_IF_CONFIG_SMALL("Apply decorrelation to input audio."),
    .priv_size       = sizeof(ADecorrelateContext),
    .priv_class      = &adecorrelate_class,
    .uninit          = uninit,
    FILTER_INPUTS(inputs),
    FILTER_OUTPUTS(outputs),
    FILTER_SINGLE_SAMPLEFMT(AV_SAMPLE_FMT_DBLP),
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC |
                       AVFILTER_FLAG_SLICE_THREADS,
};