1 /*
2 * Copyright (c) 2012 Pavel Koshevoy <pkoshevoy at gmail dot com>
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 /**
22 * @file
23 * tempo scaling audio filter -- an implementation of WSOLA algorithm
24 *
25 * Based on MIT licensed yaeAudioTempoFilter.h and yaeAudioFragment.h
26 * from Apprentice Video player by Pavel Koshevoy.
27 * https://sourceforge.net/projects/apprenticevideo/
28 *
29 * An explanation of SOLA algorithm is available at
30 * http://www.surina.net/article/time-and-pitch-scaling.html
31 *
32 * WSOLA is very similar to SOLA, only one major difference exists between
33 * these algorithms. SOLA shifts audio fragments along the output stream,
34 * where as WSOLA shifts audio fragments along the input stream.
35 *
36 * The advantage of WSOLA algorithm is that the overlap region size is
37 * always the same, therefore the blending function is constant and
38 * can be precomputed.
39 */
40
41 #include <float.h>
42 #include "libavutil/avassert.h"
43 #include "libavutil/avstring.h"
44 #include "libavutil/channel_layout.h"
45 #include "libavutil/eval.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/samplefmt.h"
48 #include "libavutil/tx.h"
49 #include "avfilter.h"
50 #include "audio.h"
51 #include "internal.h"
52
53 /**
54 * A fragment of audio waveform
55 */
/**
 * A fragment of audio waveform
 */
typedef struct AudioFragment {
    // index of the first sample of this fragment in the overall waveform;
    // 0: input sample position
    // 1: output sample position
    int64_t position[2];

    // original packed multi-channel samples:
    uint8_t *data;

    // number of samples in this fragment:
    int nsamples;

    // rDFT transform of the down-mixed mono fragment, used for
    // fast waveform alignment via correlation in frequency domain;
    // xdat_in is the real-valued (time domain) input buffer,
    // xdat receives the forward transform:
    float *xdat_in;
    float *xdat;
} AudioFragment;
73
74 /**
75 * Filter state machine states
76 */
/**
 * Filter state machine states
 */
typedef enum {
    YAE_LOAD_FRAGMENT,      // accumulate input until the current fragment is full
    YAE_ADJUST_POSITION,    // align current fragment against the previous one
    YAE_RELOAD_FRAGMENT,    // re-load fragment data after a position correction
    YAE_OUTPUT_OVERLAP_ADD, // blend the overlap region and emit output
    YAE_FLUSH_OUTPUT,       // end-of-stream: drain whatever is buffered
} FilterState;
84
85 /**
86 * Filter state machine
87 */
/**
 * Filter state machine
 */
typedef struct ATempoContext {
    const AVClass *class;

    // ring-buffer of input samples, necessary because some times
    // input fragment position may be adjusted backwards:
    uint8_t *buffer;

    // ring-buffer maximum capacity, expressed in sample rate time base:
    int ring;

    // ring-buffer house keeping:
    int size;
    int head;
    int tail;

    // 0: input sample position corresponding to the ring buffer tail
    // 1: output sample position
    int64_t position[2];

    // first input timestamp, all other timestamps are offset by this one
    int64_t start_pts;

    // sample format:
    enum AVSampleFormat format;

    // number of channels:
    int channels;

    // row of bytes to skip from one sample to next, across multple channels;
    // stride = (number-of-channels * bits-per-sample-per-channel) / 8
    int stride;

    // fragment window size, power-of-two integer:
    int window;

    // Hann window coefficients, for feathering
    // (blending) the overlapping fragment region:
    float *hann;

    // tempo scaling factor:
    double tempo;

    // a snapshot of previous fragment input and output position values
    // captured when the tempo scale factor was set most recently:
    int64_t origin[2];

    // current/previous fragment ring-buffer:
    AudioFragment frag[2];

    // current fragment index (frag[nfrag % 2] is the current fragment):
    uint64_t nfrag;

    // current state:
    FilterState state;

    // for fast correlation calculation in frequency domain;
    // r2c_fn/c2r_fn are the transform callbacks returned by av_tx_init:
    AVTXContext *real_to_complex;
    AVTXContext *complex_to_real;
    av_tx_fn r2c_fn, c2r_fn;
    float *correlation_in;
    float *correlation;

    // for managing AVFilterPad.request_frame and AVFilterPad.filter_frame;
    // dst/dst_end track the write cursor within dst_buffer:
    AVFrame *dst_buffer;
    uint8_t *dst;
    uint8_t *dst_end;
    uint64_t nsamples_in;
    uint64_t nsamples_out;
} ATempoContext;
157
// accepted range of the "tempo" option (runtime-adjustable):
#define YAE_ATEMPO_MIN 0.5
#define YAE_ATEMPO_MAX 100.0

#define OFFSET(x) offsetof(ATempoContext, x)

static const AVOption atempo_options[] = {
    { "tempo", "set tempo scale factor",
      OFFSET(tempo), AV_OPT_TYPE_DOUBLE, { .dbl = 1.0 },
      YAE_ATEMPO_MIN,
      YAE_ATEMPO_MAX,
      AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_RUNTIME_PARAM },
    { NULL }
};

AVFILTER_DEFINE_CLASS(atempo);
173
yae_curr_frag(ATempoContext * atempo)174 inline static AudioFragment *yae_curr_frag(ATempoContext *atempo)
175 {
176 return &atempo->frag[atempo->nfrag % 2];
177 }
178
yae_prev_frag(ATempoContext * atempo)179 inline static AudioFragment *yae_prev_frag(ATempoContext *atempo)
180 {
181 return &atempo->frag[(atempo->nfrag + 1) % 2];
182 }
183
184 /**
185 * Reset filter to initial state, do not deallocate existing local buffers.
186 */
/**
 * Reset filter to initial state, do not deallocate existing local buffers.
 */
static void yae_clear(ATempoContext *atempo)
{
    int i;

    // empty the ring buffer:
    atempo->size = 0;
    atempo->head = 0;
    atempo->tail = 0;

    atempo->nfrag = 0;
    atempo->state = YAE_LOAD_FRAGMENT;
    atempo->start_pts = AV_NOPTS_VALUE;

    atempo->position[0] = 0;
    atempo->position[1] = 0;

    atempo->origin[0] = 0;
    atempo->origin[1] = 0;

    for (i = 0; i < 2; i++) {
        atempo->frag[i].position[0] = 0;
        atempo->frag[i].position[1] = 0;
        atempo->frag[i].nsamples    = 0;
    }

    // shift left position of 1st fragment by half a window
    // so that no re-normalization would be required for
    // the left half of the 1st fragment:
    atempo->frag[0].position[0] = -(int64_t)(atempo->window / 2);
    atempo->frag[0].position[1] = -(int64_t)(atempo->window / 2);

    // drop any partially filled output frame:
    av_frame_free(&atempo->dst_buffer);
    atempo->dst     = NULL;
    atempo->dst_end = NULL;

    atempo->nsamples_in  = 0;
    atempo->nsamples_out = 0;
}
224
225 /**
226 * Reset filter to initial state and deallocate all buffers.
227 */
/**
 * Reset filter to initial state and deallocate all buffers.
 */
static void yae_release_buffers(ATempoContext *atempo)
{
    int i;

    yae_clear(atempo);

    for (i = 0; i < 2; i++) {
        av_freep(&atempo->frag[i].data);
        av_freep(&atempo->frag[i].xdat_in);
        av_freep(&atempo->frag[i].xdat);
    }

    av_freep(&atempo->buffer);
    av_freep(&atempo->hann);
    av_freep(&atempo->correlation_in);
    av_freep(&atempo->correlation);

    av_tx_uninit(&atempo->real_to_complex);
    av_tx_uninit(&atempo->complex_to_real);
}
247
248 /* av_realloc is not aligned enough; fortunately, the data does not need to
249 * be preserved */
/* av_realloc is not aligned enough; fortunately, the data does not need to
 * be preserved */
/* Free the previous buffer (if any) and allocate a zero-filled
 * replacement; on allocation failure release all filter buffers and
 * return AVERROR(ENOMEM) from the enclosing function.  Expects a
 * variable named `atempo` in the enclosing scope. */
#define RE_MALLOC_OR_FAIL(field, field_size)      \
    do {                                          \
        av_freep(&field);                         \
        field = av_calloc(field_size, 1);         \
        if (!field) {                             \
            yae_release_buffers(atempo);          \
            return AVERROR(ENOMEM);               \
        }                                         \
    } while (0)
259
260 /**
261 * Prepare filter for processing audio data of given format,
262 * sample rate and number of channels.
263 */
/**
 * Prepare filter for processing audio data of given format,
 * sample rate and number of channels.
 *
 * (Re)allocates the fragment buffers, ring buffer and Hann window
 * table, and (re)creates the forward/inverse rDFT contexts.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 *         (all buffers are released before returning the error).
 */
static int yae_reset(ATempoContext *atempo,
                     enum AVSampleFormat format,
                     int sample_rate,
                     int channels)
{
    const int sample_size = av_get_bytes_per_sample(format);
    uint32_t nlevels  = 0;
    float scale = 1.f, iscale = 1.f;
    uint32_t pot;
    int i;

    atempo->format   = format;
    atempo->channels = channels;
    atempo->stride   = sample_size * channels;

    // pick a segment window size:
    atempo->window = sample_rate / 24;

    // adjust window size to be a power-of-two integer:
    nlevels = av_log2(atempo->window);
    pot = 1 << nlevels;
    av_assert0(pot <= atempo->window);

    if (pot < atempo->window) {
        // round up to the next power of two:
        atempo->window = pot * 2;
        nlevels++;
    }

    // initialize audio fragment buffers:
    RE_MALLOC_OR_FAIL(atempo->frag[0].data, atempo->window * atempo->stride);
    RE_MALLOC_OR_FAIL(atempo->frag[1].data, atempo->window * atempo->stride);
    // window + 1 complex bins to cover the rDFT output of 2 * window reals:
    RE_MALLOC_OR_FAIL(atempo->frag[0].xdat_in, (atempo->window + 1) * sizeof(AVComplexFloat));
    RE_MALLOC_OR_FAIL(atempo->frag[1].xdat_in, (atempo->window + 1) * sizeof(AVComplexFloat));
    RE_MALLOC_OR_FAIL(atempo->frag[0].xdat, (atempo->window + 1) * sizeof(AVComplexFloat));
    RE_MALLOC_OR_FAIL(atempo->frag[1].xdat, (atempo->window + 1) * sizeof(AVComplexFloat));

    // initialize rDFT contexts (transform length is 2 * window):
    av_tx_uninit(&atempo->real_to_complex);
    av_tx_uninit(&atempo->complex_to_real);

    av_tx_init(&atempo->real_to_complex, &atempo->r2c_fn, AV_TX_FLOAT_RDFT, 0, 1 << (nlevels + 1), &scale, 0);
    if (!atempo->real_to_complex) {
        yae_release_buffers(atempo);
        return AVERROR(ENOMEM);
    }

    av_tx_init(&atempo->complex_to_real, &atempo->c2r_fn, AV_TX_FLOAT_RDFT, 1, 1 << (nlevels + 1), &iscale, 0);
    if (!atempo->complex_to_real) {
        yae_release_buffers(atempo);
        return AVERROR(ENOMEM);
    }

    RE_MALLOC_OR_FAIL(atempo->correlation_in, (atempo->window + 1) * sizeof(AVComplexFloat));
    RE_MALLOC_OR_FAIL(atempo->correlation, atempo->window * sizeof(AVComplexFloat));

    // ring buffer must hold several windows so the fragment position
    // can be adjusted backwards:
    atempo->ring = atempo->window * 3;
    RE_MALLOC_OR_FAIL(atempo->buffer, atempo->ring * atempo->stride);

    // initialize the Hann window function:
    RE_MALLOC_OR_FAIL(atempo->hann, atempo->window * sizeof(float));

    for (i = 0; i < atempo->window; i++) {
        double t = (double)i / (double)(atempo->window - 1);
        double h = 0.5 * (1.0 - cos(2.0 * M_PI * t));
        atempo->hann[i] = (float)h;
    }

    yae_clear(atempo);
    return 0;
}
334
/**
 * Re-capture the drift origin from the previous fragment's center;
 * called after the tempo option has been changed at runtime.
 */
static int yae_update(AVFilterContext *ctx)
{
    ATempoContext *atempo = ctx->priv;
    const AudioFragment *prev = yae_prev_frag(atempo);
    const int half = atempo->window / 2;

    atempo->origin[0] = prev->position[0] + half;
    atempo->origin[1] = prev->position[1] + half;
    return 0;
}
345
346 /**
347 * A helper macro for initializing complex data buffer with scalar data
348 * of a given type.
349 */
/**
 * A helper macro for initializing complex data buffer with scalar data
 * of a given type.
 *
 * Down-mixes the packed multi-channel samples pointed to by `src` into
 * the mono float buffer frag->xdat_in: with a single channel each sample
 * is converted directly; with multiple channels the per-frame sample
 * whose magnitude (clamped to scalar_max for the comparison) is largest
 * is kept, preserving the dominant channel's waveform.  Expects `src`,
 * `frag` and `atempo` in the enclosing scope; advances `src`.
 */
#define yae_init_xdat(scalar_type, scalar_max)                          \
    do {                                                                \
        const uint8_t *src_end = src +                                  \
            frag->nsamples * atempo->channels * sizeof(scalar_type);    \
                                                                        \
        float *xdat = frag->xdat_in;                                    \
        scalar_type tmp;                                                \
                                                                        \
        if (atempo->channels == 1) {                                    \
            for (; src < src_end; xdat++) {                             \
                tmp = *(const scalar_type *)src;                        \
                src += sizeof(scalar_type);                             \
                                                                        \
                *xdat = (float)tmp;                                     \
            }                                                           \
        } else {                                                        \
            float s, max, ti, si;                                       \
            int i;                                                      \
                                                                        \
            for (; src < src_end; xdat++) {                             \
                tmp = *(const scalar_type *)src;                        \
                src += sizeof(scalar_type);                             \
                                                                        \
                max = (float)tmp;                                       \
                s = FFMIN((float)scalar_max,                            \
                          (float)fabsf(max));                           \
                                                                        \
                for (i = 1; i < atempo->channels; i++) {                \
                    tmp = *(const scalar_type *)src;                    \
                    src += sizeof(scalar_type);                         \
                                                                        \
                    ti = (float)tmp;                                    \
                    si = FFMIN((float)scalar_max,                       \
                               (float)fabsf(ti));                       \
                                                                        \
                    if (s < si) {                                       \
                        s = si;                                         \
                        max = ti;                                       \
                    }                                                   \
                }                                                       \
                                                                        \
                *xdat = max;                                            \
            }                                                           \
        }                                                               \
    } while (0)
395
396 /**
397 * Initialize complex data buffer of a given audio fragment
398 * with down-mixed mono data of appropriate scalar type.
399 */
/**
 * Initialize complex data buffer of a given audio fragment
 * with down-mixed mono data of appropriate scalar type.
 */
static void yae_downmix(ATempoContext *atempo, AudioFragment *frag)
{
    // shortcut:
    const uint8_t *src = frag->data;

    // init complex data buffer used for FFT and Correlation:
    memset(frag->xdat_in, 0, sizeof(AVComplexFloat) * (atempo->window + 1));

    // dispatch on the negotiated sample format:
    switch (atempo->format) {
    case AV_SAMPLE_FMT_U8:
        yae_init_xdat(uint8_t, 127);
        break;
    case AV_SAMPLE_FMT_S16:
        yae_init_xdat(int16_t, 32767);
        break;
    case AV_SAMPLE_FMT_S32:
        yae_init_xdat(int, 2147483647);
        break;
    case AV_SAMPLE_FMT_FLT:
        yae_init_xdat(float, 1);
        break;
    case AV_SAMPLE_FMT_DBL:
        yae_init_xdat(double, 1);
        break;
    default:
        break;
    }
}
420
421 /**
422 * Populate the internal data buffer on as-needed basis.
423 *
424 * @return
425 * 0 if requested data was already available or was successfully loaded,
426 * AVERROR(EAGAIN) if more input data is required.
427 */
/**
 * Populate the internal data buffer on as-needed basis.
 *
 * Copies samples from *src_ref into the ring buffer until the input
 * position reaches stop_here or the source is exhausted, updating
 * the ring-buffer head/tail/size bookkeeping as it goes.
 *
 * @return
 *   0 if requested data was already available or was successfully loaded,
 *   AVERROR(EAGAIN) if more input data is required.
 */
static int yae_load_data(ATempoContext *atempo,
                         const uint8_t **src_ref,
                         const uint8_t *src_end,
                         int64_t stop_here)
{
    // shortcut:
    const uint8_t *src = *src_ref;
    const int read_size = stop_here - atempo->position[0];

    if (stop_here <= atempo->position[0]) {
        // requested data is already in the ring buffer:
        return 0;
    }

    // samples are not expected to be skipped, unless tempo is greater than 2:
    av_assert0(read_size <= atempo->ring || atempo->tempo > 2.0);

    while (atempo->position[0] < stop_here && src < src_end) {
        int src_samples = (src_end - src) / atempo->stride;

        // load data piece-wise, in order to avoid complicating the logic:
        int nsamples = FFMIN(read_size, src_samples);
        int na;
        int nb;

        // na: contiguous run up to the end of the ring buffer,
        // nb: wrap-around remainder starting at the beginning:
        nsamples = FFMIN(nsamples, atempo->ring);
        na = FFMIN(nsamples, atempo->ring - atempo->tail);
        nb = FFMIN(nsamples - na, atempo->ring);

        if (na) {
            uint8_t *a = atempo->buffer + atempo->tail * atempo->stride;
            memcpy(a, src, na * atempo->stride);

            src += na * atempo->stride;
            atempo->position[0] += na;

            // once full, the ring buffer stays full and head trails tail:
            atempo->size = FFMIN(atempo->size + na, atempo->ring);
            atempo->tail = (atempo->tail + na) % atempo->ring;
            atempo->head =
                atempo->size < atempo->ring ?
                atempo->tail - atempo->size :
                atempo->tail;
        }

        if (nb) {
            uint8_t *b = atempo->buffer;
            memcpy(b, src, nb * atempo->stride);

            src += nb * atempo->stride;
            atempo->position[0] += nb;

            atempo->size = FFMIN(atempo->size + nb, atempo->ring);
            atempo->tail = (atempo->tail + nb) % atempo->ring;
            atempo->head =
                atempo->size < atempo->ring ?
                atempo->tail - atempo->size :
                atempo->tail;
        }
    }

    // pass back the updated source buffer pointer:
    *src_ref = src;

    // sanity check:
    av_assert0(atempo->position[0] <= stop_here);

    return atempo->position[0] == stop_here ? 0 : AVERROR(EAGAIN);
}
495
496 /**
497 * Populate current audio fragment data buffer.
498 *
499 * @return
500 * 0 when the fragment is ready,
501 * AVERROR(EAGAIN) if more input data is required.
502 */
/**
 * Populate current audio fragment data buffer.
 *
 * Pulls up to one window of samples out of the ring buffer into the
 * current fragment, zero-filling any region that precedes the oldest
 * data still held in the ring buffer.  When src_ref is NULL (flush),
 * only whatever is already buffered is used.
 *
 * @return
 *   0 when the fragment is ready,
 *   AVERROR(EAGAIN) if more input data is required.
 */
static int yae_load_frag(ATempoContext *atempo,
                         const uint8_t **src_ref,
                         const uint8_t *src_end)
{
    // shortcuts:
    AudioFragment *frag = yae_curr_frag(atempo);
    uint8_t *dst;
    int64_t missing, start, zeros;
    uint32_t nsamples;
    const uint8_t *a, *b;
    int i0, i1, n0, n1, na, nb;

    int64_t stop_here = frag->position[0] + atempo->window;
    if (src_ref && yae_load_data(atempo, src_ref, src_end, stop_here) != 0) {
        return AVERROR(EAGAIN);
    }

    // calculate the number of samples we don't have:
    missing =
        stop_here > atempo->position[0] ?
        stop_here - atempo->position[0] : 0;

    nsamples =
        missing < (int64_t)atempo->window ?
        (uint32_t)(atempo->window - missing) : 0;

    // setup the output buffer:
    frag->nsamples = nsamples;
    dst = frag->data;

    // start = input position of the oldest sample in the ring buffer:
    start = atempo->position[0] - atempo->size;
    zeros = 0;

    if (frag->position[0] < start) {
        // what we don't have we substitute with zeros:
        zeros = FFMIN(start - frag->position[0], (int64_t)nsamples);
        av_assert0(zeros != nsamples);

        memset(dst, 0, zeros * atempo->stride);
        dst += zeros * atempo->stride;
    }

    if (zeros == nsamples) {
        // nothing but silence in this fragment:
        return 0;
    }

    // get the remaining data from the ring buffer;
    // na: contiguous run from head, nb: wrap-around run from the start:
    na = (atempo->head < atempo->tail ?
          atempo->tail - atempo->head :
          atempo->ring - atempo->head);

    nb = atempo->head < atempo->tail ? 0 : atempo->tail;

    // sanity check:
    av_assert0(nsamples <= zeros + na + nb);

    a = atempo->buffer + atempo->head * atempo->stride;
    b = atempo->buffer;

    // i0/i1: offsets into the two ring-buffer runs,
    // n0/n1: sample counts to copy from each run:
    i0 = frag->position[0] + zeros - start;
    i1 = i0 < na ? 0 : i0 - na;

    n0 = i0 < na ? FFMIN(na - i0, (int)(nsamples - zeros)) : 0;
    n1 = nsamples - zeros - n0;

    if (n0) {
        memcpy(dst, a + i0 * atempo->stride, n0 * atempo->stride);
        dst += n0 * atempo->stride;
    }

    if (n1) {
        memcpy(dst, b + i1 * atempo->stride, n1 * atempo->stride);
    }

    return 0;
}
579
580 /**
581 * Prepare for loading next audio fragment.
582 */
/**
 * Prepare for loading next audio fragment.
 */
static void yae_advance_to_next_frag(ATempoContext *atempo)
{
    // input advances by tempo * half-window per output half-window:
    const double step = atempo->tempo * (double)(atempo->window / 2);
    const AudioFragment *prev;
    AudioFragment *curr;

    atempo->nfrag++;
    prev = yae_prev_frag(atempo);
    curr = yae_curr_frag(atempo);

    curr->position[0] = prev->position[0] + (int64_t)step;
    curr->position[1] = prev->position[1] + atempo->window / 2;
    curr->nsamples    = 0;
}
598
599 /**
600 * Calculate cross-correlation via rDFT.
601 *
602 * Multiply two vectors of complex numbers (result of real_to_complex rDFT)
603 * and transform back via complex_to_real rDFT.
604 */
/**
 * Calculate cross-correlation via rDFT.
 *
 * Multiply two vectors of complex numbers (result of real_to_complex rDFT)
 * and transform back via complex_to_real rDFT.
 */
static void yae_xcorr_via_rdft(float *xcorr_in,
                               float *xcorr,
                               AVTXContext *complex_to_real,
                               av_tx_fn c2r_fn,
                               const AVComplexFloat *xa,
                               const AVComplexFloat *xb,
                               const int window)
{
    AVComplexFloat *xc = (AVComplexFloat *)xcorr_in;
    int i;

    // multiply xa by the complex conjugate of xb, bin by bin:
    for (i = 0; i <= window; i++) {
        const AVComplexFloat va = xa[i];
        const AVComplexFloat vb = xb[i];

        xc[i].re = va.re * vb.re + va.im * vb.im;
        xc[i].im = va.im * vb.re - va.re * vb.im;
    }

    // apply inverse rDFT:
    c2r_fn(complex_to_real, xcorr, xcorr_in, sizeof(float));
}
624
625 /**
626 * Calculate alignment offset for given fragment
627 * relative to the previous fragment.
628 *
629 * @return alignment offset of current fragment relative to previous.
630 */
/**
 * Calculate alignment offset for given fragment
 * relative to the previous fragment.
 *
 * Computes the cross-correlation of the two fragments, then searches a
 * window of candidate offsets around the nominal half-window overlap,
 * weighting each correlation peak to favor offsets that counteract the
 * accumulated drift.
 *
 * @return alignment offset of current fragment relative to previous.
 */
static int yae_align(AudioFragment *frag,
                     const AudioFragment *prev,
                     const int window,
                     const int delta_max,
                     const int drift,
                     float *correlation_in,
                     float *correlation,
                     AVTXContext *complex_to_real,
                     av_tx_fn c2r_fn)
{
    int best_offset = -drift;
    float best_metric = -FLT_MAX;
    float *xcorr;

    int i0;
    int i1;
    int i;

    yae_xcorr_via_rdft(correlation_in,
                       correlation,
                       complex_to_real,
                       c2r_fn,
                       (const AVComplexFloat *)prev->xdat,
                       (const AVComplexFloat *)frag->xdat,
                       window);

    // identify search window boundaries:
    i0 = FFMAX(window / 2 - delta_max - drift, 0);
    i0 = FFMIN(i0, window);

    i1 = FFMIN(window / 2 + delta_max - drift, window - window / 16);
    i1 = FFMAX(i1, 0);

    // identify cross-correlation peaks within search window:
    xcorr = correlation + i0;

    for (i = i0; i < i1; i++, xcorr++) {
        float metric = *xcorr;

        // normalize: bias the correlation toward drift-correcting
        // offsets and away from the edges of the search window:
        float drifti = (float)(drift + i);
        metric *= drifti * (float)(i - i0) * (float)(i1 - i);

        if (metric > best_metric) {
            best_metric = metric;
            best_offset = i - window / 2;
        }
    }

    return best_offset;
}
682
683 /**
684 * Adjust current fragment position for better alignment
685 * with previous fragment.
686 *
687 * @return alignment correction.
688 */
/**
 * Adjust current fragment position for better alignment
 * with previous fragment.
 *
 * The drift is the difference between where the output stream actually
 * is (scaled by tempo) and where it ideally should be relative to the
 * input, both measured from the origin snapshot taken when the tempo
 * was last changed.
 *
 * @return alignment correction.
 */
static int yae_adjust_position(ATempoContext *atempo)
{
    const AudioFragment *prev = yae_prev_frag(atempo);
    AudioFragment *frag = yae_curr_frag(atempo);

    const double prev_output_position =
        (double)(prev->position[1] - atempo->origin[1] + atempo->window / 2) *
        atempo->tempo;

    const double ideal_output_position =
        (double)(prev->position[0] - atempo->origin[0] + atempo->window / 2);

    const int drift = (int)(prev_output_position - ideal_output_position);

    const int delta_max = atempo->window / 2;
    const int correction = yae_align(frag,
                                     prev,
                                     atempo->window,
                                     delta_max,
                                     drift,
                                     atempo->correlation_in,
                                     atempo->correlation,
                                     atempo->complex_to_real,
                                     atempo->c2r_fn);

    if (correction) {
        // adjust fragment position:
        frag->position[0] -= correction;

        // clear so that the fragment can be reloaded:
        frag->nsamples = 0;
    }

    return correction;
}
724
725 /**
726 * A helper macro for blending the overlap region of previous
727 * and current audio fragment.
728 */
/**
 * A helper macro for blending the overlap region of previous
 * and current audio fragment.
 *
 * Walks the overlap sample-by-sample, weighting the previous fragment
 * by *wa and the current one by *wb (Hann window halves) across all
 * channels.  Samples that precede the start of the input stream
 * (frag->position[0] + i < 0) are passed through unblended.  Expects
 * `a`, `b`, `wa`, `wb`, `dst`, `dst_end`, `overlap`, `frag` and
 * `atempo` in the enclosing scope; advances `dst`, `wa`, `wb` and
 * atempo->position[1].
 */
#define yae_blend(scalar_type)                                          \
    do {                                                                \
        const scalar_type *aaa = (const scalar_type *)a;                \
        const scalar_type *bbb = (const scalar_type *)b;                \
                                                                        \
        scalar_type *out     = (scalar_type *)dst;                      \
        scalar_type *out_end = (scalar_type *)dst_end;                  \
        int64_t i;                                                      \
                                                                        \
        for (i = 0; i < overlap && out < out_end;                       \
             i++, atempo->position[1]++, wa++, wb++) {                  \
            float w0 = *wa;                                             \
            float w1 = *wb;                                             \
            int j;                                                      \
                                                                        \
            for (j = 0; j < atempo->channels;                           \
                 j++, aaa++, bbb++, out++) {                            \
                float t0 = (float)*aaa;                                 \
                float t1 = (float)*bbb;                                 \
                                                                        \
                *out =                                                  \
                    frag->position[0] + i < 0 ?                         \
                    *aaa :                                              \
                    (scalar_type)(t0 * w0 + t1 * w1);                   \
            }                                                           \
        }                                                               \
        dst = (uint8_t *)out;                                           \
    } while (0)
757
758 /**
759 * Blend the overlap region of previous and current audio fragment
760 * and output the results to the given destination buffer.
761 *
762 * @return
763 * 0 if the overlap region was completely stored in the dst buffer,
764 * AVERROR(EAGAIN) if more destination buffer space is required.
765 */
/**
 * Blend the overlap region of previous and current audio fragment
 * and output the results to the given destination buffer.
 *
 * @return
 *   0 if the overlap region was completely stored in the dst buffer,
 *   AVERROR(EAGAIN) if more destination buffer space is required.
 */
static int yae_overlap_add(ATempoContext *atempo,
                           uint8_t **dst_ref,
                           uint8_t *dst_end)
{
    // shortcuts:
    const AudioFragment *prev = yae_prev_frag(atempo);
    const AudioFragment *frag = yae_curr_frag(atempo);

    // resume where a previous (partial) call left off;
    // position[1] tracks how much of the overlap was already emitted:
    const int64_t start_here = FFMAX(atempo->position[1],
                                     frag->position[1]);

    // the overlap ends where either fragment runs out of samples:
    const int64_t stop_here = FFMIN(prev->position[1] + prev->nsamples,
                                    frag->position[1] + frag->nsamples);

    const int64_t overlap = stop_here - start_here;

    // offsets into the previous/current fragment data:
    const int64_t ia = start_here - prev->position[1];
    const int64_t ib = start_here - frag->position[1];

    // Hann window weights used for feathering the two fragments:
    const float *wa = atempo->hann + ia;
    const float *wb = atempo->hann + ib;

    const uint8_t *a = prev->data + ia * atempo->stride;
    const uint8_t *b = frag->data + ib * atempo->stride;

    uint8_t *dst = *dst_ref;

    av_assert0(start_here <= stop_here &&
               frag->position[1] <= start_here &&
               overlap <= frag->nsamples);

    // dispatch on sample format; yae_blend uses the locals set up above:
    if (atempo->format == AV_SAMPLE_FMT_U8) {
        yae_blend(uint8_t);
    } else if (atempo->format == AV_SAMPLE_FMT_S16) {
        yae_blend(int16_t);
    } else if (atempo->format == AV_SAMPLE_FMT_S32) {
        yae_blend(int);
    } else if (atempo->format == AV_SAMPLE_FMT_FLT) {
        yae_blend(float);
    } else if (atempo->format == AV_SAMPLE_FMT_DBL) {
        yae_blend(double);
    }

    // pass-back the updated destination buffer pointer:
    *dst_ref = dst;

    return atempo->position[1] == stop_here ? 0 : AVERROR(EAGAIN);
}
814
815 /**
816 * Feed as much data to the filter as it is able to consume
817 * and receive as much processed data in the destination buffer
818 * as it is able to produce or store.
819 */
/**
 * Feed as much data to the filter as it is able to consume
 * and receive as much processed data in the destination buffer
 * as it is able to produce or store.
 *
 * Runs the WSOLA state machine (load -> adjust -> [reload] ->
 * overlap-add) until either the input is exhausted or the output
 * buffer is full; src_ref and dst_ref are advanced accordingly.
 */
static void
yae_apply(ATempoContext *atempo,
          const uint8_t **src_ref,
          const uint8_t *src_end,
          uint8_t **dst_ref,
          uint8_t *dst_end)
{
    while (1) {
        if (atempo->state == YAE_LOAD_FRAGMENT) {
            // load additional data for the current fragment:
            if (yae_load_frag(atempo, src_ref, src_end) != 0) {
                break;
            }

            // down-mix to mono:
            yae_downmix(atempo, yae_curr_frag(atempo));

            // apply rDFT:
            atempo->r2c_fn(atempo->real_to_complex, yae_curr_frag(atempo)->xdat, yae_curr_frag(atempo)->xdat_in, sizeof(float));

            // must load the second fragment before alignment can start:
            if (!atempo->nfrag) {
                yae_advance_to_next_frag(atempo);
                continue;
            }

            atempo->state = YAE_ADJUST_POSITION;
        }

        if (atempo->state == YAE_ADJUST_POSITION) {
            // adjust position for better alignment:
            if (yae_adjust_position(atempo)) {
                // reload the fragment at the corrected position, so that the
                // Hann window blending would not require normalization:
                atempo->state = YAE_RELOAD_FRAGMENT;
            } else {
                atempo->state = YAE_OUTPUT_OVERLAP_ADD;
            }
        }

        if (atempo->state == YAE_RELOAD_FRAGMENT) {
            // load additional data if necessary due to position adjustment:
            if (yae_load_frag(atempo, src_ref, src_end) != 0) {
                break;
            }

            // down-mix to mono:
            yae_downmix(atempo, yae_curr_frag(atempo));

            // apply rDFT:
            atempo->r2c_fn(atempo->real_to_complex, yae_curr_frag(atempo)->xdat, yae_curr_frag(atempo)->xdat_in, sizeof(float));

            atempo->state = YAE_OUTPUT_OVERLAP_ADD;
        }

        if (atempo->state == YAE_OUTPUT_OVERLAP_ADD) {
            // overlap-add and output the result:
            if (yae_overlap_add(atempo, dst_ref, dst_end) != 0) {
                break;
            }

            // advance to the next fragment, repeat:
            yae_advance_to_next_frag(atempo);
            atempo->state = YAE_LOAD_FRAGMENT;
        }
    }
}
887
888 /**
889 * Flush any buffered data from the filter.
890 *
891 * @return
892 * 0 if all data was completely stored in the dst buffer,
893 * AVERROR(EAGAIN) if more destination buffer space is required.
894 */
/**
 * Flush any buffered data from the filter.
 *
 * Finishes the current (possibly partial) fragment, blends whatever
 * overlap remains with the previous fragment, then copies the
 * unblended remainder of the current fragment straight to the output.
 *
 * @return
 *   0 if all data was completely stored in the dst buffer,
 *   AVERROR(EAGAIN) if more destination buffer space is required.
 */
static int yae_flush(ATempoContext *atempo,
                     uint8_t **dst_ref,
                     uint8_t *dst_end)
{
    AudioFragment *frag = yae_curr_frag(atempo);
    int64_t overlap_end;
    int64_t start_here;
    int64_t stop_here;
    int64_t offset;

    const uint8_t *src;
    uint8_t *dst;

    int src_size;
    int dst_size;
    int nbytes;

    atempo->state = YAE_FLUSH_OUTPUT;

    if (!atempo->nfrag) {
        // there is nothing to flush:
        return 0;
    }

    if (atempo->position[0] == frag->position[0] + frag->nsamples &&
        atempo->position[1] == frag->position[1] + frag->nsamples) {
        // the current fragment is already flushed:
        return 0;
    }

    if (frag->position[0] + frag->nsamples < atempo->position[0]) {
        // finish loading the current (possibly partial) fragment
        // from the ring buffer (NULL src: no new input available):
        yae_load_frag(atempo, NULL, NULL);

        if (atempo->nfrag) {
            // down-mix to mono:
            yae_downmix(atempo, frag);

            // apply rDFT:
            atempo->r2c_fn(atempo->real_to_complex, frag->xdat, frag->xdat_in, sizeof(float));

            // align current fragment to previous fragment:
            if (yae_adjust_position(atempo)) {
                // reload the current fragment due to adjusted position:
                yae_load_frag(atempo, NULL, NULL);
            }
        }
    }

    // flush the overlap region:
    overlap_end = frag->position[1] + FFMIN(atempo->window / 2,
                                            frag->nsamples);

    while (atempo->position[1] < overlap_end) {
        if (yae_overlap_add(atempo, dst_ref, dst_end) != 0) {
            // ran out of destination space:
            return AVERROR(EAGAIN);
        }
    }

    // check whether all of the input samples have been consumed:
    if (frag->position[0] + frag->nsamples < atempo->position[0]) {
        // more input remains buffered; move on to the next fragment:
        yae_advance_to_next_frag(atempo);
        return AVERROR(EAGAIN);
    }

    // flush the remainder of the current fragment:
    start_here = FFMAX(atempo->position[1], overlap_end);
    stop_here  = frag->position[1] + frag->nsamples;
    offset     = start_here - frag->position[1];
    av_assert0(start_here <= stop_here && frag->position[1] <= start_here);

    src = frag->data + offset * atempo->stride;
    dst = (uint8_t *)*dst_ref;

    src_size = (int)(stop_here - start_here) * atempo->stride;
    dst_size = dst_end - dst;
    nbytes = FFMIN(src_size, dst_size);

    memcpy(dst, src, nbytes);
    dst += nbytes;

    atempo->position[1] += (nbytes / atempo->stride);

    // pass-back the updated destination buffer pointer:
    *dst_ref = (uint8_t *)dst;

    return atempo->position[1] == stop_here ? 0 : AVERROR(EAGAIN);
}
983
init(AVFilterContext * ctx)984 static av_cold int init(AVFilterContext *ctx)
985 {
986 ATempoContext *atempo = ctx->priv;
987 atempo->format = AV_SAMPLE_FMT_NONE;
988 atempo->state = YAE_LOAD_FRAGMENT;
989 return 0;
990 }
991
uninit(AVFilterContext * ctx)992 static av_cold void uninit(AVFilterContext *ctx)
993 {
994 ATempoContext *atempo = ctx->priv;
995 yae_release_buffers(atempo);
996 }
997
// WSOLA necessitates an internal sliding window ring buffer
// for incoming audio stream.
//
// Planar sample formats are too cumbersome to store in a ring buffer,
// therefore planar sample formats are not supported.
//
// Only the packed (interleaved) formats handled by yae_init_xdat /
// yae_blend are listed here.
static const enum AVSampleFormat sample_fmts[] = {
    AV_SAMPLE_FMT_U8,
    AV_SAMPLE_FMT_S16,
    AV_SAMPLE_FMT_S32,
    AV_SAMPLE_FMT_FLT,
    AV_SAMPLE_FMT_DBL,
    AV_SAMPLE_FMT_NONE
};
1012
/**
 * Configure the filter for the negotiated input format,
 * sample rate and channel count.
 */
static int config_props(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ATempoContext *atempo = ctx->priv;

    return yae_reset(atempo,
                     inlink->format,
                     inlink->sample_rate,
                     inlink->ch_layout.nb_channels);
}
1023
/**
 * Send n_out processed samples downstream in atempo->dst_buffer.
 *
 * Ownership of dst_buffer passes to ff_filter_frame; the context's
 * buffer pointers are cleared before the return value is checked so
 * they are never left dangling.
 *
 * @return 0 on success, a negative AVERROR on failure.
 */
static int push_samples(ATempoContext *atempo,
                        AVFilterLink *outlink,
                        int n_out)
{
    int ret;

    atempo->dst_buffer->sample_rate = outlink->sample_rate;
    atempo->dst_buffer->nb_samples  = n_out;

    // adjust the PTS: derive it from the output sample count so the
    // output timeline stays continuous regardless of tempo changes:
    atempo->dst_buffer->pts = atempo->start_pts +
        av_rescale_q(atempo->nsamples_out,
                     (AVRational){ 1, outlink->sample_rate },
                     outlink->time_base);

    ret = ff_filter_frame(outlink, atempo->dst_buffer);
    atempo->dst_buffer = NULL;
    atempo->dst        = NULL;
    atempo->dst_end    = NULL;
    if (ret < 0)
        return ret;

    atempo->nsamples_out += n_out;
    return 0;
}
1049
/**
 * Process one input frame: run its samples through the WSOLA state
 * machine and push each completed output buffer downstream.
 * Consumes (frees) src_buffer on every path.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *src_buffer)
{
    AVFilterContext  *ctx = inlink->dst;
    ATempoContext *atempo = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    int ret = 0;
    int n_in = src_buffer->nb_samples;
    // expected output size for this input at the current tempo (rounded):
    int n_out = (int)(0.5 + ((double)n_in) / atempo->tempo);

    const uint8_t *src = src_buffer->data[0];
    const uint8_t *src_end = src + n_in * atempo->stride;

    // latch the first timestamp; all output PTS are offsets from it:
    if (atempo->start_pts == AV_NOPTS_VALUE)
        atempo->start_pts = av_rescale_q(src_buffer->pts,
                                         inlink->time_base,
                                         outlink->time_base);

    while (src < src_end) {
        if (!atempo->dst_buffer) {
            atempo->dst_buffer = ff_get_audio_buffer(outlink, n_out);
            if (!atempo->dst_buffer) {
                av_frame_free(&src_buffer);
                return AVERROR(ENOMEM);
            }
            av_frame_copy_props(atempo->dst_buffer, src_buffer);

            atempo->dst = atempo->dst_buffer->data[0];
            atempo->dst_end = atempo->dst + n_out * atempo->stride;
        }

        yae_apply(atempo, &src, src_end, &atempo->dst, atempo->dst_end);

        // ship the output buffer once it is completely filled:
        if (atempo->dst == atempo->dst_end) {
            int n_samples = ((atempo->dst - atempo->dst_buffer->data[0]) /
                             atempo->stride);
            ret = push_samples(atempo, outlink, n_samples);
            if (ret < 0)
                goto end;
        }
    }

    atempo->nsamples_in += n_in;
end:
    av_frame_free(&src_buffer);
    return ret;
}
1097
/**
 * Request a frame from upstream; on EOF, drain the filter by
 * repeatedly flushing into ring-buffer-sized output frames until
 * yae_flush reports completion.
 */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ATempoContext *atempo = ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF) {
        // flush the filter:
        int n_max = atempo->ring;
        int n_out;
        int err = AVERROR(EAGAIN);

        while (err == AVERROR(EAGAIN)) {
            if (!atempo->dst_buffer) {
                atempo->dst_buffer = ff_get_audio_buffer(outlink, n_max);
                if (!atempo->dst_buffer)
                    return AVERROR(ENOMEM);

                atempo->dst = atempo->dst_buffer->data[0];
                atempo->dst_end = atempo->dst + n_max * atempo->stride;
            }

            err = yae_flush(atempo, &atempo->dst, atempo->dst_end);

            n_out = ((atempo->dst - atempo->dst_buffer->data[0]) /
                     atempo->stride);

            if (n_out) {
                ret = push_samples(atempo, outlink, n_out);
                if (ret < 0)
                    return ret;
            }
        }

        // discard any leftover (empty) output buffer:
        av_frame_free(&atempo->dst_buffer);
        atempo->dst = NULL;
        atempo->dst_end = NULL;

        return AVERROR_EOF;
    }

    return ret;
}
1143
/**
 * Handle a runtime option change (e.g. "tempo"); after the option is
 * applied, re-capture the drift origin so alignment is measured
 * relative to the moment the tempo changed.
 */
static int process_command(AVFilterContext *ctx,
                           const char *cmd,
                           const char *arg,
                           char *res,
                           int res_len,
                           int flags)
{
    const int ret = ff_filter_process_command(ctx, cmd, arg, res, res_len, flags);

    return ret < 0 ? ret : yae_update(ctx);
}
1158
// single audio input: consumes frames and configures the filter
// when the link format is negotiated:
static const AVFilterPad atempo_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_props,
    },
};

// single audio output: drives draining at EOF via request_frame:
static const AVFilterPad atempo_outputs[] = {
    {
        .name          = "default",
        .request_frame = request_frame,
        .type          = AVMEDIA_TYPE_AUDIO,
    },
};
1175
// filter registration: "atempo" adjusts audio tempo without changing pitch
const AVFilter ff_af_atempo = {
    .name            = "atempo",
    .description     = NULL_IF_CONFIG_SMALL("Adjust audio tempo."),
    .init            = init,
    .uninit          = uninit,
    .process_command = process_command,
    .priv_size       = sizeof(ATempoContext),
    .priv_class      = &atempo_class,
    FILTER_INPUTS(atempo_inputs),
    FILTER_OUTPUTS(atempo_outputs),
    FILTER_SAMPLEFMTS_ARRAY(sample_fmts),
};
1188