1 /*
2 * Monkey's Audio lossless audio decoder
3 * Copyright (c) 2007 Benjamin Zores <ben@geexbox.org>
4 * based upon libdemac from Dave Chapman.
5 *
6 * This file is part of FFmpeg.
7 *
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #include <inttypes.h>
24
25 #include "libavutil/avassert.h"
26 #include "libavutil/channel_layout.h"
27 #include "libavutil/crc.h"
28 #include "libavutil/opt.h"
29 #include "lossless_audiodsp.h"
30 #include "avcodec.h"
31 #include "bswapdsp.h"
32 #include "bytestream.h"
33 #include "internal.h"
34 #include "get_bits.h"
35 #include "unary.h"
36
37 /**
38 * @file
39 * Monkey's Audio lossless audio decoder
40 */
41
42 #define MAX_CHANNELS 2
43 #define MAX_BYTESPERSAMPLE 3
44
45 #define APE_FRAMECODE_MONO_SILENCE 1
46 #define APE_FRAMECODE_STEREO_SILENCE 3
47 #define APE_FRAMECODE_PSEUDO_STEREO 4
48
49 #define HISTORY_SIZE 512
50 #define PREDICTOR_ORDER 8
51 /** Total size of all predictor histories */
52 #define PREDICTOR_SIZE 50
53
54 #define YDELAYA (18 + PREDICTOR_ORDER*4)
55 #define YDELAYB (18 + PREDICTOR_ORDER*3)
56 #define XDELAYA (18 + PREDICTOR_ORDER*2)
57 #define XDELAYB (18 + PREDICTOR_ORDER)
58
59 #define YADAPTCOEFFSA 18
60 #define XADAPTCOEFFSA 14
61 #define YADAPTCOEFFSB 10
62 #define XADAPTCOEFFSB 5
63
64 /**
65 * Possible compression levels
66 * @{
67 */
68 enum APECompressionLevel {
69 COMPRESSION_LEVEL_FAST = 1000,
70 COMPRESSION_LEVEL_NORMAL = 2000,
71 COMPRESSION_LEVEL_HIGH = 3000,
72 COMPRESSION_LEVEL_EXTRA_HIGH = 4000,
73 COMPRESSION_LEVEL_INSANE = 5000
74 };
75 /** @} */
76
77 #define APE_FILTER_LEVELS 3
78
79 /** Filter orders depending on compression level */
80 static const uint16_t ape_filter_orders[5][APE_FILTER_LEVELS] = {
81 { 0, 0, 0 },
82 { 16, 0, 0 },
83 { 64, 0, 0 },
84 { 32, 256, 0 },
85 { 16, 256, 1280 }
86 };
87
88 /** Filter fraction bits depending on compression level */
89 static const uint8_t ape_filter_fracbits[5][APE_FILTER_LEVELS] = {
90 { 0, 0, 0 },
91 { 11, 0, 0 },
92 { 11, 0, 0 },
93 { 10, 13, 0 },
94 { 11, 13, 15 }
95 };
96
97
98 /** Filters applied to the decoded data */
99 typedef struct APEFilter {
100 int16_t *coeffs; ///< actual coefficients used in filtering
101 int16_t *adaptcoeffs; ///< adaptive filter coefficients used for correcting the actual filter coefficients
102 int16_t *historybuffer; ///< filter memory
103 int16_t *delay; ///< filtered values
104
105 int avg;
106 } APEFilter;
107
108 typedef struct APERice {
109 uint32_t k;
110 uint32_t ksum;
111 } APERice;
112
113 typedef struct APERangecoder {
114 uint32_t low; ///< low end of interval
115 uint32_t range; ///< length of interval
116 uint32_t help; ///< bytes_to_follow resp. intermediate value
117 unsigned int buffer; ///< buffer for input/output
118 } APERangecoder;
119
120 /** Filter histories */
121 typedef struct APEPredictor {
122 int32_t *buf;
123
124 int32_t lastA[2];
125
126 int32_t filterA[2];
127 int32_t filterB[2];
128
129 uint32_t coeffsA[2][4]; ///< adaptation coefficients
130 uint32_t coeffsB[2][5]; ///< adaptation coefficients
131 int32_t historybuffer[HISTORY_SIZE + PREDICTOR_SIZE];
132
133 unsigned int sample_pos;
134 } APEPredictor;
135
136 /** Decoder context */
137 typedef struct APEContext {
138 AVClass *class; ///< class for AVOptions
139 AVCodecContext *avctx;
140 BswapDSPContext bdsp;
141 LLAudDSPContext adsp;
142 int channels;
143 int samples; ///< samples left to decode in current frame
144 int bps;
145
146 int fileversion; ///< codec version, very important in decoding process
147 int compression_level; ///< compression level
148 int fset; ///< which filter set to use (calculated from compression level)
149 int flags; ///< global decoder flags
150
151 uint32_t CRC; ///< signalled frame CRC
152 uint32_t CRC_state; ///< accumulated CRC
153 int frameflags; ///< frame flags
154 APEPredictor predictor; ///< predictor used for final reconstruction
155
156 int32_t *decoded_buffer;
157 int decoded_size;
158 int32_t *decoded[MAX_CHANNELS]; ///< decoded data for each channel
159 int blocks_per_loop; ///< maximum number of samples to decode for each call
160
161 int16_t* filterbuf[APE_FILTER_LEVELS]; ///< filter memory
162
163 APERangecoder rc; ///< rangecoder used to decode actual values
164 APERice riceX; ///< rice code parameters for the second channel
165 APERice riceY; ///< rice code parameters for the first channel
166 APEFilter filters[APE_FILTER_LEVELS][2]; ///< filters used for reconstruction
167 GetBitContext gb;
168
169 uint8_t *data; ///< current frame data
170 uint8_t *data_end; ///< frame data end
171 int data_size; ///< frame data allocated size
172 const uint8_t *ptr; ///< current position in frame data
173
174 int error;
175
176 void (*entropy_decode_mono)(struct APEContext *ctx, int blockstodecode);
177 void (*entropy_decode_stereo)(struct APEContext *ctx, int blockstodecode);
178 void (*predictor_decode_mono)(struct APEContext *ctx, int count);
179 void (*predictor_decode_stereo)(struct APEContext *ctx, int count);
180 } APEContext;
181
182 static void ape_apply_filters(APEContext *ctx, int32_t *decoded0,
183 int32_t *decoded1, int count);
184
185 static void entropy_decode_mono_0000(APEContext *ctx, int blockstodecode);
186 static void entropy_decode_stereo_0000(APEContext *ctx, int blockstodecode);
187 static void entropy_decode_mono_3860(APEContext *ctx, int blockstodecode);
188 static void entropy_decode_stereo_3860(APEContext *ctx, int blockstodecode);
189 static void entropy_decode_mono_3900(APEContext *ctx, int blockstodecode);
190 static void entropy_decode_stereo_3900(APEContext *ctx, int blockstodecode);
191 static void entropy_decode_stereo_3930(APEContext *ctx, int blockstodecode);
192 static void entropy_decode_mono_3990(APEContext *ctx, int blockstodecode);
193 static void entropy_decode_stereo_3990(APEContext *ctx, int blockstodecode);
194
195 static void predictor_decode_mono_3800(APEContext *ctx, int count);
196 static void predictor_decode_stereo_3800(APEContext *ctx, int count);
197 static void predictor_decode_mono_3930(APEContext *ctx, int count);
198 static void predictor_decode_stereo_3930(APEContext *ctx, int count);
199 static void predictor_decode_mono_3950(APEContext *ctx, int count);
200 static void predictor_decode_stereo_3950(APEContext *ctx, int count);
201
202 static av_cold int ape_decode_close(AVCodecContext *avctx)
203 {
204 APEContext *s = avctx->priv_data;
205 int i;
206
207 for (i = 0; i < APE_FILTER_LEVELS; i++)
208 av_freep(&s->filterbuf[i]);
209
210 av_freep(&s->decoded_buffer);
211 av_freep(&s->data);
212 s->decoded_size = s->data_size = 0;
213
214 return 0;
215 }
216
217 static av_cold int ape_decode_init(AVCodecContext *avctx)
218 {
219 APEContext *s = avctx->priv_data;
220 int i;
221
222 if (avctx->extradata_size != 6) {
223 av_log(avctx, AV_LOG_ERROR, "Incorrect extradata\n");
224 return AVERROR(EINVAL);
225 }
226 if (avctx->channels > 2) {
227 av_log(avctx, AV_LOG_ERROR, "Only mono and stereo are supported\n");
228 return AVERROR(EINVAL);
229 }
230 s->bps = avctx->bits_per_coded_sample;
231 switch (s->bps) {
232 case 8:
233 avctx->sample_fmt = AV_SAMPLE_FMT_U8P;
234 break;
235 case 16:
236 avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
237 break;
238 case 24:
239 avctx->sample_fmt = AV_SAMPLE_FMT_S32P;
240 break;
241 default:
242 avpriv_request_sample(avctx,
243 "%d bits per coded sample", s->bps);
244 return AVERROR_PATCHWELCOME;
245 }
246 s->avctx = avctx;
247 s->channels = avctx->channels;
248 s->fileversion = AV_RL16(avctx->extradata);
249 s->compression_level = AV_RL16(avctx->extradata + 2);
250 s->flags = AV_RL16(avctx->extradata + 4);
251
252 av_log(avctx, AV_LOG_VERBOSE, "Compression Level: %d - Flags: %d\n",
253 s->compression_level, s->flags);
254 if (s->compression_level % 1000 || s->compression_level > COMPRESSION_LEVEL_INSANE ||
255 !s->compression_level ||
256 (s->fileversion < 3930 && s->compression_level == COMPRESSION_LEVEL_INSANE)) {
257 av_log(avctx, AV_LOG_ERROR, "Incorrect compression level %d\n",
258 s->compression_level);
259 return AVERROR_INVALIDDATA;
260 }
261 s->fset = s->compression_level / 1000 - 1;
262 for (i = 0; i < APE_FILTER_LEVELS; i++) {
263 if (!ape_filter_orders[s->fset][i])
264 break;
265 FF_ALLOC_OR_GOTO(avctx, s->filterbuf[i],
266 (ape_filter_orders[s->fset][i] * 3 + HISTORY_SIZE) * 4,
267 filter_alloc_fail);
268 }
269
270 if (s->fileversion < 3860) {
271 s->entropy_decode_mono = entropy_decode_mono_0000;
272 s->entropy_decode_stereo = entropy_decode_stereo_0000;
273 } else if (s->fileversion < 3900) {
274 s->entropy_decode_mono = entropy_decode_mono_3860;
275 s->entropy_decode_stereo = entropy_decode_stereo_3860;
276 } else if (s->fileversion < 3930) {
277 s->entropy_decode_mono = entropy_decode_mono_3900;
278 s->entropy_decode_stereo = entropy_decode_stereo_3900;
279 } else if (s->fileversion < 3990) {
280 s->entropy_decode_mono = entropy_decode_mono_3900;
281 s->entropy_decode_stereo = entropy_decode_stereo_3930;
282 } else {
283 s->entropy_decode_mono = entropy_decode_mono_3990;
284 s->entropy_decode_stereo = entropy_decode_stereo_3990;
285 }
286
287 if (s->fileversion < 3930) {
288 s->predictor_decode_mono = predictor_decode_mono_3800;
289 s->predictor_decode_stereo = predictor_decode_stereo_3800;
290 } else if (s->fileversion < 3950) {
291 s->predictor_decode_mono = predictor_decode_mono_3930;
292 s->predictor_decode_stereo = predictor_decode_stereo_3930;
293 } else {
294 s->predictor_decode_mono = predictor_decode_mono_3950;
295 s->predictor_decode_stereo = predictor_decode_stereo_3950;
296 }
297
298 ff_bswapdsp_init(&s->bdsp);
299 ff_llauddsp_init(&s->adsp);
300 avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
301
302 return 0;
303 filter_alloc_fail:
304 ape_decode_close(avctx);
305 return AVERROR(ENOMEM);
306 }
307
308 /**
309 * @name APE range decoding functions
310 * @{
311 */
312
313 #define CODE_BITS 32
314 #define TOP_VALUE ((unsigned int)1 << (CODE_BITS-1))
315 #define SHIFT_BITS (CODE_BITS - 9)
316 #define EXTRA_BITS ((CODE_BITS-2) % 8 + 1)
317 #define BOTTOM_VALUE (TOP_VALUE >> 8)
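/* With CODE_BITS = 32 these evaluate to TOP_VALUE = 2^31, SHIFT_BITS = 23,
 * EXTRA_BITS = 7 and BOTTOM_VALUE = 2^23: range_dec_normalize() below refills
 * one input byte whenever the range has shrunk to BOTTOM_VALUE or less. */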
318
319 /** Start the decoder */
320 static inline void range_start_decoding(APEContext *ctx)
321 {
322 ctx->rc.buffer = bytestream_get_byte(&ctx->ptr);
323 ctx->rc.low = ctx->rc.buffer >> (8 - EXTRA_BITS);
324 ctx->rc.range = (uint32_t) 1 << EXTRA_BITS;
325 }
326
327 /** Perform normalization */
328 static inline void range_dec_normalize(APEContext *ctx)
329 {
330 while (ctx->rc.range <= BOTTOM_VALUE) {
331 ctx->rc.buffer <<= 8;
332 if(ctx->ptr < ctx->data_end) {
333 ctx->rc.buffer += *ctx->ptr;
334 ctx->ptr++;
335 } else {
336 ctx->error = 1;
337 }
338 ctx->rc.low = (ctx->rc.low << 8) | ((ctx->rc.buffer >> 1) & 0xFF);
339 ctx->rc.range <<= 8;
340 }
341 }
342
343 /**
344 * Calculate cumulative frequency for next symbol. Does NO update!
345 * @param ctx decoder context
346 * @param tot_f is the total frequency or (code_value)1<<shift
347 * @return the cumulative frequency
348 */
349 static inline int range_decode_culfreq(APEContext *ctx, int tot_f)
350 {
351 range_dec_normalize(ctx);
352 ctx->rc.help = ctx->rc.range / tot_f;
353 return ctx->rc.low / ctx->rc.help;
354 }
355
356 /**
357 * Decode value with given size in bits
358 * @param ctx decoder context
359 * @param shift number of bits to decode
360 */
361 static inline int range_decode_culshift(APEContext *ctx, int shift)
362 {
363 range_dec_normalize(ctx);
364 ctx->rc.help = ctx->rc.range >> shift;
365 return ctx->rc.low / ctx->rc.help;
366 }
367
368
369 /**
370 * Update decoding state
371 * @param ctx decoder context
372 * @param sy_f the interval length (frequency of the symbol)
373 * @param lt_f the lower end (frequency sum of < symbols)
374 */
375 static inline void range_decode_update(APEContext *ctx, int sy_f, int lt_f)
376 {
377 ctx->rc.low -= ctx->rc.help * lt_f;
378 ctx->rc.range = ctx->rc.help * sy_f;
379 }
380
381 /** Decode n bits (n <= 16) without modelling */
382 static inline int range_decode_bits(APEContext *ctx, int n)
383 {
384 int sym = range_decode_culshift(ctx, n);
385 range_decode_update(ctx, 1, sym);
386 return sym;
387 }
388
389
390 #define MODEL_ELEMENTS 64
391
392 /**
393 * Fixed probabilities for symbols in Monkey's Audio version 3.97
394 */
395 static const uint16_t counts_3970[22] = {
396 0, 14824, 28224, 39348, 47855, 53994, 58171, 60926,
397 62682, 63786, 64463, 64878, 65126, 65276, 65365, 65419,
398 65450, 65469, 65480, 65487, 65491, 65493,
399 };
400
401 /**
402 * Probability ranges for symbols in Monkey's Audio version 3.97
403 */
404 static const uint16_t counts_diff_3970[21] = {
405 14824, 13400, 11124, 8507, 6139, 4177, 2755, 1756,
406 1104, 677, 415, 248, 150, 89, 54, 31,
407 19, 11, 7, 4, 2,
408 };
409
410 /**
411 * Fixed probabilities for symbols in Monkey's Audio version 3.98
412 */
413 static const uint16_t counts_3980[22] = {
414 0, 19578, 36160, 48417, 56323, 60899, 63265, 64435,
415 64971, 65232, 65351, 65416, 65447, 65466, 65476, 65482,
416 65485, 65488, 65490, 65491, 65492, 65493,
417 };
418
419 /**
420 * Probability ranges for symbols in Monkey's Audio version 3.98
421 */
422 static const uint16_t counts_diff_3980[21] = {
423 19578, 16582, 12257, 7906, 4576, 2366, 1170, 536,
424 261, 119, 65, 31, 19, 10, 6, 3,
425 3, 2, 1, 1, 1,
426 };
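/* In both models counts_diff[i] equals counts[i + 1] - counts[i], and the
 * cumulative totals stop at 65493 (out of a 16-bit total of 65536); cumulative
 * frequencies above 65492 are handled as a special case in range_get_symbol(). */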
427
428 /**
429 * Decode symbol
430 * @param ctx decoder context
431 * @param counts probability range start position
432 * @param counts_diff probability range widths
433 */
434 static inline int range_get_symbol(APEContext *ctx,
435 const uint16_t counts[],
436 const uint16_t counts_diff[])
437 {
438 int symbol, cf;
439
440 cf = range_decode_culshift(ctx, 16);
441
442 if(cf > 65492){
443 symbol= cf - 65535 + 63;
444 range_decode_update(ctx, 1, cf);
445 if(cf > 65535)
446 ctx->error=1;
447 return symbol;
448 }
449 /* figure out the symbol inefficiently; a binary search would be much better */
450 for (symbol = 0; counts[symbol + 1] <= cf; symbol++);
451
452 range_decode_update(ctx, counts_diff[symbol], counts[symbol]);
453
454 return symbol;
455 }
456 /** @} */ // group rangecoder
457
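/* Adaptive Rice parameter update: ksum tracks a scaled running average of the
 * coded magnitudes, and k is nudged so that ksum stays roughly within
 * [2^(k+4), 2^(k+5)). */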
458 static inline void update_rice(APERice *rice, unsigned int x)
459 {
460 int lim = rice->k ? (1 << (rice->k + 4)) : 0;
461 rice->ksum += ((x + 1) / 2) - ((rice->ksum + 16) >> 5);
462
463 if (rice->ksum < lim)
464 rice->k--;
465 else if (rice->ksum >= (1 << (rice->k + 5)) && rice->k < 24)
466 rice->k++;
467 }
468
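/* Plain Rice/Golomb read: a unary quotient followed by k raw remainder bits. */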
469 static inline int get_rice_ook(GetBitContext *gb, int k)
470 {
471 unsigned int x;
472
473 x = get_unary(gb, 1, get_bits_left(gb));
474
475 if (k)
476 x = (x << k) | get_bits(gb, k);
477
478 return x;
479 }
480
481 static inline int ape_decode_value_3860(APEContext *ctx, GetBitContext *gb,
482 APERice *rice)
483 {
484 unsigned int x, overflow;
485
486 overflow = get_unary(gb, 1, get_bits_left(gb));
487
488 if (ctx->fileversion > 3880) {
489 while (overflow >= 16) {
490 overflow -= 16;
491 rice->k += 4;
492 }
493 }
494
495 if (!rice->k)
496 x = overflow;
497 else if(rice->k <= MIN_CACHE_BITS) {
498 x = (overflow << rice->k) + get_bits(gb, rice->k);
499 } else {
500 av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %"PRIu32"\n", rice->k);
501 ctx->error = 1;
502 return AVERROR_INVALIDDATA;
503 }
504 rice->ksum += x - (rice->ksum + 8 >> 4);
505 if (rice->ksum < (rice->k ? 1 << (rice->k + 4) : 0))
506 rice->k--;
507 else if (rice->ksum >= (1 << (rice->k + 5)) && rice->k < 24)
508 rice->k++;
509
510 /* Convert to signed */
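/* (this maps the unsigned codes 0, 1, 2, 3, 4, ... to 0, 1, -1, 2, -2, ...) */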
511 return ((x >> 1) ^ ((x & 1) - 1)) + 1;
512 }
513
514 static inline int ape_decode_value_3900(APEContext *ctx, APERice *rice)
515 {
516 unsigned int x, overflow;
517 int tmpk;
518
519 overflow = range_get_symbol(ctx, counts_3970, counts_diff_3970);
520
521 if (overflow == (MODEL_ELEMENTS - 1)) {
522 tmpk = range_decode_bits(ctx, 5);
523 overflow = 0;
524 } else
525 tmpk = (rice->k < 1) ? 0 : rice->k - 1;
526
527 if (tmpk <= 16 || ctx->fileversion < 3910) {
528 if (tmpk > 23) {
529 av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk);
530 return AVERROR_INVALIDDATA;
531 }
532 x = range_decode_bits(ctx, tmpk);
533 } else if (tmpk <= 31) {
534 x = range_decode_bits(ctx, 16);
535 x |= (range_decode_bits(ctx, tmpk - 16) << 16);
536 } else {
537 av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk);
538 return AVERROR_INVALIDDATA;
539 }
540 x += overflow << tmpk;
541
542 update_rice(rice, x);
543
544 /* Convert to signed */
545 return ((x >> 1) ^ ((x & 1) - 1)) + 1;
546 }
547
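/* 3.99+ scheme: the value is reconstructed as base + overflow * pivot, where
 * pivot is derived from the running magnitude average (ksum >> 5, at least 1),
 * overflow comes from the symbol model (or 32 raw bits on escape) and base is
 * range-decoded uniformly below pivot. */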
548 static inline int ape_decode_value_3990(APEContext *ctx, APERice *rice)
549 {
550 unsigned int x, overflow;
551 int base, pivot;
552
553 pivot = rice->ksum >> 5;
554 if (pivot == 0)
555 pivot = 1;
556
557 overflow = range_get_symbol(ctx, counts_3980, counts_diff_3980);
558
559 if (overflow == (MODEL_ELEMENTS - 1)) {
560 overflow = (unsigned)range_decode_bits(ctx, 16) << 16;
561 overflow |= range_decode_bits(ctx, 16);
562 }
563
564 if (pivot < 0x10000) {
565 base = range_decode_culfreq(ctx, pivot);
566 range_decode_update(ctx, 1, base);
567 } else {
568 int base_hi = pivot, base_lo;
569 int bbits = 0;
570
571 while (base_hi & ~0xFFFF) {
572 base_hi >>= 1;
573 bbits++;
574 }
575 base_hi = range_decode_culfreq(ctx, base_hi + 1);
576 range_decode_update(ctx, 1, base_hi);
577 base_lo = range_decode_culfreq(ctx, 1 << bbits);
578 range_decode_update(ctx, 1, base_lo);
579
580 base = (base_hi << bbits) + base_lo;
581 }
582
583 x = base + overflow * pivot;
584
585 update_rice(rice, x);
586
587 /* Convert to signed */
588 return ((x >> 1) ^ ((x & 1) - 1)) + 1;
589 }
590
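/* Number of bits needed to represent ksum: floor(log2(ksum)) + 1, or 0 for 0. */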
591 static int get_k(int ksum)
592 {
593 return av_log2(ksum) + !!ksum;
594 }
595
596 static void decode_array_0000(APEContext *ctx, GetBitContext *gb,
597 int32_t *out, APERice *rice, int blockstodecode)
598 {
599 int i;
600 unsigned ksummax, ksummin;
601
602 rice->ksum = 0;
603 for (i = 0; i < FFMIN(blockstodecode, 5); i++) {
604 out[i] = get_rice_ook(&ctx->gb, 10);
605 rice->ksum += out[i];
606 }
607
608 if (blockstodecode <= 5)
609 goto end;
610
611 rice->k = get_k(rice->ksum / 10);
612 if (rice->k >= 24)
613 return;
614 for (; i < FFMIN(blockstodecode, 64); i++) {
615 out[i] = get_rice_ook(&ctx->gb, rice->k);
616 rice->ksum += out[i];
617 rice->k = get_k(rice->ksum / ((i + 1) * 2));
618 if (rice->k >= 24)
619 return;
620 }
621
622 if (blockstodecode <= 64)
623 goto end;
624
625 rice->k = get_k(rice->ksum >> 7);
626 ksummax = 1 << rice->k + 7;
627 ksummin = rice->k ? (1 << rice->k + 6) : 0;
628 for (; i < blockstodecode; i++) {
629 if (get_bits_left(&ctx->gb) < 1) {
630 ctx->error = 1;
631 return;
632 }
633 out[i] = get_rice_ook(&ctx->gb, rice->k);
634 rice->ksum += out[i] - (unsigned)out[i - 64];
635 while (rice->ksum < ksummin) {
636 rice->k--;
637 ksummin = rice->k ? ksummin >> 1 : 0;
638 ksummax >>= 1;
639 }
640 while (rice->ksum >= ksummax) {
641 rice->k++;
642 if (rice->k > 24)
643 return;
644 ksummax <<= 1;
645 ksummin = ksummin ? ksummin << 1 : 128;
646 }
647 }
648
649 end:
650 for (i = 0; i < blockstodecode; i++)
651 out[i] = ((out[i] >> 1) ^ ((out[i] & 1) - 1)) + 1;
652 }
653
654 static void entropy_decode_mono_0000(APEContext *ctx, int blockstodecode)
655 {
656 decode_array_0000(ctx, &ctx->gb, ctx->decoded[0], &ctx->riceY,
657 blockstodecode);
658 }
659
660 static void entropy_decode_stereo_0000(APEContext *ctx, int blockstodecode)
661 {
662 decode_array_0000(ctx, &ctx->gb, ctx->decoded[0], &ctx->riceY,
663 blockstodecode);
664 decode_array_0000(ctx, &ctx->gb, ctx->decoded[1], &ctx->riceX,
665 blockstodecode);
666 }
667
668 static void entropy_decode_mono_3860(APEContext *ctx, int blockstodecode)
669 {
670 int32_t *decoded0 = ctx->decoded[0];
671
672 while (blockstodecode--)
673 *decoded0++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceY);
674 }
675
676 static void entropy_decode_stereo_3860(APEContext *ctx, int blockstodecode)
677 {
678 int32_t *decoded0 = ctx->decoded[0];
679 int32_t *decoded1 = ctx->decoded[1];
680 int blocks = blockstodecode;
681
682 while (blockstodecode--)
683 *decoded0++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceY);
684 while (blocks--)
685 *decoded1++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceX);
686 }
687
688 static void entropy_decode_mono_3900(APEContext *ctx, int blockstodecode)
689 {
690 int32_t *decoded0 = ctx->decoded[0];
691
692 while (blockstodecode--)
693 *decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY);
694 }
695
696 static void entropy_decode_stereo_3900(APEContext *ctx, int blockstodecode)
697 {
698 int32_t *decoded0 = ctx->decoded[0];
699 int32_t *decoded1 = ctx->decoded[1];
700 int blocks = blockstodecode;
701
702 while (blockstodecode--)
703 *decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY);
704 range_dec_normalize(ctx);
705 // because of some implementation peculiarities we need to backpedal here
706 ctx->ptr -= 1;
707 range_start_decoding(ctx);
708 while (blocks--)
709 *decoded1++ = ape_decode_value_3900(ctx, &ctx->riceX);
710 }
711
712 static void entropy_decode_stereo_3930(APEContext *ctx, int blockstodecode)
713 {
714 int32_t *decoded0 = ctx->decoded[0];
715 int32_t *decoded1 = ctx->decoded[1];
716
717 while (blockstodecode--) {
718 *decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY);
719 *decoded1++ = ape_decode_value_3900(ctx, &ctx->riceX);
720 }
721 }
722
723 static void entropy_decode_mono_3990(APEContext *ctx, int blockstodecode)
724 {
725 int32_t *decoded0 = ctx->decoded[0];
726
727 while (blockstodecode--)
728 *decoded0++ = ape_decode_value_3990(ctx, &ctx->riceY);
729 }
730
731 static void entropy_decode_stereo_3990(APEContext *ctx, int blockstodecode)
732 {
733 int32_t *decoded0 = ctx->decoded[0];
734 int32_t *decoded1 = ctx->decoded[1];
735
736 while (blockstodecode--) {
737 *decoded0++ = ape_decode_value_3990(ctx, &ctx->riceY);
738 *decoded1++ = ape_decode_value_3990(ctx, &ctx->riceX);
739 }
740 }
741
742 static int init_entropy_decoder(APEContext *ctx)
743 {
744 /* Read the CRC */
745 if (ctx->fileversion >= 3900) {
746 if (ctx->data_end - ctx->ptr < 6)
747 return AVERROR_INVALIDDATA;
748 ctx->CRC = bytestream_get_be32(&ctx->ptr);
749 } else {
750 ctx->CRC = get_bits_long(&ctx->gb, 32);
751 }
752
753 /* Read the frame flags if they exist */
754 ctx->frameflags = 0;
755 ctx->CRC_state = UINT32_MAX;
756 if ((ctx->fileversion > 3820) && (ctx->CRC & 0x80000000)) {
757 ctx->CRC &= ~0x80000000;
758
759 if (ctx->data_end - ctx->ptr < 6)
760 return AVERROR_INVALIDDATA;
761 ctx->frameflags = bytestream_get_be32(&ctx->ptr);
762 }
763
764 /* Initialize the rice structs */
765 ctx->riceX.k = 10;
766 ctx->riceX.ksum = (1 << ctx->riceX.k) * 16;
767 ctx->riceY.k = 10;
768 ctx->riceY.ksum = (1 << ctx->riceY.k) * 16;
769
770 if (ctx->fileversion >= 3900) {
771 /* The first 8 bits of input are ignored. */
772 ctx->ptr++;
773
774 range_start_decoding(ctx);
775 }
776
777 return 0;
778 }
779
780 static const int32_t initial_coeffs_fast_3320[1] = {
781 375,
782 };
783
784 static const int32_t initial_coeffs_a_3800[3] = {
785 64, 115, 64,
786 };
787
788 static const int32_t initial_coeffs_b_3800[2] = {
789 740, 0
790 };
791
792 static const int32_t initial_coeffs_3930[4] = {
793 360, 317, -109, 98
794 };
795
796 static void init_predictor_decoder(APEContext *ctx)
797 {
798 APEPredictor *p = &ctx->predictor;
799
800 /* Zero the history buffers */
801 memset(p->historybuffer, 0, PREDICTOR_SIZE * sizeof(*p->historybuffer));
802 p->buf = p->historybuffer;
803
804 /* Initialize and zero the coefficients */
805 if (ctx->fileversion < 3930) {
806 if (ctx->compression_level == COMPRESSION_LEVEL_FAST) {
807 memcpy(p->coeffsA[0], initial_coeffs_fast_3320,
808 sizeof(initial_coeffs_fast_3320));
809 memcpy(p->coeffsA[1], initial_coeffs_fast_3320,
810 sizeof(initial_coeffs_fast_3320));
811 } else {
812 memcpy(p->coeffsA[0], initial_coeffs_a_3800,
813 sizeof(initial_coeffs_a_3800));
814 memcpy(p->coeffsA[1], initial_coeffs_a_3800,
815 sizeof(initial_coeffs_a_3800));
816 }
817 } else {
818 memcpy(p->coeffsA[0], initial_coeffs_3930, sizeof(initial_coeffs_3930));
819 memcpy(p->coeffsA[1], initial_coeffs_3930, sizeof(initial_coeffs_3930));
820 }
821 memset(p->coeffsB, 0, sizeof(p->coeffsB));
822 if (ctx->fileversion < 3930) {
823 memcpy(p->coeffsB[0], initial_coeffs_b_3800,
824 sizeof(initial_coeffs_b_3800));
825 memcpy(p->coeffsB[1], initial_coeffs_b_3800,
826 sizeof(initial_coeffs_b_3800));
827 }
828
829 p->filterA[0] = p->filterA[1] = 0;
830 p->filterB[0] = p->filterB[1] = 0;
831 p->lastA[0] = p->lastA[1] = 0;
832
833 p->sample_pos = 0;
834 }
835
836 /** Get inverse sign of integer (-1 for positive, 1 for negative and 0 for zero) */
837 static inline int APESIGN(int32_t x) {
838 return (x < 0) - (x > 0);
839 }
840
841 static av_always_inline int filter_fast_3320(APEPredictor *p,
842 const int decoded, const int filter,
843 const int delayA)
844 {
845 int32_t predictionA;
846
847 p->buf[delayA] = p->lastA[filter];
848 if (p->sample_pos < 3) {
849 p->lastA[filter] = decoded;
850 p->filterA[filter] = decoded;
851 return decoded;
852 }
853
854 predictionA = p->buf[delayA] * 2U - p->buf[delayA - 1];
855 p->lastA[filter] = decoded + ((int32_t)(predictionA * p->coeffsA[filter][0]) >> 9);
856
857 if ((decoded ^ predictionA) > 0)
858 p->coeffsA[filter][0]++;
859 else
860 p->coeffsA[filter][0]--;
861
862 p->filterA[filter] += (unsigned)p->lastA[filter];
863
864 return p->filterA[filter];
865 }
866
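/* Pre-3.93 "normal"/"high" predictor: essentially a 3-tap adaptive predictor
 * over the sample history (coeffsA) cascaded with a 2-tap predictor over the
 * previous stage's output (coeffsB), followed by a leaky integrator with
 * 31/32 feedback. */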
867 static av_always_inline int filter_3800(APEPredictor *p,
868 const unsigned decoded, const int filter,
869 const int delayA, const int delayB,
870 const int start, const int shift)
871 {
872 int32_t predictionA, predictionB, sign;
873 int32_t d0, d1, d2, d3, d4;
874
875 p->buf[delayA] = p->lastA[filter];
876 p->buf[delayB] = p->filterB[filter];
877 if (p->sample_pos < start) {
878 predictionA = decoded + p->filterA[filter];
879 p->lastA[filter] = decoded;
880 p->filterB[filter] = decoded;
881 p->filterA[filter] = predictionA;
882 return predictionA;
883 }
884 d2 = p->buf[delayA];
885 d1 = (p->buf[delayA] - p->buf[delayA - 1]) * 2U;
886 d0 = p->buf[delayA] + ((p->buf[delayA - 2] - p->buf[delayA - 1]) * 8U);
887 d3 = p->buf[delayB] * 2U - p->buf[delayB - 1];
888 d4 = p->buf[delayB];
889
890 predictionA = d0 * p->coeffsA[filter][0] +
891 d1 * p->coeffsA[filter][1] +
892 d2 * p->coeffsA[filter][2];
893
894 sign = APESIGN(decoded);
895 p->coeffsA[filter][0] += (((d0 >> 30) & 2) - 1) * sign;
896 p->coeffsA[filter][1] += (((d1 >> 28) & 8) - 4) * sign;
897 p->coeffsA[filter][2] += (((d2 >> 28) & 8) - 4) * sign;
898
899 predictionB = d3 * p->coeffsB[filter][0] -
900 d4 * p->coeffsB[filter][1];
901 p->lastA[filter] = decoded + (predictionA >> 11);
902 sign = APESIGN(p->lastA[filter]);
903 p->coeffsB[filter][0] += (((d3 >> 29) & 4) - 2) * sign;
904 p->coeffsB[filter][1] -= (((d4 >> 30) & 2) - 1) * sign;
905
906 p->filterB[filter] = p->lastA[filter] + (predictionB >> shift);
907 p->filterA[filter] = p->filterB[filter] + (unsigned)((int)(p->filterA[filter] * 31U) >> 5);
908
909 return p->filterA[filter];
910 }
911
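/* In-place adaptive FIR used by the "high"/"extra high" modes before the main
 * predictor: the coefficients start at zero and follow a sign-sign LMS style
 * update, so no side information needs to be transmitted. */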
912 static void long_filter_high_3800(int32_t *buffer, int order, int shift, int length)
913 {
914 int i, j;
915 int32_t dotprod, sign;
916 int32_t coeffs[256], delay[256];
917
918 if (order >= length)
919 return;
920
921 memset(coeffs, 0, order * sizeof(*coeffs));
922 for (i = 0; i < order; i++)
923 delay[i] = buffer[i];
924 for (i = order; i < length; i++) {
925 dotprod = 0;
926 sign = APESIGN(buffer[i]);
927 for (j = 0; j < order; j++) {
928 dotprod += delay[j] * (unsigned)coeffs[j];
929 coeffs[j] += ((delay[j] >> 31) | 1) * sign;
930 }
931 buffer[i] -= dotprod >> shift;
932 for (j = 0; j < order - 1; j++)
933 delay[j] = delay[j + 1];
934 delay[order - 1] = buffer[i];
935 }
936 }
937
938 static void long_filter_ehigh_3830(int32_t *buffer, int length)
939 {
940 int i, j;
941 int32_t dotprod, sign;
942 int32_t delay[8] = { 0 };
943 uint32_t coeffs[8] = { 0 };
944
945 for (i = 0; i < length; i++) {
946 dotprod = 0;
947 sign = APESIGN(buffer[i]);
948 for (j = 7; j >= 0; j--) {
949 dotprod += delay[j] * coeffs[j];
950 coeffs[j] += ((delay[j] >> 31) | 1) * sign;
951 }
952 for (j = 7; j > 0; j--)
953 delay[j] = delay[j - 1];
954 delay[0] = buffer[i];
955 buffer[i] -= dotprod >> 9;
956 }
957 }
958
959 static void predictor_decode_stereo_3800(APEContext *ctx, int count)
960 {
961 APEPredictor *p = &ctx->predictor;
962 int32_t *decoded0 = ctx->decoded[0];
963 int32_t *decoded1 = ctx->decoded[1];
964 int start = 4, shift = 10;
965
966 if (ctx->compression_level == COMPRESSION_LEVEL_HIGH) {
967 start = 16;
968 long_filter_high_3800(decoded0, 16, 9, count);
969 long_filter_high_3800(decoded1, 16, 9, count);
970 } else if (ctx->compression_level == COMPRESSION_LEVEL_EXTRA_HIGH) {
971 int order = 128, shift2 = 11;
972
973 if (ctx->fileversion >= 3830) {
974 order <<= 1;
975 shift++;
976 shift2++;
977 long_filter_ehigh_3830(decoded0 + order, count - order);
978 long_filter_ehigh_3830(decoded1 + order, count - order);
979 }
980 start = order;
981 long_filter_high_3800(decoded0, order, shift2, count);
982 long_filter_high_3800(decoded1, order, shift2, count);
983 }
984
985 while (count--) {
986 int X = *decoded0, Y = *decoded1;
987 if (ctx->compression_level == COMPRESSION_LEVEL_FAST) {
988 *decoded0 = filter_fast_3320(p, Y, 0, YDELAYA);
989 decoded0++;
990 *decoded1 = filter_fast_3320(p, X, 1, XDELAYA);
991 decoded1++;
992 } else {
993 *decoded0 = filter_3800(p, Y, 0, YDELAYA, YDELAYB,
994 start, shift);
995 decoded0++;
996 *decoded1 = filter_3800(p, X, 1, XDELAYA, XDELAYB,
997 start, shift);
998 decoded1++;
999 }
1000
1001 /* Combined */
1002 p->buf++;
1003 p->sample_pos++;
1004
1005 /* Have we filled the history buffer? */
1006 if (p->buf == p->historybuffer + HISTORY_SIZE) {
1007 memmove(p->historybuffer, p->buf,
1008 PREDICTOR_SIZE * sizeof(*p->historybuffer));
1009 p->buf = p->historybuffer;
1010 }
1011 }
1012 }
1013
1014 static void predictor_decode_mono_3800(APEContext *ctx, int count)
1015 {
1016 APEPredictor *p = &ctx->predictor;
1017 int32_t *decoded0 = ctx->decoded[0];
1018 int start = 4, shift = 10;
1019
1020 if (ctx->compression_level == COMPRESSION_LEVEL_HIGH) {
1021 start = 16;
1022 long_filter_high_3800(decoded0, 16, 9, count);
1023 } else if (ctx->compression_level == COMPRESSION_LEVEL_EXTRA_HIGH) {
1024 int order = 128, shift2 = 11;
1025
1026 if (ctx->fileversion >= 3830) {
1027 order <<= 1;
1028 shift++;
1029 shift2++;
1030 long_filter_ehigh_3830(decoded0 + order, count - order);
1031 }
1032 start = order;
1033 long_filter_high_3800(decoded0, order, shift2, count);
1034 }
1035
1036 while (count--) {
1037 if (ctx->compression_level == COMPRESSION_LEVEL_FAST) {
1038 *decoded0 = filter_fast_3320(p, *decoded0, 0, YDELAYA);
1039 decoded0++;
1040 } else {
1041 *decoded0 = filter_3800(p, *decoded0, 0, YDELAYA, YDELAYB,
1042 start, shift);
1043 decoded0++;
1044 }
1045
1046 /* Combined */
1047 p->buf++;
1048 p->sample_pos++;
1049
1050 /* Have we filled the history buffer? */
1051 if (p->buf == p->historybuffer + HISTORY_SIZE) {
1052 memmove(p->historybuffer, p->buf,
1053 PREDICTOR_SIZE * sizeof(*p->historybuffer));
1054 p->buf = p->historybuffer;
1055 }
1056 }
1057 }
1058
1059 static av_always_inline int predictor_update_3930(APEPredictor *p,
1060 const int decoded, const int filter,
1061 const int delayA)
1062 {
1063 int32_t predictionA, sign;
1064 int32_t d0, d1, d2, d3;
1065
1066 p->buf[delayA] = p->lastA[filter];
1067 d0 = p->buf[delayA ];
1068 d1 = p->buf[delayA ] - p->buf[delayA - 1];
1069 d2 = p->buf[delayA - 1] - p->buf[delayA - 2];
1070 d3 = p->buf[delayA - 2] - p->buf[delayA - 3];
1071
1072 predictionA = d0 * p->coeffsA[filter][0] +
1073 d1 * p->coeffsA[filter][1] +
1074 d2 * p->coeffsA[filter][2] +
1075 d3 * p->coeffsA[filter][3];
1076
1077 p->lastA[filter] = decoded + (predictionA >> 9);
1078 p->filterA[filter] = p->lastA[filter] + ((int)(p->filterA[filter] * 31U) >> 5);
1079
1080 sign = APESIGN(decoded);
1081 p->coeffsA[filter][0] += ((d0 < 0) * 2 - 1) * sign;
1082 p->coeffsA[filter][1] += ((d1 < 0) * 2 - 1) * sign;
1083 p->coeffsA[filter][2] += ((d2 < 0) * 2 - 1) * sign;
1084 p->coeffsA[filter][3] += ((d3 < 0) * 2 - 1) * sign;
1085
1086 return p->filterA[filter];
1087 }
1088
1089 static void predictor_decode_stereo_3930(APEContext *ctx, int count)
1090 {
1091 APEPredictor *p = &ctx->predictor;
1092 int32_t *decoded0 = ctx->decoded[0];
1093 int32_t *decoded1 = ctx->decoded[1];
1094
1095 ape_apply_filters(ctx, ctx->decoded[0], ctx->decoded[1], count);
1096
1097 while (count--) {
1098 /* Predictor Y */
1099 int Y = *decoded1, X = *decoded0;
1100 *decoded0 = predictor_update_3930(p, Y, 0, YDELAYA);
1101 decoded0++;
1102 *decoded1 = predictor_update_3930(p, X, 1, XDELAYA);
1103 decoded1++;
1104
1105 /* Combined */
1106 p->buf++;
1107
1108 /* Have we filled the history buffer? */
1109 if (p->buf == p->historybuffer + HISTORY_SIZE) {
1110 memmove(p->historybuffer, p->buf,
1111 PREDICTOR_SIZE * sizeof(*p->historybuffer));
1112 p->buf = p->historybuffer;
1113 }
1114 }
1115 }
1116
1117 static void predictor_decode_mono_3930(APEContext *ctx, int count)
1118 {
1119 APEPredictor *p = &ctx->predictor;
1120 int32_t *decoded0 = ctx->decoded[0];
1121
1122 ape_apply_filters(ctx, ctx->decoded[0], NULL, count);
1123
1124 while (count--) {
1125 *decoded0 = predictor_update_3930(p, *decoded0, 0, YDELAYA);
1126 decoded0++;
1127
1128 p->buf++;
1129
1130 /* Have we filled the history buffer? */
1131 if (p->buf == p->historybuffer + HISTORY_SIZE) {
1132 memmove(p->historybuffer, p->buf,
1133 PREDICTOR_SIZE * sizeof(*p->historybuffer));
1134 p->buf = p->historybuffer;
1135 }
1136 }
1137 }
1138
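/* 3.95+ predictor: stage A is a 4-tap adaptive filter over the channel's own
 * history, stage B is a 5-tap adaptive filter fed with the other channel's
 * filtered output (filterA[filter ^ 1]), which provides the cross-channel
 * prediction; both coefficient sets adapt by the sign of the residual. */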
1139 static av_always_inline int predictor_update_filter(APEPredictor *p,
1140 const int decoded, const int filter,
1141 const int delayA, const int delayB,
1142 const int adaptA, const int adaptB)
1143 {
1144 int32_t predictionA, predictionB, sign;
1145
1146 p->buf[delayA] = p->lastA[filter];
1147 p->buf[adaptA] = APESIGN(p->buf[delayA]);
1148 p->buf[delayA - 1] = p->buf[delayA] - (unsigned)p->buf[delayA - 1];
1149 p->buf[adaptA - 1] = APESIGN(p->buf[delayA - 1]);
1150
1151 predictionA = p->buf[delayA ] * p->coeffsA[filter][0] +
1152 p->buf[delayA - 1] * p->coeffsA[filter][1] +
1153 p->buf[delayA - 2] * p->coeffsA[filter][2] +
1154 p->buf[delayA - 3] * p->coeffsA[filter][3];
1155
1156 /* Apply a scaled first-order filter compression */
1157 p->buf[delayB] = p->filterA[filter ^ 1] - ((int)(p->filterB[filter] * 31U) >> 5);
1158 p->buf[adaptB] = APESIGN(p->buf[delayB]);
1159 p->buf[delayB - 1] = p->buf[delayB] - (unsigned)p->buf[delayB - 1];
1160 p->buf[adaptB - 1] = APESIGN(p->buf[delayB - 1]);
1161 p->filterB[filter] = p->filterA[filter ^ 1];
1162
1163 predictionB = p->buf[delayB ] * p->coeffsB[filter][0] +
1164 p->buf[delayB - 1] * p->coeffsB[filter][1] +
1165 p->buf[delayB - 2] * p->coeffsB[filter][2] +
1166 p->buf[delayB - 3] * p->coeffsB[filter][3] +
1167 p->buf[delayB - 4] * p->coeffsB[filter][4];
1168
1169 p->lastA[filter] = decoded + ((int)((unsigned)predictionA + (predictionB >> 1)) >> 10);
1170 p->filterA[filter] = p->lastA[filter] + ((int)(p->filterA[filter] * 31U) >> 5);
1171
1172 sign = APESIGN(decoded);
1173 p->coeffsA[filter][0] += p->buf[adaptA ] * sign;
1174 p->coeffsA[filter][1] += p->buf[adaptA - 1] * sign;
1175 p->coeffsA[filter][2] += p->buf[adaptA - 2] * sign;
1176 p->coeffsA[filter][3] += p->buf[adaptA - 3] * sign;
1177 p->coeffsB[filter][0] += p->buf[adaptB ] * sign;
1178 p->coeffsB[filter][1] += p->buf[adaptB - 1] * sign;
1179 p->coeffsB[filter][2] += p->buf[adaptB - 2] * sign;
1180 p->coeffsB[filter][3] += p->buf[adaptB - 3] * sign;
1181 p->coeffsB[filter][4] += p->buf[adaptB - 4] * sign;
1182
1183 return p->filterA[filter];
1184 }
1185
1186 static void predictor_decode_stereo_3950(APEContext *ctx, int count)
1187 {
1188 APEPredictor *p = &ctx->predictor;
1189 int32_t *decoded0 = ctx->decoded[0];
1190 int32_t *decoded1 = ctx->decoded[1];
1191
1192 ape_apply_filters(ctx, ctx->decoded[0], ctx->decoded[1], count);
1193
1194 while (count--) {
1195 /* Predictor Y */
1196 *decoded0 = predictor_update_filter(p, *decoded0, 0, YDELAYA, YDELAYB,
1197 YADAPTCOEFFSA, YADAPTCOEFFSB);
1198 decoded0++;
1199 *decoded1 = predictor_update_filter(p, *decoded1, 1, XDELAYA, XDELAYB,
1200 XADAPTCOEFFSA, XADAPTCOEFFSB);
1201 decoded1++;
1202
1203 /* Combined */
1204 p->buf++;
1205
1206 /* Have we filled the history buffer? */
1207 if (p->buf == p->historybuffer + HISTORY_SIZE) {
1208 memmove(p->historybuffer, p->buf,
1209 PREDICTOR_SIZE * sizeof(*p->historybuffer));
1210 p->buf = p->historybuffer;
1211 }
1212 }
1213 }
1214
1215 static void predictor_decode_mono_3950(APEContext *ctx, int count)
1216 {
1217 APEPredictor *p = &ctx->predictor;
1218 int32_t *decoded0 = ctx->decoded[0];
1219 int32_t predictionA, currentA, A, sign;
1220
1221 ape_apply_filters(ctx, ctx->decoded[0], NULL, count);
1222
1223 currentA = p->lastA[0];
1224
1225 while (count--) {
1226 A = *decoded0;
1227
1228 p->buf[YDELAYA] = currentA;
1229 p->buf[YDELAYA - 1] = p->buf[YDELAYA] - (unsigned)p->buf[YDELAYA - 1];
1230
1231 predictionA = p->buf[YDELAYA ] * p->coeffsA[0][0] +
1232 p->buf[YDELAYA - 1] * p->coeffsA[0][1] +
1233 p->buf[YDELAYA - 2] * p->coeffsA[0][2] +
1234 p->buf[YDELAYA - 3] * p->coeffsA[0][3];
1235
1236 currentA = A + (unsigned)(predictionA >> 10);
1237
1238 p->buf[YADAPTCOEFFSA] = APESIGN(p->buf[YDELAYA ]);
1239 p->buf[YADAPTCOEFFSA - 1] = APESIGN(p->buf[YDELAYA - 1]);
1240
1241 sign = APESIGN(A);
1242 p->coeffsA[0][0] += p->buf[YADAPTCOEFFSA ] * sign;
1243 p->coeffsA[0][1] += p->buf[YADAPTCOEFFSA - 1] * sign;
1244 p->coeffsA[0][2] += p->buf[YADAPTCOEFFSA - 2] * sign;
1245 p->coeffsA[0][3] += p->buf[YADAPTCOEFFSA - 3] * sign;
1246
1247 p->buf++;
1248
1249 /* Have we filled the history buffer? */
1250 if (p->buf == p->historybuffer + HISTORY_SIZE) {
1251 memmove(p->historybuffer, p->buf,
1252 PREDICTOR_SIZE * sizeof(*p->historybuffer));
1253 p->buf = p->historybuffer;
1254 }
1255
1256 p->filterA[0] = currentA + (unsigned)((int)(p->filterA[0] * 31U) >> 5);
1257 *(decoded0++) = p->filterA[0];
1258 }
1259
1260 p->lastA[0] = currentA;
1261 }
1262
1263 static void do_init_filter(APEFilter *f, int16_t *buf, int order)
1264 {
1265 f->coeffs = buf;
1266 f->historybuffer = buf + order;
1267 f->delay = f->historybuffer + order * 2;
1268 f->adaptcoeffs = f->historybuffer + order;
1269
1270 memset(f->historybuffer, 0, (order * 2) * sizeof(*f->historybuffer));
1271 memset(f->coeffs, 0, order * sizeof(*f->coeffs));
1272 f->avg = 0;
1273 }
1274
1275 static void init_filter(APEContext *ctx, APEFilter *f, int16_t *buf, int order)
1276 {
1277 do_init_filter(&f[0], buf, order);
1278 do_init_filter(&f[1], buf + order * 3 + HISTORY_SIZE, order);
1279 }
1280
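/* Core adaptive FIR stage: the DSP call computes the fixed-point dot product
 * of coeffs with the (clipped) output history and, in the same pass, nudges
 * coeffs by sign(*data) * adaptcoeffs; the new adaptation entry written below
 * is sized by how large |res| is compared to the running average f->avg. */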
1281 static void do_apply_filter(APEContext *ctx, int version, APEFilter *f,
1282 int32_t *data, int count, int order, int fracbits)
1283 {
1284 int res;
1285 int absres;
1286
1287 while (count--) {
1288 /* round fixedpoint scalar product */
1289 res = ctx->adsp.scalarproduct_and_madd_int16(f->coeffs,
1290 f->delay - order,
1291 f->adaptcoeffs - order,
1292 order, APESIGN(*data));
1293 res = (int)(res + (1U << (fracbits - 1))) >> fracbits;
1294 res += (unsigned)*data;
1295 *data++ = res;
1296
1297 /* Update the output history */
1298 *f->delay++ = av_clip_int16(res);
1299
1300 if (version < 3980) {
1301 /* Version ??? to < 3.98 files (untested) */
1302 f->adaptcoeffs[0] = (res == 0) ? 0 : ((res >> 28) & 8) - 4;
1303 f->adaptcoeffs[-4] >>= 1;
1304 f->adaptcoeffs[-8] >>= 1;
1305 } else {
1306 /* Version 3.98 and later files */
1307
1308 /* Update the adaptation coefficients */
1309 absres = res < 0 ? -(unsigned)res : res;
1310 if (absres)
1311 *f->adaptcoeffs = APESIGN(res) *
1312 (8 << ((absres > f->avg * 3) + (absres > f->avg * 4 / 3)));
1313 /* equivalent to the following code
1314 if (absres <= f->avg * 4 / 3)
1315 *f->adaptcoeffs = APESIGN(res) * 8;
1316 else if (absres <= f->avg * 3)
1317 *f->adaptcoeffs = APESIGN(res) * 16;
1318 else
1319 *f->adaptcoeffs = APESIGN(res) * 32;
1320 */
1321 else
1322 *f->adaptcoeffs = 0;
1323
1324 f->avg += (int)(absres - (unsigned)f->avg) / 16;
1325
1326 f->adaptcoeffs[-1] >>= 1;
1327 f->adaptcoeffs[-2] >>= 1;
1328 f->adaptcoeffs[-8] >>= 1;
1329 }
1330
1331 f->adaptcoeffs++;
1332
1333 /* Have we filled the history buffer? */
1334 if (f->delay == f->historybuffer + HISTORY_SIZE + (order * 2)) {
1335 memmove(f->historybuffer, f->delay - (order * 2),
1336 (order * 2) * sizeof(*f->historybuffer));
1337 f->delay = f->historybuffer + order * 2;
1338 f->adaptcoeffs = f->historybuffer + order;
1339 }
1340 }
1341 }
1342
1343 static void apply_filter(APEContext *ctx, APEFilter *f,
1344 int32_t *data0, int32_t *data1,
1345 int count, int order, int fracbits)
1346 {
1347 do_apply_filter(ctx, ctx->fileversion, &f[0], data0, count, order, fracbits);
1348 if (data1)
1349 do_apply_filter(ctx, ctx->fileversion, &f[1], data1, count, order, fracbits);
1350 }
1351
1352 static void ape_apply_filters(APEContext *ctx, int32_t *decoded0,
1353 int32_t *decoded1, int count)
1354 {
1355 int i;
1356
1357 for (i = 0; i < APE_FILTER_LEVELS; i++) {
1358 if (!ape_filter_orders[ctx->fset][i])
1359 break;
1360 apply_filter(ctx, ctx->filters[i], decoded0, decoded1, count,
1361 ape_filter_orders[ctx->fset][i],
1362 ape_filter_fracbits[ctx->fset][i]);
1363 }
1364 }
1365
1366 static int init_frame_decoder(APEContext *ctx)
1367 {
1368 int i, ret;
1369 if ((ret = init_entropy_decoder(ctx)) < 0)
1370 return ret;
1371 init_predictor_decoder(ctx);
1372
1373 for (i = 0; i < APE_FILTER_LEVELS; i++) {
1374 if (!ape_filter_orders[ctx->fset][i])
1375 break;
1376 init_filter(ctx, ctx->filters[i], ctx->filterbuf[i],
1377 ape_filter_orders[ctx->fset][i]);
1378 }
1379 return 0;
1380 }
1381
1382 static void ape_unpack_mono(APEContext *ctx, int count)
1383 {
1384 if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) {
1385 /* We are pure silence, so we're done. */
1386 av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence mono\n");
1387 return;
1388 }
1389
1390 ctx->entropy_decode_mono(ctx, count);
1391 if (ctx->error)
1392 return;
1393
1394 /* Now apply the predictor decoding */
1395 ctx->predictor_decode_mono(ctx, count);
1396
1397 /* Pseudo-stereo - just copy left channel to right channel */
1398 if (ctx->channels == 2) {
1399 memcpy(ctx->decoded[1], ctx->decoded[0], count * sizeof(*ctx->decoded[1]));
1400 }
1401 }
1402
1403 static void ape_unpack_stereo(APEContext *ctx, int count)
1404 {
1405 unsigned left, right;
1406 int32_t *decoded0 = ctx->decoded[0];
1407 int32_t *decoded1 = ctx->decoded[1];
1408
1409 if ((ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) == APE_FRAMECODE_STEREO_SILENCE) {
1410 /* We are pure silence, so we're done. */
1411 av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence stereo\n");
1412 return;
1413 }
1414
1415 ctx->entropy_decode_stereo(ctx, count);
1416 if (ctx->error)
1417 return;
1418
1419 /* Now apply the predictor decoding */
1420 ctx->predictor_decode_stereo(ctx, count);
1421
1422 /* Decorrelate and scale to output depth */
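/* decoded0 carries the channel difference and decoded1 the mid value:
 * left = mid - diff / 2, right = left + diff */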
1423 while (count--) {
1424 left = *decoded1 - (unsigned)(*decoded0 / 2);
1425 right = left + *decoded0;
1426
1427 *(decoded0++) = left;
1428 *(decoded1++) = right;
1429 }
1430 }
1431
1432 static int ape_decode_frame(AVCodecContext *avctx, void *data,
1433 int *got_frame_ptr, AVPacket *avpkt)
1434 {
1435 AVFrame *frame = data;
1436 const uint8_t *buf = avpkt->data;
1437 APEContext *s = avctx->priv_data;
1438 uint8_t *sample8;
1439 int16_t *sample16;
1440 int32_t *sample24;
1441 int i, ch, ret;
1442 int blockstodecode;
1443 uint64_t decoded_buffer_size;
1444
1445 /* this should never be negative, but bad things will happen if it is, so
1446 check it just to make sure. */
1447 av_assert0(s->samples >= 0);
1448
1449 if(!s->samples){
1450 uint32_t nblocks, offset;
1451 int buf_size;
1452
1453 if (!avpkt->size) {
1454 *got_frame_ptr = 0;
1455 return 0;
1456 }
1457 if (avpkt->size < 8) {
1458 av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
1459 return AVERROR_INVALIDDATA;
1460 }
1461 buf_size = avpkt->size & ~3;
1462 if (buf_size != avpkt->size) {
1463 av_log(avctx, AV_LOG_WARNING, "packet size is not a multiple of 4. "
1464 "extra bytes at the end will be skipped.\n");
1465 }
1466 if (s->fileversion < 3950) // previous versions overread two bytes
1467 buf_size += 2;
1468 av_fast_padded_malloc(&s->data, &s->data_size, buf_size);
1469 if (!s->data)
1470 return AVERROR(ENOMEM);
1471 s->bdsp.bswap_buf((uint32_t *) s->data, (const uint32_t *) buf,
1472 buf_size >> 2);
1473 memset(s->data + (buf_size & ~3), 0, buf_size & 3);
1474 s->ptr = s->data;
1475 s->data_end = s->data + buf_size;
1476
1477 nblocks = bytestream_get_be32(&s->ptr);
1478 offset = bytestream_get_be32(&s->ptr);
1479 if (s->fileversion >= 3900) {
1480 if (offset > 3) {
1481 av_log(avctx, AV_LOG_ERROR, "Incorrect offset passed\n");
1482 av_freep(&s->data);
1483 s->data_size = 0;
1484 return AVERROR_INVALIDDATA;
1485 }
1486 if (s->data_end - s->ptr < offset) {
1487 av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
1488 return AVERROR_INVALIDDATA;
1489 }
1490 s->ptr += offset;
1491 } else {
1492 if ((ret = init_get_bits8(&s->gb, s->ptr, s->data_end - s->ptr)) < 0)
1493 return ret;
1494 if (s->fileversion > 3800)
1495 skip_bits_long(&s->gb, offset * 8);
1496 else
1497 skip_bits_long(&s->gb, offset);
1498 }
1499
1500 if (!nblocks || nblocks > INT_MAX / 2 / sizeof(*s->decoded_buffer) - 8) {
1501 av_log(avctx, AV_LOG_ERROR, "Invalid sample count: %"PRIu32".\n",
1502 nblocks);
1503 return AVERROR_INVALIDDATA;
1504 }
1505
1506 /* Initialize the frame decoder */
1507 if (init_frame_decoder(s) < 0) {
1508 av_log(avctx, AV_LOG_ERROR, "Error reading frame header\n");
1509 return AVERROR_INVALIDDATA;
1510 }
1511 s->samples = nblocks;
1512 }
1513
1514 if (!s->data) {
1515 *got_frame_ptr = 0;
1516 return avpkt->size;
1517 }
1518
1519 blockstodecode = FFMIN(s->blocks_per_loop, s->samples);
1520 // for old files coefficients were not interleaved,
1521 // so we need to decode all of them at once
1522 if (s->fileversion < 3930)
1523 blockstodecode = s->samples;
1524
1525 /* reallocate decoded sample buffer if needed */
1526 decoded_buffer_size = 2LL * FFALIGN(blockstodecode, 8) * sizeof(*s->decoded_buffer);
1527 av_assert0(decoded_buffer_size <= INT_MAX);
1528
1529 /* get output buffer */
1530 frame->nb_samples = blockstodecode;
1531 if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
1532 s->samples=0;
1533 return ret;
1534 }
1535
1536 av_fast_malloc(&s->decoded_buffer, &s->decoded_size, decoded_buffer_size);
1537 if (!s->decoded_buffer)
1538 return AVERROR(ENOMEM);
1539 memset(s->decoded_buffer, 0, decoded_buffer_size);
1540 s->decoded[0] = s->decoded_buffer;
1541 s->decoded[1] = s->decoded_buffer + FFALIGN(blockstodecode, 8);
1542
1543 s->error=0;
1544
1545 if ((s->channels == 1) || (s->frameflags & APE_FRAMECODE_PSEUDO_STEREO))
1546 ape_unpack_mono(s, blockstodecode);
1547 else
1548 ape_unpack_stereo(s, blockstodecode);
1549 emms_c();
1550
1551 if (s->error) {
1552 s->samples=0;
1553 av_log(avctx, AV_LOG_ERROR, "Error decoding frame\n");
1554 return AVERROR_INVALIDDATA;
1555 }
1556
1557 switch (s->bps) {
1558 case 8:
1559 for (ch = 0; ch < s->channels; ch++) {
1560 sample8 = (uint8_t *)frame->data[ch];
1561 for (i = 0; i < blockstodecode; i++)
1562 *sample8++ = (s->decoded[ch][i] + 0x80) & 0xff;
1563 }
1564 break;
1565 case 16:
1566 for (ch = 0; ch < s->channels; ch++) {
1567 sample16 = (int16_t *)frame->data[ch];
1568 for (i = 0; i < blockstodecode; i++)
1569 *sample16++ = s->decoded[ch][i];
1570 }
1571 break;
1572 case 24:
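/* 24-bit samples end up in the top three bytes of the S32P output,
 * hence the multiplication by 256 (a left shift by 8 bits) */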
1573 for (ch = 0; ch < s->channels; ch++) {
1574 sample24 = (int32_t *)frame->data[ch];
1575 for (i = 0; i < blockstodecode; i++)
1576 *sample24++ = s->decoded[ch][i] * 256U;
1577 }
1578 break;
1579 }
1580
1581 s->samples -= blockstodecode;
1582
1583 if (avctx->err_recognition & AV_EF_CRCCHECK &&
1584 s->fileversion >= 3900 && s->bps < 24) {
1585 uint32_t crc = s->CRC_state;
1586 const AVCRC *crc_tab = av_crc_get_table(AV_CRC_32_IEEE_LE);
1587 for (i = 0; i < blockstodecode; i++) {
1588 for (ch = 0; ch < s->channels; ch++) {
1589 uint8_t *smp = frame->data[ch] + (i*(s->bps >> 3));
1590 crc = av_crc(crc_tab, crc, smp, s->bps >> 3);
1591 }
1592 }
1593
1594 if (!s->samples && (~crc >> 1) ^ s->CRC) {
1595 av_log(avctx, AV_LOG_ERROR, "CRC mismatch! Previously decoded "
1596 "frames may have been affected as well.\n");
1597 if (avctx->err_recognition & AV_EF_EXPLODE)
1598 return AVERROR_INVALIDDATA;
1599 }
1600
1601 s->CRC_state = crc;
1602 }
1603
1604 *got_frame_ptr = 1;
1605
1606 return !s->samples ? avpkt->size : 0;
1607 }
1608
1609 static void ape_flush(AVCodecContext *avctx)
1610 {
1611 APEContext *s = avctx->priv_data;
1612 s->samples= 0;
1613 }
1614
1615 #define OFFSET(x) offsetof(APEContext, x)
1616 #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM)
1617 static const AVOption options[] = {
1618 { "max_samples", "maximum number of samples decoded per call", OFFSET(blocks_per_loop), AV_OPT_TYPE_INT, { .i64 = 4608 }, 1, INT_MAX, PAR, "max_samples" },
1619 { "all", "no maximum. decode all samples for each packet at once", 0, AV_OPT_TYPE_CONST, { .i64 = INT_MAX }, INT_MIN, INT_MAX, PAR, "max_samples" },
1620 { NULL},
1621 };
1622
1623 static const AVClass ape_decoder_class = {
1624 .class_name = "APE decoder",
1625 .item_name = av_default_item_name,
1626 .option = options,
1627 .version = LIBAVUTIL_VERSION_INT,
1628 };
1629
1630 AVCodec ff_ape_decoder = {
1631 .name = "ape",
1632 .long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"),
1633 .type = AVMEDIA_TYPE_AUDIO,
1634 .id = AV_CODEC_ID_APE,
1635 .priv_data_size = sizeof(APEContext),
1636 .init = ape_decode_init,
1637 .close = ape_decode_close,
1638 .decode = ape_decode_frame,
1639 .capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DELAY |
1640 AV_CODEC_CAP_DR1,
1641 .flush = ape_flush,
1642 .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
1643 AV_SAMPLE_FMT_S16P,
1644 AV_SAMPLE_FMT_S32P,
1645 AV_SAMPLE_FMT_NONE },
1646 .priv_class = &ape_decoder_class,
1647 };
1648