/* libFLAC - Free Lossless Audio Codec library
 * Copyright (C) 2000-2009  Josh Coalson
 * Copyright (C) 2011-2022  Xiph.Org Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * - Neither the name of the Xiph.org Foundation nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
32 
33 #ifdef HAVE_CONFIG_H
34 #  include <config.h>
35 #endif
36 
37 #include <math.h>
38 #include <string.h>
39 #include "share/compat.h"
40 #include "private/bitmath.h"
41 #include "private/fixed.h"
42 #include "private/macros.h"
43 #include "FLAC/assert.h"
44 
#ifdef local_abs
#undef local_abs
#endif
/* |x| of a 32-bit signed value, returned as uint32_t.  Note that -(x) would
 * overflow for the most negative value; callers that can see such residuals
 * (the limit_residual variants) reject them before using this. */
#define local_abs(x) ((uint32_t)((x)<0? -(x) : (x)))

#ifdef local_abs64
#undef local_abs64
#endif
/* 64-bit counterpart of local_abs; same caveat for INT64_MIN. */
#define local_abs64(x) ((uint64_t)((x)<0? -(x) : (x)))
54 
55 #ifdef FLAC__INTEGER_ONLY_LIBRARY
56 /* rbps stands for residual bits per sample
57  *
58  *             (ln(2) * err)
59  * rbps = log  (-----------)
60  *           2 (     n     )
61  */
/* Compute the estimated residual bits per sample (see formula above) as a
 * FLAC__fixedpoint with 16 fractional bits.  err is the summed absolute
 * residual for the block, n the number of samples.  Returns 0 when
 * err/n <= 1, i.e. when the log would be non-positive. */
static FLAC__fixedpoint local__compute_rbps_integerized(FLAC__uint32 err, FLAC__uint32 n)
{
	FLAC__uint32 rbps;
	uint32_t bits; /* the number of bits required to represent a number */
	int fracbits; /* the number of bits of rbps that comprise the fractional part */

	FLAC__ASSERT(sizeof(rbps) == sizeof(FLAC__fixedpoint));
	FLAC__ASSERT(err > 0);
	FLAC__ASSERT(n > 0);

	FLAC__ASSERT(n <= FLAC__MAX_BLOCK_SIZE);
	if(err <= n)
		return 0;
	/*
	 * The above two things tell us 1) n fits in 16 bits; 2) err/n > 1.
	 * These allow us later to know we won't lose too much precision in the
	 * fixed-point division (err<<fracbits)/n.
	 */

	/* Normalize err so its most significant set bit becomes bit 31; the
	 * bits shifted in below the original magnitude are the fraction. */
	fracbits = (8*sizeof(err)) - (FLAC__bitmath_ilog2(err)+1);

	err <<= fracbits;
	err /= n;
	/* err now holds err/n with fracbits fractional bits */

	/*
	 * Whittle err down to 16 bits max.  16 significant bits is enough for
	 * our purposes.
	 */
	FLAC__ASSERT(err > 0);
	bits = FLAC__bitmath_ilog2(err)+1;
	if(bits > 16) {
		err >>= (bits-16);
		fracbits -= (int)(bits-16);
	}
	rbps = (FLAC__uint32)err;

	/* Multiply by fixed-point version of ln(2), with 16 fractional bits */
	rbps *= FLAC__FP_LN2;
	fracbits += 16;
	FLAC__ASSERT(fracbits >= 0);

	/* FLAC__fixedpoint_log2 requires fracbits%4 to be 0 */
	{
		const int f = fracbits & 3;
		if(f) {
			rbps >>= f;
			fracbits -= f;
		}
	}

	rbps = FLAC__fixedpoint_log2(rbps, fracbits, (uint32_t)(-1));

	if(rbps == 0)
		return 0;

	/*
	 * The return value must have 16 fractional bits.  Since the whole part
	 * of the base-2 log of a 32 bit number must fit in 5 bits, and fracbits
	 * must be >= -3, these assertion allows us to be able to shift rbps
	 * left if necessary to get 16 fracbits without losing any bits of the
	 * whole part of rbps.
	 *
	 * There is a slight chance due to accumulated error that the whole part
	 * will require 6 bits, so we use 6 in the assertion.  Really though as
	 * long as it fits in 13 bits (32 - (16 - (-3))) we are fine.
	 */
	FLAC__ASSERT((int)FLAC__bitmath_ilog2(rbps)+1 <= fracbits + 6);
	FLAC__ASSERT(fracbits >= -3);

	/* now shift the decimal point into place */
	if(fracbits < 16)
		return rbps << (16-fracbits);
	else if(fracbits > 16)
		return rbps >> (fracbits-16);
	else
		return rbps;
}
140 
/* Same as local__compute_rbps_integerized() but takes a 64-bit error sum,
 * for callers that accumulate totals too large for 32 bits.  Returns the
 * estimate as a FLAC__fixedpoint with 16 fractional bits. */
static FLAC__fixedpoint local__compute_rbps_wide_integerized(FLAC__uint64 err, FLAC__uint32 n)
{
	FLAC__uint32 rbps;
	uint32_t bits; /* the number of bits required to represent a number */
	int fracbits; /* the number of bits of rbps that comprise the fractional part */

	FLAC__ASSERT(sizeof(rbps) == sizeof(FLAC__fixedpoint));
	FLAC__ASSERT(err > 0);
	FLAC__ASSERT(n > 0);

	FLAC__ASSERT(n <= FLAC__MAX_BLOCK_SIZE);
	if(err <= n)
		return 0;
	/*
	 * The above two things tell us 1) n fits in 16 bits; 2) err/n > 1.
	 * These allow us later to know we won't lose too much precision in the
	 * fixed-point division (err<<fracbits)/n.
	 */

	/* Normalize err so its most significant set bit becomes bit 63; the
	 * bits shifted in below the original magnitude are the fraction. */
	fracbits = (8*sizeof(err)) - (FLAC__bitmath_ilog2_wide(err)+1);

	err <<= fracbits;
	err /= n;
	/* err now holds err/n with fracbits fractional bits */

	/*
	 * Whittle err down to 16 bits max.  16 significant bits is enough for
	 * our purposes.
	 */
	FLAC__ASSERT(err > 0);
	bits = FLAC__bitmath_ilog2_wide(err)+1;
	if(bits > 16) {
		err >>= (bits-16);
		fracbits -= (int)(bits-16); /* defined, but cast to int to avoid ubsan assert. */
	}
	rbps = (FLAC__uint32)err;

	/* Multiply by fixed-point version of ln(2), with 16 fractional bits */
	rbps *= FLAC__FP_LN2;
	fracbits += 16;
	FLAC__ASSERT(fracbits >= 0);

	/* FLAC__fixedpoint_log2 requires fracbits%4 to be 0 */
	{
		const int f = fracbits & 3;
		if(f) {
			rbps >>= f;
			fracbits -= f;
		}
	}

	rbps = FLAC__fixedpoint_log2(rbps, fracbits, (uint32_t)(-1));

	if(rbps == 0)
		return 0;

	/*
	 * The return value must have 16 fractional bits.  Since the whole part
	 * of the base-2 log of a 32 bit number must fit in 5 bits, and fracbits
	 * must be >= -3, these assertion allows us to be able to shift rbps
	 * left if necessary to get 16 fracbits without losing any bits of the
	 * whole part of rbps.
	 *
	 * There is a slight chance due to accumulated error that the whole part
	 * will require 6 bits, so we use 6 in the assertion.  Really though as
	 * long as it fits in 13 bits (32 - (16 - (-3))) we are fine.
	 */
	FLAC__ASSERT((int)FLAC__bitmath_ilog2(rbps)+1 <= fracbits + 6);
	FLAC__ASSERT(fracbits >= -3);

	/* now shift the decimal point into place */
	if(fracbits < 16)
		return rbps << (16-fracbits);
	else if(fracbits > 16)
		return rbps >> (fracbits-16);
	else
		return rbps;
}
219 #endif
220 
/* Pick the best fixed predictor order (0..4) for a block by accumulating,
 * in one pass, the sum of absolute residuals each order would produce.
 * The order with the smallest total error wins; ties go to the lower
 * order.  Also fills residual_bits_per_sample[0..4] with the estimated
 * bits per residual sample for every order.
 *
 * The caller must provide 4 warm-up samples before data[0]: the loop reads
 * data[i-1]..data[i-4] at i == 0.  Error totals are 32 bits here; the
 * _wide variant below uses 64-bit totals for signals that could overflow
 * these sums. */
#ifndef FLAC__INTEGER_ONLY_LIBRARY
uint32_t FLAC__fixed_compute_best_predictor(const FLAC__int32 data[], uint32_t data_len, float residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
#else
uint32_t FLAC__fixed_compute_best_predictor(const FLAC__int32 data[], uint32_t data_len, FLAC__fixedpoint residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
#endif
{
	FLAC__uint32 total_error_0 = 0, total_error_1 = 0, total_error_2 = 0, total_error_3 = 0, total_error_4 = 0;
	uint32_t order;
#if 0
	/* This code has been around a long time, and was written when compilers weren't able
	 * to vectorize code. These days, compilers are better in optimizing the next block
	 * which is also much more readable
	 */
	FLAC__int32 last_error_0 = data[-1];
	FLAC__int32 last_error_1 = data[-1] - data[-2];
	FLAC__int32 last_error_2 = last_error_1 - (data[-2] - data[-3]);
	FLAC__int32 last_error_3 = last_error_2 - (data[-2] - 2*data[-3] + data[-4]);
	FLAC__int32 error, save;
	uint32_t i;
	/* total_error_* are 64-bits to avoid overflow when encoding
	 * erratic signals when the bits-per-sample and blocksize are
	 * large.
	 */
	for(i = 0; i < data_len; i++) {
		error  = data[i]     ; total_error_0 += local_abs(error);                      save = error;
		error -= last_error_0; total_error_1 += local_abs(error); last_error_0 = save; save = error;
		error -= last_error_1; total_error_2 += local_abs(error); last_error_1 = save; save = error;
		error -= last_error_2; total_error_3 += local_abs(error); last_error_2 = save; save = error;
		error -= last_error_3; total_error_4 += local_abs(error); last_error_3 = save;
	}
#else
	int i;
	/* Each line is the residual of the order-k fixed predictor; the
	 * coefficients are binomial (1; 1,-1; 1,-2,1; ...). */
	for(i = 0; i < (int)data_len; i++) {
		total_error_0 += local_abs(data[i]);
		total_error_1 += local_abs(data[i] - data[i-1]);
		total_error_2 += local_abs(data[i] - 2 * data[i-1] + data[i-2]);
		total_error_3 += local_abs(data[i] - 3 * data[i-1] + 3 * data[i-2] - data[i-3]);
		total_error_4 += local_abs(data[i] - 4 * data[i-1] + 6 * data[i-2] - 4 * data[i-3] + data[i-4]);
	}
#endif


	/* prefer lower order */
	if(total_error_0 <= flac_min(flac_min(flac_min(total_error_1, total_error_2), total_error_3), total_error_4))
		order = 0;
	else if(total_error_1 <= flac_min(flac_min(total_error_2, total_error_3), total_error_4))
		order = 1;
	else if(total_error_2 <= flac_min(total_error_3, total_error_4))
		order = 2;
	else if(total_error_3 <= total_error_4)
		order = 3;
	else
		order = 4;

	/* Estimate the expected number of bits per residual signal sample. */
	/* 'total_error*' is linearly related to the variance of the residual */
	/* signal, so we use it directly to compute E(|x|) */
	FLAC__ASSERT(data_len > 0 || total_error_0 == 0);
	FLAC__ASSERT(data_len > 0 || total_error_1 == 0);
	FLAC__ASSERT(data_len > 0 || total_error_2 == 0);
	FLAC__ASSERT(data_len > 0 || total_error_3 == 0);
	FLAC__ASSERT(data_len > 0 || total_error_4 == 0);
#ifndef FLAC__INTEGER_ONLY_LIBRARY
	residual_bits_per_sample[0] = (float)((total_error_0 > 0) ? log(M_LN2 * (double)total_error_0 / (double)data_len) / M_LN2 : 0.0);
	residual_bits_per_sample[1] = (float)((total_error_1 > 0) ? log(M_LN2 * (double)total_error_1 / (double)data_len) / M_LN2 : 0.0);
	residual_bits_per_sample[2] = (float)((total_error_2 > 0) ? log(M_LN2 * (double)total_error_2 / (double)data_len) / M_LN2 : 0.0);
	residual_bits_per_sample[3] = (float)((total_error_3 > 0) ? log(M_LN2 * (double)total_error_3 / (double)data_len) / M_LN2 : 0.0);
	residual_bits_per_sample[4] = (float)((total_error_4 > 0) ? log(M_LN2 * (double)total_error_4 / (double)data_len) / M_LN2 : 0.0);
#else
	residual_bits_per_sample[0] = (total_error_0 > 0) ? local__compute_rbps_integerized(total_error_0, data_len) : 0;
	residual_bits_per_sample[1] = (total_error_1 > 0) ? local__compute_rbps_integerized(total_error_1, data_len) : 0;
	residual_bits_per_sample[2] = (total_error_2 > 0) ? local__compute_rbps_integerized(total_error_2, data_len) : 0;
	residual_bits_per_sample[3] = (total_error_3 > 0) ? local__compute_rbps_integerized(total_error_3, data_len) : 0;
	residual_bits_per_sample[4] = (total_error_4 > 0) ? local__compute_rbps_integerized(total_error_4, data_len) : 0;
#endif

	return order;
}
299 
/* Same as FLAC__fixed_compute_best_predictor() but accumulates the
 * per-order error totals in 64 bits, so it is safe for signals whose
 * summed absolute residuals would overflow 32-bit totals.  The caller
 * must still provide 4 warm-up samples before data[0]. */
#ifndef FLAC__INTEGER_ONLY_LIBRARY
uint32_t FLAC__fixed_compute_best_predictor_wide(const FLAC__int32 data[], uint32_t data_len, float residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
#else
uint32_t FLAC__fixed_compute_best_predictor_wide(const FLAC__int32 data[], uint32_t data_len, FLAC__fixedpoint residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
#endif
{
	FLAC__uint64 total_error_0 = 0, total_error_1 = 0, total_error_2 = 0, total_error_3 = 0, total_error_4 = 0;
	uint32_t order;
	int i;

	/* Each line is the residual of the order-k fixed predictor. */
	for(i = 0; i < (int)data_len; i++) {
		total_error_0 += local_abs(data[i]);
		total_error_1 += local_abs(data[i] - data[i-1]);
		total_error_2 += local_abs(data[i] - 2 * data[i-1] + data[i-2]);
		total_error_3 += local_abs(data[i] - 3 * data[i-1] + 3 * data[i-2] - data[i-3]);
		total_error_4 += local_abs(data[i] - 4 * data[i-1] + 6 * data[i-2] - 4 * data[i-3] + data[i-4]);
	}

	/* prefer lower order */
	if(total_error_0 <= flac_min(flac_min(flac_min(total_error_1, total_error_2), total_error_3), total_error_4))
		order = 0;
	else if(total_error_1 <= flac_min(flac_min(total_error_2, total_error_3), total_error_4))
		order = 1;
	else if(total_error_2 <= flac_min(total_error_3, total_error_4))
		order = 2;
	else if(total_error_3 <= total_error_4)
		order = 3;
	else
		order = 4;

	/* Estimate the expected number of bits per residual signal sample. */
	/* 'total_error*' is linearly related to the variance of the residual */
	/* signal, so we use it directly to compute E(|x|) */
	FLAC__ASSERT(data_len > 0 || total_error_0 == 0);
	FLAC__ASSERT(data_len > 0 || total_error_1 == 0);
	FLAC__ASSERT(data_len > 0 || total_error_2 == 0);
	FLAC__ASSERT(data_len > 0 || total_error_3 == 0);
	FLAC__ASSERT(data_len > 0 || total_error_4 == 0);
#ifndef FLAC__INTEGER_ONLY_LIBRARY
	residual_bits_per_sample[0] = (float)((total_error_0 > 0) ? log(M_LN2 * (double)total_error_0 / (double)data_len) / M_LN2 : 0.0);
	residual_bits_per_sample[1] = (float)((total_error_1 > 0) ? log(M_LN2 * (double)total_error_1 / (double)data_len) / M_LN2 : 0.0);
	residual_bits_per_sample[2] = (float)((total_error_2 > 0) ? log(M_LN2 * (double)total_error_2 / (double)data_len) / M_LN2 : 0.0);
	residual_bits_per_sample[3] = (float)((total_error_3 > 0) ? log(M_LN2 * (double)total_error_3 / (double)data_len) / M_LN2 : 0.0);
	residual_bits_per_sample[4] = (float)((total_error_4 > 0) ? log(M_LN2 * (double)total_error_4 / (double)data_len) / M_LN2 : 0.0);
#else
	residual_bits_per_sample[0] = (total_error_0 > 0) ? local__compute_rbps_wide_integerized(total_error_0, data_len) : 0;
	residual_bits_per_sample[1] = (total_error_1 > 0) ? local__compute_rbps_wide_integerized(total_error_1, data_len) : 0;
	residual_bits_per_sample[2] = (total_error_2 > 0) ? local__compute_rbps_wide_integerized(total_error_2, data_len) : 0;
	residual_bits_per_sample[3] = (total_error_3 > 0) ? local__compute_rbps_wide_integerized(total_error_3, data_len) : 0;
	residual_bits_per_sample[4] = (total_error_4 > 0) ? local__compute_rbps_wide_integerized(total_error_4, data_len) : 0;
#endif

	return order;
}
354 
355 #ifndef FLAC__INTEGER_ONLY_LIBRARY
356 #define CHECK_ORDER_IS_VALID(macro_order)		\
357 if(order_##macro_order##_is_valid && total_error_##macro_order < smallest_error) { \
358 	order = macro_order;				\
359 	smallest_error = total_error_##macro_order ;	\
360 	residual_bits_per_sample[ macro_order ] = (float)((total_error_0 > 0) ? log(M_LN2 * (double)total_error_0 / (double)data_len) / M_LN2 : 0.0); \
361 }							\
362 else							\
363 	residual_bits_per_sample[ macro_order ] = 34.0f;
364 #else
365 #define CHECK_ORDER_IS_VALID(macro_order)		\
366 if(order_##macro_order##_is_valid && total_error_##macro_order < smallest_error) { \
367 	order = macro_order;				\
368 	smallest_error = total_error_##macro_order ;	\
369 	residual_bits_per_sample[ macro_order ] = (total_error_##macro_order > 0) ? local__compute_rbps_wide_integerized(total_error_##macro_order, data_len) : 0; \
370 }							\
371 else							\
372 	residual_bits_per_sample[ macro_order ] = 34 * FLAC__FP_ONE;
373 #endif
374 
375 
/* Like FLAC__fixed_compute_best_predictor_wide() but additionally tracks,
 * per order, whether every residual fits in 32 bits; an order producing an
 * unrepresentable residual is disqualified via CHECK_ORDER_IS_VALID, which
 * assigns it a 34-bit sentinel estimate.  The loop starts at i = -4 (inside
 * the warm-up samples the caller provides before data[0]); each order k only
 * starts accumulating once enough history exists (the (i > k-4) guards).
 * Sample arithmetic is done in 64 bits to avoid intermediate overflow. */
#ifndef FLAC__INTEGER_ONLY_LIBRARY
uint32_t FLAC__fixed_compute_best_predictor_limit_residual(const FLAC__int32 data[], uint32_t data_len, float residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
#else
uint32_t FLAC__fixed_compute_best_predictor_limit_residual(const FLAC__int32 data[], uint32_t data_len, FLAC__fixedpoint residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
#endif
{
	FLAC__uint64 total_error_0 = 0, total_error_1 = 0, total_error_2 = 0, total_error_3 = 0, total_error_4 = 0, smallest_error = UINT64_MAX;
	FLAC__uint64 error_0, error_1, error_2, error_3, error_4;
	FLAC__bool order_0_is_valid = true, order_1_is_valid = true, order_2_is_valid = true, order_3_is_valid = true, order_4_is_valid = true;
	uint32_t order = 0;
	int i;

	for(i = -4; i < (int)data_len; i++) {
		error_0 = local_abs64((FLAC__int64)data[i]);
		error_1 = (i > -4) ? local_abs64((FLAC__int64)data[i] - data[i-1]) : 0 ;
		error_2 = (i > -3) ? local_abs64((FLAC__int64)data[i] - 2 * (FLAC__int64)data[i-1] + data[i-2]) : 0;
		error_3 = (i > -2) ? local_abs64((FLAC__int64)data[i] - 3 * (FLAC__int64)data[i-1] + 3 * (FLAC__int64)data[i-2] - data[i-3]) : 0;
		error_4 = (i > -1) ? local_abs64((FLAC__int64)data[i] - 4 * (FLAC__int64)data[i-1] + 6 * (FLAC__int64)data[i-2] - 4 * (FLAC__int64)data[i-3] + data[i-4]) : 0;

		total_error_0 += error_0;
		total_error_1 += error_1;
		total_error_2 += error_2;
		total_error_3 += error_3;
		total_error_4 += error_4;

		/* residual must not be INT32_MIN because abs(INT32_MIN) is undefined */
		if(error_0 > INT32_MAX)
			order_0_is_valid = false;
		if(error_1 > INT32_MAX)
			order_1_is_valid = false;
		if(error_2 > INT32_MAX)
			order_2_is_valid = false;
		if(error_3 > INT32_MAX)
			order_3_is_valid = false;
		if(error_4 > INT32_MAX)
			order_4_is_valid = false;
	}

	CHECK_ORDER_IS_VALID(0);
	CHECK_ORDER_IS_VALID(1);
	CHECK_ORDER_IS_VALID(2);
	CHECK_ORDER_IS_VALID(3);
	CHECK_ORDER_IS_VALID(4);

	return order;
}
422 
/* 33-bit-sample variant of FLAC__fixed_compute_best_predictor_limit_residual():
 * identical logic, but the input samples are FLAC__int64 (needed for 32-bit
 * PCM with mid/side, where the side channel needs 33 bits).  Orders whose
 * residual magnitude exceeds INT32_MAX are disqualified the same way. */
#ifndef FLAC__INTEGER_ONLY_LIBRARY
uint32_t FLAC__fixed_compute_best_predictor_limit_residual_33bit(const FLAC__int64 data[], uint32_t data_len, float residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
#else
uint32_t FLAC__fixed_compute_best_predictor_limit_residual_33bit(const FLAC__int64 data[], uint32_t data_len, FLAC__fixedpoint residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
#endif
{
	FLAC__uint64 total_error_0 = 0, total_error_1 = 0, total_error_2 = 0, total_error_3 = 0, total_error_4 = 0, smallest_error = UINT64_MAX;
	FLAC__uint64 error_0, error_1, error_2, error_3, error_4;
	FLAC__bool order_0_is_valid = true, order_1_is_valid = true, order_2_is_valid = true, order_3_is_valid = true, order_4_is_valid = true;
	uint32_t order = 0;
	int i;

	/* start inside the 4 warm-up samples; each order only accumulates
	 * once it has enough history (the (i > k-4) guards) */
	for(i = -4; i < (int)data_len; i++) {
		error_0 = local_abs64(data[i]);
		error_1 = (i > -4) ? local_abs64(data[i] - data[i-1]) : 0 ;
		error_2 = (i > -3) ? local_abs64(data[i] - 2 * data[i-1] + data[i-2]) : 0;
		error_3 = (i > -2) ? local_abs64(data[i] - 3 * data[i-1] + 3 * data[i-2] - data[i-3]) : 0;
		error_4 = (i > -1) ? local_abs64(data[i] - 4 * data[i-1] + 6 * data[i-2] - 4 * data[i-3] + data[i-4]) : 0;

		total_error_0 += error_0;
		total_error_1 += error_1;
		total_error_2 += error_2;
		total_error_3 += error_3;
		total_error_4 += error_4;

		/* residual must not be INT32_MIN because abs(INT32_MIN) is undefined */
		if(error_0 > INT32_MAX)
			order_0_is_valid = false;
		if(error_1 > INT32_MAX)
			order_1_is_valid = false;
		if(error_2 > INT32_MAX)
			order_2_is_valid = false;
		if(error_3 > INT32_MAX)
			order_3_is_valid = false;
		if(error_4 > INT32_MAX)
			order_4_is_valid = false;
	}

	CHECK_ORDER_IS_VALID(0);
	CHECK_ORDER_IS_VALID(1);
	CHECK_ORDER_IS_VALID(2);
	CHECK_ORDER_IS_VALID(3);
	CHECK_ORDER_IS_VALID(4);

	return order;
}
469 
/* Compute the residual signal of a fixed predictor of the given order.
 * The caller must supply `order` warm-up samples before data[0], since
 * residual[k] is formed from data[k] and up to four preceding samples.
 * All arithmetic is 32-bit; use the _wide variant when intermediate
 * values might not fit. */
void FLAC__fixed_compute_residual(const FLAC__int32 data[], uint32_t data_len, uint32_t order, FLAC__int32 residual[])
{
	const int len = (int)data_len;
	int k;

	if(order == 0) {
		FLAC__ASSERT(sizeof(residual[0]) == sizeof(data[0]));
		memcpy(residual, data, sizeof(residual[0])*data_len);
	}
	else if(order == 1) {
		for(k = 0; k < len; k++)
			residual[k] = data[k] - data[k-1];
	}
	else if(order == 2) {
		for(k = 0; k < len; k++)
			residual[k] = data[k] - 2*data[k-1] + data[k-2];
	}
	else if(order == 3) {
		for(k = 0; k < len; k++)
			residual[k] = data[k] - 3*data[k-1] + 3*data[k-2] - data[k-3];
	}
	else if(order == 4) {
		for(k = 0; k < len; k++)
			residual[k] = data[k] - 4*data[k-1] + 6*data[k-2] - 4*data[k-3] + data[k-4];
	}
	else {
		FLAC__ASSERT(0);
	}
}
500 
/* Same as FLAC__fixed_compute_residual() but widens the intermediate
 * arithmetic to 64 bits so the predictor sum cannot overflow; the result
 * assigned to residual[k] is expected to fit back into 32 bits. */
void FLAC__fixed_compute_residual_wide(const FLAC__int32 data[], uint32_t data_len, uint32_t order, FLAC__int32 residual[])
{
	const int len = (int)data_len;
	int k;

	if(order == 0) {
		FLAC__ASSERT(sizeof(residual[0]) == sizeof(data[0]));
		memcpy(residual, data, sizeof(residual[0])*data_len);
	}
	else if(order == 1) {
		for(k = 0; k < len; k++)
			residual[k] = (FLAC__int64)data[k] - data[k-1];
	}
	else if(order == 2) {
		for(k = 0; k < len; k++)
			residual[k] = (FLAC__int64)data[k] - 2*(FLAC__int64)data[k-1] + data[k-2];
	}
	else if(order == 3) {
		for(k = 0; k < len; k++)
			residual[k] = (FLAC__int64)data[k] - 3*(FLAC__int64)data[k-1] + 3*(FLAC__int64)data[k-2] - data[k-3];
	}
	else if(order == 4) {
		for(k = 0; k < len; k++)
			residual[k] = (FLAC__int64)data[k] - 4*(FLAC__int64)data[k-1] + 6*(FLAC__int64)data[k-2] - 4*(FLAC__int64)data[k-3] + data[k-4];
	}
	else {
		FLAC__ASSERT(0);
	}
}
531 
/* 33-bit-sample variant: input samples are FLAC__int64, so all predictor
 * arithmetic is naturally 64-bit.  Order 0 copies element-by-element
 * (narrowing each sample to 32 bits) rather than with memcpy, because
 * source and destination element sizes differ. */
void FLAC__fixed_compute_residual_wide_33bit(const FLAC__int64 data[], uint32_t data_len, uint32_t order, FLAC__int32 residual[])
{
	const int len = (int)data_len;
	int k;

	if(order == 0) {
		for(k = 0; k < len; k++)
			residual[k] = data[k];
	}
	else if(order == 1) {
		for(k = 0; k < len; k++)
			residual[k] = data[k] - data[k-1];
	}
	else if(order == 2) {
		for(k = 0; k < len; k++)
			residual[k] = data[k] - 2*data[k-1] + data[k-2];
	}
	else if(order == 3) {
		for(k = 0; k < len; k++)
			residual[k] = data[k] - 3*data[k-1] + 3*data[k-2] - data[k-3];
	}
	else if(order == 4) {
		for(k = 0; k < len; k++)
			residual[k] = data[k] - 4*data[k-1] + 6*data[k-2] - 4*data[k-3] + data[k-4];
	}
	else {
		FLAC__ASSERT(0);
	}
}
562 
563 #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && !defined(FUZZING_BUILD_MODE_FLAC_SANITIZE_SIGNED_INTEGER_OVERFLOW)
564 /* The attribute below is to silence the undefined sanitizer of oss-fuzz.
565  * Because fuzzing feeds bogus predictors and residual samples to the
566  * decoder, having overflows in this section is unavoidable. Also,
567  * because the calculated values are audio path only, there is no
568  * potential for security problems */
569 __attribute__((no_sanitize("signed-integer-overflow")))
570 #endif
FLAC__fixed_restore_signal(const FLAC__int32 residual[],uint32_t data_len,uint32_t order,FLAC__int32 data[])571 void FLAC__fixed_restore_signal(const FLAC__int32 residual[], uint32_t data_len, uint32_t order, FLAC__int32 data[])
572 {
573 	int i, idata_len = (int)data_len;
574 
575 	switch(order) {
576 		case 0:
577 			FLAC__ASSERT(sizeof(residual[0]) == sizeof(data[0]));
578 			memcpy(data, residual, sizeof(residual[0])*data_len);
579 			break;
580 		case 1:
581 			for(i = 0; i < idata_len; i++)
582 				data[i] = residual[i] + data[i-1];
583 			break;
584 		case 2:
585 			for(i = 0; i < idata_len; i++)
586 				data[i] = residual[i] + 2*data[i-1] - data[i-2];
587 			break;
588 		case 3:
589 			for(i = 0; i < idata_len; i++)
590 				data[i] = residual[i] + 3*data[i-1] - 3*data[i-2] + data[i-3];
591 			break;
592 		case 4:
593 			for(i = 0; i < idata_len; i++)
594 				data[i] = residual[i] + 4*data[i-1] - 6*data[i-2] + 4*data[i-3] - data[i-4];
595 			break;
596 		default:
597 			FLAC__ASSERT(0);
598 	}
599 }
600 
/* Same as FLAC__fixed_restore_signal() but performs the predictor sum in
 * 64 bits before narrowing back to the 32-bit output sample, so the
 * intermediate arithmetic cannot overflow. */
void FLAC__fixed_restore_signal_wide(const FLAC__int32 residual[], uint32_t data_len, uint32_t order, FLAC__int32 data[])
{
	const int len = (int)data_len;
	int k;

	if(order == 0) {
		FLAC__ASSERT(sizeof(residual[0]) == sizeof(data[0]));
		memcpy(data, residual, sizeof(residual[0])*data_len);
	}
	else if(order == 1) {
		for(k = 0; k < len; k++)
			data[k] = (FLAC__int64)residual[k] + (FLAC__int64)data[k-1];
	}
	else if(order == 2) {
		for(k = 0; k < len; k++)
			data[k] = (FLAC__int64)residual[k] + 2*(FLAC__int64)data[k-1] - (FLAC__int64)data[k-2];
	}
	else if(order == 3) {
		for(k = 0; k < len; k++)
			data[k] = (FLAC__int64)residual[k] + 3*(FLAC__int64)data[k-1] - 3*(FLAC__int64)data[k-2] + (FLAC__int64)data[k-3];
	}
	else if(order == 4) {
		for(k = 0; k < len; k++)
			data[k] = (FLAC__int64)residual[k] + 4*(FLAC__int64)data[k-1] - 6*(FLAC__int64)data[k-2] + 4*(FLAC__int64)data[k-3] - (FLAC__int64)data[k-4];
	}
	else {
		FLAC__ASSERT(0);
	}
}
630 
631 #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && !defined(FUZZING_BUILD_MODE_FLAC_SANITIZE_SIGNED_INTEGER_OVERFLOW)
632 /* The attribute below is to silence the undefined sanitizer of oss-fuzz.
633  * Because fuzzing feeds bogus predictors and residual samples to the
634  * decoder, having overflows in this section is unavoidable. Also,
635  * because the calculated values are audio path only, there is no
636  * potential for security problems */
637 __attribute__((no_sanitize("signed-integer-overflow")))
638 #endif
FLAC__fixed_restore_signal_wide_33bit(const FLAC__int32 residual[],uint32_t data_len,uint32_t order,FLAC__int64 data[])639 void FLAC__fixed_restore_signal_wide_33bit(const FLAC__int32 residual[], uint32_t data_len, uint32_t order, FLAC__int64 data[])
640 {
641 	int i, idata_len = (int)data_len;
642 
643 	switch(order) {
644 		case 0:
645 			for(i = 0; i < idata_len; i++)
646 				data[i] = residual[i];
647 			break;
648 		case 1:
649 			for(i = 0; i < idata_len; i++)
650 				data[i] = (FLAC__int64)residual[i] + data[i-1];
651 			break;
652 		case 2:
653 			for(i = 0; i < idata_len; i++)
654 				data[i] = (FLAC__int64)residual[i] + 2*data[i-1] - data[i-2];
655 			break;
656 		case 3:
657 			for(i = 0; i < idata_len; i++)
658 				data[i] = (FLAC__int64)residual[i] + 3*data[i-1] - 3*data[i-2] + data[i-3];
659 			break;
660 		case 4:
661 			for(i = 0; i < idata_len; i++)
662 				data[i] = (FLAC__int64)residual[i] + 4*data[i-1] - 6*data[i-2] + 4*data[i-3] - data[i-4];
663 			break;
664 		default:
665 			FLAC__ASSERT(0);
666 	}
667 }
668