1
2 /* audioopmodule - Module to perform operations on raw audio fragments (peak detection, format and rate conversion, u-law/A-law/ADPCM coding) */
3
4 #define PY_SSIZE_T_CLEAN
5
6 #include "Python.h"
7
8 #if defined(__CHAR_UNSIGNED__)
9 #if defined(signed)
10 /* This module currently does not work on systems where only unsigned
11 characters are available. Take it out of Setup. Sorry. */
12 #endif
13 #endif
14
15 static const int maxvals[] = {0, 0x7F, 0x7FFF, 0x7FFFFF, 0x7FFFFFFF};
16 /* -1 trick is needed on Windows to support -0x80000000 without a warning */
17 static const int minvals[] = {0, -0x80, -0x8000, -0x800000, -0x7FFFFFFF-1};
18 static const unsigned int masks[] = {0, 0xFF, 0xFFFF, 0xFFFFFF, 0xFFFFFFFF};
19
20 static int
21 fbound(double val, double minval, double maxval)
22 {
23 if (val > maxval)
24 val = maxval;
25 else if (val < minval + 1)
26 val = minval;
27 return (int)val;
28 }
29
30
31 /* Code shamelessly stolen from sox, 12.17.7, g711.c
32 ** (c) Craig Reese, Joe Campbell and Jeff Poskanzer 1989 */
33
34 /* From g711.c:
35 *
36 * December 30, 1994:
37 * Functions linear2alaw, linear2ulaw have been updated to correctly
38 * convert unquantized 16 bit values.
39 * Tables for direct u- to A-law and A- to u-law conversions have been
40 * corrected.
41 * Borge Lindberg, Center for PersonKommunikation, Aalborg University.
42 * bli@cpk.auc.dk
43 *
44 */
45 #define BIAS 0x84 /* define the add-in bias for 16 bit samples */
46 #define CLIP 32635
47 #define SIGN_BIT (0x80) /* Sign bit for an A-law byte. */
48 #define QUANT_MASK (0xf) /* Quantization field mask. */
49 #define SEG_SHIFT (4) /* Left shift for segment number. */
50 #define SEG_MASK (0x70) /* Segment field mask. */
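/* Layout of an A-law code word implied by the masks above: bit 7 is the
   sign, bits 6-4 the segment number and bits 3-0 the quantization value;
   st_linear2alaw() below XORs the assembled byte with 0x55 (even-bit
   inversion), using mask 0xD5 for non-negative inputs so the sign bit
   ends up set. */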
51
52 static const int16_t seg_aend[8] = {
53 0x1F, 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF
54 };
55 static const int16_t seg_uend[8] = {
56 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF
57 };
58
59 static int16_t
60 search(int16_t val, const int16_t *table, int size)
61 {
62 int i;
63
64 for (i = 0; i < size; i++) {
65 if (val <= *table++)
66 return (i);
67 }
68 return (size);
69 }
70 #define st_ulaw2linear16(uc) (_st_ulaw2linear16[uc])
71 #define st_alaw2linear16(uc) (_st_alaw2linear16[uc])
72
73 static const int16_t _st_ulaw2linear16[256] = {
74 -32124, -31100, -30076, -29052, -28028, -27004, -25980,
75 -24956, -23932, -22908, -21884, -20860, -19836, -18812,
76 -17788, -16764, -15996, -15484, -14972, -14460, -13948,
77 -13436, -12924, -12412, -11900, -11388, -10876, -10364,
78 -9852, -9340, -8828, -8316, -7932, -7676, -7420,
79 -7164, -6908, -6652, -6396, -6140, -5884, -5628,
80 -5372, -5116, -4860, -4604, -4348, -4092, -3900,
81 -3772, -3644, -3516, -3388, -3260, -3132, -3004,
82 -2876, -2748, -2620, -2492, -2364, -2236, -2108,
83 -1980, -1884, -1820, -1756, -1692, -1628, -1564,
84 -1500, -1436, -1372, -1308, -1244, -1180, -1116,
85 -1052, -988, -924, -876, -844, -812, -780,
86 -748, -716, -684, -652, -620, -588, -556,
87 -524, -492, -460, -428, -396, -372, -356,
88 -340, -324, -308, -292, -276, -260, -244,
89 -228, -212, -196, -180, -164, -148, -132,
90 -120, -112, -104, -96, -88, -80, -72,
91 -64, -56, -48, -40, -32, -24, -16,
92 -8, 0, 32124, 31100, 30076, 29052, 28028,
93 27004, 25980, 24956, 23932, 22908, 21884, 20860,
94 19836, 18812, 17788, 16764, 15996, 15484, 14972,
95 14460, 13948, 13436, 12924, 12412, 11900, 11388,
96 10876, 10364, 9852, 9340, 8828, 8316, 7932,
97 7676, 7420, 7164, 6908, 6652, 6396, 6140,
98 5884, 5628, 5372, 5116, 4860, 4604, 4348,
99 4092, 3900, 3772, 3644, 3516, 3388, 3260,
100 3132, 3004, 2876, 2748, 2620, 2492, 2364,
101 2236, 2108, 1980, 1884, 1820, 1756, 1692,
102 1628, 1564, 1500, 1436, 1372, 1308, 1244,
103 1180, 1116, 1052, 988, 924, 876, 844,
104 812, 780, 748, 716, 684, 652, 620,
105 588, 556, 524, 492, 460, 428, 396,
106 372, 356, 340, 324, 308, 292, 276,
107 260, 244, 228, 212, 196, 180, 164,
108 148, 132, 120, 112, 104, 96, 88,
109 80, 72, 64, 56, 48, 40, 32,
110 24, 16, 8, 0
111 };
112
113 /*
114 * linear2ulaw() accepts a 14-bit signed integer and encodes it as u-law data
115 * stored in an unsigned char. This function should only be called with
116 * the data shifted such that it only contains information in the lower
117 * 14-bits.
118 *
119 * In order to simplify the encoding process, the original linear magnitude
120 * is biased by adding 33 which shifts the encoding range from (0 - 8158) to
121 * (33 - 8191). The result can be seen in the following encoding table:
122 *
123 * Biased Linear Input Code Compressed Code
124 * ------------------------ ---------------
125 * 00000001wxyza 000wxyz
126 * 0000001wxyzab 001wxyz
127 * 000001wxyzabc 010wxyz
128 * 00001wxyzabcd 011wxyz
129 * 0001wxyzabcde 100wxyz
130 * 001wxyzabcdef 101wxyz
131 * 01wxyzabcdefg 110wxyz
132 * 1wxyzabcdefgh 111wxyz
133 *
134 * Each biased linear code has a leading 1 which identifies the segment
135 * number. The value of the segment number is equal to 7 minus the number
136 * of leading 0's. The quantization interval is directly available as the
137 * four bits wxyz.  The trailing bits (a - h) are ignored.
138 *
139 * Ordinarily the complement of the resulting code word is used for
140 * transmission, and so the code word is complemented before it is returned.
141 *
142 * For further information see John C. Bellamy's Digital Telephony, 1982,
143 * John Wiley & Sons, pps 98-111 and 472-476.
144 */
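/*
 * Worked example: encoding the positive 14-bit input 1000 sets mask = 0xFF
 * and biases the magnitude to 1033; search() against seg_uend yields
 * segment 5 (0x3FF < 1033 <= 0x7FF), so the code word is
 *     ((5 << 4) | ((1033 >> 6) & 0xF)) ^ 0xFF  =  0x50 ^ 0xFF  =  0xAF.
 * _st_ulaw2linear16[0xAF] decodes back to 4092, i.e. the input rescaled to
 * 16 bits (1000 << 2 = 4000) plus the quantization error of segment 5.
 */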
145 static unsigned char
146 st_14linear2ulaw(int16_t pcm_val)    /* 2's complement (14-bit range) */
147 {
148 int16_t mask;
149 int16_t seg;
150 unsigned char uval;
151
152 /* u-law inverts all bits */
153 /* Get the sign and the magnitude of the value. */
154 if (pcm_val < 0) {
155 pcm_val = -pcm_val;
156 mask = 0x7F;
157 } else {
158 mask = 0xFF;
159 }
160 if ( pcm_val > CLIP ) pcm_val = CLIP; /* clip the magnitude */
161 pcm_val += (BIAS >> 2);
162
163 /* Convert the scaled magnitude to segment number. */
164 seg = search(pcm_val, seg_uend, 8);
165
166 /*
167 * Combine the sign, segment, quantization bits;
168 * and complement the code word.
169 */
170 if (seg >= 8) /* out of range, return maximum value. */
171 return (unsigned char) (0x7F ^ mask);
172 else {
173 uval = (unsigned char) (seg << 4) | ((pcm_val >> (seg + 1)) & 0xF);
174 return (uval ^ mask);
175 }
176
177 }
178
179 static const int16_t _st_alaw2linear16[256] = {
180 -5504, -5248, -6016, -5760, -4480, -4224, -4992,
181 -4736, -7552, -7296, -8064, -7808, -6528, -6272,
182 -7040, -6784, -2752, -2624, -3008, -2880, -2240,
183 -2112, -2496, -2368, -3776, -3648, -4032, -3904,
184 -3264, -3136, -3520, -3392, -22016, -20992, -24064,
185 -23040, -17920, -16896, -19968, -18944, -30208, -29184,
186 -32256, -31232, -26112, -25088, -28160, -27136, -11008,
187 -10496, -12032, -11520, -8960, -8448, -9984, -9472,
188 -15104, -14592, -16128, -15616, -13056, -12544, -14080,
189 -13568, -344, -328, -376, -360, -280, -264,
190 -312, -296, -472, -456, -504, -488, -408,
191 -392, -440, -424, -88, -72, -120, -104,
192 -24, -8, -56, -40, -216, -200, -248,
193 -232, -152, -136, -184, -168, -1376, -1312,
194 -1504, -1440, -1120, -1056, -1248, -1184, -1888,
195 -1824, -2016, -1952, -1632, -1568, -1760, -1696,
196 -688, -656, -752, -720, -560, -528, -624,
197 -592, -944, -912, -1008, -976, -816, -784,
198 -880, -848, 5504, 5248, 6016, 5760, 4480,
199 4224, 4992, 4736, 7552, 7296, 8064, 7808,
200 6528, 6272, 7040, 6784, 2752, 2624, 3008,
201 2880, 2240, 2112, 2496, 2368, 3776, 3648,
202 4032, 3904, 3264, 3136, 3520, 3392, 22016,
203 20992, 24064, 23040, 17920, 16896, 19968, 18944,
204 30208, 29184, 32256, 31232, 26112, 25088, 28160,
205 27136, 11008, 10496, 12032, 11520, 8960, 8448,
206 9984, 9472, 15104, 14592, 16128, 15616, 13056,
207 12544, 14080, 13568, 344, 328, 376, 360,
208 280, 264, 312, 296, 472, 456, 504,
209 488, 408, 392, 440, 424, 88, 72,
210 120, 104, 24, 8, 56, 40, 216,
211 200, 248, 232, 152, 136, 184, 168,
212 1376, 1312, 1504, 1440, 1120, 1056, 1248,
213 1184, 1888, 1824, 2016, 1952, 1632, 1568,
214 1760, 1696, 688, 656, 752, 720, 560,
215 528, 624, 592, 944, 912, 1008, 976,
216 816, 784, 880, 848
217 };
218
219 /*
220 * linear2alaw() accepts a 13-bit signed integer and encodes it as A-law data
221 * stored in an unsigned char. This function should only be called with
222 * the data shifted such that it only contains information in the lower
223 * 13-bits.
224 *
225 * Linear Input Code Compressed Code
226 * ------------------------ ---------------
227 * 0000000wxyza 000wxyz
228 * 0000001wxyza 001wxyz
229 * 000001wxyzab 010wxyz
230 * 00001wxyzabc 011wxyz
231 * 0001wxyzabcd 100wxyz
232 * 001wxyzabcde 101wxyz
233 * 01wxyzabcdef 110wxyz
234 * 1wxyzabcdefg 111wxyz
235 *
236 * For further information see John C. Bellamy's Digital Telephony, 1982,
237 * John Wiley & Sons, pps 98-111 and 472-476.
238 */
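/*
 * Worked example: encoding the positive 13-bit input 1000 uses mask = 0xD5;
 * search() against seg_aend yields segment 5 (0x1FF < 1000 <= 0x3FF), so
 *     aval = (5 << SEG_SHIFT) | ((1000 >> 5) & QUANT_MASK) = 0x5F
 * and the returned code word is 0x5F ^ 0xD5 = 0x8A.  _st_alaw2linear16[0x8A]
 * decodes back to 7808, i.e. the input rescaled to 16 bits (1000 << 3 = 8000)
 * minus the quantization error of segment 5.
 */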
239 static unsigned char
240 st_linear2alaw(int16_t pcm_val)    /* 2's complement (13-bit range) */
241 {
242 int16_t mask;
243 int16_t seg;
244 unsigned char aval;
245
246 /* A-law using even bit inversion */
247 if (pcm_val >= 0) {
248 mask = 0xD5; /* sign (7th) bit = 1 */
249 } else {
250 mask = 0x55; /* sign bit = 0 */
251 pcm_val = -pcm_val - 1;
252 }
253
254 /* Convert the scaled magnitude to segment number. */
255 seg = search(pcm_val, seg_aend, 8);
256
257 /* Combine the sign, segment, and quantization bits. */
258
259 if (seg >= 8) /* out of range, return maximum value. */
260 return (unsigned char) (0x7F ^ mask);
261 else {
262 aval = (unsigned char) seg << SEG_SHIFT;
263 if (seg < 2)
264 aval |= (pcm_val >> 1) & QUANT_MASK;
265 else
266 aval |= (pcm_val >> seg) & QUANT_MASK;
267 return (aval ^ mask);
268 }
269 }
270 /* End of code taken from sox */
271
272 /* Intel ADPCM step variation table */
273 static const int indexTable[16] = {
274 -1, -1, -1, -1, 2, 4, 6, 8,
275 -1, -1, -1, -1, 2, 4, 6, 8,
276 };
277
278 static const int stepsizeTable[89] = {
279 7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
280 19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
281 50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
282 130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
283 337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
284 876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
285 2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
286 5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
287 15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
288 };
289
290 #define GETINTX(T, cp, i) (*(T *)((unsigned char *)(cp) + (i)))
291 #define SETINTX(T, cp, i, val) do { \
292 *(T *)((unsigned char *)(cp) + (i)) = (T)(val); \
293 } while (0)
294
295
296 #define GETINT8(cp, i) GETINTX(signed char, (cp), (i))
297 #define GETINT16(cp, i) GETINTX(int16_t, (cp), (i))
298 #define GETINT32(cp, i) GETINTX(int32_t, (cp), (i))
299
300 #if WORDS_BIGENDIAN
301 #define GETINT24(cp, i) ( \
302 ((unsigned char *)(cp) + (i))[2] + \
303 (((unsigned char *)(cp) + (i))[1] << 8) + \
304 (((signed char *)(cp) + (i))[0] << 16) )
305 #else
306 #define GETINT24(cp, i) ( \
307 ((unsigned char *)(cp) + (i))[0] + \
308 (((unsigned char *)(cp) + (i))[1] << 8) + \
309 (((signed char *)(cp) + (i))[2] << 16) )
310 #endif
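/* Note: the signed char cast on the most significant byte is what
   sign-extends a 24-bit sample. With the little-endian variant, for example,
   the bytes {0x01, 0x02, 0x83} give
   0x01 + (0x02 << 8) + ((signed char)0x83 << 16) = 513 - 8192000 = -8191487,
   which is 0x830201 read as a signed 24-bit two's-complement value. */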
311
312
313 #define SETINT8(cp, i, val) SETINTX(signed char, (cp), (i), (val))
314 #define SETINT16(cp, i, val) SETINTX(int16_t, (cp), (i), (val))
315 #define SETINT32(cp, i, val) SETINTX(int32_t, (cp), (i), (val))
316
317 #if WORDS_BIGENDIAN
318 #define SETINT24(cp, i, val) do { \
319 ((unsigned char *)(cp) + (i))[2] = (int)(val); \
320 ((unsigned char *)(cp) + (i))[1] = (int)(val) >> 8; \
321 ((signed char *)(cp) + (i))[0] = (int)(val) >> 16; \
322 } while (0)
323 #else
324 #define SETINT24(cp, i, val) do { \
325 ((unsigned char *)(cp) + (i))[0] = (int)(val); \
326 ((unsigned char *)(cp) + (i))[1] = (int)(val) >> 8; \
327 ((signed char *)(cp) + (i))[2] = (int)(val) >> 16; \
328 } while (0)
329 #endif
330
331
332 #define GETRAWSAMPLE(size, cp, i) ( \
333 (size == 1) ? (int)GETINT8((cp), (i)) : \
334 (size == 2) ? (int)GETINT16((cp), (i)) : \
335 (size == 3) ? (int)GETINT24((cp), (i)) : \
336 (int)GETINT32((cp), (i)))
337
338 #define SETRAWSAMPLE(size, cp, i, val) do { \
339 if (size == 1) \
340 SETINT8((cp), (i), (val)); \
341 else if (size == 2) \
342 SETINT16((cp), (i), (val)); \
343 else if (size == 3) \
344 SETINT24((cp), (i), (val)); \
345 else \
346 SETINT32((cp), (i), (val)); \
347 } while(0)
348
349
350 #define GETSAMPLE32(size, cp, i) ( \
351 (size == 1) ? (int)GETINT8((cp), (i)) << 24 : \
352 (size == 2) ? (int)GETINT16((cp), (i)) << 16 : \
353 (size == 3) ? (int)GETINT24((cp), (i)) << 8 : \
354 (int)GETINT32((cp), (i)))
355
356 #define SETSAMPLE32(size, cp, i, val) do { \
357 if (size == 1) \
358 SETINT8((cp), (i), (val) >> 24); \
359 else if (size == 2) \
360 SETINT16((cp), (i), (val) >> 16); \
361 else if (size == 3) \
362 SETINT24((cp), (i), (val) >> 8); \
363 else \
364 SETINT32((cp), (i), (val)); \
365 } while(0)
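/* Note: GETSAMPLE32/SETSAMPLE32 move every sample onto a common signed
   32-bit scale (a width-1 sample 0x12 reads as 0x12000000, for example),
   so the conversion routines below can operate on one representation
   regardless of the input width. */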
366
367
368 static PyObject *AudioopError;
369
370 static int
371 audioop_check_size(int size)
372 {
373 if (size < 1 || size > 4) {
374 PyErr_SetString(AudioopError, "Size should be 1, 2, 3 or 4");
375 return 0;
376 }
377 else
378 return 1;
379 }
380
381 static int
382 audioop_check_parameters(Py_ssize_t len, int size)
383 {
384 if (!audioop_check_size(size))
385 return 0;
386 if (len % size != 0) {
387 PyErr_SetString(AudioopError, "not a whole number of frames");
388 return 0;
389 }
390 return 1;
391 }
392
393 /*[clinic input]
394 module audioop
395 [clinic start generated code]*/
396 /*[clinic end generated code: output=da39a3ee5e6b4b0d input=8fa8f6611be3591a]*/
397
398 /*[clinic input]
399 audioop.getsample
400
401 fragment: Py_buffer
402 width: int
403 index: Py_ssize_t
404 /
405
406 Return the value of sample index from the fragment.
407 [clinic start generated code]*/
408
409 static PyObject *
410 audioop_getsample_impl(PyObject *module, Py_buffer *fragment, int width,
411 Py_ssize_t index)
412 /*[clinic end generated code: output=8fe1b1775134f39a input=88edbe2871393549]*/
413 {
414 int val;
415
416 if (!audioop_check_parameters(fragment->len, width))
417 return NULL;
418 if (index < 0 || index >= fragment->len/width) {
419 PyErr_SetString(AudioopError, "Index out of range");
420 return NULL;
421 }
422 val = GETRAWSAMPLE(width, fragment->buf, index*width);
423 return PyLong_FromLong(val);
424 }
425
426 /*[clinic input]
427 audioop.max
428
429 fragment: Py_buffer
430 width: int
431 /
432
433 Return the maximum of the absolute value of all samples in a fragment.
434 [clinic start generated code]*/
435
436 static PyObject *
437 audioop_max_impl(PyObject *module, Py_buffer *fragment, int width)
438 /*[clinic end generated code: output=e6c5952714f1c3f0 input=32bea5ea0ac8c223]*/
439 {
440 Py_ssize_t i;
441 unsigned int absval, max = 0;
442
443 if (!audioop_check_parameters(fragment->len, width))
444 return NULL;
445 for (i = 0; i < fragment->len; i += width) {
446 int val = GETRAWSAMPLE(width, fragment->buf, i);
447 /* Cast to unsigned before negating. Unsigned overflow is well-
448 defined, but signed overflow is not. */
449 if (val < 0) absval = (unsigned int)-(int64_t)val;
450 else absval = val;
451 if (absval > max) max = absval;
452 }
453 return PyLong_FromUnsignedLong(max);
454 }
455
456 /*[clinic input]
457 audioop.minmax
458
459 fragment: Py_buffer
460 width: int
461 /
462
463 Return the minimum and maximum values of all samples in the sound fragment.
464 [clinic start generated code]*/
465
466 static PyObject *
467 audioop_minmax_impl(PyObject *module, Py_buffer *fragment, int width)
468 /*[clinic end generated code: output=473fda66b15c836e input=89848e9b927a0696]*/
469 {
470 Py_ssize_t i;
471 /* -1 trick below is needed on Windows to support -0x80000000 without
472 a warning */
473 int min = 0x7fffffff, max = -0x7FFFFFFF-1;
474
475 if (!audioop_check_parameters(fragment->len, width))
476 return NULL;
477 for (i = 0; i < fragment->len; i += width) {
478 int val = GETRAWSAMPLE(width, fragment->buf, i);
479 if (val > max) max = val;
480 if (val < min) min = val;
481 }
482 return Py_BuildValue("(ii)", min, max);
483 }
484
485 /*[clinic input]
486 audioop.avg
487
488 fragment: Py_buffer
489 width: int
490 /
491
492 Return the average over all samples in the fragment.
493 [clinic start generated code]*/
494
495 static PyObject *
496 audioop_avg_impl(PyObject *module, Py_buffer *fragment, int width)
497 /*[clinic end generated code: output=4410a4c12c3586e6 input=1114493c7611334d]*/
498 {
499 Py_ssize_t i;
500 int avg;
501 double sum = 0.0;
502
503 if (!audioop_check_parameters(fragment->len, width))
504 return NULL;
505 for (i = 0; i < fragment->len; i += width)
506 sum += GETRAWSAMPLE(width, fragment->buf, i);
507 if (fragment->len == 0)
508 avg = 0;
509 else
510 avg = (int)floor(sum / (double)(fragment->len/width));
511 return PyLong_FromLong(avg);
512 }
513
514 /*[clinic input]
515 audioop.rms
516
517 fragment: Py_buffer
518 width: int
519 /
520
521 Return the root-mean-square of the fragment, i.e. sqrt(sum(S_i^2)/n).
522 [clinic start generated code]*/
523
524 static PyObject *
525 audioop_rms_impl(PyObject *module, Py_buffer *fragment, int width)
526 /*[clinic end generated code: output=1e7871c826445698 input=4cc57c6c94219d78]*/
527 {
528 Py_ssize_t i;
529 unsigned int res;
530 double sum_squares = 0.0;
531
532 if (!audioop_check_parameters(fragment->len, width))
533 return NULL;
534 for (i = 0; i < fragment->len; i += width) {
535 double val = GETRAWSAMPLE(width, fragment->buf, i);
536 sum_squares += val*val;
537 }
538 if (fragment->len == 0)
539 res = 0;
540 else
541 res = (unsigned int)sqrt(sum_squares / (double)(fragment->len/width));
542 return PyLong_FromUnsignedLong(res);
543 }
544
545 static double _sum2(const int16_t *a, const int16_t *b, Py_ssize_t len)
546 {
547 Py_ssize_t i;
548 double sum = 0.0;
549
550 for( i=0; i<len; i++) {
551 sum = sum + (double)a[i]*(double)b[i];
552 }
553 return sum;
554 }
555
556 /*
557 ** Findfit tries to locate a sample within another sample. Its main use
558 ** is in echo-cancellation (to find the feedback of the output signal in
559 ** the input signal).
560 ** The method used is as follows:
561 **
562 ** let R be the reference signal (length n) and A the input signal (length N)
563 ** with N > n, and let all sums be over i from 0 to n-1.
564 **
565 ** Now, for each j in {0..N-n} we compute a factor fj so that -fj*R matches A
566 ** as well as possible, i.e. sum( (A[j+i]+fj*R[i])^2 ) is minimal. This
567 ** equation gives fj = sum( A[j+i]R[i] ) / sum(R[i]^2).
568 **
569 ** Next, we compute the relative distance between the original signal and
570 ** the modified signal and minimize that over j:
571 ** vj = sum( (A[j+i]-fj*R[i])^2 ) / sum( A[j+i]^2 ) =>
572 ** vj = ( sum(A[j+i]^2)*sum(R[i]^2) - sum(A[j+i]R[i])^2 ) / sum( A[j+i]^2 )
573 **
574 ** In the code variables correspond as follows:
575 ** cp1 A
576 ** cp2 R
577 ** len1 N
578 ** len2 n
579 ** aj_m1 A[j-1]
580 ** aj_lm1 A[j+n-1]
581 ** sum_ri_2 sum(R[i]^2)
582 ** sum_aij_2 sum(A[i+j]^2)
583 ** sum_aij_ri sum(A[i+j]R[i])
584 **
585 ** sum_ri is calculated once, sum_aij_2 is updated each step and sum_aij_ri
586 ** is completely recalculated each step.
587 */
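/*
** Note: if the fragment contains an exact scaled copy of the reference at
** offset j0, i.e. A[j0+i] == c*R[i], then sum_aij_ri == c*sum(R[i]^2) and
** sum_aij_2 == c*c*sum(R[i]^2), so the relative distance vj0 is 0 (a
** perfect fit) and the factor returned for that offset is c.
*/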
588 /*[clinic input]
589 audioop.findfit
590
591 fragment: Py_buffer
592 reference: Py_buffer
593 /
594
595 Try to match reference as well as possible to a portion of fragment.
596 [clinic start generated code]*/
597
598 static PyObject *
599 audioop_findfit_impl(PyObject *module, Py_buffer *fragment,
600 Py_buffer *reference)
601 /*[clinic end generated code: output=5752306d83cbbada input=62c305605e183c9a]*/
602 {
603 const int16_t *cp1, *cp2;
604 Py_ssize_t len1, len2;
605 Py_ssize_t j, best_j;
606 double aj_m1, aj_lm1;
607 double sum_ri_2, sum_aij_2, sum_aij_ri, result, best_result, factor;
608
609 if (fragment->len & 1 || reference->len & 1) {
610 PyErr_SetString(AudioopError, "Strings should be even-sized");
611 return NULL;
612 }
613 cp1 = (const int16_t *)fragment->buf;
614 len1 = fragment->len >> 1;
615 cp2 = (const int16_t *)reference->buf;
616 len2 = reference->len >> 1;
617
618 if (len1 < len2) {
619 PyErr_SetString(AudioopError, "First sample should be longer");
620 return NULL;
621 }
622 sum_ri_2 = _sum2(cp2, cp2, len2);
623 sum_aij_2 = _sum2(cp1, cp1, len2);
624 sum_aij_ri = _sum2(cp1, cp2, len2);
625
626 result = (sum_ri_2*sum_aij_2 - sum_aij_ri*sum_aij_ri) / sum_aij_2;
627
628 best_result = result;
629 best_j = 0;
630
631 for ( j=1; j<=len1-len2; j++) {
632 aj_m1 = (double)cp1[j-1];
633 aj_lm1 = (double)cp1[j+len2-1];
634
635 sum_aij_2 = sum_aij_2 + aj_lm1*aj_lm1 - aj_m1*aj_m1;
636 sum_aij_ri = _sum2(cp1+j, cp2, len2);
637
638 result = (sum_ri_2*sum_aij_2 - sum_aij_ri*sum_aij_ri)
639 / sum_aij_2;
640
641 if ( result < best_result ) {
642 best_result = result;
643 best_j = j;
644 }
645
646 }
647
648 factor = _sum2(cp1+best_j, cp2, len2) / sum_ri_2;
649
650 return Py_BuildValue("(nf)", best_j, factor);
651 }
652
653 /*
654 ** findfactor finds a factor f so that the energy in A-fB is minimal.
655 ** See the comment for findfit for details.
656 */
657 /*[clinic input]
658 audioop.findfactor
659
660 fragment: Py_buffer
661 reference: Py_buffer
662 /
663
664 Return a factor F such that rms(add(fragment, mul(reference, -F))) is minimal.
665 [clinic start generated code]*/
666
667 static PyObject *
668 audioop_findfactor_impl(PyObject *module, Py_buffer *fragment,
669 Py_buffer *reference)
670 /*[clinic end generated code: output=14ea95652c1afcf8 input=816680301d012b21]*/
671 {
672 const int16_t *cp1, *cp2;
673 Py_ssize_t len;
674 double sum_ri_2, sum_aij_ri, result;
675
676 if (fragment->len & 1 || reference->len & 1) {
677 PyErr_SetString(AudioopError, "Strings should be even-sized");
678 return NULL;
679 }
680 if (fragment->len != reference->len) {
681 PyErr_SetString(AudioopError, "Samples should be same size");
682 return NULL;
683 }
684 cp1 = (const int16_t *)fragment->buf;
685 cp2 = (const int16_t *)reference->buf;
686 len = fragment->len >> 1;
687 sum_ri_2 = _sum2(cp2, cp2, len);
688 sum_aij_ri = _sum2(cp1, cp2, len);
689
690 result = sum_aij_ri / sum_ri_2;
691
692 return PyFloat_FromDouble(result);
693 }
694
695 /*
696 ** findmax returns the index of the n-sized segment of the input sample
697 ** that contains the most energy.
698 */
699 /*[clinic input]
700 audioop.findmax
701
702 fragment: Py_buffer
703 length: Py_ssize_t
704 /
705
706 Search fragment for a slice of specified number of samples with maximum energy.
707 [clinic start generated code]*/
708
709 static PyObject *
710 audioop_findmax_impl(PyObject *module, Py_buffer *fragment,
711 Py_ssize_t length)
712 /*[clinic end generated code: output=f008128233523040 input=2f304801ed42383c]*/
713 {
714 const int16_t *cp1;
715 Py_ssize_t len1;
716 Py_ssize_t j, best_j;
717 double aj_m1, aj_lm1;
718 double result, best_result;
719
720 if (fragment->len & 1) {
721 PyErr_SetString(AudioopError, "Strings should be even-sized");
722 return NULL;
723 }
724 cp1 = (const int16_t *)fragment->buf;
725 len1 = fragment->len >> 1;
726
727 if (length < 0 || len1 < length) {
728 PyErr_SetString(AudioopError, "Input sample should be longer");
729 return NULL;
730 }
731
732 result = _sum2(cp1, cp1, length);
733
734 best_result = result;
735 best_j = 0;
736
737 for ( j=1; j<=len1-length; j++) {
738 aj_m1 = (double)cp1[j-1];
739 aj_lm1 = (double)cp1[j+length-1];
740
741 result = result + aj_lm1*aj_lm1 - aj_m1*aj_m1;
742
743 if ( result > best_result ) {
744 best_result = result;
745 best_j = j;
746 }
747
748 }
749
750 return PyLong_FromSsize_t(best_j);
751 }
752
753 /*[clinic input]
754 audioop.avgpp
755
756 fragment: Py_buffer
757 width: int
758 /
759
760 Return the average peak-peak value over all samples in the fragment.
761 [clinic start generated code]*/
762
763 static PyObject *
764 audioop_avgpp_impl(PyObject *module, Py_buffer *fragment, int width)
765 /*[clinic end generated code: output=269596b0d5ae0b2b input=0b3cceeae420a7d9]*/
766 {
767 Py_ssize_t i;
768 int prevval, prevextremevalid = 0, prevextreme = 0;
769 double sum = 0.0;
770 unsigned int avg;
771 int diff, prevdiff, nextreme = 0;
772
773 if (!audioop_check_parameters(fragment->len, width))
774 return NULL;
775 if (fragment->len <= width)
776 return PyLong_FromLong(0);
777 prevval = GETRAWSAMPLE(width, fragment->buf, 0);
778 prevdiff = 17; /* Anything != 0, 1 */
779 for (i = width; i < fragment->len; i += width) {
780 int val = GETRAWSAMPLE(width, fragment->buf, i);
781 if (val != prevval) {
782 diff = val < prevval;
783 if (prevdiff == !diff) {
784 /* Derivative changed sign. Compute difference to last
785 ** extreme value and remember.
786 */
787 if (prevextremevalid) {
788 if (prevval < prevextreme)
789 sum += (double)((unsigned int)prevextreme -
790 (unsigned int)prevval);
791 else
792 sum += (double)((unsigned int)prevval -
793 (unsigned int)prevextreme);
794 nextreme++;
795 }
796 prevextremevalid = 1;
797 prevextreme = prevval;
798 }
799 prevval = val;
800 prevdiff = diff;
801 }
802 }
803 if ( nextreme == 0 )
804 avg = 0;
805 else
806 avg = (unsigned int)(sum / (double)nextreme);
807 return PyLong_FromUnsignedLong(avg);
808 }
809
810 /*[clinic input]
811 audioop.maxpp
812
813 fragment: Py_buffer
814 width: int
815 /
816
817 Return the maximum peak-peak value in the sound fragment.
818 [clinic start generated code]*/
819
820 static PyObject *
821 audioop_maxpp_impl(PyObject *module, Py_buffer *fragment, int width)
822 /*[clinic end generated code: output=5b918ed5dbbdb978 input=671a13e1518f80a1]*/
823 {
824 Py_ssize_t i;
825 int prevval, prevextremevalid = 0, prevextreme = 0;
826 unsigned int max = 0, extremediff;
827 int diff, prevdiff;
828
829 if (!audioop_check_parameters(fragment->len, width))
830 return NULL;
831 if (fragment->len <= width)
832 return PyLong_FromLong(0);
833 prevval = GETRAWSAMPLE(width, fragment->buf, 0);
834 prevdiff = 17; /* Anything != 0, 1 */
835 for (i = width; i < fragment->len; i += width) {
836 int val = GETRAWSAMPLE(width, fragment->buf, i);
837 if (val != prevval) {
838 diff = val < prevval;
839 if (prevdiff == !diff) {
840 /* Derivative changed sign. Compute difference to
841 ** last extreme value and remember.
842 */
843 if (prevextremevalid) {
844 if (prevval < prevextreme)
845 extremediff = (unsigned int)prevextreme -
846 (unsigned int)prevval;
847 else
848 extremediff = (unsigned int)prevval -
849 (unsigned int)prevextreme;
850 if ( extremediff > max )
851 max = extremediff;
852 }
853 prevextremevalid = 1;
854 prevextreme = prevval;
855 }
856 prevval = val;
857 prevdiff = diff;
858 }
859 }
860 return PyLong_FromUnsignedLong(max);
861 }
862
863 /*[clinic input]
864 audioop.cross
865
866 fragment: Py_buffer
867 width: int
868 /
869
870 Return the number of zero crossings in the fragment passed as an argument.
871 [clinic start generated code]*/
872
873 static PyObject *
874 audioop_cross_impl(PyObject *module, Py_buffer *fragment, int width)
875 /*[clinic end generated code: output=5938dcdd74a1f431 input=b1b3f15b83f6b41a]*/
876 {
877 Py_ssize_t i;
878 int prevval;
879 Py_ssize_t ncross;
880
881 if (!audioop_check_parameters(fragment->len, width))
882 return NULL;
883 ncross = -1;
884 prevval = 17; /* Anything <> 0,1 */
885 for (i = 0; i < fragment->len; i += width) {
886 int val = GETRAWSAMPLE(width, fragment->buf, i) < 0;
887 if (val != prevval) ncross++;
888 prevval = val;
889 }
890 return PyLong_FromSsize_t(ncross);
891 }
892
893 /*[clinic input]
894 audioop.mul
895
896 fragment: Py_buffer
897 width: int
898 factor: double
899 /
900
901 Return a fragment that has all samples in the original fragment multiplied by the floating-point value factor.
902 [clinic start generated code]*/
903
904 static PyObject *
905 audioop_mul_impl(PyObject *module, Py_buffer *fragment, int width,
906 double factor)
907 /*[clinic end generated code: output=6cd48fe796da0ea4 input=c726667baa157d3c]*/
908 {
909 signed char *ncp;
910 Py_ssize_t i;
911 double maxval, minval;
912 PyObject *rv;
913
914 if (!audioop_check_parameters(fragment->len, width))
915 return NULL;
916
917 maxval = (double) maxvals[width];
918 minval = (double) minvals[width];
919
920 rv = PyBytes_FromStringAndSize(NULL, fragment->len);
921 if (rv == NULL)
922 return NULL;
923 ncp = (signed char *)PyBytes_AsString(rv);
924
925 for (i = 0; i < fragment->len; i += width) {
926 double val = GETRAWSAMPLE(width, fragment->buf, i);
927 val *= factor;
928 val = floor(fbound(val, minval, maxval));
929 SETRAWSAMPLE(width, ncp, i, (int)val);
930 }
931 return rv;
932 }
933
934 /*[clinic input]
935 audioop.tomono
936
937 fragment: Py_buffer
938 width: int
939 lfactor: double
940 rfactor: double
941 /
942
943 Convert a stereo fragment to a mono fragment.
944 [clinic start generated code]*/
945
946 static PyObject *
947 audioop_tomono_impl(PyObject *module, Py_buffer *fragment, int width,
948 double lfactor, double rfactor)
949 /*[clinic end generated code: output=235c8277216d4e4e input=c4ec949b3f4dddfa]*/
950 {
951 signed char *cp, *ncp;
952 Py_ssize_t len, i;
953 double maxval, minval;
954 PyObject *rv;
955
956 cp = fragment->buf;
957 len = fragment->len;
958 if (!audioop_check_parameters(len, width))
959 return NULL;
960 if (((len / width) & 1) != 0) {
961 PyErr_SetString(AudioopError, "not a whole number of frames");
962 return NULL;
963 }
964
965 maxval = (double) maxvals[width];
966 minval = (double) minvals[width];
967
968 rv = PyBytes_FromStringAndSize(NULL, len/2);
969 if (rv == NULL)
970 return NULL;
971 ncp = (signed char *)PyBytes_AsString(rv);
972
973 for (i = 0; i < len; i += width*2) {
974 double val1 = GETRAWSAMPLE(width, cp, i);
975 double val2 = GETRAWSAMPLE(width, cp, i + width);
976 double val = val1*lfactor + val2*rfactor;
977 val = floor(fbound(val, minval, maxval));
978 SETRAWSAMPLE(width, ncp, i/2, val);
979 }
980 return rv;
981 }
982
983 /*[clinic input]
984 audioop.tostereo
985
986 fragment: Py_buffer
987 width: int
988 lfactor: double
989 rfactor: double
990 /
991
992 Generate a stereo fragment from a mono fragment.
993 [clinic start generated code]*/
994
995 static PyObject *
996 audioop_tostereo_impl(PyObject *module, Py_buffer *fragment, int width,
997 double lfactor, double rfactor)
998 /*[clinic end generated code: output=046f13defa5f1595 input=27b6395ebfdff37a]*/
999 {
1000 signed char *ncp;
1001 Py_ssize_t i;
1002 double maxval, minval;
1003 PyObject *rv;
1004
1005 if (!audioop_check_parameters(fragment->len, width))
1006 return NULL;
1007
1008 maxval = (double) maxvals[width];
1009 minval = (double) minvals[width];
1010
1011 if (fragment->len > PY_SSIZE_T_MAX/2) {
1012 PyErr_SetString(PyExc_MemoryError,
1013 "not enough memory for output buffer");
1014 return NULL;
1015 }
1016
1017 rv = PyBytes_FromStringAndSize(NULL, fragment->len*2);
1018 if (rv == NULL)
1019 return NULL;
1020 ncp = (signed char *)PyBytes_AsString(rv);
1021
1022 for (i = 0; i < fragment->len; i += width) {
1023 double val = GETRAWSAMPLE(width, fragment->buf, i);
1024 int val1 = (int)floor(fbound(val*lfactor, minval, maxval));
1025 int val2 = (int)floor(fbound(val*rfactor, minval, maxval));
1026 SETRAWSAMPLE(width, ncp, i*2, val1);
1027 SETRAWSAMPLE(width, ncp, i*2 + width, val2);
1028 }
1029 return rv;
1030 }
1031
1032 /*[clinic input]
1033 audioop.add
1034
1035 fragment1: Py_buffer
1036 fragment2: Py_buffer
1037 width: int
1038 /
1039
1040 Return a fragment which is the addition of the two samples passed as parameters.
1041 [clinic start generated code]*/
1042
1043 static PyObject *
1044 audioop_add_impl(PyObject *module, Py_buffer *fragment1,
1045 Py_buffer *fragment2, int width)
1046 /*[clinic end generated code: output=60140af4d1aab6f2 input=4a8d4bae4c1605c7]*/
1047 {
1048 signed char *ncp;
1049 Py_ssize_t i;
1050 int minval, maxval, newval;
1051 PyObject *rv;
1052
1053 if (!audioop_check_parameters(fragment1->len, width))
1054 return NULL;
1055 if (fragment1->len != fragment2->len) {
1056 PyErr_SetString(AudioopError, "Lengths should be the same");
1057 return NULL;
1058 }
1059
1060 maxval = maxvals[width];
1061 minval = minvals[width];
1062
1063 rv = PyBytes_FromStringAndSize(NULL, fragment1->len);
1064 if (rv == NULL)
1065 return NULL;
1066 ncp = (signed char *)PyBytes_AsString(rv);
1067
1068 for (i = 0; i < fragment1->len; i += width) {
1069 int val1 = GETRAWSAMPLE(width, fragment1->buf, i);
1070 int val2 = GETRAWSAMPLE(width, fragment2->buf, i);
1071
1072 if (width < 4) {
1073 newval = val1 + val2;
1074 /* truncate in case of overflow */
1075 if (newval > maxval)
1076 newval = maxval;
1077 else if (newval < minval)
1078 newval = minval;
1079 }
1080 else {
1081 double fval = (double)val1 + (double)val2;
1082 /* truncate in case of overflow */
1083 newval = (int)floor(fbound(fval, minval, maxval));
1084 }
1085
1086 SETRAWSAMPLE(width, ncp, i, newval);
1087 }
1088 return rv;
1089 }
1090
1091 /*[clinic input]
1092 audioop.bias
1093
1094 fragment: Py_buffer
1095 width: int
1096 bias: int
1097 /
1098
1099 Return a fragment that is the original fragment with a bias added to each sample.
1100 [clinic start generated code]*/
1101
1102 static PyObject *
1103 audioop_bias_impl(PyObject *module, Py_buffer *fragment, int width, int bias)
1104 /*[clinic end generated code: output=6e0aa8f68f045093 input=2b5cce5c3bb4838c]*/
1105 {
1106 signed char *ncp;
1107 Py_ssize_t i;
1108 unsigned int val = 0, mask;
1109 PyObject *rv;
1110
1111 if (!audioop_check_parameters(fragment->len, width))
1112 return NULL;
1113
1114 rv = PyBytes_FromStringAndSize(NULL, fragment->len);
1115 if (rv == NULL)
1116 return NULL;
1117 ncp = (signed char *)PyBytes_AsString(rv);
1118
1119 mask = masks[width];
1120
1121 for (i = 0; i < fragment->len; i += width) {
1122 if (width == 1)
1123 val = GETINTX(unsigned char, fragment->buf, i);
1124 else if (width == 2)
1125 val = GETINTX(uint16_t, fragment->buf, i);
1126 else if (width == 3)
1127 val = ((unsigned int)GETINT24(fragment->buf, i)) & 0xffffffu;
1128 else {
1129 assert(width == 4);
1130 val = GETINTX(uint32_t, fragment->buf, i);
1131 }
1132
1133 val += (unsigned int)bias;
1134 /* wrap around in case of overflow */
1135 val &= mask;
1136
1137 if (width == 1)
1138 SETINTX(unsigned char, ncp, i, val);
1139 else if (width == 2)
1140 SETINTX(uint16_t, ncp, i, val);
1141 else if (width == 3)
1142 SETINT24(ncp, i, (int)val);
1143 else {
1144 assert(width == 4);
1145 SETINTX(uint32_t, ncp, i, val);
1146 }
1147 }
1148 return rv;
1149 }
1150
1151 /*[clinic input]
1152 audioop.reverse
1153
1154 fragment: Py_buffer
1155 width: int
1156 /
1157
1158 Reverse the samples in a fragment and return the modified fragment.
1159 [clinic start generated code]*/
1160
1161 static PyObject *
1162 audioop_reverse_impl(PyObject *module, Py_buffer *fragment, int width)
1163 /*[clinic end generated code: output=b44135698418da14 input=668f890cf9f9d225]*/
1164 {
1165 unsigned char *ncp;
1166 Py_ssize_t i;
1167 PyObject *rv;
1168
1169 if (!audioop_check_parameters(fragment->len, width))
1170 return NULL;
1171
1172 rv = PyBytes_FromStringAndSize(NULL, fragment->len);
1173 if (rv == NULL)
1174 return NULL;
1175 ncp = (unsigned char *)PyBytes_AsString(rv);
1176
1177 for (i = 0; i < fragment->len; i += width) {
1178 int val = GETRAWSAMPLE(width, fragment->buf, i);
1179 SETRAWSAMPLE(width, ncp, fragment->len - i - width, val);
1180 }
1181 return rv;
1182 }
1183
1184 /*[clinic input]
1185 audioop.byteswap
1186
1187 fragment: Py_buffer
1188 width: int
1189 /
1190
1191 Convert big-endian samples to little-endian and vice versa.
1192 [clinic start generated code]*/
1193
1194 static PyObject *
1195 audioop_byteswap_impl(PyObject *module, Py_buffer *fragment, int width)
1196 /*[clinic end generated code: output=50838a9e4b87cd4d input=fae7611ceffa5c82]*/
1197 {
1198 unsigned char *ncp;
1199 Py_ssize_t i;
1200 PyObject *rv;
1201
1202 if (!audioop_check_parameters(fragment->len, width))
1203 return NULL;
1204
1205 rv = PyBytes_FromStringAndSize(NULL, fragment->len);
1206 if (rv == NULL)
1207 return NULL;
1208 ncp = (unsigned char *)PyBytes_AsString(rv);
1209
1210 for (i = 0; i < fragment->len; i += width) {
1211 int j;
1212 for (j = 0; j < width; j++)
1213 ncp[i + width - 1 - j] = ((unsigned char *)fragment->buf)[i + j];
1214 }
1215 return rv;
1216 }
1217
1218 /*[clinic input]
1219 audioop.lin2lin
1220
1221 fragment: Py_buffer
1222 width: int
1223 newwidth: int
1224 /
1225
1226 Convert samples between 1-, 2-, 3- and 4-byte formats.
1227 [clinic start generated code]*/
1228
1229 static PyObject *
1230 audioop_lin2lin_impl(PyObject *module, Py_buffer *fragment, int width,
1231 int newwidth)
1232 /*[clinic end generated code: output=17b14109248f1d99 input=5ce08c8aa2f24d96]*/
1233 {
1234 unsigned char *ncp;
1235 Py_ssize_t i, j;
1236 PyObject *rv;
1237
1238 if (!audioop_check_parameters(fragment->len, width))
1239 return NULL;
1240 if (!audioop_check_size(newwidth))
1241 return NULL;
1242
1243 if (fragment->len/width > PY_SSIZE_T_MAX/newwidth) {
1244 PyErr_SetString(PyExc_MemoryError,
1245 "not enough memory for output buffer");
1246 return NULL;
1247 }
1248 rv = PyBytes_FromStringAndSize(NULL, (fragment->len/width)*newwidth);
1249 if (rv == NULL)
1250 return NULL;
1251 ncp = (unsigned char *)PyBytes_AsString(rv);
1252
1253 for (i = j = 0; i < fragment->len; i += width, j += newwidth) {
1254 int val = GETSAMPLE32(width, fragment->buf, i);
1255 SETSAMPLE32(newwidth, ncp, j, val);
1256 }
1257 return rv;
1258 }
1259
1260 static int
1261 gcd(int a, int b)
1262 {
1263 while (b > 0) {
1264 int tmp = a % b;
1265 a = b;
1266 b = tmp;
1267 }
1268 return a;
1269 }
1270
1271 /*[clinic input]
1272 audioop.ratecv
1273
1274 fragment: Py_buffer
1275 width: int
1276 nchannels: int
1277 inrate: int
1278 outrate: int
1279 state: object
1280 weightA: int = 1
1281 weightB: int = 0
1282 /
1283
1284 Convert the frame rate of the input fragment.
1285 [clinic start generated code]*/
1286
1287 static PyObject *
1288 audioop_ratecv_impl(PyObject *module, Py_buffer *fragment, int width,
1289 int nchannels, int inrate, int outrate, PyObject *state,
1290 int weightA, int weightB)
1291 /*[clinic end generated code: output=624038e843243139 input=aff3acdc94476191]*/
1292 {
1293 char *cp, *ncp;
1294 Py_ssize_t len;
1295 int chan, d, *prev_i, *cur_i, cur_o;
1296 PyObject *samps, *str, *rv = NULL;
1297 int bytes_per_frame;
1298
1299 if (!audioop_check_size(width))
1300 return NULL;
1301 if (nchannels < 1) {
1302 PyErr_SetString(AudioopError, "# of channels should be >= 1");
1303 return NULL;
1304 }
1305 if (width > INT_MAX / nchannels) {
1306 /* This overflow test is rigorously correct because
1307 both multiplicands are >= 1. Use the argument names
1308 from the docs for the error msg. */
1309 PyErr_SetString(PyExc_OverflowError,
1310 "width * nchannels too big for a C int");
1311 return NULL;
1312 }
1313 bytes_per_frame = width * nchannels;
1314 if (weightA < 1 || weightB < 0) {
1315 PyErr_SetString(AudioopError,
1316 "weightA should be >= 1, weightB should be >= 0");
1317 return NULL;
1318 }
1319 assert(fragment->len >= 0);
1320 if (fragment->len % bytes_per_frame != 0) {
1321 PyErr_SetString(AudioopError, "not a whole number of frames");
1322 return NULL;
1323 }
1324 if (inrate <= 0 || outrate <= 0) {
1325 PyErr_SetString(AudioopError, "sampling rate not > 0");
1326 return NULL;
1327 }
1328 /* divide inrate and outrate by their greatest common divisor */
1329 d = gcd(inrate, outrate);
1330 inrate /= d;
1331 outrate /= d;
1332 /* divide weightA and weightB by their greatest common divisor */
1333 d = gcd(weightA, weightB);
1334 weightA /= d;
1335 weightB /= d;
1336
1337 if ((size_t)nchannels > SIZE_MAX/sizeof(int)) {
1338 PyErr_SetString(PyExc_MemoryError,
1339 "not enough memory for output buffer");
1340 return NULL;
1341 }
1342 prev_i = (int *) PyMem_Malloc(nchannels * sizeof(int));
1343 cur_i = (int *) PyMem_Malloc(nchannels * sizeof(int));
1344 if (prev_i == NULL || cur_i == NULL) {
1345 (void) PyErr_NoMemory();
1346 goto exit;
1347 }
1348
1349 len = fragment->len / bytes_per_frame; /* # of frames */
1350
1351 if (state == Py_None) {
1352 d = -outrate;
1353 for (chan = 0; chan < nchannels; chan++)
1354 prev_i[chan] = cur_i[chan] = 0;
1355 }
1356 else {
1357 if (!PyArg_ParseTuple(state,
1358 "iO!;audioop.ratecv: illegal state argument",
1359 &d, &PyTuple_Type, &samps))
1360 goto exit;
1361 if (PyTuple_Size(samps) != nchannels) {
1362 PyErr_SetString(AudioopError,
1363 "illegal state argument");
1364 goto exit;
1365 }
1366 for (chan = 0; chan < nchannels; chan++) {
1367 if (!PyArg_ParseTuple(PyTuple_GetItem(samps, chan),
1368 "ii:ratecv", &prev_i[chan],
1369 &cur_i[chan]))
1370 goto exit;
1371 }
1372 }
1373
1374 /* str <- Space for the output buffer. */
1375 if (len == 0)
1376 str = PyBytes_FromStringAndSize(NULL, 0);
1377 else {
1378 /* There are len input frames, so we need (mathematically)
1379 ceiling(len*outrate/inrate) output frames, and each frame
1380 requires bytes_per_frame bytes. Computing this
1381 without spurious overflow is the challenge; we can
1382 settle for a reasonable upper bound, though, in this
1383 case ceiling(len/inrate) * outrate. */
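/* Worked check of that bound: with len = 10, inrate = 3, outrate = 7 the
   exact requirement is ceiling(10*7/3) = 24 frames, while the allocation
   below reserves ceiling(10/3)*7 = 4*7 = 28 frames. */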
1384
1385 /* compute ceiling(len/inrate) without overflow */
1386 Py_ssize_t q = 1 + (len - 1) / inrate;
1387 if (outrate > PY_SSIZE_T_MAX / q / bytes_per_frame)
1388 str = NULL;
1389 else
1390 str = PyBytes_FromStringAndSize(NULL,
1391 q * outrate * bytes_per_frame);
1392 }
1393 if (str == NULL) {
1394 PyErr_SetString(PyExc_MemoryError,
1395 "not enough memory for output buffer");
1396 goto exit;
1397 }
1398 ncp = PyBytes_AsString(str);
1399 cp = fragment->buf;
1400
1401 for (;;) {
1402 while (d < 0) {
1403 if (len == 0) {
1404 samps = PyTuple_New(nchannels);
1405 if (samps == NULL)
1406 goto exit;
1407 for (chan = 0; chan < nchannels; chan++)
1408 PyTuple_SetItem(samps, chan,
1409 Py_BuildValue("(ii)",
1410 prev_i[chan],
1411 cur_i[chan]));
1412 if (PyErr_Occurred())
1413 goto exit;
1414 /* We have checked before that the length
1415 * of the string fits into int. */
1416 len = (Py_ssize_t)(ncp - PyBytes_AsString(str));
1417 rv = PyBytes_FromStringAndSize
1418 (PyBytes_AsString(str), len);
1419 Py_DECREF(str);
1420 str = rv;
1421 if (str == NULL)
1422 goto exit;
1423 rv = Py_BuildValue("(O(iO))", str, d, samps);
1424 Py_DECREF(samps);
1425 Py_DECREF(str);
1426 goto exit; /* return rv */
1427 }
1428 for (chan = 0; chan < nchannels; chan++) {
1429 prev_i[chan] = cur_i[chan];
1430 cur_i[chan] = GETSAMPLE32(width, cp, 0);
1431 cp += width;
1432 /* implements a simple digital filter */
1433 cur_i[chan] = (int)(
1434 ((double)weightA * (double)cur_i[chan] +
1435 (double)weightB * (double)prev_i[chan]) /
1436 ((double)weightA + (double)weightB));
1437 }
1438 len--;
1439 d += outrate;
1440 }
1441 while (d >= 0) {
1442 for (chan = 0; chan < nchannels; chan++) {
1443 cur_o = (int)(((double)prev_i[chan] * (double)d +
1444 (double)cur_i[chan] * (double)(outrate - d)) /
1445 (double)outrate);
1446 SETSAMPLE32(width, ncp, 0, cur_o);
1447 ncp += width;
1448 }
1449 d -= inrate;
1450 }
1451 }
1452 exit:
1453 PyMem_Free(prev_i);
1454 PyMem_Free(cur_i);
1455 return rv;
1456 }
1457
1458 /*[clinic input]
1459 audioop.lin2ulaw
1460
1461 fragment: Py_buffer
1462 width: int
1463 /
1464
1465 Convert samples in the audio fragment to u-LAW encoding.
1466 [clinic start generated code]*/
1467
1468 static PyObject *
1469 audioop_lin2ulaw_impl(PyObject *module, Py_buffer *fragment, int width)
1470 /*[clinic end generated code: output=14fb62b16fe8ea8e input=2450d1b870b6bac2]*/
1471 {
1472 unsigned char *ncp;
1473 Py_ssize_t i;
1474 PyObject *rv;
1475
1476 if (!audioop_check_parameters(fragment->len, width))
1477 return NULL;
1478
1479 rv = PyBytes_FromStringAndSize(NULL, fragment->len/width);
1480 if (rv == NULL)
1481 return NULL;
1482 ncp = (unsigned char *)PyBytes_AsString(rv);
1483
1484 for (i = 0; i < fragment->len; i += width) {
1485 int val = GETSAMPLE32(width, fragment->buf, i);
1486 *ncp++ = st_14linear2ulaw(val >> 18);
1487 }
1488 return rv;
1489 }
1490
1491 /*[clinic input]
1492 audioop.ulaw2lin
1493
1494 fragment: Py_buffer
1495 width: int
1496 /
1497
1498 Convert sound fragments in u-LAW encoding to linearly encoded sound fragments.
1499 [clinic start generated code]*/
1500
1501 static PyObject *
1502 audioop_ulaw2lin_impl(PyObject *module, Py_buffer *fragment, int width)
1503 /*[clinic end generated code: output=378356b047521ba2 input=45d53ddce5be7d06]*/
1504 {
1505 unsigned char *cp;
1506 signed char *ncp;
1507 Py_ssize_t i;
1508 PyObject *rv;
1509
1510 if (!audioop_check_size(width))
1511 return NULL;
1512
1513 if (fragment->len > PY_SSIZE_T_MAX/width) {
1514 PyErr_SetString(PyExc_MemoryError,
1515 "not enough memory for output buffer");
1516 return NULL;
1517 }
1518 rv = PyBytes_FromStringAndSize(NULL, fragment->len*width);
1519 if (rv == NULL)
1520 return NULL;
1521 ncp = (signed char *)PyBytes_AsString(rv);
1522
1523 cp = fragment->buf;
1524 for (i = 0; i < fragment->len*width; i += width) {
1525 int val = st_ulaw2linear16(*cp++) << 16;
1526 SETSAMPLE32(width, ncp, i, val);
1527 }
1528 return rv;
1529 }
1530
1531 /*[clinic input]
1532 audioop.lin2alaw
1533
1534 fragment: Py_buffer
1535 width: int
1536 /
1537
1538 Convert samples in the audio fragment to a-LAW encoding.
1539 [clinic start generated code]*/
1540
1541 static PyObject *
1542 audioop_lin2alaw_impl(PyObject *module, Py_buffer *fragment, int width)
1543 /*[clinic end generated code: output=d076f130121a82f0 input=ffb1ef8bb39da945]*/
1544 {
1545 unsigned char *ncp;
1546 Py_ssize_t i;
1547 PyObject *rv;
1548
1549 if (!audioop_check_parameters(fragment->len, width))
1550 return NULL;
1551
1552 rv = PyBytes_FromStringAndSize(NULL, fragment->len/width);
1553 if (rv == NULL)
1554 return NULL;
1555 ncp = (unsigned char *)PyBytes_AsString(rv);
1556
1557 for (i = 0; i < fragment->len; i += width) {
1558 int val = GETSAMPLE32(width, fragment->buf, i);
1559 *ncp++ = st_linear2alaw(val >> 19);
1560 }
1561 return rv;
1562 }
1563
1564 /*[clinic input]
1565 audioop.alaw2lin
1566
1567 fragment: Py_buffer
1568 width: int
1569 /
1570
1571 Convert sound fragments in a-LAW encoding to linearly encoded sound fragments.
1572 [clinic start generated code]*/
1573
1574 static PyObject *
1575 audioop_alaw2lin_impl(PyObject *module, Py_buffer *fragment, int width)
1576 /*[clinic end generated code: output=85c365ec559df647 input=4140626046cd1772]*/
1577 {
1578 unsigned char *cp;
1579 signed char *ncp;
1580 Py_ssize_t i;
1581 int val;
1582 PyObject *rv;
1583
1584 if (!audioop_check_size(width))
1585 return NULL;
1586
1587 if (fragment->len > PY_SSIZE_T_MAX/width) {
1588 PyErr_SetString(PyExc_MemoryError,
1589 "not enough memory for output buffer");
1590 return NULL;
1591 }
1592 rv = PyBytes_FromStringAndSize(NULL, fragment->len*width);
1593 if (rv == NULL)
1594 return NULL;
1595 ncp = (signed char *)PyBytes_AsString(rv);
1596 cp = fragment->buf;
1597
1598 for (i = 0; i < fragment->len*width; i += width) {
1599 val = st_alaw2linear16(*cp++) << 16;
1600 SETSAMPLE32(width, ncp, i, val);
1601 }
1602 return rv;
1603 }
1604
1605 /*[clinic input]
1606 audioop.lin2adpcm
1607
1608 fragment: Py_buffer
1609 width: int
1610 state: object
1611 /
1612
1613 Convert samples to 4 bit Intel/DVI ADPCM encoding.
1614 [clinic start generated code]*/
1615
1616 static PyObject *
1617 audioop_lin2adpcm_impl(PyObject *module, Py_buffer *fragment, int width,
1618 PyObject *state)
1619 /*[clinic end generated code: output=cc19f159f16c6793 input=12919d549b90c90a]*/
1620 {
1621 signed char *ncp;
1622 Py_ssize_t i;
1623 int step, valpred, delta,
1624 index, sign, vpdiff, diff;
1625 PyObject *rv = NULL, *str;
1626 int outputbuffer = 0, bufferstep;
1627
1628 if (!audioop_check_parameters(fragment->len, width))
1629 return NULL;
1630
1631 /* Decode state, should have (value, index) */
1632 if ( state == Py_None ) {
1633 /* First time, it seems. Set defaults */
1634 valpred = 0;
1635 index = 0;
1636 }
1637 else if (!PyTuple_Check(state)) {
1638 PyErr_SetString(PyExc_TypeError, "state must be a tuple or None");
1639 return NULL;
1640 }
1641 else if (!PyArg_ParseTuple(state, "ii", &valpred, &index)) {
1642 return NULL;
1643 }
1644 else if (valpred >= 0x8000 || valpred < -0x8000 ||
1645 (size_t)index >= Py_ARRAY_LENGTH(stepsizeTable)) {
1646 PyErr_SetString(PyExc_ValueError, "bad state");
1647 return NULL;
1648 }
1649
1650 str = PyBytes_FromStringAndSize(NULL, fragment->len/(width*2));
1651 if (str == NULL)
1652 return NULL;
1653 ncp = (signed char *)PyBytes_AsString(str);
1654
1655 step = stepsizeTable[index];
1656 bufferstep = 1;
1657
1658 for (i = 0; i < fragment->len; i += width) {
1659 int val = GETSAMPLE32(width, fragment->buf, i) >> 16;
1660
1661 /* Step 1 - compute difference with previous value */
1662 if (val < valpred) {
1663 diff = valpred - val;
1664 sign = 8;
1665 }
1666 else {
1667 diff = val - valpred;
1668 sign = 0;
1669 }
1670
1671 /* Step 2 - Divide and clamp */
1672 /* Note:
1673 ** This code *approximately* computes:
1674 ** delta = diff*4/step;
1675 ** vpdiff = (delta+0.5)*step/4;
1676 ** but in shift step bits are dropped. The net result of this
1677 ** is that even if you have fast mul/div hardware you cannot
1678 ** put it to good use since the fixup would be too expensive.
1679 */
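/* Worked example: with step = 16 and diff = 23 the comparisons below
** yield delta = 5 and vpdiff = 22, matching the intended approximations
** diff*4/step = 5.75 (truncated to 5) and (delta+0.5)*step/4 = 22.
*/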
1680 delta = 0;
1681 vpdiff = (step >> 3);
1682
1683 if ( diff >= step ) {
1684 delta = 4;
1685 diff -= step;
1686 vpdiff += step;
1687 }
1688 step >>= 1;
1689 if ( diff >= step ) {
1690 delta |= 2;
1691 diff -= step;
1692 vpdiff += step;
1693 }
1694 step >>= 1;
1695 if ( diff >= step ) {
1696 delta |= 1;
1697 vpdiff += step;
1698 }
1699
1700 /* Step 3 - Update previous value */
1701 if ( sign )
1702 valpred -= vpdiff;
1703 else
1704 valpred += vpdiff;
1705
1706 /* Step 4 - Clamp previous value to 16 bits */
1707 if ( valpred > 32767 )
1708 valpred = 32767;
1709 else if ( valpred < -32768 )
1710 valpred = -32768;
1711
1712 /* Step 5 - Assemble value, update index and step values */
1713 delta |= sign;
1714
1715 index += indexTable[delta];
1716 if ( index < 0 ) index = 0;
1717 if ( index > 88 ) index = 88;
1718 step = stepsizeTable[index];
1719
1720 /* Step 6 - Output value */
1721 if ( bufferstep ) {
1722 outputbuffer = (delta << 4) & 0xf0;
1723 } else {
1724 *ncp++ = (delta & 0x0f) | outputbuffer;
1725 }
1726 bufferstep = !bufferstep;
1727 }
1728 rv = Py_BuildValue("(O(ii))", str, valpred, index);
1729 Py_DECREF(str);
1730 return rv;
1731 }
1732
1733 /*[clinic input]
1734 audioop.adpcm2lin
1735
1736 fragment: Py_buffer
1737 width: int
1738 state: object
1739 /
1740
1741 Decode an Intel/DVI ADPCM coded fragment to a linear fragment.
1742 [clinic start generated code]*/
1743
1744 static PyObject *
1745 audioop_adpcm2lin_impl(PyObject *module, Py_buffer *fragment, int width,
1746 PyObject *state)
1747 /*[clinic end generated code: output=3440ea105acb3456 input=f5221144f5ca9ef0]*/
1748 {
1749 signed char *cp;
1750 signed char *ncp;
1751 Py_ssize_t i, outlen;
1752 int valpred, step, delta, index, sign, vpdiff;
1753 PyObject *rv, *str;
1754 int inputbuffer = 0, bufferstep;
1755
1756 if (!audioop_check_size(width))
1757 return NULL;
1758
1759 /* Decode state, should have (value, index) */
1760 if ( state == Py_None ) {
1761 /* First time, it seems. Set defaults */
1762 valpred = 0;
1763 index = 0;
1764 }
1765 else if (!PyTuple_Check(state)) {
1766 PyErr_SetString(PyExc_TypeError, "state must be a tuple or None");
1767 return NULL;
1768 }
1769 else if (!PyArg_ParseTuple(state, "ii", &valpred, &index)) {
1770 return NULL;
1771 }
1772 else if (valpred >= 0x8000 || valpred < -0x8000 ||
1773 (size_t)index >= Py_ARRAY_LENGTH(stepsizeTable)) {
1774 PyErr_SetString(PyExc_ValueError, "bad state");
1775 return NULL;
1776 }
1777
1778 if (fragment->len > (PY_SSIZE_T_MAX/2)/width) {
1779 PyErr_SetString(PyExc_MemoryError,
1780 "not enough memory for output buffer");
1781 return NULL;
1782 }
1783 outlen = fragment->len*width*2;
1784 str = PyBytes_FromStringAndSize(NULL, outlen);
1785 if (str == NULL)
1786 return NULL;
1787 ncp = (signed char *)PyBytes_AsString(str);
1788 cp = fragment->buf;
1789
1790 step = stepsizeTable[index];
1791 bufferstep = 0;
1792
1793 for (i = 0; i < outlen; i += width) {
1794 /* Step 1 - get the delta value and compute next index */
1795 if ( bufferstep ) {
1796 delta = inputbuffer & 0xf;
1797 } else {
1798 inputbuffer = *cp++;
1799 delta = (inputbuffer >> 4) & 0xf;
1800 }
1801
1802 bufferstep = !bufferstep;
1803
1804 /* Step 2 - Find new index value (for later) */
1805 index += indexTable[delta];
1806 if ( index < 0 ) index = 0;
1807 if ( index > 88 ) index = 88;
1808
1809 /* Step 3 - Separate sign and magnitude */
1810 sign = delta & 8;
1811 delta = delta & 7;
1812
1813 /* Step 4 - Compute difference and new predicted value */
1814 /*
1815 ** Computes 'vpdiff = (delta+0.5)*step/4', but see comment
1816 ** in adpcm_coder.
1817 */
1818 vpdiff = step >> 3;
1819 if ( delta & 4 ) vpdiff += step;
1820 if ( delta & 2 ) vpdiff += step>>1;
1821 if ( delta & 1 ) vpdiff += step>>2;
1822
1823 if ( sign )
1824 valpred -= vpdiff;
1825 else
1826 valpred += vpdiff;
1827
1828 /* Step 5 - clamp output value */
1829 if ( valpred > 32767 )
1830 valpred = 32767;
1831 else if ( valpred < -32768 )
1832 valpred = -32768;
1833
1834 /* Step 6 - Update step value */
1835 step = stepsizeTable[index];
1836
1837 /* Step 7 - Output value */
1838 SETSAMPLE32(width, ncp, i, valpred << 16);
1839 }
1840
1841 rv = Py_BuildValue("(O(ii))", str, valpred, index);
1842 Py_DECREF(str);
1843 return rv;
1844 }
1845
1846 #include "clinic/audioop.c.h"
1847
1848 static PyMethodDef audioop_methods[] = {
1849 AUDIOOP_MAX_METHODDEF
1850 AUDIOOP_MINMAX_METHODDEF
1851 AUDIOOP_AVG_METHODDEF
1852 AUDIOOP_MAXPP_METHODDEF
1853 AUDIOOP_AVGPP_METHODDEF
1854 AUDIOOP_RMS_METHODDEF
1855 AUDIOOP_FINDFIT_METHODDEF
1856 AUDIOOP_FINDMAX_METHODDEF
1857 AUDIOOP_FINDFACTOR_METHODDEF
1858 AUDIOOP_CROSS_METHODDEF
1859 AUDIOOP_MUL_METHODDEF
1860 AUDIOOP_ADD_METHODDEF
1861 AUDIOOP_BIAS_METHODDEF
1862 AUDIOOP_ULAW2LIN_METHODDEF
1863 AUDIOOP_LIN2ULAW_METHODDEF
1864 AUDIOOP_ALAW2LIN_METHODDEF
1865 AUDIOOP_LIN2ALAW_METHODDEF
1866 AUDIOOP_LIN2LIN_METHODDEF
1867 AUDIOOP_ADPCM2LIN_METHODDEF
1868 AUDIOOP_LIN2ADPCM_METHODDEF
1869 AUDIOOP_TOMONO_METHODDEF
1870 AUDIOOP_TOSTEREO_METHODDEF
1871 AUDIOOP_GETSAMPLE_METHODDEF
1872 AUDIOOP_REVERSE_METHODDEF
1873 AUDIOOP_BYTESWAP_METHODDEF
1874 AUDIOOP_RATECV_METHODDEF
1875 { 0, 0 }
1876 };
1877
1878
1879 static struct PyModuleDef audioopmodule = {
1880 PyModuleDef_HEAD_INIT,
1881 "audioop",
1882 NULL,
1883 -1,
1884 audioop_methods,
1885 NULL,
1886 NULL,
1887 NULL,
1888 NULL
1889 };
1890
1891 PyMODINIT_FUNC
1892 PyInit_audioop(void)
1893 {
1894 PyObject *m, *d;
1895 m = PyModule_Create(&audioopmodule);
1896 if (m == NULL)
1897 return NULL;
1898 d = PyModule_GetDict(m);
1899 if (d == NULL)
1900 return NULL;
1901 AudioopError = PyErr_NewException("audioop.error", NULL, NULL);
1902 if (AudioopError != NULL)
1903 PyDict_SetItemString(d,"error",AudioopError);
1904 return m;
1905 }
1906