/* audioopmodule - Module to do various audio operations */
3
4 #define PY_SSIZE_T_CLEAN
5
6 #include "Python.h"
7
8 #if defined(__CHAR_UNSIGNED__)
9 #if defined(signed)
10 /* This module currently does not work on systems where only unsigned
11 characters are available. Take it out of Setup. Sorry. */
12 #endif
13 #endif
14
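/* Per-width limits and masks, indexed by the sample width in bytes (1..4);
   index 0 is unused. */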
15 static const int maxvals[] = {0, 0x7F, 0x7FFF, 0x7FFFFF, 0x7FFFFFFF};
16 /* -1 trick is needed on Windows to support -0x80000000 without a warning */
17 static const int minvals[] = {0, -0x80, -0x8000, -0x800000, -0x7FFFFFFF-1};
18 static const unsigned int masks[] = {0, 0xFF, 0xFFFF, 0xFFFFFF, 0xFFFFFFFF};
19
20 static int
fbound(double val, double minval, double maxval)
22 {
23 if (val > maxval) {
24 val = maxval;
25 }
26 else if (val < minval + 1.0) {
27 val = minval;
28 }
29
30 /* Round towards minus infinity (-inf) */
31 val = floor(val);
32
33 /* Cast double to integer: round towards zero */
34 return (int)val;
35 }
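/* Illustrative examples of fbound() (not exercised anywhere in this file):
 *   fbound(3.7,    -128.0, 127.0) ->    3   (floor)
 *   fbound(127.8,  -128.0, 127.0) ->  127   (clamped to maxval)
 *   fbound(-130.2, -128.0, 127.0) -> -128   (clamped to minval)
 */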
36
37
38 /* Code shamelessly stolen from sox, 12.17.7, g711.c
39 ** (c) Craig Reese, Joe Campbell and Jeff Poskanzer 1989 */
40
41 /* From g711.c:
42 *
43 * December 30, 1994:
44 * Functions linear2alaw, linear2ulaw have been updated to correctly
45 * convert unquantized 16 bit values.
46 * Tables for direct u- to A-law and A- to u-law conversions have been
47 * corrected.
48 * Borge Lindberg, Center for PersonKommunikation, Aalborg University.
49 * bli@cpk.auc.dk
50 *
51 */
52 #define BIAS 0x84 /* define the add-in bias for 16 bit samples */
53 #define CLIP 32635
54 #define SIGN_BIT (0x80) /* Sign bit for an A-law byte. */
55 #define QUANT_MASK (0xf) /* Quantization field mask. */
56 #define SEG_SHIFT (4) /* Left shift for segment number. */
57 #define SEG_MASK (0x70) /* Segment field mask. */
58
59 static const int16_t seg_aend[8] = {
60 0x1F, 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF
61 };
62 static const int16_t seg_uend[8] = {
63 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF
64 };
65
66 static int16_t
search(int16_t val, const int16_t *table, int size)
68 {
69 int i;
70
71 for (i = 0; i < size; i++) {
72 if (val <= *table++)
73 return (i);
74 }
75 return (size);
76 }
77 #define st_ulaw2linear16(uc) (_st_ulaw2linear16[uc])
78 #define st_alaw2linear16(uc) (_st_alaw2linear16[uc])
79
80 static const int16_t _st_ulaw2linear16[256] = {
81 -32124, -31100, -30076, -29052, -28028, -27004, -25980,
82 -24956, -23932, -22908, -21884, -20860, -19836, -18812,
83 -17788, -16764, -15996, -15484, -14972, -14460, -13948,
84 -13436, -12924, -12412, -11900, -11388, -10876, -10364,
85 -9852, -9340, -8828, -8316, -7932, -7676, -7420,
86 -7164, -6908, -6652, -6396, -6140, -5884, -5628,
87 -5372, -5116, -4860, -4604, -4348, -4092, -3900,
88 -3772, -3644, -3516, -3388, -3260, -3132, -3004,
89 -2876, -2748, -2620, -2492, -2364, -2236, -2108,
90 -1980, -1884, -1820, -1756, -1692, -1628, -1564,
91 -1500, -1436, -1372, -1308, -1244, -1180, -1116,
92 -1052, -988, -924, -876, -844, -812, -780,
93 -748, -716, -684, -652, -620, -588, -556,
94 -524, -492, -460, -428, -396, -372, -356,
95 -340, -324, -308, -292, -276, -260, -244,
96 -228, -212, -196, -180, -164, -148, -132,
97 -120, -112, -104, -96, -88, -80, -72,
98 -64, -56, -48, -40, -32, -24, -16,
99 -8, 0, 32124, 31100, 30076, 29052, 28028,
100 27004, 25980, 24956, 23932, 22908, 21884, 20860,
101 19836, 18812, 17788, 16764, 15996, 15484, 14972,
102 14460, 13948, 13436, 12924, 12412, 11900, 11388,
103 10876, 10364, 9852, 9340, 8828, 8316, 7932,
104 7676, 7420, 7164, 6908, 6652, 6396, 6140,
105 5884, 5628, 5372, 5116, 4860, 4604, 4348,
106 4092, 3900, 3772, 3644, 3516, 3388, 3260,
107 3132, 3004, 2876, 2748, 2620, 2492, 2364,
108 2236, 2108, 1980, 1884, 1820, 1756, 1692,
109 1628, 1564, 1500, 1436, 1372, 1308, 1244,
110 1180, 1116, 1052, 988, 924, 876, 844,
111 812, 780, 748, 716, 684, 652, 620,
112 588, 556, 524, 492, 460, 428, 396,
113 372, 356, 340, 324, 308, 292, 276,
114 260, 244, 228, 212, 196, 180, 164,
115 148, 132, 120, 112, 104, 96, 88,
116 80, 72, 64, 56, 48, 40, 32,
117 24, 16, 8, 0
118 };
119
120 /*
121 * linear2ulaw() accepts a 14-bit signed integer and encodes it as u-law data
122 * stored in an unsigned char. This function should only be called with
123 * the data shifted such that it only contains information in the lower
124 * 14-bits.
125 *
126 * In order to simplify the encoding process, the original linear magnitude
127 * is biased by adding 33 which shifts the encoding range from (0 - 8158) to
128 * (33 - 8191). The result can be seen in the following encoding table:
129 *
130 * Biased Linear Input Code Compressed Code
131 * ------------------------ ---------------
132 * 00000001wxyza 000wxyz
133 * 0000001wxyzab 001wxyz
134 * 000001wxyzabc 010wxyz
135 * 00001wxyzabcd 011wxyz
136 * 0001wxyzabcde 100wxyz
137 * 001wxyzabcdef 101wxyz
138 * 01wxyzabcdefg 110wxyz
139 * 1wxyzabcdefgh 111wxyz
140 *
141 * Each biased linear code has a leading 1 which identifies the segment
142 * number. The value of the segment number is equal to 7 minus the number
143 * of leading 0's. The quantization interval is directly available as the
 * four bits wxyz.  The trailing bits (a - h) are ignored.
145 *
146 * Ordinarily the complement of the resulting code word is used for
147 * transmission, and so the code word is complemented before it is returned.
148 *
149 * For further information see John C. Bellamy's Digital Telephony, 1982,
150 * John Wiley & Sons, pps 98-111 and 472-476.
151 */
152 static unsigned char
st_14linear2ulaw(int16_t pcm_val)       /* 2's complement (14-bit range) */
154 {
155 int16_t mask;
156 int16_t seg;
157 unsigned char uval;
158
159 /* u-law inverts all bits */
160 /* Get the sign and the magnitude of the value. */
161 if (pcm_val < 0) {
162 pcm_val = -pcm_val;
163 mask = 0x7F;
164 } else {
165 mask = 0xFF;
166 }
167 if ( pcm_val > CLIP ) pcm_val = CLIP; /* clip the magnitude */
168 pcm_val += (BIAS >> 2);
169
170 /* Convert the scaled magnitude to segment number. */
171 seg = search(pcm_val, seg_uend, 8);
172
173 /*
174 * Combine the sign, segment, quantization bits;
175 * and complement the code word.
176 */
177 if (seg >= 8) /* out of range, return maximum value. */
178 return (unsigned char) (0x7F ^ mask);
179 else {
180 uval = (unsigned char) (seg << 4) | ((pcm_val >> (seg + 1)) & 0xF);
181 return (uval ^ mask);
182 }
183
184 }
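/* Worked example (for illustration only): st_14linear2ulaw(1000) biases the
 * value to 1033, which falls in segment 5 with quantization bits 0, giving
 * code 0x50; XOR with the 0xFF mask yields 0xAF.  Decoding 0xAF through
 * _st_ulaw2linear16[] gives 4092, i.e. roughly the original value rescaled
 * to 16 bits (1000 << 2 == 4000). */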
185
186 static const int16_t _st_alaw2linear16[256] = {
187 -5504, -5248, -6016, -5760, -4480, -4224, -4992,
188 -4736, -7552, -7296, -8064, -7808, -6528, -6272,
189 -7040, -6784, -2752, -2624, -3008, -2880, -2240,
190 -2112, -2496, -2368, -3776, -3648, -4032, -3904,
191 -3264, -3136, -3520, -3392, -22016, -20992, -24064,
192 -23040, -17920, -16896, -19968, -18944, -30208, -29184,
193 -32256, -31232, -26112, -25088, -28160, -27136, -11008,
194 -10496, -12032, -11520, -8960, -8448, -9984, -9472,
195 -15104, -14592, -16128, -15616, -13056, -12544, -14080,
196 -13568, -344, -328, -376, -360, -280, -264,
197 -312, -296, -472, -456, -504, -488, -408,
198 -392, -440, -424, -88, -72, -120, -104,
199 -24, -8, -56, -40, -216, -200, -248,
200 -232, -152, -136, -184, -168, -1376, -1312,
201 -1504, -1440, -1120, -1056, -1248, -1184, -1888,
202 -1824, -2016, -1952, -1632, -1568, -1760, -1696,
203 -688, -656, -752, -720, -560, -528, -624,
204 -592, -944, -912, -1008, -976, -816, -784,
205 -880, -848, 5504, 5248, 6016, 5760, 4480,
206 4224, 4992, 4736, 7552, 7296, 8064, 7808,
207 6528, 6272, 7040, 6784, 2752, 2624, 3008,
208 2880, 2240, 2112, 2496, 2368, 3776, 3648,
209 4032, 3904, 3264, 3136, 3520, 3392, 22016,
210 20992, 24064, 23040, 17920, 16896, 19968, 18944,
211 30208, 29184, 32256, 31232, 26112, 25088, 28160,
212 27136, 11008, 10496, 12032, 11520, 8960, 8448,
213 9984, 9472, 15104, 14592, 16128, 15616, 13056,
214 12544, 14080, 13568, 344, 328, 376, 360,
215 280, 264, 312, 296, 472, 456, 504,
216 488, 408, 392, 440, 424, 88, 72,
217 120, 104, 24, 8, 56, 40, 216,
218 200, 248, 232, 152, 136, 184, 168,
219 1376, 1312, 1504, 1440, 1120, 1056, 1248,
220 1184, 1888, 1824, 2016, 1952, 1632, 1568,
221 1760, 1696, 688, 656, 752, 720, 560,
222 528, 624, 592, 944, 912, 1008, 976,
223 816, 784, 880, 848
224 };
225
226 /*
227 * linear2alaw() accepts a 13-bit signed integer and encodes it as A-law data
228 * stored in an unsigned char. This function should only be called with
229 * the data shifted such that it only contains information in the lower
230 * 13-bits.
231 *
232 * Linear Input Code Compressed Code
233 * ------------------------ ---------------
234 * 0000000wxyza 000wxyz
235 * 0000001wxyza 001wxyz
236 * 000001wxyzab 010wxyz
237 * 00001wxyzabc 011wxyz
238 * 0001wxyzabcd 100wxyz
239 * 001wxyzabcde 101wxyz
240 * 01wxyzabcdef 110wxyz
241 * 1wxyzabcdefg 111wxyz
242 *
243 * For further information see John C. Bellamy's Digital Telephony, 1982,
244 * John Wiley & Sons, pps 98-111 and 472-476.
245 */
246 static unsigned char
st_linear2alaw(int16_t pcm_val) /* 2's complement (13-bit range) */
248 {
249 int16_t mask;
250 int16_t seg;
251 unsigned char aval;
252
253 /* A-law using even bit inversion */
254 if (pcm_val >= 0) {
255 mask = 0xD5; /* sign (7th) bit = 1 */
256 } else {
257 mask = 0x55; /* sign bit = 0 */
258 pcm_val = -pcm_val - 1;
259 }
260
261 /* Convert the scaled magnitude to segment number. */
262 seg = search(pcm_val, seg_aend, 8);
263
264 /* Combine the sign, segment, and quantization bits. */
265
266 if (seg >= 8) /* out of range, return maximum value. */
267 return (unsigned char) (0x7F ^ mask);
268 else {
269 aval = (unsigned char) seg << SEG_SHIFT;
270 if (seg < 2)
271 aval |= (pcm_val >> 1) & QUANT_MASK;
272 else
273 aval |= (pcm_val >> seg) & QUANT_MASK;
274 return (aval ^ mask);
275 }
276 }
277 /* End of code taken from sox */
278
279 /* Intel ADPCM step variation table */
280 static const int indexTable[16] = {
281 -1, -1, -1, -1, 2, 4, 6, 8,
282 -1, -1, -1, -1, 2, 4, 6, 8,
283 };
284
285 static const int stepsizeTable[89] = {
286 7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
287 19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
288 50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
289 130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
290 337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
291 876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
292 2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
293 5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
294 15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
295 };
296
297 #define GETINTX(T, cp, i) (*(T *)((unsigned char *)(cp) + (i)))
298 #define SETINTX(T, cp, i, val) do { \
299 *(T *)((unsigned char *)(cp) + (i)) = (T)(val); \
300 } while (0)
301
302
303 #define GETINT8(cp, i) GETINTX(signed char, (cp), (i))
304 #define GETINT16(cp, i) GETINTX(int16_t, (cp), (i))
305 #define GETINT32(cp, i) GETINTX(int32_t, (cp), (i))
306
307 #if WORDS_BIGENDIAN
308 #define GETINT24(cp, i) ( \
309 ((unsigned char *)(cp) + (i))[2] + \
310 (((unsigned char *)(cp) + (i))[1] << 8) + \
311 (((signed char *)(cp) + (i))[0] << 16) )
312 #else
313 #define GETINT24(cp, i) ( \
314 ((unsigned char *)(cp) + (i))[0] + \
315 (((unsigned char *)(cp) + (i))[1] << 8) + \
316 (((signed char *)(cp) + (i))[2] << 16) )
317 #endif
318
319
320 #define SETINT8(cp, i, val) SETINTX(signed char, (cp), (i), (val))
321 #define SETINT16(cp, i, val) SETINTX(int16_t, (cp), (i), (val))
322 #define SETINT32(cp, i, val) SETINTX(int32_t, (cp), (i), (val))
323
324 #if WORDS_BIGENDIAN
325 #define SETINT24(cp, i, val) do { \
326 ((unsigned char *)(cp) + (i))[2] = (int)(val); \
327 ((unsigned char *)(cp) + (i))[1] = (int)(val) >> 8; \
328 ((signed char *)(cp) + (i))[0] = (int)(val) >> 16; \
329 } while (0)
330 #else
331 #define SETINT24(cp, i, val) do { \
332 ((unsigned char *)(cp) + (i))[0] = (int)(val); \
333 ((unsigned char *)(cp) + (i))[1] = (int)(val) >> 8; \
334 ((signed char *)(cp) + (i))[2] = (int)(val) >> 16; \
335 } while (0)
336 #endif
337
338
339 #define GETRAWSAMPLE(size, cp, i) ( \
340 (size == 1) ? (int)GETINT8((cp), (i)) : \
341 (size == 2) ? (int)GETINT16((cp), (i)) : \
342 (size == 3) ? (int)GETINT24((cp), (i)) : \
343 (int)GETINT32((cp), (i)))
344
345 #define SETRAWSAMPLE(size, cp, i, val) do { \
346 if (size == 1) \
347 SETINT8((cp), (i), (val)); \
348 else if (size == 2) \
349 SETINT16((cp), (i), (val)); \
350 else if (size == 3) \
351 SETINT24((cp), (i), (val)); \
352 else \
353 SETINT32((cp), (i), (val)); \
354 } while(0)
355
356
357 #define GETSAMPLE32(size, cp, i) ( \
358 (size == 1) ? (int)GETINT8((cp), (i)) << 24 : \
359 (size == 2) ? (int)GETINT16((cp), (i)) << 16 : \
360 (size == 3) ? (int)GETINT24((cp), (i)) << 8 : \
361 (int)GETINT32((cp), (i)))
362
363 #define SETSAMPLE32(size, cp, i, val) do { \
364 if (size == 1) \
365 SETINT8((cp), (i), (val) >> 24); \
366 else if (size == 2) \
367 SETINT16((cp), (i), (val) >> 16); \
368 else if (size == 3) \
369 SETINT24((cp), (i), (val) >> 8); \
370 else \
371 SETINT32((cp), (i), (val)); \
372 } while(0)
373
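/* Example of the width normalisation done by the two macros above (purely
   illustrative): with width 2, a stored sample 0x1234 is read by GETSAMPLE32
   as 0x12340000, and SETSAMPLE32 writes back only the top 16 bits, so the
   conversion loops below can work on a common 32-bit scale regardless of the
   original sample width. */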
374
375 static PyObject *AudioopError;
376
377 static int
audioop_check_size(int size)
379 {
380 if (size < 1 || size > 4) {
381 PyErr_SetString(AudioopError, "Size should be 1, 2, 3 or 4");
382 return 0;
383 }
384 else
385 return 1;
386 }
387
388 static int
audioop_check_parameters(Py_ssize_t len, int size)
390 {
391 if (!audioop_check_size(size))
392 return 0;
393 if (len % size != 0) {
394 PyErr_SetString(AudioopError, "not a whole number of frames");
395 return 0;
396 }
397 return 1;
398 }
399
400 /*[clinic input]
401 module audioop
402 [clinic start generated code]*/
403 /*[clinic end generated code: output=da39a3ee5e6b4b0d input=8fa8f6611be3591a]*/
404
405 /*[clinic input]
406 audioop.getsample
407
408 fragment: Py_buffer
409 width: int
410 index: Py_ssize_t
411 /
412
413 Return the value of sample index from the fragment.
414 [clinic start generated code]*/
415
416 static PyObject *
audioop_getsample_impl(PyObject *module, Py_buffer *fragment, int width,
                       Py_ssize_t index)
419 /*[clinic end generated code: output=8fe1b1775134f39a input=88edbe2871393549]*/
420 {
421 int val;
422
423 if (!audioop_check_parameters(fragment->len, width))
424 return NULL;
425 if (index < 0 || index >= fragment->len/width) {
426 PyErr_SetString(AudioopError, "Index out of range");
427 return NULL;
428 }
429 val = GETRAWSAMPLE(width, fragment->buf, index*width);
430 return PyLong_FromLong(val);
431 }
432
433 /*[clinic input]
434 audioop.max
435
436 fragment: Py_buffer
437 width: int
438 /
439
440 Return the maximum of the absolute value of all samples in a fragment.
441 [clinic start generated code]*/
442
443 static PyObject *
audioop_max_impl(PyObject *module, Py_buffer *fragment, int width)
445 /*[clinic end generated code: output=e6c5952714f1c3f0 input=32bea5ea0ac8c223]*/
446 {
447 Py_ssize_t i;
448 unsigned int absval, max = 0;
449
450 if (!audioop_check_parameters(fragment->len, width))
451 return NULL;
452 for (i = 0; i < fragment->len; i += width) {
453 int val = GETRAWSAMPLE(width, fragment->buf, i);
454 /* Cast to unsigned before negating. Unsigned overflow is well-
455 defined, but signed overflow is not. */
456 if (val < 0) absval = (unsigned int)-(int64_t)val;
457 else absval = val;
458 if (absval > max) max = absval;
459 }
460 return PyLong_FromUnsignedLong(max);
461 }
462
463 /*[clinic input]
464 audioop.minmax
465
466 fragment: Py_buffer
467 width: int
468 /
469
470 Return the minimum and maximum values of all samples in the sound fragment.
471 [clinic start generated code]*/
472
473 static PyObject *
audioop_minmax_impl(PyObject *module, Py_buffer *fragment, int width)
475 /*[clinic end generated code: output=473fda66b15c836e input=89848e9b927a0696]*/
476 {
477 Py_ssize_t i;
478 /* -1 trick below is needed on Windows to support -0x80000000 without
479 a warning */
480 int min = 0x7fffffff, max = -0x7FFFFFFF-1;
481
482 if (!audioop_check_parameters(fragment->len, width))
483 return NULL;
484 for (i = 0; i < fragment->len; i += width) {
485 int val = GETRAWSAMPLE(width, fragment->buf, i);
486 if (val > max) max = val;
487 if (val < min) min = val;
488 }
489 return Py_BuildValue("(ii)", min, max);
490 }
491
492 /*[clinic input]
493 audioop.avg
494
495 fragment: Py_buffer
496 width: int
497 /
498
499 Return the average over all samples in the fragment.
500 [clinic start generated code]*/
501
502 static PyObject *
audioop_avg_impl(PyObject *module, Py_buffer *fragment, int width)
504 /*[clinic end generated code: output=4410a4c12c3586e6 input=1114493c7611334d]*/
505 {
506 Py_ssize_t i;
507 int avg;
508 double sum = 0.0;
509
510 if (!audioop_check_parameters(fragment->len, width))
511 return NULL;
512 for (i = 0; i < fragment->len; i += width)
513 sum += GETRAWSAMPLE(width, fragment->buf, i);
514 if (fragment->len == 0)
515 avg = 0;
516 else
517 avg = (int)floor(sum / (double)(fragment->len/width));
518 return PyLong_FromLong(avg);
519 }
520
521 /*[clinic input]
522 audioop.rms
523
524 fragment: Py_buffer
525 width: int
526 /
527
528 Return the root-mean-square of the fragment, i.e. sqrt(sum(S_i^2)/n).
529 [clinic start generated code]*/
530
531 static PyObject *
audioop_rms_impl(PyObject *module, Py_buffer *fragment, int width)
533 /*[clinic end generated code: output=1e7871c826445698 input=4cc57c6c94219d78]*/
534 {
535 Py_ssize_t i;
536 unsigned int res;
537 double sum_squares = 0.0;
538
539 if (!audioop_check_parameters(fragment->len, width))
540 return NULL;
541 for (i = 0; i < fragment->len; i += width) {
542 double val = GETRAWSAMPLE(width, fragment->buf, i);
543 sum_squares += val*val;
544 }
545 if (fragment->len == 0)
546 res = 0;
547 else
548 res = (unsigned int)sqrt(sum_squares / (double)(fragment->len/width));
549 return PyLong_FromUnsignedLong(res);
550 }
551
static double _sum2(const int16_t *a, const int16_t *b, Py_ssize_t len)
553 {
554 Py_ssize_t i;
555 double sum = 0.0;
556
557 for( i=0; i<len; i++) {
558 sum = sum + (double)a[i]*(double)b[i];
559 }
560 return sum;
561 }
562
563 /*
564 ** Findfit tries to locate a sample within another sample. Its main use
565 ** is in echo-cancellation (to find the feedback of the output signal in
566 ** the input signal).
567 ** The method used is as follows:
568 **
569 ** let R be the reference signal (length n) and A the input signal (length N)
570 ** with N > n, and let all sums be over i from 0 to n-1.
571 **
** Now, for each j in {0..N-n} we compute a factor fj so that fj*R matches A
** as well as possible, i.e. sum( (A[j+i]-fj*R[i])^2 ) is minimal. This
574 ** equation gives fj = sum( A[j+i]R[i] ) / sum(R[i]^2).
575 **
576 ** Next, we compute the relative distance between the original signal and
577 ** the modified signal and minimize that over j:
578 ** vj = sum( (A[j+i]-fj*R[i])^2 ) / sum( A[j+i]^2 ) =>
579 ** vj = ( sum(A[j+i]^2)*sum(R[i]^2) - sum(A[j+i]R[i])^2 ) / sum( A[j+i]^2 )
580 **
581 ** In the code variables correspond as follows:
582 ** cp1 A
583 ** cp2 R
584 ** len1 N
585 ** len2 n
586 ** aj_m1 A[j-1]
587 ** aj_lm1 A[j+n-1]
588 ** sum_ri_2 sum(R[i]^2)
589 ** sum_aij_2 sum(A[i+j]^2)
590 ** sum_aij_ri sum(A[i+j]R[i])
591 **
592 ** sum_ri is calculated once, sum_aij_2 is updated each step and sum_aij_ri
593 ** is completely recalculated each step.
594 */
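/*
** The loop in audioop_findfit_impl() below performs this search with an
** incremental update of sum_aij_2.  The following uncompiled sketch (a
** hypothetical helper kept under #if 0, purely to illustrate the formulas
** above) shows the equivalent straightforward computation.
*/
#if 0
static Py_ssize_t
findfit_reference(const int16_t *cp1, Py_ssize_t len1,
                  const int16_t *cp2, Py_ssize_t len2)
{
    Py_ssize_t j, best_j = 0;
    double sum_ri_2 = _sum2(cp2, cp2, len2);   /* sum(R[i]^2), constant */
    double best_result = 0.0;

    for (j = 0; j <= len1 - len2; j++) {
        /* Recompute both sums from scratch for every offset j. */
        double sum_aij_2 = _sum2(cp1 + j, cp1 + j, len2);
        double sum_aij_ri = _sum2(cp1 + j, cp2, len2);
        double result = (sum_ri_2 * sum_aij_2 - sum_aij_ri * sum_aij_ri)
                        / sum_aij_2;
        if (j == 0 || result < best_result) {
            best_result = result;
            best_j = j;
        }
    }
    return best_j;
}
#endif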
595 /*[clinic input]
596 audioop.findfit
597
598 fragment: Py_buffer
599 reference: Py_buffer
600 /
601
602 Try to match reference as well as possible to a portion of fragment.
603 [clinic start generated code]*/
604
605 static PyObject *
audioop_findfit_impl(PyObject *module, Py_buffer *fragment,
                     Py_buffer *reference)
608 /*[clinic end generated code: output=5752306d83cbbada input=62c305605e183c9a]*/
609 {
610 const int16_t *cp1, *cp2;
611 Py_ssize_t len1, len2;
612 Py_ssize_t j, best_j;
613 double aj_m1, aj_lm1;
614 double sum_ri_2, sum_aij_2, sum_aij_ri, result, best_result, factor;
615
616 if (fragment->len & 1 || reference->len & 1) {
617 PyErr_SetString(AudioopError, "Strings should be even-sized");
618 return NULL;
619 }
620 cp1 = (const int16_t *)fragment->buf;
621 len1 = fragment->len >> 1;
622 cp2 = (const int16_t *)reference->buf;
623 len2 = reference->len >> 1;
624
625 if (len1 < len2) {
626 PyErr_SetString(AudioopError, "First sample should be longer");
627 return NULL;
628 }
629 sum_ri_2 = _sum2(cp2, cp2, len2);
630 sum_aij_2 = _sum2(cp1, cp1, len2);
631 sum_aij_ri = _sum2(cp1, cp2, len2);
632
633 result = (sum_ri_2*sum_aij_2 - sum_aij_ri*sum_aij_ri) / sum_aij_2;
634
635 best_result = result;
636 best_j = 0;
637
638 for ( j=1; j<=len1-len2; j++) {
639 aj_m1 = (double)cp1[j-1];
640 aj_lm1 = (double)cp1[j+len2-1];
641
642 sum_aij_2 = sum_aij_2 + aj_lm1*aj_lm1 - aj_m1*aj_m1;
643 sum_aij_ri = _sum2(cp1+j, cp2, len2);
644
645 result = (sum_ri_2*sum_aij_2 - sum_aij_ri*sum_aij_ri)
646 / sum_aij_2;
647
648 if ( result < best_result ) {
649 best_result = result;
650 best_j = j;
651 }
652
653 }
654
655 factor = _sum2(cp1+best_j, cp2, len2) / sum_ri_2;
656
657 return Py_BuildValue("(nf)", best_j, factor);
658 }
659
660 /*
661 ** findfactor finds a factor f so that the energy in A-fB is minimal.
662 ** See the comment for findfit for details.
663 */
664 /*[clinic input]
665 audioop.findfactor
666
667 fragment: Py_buffer
668 reference: Py_buffer
669 /
670
671 Return a factor F such that rms(add(fragment, mul(reference, -F))) is minimal.
672 [clinic start generated code]*/
673
674 static PyObject *
audioop_findfactor_impl(PyObject *module, Py_buffer *fragment,
                        Py_buffer *reference)
677 /*[clinic end generated code: output=14ea95652c1afcf8 input=816680301d012b21]*/
678 {
679 const int16_t *cp1, *cp2;
680 Py_ssize_t len;
681 double sum_ri_2, sum_aij_ri, result;
682
683 if (fragment->len & 1 || reference->len & 1) {
684 PyErr_SetString(AudioopError, "Strings should be even-sized");
685 return NULL;
686 }
687 if (fragment->len != reference->len) {
688 PyErr_SetString(AudioopError, "Samples should be same size");
689 return NULL;
690 }
691 cp1 = (const int16_t *)fragment->buf;
692 cp2 = (const int16_t *)reference->buf;
693 len = fragment->len >> 1;
694 sum_ri_2 = _sum2(cp2, cp2, len);
695 sum_aij_ri = _sum2(cp1, cp2, len);
696
697 result = sum_aij_ri / sum_ri_2;
698
699 return PyFloat_FromDouble(result);
700 }
701
702 /*
703 ** findmax returns the index of the n-sized segment of the input sample
704 ** that contains the most energy.
705 */
706 /*[clinic input]
707 audioop.findmax
708
709 fragment: Py_buffer
710 length: Py_ssize_t
711 /
712
713 Search fragment for a slice of specified number of samples with maximum energy.
714 [clinic start generated code]*/
715
716 static PyObject *
audioop_findmax_impl(PyObject *module, Py_buffer *fragment,
                     Py_ssize_t length)
719 /*[clinic end generated code: output=f008128233523040 input=2f304801ed42383c]*/
720 {
721 const int16_t *cp1;
722 Py_ssize_t len1;
723 Py_ssize_t j, best_j;
724 double aj_m1, aj_lm1;
725 double result, best_result;
726
727 if (fragment->len & 1) {
728 PyErr_SetString(AudioopError, "Strings should be even-sized");
729 return NULL;
730 }
731 cp1 = (const int16_t *)fragment->buf;
732 len1 = fragment->len >> 1;
733
734 if (length < 0 || len1 < length) {
735 PyErr_SetString(AudioopError, "Input sample should be longer");
736 return NULL;
737 }
738
739 result = _sum2(cp1, cp1, length);
740
741 best_result = result;
742 best_j = 0;
743
744 for ( j=1; j<=len1-length; j++) {
745 aj_m1 = (double)cp1[j-1];
746 aj_lm1 = (double)cp1[j+length-1];
747
748 result = result + aj_lm1*aj_lm1 - aj_m1*aj_m1;
749
750 if ( result > best_result ) {
751 best_result = result;
752 best_j = j;
753 }
754
755 }
756
757 return PyLong_FromSsize_t(best_j);
758 }
759
760 /*[clinic input]
761 audioop.avgpp
762
763 fragment: Py_buffer
764 width: int
765 /
766
767 Return the average peak-peak value over all samples in the fragment.
768 [clinic start generated code]*/
769
770 static PyObject *
audioop_avgpp_impl(PyObject *module, Py_buffer *fragment, int width)
772 /*[clinic end generated code: output=269596b0d5ae0b2b input=0b3cceeae420a7d9]*/
773 {
774 Py_ssize_t i;
775 int prevval, prevextremevalid = 0, prevextreme = 0;
776 double sum = 0.0;
777 unsigned int avg;
778 int diff, prevdiff, nextreme = 0;
779
780 if (!audioop_check_parameters(fragment->len, width))
781 return NULL;
782 if (fragment->len <= width)
783 return PyLong_FromLong(0);
784 prevval = GETRAWSAMPLE(width, fragment->buf, 0);
785 prevdiff = 17; /* Anything != 0, 1 */
786 for (i = width; i < fragment->len; i += width) {
787 int val = GETRAWSAMPLE(width, fragment->buf, i);
788 if (val != prevval) {
789 diff = val < prevval;
790 if (prevdiff == !diff) {
791 /* Derivative changed sign. Compute difference to last
792 ** extreme value and remember.
793 */
794 if (prevextremevalid) {
795 if (prevval < prevextreme)
796 sum += (double)((unsigned int)prevextreme -
797 (unsigned int)prevval);
798 else
799 sum += (double)((unsigned int)prevval -
800 (unsigned int)prevextreme);
801 nextreme++;
802 }
803 prevextremevalid = 1;
804 prevextreme = prevval;
805 }
806 prevval = val;
807 prevdiff = diff;
808 }
809 }
810 if ( nextreme == 0 )
811 avg = 0;
812 else
813 avg = (unsigned int)(sum / (double)nextreme);
814 return PyLong_FromUnsignedLong(avg);
815 }
816
817 /*[clinic input]
818 audioop.maxpp
819
820 fragment: Py_buffer
821 width: int
822 /
823
824 Return the maximum peak-peak value in the sound fragment.
825 [clinic start generated code]*/
826
827 static PyObject *
audioop_maxpp_impl(PyObject *module, Py_buffer *fragment, int width)
829 /*[clinic end generated code: output=5b918ed5dbbdb978 input=671a13e1518f80a1]*/
830 {
831 Py_ssize_t i;
832 int prevval, prevextremevalid = 0, prevextreme = 0;
833 unsigned int max = 0, extremediff;
834 int diff, prevdiff;
835
836 if (!audioop_check_parameters(fragment->len, width))
837 return NULL;
838 if (fragment->len <= width)
839 return PyLong_FromLong(0);
840 prevval = GETRAWSAMPLE(width, fragment->buf, 0);
841 prevdiff = 17; /* Anything != 0, 1 */
842 for (i = width; i < fragment->len; i += width) {
843 int val = GETRAWSAMPLE(width, fragment->buf, i);
844 if (val != prevval) {
845 diff = val < prevval;
846 if (prevdiff == !diff) {
847 /* Derivative changed sign. Compute difference to
848 ** last extreme value and remember.
849 */
850 if (prevextremevalid) {
851 if (prevval < prevextreme)
852 extremediff = (unsigned int)prevextreme -
853 (unsigned int)prevval;
854 else
855 extremediff = (unsigned int)prevval -
856 (unsigned int)prevextreme;
857 if ( extremediff > max )
858 max = extremediff;
859 }
860 prevextremevalid = 1;
861 prevextreme = prevval;
862 }
863 prevval = val;
864 prevdiff = diff;
865 }
866 }
867 return PyLong_FromUnsignedLong(max);
868 }
869
870 /*[clinic input]
871 audioop.cross
872
873 fragment: Py_buffer
874 width: int
875 /
876
877 Return the number of zero crossings in the fragment passed as an argument.
878 [clinic start generated code]*/
879
880 static PyObject *
audioop_cross_impl(PyObject *module, Py_buffer *fragment, int width)
882 /*[clinic end generated code: output=5938dcdd74a1f431 input=b1b3f15b83f6b41a]*/
883 {
884 Py_ssize_t i;
885 int prevval;
886 Py_ssize_t ncross;
887
888 if (!audioop_check_parameters(fragment->len, width))
889 return NULL;
890 ncross = -1;
    prevval = 17; /* Anything != 0, 1 */
892 for (i = 0; i < fragment->len; i += width) {
893 int val = GETRAWSAMPLE(width, fragment->buf, i) < 0;
894 if (val != prevval) ncross++;
895 prevval = val;
896 }
897 return PyLong_FromSsize_t(ncross);
898 }
899
900 /*[clinic input]
901 audioop.mul
902
903 fragment: Py_buffer
904 width: int
905 factor: double
906 /
907
908 Return a fragment that has all samples in the original fragment multiplied by the floating-point value factor.
909 [clinic start generated code]*/
910
911 static PyObject *
audioop_mul_impl(PyObject *module, Py_buffer *fragment, int width,
                 double factor)
914 /*[clinic end generated code: output=6cd48fe796da0ea4 input=c726667baa157d3c]*/
915 {
916 signed char *ncp;
917 Py_ssize_t i;
918 double maxval, minval;
919 PyObject *rv;
920
921 if (!audioop_check_parameters(fragment->len, width))
922 return NULL;
923
924 maxval = (double) maxvals[width];
925 minval = (double) minvals[width];
926
927 rv = PyBytes_FromStringAndSize(NULL, fragment->len);
928 if (rv == NULL)
929 return NULL;
930 ncp = (signed char *)PyBytes_AsString(rv);
931
932 for (i = 0; i < fragment->len; i += width) {
933 double val = GETRAWSAMPLE(width, fragment->buf, i);
934 int ival = fbound(val * factor, minval, maxval);
935 SETRAWSAMPLE(width, ncp, i, ival);
936 }
937 return rv;
938 }
939
940 /*[clinic input]
941 audioop.tomono
942
943 fragment: Py_buffer
944 width: int
945 lfactor: double
946 rfactor: double
947 /
948
949 Convert a stereo fragment to a mono fragment.
950 [clinic start generated code]*/
951
952 static PyObject *
audioop_tomono_impl(PyObject *module, Py_buffer *fragment, int width,
                    double lfactor, double rfactor)
955 /*[clinic end generated code: output=235c8277216d4e4e input=c4ec949b3f4dddfa]*/
956 {
957 signed char *cp, *ncp;
958 Py_ssize_t len, i;
959 double maxval, minval;
960 PyObject *rv;
961
962 cp = fragment->buf;
963 len = fragment->len;
964 if (!audioop_check_parameters(len, width))
965 return NULL;
966 if (((len / width) & 1) != 0) {
967 PyErr_SetString(AudioopError, "not a whole number of frames");
968 return NULL;
969 }
970
971 maxval = (double) maxvals[width];
972 minval = (double) minvals[width];
973
974 rv = PyBytes_FromStringAndSize(NULL, len/2);
975 if (rv == NULL)
976 return NULL;
977 ncp = (signed char *)PyBytes_AsString(rv);
978
979 for (i = 0; i < len; i += width*2) {
980 double val1 = GETRAWSAMPLE(width, cp, i);
981 double val2 = GETRAWSAMPLE(width, cp, i + width);
982 double val = val1 * lfactor + val2 * rfactor;
983 int ival = fbound(val, minval, maxval);
984 SETRAWSAMPLE(width, ncp, i/2, ival);
985 }
986 return rv;
987 }
988
989 /*[clinic input]
990 audioop.tostereo
991
992 fragment: Py_buffer
993 width: int
994 lfactor: double
995 rfactor: double
996 /
997
998 Generate a stereo fragment from a mono fragment.
999 [clinic start generated code]*/
1000
1001 static PyObject *
audioop_tostereo_impl(PyObject *module, Py_buffer *fragment, int width,
                      double lfactor, double rfactor)
1004 /*[clinic end generated code: output=046f13defa5f1595 input=27b6395ebfdff37a]*/
1005 {
1006 signed char *ncp;
1007 Py_ssize_t i;
1008 double maxval, minval;
1009 PyObject *rv;
1010
1011 if (!audioop_check_parameters(fragment->len, width))
1012 return NULL;
1013
1014 maxval = (double) maxvals[width];
1015 minval = (double) minvals[width];
1016
1017 if (fragment->len > PY_SSIZE_T_MAX/2) {
1018 PyErr_SetString(PyExc_MemoryError,
1019 "not enough memory for output buffer");
1020 return NULL;
1021 }
1022
1023 rv = PyBytes_FromStringAndSize(NULL, fragment->len*2);
1024 if (rv == NULL)
1025 return NULL;
1026 ncp = (signed char *)PyBytes_AsString(rv);
1027
1028 for (i = 0; i < fragment->len; i += width) {
1029 double val = GETRAWSAMPLE(width, fragment->buf, i);
1030 int val1 = fbound(val * lfactor, minval, maxval);
1031 int val2 = fbound(val * rfactor, minval, maxval);
1032 SETRAWSAMPLE(width, ncp, i*2, val1);
1033 SETRAWSAMPLE(width, ncp, i*2 + width, val2);
1034 }
1035 return rv;
1036 }
1037
1038 /*[clinic input]
1039 audioop.add
1040
1041 fragment1: Py_buffer
1042 fragment2: Py_buffer
1043 width: int
1044 /
1045
1046 Return a fragment which is the addition of the two samples passed as parameters.
1047 [clinic start generated code]*/
1048
1049 static PyObject *
audioop_add_impl(PyObject *module, Py_buffer *fragment1,
                 Py_buffer *fragment2, int width)
1052 /*[clinic end generated code: output=60140af4d1aab6f2 input=4a8d4bae4c1605c7]*/
1053 {
1054 signed char *ncp;
1055 Py_ssize_t i;
1056 int minval, maxval, newval;
1057 PyObject *rv;
1058
1059 if (!audioop_check_parameters(fragment1->len, width))
1060 return NULL;
1061 if (fragment1->len != fragment2->len) {
1062 PyErr_SetString(AudioopError, "Lengths should be the same");
1063 return NULL;
1064 }
1065
1066 maxval = maxvals[width];
1067 minval = minvals[width];
1068
1069 rv = PyBytes_FromStringAndSize(NULL, fragment1->len);
1070 if (rv == NULL)
1071 return NULL;
1072 ncp = (signed char *)PyBytes_AsString(rv);
1073
1074 for (i = 0; i < fragment1->len; i += width) {
1075 int val1 = GETRAWSAMPLE(width, fragment1->buf, i);
1076 int val2 = GETRAWSAMPLE(width, fragment2->buf, i);
1077
1078 if (width < 4) {
1079 newval = val1 + val2;
1080 /* truncate in case of overflow */
1081 if (newval > maxval)
1082 newval = maxval;
1083 else if (newval < minval)
1084 newval = minval;
1085 }
1086 else {
1087 double fval = (double)val1 + (double)val2;
1088 /* truncate in case of overflow */
1089 newval = fbound(fval, minval, maxval);
1090 }
1091
1092 SETRAWSAMPLE(width, ncp, i, newval);
1093 }
1094 return rv;
1095 }
1096
1097 /*[clinic input]
1098 audioop.bias
1099
1100 fragment: Py_buffer
1101 width: int
1102 bias: int
1103 /
1104
1105 Return a fragment that is the original fragment with a bias added to each sample.
1106 [clinic start generated code]*/
1107
1108 static PyObject *
audioop_bias_impl(PyObject *module, Py_buffer *fragment, int width, int bias)
1110 /*[clinic end generated code: output=6e0aa8f68f045093 input=2b5cce5c3bb4838c]*/
1111 {
1112 signed char *ncp;
1113 Py_ssize_t i;
1114 unsigned int val = 0, mask;
1115 PyObject *rv;
1116
1117 if (!audioop_check_parameters(fragment->len, width))
1118 return NULL;
1119
1120 rv = PyBytes_FromStringAndSize(NULL, fragment->len);
1121 if (rv == NULL)
1122 return NULL;
1123 ncp = (signed char *)PyBytes_AsString(rv);
1124
1125 mask = masks[width];
1126
1127 for (i = 0; i < fragment->len; i += width) {
1128 if (width == 1)
1129 val = GETINTX(unsigned char, fragment->buf, i);
1130 else if (width == 2)
1131 val = GETINTX(uint16_t, fragment->buf, i);
1132 else if (width == 3)
1133 val = ((unsigned int)GETINT24(fragment->buf, i)) & 0xffffffu;
1134 else {
1135 assert(width == 4);
1136 val = GETINTX(uint32_t, fragment->buf, i);
1137 }
1138
1139 val += (unsigned int)bias;
1140 /* wrap around in case of overflow */
1141 val &= mask;
1142
1143 if (width == 1)
1144 SETINTX(unsigned char, ncp, i, val);
1145 else if (width == 2)
1146 SETINTX(uint16_t, ncp, i, val);
1147 else if (width == 3)
1148 SETINT24(ncp, i, (int)val);
1149 else {
1150 assert(width == 4);
1151 SETINTX(uint32_t, ncp, i, val);
1152 }
1153 }
1154 return rv;
1155 }
1156
1157 /*[clinic input]
1158 audioop.reverse
1159
1160 fragment: Py_buffer
1161 width: int
1162 /
1163
1164 Reverse the samples in a fragment and returns the modified fragment.
1165 [clinic start generated code]*/
1166
1167 static PyObject *
audioop_reverse_impl(PyObject *module, Py_buffer *fragment, int width)
1169 /*[clinic end generated code: output=b44135698418da14 input=668f890cf9f9d225]*/
1170 {
1171 unsigned char *ncp;
1172 Py_ssize_t i;
1173 PyObject *rv;
1174
1175 if (!audioop_check_parameters(fragment->len, width))
1176 return NULL;
1177
1178 rv = PyBytes_FromStringAndSize(NULL, fragment->len);
1179 if (rv == NULL)
1180 return NULL;
1181 ncp = (unsigned char *)PyBytes_AsString(rv);
1182
1183 for (i = 0; i < fragment->len; i += width) {
1184 int val = GETRAWSAMPLE(width, fragment->buf, i);
1185 SETRAWSAMPLE(width, ncp, fragment->len - i - width, val);
1186 }
1187 return rv;
1188 }
1189
1190 /*[clinic input]
1191 audioop.byteswap
1192
1193 fragment: Py_buffer
1194 width: int
1195 /
1196
1197 Convert big-endian samples to little-endian and vice versa.
1198 [clinic start generated code]*/
1199
1200 static PyObject *
audioop_byteswap_impl(PyObject *module, Py_buffer *fragment, int width)
1202 /*[clinic end generated code: output=50838a9e4b87cd4d input=fae7611ceffa5c82]*/
1203 {
1204 unsigned char *ncp;
1205 Py_ssize_t i;
1206 PyObject *rv;
1207
1208 if (!audioop_check_parameters(fragment->len, width))
1209 return NULL;
1210
1211 rv = PyBytes_FromStringAndSize(NULL, fragment->len);
1212 if (rv == NULL)
1213 return NULL;
1214 ncp = (unsigned char *)PyBytes_AsString(rv);
1215
1216 for (i = 0; i < fragment->len; i += width) {
1217 int j;
1218 for (j = 0; j < width; j++)
1219 ncp[i + width - 1 - j] = ((unsigned char *)fragment->buf)[i + j];
1220 }
1221 return rv;
1222 }
1223
1224 /*[clinic input]
1225 audioop.lin2lin
1226
1227 fragment: Py_buffer
1228 width: int
1229 newwidth: int
1230 /
1231
1232 Convert samples between 1-, 2-, 3- and 4-byte formats.
1233 [clinic start generated code]*/
1234
1235 static PyObject *
audioop_lin2lin_impl(PyObject *module, Py_buffer *fragment, int width,
                     int newwidth)
1238 /*[clinic end generated code: output=17b14109248f1d99 input=5ce08c8aa2f24d96]*/
1239 {
1240 unsigned char *ncp;
1241 Py_ssize_t i, j;
1242 PyObject *rv;
1243
1244 if (!audioop_check_parameters(fragment->len, width))
1245 return NULL;
1246 if (!audioop_check_size(newwidth))
1247 return NULL;
1248
1249 if (fragment->len/width > PY_SSIZE_T_MAX/newwidth) {
1250 PyErr_SetString(PyExc_MemoryError,
1251 "not enough memory for output buffer");
1252 return NULL;
1253 }
1254 rv = PyBytes_FromStringAndSize(NULL, (fragment->len/width)*newwidth);
1255 if (rv == NULL)
1256 return NULL;
1257 ncp = (unsigned char *)PyBytes_AsString(rv);
1258
1259 for (i = j = 0; i < fragment->len; i += width, j += newwidth) {
1260 int val = GETSAMPLE32(width, fragment->buf, i);
1261 SETSAMPLE32(newwidth, ncp, j, val);
1262 }
1263 return rv;
1264 }
1265
1266 static int
gcd(int a, int b)
1268 {
1269 while (b > 0) {
1270 int tmp = a % b;
1271 a = b;
1272 b = tmp;
1273 }
1274 return a;
1275 }
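/* ratecv() divides both rates by their gcd before converting; e.g. for an
   8000 Hz to 44100 Hz conversion, gcd(8000, 44100) == 100, so the loop below
   effectively works with the ratio 80:441. */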
1276
1277 /*[clinic input]
1278 audioop.ratecv
1279
1280 fragment: Py_buffer
1281 width: int
1282 nchannels: int
1283 inrate: int
1284 outrate: int
1285 state: object
1286 weightA: int = 1
1287 weightB: int = 0
1288 /
1289
1290 Convert the frame rate of the input fragment.
1291 [clinic start generated code]*/
1292
1293 static PyObject *
audioop_ratecv_impl(PyObject *module, Py_buffer *fragment, int width,
                    int nchannels, int inrate, int outrate, PyObject *state,
                    int weightA, int weightB)
1297 /*[clinic end generated code: output=624038e843243139 input=aff3acdc94476191]*/
1298 {
1299 char *cp, *ncp;
1300 Py_ssize_t len;
1301 int chan, d, *prev_i, *cur_i, cur_o;
1302 PyObject *samps, *str, *rv = NULL, *channel;
1303 int bytes_per_frame;
1304
1305 if (!audioop_check_size(width))
1306 return NULL;
1307 if (nchannels < 1) {
1308 PyErr_SetString(AudioopError, "# of channels should be >= 1");
1309 return NULL;
1310 }
1311 if (width > INT_MAX / nchannels) {
1312 /* This overflow test is rigorously correct because
1313 both multiplicands are >= 1. Use the argument names
1314 from the docs for the error msg. */
1315 PyErr_SetString(PyExc_OverflowError,
1316 "width * nchannels too big for a C int");
1317 return NULL;
1318 }
1319 bytes_per_frame = width * nchannels;
1320 if (weightA < 1 || weightB < 0) {
1321 PyErr_SetString(AudioopError,
1322 "weightA should be >= 1, weightB should be >= 0");
1323 return NULL;
1324 }
1325 assert(fragment->len >= 0);
1326 if (fragment->len % bytes_per_frame != 0) {
1327 PyErr_SetString(AudioopError, "not a whole number of frames");
1328 return NULL;
1329 }
1330 if (inrate <= 0 || outrate <= 0) {
1331 PyErr_SetString(AudioopError, "sampling rate not > 0");
1332 return NULL;
1333 }
1334 /* divide inrate and outrate by their greatest common divisor */
1335 d = gcd(inrate, outrate);
1336 inrate /= d;
1337 outrate /= d;
1338 /* divide weightA and weightB by their greatest common divisor */
1339 d = gcd(weightA, weightB);
1340 weightA /= d;
1341 weightB /= d;
1342
1343 if ((size_t)nchannels > SIZE_MAX/sizeof(int)) {
1344 PyErr_SetString(PyExc_MemoryError,
1345 "not enough memory for output buffer");
1346 return NULL;
1347 }
1348 prev_i = (int *) PyMem_Malloc(nchannels * sizeof(int));
1349 cur_i = (int *) PyMem_Malloc(nchannels * sizeof(int));
1350 if (prev_i == NULL || cur_i == NULL) {
1351 (void) PyErr_NoMemory();
1352 goto exit;
1353 }
1354
1355 len = fragment->len / bytes_per_frame; /* # of frames */
1356
1357 if (state == Py_None) {
1358 d = -outrate;
1359 for (chan = 0; chan < nchannels; chan++)
1360 prev_i[chan] = cur_i[chan] = 0;
1361 }
1362 else {
1363 if (!PyTuple_Check(state)) {
1364 PyErr_SetString(PyExc_TypeError, "state must be a tuple or None");
1365 goto exit;
1366 }
1367 if (!PyArg_ParseTuple(state,
1368 "iO!;ratecv(): illegal state argument",
1369 &d, &PyTuple_Type, &samps))
1370 goto exit;
1371 if (PyTuple_Size(samps) != nchannels) {
1372 PyErr_SetString(AudioopError,
1373 "illegal state argument");
1374 goto exit;
1375 }
1376 for (chan = 0; chan < nchannels; chan++) {
1377 channel = PyTuple_GetItem(samps, chan);
1378 if (!PyTuple_Check(channel)) {
1379 PyErr_SetString(PyExc_TypeError,
1380 "ratecv(): illegal state argument");
1381 goto exit;
1382 }
1383 if (!PyArg_ParseTuple(channel,
1384 "ii;ratecv(): illegal state argument",
1385 &prev_i[chan], &cur_i[chan]))
1386 {
1387 goto exit;
1388 }
1389 }
1390 }
1391
1392 /* str <- Space for the output buffer. */
1393 if (len == 0)
1394 str = PyBytes_FromStringAndSize(NULL, 0);
1395 else {
1396 /* There are len input frames, so we need (mathematically)
1397 ceiling(len*outrate/inrate) output frames, and each frame
1398 requires bytes_per_frame bytes. Computing this
1399 without spurious overflow is the challenge; we can
1400 settle for a reasonable upper bound, though, in this
1401 case ceiling(len/inrate) * outrate. */
1402
1403 /* compute ceiling(len/inrate) without overflow */
1404 Py_ssize_t q = 1 + (len - 1) / inrate;
1405 if (outrate > PY_SSIZE_T_MAX / q / bytes_per_frame)
1406 str = NULL;
1407 else
1408 str = PyBytes_FromStringAndSize(NULL,
1409 q * outrate * bytes_per_frame);
1410 }
1411 if (str == NULL) {
1412 PyErr_SetString(PyExc_MemoryError,
1413 "not enough memory for output buffer");
1414 goto exit;
1415 }
1416 ncp = PyBytes_AsString(str);
1417 cp = fragment->buf;
1418
1419 for (;;) {
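    /* d acts as the phase accumulator of the converter: consuming one input
       frame adds outrate to it, emitting one output frame subtracts inrate,
       and output frames are produced while d >= 0. */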
1420 while (d < 0) {
1421 if (len == 0) {
1422 samps = PyTuple_New(nchannels);
1423 if (samps == NULL)
1424 goto exit;
1425 for (chan = 0; chan < nchannels; chan++)
1426 PyTuple_SetItem(samps, chan,
1427 Py_BuildValue("(ii)",
1428 prev_i[chan],
1429 cur_i[chan]));
1430 if (PyErr_Occurred())
1431 goto exit;
1432 /* We have checked before that the length
1433 * of the string fits into int. */
1434 len = (Py_ssize_t)(ncp - PyBytes_AsString(str));
1435 rv = PyBytes_FromStringAndSize
1436 (PyBytes_AsString(str), len);
1437 Py_DECREF(str);
1438 str = rv;
1439 if (str == NULL)
1440 goto exit;
1441 rv = Py_BuildValue("(O(iO))", str, d, samps);
1442 Py_DECREF(samps);
1443 Py_DECREF(str);
1444 goto exit; /* return rv */
1445 }
1446 for (chan = 0; chan < nchannels; chan++) {
1447 prev_i[chan] = cur_i[chan];
1448 cur_i[chan] = GETSAMPLE32(width, cp, 0);
1449 cp += width;
1450 /* implements a simple digital filter */
1451 cur_i[chan] = (int)(
1452 ((double)weightA * (double)cur_i[chan] +
1453 (double)weightB * (double)prev_i[chan]) /
1454 ((double)weightA + (double)weightB));
1455 }
1456 len--;
1457 d += outrate;
1458 }
1459 while (d >= 0) {
1460 for (chan = 0; chan < nchannels; chan++) {
1461 cur_o = (int)(((double)prev_i[chan] * (double)d +
1462 (double)cur_i[chan] * (double)(outrate - d)) /
1463 (double)outrate);
1464 SETSAMPLE32(width, ncp, 0, cur_o);
1465 ncp += width;
1466 }
1467 d -= inrate;
1468 }
1469 }
1470 exit:
1471 PyMem_Free(prev_i);
1472 PyMem_Free(cur_i);
1473 return rv;
1474 }
1475
1476 /*[clinic input]
1477 audioop.lin2ulaw
1478
1479 fragment: Py_buffer
1480 width: int
1481 /
1482
1483 Convert samples in the audio fragment to u-LAW encoding.
1484 [clinic start generated code]*/
1485
1486 static PyObject *
audioop_lin2ulaw_impl(PyObject *module, Py_buffer *fragment, int width)
1488 /*[clinic end generated code: output=14fb62b16fe8ea8e input=2450d1b870b6bac2]*/
1489 {
1490 unsigned char *ncp;
1491 Py_ssize_t i;
1492 PyObject *rv;
1493
1494 if (!audioop_check_parameters(fragment->len, width))
1495 return NULL;
1496
1497 rv = PyBytes_FromStringAndSize(NULL, fragment->len/width);
1498 if (rv == NULL)
1499 return NULL;
1500 ncp = (unsigned char *)PyBytes_AsString(rv);
1501
1502 for (i = 0; i < fragment->len; i += width) {
1503 int val = GETSAMPLE32(width, fragment->buf, i);
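        /* GETSAMPLE32() left-justifies the sample in 32 bits; dropping the
           low 18 bits leaves the 14-bit value st_14linear2ulaw() expects. */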
1504 *ncp++ = st_14linear2ulaw(val >> 18);
1505 }
1506 return rv;
1507 }
1508
1509 /*[clinic input]
1510 audioop.ulaw2lin
1511
1512 fragment: Py_buffer
1513 width: int
1514 /
1515
1516 Convert sound fragments in u-LAW encoding to linearly encoded sound fragments.
1517 [clinic start generated code]*/
1518
1519 static PyObject *
audioop_ulaw2lin_impl(PyObject *module, Py_buffer *fragment, int width)
1521 /*[clinic end generated code: output=378356b047521ba2 input=45d53ddce5be7d06]*/
1522 {
1523 unsigned char *cp;
1524 signed char *ncp;
1525 Py_ssize_t i;
1526 PyObject *rv;
1527
1528 if (!audioop_check_size(width))
1529 return NULL;
1530
1531 if (fragment->len > PY_SSIZE_T_MAX/width) {
1532 PyErr_SetString(PyExc_MemoryError,
1533 "not enough memory for output buffer");
1534 return NULL;
1535 }
1536 rv = PyBytes_FromStringAndSize(NULL, fragment->len*width);
1537 if (rv == NULL)
1538 return NULL;
1539 ncp = (signed char *)PyBytes_AsString(rv);
1540
1541 cp = fragment->buf;
1542 for (i = 0; i < fragment->len*width; i += width) {
1543 int val = st_ulaw2linear16(*cp++) << 16;
1544 SETSAMPLE32(width, ncp, i, val);
1545 }
1546 return rv;
1547 }
1548
1549 /*[clinic input]
1550 audioop.lin2alaw
1551
1552 fragment: Py_buffer
1553 width: int
1554 /
1555
1556 Convert samples in the audio fragment to a-LAW encoding.
1557 [clinic start generated code]*/
1558
1559 static PyObject *
audioop_lin2alaw_impl(PyObject *module, Py_buffer *fragment, int width)
1561 /*[clinic end generated code: output=d076f130121a82f0 input=ffb1ef8bb39da945]*/
1562 {
1563 unsigned char *ncp;
1564 Py_ssize_t i;
1565 PyObject *rv;
1566
1567 if (!audioop_check_parameters(fragment->len, width))
1568 return NULL;
1569
1570 rv = PyBytes_FromStringAndSize(NULL, fragment->len/width);
1571 if (rv == NULL)
1572 return NULL;
1573 ncp = (unsigned char *)PyBytes_AsString(rv);
1574
1575 for (i = 0; i < fragment->len; i += width) {
1576 int val = GETSAMPLE32(width, fragment->buf, i);
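        /* Shifting by 19 keeps the top 13 bits, the range expected by
           st_linear2alaw(). */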
1577 *ncp++ = st_linear2alaw(val >> 19);
1578 }
1579 return rv;
1580 }
1581
1582 /*[clinic input]
1583 audioop.alaw2lin
1584
1585 fragment: Py_buffer
1586 width: int
1587 /
1588
1589 Convert sound fragments in a-LAW encoding to linearly encoded sound fragments.
1590 [clinic start generated code]*/
1591
1592 static PyObject *
audioop_alaw2lin_impl(PyObject *module, Py_buffer *fragment, int width)
1594 /*[clinic end generated code: output=85c365ec559df647 input=4140626046cd1772]*/
1595 {
1596 unsigned char *cp;
1597 signed char *ncp;
1598 Py_ssize_t i;
1599 int val;
1600 PyObject *rv;
1601
1602 if (!audioop_check_size(width))
1603 return NULL;
1604
1605 if (fragment->len > PY_SSIZE_T_MAX/width) {
1606 PyErr_SetString(PyExc_MemoryError,
1607 "not enough memory for output buffer");
1608 return NULL;
1609 }
1610 rv = PyBytes_FromStringAndSize(NULL, fragment->len*width);
1611 if (rv == NULL)
1612 return NULL;
1613 ncp = (signed char *)PyBytes_AsString(rv);
1614 cp = fragment->buf;
1615
1616 for (i = 0; i < fragment->len*width; i += width) {
1617 val = st_alaw2linear16(*cp++) << 16;
1618 SETSAMPLE32(width, ncp, i, val);
1619 }
1620 return rv;
1621 }
1622
1623 /*[clinic input]
1624 audioop.lin2adpcm
1625
1626 fragment: Py_buffer
1627 width: int
1628 state: object
1629 /
1630
1631 Convert samples to 4 bit Intel/DVI ADPCM encoding.
1632 [clinic start generated code]*/
1633
1634 static PyObject *
audioop_lin2adpcm_impl(PyObject *module, Py_buffer *fragment, int width,
                       PyObject *state)
1637 /*[clinic end generated code: output=cc19f159f16c6793 input=12919d549b90c90a]*/
1638 {
1639 signed char *ncp;
1640 Py_ssize_t i;
1641 int step, valpred, delta,
1642 index, sign, vpdiff, diff;
1643 PyObject *rv = NULL, *str;
1644 int outputbuffer = 0, bufferstep;
1645
1646 if (!audioop_check_parameters(fragment->len, width))
1647 return NULL;
1648
    /* Decode state, should have (value, index) */
1650 if ( state == Py_None ) {
1651 /* First time, it seems. Set defaults */
1652 valpred = 0;
1653 index = 0;
1654 }
1655 else if (!PyTuple_Check(state)) {
1656 PyErr_SetString(PyExc_TypeError, "state must be a tuple or None");
1657 return NULL;
1658 }
1659 else if (!PyArg_ParseTuple(state, "ii;lin2adpcm(): illegal state argument",
1660 &valpred, &index))
1661 {
1662 return NULL;
1663 }
1664 else if (valpred >= 0x8000 || valpred < -0x8000 ||
1665 (size_t)index >= Py_ARRAY_LENGTH(stepsizeTable)) {
1666 PyErr_SetString(PyExc_ValueError, "bad state");
1667 return NULL;
1668 }
1669
1670 str = PyBytes_FromStringAndSize(NULL, fragment->len/(width*2));
1671 if (str == NULL)
1672 return NULL;
1673 ncp = (signed char *)PyBytes_AsString(str);
1674
1675 step = stepsizeTable[index];
1676 bufferstep = 1;
1677
1678 for (i = 0; i < fragment->len; i += width) {
1679 int val = GETSAMPLE32(width, fragment->buf, i) >> 16;
1680
1681 /* Step 1 - compute difference with previous value */
1682 if (val < valpred) {
1683 diff = valpred - val;
1684 sign = 8;
1685 }
1686 else {
1687 diff = val - valpred;
1688 sign = 0;
1689 }
1690
1691 /* Step 2 - Divide and clamp */
1692 /* Note:
1693 ** This code *approximately* computes:
1694 ** delta = diff*4/step;
1695 ** vpdiff = (delta+0.5)*step/4;
1696 ** but in shift step bits are dropped. The net result of this
1697 ** is that even if you have fast mul/div hardware you cannot
1698 ** put it to good use since the fixup would be too expensive.
1699 */
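        /* For example (illustration only): with step == 16 and diff == 21 the
           three tests below produce delta == 5 and vpdiff == 22, matching
           delta = 21*4/16 (truncated) and vpdiff = (5+0.5)*16/4. */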
1700 delta = 0;
1701 vpdiff = (step >> 3);
1702
1703 if ( diff >= step ) {
1704 delta = 4;
1705 diff -= step;
1706 vpdiff += step;
1707 }
1708 step >>= 1;
1709 if ( diff >= step ) {
1710 delta |= 2;
1711 diff -= step;
1712 vpdiff += step;
1713 }
1714 step >>= 1;
1715 if ( diff >= step ) {
1716 delta |= 1;
1717 vpdiff += step;
1718 }
1719
1720 /* Step 3 - Update previous value */
1721 if ( sign )
1722 valpred -= vpdiff;
1723 else
1724 valpred += vpdiff;
1725
1726 /* Step 4 - Clamp previous value to 16 bits */
1727 if ( valpred > 32767 )
1728 valpred = 32767;
1729 else if ( valpred < -32768 )
1730 valpred = -32768;
1731
1732 /* Step 5 - Assemble value, update index and step values */
1733 delta |= sign;
1734
1735 index += indexTable[delta];
1736 if ( index < 0 ) index = 0;
1737 if ( index > 88 ) index = 88;
1738 step = stepsizeTable[index];
1739
1740 /* Step 6 - Output value */
1741 if ( bufferstep ) {
1742 outputbuffer = (delta << 4) & 0xf0;
1743 } else {
1744 *ncp++ = (delta & 0x0f) | outputbuffer;
1745 }
1746 bufferstep = !bufferstep;
1747 }
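    /* Two 4-bit codes are packed per output byte; if the fragment held an odd
       number of samples, the final nibble still sitting in outputbuffer is
       dropped (only fragment->len/(width*2) bytes were allocated above). */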
1748 rv = Py_BuildValue("(O(ii))", str, valpred, index);
1749 Py_DECREF(str);
1750 return rv;
1751 }
1752
1753 /*[clinic input]
1754 audioop.adpcm2lin
1755
1756 fragment: Py_buffer
1757 width: int
1758 state: object
1759 /
1760
1761 Decode an Intel/DVI ADPCM coded fragment to a linear fragment.
1762 [clinic start generated code]*/
1763
1764 static PyObject *
audioop_adpcm2lin_impl(PyObject *module, Py_buffer *fragment, int width,
                       PyObject *state)
1767 /*[clinic end generated code: output=3440ea105acb3456 input=f5221144f5ca9ef0]*/
1768 {
1769 signed char *cp;
1770 signed char *ncp;
1771 Py_ssize_t i, outlen;
1772 int valpred, step, delta, index, sign, vpdiff;
1773 PyObject *rv, *str;
1774 int inputbuffer = 0, bufferstep;
1775
1776 if (!audioop_check_size(width))
1777 return NULL;
1778
    /* Decode state, should have (value, index) */
1780 if ( state == Py_None ) {
1781 /* First time, it seems. Set defaults */
1782 valpred = 0;
1783 index = 0;
1784 }
1785 else if (!PyTuple_Check(state)) {
1786 PyErr_SetString(PyExc_TypeError, "state must be a tuple or None");
1787 return NULL;
1788 }
1789 else if (!PyArg_ParseTuple(state, "ii;adpcm2lin(): illegal state argument",
1790 &valpred, &index))
1791 {
1792 return NULL;
1793 }
1794 else if (valpred >= 0x8000 || valpred < -0x8000 ||
1795 (size_t)index >= Py_ARRAY_LENGTH(stepsizeTable)) {
1796 PyErr_SetString(PyExc_ValueError, "bad state");
1797 return NULL;
1798 }
1799
1800 if (fragment->len > (PY_SSIZE_T_MAX/2)/width) {
1801 PyErr_SetString(PyExc_MemoryError,
1802 "not enough memory for output buffer");
1803 return NULL;
1804 }
1805 outlen = fragment->len*width*2;
1806 str = PyBytes_FromStringAndSize(NULL, outlen);
1807 if (str == NULL)
1808 return NULL;
1809 ncp = (signed char *)PyBytes_AsString(str);
1810 cp = fragment->buf;
1811
1812 step = stepsizeTable[index];
1813 bufferstep = 0;
1814
1815 for (i = 0; i < outlen; i += width) {
1816 /* Step 1 - get the delta value and compute next index */
1817 if ( bufferstep ) {
1818 delta = inputbuffer & 0xf;
1819 } else {
1820 inputbuffer = *cp++;
1821 delta = (inputbuffer >> 4) & 0xf;
1822 }
1823
1824 bufferstep = !bufferstep;
1825
1826 /* Step 2 - Find new index value (for later) */
1827 index += indexTable[delta];
1828 if ( index < 0 ) index = 0;
1829 if ( index > 88 ) index = 88;
1830
1831 /* Step 3 - Separate sign and magnitude */
1832 sign = delta & 8;
1833 delta = delta & 7;
1834
1835 /* Step 4 - Compute difference and new predicted value */
1836 /*
1837 ** Computes 'vpdiff = (delta+0.5)*step/4', but see comment
1838 ** in adpcm_coder.
1839 */
1840 vpdiff = step >> 3;
1841 if ( delta & 4 ) vpdiff += step;
1842 if ( delta & 2 ) vpdiff += step>>1;
1843 if ( delta & 1 ) vpdiff += step>>2;
1844
1845 if ( sign )
1846 valpred -= vpdiff;
1847 else
1848 valpred += vpdiff;
1849
1850 /* Step 5 - clamp output value */
1851 if ( valpred > 32767 )
1852 valpred = 32767;
1853 else if ( valpred < -32768 )
1854 valpred = -32768;
1855
1856 /* Step 6 - Update step value */
1857 step = stepsizeTable[index];
1858
        /* Step 7 - Output value */
1860 SETSAMPLE32(width, ncp, i, valpred << 16);
1861 }
1862
1863 rv = Py_BuildValue("(O(ii))", str, valpred, index);
1864 Py_DECREF(str);
1865 return rv;
1866 }
1867
1868 #include "clinic/audioop.c.h"
1869
1870 static PyMethodDef audioop_methods[] = {
1871 AUDIOOP_MAX_METHODDEF
1872 AUDIOOP_MINMAX_METHODDEF
1873 AUDIOOP_AVG_METHODDEF
1874 AUDIOOP_MAXPP_METHODDEF
1875 AUDIOOP_AVGPP_METHODDEF
1876 AUDIOOP_RMS_METHODDEF
1877 AUDIOOP_FINDFIT_METHODDEF
1878 AUDIOOP_FINDMAX_METHODDEF
1879 AUDIOOP_FINDFACTOR_METHODDEF
1880 AUDIOOP_CROSS_METHODDEF
1881 AUDIOOP_MUL_METHODDEF
1882 AUDIOOP_ADD_METHODDEF
1883 AUDIOOP_BIAS_METHODDEF
1884 AUDIOOP_ULAW2LIN_METHODDEF
1885 AUDIOOP_LIN2ULAW_METHODDEF
1886 AUDIOOP_ALAW2LIN_METHODDEF
1887 AUDIOOP_LIN2ALAW_METHODDEF
1888 AUDIOOP_LIN2LIN_METHODDEF
1889 AUDIOOP_ADPCM2LIN_METHODDEF
1890 AUDIOOP_LIN2ADPCM_METHODDEF
1891 AUDIOOP_TOMONO_METHODDEF
1892 AUDIOOP_TOSTEREO_METHODDEF
1893 AUDIOOP_GETSAMPLE_METHODDEF
1894 AUDIOOP_REVERSE_METHODDEF
1895 AUDIOOP_BYTESWAP_METHODDEF
1896 AUDIOOP_RATECV_METHODDEF
1897 { 0, 0 }
1898 };
1899
1900
1901 static struct PyModuleDef audioopmodule = {
1902 PyModuleDef_HEAD_INIT,
1903 "audioop",
1904 NULL,
1905 -1,
1906 audioop_methods,
1907 NULL,
1908 NULL,
1909 NULL,
1910 NULL
1911 };
1912
1913 PyMODINIT_FUNC
PyInit_audioop(void)
1915 {
1916 PyObject *m, *d;
1917 m = PyModule_Create(&audioopmodule);
1918 if (m == NULL)
1919 return NULL;
1920 d = PyModule_GetDict(m);
1921 if (d == NULL)
1922 return NULL;
1923 AudioopError = PyErr_NewException("audioop.error", NULL, NULL);
1924 if (AudioopError != NULL)
1925 PyDict_SetItemString(d,"error",AudioopError);
1926 return m;
1927 }
1928