1
2 /* audioopmodule - Module to detect peak values in arrays */
3
4 #define PY_SSIZE_T_CLEAN
5
6 #include "Python.h"
7
8 #if defined(__CHAR_UNSIGNED__)
9 #if defined(signed)
10 /* This module currently does not work on systems where only unsigned
11 characters are available. Take it out of Setup. Sorry. */
12 #endif
13 #endif
14
15 static const int maxvals[] = {0, 0x7F, 0x7FFF, 0x7FFFFF, 0x7FFFFFFF};
16 /* -1 trick is needed on Windows to support -0x80000000 without a warning */
17 static const int minvals[] = {0, -0x80, -0x8000, -0x800000, -0x7FFFFFFF-1};
18 static const unsigned int masks[] = {0, 0xFF, 0xFFFF, 0xFFFFFF, 0xFFFFFFFF};
19
20 static int
21 fbound(double val, double minval, double maxval)
22 {
23 if (val > maxval) {
24 val = maxval;
25 }
26 else if (val < minval + 1.0) {
27 val = minval;
28 }
29
30 /* Round towards minus infinity (-inf) */
31 val = floor(val);
32
33 /* Cast double to integer: round towards zero */
34 return (int)val;
35 }
36
37
38 /* Code shamelessly stolen from sox, 12.17.7, g711.c
39 ** (c) Craig Reese, Joe Campbell and Jeff Poskanzer 1989 */
40
41 /* From g711.c:
42 *
43 * December 30, 1994:
44 * Functions linear2alaw, linear2ulaw have been updated to correctly
45 * convert unquantized 16 bit values.
46 * Tables for direct u- to A-law and A- to u-law conversions have been
47 * corrected.
48 * Borge Lindberg, Center for PersonKommunikation, Aalborg University.
49 * bli@cpk.auc.dk
50 *
51 */
52 #define BIAS 0x84 /* define the add-in bias for 16 bit samples */
53 #define CLIP 32635
54 #define SIGN_BIT (0x80) /* Sign bit for an A-law byte. */
55 #define QUANT_MASK (0xf) /* Quantization field mask. */
56 #define SEG_SHIFT (4) /* Left shift for segment number. */
57 #define SEG_MASK (0x70) /* Segment field mask. */
58
59 static const int16_t seg_aend[8] = {
60 0x1F, 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF
61 };
62 static const int16_t seg_uend[8] = {
63 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF
64 };
65
66 static int16_t
67 search(int16_t val, const int16_t *table, int size)
68 {
69 int i;
70
71 for (i = 0; i < size; i++) {
72 if (val <= *table++)
73 return (i);
74 }
75 return (size);
76 }
77 #define st_ulaw2linear16(uc) (_st_ulaw2linear16[uc])
78 #define st_alaw2linear16(uc) (_st_alaw2linear16[uc])
79
80 static const int16_t _st_ulaw2linear16[256] = {
81 -32124, -31100, -30076, -29052, -28028, -27004, -25980,
82 -24956, -23932, -22908, -21884, -20860, -19836, -18812,
83 -17788, -16764, -15996, -15484, -14972, -14460, -13948,
84 -13436, -12924, -12412, -11900, -11388, -10876, -10364,
85 -9852, -9340, -8828, -8316, -7932, -7676, -7420,
86 -7164, -6908, -6652, -6396, -6140, -5884, -5628,
87 -5372, -5116, -4860, -4604, -4348, -4092, -3900,
88 -3772, -3644, -3516, -3388, -3260, -3132, -3004,
89 -2876, -2748, -2620, -2492, -2364, -2236, -2108,
90 -1980, -1884, -1820, -1756, -1692, -1628, -1564,
91 -1500, -1436, -1372, -1308, -1244, -1180, -1116,
92 -1052, -988, -924, -876, -844, -812, -780,
93 -748, -716, -684, -652, -620, -588, -556,
94 -524, -492, -460, -428, -396, -372, -356,
95 -340, -324, -308, -292, -276, -260, -244,
96 -228, -212, -196, -180, -164, -148, -132,
97 -120, -112, -104, -96, -88, -80, -72,
98 -64, -56, -48, -40, -32, -24, -16,
99 -8, 0, 32124, 31100, 30076, 29052, 28028,
100 27004, 25980, 24956, 23932, 22908, 21884, 20860,
101 19836, 18812, 17788, 16764, 15996, 15484, 14972,
102 14460, 13948, 13436, 12924, 12412, 11900, 11388,
103 10876, 10364, 9852, 9340, 8828, 8316, 7932,
104 7676, 7420, 7164, 6908, 6652, 6396, 6140,
105 5884, 5628, 5372, 5116, 4860, 4604, 4348,
106 4092, 3900, 3772, 3644, 3516, 3388, 3260,
107 3132, 3004, 2876, 2748, 2620, 2492, 2364,
108 2236, 2108, 1980, 1884, 1820, 1756, 1692,
109 1628, 1564, 1500, 1436, 1372, 1308, 1244,
110 1180, 1116, 1052, 988, 924, 876, 844,
111 812, 780, 748, 716, 684, 652, 620,
112 588, 556, 524, 492, 460, 428, 396,
113 372, 356, 340, 324, 308, 292, 276,
114 260, 244, 228, 212, 196, 180, 164,
115 148, 132, 120, 112, 104, 96, 88,
116 80, 72, 64, 56, 48, 40, 32,
117 24, 16, 8, 0
118 };
119
120 /*
121 * linear2ulaw() accepts a 14-bit signed integer and encodes it as u-law data
122 * stored in an unsigned char. This function should only be called with
123 * the data shifted such that it only contains information in the lower
124 * 14-bits.
125 *
126 * In order to simplify the encoding process, the original linear magnitude
127 * is biased by adding 33 which shifts the encoding range from (0 - 8158) to
128 * (33 - 8191). The result can be seen in the following encoding table:
129 *
130 * Biased Linear Input Code Compressed Code
131 * ------------------------ ---------------
132 * 00000001wxyza 000wxyz
133 * 0000001wxyzab 001wxyz
134 * 000001wxyzabc 010wxyz
135 * 00001wxyzabcd 011wxyz
136 * 0001wxyzabcde 100wxyz
137 * 001wxyzabcdef 101wxyz
138 * 01wxyzabcdefg 110wxyz
139 * 1wxyzabcdefgh 111wxyz
140 *
141 * Each biased linear code has a leading 1 which identifies the segment
142 * number. The value of the segment number is equal to 7 minus the number
143 * of leading 0's. The quantization interval is directly available as the
144 * four bits wxyz. The trailing bits (a - h) are ignored.
145 *
146 * Ordinarily the complement of the resulting code word is used for
147 * transmission, and so the code word is complemented before it is returned.
148 *
149 * For further information see John C. Bellamy's Digital Telephony, 1982,
150 * John Wiley & Sons, pps 98-111 and 472-476.
151 */
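/*
 * Illustrative walk-through (added for clarity, not part of the original
 * sox comment): st_14linear2ulaw(1000) proceeds as follows.  The value is
 * positive, so mask = 0xFF.  Adding BIAS >> 2 (= 33) gives 1033, which
 * falls in segment 5 of seg_uend (1024..2047).  The code word is then
 * (5 << 4) | ((1033 >> 6) & 0xF) = 0x50, and 0x50 ^ 0xFF = 0xAF is
 * returned.  Decoding 0xAF through _st_ulaw2linear16 gives 4092, i.e.
 * roughly the original 14-bit value scaled back to 16 bits (1000 << 2).
 */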
152 static unsigned char
153 st_14linear2ulaw(int16_t pcm_val)  /* 2's complement (14-bit range) */
154 {
155 int16_t mask;
156 int16_t seg;
157 unsigned char uval;
158
159 /* u-law inverts all bits */
160 /* Get the sign and the magnitude of the value. */
161 if (pcm_val < 0) {
162 pcm_val = -pcm_val;
163 mask = 0x7F;
164 } else {
165 mask = 0xFF;
166 }
167 if ( pcm_val > CLIP ) pcm_val = CLIP; /* clip the magnitude */
168 pcm_val += (BIAS >> 2);
169
170 /* Convert the scaled magnitude to segment number. */
171 seg = search(pcm_val, seg_uend, 8);
172
173 /*
174 * Combine the sign, segment, quantization bits;
175 * and complement the code word.
176 */
177 if (seg >= 8) /* out of range, return maximum value. */
178 return (unsigned char) (0x7F ^ mask);
179 else {
180 uval = (unsigned char) (seg << 4) | ((pcm_val >> (seg + 1)) & 0xF);
181 return (uval ^ mask);
182 }
183
184 }
185
186 static const int16_t _st_alaw2linear16[256] = {
187 -5504, -5248, -6016, -5760, -4480, -4224, -4992,
188 -4736, -7552, -7296, -8064, -7808, -6528, -6272,
189 -7040, -6784, -2752, -2624, -3008, -2880, -2240,
190 -2112, -2496, -2368, -3776, -3648, -4032, -3904,
191 -3264, -3136, -3520, -3392, -22016, -20992, -24064,
192 -23040, -17920, -16896, -19968, -18944, -30208, -29184,
193 -32256, -31232, -26112, -25088, -28160, -27136, -11008,
194 -10496, -12032, -11520, -8960, -8448, -9984, -9472,
195 -15104, -14592, -16128, -15616, -13056, -12544, -14080,
196 -13568, -344, -328, -376, -360, -280, -264,
197 -312, -296, -472, -456, -504, -488, -408,
198 -392, -440, -424, -88, -72, -120, -104,
199 -24, -8, -56, -40, -216, -200, -248,
200 -232, -152, -136, -184, -168, -1376, -1312,
201 -1504, -1440, -1120, -1056, -1248, -1184, -1888,
202 -1824, -2016, -1952, -1632, -1568, -1760, -1696,
203 -688, -656, -752, -720, -560, -528, -624,
204 -592, -944, -912, -1008, -976, -816, -784,
205 -880, -848, 5504, 5248, 6016, 5760, 4480,
206 4224, 4992, 4736, 7552, 7296, 8064, 7808,
207 6528, 6272, 7040, 6784, 2752, 2624, 3008,
208 2880, 2240, 2112, 2496, 2368, 3776, 3648,
209 4032, 3904, 3264, 3136, 3520, 3392, 22016,
210 20992, 24064, 23040, 17920, 16896, 19968, 18944,
211 30208, 29184, 32256, 31232, 26112, 25088, 28160,
212 27136, 11008, 10496, 12032, 11520, 8960, 8448,
213 9984, 9472, 15104, 14592, 16128, 15616, 13056,
214 12544, 14080, 13568, 344, 328, 376, 360,
215 280, 264, 312, 296, 472, 456, 504,
216 488, 408, 392, 440, 424, 88, 72,
217 120, 104, 24, 8, 56, 40, 216,
218 200, 248, 232, 152, 136, 184, 168,
219 1376, 1312, 1504, 1440, 1120, 1056, 1248,
220 1184, 1888, 1824, 2016, 1952, 1632, 1568,
221 1760, 1696, 688, 656, 752, 720, 560,
222 528, 624, 592, 944, 912, 1008, 976,
223 816, 784, 880, 848
224 };
225
226 /*
227 * linear2alaw() accepts a 13-bit signed integer and encodes it as A-law data
228 * stored in an unsigned char. This function should only be called with
229 * the data shifted such that it only contains information in the lower
230 * 13-bits.
231 *
232 * Linear Input Code Compressed Code
233 * ------------------------ ---------------
234 * 0000000wxyza 000wxyz
235 * 0000001wxyza 001wxyz
236 * 000001wxyzab 010wxyz
237 * 00001wxyzabc 011wxyz
238 * 0001wxyzabcd 100wxyz
239 * 001wxyzabcde 101wxyz
240 * 01wxyzabcdef 110wxyz
241 * 1wxyzabcdefg 111wxyz
242 *
243 * For further information see John C. Bellamy's Digital Telephony, 1982,
244 * John Wiley & Sons, pps 98-111 and 472-476.
245 */
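/*
 * Illustrative walk-through (added for clarity): st_linear2alaw(1000) uses
 * mask = 0xD5 because the input is non-negative.  1000 falls in segment 5
 * of seg_aend (512..1023), so aval = (5 << SEG_SHIFT) | ((1000 >> 5) & 0xF)
 * = 0x5F, and 0x5F ^ 0xD5 = 0x8A is returned.  Decoding 0x8A through
 * _st_alaw2linear16 gives 8064, i.e. roughly the 13-bit input scaled back
 * to 16 bits (1000 << 3).
 */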
246 static unsigned char
247 st_linear2alaw(int16_t pcm_val)  /* 2's complement (13-bit range) */
248 {
249 int16_t mask;
250 int16_t seg;
251 unsigned char aval;
252
253 /* A-law using even bit inversion */
254 if (pcm_val >= 0) {
255 mask = 0xD5; /* sign (7th) bit = 1 */
256 } else {
257 mask = 0x55; /* sign bit = 0 */
258 pcm_val = -pcm_val - 1;
259 }
260
261 /* Convert the scaled magnitude to segment number. */
262 seg = search(pcm_val, seg_aend, 8);
263
264 /* Combine the sign, segment, and quantization bits. */
265
266 if (seg >= 8) /* out of range, return maximum value. */
267 return (unsigned char) (0x7F ^ mask);
268 else {
269 aval = (unsigned char) seg << SEG_SHIFT;
270 if (seg < 2)
271 aval |= (pcm_val >> 1) & QUANT_MASK;
272 else
273 aval |= (pcm_val >> seg) & QUANT_MASK;
274 return (aval ^ mask);
275 }
276 }
277 /* End of code taken from sox */
278
279 /* Intel ADPCM step variation table */
280 static const int indexTable[16] = {
281 -1, -1, -1, -1, 2, 4, 6, 8,
282 -1, -1, -1, -1, 2, 4, 6, 8,
283 };
284
285 static const int stepsizeTable[89] = {
286 7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
287 19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
288 50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
289 130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
290 337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
291 876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
292 2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
293 5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
294 15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
295 };
296
297 #define GETINTX(T, cp, i) (*(T *)((unsigned char *)(cp) + (i)))
298 #define SETINTX(T, cp, i, val) do { \
299 *(T *)((unsigned char *)(cp) + (i)) = (T)(val); \
300 } while (0)
301
302
303 #define GETINT8(cp, i) GETINTX(signed char, (cp), (i))
304 #define GETINT16(cp, i) GETINTX(int16_t, (cp), (i))
305 #define GETINT32(cp, i) GETINTX(int32_t, (cp), (i))
306
307 #if WORDS_BIGENDIAN
308 #define GETINT24(cp, i) ( \
309 ((unsigned char *)(cp) + (i))[2] + \
310 (((unsigned char *)(cp) + (i))[1] << 8) + \
311 (((signed char *)(cp) + (i))[0] << 16) )
312 #else
313 #define GETINT24(cp, i) ( \
314 ((unsigned char *)(cp) + (i))[0] + \
315 (((unsigned char *)(cp) + (i))[1] << 8) + \
316 (((signed char *)(cp) + (i))[2] << 16) )
317 #endif
318
319
320 #define SETINT8(cp, i, val) SETINTX(signed char, (cp), (i), (val))
321 #define SETINT16(cp, i, val) SETINTX(int16_t, (cp), (i), (val))
322 #define SETINT32(cp, i, val) SETINTX(int32_t, (cp), (i), (val))
323
324 #if WORDS_BIGENDIAN
325 #define SETINT24(cp, i, val) do { \
326 ((unsigned char *)(cp) + (i))[2] = (int)(val); \
327 ((unsigned char *)(cp) + (i))[1] = (int)(val) >> 8; \
328 ((signed char *)(cp) + (i))[0] = (int)(val) >> 16; \
329 } while (0)
330 #else
331 #define SETINT24(cp, i, val) do { \
332 ((unsigned char *)(cp) + (i))[0] = (int)(val); \
333 ((unsigned char *)(cp) + (i))[1] = (int)(val) >> 8; \
334 ((signed char *)(cp) + (i))[2] = (int)(val) >> 16; \
335 } while (0)
336 #endif
337
338
339 #define GETRAWSAMPLE(size, cp, i) ( \
340 (size == 1) ? (int)GETINT8((cp), (i)) : \
341 (size == 2) ? (int)GETINT16((cp), (i)) : \
342 (size == 3) ? (int)GETINT24((cp), (i)) : \
343 (int)GETINT32((cp), (i)))
344
345 #define SETRAWSAMPLE(size, cp, i, val) do { \
346 if (size == 1) \
347 SETINT8((cp), (i), (val)); \
348 else if (size == 2) \
349 SETINT16((cp), (i), (val)); \
350 else if (size == 3) \
351 SETINT24((cp), (i), (val)); \
352 else \
353 SETINT32((cp), (i), (val)); \
354 } while(0)
355
356
357 #define GETSAMPLE32(size, cp, i) ( \
358 (size == 1) ? (int)GETINT8((cp), (i)) << 24 : \
359 (size == 2) ? (int)GETINT16((cp), (i)) << 16 : \
360 (size == 3) ? (int)GETINT24((cp), (i)) << 8 : \
361 (int)GETINT32((cp), (i)))
362
363 #define SETSAMPLE32(size, cp, i, val) do { \
364 if (size == 1) \
365 SETINT8((cp), (i), (val) >> 24); \
366 else if (size == 2) \
367 SETINT16((cp), (i), (val) >> 16); \
368 else if (size == 3) \
369 SETINT24((cp), (i), (val) >> 8); \
370 else \
371 SETINT32((cp), (i), (val)); \
372 } while(0)
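/* GETSAMPLE32/SETSAMPLE32 normalize samples of any supported width to a
   signed 32-bit range by shifting them into the most significant bytes.
   For example (illustrative), GETSAMPLE32(1, cp, i) turns the 8-bit sample
   0x7F into 0x7F000000, and SETSAMPLE32(2, cp, i, 0x12345678) stores the
   16-bit sample 0x1234. */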
373
374 static PyModuleDef audioopmodule;
375
376 typedef struct {
377 PyObject *AudioopError;
378 } audioop_state;
379
380 static inline audioop_state *
381 get_audioop_state(PyObject *module)
382 {
383 void *state = PyModule_GetState(module);
384 assert(state != NULL);
385 return (audioop_state *)state;
386 }
387
388 static int
389 audioop_check_size(PyObject *module, int size)
390 {
391 if (size < 1 || size > 4) {
392 PyErr_SetString(get_audioop_state(module)->AudioopError,
393 "Size should be 1, 2, 3 or 4");
394 return 0;
395 }
396 else
397 return 1;
398 }
399
400 static int
401 audioop_check_parameters(PyObject *module, Py_ssize_t len, int size)
402 {
403 if (!audioop_check_size(module, size))
404 return 0;
405 if (len % size != 0) {
406 PyErr_SetString(get_audioop_state(module)->AudioopError,
407 "not a whole number of frames");
408 return 0;
409 }
410 return 1;
411 }
412
413 /*[clinic input]
414 module audioop
415 [clinic start generated code]*/
416 /*[clinic end generated code: output=da39a3ee5e6b4b0d input=8fa8f6611be3591a]*/
417
418 /*[clinic input]
419 audioop.getsample
420
421 fragment: Py_buffer
422 width: int
423 index: Py_ssize_t
424 /
425
426 Return the value of sample index from the fragment.
427 [clinic start generated code]*/
428
429 static PyObject *
430 audioop_getsample_impl(PyObject *module, Py_buffer *fragment, int width,
431 Py_ssize_t index)
432 /*[clinic end generated code: output=8fe1b1775134f39a input=88edbe2871393549]*/
433 {
434 int val;
435
436 if (!audioop_check_parameters(module, fragment->len, width))
437 return NULL;
438 if (index < 0 || index >= fragment->len/width) {
439 PyErr_SetString(get_audioop_state(module)->AudioopError,
440 "Index out of range");
441 return NULL;
442 }
443 val = GETRAWSAMPLE(width, fragment->buf, index*width);
444 return PyLong_FromLong(val);
445 }
446
447 /*[clinic input]
448 audioop.max
449
450 fragment: Py_buffer
451 width: int
452 /
453
454 Return the maximum of the absolute value of all samples in a fragment.
455 [clinic start generated code]*/
456
457 static PyObject *
458 audioop_max_impl(PyObject *module, Py_buffer *fragment, int width)
459 /*[clinic end generated code: output=e6c5952714f1c3f0 input=32bea5ea0ac8c223]*/
460 {
461 Py_ssize_t i;
462 unsigned int absval, max = 0;
463
464 if (!audioop_check_parameters(module, fragment->len, width))
465 return NULL;
466 for (i = 0; i < fragment->len; i += width) {
467 int val = GETRAWSAMPLE(width, fragment->buf, i);
468 /* Cast to unsigned before negating. Unsigned overflow is well-
469 defined, but signed overflow is not. */
470 if (val < 0) absval = (unsigned int)-(int64_t)val;
471 else absval = val;
472 if (absval > max) max = absval;
473 }
474 return PyLong_FromUnsignedLong(max);
475 }
476
477 /*[clinic input]
478 audioop.minmax
479
480 fragment: Py_buffer
481 width: int
482 /
483
484 Return the minimum and maximum values of all samples in the sound fragment.
485 [clinic start generated code]*/
486
487 static PyObject *
488 audioop_minmax_impl(PyObject *module, Py_buffer *fragment, int width)
489 /*[clinic end generated code: output=473fda66b15c836e input=89848e9b927a0696]*/
490 {
491 Py_ssize_t i;
492 /* -1 trick below is needed on Windows to support -0x80000000 without
493 a warning */
494 int min = 0x7fffffff, max = -0x7FFFFFFF-1;
495
496 if (!audioop_check_parameters(module, fragment->len, width))
497 return NULL;
498 for (i = 0; i < fragment->len; i += width) {
499 int val = GETRAWSAMPLE(width, fragment->buf, i);
500 if (val > max) max = val;
501 if (val < min) min = val;
502 }
503 return Py_BuildValue("(ii)", min, max);
504 }
505
506 /*[clinic input]
507 audioop.avg
508
509 fragment: Py_buffer
510 width: int
511 /
512
513 Return the average over all samples in the fragment.
514 [clinic start generated code]*/
515
516 static PyObject *
517 audioop_avg_impl(PyObject *module, Py_buffer *fragment, int width)
518 /*[clinic end generated code: output=4410a4c12c3586e6 input=1114493c7611334d]*/
519 {
520 Py_ssize_t i;
521 int avg;
522 double sum = 0.0;
523
524 if (!audioop_check_parameters(module, fragment->len, width))
525 return NULL;
526 for (i = 0; i < fragment->len; i += width)
527 sum += GETRAWSAMPLE(width, fragment->buf, i);
528 if (fragment->len == 0)
529 avg = 0;
530 else
531 avg = (int)floor(sum / (double)(fragment->len/width));
532 return PyLong_FromLong(avg);
533 }
534
535 /*[clinic input]
536 audioop.rms
537
538 fragment: Py_buffer
539 width: int
540 /
541
542 Return the root-mean-square of the fragment, i.e. sqrt(sum(S_i^2)/n).
543 [clinic start generated code]*/
544
545 static PyObject *
546 audioop_rms_impl(PyObject *module, Py_buffer *fragment, int width)
547 /*[clinic end generated code: output=1e7871c826445698 input=4cc57c6c94219d78]*/
548 {
549 Py_ssize_t i;
550 unsigned int res;
551 double sum_squares = 0.0;
552
553 if (!audioop_check_parameters(module, fragment->len, width))
554 return NULL;
555 for (i = 0; i < fragment->len; i += width) {
556 double val = GETRAWSAMPLE(width, fragment->buf, i);
557 sum_squares += val*val;
558 }
559 if (fragment->len == 0)
560 res = 0;
561 else
562 res = (unsigned int)sqrt(sum_squares / (double)(fragment->len/width));
563 return PyLong_FromUnsignedLong(res);
564 }
565
566 static double _sum2(const int16_t *a, const int16_t *b, Py_ssize_t len)
567 {
568 Py_ssize_t i;
569 double sum = 0.0;
570
571 for( i=0; i<len; i++) {
572 sum = sum + (double)a[i]*(double)b[i];
573 }
574 return sum;
575 }
576
577 /*
578 ** Findfit tries to locate a sample within another sample. Its main use
579 ** is in echo-cancellation (to find the feedback of the output signal in
580 ** the input signal).
581 ** The method used is as follows:
582 **
583 ** let R be the reference signal (length n) and A the input signal (length N)
584 ** with N > n, and let all sums be over i from 0 to n-1.
585 **
586 ** Now, for each j in {0..N-n} we compute a factor fj so that -fj*R matches A
587 ** as well as possible, i.e. sum( (A[j+i]+fj*R[i])^2 ) is minimal. This
588 ** equation gives fj = sum( A[j+i]R[i] ) / sum(R[i]^2).
589 **
590 ** Next, we compute the relative distance between the original signal and
591 ** the modified signal and minimize that over j:
592 ** vj = sum( (A[j+i]-fj*R[i])^2 ) / sum( A[j+i]^2 ) =>
593 ** vj = ( sum(A[j+i]^2)*sum(R[i]^2) - sum(A[j+i]R[i])^2 ) / sum( A[j+i]^2 )
594 **
595 ** In the code variables correspond as follows:
596 ** cp1 A
597 ** cp2 R
598 ** len1 N
599 ** len2 n
600 ** aj_m1 A[j-1]
601 ** aj_lm1 A[j+n-1]
602 ** sum_ri_2 sum(R[i]^2)
603 ** sum_aij_2 sum(A[i+j]^2)
604 ** sum_aij_ri sum(A[i+j]R[i])
605 **
606 ** sum_ri is calculated once, sum_aij_2 is updated each step and sum_aij_ri
607 ** is completely recalculated each step.
608 */
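/* Illustration (added): if the fragment contains an exact copy of 2*R
   starting at sample offset k, then v_k == 0 there and findfit returns
   (k, 2.0): best_j is the sample offset of the best match and factor is
   sum(A[best_j+i]*R[i]) / sum(R[i]^2). */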
609 /*[clinic input]
610 audioop.findfit
611
612 fragment: Py_buffer
613 reference: Py_buffer
614 /
615
616 Try to match reference as well as possible to a portion of fragment.
617 [clinic start generated code]*/
618
619 static PyObject *
620 audioop_findfit_impl(PyObject *module, Py_buffer *fragment,
621 Py_buffer *reference)
622 /*[clinic end generated code: output=5752306d83cbbada input=62c305605e183c9a]*/
623 {
624 const int16_t *cp1, *cp2;
625 Py_ssize_t len1, len2;
626 Py_ssize_t j, best_j;
627 double aj_m1, aj_lm1;
628 double sum_ri_2, sum_aij_2, sum_aij_ri, result, best_result, factor;
629
630 if (fragment->len & 1 || reference->len & 1) {
631 PyErr_SetString(get_audioop_state(module)->AudioopError,
632 "Strings should be even-sized");
633 return NULL;
634 }
635 cp1 = (const int16_t *)fragment->buf;
636 len1 = fragment->len >> 1;
637 cp2 = (const int16_t *)reference->buf;
638 len2 = reference->len >> 1;
639
640 if (len1 < len2) {
641 PyErr_SetString(get_audioop_state(module)->AudioopError,
642 "First sample should be longer");
643 return NULL;
644 }
645 sum_ri_2 = _sum2(cp2, cp2, len2);
646 sum_aij_2 = _sum2(cp1, cp1, len2);
647 sum_aij_ri = _sum2(cp1, cp2, len2);
648
649 result = (sum_ri_2*sum_aij_2 - sum_aij_ri*sum_aij_ri) / sum_aij_2;
650
651 best_result = result;
652 best_j = 0;
653
654 for ( j=1; j<=len1-len2; j++) {
655 aj_m1 = (double)cp1[j-1];
656 aj_lm1 = (double)cp1[j+len2-1];
657
658 sum_aij_2 = sum_aij_2 + aj_lm1*aj_lm1 - aj_m1*aj_m1;
659 sum_aij_ri = _sum2(cp1+j, cp2, len2);
660
661 result = (sum_ri_2*sum_aij_2 - sum_aij_ri*sum_aij_ri)
662 / sum_aij_2;
663
664 if ( result < best_result ) {
665 best_result = result;
666 best_j = j;
667 }
668
669 }
670
671 factor = _sum2(cp1+best_j, cp2, len2) / sum_ri_2;
672
673 return Py_BuildValue("(nf)", best_j, factor);
674 }
675
676 /*
677 ** findfactor finds a factor f so that the energy in A-fB is minimal.
678 ** See the comment for findfit for details.
679 */
680 /*[clinic input]
681 audioop.findfactor
682
683 fragment: Py_buffer
684 reference: Py_buffer
685 /
686
687 Return a factor F such that rms(add(fragment, mul(reference, -F))) is minimal.
688 [clinic start generated code]*/
689
690 static PyObject *
691 audioop_findfactor_impl(PyObject *module, Py_buffer *fragment,
692 Py_buffer *reference)
693 /*[clinic end generated code: output=14ea95652c1afcf8 input=816680301d012b21]*/
694 {
695 const int16_t *cp1, *cp2;
696 Py_ssize_t len;
697 double sum_ri_2, sum_aij_ri, result;
698
699 if (fragment->len & 1 || reference->len & 1) {
700 PyErr_SetString(get_audioop_state(module)->AudioopError,
701 "Strings should be even-sized");
702 return NULL;
703 }
704 if (fragment->len != reference->len) {
705 PyErr_SetString(get_audioop_state(module)->AudioopError,
706 "Samples should be same size");
707 return NULL;
708 }
709 cp1 = (const int16_t *)fragment->buf;
710 cp2 = (const int16_t *)reference->buf;
711 len = fragment->len >> 1;
712 sum_ri_2 = _sum2(cp2, cp2, len);
713 sum_aij_ri = _sum2(cp1, cp2, len);
714
715 result = sum_aij_ri / sum_ri_2;
716
717 return PyFloat_FromDouble(result);
718 }
719
720 /*
721 ** findmax returns the index of the n-sized segment of the input sample
722 ** that contains the most energy.
723 */
724 /*[clinic input]
725 audioop.findmax
726
727 fragment: Py_buffer
728 length: Py_ssize_t
729 /
730
731 Search fragment for a slice of specified number of samples with maximum energy.
732 [clinic start generated code]*/
733
734 static PyObject *
735 audioop_findmax_impl(PyObject *module, Py_buffer *fragment,
736 Py_ssize_t length)
737 /*[clinic end generated code: output=f008128233523040 input=2f304801ed42383c]*/
738 {
739 const int16_t *cp1;
740 Py_ssize_t len1;
741 Py_ssize_t j, best_j;
742 double aj_m1, aj_lm1;
743 double result, best_result;
744
745 if (fragment->len & 1) {
746 PyErr_SetString(get_audioop_state(module)->AudioopError,
747 "Strings should be even-sized");
748 return NULL;
749 }
750 cp1 = (const int16_t *)fragment->buf;
751 len1 = fragment->len >> 1;
752
753 if (length < 0 || len1 < length) {
754 PyErr_SetString(get_audioop_state(module)->AudioopError,
755 "Input sample should be longer");
756 return NULL;
757 }
758
759 result = _sum2(cp1, cp1, length);
760
761 best_result = result;
762 best_j = 0;
763
764 for ( j=1; j<=len1-length; j++) {
765 aj_m1 = (double)cp1[j-1];
766 aj_lm1 = (double)cp1[j+length-1];
767
768 result = result + aj_lm1*aj_lm1 - aj_m1*aj_m1;
769
770 if ( result > best_result ) {
771 best_result = result;
772 best_j = j;
773 }
774
775 }
776
777 return PyLong_FromSsize_t(best_j);
778 }
779
780 /*[clinic input]
781 audioop.avgpp
782
783 fragment: Py_buffer
784 width: int
785 /
786
787 Return the average peak-peak value over all samples in the fragment.
788 [clinic start generated code]*/
789
790 static PyObject *
791 audioop_avgpp_impl(PyObject *module, Py_buffer *fragment, int width)
792 /*[clinic end generated code: output=269596b0d5ae0b2b input=0b3cceeae420a7d9]*/
793 {
794 Py_ssize_t i;
795 int prevval, prevextremevalid = 0, prevextreme = 0;
796 double sum = 0.0;
797 unsigned int avg;
798 int diff, prevdiff, nextreme = 0;
799
800 if (!audioop_check_parameters(module, fragment->len, width))
801 return NULL;
802 if (fragment->len <= width)
803 return PyLong_FromLong(0);
804 prevval = GETRAWSAMPLE(width, fragment->buf, 0);
805 prevdiff = 17; /* Anything != 0, 1 */
806 for (i = width; i < fragment->len; i += width) {
807 int val = GETRAWSAMPLE(width, fragment->buf, i);
808 if (val != prevval) {
809 diff = val < prevval;
810 if (prevdiff == !diff) {
811 /* Derivative changed sign. Compute difference to last
812 ** extreme value and remember.
813 */
814 if (prevextremevalid) {
815 if (prevval < prevextreme)
816 sum += (double)((unsigned int)prevextreme -
817 (unsigned int)prevval);
818 else
819 sum += (double)((unsigned int)prevval -
820 (unsigned int)prevextreme);
821 nextreme++;
822 }
823 prevextremevalid = 1;
824 prevextreme = prevval;
825 }
826 prevval = val;
827 prevdiff = diff;
828 }
829 }
830 if ( nextreme == 0 )
831 avg = 0;
832 else
833 avg = (unsigned int)(sum / (double)nextreme);
834 return PyLong_FromUnsignedLong(avg);
835 }
836
837 /*[clinic input]
838 audioop.maxpp
839
840 fragment: Py_buffer
841 width: int
842 /
843
844 Return the maximum peak-peak value in the sound fragment.
845 [clinic start generated code]*/
846
847 static PyObject *
848 audioop_maxpp_impl(PyObject *module, Py_buffer *fragment, int width)
849 /*[clinic end generated code: output=5b918ed5dbbdb978 input=671a13e1518f80a1]*/
850 {
851 Py_ssize_t i;
852 int prevval, prevextremevalid = 0, prevextreme = 0;
853 unsigned int max = 0, extremediff;
854 int diff, prevdiff;
855
856 if (!audioop_check_parameters(module, fragment->len, width))
857 return NULL;
858 if (fragment->len <= width)
859 return PyLong_FromLong(0);
860 prevval = GETRAWSAMPLE(width, fragment->buf, 0);
861 prevdiff = 17; /* Anything != 0, 1 */
862 for (i = width; i < fragment->len; i += width) {
863 int val = GETRAWSAMPLE(width, fragment->buf, i);
864 if (val != prevval) {
865 diff = val < prevval;
866 if (prevdiff == !diff) {
867 /* Derivative changed sign. Compute difference to
868 ** last extreme value and remember.
869 */
870 if (prevextremevalid) {
871 if (prevval < prevextreme)
872 extremediff = (unsigned int)prevextreme -
873 (unsigned int)prevval;
874 else
875 extremediff = (unsigned int)prevval -
876 (unsigned int)prevextreme;
877 if ( extremediff > max )
878 max = extremediff;
879 }
880 prevextremevalid = 1;
881 prevextreme = prevval;
882 }
883 prevval = val;
884 prevdiff = diff;
885 }
886 }
887 return PyLong_FromUnsignedLong(max);
888 }
889
890 /*[clinic input]
891 audioop.cross
892
893 fragment: Py_buffer
894 width: int
895 /
896
897 Return the number of zero crossings in the fragment passed as an argument.
898 [clinic start generated code]*/
899
900 static PyObject *
901 audioop_cross_impl(PyObject *module, Py_buffer *fragment, int width)
902 /*[clinic end generated code: output=5938dcdd74a1f431 input=b1b3f15b83f6b41a]*/
903 {
904 Py_ssize_t i;
905 int prevval;
906 Py_ssize_t ncross;
907
908 if (!audioop_check_parameters(module, fragment->len, width))
909 return NULL;
910 ncross = -1;
911 prevval = 17; /* Anything <> 0,1 */
912 for (i = 0; i < fragment->len; i += width) {
913 int val = GETRAWSAMPLE(width, fragment->buf, i) < 0;
914 if (val != prevval) ncross++;
915 prevval = val;
916 }
917 return PyLong_FromSsize_t(ncross);
918 }
919
920 /*[clinic input]
921 audioop.mul
922
923 fragment: Py_buffer
924 width: int
925 factor: double
926 /
927
928 Return a fragment that has all samples in the original fragment multiplied by the floating-point value factor.
929 [clinic start generated code]*/
930
931 static PyObject *
932 audioop_mul_impl(PyObject *module, Py_buffer *fragment, int width,
933 double factor)
934 /*[clinic end generated code: output=6cd48fe796da0ea4 input=c726667baa157d3c]*/
935 {
936 signed char *ncp;
937 Py_ssize_t i;
938 double maxval, minval;
939 PyObject *rv;
940
941 if (!audioop_check_parameters(module, fragment->len, width))
942 return NULL;
943
944 maxval = (double) maxvals[width];
945 minval = (double) minvals[width];
946
947 rv = PyBytes_FromStringAndSize(NULL, fragment->len);
948 if (rv == NULL)
949 return NULL;
950 ncp = (signed char *)PyBytes_AsString(rv);
951
952 for (i = 0; i < fragment->len; i += width) {
953 double val = GETRAWSAMPLE(width, fragment->buf, i);
954 int ival = fbound(val * factor, minval, maxval);
955 SETRAWSAMPLE(width, ncp, i, ival);
956 }
957 return rv;
958 }
959
960 /*[clinic input]
961 audioop.tomono
962
963 fragment: Py_buffer
964 width: int
965 lfactor: double
966 rfactor: double
967 /
968
969 Convert a stereo fragment to a mono fragment.
970 [clinic start generated code]*/
971
972 static PyObject *
973 audioop_tomono_impl(PyObject *module, Py_buffer *fragment, int width,
974 double lfactor, double rfactor)
975 /*[clinic end generated code: output=235c8277216d4e4e input=c4ec949b3f4dddfa]*/
976 {
977 signed char *cp, *ncp;
978 Py_ssize_t len, i;
979 double maxval, minval;
980 PyObject *rv;
981
982 cp = fragment->buf;
983 len = fragment->len;
984 if (!audioop_check_parameters(module, len, width))
985 return NULL;
986 if (((len / width) & 1) != 0) {
987 PyErr_SetString(get_audioop_state(module)->AudioopError,
988 "not a whole number of frames");
989 return NULL;
990 }
991
992 maxval = (double) maxvals[width];
993 minval = (double) minvals[width];
994
995 rv = PyBytes_FromStringAndSize(NULL, len/2);
996 if (rv == NULL)
997 return NULL;
998 ncp = (signed char *)PyBytes_AsString(rv);
999
1000 for (i = 0; i < len; i += width*2) {
1001 double val1 = GETRAWSAMPLE(width, cp, i);
1002 double val2 = GETRAWSAMPLE(width, cp, i + width);
1003 double val = val1 * lfactor + val2 * rfactor;
1004 int ival = fbound(val, minval, maxval);
1005 SETRAWSAMPLE(width, ncp, i/2, ival);
1006 }
1007 return rv;
1008 }
1009
1010 /*[clinic input]
1011 audioop.tostereo
1012
1013 fragment: Py_buffer
1014 width: int
1015 lfactor: double
1016 rfactor: double
1017 /
1018
1019 Generate a stereo fragment from a mono fragment.
1020 [clinic start generated code]*/
1021
1022 static PyObject *
1023 audioop_tostereo_impl(PyObject *module, Py_buffer *fragment, int width,
1024 double lfactor, double rfactor)
1025 /*[clinic end generated code: output=046f13defa5f1595 input=27b6395ebfdff37a]*/
1026 {
1027 signed char *ncp;
1028 Py_ssize_t i;
1029 double maxval, minval;
1030 PyObject *rv;
1031
1032 if (!audioop_check_parameters(module, fragment->len, width))
1033 return NULL;
1034
1035 maxval = (double) maxvals[width];
1036 minval = (double) minvals[width];
1037
1038 if (fragment->len > PY_SSIZE_T_MAX/2) {
1039 PyErr_SetString(PyExc_MemoryError,
1040 "not enough memory for output buffer");
1041 return NULL;
1042 }
1043
1044 rv = PyBytes_FromStringAndSize(NULL, fragment->len*2);
1045 if (rv == NULL)
1046 return NULL;
1047 ncp = (signed char *)PyBytes_AsString(rv);
1048
1049 for (i = 0; i < fragment->len; i += width) {
1050 double val = GETRAWSAMPLE(width, fragment->buf, i);
1051 int val1 = fbound(val * lfactor, minval, maxval);
1052 int val2 = fbound(val * rfactor, minval, maxval);
1053 SETRAWSAMPLE(width, ncp, i*2, val1);
1054 SETRAWSAMPLE(width, ncp, i*2 + width, val2);
1055 }
1056 return rv;
1057 }
1058
1059 /*[clinic input]
1060 audioop.add
1061
1062 fragment1: Py_buffer
1063 fragment2: Py_buffer
1064 width: int
1065 /
1066
1067 Return a fragment which is the addition of the two samples passed as parameters.
1068 [clinic start generated code]*/
1069
1070 static PyObject *
1071 audioop_add_impl(PyObject *module, Py_buffer *fragment1,
1072 Py_buffer *fragment2, int width)
1073 /*[clinic end generated code: output=60140af4d1aab6f2 input=4a8d4bae4c1605c7]*/
1074 {
1075 signed char *ncp;
1076 Py_ssize_t i;
1077 int minval, maxval, newval;
1078 PyObject *rv;
1079
1080 if (!audioop_check_parameters(module, fragment1->len, width))
1081 return NULL;
1082 if (fragment1->len != fragment2->len) {
1083 PyErr_SetString(get_audioop_state(module)->AudioopError,
1084 "Lengths should be the same");
1085 return NULL;
1086 }
1087
1088 maxval = maxvals[width];
1089 minval = minvals[width];
1090
1091 rv = PyBytes_FromStringAndSize(NULL, fragment1->len);
1092 if (rv == NULL)
1093 return NULL;
1094 ncp = (signed char *)PyBytes_AsString(rv);
1095
1096 for (i = 0; i < fragment1->len; i += width) {
1097 int val1 = GETRAWSAMPLE(width, fragment1->buf, i);
1098 int val2 = GETRAWSAMPLE(width, fragment2->buf, i);
1099
1100 if (width < 4) {
1101 newval = val1 + val2;
1102 /* truncate in case of overflow */
1103 if (newval > maxval)
1104 newval = maxval;
1105 else if (newval < minval)
1106 newval = minval;
1107 }
1108 else {
1109 double fval = (double)val1 + (double)val2;
1110 /* truncate in case of overflow */
1111 newval = fbound(fval, minval, maxval);
1112 }
1113
1114 SETRAWSAMPLE(width, ncp, i, newval);
1115 }
1116 return rv;
1117 }
1118
1119 /*[clinic input]
1120 audioop.bias
1121
1122 fragment: Py_buffer
1123 width: int
1124 bias: int
1125 /
1126
1127 Return a fragment that is the original fragment with a bias added to each sample.
1128 [clinic start generated code]*/
1129
1130 static PyObject *
1131 audioop_bias_impl(PyObject *module, Py_buffer *fragment, int width, int bias)
1132 /*[clinic end generated code: output=6e0aa8f68f045093 input=2b5cce5c3bb4838c]*/
1133 {
1134 signed char *ncp;
1135 Py_ssize_t i;
1136 unsigned int val = 0, mask;
1137 PyObject *rv;
1138
1139 if (!audioop_check_parameters(module, fragment->len, width))
1140 return NULL;
1141
1142 rv = PyBytes_FromStringAndSize(NULL, fragment->len);
1143 if (rv == NULL)
1144 return NULL;
1145 ncp = (signed char *)PyBytes_AsString(rv);
1146
1147 mask = masks[width];
1148
1149 for (i = 0; i < fragment->len; i += width) {
1150 if (width == 1)
1151 val = GETINTX(unsigned char, fragment->buf, i);
1152 else if (width == 2)
1153 val = GETINTX(uint16_t, fragment->buf, i);
1154 else if (width == 3)
1155 val = ((unsigned int)GETINT24(fragment->buf, i)) & 0xffffffu;
1156 else {
1157 assert(width == 4);
1158 val = GETINTX(uint32_t, fragment->buf, i);
1159 }
1160
1161 val += (unsigned int)bias;
1162 /* wrap around in case of overflow */
1163 val &= mask;
1164
1165 if (width == 1)
1166 SETINTX(unsigned char, ncp, i, val);
1167 else if (width == 2)
1168 SETINTX(uint16_t, ncp, i, val);
1169 else if (width == 3)
1170 SETINT24(ncp, i, (int)val);
1171 else {
1172 assert(width == 4);
1173 SETINTX(uint32_t, ncp, i, val);
1174 }
1175 }
1176 return rv;
1177 }
1178
1179 /*[clinic input]
1180 audioop.reverse
1181
1182 fragment: Py_buffer
1183 width: int
1184 /
1185
1186 Reverse the samples in a fragment and return the modified fragment.
1187 [clinic start generated code]*/
1188
1189 static PyObject *
1190 audioop_reverse_impl(PyObject *module, Py_buffer *fragment, int width)
1191 /*[clinic end generated code: output=b44135698418da14 input=668f890cf9f9d225]*/
1192 {
1193 unsigned char *ncp;
1194 Py_ssize_t i;
1195 PyObject *rv;
1196
1197 if (!audioop_check_parameters(module, fragment->len, width))
1198 return NULL;
1199
1200 rv = PyBytes_FromStringAndSize(NULL, fragment->len);
1201 if (rv == NULL)
1202 return NULL;
1203 ncp = (unsigned char *)PyBytes_AsString(rv);
1204
1205 for (i = 0; i < fragment->len; i += width) {
1206 int val = GETRAWSAMPLE(width, fragment->buf, i);
1207 SETRAWSAMPLE(width, ncp, fragment->len - i - width, val);
1208 }
1209 return rv;
1210 }
1211
1212 /*[clinic input]
1213 audioop.byteswap
1214
1215 fragment: Py_buffer
1216 width: int
1217 /
1218
1219 Convert big-endian samples to little-endian and vice versa.
1220 [clinic start generated code]*/
1221
1222 static PyObject *
1223 audioop_byteswap_impl(PyObject *module, Py_buffer *fragment, int width)
1224 /*[clinic end generated code: output=50838a9e4b87cd4d input=fae7611ceffa5c82]*/
1225 {
1226 unsigned char *ncp;
1227 Py_ssize_t i;
1228 PyObject *rv;
1229
1230 if (!audioop_check_parameters(module, fragment->len, width))
1231 return NULL;
1232
1233 rv = PyBytes_FromStringAndSize(NULL, fragment->len);
1234 if (rv == NULL)
1235 return NULL;
1236 ncp = (unsigned char *)PyBytes_AsString(rv);
1237
1238 for (i = 0; i < fragment->len; i += width) {
1239 int j;
1240 for (j = 0; j < width; j++)
1241 ncp[i + width - 1 - j] = ((unsigned char *)fragment->buf)[i + j];
1242 }
1243 return rv;
1244 }
1245
1246 /*[clinic input]
1247 audioop.lin2lin
1248
1249 fragment: Py_buffer
1250 width: int
1251 newwidth: int
1252 /
1253
1254 Convert samples between 1-, 2-, 3- and 4-byte formats.
1255 [clinic start generated code]*/
1256
1257 static PyObject *
1258 audioop_lin2lin_impl(PyObject *module, Py_buffer *fragment, int width,
1259 int newwidth)
1260 /*[clinic end generated code: output=17b14109248f1d99 input=5ce08c8aa2f24d96]*/
1261 {
1262 unsigned char *ncp;
1263 Py_ssize_t i, j;
1264 PyObject *rv;
1265
1266 if (!audioop_check_parameters(module, fragment->len, width))
1267 return NULL;
1268 if (!audioop_check_size(module, newwidth))
1269 return NULL;
1270
1271 if (fragment->len/width > PY_SSIZE_T_MAX/newwidth) {
1272 PyErr_SetString(PyExc_MemoryError,
1273 "not enough memory for output buffer");
1274 return NULL;
1275 }
1276 rv = PyBytes_FromStringAndSize(NULL, (fragment->len/width)*newwidth);
1277 if (rv == NULL)
1278 return NULL;
1279 ncp = (unsigned char *)PyBytes_AsString(rv);
1280
1281 for (i = j = 0; i < fragment->len; i += width, j += newwidth) {
1282 int val = GETSAMPLE32(width, fragment->buf, i);
1283 SETSAMPLE32(newwidth, ncp, j, val);
1284 }
1285 return rv;
1286 }
1287
1288 static int
1289 gcd(int a, int b)
1290 {
1291 while (b > 0) {
1292 int tmp = a % b;
1293 a = b;
1294 b = tmp;
1295 }
1296 return a;
1297 }
1298
1299 /*[clinic input]
1300 audioop.ratecv
1301
1302 fragment: Py_buffer
1303 width: int
1304 nchannels: int
1305 inrate: int
1306 outrate: int
1307 state: object
1308 weightA: int = 1
1309 weightB: int = 0
1310 /
1311
1312 Convert the frame rate of the input fragment.
1313 [clinic start generated code]*/
1314
1315 static PyObject *
1316 audioop_ratecv_impl(PyObject *module, Py_buffer *fragment, int width,
1317 int nchannels, int inrate, int outrate, PyObject *state,
1318 int weightA, int weightB)
1319 /*[clinic end generated code: output=624038e843243139 input=aff3acdc94476191]*/
1320 {
1321 char *cp, *ncp;
1322 Py_ssize_t len;
1323 int chan, d, *prev_i, *cur_i, cur_o;
1324 PyObject *samps, *str, *rv = NULL, *channel;
1325 int bytes_per_frame;
1326
1327 if (!audioop_check_size(module, width))
1328 return NULL;
1329 if (nchannels < 1) {
1330 PyErr_SetString(get_audioop_state(module)->AudioopError,
1331 "# of channels should be >= 1");
1332 return NULL;
1333 }
1334 if (width > INT_MAX / nchannels) {
1335 /* This overflow test is rigorously correct because
1336 both multiplicands are >= 1. Use the argument names
1337 from the docs for the error msg. */
1338 PyErr_SetString(PyExc_OverflowError,
1339 "width * nchannels too big for a C int");
1340 return NULL;
1341 }
1342 bytes_per_frame = width * nchannels;
1343 if (weightA < 1 || weightB < 0) {
1344 PyErr_SetString(get_audioop_state(module)->AudioopError,
1345 "weightA should be >= 1, weightB should be >= 0");
1346 return NULL;
1347 }
1348 assert(fragment->len >= 0);
1349 if (fragment->len % bytes_per_frame != 0) {
1350 PyErr_SetString(get_audioop_state(module)->AudioopError,
1351 "not a whole number of frames");
1352 return NULL;
1353 }
1354 if (inrate <= 0 || outrate <= 0) {
1355 PyErr_SetString(get_audioop_state(module)->AudioopError,
1356 "sampling rate not > 0");
1357 return NULL;
1358 }
1359 /* divide inrate and outrate by their greatest common divisor */
1360 d = gcd(inrate, outrate);
1361 inrate /= d;
1362 outrate /= d;
1363 /* divide weightA and weightB by their greatest common divisor */
1364 d = gcd(weightA, weightB);
1365 weightA /= d;
1366 weightB /= d;
1367
1368 if ((size_t)nchannels > SIZE_MAX/sizeof(int)) {
1369 PyErr_SetString(PyExc_MemoryError,
1370 "not enough memory for output buffer");
1371 return NULL;
1372 }
1373 prev_i = (int *) PyMem_Malloc(nchannels * sizeof(int));
1374 cur_i = (int *) PyMem_Malloc(nchannels * sizeof(int));
1375 if (prev_i == NULL || cur_i == NULL) {
1376 (void) PyErr_NoMemory();
1377 goto exit;
1378 }
1379
1380 len = fragment->len / bytes_per_frame; /* # of frames */
1381
1382 if (state == Py_None) {
1383 d = -outrate;
1384 for (chan = 0; chan < nchannels; chan++)
1385 prev_i[chan] = cur_i[chan] = 0;
1386 }
1387 else {
1388 if (!PyTuple_Check(state)) {
1389 PyErr_SetString(PyExc_TypeError, "state must be a tuple or None");
1390 goto exit;
1391 }
1392 if (!PyArg_ParseTuple(state,
1393 "iO!;ratecv(): illegal state argument",
1394 &d, &PyTuple_Type, &samps))
1395 goto exit;
1396 if (PyTuple_Size(samps) != nchannels) {
1397 PyErr_SetString(get_audioop_state(module)->AudioopError,
1398 "illegal state argument");
1399 goto exit;
1400 }
1401 for (chan = 0; chan < nchannels; chan++) {
1402 channel = PyTuple_GetItem(samps, chan);
1403 if (!PyTuple_Check(channel)) {
1404 PyErr_SetString(PyExc_TypeError,
1405 "ratecv(): illegal state argument");
1406 goto exit;
1407 }
1408 if (!PyArg_ParseTuple(channel,
1409 "ii;ratecv(): illegal state argument",
1410 &prev_i[chan], &cur_i[chan]))
1411 {
1412 goto exit;
1413 }
1414 }
1415 }
1416
1417 /* str <- Space for the output buffer. */
1418 if (len == 0)
1419 str = PyBytes_FromStringAndSize(NULL, 0);
1420 else {
1421 /* There are len input frames, so we need (mathematically)
1422 ceiling(len*outrate/inrate) output frames, and each frame
1423 requires bytes_per_frame bytes. Computing this
1424 without spurious overflow is the challenge; we can
1425 settle for a reasonable upper bound, though, in this
1426 case ceiling(len/inrate) * outrate. */
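/* Illustration (added): with len = 1000 frames and inrate/outrate
   already reduced by their gcd to 441/80 (i.e. 44100 -> 8000 Hz),
   q = 1 + 999/441 = 3, so the buffer is sized for 3*80 = 240 frames,
   while the exact requirement is ceiling(1000*80/441) = 182 frames. */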
1427
1428 /* compute ceiling(len/inrate) without overflow */
1429 Py_ssize_t q = 1 + (len - 1) / inrate;
1430 if (outrate > PY_SSIZE_T_MAX / q / bytes_per_frame)
1431 str = NULL;
1432 else
1433 str = PyBytes_FromStringAndSize(NULL,
1434 q * outrate * bytes_per_frame);
1435 }
1436 if (str == NULL) {
1437 PyErr_SetString(PyExc_MemoryError,
1438 "not enough memory for output buffer");
1439 goto exit;
1440 }
1441 ncp = PyBytes_AsString(str);
1442 cp = fragment->buf;
1443
1444 for (;;) {
1445 while (d < 0) {
1446 if (len == 0) {
1447 samps = PyTuple_New(nchannels);
1448 if (samps == NULL)
1449 goto exit;
1450 for (chan = 0; chan < nchannels; chan++)
1451 PyTuple_SetItem(samps, chan,
1452 Py_BuildValue("(ii)",
1453 prev_i[chan],
1454 cur_i[chan]));
1455 if (PyErr_Occurred())
1456 goto exit;
1457 /* We have checked before that the length
1458 * of the string fits into int. */
1459 len = (Py_ssize_t)(ncp - PyBytes_AsString(str));
1460 rv = PyBytes_FromStringAndSize
1461 (PyBytes_AsString(str), len);
1462 Py_DECREF(str);
1463 str = rv;
1464 if (str == NULL)
1465 goto exit;
1466 rv = Py_BuildValue("(O(iO))", str, d, samps);
1467 Py_DECREF(samps);
1468 Py_DECREF(str);
1469 goto exit; /* return rv */
1470 }
1471 for (chan = 0; chan < nchannels; chan++) {
1472 prev_i[chan] = cur_i[chan];
1473 cur_i[chan] = GETSAMPLE32(width, cp, 0);
1474 cp += width;
1475 /* implements a simple digital filter */
1476 cur_i[chan] = (int)(
1477 ((double)weightA * (double)cur_i[chan] +
1478 (double)weightB * (double)prev_i[chan]) /
1479 ((double)weightA + (double)weightB));
1480 }
1481 len--;
1482 d += outrate;
1483 }
1484 while (d >= 0) {
1485 for (chan = 0; chan < nchannels; chan++) {
1486 cur_o = (int)(((double)prev_i[chan] * (double)d +
1487 (double)cur_i[chan] * (double)(outrate - d)) /
1488 (double)outrate);
1489 SETSAMPLE32(width, ncp, 0, cur_o);
1490 ncp += width;
1491 }
1492 d -= inrate;
1493 }
1494 }
1495 exit:
1496 PyMem_Free(prev_i);
1497 PyMem_Free(cur_i);
1498 return rv;
1499 }
1500
1501 /*[clinic input]
1502 audioop.lin2ulaw
1503
1504 fragment: Py_buffer
1505 width: int
1506 /
1507
1508 Convert samples in the audio fragment to u-LAW encoding.
1509 [clinic start generated code]*/
1510
1511 static PyObject *
1512 audioop_lin2ulaw_impl(PyObject *module, Py_buffer *fragment, int width)
1513 /*[clinic end generated code: output=14fb62b16fe8ea8e input=2450d1b870b6bac2]*/
1514 {
1515 unsigned char *ncp;
1516 Py_ssize_t i;
1517 PyObject *rv;
1518
1519 if (!audioop_check_parameters(module, fragment->len, width))
1520 return NULL;
1521
1522 rv = PyBytes_FromStringAndSize(NULL, fragment->len/width);
1523 if (rv == NULL)
1524 return NULL;
1525 ncp = (unsigned char *)PyBytes_AsString(rv);
1526
1527 for (i = 0; i < fragment->len; i += width) {
1528 int val = GETSAMPLE32(width, fragment->buf, i);
1529 *ncp++ = st_14linear2ulaw(val >> 18);
1530 }
1531 return rv;
1532 }
1533
1534 /*[clinic input]
1535 audioop.ulaw2lin
1536
1537 fragment: Py_buffer
1538 width: int
1539 /
1540
1541 Convert sound fragments in u-LAW encoding to linearly encoded sound fragments.
1542 [clinic start generated code]*/
1543
1544 static PyObject *
1545 audioop_ulaw2lin_impl(PyObject *module, Py_buffer *fragment, int width)
1546 /*[clinic end generated code: output=378356b047521ba2 input=45d53ddce5be7d06]*/
1547 {
1548 unsigned char *cp;
1549 signed char *ncp;
1550 Py_ssize_t i;
1551 PyObject *rv;
1552
1553 if (!audioop_check_size(module, width))
1554 return NULL;
1555
1556 if (fragment->len > PY_SSIZE_T_MAX/width) {
1557 PyErr_SetString(PyExc_MemoryError,
1558 "not enough memory for output buffer");
1559 return NULL;
1560 }
1561 rv = PyBytes_FromStringAndSize(NULL, fragment->len*width);
1562 if (rv == NULL)
1563 return NULL;
1564 ncp = (signed char *)PyBytes_AsString(rv);
1565
1566 cp = fragment->buf;
1567 for (i = 0; i < fragment->len*width; i += width) {
1568 int val = st_ulaw2linear16(*cp++) << 16;
1569 SETSAMPLE32(width, ncp, i, val);
1570 }
1571 return rv;
1572 }
1573
1574 /*[clinic input]
1575 audioop.lin2alaw
1576
1577 fragment: Py_buffer
1578 width: int
1579 /
1580
1581 Convert samples in the audio fragment to a-LAW encoding.
1582 [clinic start generated code]*/
1583
1584 static PyObject *
1585 audioop_lin2alaw_impl(PyObject *module, Py_buffer *fragment, int width)
1586 /*[clinic end generated code: output=d076f130121a82f0 input=ffb1ef8bb39da945]*/
1587 {
1588 unsigned char *ncp;
1589 Py_ssize_t i;
1590 PyObject *rv;
1591
1592 if (!audioop_check_parameters(module, fragment->len, width))
1593 return NULL;
1594
1595 rv = PyBytes_FromStringAndSize(NULL, fragment->len/width);
1596 if (rv == NULL)
1597 return NULL;
1598 ncp = (unsigned char *)PyBytes_AsString(rv);
1599
1600 for (i = 0; i < fragment->len; i += width) {
1601 int val = GETSAMPLE32(width, fragment->buf, i);
1602 *ncp++ = st_linear2alaw(val >> 19);
1603 }
1604 return rv;
1605 }
1606
1607 /*[clinic input]
1608 audioop.alaw2lin
1609
1610 fragment: Py_buffer
1611 width: int
1612 /
1613
1614 Convert sound fragments in a-LAW encoding to linearly encoded sound fragments.
1615 [clinic start generated code]*/
1616
1617 static PyObject *
1618 audioop_alaw2lin_impl(PyObject *module, Py_buffer *fragment, int width)
1619 /*[clinic end generated code: output=85c365ec559df647 input=4140626046cd1772]*/
1620 {
1621 unsigned char *cp;
1622 signed char *ncp;
1623 Py_ssize_t i;
1624 int val;
1625 PyObject *rv;
1626
1627 if (!audioop_check_size(module, width))
1628 return NULL;
1629
1630 if (fragment->len > PY_SSIZE_T_MAX/width) {
1631 PyErr_SetString(PyExc_MemoryError,
1632 "not enough memory for output buffer");
1633 return NULL;
1634 }
1635 rv = PyBytes_FromStringAndSize(NULL, fragment->len*width);
1636 if (rv == NULL)
1637 return NULL;
1638 ncp = (signed char *)PyBytes_AsString(rv);
1639 cp = fragment->buf;
1640
1641 for (i = 0; i < fragment->len*width; i += width) {
1642 val = st_alaw2linear16(*cp++) << 16;
1643 SETSAMPLE32(width, ncp, i, val);
1644 }
1645 return rv;
1646 }
1647
1648 /*[clinic input]
1649 audioop.lin2adpcm
1650
1651 fragment: Py_buffer
1652 width: int
1653 state: object
1654 /
1655
1656 Convert samples to 4 bit Intel/DVI ADPCM encoding.
1657 [clinic start generated code]*/
1658
1659 static PyObject *
1660 audioop_lin2adpcm_impl(PyObject *module, Py_buffer *fragment, int width,
1661 PyObject *state)
1662 /*[clinic end generated code: output=cc19f159f16c6793 input=12919d549b90c90a]*/
1663 {
1664 signed char *ncp;
1665 Py_ssize_t i;
1666 int step, valpred, delta,
1667 index, sign, vpdiff, diff;
1668 PyObject *rv = NULL, *str;
1669 int outputbuffer = 0, bufferstep;
1670
1671 if (!audioop_check_parameters(module, fragment->len, width))
1672 return NULL;
1673
1674 /* Decode state, should have (value, index) */
1675 if ( state == Py_None ) {
1676 /* First time, it seems. Set defaults */
1677 valpred = 0;
1678 index = 0;
1679 }
1680 else if (!PyTuple_Check(state)) {
1681 PyErr_SetString(PyExc_TypeError, "state must be a tuple or None");
1682 return NULL;
1683 }
1684 else if (!PyArg_ParseTuple(state, "ii;lin2adpcm(): illegal state argument",
1685 &valpred, &index))
1686 {
1687 return NULL;
1688 }
1689 else if (valpred >= 0x8000 || valpred < -0x8000 ||
1690 (size_t)index >= Py_ARRAY_LENGTH(stepsizeTable)) {
1691 PyErr_SetString(PyExc_ValueError, "bad state");
1692 return NULL;
1693 }
1694
1695 str = PyBytes_FromStringAndSize(NULL, fragment->len/(width*2));
1696 if (str == NULL)
1697 return NULL;
1698 ncp = (signed char *)PyBytes_AsString(str);
1699
1700 step = stepsizeTable[index];
1701 bufferstep = 1;
1702
1703 for (i = 0; i < fragment->len; i += width) {
1704 int val = GETSAMPLE32(width, fragment->buf, i) >> 16;
1705
1706 /* Step 1 - compute difference with previous value */
1707 if (val < valpred) {
1708 diff = valpred - val;
1709 sign = 8;
1710 }
1711 else {
1712 diff = val - valpred;
1713 sign = 0;
1714 }
1715
1716 /* Step 2 - Divide and clamp */
1717 /* Note:
1718 ** This code *approximately* computes:
1719 ** delta = diff*4/step;
1720 ** vpdiff = (delta+0.5)*step/4;
1721 ** but in shift step bits are dropped. The net result of this
1722 ** is that even if you have fast mul/div hardware you cannot
1723 ** put it to good use since the fixup would be too expensive.
1724 */
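/* Illustration (added): with step = 32 and diff = 50 the three tests
   below yield delta = 6 and vpdiff = 52, matching diff*4/step = 6.25
   (truncated) and (delta+0.5)*step/4 = 52. */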
1725 delta = 0;
1726 vpdiff = (step >> 3);
1727
1728 if ( diff >= step ) {
1729 delta = 4;
1730 diff -= step;
1731 vpdiff += step;
1732 }
1733 step >>= 1;
1734 if ( diff >= step ) {
1735 delta |= 2;
1736 diff -= step;
1737 vpdiff += step;
1738 }
1739 step >>= 1;
1740 if ( diff >= step ) {
1741 delta |= 1;
1742 vpdiff += step;
1743 }
1744
1745 /* Step 3 - Update previous value */
1746 if ( sign )
1747 valpred -= vpdiff;
1748 else
1749 valpred += vpdiff;
1750
1751 /* Step 4 - Clamp previous value to 16 bits */
1752 if ( valpred > 32767 )
1753 valpred = 32767;
1754 else if ( valpred < -32768 )
1755 valpred = -32768;
1756
1757 /* Step 5 - Assemble value, update index and step values */
1758 delta |= sign;
1759
1760 index += indexTable[delta];
1761 if ( index < 0 ) index = 0;
1762 if ( index > 88 ) index = 88;
1763 step = stepsizeTable[index];
1764
1765 /* Step 6 - Output value */
1766 if ( bufferstep ) {
1767 outputbuffer = (delta << 4) & 0xf0;
1768 } else {
1769 *ncp++ = (delta & 0x0f) | outputbuffer;
1770 }
1771 bufferstep = !bufferstep;
1772 }
1773 rv = Py_BuildValue("(O(ii))", str, valpred, index);
1774 Py_DECREF(str);
1775 return rv;
1776 }
1777
1778 /*[clinic input]
1779 audioop.adpcm2lin
1780
1781 fragment: Py_buffer
1782 width: int
1783 state: object
1784 /
1785
1786 Decode an Intel/DVI ADPCM coded fragment to a linear fragment.
1787 [clinic start generated code]*/
1788
1789 static PyObject *
1790 audioop_adpcm2lin_impl(PyObject *module, Py_buffer *fragment, int width,
1791 PyObject *state)
1792 /*[clinic end generated code: output=3440ea105acb3456 input=f5221144f5ca9ef0]*/
1793 {
1794 signed char *cp;
1795 signed char *ncp;
1796 Py_ssize_t i, outlen;
1797 int valpred, step, delta, index, sign, vpdiff;
1798 PyObject *rv, *str;
1799 int inputbuffer = 0, bufferstep;
1800
1801 if (!audioop_check_size(module, width))
1802 return NULL;
1803
1804 /* Decode state, should have (value, index) */
1805 if ( state == Py_None ) {
1806 /* First time, it seems. Set defaults */
1807 valpred = 0;
1808 index = 0;
1809 }
1810 else if (!PyTuple_Check(state)) {
1811 PyErr_SetString(PyExc_TypeError, "state must be a tuple or None");
1812 return NULL;
1813 }
1814 else if (!PyArg_ParseTuple(state, "ii;adpcm2lin(): illegal state argument",
1815 &valpred, &index))
1816 {
1817 return NULL;
1818 }
1819 else if (valpred >= 0x8000 || valpred < -0x8000 ||
1820 (size_t)index >= Py_ARRAY_LENGTH(stepsizeTable)) {
1821 PyErr_SetString(PyExc_ValueError, "bad state");
1822 return NULL;
1823 }
1824
1825 if (fragment->len > (PY_SSIZE_T_MAX/2)/width) {
1826 PyErr_SetString(PyExc_MemoryError,
1827 "not enough memory for output buffer");
1828 return NULL;
1829 }
1830 outlen = fragment->len*width*2;
1831 str = PyBytes_FromStringAndSize(NULL, outlen);
1832 if (str == NULL)
1833 return NULL;
1834 ncp = (signed char *)PyBytes_AsString(str);
1835 cp = fragment->buf;
1836
1837 step = stepsizeTable[index];
1838 bufferstep = 0;
1839
1840 for (i = 0; i < outlen; i += width) {
1841 /* Step 1 - get the delta value and compute next index */
1842 if ( bufferstep ) {
1843 delta = inputbuffer & 0xf;
1844 } else {
1845 inputbuffer = *cp++;
1846 delta = (inputbuffer >> 4) & 0xf;
1847 }
1848
1849 bufferstep = !bufferstep;
1850
1851 /* Step 2 - Find new index value (for later) */
1852 index += indexTable[delta];
1853 if ( index < 0 ) index = 0;
1854 if ( index > 88 ) index = 88;
1855
1856 /* Step 3 - Separate sign and magnitude */
1857 sign = delta & 8;
1858 delta = delta & 7;
1859
1860 /* Step 4 - Compute difference and new predicted value */
1861 /*
1862 ** Computes 'vpdiff = (delta+0.5)*step/4', but see comment
1863 ** in adpcm_coder.
1864 */
1865 vpdiff = step >> 3;
1866 if ( delta & 4 ) vpdiff += step;
1867 if ( delta & 2 ) vpdiff += step>>1;
1868 if ( delta & 1 ) vpdiff += step>>2;
1869
1870 if ( sign )
1871 valpred -= vpdiff;
1872 else
1873 valpred += vpdiff;
1874
1875 /* Step 5 - clamp output value */
1876 if ( valpred > 32767 )
1877 valpred = 32767;
1878 else if ( valpred < -32768 )
1879 valpred = -32768;
1880
1881 /* Step 6 - Update step value */
1882 step = stepsizeTable[index];
1883
1884 /* Step 7 - Output value */
1885 SETSAMPLE32(width, ncp, i, valpred << 16);
1886 }
1887
1888 rv = Py_BuildValue("(O(ii))", str, valpred, index);
1889 Py_DECREF(str);
1890 return rv;
1891 }
1892
1893 #include "clinic/audioop.c.h"
1894
1895 static PyMethodDef audioop_methods[] = {
1896 AUDIOOP_MAX_METHODDEF
1897 AUDIOOP_MINMAX_METHODDEF
1898 AUDIOOP_AVG_METHODDEF
1899 AUDIOOP_MAXPP_METHODDEF
1900 AUDIOOP_AVGPP_METHODDEF
1901 AUDIOOP_RMS_METHODDEF
1902 AUDIOOP_FINDFIT_METHODDEF
1903 AUDIOOP_FINDMAX_METHODDEF
1904 AUDIOOP_FINDFACTOR_METHODDEF
1905 AUDIOOP_CROSS_METHODDEF
1906 AUDIOOP_MUL_METHODDEF
1907 AUDIOOP_ADD_METHODDEF
1908 AUDIOOP_BIAS_METHODDEF
1909 AUDIOOP_ULAW2LIN_METHODDEF
1910 AUDIOOP_LIN2ULAW_METHODDEF
1911 AUDIOOP_ALAW2LIN_METHODDEF
1912 AUDIOOP_LIN2ALAW_METHODDEF
1913 AUDIOOP_LIN2LIN_METHODDEF
1914 AUDIOOP_ADPCM2LIN_METHODDEF
1915 AUDIOOP_LIN2ADPCM_METHODDEF
1916 AUDIOOP_TOMONO_METHODDEF
1917 AUDIOOP_TOSTEREO_METHODDEF
1918 AUDIOOP_GETSAMPLE_METHODDEF
1919 AUDIOOP_REVERSE_METHODDEF
1920 AUDIOOP_BYTESWAP_METHODDEF
1921 AUDIOOP_RATECV_METHODDEF
1922 { 0, 0 }
1923 };
1924
1925 static int
1926 audioop_traverse(PyObject *module, visitproc visit, void *arg)
1927 {
1928 audioop_state *state = get_audioop_state(module);
1929 Py_VISIT(state->AudioopError);
1930 return 0;
1931 }
1932
1933 static int
1934 audioop_clear(PyObject *module)
1935 {
1936 audioop_state *state = get_audioop_state(module);
1937 Py_CLEAR(state->AudioopError);
1938 return 0;
1939 }
1940
1941 static void
1942 audioop_free(void *module) {
1943 audioop_clear((PyObject *)module);
1944 }
1945
1946 static int
1947 audioop_exec(PyObject* module)
1948 {
1949 audioop_state *state = get_audioop_state(module);
1950
1951 state->AudioopError = PyErr_NewException("audioop.error", NULL, NULL);
1952 if (state->AudioopError == NULL) {
1953 return -1;
1954 }
1955
1956 Py_INCREF(state->AudioopError);
1957 if (PyModule_AddObject(module, "error", state->AudioopError) < 0) {
1958 Py_DECREF(state->AudioopError);
1959 return -1;
1960 }
1961
1962 return 0;
1963 }
1964
1965 static PyModuleDef_Slot audioop_slots[] = {
1966 {Py_mod_exec, audioop_exec},
1967 {0, NULL}
1968 };
1969
1970 static struct PyModuleDef audioopmodule = {
1971 PyModuleDef_HEAD_INIT,
1972 "audioop",
1973 NULL,
1974 sizeof(audioop_state),
1975 audioop_methods,
1976 audioop_slots,
1977 audioop_traverse,
1978 audioop_clear,
1979 audioop_free
1980 };
1981
1982 PyMODINIT_FUNC
1983 PyInit_audioop(void)
1984 {
1985 return PyModuleDef_Init(&audioopmodule);
1986 }
1987