/*
 * Copyright 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "AAudio"
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <cutils/properties.h>
#include <stdint.h>
#include <sys/types.h>
#include <utils/Errors.h>

#include "aaudio/AAudio.h"
#include <aaudio/AAudioTesting.h>
#include <math.h>
#include <system/audio-base.h>
#include <assert.h>

#include "utility/AAudioUtilities.h"

using namespace android;

// This is 3 dB, (10^(3/20)), to match the maximum headroom in AudioTrack for float data.
// It is designed to allow occasional transient peaks.
#define MAX_HEADROOM (1.41253754f)
#define MIN_HEADROOM (0 - MAX_HEADROOM)

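// Return the size in bytes of one sample of the given format,
// or AAUDIO_ERROR_ILLEGAL_ARGUMENT if the format is not recognized.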
int32_t AAudioConvert_formatToSizeInBytes(aaudio_format_t format) {
    int32_t size = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
    switch (format) {
        case AAUDIO_FORMAT_PCM_I16:
            size = sizeof(int16_t);
            break;
        case AAUDIO_FORMAT_PCM_FLOAT:
            size = sizeof(float);
            break;
        default:
            break;
    }
    return size;
}

// TODO expose and call clamp16_from_float function in primitives.h
static inline int16_t clamp16_from_float(float f) {
    static const float scale = 1 << 15;
    return (int16_t) roundf(fmaxf(fminf(f * scale, scale - 1.f), -scale));
}

// Clip to valid range of a float sample to prevent excessive volume.
// By using fmin and fmax we also protect against NaN.
static float clipToMinMaxHeadroom(float input) {
    return fmin(MAX_HEADROOM, fmax(MIN_HEADROOM, input));
}

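// Clip a float sample to the headroom range, apply the gain scaler,
// then convert it to a clamped 16-bit PCM value.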
static float clipAndClampFloatToPcm16(float sample, float scaler) {
    // Clip to valid range of a float sample to prevent excessive volume.
    sample = clipToMinMaxHeadroom(sample);

    // Scale and convert to a short.
    float fval = sample * scaler;
    return clamp16_from_float(fval);
}

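// Convert numSamples float samples to 16-bit PCM, applying a constant gain (amplitude).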
void AAudioConvert_floatToPcm16(const float *source,
                                int16_t *destination,
                                int32_t numSamples,
                                float amplitude) {
    const float scaler = amplitude;
    for (int i = 0; i < numSamples; i++) {
        float sample = *source++;
        *destination++ = clipAndClampFloatToPcm16(sample, scaler);
    }
}

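// Convert interleaved float samples to 16-bit PCM while ramping the gain
// linearly from amplitude1 toward amplitude2, stepping once per frame.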
void AAudioConvert_floatToPcm16(const float *source,
                                int16_t *destination,
                                int32_t numFrames,
                                int32_t samplesPerFrame,
                                float amplitude1,
                                float amplitude2) {
    float scaler = amplitude1;
    // divide by numFrames so that we almost reach amplitude2
    float delta = (amplitude2 - amplitude1) / numFrames;
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
            float sample = *source++;
            *destination++ = clipAndClampFloatToPcm16(sample, scaler);
        }
        scaler += delta;
    }
}

#define SHORT_SCALE  32768

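// SHORT_SCALE (32768 == 2^15) maps the full int16_t range onto roughly [-1.0, 1.0).
// Convert numSamples 16-bit PCM samples to float, applying a constant gain (amplitude).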
void AAudioConvert_pcm16ToFloat(const int16_t *source,
                                float *destination,
                                int32_t numSamples,
                                float amplitude) {
    const float scaler = amplitude / SHORT_SCALE;
    for (int i = 0; i < numSamples; i++) {
        destination[i] = source[i] * scaler;
    }
}

// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
void AAudioConvert_pcm16ToFloat(const int16_t *source,
                                float *destination,
                                int32_t numFrames,
                                int32_t samplesPerFrame,
                                float amplitude1,
                                float amplitude2) {
    float scaler = amplitude1 / SHORT_SCALE;
    const float delta = (amplitude2 - amplitude1) / (SHORT_SCALE * (float) numFrames);
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
            *destination++ = *source++ * scaler;
        }
        scaler += delta;
    }
}

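// Copy float samples while applying a linear gain ramp from amplitude1 toward amplitude2,
// clipping each input sample to the headroom range first.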
// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
void AAudio_linearRamp(const float *source,
                       float *destination,
                       int32_t numFrames,
                       int32_t samplesPerFrame,
                       float amplitude1,
                       float amplitude2) {
    float scaler = amplitude1;
    const float delta = (amplitude2 - amplitude1) / numFrames;
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
            float sample = *source++;
            // Clip to valid range of a float sample to prevent excessive volume.
            sample = clipToMinMaxHeadroom(sample);

            *destination++ = sample * scaler;
        }
        scaler += delta;
    }
}

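// Copy int16 samples while applying a linear gain ramp from amplitude1 toward amplitude2.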
// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
void AAudio_linearRamp(const int16_t *source,
                       int16_t *destination,
                       int32_t numFrames,
                       int32_t samplesPerFrame,
                       float amplitude1,
                       float amplitude2) {
    // Because we are converting from int16 to int16, we do not have to scale by 1/32768.
    float scaler = amplitude1;
    const float delta = (amplitude2 - amplitude1) / numFrames;
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
            // No need to clip because int16_t range is inherently limited.
            float sample = *source++ * scaler;
            *destination++ = (int16_t) roundf(sample);
        }
        scaler += delta;
    }
}

// *************************************************************************************
// Convert Mono To Stereo at the same time as converting format.
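// The variants below mirror the channel-preserving converters above:
// single-amplitude versions apply a constant gain, while the amplitude1/amplitude2
// versions ramp the gain linearly across the buffer.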
void AAudioConvert_formatMonoToStereo(const float *source,
                                      int16_t *destination,
                                      int32_t numFrames,
                                      float amplitude) {
    const float scaler = amplitude;
    for (int i = 0; i < numFrames; i++) {
        float sample = *source++;
        int16_t sample16 = clipAndClampFloatToPcm16(sample, scaler);
        *destination++ = sample16;
        *destination++ = sample16;
    }
}

void AAudioConvert_formatMonoToStereo(const float *source,
                                      int16_t *destination,
                                      int32_t numFrames,
                                      float amplitude1,
                                      float amplitude2) {
    // divide by numFrames so that we almost reach amplitude2
    const float delta = (amplitude2 - amplitude1) / numFrames;
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        const float scaler = amplitude1 + (frameIndex * delta);
        const float sample = *source++;
        int16_t sample16 = clipAndClampFloatToPcm16(sample, scaler);
        *destination++ = sample16;
        *destination++ = sample16;
    }
}

void AAudioConvert_formatMonoToStereo(const int16_t *source,
                                      float *destination,
                                      int32_t numFrames,
                                      float amplitude) {
    const float scaler = amplitude / SHORT_SCALE;
    for (int i = 0; i < numFrames; i++) {
        float sample = source[i] * scaler;
        *destination++ = sample;
        *destination++ = sample;
    }
}

// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
void AAudioConvert_formatMonoToStereo(const int16_t *source,
                                      float *destination,
                                      int32_t numFrames,
                                      float amplitude1,
                                      float amplitude2) {
    const float scaler1 = amplitude1 / SHORT_SCALE;
    const float delta = (amplitude2 - amplitude1) / (SHORT_SCALE * (float) numFrames);
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        float scaler = scaler1 + (frameIndex * delta);
        float sample = source[frameIndex] * scaler;
        *destination++ = sample;
        *destination++ = sample;
    }
}

// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
void AAudio_linearRampMonoToStereo(const float *source,
                                   float *destination,
                                   int32_t numFrames,
                                   float amplitude1,
                                   float amplitude2) {
    const float delta = (amplitude2 - amplitude1) / numFrames;
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        float sample = *source++;

        // Clip to valid range of a float sample to prevent excessive volume.
        sample = clipToMinMaxHeadroom(sample);

        const float scaler = amplitude1 + (frameIndex * delta);
        float sampleScaled = sample * scaler;
        *destination++ = sampleScaled;
        *destination++ = sampleScaled;
    }
}

// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
void AAudio_linearRampMonoToStereo(const int16_t *source,
                                   int16_t *destination,
                                   int32_t numFrames,
                                   float amplitude1,
                                   float amplitude2) {
    // Because we are converting from int16 to int16, we do not have to scale by 1/32768.
    const float delta = (amplitude2 - amplitude1) / numFrames;
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        const float scaler = amplitude1 + (frameIndex * delta);
        // No need to clip because int16_t range is inherently limited.
        const float sample = *source++ * scaler;
        int16_t sample16 = (int16_t) roundf(sample);
        *destination++ = sample16;
        *destination++ = sample16;
    }
}

// *************************************************************************************
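// Convert the samples described by 'source' into 'destination', handling
// PCM_I16 <-> PCM_FLOAT conversion, optional mono-to-stereo expansion, and a
// linear gain ramp from levelFrom to levelTo.
//
// Usage sketch (assuming FormattedData exposes the 'data', 'format' and
// 'channelCount' fields used below; the buffer names are hypothetical):
//
//     FormattedData src;
//     src.data = monoFloatBuffer;            // hypothetical source buffer
//     src.format = AAUDIO_FORMAT_PCM_FLOAT;
//     src.channelCount = 1;
//
//     FormattedData dst;
//     dst.data = stereoI16Buffer;            // hypothetical destination buffer
//     dst.format = AAUDIO_FORMAT_PCM_I16;
//     dst.channelCount = 2;
//
//     AAudioDataConverter::convert(src, dst, numFrames,
//                                  0.8f /* levelFrom */, 1.0f /* levelTo */);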
void AAudioDataConverter::convert(
        const FormattedData &source,
        const FormattedData &destination,
        int32_t numFrames,
        float levelFrom,
        float levelTo) {

    if (source.channelCount == 1 && destination.channelCount == 2) {
        convertMonoToStereo(source,
                            destination,
                            numFrames,
                            levelFrom,
                            levelTo);
    } else {
        // We only support mono to stereo conversion. Otherwise source and destination
        // must match.
        assert(source.channelCount == destination.channelCount);
        convertChannelsMatch(source,
                             destination,
                             numFrames,
                             levelFrom,
                             levelTo);
    }
}

void AAudioDataConverter::convertMonoToStereo(
        const FormattedData &source,
        const FormattedData &destination,
        int32_t numFrames,
        float levelFrom,
        float levelTo) {

    // The formats are validated when the stream is opened so we do not have to
    // check for illegal combinations here.
    if (source.format == AAUDIO_FORMAT_PCM_FLOAT) {
        if (destination.format == AAUDIO_FORMAT_PCM_FLOAT) {
            AAudio_linearRampMonoToStereo(
                    (const float *) source.data,
                    (float *) destination.data,
                    numFrames,
                    levelFrom,
                    levelTo);
        } else if (destination.format == AAUDIO_FORMAT_PCM_I16) {
            if (levelFrom != levelTo) {
                AAudioConvert_formatMonoToStereo(
                        (const float *) source.data,
                        (int16_t *) destination.data,
                        numFrames,
                        levelFrom,
                        levelTo);
            } else {
                AAudioConvert_formatMonoToStereo(
                        (const float *) source.data,
                        (int16_t *) destination.data,
                        numFrames,
                        levelTo);
            }
        }
    } else if (source.format == AAUDIO_FORMAT_PCM_I16) {
        if (destination.format == AAUDIO_FORMAT_PCM_FLOAT) {
            if (levelFrom != levelTo) {
                AAudioConvert_formatMonoToStereo(
                        (const int16_t *) source.data,
                        (float *) destination.data,
                        numFrames,
                        levelFrom,
                        levelTo);
            } else {
                AAudioConvert_formatMonoToStereo(
                        (const int16_t *) source.data,
                        (float *) destination.data,
                        numFrames,
                        levelTo);
            }
        } else if (destination.format == AAUDIO_FORMAT_PCM_I16) {
            AAudio_linearRampMonoToStereo(
                    (const int16_t *) source.data,
                    (int16_t *) destination.data,
                    numFrames,
                    levelFrom,
                    levelTo);
        }
    }
}

void AAudioDataConverter::convertChannelsMatch(
        const FormattedData &source,
        const FormattedData &destination,
        int32_t numFrames,
        float levelFrom,
        float levelTo) {
    const int32_t numSamples = numFrames * source.channelCount;

    // The formats are validated when the stream is opened so we do not have to
    // check for illegal combinations here.
    if (source.format == AAUDIO_FORMAT_PCM_FLOAT) {
        if (destination.format == AAUDIO_FORMAT_PCM_FLOAT) {
            AAudio_linearRamp(
                    (const float *) source.data,
                    (float *) destination.data,
                    numFrames,
                    source.channelCount,
                    levelFrom,
                    levelTo);
        } else if (destination.format == AAUDIO_FORMAT_PCM_I16) {
            if (levelFrom != levelTo) {
                AAudioConvert_floatToPcm16(
                        (const float *) source.data,
                        (int16_t *) destination.data,
                        numFrames,
                        source.channelCount,
                        levelFrom,
                        levelTo);
            } else {
                AAudioConvert_floatToPcm16(
                        (const float *) source.data,
                        (int16_t *) destination.data,
                        numSamples,
                        levelTo);
            }
        }
    } else if (source.format == AAUDIO_FORMAT_PCM_I16) {
        if (destination.format == AAUDIO_FORMAT_PCM_FLOAT) {
            if (levelFrom != levelTo) {
                AAudioConvert_pcm16ToFloat(
                        (const int16_t *) source.data,
                        (float *) destination.data,
                        numFrames,
                        source.channelCount,
                        levelFrom,
                        levelTo);
            } else {
                AAudioConvert_pcm16ToFloat(
                        (const int16_t *) source.data,
                        (float *) destination.data,
                        numSamples,
                        levelTo);
            }
        } else if (destination.format == AAUDIO_FORMAT_PCM_I16) {
            AAudio_linearRamp(
                    (const int16_t *) source.data,
                    (int16_t *) destination.data,
                    numFrames,
                    source.channelCount,
                    levelFrom,
                    levelTo);
        }
    }
}

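// Map an AAudio result code to the closest Android status_t value.
// Non-negative results (including AAUDIO_OK) are passed through unchanged.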
status_t AAudioConvert_aaudioToAndroidStatus(aaudio_result_t result) {
    // This covers the case for AAUDIO_OK and for positive results.
    if (result >= 0) {
        return result;
    }
    status_t status;
    switch (result) {
    case AAUDIO_ERROR_DISCONNECTED:
    case AAUDIO_ERROR_NO_SERVICE:
        status = DEAD_OBJECT;
        break;
    case AAUDIO_ERROR_INVALID_HANDLE:
        status = BAD_TYPE;
        break;
    case AAUDIO_ERROR_INVALID_STATE:
        status = INVALID_OPERATION;
        break;
    case AAUDIO_ERROR_INVALID_RATE:
    case AAUDIO_ERROR_INVALID_FORMAT:
    case AAUDIO_ERROR_ILLEGAL_ARGUMENT:
    case AAUDIO_ERROR_OUT_OF_RANGE:
        status = BAD_VALUE;
        break;
    case AAUDIO_ERROR_WOULD_BLOCK:
        status = WOULD_BLOCK;
        break;
    case AAUDIO_ERROR_NULL:
        status = UNEXPECTED_NULL;
        break;
    case AAUDIO_ERROR_UNAVAILABLE:
        status = NOT_ENOUGH_DATA;
        break;

    // TODO translate these result codes
    case AAUDIO_ERROR_INTERNAL:
    case AAUDIO_ERROR_UNIMPLEMENTED:
    case AAUDIO_ERROR_NO_FREE_HANDLES:
    case AAUDIO_ERROR_NO_MEMORY:
    case AAUDIO_ERROR_TIMEOUT:
    default:
        status = UNKNOWN_ERROR;
        break;
    }
    return status;
}

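// Map an Android status_t value back to the closest AAudio result code.
// Non-negative status values (including OK) are passed through unchanged.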
aaudio_result_t AAudioConvert_androidToAAudioResult(status_t status) {
    // This covers the case for OK and for positive results.
    if (status >= 0) {
        return status;
    }
    aaudio_result_t result;
    switch (status) {
    case BAD_TYPE:
        result = AAUDIO_ERROR_INVALID_HANDLE;
        break;
    case DEAD_OBJECT:
        result = AAUDIO_ERROR_NO_SERVICE;
        break;
    case INVALID_OPERATION:
        result = AAUDIO_ERROR_INVALID_STATE;
        break;
    case UNEXPECTED_NULL:
        result = AAUDIO_ERROR_NULL;
        break;
    case BAD_VALUE:
        result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
        break;
    case WOULD_BLOCK:
        result = AAUDIO_ERROR_WOULD_BLOCK;
        break;
    case NOT_ENOUGH_DATA:
        result = AAUDIO_ERROR_UNAVAILABLE;
        break;
    default:
        result = AAUDIO_ERROR_INTERNAL;
        break;
    }
    return result;
}

audio_session_t AAudioConvert_aaudioToAndroidSessionId(aaudio_session_id_t sessionId) {
    // If not a regular sessionId then convert to a safe value of AUDIO_SESSION_ALLOCATE.
    return (sessionId == AAUDIO_SESSION_ID_ALLOCATE || sessionId == AAUDIO_SESSION_ID_NONE)
           ? AUDIO_SESSION_ALLOCATE
           : (audio_session_t) sessionId;
}

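// Convert a public AAudio sample format to the internal audio_format_t,
// logging and returning AUDIO_FORMAT_DEFAULT for unrecognized values.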
audio_format_t AAudioConvert_aaudioToAndroidDataFormat(aaudio_format_t aaudioFormat) {
    audio_format_t androidFormat;
    switch (aaudioFormat) {
    case AAUDIO_FORMAT_PCM_I16:
        androidFormat = AUDIO_FORMAT_PCM_16_BIT;
        break;
    case AAUDIO_FORMAT_PCM_FLOAT:
        androidFormat = AUDIO_FORMAT_PCM_FLOAT;
        break;
    default:
        androidFormat = AUDIO_FORMAT_DEFAULT;
        ALOGE("AAudioConvert_aaudioToAndroidDataFormat 0x%08X unrecognized", aaudioFormat);
        break;
    }
    return androidFormat;
}

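// Convert an internal audio_format_t to the public AAudio sample format,
// logging and returning AAUDIO_FORMAT_INVALID for unrecognized values.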
aaudio_format_t AAudioConvert_androidToAAudioDataFormat(audio_format_t androidFormat) {
    aaudio_format_t aaudioFormat = AAUDIO_FORMAT_INVALID;
    switch (androidFormat) {
    case AUDIO_FORMAT_PCM_16_BIT:
        aaudioFormat = AAUDIO_FORMAT_PCM_I16;
        break;
    case AUDIO_FORMAT_PCM_FLOAT:
        aaudioFormat = AAUDIO_FORMAT_PCM_FLOAT;
        break;
    default:
        aaudioFormat = AAUDIO_FORMAT_INVALID;
        ALOGE("AAudioConvert_androidToAAudioDataFormat 0x%08X unrecognized", androidFormat);
        break;
    }
    return aaudioFormat;
}

// Make a message string from the condition.
#define STATIC_ASSERT(condition) static_assert(condition, #condition)

audio_usage_t AAudioConvert_usageToInternal(aaudio_usage_t usage) {
    // The public aaudio_usage_t constants are supposed to have the same
    // values as the internal audio_usage_t values.
    STATIC_ASSERT(AAUDIO_USAGE_MEDIA == AUDIO_USAGE_MEDIA);
    STATIC_ASSERT(AAUDIO_USAGE_VOICE_COMMUNICATION == AUDIO_USAGE_VOICE_COMMUNICATION);
    STATIC_ASSERT(AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING
                  == AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING);
    STATIC_ASSERT(AAUDIO_USAGE_ALARM == AUDIO_USAGE_ALARM);
    STATIC_ASSERT(AAUDIO_USAGE_NOTIFICATION == AUDIO_USAGE_NOTIFICATION);
    STATIC_ASSERT(AAUDIO_USAGE_NOTIFICATION_RINGTONE
                  == AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE);
    STATIC_ASSERT(AAUDIO_USAGE_NOTIFICATION_EVENT == AUDIO_USAGE_NOTIFICATION_EVENT);
    STATIC_ASSERT(AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY == AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY);
    STATIC_ASSERT(AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE
                  == AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE);
    STATIC_ASSERT(AAUDIO_USAGE_ASSISTANCE_SONIFICATION == AUDIO_USAGE_ASSISTANCE_SONIFICATION);
    STATIC_ASSERT(AAUDIO_USAGE_GAME == AUDIO_USAGE_GAME);
    STATIC_ASSERT(AAUDIO_USAGE_ASSISTANT == AUDIO_USAGE_ASSISTANT);
    if (usage == AAUDIO_UNSPECIFIED) {
        usage = AAUDIO_USAGE_MEDIA;
    }
    return (audio_usage_t) usage; // same value
}

audio_content_type_t AAudioConvert_contentTypeToInternal(aaudio_content_type_t contentType) {
    // The public aaudio_content_type_t constants are supposed to have the same
    // values as the internal audio_content_type_t values.
    STATIC_ASSERT(AAUDIO_CONTENT_TYPE_MUSIC == AUDIO_CONTENT_TYPE_MUSIC);
    STATIC_ASSERT(AAUDIO_CONTENT_TYPE_SPEECH == AUDIO_CONTENT_TYPE_SPEECH);
    STATIC_ASSERT(AAUDIO_CONTENT_TYPE_SONIFICATION == AUDIO_CONTENT_TYPE_SONIFICATION);
    STATIC_ASSERT(AAUDIO_CONTENT_TYPE_MOVIE == AUDIO_CONTENT_TYPE_MOVIE);
    if (contentType == AAUDIO_UNSPECIFIED) {
        contentType = AAUDIO_CONTENT_TYPE_MUSIC;
    }
    return (audio_content_type_t) contentType; // same value
}

audio_source_t AAudioConvert_inputPresetToAudioSource(aaudio_input_preset_t preset) {
    // The public aaudio_input_preset_t constants are supposed to have the same
    // values as the internal audio_source_t values.
    STATIC_ASSERT(AAUDIO_UNSPECIFIED == AUDIO_SOURCE_DEFAULT);
    STATIC_ASSERT(AAUDIO_INPUT_PRESET_GENERIC == AUDIO_SOURCE_MIC);
    STATIC_ASSERT(AAUDIO_INPUT_PRESET_CAMCORDER == AUDIO_SOURCE_CAMCORDER);
    STATIC_ASSERT(AAUDIO_INPUT_PRESET_VOICE_RECOGNITION == AUDIO_SOURCE_VOICE_RECOGNITION);
    STATIC_ASSERT(AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION == AUDIO_SOURCE_VOICE_COMMUNICATION);
    STATIC_ASSERT(AAUDIO_INPUT_PRESET_UNPROCESSED == AUDIO_SOURCE_UNPROCESSED);
    if (preset == AAUDIO_UNSPECIFIED) {
        preset = AAUDIO_INPUT_PRESET_VOICE_RECOGNITION;
    }
    return (audio_source_t) preset; // same value
}

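// Compute numFrames * bytesPerFrame into *sizeInBytes, rejecting negative
// arguments and products that would overflow int32_t.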
int32_t AAudioConvert_framesToBytes(int32_t numFrames,
                                    int32_t bytesPerFrame,
                                    int32_t *sizeInBytes) {
    *sizeInBytes = 0;

    if (numFrames < 0 || bytesPerFrame < 0) {
        ALOGE("negative size, numFrames = %d, frameSize = %d", numFrames, bytesPerFrame);
        return AAUDIO_ERROR_OUT_OF_RANGE;
    }

    // Prevent numeric overflow. Guard against bytesPerFrame == 0 to avoid dividing by zero.
    if (bytesPerFrame > 0 && numFrames > (INT32_MAX / bytesPerFrame)) {
        ALOGE("size overflow, numFrames = %d, frameSize = %d", numFrames, bytesPerFrame);
        return AAUDIO_ERROR_OUT_OF_RANGE;
    }

    *sizeInBytes = numFrames * bytesPerFrame;
    return AAUDIO_OK;
}

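// Read an MMAP policy system property and fall back to defaultValue if the
// stored value is not one of the recognized policy constants.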
static int32_t AAudioProperty_getMMapProperty(const char *propName,
                                              int32_t defaultValue,
                                              const char * caller) {
    int32_t prop = property_get_int32(propName, defaultValue);
    switch (prop) {
        case AAUDIO_UNSPECIFIED:
        case AAUDIO_POLICY_NEVER:
        case AAUDIO_POLICY_ALWAYS:
        case AAUDIO_POLICY_AUTO:
            break;
        default:
            ALOGE("%s: invalid = %d", caller, prop);
            prop = defaultValue;
            break;
    }
    return prop;
}

int32_t AAudioProperty_getMMapPolicy() {
    return AAudioProperty_getMMapProperty(AAUDIO_PROP_MMAP_POLICY,
                                          AAUDIO_UNSPECIFIED, __func__);
}

int32_t AAudioProperty_getMMapExclusivePolicy() {
    return AAudioProperty_getMMapProperty(AAUDIO_PROP_MMAP_EXCLUSIVE_POLICY,
                                          AAUDIO_UNSPECIFIED, __func__);
}

int32_t AAudioProperty_getMixerBursts() {
    const int32_t defaultBursts = 2; // arbitrary, use 2 for double buffered
    const int32_t maxBursts = 1024; // arbitrary
    int32_t prop = property_get_int32(AAUDIO_PROP_MIXER_BURSTS, defaultBursts);
    if (prop < 1 || prop > maxBursts) {
        ALOGE("AAudioProperty_getMixerBursts: invalid = %d", prop);
        prop = defaultBursts;
    }
    return prop;
}

int32_t AAudioProperty_getWakeupDelayMicros() {
    const int32_t minMicros = 0; // arbitrary
    const int32_t defaultMicros = 200; // arbitrary, based on some observed jitter
    const int32_t maxMicros = 5000; // arbitrary upper bound to keep the delay small
    int32_t prop = property_get_int32(AAUDIO_PROP_WAKEUP_DELAY_USEC, defaultMicros);
    if (prop < minMicros) {
        ALOGW("AAudioProperty_getWakeupDelayMicros: clipped %d to %d", prop, minMicros);
        prop = minMicros;
    } else if (prop > maxMicros) {
        ALOGW("AAudioProperty_getWakeupDelayMicros: clipped %d to %d", prop, maxMicros);
        prop = maxMicros;
    }
    return prop;
}

int32_t AAudioProperty_getMinimumSleepMicros() {
    const int32_t minMicros = 20; // arbitrary
    const int32_t defaultMicros = 200; // arbitrary
    const int32_t maxMicros = 2000; // arbitrary
    int32_t prop = property_get_int32(AAUDIO_PROP_MINIMUM_SLEEP_USEC, defaultMicros);
    if (prop < minMicros) {
        ALOGW("AAudioProperty_getMinimumSleepMicros: clipped %d to %d", prop, minMicros);
        prop = minMicros;
    } else if (prop > maxMicros) {
        ALOGW("AAudioProperty_getMinimumSleepMicros: clipped %d to %d", prop, maxMicros);
        prop = maxMicros;
    }
    return prop;
}

int32_t AAudioProperty_getHardwareBurstMinMicros() {
    const int32_t defaultMicros = 1000; // arbitrary
    const int32_t maxMicros = 1000 * 1000; // arbitrary
    int32_t prop = property_get_int32(AAUDIO_PROP_HW_BURST_MIN_USEC, defaultMicros);
    if (prop < 1 || prop > maxMicros) {
        ALOGE("AAudioProperty_getHardwareBurstMinMicros: invalid = %d, use %d",
              prop, defaultMicros);
        prop = defaultMicros;
    }
    return prop;
}

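// Return AAUDIO_OK if a stream in the given state may be flushed,
// otherwise AAUDIO_ERROR_INVALID_STATE.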
aaudio_result_t AAudio_isFlushAllowed(aaudio_stream_state_t state) {
    aaudio_result_t result = AAUDIO_OK;
    switch (state) {
        // Proceed with flushing.
        case AAUDIO_STREAM_STATE_OPEN:
        case AAUDIO_STREAM_STATE_PAUSED:
        case AAUDIO_STREAM_STATE_STOPPED:
        case AAUDIO_STREAM_STATE_FLUSHED:
            break;

        // Flushing is not allowed while the stream is active, transitioning,
        // or being torn down.
        case AAUDIO_STREAM_STATE_STARTING:
        case AAUDIO_STREAM_STATE_STARTED:
        case AAUDIO_STREAM_STATE_STOPPING:
        case AAUDIO_STREAM_STATE_PAUSING:
        case AAUDIO_STREAM_STATE_FLUSHING:
        case AAUDIO_STREAM_STATE_CLOSING:
        case AAUDIO_STREAM_STATE_CLOSED:
        case AAUDIO_STREAM_STATE_DISCONNECTED:
        default:
            ALOGE("can only flush stream when PAUSED, OPEN or STOPPED, state = %s",
                  AAudio_convertStreamStateToText(state));
            result = AAUDIO_ERROR_INVALID_STATE;
            break;
    }
    return result;
}