/*
 * Copyright 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "AudioStreamRecord"
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <stdint.h>

#include <aaudio/AAudio.h>
#include <audio_utils/primitives.h>
#include <media/AudioRecord.h>
#include <utils/String16.h>

#include "legacy/AudioStreamLegacy.h"
#include "legacy/AudioStreamRecord.h"
#include "utility/AudioClock.h"
#include "utility/FixedBlockWriter.h"

using namespace android;
using namespace aaudio;

AudioStreamRecord::AudioStreamRecord()
    : AudioStreamLegacy()
    , mFixedBlockWriter(*this)
{
}

AudioStreamRecord::~AudioStreamRecord()
{
    const aaudio_stream_state_t state = getState();
    bool bad = !(state == AAUDIO_STREAM_STATE_UNINITIALIZED || state == AAUDIO_STREAM_STATE_CLOSED);
    ALOGE_IF(bad, "stream not closed, in state %d", state);
}

aaudio_result_t AudioStreamRecord::open(const AudioStreamBuilder& builder)
{
    aaudio_result_t result = AAUDIO_OK;

    result = AudioStream::open(builder);
    if (result != AAUDIO_OK) {
        return result;
    }

    // Try to create an AudioRecord

    const aaudio_session_id_t requestedSessionId = builder.getSessionId();
    const audio_session_t sessionId = AAudioConvert_aaudioToAndroidSessionId(requestedSessionId);

    // TODO Support UNSPECIFIED in AudioRecord. For now, use stereo if unspecified.
    int32_t samplesPerFrame = (getSamplesPerFrame() == AAUDIO_UNSPECIFIED)
                              ? 2 : getSamplesPerFrame();
    audio_channel_mask_t channelMask = samplesPerFrame <= 2 ?
                               audio_channel_in_mask_from_count(samplesPerFrame) :
                               audio_channel_mask_for_index_assignment_from_count(samplesPerFrame);

    size_t frameCount = (builder.getBufferCapacity() == AAUDIO_UNSPECIFIED) ? 0
                        : builder.getBufferCapacity();

    audio_input_flags_t flags;
    aaudio_performance_mode_t perfMode = getPerformanceMode();
    switch (perfMode) {
        case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
            // If the app asks for a sessionId then it means they want to use effects.
            // So don't use RAW flag.
            flags = (audio_input_flags_t) ((requestedSessionId == AAUDIO_SESSION_ID_NONE)
                    ? (AUDIO_INPUT_FLAG_FAST | AUDIO_INPUT_FLAG_RAW)
                    : (AUDIO_INPUT_FLAG_FAST));
            break;

        case AAUDIO_PERFORMANCE_MODE_POWER_SAVING:
        case AAUDIO_PERFORMANCE_MODE_NONE:
        default:
            flags = AUDIO_INPUT_FLAG_NONE;
            break;
    }

    // Preserve behavior of API 26
    if (getFormat() == AUDIO_FORMAT_DEFAULT) {
        setFormat(AUDIO_FORMAT_PCM_FLOAT);
    }

    // Maybe change device format to get a FAST path.
    // AudioRecord does not support FAST mode for FLOAT data.
    // TODO AudioRecord should allow FLOAT data paths for FAST tracks.
    // So IF the user asks for low latency FLOAT
    // AND the sampleRate is likely to be compatible with FAST
    // THEN request I16 and convert to FLOAT when passing to user.
    // Note that hard coding 48000 Hz is not ideal because the sampleRate
    // for a FAST path might not be 48000 Hz.
    // It normally is but there is a chance that it is not.
    // And there is no reliable way to know that in advance.
    // Luckily the consequences of a wrong guess are minor.
    // We just may not get a FAST track.
    // But we wouldn't have anyway without this hack.
    constexpr int32_t kMostLikelySampleRateForFast = 48000;
    if (getFormat() == AUDIO_FORMAT_PCM_FLOAT
            && perfMode == AAUDIO_PERFORMANCE_MODE_LOW_LATENCY
            && (samplesPerFrame <= 2) // FAST only for mono and stereo
            && (getSampleRate() == kMostLikelySampleRateForFast
                || getSampleRate() == AAUDIO_UNSPECIFIED)) {
        setDeviceFormat(AUDIO_FORMAT_PCM_16_BIT);
    } else {
        setDeviceFormat(getFormat());
    }

    uint32_t notificationFrames = 0;

    // Setup the callback if there is one.
    AudioRecord::callback_t callback = nullptr;
    void *callbackData = nullptr;
    AudioRecord::transfer_type streamTransferType = AudioRecord::transfer_type::TRANSFER_SYNC;
    if (builder.getDataCallbackProc() != nullptr) {
        streamTransferType = AudioRecord::transfer_type::TRANSFER_CALLBACK;
        callback = getLegacyCallback();
        callbackData = this;
        notificationFrames = builder.getFramesPerDataCallback();
    }
    mCallbackBufferSize = builder.getFramesPerDataCallback();

    // Don't call mAudioRecord->setInputDevice() because it will be overwritten by set()!
    audio_port_handle_t selectedDeviceId = (getDeviceId() == AAUDIO_UNSPECIFIED)
                                           ? AUDIO_PORT_HANDLE_NONE
                                           : getDeviceId();

    const audio_content_type_t contentType =
            AAudioConvert_contentTypeToInternal(builder.getContentType());
    const audio_source_t source =
            AAudioConvert_inputPresetToAudioSource(builder.getInputPreset());

    const audio_attributes_t attributes = {
            .content_type = contentType,
            .usage = AUDIO_USAGE_UNKNOWN, // only used for output
            .source = source,
            .flags = AUDIO_FLAG_NONE, // Different than the AUDIO_INPUT_FLAGS
            .tags = ""
    };

    // ----------- open the AudioRecord ---------------------
    // Might retry, but never more than once.
    for (int i = 0; i < 2; i ++) {
        const audio_format_t requestedInternalFormat = getDeviceFormat();

        mAudioRecord = new AudioRecord(
                mOpPackageName // const String16& opPackageName TODO does not compile
        );
        mAudioRecord->set(
                AUDIO_SOURCE_DEFAULT, // ignored because we pass attributes below
                getSampleRate(),
                requestedInternalFormat,
                channelMask,
                frameCount,
                callback,
                callbackData,
                notificationFrames,
                false /*threadCanCallJava*/,
                sessionId,
                streamTransferType,
                flags,
                AUDIO_UID_INVALID, // DEFAULT uid
                -1,                // DEFAULT pid
                &attributes,
                selectedDeviceId
        );

        // Did we get a valid track?
        status_t status = mAudioRecord->initCheck();
        if (status != OK) {
            close();
            ALOGE("open(), initCheck() returned %d", status);
            return AAudioConvert_androidToAAudioResult(status);
        }

        // Check to see if it was worth hacking the deviceFormat.
        bool gotFastPath = (mAudioRecord->getFlags() & AUDIO_INPUT_FLAG_FAST)
                           == AUDIO_INPUT_FLAG_FAST;
        if (getFormat() != getDeviceFormat() && !gotFastPath) {
            // We tried to get a FAST path by switching the device format.
            // But it didn't work. So we might as well reopen using the same
            // format for device and for app.
            ALOGD("%s() used a different device format but no FAST path, reopen", __func__);
            mAudioRecord.clear();
            setDeviceFormat(getFormat());
        } else {
            break; // Keep the one we just opened.
        }
    }

    // Get the actual values from the AudioRecord.
    setSamplesPerFrame(mAudioRecord->channelCount());

    int32_t actualSampleRate = mAudioRecord->getSampleRate();
    ALOGW_IF(actualSampleRate != getSampleRate(),
             "open() sampleRate changed from %d to %d",
             getSampleRate(), actualSampleRate);
    setSampleRate(actualSampleRate);

    // We may need to pass the data through a block size adapter to guarantee constant size.
    if (mCallbackBufferSize != AAUDIO_UNSPECIFIED) {
        int callbackSizeBytes = getBytesPerFrame() * mCallbackBufferSize;
        mFixedBlockWriter.open(callbackSizeBytes);
        mBlockAdapter = &mFixedBlockWriter;
    } else {
        mBlockAdapter = nullptr;
    }

    // Allocate format conversion buffer if needed.
    if (getDeviceFormat() == AUDIO_FORMAT_PCM_16_BIT
        && getFormat() == AUDIO_FORMAT_PCM_FLOAT) {

        if (builder.getDataCallbackProc() != nullptr) {
            // If we have a callback then we need to convert the data into an internal float
            // array and then pass that entire array to the app.
            mFormatConversionBufferSizeInFrames =
                    (mCallbackBufferSize != AAUDIO_UNSPECIFIED)
                    ? mCallbackBufferSize : getFramesPerBurst();
            int32_t numSamples = mFormatConversionBufferSizeInFrames * getSamplesPerFrame();
            mFormatConversionBufferFloat = std::make_unique<float[]>(numSamples);
        } else {
            // If we don't have a callback then we will read into an internal short array
            // and then convert into the app float array in read().
            mFormatConversionBufferSizeInFrames = getFramesPerBurst();
            int32_t numSamples = mFormatConversionBufferSizeInFrames * getSamplesPerFrame();
            mFormatConversionBufferI16 = std::make_unique<int16_t[]>(numSamples);
        }
        ALOGD("%s() setup I16>FLOAT conversion buffer with %d frames",
              __func__, mFormatConversionBufferSizeInFrames);
    }

    // Update performance mode based on the actual stream.
    // For example, if the sample rate does not match native then you won't get a FAST track.
    audio_input_flags_t actualFlags = mAudioRecord->getFlags();
    aaudio_performance_mode_t actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
    // FIXME Some platforms do not advertise RAW mode for low latency inputs.
    if ((actualFlags & (AUDIO_INPUT_FLAG_FAST))
        == (AUDIO_INPUT_FLAG_FAST)) {
        actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
    }
    setPerformanceMode(actualPerformanceMode);

    setSharingMode(AAUDIO_SHARING_MODE_SHARED); // EXCLUSIVE mode not supported in legacy

    // Log warning if we did not get what we asked for.
    ALOGW_IF(actualFlags != flags,
             "open() flags changed from 0x%08X to 0x%08X",
             flags, actualFlags);
    ALOGW_IF(actualPerformanceMode != perfMode,
             "open() perfMode changed from %d to %d",
             perfMode, actualPerformanceMode);

    setState(AAUDIO_STREAM_STATE_OPEN);
    setDeviceId(mAudioRecord->getRoutedDeviceId());

    aaudio_session_id_t actualSessionId =
            (requestedSessionId == AAUDIO_SESSION_ID_NONE)
            ? AAUDIO_SESSION_ID_NONE
            : (aaudio_session_id_t) mAudioRecord->getSessionId();
    setSessionId(actualSessionId);

    mAudioRecord->addAudioDeviceCallback(mDeviceCallback);

    return AAUDIO_OK;
}

aaudio_result_t AudioStreamRecord::close()
{
    // TODO add close() or release() to AudioRecord API then call it from here
    if (getState() != AAUDIO_STREAM_STATE_CLOSED) {
        mAudioRecord->removeAudioDeviceCallback(mDeviceCallback);
        mAudioRecord.clear();
        setState(AAUDIO_STREAM_STATE_CLOSED);
    }
    mFixedBlockWriter.close();
    return AudioStream::close();
}

const void * AudioStreamRecord::maybeConvertDeviceData(const void *audioData, int32_t numFrames) {
    if (mFormatConversionBufferFloat.get() != nullptr) {
        LOG_ALWAYS_FATAL_IF(numFrames > mFormatConversionBufferSizeInFrames,
                            "%s() conversion size %d too large for buffer %d",
                            __func__, numFrames, mFormatConversionBufferSizeInFrames);

        int32_t numSamples = numFrames * getSamplesPerFrame();
        // Only conversion supported is I16 to FLOAT
        memcpy_to_float_from_i16(
                    mFormatConversionBufferFloat.get(),
                    (const int16_t *) audioData,
                    numSamples);
        return mFormatConversionBufferFloat.get();
    } else {
        return audioData;
    }
}

void AudioStreamRecord::processCallback(int event, void *info) {
    switch (event) {
        case AudioRecord::EVENT_MORE_DATA:
            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_PROCESS_DATA, info);
            break;

        // Stream got rerouted so we disconnect.
        case AudioRecord::EVENT_NEW_IAUDIORECORD:
            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_DISCONNECTED, info);
            break;

        default:
            break;
    }
    return;
}

aaudio_result_t AudioStreamRecord::requestStart()
{
    if (mAudioRecord.get() == nullptr) {
        return AAUDIO_ERROR_INVALID_STATE;
    }

    // Enable callback before starting AudioRecord to avoid shutting
    // down because of a race condition.
    mCallbackEnabled.store(true);
    mFramesWritten.reset32(); // service writes frames
    mTimestampPosition.reset32();
    status_t err = mAudioRecord->start(); // resets position to zero
    if (err != OK) {
        return AAudioConvert_androidToAAudioResult(err);
    } else {
        setState(AAUDIO_STREAM_STATE_STARTING);
    }
    return AAUDIO_OK;
}

aaudio_result_t AudioStreamRecord::requestStop() {
    if (mAudioRecord.get() == nullptr) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    setState(AAUDIO_STREAM_STATE_STOPPING);
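    // Bring the write and timestamp positions up to the read position so that
    // no leftover frames appear to be available after the stop.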
    mFramesWritten.catchUpTo(getFramesRead());
    mTimestampPosition.catchUpTo(getFramesRead());
    mAudioRecord->stop();
    mCallbackEnabled.store(false);
    // Pass false to prevent errorCallback from being called after disconnect
    // when app has already requested a stop().
    return checkForDisconnectRequest(false);
}

aaudio_result_t AudioStreamRecord::updateStateMachine()
{
    aaudio_result_t result = AAUDIO_OK;
    aaudio_wrapping_frames_t position;
    status_t err;
    switch (getState()) {
    // TODO add better state visibility to AudioRecord
    case AAUDIO_STREAM_STATE_STARTING:
        // When starting, the position will begin at zero and then go positive.
        // The position can wrap but by that time the state will not be STARTING.
        err = mAudioRecord->getPosition(&position);
        if (err != OK) {
            result = AAudioConvert_androidToAAudioResult(err);
        } else if (position > 0) {
            setState(AAUDIO_STREAM_STATE_STARTED);
        }
        break;
    case AAUDIO_STREAM_STATE_STOPPING:
        if (mAudioRecord->stopped()) {
            setState(AAUDIO_STREAM_STATE_STOPPED);
        }
        break;
    default:
        break;
    }
    return result;
}

aaudio_result_t AudioStreamRecord::read(void *buffer,
                                        int32_t numFrames,
                                        int64_t timeoutNanoseconds)
{
    int32_t bytesPerDeviceFrame = getBytesPerDeviceFrame();
    int32_t numBytes;
    // This will detect out of range values for numFrames.
    aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerDeviceFrame, &numBytes);
    if (result != AAUDIO_OK) {
        return result;
    }

    if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED) {
        return AAUDIO_ERROR_DISCONNECTED;
    }

    // TODO add timeout to AudioRecord
    bool blocking = (timeoutNanoseconds > 0);

    ssize_t bytesActuallyRead = 0;
    ssize_t totalBytesRead = 0;
    if (mFormatConversionBufferI16.get() != nullptr) {
        // Convert I16 data to float using an intermediate buffer.
        float *floatBuffer = (float *) buffer;
        int32_t framesLeft = numFrames;
        // Perform conversion using multiple read()s if necessary.
        while (framesLeft > 0) {
            // Read into short internal buffer.
            int32_t framesToRead = std::min(framesLeft, mFormatConversionBufferSizeInFrames);
            size_t bytesToRead = framesToRead * bytesPerDeviceFrame;
            bytesActuallyRead = mAudioRecord->read(mFormatConversionBufferI16.get(), bytesToRead, blocking);
            if (bytesActuallyRead <= 0) {
                break;
            }
            totalBytesRead += bytesActuallyRead;
            int32_t framesToConvert = bytesActuallyRead / bytesPerDeviceFrame;
            // Convert into app float buffer.
            size_t numSamples = framesToConvert * getSamplesPerFrame();
            memcpy_to_float_from_i16(
                    floatBuffer,
                    mFormatConversionBufferI16.get(),
                    numSamples);
            floatBuffer += numSamples;
            framesLeft -= framesToConvert;
        }
    } else {
        bytesActuallyRead = mAudioRecord->read(buffer, numBytes, blocking);
        totalBytesRead = bytesActuallyRead;
    }
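    // WOULD_BLOCK in non-blocking mode just means no data was available yet,
    // so report zero frames read rather than an error.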
    if (bytesActuallyRead == WOULD_BLOCK) {
        return 0;
    } else if (bytesActuallyRead < 0) {
        // In this context, a DEAD_OBJECT is more likely to be a disconnect notification due to
        // AudioRecord invalidation.
        if (bytesActuallyRead == DEAD_OBJECT) {
            setState(AAUDIO_STREAM_STATE_DISCONNECTED);
            return AAUDIO_ERROR_DISCONNECTED;
        }
        return AAudioConvert_androidToAAudioResult(bytesActuallyRead);
    }
    int32_t framesRead = (int32_t)(totalBytesRead / bytesPerDeviceFrame);
    incrementFramesRead(framesRead);

    result = updateStateMachine();
    if (result != AAUDIO_OK) {
        return result;
    }

    return (aaudio_result_t) framesRead;
}

aaudio_result_t AudioStreamRecord::setBufferSize(int32_t requestedFrames)
{
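    // The legacy AudioRecord path does not support changing the buffer size,
    // so the request is ignored and the current size is reported instead.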
    return getBufferSize();
}

int32_t AudioStreamRecord::getBufferSize() const
{
    return getBufferCapacity(); // TODO implement in AudioRecord?
}

int32_t AudioStreamRecord::getBufferCapacity() const
{
    return static_cast<int32_t>(mAudioRecord->frameCount());
}

int32_t AudioStreamRecord::getXRunCount() const
{
    return 0; // TODO implement when AudioRecord supports it
}

int32_t AudioStreamRecord::getFramesPerBurst() const
{
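    // Report the AudioRecord notification period as the burst size; it is the
    // closest equivalent that the legacy API exposes.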
    return static_cast<int32_t>(mAudioRecord->getNotificationPeriodInFrames());
}

aaudio_result_t AudioStreamRecord::getTimestamp(clockid_t clockId,
                                                int64_t *framePosition,
                                                int64_t *timeNanoseconds) {
    ExtendedTimestamp extendedTimestamp;
    if (getState() != AAUDIO_STREAM_STATE_STARTED) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    status_t status = mAudioRecord->getTimestamp(&extendedTimestamp);
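    // WOULD_BLOCK means a timestamp is not available yet (for example, right after
    // starting), so treat it as an invalid-state condition rather than a hard error.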
    if (status == WOULD_BLOCK) {
        return AAUDIO_ERROR_INVALID_STATE;
    } else if (status != NO_ERROR) {
        return AAudioConvert_androidToAAudioResult(status);
    }
    return getBestTimestamp(clockId, framePosition, timeNanoseconds, &extendedTimestamp);
}

int64_t AudioStreamRecord::getFramesWritten() {
    aaudio_wrapping_frames_t position;
    status_t result;
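    // For a capture stream the "written" frames are the ones written by the service.
    // The record position only advances while running, so only query it in those states.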
    switch (getState()) {
        case AAUDIO_STREAM_STATE_STARTING:
        case AAUDIO_STREAM_STATE_STARTED:
            result = mAudioRecord->getPosition(&position);
            if (result == OK) {
                mFramesWritten.update32(position);
            }
            break;
        case AAUDIO_STREAM_STATE_STOPPING:
        default:
            break;
    }
    return AudioStreamLegacy::getFramesWritten();
}