/*
 * Copyright 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "AudioStreamRecord"
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <stdint.h>

#include <aaudio/AAudio.h>
#include <audio_utils/primitives.h>
#include <media/AidlConversion.h>
#include <media/AudioRecord.h>
#include <utils/String16.h>

#include "core/AudioGlobal.h"
#include "legacy/AudioStreamLegacy.h"
#include "legacy/AudioStreamRecord.h"
#include "utility/AudioClock.h"
#include "utility/FixedBlockWriter.h"

using android::content::AttributionSourceState;

using namespace android;
using namespace aaudio;

AudioStreamRecord::AudioStreamRecord()
    : AudioStreamLegacy()
    , mFixedBlockWriter(*this)
{
}

AudioStreamRecord::~AudioStreamRecord()
{
    const aaudio_stream_state_t state = getState();
    bool bad = !(state == AAUDIO_STREAM_STATE_UNINITIALIZED || state == AAUDIO_STREAM_STATE_CLOSED);
    ALOGE_IF(bad, "stream not closed, in state %d", state);
}

aaudio_result_t AudioStreamRecord::open(const AudioStreamBuilder& builder)
{
    aaudio_result_t result = AAUDIO_OK;

    result = AudioStream::open(builder);
    if (result != AAUDIO_OK) {
        return result;
    }

    // Try to create an AudioRecord

    const aaudio_session_id_t requestedSessionId = builder.getSessionId();
    const audio_session_t sessionId = AAudioConvert_aaudioToAndroidSessionId(requestedSessionId);

    // TODO Support UNSPECIFIED in AudioRecord. For now, use stereo if unspecified.
    audio_channel_mask_t channelMask =
            AAudio_getChannelMaskForOpen(getChannelMask(), getSamplesPerFrame(), true /*isInput*/);

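    // A capacity of zero lets AudioRecord pick its own (minimum) frame count.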
    size_t frameCount = (builder.getBufferCapacity() == AAUDIO_UNSPECIFIED) ? 0
                        : builder.getBufferCapacity();

    audio_input_flags_t flags;
    aaudio_performance_mode_t perfMode = getPerformanceMode();
    switch (perfMode) {
        case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
            // If the app asks for a sessionId then it means they want to use effects.
            // So don't use RAW flag.
            flags = (audio_input_flags_t) ((requestedSessionId == AAUDIO_SESSION_ID_NONE)
                    ? (AUDIO_INPUT_FLAG_FAST | AUDIO_INPUT_FLAG_RAW)
                    : (AUDIO_INPUT_FLAG_FAST));
            break;

        case AAUDIO_PERFORMANCE_MODE_POWER_SAVING:
        case AAUDIO_PERFORMANCE_MODE_NONE:
        default:
            flags = AUDIO_INPUT_FLAG_NONE;
            break;
    }

    const audio_format_t requestedFormat = getFormat();
    // Preserve behavior of API 26
    if (requestedFormat == AUDIO_FORMAT_DEFAULT) {
        setFormat(AUDIO_FORMAT_PCM_FLOAT);
    }

    setDeviceFormat(getFormat());

    // To avoid glitching, let AudioFlinger pick the optimal burst size.
    uint32_t notificationFrames = 0;

    // Setup the callback if there is one.
    sp<AudioRecord::IAudioRecordCallback> callback;
    AudioRecord::transfer_type streamTransferType = AudioRecord::transfer_type::TRANSFER_SYNC;
    if (builder.getDataCallbackProc() != nullptr) {
        streamTransferType = AudioRecord::transfer_type::TRANSFER_CALLBACK;
        callback = sp<AudioRecord::IAudioRecordCallback>::fromExisting(this);
    }
    mCallbackBufferSize = builder.getFramesPerDataCallback();

    // Don't call mAudioRecord->setInputDevice() because it will be overwritten by set()!
    audio_port_handle_t selectedDeviceId = (getDeviceId() == AAUDIO_UNSPECIFIED)
                                           ? AUDIO_PORT_HANDLE_NONE
                                           : getDeviceId();

    const audio_content_type_t contentType =
            AAudioConvert_contentTypeToInternal(builder.getContentType());
    const audio_source_t source =
            AAudioConvert_inputPresetToAudioSource(builder.getInputPreset());

    const audio_flags_mask_t attrFlags =
            AAudioConvert_privacySensitiveToAudioFlagsMask(builder.isPrivacySensitive());
    const audio_attributes_t attributes = {
            .content_type = contentType,
            .usage = AUDIO_USAGE_UNKNOWN, // only used for output
            .source = source,
            .flags = attrFlags, // Different than the AUDIO_INPUT_FLAGS
            .tags = ""
    };

    // TODO b/182392769: use attribution source util
    AttributionSourceState attributionSource;
    attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(getuid()));
    attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(getpid()));
    attributionSource.packageName = builder.getOpPackageName();
    attributionSource.attributionTag = builder.getAttributionTag();
    attributionSource.token = sp<BBinder>::make();

    // ----------- open the AudioRecord ---------------------
    // Might retry, but never more than once.
    for (int i = 0; i < 2; i++) {
        const audio_format_t requestedInternalFormat = getDeviceFormat();

        mAudioRecord = new AudioRecord(
                attributionSource
        );
        mAudioRecord->set(
                AUDIO_SOURCE_DEFAULT, // ignored because we pass attributes below
                getSampleRate(),
                requestedInternalFormat,
                channelMask,
                frameCount,
                callback,
                notificationFrames,
                false /*threadCanCallJava*/,
                sessionId,
                streamTransferType,
                flags,
                AUDIO_UID_INVALID, // DEFAULT uid
                -1,                // DEFAULT pid
                &attributes,
                selectedDeviceId
        );

        // Set it here so it can be logged by the destructor if the open failed.
        mAudioRecord->setCallerName(kCallerName);

        // Did we get a valid track?
        status_t status = mAudioRecord->initCheck();
        if (status != OK) {
            safeReleaseClose();
            ALOGE("open(), initCheck() returned %d", status);
            return AAudioConvert_androidToAAudioResult(status);
        }

        // Check to see if it was worth hacking the deviceFormat.
        bool gotFastPath = (mAudioRecord->getFlags() & AUDIO_INPUT_FLAG_FAST)
                           == AUDIO_INPUT_FLAG_FAST;
        if (getFormat() != getDeviceFormat() && !gotFastPath) {
            // We tried to get a FAST path by switching the device format.
            // But it didn't work. So we might as well reopen using the same
            // format for device and for app.
            ALOGD("%s() used a different device format but no FAST path, reopen", __func__);
            mAudioRecord.clear();
            setDeviceFormat(getFormat());
        } else {
            break; // Keep the one we just opened.
        }
    }

    mMetricsId = std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_RECORD)
                 + std::to_string(mAudioRecord->getPortId());
    android::mediametrics::LogItem(mMetricsId)
            .set(AMEDIAMETRICS_PROP_PERFORMANCEMODE,
                 AudioGlobal_convertPerformanceModeToText(builder.getPerformanceMode()))
            .set(AMEDIAMETRICS_PROP_SHARINGMODE,
                 AudioGlobal_convertSharingModeToText(builder.getSharingMode()))
            .set(AMEDIAMETRICS_PROP_ENCODINGCLIENT, toString(requestedFormat).c_str()).record();

    // Get the actual values from the AudioRecord.
    setChannelMask(AAudioConvert_androidToAAudioChannelMask(
            mAudioRecord->channelMask(), true /*isInput*/,
            AAudio_isChannelIndexMask(getChannelMask())));
    setSampleRate(mAudioRecord->getSampleRate());
    setBufferCapacity(getBufferCapacityFromDevice());
    setFramesPerBurst(getFramesPerBurstFromDevice());

    // We may need to pass the data through a block size adapter to guarantee constant size.
    if (mCallbackBufferSize != AAUDIO_UNSPECIFIED) {
        // The block adapter runs before the format conversion.
        // So we need to use the device frame size.
        mBlockAdapterBytesPerFrame = getBytesPerDeviceFrame();
        int callbackSizeBytes = mBlockAdapterBytesPerFrame * mCallbackBufferSize;
        mFixedBlockWriter.open(callbackSizeBytes);
        mBlockAdapter = &mFixedBlockWriter;
    } else {
        mBlockAdapter = nullptr;
    }

    // Allocate format conversion buffer if needed.
    if (getDeviceFormat() == AUDIO_FORMAT_PCM_16_BIT
        && getFormat() == AUDIO_FORMAT_PCM_FLOAT) {

        if (builder.getDataCallbackProc() != nullptr) {
            // If we have a callback then we need to convert the data into an internal float
            // array and then pass that entire array to the app.
            mFormatConversionBufferSizeInFrames =
                    (mCallbackBufferSize != AAUDIO_UNSPECIFIED)
                    ? mCallbackBufferSize : getFramesPerBurst();
            int32_t numSamples = mFormatConversionBufferSizeInFrames * getSamplesPerFrame();
            mFormatConversionBufferFloat = std::make_unique<float[]>(numSamples);
        } else {
            // If we don't have a callback then we will read into an internal short array
            // and then convert into the app float array in read().
            mFormatConversionBufferSizeInFrames = getFramesPerBurst();
            int32_t numSamples = mFormatConversionBufferSizeInFrames * getSamplesPerFrame();
            mFormatConversionBufferI16 = std::make_unique<int16_t[]>(numSamples);
        }
        ALOGD("%s() setup I16>FLOAT conversion buffer with %d frames",
              __func__, mFormatConversionBufferSizeInFrames);
    }

    // Update performance mode based on the actual stream.
    // For example, if the sample rate does not match native then you won't get a FAST track.
    audio_input_flags_t actualFlags = mAudioRecord->getFlags();
    aaudio_performance_mode_t actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
    // FIXME Some platforms do not advertise RAW mode for low latency inputs.
    if ((actualFlags & (AUDIO_INPUT_FLAG_FAST))
        == (AUDIO_INPUT_FLAG_FAST)) {
        actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
    }
    setPerformanceMode(actualPerformanceMode);

    setSharingMode(AAUDIO_SHARING_MODE_SHARED); // EXCLUSIVE mode not supported in legacy

    // Log warning if we did not get what we asked for.
    ALOGW_IF(actualFlags != flags,
             "open() flags changed from 0x%08X to 0x%08X",
             flags, actualFlags);
    ALOGW_IF(actualPerformanceMode != perfMode,
             "open() perfMode changed from %d to %d",
             perfMode, actualPerformanceMode);

    setState(AAUDIO_STREAM_STATE_OPEN);
    setDeviceId(mAudioRecord->getRoutedDeviceId());

    aaudio_session_id_t actualSessionId =
            (requestedSessionId == AAUDIO_SESSION_ID_NONE)
            ? AAUDIO_SESSION_ID_NONE
            : (aaudio_session_id_t) mAudioRecord->getSessionId();
    setSessionId(actualSessionId);

    mAudioRecord->addAudioDeviceCallback(this);

    return AAUDIO_OK;
}

aaudio_result_t AudioStreamRecord::release_l() {
    // TODO add close() or release() to AudioFlinger's AudioRecord API.
    //      Then call it from here
    if (getState() != AAUDIO_STREAM_STATE_CLOSING) {
        mAudioRecord->removeAudioDeviceCallback(this);
        logReleaseBufferState();
        // Data callbacks may still be running!
        return AudioStream::release_l();
    } else {
        return AAUDIO_OK; // already released
    }
}

void AudioStreamRecord::close_l() {
    // The callbacks are normally joined in the AudioRecord destructor.
    // But if another object has a reference to the AudioRecord then
    // it will not get deleted here.
    // So we should join callbacks explicitly before returning.
    // Unlock around the join to avoid deadlocks if the callback tries to lock.
    // This can happen if the callback returns AAUDIO_CALLBACK_RESULT_STOP
    mStreamLock.unlock();
    mAudioRecord->stopAndJoinCallbacks();
    mStreamLock.lock();

    mAudioRecord.clear();
    // Do not close mFixedBlockWriter. It has a unique_ptr to its buffer
    // so it will clean up by itself.
    AudioStream::close_l();
}

const void * AudioStreamRecord::maybeConvertDeviceData(const void *audioData, int32_t numFrames) {
    if (mFormatConversionBufferFloat.get() != nullptr) {
        LOG_ALWAYS_FATAL_IF(numFrames > mFormatConversionBufferSizeInFrames,
                            "%s() conversion size %d too large for buffer %d",
                            __func__, numFrames, mFormatConversionBufferSizeInFrames);

        int32_t numSamples = numFrames * getSamplesPerFrame();
        // Only conversion supported is I16 to FLOAT
        memcpy_to_float_from_i16(
                mFormatConversionBufferFloat.get(),
                (const int16_t *) audioData,
                numSamples);
        return mFormatConversionBufferFloat.get();
    } else {
        return audioData;
    }
}

aaudio_result_t AudioStreamRecord::requestStart_l()
{
    if (mAudioRecord.get() == nullptr) {
        return AAUDIO_ERROR_INVALID_STATE;
    }

    // Enable callback before starting AudioRecord to avoid shutting
    // down because of a race condition.
    mCallbackEnabled.store(true);
    aaudio_stream_state_t originalState = getState();
    // Set before starting the callback so that we are in the correct state
    // before updateStateMachine() can be called by the callback.
    setState(AAUDIO_STREAM_STATE_STARTING);
    mFramesWritten.reset32(); // service writes frames
    mTimestampPosition.reset32();
    status_t err = mAudioRecord->start(); // resets position to zero
    if (err != OK) {
        mCallbackEnabled.store(false);
        setState(originalState);
        return AAudioConvert_androidToAAudioResult(err);
    }
    return AAUDIO_OK;
}

aaudio_result_t AudioStreamRecord::requestStop_l() {
    if (mAudioRecord.get() == nullptr) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    setState(AAUDIO_STREAM_STATE_STOPPING);
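    // catchUpTo() only moves these monotonic counters forward, up to at least
    // the number of frames the app has already read.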
    mFramesWritten.catchUpTo(getFramesRead());
    mTimestampPosition.catchUpTo(getFramesRead());
    mAudioRecord->stop();
    mCallbackEnabled.store(false);
    // Pass false to prevent errorCallback from being called after disconnect
    // when app has already requested a stop().
    return checkForDisconnectRequest(false);
}

aaudio_result_t AudioStreamRecord::updateStateMachine()
{
    aaudio_result_t result = AAUDIO_OK;
    aaudio_wrapping_frames_t position;
    status_t err;
    switch (getState()) {
        // TODO add better state visibility to AudioRecord
        case AAUDIO_STREAM_STATE_STARTING:
            // When starting, the position will begin at zero and then go positive.
            // The position can wrap but by that time the state will not be STARTING.
            err = mAudioRecord->getPosition(&position);
            if (err != OK) {
                result = AAudioConvert_androidToAAudioResult(err);
            } else if (position > 0) {
                setState(AAUDIO_STREAM_STATE_STARTED);
            }
            break;
        case AAUDIO_STREAM_STATE_STOPPING:
            if (mAudioRecord->stopped()) {
                setState(AAUDIO_STREAM_STATE_STOPPED);
            }
            break;
        default:
            break;
    }
    return result;
}

aaudio_result_t AudioStreamRecord::read(void *buffer,
                                        int32_t numFrames,
                                        int64_t timeoutNanoseconds)
{
    int32_t bytesPerDeviceFrame = getBytesPerDeviceFrame();
    int32_t numBytes;
    // This will detect out of range values for numFrames.
    aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerDeviceFrame, &numBytes);
    if (result != AAUDIO_OK) {
        return result;
    }

    if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED) {
        return AAUDIO_ERROR_DISCONNECTED;
    }

    // TODO add timeout to AudioRecord
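    // Any positive timeout is treated as "block until data arrives"; the requested
    // duration itself cannot be honored because AudioRecord::read() has no timeout.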
    bool blocking = (timeoutNanoseconds > 0);

    ssize_t bytesActuallyRead = 0;
    ssize_t totalBytesRead = 0;
    if (mFormatConversionBufferI16.get() != nullptr) {
        // Convert I16 data to float using an intermediate buffer.
        float *floatBuffer = (float *) buffer;
        int32_t framesLeft = numFrames;
        // Perform conversion using multiple read()s if necessary.
        while (framesLeft > 0) {
            // Read into short internal buffer.
            int32_t framesToRead = std::min(framesLeft, mFormatConversionBufferSizeInFrames);
            size_t bytesToRead = framesToRead * bytesPerDeviceFrame;
            bytesActuallyRead = mAudioRecord->read(mFormatConversionBufferI16.get(),
                                                   bytesToRead, blocking);
            if (bytesActuallyRead <= 0) {
                break;
            }
            totalBytesRead += bytesActuallyRead;
            int32_t framesToConvert = bytesActuallyRead / bytesPerDeviceFrame;
            // Convert into app float buffer.
            size_t numSamples = framesToConvert * getSamplesPerFrame();
            memcpy_to_float_from_i16(
                    floatBuffer,
                    mFormatConversionBufferI16.get(),
                    numSamples);
            floatBuffer += numSamples;
            framesLeft -= framesToConvert;
        }
    } else {
        bytesActuallyRead = mAudioRecord->read(buffer, numBytes, blocking);
        totalBytesRead = bytesActuallyRead;
    }
    if (bytesActuallyRead == WOULD_BLOCK) {
        return 0;
    } else if (bytesActuallyRead < 0) {
        // In this context, a DEAD_OBJECT is more likely to be a disconnect notification due to
        // AudioRecord invalidation.
        if (bytesActuallyRead == DEAD_OBJECT) {
            setState(AAUDIO_STREAM_STATE_DISCONNECTED);
            return AAUDIO_ERROR_DISCONNECTED;
        }
        return AAudioConvert_androidToAAudioResult(bytesActuallyRead);
    }
    int32_t framesRead = (int32_t)(totalBytesRead / bytesPerDeviceFrame);
    incrementFramesRead(framesRead);

    result = updateStateMachine();
    if (result != AAUDIO_OK) {
        return result;
    }

    return (aaudio_result_t) framesRead;
}

aaudio_result_t AudioStreamRecord::setBufferSize(int32_t /*requestedFrames*/)
{
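    // The buffer size is not adjustable on the legacy path; ignore the request
    // and report the current size.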
    return getBufferSize();
}

int32_t AudioStreamRecord::getBufferSize() const
{
    return getBufferCapacity(); // TODO implement in AudioRecord?
}

int32_t AudioStreamRecord::getBufferCapacityFromDevice() const
{
    return static_cast<int32_t>(mAudioRecord->frameCount());
}

int32_t AudioStreamRecord::getXRunCount() const
{
    return 0; // TODO implement when AudioRecord supports it
}

int32_t AudioStreamRecord::getFramesPerBurstFromDevice() const {
    return static_cast<int32_t>(mAudioRecord->getNotificationPeriodInFrames());
}

aaudio_result_t AudioStreamRecord::getTimestamp(clockid_t clockId,
                                                int64_t *framePosition,
                                                int64_t *timeNanoseconds) {
    ExtendedTimestamp extendedTimestamp;
    if (getState() != AAUDIO_STREAM_STATE_STARTED) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    status_t status = mAudioRecord->getTimestamp(&extendedTimestamp);
    if (status == WOULD_BLOCK) {
        return AAUDIO_ERROR_INVALID_STATE;
    } else if (status != NO_ERROR) {
        return AAudioConvert_androidToAAudioResult(status);
    }
    return getBestTimestamp(clockId, framePosition, timeNanoseconds, &extendedTimestamp);
}

int64_t AudioStreamRecord::getFramesWritten() {
    aaudio_wrapping_frames_t position;
    status_t result;
    switch (getState()) {
        case AAUDIO_STREAM_STATE_STARTING:
        case AAUDIO_STREAM_STATE_STARTED:
            result = mAudioRecord->getPosition(&position);
            if (result == OK) {
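                // Extend the 32-bit wrapping position into the 64-bit monotonic counter.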
                mFramesWritten.update32((int32_t)position);
            }
            break;
        case AAUDIO_STREAM_STATE_STOPPING:
        default:
            break;
    }
    return AudioStreamLegacy::getFramesWritten();
}