/*
 * Copyright 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "AudioStreamRecord"
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <stdint.h>

#include <aaudio/AAudio.h>
#include <audio_utils/primitives.h>
#include <media/AidlConversion.h>
#include <media/AudioRecord.h>
#include <utils/String16.h>

#include "core/AudioGlobal.h"
#include "legacy/AudioStreamLegacy.h"
#include "legacy/AudioStreamRecord.h"
#include "utility/AudioClock.h"
#include "utility/FixedBlockWriter.h"

using android::content::AttributionSourceState;

using namespace android;
using namespace aaudio;

AudioStreamRecord::AudioStreamRecord()
    : AudioStreamLegacy()
    , mFixedBlockWriter(*this)
{
}

AudioStreamRecord::~AudioStreamRecord()
{
    const aaudio_stream_state_t state = getState();
    bool bad = !(state == AAUDIO_STREAM_STATE_UNINITIALIZED || state == AAUDIO_STREAM_STATE_CLOSED);
    ALOGE_IF(bad, "stream not closed, in state %d", state);
}

aaudio_result_t AudioStreamRecord::open(const AudioStreamBuilder& builder)
{
    aaudio_result_t result = AAUDIO_OK;

    result = AudioStream::open(builder);
    if (result != AAUDIO_OK) {
        return result;
    }

    // Try to create an AudioRecord

    const aaudio_session_id_t requestedSessionId = builder.getSessionId();
    const audio_session_t sessionId = AAudioConvert_aaudioToAndroidSessionId(requestedSessionId);

    // TODO Support UNSPECIFIED in AudioRecord. For now, use stereo if unspecified.
    audio_channel_mask_t channelMask =
            AAudio_getChannelMaskForOpen(getChannelMask(), getSamplesPerFrame(), true /*isInput*/);

    size_t frameCount = (builder.getBufferCapacity() == AAUDIO_UNSPECIFIED) ? 0
                        : builder.getBufferCapacity();

    audio_input_flags_t flags;
    aaudio_performance_mode_t perfMode = getPerformanceMode();
    switch (perfMode) {
        case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
            // If the app asks for a sessionId then it means they want to use effects.
            // So don't use RAW flag.
            flags = (audio_input_flags_t) ((requestedSessionId == AAUDIO_SESSION_ID_NONE)
                    ? (AUDIO_INPUT_FLAG_FAST | AUDIO_INPUT_FLAG_RAW)
                    : (AUDIO_INPUT_FLAG_FAST));
            break;

        case AAUDIO_PERFORMANCE_MODE_POWER_SAVING:
        case AAUDIO_PERFORMANCE_MODE_NONE:
        default:
            flags = AUDIO_INPUT_FLAG_NONE;
            break;
    }
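    // These flags are only a request. Whether FAST (and RAW) is actually granted depends on
    // the device; the flags the AudioRecord really got are checked after it is created and
    // the performance mode is updated to match.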

    const audio_format_t requestedFormat = getFormat();
    // Preserve behavior of API 26
    if (requestedFormat == AUDIO_FORMAT_DEFAULT) {
        setFormat(AUDIO_FORMAT_PCM_FLOAT);
    }

    setDeviceFormat(getFormat());

    // To avoid glitching, let AudioFlinger pick the optimal burst size.
    uint32_t notificationFrames = 0;

    // Setup the callback if there is one.
    sp<AudioRecord::IAudioRecordCallback> callback;
    AudioRecord::transfer_type streamTransferType = AudioRecord::transfer_type::TRANSFER_SYNC;
    if (builder.getDataCallbackProc() != nullptr) {
        streamTransferType = AudioRecord::transfer_type::TRANSFER_CALLBACK;
        callback = sp<AudioRecord::IAudioRecordCallback>::fromExisting(this);
    }
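    // In TRANSFER_CALLBACK mode AudioRecord pushes capture buffers to the callback interface
    // registered above; in TRANSFER_SYNC mode the app pulls data through read() instead.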
    mCallbackBufferSize = builder.getFramesPerDataCallback();

    // Don't call mAudioRecord->setInputDevice() because it will be overwritten by set()!
    audio_port_handle_t selectedDeviceId = (getDeviceId() == AAUDIO_UNSPECIFIED)
                                           ? AUDIO_PORT_HANDLE_NONE
                                           : getDeviceId();

    const audio_content_type_t contentType =
            AAudioConvert_contentTypeToInternal(builder.getContentType());
    const audio_source_t source =
            AAudioConvert_inputPresetToAudioSource(builder.getInputPreset());

    const audio_flags_mask_t attrFlags =
            AAudioConvert_privacySensitiveToAudioFlagsMask(builder.isPrivacySensitive());
    const audio_attributes_t attributes = {
            .content_type = contentType,
            .usage = AUDIO_USAGE_UNKNOWN, // only used for output
            .source = source,
            .flags = attrFlags, // Different than the AUDIO_INPUT_FLAGS
            .tags = ""
    };
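    // The attributes carry the input preset (as audio_source_t) and the privacy-sensitive
    // flag to audio policy, which is why AUDIO_SOURCE_DEFAULT is passed to set() below.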

    // TODO b/182392769: use attribution source util
    AttributionSourceState attributionSource;
    attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(getuid()));
    attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(getpid()));
    attributionSource.packageName = builder.getOpPackageName();
    attributionSource.attributionTag = builder.getAttributionTag();
    attributionSource.token = sp<BBinder>::make();

    // ----------- open the AudioRecord ---------------------
    // Might retry, but never more than once.
    for (int i = 0; i < 2; i++) {
        const audio_format_t requestedInternalFormat = getDeviceFormat();

        mAudioRecord = new AudioRecord(
                attributionSource
        );
        mAudioRecord->set(
                AUDIO_SOURCE_DEFAULT, // ignored because we pass attributes below
                getSampleRate(),
                requestedInternalFormat,
                channelMask,
                frameCount,
                callback,
                notificationFrames,
                false /*threadCanCallJava*/,
                sessionId,
                streamTransferType,
                flags,
                AUDIO_UID_INVALID, // DEFAULT uid
                -1,                // DEFAULT pid
                &attributes,
                selectedDeviceId
        );

        // Set it here so it can be logged by the destructor if the open failed.
        mAudioRecord->setCallerName(kCallerName);

        // Did we get a valid track?
        status_t status = mAudioRecord->initCheck();
        if (status != OK) {
            safeReleaseClose();
            ALOGE("open(), initCheck() returned %d", status);
            return AAudioConvert_androidToAAudioResult(status);
        }

        // Check to see if it was worth hacking the deviceFormat.
        bool gotFastPath = (mAudioRecord->getFlags() & AUDIO_INPUT_FLAG_FAST)
                           == AUDIO_INPUT_FLAG_FAST;
        if (getFormat() != getDeviceFormat() && !gotFastPath) {
            // We tried to get a FAST path by switching the device format.
            // But it didn't work. So we might as well reopen using the same
            // format for device and for app.
            ALOGD("%s() used a different device format but no FAST path, reopen", __func__);
            mAudioRecord.clear();
            setDeviceFormat(getFormat());
        } else {
            break; // Keep the one we just opened.
        }
    }

    mMetricsId = std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_RECORD)
            + std::to_string(mAudioRecord->getPortId());
    android::mediametrics::LogItem(mMetricsId)
            .set(AMEDIAMETRICS_PROP_PERFORMANCEMODE,
                 AudioGlobal_convertPerformanceModeToText(builder.getPerformanceMode()))
            .set(AMEDIAMETRICS_PROP_SHARINGMODE,
                 AudioGlobal_convertSharingModeToText(builder.getSharingMode()))
            .set(AMEDIAMETRICS_PROP_ENCODINGCLIENT, toString(requestedFormat).c_str()).record();

    // Get the actual values from the AudioRecord.
    setChannelMask(AAudioConvert_androidToAAudioChannelMask(
            mAudioRecord->channelMask(), true /*isInput*/,
            AAudio_isChannelIndexMask(getChannelMask())));
    setSampleRate(mAudioRecord->getSampleRate());
    setBufferCapacity(getBufferCapacityFromDevice());
    setFramesPerBurst(getFramesPerBurstFromDevice());

    setHardwareSamplesPerFrame(mAudioRecord->getHalChannelCount());
    setHardwareSampleRate(mAudioRecord->getHalSampleRate());
    setHardwareFormat(mAudioRecord->getHalFormat());

    // We may need to pass the data through a block size adapter to guarantee constant size.
    if (mCallbackBufferSize != AAUDIO_UNSPECIFIED) {
        // The block adapter runs before the format conversion.
        // So we need to use the device frame size.
        mBlockAdapterBytesPerFrame = getBytesPerDeviceFrame();
        int callbackSizeBytes = mBlockAdapterBytesPerFrame * mCallbackBufferSize;
        mFixedBlockWriter.open(callbackSizeBytes);
        mBlockAdapter = &mFixedBlockWriter;
    } else {
        mBlockAdapter = nullptr;
    }
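    // The FixedBlockWriter accumulates the variable-sized bursts delivered by AudioRecord,
    // so the app data callback always sees exactly getFramesPerDataCallback() frames.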

    // Allocate format conversion buffer if needed.
    if (getDeviceFormat() == AUDIO_FORMAT_PCM_16_BIT
        && getFormat() == AUDIO_FORMAT_PCM_FLOAT) {

        if (builder.getDataCallbackProc() != nullptr) {
            // If we have a callback then we need to convert the data into an internal float
            // array and then pass that entire array to the app.
            mFormatConversionBufferSizeInFrames =
                    (mCallbackBufferSize != AAUDIO_UNSPECIFIED)
                    ? mCallbackBufferSize : getFramesPerBurst();
            int32_t numSamples = mFormatConversionBufferSizeInFrames * getSamplesPerFrame();
            mFormatConversionBufferFloat = std::make_unique<float[]>(numSamples);
        } else {
            // If we don't have a callback then we will read into an internal short array
            // and then convert into the app float array in read().
            mFormatConversionBufferSizeInFrames = getFramesPerBurst();
            int32_t numSamples = mFormatConversionBufferSizeInFrames * getSamplesPerFrame();
            mFormatConversionBufferI16 = std::make_unique<int16_t[]>(numSamples);
        }
        ALOGD("%s() setup I16>FLOAT conversion buffer with %d frames",
              __func__, mFormatConversionBufferSizeInFrames);
    }

    // Update performance mode based on the actual stream.
    // For example, if the sample rate does not match native then you won't get a FAST track.
    audio_input_flags_t actualFlags = mAudioRecord->getFlags();
    aaudio_performance_mode_t actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
    // FIXME Some platforms do not advertise RAW mode for low latency inputs.
    if ((actualFlags & (AUDIO_INPUT_FLAG_FAST))
        == (AUDIO_INPUT_FLAG_FAST)) {
        actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
    }
    setPerformanceMode(actualPerformanceMode);

    setSharingMode(AAUDIO_SHARING_MODE_SHARED); // EXCLUSIVE mode not supported in legacy

    // Log warning if we did not get what we asked for.
    ALOGW_IF(actualFlags != flags,
             "open() flags changed from 0x%08X to 0x%08X",
             flags, actualFlags);
    ALOGW_IF(actualPerformanceMode != perfMode,
             "open() perfMode changed from %d to %d",
             perfMode, actualPerformanceMode);

    setState(AAUDIO_STREAM_STATE_OPEN);
    setDeviceId(mAudioRecord->getRoutedDeviceId());

    aaudio_session_id_t actualSessionId =
            (requestedSessionId == AAUDIO_SESSION_ID_NONE)
            ? AAUDIO_SESSION_ID_NONE
            : (aaudio_session_id_t) mAudioRecord->getSessionId();
    setSessionId(actualSessionId);

    mAudioRecord->addAudioDeviceCallback(this);

    return AAUDIO_OK;
}

aaudio_result_t AudioStreamRecord::release_l() {
    // TODO add close() or release() to AudioFlinger's AudioRecord API.
    //  Then call it from here
    if (getState() != AAUDIO_STREAM_STATE_CLOSING) {
        mAudioRecord->removeAudioDeviceCallback(this);
        logReleaseBufferState();
        // Data callbacks may still be running!
        return AudioStream::release_l();
    } else {
        return AAUDIO_OK; // already released
    }
}

void AudioStreamRecord::close_l() {
    // The callbacks are normally joined in the AudioRecord destructor.
    // But if another object has a reference to the AudioRecord then
    // it will not get deleted here.
    // So we should join callbacks explicitly before returning.
    // Unlock around the join to avoid deadlocks if the callback tries to lock.
    // This can happen if the callback returns AAUDIO_CALLBACK_RESULT_STOP
    mStreamLock.unlock();
    mAudioRecord->stopAndJoinCallbacks();
    mStreamLock.lock();

    mAudioRecord.clear();
    // Do not close mFixedBlockWriter. It has a unique_ptr to its buffer
    // so it will clean up by itself.
    AudioStream::close_l();
}

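// If the device delivers I16 but the app expects float, convert into
// mFormatConversionBufferFloat and return that buffer; otherwise return the
// device buffer unchanged.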
const void * AudioStreamRecord::maybeConvertDeviceData(const void *audioData, int32_t numFrames) {
    if (mFormatConversionBufferFloat.get() != nullptr) {
        LOG_ALWAYS_FATAL_IF(numFrames > mFormatConversionBufferSizeInFrames,
                            "%s() conversion size %d too large for buffer %d",
                            __func__, numFrames, mFormatConversionBufferSizeInFrames);

        int32_t numSamples = numFrames * getSamplesPerFrame();
        // Only conversion supported is I16 to FLOAT
        memcpy_to_float_from_i16(
                mFormatConversionBufferFloat.get(),
                (const int16_t *) audioData,
                numSamples);
        return mFormatConversionBufferFloat.get();
    } else {
        return audioData;
    }
}

aaudio_result_t AudioStreamRecord::requestStart_l()
{
    if (mAudioRecord.get() == nullptr) {
        return AAUDIO_ERROR_INVALID_STATE;
    }

    // Enable callback before starting AudioRecord to avoid shutting
    // down because of a race condition.
    mCallbackEnabled.store(true);
    aaudio_stream_state_t originalState = getState();
    // Set before starting the callback so that we are in the correct state
    // before updateStateMachine() can be called by the callback.
    setState(AAUDIO_STREAM_STATE_STARTING);
    mFramesWritten.reset32(); // service writes frames
    mTimestampPosition.reset32();
    status_t err = mAudioRecord->start(); // resets position to zero
    if (err != OK) {
        mCallbackEnabled.store(false);
        setState(originalState);
        return AAudioConvert_androidToAAudioResult(err);
    }
    return AAUDIO_OK;
}

aaudio_result_t AudioStreamRecord::requestStop_l() {
    if (mAudioRecord.get() == nullptr) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    setState(AAUDIO_STREAM_STATE_STOPPING);
    mFramesWritten.catchUpTo(getFramesRead());
    mTimestampPosition.catchUpTo(getFramesRead());
    mAudioRecord->stop();
    mCallbackEnabled.store(false);
    // Pass false to prevent errorCallback from being called after disconnect
    // when app has already requested a stop().
    return checkForDisconnectRequest(false);
}

aaudio_result_t AudioStreamRecord::processCommands() {
    aaudio_result_t result = AAUDIO_OK;
    aaudio_wrapping_frames_t position;
    status_t err;
    switch (getState()) {
        // TODO add better state visibility to AudioRecord
        case AAUDIO_STREAM_STATE_STARTING:
            // When starting, the position will begin at zero and then go positive.
            // The position can wrap but by that time the state will not be STARTING.
            err = mAudioRecord->getPosition(&position);
            if (err != OK) {
                result = AAudioConvert_androidToAAudioResult(err);
            } else if (position > 0) {
                setState(AAUDIO_STREAM_STATE_STARTED);
            }
            break;
        case AAUDIO_STREAM_STATE_STOPPING:
            if (mAudioRecord->stopped()) {
                setState(AAUDIO_STREAM_STATE_STOPPED);
            }
            break;
        default:
            break;
    }
    return result;
}

aaudio_result_t AudioStreamRecord::read(void *buffer,
                                        int32_t numFrames,
                                        int64_t timeoutNanoseconds)
{
    int32_t bytesPerDeviceFrame = getBytesPerDeviceFrame();
    int32_t numBytes;
    // This will detect out of range values for numFrames.
    aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerDeviceFrame, &numBytes);
    if (result != AAUDIO_OK) {
        return result;
    }

    if (isDisconnected()) {
        return AAUDIO_ERROR_DISCONNECTED;
    }

    // TODO add timeout to AudioRecord
    bool blocking = (timeoutNanoseconds > 0);
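    // A positive timeout maps onto AudioRecord's blocking read; the exact timeout value
    // itself is not honored until the TODO above is addressed.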

    ssize_t bytesActuallyRead = 0;
    ssize_t totalBytesRead = 0;
    if (mFormatConversionBufferI16.get() != nullptr) {
        // Convert I16 data to float using an intermediate buffer.
        float *floatBuffer = (float *) buffer;
        int32_t framesLeft = numFrames;
        // Perform conversion using multiple read()s if necessary.
        while (framesLeft > 0) {
            // Read into short internal buffer.
            int32_t framesToRead = std::min(framesLeft, mFormatConversionBufferSizeInFrames);
            size_t bytesToRead = framesToRead * bytesPerDeviceFrame;
            bytesActuallyRead = mAudioRecord->read(mFormatConversionBufferI16.get(),
                                                   bytesToRead, blocking);
            if (bytesActuallyRead <= 0) {
                break;
            }
            totalBytesRead += bytesActuallyRead;
            int32_t framesToConvert = bytesActuallyRead / bytesPerDeviceFrame;
            // Convert into app float buffer.
            size_t numSamples = framesToConvert * getSamplesPerFrame();
            memcpy_to_float_from_i16(
                    floatBuffer,
                    mFormatConversionBufferI16.get(),
                    numSamples);
            floatBuffer += numSamples;
            framesLeft -= framesToConvert;
        }
    } else {
        bytesActuallyRead = mAudioRecord->read(buffer, numBytes, blocking);
        totalBytesRead = bytesActuallyRead;
    }
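    // A WOULD_BLOCK result from a non-blocking read just means no data was available yet,
    // so report zero frames rather than an error.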
    if (bytesActuallyRead == WOULD_BLOCK) {
        return 0;
    } else if (bytesActuallyRead < 0) {
        // In this context, a DEAD_OBJECT is more likely to be a disconnect notification due to
        // AudioRecord invalidation.
        if (bytesActuallyRead == DEAD_OBJECT) {
            setDisconnected();
            return AAUDIO_ERROR_DISCONNECTED;
        }
        return AAudioConvert_androidToAAudioResult(bytesActuallyRead);
    }
    int32_t framesRead = (int32_t)(totalBytesRead / bytesPerDeviceFrame);
    incrementFramesRead(framesRead);

    result = updateStateMachine();
    if (result != AAUDIO_OK) {
        return result;
    }

    return (aaudio_result_t) framesRead;
}

aaudio_result_t AudioStreamRecord::setBufferSize(int32_t /*requestedFrames*/)
{
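    // The legacy capture path does not expose a way to adjust the effective buffer
    // threshold, so the request is ignored and the current size is reported back.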
    return getBufferSize();
}

int32_t AudioStreamRecord::getBufferSize() const
{
    return getBufferCapacity(); // TODO implement in AudioRecord?
}

int32_t AudioStreamRecord::getBufferCapacityFromDevice() const
{
    return static_cast<int32_t>(mAudioRecord->frameCount());
}

int32_t AudioStreamRecord::getXRunCount() const
{
    return 0; // TODO implement when AudioRecord supports it
}

int32_t AudioStreamRecord::getFramesPerBurstFromDevice() const {
    return static_cast<int32_t>(mAudioRecord->getNotificationPeriodInFrames());
}

aaudio_result_t AudioStreamRecord::getTimestamp(clockid_t clockId,
                                                int64_t *framePosition,
                                                int64_t *timeNanoseconds) {
    ExtendedTimestamp extendedTimestamp;
    if (getState() != AAUDIO_STREAM_STATE_STARTED) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    status_t status = mAudioRecord->getTimestamp(&extendedTimestamp);
    if (status == WOULD_BLOCK) {
        return AAUDIO_ERROR_INVALID_STATE;
    } else if (status != NO_ERROR) {
        return AAudioConvert_androidToAAudioResult(status);
    }
    return getBestTimestamp(clockId, framePosition, timeNanoseconds, &extendedTimestamp);
}

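// For a capture stream, "frames written" counts frames the device side has written into the
// buffer. While the stream is running, AudioRecord's wrapping 32-bit position is folded into
// the 64-bit monotonic counter kept by the base class.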
int64_t AudioStreamRecord::getFramesWritten() {
    aaudio_wrapping_frames_t position;
    status_t result;
    switch (getState()) {
        case AAUDIO_STREAM_STATE_STARTING:
        case AAUDIO_STREAM_STATE_STARTED:
            result = mAudioRecord->getPosition(&position);
            if (result == OK) {
                mFramesWritten.update32((int32_t)position);
            }
            break;
        case AAUDIO_STREAM_STATE_STOPPING:
        default:
            break;
    }
    return AudioStreamLegacy::getFramesWritten();
}