/*
 * Copyright 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "AudioStreamTrack"
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <stdint.h>
#include <media/AudioTrack.h>

#include <aaudio/AAudio.h>
#include <system/audio.h>
#include "utility/AudioClock.h"
#include "legacy/AudioStreamLegacy.h"
#include "legacy/AudioStreamTrack.h"
#include "utility/FixedBlockReader.h"

using namespace android;
using namespace aaudio;

// Arbitrary and somewhat generous number of bursts.
#define DEFAULT_BURSTS_PER_BUFFER_CAPACITY 8

/*
 * Create a stream that uses the AudioTrack.
 */
AudioStreamTrack::AudioStreamTrack()
    : AudioStreamLegacy()
    , mFixedBlockReader(*this)
{
}

AudioStreamTrack::~AudioStreamTrack()
{
    const aaudio_stream_state_t state = getState();
    bool bad = !(state == AAUDIO_STREAM_STATE_UNINITIALIZED || state == AAUDIO_STREAM_STATE_CLOSED);
    ALOGE_IF(bad, "stream not closed, in state %d", state);
}

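// Create and configure the underlying android::AudioTrack from the builder's
// requested parameters, then read back the values the track actually granted.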
aaudio_result_t AudioStreamTrack::open(const AudioStreamBuilder& builder)
{
    aaudio_result_t result = AAUDIO_OK;

    result = AudioStream::open(builder);
    if (result != AAUDIO_OK) {
        return result;
    }

    const aaudio_session_id_t requestedSessionId = builder.getSessionId();
    const audio_session_t sessionId = AAudioConvert_aaudioToAndroidSessionId(requestedSessionId);

    // Try to create an AudioTrack
    // Use stereo if unspecified.
    int32_t samplesPerFrame = (getSamplesPerFrame() == AAUDIO_UNSPECIFIED)
            ? 2 : getSamplesPerFrame();
    audio_channel_mask_t channelMask = samplesPerFrame <= 2 ?
            audio_channel_out_mask_from_count(samplesPerFrame) :
            audio_channel_mask_for_index_assignment_from_count(samplesPerFrame);

    audio_output_flags_t flags;
    aaudio_performance_mode_t perfMode = getPerformanceMode();
    switch(perfMode) {
        case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
            // Bypass the normal mixer and go straight to the FAST mixer.
            // If the app asks for a sessionId then it means they want to use effects.
            // So don't use RAW flag.
            flags = (audio_output_flags_t) ((requestedSessionId == AAUDIO_SESSION_ID_NONE)
                    ? (AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_RAW)
                    : (AUDIO_OUTPUT_FLAG_FAST));
            break;

        case AAUDIO_PERFORMANCE_MODE_POWER_SAVING:
            // This uses a mixer that wakes up less often than the FAST mixer.
            flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
            break;

        case AAUDIO_PERFORMANCE_MODE_NONE:
        default:
            // No flags. Use a normal mixer in front of the FAST mixer.
            flags = AUDIO_OUTPUT_FLAG_NONE;
            break;
    }

    size_t frameCount = (size_t)builder.getBufferCapacity();

    int32_t notificationFrames = 0;

    const audio_format_t format = (getFormat() == AUDIO_FORMAT_DEFAULT)
            ? AUDIO_FORMAT_PCM_FLOAT
            : getFormat();

    // Set up the callback if there is one.
    AudioTrack::callback_t callback = nullptr;
    void *callbackData = nullptr;
    // Note that TRANSFER_SYNC does not allow FAST track
    AudioTrack::transfer_type streamTransferType = AudioTrack::transfer_type::TRANSFER_SYNC;
    if (builder.getDataCallbackProc() != nullptr) {
        streamTransferType = AudioTrack::transfer_type::TRANSFER_CALLBACK;
        callback = getLegacyCallback();
        callbackData = this;

        // If the total buffer size is unspecified then base the size on the burst size.
        if (frameCount == 0
                && ((flags & AUDIO_OUTPUT_FLAG_FAST) != 0)) {
            // Take advantage of a special trick that allows us to create a buffer
            // that is some multiple of the burst size.
            notificationFrames = 0 - DEFAULT_BURSTS_PER_BUFFER_CAPACITY;
        } else {
            notificationFrames = builder.getFramesPerDataCallback();
        }
    }
    mCallbackBufferSize = builder.getFramesPerDataCallback();

    ALOGD("open(), request notificationFrames = %d, frameCount = %u",
          notificationFrames, (uint)frameCount);

    // Don't call mAudioTrack->setDeviceId() because it will be overwritten by set()!
    audio_port_handle_t selectedDeviceId = (getDeviceId() == AAUDIO_UNSPECIFIED)
                                           ? AUDIO_PORT_HANDLE_NONE
                                           : getDeviceId();

    const audio_content_type_t contentType =
            AAudioConvert_contentTypeToInternal(builder.getContentType());
    const audio_usage_t usage =
            AAudioConvert_usageToInternal(builder.getUsage());
    const audio_flags_mask_t attributesFlags =
            AAudioConvert_allowCapturePolicyToAudioFlagsMask(builder.getAllowedCapturePolicy());

    const audio_attributes_t attributes = {
            .content_type = contentType,
            .usage = usage,
            .source = AUDIO_SOURCE_DEFAULT, // only used for recording
            .flags = attributesFlags,
            .tags = ""
    };

    mAudioTrack = new AudioTrack();
    mAudioTrack->set(
            AUDIO_STREAM_DEFAULT,  // ignored because we pass attributes below
            getSampleRate(),
            format,
            channelMask,
            frameCount,
            flags,
            callback,
            callbackData,
            notificationFrames,
            0,       // DEFAULT sharedBuffer
            false,   // DEFAULT threadCanCallJava
            sessionId,
            streamTransferType,
            NULL,    // DEFAULT audio_offload_info_t
            AUDIO_UID_INVALID,     // DEFAULT uid
            -1,      // DEFAULT pid
            &attributes,
            // WARNING - If doNotReconnect set true then audio stops after plugging and unplugging
            // headphones a few times.
            false,   // DEFAULT doNotReconnect
            1.0f,    // DEFAULT maxRequiredSpeed
            selectedDeviceId
    );

    // Did we get a valid track?
    status_t status = mAudioTrack->initCheck();
    if (status != NO_ERROR) {
        close();
        ALOGE("open(), initCheck() returned %d", status);
        return AAudioConvert_androidToAAudioResult(status);
    }

    doSetVolume();

    // Get the actual values from the AudioTrack.
    setSamplesPerFrame(mAudioTrack->channelCount());
    setFormat(mAudioTrack->format());
    setDeviceFormat(mAudioTrack->format());

    int32_t actualSampleRate = mAudioTrack->getSampleRate();
    ALOGW_IF(actualSampleRate != getSampleRate(),
             "open() sampleRate changed from %d to %d",
             getSampleRate(), actualSampleRate);
    setSampleRate(actualSampleRate);

    // We may need to pass the data through a block size adapter to guarantee constant size.
    if (mCallbackBufferSize != AAUDIO_UNSPECIFIED) {
        int callbackSizeBytes = getBytesPerFrame() * mCallbackBufferSize;
        mFixedBlockReader.open(callbackSizeBytes);
        mBlockAdapter = &mFixedBlockReader;
    } else {
        mBlockAdapter = nullptr;
    }

    setState(AAUDIO_STREAM_STATE_OPEN);
    setDeviceId(mAudioTrack->getRoutedDeviceId());

    aaudio_session_id_t actualSessionId =
            (requestedSessionId == AAUDIO_SESSION_ID_NONE)
            ? AAUDIO_SESSION_ID_NONE
            : (aaudio_session_id_t) mAudioTrack->getSessionId();
    setSessionId(actualSessionId);

    mAudioTrack->addAudioDeviceCallback(mDeviceCallback);

    // Update performance mode based on the actual stream flags.
    // For example, if the sample rate is not allowed then you won't get a FAST track.
    audio_output_flags_t actualFlags = mAudioTrack->getFlags();
    aaudio_performance_mode_t actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
    // We may not get the RAW flag. But as long as we get the FAST flag we can call it LOW_LATENCY.
    if ((actualFlags & AUDIO_OUTPUT_FLAG_FAST) != 0) {
        actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
    } else if ((actualFlags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
        actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_POWER_SAVING;
    }
    setPerformanceMode(actualPerformanceMode);

    setSharingMode(AAUDIO_SHARING_MODE_SHARED); // EXCLUSIVE mode not supported in legacy

    // Log warning if we did not get what we asked for.
    ALOGW_IF(actualFlags != flags,
             "open() flags changed from 0x%08X to 0x%08X",
             flags, actualFlags);
    ALOGW_IF(actualPerformanceMode != perfMode,
             "open() perfMode changed from %d to %d",
             perfMode, actualPerformanceMode);

    return AAUDIO_OK;
}

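// Release legacy resources: unregister the device callback, mark the stream
// CLOSED, and close the fixed-block adapter.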
aaudio_result_t AudioStreamTrack::close()
{
    if (getState() != AAUDIO_STREAM_STATE_CLOSED) {
        mAudioTrack->removeAudioDeviceCallback(mDeviceCallback);
        setState(AAUDIO_STREAM_STATE_CLOSED);
    }
    mFixedBlockReader.close();
    return AAUDIO_OK;
}

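// Handle events from the AudioTrack callback thread and forward them
// to the shared legacy handler.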
void AudioStreamTrack::processCallback(int event, void *info) {

    switch (event) {
        case AudioTrack::EVENT_MORE_DATA:
            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_PROCESS_DATA, info);
            break;

        // Stream got rerouted so we disconnect.
        case AudioTrack::EVENT_NEW_IAUDIOTRACK:
            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_DISCONNECTED, info);
            break;

        default:
            break;
    }
    return;
}

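// Remember the current position, enable the callback, then start the AudioTrack.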
aaudio_result_t AudioStreamTrack::requestStart() {
    if (mAudioTrack.get() == nullptr) {
        ALOGE("requestStart() no AudioTrack");
        return AAUDIO_ERROR_INVALID_STATE;
    }
    // Get current position so we can detect when the track is playing.
    status_t err = mAudioTrack->getPosition(&mPositionWhenStarting);
    if (err != OK) {
        return AAudioConvert_androidToAAudioResult(err);
    }

    // Enable callback before starting AudioTrack to avoid shutting
    // down because of a race condition.
    mCallbackEnabled.store(true);
    err = mAudioTrack->start();
    if (err != OK) {
        return AAudioConvert_androidToAAudioResult(err);
    } else {
        setState(AAUDIO_STREAM_STATE_STARTING);
    }
    return AAUDIO_OK;
}

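// Pause the AudioTrack and remember its position so updateStateMachine()
// can tell when it has actually stopped advancing.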
aaudio_result_t AudioStreamTrack::requestPause() {
    if (mAudioTrack.get() == nullptr) {
        ALOGE("%s() no AudioTrack", __func__);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    setState(AAUDIO_STREAM_STATE_PAUSING);
    mAudioTrack->pause();
    mCallbackEnabled.store(false);
    status_t err = mAudioTrack->getPosition(&mPositionWhenPausing);
    if (err != OK) {
        return AAudioConvert_androidToAAudioResult(err);
    }
    return checkForDisconnectRequest(false);
}

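// Flush the AudioTrack and advance the read counter to match the frames written.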
aaudio_result_t AudioStreamTrack::requestFlush() {
    if (mAudioTrack.get() == nullptr) {
        ALOGE("%s() no AudioTrack", __func__);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    setState(AAUDIO_STREAM_STATE_FLUSHING);
    incrementFramesRead(getFramesWritten() - getFramesRead());
    mAudioTrack->flush();
    mFramesRead.reset32(); // service reads frames, service position reset on flush
    mTimestampPosition.reset32();
    return AAUDIO_OK;
}

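// Stop the AudioTrack; catch the read and timestamp counters up to the
// frames written because the service position resets on stop.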
aaudio_result_t AudioStreamTrack::requestStop() {
    if (mAudioTrack.get() == nullptr) {
        ALOGE("%s() no AudioTrack", __func__);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    setState(AAUDIO_STREAM_STATE_STOPPING);
    mFramesRead.catchUpTo(getFramesWritten());
    mTimestampPosition.catchUpTo(getFramesWritten());
    mFramesRead.reset32(); // service reads frames, service position reset on stop
    mTimestampPosition.reset32();
    mAudioTrack->stop();
    mCallbackEnabled.store(false);
    return checkForDisconnectRequest(false);
}

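// Poll the AudioTrack to decide when a transitional state
// (STARTING, PAUSING, FLUSHING, STOPPING) has completed.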
aaudio_result_t AudioStreamTrack::updateStateMachine()
{
    status_t err;
    aaudio_wrapping_frames_t position;
    switch (getState()) {
    // TODO add better state visibility to AudioTrack
    case AAUDIO_STREAM_STATE_STARTING:
        if (mAudioTrack->hasStarted()) {
            setState(AAUDIO_STREAM_STATE_STARTED);
        }
        break;
    case AAUDIO_STREAM_STATE_PAUSING:
        if (mAudioTrack->stopped()) {
            err = mAudioTrack->getPosition(&position);
            if (err != OK) {
                return AAudioConvert_androidToAAudioResult(err);
            } else if (position == mPositionWhenPausing) {
                // Has stream really stopped advancing?
                setState(AAUDIO_STREAM_STATE_PAUSED);
            }
            mPositionWhenPausing = position;
        }
        break;
    case AAUDIO_STREAM_STATE_FLUSHING:
        {
            err = mAudioTrack->getPosition(&position);
            if (err != OK) {
                return AAudioConvert_androidToAAudioResult(err);
            } else if (position == 0) {
                // TODO Advance frames read to match written.
                setState(AAUDIO_STREAM_STATE_FLUSHED);
            }
        }
        break;
    case AAUDIO_STREAM_STATE_STOPPING:
        if (mAudioTrack->stopped()) {
            setState(AAUDIO_STREAM_STATE_STOPPED);
        }
        break;
    default:
        break;
    }
    return AAUDIO_OK;
}

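// Blocking or non-blocking write depending on the timeout.
// Returns the number of frames written, or a negative AAudio error.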
aaudio_result_t AudioStreamTrack::write(const void *buffer,
                                        int32_t numFrames,
                                        int64_t timeoutNanoseconds)
{
    int32_t bytesPerFrame = getBytesPerFrame();
    int32_t numBytes;
    aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerFrame, &numBytes);
    if (result != AAUDIO_OK) {
        return result;
    }

    if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED) {
        return AAUDIO_ERROR_DISCONNECTED;
    }

    // TODO add timeout to AudioTrack
    bool blocking = timeoutNanoseconds > 0;
    ssize_t bytesWritten = mAudioTrack->write(buffer, numBytes, blocking);
    if (bytesWritten == WOULD_BLOCK) {
        return 0;
    } else if (bytesWritten < 0) {
        ALOGE("invalid write, returned %d", (int)bytesWritten);
        // in this context, a DEAD_OBJECT is more likely to be a disconnect notification due to
        // AudioTrack invalidation
        if (bytesWritten == DEAD_OBJECT) {
            setState(AAUDIO_STREAM_STATE_DISCONNECTED);
            return AAUDIO_ERROR_DISCONNECTED;
        }
        return AAudioConvert_androidToAAudioResult(bytesWritten);
    }
    int32_t framesWritten = (int32_t)(bytesWritten / bytesPerFrame);
    incrementFramesWritten(framesWritten);

    result = updateStateMachine();
    if (result != AAUDIO_OK) {
        return result;
    }

    return framesWritten;
}

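// Request a new buffer size, clipped to at least one burst.
// Returns the size actually granted by the AudioTrack.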
aaudio_result_t AudioStreamTrack::setBufferSize(int32_t requestedFrames)
{
    // Do not ask for less than one burst.
    if (requestedFrames < getFramesPerBurst()) {
        requestedFrames = getFramesPerBurst();
    }
    ssize_t result = mAudioTrack->setBufferSizeInFrames(requestedFrames);
    if (result < 0) {
        return AAudioConvert_androidToAAudioResult(result);
    } else {
        return result;
    }
}

int32_t AudioStreamTrack::getBufferSize() const
{
    return static_cast<int32_t>(mAudioTrack->getBufferSizeInFrames());
}

int32_t AudioStreamTrack::getBufferCapacity() const
{
    return static_cast<int32_t>(mAudioTrack->frameCount());
}

int32_t AudioStreamTrack::getXRunCount() const
{
    return static_cast<int32_t>(mAudioTrack->getUnderrunCount());
}

int32_t AudioStreamTrack::getFramesPerBurst() const
{
    return static_cast<int32_t>(mAudioTrack->getNotificationPeriodInFrames());
}

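// Update the read counter from the AudioTrack position while the track is in
// an active state, then return the 64-bit extended position.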
int64_t AudioStreamTrack::getFramesRead() {
    aaudio_wrapping_frames_t position;
    status_t result;
    switch (getState()) {
    case AAUDIO_STREAM_STATE_STARTING:
    case AAUDIO_STREAM_STATE_STARTED:
    case AAUDIO_STREAM_STATE_STOPPING:
    case AAUDIO_STREAM_STATE_PAUSING:
    case AAUDIO_STREAM_STATE_PAUSED:
        result = mAudioTrack->getPosition(&position);
        if (result == OK) {
            mFramesRead.update32(position);
        }
        break;
    default:
        break;
    }
    return AudioStreamLegacy::getFramesRead();
}

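// Get a recent frame position / time pair from the AudioTrack.
// A position at or beyond the frames written is reported as INVALID_STATE.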
aaudio_result_t AudioStreamTrack::getTimestamp(clockid_t clockId,
                                               int64_t *framePosition,
                                               int64_t *timeNanoseconds) {
    ExtendedTimestamp extendedTimestamp;
    status_t status = mAudioTrack->getTimestamp(&extendedTimestamp);
    if (status == WOULD_BLOCK) {
        return AAUDIO_ERROR_INVALID_STATE;
    } else if (status != NO_ERROR) {
        return AAudioConvert_androidToAAudioResult(status);
    }
    int64_t position = 0;
    int64_t nanoseconds = 0;
    aaudio_result_t result = getBestTimestamp(clockId, &position,
                                              &nanoseconds, &extendedTimestamp);
    if (result == AAUDIO_OK) {
        if (position < getFramesWritten()) {
            *framePosition = position;
            *timeNanoseconds = nanoseconds;
            return result;
        } else {
            return AAUDIO_ERROR_INVALID_STATE; // TODO review, documented but not consistent
        }
    }
    return result;
}

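// Apply the current duck-and-mute volume to both channels of the AudioTrack.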
status_t AudioStreamTrack::doSetVolume() {
    status_t status = NO_INIT;
    if (mAudioTrack.get() != nullptr) {
        float volume = getDuckAndMuteVolume();
        mAudioTrack->setVolume(volume, volume);
        status = NO_ERROR;
    }
    return status;
}

#if AAUDIO_USE_VOLUME_SHAPER

using namespace android::media::VolumeShaper;

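// Forward a VolumeShaper configuration and operation to the AudioTrack, if one exists.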
binder::Status AudioStreamTrack::applyVolumeShaper(
        const VolumeShaper::Configuration& configuration,
        const VolumeShaper::Operation& operation) {

    sp<VolumeShaper::Configuration> spConfiguration = new VolumeShaper::Configuration(configuration);
    sp<VolumeShaper::Operation> spOperation = new VolumeShaper::Operation(operation);

    if (mAudioTrack.get() != nullptr) {
        ALOGD("applyVolumeShaper() from IPlayer");
        binder::Status status = mAudioTrack->applyVolumeShaper(spConfiguration, spOperation);
        if (status < 0) { // a non-negative value is the volume shaper id.
            ALOGE("applyVolumeShaper() failed with status %d", status);
        }
        return binder::Status::fromStatusT(status);
    } else {
        ALOGD("applyVolumeShaper()"
              " no AudioTrack for volume control from IPlayer");
        return binder::Status::ok();
    }
}
#endif