1 /*
2 **
3 ** Copyright 2007, The Android Open Source Project
4 **
5 ** Licensed under the Apache License, Version 2.0 (the "License");
6 ** you may not use this file except in compliance with the License.
7 ** You may obtain a copy of the License at
8 **
9 ** http://www.apache.org/licenses/LICENSE-2.0
10 **
11 ** Unless required by applicable law or agreed to in writing, software
12 ** distributed under the License is distributed on an "AS IS" BASIS,
13 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 ** See the License for the specific language governing permissions and
15 ** limitations under the License.
16 */
17
18 //#define LOG_NDEBUG 0
19 #define LOG_TAG "AudioTrack"
20
21 #include <inttypes.h>
22 #include <math.h>
23 #include <sys/resource.h>
24
25 #include <audio_utils/primitives.h>
26 #include <binder/IPCThreadState.h>
27 #include <media/AudioTrack.h>
28 #include <utils/Log.h>
29 #include <private/media/AudioTrackShared.h>
30 #include <media/IAudioFlinger.h>
31 #include <media/AudioPolicyHelper.h>
32 #include <media/AudioResamplerPublic.h>
33
34 #define WAIT_PERIOD_MS 10
35 #define WAIT_STREAM_END_TIMEOUT_SEC 120
36 static const int kMaxLoopCountNotifications = 32;
37
38 namespace android {
39 // ---------------------------------------------------------------------------
40
41 // TODO: Move to a separate .h
42
43 template <typename T>
44 static inline const T &min(const T &x, const T &y) {
45 return x < y ? x : y;
46 }
47
48 template <typename T>
49 static inline const T &max(const T &x, const T &y) {
50 return x > y ? x : y;
51 }
52
53 static inline nsecs_t framesToNanoseconds(ssize_t frames, uint32_t sampleRate, float speed)
54 {
55 return ((double)frames * 1000000000) / ((double)sampleRate * speed);
56 }
57
58 static int64_t convertTimespecToUs(const struct timespec &tv)
59 {
60 return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
61 }
62
63 // current monotonic time in microseconds.
64 static int64_t getNowUs()
65 {
66 struct timespec tv;
67 (void) clock_gettime(CLOCK_MONOTONIC, &tv);
68 return convertTimespecToUs(tv);
69 }
70
71 // FIXME: we don't use the pitch setting in the time stretcher (not working);
72 // instead we emulate it using our sample rate converter.
73 static const bool kFixPitch = true; // enable pitch fix
74 static inline uint32_t adjustSampleRate(uint32_t sampleRate, float pitch)
75 {
76 return kFixPitch ? (sampleRate * pitch + 0.5) : sampleRate;
77 }
78
79 static inline float adjustSpeed(float speed, float pitch)
80 {
81 return kFixPitch ? speed / max(pitch, AUDIO_TIMESTRETCH_PITCH_MIN_DELTA) : speed;
82 }
83
84 static inline float adjustPitch(float pitch)
85 {
86 return kFixPitch ? AUDIO_TIMESTRETCH_PITCH_NORMAL : pitch;
87 }
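
// Worked example (illustrative numbers): playing a 48000 Hz track at speed 1.0
// with pitch 1.5 yields adjustSampleRate(48000, 1.5f) == 72000, an effective
// speed of adjustSpeed(1.0f, 1.5f) == ~0.667, and adjustPitch() == 1.0 reported
// to the time stretcher; the combination approximates the requested pitch shift
// using the sample rate converter alone.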
88
89 // Must match similar computation in createTrack_l in Threads.cpp.
90 // TODO: Move to a common library
91 static size_t calculateMinFrameCount(
92 uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
93 uint32_t sampleRate, float speed /*, uint32_t notificationsPerBufferReq*/)
94 {
95 // Ensure that buffer depth covers at least audio hardware latency
96 uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
97 if (minBufCount < 2) {
98 minBufCount = 2;
99 }
100 #if 0
101 // The notificationsPerBufferReq parameter is not yet used for non-fast tracks,
102 // but keeping the code here to make it easier to add later.
103 if (minBufCount < notificationsPerBufferReq) {
104 minBufCount = notificationsPerBufferReq;
105 }
106 #endif
107 ALOGV("calculateMinFrameCount afLatency %u afFrameCount %u afSampleRate %u "
108 "sampleRate %u speed %f minBufCount: %u" /*" notificationsPerBufferReq %u"*/,
109 afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount
110 /*, notificationsPerBufferReq*/);
111 return minBufCount * sourceFramesNeededWithTimestretch(
112 sampleRate, afFrameCount, afSampleRate, speed);
113 }
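
// Example calculation (illustrative numbers only): with afLatencyMs = 80,
// afFrameCount = 960, afSampleRate = 48000 and speed = 1.0, one mixer buffer
// lasts (1000 * 960) / 48000 = 20 ms, so minBufCount = 80 / 20 = 4. A 48000 Hz
// client therefore needs 4 * 960 = 3840 frames; a 44100 Hz client needs
// proportionally fewer source frames per mixer buffer, as computed by
// sourceFramesNeededWithTimestretch().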
114
115 // static
116 status_t AudioTrack::getMinFrameCount(
117 size_t* frameCount,
118 audio_stream_type_t streamType,
119 uint32_t sampleRate)
120 {
121 if (frameCount == NULL) {
122 return BAD_VALUE;
123 }
124
125 // FIXME handle in server, like createTrack_l(), possible missing info:
126 // audio_io_handle_t output
127 // audio_format_t format
128 // audio_channel_mask_t channelMask
129 // audio_output_flags_t flags (FAST)
130 uint32_t afSampleRate;
131 status_t status;
132 status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
133 if (status != NO_ERROR) {
134 ALOGE("Unable to query output sample rate for stream type %d; status %d",
135 streamType, status);
136 return status;
137 }
138 size_t afFrameCount;
139 status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
140 if (status != NO_ERROR) {
141 ALOGE("Unable to query output frame count for stream type %d; status %d",
142 streamType, status);
143 return status;
144 }
145 uint32_t afLatency;
146 status = AudioSystem::getOutputLatency(&afLatency, streamType);
147 if (status != NO_ERROR) {
148 ALOGE("Unable to query output latency for stream type %d; status %d",
149 streamType, status);
150 return status;
151 }
152
153 // When called from createTrack, speed is 1.0f (normal speed).
154 // This is rechecked again on setting playback rate (TODO: on setting sample rate, too).
155 *frameCount = calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, 1.0f
156 /*, 0 notificationsPerBufferReq*/);
157
158 // The formula above should always produce a non-zero value under normal circumstances:
159 // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
160 // Return error in the unlikely event that it does not, as that's part of the API contract.
161 if (*frameCount == 0) {
162 ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %u",
163 streamType, sampleRate);
164 return BAD_VALUE;
165 }
166 ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
167 *frameCount, afFrameCount, afSampleRate, afLatency);
168 return NO_ERROR;
169 }
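
// Usage sketch (illustrative; the stream type and sample rate are arbitrary):
//
//   size_t minFrames = 0;
//   status_t res = AudioTrack::getMinFrameCount(&minFrames,
//                                               AUDIO_STREAM_MUSIC,
//                                               44100 /* Hz */);
//   if (res == NO_ERROR) {
//       // Request at least minFrames (typically a small multiple of it)
//       // when constructing the AudioTrack.
//   }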
170
171 // ---------------------------------------------------------------------------
172
173 AudioTrack::AudioTrack()
174 : mStatus(NO_INIT),
175 mState(STATE_STOPPED),
176 mPreviousPriority(ANDROID_PRIORITY_NORMAL),
177 mPreviousSchedulingGroup(SP_DEFAULT),
178 mPausedPosition(0),
179 mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
180 {
181 mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
182 mAttributes.usage = AUDIO_USAGE_UNKNOWN;
183 mAttributes.flags = 0x0;
184 strcpy(mAttributes.tags, "");
185 }
186
187 AudioTrack::AudioTrack(
188 audio_stream_type_t streamType,
189 uint32_t sampleRate,
190 audio_format_t format,
191 audio_channel_mask_t channelMask,
192 size_t frameCount,
193 audio_output_flags_t flags,
194 callback_t cbf,
195 void* user,
196 int32_t notificationFrames,
197 audio_session_t sessionId,
198 transfer_type transferType,
199 const audio_offload_info_t *offloadInfo,
200 int uid,
201 pid_t pid,
202 const audio_attributes_t* pAttributes,
203 bool doNotReconnect,
204 float maxRequiredSpeed)
205 : mStatus(NO_INIT),
206 mState(STATE_STOPPED),
207 mPreviousPriority(ANDROID_PRIORITY_NORMAL),
208 mPreviousSchedulingGroup(SP_DEFAULT),
209 mPausedPosition(0),
210 mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
211 {
212 mStatus = set(streamType, sampleRate, format, channelMask,
213 frameCount, flags, cbf, user, notificationFrames,
214 0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
215 offloadInfo, uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
216 }
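
// Construction sketch (illustrative; audioCallback and cookie are hypothetical
// client code, error handling is omitted, and default trailing parameters are
// assumed):
//
//   sp<AudioTrack> track = new AudioTrack(
//           AUDIO_STREAM_MUSIC,
//           48000,                      // sample rate in Hz
//           AUDIO_FORMAT_PCM_16_BIT,
//           AUDIO_CHANNEL_OUT_STEREO,
//           0,                          // frameCount: let the library choose
//           AUDIO_OUTPUT_FLAG_NONE,
//           audioCallback,              // non-NULL cbf selects TRANSFER_CALLBACK
//           cookie);
//   if (track->initCheck() == NO_ERROR) {
//       track->start();
//   }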
217
218 AudioTrack::AudioTrack(
219 audio_stream_type_t streamType,
220 uint32_t sampleRate,
221 audio_format_t format,
222 audio_channel_mask_t channelMask,
223 const sp<IMemory>& sharedBuffer,
224 audio_output_flags_t flags,
225 callback_t cbf,
226 void* user,
227 int32_t notificationFrames,
228 audio_session_t sessionId,
229 transfer_type transferType,
230 const audio_offload_info_t *offloadInfo,
231 int uid,
232 pid_t pid,
233 const audio_attributes_t* pAttributes,
234 bool doNotReconnect,
235 float maxRequiredSpeed)
236 : mStatus(NO_INIT),
237 mState(STATE_STOPPED),
238 mPreviousPriority(ANDROID_PRIORITY_NORMAL),
239 mPreviousSchedulingGroup(SP_DEFAULT),
240 mPausedPosition(0),
241 mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
242 {
243 mStatus = set(streamType, sampleRate, format, channelMask,
244 0 /*frameCount*/, flags, cbf, user, notificationFrames,
245 sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
246 uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
247 }
248
249 AudioTrack::~AudioTrack()
250 {
251 if (mStatus == NO_ERROR) {
252 // Make sure that callback function exits in the case where
253 // it is looping on buffer full condition in obtainBuffer().
254 // Otherwise the callback thread will never exit.
255 stop();
256 if (mAudioTrackThread != 0) {
257 mProxy->interrupt();
258 mAudioTrackThread->requestExit(); // see comment in AudioTrack.h
259 mAudioTrackThread->requestExitAndWait();
260 mAudioTrackThread.clear();
261 }
262 // No lock here: worst case we remove a NULL callback which will be a nop
263 if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
264 AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
265 }
266 IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
267 mAudioTrack.clear();
268 mCblkMemory.clear();
269 mSharedBuffer.clear();
270 IPCThreadState::self()->flushCommands();
271 ALOGV("~AudioTrack, releasing session id %d from %d on behalf of %d",
272 mSessionId, IPCThreadState::self()->getCallingPid(), mClientPid);
273 AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
274 }
275 }
276
277 status_t AudioTrack::set(
278 audio_stream_type_t streamType,
279 uint32_t sampleRate,
280 audio_format_t format,
281 audio_channel_mask_t channelMask,
282 size_t frameCount,
283 audio_output_flags_t flags,
284 callback_t cbf,
285 void* user,
286 int32_t notificationFrames,
287 const sp<IMemory>& sharedBuffer,
288 bool threadCanCallJava,
289 audio_session_t sessionId,
290 transfer_type transferType,
291 const audio_offload_info_t *offloadInfo,
292 int uid,
293 pid_t pid,
294 const audio_attributes_t* pAttributes,
295 bool doNotReconnect,
296 float maxRequiredSpeed)
297 {
298 ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
299 "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
300 streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
301 sessionId, transferType, uid, pid);
302
303 mThreadCanCallJava = threadCanCallJava;
304
305 switch (transferType) {
306 case TRANSFER_DEFAULT:
307 if (sharedBuffer != 0) {
308 transferType = TRANSFER_SHARED;
309 } else if (cbf == NULL || threadCanCallJava) {
310 transferType = TRANSFER_SYNC;
311 } else {
312 transferType = TRANSFER_CALLBACK;
313 }
314 break;
315 case TRANSFER_CALLBACK:
316 if (cbf == NULL || sharedBuffer != 0) {
317 ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
318 return BAD_VALUE;
319 }
320 break;
321 case TRANSFER_OBTAIN:
322 case TRANSFER_SYNC:
323 if (sharedBuffer != 0) {
324 ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
325 return BAD_VALUE;
326 }
327 break;
328 case TRANSFER_SHARED:
329 if (sharedBuffer == 0) {
330 ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
331 return BAD_VALUE;
332 }
333 break;
334 default:
335 ALOGE("Invalid transfer type %d", transferType);
336 return BAD_VALUE;
337 }
338 mSharedBuffer = sharedBuffer;
339 mTransfer = transferType;
340 mDoNotReconnect = doNotReconnect;
341
342 ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
343 sharedBuffer->size());
344
345 ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);
346
347 // invariant that mAudioTrack != 0 is true only after set() returns successfully
348 if (mAudioTrack != 0) {
349 ALOGE("Track already in use");
350 return INVALID_OPERATION;
351 }
352
353 // handle default values first.
354 if (streamType == AUDIO_STREAM_DEFAULT) {
355 streamType = AUDIO_STREAM_MUSIC;
356 }
357 if (pAttributes == NULL) {
358 if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
359 ALOGE("Invalid stream type %d", streamType);
360 return BAD_VALUE;
361 }
362 mStreamType = streamType;
363
364 } else {
365 // stream type shouldn't be looked at, this track has audio attributes
366 memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
367 ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
368 mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
369 mStreamType = AUDIO_STREAM_DEFAULT;
370 if ((mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
371 flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
372 }
373 if ((mAttributes.flags & AUDIO_FLAG_LOW_LATENCY) != 0) {
374 flags = (audio_output_flags_t) (flags | AUDIO_OUTPUT_FLAG_FAST);
375 }
376 }
377
378 // these below should probably come from the audioFlinger too...
379 if (format == AUDIO_FORMAT_DEFAULT) {
380 format = AUDIO_FORMAT_PCM_16_BIT;
381 } else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
382 mAttributes.flags |= AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
383 }
384
385 // validate parameters
386 if (!audio_is_valid_format(format)) {
387 ALOGE("Invalid format %#x", format);
388 return BAD_VALUE;
389 }
390 mFormat = format;
391
392 if (!audio_is_output_channel(channelMask)) {
393 ALOGE("Invalid channel mask %#x", channelMask);
394 return BAD_VALUE;
395 }
396 mChannelMask = channelMask;
397 uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
398 mChannelCount = channelCount;
399
400 // force direct flag if format is not linear PCM
401 // or offload was requested
402 if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
403 || !audio_is_linear_pcm(format)) {
404 ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
405 ? "Offload request, forcing to Direct Output"
406 : "Not linear PCM, forcing to Direct Output");
407 flags = (audio_output_flags_t)
408 // FIXME why can't we allow direct AND fast?
409 ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
410 }
411
412 // force direct flag if HW A/V sync requested
413 if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
414 flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
415 }
416
417 if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
418 if (audio_has_proportional_frames(format)) {
419 mFrameSize = channelCount * audio_bytes_per_sample(format);
420 } else {
421 mFrameSize = sizeof(uint8_t);
422 }
423 } else {
424 ALOG_ASSERT(audio_has_proportional_frames(format));
425 mFrameSize = channelCount * audio_bytes_per_sample(format);
426 // createTrack will return an error if PCM format is not supported by server,
427 // so no need to check for specific PCM formats here
428 }
429
430 // sampling rate must be specified for direct outputs
431 if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
432 return BAD_VALUE;
433 }
434 mSampleRate = sampleRate;
435 mOriginalSampleRate = sampleRate;
436 mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
437 // 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
438 mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);
439
440 // Make copy of input parameter offloadInfo so that in the future:
441 // (a) createTrack_l doesn't need it as an input parameter
442 // (b) we can support re-creation of offloaded tracks
443 if (offloadInfo != NULL) {
444 mOffloadInfoCopy = *offloadInfo;
445 mOffloadInfo = &mOffloadInfoCopy;
446 } else {
447 mOffloadInfo = NULL;
448 }
449
450 mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
451 mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
452 mSendLevel = 0.0f;
453 // mFrameCount is initialized in createTrack_l
454 mReqFrameCount = frameCount;
455 if (notificationFrames >= 0) {
456 mNotificationFramesReq = notificationFrames;
457 mNotificationsPerBufferReq = 0;
458 } else {
459 if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
460 ALOGE("notificationFrames=%d not permitted for non-fast track",
461 notificationFrames);
462 return BAD_VALUE;
463 }
464 if (frameCount > 0) {
465 ALOGE("notificationFrames=%d not permitted with non-zero frameCount=%zu",
466 notificationFrames, frameCount);
467 return BAD_VALUE;
468 }
469 mNotificationFramesReq = 0;
470 const uint32_t minNotificationsPerBuffer = 1;
471 const uint32_t maxNotificationsPerBuffer = 8;
472 mNotificationsPerBufferReq = min(maxNotificationsPerBuffer,
473 max((uint32_t) -notificationFrames, minNotificationsPerBuffer));
474 ALOGW_IF(mNotificationsPerBufferReq != (uint32_t) -notificationFrames,
475 "notificationFrames=%d clamped to the range -%u to -%u",
476 notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
477 }
478 mNotificationFramesAct = 0;
479 if (sessionId == AUDIO_SESSION_ALLOCATE) {
480 mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
481 } else {
482 mSessionId = sessionId;
483 }
484 int callingpid = IPCThreadState::self()->getCallingPid();
485 int mypid = getpid();
486 if (uid == -1 || (callingpid != mypid)) {
487 mClientUid = IPCThreadState::self()->getCallingUid();
488 } else {
489 mClientUid = uid;
490 }
491 if (pid == -1 || (callingpid != mypid)) {
492 mClientPid = callingpid;
493 } else {
494 mClientPid = pid;
495 }
496 mAuxEffectId = 0;
497 mOrigFlags = mFlags = flags;
498 mCbf = cbf;
499
500 if (cbf != NULL) {
501 mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
502 mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
503 // thread begins in paused state, and will not reference us until start()
504 }
505
506 // create the IAudioTrack
507 status_t status = createTrack_l();
508
509 if (status != NO_ERROR) {
510 if (mAudioTrackThread != 0) {
511 mAudioTrackThread->requestExit(); // see comment in AudioTrack.h
512 mAudioTrackThread->requestExitAndWait();
513 mAudioTrackThread.clear();
514 }
515 return status;
516 }
517
518 mStatus = NO_ERROR;
519 mUserData = user;
520 mLoopCount = 0;
521 mLoopStart = 0;
522 mLoopEnd = 0;
523 mLoopCountNotified = 0;
524 mMarkerPosition = 0;
525 mMarkerReached = false;
526 mNewPosition = 0;
527 mUpdatePeriod = 0;
528 mPosition = 0;
529 mReleased = 0;
530 mStartUs = 0;
531 AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
532 mSequence = 1;
533 mObservedSequence = mSequence;
534 mInUnderrun = false;
535 mPreviousTimestampValid = false;
536 mTimestampStartupGlitchReported = false;
537 mRetrogradeMotionReported = false;
538 mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
539 mUnderrunCountOffset = 0;
540 mFramesWritten = 0;
541 mFramesWrittenServerOffset = 0;
542
543 return NO_ERROR;
544 }
545
546 // -------------------------------------------------------------------------
547
548 status_t AudioTrack::start()
549 {
550 AutoMutex lock(mLock);
551
552 if (mState == STATE_ACTIVE) {
553 return INVALID_OPERATION;
554 }
555
556 mInUnderrun = true;
557
558 State previousState = mState;
559 if (previousState == STATE_PAUSED_STOPPING) {
560 mState = STATE_STOPPING;
561 } else {
562 mState = STATE_ACTIVE;
563 }
564 (void) updateAndGetPosition_l();
565 if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
566 // reset current position as seen by client to 0
567 mPosition = 0;
568 mPreviousTimestampValid = false;
569 mTimestampStartupGlitchReported = false;
570 mRetrogradeMotionReported = false;
571 mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
572
573 // read last server side position change via timestamp.
574 ExtendedTimestamp ets;
575 if (mProxy->getTimestamp(&ets) == OK &&
576 ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] > 0) {
577 // Server side has consumed something, but is it finished consuming?
578 // It is possible since flush and stop are asynchronous that the server
579 // is still active at this point.
580 ALOGV("start: server read:%lld cumulative flushed:%lld client written:%lld",
581 (long long)(mFramesWrittenServerOffset
582 + ets.mPosition[ExtendedTimestamp::LOCATION_SERVER]),
583 (long long)ets.mFlushed,
584 (long long)mFramesWritten);
585 mFramesWrittenServerOffset = -ets.mPosition[ExtendedTimestamp::LOCATION_SERVER];
586 }
587 mFramesWritten = 0;
588 mProxy->clearTimestamp(); // need new server push for valid timestamp
589 mMarkerReached = false;
590
591 // For offloaded tracks, we don't know if the hardware counters are really zero here,
592 // since the flush is asynchronous and stop may not fully drain.
593 // We save the time when the track is started to later verify whether
594 // the counters are realistic (i.e. start from zero after this time).
595 mStartUs = getNowUs();
596
597 // force refresh of remaining frames by processAudioBuffer() as last
598 // write before stop could be partial.
599 mRefreshRemaining = true;
600 }
601 mNewPosition = mPosition + mUpdatePeriod;
602 int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
603
604 status_t status = NO_ERROR;
605 if (!(flags & CBLK_INVALID)) {
606 status = mAudioTrack->start();
607 if (status == DEAD_OBJECT) {
608 flags |= CBLK_INVALID;
609 }
610 }
611 if (flags & CBLK_INVALID) {
612 status = restoreTrack_l("start");
613 }
614
615 // resume or pause the callback thread as needed.
616 sp<AudioTrackThread> t = mAudioTrackThread;
617 if (status == NO_ERROR) {
618 if (t != 0) {
619 if (previousState == STATE_STOPPING) {
620 mProxy->interrupt();
621 } else {
622 t->resume();
623 }
624 } else {
625 mPreviousPriority = getpriority(PRIO_PROCESS, 0);
626 get_sched_policy(0, &mPreviousSchedulingGroup);
627 androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
628 }
629 } else {
630 ALOGE("start() status %d", status);
631 mState = previousState;
632 if (t != 0) {
633 if (previousState != STATE_STOPPING) {
634 t->pause();
635 }
636 } else {
637 setpriority(PRIO_PROCESS, 0, mPreviousPriority);
638 set_sched_policy(0, mPreviousSchedulingGroup);
639 }
640 }
641
642 return status;
643 }
644
645 void AudioTrack::stop()
646 {
647 AutoMutex lock(mLock);
648 if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
649 return;
650 }
651
652 if (isOffloaded_l()) {
653 mState = STATE_STOPPING;
654 } else {
655 mState = STATE_STOPPED;
656 mReleased = 0;
657 }
658
659 mProxy->interrupt();
660 mAudioTrack->stop();
661
662 // Note: legacy handling - stop does not clear playback marker
663 // and periodic update counter, but flush does for streaming tracks.
664
665 if (mSharedBuffer != 0) {
666 // clear buffer position and loop count.
667 mStaticProxy->setBufferPositionAndLoop(0 /* position */,
668 0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
669 }
670
671 sp<AudioTrackThread> t = mAudioTrackThread;
672 if (t != 0) {
673 if (!isOffloaded_l()) {
674 t->pause();
675 }
676 } else {
677 setpriority(PRIO_PROCESS, 0, mPreviousPriority);
678 set_sched_policy(0, mPreviousSchedulingGroup);
679 }
680 }
681
682 bool AudioTrack::stopped() const
683 {
684 AutoMutex lock(mLock);
685 return mState != STATE_ACTIVE;
686 }
687
688 void AudioTrack::flush()
689 {
690 if (mSharedBuffer != 0) {
691 return;
692 }
693 AutoMutex lock(mLock);
694 if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
695 return;
696 }
697 flush_l();
698 }
699
700 void AudioTrack::flush_l()
701 {
702 ALOG_ASSERT(mState != STATE_ACTIVE);
703
704 // clear playback marker and periodic update counter
705 mMarkerPosition = 0;
706 mMarkerReached = false;
707 mUpdatePeriod = 0;
708 mRefreshRemaining = true;
709
710 mState = STATE_FLUSHED;
711 mReleased = 0;
712 if (isOffloaded_l()) {
713 mProxy->interrupt();
714 }
715 mProxy->flush();
716 mAudioTrack->flush();
717 }
718
719 void AudioTrack::pause()
720 {
721 AutoMutex lock(mLock);
722 if (mState == STATE_ACTIVE) {
723 mState = STATE_PAUSED;
724 } else if (mState == STATE_STOPPING) {
725 mState = STATE_PAUSED_STOPPING;
726 } else {
727 return;
728 }
729 mProxy->interrupt();
730 mAudioTrack->pause();
731
732 if (isOffloaded_l()) {
733 if (mOutput != AUDIO_IO_HANDLE_NONE) {
734 // An offload output can be re-used between two audio tracks having
735 // the same configuration. A timestamp query for a paused track
736 // while the other is running would return an incorrect time.
737 // To fix this, cache the playback position on a pause() and return
738 // this time when requested until the track is resumed.
739
740 // OffloadThread sends HAL pause in its threadLoop. Time saved
741 // here can be slightly off.
742
743 // TODO: check return code for getRenderPosition.
744
745 uint32_t halFrames;
746 AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
747 ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
748 }
749 }
750 }
751
752 status_t AudioTrack::setVolume(float left, float right)
753 {
754 // This duplicates a test by AudioTrack JNI, but that is not the only caller
755 if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
756 isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
757 return BAD_VALUE;
758 }
759
760 AutoMutex lock(mLock);
761 mVolume[AUDIO_INTERLEAVE_LEFT] = left;
762 mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
763
764 mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));
765
766 if (isOffloaded_l()) {
767 mAudioTrack->signal();
768 }
769 return NO_ERROR;
770 }
771
772 status_t AudioTrack::setVolume(float volume)
773 {
774 return setVolume(volume, volume);
775 }
776
777 status_t AudioTrack::setAuxEffectSendLevel(float level)
778 {
779 // This duplicates a test by AudioTrack JNI, but that is not the only caller
780 if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
781 return BAD_VALUE;
782 }
783
784 AutoMutex lock(mLock);
785 mSendLevel = level;
786 mProxy->setSendLevel(level);
787
788 return NO_ERROR;
789 }
790
791 void AudioTrack::getAuxEffectSendLevel(float* level) const
792 {
793 if (level != NULL) {
794 *level = mSendLevel;
795 }
796 }
797
798 status_t AudioTrack::setSampleRate(uint32_t rate)
799 {
800 AutoMutex lock(mLock);
801 if (rate == mSampleRate) {
802 return NO_ERROR;
803 }
804 if (isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
805 return INVALID_OPERATION;
806 }
807 if (mOutput == AUDIO_IO_HANDLE_NONE) {
808 return NO_INIT;
809 }
810 // NOTE: it is theoretically possible, but highly unlikely, that a device change
811 // could mean a previously allowed sampling rate is no longer allowed.
812 uint32_t afSamplingRate;
813 if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
814 return NO_INIT;
815 }
816 // pitch is emulated by adjusting speed and sampleRate
817 const uint32_t effectiveSampleRate = adjustSampleRate(rate, mPlaybackRate.mPitch);
818 if (rate == 0 || effectiveSampleRate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
819 return BAD_VALUE;
820 }
821 // TODO: Should we also check if the buffer size is compatible?
822
823 mSampleRate = rate;
824 mProxy->setSampleRate(effectiveSampleRate);
825
826 return NO_ERROR;
827 }
828
829 uint32_t AudioTrack::getSampleRate() const
830 {
831 AutoMutex lock(mLock);
832
833 // sample rate can be updated during playback by the offloaded decoder so we need to
834 // query the HAL and update if needed.
835 // FIXME use Proxy return channel to update the rate from server and avoid polling here
836 if (isOffloadedOrDirect_l()) {
837 if (mOutput != AUDIO_IO_HANDLE_NONE) {
838 uint32_t sampleRate = 0;
839 status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
840 if (status == NO_ERROR) {
841 mSampleRate = sampleRate;
842 }
843 }
844 }
845 return mSampleRate;
846 }
847
848 uint32_t AudioTrack::getOriginalSampleRate() const
849 {
850 return mOriginalSampleRate;
851 }
852
853 status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
854 {
855 AutoMutex lock(mLock);
856 if (isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
857 return NO_ERROR;
858 }
859 if (isOffloadedOrDirect_l()) {
860 return INVALID_OPERATION;
861 }
862 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
863 return INVALID_OPERATION;
864 }
865
866 ALOGV("setPlaybackRate (input): mSampleRate:%u mSpeed:%f mPitch:%f",
867 mSampleRate, playbackRate.mSpeed, playbackRate.mPitch);
868 // pitch is emulated by adjusting speed and sampleRate
869 const uint32_t effectiveRate = adjustSampleRate(mSampleRate, playbackRate.mPitch);
870 const float effectiveSpeed = adjustSpeed(playbackRate.mSpeed, playbackRate.mPitch);
871 const float effectivePitch = adjustPitch(playbackRate.mPitch);
872 AudioPlaybackRate playbackRateTemp = playbackRate;
873 playbackRateTemp.mSpeed = effectiveSpeed;
874 playbackRateTemp.mPitch = effectivePitch;
875
876 ALOGV("setPlaybackRate (effective): mSampleRate:%u mSpeed:%f mPitch:%f",
877 effectiveRate, effectiveSpeed, effectivePitch);
878
879 if (!isAudioPlaybackRateValid(playbackRateTemp)) {
880 ALOGV("setPlaybackRate(%f, %f) failed (effective rate out of bounds)",
881 playbackRate.mSpeed, playbackRate.mPitch);
882 return BAD_VALUE;
883 }
884 // Check if the buffer size is compatible.
885 if (!isSampleRateSpeedAllowed_l(effectiveRate, effectiveSpeed)) {
886 ALOGV("setPlaybackRate(%f, %f) failed (buffer size)",
887 playbackRate.mSpeed, playbackRate.mPitch);
888 return BAD_VALUE;
889 }
890
891 // Check resampler ratios are within bounds
892 if ((uint64_t)effectiveRate > (uint64_t)mSampleRate * (uint64_t)AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
893 ALOGV("setPlaybackRate(%f, %f) failed. Resample rate exceeds max accepted value",
894 playbackRate.mSpeed, playbackRate.mPitch);
895 return BAD_VALUE;
896 }
897
898 if ((uint64_t)effectiveRate * (uint64_t)AUDIO_RESAMPLER_UP_RATIO_MAX < (uint64_t)mSampleRate) {
899 ALOGV("setPlaybackRate(%f, %f) failed. Resample rate below min accepted value",
900 playbackRate.mSpeed, playbackRate.mPitch);
901 return BAD_VALUE;
902 }
903 mPlaybackRate = playbackRate;
904 //set effective rates
905 mProxy->setPlaybackRate(playbackRateTemp);
906 mProxy->setSampleRate(effectiveRate); // FIXME: not quite "atomic" with setPlaybackRate
907 return NO_ERROR;
908 }
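
// Usage sketch (illustrative values):
//
//   AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
//   rate.mSpeed = 1.5f;   // play 50% faster
//   rate.mPitch = 1.0f;   // keep the original pitch
//   if (track->setPlaybackRate(rate) != NO_ERROR) {
//       // Rejected: offloaded/direct/fast track, or the effective rate is
//       // out of bounds for the current buffer size and output sample rate.
//   }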
909
910 const AudioPlaybackRate& AudioTrack::getPlaybackRate() const
911 {
912 AutoMutex lock(mLock);
913 return mPlaybackRate;
914 }
915
916 ssize_t AudioTrack::getBufferSizeInFrames()
917 {
918 AutoMutex lock(mLock);
919 if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
920 return NO_INIT;
921 }
922 return (ssize_t) mProxy->getBufferSizeInFrames();
923 }
924
925 status_t AudioTrack::getBufferDurationInUs(int64_t *duration)
926 {
927 if (duration == nullptr) {
928 return BAD_VALUE;
929 }
930 AutoMutex lock(mLock);
931 if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
932 return NO_INIT;
933 }
934 ssize_t bufferSizeInFrames = (ssize_t) mProxy->getBufferSizeInFrames();
935 if (bufferSizeInFrames < 0) {
936 return (status_t)bufferSizeInFrames;
937 }
938 *duration = (int64_t)((double)bufferSizeInFrames * 1000000
939 / ((double)mSampleRate * mPlaybackRate.mSpeed));
940 return NO_ERROR;
941 }
942
943 ssize_t AudioTrack::setBufferSizeInFrames(size_t bufferSizeInFrames)
944 {
945 AutoMutex lock(mLock);
946 if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
947 return NO_INIT;
948 }
949 // Reject if timed track or compressed audio.
950 if (!audio_is_linear_pcm(mFormat)) {
951 return INVALID_OPERATION;
952 }
953 return (ssize_t) mProxy->setBufferSizeInFrames((uint32_t) bufferSizeInFrames);
954 }
955
956 status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
957 {
958 if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
959 return INVALID_OPERATION;
960 }
961
962 if (loopCount == 0) {
963 ;
964 } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
965 loopEnd - loopStart >= MIN_LOOP) {
966 ;
967 } else {
968 return BAD_VALUE;
969 }
970
971 AutoMutex lock(mLock);
972 // See setPosition() regarding setting parameters such as loop points or position while active
973 if (mState == STATE_ACTIVE) {
974 return INVALID_OPERATION;
975 }
976 setLoop_l(loopStart, loopEnd, loopCount);
977 return NO_ERROR;
978 }
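
// Usage sketch for a static (shared buffer) track (illustrative; loop points
// must satisfy loopStart < loopEnd <= frameCount, and loopCount == -1 means
// loop indefinitely):
//
//   track->stop();                                 // loop cannot change while active
//   track->setLoop(0, track->frameCount(), -1);
//   track->start();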
979
980 void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
981 {
982 // We do not update the periodic notification point.
983 // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
984 mLoopCount = loopCount;
985 mLoopEnd = loopEnd;
986 mLoopStart = loopStart;
987 mLoopCountNotified = loopCount;
988 mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
989
990 // Waking the AudioTrackThread is not needed as this cannot be called when active.
991 }
992
993 status_t AudioTrack::setMarkerPosition(uint32_t marker)
994 {
995 // The only purpose of setting marker position is to get a callback
996 if (mCbf == NULL || isOffloadedOrDirect()) {
997 return INVALID_OPERATION;
998 }
999
1000 AutoMutex lock(mLock);
1001 mMarkerPosition = marker;
1002 mMarkerReached = false;
1003
1004 sp<AudioTrackThread> t = mAudioTrackThread;
1005 if (t != 0) {
1006 t->wake();
1007 }
1008 return NO_ERROR;
1009 }
1010
1011 status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
1012 {
1013 if (isOffloadedOrDirect()) {
1014 return INVALID_OPERATION;
1015 }
1016 if (marker == NULL) {
1017 return BAD_VALUE;
1018 }
1019
1020 AutoMutex lock(mLock);
1021 mMarkerPosition.getValue(marker);
1022
1023 return NO_ERROR;
1024 }
1025
1026 status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
1027 {
1028 // The only purpose of setting position update period is to get a callback
1029 if (mCbf == NULL || isOffloadedOrDirect()) {
1030 return INVALID_OPERATION;
1031 }
1032
1033 AutoMutex lock(mLock);
1034 mNewPosition = updateAndGetPosition_l() + updatePeriod;
1035 mUpdatePeriod = updatePeriod;
1036
1037 sp<AudioTrackThread> t = mAudioTrackThread;
1038 if (t != 0) {
1039 t->wake();
1040 }
1041 return NO_ERROR;
1042 }
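
// Usage sketch (illustrative): request an EVENT_NEW_POS callback roughly every
// 100 ms of playback for a 48000 Hz track.
//
//   track->setPositionUpdatePeriod(48000 / 10);    // 4800 frames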
1043
1044 status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
1045 {
1046 if (isOffloadedOrDirect()) {
1047 return INVALID_OPERATION;
1048 }
1049 if (updatePeriod == NULL) {
1050 return BAD_VALUE;
1051 }
1052
1053 AutoMutex lock(mLock);
1054 *updatePeriod = mUpdatePeriod;
1055
1056 return NO_ERROR;
1057 }
1058
1059 status_t AudioTrack::setPosition(uint32_t position)
1060 {
1061 if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
1062 return INVALID_OPERATION;
1063 }
1064 if (position > mFrameCount) {
1065 return BAD_VALUE;
1066 }
1067
1068 AutoMutex lock(mLock);
1069 // Currently we require that the player is inactive before setting parameters such as position
1070 // or loop points. Otherwise, there could be a race condition: the application could read the
1071 // current position, compute a new position or loop parameters, and then set that position or
1072 // loop parameters but it would do the "wrong" thing since the position has continued to advance
1073 // in the mean time. If we ever provide a sequencer in server, we could allow a way for the app
1074 // to specify how it wants to handle such scenarios.
1075 if (mState == STATE_ACTIVE) {
1076 return INVALID_OPERATION;
1077 }
1078 // After setting the position, use full update period before notification.
1079 mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
1080 mStaticProxy->setBufferPosition(position);
1081
1082 // Waking the AudioTrackThread is not needed as this cannot be called when active.
1083 return NO_ERROR;
1084 }
1085
1086 status_t AudioTrack::getPosition(uint32_t *position)
1087 {
1088 if (position == NULL) {
1089 return BAD_VALUE;
1090 }
1091
1092 AutoMutex lock(mLock);
1093 // FIXME: offloaded and direct tracks call into the HAL for render positions
1094 // for compressed/synced data; however, we use proxy position for pure linear pcm data
1095 // as we do not know the capability of the HAL for pcm position support and standby.
1096 // There may be some latency differences between the HAL position and the proxy position.
1097 if (isOffloadedOrDirect_l() && !isPurePcmData_l()) {
1098 uint32_t dspFrames = 0;
1099
1100 if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
1101 ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
1102 *position = mPausedPosition;
1103 return NO_ERROR;
1104 }
1105
1106 if (mOutput != AUDIO_IO_HANDLE_NONE) {
1107 uint32_t halFrames; // actually unused
1108 (void) AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
1109 // FIXME: on getRenderPosition() error, we return OK with frame position 0.
1110 }
1111 // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
1112 // due to hardware latency. We leave this behavior for now.
1113 *position = dspFrames;
1114 } else {
1115 if (mCblk->mFlags & CBLK_INVALID) {
1116 (void) restoreTrack_l("getPosition");
1117 // FIXME: for compatibility with the Java API we ignore the restoreTrack_l()
1118 // error here (e.g. DEAD_OBJECT) and return OK with the last recorded server position.
1119 }
1120
1121 // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
1122 *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
1123 0 : updateAndGetPosition_l().value();
1124 }
1125 return NO_ERROR;
1126 }
1127
1128 status_t AudioTrack::getBufferPosition(uint32_t *position)
1129 {
1130 if (mSharedBuffer == 0) {
1131 return INVALID_OPERATION;
1132 }
1133 if (position == NULL) {
1134 return BAD_VALUE;
1135 }
1136
1137 AutoMutex lock(mLock);
1138 *position = mStaticProxy->getBufferPosition();
1139 return NO_ERROR;
1140 }
1141
1142 status_t AudioTrack::reload()
1143 {
1144 if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
1145 return INVALID_OPERATION;
1146 }
1147
1148 AutoMutex lock(mLock);
1149 // See setPosition() regarding setting parameters such as loop points or position while active
1150 if (mState == STATE_ACTIVE) {
1151 return INVALID_OPERATION;
1152 }
1153 mNewPosition = mUpdatePeriod;
1154 (void) updateAndGetPosition_l();
1155 mPosition = 0;
1156 mPreviousTimestampValid = false;
1157 #if 0
1158 // The documentation is not clear on the behavior of reload() and the restoration
1159 // of loop count. Historically we have not restored loop count, start, end,
1160 // but it makes sense if one desires to repeat playing a particular sound.
1161 if (mLoopCount != 0) {
1162 mLoopCountNotified = mLoopCount;
1163 mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
1164 }
1165 #endif
1166 mStaticProxy->setBufferPosition(0);
1167 return NO_ERROR;
1168 }
1169
1170 audio_io_handle_t AudioTrack::getOutput() const
1171 {
1172 AutoMutex lock(mLock);
1173 return mOutput;
1174 }
1175
1176 status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
1177 AutoMutex lock(mLock);
1178 if (mSelectedDeviceId != deviceId) {
1179 mSelectedDeviceId = deviceId;
1180 android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
1181 }
1182 return NO_ERROR;
1183 }
1184
1185 audio_port_handle_t AudioTrack::getOutputDevice() {
1186 AutoMutex lock(mLock);
1187 return mSelectedDeviceId;
1188 }
1189
1190 audio_port_handle_t AudioTrack::getRoutedDeviceId() {
1191 AutoMutex lock(mLock);
1192 if (mOutput == AUDIO_IO_HANDLE_NONE) {
1193 return AUDIO_PORT_HANDLE_NONE;
1194 }
1195 return AudioSystem::getDeviceIdForIo(mOutput);
1196 }
1197
1198 status_t AudioTrack::attachAuxEffect(int effectId)
1199 {
1200 AutoMutex lock(mLock);
1201 status_t status = mAudioTrack->attachAuxEffect(effectId);
1202 if (status == NO_ERROR) {
1203 mAuxEffectId = effectId;
1204 }
1205 return status;
1206 }
1207
1208 audio_stream_type_t AudioTrack::streamType() const
1209 {
1210 if (mStreamType == AUDIO_STREAM_DEFAULT) {
1211 return audio_attributes_to_stream_type(&mAttributes);
1212 }
1213 return mStreamType;
1214 }
1215
1216 // -------------------------------------------------------------------------
1217
1218 // must be called with mLock held
1219 status_t AudioTrack::createTrack_l()
1220 {
1221 const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
1222 if (audioFlinger == 0) {
1223 ALOGE("Could not get audioflinger");
1224 return NO_INIT;
1225 }
1226
1227 if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
1228 AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
1229 }
1230 audio_io_handle_t output;
1231 audio_stream_type_t streamType = mStreamType;
1232 audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;
1233
1234 // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
1235 // After fast request is denied, we will request again if IAudioTrack is re-created.
1236
1237 status_t status;
1238 status = AudioSystem::getOutputForAttr(attr, &output,
1239 mSessionId, &streamType, mClientUid,
1240 mSampleRate, mFormat, mChannelMask,
1241 mFlags, mSelectedDeviceId, mOffloadInfo);
1242
1243 if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
1244 ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u, format %#x,"
1245 " channel mask %#x, flags %#x",
1246 mSessionId, streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
1247 return BAD_VALUE;
1248 }
1249 {
1250 // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
1251 // we must release it ourselves if anything goes wrong.
1252
1253 // Not all of these values are needed under all conditions, but it is easier to get them all
1254 status = AudioSystem::getLatency(output, &mAfLatency);
1255 if (status != NO_ERROR) {
1256 ALOGE("getLatency(%d) failed status %d", output, status);
1257 goto release;
1258 }
1259 ALOGV("createTrack_l() output %d afLatency %u", output, mAfLatency);
1260
1261 status = AudioSystem::getFrameCount(output, &mAfFrameCount);
1262 if (status != NO_ERROR) {
1263 ALOGE("getFrameCount(output=%d) status %d", output, status);
1264 goto release;
1265 }
1266
1267 // TODO consider making this a member variable if there are other uses for it later
1268 size_t afFrameCountHAL;
1269 status = AudioSystem::getFrameCountHAL(output, &afFrameCountHAL);
1270 if (status != NO_ERROR) {
1271 ALOGE("getFrameCountHAL(output=%d) status %d", output, status);
1272 goto release;
1273 }
1274 ALOG_ASSERT(afFrameCountHAL > 0);
1275
1276 status = AudioSystem::getSamplingRate(output, &mAfSampleRate);
1277 if (status != NO_ERROR) {
1278 ALOGE("getSamplingRate(output=%d) status %d", output, status);
1279 goto release;
1280 }
1281 if (mSampleRate == 0) {
1282 mSampleRate = mAfSampleRate;
1283 mOriginalSampleRate = mAfSampleRate;
1284 }
1285
1286 // Client can only express a preference for FAST. Server will perform additional tests.
1287 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1288 bool useCaseAllowed =
1289 // either of these use cases:
1290 // use case 1: shared buffer
1291 (mSharedBuffer != 0) ||
1292 // use case 2: callback transfer mode
1293 (mTransfer == TRANSFER_CALLBACK) ||
1294 // use case 3: obtain/release mode
1295 (mTransfer == TRANSFER_OBTAIN) ||
1296 // use case 4: synchronous write
1297 ((mTransfer == TRANSFER_SYNC) && mThreadCanCallJava);
1298 // sample rates must also match
1299 bool fastAllowed = useCaseAllowed && (mSampleRate == mAfSampleRate);
1300 if (!fastAllowed) {
1301 ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client; transfer %d, "
1302 "track %u Hz, output %u Hz",
1303 mTransfer, mSampleRate, mAfSampleRate);
1304 mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1305 }
1306 }
1307
1308 mNotificationFramesAct = mNotificationFramesReq;
1309
1310 size_t frameCount = mReqFrameCount;
1311 if (!audio_has_proportional_frames(mFormat)) {
1312
1313 if (mSharedBuffer != 0) {
1314 // Same comment as below about ignoring frameCount parameter for set()
1315 frameCount = mSharedBuffer->size();
1316 } else if (frameCount == 0) {
1317 frameCount = mAfFrameCount;
1318 }
1319 if (mNotificationFramesAct != frameCount) {
1320 mNotificationFramesAct = frameCount;
1321 }
1322 } else if (mSharedBuffer != 0) {
1323 // FIXME: Ensure client side memory buffers need
1324 // not have additional alignment beyond sample
1325 // (e.g. 16 bit stereo accessed as 32 bit frame).
1326 size_t alignment = audio_bytes_per_sample(mFormat);
1327 if (alignment & 1) {
1328 // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
1329 alignment = 1;
1330 }
1331 if (mChannelCount > 1) {
1332 // More than 2 channels does not require stronger alignment than stereo
1333 alignment <<= 1;
1334 }
1335 if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
1336 ALOGE("Invalid buffer alignment: address %p, channel count %u",
1337 mSharedBuffer->pointer(), mChannelCount);
1338 status = BAD_VALUE;
1339 goto release;
1340 }
1341
1342 // When initializing a shared buffer AudioTrack via constructors,
1343 // there's no frameCount parameter.
1344 // But when initializing a shared buffer AudioTrack via set(),
1345 // there _is_ a frameCount parameter. We silently ignore it.
1346 frameCount = mSharedBuffer->size() / mFrameSize;
1347 } else {
1348 size_t minFrameCount = 0;
1349 // For fast tracks the frame count calculations and checks are mostly done by server,
1350 // but we try to respect the application's request for notifications per buffer.
1351 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1352 if (mNotificationsPerBufferReq > 0) {
1353 // Avoid possible arithmetic overflow during multiplication.
1354 // mNotificationsPerBuffer is clamped to a small integer earlier, so it is unlikely.
1355 if (mNotificationsPerBufferReq > SIZE_MAX / afFrameCountHAL) {
1356 ALOGE("Requested notificationPerBuffer=%u ignored for HAL frameCount=%zu",
1357 mNotificationsPerBufferReq, afFrameCountHAL);
1358 } else {
1359 minFrameCount = afFrameCountHAL * mNotificationsPerBufferReq;
1360 }
1361 }
1362 } else {
1363 // for normal tracks precompute the frame count based on speed.
1364 const float speed = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
1365 max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
1366 minFrameCount = calculateMinFrameCount(
1367 mAfLatency, mAfFrameCount, mAfSampleRate, mSampleRate,
1368 speed /*, 0 mNotificationsPerBufferReq*/);
1369 }
1370 if (frameCount < minFrameCount) {
1371 frameCount = minFrameCount;
1372 }
1373 }
1374
1375 audio_output_flags_t flags = mFlags;
1376
1377 pid_t tid = -1;
1378 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1379 if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
1380 tid = mAudioTrackThread->getTid();
1381 }
1382 }
1383
1384 size_t temp = frameCount; // temp may be replaced by a revised value of frameCount,
1385 // but we will still need the original value also
1386 audio_session_t originalSessionId = mSessionId;
1387 sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
1388 mSampleRate,
1389 mFormat,
1390 mChannelMask,
1391 &temp,
1392 &flags,
1393 mSharedBuffer,
1394 output,
1395 mClientPid,
1396 tid,
1397 &mSessionId,
1398 mClientUid,
1399 &status);
1400 ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
1401 "session ID changed from %d to %d", originalSessionId, mSessionId);
1402
1403 if (status != NO_ERROR) {
1404 ALOGE("AudioFlinger could not create track, status: %d", status);
1405 goto release;
1406 }
1407 ALOG_ASSERT(track != 0);
1408
1409 // AudioFlinger now owns the reference to the I/O handle,
1410 // so we are no longer responsible for releasing it.
1411
1412 // FIXME compare to AudioRecord
1413 sp<IMemory> iMem = track->getCblk();
1414 if (iMem == 0) {
1415 ALOGE("Could not get control block");
1416 return NO_INIT;
1417 }
1418 void *iMemPointer = iMem->pointer();
1419 if (iMemPointer == NULL) {
1420 ALOGE("Could not get control block pointer");
1421 return NO_INIT;
1422 }
1423 // invariant that mAudioTrack != 0 is true only after set() returns successfully
1424 if (mAudioTrack != 0) {
1425 IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
1426 mDeathNotifier.clear();
1427 }
1428 mAudioTrack = track;
1429 mCblkMemory = iMem;
1430 IPCThreadState::self()->flushCommands();
1431
1432 audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1433 mCblk = cblk;
1434 // note that temp is the (possibly revised) value of frameCount
1435 if (temp < frameCount || (frameCount == 0 && temp == 0)) {
1436 // In current design, AudioTrack client checks and ensures frame count validity before
1437 // passing it to AudioFlinger so AudioFlinger should not return a different value except
1438 // for fast track as it uses a special method of assigning frame count.
1439 ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
1440 }
1441 frameCount = temp;
1442
1443 mAwaitBoost = false;
1444 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1445 if (flags & AUDIO_OUTPUT_FLAG_FAST) {
1446 ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
1447 if (!mThreadCanCallJava) {
1448 mAwaitBoost = true;
1449 }
1450 } else {
1451 ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
1452 }
1453 }
1454 mFlags = flags;
1455
1456 // Make sure that application is notified with sufficient margin before underrun.
1457 // The client can divide the AudioTrack buffer into sub-buffers,
1458 // and expresses its desire to server as the notification frame count.
1459 if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
1460 size_t maxNotificationFrames;
1461 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1462 // notify every HAL buffer, regardless of the size of the track buffer
1463 maxNotificationFrames = afFrameCountHAL;
1464 } else {
1465 // For normal tracks, use at least double-buffering if no sample rate conversion,
1466 // or at least triple-buffering if there is sample rate conversion
1467 const int nBuffering = mOriginalSampleRate == mAfSampleRate ? 2 : 3;
1468 maxNotificationFrames = frameCount / nBuffering;
1469 }
1470 if (mNotificationFramesAct == 0 || mNotificationFramesAct > maxNotificationFrames) {
1471 if (mNotificationFramesAct == 0) {
1472 ALOGD("Client defaulted notificationFrames to %zu for frameCount %zu",
1473 maxNotificationFrames, frameCount);
1474 } else {
1475 ALOGW("Client adjusted notificationFrames from %u to %zu for frameCount %zu",
1476 mNotificationFramesAct, maxNotificationFrames, frameCount);
1477 }
1478 mNotificationFramesAct = (uint32_t) maxNotificationFrames;
1479 }
1480 }
1481
1482 // We retain a copy of the I/O handle, but don't own the reference
1483 mOutput = output;
1484 mRefreshRemaining = true;
1485
1486 // Starting address of buffers in shared memory. If there is a shared buffer, buffers
1487 // is the value of pointer() for the shared buffer, otherwise buffers points
1488 // immediately after the control block. This address is for the mapping within client
1489 // address space. AudioFlinger::TrackBase::mBuffer is for the server address space.
1490 void* buffers;
1491 if (mSharedBuffer == 0) {
1492 buffers = cblk + 1;
1493 } else {
1494 buffers = mSharedBuffer->pointer();
1495 if (buffers == NULL) {
1496 ALOGE("Could not get buffer pointer");
1497 return NO_INIT;
1498 }
1499 }
1500
1501 mAudioTrack->attachAuxEffect(mAuxEffectId);
1502 // FIXME doesn't take into account speed or future sample rate changes (until restoreTrack)
1503 // FIXME don't believe this lie
1504 mLatency = mAfLatency + (1000*frameCount) / mSampleRate;
1505
1506 mFrameCount = frameCount;
1507 // If IAudioTrack is re-created, don't let the requested frameCount
1508 // decrease. This can confuse clients that cache frameCount().
1509 if (frameCount > mReqFrameCount) {
1510 mReqFrameCount = frameCount;
1511 }
1512
1513 // reset server position to 0 as we have new cblk.
1514 mServer = 0;
1515
1516 // update proxy
1517 if (mSharedBuffer == 0) {
1518 mStaticProxy.clear();
1519 mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
1520 } else {
1521 mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
1522 mProxy = mStaticProxy;
1523 }
1524
1525 mProxy->setVolumeLR(gain_minifloat_pack(
1526 gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
1527 gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));
1528
1529 mProxy->setSendLevel(mSendLevel);
1530 const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
1531 const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
1532 const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
1533 mProxy->setSampleRate(effectiveSampleRate);
1534
1535 AudioPlaybackRate playbackRateTemp = mPlaybackRate;
1536 playbackRateTemp.mSpeed = effectiveSpeed;
1537 playbackRateTemp.mPitch = effectivePitch;
1538 mProxy->setPlaybackRate(playbackRateTemp);
1539 mProxy->setMinimum(mNotificationFramesAct);
1540
1541 mDeathNotifier = new DeathNotifier(this);
1542 IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
1543
1544 if (mDeviceCallback != 0) {
1545 AudioSystem::addAudioDeviceCallback(mDeviceCallback, mOutput);
1546 }
1547
1548 return NO_ERROR;
1549 }
1550
1551 release:
1552 AudioSystem::releaseOutput(output, streamType, mSessionId);
1553 if (status == NO_ERROR) {
1554 status = NO_INIT;
1555 }
1556 return status;
1557 }
1558
1559 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
1560 {
1561 if (audioBuffer == NULL) {
1562 if (nonContig != NULL) {
1563 *nonContig = 0;
1564 }
1565 return BAD_VALUE;
1566 }
1567 if (mTransfer != TRANSFER_OBTAIN) {
1568 audioBuffer->frameCount = 0;
1569 audioBuffer->size = 0;
1570 audioBuffer->raw = NULL;
1571 if (nonContig != NULL) {
1572 *nonContig = 0;
1573 }
1574 return INVALID_OPERATION;
1575 }
1576
1577 const struct timespec *requested;
1578 struct timespec timeout;
1579 if (waitCount == -1) {
1580 requested = &ClientProxy::kForever;
1581 } else if (waitCount == 0) {
1582 requested = &ClientProxy::kNonBlocking;
1583 } else if (waitCount > 0) {
1584 long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1585 timeout.tv_sec = ms / 1000;
1586 timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1587 requested = &timeout;
1588 } else {
1589 ALOGE("%s invalid waitCount %d", __func__, waitCount);
1590 requested = NULL;
1591 }
1592 return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
1593 }
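// Illustrative sketch only (not part of the original source): how a TRANSFER_OBTAIN client
// typically drives the overload above, assuming "track" is an sp<AudioTrack> created in that
// mode and "source" holds interleaved PCM in the track's configured format.
//
//     AudioTrack::Buffer buf;
//     buf.frameCount = framesWanted;                      // requested size, in frames
//     status_t err = track->obtainBuffer(&buf, -1 /* waitCount: block */, NULL /* nonContig */);
//     if (err == NO_ERROR) {
//         memcpy(buf.raw, source, buf.size);              // buf.size may be less than requested
//         track->releaseBuffer(&buf);                     // queue the filled frames
//     }
//
// A waitCount of 0 is non-blocking and a positive waitCount blocks for at most
// waitCount * WAIT_PERIOD_MS (10 ms), exactly as converted to a timespec above.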
1594
1595 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1596 struct timespec *elapsed, size_t *nonContig)
1597 {
1598 // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1599 uint32_t oldSequence = 0;
1600 uint32_t newSequence;
1601
1602 Proxy::Buffer buffer;
1603 status_t status = NO_ERROR;
1604
1605 static const int32_t kMaxTries = 5;
1606 int32_t tryCounter = kMaxTries;
1607
1608 do {
1609 // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1610 // keep them from going away if another thread re-creates the track during obtainBuffer()
1611 sp<AudioTrackClientProxy> proxy;
1612 sp<IMemory> iMem;
1613
1614 { // start of lock scope
1615 AutoMutex lock(mLock);
1616
1617 newSequence = mSequence;
1618 // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1619 if (status == DEAD_OBJECT) {
1620 // re-create track, unless someone else has already done so
1621 if (newSequence == oldSequence) {
1622 status = restoreTrack_l("obtainBuffer");
1623 if (status != NO_ERROR) {
1624 buffer.mFrameCount = 0;
1625 buffer.mRaw = NULL;
1626 buffer.mNonContig = 0;
1627 break;
1628 }
1629 }
1630 }
1631 oldSequence = newSequence;
1632
1633 if (status == NOT_ENOUGH_DATA) {
1634 restartIfDisabled();
1635 }
1636
1637 // Keep the extra references
1638 proxy = mProxy;
1639 iMem = mCblkMemory;
1640
1641 if (mState == STATE_STOPPING) {
1642 status = -EINTR;
1643 buffer.mFrameCount = 0;
1644 buffer.mRaw = NULL;
1645 buffer.mNonContig = 0;
1646 break;
1647 }
1648
1649 // Non-blocking if track is stopped or paused
1650 if (mState != STATE_ACTIVE) {
1651 requested = &ClientProxy::kNonBlocking;
1652 }
1653
1654 } // end of lock scope
1655
1656 buffer.mFrameCount = audioBuffer->frameCount;
1657 // FIXME starts the requested timeout and elapsed over from scratch
1658 status = proxy->obtainBuffer(&buffer, requested, elapsed);
1659 } while (((status == DEAD_OBJECT) || (status == NOT_ENOUGH_DATA)) && (tryCounter-- > 0));
1660
1661 audioBuffer->frameCount = buffer.mFrameCount;
1662 audioBuffer->size = buffer.mFrameCount * mFrameSize;
1663 audioBuffer->raw = buffer.mRaw;
1664 if (nonContig != NULL) {
1665 *nonContig = buffer.mNonContig;
1666 }
1667 return status;
1668 }
1669
1670 void AudioTrack::releaseBuffer(const Buffer* audioBuffer)
1671 {
1672 // FIXME add error checking on mode, by adding an internal version
1673 if (mTransfer == TRANSFER_SHARED) {
1674 return;
1675 }
1676
1677 size_t stepCount = audioBuffer->size / mFrameSize;
1678 if (stepCount == 0) {
1679 return;
1680 }
1681
1682 Proxy::Buffer buffer;
1683 buffer.mFrameCount = stepCount;
1684 buffer.mRaw = audioBuffer->raw;
1685
1686 AutoMutex lock(mLock);
1687 mReleased += stepCount;
1688 mInUnderrun = false;
1689 mProxy->releaseBuffer(&buffer);
1690
1691 // restart track if it was disabled by audioflinger due to previous underrun
1692 restartIfDisabled();
1693 }
1694
1695 void AudioTrack::restartIfDisabled()
1696 {
1697 int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
1698 if ((mState == STATE_ACTIVE) && (flags & CBLK_DISABLED)) {
1699 ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
1700 // FIXME ignoring status
1701 mAudioTrack->start();
1702 }
1703 }
1704
1705 // -------------------------------------------------------------------------
1706
1707 ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1708 {
1709 if (mTransfer != TRANSFER_SYNC) {
1710 return INVALID_OPERATION;
1711 }
1712
1713 if (isDirect()) {
1714 AutoMutex lock(mLock);
1715 int32_t flags = android_atomic_and(
1716 ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
1717 &mCblk->mFlags);
1718 if (flags & CBLK_INVALID) {
1719 return DEAD_OBJECT;
1720 }
1721 }
1722
1723 if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1724         // Sanity-check: user is most likely passing an error code, and it would
1725 // make the return value ambiguous (actualSize vs error).
1726         ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd))", buffer, userSize, userSize);
1727 return BAD_VALUE;
1728 }
1729
1730 size_t written = 0;
1731 Buffer audioBuffer;
1732
1733 while (userSize >= mFrameSize) {
1734 audioBuffer.frameCount = userSize / mFrameSize;
1735
1736 status_t err = obtainBuffer(&audioBuffer,
1737 blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1738 if (err < 0) {
1739 if (written > 0) {
1740 break;
1741 }
1742 if (err == TIMED_OUT || err == -EINTR) {
1743 err = WOULD_BLOCK;
1744 }
1745 return ssize_t(err);
1746 }
1747
1748 size_t toWrite = audioBuffer.size;
1749 memcpy(audioBuffer.i8, buffer, toWrite);
1750 buffer = ((const char *) buffer) + toWrite;
1751 userSize -= toWrite;
1752 written += toWrite;
1753
1754 releaseBuffer(&audioBuffer);
1755 }
1756
1757 if (written > 0) {
1758 mFramesWritten += written / mFrameSize;
1759 }
1760 return written;
1761 }
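// Illustrative sketch only (not part of the original source): typical client use of the
// blocking write() path above, assuming "track" is an sp<AudioTrack> created with
// TRANSFER_SYNC and "pcm"/"pcmBytes" hold interleaved samples in the track's format.
//
//     track->start();
//     ssize_t written = track->write(pcm, pcmBytes, true /* blocking */);
//     if (written < 0) {
//         // a negative value is a status_t error, e.g. WOULD_BLOCK (non-blocking case),
//         // BAD_VALUE or DEAD_OBJECT
//     }
//
// The return value is the number of bytes actually queued, which can be smaller than
// pcmBytes for a non-blocking call.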
1762
1763 // -------------------------------------------------------------------------
1764
1765 nsecs_t AudioTrack::processAudioBuffer()
1766 {
1767 // Currently the AudioTrack thread is not created if there are no callbacks.
1768 // Would it ever make sense to run the thread, even without callbacks?
1769 // If so, then replace this by checks at each use for mCbf != NULL.
1770 LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1771
1772 mLock.lock();
1773 if (mAwaitBoost) {
1774 mAwaitBoost = false;
1775 mLock.unlock();
1776 static const int32_t kMaxTries = 5;
1777 int32_t tryCounter = kMaxTries;
1778 uint32_t pollUs = 10000;
1779 do {
1780 int policy = sched_getscheduler(0) & ~SCHED_RESET_ON_FORK;
1781 if (policy == SCHED_FIFO || policy == SCHED_RR) {
1782 break;
1783 }
1784 usleep(pollUs);
1785 pollUs <<= 1;
1786 } while (tryCounter-- > 0);
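        // With kMaxTries == 5 and pollUs starting at 10 ms and doubling each pass, the loop
        // above retries up to six times, sleeping 10 + 20 + 40 + 80 + 160 + 320 ms
        // (roughly 630 ms worst case) before the error below is logged.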
1787 if (tryCounter < 0) {
1788 ALOGE("did not receive expected priority boost on time");
1789 }
1790 // Run again immediately
1791 return 0;
1792 }
1793
1794 // Can only reference mCblk while locked
1795 int32_t flags = android_atomic_and(
1796 ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1797
1798 // Check for track invalidation
1799 if (flags & CBLK_INVALID) {
1800 // for offloaded tracks restoreTrack_l() will just update the sequence and clear
1801 // AudioSystem cache. We should not exit here but after calling the callback so
1802 // that the upper layers can recreate the track
1803 if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
1804 status_t status __unused = restoreTrack_l("processAudioBuffer");
1805 // FIXME unused status
1806 // after restoration, continue below to make sure that the loop and buffer events
1807 // are notified because they have been cleared from mCblk->mFlags above.
1808 }
1809 }
1810
1811 bool waitStreamEnd = mState == STATE_STOPPING;
1812 bool active = mState == STATE_ACTIVE;
1813
1814 // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1815 bool newUnderrun = false;
1816 if (flags & CBLK_UNDERRUN) {
1817 #if 0
1818 // Currently in shared buffer mode, when the server reaches the end of buffer,
1819 // the track stays active in continuous underrun state. It's up to the application
1820 // to pause or stop the track, or set the position to a new offset within buffer.
1821 // This was some experimental code to auto-pause on underrun. Keeping it here
1822 // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
1823 if (mTransfer == TRANSFER_SHARED) {
1824 mState = STATE_PAUSED;
1825 active = false;
1826 }
1827 #endif
1828 if (!mInUnderrun) {
1829 mInUnderrun = true;
1830 newUnderrun = true;
1831 }
1832 }
1833
1834 // Get current position of server
1835 Modulo<uint32_t> position(updateAndGetPosition_l());
1836
1837 // Manage marker callback
1838 bool markerReached = false;
1839 Modulo<uint32_t> markerPosition(mMarkerPosition);
1840 // uses 32 bit wraparound for comparison with position.
1841 if (!mMarkerReached && markerPosition.value() > 0 && position >= markerPosition) {
1842 mMarkerReached = markerReached = true;
1843 }
1844
1845 // Determine number of new position callback(s) that will be needed, while locked
1846 size_t newPosCount = 0;
1847 Modulo<uint32_t> newPosition(mNewPosition);
1848 uint32_t updatePeriod = mUpdatePeriod;
1849 // FIXME fails for wraparound, need 64 bits
1850 if (updatePeriod > 0 && position >= newPosition) {
1851 newPosCount = ((position - newPosition).value() / updatePeriod) + 1;
1852 mNewPosition += updatePeriod * newPosCount;
1853 }
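    // For illustration only (hypothetical values): with updatePeriod = 1000 frames and the
    // server position 2500 frames past mNewPosition, newPosCount = 2500 / 1000 + 1 = 3
    // EVENT_NEW_POS callbacks are due and mNewPosition advances by 3000 frames.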
1854
1855 // Cache other fields that will be needed soon
1856 uint32_t sampleRate = mSampleRate;
1857 float speed = mPlaybackRate.mSpeed;
1858 const uint32_t notificationFrames = mNotificationFramesAct;
1859 if (mRefreshRemaining) {
1860 mRefreshRemaining = false;
1861 mRemainingFrames = notificationFrames;
1862 mRetryOnPartialBuffer = false;
1863 }
1864 size_t misalignment = mProxy->getMisalignment();
1865 uint32_t sequence = mSequence;
1866 sp<AudioTrackClientProxy> proxy = mProxy;
1867
1868 // Determine the number of new loop callback(s) that will be needed, while locked.
1869 int loopCountNotifications = 0;
1870 uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
1871
1872 if (mLoopCount > 0) {
1873 int loopCount;
1874 size_t bufferPosition;
1875 mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
1876 loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
1877 loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
1878 mLoopCountNotified = loopCount; // discard any excess notifications
1879 } else if (mLoopCount < 0) {
1880 // FIXME: We're not accurate with notification count and position with infinite looping
1881 // since loopCount from server side will always return -1 (we could decrement it).
1882 size_t bufferPosition = mStaticProxy->getBufferPosition();
1883 loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
1884 loopPeriod = mLoopEnd - bufferPosition;
1885 } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
1886 size_t bufferPosition = mStaticProxy->getBufferPosition();
1887 loopPeriod = mFrameCount - bufferPosition;
1888 }
1889
1890 // These fields don't need to be cached, because they are assigned only by set():
1891 // mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
1892 // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1893
1894 mLock.unlock();
1895
1896 // get anchor time to account for callbacks.
1897 const nsecs_t timeBeforeCallbacks = systemTime();
1898
1899 if (waitStreamEnd) {
1900 // FIXME: Instead of blocking in proxy->waitStreamEndDone(), Callback thread
1901 // should wait on proxy futex and handle CBLK_STREAM_END_DONE within this function
1902 // (and make sure we don't callback for more data while we're stopping).
1903 // This helps with position, marker notifications, and track invalidation.
1904 struct timespec timeout;
1905 timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1906 timeout.tv_nsec = 0;
1907
1908 status_t status = proxy->waitStreamEndDone(&timeout);
1909 switch (status) {
1910 case NO_ERROR:
1911 case DEAD_OBJECT:
1912 case TIMED_OUT:
1913 if (status != DEAD_OBJECT) {
1914                 // for DEAD_OBJECT, we do not send an EVENT_STREAM_END after stop();
1915 // instead, the application should handle the EVENT_NEW_IAUDIOTRACK.
1916 mCbf(EVENT_STREAM_END, mUserData, NULL);
1917 }
1918 {
1919 AutoMutex lock(mLock);
1920 // The previously assigned value of waitStreamEnd is no longer valid,
1921 // since the mutex has been unlocked and either the callback handler
1922 // or another thread could have re-started the AudioTrack during that time.
1923 waitStreamEnd = mState == STATE_STOPPING;
1924 if (waitStreamEnd) {
1925 mState = STATE_STOPPED;
1926 mReleased = 0;
1927 }
1928 }
1929 if (waitStreamEnd && status != DEAD_OBJECT) {
1930 return NS_INACTIVE;
1931 }
1932 break;
1933 }
1934 return 0;
1935 }
1936
1937 // perform callbacks while unlocked
1938 if (newUnderrun) {
1939 mCbf(EVENT_UNDERRUN, mUserData, NULL);
1940 }
1941 while (loopCountNotifications > 0) {
1942 mCbf(EVENT_LOOP_END, mUserData, NULL);
1943 --loopCountNotifications;
1944 }
1945 if (flags & CBLK_BUFFER_END) {
1946 mCbf(EVENT_BUFFER_END, mUserData, NULL);
1947 }
1948 if (markerReached) {
1949 mCbf(EVENT_MARKER, mUserData, &markerPosition);
1950 }
1951 while (newPosCount > 0) {
1952 size_t temp = newPosition.value(); // FIXME size_t != uint32_t
1953 mCbf(EVENT_NEW_POS, mUserData, &temp);
1954 newPosition += updatePeriod;
1955 newPosCount--;
1956 }
1957
1958 if (mObservedSequence != sequence) {
1959 mObservedSequence = sequence;
1960 mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1961 // for offloaded tracks, just wait for the upper layers to recreate the track
1962 if (isOffloadedOrDirect()) {
1963 return NS_INACTIVE;
1964 }
1965 }
1966
1967 // if inactive, then don't run me again until re-started
1968 if (!active) {
1969 return NS_INACTIVE;
1970 }
1971
1972 // Compute the estimated time until the next timed event (position, markers, loops)
1973 // FIXME only for non-compressed audio
1974 uint32_t minFrames = ~0;
1975 if (!markerReached && position < markerPosition) {
1976 minFrames = (markerPosition - position).value();
1977 }
1978 if (loopPeriod > 0 && loopPeriod < minFrames) {
1979 // loopPeriod is already adjusted for actual position.
1980 minFrames = loopPeriod;
1981 }
1982 if (updatePeriod > 0) {
1983 minFrames = min(minFrames, (newPosition - position).value());
1984 }
1985
1986 // If > 0, poll periodically to recover from a stuck server. A good value is 2.
1987 static const uint32_t kPoll = 0;
1988 if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1989 minFrames = kPoll * notificationFrames;
1990 }
1991
1992 // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
1993 static const nsecs_t kWaitPeriodNs = WAIT_PERIOD_MS * 1000000LL;
1994 const nsecs_t timeAfterCallbacks = systemTime();
1995
1996 // Convert frame units to time units
1997 nsecs_t ns = NS_WHENEVER;
1998 if (minFrames != (uint32_t) ~0) {
1999 ns = framesToNanoseconds(minFrames, sampleRate, speed) + kWaitPeriodNs;
2000 ns -= (timeAfterCallbacks - timeBeforeCallbacks); // account for callback time
2001 // TODO: Should we warn if the callback time is too long?
2002 if (ns < 0) ns = 0;
2003 }
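    // For illustration only (hypothetical values): minFrames = 480 at sampleRate = 48000 and
    // speed = 1.0 converts to 10 ms, so ns becomes about 10 ms plus the 10 ms fudge factor,
    // minus whatever time the callbacks above already consumed.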
2004
2005 // If not supplying data by EVENT_MORE_DATA, then we're done
2006 if (mTransfer != TRANSFER_CALLBACK) {
2007 return ns;
2008 }
2009
2010 // EVENT_MORE_DATA callback handling.
2011 // Timing for linear pcm audio data formats can be derived directly from the
2012 // buffer fill level.
2013 // Timing for compressed data is not directly available from the buffer fill level,
2014 // rather indirectly from waiting for blocking mode callbacks or waiting for obtain()
2015 // to return a certain fill level.
2016
2017 struct timespec timeout;
2018 const struct timespec *requested = &ClientProxy::kForever;
2019 if (ns != NS_WHENEVER) {
2020 timeout.tv_sec = ns / 1000000000LL;
2021 timeout.tv_nsec = ns % 1000000000LL;
2022 ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
2023 requested = &timeout;
2024 }
2025
2026 size_t writtenFrames = 0;
2027 while (mRemainingFrames > 0) {
2028
2029 Buffer audioBuffer;
2030 audioBuffer.frameCount = mRemainingFrames;
2031 size_t nonContig;
2032 status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
2033 LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
2034 "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
2035 requested = &ClientProxy::kNonBlocking;
2036 size_t avail = audioBuffer.frameCount + nonContig;
2037 ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
2038 mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
2039 if (err != NO_ERROR) {
2040 if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
2041 (isOffloaded() && (err == DEAD_OBJECT))) {
2042 // FIXME bug 25195759
2043 return 1000000;
2044 }
2045 ALOGE("Error %d obtaining an audio buffer, giving up.", err);
2046 return NS_NEVER;
2047 }
2048
2049 if (mRetryOnPartialBuffer && audio_has_proportional_frames(mFormat)) {
2050 mRetryOnPartialBuffer = false;
2051 if (avail < mRemainingFrames) {
2052 if (ns > 0) { // account for obtain time
2053 const nsecs_t timeNow = systemTime();
2054 ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2055 }
2056 nsecs_t myns = framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
2057 if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
2058 ns = myns;
2059 }
2060 return ns;
2061 }
2062 }
2063
2064 size_t reqSize = audioBuffer.size;
2065 mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
2066 size_t writtenSize = audioBuffer.size;
2067
2068 // Sanity check on returned size
2069 if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
2070 ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
2071 reqSize, ssize_t(writtenSize));
2072 return NS_NEVER;
2073 }
2074
2075 if (writtenSize == 0) {
2076 // The callback is done filling buffers
2077 // Keep this thread going to handle timed events and
2078 // still try to get more data in intervals of WAIT_PERIOD_MS
2079 // but don't just loop and block the CPU, so wait
2080
2081 // mCbf(EVENT_MORE_DATA, ...) might either
2082 // (1) Block until it can fill the buffer, returning 0 size on EOS.
2083 // (2) Block until it can fill the buffer, returning 0 data (silence) on EOS.
2084 // (3) Return 0 size when no data is available, does not wait for more data.
2085 //
2086             // (1) and (2) occur with AudioPlayer/AwesomePlayer; (3) occurs with NuPlayer.
2087 // We try to compute the wait time to avoid a tight sleep-wait cycle,
2088 // especially for case (3).
2089 //
2090             // The decision to support (1) and (2) affects the sizing of mRemainingFrames
2091 // and this loop; whereas for case (3) we could simply check once with the full
2092 // buffer size and skip the loop entirely.
2093
2094 nsecs_t myns;
2095 if (audio_has_proportional_frames(mFormat)) {
2096 // time to wait based on buffer occupancy
2097 const nsecs_t datans = mRemainingFrames <= avail ? 0 :
2098 framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
2099 // audio flinger thread buffer size (TODO: adjust for fast tracks)
2100 // FIXME: use mAfFrameCountHAL instead of mAfFrameCount below for fast tracks.
2101 const nsecs_t afns = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
2102                 // add half the AudioFlinger buffer time to avoid soaking CPU if datans is 0.
2103 myns = datans + (afns / 2);
2104 } else {
2105 // FIXME: This could ping quite a bit if the buffer isn't full.
2106 // Note that when mState is stopping we waitStreamEnd, so it never gets here.
2107 myns = kWaitPeriodNs;
2108 }
2109 if (ns > 0) { // account for obtain and callback time
2110 const nsecs_t timeNow = systemTime();
2111 ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2112 }
2113 if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
2114 ns = myns;
2115 }
2116 return ns;
2117 }
2118
2119 size_t releasedFrames = writtenSize / mFrameSize;
2120 audioBuffer.frameCount = releasedFrames;
2121 mRemainingFrames -= releasedFrames;
2122 if (misalignment >= releasedFrames) {
2123 misalignment -= releasedFrames;
2124 } else {
2125 misalignment = 0;
2126 }
2127
2128 releaseBuffer(&audioBuffer);
2129 writtenFrames += releasedFrames;
2130
2131 // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
2132 // if callback doesn't like to accept the full chunk
2133 if (writtenSize < reqSize) {
2134 continue;
2135 }
2136
2137 // There could be enough non-contiguous frames available to satisfy the remaining request
2138 if (mRemainingFrames <= nonContig) {
2139 continue;
2140 }
2141
2142 #if 0
2143 // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
2144 // sum <= notificationFrames. It replaces that series by at most two EVENT_MORE_DATA
2145 // that total to a sum == notificationFrames.
2146 if (0 < misalignment && misalignment <= mRemainingFrames) {
2147 mRemainingFrames = misalignment;
2148 return ((double)mRemainingFrames * 1100000000) / ((double)sampleRate * speed);
2149 }
2150 #endif
2151
2152 }
2153 if (writtenFrames > 0) {
2154 AutoMutex lock(mLock);
2155 mFramesWritten += writtenFrames;
2156 }
2157 mRemainingFrames = notificationFrames;
2158 mRetryOnPartialBuffer = true;
2159
2160 // A lot has transpired since ns was calculated, so run again immediately and re-calculate
2161 return 0;
2162 }
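// Illustrative sketch only (not part of the original source): the client-side shape of an
// EVENT_MORE_DATA handler that the loop above drives. "fillFromSource" is a hypothetical
// helper returning the number of bytes it copied.
//
//     static void audioCallback(int event, void* user, void* info) {
//         if (event == AudioTrack::EVENT_MORE_DATA) {
//             AudioTrack::Buffer* buffer = static_cast<AudioTrack::Buffer*>(info);
//             buffer->size = fillFromSource(buffer->raw, buffer->size);
//             // returning 0 bytes means "no data right now"; processAudioBuffer() then
//             // sleeps for a computed interval instead of spinning
//         }
//     }
//
// Returning fewer bytes than requested makes the loop above continue with the remainder of
// the notification period rather than dropping it.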
2163
2164 status_t AudioTrack::restoreTrack_l(const char *from)
2165 {
2166 ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
2167 isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
2168 ++mSequence;
2169
2170 // refresh the audio configuration cache in this process to make sure we get new
2171 // output parameters and new IAudioFlinger in createTrack_l()
2172 AudioSystem::clearAudioConfigCache();
2173
2174 if (isOffloadedOrDirect_l() || mDoNotReconnect) {
2175 // FIXME re-creation of offloaded and direct tracks is not yet implemented;
2176 // reconsider enabling for linear PCM encodings when position can be preserved.
2177 return DEAD_OBJECT;
2178 }
2179
2180 // Save so we can return count since creation.
2181 mUnderrunCountOffset = getUnderrunCount_l();
2182
2183 // save the old static buffer position
2184 size_t bufferPosition = 0;
2185 int loopCount = 0;
2186 if (mStaticProxy != 0) {
2187 mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
2188 }
2189
2190 mFlags = mOrigFlags;
2191
2192 // If a new IAudioTrack is successfully created, createTrack_l() will modify the
2193 // following member variables: mAudioTrack, mCblkMemory and mCblk.
2194 // It will also delete the strong references on previous IAudioTrack and IMemory.
2195 // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
2196 status_t result = createTrack_l();
2197
2198 if (result == NO_ERROR) {
2199 // take the frames that will be lost by track recreation into account in saved position
2200 // For streaming tracks, this is the amount we obtained from the user/client
2201 // (not the number actually consumed at the server - those are already lost).
2202 if (mStaticProxy == 0) {
2203 mPosition = mReleased;
2204 }
2205 // Continue playback from last known position and restore loop.
2206 if (mStaticProxy != 0) {
2207 if (loopCount != 0) {
2208 mStaticProxy->setBufferPositionAndLoop(bufferPosition,
2209 mLoopStart, mLoopEnd, loopCount);
2210 } else {
2211 mStaticProxy->setBufferPosition(bufferPosition);
2212 if (bufferPosition == mFrameCount) {
2213 ALOGD("restoring track at end of static buffer");
2214 }
2215 }
2216 }
2217 if (mState == STATE_ACTIVE) {
2218 result = mAudioTrack->start();
2219 mFramesWrittenServerOffset = mFramesWritten; // server resets to zero so we offset
2220 }
2221 }
2222 if (result != NO_ERROR) {
2223 ALOGW("restoreTrack_l() failed status %d", result);
2224 mState = STATE_STOPPED;
2225 mReleased = 0;
2226 }
2227
2228 return result;
2229 }
2230
2231 Modulo<uint32_t> AudioTrack::updateAndGetPosition_l()
2232 {
2233 // This is the sole place to read server consumed frames
2234 Modulo<uint32_t> newServer(mProxy->getPosition());
2235 const int32_t delta = (newServer - mServer).signedValue();
2236 // TODO There is controversy about whether there can be "negative jitter" in server position.
2237 // This should be investigated further, and if possible, it should be addressed.
2238 // A more definite failure mode is infrequent polling by client.
2239 // One could call (void)getPosition_l() in releaseBuffer(),
2240 // so mReleased and mPosition are always lock-step as best possible.
2241 // That should ensure delta never goes negative for infrequent polling
2242 // unless the server has more than 2^31 frames in its buffer,
2243 // in which case the use of uint32_t for these counters has bigger issues.
2244 ALOGE_IF(delta < 0,
2245 "detected illegal retrograde motion by the server: mServer advanced by %d",
2246 delta);
2247 mServer = newServer;
2248 if (delta > 0) { // avoid retrograde
2249 mPosition += delta;
2250 }
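    // For example: if mServer was 0xFFFFFF00 and the proxy now reports 0x00000100, the
    // Modulo subtraction above yields a signed delta of +512 frames rather than a large
    // negative number, so mPosition keeps advancing correctly across the 32-bit wrap.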
2251 return mPosition;
2252 }
2253
2254 bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed) const
2255 {
2256 // applicable for mixing tracks only (not offloaded or direct)
2257 if (mStaticProxy != 0) {
2258 return true; // static tracks do not have issues with buffer sizing.
2259 }
2260 const size_t minFrameCount =
2261 calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed
2262 /*, 0 mNotificationsPerBufferReq*/);
2263 ALOGV("isSampleRateSpeedAllowed_l mFrameCount %zu minFrameCount %zu",
2264 mFrameCount, minFrameCount);
2265 return mFrameCount >= minFrameCount;
2266 }
2267
2268 status_t AudioTrack::setParameters(const String8& keyValuePairs)
2269 {
2270 AutoMutex lock(mLock);
2271 return mAudioTrack->setParameters(keyValuePairs);
2272 }
2273
2274 status_t AudioTrack::getTimestamp(ExtendedTimestamp *timestamp)
2275 {
2276 if (timestamp == nullptr) {
2277 return BAD_VALUE;
2278 }
2279 AutoMutex lock(mLock);
2280 return getTimestamp_l(timestamp);
2281 }
2282
2283 status_t AudioTrack::getTimestamp_l(ExtendedTimestamp *timestamp)
2284 {
2285 if (mCblk->mFlags & CBLK_INVALID) {
2286 const status_t status = restoreTrack_l("getTimestampExtended");
2287 if (status != OK) {
2288 // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2289 // recommending that the track be recreated.
2290 return DEAD_OBJECT;
2291 }
2292 }
2293 // check for offloaded/direct here in case restoring somehow changed those flags.
2294 if (isOffloadedOrDirect_l()) {
2295 return INVALID_OPERATION; // not supported
2296 }
2297 status_t status = mProxy->getTimestamp(timestamp);
2298 LOG_ALWAYS_FATAL_IF(status != OK, "status %d not allowed from proxy getTimestamp", status);
2299 bool found = false;
2300 timestamp->mPosition[ExtendedTimestamp::LOCATION_CLIENT] = mFramesWritten;
2301 timestamp->mTimeNs[ExtendedTimestamp::LOCATION_CLIENT] = 0;
2302 // server side frame offset in case AudioTrack has been restored.
2303 for (int i = ExtendedTimestamp::LOCATION_SERVER;
2304 i < ExtendedTimestamp::LOCATION_MAX; ++i) {
2305 if (timestamp->mTimeNs[i] >= 0) {
2306 // apply server offset (frames flushed is ignored
2307 // so we don't report the jump when the flush occurs).
2308 timestamp->mPosition[i] += mFramesWrittenServerOffset;
2309 found = true;
2310 }
2311 }
2312 return found ? OK : WOULD_BLOCK;
2313 }
2314
2315 status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
2316 {
2317 AutoMutex lock(mLock);
2318
2319 bool previousTimestampValid = mPreviousTimestampValid;
2320 // Set false here to cover all the error return cases.
2321 mPreviousTimestampValid = false;
2322
2323 switch (mState) {
2324 case STATE_ACTIVE:
2325 case STATE_PAUSED:
2326 break; // handle below
2327 case STATE_FLUSHED:
2328 case STATE_STOPPED:
2329 return WOULD_BLOCK;
2330 case STATE_STOPPING:
2331 case STATE_PAUSED_STOPPING:
2332 if (!isOffloaded_l()) {
2333 return INVALID_OPERATION;
2334 }
2335 break; // offloaded tracks handled below
2336 default:
2337 LOG_ALWAYS_FATAL("Invalid mState in getTimestamp(): %d", mState);
2338 break;
2339 }
2340
2341 if (mCblk->mFlags & CBLK_INVALID) {
2342 const status_t status = restoreTrack_l("getTimestamp");
2343 if (status != OK) {
2344 // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2345 // recommending that the track be recreated.
2346 return DEAD_OBJECT;
2347 }
2348 }
2349
2350 // The presented frame count must always lag behind the consumed frame count.
2351 // To avoid a race, read the presented frames first. This ensures that presented <= consumed.
2352
2353 status_t status;
2354 if (isOffloadedOrDirect_l()) {
2355 // use Binder to get timestamp
2356 status = mAudioTrack->getTimestamp(timestamp);
2357 } else {
2358 // read timestamp from shared memory
2359 ExtendedTimestamp ets;
2360 status = mProxy->getTimestamp(&ets);
2361 if (status == OK) {
2362 ExtendedTimestamp::Location location;
2363             status = ets.getBestTimestamp(&timestamp, &location);
2364
2365 if (status == OK) {
2366 // It is possible that the best location has moved from the kernel to the server.
2367 // In this case we adjust the position from the previous computed latency.
2368 if (location == ExtendedTimestamp::LOCATION_SERVER) {
2369 ALOGW_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_KERNEL,
2370 "getTimestamp() location moved from kernel to server");
2371 // check that the last kernel OK time info exists and the positions
2372 // are valid (if they predate the current track, the positions may
2373 // be zero or negative).
2374 const int64_t frames =
2375 (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
2376 ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0 ||
2377 ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] <= 0 ||
2378 ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] <= 0)
2379 ?
2380 int64_t((double)mAfLatency * mSampleRate * mPlaybackRate.mSpeed
2381 / 1000)
2382 :
2383 (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
2384 - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK]);
2385 ALOGV("frame adjustment:%lld timestamp:%s",
2386 (long long)frames, ets.toString().c_str());
2387 if (frames >= ets.mPosition[location]) {
2388 timestamp.mPosition = 0;
2389 } else {
2390 timestamp.mPosition = (uint32_t)(ets.mPosition[location] - frames);
2391 }
2392 } else if (location == ExtendedTimestamp::LOCATION_KERNEL) {
2393 ALOGV_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_SERVER,
2394 "getTimestamp() location moved from server to kernel");
2395 }
2396 mPreviousLocation = location;
2397 } else {
2398 // right after AudioTrack is started, one may not find a timestamp
2399 ALOGV("getBestTimestamp did not find timestamp");
2400 }
2401 }
2402 if (status == INVALID_OPERATION) {
2403 status = WOULD_BLOCK;
2404 }
2405 }
2406 if (status != NO_ERROR) {
2407 ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
2408 return status;
2409 }
2410 if (isOffloadedOrDirect_l()) {
2411 if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
2412 // use cached paused position in case another offloaded track is running.
2413 timestamp.mPosition = mPausedPosition;
2414             clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
2415 return NO_ERROR;
2416 }
2417
2418 // Check whether a pending flush or stop has completed, as those commands may
2419 // be asynchronous or return near finish or exhibit glitchy behavior.
2420 //
2421 // Originally this showed up as the first timestamp being a continuation of
2422 // the previous song under gapless playback.
2423 // However, we sometimes see zero timestamps, then a glitch of
2424 // the previous song's position, and then correct timestamps afterwards.
2425 if (mStartUs != 0 && mSampleRate != 0) {
2426 static const int kTimeJitterUs = 100000; // 100 ms
2427 static const int k1SecUs = 1000000;
2428
2429 const int64_t timeNow = getNowUs();
2430
2431 if (timeNow < mStartUs + k1SecUs) { // within first second of starting
2432 const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
2433 if (timestampTimeUs < mStartUs) {
2434 return WOULD_BLOCK; // stale timestamp time, occurs before start.
2435 }
2436 const int64_t deltaTimeUs = timestampTimeUs - mStartUs;
2437 const int64_t deltaPositionByUs = (double)timestamp.mPosition * 1000000
2438 / ((double)mSampleRate * mPlaybackRate.mSpeed);
2439
2440 if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
2441 // Verify that the counter can't count faster than the sample rate
2442 // since the start time. If greater, then that means we may have failed
2443 // to completely flush or stop the previous playing track.
2444 ALOGW_IF(!mTimestampStartupGlitchReported,
2445 "getTimestamp startup glitch detected"
2446 " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
2447 (long long)deltaTimeUs, (long long)deltaPositionByUs,
2448 timestamp.mPosition);
2449 mTimestampStartupGlitchReported = true;
2450 if (previousTimestampValid
2451 && mPreviousTimestamp.mPosition == 0 /* should be true if valid */) {
2452 timestamp = mPreviousTimestamp;
2453 mPreviousTimestampValid = true;
2454 return NO_ERROR;
2455 }
2456 return WOULD_BLOCK;
2457 }
2458 if (deltaPositionByUs != 0) {
2459 mStartUs = 0; // don't check again, we got valid nonzero position.
2460 }
2461 } else {
2462 mStartUs = 0; // don't check again, start time expired.
2463 }
2464 mTimestampStartupGlitchReported = false;
2465 }
2466 } else {
2467 // Update the mapping between local consumed (mPosition) and server consumed (mServer)
2468 (void) updateAndGetPosition_l();
2469 // Server consumed (mServer) and presented both use the same server time base,
2470 // and server consumed is always >= presented.
2471 // The delta between these represents the number of frames in the buffer pipeline.
2472         // If this delta is greater than the client position, it means that
2473 // actually presented is still stuck at the starting line (figuratively speaking),
2474 // waiting for the first frame to go by. So we can't report a valid timestamp yet.
2475 // Note: We explicitly use non-Modulo comparison here - potential wrap issue when
2476 // mPosition exceeds 32 bits.
2477 // TODO Remove when timestamp is updated to contain pipeline status info.
2478 const int32_t pipelineDepthInFrames = (mServer - timestamp.mPosition).signedValue();
2479 if (pipelineDepthInFrames > 0 /* should be true, but we check anyways */
2480 && (uint32_t)pipelineDepthInFrames > mPosition.value()) {
2481 return INVALID_OPERATION;
2482 }
2483 // Convert timestamp position from server time base to client time base.
2484 // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
2485 // But if we change it to 64-bit then this could fail.
2486 // Use Modulo computation here.
2487 timestamp.mPosition = (mPosition - mServer + timestamp.mPosition).value();
2488 // Immediately after a call to getPosition_l(), mPosition and
2489 // mServer both represent the same frame position. mPosition is
2490 // in client's point of view, and mServer is in server's point of
2491 // view. So the difference between them is the "fudge factor"
2492 // between client and server views due to stop() and/or new
2493 // IAudioTrack. And timestamp.mPosition is initially in server's
2494 // point of view, so we need to apply the same fudge factor to it.
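        // For illustration only (hypothetical values): with mPosition = 10000, mServer = 9000
        // (the client is 1000 frames ahead after a restore) and a server-reported
        // timestamp.mPosition of 8500, the rebased client position is
        // 10000 - 9000 + 8500 = 9500 frames.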
2495 }
2496
2497 // Prevent retrograde motion in timestamp.
2498 // This is sometimes caused by erratic reports of the available space in the ALSA drivers.
2499 if (status == NO_ERROR) {
2500 if (previousTimestampValid) {
2501 #define TIME_TO_NANOS(time) ((int64_t)time.tv_sec * 1000000000 + time.tv_nsec)
2502 const int64_t previousTimeNanos = TIME_TO_NANOS(mPreviousTimestamp.mTime);
2503 const int64_t currentTimeNanos = TIME_TO_NANOS(timestamp.mTime);
2504 #undef TIME_TO_NANOS
2505 if (currentTimeNanos < previousTimeNanos) {
2506 ALOGW("retrograde timestamp time");
2507 // FIXME Consider blocking this from propagating upwards.
2508 }
2509
2510 // Looking at signed delta will work even when the timestamps
2511 // are wrapping around.
2512 int32_t deltaPosition = (Modulo<uint32_t>(timestamp.mPosition)
2513 - mPreviousTimestamp.mPosition).signedValue();
2514 // position can bobble slightly as an artifact; this hides the bobble
2515 static const int32_t MINIMUM_POSITION_DELTA = 8;
2516 if (deltaPosition < 0) {
2517 // Only report once per position instead of spamming the log.
2518 if (!mRetrogradeMotionReported) {
2519 ALOGW("retrograde timestamp position corrected, %d = %u - %u",
2520 deltaPosition,
2521 timestamp.mPosition,
2522 mPreviousTimestamp.mPosition);
2523 mRetrogradeMotionReported = true;
2524 }
2525 } else {
2526 mRetrogradeMotionReported = false;
2527 }
2528 if (deltaPosition < MINIMUM_POSITION_DELTA) {
2529 timestamp = mPreviousTimestamp; // Use last valid timestamp.
2530 }
2531 }
2532 mPreviousTimestamp = timestamp;
2533 mPreviousTimestampValid = true;
2534 }
2535
2536 return status;
2537 }
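// Illustrative sketch only (not part of the original source): a typical way a client consumes
// the timestamp returned above, assuming 48 kHz playback and an sp<AudioTrack> "track".
//
//     AudioTimestamp ts;
//     if (track->getTimestamp(ts) == NO_ERROR) {
//         // ts.mPosition frames had been presented at CLOCK_MONOTONIC time ts.mTime
//         int64_t nowNs = systemTime(SYSTEM_TIME_MONOTONIC);
//         int64_t tsNs  = (int64_t)ts.mTime.tv_sec * 1000000000LL + ts.mTime.tv_nsec;
//         int64_t framesPresentedNow = (int64_t)ts.mPosition
//                 + (nowNs - tsNs) * 48000 / 1000000000LL;
//     }
//
// WOULD_BLOCK simply means no timestamp is available yet, e.g. right after start().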
2538
2539 String8 AudioTrack::getParameters(const String8& keys)
2540 {
2541 audio_io_handle_t output = getOutput();
2542 if (output != AUDIO_IO_HANDLE_NONE) {
2543 return AudioSystem::getParameters(output, keys);
2544 } else {
2545 return String8::empty();
2546 }
2547 }
2548
2549 bool AudioTrack::isOffloaded() const
2550 {
2551 AutoMutex lock(mLock);
2552 return isOffloaded_l();
2553 }
2554
2555 bool AudioTrack::isDirect() const
2556 {
2557 AutoMutex lock(mLock);
2558 return isDirect_l();
2559 }
2560
2561 bool AudioTrack::isOffloadedOrDirect() const
2562 {
2563 AutoMutex lock(mLock);
2564 return isOffloadedOrDirect_l();
2565 }
2566
2567
2568 status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
2569 {
2570
2571 const size_t SIZE = 256;
2572 char buffer[SIZE];
2573 String8 result;
2574
2575 result.append(" AudioTrack::dump\n");
2576 snprintf(buffer, 255, " stream type(%d), left - right volume(%f, %f)\n", mStreamType,
2577 mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
2578 result.append(buffer);
2579 snprintf(buffer, 255, " format(%d), channel count(%d), frame count(%zu)\n", mFormat,
2580 mChannelCount, mFrameCount);
2581 result.append(buffer);
2582 snprintf(buffer, 255, " sample rate(%u), speed(%f), status(%d)\n",
2583 mSampleRate, mPlaybackRate.mSpeed, mStatus);
2584 result.append(buffer);
2585 snprintf(buffer, 255, " state(%d), latency (%d)\n", mState, mLatency);
2586 result.append(buffer);
2587 ::write(fd, result.string(), result.size());
2588 return NO_ERROR;
2589 }
2590
2591 uint32_t AudioTrack::getUnderrunCount() const
2592 {
2593 AutoMutex lock(mLock);
2594 return getUnderrunCount_l();
2595 }
2596
2597 uint32_t AudioTrack::getUnderrunCount_l() const
2598 {
2599 return mProxy->getUnderrunCount() + mUnderrunCountOffset;
2600 }
2601
2602 uint32_t AudioTrack::getUnderrunFrames() const
2603 {
2604 AutoMutex lock(mLock);
2605 return mProxy->getUnderrunFrames();
2606 }
2607
2608 status_t AudioTrack::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback)
2609 {
2610 if (callback == 0) {
2611 ALOGW("%s adding NULL callback!", __FUNCTION__);
2612 return BAD_VALUE;
2613 }
2614 AutoMutex lock(mLock);
2615 if (mDeviceCallback == callback) {
2616 ALOGW("%s adding same callback!", __FUNCTION__);
2617 return INVALID_OPERATION;
2618 }
2619 status_t status = NO_ERROR;
2620 if (mOutput != AUDIO_IO_HANDLE_NONE) {
2621 if (mDeviceCallback != 0) {
2622 ALOGW("%s callback already present!", __FUNCTION__);
2623 AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
2624 }
2625 status = AudioSystem::addAudioDeviceCallback(callback, mOutput);
2626 }
2627 mDeviceCallback = callback;
2628 return status;
2629 }
2630
2631 status_t AudioTrack::removeAudioDeviceCallback(
2632 const sp<AudioSystem::AudioDeviceCallback>& callback)
2633 {
2634 if (callback == 0) {
2635 ALOGW("%s removing NULL callback!", __FUNCTION__);
2636 return BAD_VALUE;
2637 }
2638 AutoMutex lock(mLock);
2639 if (mDeviceCallback != callback) {
2640 ALOGW("%s removing different callback!", __FUNCTION__);
2641 return INVALID_OPERATION;
2642 }
2643 if (mOutput != AUDIO_IO_HANDLE_NONE) {
2644 AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
2645 }
2646 mDeviceCallback = 0;
2647 return NO_ERROR;
2648 }
2649
2650 status_t AudioTrack::pendingDuration(int32_t *msec, ExtendedTimestamp::Location location)
2651 {
2652 if (msec == nullptr ||
2653 (location != ExtendedTimestamp::LOCATION_SERVER
2654 && location != ExtendedTimestamp::LOCATION_KERNEL)) {
2655 return BAD_VALUE;
2656 }
2657 AutoMutex lock(mLock);
2658 // inclusive of offloaded and direct tracks.
2659 //
2660 // It is possible, but not enabled, to allow duration computation for non-pcm
2661 // audio_has_proportional_frames() formats because currently they have
2662 // the drain rate equivalent to the pcm sample rate * framesize.
2663 if (!isPurePcmData_l()) {
2664 return INVALID_OPERATION;
2665 }
2666 ExtendedTimestamp ets;
2667 if (getTimestamp_l(&ets) == OK
2668 && ets.mTimeNs[location] > 0) {
2669 int64_t diff = ets.mPosition[ExtendedTimestamp::LOCATION_CLIENT]
2670 - ets.mPosition[location];
2671 if (diff < 0) {
2672 *msec = 0;
2673 } else {
2674 // ms is the playback time by frames
2675 int64_t ms = (int64_t)((double)diff * 1000 /
2676 ((double)mSampleRate * mPlaybackRate.mSpeed));
2677 // clockdiff is the timestamp age (negative)
2678 int64_t clockdiff = (mState != STATE_ACTIVE) ? 0 :
2679 ets.mTimeNs[location]
2680 + ets.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_MONOTONIC]
2681 - systemTime(SYSTEM_TIME_MONOTONIC);
2682
2683 //ALOGV("ms: %lld clockdiff: %lld", (long long)ms, (long long)clockdiff);
2684 static const int NANOS_PER_MILLIS = 1000000;
2685 *msec = (int32_t)(ms + clockdiff / NANOS_PER_MILLIS);
2686 }
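        // For illustration only (hypothetical values): if the client is 48000 frames ahead of
        // the chosen location, then at mSampleRate = 48000 and speed = 1.0 this reports about
        // 1000 ms, reduced by the age of the timestamp (clockdiff) while the track is active.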
2687 return NO_ERROR;
2688 }
2689 if (location != ExtendedTimestamp::LOCATION_SERVER) {
2690 return INVALID_OPERATION; // LOCATION_KERNEL is not available
2691 }
2692 // use server position directly (offloaded and direct arrive here)
2693 updateAndGetPosition_l();
2694 int32_t diff = (Modulo<uint32_t>(mFramesWritten) - mPosition).signedValue();
2695 *msec = (diff <= 0) ? 0
2696 : (int32_t)((double)diff * 1000 / ((double)mSampleRate * mPlaybackRate.mSpeed));
2697 return NO_ERROR;
2698 }
2699
2700 // =========================================================================
2701
2702 void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
2703 {
2704 sp<AudioTrack> audioTrack = mAudioTrack.promote();
2705 if (audioTrack != 0) {
2706 AutoMutex lock(audioTrack->mLock);
2707 audioTrack->mProxy->binderDied();
2708 }
2709 }
2710
2711 // =========================================================================
2712
2713 AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
2714 : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
2715 mIgnoreNextPausedInt(false)
2716 {
2717 }
2718
2719 AudioTrack::AudioTrackThread::~AudioTrackThread()
2720 {
2721 }
2722
2723 bool AudioTrack::AudioTrackThread::threadLoop()
2724 {
2725 {
2726 AutoMutex _l(mMyLock);
2727 if (mPaused) {
2728 mMyCond.wait(mMyLock);
2729 // caller will check for exitPending()
2730 return true;
2731 }
2732 if (mIgnoreNextPausedInt) {
2733 mIgnoreNextPausedInt = false;
2734 mPausedInt = false;
2735 }
2736 if (mPausedInt) {
2737 if (mPausedNs > 0) {
2738 (void) mMyCond.waitRelative(mMyLock, mPausedNs);
2739 } else {
2740 mMyCond.wait(mMyLock);
2741 }
2742 mPausedInt = false;
2743 return true;
2744 }
2745 }
2746 if (exitPending()) {
2747 return false;
2748 }
2749 nsecs_t ns = mReceiver.processAudioBuffer();
2750 switch (ns) {
2751 case 0:
2752 return true;
2753 case NS_INACTIVE:
2754 pauseInternal();
2755 return true;
2756 case NS_NEVER:
2757 return false;
2758 case NS_WHENEVER:
2759 // Event driven: call wake() when callback notifications conditions change.
2760 ns = INT64_MAX;
2761 // fall through
2762 default:
2763 LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
2764 pauseInternal(ns);
2765 return true;
2766 }
2767 }
2768
2769 void AudioTrack::AudioTrackThread::requestExit()
2770 {
2771 // must be in this order to avoid a race condition
2772 Thread::requestExit();
2773 resume();
2774 }
2775
2776 void AudioTrack::AudioTrackThread::pause()
2777 {
2778 AutoMutex _l(mMyLock);
2779 mPaused = true;
2780 }
2781
2782 void AudioTrack::AudioTrackThread::resume()
2783 {
2784 AutoMutex _l(mMyLock);
2785 mIgnoreNextPausedInt = true;
2786 if (mPaused || mPausedInt) {
2787 mPaused = false;
2788 mPausedInt = false;
2789 mMyCond.signal();
2790 }
2791 }
2792
2793 void AudioTrack::AudioTrackThread::wake()
2794 {
2795 AutoMutex _l(mMyLock);
2796 if (!mPaused) {
2797 // wake() might be called while servicing a callback - ignore the next
2798 // pause time and call processAudioBuffer.
2799 mIgnoreNextPausedInt = true;
2800 if (mPausedInt && mPausedNs > 0) {
2801 // audio track is active and internally paused with timeout.
2802 mPausedInt = false;
2803 mMyCond.signal();
2804 }
2805 }
2806 }
2807
2808 void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
2809 {
2810 AutoMutex _l(mMyLock);
2811 mPausedInt = true;
2812 mPausedNs = ns;
2813 }
2814
2815 } // namespace android
2816