1 /*
2 **
3 ** Copyright 2007, The Android Open Source Project
4 **
5 ** Licensed under the Apache License, Version 2.0 (the "License");
6 ** you may not use this file except in compliance with the License.
7 ** You may obtain a copy of the License at
8 **
9 ** http://www.apache.org/licenses/LICENSE-2.0
10 **
11 ** Unless required by applicable law or agreed to in writing, software
12 ** distributed under the License is distributed on an "AS IS" BASIS,
13 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 ** See the License for the specific language governing permissions and
15 ** limitations under the License.
16 */
17
18 //#define LOG_NDEBUG 0
19 #define LOG_TAG "AudioTrack"
20
21 #include <inttypes.h>
22 #include <math.h>
23 #include <sys/resource.h>
24
25 #include <audio_utils/clock.h>
26 #include <audio_utils/primitives.h>
27 #include <binder/IPCThreadState.h>
28 #include <media/AudioTrack.h>
29 #include <utils/Log.h>
30 #include <private/media/AudioTrackShared.h>
31 #include <media/IAudioFlinger.h>
32 #include <media/AudioPolicyHelper.h>
33 #include <media/AudioResamplerPublic.h>
34
35 #define WAIT_PERIOD_MS 10
36 #define WAIT_STREAM_END_TIMEOUT_SEC 120
37 static const int kMaxLoopCountNotifications = 32;
38
39 namespace android {
40 // ---------------------------------------------------------------------------
41
42 // TODO: Move to a separate .h
43
44 template <typename T>
static inline const T &min(const T &x, const T &y) {
46 return x < y ? x : y;
47 }
48
49 template <typename T>
static inline const T &max(const T &x, const T &y) {
51 return x > y ? x : y;
52 }
53
54 static const int32_t NANOS_PER_SECOND = 1000000000;
55
static inline nsecs_t framesToNanoseconds(ssize_t frames, uint32_t sampleRate, float speed)
57 {
58 return ((double)frames * 1000000000) / ((double)sampleRate * speed);
59 }
60
static int64_t convertTimespecToUs(const struct timespec &tv)
62 {
63 return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
64 }
65
66 // TODO move to audio_utils.
static inline struct timespec convertNsToTimespec(int64_t ns) {
68 struct timespec tv;
69 tv.tv_sec = static_cast<time_t>(ns / NANOS_PER_SECOND);
70 tv.tv_nsec = static_cast<long>(ns % NANOS_PER_SECOND);
71 return tv;
72 }
73
74 // current monotonic time in microseconds.
static int64_t getNowUs()
76 {
77 struct timespec tv;
78 (void) clock_gettime(CLOCK_MONOTONIC, &tv);
79 return convertTimespecToUs(tv);
80 }
81
82 // FIXME: we don't use the pitch setting in the time stretcher (not working);
83 // instead we emulate it using our sample rate converter.
84 static const bool kFixPitch = true; // enable pitch fix
static inline uint32_t adjustSampleRate(uint32_t sampleRate, float pitch)
86 {
87 return kFixPitch ? (sampleRate * pitch + 0.5) : sampleRate;
88 }
89
static inline float adjustSpeed(float speed, float pitch)
91 {
92 return kFixPitch ? speed / max(pitch, AUDIO_TIMESTRETCH_PITCH_MIN_DELTA) : speed;
93 }
94
static inline float adjustPitch(float pitch)
96 {
97 return kFixPitch ? AUDIO_TIMESTRETCH_PITCH_NORMAL : pitch;
98 }
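// Worked example of the pitch emulation above (illustrative): with kFixPitch enabled, a request
// for pitch 2.0 at speed 1.0 on a 48000 Hz track becomes an effective sample rate of 96000 Hz
// (adjustSampleRate), an effective speed of 0.5 (adjustSpeed), and a pitch of
// AUDIO_TIMESTRETCH_PITCH_NORMAL (adjustPitch): resampling twice as fast raises the pitch by an
// octave (and would halve the duration), while the halved time-stretch speed restores the
// original duration.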
99
100 // Must match similar computation in createTrack_l in Threads.cpp.
101 // TODO: Move to a common library
static size_t calculateMinFrameCount(
103 uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
104 uint32_t sampleRate, float speed /*, uint32_t notificationsPerBufferReq*/)
105 {
106 // Ensure that buffer depth covers at least audio hardware latency
107 uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
108 if (minBufCount < 2) {
109 minBufCount = 2;
110 }
111 #if 0
112 // The notificationsPerBufferReq parameter is not yet used for non-fast tracks,
113 // but keeping the code here to make it easier to add later.
114 if (minBufCount < notificationsPerBufferReq) {
115 minBufCount = notificationsPerBufferReq;
116 }
117 #endif
118 ALOGV("calculateMinFrameCount afLatency %u afFrameCount %u afSampleRate %u "
119 "sampleRate %u speed %f minBufCount: %u" /*" notificationsPerBufferReq %u"*/,
120 afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount
121 /*, notificationsPerBufferReq*/);
122 return minBufCount * sourceFramesNeededWithTimestretch(
123 sampleRate, afFrameCount, afSampleRate, speed);
124 }
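// Worked example (illustrative): with afLatencyMs = 40, afFrameCount = 960 and
// afSampleRate = 48000, one mixer buffer covers (1000 * 960) / 48000 = 20 ms, so
// minBufCount = 40 / 20 = 2 (already at the floor of 2). The result is then scaled by
// sourceFramesNeededWithTimestretch() to convert mixer frames into source frames at the
// track's own sample rate and playback speed.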
125
126 // static
status_t AudioTrack::getMinFrameCount(
128 size_t* frameCount,
129 audio_stream_type_t streamType,
130 uint32_t sampleRate)
131 {
132 if (frameCount == NULL) {
133 return BAD_VALUE;
134 }
135
136 // FIXME handle in server, like createTrack_l(), possible missing info:
137 // audio_io_handle_t output
138 // audio_format_t format
139 // audio_channel_mask_t channelMask
140 // audio_output_flags_t flags (FAST)
141 uint32_t afSampleRate;
142 status_t status;
143 status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
144 if (status != NO_ERROR) {
145 ALOGE("Unable to query output sample rate for stream type %d; status %d",
146 streamType, status);
147 return status;
148 }
149 size_t afFrameCount;
150 status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
151 if (status != NO_ERROR) {
152 ALOGE("Unable to query output frame count for stream type %d; status %d",
153 streamType, status);
154 return status;
155 }
156 uint32_t afLatency;
157 status = AudioSystem::getOutputLatency(&afLatency, streamType);
158 if (status != NO_ERROR) {
159 ALOGE("Unable to query output latency for stream type %d; status %d",
160 streamType, status);
161 return status;
162 }
163
164 // When called from createTrack, speed is 1.0f (normal speed).
165 // This is rechecked again on setting playback rate (TODO: on setting sample rate, too).
166 *frameCount = calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, 1.0f
167 /*, 0 notificationsPerBufferReq*/);
168
169 // The formula above should always produce a non-zero value under normal circumstances:
170 // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
171 // Return error in the unlikely event that it does not, as that's part of the API contract.
172 if (*frameCount == 0) {
173 ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %u",
174 streamType, sampleRate);
175 return BAD_VALUE;
176 }
177 ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
178 *frameCount, afFrameCount, afSampleRate, afLatency);
179 return NO_ERROR;
180 }
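// Usage sketch (client side, illustrative):
//     size_t minFrames = 0;
//     if (AudioTrack::getMinFrameCount(&minFrames, AUDIO_STREAM_MUSIC, 48000 /*Hz*/) == NO_ERROR) {
//         // request at least minFrames (or pass 0 to let set() choose a default)
//     }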
181
182 // ---------------------------------------------------------------------------
183
AudioTrack::AudioTrack()
185 : mStatus(NO_INIT),
186 mState(STATE_STOPPED),
187 mPreviousPriority(ANDROID_PRIORITY_NORMAL),
188 mPreviousSchedulingGroup(SP_DEFAULT),
189 mPausedPosition(0),
190 mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
191 mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE),
192 mPortId(AUDIO_PORT_HANDLE_NONE)
193 {
194 mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
195 mAttributes.usage = AUDIO_USAGE_UNKNOWN;
196 mAttributes.flags = 0x0;
197 strcpy(mAttributes.tags, "");
198 }
199
AudioTrack::AudioTrack(
201 audio_stream_type_t streamType,
202 uint32_t sampleRate,
203 audio_format_t format,
204 audio_channel_mask_t channelMask,
205 size_t frameCount,
206 audio_output_flags_t flags,
207 callback_t cbf,
208 void* user,
209 int32_t notificationFrames,
210 audio_session_t sessionId,
211 transfer_type transferType,
212 const audio_offload_info_t *offloadInfo,
213 uid_t uid,
214 pid_t pid,
215 const audio_attributes_t* pAttributes,
216 bool doNotReconnect,
217 float maxRequiredSpeed)
218 : mStatus(NO_INIT),
219 mState(STATE_STOPPED),
220 mPreviousPriority(ANDROID_PRIORITY_NORMAL),
221 mPreviousSchedulingGroup(SP_DEFAULT),
222 mPausedPosition(0),
223 mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
224 mPortId(AUDIO_PORT_HANDLE_NONE)
225 {
226 mStatus = set(streamType, sampleRate, format, channelMask,
227 frameCount, flags, cbf, user, notificationFrames,
228 0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
229 offloadInfo, uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
230 }
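// Usage sketch for the streaming constructor above (illustrative; remaining parameters use
// their default arguments, so with no callback the track resolves to TRANSFER_SYNC):
//     sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_MUSIC, 48000, AUDIO_FORMAT_PCM_16_BIT,
//             AUDIO_CHANNEL_OUT_STEREO, 0 /*frameCount*/, AUDIO_OUTPUT_FLAG_NONE);
//     if (track->initCheck() == NO_ERROR) {
//         track->start();
//         // feed PCM via track->write(buffer, sizeInBytes)
//     }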
231
AudioTrack::AudioTrack(
233 audio_stream_type_t streamType,
234 uint32_t sampleRate,
235 audio_format_t format,
236 audio_channel_mask_t channelMask,
237 const sp<IMemory>& sharedBuffer,
238 audio_output_flags_t flags,
239 callback_t cbf,
240 void* user,
241 int32_t notificationFrames,
242 audio_session_t sessionId,
243 transfer_type transferType,
244 const audio_offload_info_t *offloadInfo,
245 uid_t uid,
246 pid_t pid,
247 const audio_attributes_t* pAttributes,
248 bool doNotReconnect,
249 float maxRequiredSpeed)
250 : mStatus(NO_INIT),
251 mState(STATE_STOPPED),
252 mPreviousPriority(ANDROID_PRIORITY_NORMAL),
253 mPreviousSchedulingGroup(SP_DEFAULT),
254 mPausedPosition(0),
255 mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
256 mPortId(AUDIO_PORT_HANDLE_NONE)
257 {
258 mStatus = set(streamType, sampleRate, format, channelMask,
259 0 /*frameCount*/, flags, cbf, user, notificationFrames,
260 sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
261 uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
262 }
263
AudioTrack::~AudioTrack()
265 {
266 if (mStatus == NO_ERROR) {
267 // Make sure that callback function exits in the case where
268 // it is looping on buffer full condition in obtainBuffer().
269 // Otherwise the callback thread will never exit.
270 stop();
271 if (mAudioTrackThread != 0) {
272 mProxy->interrupt();
273 mAudioTrackThread->requestExit(); // see comment in AudioTrack.h
274 mAudioTrackThread->requestExitAndWait();
275 mAudioTrackThread.clear();
276 }
277 // No lock here: worst case we remove a NULL callback which will be a nop
278 if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
279 AudioSystem::removeAudioDeviceCallback(this, mOutput);
280 }
281 IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
282 mAudioTrack.clear();
283 mCblkMemory.clear();
284 mSharedBuffer.clear();
285 IPCThreadState::self()->flushCommands();
286 ALOGV("~AudioTrack, releasing session id %d from %d on behalf of %d",
287 mSessionId, IPCThreadState::self()->getCallingPid(), mClientPid);
288 AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
289 }
290 }
291
status_t AudioTrack::set(
293 audio_stream_type_t streamType,
294 uint32_t sampleRate,
295 audio_format_t format,
296 audio_channel_mask_t channelMask,
297 size_t frameCount,
298 audio_output_flags_t flags,
299 callback_t cbf,
300 void* user,
301 int32_t notificationFrames,
302 const sp<IMemory>& sharedBuffer,
303 bool threadCanCallJava,
304 audio_session_t sessionId,
305 transfer_type transferType,
306 const audio_offload_info_t *offloadInfo,
307 uid_t uid,
308 pid_t pid,
309 const audio_attributes_t* pAttributes,
310 bool doNotReconnect,
311 float maxRequiredSpeed)
312 {
313 ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
314 "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
315 streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
316 sessionId, transferType, uid, pid);
317
318 mThreadCanCallJava = threadCanCallJava;
319
320 switch (transferType) {
321 case TRANSFER_DEFAULT:
322 if (sharedBuffer != 0) {
323 transferType = TRANSFER_SHARED;
324 } else if (cbf == NULL || threadCanCallJava) {
325 transferType = TRANSFER_SYNC;
326 } else {
327 transferType = TRANSFER_CALLBACK;
328 }
329 break;
330 case TRANSFER_CALLBACK:
331 if (cbf == NULL || sharedBuffer != 0) {
332 ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
333 return BAD_VALUE;
334 }
335 break;
336 case TRANSFER_OBTAIN:
337 case TRANSFER_SYNC:
338 if (sharedBuffer != 0) {
        ALOGE("Transfer type TRANSFER_OBTAIN or TRANSFER_SYNC but sharedBuffer != 0");
340 return BAD_VALUE;
341 }
342 break;
343 case TRANSFER_SHARED:
344 if (sharedBuffer == 0) {
345 ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
346 return BAD_VALUE;
347 }
348 break;
349 default:
350 ALOGE("Invalid transfer type %d", transferType);
351 return BAD_VALUE;
352 }
353 mSharedBuffer = sharedBuffer;
354 mTransfer = transferType;
355 mDoNotReconnect = doNotReconnect;
356
357 ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
358 sharedBuffer->size());
359
360 ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);
361
362 // invariant that mAudioTrack != 0 is true only after set() returns successfully
363 if (mAudioTrack != 0) {
364 ALOGE("Track already in use");
365 return INVALID_OPERATION;
366 }
367
368 // handle default values first.
369 if (streamType == AUDIO_STREAM_DEFAULT) {
370 streamType = AUDIO_STREAM_MUSIC;
371 }
372 if (pAttributes == NULL) {
373 if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
374 ALOGE("Invalid stream type %d", streamType);
375 return BAD_VALUE;
376 }
377 mStreamType = streamType;
378
379 } else {
380 // stream type shouldn't be looked at, this track has audio attributes
381 memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
382 ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
383 mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
384 mStreamType = AUDIO_STREAM_DEFAULT;
385 if ((mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
386 flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
387 }
388 if ((mAttributes.flags & AUDIO_FLAG_LOW_LATENCY) != 0) {
389 flags = (audio_output_flags_t) (flags | AUDIO_OUTPUT_FLAG_FAST);
390 }
391 // check deep buffer after flags have been modified above
392 if (flags == AUDIO_OUTPUT_FLAG_NONE && (mAttributes.flags & AUDIO_FLAG_DEEP_BUFFER) != 0) {
393 flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
394 }
395 }
396
397 // these below should probably come from the audioFlinger too...
398 if (format == AUDIO_FORMAT_DEFAULT) {
399 format = AUDIO_FORMAT_PCM_16_BIT;
400 } else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
401 mAttributes.flags |= AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
402 }
403
404 // validate parameters
405 if (!audio_is_valid_format(format)) {
406 ALOGE("Invalid format %#x", format);
407 return BAD_VALUE;
408 }
409 mFormat = format;
410
411 if (!audio_is_output_channel(channelMask)) {
412 ALOGE("Invalid channel mask %#x", channelMask);
413 return BAD_VALUE;
414 }
415 mChannelMask = channelMask;
416 uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
417 mChannelCount = channelCount;
418
419 // force direct flag if format is not linear PCM
420 // or offload was requested
421 if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
422 || !audio_is_linear_pcm(format)) {
423 ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
424 ? "Offload request, forcing to Direct Output"
425 : "Not linear PCM, forcing to Direct Output");
426 flags = (audio_output_flags_t)
427 // FIXME why can't we allow direct AND fast?
428 ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
429 }
430
431 // force direct flag if HW A/V sync requested
432 if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
433 flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
434 }
435
436 if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
437 if (audio_has_proportional_frames(format)) {
438 mFrameSize = channelCount * audio_bytes_per_sample(format);
439 } else {
440 mFrameSize = sizeof(uint8_t);
441 }
442 } else {
443 ALOG_ASSERT(audio_has_proportional_frames(format));
444 mFrameSize = channelCount * audio_bytes_per_sample(format);
445 // createTrack will return an error if PCM format is not supported by server,
446 // so no need to check for specific PCM formats here
447 }
448
449 // sampling rate must be specified for direct outputs
450 if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
451 return BAD_VALUE;
452 }
453 mSampleRate = sampleRate;
454 mOriginalSampleRate = sampleRate;
455 mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
456 // 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
457 mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);
458
459 // Make copy of input parameter offloadInfo so that in the future:
460 // (a) createTrack_l doesn't need it as an input parameter
461 // (b) we can support re-creation of offloaded tracks
462 if (offloadInfo != NULL) {
463 mOffloadInfoCopy = *offloadInfo;
464 mOffloadInfo = &mOffloadInfoCopy;
465 } else {
466 mOffloadInfo = NULL;
467 memset(&mOffloadInfoCopy, 0, sizeof(audio_offload_info_t));
468 }
469
470 mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
471 mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
472 mSendLevel = 0.0f;
473 // mFrameCount is initialized in createTrack_l
474 mReqFrameCount = frameCount;
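    // notificationFrames convention (as handled below, illustrative summary): a value >= 0 is a
    // notification period in frames (0 lets the server choose a default); a negative value is
    // only allowed for FAST tracks without an explicit frameCount and means "this many
    // notifications per buffer", clamped to [1, 8] (e.g. notificationFrames = -4 requests
    // 4 callbacks per track buffer).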
475 if (notificationFrames >= 0) {
476 mNotificationFramesReq = notificationFrames;
477 mNotificationsPerBufferReq = 0;
478 } else {
479 if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
480 ALOGE("notificationFrames=%d not permitted for non-fast track",
481 notificationFrames);
482 return BAD_VALUE;
483 }
484 if (frameCount > 0) {
485 ALOGE("notificationFrames=%d not permitted with non-zero frameCount=%zu",
486 notificationFrames, frameCount);
487 return BAD_VALUE;
488 }
489 mNotificationFramesReq = 0;
490 const uint32_t minNotificationsPerBuffer = 1;
491 const uint32_t maxNotificationsPerBuffer = 8;
492 mNotificationsPerBufferReq = min(maxNotificationsPerBuffer,
493 max((uint32_t) -notificationFrames, minNotificationsPerBuffer));
494 ALOGW_IF(mNotificationsPerBufferReq != (uint32_t) -notificationFrames,
495 "notificationFrames=%d clamped to the range -%u to -%u",
496 notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
497 }
498 mNotificationFramesAct = 0;
499 if (sessionId == AUDIO_SESSION_ALLOCATE) {
500 mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
501 } else {
502 mSessionId = sessionId;
503 }
504 int callingpid = IPCThreadState::self()->getCallingPid();
505 int mypid = getpid();
506 if (uid == AUDIO_UID_INVALID || (callingpid != mypid)) {
507 mClientUid = IPCThreadState::self()->getCallingUid();
508 } else {
509 mClientUid = uid;
510 }
511 if (pid == -1 || (callingpid != mypid)) {
512 mClientPid = callingpid;
513 } else {
514 mClientPid = pid;
515 }
516 mAuxEffectId = 0;
517 mOrigFlags = mFlags = flags;
518 mCbf = cbf;
519
520 if (cbf != NULL) {
521 mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
522 mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
523 // thread begins in paused state, and will not reference us until start()
524 }
525
526 // create the IAudioTrack
527 status_t status = createTrack_l();
528
529 if (status != NO_ERROR) {
530 if (mAudioTrackThread != 0) {
531 mAudioTrackThread->requestExit(); // see comment in AudioTrack.h
532 mAudioTrackThread->requestExitAndWait();
533 mAudioTrackThread.clear();
534 }
535 return status;
536 }
537
538 mStatus = NO_ERROR;
539 mUserData = user;
540 mLoopCount = 0;
541 mLoopStart = 0;
542 mLoopEnd = 0;
543 mLoopCountNotified = 0;
544 mMarkerPosition = 0;
545 mMarkerReached = false;
546 mNewPosition = 0;
547 mUpdatePeriod = 0;
548 mPosition = 0;
549 mReleased = 0;
550 mStartNs = 0;
551 mStartFromZeroUs = 0;
552 AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
553 mSequence = 1;
554 mObservedSequence = mSequence;
555 mInUnderrun = false;
556 mPreviousTimestampValid = false;
557 mTimestampStartupGlitchReported = false;
558 mRetrogradeMotionReported = false;
559 mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
560 mStartTs.mPosition = 0;
561 mUnderrunCountOffset = 0;
562 mFramesWritten = 0;
563 mFramesWrittenServerOffset = 0;
564 mFramesWrittenAtRestore = -1; // -1 is a unique initializer.
565 mVolumeHandler = new VolumeHandler();
566 return NO_ERROR;
567 }
568
569 // -------------------------------------------------------------------------
570
status_t AudioTrack::start()
572 {
573 AutoMutex lock(mLock);
574
575 if (mState == STATE_ACTIVE) {
576 return INVALID_OPERATION;
577 }
578
579 mInUnderrun = true;
580
581 State previousState = mState;
582 if (previousState == STATE_PAUSED_STOPPING) {
583 mState = STATE_STOPPING;
584 } else {
585 mState = STATE_ACTIVE;
586 }
587 (void) updateAndGetPosition_l();
588
589 // save start timestamp
590 if (isOffloadedOrDirect_l()) {
591 if (getTimestamp_l(mStartTs) != OK) {
592 mStartTs.mPosition = 0;
593 }
594 } else {
595 if (getTimestamp_l(&mStartEts) != OK) {
596 mStartEts.clear();
597 }
598 }
599 mStartNs = systemTime(); // save this for timestamp adjustment after starting.
600 if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
601 // reset current position as seen by client to 0
602 mPosition = 0;
603 mPreviousTimestampValid = false;
604 mTimestampStartupGlitchReported = false;
605 mRetrogradeMotionReported = false;
606 mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
607
608 if (!isOffloadedOrDirect_l()
609 && mStartEts.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] > 0) {
610 // Server side has consumed something, but is it finished consuming?
611 // It is possible since flush and stop are asynchronous that the server
612 // is still active at this point.
613 ALOGV("start: server read:%lld cumulative flushed:%lld client written:%lld",
614 (long long)(mFramesWrittenServerOffset
615 + mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER]),
616 (long long)mStartEts.mFlushed,
617 (long long)mFramesWritten);
618 // mStartEts is already adjusted by mFramesWrittenServerOffset, so we delta adjust.
619 mFramesWrittenServerOffset -= mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER];
620 }
621 mFramesWritten = 0;
622 mProxy->clearTimestamp(); // need new server push for valid timestamp
623 mMarkerReached = false;
624
625 // For offloaded tracks, we don't know if the hardware counters are really zero here,
626 // since the flush is asynchronous and stop may not fully drain.
627 // We save the time when the track is started to later verify whether
628 // the counters are realistic (i.e. start from zero after this time).
629 mStartFromZeroUs = mStartNs / 1000;
630
631 // force refresh of remaining frames by processAudioBuffer() as last
632 // write before stop could be partial.
633 mRefreshRemaining = true;
634 }
635 mNewPosition = mPosition + mUpdatePeriod;
636 int32_t flags = android_atomic_and(~(CBLK_STREAM_END_DONE | CBLK_DISABLED), &mCblk->mFlags);
637
638 status_t status = NO_ERROR;
639 if (!(flags & CBLK_INVALID)) {
640 status = mAudioTrack->start();
641 if (status == DEAD_OBJECT) {
642 flags |= CBLK_INVALID;
643 }
644 }
645 if (flags & CBLK_INVALID) {
646 status = restoreTrack_l("start");
647 }
648
649 // resume or pause the callback thread as needed.
650 sp<AudioTrackThread> t = mAudioTrackThread;
651 if (status == NO_ERROR) {
652 if (t != 0) {
653 if (previousState == STATE_STOPPING) {
654 mProxy->interrupt();
655 } else {
656 t->resume();
657 }
658 } else {
659 mPreviousPriority = getpriority(PRIO_PROCESS, 0);
660 get_sched_policy(0, &mPreviousSchedulingGroup);
661 androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
662 }
663
664 // Start our local VolumeHandler for restoration purposes.
665 mVolumeHandler->setStarted();
666 } else {
667 ALOGE("start() status %d", status);
668 mState = previousState;
669 if (t != 0) {
670 if (previousState != STATE_STOPPING) {
671 t->pause();
672 }
673 } else {
674 setpriority(PRIO_PROCESS, 0, mPreviousPriority);
675 set_sched_policy(0, mPreviousSchedulingGroup);
676 }
677 }
678
679 return status;
680 }
681
void AudioTrack::stop()
683 {
684 AutoMutex lock(mLock);
685 if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
686 return;
687 }
688
689 if (isOffloaded_l()) {
690 mState = STATE_STOPPING;
691 } else {
692 mState = STATE_STOPPED;
693 ALOGD_IF(mSharedBuffer == nullptr,
694 "stop() called with %u frames delivered", mReleased.value());
695 mReleased = 0;
696 }
697
698 mProxy->interrupt();
699 mAudioTrack->stop();
700
701 // Note: legacy handling - stop does not clear playback marker
702 // and periodic update counter, but flush does for streaming tracks.
703
704 if (mSharedBuffer != 0) {
705 // clear buffer position and loop count.
706 mStaticProxy->setBufferPositionAndLoop(0 /* position */,
707 0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
708 }
709
710 sp<AudioTrackThread> t = mAudioTrackThread;
711 if (t != 0) {
712 if (!isOffloaded_l()) {
713 t->pause();
714 }
715 } else {
716 setpriority(PRIO_PROCESS, 0, mPreviousPriority);
717 set_sched_policy(0, mPreviousSchedulingGroup);
718 }
719 }
720
bool AudioTrack::stopped() const
722 {
723 AutoMutex lock(mLock);
724 return mState != STATE_ACTIVE;
725 }
726
void AudioTrack::flush()
728 {
729 if (mSharedBuffer != 0) {
730 return;
731 }
732 AutoMutex lock(mLock);
733 if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
734 return;
735 }
736 flush_l();
737 }
738
void AudioTrack::flush_l()
740 {
741 ALOG_ASSERT(mState != STATE_ACTIVE);
742
743 // clear playback marker and periodic update counter
744 mMarkerPosition = 0;
745 mMarkerReached = false;
746 mUpdatePeriod = 0;
747 mRefreshRemaining = true;
748
749 mState = STATE_FLUSHED;
750 mReleased = 0;
751 if (isOffloaded_l()) {
752 mProxy->interrupt();
753 }
754 mProxy->flush();
755 mAudioTrack->flush();
756 }
757
void AudioTrack::pause()
759 {
760 AutoMutex lock(mLock);
761 if (mState == STATE_ACTIVE) {
762 mState = STATE_PAUSED;
763 } else if (mState == STATE_STOPPING) {
764 mState = STATE_PAUSED_STOPPING;
765 } else {
766 return;
767 }
768 mProxy->interrupt();
769 mAudioTrack->pause();
770
771 if (isOffloaded_l()) {
772 if (mOutput != AUDIO_IO_HANDLE_NONE) {
773 // An offload output can be re-used between two audio tracks having
774 // the same configuration. A timestamp query for a paused track
775 // while the other is running would return an incorrect time.
776 // To fix this, cache the playback position on a pause() and return
777 // this time when requested until the track is resumed.
778
779 // OffloadThread sends HAL pause in its threadLoop. Time saved
780 // here can be slightly off.
781
782 // TODO: check return code for getRenderPosition.
783
784 uint32_t halFrames;
785 AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
786 ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
787 }
788 }
789 }
790
status_t AudioTrack::setVolume(float left, float right)
792 {
793 // This duplicates a test by AudioTrack JNI, but that is not the only caller
794 if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
795 isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
796 return BAD_VALUE;
797 }
798
799 AutoMutex lock(mLock);
800 mVolume[AUDIO_INTERLEAVE_LEFT] = left;
801 mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
802
803 mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));
804
805 if (isOffloaded_l()) {
806 mAudioTrack->signal();
807 }
808 return NO_ERROR;
809 }
810
status_t AudioTrack::setVolume(float volume)
812 {
813 return setVolume(volume, volume);
814 }
815
status_t AudioTrack::setAuxEffectSendLevel(float level)
817 {
818 // This duplicates a test by AudioTrack JNI, but that is not the only caller
819 if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
820 return BAD_VALUE;
821 }
822
823 AutoMutex lock(mLock);
824 mSendLevel = level;
825 mProxy->setSendLevel(level);
826
827 return NO_ERROR;
828 }
829
void AudioTrack::getAuxEffectSendLevel(float* level) const
831 {
832 if (level != NULL) {
833 *level = mSendLevel;
834 }
835 }
836
status_t AudioTrack::setSampleRate(uint32_t rate)
838 {
839 AutoMutex lock(mLock);
840 if (rate == mSampleRate) {
841 return NO_ERROR;
842 }
843 if (isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
844 return INVALID_OPERATION;
845 }
846 if (mOutput == AUDIO_IO_HANDLE_NONE) {
847 return NO_INIT;
848 }
849 // NOTE: it is theoretically possible, but highly unlikely, that a device change
850 // could mean a previously allowed sampling rate is no longer allowed.
851 uint32_t afSamplingRate;
852 if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
853 return NO_INIT;
854 }
855 // pitch is emulated by adjusting speed and sampleRate
856 const uint32_t effectiveSampleRate = adjustSampleRate(rate, mPlaybackRate.mPitch);
857 if (rate == 0 || effectiveSampleRate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
858 return BAD_VALUE;
859 }
860 // TODO: Should we also check if the buffer size is compatible?
861
862 mSampleRate = rate;
863 mProxy->setSampleRate(effectiveSampleRate);
864
865 return NO_ERROR;
866 }
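// Example (illustrative): with a 48000 Hz mixer output, a requested rate is rejected if the
// effective rate (after the pitch adjustment above) exceeds
// 48000 * AUDIO_RESAMPLER_DOWN_RATIO_MAX; note that raising pitch also raises the effective
// rate via adjustSampleRate().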
867
uint32_t AudioTrack::getSampleRate() const
869 {
870 AutoMutex lock(mLock);
871
872 // sample rate can be updated during playback by the offloaded decoder so we need to
873 // query the HAL and update if needed.
874 // FIXME use Proxy return channel to update the rate from server and avoid polling here
875 if (isOffloadedOrDirect_l()) {
876 if (mOutput != AUDIO_IO_HANDLE_NONE) {
877 uint32_t sampleRate = 0;
878 status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
879 if (status == NO_ERROR) {
880 mSampleRate = sampleRate;
881 }
882 }
883 }
884 return mSampleRate;
885 }
886
uint32_t AudioTrack::getOriginalSampleRate() const
888 {
889 return mOriginalSampleRate;
890 }
891
status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
893 {
894 AutoMutex lock(mLock);
895 if (isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
896 return NO_ERROR;
897 }
898 if (isOffloadedOrDirect_l()) {
899 return INVALID_OPERATION;
900 }
901 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
902 return INVALID_OPERATION;
903 }
904
905 ALOGV("setPlaybackRate (input): mSampleRate:%u mSpeed:%f mPitch:%f",
906 mSampleRate, playbackRate.mSpeed, playbackRate.mPitch);
907 // pitch is emulated by adjusting speed and sampleRate
908 const uint32_t effectiveRate = adjustSampleRate(mSampleRate, playbackRate.mPitch);
909 const float effectiveSpeed = adjustSpeed(playbackRate.mSpeed, playbackRate.mPitch);
910 const float effectivePitch = adjustPitch(playbackRate.mPitch);
911 AudioPlaybackRate playbackRateTemp = playbackRate;
912 playbackRateTemp.mSpeed = effectiveSpeed;
913 playbackRateTemp.mPitch = effectivePitch;
914
915 ALOGV("setPlaybackRate (effective): mSampleRate:%u mSpeed:%f mPitch:%f",
916 effectiveRate, effectiveSpeed, effectivePitch);
917
918 if (!isAudioPlaybackRateValid(playbackRateTemp)) {
919 ALOGW("setPlaybackRate(%f, %f) failed (effective rate out of bounds)",
920 playbackRate.mSpeed, playbackRate.mPitch);
921 return BAD_VALUE;
922 }
923 // Check if the buffer size is compatible.
924 if (!isSampleRateSpeedAllowed_l(effectiveRate, effectiveSpeed)) {
925 ALOGW("setPlaybackRate(%f, %f) failed (buffer size)",
926 playbackRate.mSpeed, playbackRate.mPitch);
927 return BAD_VALUE;
928 }
929
930 // Check resampler ratios are within bounds
931 if ((uint64_t)effectiveRate > (uint64_t)mSampleRate *
932 (uint64_t)AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
933 ALOGW("setPlaybackRate(%f, %f) failed. Resample rate exceeds max accepted value",
934 playbackRate.mSpeed, playbackRate.mPitch);
935 return BAD_VALUE;
936 }
937
938 if ((uint64_t)effectiveRate * (uint64_t)AUDIO_RESAMPLER_UP_RATIO_MAX < (uint64_t)mSampleRate) {
939 ALOGW("setPlaybackRate(%f, %f) failed. Resample rate below min accepted value",
940 playbackRate.mSpeed, playbackRate.mPitch);
941 return BAD_VALUE;
942 }
943 mPlaybackRate = playbackRate;
    // set effective rates
945 mProxy->setPlaybackRate(playbackRateTemp);
946 mProxy->setSampleRate(effectiveRate); // FIXME: not quite "atomic" with setPlaybackRate
947 return NO_ERROR;
948 }
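// Worked example (illustrative): requesting {speed 1.0, pitch 0.5} on a 44100 Hz track yields
// effectiveRate = 22050, effectiveSpeed = 2.0 and effectivePitch = AUDIO_TIMESTRETCH_PITCH_NORMAL,
// i.e. the pitch drop is realized by resampling at half rate and time-stretching at double speed.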
949
const AudioPlaybackRate& AudioTrack::getPlaybackRate() const
951 {
952 AutoMutex lock(mLock);
953 return mPlaybackRate;
954 }
955
ssize_t AudioTrack::getBufferSizeInFrames()
957 {
958 AutoMutex lock(mLock);
959 if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
960 return NO_INIT;
961 }
962 return (ssize_t) mProxy->getBufferSizeInFrames();
963 }
964
status_t AudioTrack::getBufferDurationInUs(int64_t *duration)
966 {
967 if (duration == nullptr) {
968 return BAD_VALUE;
969 }
970 AutoMutex lock(mLock);
971 if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
972 return NO_INIT;
973 }
974 ssize_t bufferSizeInFrames = (ssize_t) mProxy->getBufferSizeInFrames();
975 if (bufferSizeInFrames < 0) {
976 return (status_t)bufferSizeInFrames;
977 }
978 *duration = (int64_t)((double)bufferSizeInFrames * 1000000
979 / ((double)mSampleRate * mPlaybackRate.mSpeed));
980 return NO_ERROR;
981 }
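// Worked example (illustrative): a 4800-frame buffer at 48000 Hz and speed 1.0 reports
// 4800 * 1000000 / 48000 = 100000 us (100 ms); at speed 2.0 the same buffer drains in 50000 us.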
982
ssize_t AudioTrack::setBufferSizeInFrames(size_t bufferSizeInFrames)
984 {
985 AutoMutex lock(mLock);
986 if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
987 return NO_INIT;
988 }
989 // Reject if timed track or compressed audio.
990 if (!audio_is_linear_pcm(mFormat)) {
991 return INVALID_OPERATION;
992 }
993 return (ssize_t) mProxy->setBufferSizeInFrames((uint32_t) bufferSizeInFrames);
994 }
995
status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
997 {
998 if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
999 return INVALID_OPERATION;
1000 }
1001
1002 if (loopCount == 0) {
1003 ;
1004 } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
1005 loopEnd - loopStart >= MIN_LOOP) {
1006 ;
1007 } else {
1008 return BAD_VALUE;
1009 }
1010
1011 AutoMutex lock(mLock);
1012 // See setPosition() regarding setting parameters such as loop points or position while active
1013 if (mState == STATE_ACTIVE) {
1014 return INVALID_OPERATION;
1015 }
1016 setLoop_l(loopStart, loopEnd, loopCount);
1017 return NO_ERROR;
1018 }
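// Usage sketch (illustrative), valid only for static (shared buffer) tracks while not active:
//     track->setLoop(0, track->frameCount(), 3);   // loop over the whole buffer 3 times
//                                                  // (-1 would loop indefinitely)
//     track->setLoop(0, 0, 0);                     // clear looping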
1019
void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
1021 {
1022 // We do not update the periodic notification point.
1023 // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
1024 mLoopCount = loopCount;
1025 mLoopEnd = loopEnd;
1026 mLoopStart = loopStart;
1027 mLoopCountNotified = loopCount;
1028 mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
1029
1030 // Waking the AudioTrackThread is not needed as this cannot be called when active.
1031 }
1032
status_t AudioTrack::setMarkerPosition(uint32_t marker)
1034 {
1035 // The only purpose of setting marker position is to get a callback
1036 if (mCbf == NULL || isOffloadedOrDirect()) {
1037 return INVALID_OPERATION;
1038 }
1039
1040 AutoMutex lock(mLock);
1041 mMarkerPosition = marker;
1042 mMarkerReached = false;
1043
1044 sp<AudioTrackThread> t = mAudioTrackThread;
1045 if (t != 0) {
1046 t->wake();
1047 }
1048 return NO_ERROR;
1049 }
1050
status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
1052 {
1053 if (isOffloadedOrDirect()) {
1054 return INVALID_OPERATION;
1055 }
1056 if (marker == NULL) {
1057 return BAD_VALUE;
1058 }
1059
1060 AutoMutex lock(mLock);
1061 mMarkerPosition.getValue(marker);
1062
1063 return NO_ERROR;
1064 }
1065
status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
1067 {
1068 // The only purpose of setting position update period is to get a callback
1069 if (mCbf == NULL || isOffloadedOrDirect()) {
1070 return INVALID_OPERATION;
1071 }
1072
1073 AutoMutex lock(mLock);
1074 mNewPosition = updateAndGetPosition_l() + updatePeriod;
1075 mUpdatePeriod = updatePeriod;
1076
1077 sp<AudioTrackThread> t = mAudioTrackThread;
1078 if (t != 0) {
1079 t->wake();
1080 }
1081 return NO_ERROR;
1082 }
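// Usage sketch (illustrative): setPositionUpdatePeriod(sampleRate / 10) requests an
// EVENT_NEW_POS callback roughly every 100 ms of playback; setMarkerPosition() similarly
// arms a one-shot EVENT_MARKER callback.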
1083
status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
1085 {
1086 if (isOffloadedOrDirect()) {
1087 return INVALID_OPERATION;
1088 }
1089 if (updatePeriod == NULL) {
1090 return BAD_VALUE;
1091 }
1092
1093 AutoMutex lock(mLock);
1094 *updatePeriod = mUpdatePeriod;
1095
1096 return NO_ERROR;
1097 }
1098
status_t AudioTrack::setPosition(uint32_t position)
1100 {
1101 if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
1102 return INVALID_OPERATION;
1103 }
1104 if (position > mFrameCount) {
1105 return BAD_VALUE;
1106 }
1107
1108 AutoMutex lock(mLock);
1109 // Currently we require that the player is inactive before setting parameters such as position
1110 // or loop points. Otherwise, there could be a race condition: the application could read the
1111 // current position, compute a new position or loop parameters, and then set that position or
1112 // loop parameters but it would do the "wrong" thing since the position has continued to advance
1113 // in the mean time. If we ever provide a sequencer in server, we could allow a way for the app
1114 // to specify how it wants to handle such scenarios.
1115 if (mState == STATE_ACTIVE) {
1116 return INVALID_OPERATION;
1117 }
1118 // After setting the position, use full update period before notification.
1119 mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
1120 mStaticProxy->setBufferPosition(position);
1121
1122 // Waking the AudioTrackThread is not needed as this cannot be called when active.
1123 return NO_ERROR;
1124 }
1125
status_t AudioTrack::getPosition(uint32_t *position)
1127 {
1128 if (position == NULL) {
1129 return BAD_VALUE;
1130 }
1131
1132 AutoMutex lock(mLock);
1133 // FIXME: offloaded and direct tracks call into the HAL for render positions
1134 // for compressed/synced data; however, we use proxy position for pure linear pcm data
1135 // as we do not know the capability of the HAL for pcm position support and standby.
1136 // There may be some latency differences between the HAL position and the proxy position.
1137 if (isOffloadedOrDirect_l() && !isPurePcmData_l()) {
1138 uint32_t dspFrames = 0;
1139
1140 if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
1141 ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
1142 *position = mPausedPosition;
1143 return NO_ERROR;
1144 }
1145
1146 if (mOutput != AUDIO_IO_HANDLE_NONE) {
1147 uint32_t halFrames; // actually unused
1148 (void) AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
1149 // FIXME: on getRenderPosition() error, we return OK with frame position 0.
1150 }
1151 // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
1152 // due to hardware latency. We leave this behavior for now.
1153 *position = dspFrames;
1154 } else {
1155 if (mCblk->mFlags & CBLK_INVALID) {
1156 (void) restoreTrack_l("getPosition");
1157 // FIXME: for compatibility with the Java API we ignore the restoreTrack_l()
1158 // error here (e.g. DEAD_OBJECT) and return OK with the last recorded server position.
1159 }
1160
1161 // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
1162 *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
1163 0 : updateAndGetPosition_l().value();
1164 }
1165 return NO_ERROR;
1166 }
1167
status_t AudioTrack::getBufferPosition(uint32_t *position)
1169 {
1170 if (mSharedBuffer == 0) {
1171 return INVALID_OPERATION;
1172 }
1173 if (position == NULL) {
1174 return BAD_VALUE;
1175 }
1176
1177 AutoMutex lock(mLock);
1178 *position = mStaticProxy->getBufferPosition();
1179 return NO_ERROR;
1180 }
1181
status_t AudioTrack::reload()
1183 {
1184 if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
1185 return INVALID_OPERATION;
1186 }
1187
1188 AutoMutex lock(mLock);
1189 // See setPosition() regarding setting parameters such as loop points or position while active
1190 if (mState == STATE_ACTIVE) {
1191 return INVALID_OPERATION;
1192 }
1193 mNewPosition = mUpdatePeriod;
1194 (void) updateAndGetPosition_l();
1195 mPosition = 0;
1196 mPreviousTimestampValid = false;
1197 #if 0
1198 // The documentation is not clear on the behavior of reload() and the restoration
1199 // of loop count. Historically we have not restored loop count, start, end,
1200 // but it makes sense if one desires to repeat playing a particular sound.
1201 if (mLoopCount != 0) {
1202 mLoopCountNotified = mLoopCount;
1203 mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
1204 }
1205 #endif
1206 mStaticProxy->setBufferPosition(0);
1207 return NO_ERROR;
1208 }
1209
audio_io_handle_t AudioTrack::getOutput() const
1211 {
1212 AutoMutex lock(mLock);
1213 return mOutput;
1214 }
1215
status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
1217 AutoMutex lock(mLock);
1218 if (mSelectedDeviceId != deviceId) {
1219 mSelectedDeviceId = deviceId;
1220 if (mStatus == NO_ERROR) {
1221 android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
1222 }
1223 }
1224 return NO_ERROR;
1225 }
1226
audio_port_handle_t AudioTrack::getOutputDevice() {
1228 AutoMutex lock(mLock);
1229 return mSelectedDeviceId;
1230 }
1231
1232 // must be called with mLock held
void AudioTrack::updateRoutedDeviceId_l()
1234 {
1235 // if the track is inactive, do not update actual device as the output stream maybe routed
1236 // to a device not relevant to this client because of other active use cases.
1237 if (mState != STATE_ACTIVE) {
1238 return;
1239 }
1240 if (mOutput != AUDIO_IO_HANDLE_NONE) {
1241 audio_port_handle_t deviceId = AudioSystem::getDeviceIdForIo(mOutput);
1242 if (deviceId != AUDIO_PORT_HANDLE_NONE) {
1243 mRoutedDeviceId = deviceId;
1244 }
1245 }
1246 }
1247
audio_port_handle_t AudioTrack::getRoutedDeviceId() {
1249 AutoMutex lock(mLock);
1250 updateRoutedDeviceId_l();
1251 return mRoutedDeviceId;
1252 }
1253
status_t AudioTrack::attachAuxEffect(int effectId)
1255 {
1256 AutoMutex lock(mLock);
1257 status_t status = mAudioTrack->attachAuxEffect(effectId);
1258 if (status == NO_ERROR) {
1259 mAuxEffectId = effectId;
1260 }
1261 return status;
1262 }
1263
audio_stream_type_t AudioTrack::streamType() const
1265 {
1266 if (mStreamType == AUDIO_STREAM_DEFAULT) {
1267 return audio_attributes_to_stream_type(&mAttributes);
1268 }
1269 return mStreamType;
1270 }
1271
uint32_t AudioTrack::latency()
1273 {
1274 AutoMutex lock(mLock);
1275 updateLatency_l();
1276 return mLatency;
1277 }
1278
1279 // -------------------------------------------------------------------------
1280
1281 // must be called with mLock held
void AudioTrack::updateLatency_l()
1283 {
1284 status_t status = AudioSystem::getLatency(mOutput, &mAfLatency);
1285 if (status != NO_ERROR) {
1286 ALOGW("getLatency(%d) failed status %d", mOutput, status);
1287 } else {
1288 // FIXME don't believe this lie
1289 mLatency = mAfLatency + (1000LL * mFrameCount) / mSampleRate;
1290 }
1291 }
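// Worked example (illustrative): with mAfLatency = 20 ms, mFrameCount = 960 and
// mSampleRate = 48000, the reported latency is 20 + (1000 * 960) / 48000 = 40 ms.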
1292
1293 // TODO Move this macro to a common header file for enum to string conversion in audio framework.
1294 #define MEDIA_CASE_ENUM(name) case name: return #name
const char * AudioTrack::convertTransferToText(transfer_type transferType) {
1296 switch (transferType) {
1297 MEDIA_CASE_ENUM(TRANSFER_DEFAULT);
1298 MEDIA_CASE_ENUM(TRANSFER_CALLBACK);
1299 MEDIA_CASE_ENUM(TRANSFER_OBTAIN);
1300 MEDIA_CASE_ENUM(TRANSFER_SYNC);
1301 MEDIA_CASE_ENUM(TRANSFER_SHARED);
1302 default:
1303 return "UNRECOGNIZED";
1304 }
1305 }
1306
status_t AudioTrack::createTrack_l()
1308 {
1309 const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
1310 if (audioFlinger == 0) {
1311 ALOGE("Could not get audioflinger");
1312 return NO_INIT;
1313 }
1314
1315 audio_io_handle_t output;
1316 audio_stream_type_t streamType = mStreamType;
1317 audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;
1318 bool callbackAdded = false;
1319
1320 // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
1321 // After fast request is denied, we will request again if IAudioTrack is re-created.
1322
1323 status_t status;
1324 audio_config_t config = AUDIO_CONFIG_INITIALIZER;
1325 config.sample_rate = mSampleRate;
1326 config.channel_mask = mChannelMask;
1327 config.format = mFormat;
1328 config.offload_info = mOffloadInfoCopy;
1329 mRoutedDeviceId = mSelectedDeviceId;
1330 status = AudioSystem::getOutputForAttr(attr, &output,
1331 mSessionId, &streamType, mClientUid,
1332 &config,
1333 mFlags, &mRoutedDeviceId, &mPortId);
1334
1335 if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
1336 ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u,"
1337 " format %#x, channel mask %#x, flags %#x",
1338 mSessionId, streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask,
1339 mFlags);
1340 return BAD_VALUE;
1341 }
1342 {
1343 // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
1344 // we must release it ourselves if anything goes wrong.
1345
1346 // Not all of these values are needed under all conditions, but it is easier to get them all
1347 status = AudioSystem::getLatency(output, &mAfLatency);
1348 if (status != NO_ERROR) {
1349 ALOGE("getLatency(%d) failed status %d", output, status);
1350 goto release;
1351 }
1352 ALOGV("createTrack_l() output %d afLatency %u", output, mAfLatency);
1353
1354 status = AudioSystem::getFrameCount(output, &mAfFrameCount);
1355 if (status != NO_ERROR) {
1356 ALOGE("getFrameCount(output=%d) status %d", output, status);
1357 goto release;
1358 }
1359
1360 // TODO consider making this a member variable if there are other uses for it later
1361 size_t afFrameCountHAL;
1362 status = AudioSystem::getFrameCountHAL(output, &afFrameCountHAL);
1363 if (status != NO_ERROR) {
1364 ALOGE("getFrameCountHAL(output=%d) status %d", output, status);
1365 goto release;
1366 }
1367 ALOG_ASSERT(afFrameCountHAL > 0);
1368
1369 status = AudioSystem::getSamplingRate(output, &mAfSampleRate);
1370 if (status != NO_ERROR) {
1371 ALOGE("getSamplingRate(output=%d) status %d", output, status);
1372 goto release;
1373 }
1374 if (mSampleRate == 0) {
1375 mSampleRate = mAfSampleRate;
1376 mOriginalSampleRate = mAfSampleRate;
1377 }
1378
1379 // Client can only express a preference for FAST. Server will perform additional tests.
1380 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1381 // either of these use cases:
1382 // use case 1: shared buffer
1383 bool sharedBuffer = mSharedBuffer != 0;
1384 bool transferAllowed =
1385 // use case 2: callback transfer mode
1386 (mTransfer == TRANSFER_CALLBACK) ||
1387 // use case 3: obtain/release mode
1388 (mTransfer == TRANSFER_OBTAIN) ||
1389 // use case 4: synchronous write
1390 ((mTransfer == TRANSFER_SYNC) && mThreadCanCallJava);
1391
1392 bool useCaseAllowed = sharedBuffer || transferAllowed;
1393 if (!useCaseAllowed) {
1394 ALOGW("AUDIO_OUTPUT_FLAG_FAST denied, not shared buffer and transfer = %s",
1395 convertTransferToText(mTransfer));
1396 }
1397
1398 // sample rates must also match
1399 bool sampleRateAllowed = mSampleRate == mAfSampleRate;
1400 if (!sampleRateAllowed) {
1401 ALOGW("AUDIO_OUTPUT_FLAG_FAST denied, rates do not match %u Hz, require %u Hz",
1402 mSampleRate, mAfSampleRate);
1403 }
1404
1405 bool fastAllowed = useCaseAllowed && sampleRateAllowed;
1406 if (!fastAllowed) {
1407 mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1408 }
1409 }
1410
1411 mNotificationFramesAct = mNotificationFramesReq;
1412
1413 size_t frameCount = mReqFrameCount;
1414 if (!audio_has_proportional_frames(mFormat)) {
1415
1416 if (mSharedBuffer != 0) {
1417 // Same comment as below about ignoring frameCount parameter for set()
1418 frameCount = mSharedBuffer->size();
1419 } else if (frameCount == 0) {
1420 frameCount = mAfFrameCount;
1421 }
1422 if (mNotificationFramesAct != frameCount) {
1423 mNotificationFramesAct = frameCount;
1424 }
1425 } else if (mSharedBuffer != 0) {
1426 // FIXME: Ensure client side memory buffers need
1427 // not have additional alignment beyond sample
1428 // (e.g. 16 bit stereo accessed as 32 bit frame).
1429 size_t alignment = audio_bytes_per_sample(mFormat);
1430 if (alignment & 1) {
1431 // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
1432 alignment = 1;
1433 }
1434 if (mChannelCount > 1) {
1435 // More than 2 channels does not require stronger alignment than stereo
1436 alignment <<= 1;
1437 }
1438 if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
1439 ALOGE("Invalid buffer alignment: address %p, channel count %u",
1440 mSharedBuffer->pointer(), mChannelCount);
1441 status = BAD_VALUE;
1442 goto release;
1443 }
1444
1445 // When initializing a shared buffer AudioTrack via constructors,
1446 // there's no frameCount parameter.
1447 // But when initializing a shared buffer AudioTrack via set(),
1448 // there _is_ a frameCount parameter. We silently ignore it.
1449 frameCount = mSharedBuffer->size() / mFrameSize;
1450 } else {
1451 size_t minFrameCount = 0;
1452 // For fast tracks the frame count calculations and checks are mostly done by server,
1453 // but we try to respect the application's request for notifications per buffer.
1454 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1455 if (mNotificationsPerBufferReq > 0) {
1456 // Avoid possible arithmetic overflow during multiplication.
1457 // mNotificationsPerBuffer is clamped to a small integer earlier, so it is unlikely.
1458 if (mNotificationsPerBufferReq > SIZE_MAX / afFrameCountHAL) {
1459 ALOGE("Requested notificationPerBuffer=%u ignored for HAL frameCount=%zu",
1460 mNotificationsPerBufferReq, afFrameCountHAL);
1461 } else {
1462 minFrameCount = afFrameCountHAL * mNotificationsPerBufferReq;
1463 }
1464 }
1465 } else {
1466 // for normal tracks precompute the frame count based on speed.
1467 const float speed = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
1468 max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
1469 minFrameCount = calculateMinFrameCount(
1470 mAfLatency, mAfFrameCount, mAfSampleRate, mSampleRate,
1471 speed /*, 0 mNotificationsPerBufferReq*/);
1472 }
1473 if (frameCount < minFrameCount) {
1474 frameCount = minFrameCount;
1475 }
1476 }
1477
1478 audio_output_flags_t flags = mFlags;
1479
1480 pid_t tid = -1;
1481 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1482 // It is currently meaningless to request SCHED_FIFO for a Java thread. Even if the
1483 // application-level code follows all non-blocking design rules, the language runtime
1484 // doesn't also follow those rules, so the thread will not benefit overall.
1485 if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
1486 tid = mAudioTrackThread->getTid();
1487 }
1488 }
1489
1490 size_t temp = frameCount; // temp may be replaced by a revised value of frameCount,
1491 // but we will still need the original value also
1492 audio_session_t originalSessionId = mSessionId;
1493 sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
1494 mSampleRate,
1495 mFormat,
1496 mChannelMask,
1497 &temp,
1498 &flags,
1499 mSharedBuffer,
1500 output,
1501 mClientPid,
1502 tid,
1503 &mSessionId,
1504 mClientUid,
1505 &status,
1506 mPortId);
1507 ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
1508 "session ID changed from %d to %d", originalSessionId, mSessionId);
1509
1510 if (status != NO_ERROR) {
1511 ALOGE("AudioFlinger could not create track, status: %d", status);
1512 goto release;
1513 }
1514 ALOG_ASSERT(track != 0);
1515
1516 // AudioFlinger now owns the reference to the I/O handle,
1517 // so we are no longer responsible for releasing it.
1518
1519 // FIXME compare to AudioRecord
1520 sp<IMemory> iMem = track->getCblk();
1521 if (iMem == 0) {
1522 ALOGE("Could not get control block");
1523 status = NO_INIT;
1524 goto release;
1525 }
1526 void *iMemPointer = iMem->pointer();
1527 if (iMemPointer == NULL) {
1528 ALOGE("Could not get control block pointer");
1529 status = NO_INIT;
1530 goto release;
1531 }
1532 // invariant that mAudioTrack != 0 is true only after set() returns successfully
1533 if (mAudioTrack != 0) {
1534 IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
1535 mDeathNotifier.clear();
1536 }
1537 mAudioTrack = track;
1538 mCblkMemory = iMem;
1539 IPCThreadState::self()->flushCommands();
1540
1541 audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1542 mCblk = cblk;
1543 // note that temp is the (possibly revised) value of frameCount
1544 if (temp < frameCount || (frameCount == 0 && temp == 0)) {
1545 // In current design, AudioTrack client checks and ensures frame count validity before
1546 // passing it to AudioFlinger so AudioFlinger should not return a different value except
1547 // for fast track as it uses a special method of assigning frame count.
1548 ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
1549 }
1550 frameCount = temp;
1551
1552 mAwaitBoost = false;
1553 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1554 if (flags & AUDIO_OUTPUT_FLAG_FAST) {
1555 ALOGI("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu -> %zu", frameCount, temp);
1556 if (!mThreadCanCallJava) {
1557 mAwaitBoost = true;
1558 }
1559 } else {
1560 ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu", frameCount,
1561 temp);
1562 }
1563 }
1564 mFlags = flags;
1565
1566 // Make sure that application is notified with sufficient margin before underrun.
1567 // The client can divide the AudioTrack buffer into sub-buffers,
1568 // and expresses its desire to server as the notification frame count.
1569 if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
1570 size_t maxNotificationFrames;
1571 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1572 // notify every HAL buffer, regardless of the size of the track buffer
1573 maxNotificationFrames = afFrameCountHAL;
1574 } else {
1575 // For normal tracks, use at least double-buffering if no sample rate conversion,
1576 // or at least triple-buffering if there is sample rate conversion
1577 const int nBuffering = mOriginalSampleRate == mAfSampleRate ? 2 : 3;
1578 maxNotificationFrames = frameCount / nBuffering;
1579 }
1580 if (mNotificationFramesAct == 0 || mNotificationFramesAct > maxNotificationFrames) {
1581 if (mNotificationFramesAct == 0) {
1582 ALOGD("Client defaulted notificationFrames to %zu for frameCount %zu",
1583 maxNotificationFrames, frameCount);
1584 } else {
1585 ALOGW("Client adjusted notificationFrames from %u to %zu for frameCount %zu",
1586 mNotificationFramesAct, maxNotificationFrames, frameCount);
1587 }
1588 mNotificationFramesAct = (uint32_t) maxNotificationFrames;
1589 }
1590 }
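    // Illustrative example (hypothetical numbers, not taken from this code): for a streaming
    // PCM track with frameCount == 4800 and no sample rate conversion, nBuffering == 2, so
    // maxNotificationFrames == 2400. A client that requested notificationFrames == 4000 would
    // be clamped to 2400 so that at least two notification periods fit in the track buffer.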
1591
1592     // mOutput != output includes the case where mOutput == AUDIO_IO_HANDLE_NONE for first creation
1593 if (mDeviceCallback != 0 && mOutput != output) {
1594 if (mOutput != AUDIO_IO_HANDLE_NONE) {
1595 AudioSystem::removeAudioDeviceCallback(this, mOutput);
1596 }
1597 AudioSystem::addAudioDeviceCallback(this, output);
1598 callbackAdded = true;
1599 }
1600
1601 // We retain a copy of the I/O handle, but don't own the reference
1602 mOutput = output;
1603 mRefreshRemaining = true;
1604
1605 // Starting address of buffers in shared memory. If there is a shared buffer, buffers
1606 // is the value of pointer() for the shared buffer, otherwise buffers points
1607 // immediately after the control block. This address is for the mapping within client
1608 // address space. AudioFlinger::TrackBase::mBuffer is for the server address space.
1609 void* buffers;
1610 if (mSharedBuffer == 0) {
1611 buffers = cblk + 1;
1612 } else {
1613 buffers = mSharedBuffer->pointer();
1614 if (buffers == NULL) {
1615 ALOGE("Could not get buffer pointer");
1616 status = NO_INIT;
1617 goto release;
1618 }
1619 }
1620
1621 mAudioTrack->attachAuxEffect(mAuxEffectId);
1622 mFrameCount = frameCount;
1623 updateLatency_l(); // this refetches mAfLatency and sets mLatency
1624
1625 // If IAudioTrack is re-created, don't let the requested frameCount
1626 // decrease. This can confuse clients that cache frameCount().
1627 if (frameCount > mReqFrameCount) {
1628 mReqFrameCount = frameCount;
1629 }
1630
1631 // reset server position to 0 as we have new cblk.
1632 mServer = 0;
1633
1634 // update proxy
1635 if (mSharedBuffer == 0) {
1636 mStaticProxy.clear();
1637 mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
1638 } else {
1639 mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
1640 mProxy = mStaticProxy;
1641 }
1642
1643 mProxy->setVolumeLR(gain_minifloat_pack(
1644 gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
1645 gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));
1646
1647 mProxy->setSendLevel(mSendLevel);
1648 const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
1649 const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
1650 const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
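    // Illustrative example (assumed values only): with mSampleRate == 48000 and a requested
    // playback rate of {speed 1.0, pitch 1.2}, the pitch is emulated by the sample rate
    // converter, so the proxy is given roughly sampleRate 57600 and speed 0.83, while the
    // pitch passed on is the normal value 1.0.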
1651 mProxy->setSampleRate(effectiveSampleRate);
1652
1653 AudioPlaybackRate playbackRateTemp = mPlaybackRate;
1654 playbackRateTemp.mSpeed = effectiveSpeed;
1655 playbackRateTemp.mPitch = effectivePitch;
1656 mProxy->setPlaybackRate(playbackRateTemp);
1657 mProxy->setMinimum(mNotificationFramesAct);
1658
1659 mDeathNotifier = new DeathNotifier(this);
1660 IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
1661
1662 return NO_ERROR;
1663 }
1664
1665 release:
1666 AudioSystem::releaseOutput(output, streamType, mSessionId);
1667 if (callbackAdded) {
1668         // note: mOutput is always valid if callbackAdded is true
1669 AudioSystem::removeAudioDeviceCallback(this, mOutput);
1670 }
1671 if (status == NO_ERROR) {
1672 status = NO_INIT;
1673 }
1674 return status;
1675 }
1676
1677 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
1678 {
1679 if (audioBuffer == NULL) {
1680 if (nonContig != NULL) {
1681 *nonContig = 0;
1682 }
1683 return BAD_VALUE;
1684 }
1685 if (mTransfer != TRANSFER_OBTAIN) {
1686 audioBuffer->frameCount = 0;
1687 audioBuffer->size = 0;
1688 audioBuffer->raw = NULL;
1689 if (nonContig != NULL) {
1690 *nonContig = 0;
1691 }
1692 return INVALID_OPERATION;
1693 }
1694
1695 const struct timespec *requested;
1696 struct timespec timeout;
1697 if (waitCount == -1) {
1698 requested = &ClientProxy::kForever;
1699 } else if (waitCount == 0) {
1700 requested = &ClientProxy::kNonBlocking;
1701 } else if (waitCount > 0) {
1702 long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1703 timeout.tv_sec = ms / 1000;
1704 timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1705 requested = &timeout;
1706 } else {
1707 ALOGE("%s invalid waitCount %d", __func__, waitCount);
1708 requested = NULL;
1709 }
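    // Example of the conversion above (illustrative numbers only): with WAIT_PERIOD_MS == 10,
    // a waitCount of 250 yields ms == 2500, so timeout == { tv_sec 2, tv_nsec 500000000 }.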
1710 return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
1711 }
1712
1713 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1714 struct timespec *elapsed, size_t *nonContig)
1715 {
1716 // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1717 uint32_t oldSequence = 0;
1718 uint32_t newSequence;
1719
1720 Proxy::Buffer buffer;
1721 status_t status = NO_ERROR;
1722
1723 static const int32_t kMaxTries = 5;
1724 int32_t tryCounter = kMaxTries;
1725
1726 do {
1727 // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1728 // keep them from going away if another thread re-creates the track during obtainBuffer()
1729 sp<AudioTrackClientProxy> proxy;
1730 sp<IMemory> iMem;
1731
1732 { // start of lock scope
1733 AutoMutex lock(mLock);
1734
1735 newSequence = mSequence;
1736 // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1737 if (status == DEAD_OBJECT) {
1738 // re-create track, unless someone else has already done so
1739 if (newSequence == oldSequence) {
1740 status = restoreTrack_l("obtainBuffer");
1741 if (status != NO_ERROR) {
1742 buffer.mFrameCount = 0;
1743 buffer.mRaw = NULL;
1744 buffer.mNonContig = 0;
1745 break;
1746 }
1747 }
1748 }
1749 oldSequence = newSequence;
1750
1751 if (status == NOT_ENOUGH_DATA) {
1752 restartIfDisabled();
1753 }
1754
1755 // Keep the extra references
1756 proxy = mProxy;
1757 iMem = mCblkMemory;
1758
1759 if (mState == STATE_STOPPING) {
1760 status = -EINTR;
1761 buffer.mFrameCount = 0;
1762 buffer.mRaw = NULL;
1763 buffer.mNonContig = 0;
1764 break;
1765 }
1766
1767 // Non-blocking if track is stopped or paused
1768 if (mState != STATE_ACTIVE) {
1769 requested = &ClientProxy::kNonBlocking;
1770 }
1771
1772 } // end of lock scope
1773
1774 buffer.mFrameCount = audioBuffer->frameCount;
1775 // FIXME starts the requested timeout and elapsed over from scratch
1776 status = proxy->obtainBuffer(&buffer, requested, elapsed);
1777 } while (((status == DEAD_OBJECT) || (status == NOT_ENOUGH_DATA)) && (tryCounter-- > 0));
1778
1779 audioBuffer->frameCount = buffer.mFrameCount;
1780 audioBuffer->size = buffer.mFrameCount * mFrameSize;
1781 audioBuffer->raw = buffer.mRaw;
1782 if (nonContig != NULL) {
1783 *nonContig = buffer.mNonContig;
1784 }
1785 return status;
1786 }
1787
1788 void AudioTrack::releaseBuffer(const Buffer* audioBuffer)
1789 {
1790 // FIXME add error checking on mode, by adding an internal version
1791 if (mTransfer == TRANSFER_SHARED) {
1792 return;
1793 }
1794
1795 size_t stepCount = audioBuffer->size / mFrameSize;
1796 if (stepCount == 0) {
1797 return;
1798 }
1799
1800 Proxy::Buffer buffer;
1801 buffer.mFrameCount = stepCount;
1802 buffer.mRaw = audioBuffer->raw;
1803
1804 AutoMutex lock(mLock);
1805 mReleased += stepCount;
1806 mInUnderrun = false;
1807 mProxy->releaseBuffer(&buffer);
1808
1809 // restart track if it was disabled by audioflinger due to previous underrun
1810 restartIfDisabled();
1811 }
1812
1813 void AudioTrack::restartIfDisabled()
1814 {
1815 int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
1816 if ((mState == STATE_ACTIVE) && (flags & CBLK_DISABLED)) {
1817 ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
1818 // FIXME ignoring status
1819 mAudioTrack->start();
1820 }
1821 }
1822
1823 // -------------------------------------------------------------------------
1824
1825 ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1826 {
1827 if (mTransfer != TRANSFER_SYNC) {
1828 return INVALID_OPERATION;
1829 }
1830
1831 if (isDirect()) {
1832 AutoMutex lock(mLock);
1833 int32_t flags = android_atomic_and(
1834 ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
1835 &mCblk->mFlags);
1836 if (flags & CBLK_INVALID) {
1837 return DEAD_OBJECT;
1838 }
1839 }
1840
1841 if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1842         // Sanity-check: user is most likely passing an error code, and it would
1843         // make the return value ambiguous (actualSize vs error).
1844         ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd))", buffer, userSize, userSize);
1845 return BAD_VALUE;
1846 }
1847
1848 size_t written = 0;
1849 Buffer audioBuffer;
1850
1851 while (userSize >= mFrameSize) {
1852 audioBuffer.frameCount = userSize / mFrameSize;
1853
1854 status_t err = obtainBuffer(&audioBuffer,
1855 blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1856 if (err < 0) {
1857 if (written > 0) {
1858 break;
1859 }
1860 if (err == TIMED_OUT || err == -EINTR) {
1861 err = WOULD_BLOCK;
1862 }
1863 return ssize_t(err);
1864 }
1865
1866 size_t toWrite = audioBuffer.size;
1867 memcpy(audioBuffer.i8, buffer, toWrite);
1868 buffer = ((const char *) buffer) + toWrite;
1869 userSize -= toWrite;
1870 written += toWrite;
1871
1872 releaseBuffer(&audioBuffer);
1873 }
1874
1875 if (written > 0) {
1876 mFramesWritten += written / mFrameSize;
1877 }
1878 return written;
1879 }
1880
1881 // -------------------------------------------------------------------------
1882
1883 nsecs_t AudioTrack::processAudioBuffer()
1884 {
1885 // Currently the AudioTrack thread is not created if there are no callbacks.
1886 // Would it ever make sense to run the thread, even without callbacks?
1887 // If so, then replace this by checks at each use for mCbf != NULL.
1888 LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1889
1890 mLock.lock();
1891 if (mAwaitBoost) {
1892 mAwaitBoost = false;
1893 mLock.unlock();
1894 static const int32_t kMaxTries = 5;
1895 int32_t tryCounter = kMaxTries;
1896 uint32_t pollUs = 10000;
1897 do {
1898 int policy = sched_getscheduler(0) & ~SCHED_RESET_ON_FORK;
1899 if (policy == SCHED_FIFO || policy == SCHED_RR) {
1900 break;
1901 }
1902 usleep(pollUs);
1903 pollUs <<= 1;
1904 } while (tryCounter-- > 0);
1905 if (tryCounter < 0) {
1906 ALOGE("did not receive expected priority boost on time");
1907 }
1908 // Run again immediately
1909 return 0;
1910 }
1911
1912 // Can only reference mCblk while locked
1913 int32_t flags = android_atomic_and(
1914 ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1915
1916 // Check for track invalidation
1917 if (flags & CBLK_INVALID) {
1918 // for offloaded tracks restoreTrack_l() will just update the sequence and clear
1919 // AudioSystem cache. We should not exit here but after calling the callback so
1920 // that the upper layers can recreate the track
1921 if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
1922 status_t status __unused = restoreTrack_l("processAudioBuffer");
1923 // FIXME unused status
1924 // after restoration, continue below to make sure that the loop and buffer events
1925 // are notified because they have been cleared from mCblk->mFlags above.
1926 }
1927 }
1928
1929 bool waitStreamEnd = mState == STATE_STOPPING;
1930 bool active = mState == STATE_ACTIVE;
1931
1932 // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1933 bool newUnderrun = false;
1934 if (flags & CBLK_UNDERRUN) {
1935 #if 0
1936 // Currently in shared buffer mode, when the server reaches the end of buffer,
1937 // the track stays active in continuous underrun state. It's up to the application
1938 // to pause or stop the track, or set the position to a new offset within buffer.
1939 // This was some experimental code to auto-pause on underrun. Keeping it here
1940 // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
1941 if (mTransfer == TRANSFER_SHARED) {
1942 mState = STATE_PAUSED;
1943 active = false;
1944 }
1945 #endif
1946 if (!mInUnderrun) {
1947 mInUnderrun = true;
1948 newUnderrun = true;
1949 }
1950 }
1951
1952 // Get current position of server
1953 Modulo<uint32_t> position(updateAndGetPosition_l());
1954
1955 // Manage marker callback
1956 bool markerReached = false;
1957 Modulo<uint32_t> markerPosition(mMarkerPosition);
1958 // uses 32 bit wraparound for comparison with position.
1959 if (!mMarkerReached && markerPosition.value() > 0 && position >= markerPosition) {
1960 mMarkerReached = markerReached = true;
1961 }
1962
1963 // Determine number of new position callback(s) that will be needed, while locked
1964 size_t newPosCount = 0;
1965 Modulo<uint32_t> newPosition(mNewPosition);
1966 uint32_t updatePeriod = mUpdatePeriod;
1967 // FIXME fails for wraparound, need 64 bits
1968 if (updatePeriod > 0 && position >= newPosition) {
1969 newPosCount = ((position - newPosition).value() / updatePeriod) + 1;
1970 mNewPosition += updatePeriod * newPosCount;
1971 }
1972
1973 // Cache other fields that will be needed soon
1974 uint32_t sampleRate = mSampleRate;
1975 float speed = mPlaybackRate.mSpeed;
1976 const uint32_t notificationFrames = mNotificationFramesAct;
1977 if (mRefreshRemaining) {
1978 mRefreshRemaining = false;
1979 mRemainingFrames = notificationFrames;
1980 mRetryOnPartialBuffer = false;
1981 }
1982 size_t misalignment = mProxy->getMisalignment();
1983 uint32_t sequence = mSequence;
1984 sp<AudioTrackClientProxy> proxy = mProxy;
1985
1986 // Determine the number of new loop callback(s) that will be needed, while locked.
1987 int loopCountNotifications = 0;
1988 uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
1989
1990 if (mLoopCount > 0) {
1991 int loopCount;
1992 size_t bufferPosition;
1993 mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
1994 loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
1995 loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
1996 mLoopCountNotified = loopCount; // discard any excess notifications
1997 } else if (mLoopCount < 0) {
1998 // FIXME: We're not accurate with notification count and position with infinite looping
1999 // since loopCount from server side will always return -1 (we could decrement it).
2000 size_t bufferPosition = mStaticProxy->getBufferPosition();
2001 loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
2002 loopPeriod = mLoopEnd - bufferPosition;
2003 } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
2004 size_t bufferPosition = mStaticProxy->getBufferPosition();
2005 loopPeriod = mFrameCount - bufferPosition;
2006 }
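    // Illustrative example (hypothetical static-track values): with mFrameCount 1000,
    // mLoopEnd 800, a current bufferPosition of 200 and loopCount still positive,
    // loopPeriod == 600, i.e. the number of frames until the next EVENT_LOOP_END callback.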
2007
2008 // These fields don't need to be cached, because they are assigned only by set():
2009 // mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
2010 // mFlags is also assigned by createTrack_l(), but not the bit we care about.
2011
2012 mLock.unlock();
2013
2014 // get anchor time to account for callbacks.
2015 const nsecs_t timeBeforeCallbacks = systemTime();
2016
2017 if (waitStreamEnd) {
2018 // FIXME: Instead of blocking in proxy->waitStreamEndDone(), Callback thread
2019 // should wait on proxy futex and handle CBLK_STREAM_END_DONE within this function
2020 // (and make sure we don't callback for more data while we're stopping).
2021 // This helps with position, marker notifications, and track invalidation.
2022 struct timespec timeout;
2023 timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
2024 timeout.tv_nsec = 0;
2025
2026 status_t status = proxy->waitStreamEndDone(&timeout);
2027 switch (status) {
2028 case NO_ERROR:
2029 case DEAD_OBJECT:
2030 case TIMED_OUT:
2031 if (status != DEAD_OBJECT) {
2032                 // for DEAD_OBJECT, we do not send an EVENT_STREAM_END after stop();
2033 // instead, the application should handle the EVENT_NEW_IAUDIOTRACK.
2034 mCbf(EVENT_STREAM_END, mUserData, NULL);
2035 }
2036 {
2037 AutoMutex lock(mLock);
2038 // The previously assigned value of waitStreamEnd is no longer valid,
2039 // since the mutex has been unlocked and either the callback handler
2040 // or another thread could have re-started the AudioTrack during that time.
2041 waitStreamEnd = mState == STATE_STOPPING;
2042 if (waitStreamEnd) {
2043 mState = STATE_STOPPED;
2044 mReleased = 0;
2045 }
2046 }
2047 if (waitStreamEnd && status != DEAD_OBJECT) {
2048 return NS_INACTIVE;
2049 }
2050 break;
2051 }
2052 return 0;
2053 }
2054
2055 // perform callbacks while unlocked
2056 if (newUnderrun) {
2057 mCbf(EVENT_UNDERRUN, mUserData, NULL);
2058 }
2059 while (loopCountNotifications > 0) {
2060 mCbf(EVENT_LOOP_END, mUserData, NULL);
2061 --loopCountNotifications;
2062 }
2063 if (flags & CBLK_BUFFER_END) {
2064 mCbf(EVENT_BUFFER_END, mUserData, NULL);
2065 }
2066 if (markerReached) {
2067 mCbf(EVENT_MARKER, mUserData, &markerPosition);
2068 }
2069 while (newPosCount > 0) {
2070 size_t temp = newPosition.value(); // FIXME size_t != uint32_t
2071 mCbf(EVENT_NEW_POS, mUserData, &temp);
2072 newPosition += updatePeriod;
2073 newPosCount--;
2074 }
2075
2076 if (mObservedSequence != sequence) {
2077 mObservedSequence = sequence;
2078 mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
2079 // for offloaded tracks, just wait for the upper layers to recreate the track
2080 if (isOffloadedOrDirect()) {
2081 return NS_INACTIVE;
2082 }
2083 }
2084
2085 // if inactive, then don't run me again until re-started
2086 if (!active) {
2087 return NS_INACTIVE;
2088 }
2089
2090 // Compute the estimated time until the next timed event (position, markers, loops)
2091 // FIXME only for non-compressed audio
2092 uint32_t minFrames = ~0;
2093 if (!markerReached && position < markerPosition) {
2094 minFrames = (markerPosition - position).value();
2095 }
2096 if (loopPeriod > 0 && loopPeriod < minFrames) {
2097 // loopPeriod is already adjusted for actual position.
2098 minFrames = loopPeriod;
2099 }
2100 if (updatePeriod > 0) {
2101 minFrames = min(minFrames, (newPosition - position).value());
2102 }
2103
2104 // If > 0, poll periodically to recover from a stuck server. A good value is 2.
2105 static const uint32_t kPoll = 0;
2106 if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
2107 minFrames = kPoll * notificationFrames;
2108 }
2109
2110 // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
2111 static const nsecs_t kWaitPeriodNs = WAIT_PERIOD_MS * 1000000LL;
2112 const nsecs_t timeAfterCallbacks = systemTime();
2113
2114 // Convert frame units to time units
2115 nsecs_t ns = NS_WHENEVER;
2116 if (minFrames != (uint32_t) ~0) {
2117 // AudioFlinger consumption of client data may be irregular when coming out of device
2118 // standby since the kernel buffers require filling. This is throttled to no more than 2x
2119 // the expected rate in the MixerThread. Hence, we reduce the estimated time to wait by one
2120 // half (but no more than half a second) to improve callback accuracy during these temporary
2121 // data surges.
2122 const nsecs_t estimatedNs = framesToNanoseconds(minFrames, sampleRate, speed);
2123 constexpr nsecs_t maxThrottleCompensationNs = 500000000LL;
2124 ns = estimatedNs - min(estimatedNs / 2, maxThrottleCompensationNs) + kWaitPeriodNs;
2125 ns -= (timeAfterCallbacks - timeBeforeCallbacks); // account for callback time
2126 // TODO: Should we warn if the callback time is too long?
2127 if (ns < 0) ns = 0;
2128 }
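    // Illustrative example (assumed values): minFrames 960 at 48000 Hz and speed 1.0 gives an
    // estimate of 20 ms; halving that (capped at 500 ms) and adding the 10 ms wait period yields
    // a wait of about 20 ms, before subtracting the time already spent in callbacks.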
2129
2130 // If not supplying data by EVENT_MORE_DATA, then we're done
2131 if (mTransfer != TRANSFER_CALLBACK) {
2132 return ns;
2133 }
2134
2135 // EVENT_MORE_DATA callback handling.
2136 // Timing for linear pcm audio data formats can be derived directly from the
2137 // buffer fill level.
2138 // Timing for compressed data is not directly available from the buffer fill level,
2139 // rather indirectly from waiting for blocking mode callbacks or waiting for obtain()
2140 // to return a certain fill level.
2141
2142 struct timespec timeout;
2143 const struct timespec *requested = &ClientProxy::kForever;
2144 if (ns != NS_WHENEVER) {
2145 timeout.tv_sec = ns / 1000000000LL;
2146 timeout.tv_nsec = ns % 1000000000LL;
2147 ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
2148 requested = &timeout;
2149 }
2150
2151 size_t writtenFrames = 0;
2152 while (mRemainingFrames > 0) {
2153
2154 Buffer audioBuffer;
2155 audioBuffer.frameCount = mRemainingFrames;
2156 size_t nonContig;
2157 status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
2158 LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
2159 "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
2160 requested = &ClientProxy::kNonBlocking;
2161 size_t avail = audioBuffer.frameCount + nonContig;
2162 ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
2163 mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
2164 if (err != NO_ERROR) {
2165 if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
2166 (isOffloaded() && (err == DEAD_OBJECT))) {
2167 // FIXME bug 25195759
2168 return 1000000;
2169 }
2170 ALOGE("Error %d obtaining an audio buffer, giving up.", err);
2171 return NS_NEVER;
2172 }
2173
2174 if (mRetryOnPartialBuffer && audio_has_proportional_frames(mFormat)) {
2175 mRetryOnPartialBuffer = false;
2176 if (avail < mRemainingFrames) {
2177 if (ns > 0) { // account for obtain time
2178 const nsecs_t timeNow = systemTime();
2179 ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2180 }
2181 nsecs_t myns = framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
2182 if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
2183 ns = myns;
2184 }
2185 return ns;
2186 }
2187 }
2188
2189 size_t reqSize = audioBuffer.size;
2190 mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
2191 size_t writtenSize = audioBuffer.size;
2192
2193 // Sanity check on returned size
2194 if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
2195 ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
2196 reqSize, ssize_t(writtenSize));
2197 return NS_NEVER;
2198 }
2199
2200 if (writtenSize == 0) {
2201 // The callback is done filling buffers
2202 // Keep this thread going to handle timed events and
2203 // still try to get more data in intervals of WAIT_PERIOD_MS
2204 // but don't just loop and block the CPU, so wait
2205
2206 // mCbf(EVENT_MORE_DATA, ...) might either
2207 // (1) Block until it can fill the buffer, returning 0 size on EOS.
2208 // (2) Block until it can fill the buffer, returning 0 data (silence) on EOS.
2209 // (3) Return 0 size when no data is available, does not wait for more data.
2210 //
2211             // (1) and (2) occur with AudioPlayer/AwesomePlayer; (3) occurs with NuPlayer.
2212 // We try to compute the wait time to avoid a tight sleep-wait cycle,
2213 // especially for case (3).
2214 //
2215             // The decision to support (1) and (2) affects the sizing of mRemainingFrames
2216 // and this loop; whereas for case (3) we could simply check once with the full
2217 // buffer size and skip the loop entirely.
2218
2219 nsecs_t myns;
2220 if (audio_has_proportional_frames(mFormat)) {
2221 // time to wait based on buffer occupancy
2222 const nsecs_t datans = mRemainingFrames <= avail ? 0 :
2223 framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
2224 // audio flinger thread buffer size (TODO: adjust for fast tracks)
2225 // FIXME: use mAfFrameCountHAL instead of mAfFrameCount below for fast tracks.
2226 const nsecs_t afns = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
2227                 // add half the AudioFlinger buffer time to avoid soaking the CPU if datans is 0.
2228 myns = datans + (afns / 2);
2229 } else {
2230 // FIXME: This could ping quite a bit if the buffer isn't full.
2231 // Note that when mState is stopping we waitStreamEnd, so it never gets here.
2232 myns = kWaitPeriodNs;
2233 }
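            // Illustrative example (assumed values): mRemainingFrames 480, avail 0 at 48000 Hz
            // gives datans == 10 ms; with mAfFrameCount 960 at mAfSampleRate 48000, afns == 20 ms,
            // so myns == 10 ms + 10 ms == 20 ms before the obtain/callback time adjustment below.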
2234 if (ns > 0) { // account for obtain and callback time
2235 const nsecs_t timeNow = systemTime();
2236 ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2237 }
2238 if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
2239 ns = myns;
2240 }
2241 return ns;
2242 }
2243
2244 size_t releasedFrames = writtenSize / mFrameSize;
2245 audioBuffer.frameCount = releasedFrames;
2246 mRemainingFrames -= releasedFrames;
2247 if (misalignment >= releasedFrames) {
2248 misalignment -= releasedFrames;
2249 } else {
2250 misalignment = 0;
2251 }
2252
2253 releaseBuffer(&audioBuffer);
2254 writtenFrames += releasedFrames;
2255
2256 // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
2257 // if callback doesn't like to accept the full chunk
2258 if (writtenSize < reqSize) {
2259 continue;
2260 }
2261
2262 // There could be enough non-contiguous frames available to satisfy the remaining request
2263 if (mRemainingFrames <= nonContig) {
2264 continue;
2265 }
2266
2267 #if 0
2268 // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
2269 // sum <= notificationFrames. It replaces that series by at most two EVENT_MORE_DATA
2270 // that total to a sum == notificationFrames.
2271 if (0 < misalignment && misalignment <= mRemainingFrames) {
2272 mRemainingFrames = misalignment;
2273 return ((double)mRemainingFrames * 1100000000) / ((double)sampleRate * speed);
2274 }
2275 #endif
2276
2277 }
2278 if (writtenFrames > 0) {
2279 AutoMutex lock(mLock);
2280 mFramesWritten += writtenFrames;
2281 }
2282 mRemainingFrames = notificationFrames;
2283 mRetryOnPartialBuffer = true;
2284
2285 // A lot has transpired since ns was calculated, so run again immediately and re-calculate
2286 return 0;
2287 }
2288
2289 status_t AudioTrack::restoreTrack_l(const char *from)
2290 {
2291 ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
2292 isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
2293 ++mSequence;
2294
2295 // refresh the audio configuration cache in this process to make sure we get new
2296 // output parameters and new IAudioFlinger in createTrack_l()
2297 AudioSystem::clearAudioConfigCache();
2298
2299 if (isOffloadedOrDirect_l() || mDoNotReconnect) {
2300 // FIXME re-creation of offloaded and direct tracks is not yet implemented;
2301 // reconsider enabling for linear PCM encodings when position can be preserved.
2302 return DEAD_OBJECT;
2303 }
2304
2305 // Save so we can return count since creation.
2306 mUnderrunCountOffset = getUnderrunCount_l();
2307
2308 // save the old static buffer position
2309 uint32_t staticPosition = 0;
2310 size_t bufferPosition = 0;
2311 int loopCount = 0;
2312 if (mStaticProxy != 0) {
2313 mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
2314 staticPosition = mStaticProxy->getPosition().unsignedValue();
2315 }
2316
2317 mFlags = mOrigFlags;
2318
2319 // If a new IAudioTrack is successfully created, createTrack_l() will modify the
2320 // following member variables: mAudioTrack, mCblkMemory and mCblk.
2321 // It will also delete the strong references on previous IAudioTrack and IMemory.
2322 // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
2323 status_t result = createTrack_l();
2324
2325 if (result == NO_ERROR) {
2326 // take the frames that will be lost by track recreation into account in saved position
2327 // For streaming tracks, this is the amount we obtained from the user/client
2328 // (not the number actually consumed at the server - those are already lost).
2329 if (mStaticProxy == 0) {
2330 mPosition = mReleased;
2331 }
2332 // Continue playback from last known position and restore loop.
2333 if (mStaticProxy != 0) {
2334 if (loopCount != 0) {
2335 mStaticProxy->setBufferPositionAndLoop(bufferPosition,
2336 mLoopStart, mLoopEnd, loopCount);
2337 } else {
2338 mStaticProxy->setBufferPosition(bufferPosition);
2339 if (bufferPosition == mFrameCount) {
2340 ALOGD("restoring track at end of static buffer");
2341 }
2342 }
2343 }
2344 // restore volume handler
2345 mVolumeHandler->forall([this](const VolumeShaper &shaper) -> VolumeShaper::Status {
2346 sp<VolumeShaper::Operation> operationToEnd =
2347 new VolumeShaper::Operation(shaper.mOperation);
2348 // TODO: Ideally we would restore to the exact xOffset position
2349 // as returned by getVolumeShaperState(), but we don't have that
2350 // information when restoring at the client unless we periodically poll
2351 // the server or create shared memory state.
2352 //
2353 // For now, we simply advance to the end of the VolumeShaper effect
2354 // if it has been started.
2355 if (shaper.isStarted()) {
2356 operationToEnd->setNormalizedTime(1.f);
2357 }
2358 return mAudioTrack->applyVolumeShaper(shaper.mConfiguration, operationToEnd);
2359 });
2360
2361 if (mState == STATE_ACTIVE) {
2362 result = mAudioTrack->start();
2363 }
2364 // server resets to zero so we offset
2365 mFramesWrittenServerOffset =
2366 mStaticProxy.get() != nullptr ? staticPosition : mFramesWritten;
2367 mFramesWrittenAtRestore = mFramesWrittenServerOffset;
2368 }
2369 if (result != NO_ERROR) {
2370 ALOGW("restoreTrack_l() failed status %d", result);
2371 mState = STATE_STOPPED;
2372 mReleased = 0;
2373 }
2374
2375 return result;
2376 }
2377
2378 Modulo<uint32_t> AudioTrack::updateAndGetPosition_l()
2379 {
2380 // This is the sole place to read server consumed frames
2381 Modulo<uint32_t> newServer(mProxy->getPosition());
2382 const int32_t delta = (newServer - mServer).signedValue();
2383 // TODO There is controversy about whether there can be "negative jitter" in server position.
2384 // This should be investigated further, and if possible, it should be addressed.
2385 // A more definite failure mode is infrequent polling by client.
2386 // One could call (void)getPosition_l() in releaseBuffer(),
2387 // so mReleased and mPosition are always lock-step as best possible.
2388 // That should ensure delta never goes negative for infrequent polling
2389 // unless the server has more than 2^31 frames in its buffer,
2390 // in which case the use of uint32_t for these counters has bigger issues.
2391 ALOGE_IF(delta < 0,
2392 "detected illegal retrograde motion by the server: mServer advanced by %d",
2393 delta);
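    // Illustrative example (hypothetical values): if mServer was 0xFFFFFF00 and newServer is
    // 0x00000100, the signed Modulo delta is +512 frames even though the raw uint32_t value
    // wrapped around, so the client position still advances correctly.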
2394 mServer = newServer;
2395 if (delta > 0) { // avoid retrograde
2396 mPosition += delta;
2397 }
2398 return mPosition;
2399 }
2400
2401 bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed)
2402 {
2403 updateLatency_l();
2404 // applicable for mixing tracks only (not offloaded or direct)
2405 if (mStaticProxy != 0) {
2406 return true; // static tracks do not have issues with buffer sizing.
2407 }
2408 const size_t minFrameCount =
2409 calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed
2410 /*, 0 mNotificationsPerBufferReq*/);
2411 const bool allowed = mFrameCount >= minFrameCount;
2412 ALOGD_IF(!allowed,
2413 "isSampleRateSpeedAllowed_l denied "
2414 "mAfLatency:%u mAfFrameCount:%zu mAfSampleRate:%u sampleRate:%u speed:%f "
2415 "mFrameCount:%zu < minFrameCount:%zu",
2416 mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed,
2417 mFrameCount, minFrameCount);
2418 return allowed;
2419 }
2420
2421 status_t AudioTrack::setParameters(const String8& keyValuePairs)
2422 {
2423 AutoMutex lock(mLock);
2424 return mAudioTrack->setParameters(keyValuePairs);
2425 }
2426
2427 VolumeShaper::Status AudioTrack::applyVolumeShaper(
2428 const sp<VolumeShaper::Configuration>& configuration,
2429 const sp<VolumeShaper::Operation>& operation)
2430 {
2431 AutoMutex lock(mLock);
2432 mVolumeHandler->setIdIfNecessary(configuration);
2433 VolumeShaper::Status status = mAudioTrack->applyVolumeShaper(configuration, operation);
2434
2435 if (status == DEAD_OBJECT) {
2436 if (restoreTrack_l("applyVolumeShaper") == OK) {
2437 status = mAudioTrack->applyVolumeShaper(configuration, operation);
2438 }
2439 }
2440 if (status >= 0) {
2441 // save VolumeShaper for restore
2442 mVolumeHandler->applyVolumeShaper(configuration, operation);
2443 if (mState == STATE_ACTIVE || mState == STATE_STOPPING) {
2444 mVolumeHandler->setStarted();
2445 }
2446 } else {
2447 // warn only if not an expected restore failure.
2448 ALOGW_IF(!((isOffloadedOrDirect_l() || mDoNotReconnect) && status == DEAD_OBJECT),
2449 "applyVolumeShaper failed: %d", status);
2450 }
2451 return status;
2452 }
2453
2454 sp<VolumeShaper::State> AudioTrack::getVolumeShaperState(int id)
2455 {
2456 AutoMutex lock(mLock);
2457 sp<VolumeShaper::State> state = mAudioTrack->getVolumeShaperState(id);
2458 if (state.get() == nullptr && (mCblk->mFlags & CBLK_INVALID) != 0) {
2459 if (restoreTrack_l("getVolumeShaperState") == OK) {
2460 state = mAudioTrack->getVolumeShaperState(id);
2461 }
2462 }
2463 return state;
2464 }
2465
2466 status_t AudioTrack::getTimestamp(ExtendedTimestamp *timestamp)
2467 {
2468 if (timestamp == nullptr) {
2469 return BAD_VALUE;
2470 }
2471 AutoMutex lock(mLock);
2472 return getTimestamp_l(timestamp);
2473 }
2474
2475 status_t AudioTrack::getTimestamp_l(ExtendedTimestamp *timestamp)
2476 {
2477 if (mCblk->mFlags & CBLK_INVALID) {
2478 const status_t status = restoreTrack_l("getTimestampExtended");
2479 if (status != OK) {
2480 // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2481 // recommending that the track be recreated.
2482 return DEAD_OBJECT;
2483 }
2484 }
2485 // check for offloaded/direct here in case restoring somehow changed those flags.
2486 if (isOffloadedOrDirect_l()) {
2487 return INVALID_OPERATION; // not supported
2488 }
2489 status_t status = mProxy->getTimestamp(timestamp);
2490 LOG_ALWAYS_FATAL_IF(status != OK, "status %d not allowed from proxy getTimestamp", status);
2491 bool found = false;
2492 timestamp->mPosition[ExtendedTimestamp::LOCATION_CLIENT] = mFramesWritten;
2493 timestamp->mTimeNs[ExtendedTimestamp::LOCATION_CLIENT] = 0;
2494 // server side frame offset in case AudioTrack has been restored.
2495 for (int i = ExtendedTimestamp::LOCATION_SERVER;
2496 i < ExtendedTimestamp::LOCATION_MAX; ++i) {
2497 if (timestamp->mTimeNs[i] >= 0) {
2498             // apply the server offset (flushed frames are ignored
2499             // so we don't report the jump when the flush occurs).
2500 timestamp->mPosition[i] += mFramesWrittenServerOffset;
2501 found = true;
2502 }
2503 }
2504 return found ? OK : WOULD_BLOCK;
2505 }
2506
2507 status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
2508 {
2509 AutoMutex lock(mLock);
2510 return getTimestamp_l(timestamp);
2511 }
2512
2513 status_t AudioTrack::getTimestamp_l(AudioTimestamp& timestamp)
2514 {
2515 bool previousTimestampValid = mPreviousTimestampValid;
2516 // Set false here to cover all the error return cases.
2517 mPreviousTimestampValid = false;
2518
2519 switch (mState) {
2520 case STATE_ACTIVE:
2521 case STATE_PAUSED:
2522 break; // handle below
2523 case STATE_FLUSHED:
2524 case STATE_STOPPED:
2525 return WOULD_BLOCK;
2526 case STATE_STOPPING:
2527 case STATE_PAUSED_STOPPING:
2528 if (!isOffloaded_l()) {
2529 return INVALID_OPERATION;
2530 }
2531 break; // offloaded tracks handled below
2532 default:
2533 LOG_ALWAYS_FATAL("Invalid mState in getTimestamp(): %d", mState);
2534 break;
2535 }
2536
2537 if (mCblk->mFlags & CBLK_INVALID) {
2538 const status_t status = restoreTrack_l("getTimestamp");
2539 if (status != OK) {
2540 // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2541 // recommending that the track be recreated.
2542 return DEAD_OBJECT;
2543 }
2544 }
2545
2546 // The presented frame count must always lag behind the consumed frame count.
2547 // To avoid a race, read the presented frames first. This ensures that presented <= consumed.
2548
2549 status_t status;
2550 if (isOffloadedOrDirect_l()) {
2551 // use Binder to get timestamp
2552 status = mAudioTrack->getTimestamp(timestamp);
2553 } else {
2554 // read timestamp from shared memory
2555 ExtendedTimestamp ets;
2556 status = mProxy->getTimestamp(&ets);
2557 if (status == OK) {
2558 ExtendedTimestamp::Location location;
2559             status = ets.getBestTimestamp(&timestamp, &location);
2560
2561 if (status == OK) {
2562 updateLatency_l();
2563 // It is possible that the best location has moved from the kernel to the server.
2564 // In this case we adjust the position from the previous computed latency.
2565 if (location == ExtendedTimestamp::LOCATION_SERVER) {
2566 ALOGW_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_KERNEL,
2567 "getTimestamp() location moved from kernel to server");
2568 // check that the last kernel OK time info exists and the positions
2569 // are valid (if they predate the current track, the positions may
2570 // be zero or negative).
2571 const int64_t frames =
2572 (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
2573 ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0 ||
2574 ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] <= 0 ||
2575 ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] <= 0)
2576 ?
2577 int64_t((double)mAfLatency * mSampleRate * mPlaybackRate.mSpeed
2578 / 1000)
2579 :
2580 (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
2581 - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK]);
2582 ALOGV("frame adjustment:%lld timestamp:%s",
2583 (long long)frames, ets.toString().c_str());
2584 if (frames >= ets.mPosition[location]) {
2585 timestamp.mPosition = 0;
2586 } else {
2587 timestamp.mPosition = (uint32_t)(ets.mPosition[location] - frames);
2588 }
2589 } else if (location == ExtendedTimestamp::LOCATION_KERNEL) {
2590 ALOGV_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_SERVER,
2591 "getTimestamp() location moved from server to kernel");
2592 }
2593
2594 // We update the timestamp time even when paused.
2595 if (mState == STATE_PAUSED /* not needed: STATE_PAUSED_STOPPING */) {
2596 const int64_t now = systemTime();
2597                         const int64_t at = audio_utils_ns_from_timespec(&timestamp.mTime);
2598 const int64_t lag =
2599 (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
2600 ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0)
2601 ? int64_t(mAfLatency * 1000000LL)
2602 : (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
2603 - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK])
2604 * NANOS_PER_SECOND / mSampleRate;
2605 const int64_t limit = now - lag; // no earlier than this limit
2606 if (at < limit) {
2607 ALOGV("timestamp pause lag:%lld adjusting from %lld to %lld",
2608 (long long)lag, (long long)at, (long long)limit);
2609 timestamp.mTime = convertNsToTimespec(limit);
2610 }
2611 }
2612 mPreviousLocation = location;
2613 } else {
2614 // right after AudioTrack is started, one may not find a timestamp
2615 ALOGV("getBestTimestamp did not find timestamp");
2616 }
2617 }
2618 if (status == INVALID_OPERATION) {
2619 // INVALID_OPERATION occurs when no timestamp has been issued by the server;
2620 // other failures are signaled by a negative time.
2621 // If we come out of FLUSHED or STOPPED where the position is known
2622 // to be zero we convert this to WOULD_BLOCK (with the implicit meaning of
2623 // "zero" for NuPlayer). We don't convert for track restoration as position
2624 // does not reset.
2625 ALOGV("timestamp server offset:%lld restore frames:%lld",
2626 (long long)mFramesWrittenServerOffset, (long long)mFramesWrittenAtRestore);
2627 if (mFramesWrittenServerOffset != mFramesWrittenAtRestore) {
2628 status = WOULD_BLOCK;
2629 }
2630 }
2631 }
2632 if (status != NO_ERROR) {
2633 ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
2634 return status;
2635 }
2636 if (isOffloadedOrDirect_l()) {
2637 if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
2638 // use cached paused position in case another offloaded track is running.
2639 timestamp.mPosition = mPausedPosition;
2640             clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
2641 // TODO: adjust for delay
2642 return NO_ERROR;
2643 }
2644
2645 // Check whether a pending flush or stop has completed, as those commands may
2646 // be asynchronous or return near finish or exhibit glitchy behavior.
2647 //
2648 // Originally this showed up as the first timestamp being a continuation of
2649 // the previous song under gapless playback.
2650 // However, we sometimes see zero timestamps, then a glitch of
2651 // the previous song's position, and then correct timestamps afterwards.
2652 if (mStartFromZeroUs != 0 && mSampleRate != 0) {
2653 static const int kTimeJitterUs = 100000; // 100 ms
2654 static const int k1SecUs = 1000000;
2655
2656 const int64_t timeNow = getNowUs();
2657
2658 if (timeNow < mStartFromZeroUs + k1SecUs) { // within first second of starting
2659 const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
2660 if (timestampTimeUs < mStartFromZeroUs) {
2661 return WOULD_BLOCK; // stale timestamp time, occurs before start.
2662 }
2663 const int64_t deltaTimeUs = timestampTimeUs - mStartFromZeroUs;
2664 const int64_t deltaPositionByUs = (double)timestamp.mPosition * 1000000
2665 / ((double)mSampleRate * mPlaybackRate.mSpeed);
2666
2667 if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
2668 // Verify that the counter can't count faster than the sample rate
2669 // since the start time. If greater, then that means we may have failed
2670 // to completely flush or stop the previous playing track.
2671 ALOGW_IF(!mTimestampStartupGlitchReported,
2672 "getTimestamp startup glitch detected"
2673 " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
2674 (long long)deltaTimeUs, (long long)deltaPositionByUs,
2675 timestamp.mPosition);
2676 mTimestampStartupGlitchReported = true;
2677 if (previousTimestampValid
2678 && mPreviousTimestamp.mPosition == 0 /* should be true if valid */) {
2679 timestamp = mPreviousTimestamp;
2680 mPreviousTimestampValid = true;
2681 return NO_ERROR;
2682 }
2683 return WOULD_BLOCK;
2684 }
2685 if (deltaPositionByUs != 0) {
2686 mStartFromZeroUs = 0; // don't check again, we got valid nonzero position.
2687 }
2688 } else {
2689 mStartFromZeroUs = 0; // don't check again, start time expired.
2690 }
2691 mTimestampStartupGlitchReported = false;
2692 }
2693 } else {
2694 // Update the mapping between local consumed (mPosition) and server consumed (mServer)
2695 (void) updateAndGetPosition_l();
2696 // Server consumed (mServer) and presented both use the same server time base,
2697 // and server consumed is always >= presented.
2698 // The delta between these represents the number of frames in the buffer pipeline.
2699 // If this delta between these is greater than the client position, it means that
2700 // actually presented is still stuck at the starting line (figuratively speaking),
2701 // waiting for the first frame to go by. So we can't report a valid timestamp yet.
2702 // Note: We explicitly use non-Modulo comparison here - potential wrap issue when
2703 // mPosition exceeds 32 bits.
2704 // TODO Remove when timestamp is updated to contain pipeline status info.
2705 const int32_t pipelineDepthInFrames = (mServer - timestamp.mPosition).signedValue();
2706 if (pipelineDepthInFrames > 0 /* should be true, but we check anyways */
2707 && (uint32_t)pipelineDepthInFrames > mPosition.value()) {
2708 return INVALID_OPERATION;
2709 }
2710 // Convert timestamp position from server time base to client time base.
2711 // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
2712 // But if we change it to 64-bit then this could fail.
2713 // Use Modulo computation here.
2714 timestamp.mPosition = (mPosition - mServer + timestamp.mPosition).value();
2715 // Immediately after a call to getPosition_l(), mPosition and
2716 // mServer both represent the same frame position. mPosition is
2717 // in client's point of view, and mServer is in server's point of
2718 // view. So the difference between them is the "fudge factor"
2719 // between client and server views due to stop() and/or new
2720 // IAudioTrack. And timestamp.mPosition is initially in server's
2721 // point of view, so we need to apply the same fudge factor to it.
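        // Illustrative example (hypothetical values): if mPosition is 5000, mServer is 3000
        // (a 2000 frame offset accumulated across a track restore) and the server reports
        // timestamp.mPosition 2500, the client-visible position becomes 5000 - 3000 + 2500 = 4500.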
2722 }
2723
2724 // Prevent retrograde motion in timestamp.
2725 // This is sometimes caused by erratic reports of the available space in the ALSA drivers.
2726 if (status == NO_ERROR) {
2727 // previousTimestampValid is set to false when starting after a stop or flush.
2728 if (previousTimestampValid) {
2729 const int64_t previousTimeNanos =
2730 audio_utils_ns_from_timespec(&mPreviousTimestamp.mTime);
2731             int64_t currentTimeNanos = audio_utils_ns_from_timespec(&timestamp.mTime);
2732
2733 // Fix stale time when checking timestamp right after start().
2734 //
2735 // For offload compatibility, use a default lag value here.
2736 // Any time discrepancy between this update and the pause timestamp is handled
2737 // by the retrograde check afterwards.
2738 const int64_t lagNs = int64_t(mAfLatency * 1000000LL);
2739 const int64_t limitNs = mStartNs - lagNs;
2740 if (currentTimeNanos < limitNs) {
2741 ALOGD("correcting timestamp time for pause, "
2742 "currentTimeNanos: %lld < limitNs: %lld < mStartNs: %lld",
2743 (long long)currentTimeNanos, (long long)limitNs, (long long)mStartNs);
2744 timestamp.mTime = convertNsToTimespec(limitNs);
2745 currentTimeNanos = limitNs;
2746 }
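            // Illustrative example (assumed values): if playback started at mStartNs == 100 ms on
            // the monotonic clock and mAfLatency is 20 ms, limitNs == 80 ms, so a stale timestamp
            // time of 50 ms would be clamped up to 80 ms here.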
2747
2748 // retrograde check
2749 if (currentTimeNanos < previousTimeNanos) {
2750 ALOGW("retrograde timestamp time corrected, %lld < %lld",
2751 (long long)currentTimeNanos, (long long)previousTimeNanos);
2752 timestamp.mTime = mPreviousTimestamp.mTime;
2753 // currentTimeNanos not used below.
2754 }
2755
2756 // Looking at signed delta will work even when the timestamps
2757 // are wrapping around.
2758 int32_t deltaPosition = (Modulo<uint32_t>(timestamp.mPosition)
2759 - mPreviousTimestamp.mPosition).signedValue();
2760 if (deltaPosition < 0) {
2761 // Only report once per position instead of spamming the log.
2762 if (!mRetrogradeMotionReported) {
2763 ALOGW("retrograde timestamp position corrected, %d = %u - %u",
2764 deltaPosition,
2765 timestamp.mPosition,
2766 mPreviousTimestamp.mPosition);
2767 mRetrogradeMotionReported = true;
2768 }
2769 } else {
2770 mRetrogradeMotionReported = false;
2771 }
2772 if (deltaPosition < 0) {
2773 timestamp.mPosition = mPreviousTimestamp.mPosition;
2774 deltaPosition = 0;
2775 }
2776 #if 0
2777 // Uncomment this to verify audio timestamp rate.
2778 const int64_t deltaTime =
2779                     audio_utils_ns_from_timespec(&timestamp.mTime) - previousTimeNanos;
2780 if (deltaTime != 0) {
2781 const int64_t computedSampleRate =
2782 deltaPosition * (long long)NANOS_PER_SECOND / deltaTime;
2783 ALOGD("computedSampleRate:%u sampleRate:%u",
2784 (unsigned)computedSampleRate, mSampleRate);
2785 }
2786 #endif
2787 }
2788 mPreviousTimestamp = timestamp;
2789 mPreviousTimestampValid = true;
2790 }
2791
2792 return status;
2793 }
2794
2795 String8 AudioTrack::getParameters(const String8& keys)
2796 {
2797 audio_io_handle_t output = getOutput();
2798 if (output != AUDIO_IO_HANDLE_NONE) {
2799 return AudioSystem::getParameters(output, keys);
2800 } else {
2801 return String8::empty();
2802 }
2803 }
2804
2805 bool AudioTrack::isOffloaded() const
2806 {
2807 AutoMutex lock(mLock);
2808 return isOffloaded_l();
2809 }
2810
2811 bool AudioTrack::isDirect() const
2812 {
2813 AutoMutex lock(mLock);
2814 return isDirect_l();
2815 }
2816
2817 bool AudioTrack::isOffloadedOrDirect() const
2818 {
2819 AutoMutex lock(mLock);
2820 return isOffloadedOrDirect_l();
2821 }
2822
2823
2824 status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
2825 {
2826
2827 const size_t SIZE = 256;
2828 char buffer[SIZE];
2829 String8 result;
2830
2831 result.append(" AudioTrack::dump\n");
2832 snprintf(buffer, 255, " stream type(%d), left - right volume(%f, %f)\n", mStreamType,
2833 mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
2834 result.append(buffer);
2835 snprintf(buffer, 255, " format(%d), channel count(%d), frame count(%zu)\n", mFormat,
2836 mChannelCount, mFrameCount);
2837 result.append(buffer);
2838 snprintf(buffer, 255, " sample rate(%u), speed(%f), status(%d)\n",
2839 mSampleRate, mPlaybackRate.mSpeed, mStatus);
2840 result.append(buffer);
2841 snprintf(buffer, 255, " state(%d), latency (%d)\n", mState, mLatency);
2842 result.append(buffer);
2843 ::write(fd, result.string(), result.size());
2844 return NO_ERROR;
2845 }
2846
2847 uint32_t AudioTrack::getUnderrunCount() const
2848 {
2849 AutoMutex lock(mLock);
2850 return getUnderrunCount_l();
2851 }
2852
2853 uint32_t AudioTrack::getUnderrunCount_l() const
2854 {
2855 return mProxy->getUnderrunCount() + mUnderrunCountOffset;
2856 }
2857
2858 uint32_t AudioTrack::getUnderrunFrames() const
2859 {
2860 AutoMutex lock(mLock);
2861 return mProxy->getUnderrunFrames();
2862 }
2863
2864 status_t AudioTrack::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback)
2865 {
2866 if (callback == 0) {
2867 ALOGW("%s adding NULL callback!", __FUNCTION__);
2868 return BAD_VALUE;
2869 }
2870 AutoMutex lock(mLock);
2871 if (mDeviceCallback.unsafe_get() == callback.get()) {
2872 ALOGW("%s adding same callback!", __FUNCTION__);
2873 return INVALID_OPERATION;
2874 }
2875 status_t status = NO_ERROR;
2876 if (mOutput != AUDIO_IO_HANDLE_NONE) {
2877 if (mDeviceCallback != 0) {
2878 ALOGW("%s callback already present!", __FUNCTION__);
2879 AudioSystem::removeAudioDeviceCallback(this, mOutput);
2880 }
2881 status = AudioSystem::addAudioDeviceCallback(this, mOutput);
2882 }
2883 mDeviceCallback = callback;
2884 return status;
2885 }
2886
2887 status_t AudioTrack::removeAudioDeviceCallback(
2888 const sp<AudioSystem::AudioDeviceCallback>& callback)
2889 {
2890 if (callback == 0) {
2891 ALOGW("%s removing NULL callback!", __FUNCTION__);
2892 return BAD_VALUE;
2893 }
2894 AutoMutex lock(mLock);
2895 if (mDeviceCallback.unsafe_get() != callback.get()) {
2896 ALOGW("%s removing different callback!", __FUNCTION__);
2897 return INVALID_OPERATION;
2898 }
2899 mDeviceCallback.clear();
2900 if (mOutput != AUDIO_IO_HANDLE_NONE) {
2901 AudioSystem::removeAudioDeviceCallback(this, mOutput);
2902 }
2903 return NO_ERROR;
2904 }
2905
2906
2907 void AudioTrack::onAudioDeviceUpdate(audio_io_handle_t audioIo,
2908 audio_port_handle_t deviceId)
2909 {
2910 sp<AudioSystem::AudioDeviceCallback> callback;
2911 {
2912 AutoMutex lock(mLock);
2913 if (audioIo != mOutput) {
2914 return;
2915 }
2916 callback = mDeviceCallback.promote();
2917         // only update the device if the track is active, as route changes due to other
2918         // use cases are irrelevant for this client
2919 if (mState == STATE_ACTIVE) {
2920 mRoutedDeviceId = deviceId;
2921 }
2922 }
2923 if (callback.get() != nullptr) {
2924 callback->onAudioDeviceUpdate(mOutput, mRoutedDeviceId);
2925 }
2926 }
2927
2928 status_t AudioTrack::pendingDuration(int32_t *msec, ExtendedTimestamp::Location location)
2929 {
2930 if (msec == nullptr ||
2931 (location != ExtendedTimestamp::LOCATION_SERVER
2932 && location != ExtendedTimestamp::LOCATION_KERNEL)) {
2933 return BAD_VALUE;
2934 }
2935 AutoMutex lock(mLock);
2936 // inclusive of offloaded and direct tracks.
2937 //
2938     // It is possible, but not enabled, to allow duration computation for non-PCM
2939     // audio_has_proportional_frames() formats because they currently drain at a rate
2940     // equivalent to the PCM sample rate * frame size.
2941 if (!isPurePcmData_l()) {
2942 return INVALID_OPERATION;
2943 }
2944 ExtendedTimestamp ets;
2945 if (getTimestamp_l(&ets) == OK
2946 && ets.mTimeNs[location] > 0) {
2947 int64_t diff = ets.mPosition[ExtendedTimestamp::LOCATION_CLIENT]
2948 - ets.mPosition[location];
2949 if (diff < 0) {
2950 *msec = 0;
2951 } else {
2952 // ms is the playback time by frames
2953 int64_t ms = (int64_t)((double)diff * 1000 /
2954 ((double)mSampleRate * mPlaybackRate.mSpeed));
2955 // clockdiff is the timestamp age (negative)
2956 int64_t clockdiff = (mState != STATE_ACTIVE) ? 0 :
2957 ets.mTimeNs[location]
2958 + ets.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_MONOTONIC]
2959 - systemTime(SYSTEM_TIME_MONOTONIC);
2960
2961 //ALOGV("ms: %lld clockdiff: %lld", (long long)ms, (long long)clockdiff);
2962 static const int NANOS_PER_MILLIS = 1000000;
2963 *msec = (int32_t)(ms + clockdiff / NANOS_PER_MILLIS);
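            // Illustrative example (assumed values): diff == 4800 frames at 48000 Hz and speed 1.0
            // gives ms == 100; if the timestamp is 20 ms old, clockdiff is about -20 ms, so the
            // reported pending duration is roughly 80 ms.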
2964 }
2965 return NO_ERROR;
2966 }
2967 if (location != ExtendedTimestamp::LOCATION_SERVER) {
2968 return INVALID_OPERATION; // LOCATION_KERNEL is not available
2969 }
2970 // use server position directly (offloaded and direct arrive here)
2971 updateAndGetPosition_l();
2972 int32_t diff = (Modulo<uint32_t>(mFramesWritten) - mPosition).signedValue();
2973 *msec = (diff <= 0) ? 0
2974 : (int32_t)((double)diff * 1000 / ((double)mSampleRate * mPlaybackRate.mSpeed));
2975 return NO_ERROR;
2976 }
2977
2978 bool AudioTrack::hasStarted()
2979 {
2980 AutoMutex lock(mLock);
2981 switch (mState) {
2982 case STATE_STOPPED:
2983 if (isOffloadedOrDirect_l()) {
2984 // check if we have started in the past to return true.
2985 return mStartFromZeroUs > 0;
2986 }
2987 // A normal audio track may still be draining, so
2988         // check if stream has ended. This covers fast track position
2989 // instability and start/stop without any data written.
2990 if (mProxy->getStreamEndDone()) {
2991 return true;
2992 }
2993 // fall through
2994 case STATE_ACTIVE:
2995 case STATE_STOPPING:
2996 break;
2997 case STATE_PAUSED:
2998 case STATE_PAUSED_STOPPING:
2999 case STATE_FLUSHED:
3000 return false; // we're not active
3001 default:
3002 LOG_ALWAYS_FATAL("Invalid mState in hasStarted(): %d", mState);
3003 break;
3004 }
3005
3006 // wait indicates whether we need to wait for a timestamp.
3007 // This is conservatively figured - if we encounter an unexpected error
3008 // then we will not wait.
3009 bool wait = false;
3010 if (isOffloadedOrDirect_l()) {
3011 AudioTimestamp ts;
3012 status_t status = getTimestamp_l(ts);
3013 if (status == WOULD_BLOCK) {
3014 wait = true;
3015 } else if (status == OK) {
3016 wait = (ts.mPosition == 0 || ts.mPosition == mStartTs.mPosition);
3017 }
3018 ALOGV("hasStarted wait:%d ts:%u start position:%lld",
3019 (int)wait,
3020 ts.mPosition,
3021 (long long)mStartTs.mPosition);
3022 } else {
3023 int location = ExtendedTimestamp::LOCATION_SERVER; // for ALOG
3024 ExtendedTimestamp ets;
3025 status_t status = getTimestamp_l(&ets);
3026 if (status == WOULD_BLOCK) { // no SERVER or KERNEL frame info in ets
3027 wait = true;
3028 } else if (status == OK) {
3029 for (location = ExtendedTimestamp::LOCATION_KERNEL;
3030 location >= ExtendedTimestamp::LOCATION_SERVER; --location) {
3031 if (ets.mTimeNs[location] < 0 || mStartEts.mTimeNs[location] < 0) {
3032 continue;
3033 }
3034 wait = ets.mPosition[location] == 0
3035 || ets.mPosition[location] == mStartEts.mPosition[location];
3036 break;
3037 }
3038 }
3039 ALOGV("hasStarted wait:%d ets:%lld start position:%lld",
3040 (int)wait,
3041 (long long)ets.mPosition[location],
3042 (long long)mStartEts.mPosition[location]);
3043 }
3044 return !wait;
3045 }
3046
3047 // =========================================================================
3048
3049 void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
3050 {
3051 sp<AudioTrack> audioTrack = mAudioTrack.promote();
3052 if (audioTrack != 0) {
3053 AutoMutex lock(audioTrack->mLock);
3054 audioTrack->mProxy->binderDied();
3055 }
3056 }
3057
3058 // =========================================================================
3059
3060 AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
3061 : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
3062 mIgnoreNextPausedInt(false)
3063 {
3064 }
3065
3066 AudioTrack::AudioTrackThread::~AudioTrackThread()
3067 {
3068 }
3069
3070 bool AudioTrack::AudioTrackThread::threadLoop()
3071 {
3072 {
3073 AutoMutex _l(mMyLock);
3074 if (mPaused) {
3075 // TODO check return value and handle or log
3076 mMyCond.wait(mMyLock);
3077 // caller will check for exitPending()
3078 return true;
3079 }
3080 if (mIgnoreNextPausedInt) {
3081 mIgnoreNextPausedInt = false;
3082 mPausedInt = false;
3083 }
3084 if (mPausedInt) {
3085 // TODO use futex instead of condition, for event flag "or"
3086 if (mPausedNs > 0) {
3087 // TODO check return value and handle or log
3088 (void) mMyCond.waitRelative(mMyLock, mPausedNs);
3089 } else {
3090 // TODO check return value and handle or log
3091 mMyCond.wait(mMyLock);
3092 }
3093 mPausedInt = false;
3094 return true;
3095 }
3096 }
3097 if (exitPending()) {
3098 return false;
3099 }
3100 nsecs_t ns = mReceiver.processAudioBuffer();
3101 switch (ns) {
3102 case 0:
3103 return true;
3104 case NS_INACTIVE:
3105 pauseInternal();
3106 return true;
3107 case NS_NEVER:
3108 return false;
3109 case NS_WHENEVER:
3110         // Event driven: call wake() when callback notification conditions change.
3111 ns = INT64_MAX;
3112 // fall through
3113 default:
3114 LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
3115 pauseInternal(ns);
3116 return true;
3117 }
3118 }
3119
3120 void AudioTrack::AudioTrackThread::requestExit()
3121 {
3122 // must be in this order to avoid a race condition
3123 Thread::requestExit();
3124 resume();
3125 }
3126
3127 void AudioTrack::AudioTrackThread::pause()
3128 {
3129 AutoMutex _l(mMyLock);
3130 mPaused = true;
3131 }
3132
3133 void AudioTrack::AudioTrackThread::resume()
3134 {
3135 AutoMutex _l(mMyLock);
3136 mIgnoreNextPausedInt = true;
3137 if (mPaused || mPausedInt) {
3138 mPaused = false;
3139 mPausedInt = false;
3140 mMyCond.signal();
3141 }
3142 }
3143
3144 void AudioTrack::AudioTrackThread::wake()
3145 {
3146 AutoMutex _l(mMyLock);
3147 if (!mPaused) {
3148 // wake() might be called while servicing a callback - ignore the next
3149 // pause time and call processAudioBuffer.
3150 mIgnoreNextPausedInt = true;
3151 if (mPausedInt && mPausedNs > 0) {
3152 // audio track is active and internally paused with timeout.
3153 mPausedInt = false;
3154 mMyCond.signal();
3155 }
3156 }
3157 }
3158
3159 void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
3160 {
3161 AutoMutex _l(mMyLock);
3162 mPausedInt = true;
3163 mPausedNs = ns;
3164 }
3165
3166 } // namespace android
3167