1 /*
2 **
3 ** Copyright 2007, The Android Open Source Project
4 **
5 ** Licensed under the Apache License, Version 2.0 (the "License");
6 ** you may not use this file except in compliance with the License.
7 ** You may obtain a copy of the License at
8 **
9 ** http://www.apache.org/licenses/LICENSE-2.0
10 **
11 ** Unless required by applicable law or agreed to in writing, software
12 ** distributed under the License is distributed on an "AS IS" BASIS,
13 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 ** See the License for the specific language governing permissions and
15 ** limitations under the License.
16 */
17
18 //#define LOG_NDEBUG 0
19 #define LOG_TAG "AudioTrack"
20
21 #include <inttypes.h>
22 #include <math.h>
23 #include <sys/resource.h>
24
25 #include <android-base/macros.h>
26 #include <audio_utils/clock.h>
27 #include <audio_utils/primitives.h>
28 #include <binder/IPCThreadState.h>
29 #include <media/AudioTrack.h>
30 #include <utils/Log.h>
31 #include <private/media/AudioTrackShared.h>
32 #include <processgroup/sched_policy.h>
33 #include <media/IAudioFlinger.h>
34 #include <media/IAudioPolicyService.h>
35 #include <media/AudioParameter.h>
36 #include <media/AudioResamplerPublic.h>
37 #include <media/AudioSystem.h>
38 #include <media/MediaMetricsItem.h>
39 #include <media/TypeConverter.h>
40
41 #define WAIT_PERIOD_MS 10
42 #define WAIT_STREAM_END_TIMEOUT_SEC 120
43 static const int kMaxLoopCountNotifications = 32;
44
45 namespace android {
46 // ---------------------------------------------------------------------------
47
48 using media::VolumeShaper;
49
50 // TODO: Move to a separate .h
51
52 template <typename T>
min(const T & x,const T & y)53 static inline const T &min(const T &x, const T &y) {
54 return x < y ? x : y;
55 }
56
57 template <typename T>
max(const T & x,const T & y)58 static inline const T &max(const T &x, const T &y) {
59 return x > y ? x : y;
60 }
61
framesToNanoseconds(ssize_t frames,uint32_t sampleRate,float speed)62 static inline nsecs_t framesToNanoseconds(ssize_t frames, uint32_t sampleRate, float speed)
63 {
64 return ((double)frames * 1000000000) / ((double)sampleRate * speed);
65 }
66
convertTimespecToUs(const struct timespec & tv)67 static int64_t convertTimespecToUs(const struct timespec &tv)
68 {
69 return tv.tv_sec * 1000000LL + tv.tv_nsec / 1000;
70 }
71
72 // TODO move to audio_utils.
convertNsToTimespec(int64_t ns)73 static inline struct timespec convertNsToTimespec(int64_t ns) {
74 struct timespec tv;
75 tv.tv_sec = static_cast<time_t>(ns / NANOS_PER_SECOND);
76 tv.tv_nsec = static_cast<int64_t>(ns % NANOS_PER_SECOND);
77 return tv;
78 }
79
80 // current monotonic time in microseconds.
getNowUs()81 static int64_t getNowUs()
82 {
83 struct timespec tv;
84 (void) clock_gettime(CLOCK_MONOTONIC, &tv);
85 return convertTimespecToUs(tv);
86 }
87
88 // FIXME: we don't use the pitch setting in the time stretcher (not working);
89 // instead we emulate it using our sample rate converter.
90 static const bool kFixPitch = true; // enable pitch fix
adjustSampleRate(uint32_t sampleRate,float pitch)91 static inline uint32_t adjustSampleRate(uint32_t sampleRate, float pitch)
92 {
93 return kFixPitch ? (sampleRate * pitch + 0.5) : sampleRate;
94 }
95
adjustSpeed(float speed,float pitch)96 static inline float adjustSpeed(float speed, float pitch)
97 {
98 return kFixPitch ? speed / max(pitch, AUDIO_TIMESTRETCH_PITCH_MIN_DELTA) : speed;
99 }
100
adjustPitch(float pitch)101 static inline float adjustPitch(float pitch)
102 {
103 return kFixPitch ? AUDIO_TIMESTRETCH_PITCH_NORMAL : pitch;
104 }
105
106 // static
getMinFrameCount(size_t * frameCount,audio_stream_type_t streamType,uint32_t sampleRate)107 status_t AudioTrack::getMinFrameCount(
108 size_t* frameCount,
109 audio_stream_type_t streamType,
110 uint32_t sampleRate)
111 {
112 if (frameCount == NULL) {
113 return BAD_VALUE;
114 }
115
116 // FIXME handle in server, like createTrack_l(), possible missing info:
117 // audio_io_handle_t output
118 // audio_format_t format
119 // audio_channel_mask_t channelMask
120 // audio_output_flags_t flags (FAST)
121 uint32_t afSampleRate;
122 status_t status;
123 status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
124 if (status != NO_ERROR) {
125 ALOGE("%s(): Unable to query output sample rate for stream type %d; status %d",
126 __func__, streamType, status);
127 return status;
128 }
129 size_t afFrameCount;
130 status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
131 if (status != NO_ERROR) {
132 ALOGE("%s(): Unable to query output frame count for stream type %d; status %d",
133 __func__, streamType, status);
134 return status;
135 }
136 uint32_t afLatency;
137 status = AudioSystem::getOutputLatency(&afLatency, streamType);
138 if (status != NO_ERROR) {
139 ALOGE("%s(): Unable to query output latency for stream type %d; status %d",
140 __func__, streamType, status);
141 return status;
142 }
143
144 // When called from createTrack, speed is 1.0f (normal speed).
145 // This is rechecked again on setting playback rate (TODO: on setting sample rate, too).
146 *frameCount = AudioSystem::calculateMinFrameCount(afLatency, afFrameCount, afSampleRate,
147 sampleRate, 1.0f /*, 0 notificationsPerBufferReq*/);
148
149 // The formula above should always produce a non-zero value under normal circumstances:
150 // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
151 // Return error in the unlikely event that it does not, as that's part of the API contract.
152 if (*frameCount == 0) {
153 ALOGE("%s(): failed for streamType %d, sampleRate %u",
154 __func__, streamType, sampleRate);
155 return BAD_VALUE;
156 }
157 ALOGV("%s(): getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
158 __func__, *frameCount, afFrameCount, afSampleRate, afLatency);
159 return NO_ERROR;
160 }
161
162 // static
isDirectOutputSupported(const audio_config_base_t & config,const audio_attributes_t & attributes)163 bool AudioTrack::isDirectOutputSupported(const audio_config_base_t& config,
164 const audio_attributes_t& attributes) {
165 ALOGV("%s()", __FUNCTION__);
166 const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
167 if (aps == 0) return false;
168 return aps->isDirectOutputSupported(config, attributes);
169 }
170
171 // ---------------------------------------------------------------------------
172
gather(const AudioTrack * track)173 void AudioTrack::MediaMetrics::gather(const AudioTrack *track)
174 {
175 // only if we're in a good state...
176 // XXX: shall we gather alternative info if failing?
177 const status_t lstatus = track->initCheck();
178 if (lstatus != NO_ERROR) {
179 ALOGD("%s(): no metrics gathered, track status=%d", __func__, (int) lstatus);
180 return;
181 }
182
183 #define MM_PREFIX "android.media.audiotrack." // avoid cut-n-paste errors.
184
185 // Java API 28 entries, do not change.
186 mMetricsItem->setCString(MM_PREFIX "streamtype", toString(track->streamType()).c_str());
187 mMetricsItem->setCString(MM_PREFIX "type",
188 toString(track->mAttributes.content_type).c_str());
189 mMetricsItem->setCString(MM_PREFIX "usage", toString(track->mAttributes.usage).c_str());
190
191 // Non-API entries, these can change due to a Java string mistake.
192 mMetricsItem->setInt32(MM_PREFIX "sampleRate", (int32_t)track->mSampleRate);
193 mMetricsItem->setInt64(MM_PREFIX "channelMask", (int64_t)track->mChannelMask);
194 // Non-API entries, these can change.
195 mMetricsItem->setInt32(MM_PREFIX "portId", (int32_t)track->mPortId);
196 mMetricsItem->setCString(MM_PREFIX "encoding", toString(track->mFormat).c_str());
197 mMetricsItem->setInt32(MM_PREFIX "frameCount", (int32_t)track->mFrameCount);
198 mMetricsItem->setCString(MM_PREFIX "attributes", toString(track->mAttributes).c_str());
199 }
200
201 // hand the user a snapshot of the metrics.
getMetrics(mediametrics::Item * & item)202 status_t AudioTrack::getMetrics(mediametrics::Item * &item)
203 {
204 mMediaMetrics.gather(this);
205 mediametrics::Item *tmp = mMediaMetrics.dup();
206 if (tmp == nullptr) {
207 return BAD_VALUE;
208 }
209 item = tmp;
210 return NO_ERROR;
211 }
212
AudioTrack()213 AudioTrack::AudioTrack() : AudioTrack("" /*opPackageName*/)
214 {
215 }
216
AudioTrack(const std::string & opPackageName)217 AudioTrack::AudioTrack(const std::string& opPackageName)
218 : mStatus(NO_INIT),
219 mState(STATE_STOPPED),
220 mPreviousPriority(ANDROID_PRIORITY_NORMAL),
221 mPreviousSchedulingGroup(SP_DEFAULT),
222 mPausedPosition(0),
223 mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
224 mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE),
225 mOpPackageName(opPackageName),
226 mAudioTrackCallback(new AudioTrackCallback())
227 {
228 mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
229 mAttributes.usage = AUDIO_USAGE_UNKNOWN;
230 mAttributes.flags = 0x0;
231 strcpy(mAttributes.tags, "");
232 }
233
AudioTrack(audio_stream_type_t streamType,uint32_t sampleRate,audio_format_t format,audio_channel_mask_t channelMask,size_t frameCount,audio_output_flags_t flags,callback_t cbf,void * user,int32_t notificationFrames,audio_session_t sessionId,transfer_type transferType,const audio_offload_info_t * offloadInfo,uid_t uid,pid_t pid,const audio_attributes_t * pAttributes,bool doNotReconnect,float maxRequiredSpeed,audio_port_handle_t selectedDeviceId,const std::string & opPackageName)234 AudioTrack::AudioTrack(
235 audio_stream_type_t streamType,
236 uint32_t sampleRate,
237 audio_format_t format,
238 audio_channel_mask_t channelMask,
239 size_t frameCount,
240 audio_output_flags_t flags,
241 callback_t cbf,
242 void* user,
243 int32_t notificationFrames,
244 audio_session_t sessionId,
245 transfer_type transferType,
246 const audio_offload_info_t *offloadInfo,
247 uid_t uid,
248 pid_t pid,
249 const audio_attributes_t* pAttributes,
250 bool doNotReconnect,
251 float maxRequiredSpeed,
252 audio_port_handle_t selectedDeviceId,
253 const std::string& opPackageName)
254 : mStatus(NO_INIT),
255 mState(STATE_STOPPED),
256 mPreviousPriority(ANDROID_PRIORITY_NORMAL),
257 mPreviousSchedulingGroup(SP_DEFAULT),
258 mPausedPosition(0),
259 mOpPackageName(opPackageName),
260 mAudioTrackCallback(new AudioTrackCallback())
261 {
262 mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
263
264 (void)set(streamType, sampleRate, format, channelMask,
265 frameCount, flags, cbf, user, notificationFrames,
266 0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
267 offloadInfo, uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
268 }
269
AudioTrack(audio_stream_type_t streamType,uint32_t sampleRate,audio_format_t format,audio_channel_mask_t channelMask,const sp<IMemory> & sharedBuffer,audio_output_flags_t flags,callback_t cbf,void * user,int32_t notificationFrames,audio_session_t sessionId,transfer_type transferType,const audio_offload_info_t * offloadInfo,uid_t uid,pid_t pid,const audio_attributes_t * pAttributes,bool doNotReconnect,float maxRequiredSpeed,const std::string & opPackageName)270 AudioTrack::AudioTrack(
271 audio_stream_type_t streamType,
272 uint32_t sampleRate,
273 audio_format_t format,
274 audio_channel_mask_t channelMask,
275 const sp<IMemory>& sharedBuffer,
276 audio_output_flags_t flags,
277 callback_t cbf,
278 void* user,
279 int32_t notificationFrames,
280 audio_session_t sessionId,
281 transfer_type transferType,
282 const audio_offload_info_t *offloadInfo,
283 uid_t uid,
284 pid_t pid,
285 const audio_attributes_t* pAttributes,
286 bool doNotReconnect,
287 float maxRequiredSpeed,
288 const std::string& opPackageName)
289 : mStatus(NO_INIT),
290 mState(STATE_STOPPED),
291 mPreviousPriority(ANDROID_PRIORITY_NORMAL),
292 mPreviousSchedulingGroup(SP_DEFAULT),
293 mPausedPosition(0),
294 mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
295 mOpPackageName(opPackageName),
296 mAudioTrackCallback(new AudioTrackCallback())
297 {
298 mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
299
300 (void)set(streamType, sampleRate, format, channelMask,
301 0 /*frameCount*/, flags, cbf, user, notificationFrames,
302 sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
303 uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
304 }
305
~AudioTrack()306 AudioTrack::~AudioTrack()
307 {
308 // pull together the numbers, before we clean up our structures
309 mMediaMetrics.gather(this);
310
311 mediametrics::LogItem(mMetricsId)
312 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_DTOR)
313 .set(AMEDIAMETRICS_PROP_CALLERNAME,
314 mCallerName.empty()
315 ? AMEDIAMETRICS_PROP_CALLERNAME_VALUE_UNKNOWN
316 : mCallerName.c_str())
317 .set(AMEDIAMETRICS_PROP_STATE, stateToString(mState))
318 .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)mStatus)
319 .record();
320
321 if (mStatus == NO_ERROR) {
322 // Make sure that callback function exits in the case where
323 // it is looping on buffer full condition in obtainBuffer().
324 // Otherwise the callback thread will never exit.
325 stop();
326 if (mAudioTrackThread != 0) {
327 mProxy->interrupt();
328 mAudioTrackThread->requestExit(); // see comment in AudioTrack.h
329 mAudioTrackThread->requestExitAndWait();
330 mAudioTrackThread.clear();
331 }
332 // No lock here: worst case we remove a NULL callback which will be a nop
333 if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
334 AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
335 }
336 IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
337 mAudioTrack.clear();
338 mCblkMemory.clear();
339 mSharedBuffer.clear();
340 IPCThreadState::self()->flushCommands();
341 ALOGV("%s(%d), releasing session id %d from %d on behalf of %d",
342 __func__, mPortId,
343 mSessionId, IPCThreadState::self()->getCallingPid(), mClientPid);
344 AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
345 }
346 }
347
set(audio_stream_type_t streamType,uint32_t sampleRate,audio_format_t format,audio_channel_mask_t channelMask,size_t frameCount,audio_output_flags_t flags,callback_t cbf,void * user,int32_t notificationFrames,const sp<IMemory> & sharedBuffer,bool threadCanCallJava,audio_session_t sessionId,transfer_type transferType,const audio_offload_info_t * offloadInfo,uid_t uid,pid_t pid,const audio_attributes_t * pAttributes,bool doNotReconnect,float maxRequiredSpeed,audio_port_handle_t selectedDeviceId)348 status_t AudioTrack::set(
349 audio_stream_type_t streamType,
350 uint32_t sampleRate,
351 audio_format_t format,
352 audio_channel_mask_t channelMask,
353 size_t frameCount,
354 audio_output_flags_t flags,
355 callback_t cbf,
356 void* user,
357 int32_t notificationFrames,
358 const sp<IMemory>& sharedBuffer,
359 bool threadCanCallJava,
360 audio_session_t sessionId,
361 transfer_type transferType,
362 const audio_offload_info_t *offloadInfo,
363 uid_t uid,
364 pid_t pid,
365 const audio_attributes_t* pAttributes,
366 bool doNotReconnect,
367 float maxRequiredSpeed,
368 audio_port_handle_t selectedDeviceId)
369 {
370 status_t status;
371 uint32_t channelCount;
372 pid_t callingPid;
373 pid_t myPid;
374
375 // Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
376 ALOGV("%s(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
377 "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
378 __func__,
379 streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
380 sessionId, transferType, uid, pid);
381
382 mThreadCanCallJava = threadCanCallJava;
383 mSelectedDeviceId = selectedDeviceId;
384 mSessionId = sessionId;
385
386 switch (transferType) {
387 case TRANSFER_DEFAULT:
388 if (sharedBuffer != 0) {
389 transferType = TRANSFER_SHARED;
390 } else if (cbf == NULL || threadCanCallJava) {
391 transferType = TRANSFER_SYNC;
392 } else {
393 transferType = TRANSFER_CALLBACK;
394 }
395 break;
396 case TRANSFER_CALLBACK:
397 case TRANSFER_SYNC_NOTIF_CALLBACK:
398 if (cbf == NULL || sharedBuffer != 0) {
399 ALOGE("%s(): Transfer type %s but cbf == NULL || sharedBuffer != 0",
400 convertTransferToText(transferType), __func__);
401 status = BAD_VALUE;
402 goto exit;
403 }
404 break;
405 case TRANSFER_OBTAIN:
406 case TRANSFER_SYNC:
407 if (sharedBuffer != 0) {
408 ALOGE("%s(): Transfer type TRANSFER_OBTAIN but sharedBuffer != 0", __func__);
409 status = BAD_VALUE;
410 goto exit;
411 }
412 break;
413 case TRANSFER_SHARED:
414 if (sharedBuffer == 0) {
415 ALOGE("%s(): Transfer type TRANSFER_SHARED but sharedBuffer == 0", __func__);
416 status = BAD_VALUE;
417 goto exit;
418 }
419 break;
420 default:
421 ALOGE("%s(): Invalid transfer type %d",
422 __func__, transferType);
423 status = BAD_VALUE;
424 goto exit;
425 }
426 mSharedBuffer = sharedBuffer;
427 mTransfer = transferType;
428 mDoNotReconnect = doNotReconnect;
429
430 ALOGV_IF(sharedBuffer != 0, "%s(): sharedBuffer: %p, size: %zu",
431 __func__, sharedBuffer->unsecurePointer(), sharedBuffer->size());
432
433 ALOGV("%s(): streamType %d frameCount %zu flags %04x",
434 __func__, streamType, frameCount, flags);
435
436 // invariant that mAudioTrack != 0 is true only after set() returns successfully
437 if (mAudioTrack != 0) {
438 ALOGE("%s(): Track already in use", __func__);
439 status = INVALID_OPERATION;
440 goto exit;
441 }
442
443 // handle default values first.
444 if (streamType == AUDIO_STREAM_DEFAULT) {
445 streamType = AUDIO_STREAM_MUSIC;
446 }
447 if (pAttributes == NULL) {
448 if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
449 ALOGE("%s(): Invalid stream type %d", __func__, streamType);
450 status = BAD_VALUE;
451 goto exit;
452 }
453 mStreamType = streamType;
454
455 } else {
456 // stream type shouldn't be looked at, this track has audio attributes
457 memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
458 ALOGV("%s(): Building AudioTrack with attributes:"
459 " usage=%d content=%d flags=0x%x tags=[%s]",
460 __func__,
461 mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
462 mStreamType = AUDIO_STREAM_DEFAULT;
463 audio_flags_to_audio_output_flags(mAttributes.flags, &flags);
464 }
465
466 // these below should probably come from the audioFlinger too...
467 if (format == AUDIO_FORMAT_DEFAULT) {
468 format = AUDIO_FORMAT_PCM_16_BIT;
469 } else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
470 mAttributes.flags |= AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
471 }
472
473 // validate parameters
474 if (!audio_is_valid_format(format)) {
475 ALOGE("%s(): Invalid format %#x", __func__, format);
476 status = BAD_VALUE;
477 goto exit;
478 }
479 mFormat = format;
480
481 if (!audio_is_output_channel(channelMask)) {
482 ALOGE("%s(): Invalid channel mask %#x", __func__, channelMask);
483 status = BAD_VALUE;
484 goto exit;
485 }
486 mChannelMask = channelMask;
487 channelCount = audio_channel_count_from_out_mask(channelMask);
488 mChannelCount = channelCount;
489
490 // force direct flag if format is not linear PCM
491 // or offload was requested
492 if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
493 || !audio_is_linear_pcm(format)) {
494 ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
495 ? "%s(): Offload request, forcing to Direct Output"
496 : "%s(): Not linear PCM, forcing to Direct Output",
497 __func__);
498 flags = (audio_output_flags_t)
499 // FIXME why can't we allow direct AND fast?
500 ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
501 }
502
503 // force direct flag if HW A/V sync requested
504 if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
505 flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
506 }
507
508 if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
509 if (audio_has_proportional_frames(format)) {
510 mFrameSize = channelCount * audio_bytes_per_sample(format);
511 } else {
512 mFrameSize = sizeof(uint8_t);
513 }
514 } else {
515 ALOG_ASSERT(audio_has_proportional_frames(format));
516 mFrameSize = channelCount * audio_bytes_per_sample(format);
517 // createTrack will return an error if PCM format is not supported by server,
518 // so no need to check for specific PCM formats here
519 }
520
521 // sampling rate must be specified for direct outputs
522 if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
523 status = BAD_VALUE;
524 goto exit;
525 }
526 mSampleRate = sampleRate;
527 mOriginalSampleRate = sampleRate;
528 mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
529 // 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
530 mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);
531
532 // Make copy of input parameter offloadInfo so that in the future:
533 // (a) createTrack_l doesn't need it as an input parameter
534 // (b) we can support re-creation of offloaded tracks
535 if (offloadInfo != NULL) {
536 mOffloadInfoCopy = *offloadInfo;
537 mOffloadInfo = &mOffloadInfoCopy;
538 } else {
539 mOffloadInfo = NULL;
540 memset(&mOffloadInfoCopy, 0, sizeof(audio_offload_info_t));
541 }
542
543 mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
544 mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
545 mSendLevel = 0.0f;
546 // mFrameCount is initialized in createTrack_l
547 mReqFrameCount = frameCount;
548 if (notificationFrames >= 0) {
549 mNotificationFramesReq = notificationFrames;
550 mNotificationsPerBufferReq = 0;
551 } else {
552 if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
553 ALOGE("%s(): notificationFrames=%d not permitted for non-fast track",
554 __func__, notificationFrames);
555 status = BAD_VALUE;
556 goto exit;
557 }
558 if (frameCount > 0) {
559 ALOGE("%s(): notificationFrames=%d not permitted with non-zero frameCount=%zu",
560 __func__, notificationFrames, frameCount);
561 status = BAD_VALUE;
562 goto exit;
563 }
564 mNotificationFramesReq = 0;
565 const uint32_t minNotificationsPerBuffer = 1;
566 const uint32_t maxNotificationsPerBuffer = 8;
567 mNotificationsPerBufferReq = min(maxNotificationsPerBuffer,
568 max((uint32_t) -notificationFrames, minNotificationsPerBuffer));
569 ALOGW_IF(mNotificationsPerBufferReq != (uint32_t) -notificationFrames,
570 "%s(): notificationFrames=%d clamped to the range -%u to -%u",
571 __func__,
572 notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
573 }
574 mNotificationFramesAct = 0;
575 callingPid = IPCThreadState::self()->getCallingPid();
576 myPid = getpid();
577 if (uid == AUDIO_UID_INVALID || (callingPid != myPid)) {
578 mClientUid = IPCThreadState::self()->getCallingUid();
579 } else {
580 mClientUid = uid;
581 }
582 if (pid == -1 || (callingPid != myPid)) {
583 mClientPid = callingPid;
584 } else {
585 mClientPid = pid;
586 }
587 mAuxEffectId = 0;
588 mOrigFlags = mFlags = flags;
589 mCbf = cbf;
590
591 if (cbf != NULL) {
592 mAudioTrackThread = new AudioTrackThread(*this);
593 mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
594 // thread begins in paused state, and will not reference us until start()
595 }
596
597 // create the IAudioTrack
598 {
599 AutoMutex lock(mLock);
600 status = createTrack_l();
601 }
602 if (status != NO_ERROR) {
603 if (mAudioTrackThread != 0) {
604 mAudioTrackThread->requestExit(); // see comment in AudioTrack.h
605 mAudioTrackThread->requestExitAndWait();
606 mAudioTrackThread.clear();
607 }
608 goto exit;
609 }
610
611 mUserData = user;
612 mLoopCount = 0;
613 mLoopStart = 0;
614 mLoopEnd = 0;
615 mLoopCountNotified = 0;
616 mMarkerPosition = 0;
617 mMarkerReached = false;
618 mNewPosition = 0;
619 mUpdatePeriod = 0;
620 mPosition = 0;
621 mReleased = 0;
622 mStartNs = 0;
623 mStartFromZeroUs = 0;
624 AudioSystem::acquireAudioSessionId(mSessionId, mClientPid, mClientUid);
625 mSequence = 1;
626 mObservedSequence = mSequence;
627 mInUnderrun = false;
628 mPreviousTimestampValid = false;
629 mTimestampStartupGlitchReported = false;
630 mTimestampRetrogradePositionReported = false;
631 mTimestampRetrogradeTimeReported = false;
632 mTimestampStallReported = false;
633 mTimestampStaleTimeReported = false;
634 mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
635 mStartTs.mPosition = 0;
636 mUnderrunCountOffset = 0;
637 mFramesWritten = 0;
638 mFramesWrittenServerOffset = 0;
639 mFramesWrittenAtRestore = -1; // -1 is a unique initializer.
640 mVolumeHandler = new media::VolumeHandler();
641
642 exit:
643 mStatus = status;
644 return status;
645 }
646
647 // -------------------------------------------------------------------------
648
start()649 status_t AudioTrack::start()
650 {
651 AutoMutex lock(mLock);
652
653 if (mState == STATE_ACTIVE) {
654 return INVALID_OPERATION;
655 }
656
657 ALOGV("%s(%d): prior state:%s", __func__, mPortId, stateToString(mState));
658
659 // Defer logging here due to OpenSL ES repeated start calls.
660 // TODO(b/154868033) after fix, restore this logging back to the beginning of start().
661 const int64_t beginNs = systemTime();
662 status_t status = NO_ERROR; // logged: make sure to set this before returning.
663 mediametrics::Defer defer([&] {
664 mediametrics::LogItem(mMetricsId)
665 .set(AMEDIAMETRICS_PROP_CALLERNAME,
666 mCallerName.empty()
667 ? AMEDIAMETRICS_PROP_CALLERNAME_VALUE_UNKNOWN
668 : mCallerName.c_str())
669 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_START)
670 .set(AMEDIAMETRICS_PROP_EXECUTIONTIMENS, (int64_t)(systemTime() - beginNs))
671 .set(AMEDIAMETRICS_PROP_STATE, stateToString(mState))
672 .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)status)
673 .record(); });
674
675
676 mInUnderrun = true;
677
678 State previousState = mState;
679 if (previousState == STATE_PAUSED_STOPPING) {
680 mState = STATE_STOPPING;
681 } else {
682 mState = STATE_ACTIVE;
683 }
684 (void) updateAndGetPosition_l();
685
686 // save start timestamp
687 if (isOffloadedOrDirect_l()) {
688 if (getTimestamp_l(mStartTs) != OK) {
689 mStartTs.mPosition = 0;
690 }
691 } else {
692 if (getTimestamp_l(&mStartEts) != OK) {
693 mStartEts.clear();
694 }
695 }
696 mStartNs = systemTime(); // save this for timestamp adjustment after starting.
697 if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
698 // reset current position as seen by client to 0
699 mPosition = 0;
700 mPreviousTimestampValid = false;
701 mTimestampStartupGlitchReported = false;
702 mTimestampRetrogradePositionReported = false;
703 mTimestampRetrogradeTimeReported = false;
704 mTimestampStallReported = false;
705 mTimestampStaleTimeReported = false;
706 mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
707
708 if (!isOffloadedOrDirect_l()
709 && mStartEts.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] > 0) {
710 // Server side has consumed something, but is it finished consuming?
711 // It is possible since flush and stop are asynchronous that the server
712 // is still active at this point.
713 ALOGV("%s(%d): server read:%lld cumulative flushed:%lld client written:%lld",
714 __func__, mPortId,
715 (long long)(mFramesWrittenServerOffset
716 + mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER]),
717 (long long)mStartEts.mFlushed,
718 (long long)mFramesWritten);
719 // mStartEts is already adjusted by mFramesWrittenServerOffset, so we delta adjust.
720 mFramesWrittenServerOffset -= mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER];
721 }
722 mFramesWritten = 0;
723 mProxy->clearTimestamp(); // need new server push for valid timestamp
724 mMarkerReached = false;
725
726 // For offloaded tracks, we don't know if the hardware counters are really zero here,
727 // since the flush is asynchronous and stop may not fully drain.
728 // We save the time when the track is started to later verify whether
729 // the counters are realistic (i.e. start from zero after this time).
730 mStartFromZeroUs = mStartNs / 1000;
731
732 // force refresh of remaining frames by processAudioBuffer() as last
733 // write before stop could be partial.
734 mRefreshRemaining = true;
735
736 // for static track, clear the old flags when starting from stopped state
737 if (mSharedBuffer != 0) {
738 android_atomic_and(
739 ~(CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
740 &mCblk->mFlags);
741 }
742 }
743 mNewPosition = mPosition + mUpdatePeriod;
744 int32_t flags = android_atomic_and(~(CBLK_STREAM_END_DONE | CBLK_DISABLED), &mCblk->mFlags);
745
746 if (!(flags & CBLK_INVALID)) {
747 status = mAudioTrack->start();
748 if (status == DEAD_OBJECT) {
749 flags |= CBLK_INVALID;
750 }
751 }
752 if (flags & CBLK_INVALID) {
753 status = restoreTrack_l("start");
754 }
755
756 // resume or pause the callback thread as needed.
757 sp<AudioTrackThread> t = mAudioTrackThread;
758 if (status == NO_ERROR) {
759 if (t != 0) {
760 if (previousState == STATE_STOPPING) {
761 mProxy->interrupt();
762 } else {
763 t->resume();
764 }
765 } else {
766 mPreviousPriority = getpriority(PRIO_PROCESS, 0);
767 get_sched_policy(0, &mPreviousSchedulingGroup);
768 androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
769 }
770
771 // Start our local VolumeHandler for restoration purposes.
772 mVolumeHandler->setStarted();
773 } else {
774 ALOGE("%s(%d): status %d", __func__, mPortId, status);
775 mState = previousState;
776 if (t != 0) {
777 if (previousState != STATE_STOPPING) {
778 t->pause();
779 }
780 } else {
781 setpriority(PRIO_PROCESS, 0, mPreviousPriority);
782 set_sched_policy(0, mPreviousSchedulingGroup);
783 }
784 }
785
786 return status;
787 }
788
stop()789 void AudioTrack::stop()
790 {
791 const int64_t beginNs = systemTime();
792
793 AutoMutex lock(mLock);
794 mediametrics::Defer defer([&]() {
795 mediametrics::LogItem(mMetricsId)
796 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_STOP)
797 .set(AMEDIAMETRICS_PROP_EXECUTIONTIMENS, (int64_t)(systemTime() - beginNs))
798 .set(AMEDIAMETRICS_PROP_STATE, stateToString(mState))
799 .set(AMEDIAMETRICS_PROP_BUFFERSIZEFRAMES, (int32_t)mProxy->getBufferSizeInFrames())
800 .set(AMEDIAMETRICS_PROP_UNDERRUN, (int32_t) getUnderrunCount_l())
801 .record();
802 });
803
804 ALOGV("%s(%d): prior state:%s", __func__, mPortId, stateToString(mState));
805
806 if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
807 return;
808 }
809
810 if (isOffloaded_l()) {
811 mState = STATE_STOPPING;
812 } else {
813 mState = STATE_STOPPED;
814 ALOGD_IF(mSharedBuffer == nullptr,
815 "%s(%d): called with %u frames delivered", __func__, mPortId, mReleased.value());
816 mReleased = 0;
817 }
818
819 mProxy->stop(); // notify server not to read beyond current client position until start().
820 mProxy->interrupt();
821 mAudioTrack->stop();
822
823 // Note: legacy handling - stop does not clear playback marker
824 // and periodic update counter, but flush does for streaming tracks.
825
826 if (mSharedBuffer != 0) {
827 // clear buffer position and loop count.
828 mStaticProxy->setBufferPositionAndLoop(0 /* position */,
829 0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
830 }
831
832 sp<AudioTrackThread> t = mAudioTrackThread;
833 if (t != 0) {
834 if (!isOffloaded_l()) {
835 t->pause();
836 } else if (mTransfer == TRANSFER_SYNC_NOTIF_CALLBACK) {
837 // causes wake up of the playback thread, that will callback the client for
838 // EVENT_STREAM_END in processAudioBuffer()
839 t->wake();
840 }
841 } else {
842 setpriority(PRIO_PROCESS, 0, mPreviousPriority);
843 set_sched_policy(0, mPreviousSchedulingGroup);
844 }
845 }
846
stopped() const847 bool AudioTrack::stopped() const
848 {
849 AutoMutex lock(mLock);
850 return mState != STATE_ACTIVE;
851 }
852
flush()853 void AudioTrack::flush()
854 {
855 const int64_t beginNs = systemTime();
856 AutoMutex lock(mLock);
857 mediametrics::Defer defer([&]() {
858 mediametrics::LogItem(mMetricsId)
859 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_FLUSH)
860 .set(AMEDIAMETRICS_PROP_EXECUTIONTIMENS, (int64_t)(systemTime() - beginNs))
861 .set(AMEDIAMETRICS_PROP_STATE, stateToString(mState))
862 .record(); });
863
864 ALOGV("%s(%d): prior state:%s", __func__, mPortId, stateToString(mState));
865
866 if (mSharedBuffer != 0) {
867 return;
868 }
869 if (mState == STATE_ACTIVE) {
870 return;
871 }
872 flush_l();
873 }
874
flush_l()875 void AudioTrack::flush_l()
876 {
877 ALOG_ASSERT(mState != STATE_ACTIVE);
878
879 // clear playback marker and periodic update counter
880 mMarkerPosition = 0;
881 mMarkerReached = false;
882 mUpdatePeriod = 0;
883 mRefreshRemaining = true;
884
885 mState = STATE_FLUSHED;
886 mReleased = 0;
887 if (isOffloaded_l()) {
888 mProxy->interrupt();
889 }
890 mProxy->flush();
891 mAudioTrack->flush();
892 }
893
pause()894 void AudioTrack::pause()
895 {
896 const int64_t beginNs = systemTime();
897 AutoMutex lock(mLock);
898 mediametrics::Defer defer([&]() {
899 mediametrics::LogItem(mMetricsId)
900 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_PAUSE)
901 .set(AMEDIAMETRICS_PROP_EXECUTIONTIMENS, (int64_t)(systemTime() - beginNs))
902 .set(AMEDIAMETRICS_PROP_STATE, stateToString(mState))
903 .record(); });
904
905 ALOGV("%s(%d): prior state:%s", __func__, mPortId, stateToString(mState));
906
907 if (mState == STATE_ACTIVE) {
908 mState = STATE_PAUSED;
909 } else if (mState == STATE_STOPPING) {
910 mState = STATE_PAUSED_STOPPING;
911 } else {
912 return;
913 }
914 mProxy->interrupt();
915 mAudioTrack->pause();
916
917 if (isOffloaded_l()) {
918 if (mOutput != AUDIO_IO_HANDLE_NONE) {
919 // An offload output can be re-used between two audio tracks having
920 // the same configuration. A timestamp query for a paused track
921 // while the other is running would return an incorrect time.
922 // To fix this, cache the playback position on a pause() and return
923 // this time when requested until the track is resumed.
924
925 // OffloadThread sends HAL pause in its threadLoop. Time saved
926 // here can be slightly off.
927
928 // TODO: check return code for getRenderPosition.
929
930 uint32_t halFrames;
931 AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
932 ALOGV("%s(%d): for offload, cache current position %u",
933 __func__, mPortId, mPausedPosition);
934 }
935 }
936 }
937
setVolume(float left,float right)938 status_t AudioTrack::setVolume(float left, float right)
939 {
940 // This duplicates a test by AudioTrack JNI, but that is not the only caller
941 if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
942 isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
943 return BAD_VALUE;
944 }
945
946 mediametrics::LogItem(mMetricsId)
947 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_SETVOLUME)
948 .set(AMEDIAMETRICS_PROP_VOLUME_LEFT, (double)left)
949 .set(AMEDIAMETRICS_PROP_VOLUME_RIGHT, (double)right)
950 .record();
951
952 AutoMutex lock(mLock);
953 mVolume[AUDIO_INTERLEAVE_LEFT] = left;
954 mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
955
956 mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));
957
958 if (isOffloaded_l()) {
959 mAudioTrack->signal();
960 }
961 return NO_ERROR;
962 }
963
setVolume(float volume)964 status_t AudioTrack::setVolume(float volume)
965 {
966 return setVolume(volume, volume);
967 }
968
setAuxEffectSendLevel(float level)969 status_t AudioTrack::setAuxEffectSendLevel(float level)
970 {
971 // This duplicates a test by AudioTrack JNI, but that is not the only caller
972 if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
973 return BAD_VALUE;
974 }
975
976 AutoMutex lock(mLock);
977 mSendLevel = level;
978 mProxy->setSendLevel(level);
979
980 return NO_ERROR;
981 }
982
getAuxEffectSendLevel(float * level) const983 void AudioTrack::getAuxEffectSendLevel(float* level) const
984 {
985 if (level != NULL) {
986 *level = mSendLevel;
987 }
988 }
989
setSampleRate(uint32_t rate)990 status_t AudioTrack::setSampleRate(uint32_t rate)
991 {
992 AutoMutex lock(mLock);
993 ALOGV("%s(%d): prior state:%s rate:%u", __func__, mPortId, stateToString(mState), rate);
994
995 if (rate == mSampleRate) {
996 return NO_ERROR;
997 }
998 if (isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)
999 || (mChannelMask & AUDIO_CHANNEL_HAPTIC_ALL)) {
1000 return INVALID_OPERATION;
1001 }
1002 if (mOutput == AUDIO_IO_HANDLE_NONE) {
1003 return NO_INIT;
1004 }
1005 // NOTE: it is theoretically possible, but highly unlikely, that a device change
1006 // could mean a previously allowed sampling rate is no longer allowed.
1007 uint32_t afSamplingRate;
1008 if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
1009 return NO_INIT;
1010 }
1011 // pitch is emulated by adjusting speed and sampleRate
1012 const uint32_t effectiveSampleRate = adjustSampleRate(rate, mPlaybackRate.mPitch);
1013 if (rate == 0 || effectiveSampleRate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
1014 return BAD_VALUE;
1015 }
1016 // TODO: Should we also check if the buffer size is compatible?
1017
1018 mSampleRate = rate;
1019 mProxy->setSampleRate(effectiveSampleRate);
1020
1021 return NO_ERROR;
1022 }
1023
getSampleRate() const1024 uint32_t AudioTrack::getSampleRate() const
1025 {
1026 AutoMutex lock(mLock);
1027
1028 // sample rate can be updated during playback by the offloaded decoder so we need to
1029 // query the HAL and update if needed.
1030 // FIXME use Proxy return channel to update the rate from server and avoid polling here
1031 if (isOffloadedOrDirect_l()) {
1032 if (mOutput != AUDIO_IO_HANDLE_NONE) {
1033 uint32_t sampleRate = 0;
1034 status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
1035 if (status == NO_ERROR) {
1036 mSampleRate = sampleRate;
1037 }
1038 }
1039 }
1040 return mSampleRate;
1041 }
1042
getOriginalSampleRate() const1043 uint32_t AudioTrack::getOriginalSampleRate() const
1044 {
1045 return mOriginalSampleRate;
1046 }
1047
setPlaybackRate(const AudioPlaybackRate & playbackRate)1048 status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
1049 {
1050 AutoMutex lock(mLock);
1051 if (isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
1052 return NO_ERROR;
1053 }
1054 if (isOffloadedOrDirect_l()) {
1055 return INVALID_OPERATION;
1056 }
1057 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1058 return INVALID_OPERATION;
1059 }
1060
1061 ALOGV("%s(%d): mSampleRate:%u mSpeed:%f mPitch:%f",
1062 __func__, mPortId, mSampleRate, playbackRate.mSpeed, playbackRate.mPitch);
1063 // pitch is emulated by adjusting speed and sampleRate
1064 const uint32_t effectiveRate = adjustSampleRate(mSampleRate, playbackRate.mPitch);
1065 const float effectiveSpeed = adjustSpeed(playbackRate.mSpeed, playbackRate.mPitch);
1066 const float effectivePitch = adjustPitch(playbackRate.mPitch);
1067 AudioPlaybackRate playbackRateTemp = playbackRate;
1068 playbackRateTemp.mSpeed = effectiveSpeed;
1069 playbackRateTemp.mPitch = effectivePitch;
1070
1071 ALOGV("%s(%d) (effective) mSampleRate:%u mSpeed:%f mPitch:%f",
1072 __func__, mPortId, effectiveRate, effectiveSpeed, effectivePitch);
1073
1074 if (!isAudioPlaybackRateValid(playbackRateTemp)) {
1075 ALOGW("%s(%d) (%f, %f) failed (effective rate out of bounds)",
1076 __func__, mPortId, playbackRate.mSpeed, playbackRate.mPitch);
1077 return BAD_VALUE;
1078 }
1079 // Check if the buffer size is compatible.
1080 if (!isSampleRateSpeedAllowed_l(effectiveRate, effectiveSpeed)) {
1081 ALOGW("%s(%d) (%f, %f) failed (buffer size)",
1082 __func__, mPortId, playbackRate.mSpeed, playbackRate.mPitch);
1083 return BAD_VALUE;
1084 }
1085
1086 // Check resampler ratios are within bounds
1087 if ((uint64_t)effectiveRate > (uint64_t)mSampleRate *
1088 (uint64_t)AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
1089 ALOGW("%s(%d) (%f, %f) failed. Resample rate exceeds max accepted value",
1090 __func__, mPortId, playbackRate.mSpeed, playbackRate.mPitch);
1091 return BAD_VALUE;
1092 }
1093
1094 if ((uint64_t)effectiveRate * (uint64_t)AUDIO_RESAMPLER_UP_RATIO_MAX < (uint64_t)mSampleRate) {
1095 ALOGW("%s(%d) (%f, %f) failed. Resample rate below min accepted value",
1096 __func__, mPortId, playbackRate.mSpeed, playbackRate.mPitch);
1097 return BAD_VALUE;
1098 }
1099 mPlaybackRate = playbackRate;
1100 //set effective rates
1101 mProxy->setPlaybackRate(playbackRateTemp);
1102 mProxy->setSampleRate(effectiveRate); // FIXME: not quite "atomic" with setPlaybackRate
1103
1104 mediametrics::LogItem(mMetricsId)
1105 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_SETPLAYBACKPARAM)
1106 .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
1107 .set(AMEDIAMETRICS_PROP_PLAYBACK_SPEED, (double)mPlaybackRate.mSpeed)
1108 .set(AMEDIAMETRICS_PROP_PLAYBACK_PITCH, (double)mPlaybackRate.mPitch)
1109 .set(AMEDIAMETRICS_PROP_PREFIX_EFFECTIVE
1110 AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)effectiveRate)
1111 .set(AMEDIAMETRICS_PROP_PREFIX_EFFECTIVE
1112 AMEDIAMETRICS_PROP_PLAYBACK_SPEED, (double)playbackRateTemp.mSpeed)
1113 .set(AMEDIAMETRICS_PROP_PREFIX_EFFECTIVE
1114 AMEDIAMETRICS_PROP_PLAYBACK_PITCH, (double)playbackRateTemp.mPitch)
1115 .record();
1116
1117 return NO_ERROR;
1118 }
1119
getPlaybackRate() const1120 const AudioPlaybackRate& AudioTrack::getPlaybackRate() const
1121 {
1122 AutoMutex lock(mLock);
1123 return mPlaybackRate;
1124 }
1125
getBufferSizeInFrames()1126 ssize_t AudioTrack::getBufferSizeInFrames()
1127 {
1128 AutoMutex lock(mLock);
1129 if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
1130 return NO_INIT;
1131 }
1132
1133 return (ssize_t) mProxy->getBufferSizeInFrames();
1134 }
1135
getBufferDurationInUs(int64_t * duration)1136 status_t AudioTrack::getBufferDurationInUs(int64_t *duration)
1137 {
1138 if (duration == nullptr) {
1139 return BAD_VALUE;
1140 }
1141 AutoMutex lock(mLock);
1142 if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
1143 return NO_INIT;
1144 }
1145 ssize_t bufferSizeInFrames = (ssize_t) mProxy->getBufferSizeInFrames();
1146 if (bufferSizeInFrames < 0) {
1147 return (status_t)bufferSizeInFrames;
1148 }
1149 *duration = (int64_t)((double)bufferSizeInFrames * 1000000
1150 / ((double)mSampleRate * mPlaybackRate.mSpeed));
1151 return NO_ERROR;
1152 }
1153
setBufferSizeInFrames(size_t bufferSizeInFrames)1154 ssize_t AudioTrack::setBufferSizeInFrames(size_t bufferSizeInFrames)
1155 {
1156 AutoMutex lock(mLock);
1157 if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
1158 return NO_INIT;
1159 }
1160 // Reject if timed track or compressed audio.
1161 if (!audio_is_linear_pcm(mFormat)) {
1162 return INVALID_OPERATION;
1163 }
1164
1165 ssize_t originalBufferSize = mProxy->getBufferSizeInFrames();
1166 ssize_t finalBufferSize = mProxy->setBufferSizeInFrames((uint32_t) bufferSizeInFrames);
1167 if (originalBufferSize != finalBufferSize) {
1168 android::mediametrics::LogItem(mMetricsId)
1169 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_SETBUFFERSIZE)
1170 .set(AMEDIAMETRICS_PROP_BUFFERSIZEFRAMES, (int32_t)mProxy->getBufferSizeInFrames())
1171 .set(AMEDIAMETRICS_PROP_UNDERRUN, (int32_t)getUnderrunCount_l())
1172 .record();
1173 }
1174 return finalBufferSize;
1175 }
1176
setLoop(uint32_t loopStart,uint32_t loopEnd,int loopCount)1177 status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
1178 {
1179 if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
1180 return INVALID_OPERATION;
1181 }
1182
1183 if (loopCount == 0) {
1184 ;
1185 } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
1186 loopEnd - loopStart >= MIN_LOOP) {
1187 ;
1188 } else {
1189 return BAD_VALUE;
1190 }
1191
1192 AutoMutex lock(mLock);
1193 // See setPosition() regarding setting parameters such as loop points or position while active
1194 if (mState == STATE_ACTIVE) {
1195 return INVALID_OPERATION;
1196 }
1197 setLoop_l(loopStart, loopEnd, loopCount);
1198 return NO_ERROR;
1199 }
1200
setLoop_l(uint32_t loopStart,uint32_t loopEnd,int loopCount)1201 void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
1202 {
1203 // We do not update the periodic notification point.
1204 // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
1205 mLoopCount = loopCount;
1206 mLoopEnd = loopEnd;
1207 mLoopStart = loopStart;
1208 mLoopCountNotified = loopCount;
1209 mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
1210
1211 // Waking the AudioTrackThread is not needed as this cannot be called when active.
1212 }
1213
setMarkerPosition(uint32_t marker)1214 status_t AudioTrack::setMarkerPosition(uint32_t marker)
1215 {
1216 // The only purpose of setting marker position is to get a callback
1217 if (mCbf == NULL || isOffloadedOrDirect()) {
1218 return INVALID_OPERATION;
1219 }
1220
1221 AutoMutex lock(mLock);
1222 mMarkerPosition = marker;
1223 mMarkerReached = false;
1224
1225 sp<AudioTrackThread> t = mAudioTrackThread;
1226 if (t != 0) {
1227 t->wake();
1228 }
1229 return NO_ERROR;
1230 }
1231
getMarkerPosition(uint32_t * marker) const1232 status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
1233 {
1234 if (isOffloadedOrDirect()) {
1235 return INVALID_OPERATION;
1236 }
1237 if (marker == NULL) {
1238 return BAD_VALUE;
1239 }
1240
1241 AutoMutex lock(mLock);
1242 mMarkerPosition.getValue(marker);
1243
1244 return NO_ERROR;
1245 }
1246
setPositionUpdatePeriod(uint32_t updatePeriod)1247 status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
1248 {
1249 // The only purpose of setting position update period is to get a callback
1250 if (mCbf == NULL || isOffloadedOrDirect()) {
1251 return INVALID_OPERATION;
1252 }
1253
1254 AutoMutex lock(mLock);
1255 mNewPosition = updateAndGetPosition_l() + updatePeriod;
1256 mUpdatePeriod = updatePeriod;
1257
1258 sp<AudioTrackThread> t = mAudioTrackThread;
1259 if (t != 0) {
1260 t->wake();
1261 }
1262 return NO_ERROR;
1263 }
1264
getPositionUpdatePeriod(uint32_t * updatePeriod) const1265 status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
1266 {
1267 if (isOffloadedOrDirect()) {
1268 return INVALID_OPERATION;
1269 }
1270 if (updatePeriod == NULL) {
1271 return BAD_VALUE;
1272 }
1273
1274 AutoMutex lock(mLock);
1275 *updatePeriod = mUpdatePeriod;
1276
1277 return NO_ERROR;
1278 }
1279
setPosition(uint32_t position)1280 status_t AudioTrack::setPosition(uint32_t position)
1281 {
1282 if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
1283 return INVALID_OPERATION;
1284 }
1285 if (position > mFrameCount) {
1286 return BAD_VALUE;
1287 }
1288
1289 AutoMutex lock(mLock);
1290 // Currently we require that the player is inactive before setting parameters such as position
1291 // or loop points. Otherwise, there could be a race condition: the application could read the
1292 // current position, compute a new position or loop parameters, and then set that position or
1293 // loop parameters but it would do the "wrong" thing since the position has continued to advance
1294 // in the mean time. If we ever provide a sequencer in server, we could allow a way for the app
1295 // to specify how it wants to handle such scenarios.
1296 if (mState == STATE_ACTIVE) {
1297 return INVALID_OPERATION;
1298 }
1299 // After setting the position, use full update period before notification.
1300 mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
1301 mStaticProxy->setBufferPosition(position);
1302
1303 // Waking the AudioTrackThread is not needed as this cannot be called when active.
1304 return NO_ERROR;
1305 }
1306
getPosition(uint32_t * position)1307 status_t AudioTrack::getPosition(uint32_t *position)
1308 {
1309 if (position == NULL) {
1310 return BAD_VALUE;
1311 }
1312
1313 AutoMutex lock(mLock);
1314 // FIXME: offloaded and direct tracks call into the HAL for render positions
1315 // for compressed/synced data; however, we use proxy position for pure linear pcm data
1316 // as we do not know the capability of the HAL for pcm position support and standby.
1317 // There may be some latency differences between the HAL position and the proxy position.
1318 if (isOffloadedOrDirect_l() && !isPurePcmData_l()) {
1319 uint32_t dspFrames = 0;
1320
1321 if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
1322 ALOGV("%s(%d): called in paused state, return cached position %u",
1323 __func__, mPortId, mPausedPosition);
1324 *position = mPausedPosition;
1325 return NO_ERROR;
1326 }
1327
1328 if (mOutput != AUDIO_IO_HANDLE_NONE) {
1329 uint32_t halFrames; // actually unused
1330 (void) AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
1331 // FIXME: on getRenderPosition() error, we return OK with frame position 0.
1332 }
1333 // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
1334 // due to hardware latency. We leave this behavior for now.
1335 *position = dspFrames;
1336 } else {
1337 if (mCblk->mFlags & CBLK_INVALID) {
1338 (void) restoreTrack_l("getPosition");
1339 // FIXME: for compatibility with the Java API we ignore the restoreTrack_l()
1340 // error here (e.g. DEAD_OBJECT) and return OK with the last recorded server position.
1341 }
1342
1343 // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
1344 *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
1345 0 : updateAndGetPosition_l().value();
1346 }
1347 return NO_ERROR;
1348 }
1349
getBufferPosition(uint32_t * position)1350 status_t AudioTrack::getBufferPosition(uint32_t *position)
1351 {
1352 if (mSharedBuffer == 0) {
1353 return INVALID_OPERATION;
1354 }
1355 if (position == NULL) {
1356 return BAD_VALUE;
1357 }
1358
1359 AutoMutex lock(mLock);
1360 *position = mStaticProxy->getBufferPosition();
1361 return NO_ERROR;
1362 }
1363
reload()1364 status_t AudioTrack::reload()
1365 {
1366 if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
1367 return INVALID_OPERATION;
1368 }
1369
1370 AutoMutex lock(mLock);
1371 // See setPosition() regarding setting parameters such as loop points or position while active
1372 if (mState == STATE_ACTIVE) {
1373 return INVALID_OPERATION;
1374 }
1375 mNewPosition = mUpdatePeriod;
1376 (void) updateAndGetPosition_l();
1377 mPosition = 0;
1378 mPreviousTimestampValid = false;
1379 #if 0
1380 // The documentation is not clear on the behavior of reload() and the restoration
1381 // of loop count. Historically we have not restored loop count, start, end,
1382 // but it makes sense if one desires to repeat playing a particular sound.
1383 if (mLoopCount != 0) {
1384 mLoopCountNotified = mLoopCount;
1385 mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
1386 }
1387 #endif
1388 mStaticProxy->setBufferPosition(0);
1389 return NO_ERROR;
1390 }
1391
getOutput() const1392 audio_io_handle_t AudioTrack::getOutput() const
1393 {
1394 AutoMutex lock(mLock);
1395 return mOutput;
1396 }
1397
setOutputDevice(audio_port_handle_t deviceId)1398 status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
1399 AutoMutex lock(mLock);
1400 if (mSelectedDeviceId != deviceId) {
1401 mSelectedDeviceId = deviceId;
1402 if (mStatus == NO_ERROR) {
1403 android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
1404 mProxy->interrupt();
1405 }
1406 }
1407 return NO_ERROR;
1408 }
1409
getOutputDevice()1410 audio_port_handle_t AudioTrack::getOutputDevice() {
1411 AutoMutex lock(mLock);
1412 return mSelectedDeviceId;
1413 }
1414
1415 // must be called with mLock held
updateRoutedDeviceId_l()1416 void AudioTrack::updateRoutedDeviceId_l()
1417 {
1418 // if the track is inactive, do not update actual device as the output stream maybe routed
1419 // to a device not relevant to this client because of other active use cases.
1420 if (mState != STATE_ACTIVE) {
1421 return;
1422 }
1423 if (mOutput != AUDIO_IO_HANDLE_NONE) {
1424 audio_port_handle_t deviceId = AudioSystem::getDeviceIdForIo(mOutput);
1425 if (deviceId != AUDIO_PORT_HANDLE_NONE) {
1426 mRoutedDeviceId = deviceId;
1427 }
1428 }
1429 }
1430
getRoutedDeviceId()1431 audio_port_handle_t AudioTrack::getRoutedDeviceId() {
1432 AutoMutex lock(mLock);
1433 updateRoutedDeviceId_l();
1434 return mRoutedDeviceId;
1435 }
1436
attachAuxEffect(int effectId)1437 status_t AudioTrack::attachAuxEffect(int effectId)
1438 {
1439 AutoMutex lock(mLock);
1440 status_t status = mAudioTrack->attachAuxEffect(effectId);
1441 if (status == NO_ERROR) {
1442 mAuxEffectId = effectId;
1443 }
1444 return status;
1445 }
1446
streamType() const1447 audio_stream_type_t AudioTrack::streamType() const
1448 {
1449 if (mStreamType == AUDIO_STREAM_DEFAULT) {
1450 return AudioSystem::attributesToStreamType(mAttributes);
1451 }
1452 return mStreamType;
1453 }
1454
latency()1455 uint32_t AudioTrack::latency()
1456 {
1457 AutoMutex lock(mLock);
1458 updateLatency_l();
1459 return mLatency;
1460 }
1461
1462 // -------------------------------------------------------------------------
1463
1464 // must be called with mLock held
updateLatency_l()1465 void AudioTrack::updateLatency_l()
1466 {
1467 status_t status = AudioSystem::getLatency(mOutput, &mAfLatency);
1468 if (status != NO_ERROR) {
1469 ALOGW("%s(%d): getLatency(%d) failed status %d", __func__, mPortId, mOutput, status);
1470 } else {
1471 // FIXME don't believe this lie
1472 mLatency = mAfLatency + (1000LL * mFrameCount) / mSampleRate;
1473 }
1474 }
1475
1476 // TODO Move this macro to a common header file for enum to string conversion in audio framework.
1477 #define MEDIA_CASE_ENUM(name) case name: return #name
convertTransferToText(transfer_type transferType)1478 const char * AudioTrack::convertTransferToText(transfer_type transferType) {
1479 switch (transferType) {
1480 MEDIA_CASE_ENUM(TRANSFER_DEFAULT);
1481 MEDIA_CASE_ENUM(TRANSFER_CALLBACK);
1482 MEDIA_CASE_ENUM(TRANSFER_OBTAIN);
1483 MEDIA_CASE_ENUM(TRANSFER_SYNC);
1484 MEDIA_CASE_ENUM(TRANSFER_SHARED);
1485 MEDIA_CASE_ENUM(TRANSFER_SYNC_NOTIF_CALLBACK);
1486 default:
1487 return "UNRECOGNIZED";
1488 }
1489 }
1490
createTrack_l()1491 status_t AudioTrack::createTrack_l()
1492 {
1493 status_t status;
1494 bool callbackAdded = false;
1495
1496 const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
1497 if (audioFlinger == 0) {
1498 ALOGE("%s(%d): Could not get audioflinger",
1499 __func__, mPortId);
1500 status = NO_INIT;
1501 goto exit;
1502 }
1503
1504 {
1505 // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
1506 // After fast request is denied, we will request again if IAudioTrack is re-created.
1507 // Client can only express a preference for FAST. Server will perform additional tests.
1508 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1509 // either of these use cases:
1510 // use case 1: shared buffer
1511 bool sharedBuffer = mSharedBuffer != 0;
1512 bool transferAllowed =
1513 // use case 2: callback transfer mode
1514 (mTransfer == TRANSFER_CALLBACK) ||
1515 // use case 3: obtain/release mode
1516 (mTransfer == TRANSFER_OBTAIN) ||
1517 // use case 4: synchronous write
1518 ((mTransfer == TRANSFER_SYNC || mTransfer == TRANSFER_SYNC_NOTIF_CALLBACK)
1519 && mThreadCanCallJava);
1520
1521 bool fastAllowed = sharedBuffer || transferAllowed;
1522 if (!fastAllowed) {
1523 ALOGW("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by client,"
1524 " not shared buffer and transfer = %s",
1525 __func__, mPortId,
1526 convertTransferToText(mTransfer));
1527 mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1528 }
1529 }
1530
1531 IAudioFlinger::CreateTrackInput input;
1532 if (mStreamType != AUDIO_STREAM_DEFAULT) {
1533 input.attr = AudioSystem::streamTypeToAttributes(mStreamType);
1534 } else {
1535 input.attr = mAttributes;
1536 }
1537 input.config = AUDIO_CONFIG_INITIALIZER;
1538 input.config.sample_rate = mSampleRate;
1539 input.config.channel_mask = mChannelMask;
1540 input.config.format = mFormat;
1541 input.config.offload_info = mOffloadInfoCopy;
1542 input.clientInfo.clientUid = mClientUid;
1543 input.clientInfo.clientPid = mClientPid;
1544 input.clientInfo.clientTid = -1;
1545 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1546 // It is currently meaningless to request SCHED_FIFO for a Java thread. Even if the
1547 // application-level code follows all non-blocking design rules, the language runtime
1548 // doesn't also follow those rules, so the thread will not benefit overall.
1549 if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
1550 input.clientInfo.clientTid = mAudioTrackThread->getTid();
1551 }
1552 }
1553 input.sharedBuffer = mSharedBuffer;
1554 input.notificationsPerBuffer = mNotificationsPerBufferReq;
1555 input.speed = 1.0;
1556 if (audio_has_proportional_frames(mFormat) && mSharedBuffer == 0 &&
1557 (mFlags & AUDIO_OUTPUT_FLAG_FAST) == 0) {
1558 input.speed = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
1559 max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
1560 }
1561 input.flags = mFlags;
1562 input.frameCount = mReqFrameCount;
1563 input.notificationFrameCount = mNotificationFramesReq;
1564 input.selectedDeviceId = mSelectedDeviceId;
1565 input.sessionId = mSessionId;
1566 input.audioTrackCallback = mAudioTrackCallback;
1567 input.opPackageName = mOpPackageName;
1568
1569 IAudioFlinger::CreateTrackOutput output;
1570
1571 sp<IAudioTrack> track = audioFlinger->createTrack(input,
1572 output,
1573 &status);
1574
1575 if (status != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
1576 ALOGE("%s(%d): AudioFlinger could not create track, status: %d output %d",
1577 __func__, mPortId, status, output.outputId);
1578 if (status == NO_ERROR) {
1579 status = NO_INIT;
1580 }
1581 goto exit;
1582 }
1583 ALOG_ASSERT(track != 0);
1584
1585 mFrameCount = output.frameCount;
1586 mNotificationFramesAct = (uint32_t)output.notificationFrameCount;
1587 mRoutedDeviceId = output.selectedDeviceId;
1588 mSessionId = output.sessionId;
1589
1590 mSampleRate = output.sampleRate;
1591 if (mOriginalSampleRate == 0) {
1592 mOriginalSampleRate = mSampleRate;
1593 }
1594
1595 mAfFrameCount = output.afFrameCount;
1596 mAfSampleRate = output.afSampleRate;
1597 mAfLatency = output.afLatencyMs;
1598
1599 mLatency = mAfLatency + (1000LL * mFrameCount) / mSampleRate;
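        // Worked example with illustrative numbers: for mAfLatency = 20 ms,
        // mFrameCount = 960 and mSampleRate = 48000, the client buffer adds
        // 1000 * 960 / 48000 = 20 ms, giving mLatency = 40 ms.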
1600
1601 // AudioFlinger now owns the reference to the I/O handle,
1602 // so we are no longer responsible for releasing it.
1603
1604 // FIXME compare to AudioRecord
1605 sp<IMemory> iMem = track->getCblk();
1606 if (iMem == 0) {
1607 ALOGE("%s(%d): Could not get control block", __func__, mPortId);
1608 status = NO_INIT;
1609 goto exit;
1610 }
1611 // TODO: Using unsecurePointer() has some associated security pitfalls
1612 // (see declaration for details).
1613 // Either document why it is safe in this case or address the
1614 // issue (e.g. by copying).
1615 void *iMemPointer = iMem->unsecurePointer();
1616 if (iMemPointer == NULL) {
1617 ALOGE("%s(%d): Could not get control block pointer", __func__, mPortId);
1618 status = NO_INIT;
1619 goto exit;
1620 }
1621 // invariant that mAudioTrack != 0 is true only after set() returns successfully
1622 if (mAudioTrack != 0) {
1623 IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
1624 mDeathNotifier.clear();
1625 }
1626 mAudioTrack = track;
1627 mCblkMemory = iMem;
1628 IPCThreadState::self()->flushCommands();
1629
1630 audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1631 mCblk = cblk;
1632
1633 mAwaitBoost = false;
1634 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1635 if (output.flags & AUDIO_OUTPUT_FLAG_FAST) {
1636 ALOGI("%s(%d): AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu -> %zu",
1637 __func__, mPortId, mReqFrameCount, mFrameCount);
1638 if (!mThreadCanCallJava) {
1639 mAwaitBoost = true;
1640 }
1641 } else {
1642 ALOGD("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu",
1643 __func__, mPortId, mReqFrameCount, mFrameCount);
1644 }
1645 }
1646 mFlags = output.flags;
1647
1648 //mOutput != output includes the case where mOutput == AUDIO_IO_HANDLE_NONE for first creation
1649 if (mDeviceCallback != 0) {
1650 if (mOutput != AUDIO_IO_HANDLE_NONE) {
1651 AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
1652 }
1653 AudioSystem::addAudioDeviceCallback(this, output.outputId, output.portId);
1654 callbackAdded = true;
1655 }
1656
1657 mPortId = output.portId;
1658 // We retain a copy of the I/O handle, but don't own the reference
1659 mOutput = output.outputId;
1660 mRefreshRemaining = true;
1661
1662 // Starting address of buffers in shared memory. If there is a shared buffer, buffers
1663 // is the value of pointer() for the shared buffer, otherwise buffers points
1664 // immediately after the control block. This address is for the mapping within client
1665 // address space. AudioFlinger::TrackBase::mBuffer is for the server address space.
1666 void* buffers;
1667 if (mSharedBuffer == 0) {
1668 buffers = cblk + 1;
1669 } else {
1670 // TODO: Using unsecurePointer() has some associated security pitfalls
1671 // (see declaration for details).
1672 // Either document why it is safe in this case or address the
1673 // issue (e.g. by copying).
1674 buffers = mSharedBuffer->unsecurePointer();
1675 if (buffers == NULL) {
1676 ALOGE("%s(%d): Could not get buffer pointer", __func__, mPortId);
1677 status = NO_INIT;
1678 goto exit;
1679 }
1680 }
1681
1682 mAudioTrack->attachAuxEffect(mAuxEffectId);
1683
1684 // If IAudioTrack is re-created, don't let the requested frameCount
1685 // decrease. This can confuse clients that cache frameCount().
1686 if (mFrameCount > mReqFrameCount) {
1687 mReqFrameCount = mFrameCount;
1688 }
1689
1690 // reset server position to 0 as we have new cblk.
1691 mServer = 0;
1692
1693 // update proxy
1694 if (mSharedBuffer == 0) {
1695 mStaticProxy.clear();
1696 mProxy = new AudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
1697 } else {
1698 mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
1699 mProxy = mStaticProxy;
1700 }
1701
1702 mProxy->setVolumeLR(gain_minifloat_pack(
1703 gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
1704 gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));
1705
1706 mProxy->setSendLevel(mSendLevel);
1707 const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
1708 const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
1709 const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
1710 mProxy->setSampleRate(effectiveSampleRate);
1711
1712 AudioPlaybackRate playbackRateTemp = mPlaybackRate;
1713 playbackRateTemp.mSpeed = effectiveSpeed;
1714 playbackRateTemp.mPitch = effectivePitch;
1715 mProxy->setPlaybackRate(playbackRateTemp);
1716 mProxy->setMinimum(mNotificationFramesAct);
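        // Worked example with illustrative numbers: for mSampleRate = 48000 and
        // mPlaybackRate = {mSpeed = 1.0f, mPitch = 2.0f}, the adjust*() helpers above
        // yield effectiveSampleRate = 96000, effectiveSpeed = 0.5f and
        // effectivePitch = AUDIO_TIMESTRETCH_PITCH_NORMAL for the proxy.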
1717
1718 mDeathNotifier = new DeathNotifier(this);
1719 IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
1720
1721 // This is the first log sent from the AudioTrack client.
1722 // The creation of the audio track by AudioFlinger (in the code above)
1723 // is the first log of the AudioTrack and must be present before
1724 // any AudioTrack client logs will be accepted.
1725
1726 mMetricsId = std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK) + std::to_string(mPortId);
1727 mediametrics::LogItem(mMetricsId)
1728 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE)
1729 // the following are immutable
1730 .set(AMEDIAMETRICS_PROP_FLAGS, toString(mFlags).c_str())
1731 .set(AMEDIAMETRICS_PROP_ORIGINALFLAGS, toString(mOrigFlags).c_str())
1732 .set(AMEDIAMETRICS_PROP_SESSIONID, (int32_t)mSessionId)
1733 .set(AMEDIAMETRICS_PROP_TRACKID, mPortId) // dup from key
1734 .set(AMEDIAMETRICS_PROP_CONTENTTYPE, toString(mAttributes.content_type).c_str())
1735 .set(AMEDIAMETRICS_PROP_USAGE, toString(mAttributes.usage).c_str())
1736 .set(AMEDIAMETRICS_PROP_THREADID, (int32_t)output.outputId)
1737 .set(AMEDIAMETRICS_PROP_SELECTEDDEVICEID, (int32_t)mSelectedDeviceId)
1738 .set(AMEDIAMETRICS_PROP_ROUTEDDEVICEID, (int32_t)mRoutedDeviceId)
1739 .set(AMEDIAMETRICS_PROP_ENCODING, toString(mFormat).c_str())
1740 .set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
1741 .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mFrameCount)
1742 // the following are NOT immutable
1743 .set(AMEDIAMETRICS_PROP_VOLUME_LEFT, (double)mVolume[AUDIO_INTERLEAVE_LEFT])
1744 .set(AMEDIAMETRICS_PROP_VOLUME_RIGHT, (double)mVolume[AUDIO_INTERLEAVE_RIGHT])
1745 .set(AMEDIAMETRICS_PROP_STATE, stateToString(mState))
1746 .set(AMEDIAMETRICS_PROP_AUXEFFECTID, (int32_t)mAuxEffectId)
1747 .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
1748 .set(AMEDIAMETRICS_PROP_PLAYBACK_SPEED, (double)mPlaybackRate.mSpeed)
1749 .set(AMEDIAMETRICS_PROP_PLAYBACK_PITCH, (double)mPlaybackRate.mPitch)
1750 .set(AMEDIAMETRICS_PROP_PREFIX_EFFECTIVE
1751 AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)effectiveSampleRate)
1752 .set(AMEDIAMETRICS_PROP_PREFIX_EFFECTIVE
1753 AMEDIAMETRICS_PROP_PLAYBACK_SPEED, (double)effectiveSpeed)
1754 .set(AMEDIAMETRICS_PROP_PREFIX_EFFECTIVE
1755 AMEDIAMETRICS_PROP_PLAYBACK_PITCH, (double)effectivePitch)
1756 .record();
1757
1758 // mSendLevel
1759 // mReqFrameCount?
1760 // mNotificationFramesAct, mNotificationFramesReq, mNotificationsPerBufferReq
1761 // mLatency, mAfLatency, mAfFrameCount, mAfSampleRate
1762
1763 }
1764
1765 exit:
1766 if (status != NO_ERROR && callbackAdded) {
1767     // note: mOutput is always valid if callbackAdded is true
1768 AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
1769 }
1770
1771 mStatus = status;
1772
1773 // sp<IAudioTrack> track destructor will cause releaseOutput() to be called by AudioFlinger
1774 return status;
1775 }
1776
1777 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
1778 {
1779 if (audioBuffer == NULL) {
1780 if (nonContig != NULL) {
1781 *nonContig = 0;
1782 }
1783 return BAD_VALUE;
1784 }
1785 if (mTransfer != TRANSFER_OBTAIN) {
1786 audioBuffer->frameCount = 0;
1787 audioBuffer->size = 0;
1788 audioBuffer->raw = NULL;
1789 if (nonContig != NULL) {
1790 *nonContig = 0;
1791 }
1792 return INVALID_OPERATION;
1793 }
1794
1795 const struct timespec *requested;
1796 struct timespec timeout;
1797 if (waitCount == -1) {
1798 requested = &ClientProxy::kForever;
1799 } else if (waitCount == 0) {
1800 requested = &ClientProxy::kNonBlocking;
1801 } else if (waitCount > 0) {
1802 time_t ms = WAIT_PERIOD_MS * (time_t) waitCount;
1803 timeout.tv_sec = ms / 1000;
1804 timeout.tv_nsec = (ms % 1000) * 1000000;
1805 requested = &timeout;
1806 } else {
1807 ALOGE("%s(%d): invalid waitCount %d", __func__, mPortId, waitCount);
1808 requested = NULL;
1809 }
1810 return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
1811 }
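// Usage sketch (illustrative only; "track", "source" and "framesWanted" are hypothetical
// client-side names): an application that created its AudioTrack with TRANSFER_OBTAIN
// might pump audio roughly as follows.
//
//     AudioTrack::Buffer buf;
//     buf.frameCount = framesWanted;                       // in: desired frame count
//     if (track->obtainBuffer(&buf, -1 /* wait forever */, nullptr) == NO_ERROR) {
//         memcpy(buf.raw, source, buf.size);               // fill exactly buf.size bytes
//         track->releaseBuffer(&buf);                      // hand the frames to the server
//     }
//
// A waitCount of 0 makes the call non-blocking, and a positive waitCount blocks for at most
// waitCount * WAIT_PERIOD_MS milliseconds, per the conversion above.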
1812
1813 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1814 struct timespec *elapsed, size_t *nonContig)
1815 {
1816 // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1817 uint32_t oldSequence = 0;
1818
1819 Proxy::Buffer buffer;
1820 status_t status = NO_ERROR;
1821
1822 static const int32_t kMaxTries = 5;
1823 int32_t tryCounter = kMaxTries;
1824
1825 do {
1826 // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1827 // keep them from going away if another thread re-creates the track during obtainBuffer()
1828 sp<AudioTrackClientProxy> proxy;
1829 sp<IMemory> iMem;
1830
1831 { // start of lock scope
1832 AutoMutex lock(mLock);
1833
1834 uint32_t newSequence = mSequence;
1835 // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1836 if (status == DEAD_OBJECT) {
1837 // re-create track, unless someone else has already done so
1838 if (newSequence == oldSequence) {
1839 status = restoreTrack_l("obtainBuffer");
1840 if (status != NO_ERROR) {
1841 buffer.mFrameCount = 0;
1842 buffer.mRaw = NULL;
1843 buffer.mNonContig = 0;
1844 break;
1845 }
1846 }
1847 }
1848 oldSequence = newSequence;
1849
1850 if (status == NOT_ENOUGH_DATA) {
1851 restartIfDisabled();
1852 }
1853
1854 // Keep the extra references
1855 proxy = mProxy;
1856 iMem = mCblkMemory;
1857
1858 if (mState == STATE_STOPPING) {
1859 status = -EINTR;
1860 buffer.mFrameCount = 0;
1861 buffer.mRaw = NULL;
1862 buffer.mNonContig = 0;
1863 break;
1864 }
1865
1866 // Non-blocking if track is stopped or paused
1867 if (mState != STATE_ACTIVE) {
1868 requested = &ClientProxy::kNonBlocking;
1869 }
1870
1871 } // end of lock scope
1872
1873 buffer.mFrameCount = audioBuffer->frameCount;
1874 // FIXME starts the requested timeout and elapsed over from scratch
1875 status = proxy->obtainBuffer(&buffer, requested, elapsed);
1876 } while (((status == DEAD_OBJECT) || (status == NOT_ENOUGH_DATA)) && (tryCounter-- > 0));
1877
1878 audioBuffer->frameCount = buffer.mFrameCount;
1879 audioBuffer->size = buffer.mFrameCount * mFrameSize;
1880 audioBuffer->raw = buffer.mRaw;
1881 audioBuffer->sequence = oldSequence;
1882 if (nonContig != NULL) {
1883 *nonContig = buffer.mNonContig;
1884 }
1885 return status;
1886 }
1887
1888 void AudioTrack::releaseBuffer(const Buffer* audioBuffer)
1889 {
1890 // FIXME add error checking on mode, by adding an internal version
1891 if (mTransfer == TRANSFER_SHARED) {
1892 return;
1893 }
1894
1895 size_t stepCount = audioBuffer->size / mFrameSize;
1896 if (stepCount == 0) {
1897 return;
1898 }
1899
1900 Proxy::Buffer buffer;
1901 buffer.mFrameCount = stepCount;
1902 buffer.mRaw = audioBuffer->raw;
1903
1904 AutoMutex lock(mLock);
1905 if (audioBuffer->sequence != mSequence) {
1906 // This Buffer came from a different IAudioTrack instance, so ignore the releaseBuffer
1907 ALOGD("%s is no-op due to IAudioTrack sequence mismatch %u != %u",
1908 __func__, audioBuffer->sequence, mSequence);
1909 return;
1910 }
1911 mReleased += stepCount;
1912 mInUnderrun = false;
1913 mProxy->releaseBuffer(&buffer);
1914
1915 // restart track if it was disabled by audioflinger due to previous underrun
1916 restartIfDisabled();
1917 }
1918
1919 void AudioTrack::restartIfDisabled()
1920 {
1921 int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
1922 if ((mState == STATE_ACTIVE) && (flags & CBLK_DISABLED)) {
1923 ALOGW("%s(%d): releaseBuffer() track %p disabled due to previous underrun, restarting",
1924 __func__, mPortId, this);
1925 // FIXME ignoring status
1926 mAudioTrack->start();
1927 }
1928 }
1929
1930 // -------------------------------------------------------------------------
1931
1932 ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1933 {
1934 if (mTransfer != TRANSFER_SYNC && mTransfer != TRANSFER_SYNC_NOTIF_CALLBACK) {
1935 return INVALID_OPERATION;
1936 }
1937
1938 if (isDirect()) {
1939 AutoMutex lock(mLock);
1940 int32_t flags = android_atomic_and(
1941 ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
1942 &mCblk->mFlags);
1943 if (flags & CBLK_INVALID) {
1944 return DEAD_OBJECT;
1945 }
1946 }
1947
1948 if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1949         // Sanity-check: user is most likely passing an error code, and it would
1950         // make the return value ambiguous (actualSize vs error).
1951         ALOGE("%s(%d): AudioTrack::write(buffer=%p, size=%zu (%zd))",
1952 __func__, mPortId, buffer, userSize, userSize);
1953 return BAD_VALUE;
1954 }
1955
1956 size_t written = 0;
1957 Buffer audioBuffer;
1958
1959 while (userSize >= mFrameSize) {
1960 audioBuffer.frameCount = userSize / mFrameSize;
1961
1962 status_t err = obtainBuffer(&audioBuffer,
1963 blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1964 if (err < 0) {
1965 if (written > 0) {
1966 break;
1967 }
1968 if (err == TIMED_OUT || err == -EINTR) {
1969 err = WOULD_BLOCK;
1970 }
1971 return ssize_t(err);
1972 }
1973
1974 size_t toWrite = audioBuffer.size;
1975 memcpy(audioBuffer.i8, buffer, toWrite);
1976 buffer = ((const char *) buffer) + toWrite;
1977 userSize -= toWrite;
1978 written += toWrite;
1979
1980 releaseBuffer(&audioBuffer);
1981 }
1982
1983 if (written > 0) {
1984 mFramesWritten += written / mFrameSize;
1985
1986 if (mTransfer == TRANSFER_SYNC_NOTIF_CALLBACK) {
1987 const sp<AudioTrackThread> t = mAudioTrackThread;
1988 if (t != 0) {
1989 // causes wake up of the playback thread, that will callback the client for
1990 // more data (with EVENT_CAN_WRITE_MORE_DATA) in processAudioBuffer()
1991 t->wake();
1992 }
1993 }
1994 }
1995
1996 return written;
1997 }
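// Usage sketch (illustrative only; "track", "pcm" and "bytes" are hypothetical client names):
// a TRANSFER_SYNC client typically loops on write() until its source buffer is drained.
//
//     const uint8_t *pcm = /* source data */;
//     size_t bytes = /* source size */;
//     while (bytes > 0) {
//         ssize_t n = track->write(pcm, bytes, true /* blocking */);
//         if (n <= 0) break;           // error (e.g. DEAD_OBJECT) or less than one frame left
//         pcm += n;
//         bytes -= n;
//     }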
1998
1999 // -------------------------------------------------------------------------
2000
2001 nsecs_t AudioTrack::processAudioBuffer()
2002 {
2003 // Currently the AudioTrack thread is not created if there are no callbacks.
2004 // Would it ever make sense to run the thread, even without callbacks?
2005 // If so, then replace this by checks at each use for mCbf != NULL.
2006 LOG_ALWAYS_FATAL_IF(mCblk == NULL);
2007
2008 mLock.lock();
2009 if (mAwaitBoost) {
2010 mAwaitBoost = false;
2011 mLock.unlock();
2012 static const int32_t kMaxTries = 5;
2013 int32_t tryCounter = kMaxTries;
2014 uint32_t pollUs = 10000;
2015 do {
2016 int policy = sched_getscheduler(0) & ~SCHED_RESET_ON_FORK;
2017 if (policy == SCHED_FIFO || policy == SCHED_RR) {
2018 break;
2019 }
2020 usleep(pollUs);
2021 pollUs <<= 1;
2022 } while (tryCounter-- > 0);
2023 if (tryCounter < 0) {
2024 ALOGE("%s(%d): did not receive expected priority boost on time",
2025 __func__, mPortId);
2026 }
2027 // Run again immediately
2028 return 0;
2029 }
2030
2031 // Can only reference mCblk while locked
2032 int32_t flags = android_atomic_and(
2033 ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
2034
2035 // Check for track invalidation
2036 if (flags & CBLK_INVALID) {
2037 // for offloaded tracks restoreTrack_l() will just update the sequence and clear
2038 // AudioSystem cache. We should not exit here but after calling the callback so
2039 // that the upper layers can recreate the track
2040 if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
2041 status_t status __unused = restoreTrack_l("processAudioBuffer");
2042 // FIXME unused status
2043 // after restoration, continue below to make sure that the loop and buffer events
2044 // are notified because they have been cleared from mCblk->mFlags above.
2045 }
2046 }
2047
2048 bool waitStreamEnd = mState == STATE_STOPPING;
2049 bool active = mState == STATE_ACTIVE;
2050
2051 // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
2052 bool newUnderrun = false;
2053 if (flags & CBLK_UNDERRUN) {
2054 #if 0
2055 // Currently in shared buffer mode, when the server reaches the end of buffer,
2056 // the track stays active in continuous underrun state. It's up to the application
2057 // to pause or stop the track, or set the position to a new offset within buffer.
2058 // This was some experimental code to auto-pause on underrun. Keeping it here
2059 // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
2060 if (mTransfer == TRANSFER_SHARED) {
2061 mState = STATE_PAUSED;
2062 active = false;
2063 }
2064 #endif
2065 if (!mInUnderrun) {
2066 mInUnderrun = true;
2067 newUnderrun = true;
2068 }
2069 }
2070
2071 // Get current position of server
2072 Modulo<uint32_t> position(updateAndGetPosition_l());
2073
2074 // Manage marker callback
2075 bool markerReached = false;
2076 Modulo<uint32_t> markerPosition(mMarkerPosition);
2077 // uses 32 bit wraparound for comparison with position.
2078 if (!mMarkerReached && markerPosition.value() > 0 && position >= markerPosition) {
2079 mMarkerReached = markerReached = true;
2080 }
2081
2082 // Determine number of new position callback(s) that will be needed, while locked
2083 size_t newPosCount = 0;
2084 Modulo<uint32_t> newPosition(mNewPosition);
2085 uint32_t updatePeriod = mUpdatePeriod;
2086 // FIXME fails for wraparound, need 64 bits
2087 if (updatePeriod > 0 && position >= newPosition) {
2088 newPosCount = ((position - newPosition).value() / updatePeriod) + 1;
2089 mNewPosition += updatePeriod * newPosCount;
2090 }
2091
2092 // Cache other fields that will be needed soon
2093 uint32_t sampleRate = mSampleRate;
2094 float speed = mPlaybackRate.mSpeed;
2095 const uint32_t notificationFrames = mNotificationFramesAct;
2096 if (mRefreshRemaining) {
2097 mRefreshRemaining = false;
2098 mRemainingFrames = notificationFrames;
2099 mRetryOnPartialBuffer = false;
2100 }
2101 size_t misalignment = mProxy->getMisalignment();
2102 uint32_t sequence = mSequence;
2103 sp<AudioTrackClientProxy> proxy = mProxy;
2104
2105 // Determine the number of new loop callback(s) that will be needed, while locked.
2106 int loopCountNotifications = 0;
2107 uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
2108
2109 if (mLoopCount > 0) {
2110 int loopCount;
2111 size_t bufferPosition;
2112 mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
2113 loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
2114 loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
2115 mLoopCountNotified = loopCount; // discard any excess notifications
2116 } else if (mLoopCount < 0) {
2117 // FIXME: We're not accurate with notification count and position with infinite looping
2118 // since loopCount from server side will always return -1 (we could decrement it).
2119 size_t bufferPosition = mStaticProxy->getBufferPosition();
2120 loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
2121 loopPeriod = mLoopEnd - bufferPosition;
2122 } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
2123 size_t bufferPosition = mStaticProxy->getBufferPosition();
2124 loopPeriod = mFrameCount - bufferPosition;
2125 }
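    // Worked example with illustrative numbers: for a static track with mFrameCount = 1000,
    // mLoopEnd = 800, bufferPosition = 200 and an active loop (loopCount > 0), loopPeriod is
    // 600 frames until the next EVENT_LOOP_END; once the loops are exhausted (loopCount == 0),
    // the same position gives 800 frames until EVENT_BUFFER_END.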
2126
2127 // These fields don't need to be cached, because they are assigned only by set():
2128 // mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
2129 // mFlags is also assigned by createTrack_l(), but not the bit we care about.
2130
2131 mLock.unlock();
2132
2133 // get anchor time to account for callbacks.
2134 const nsecs_t timeBeforeCallbacks = systemTime();
2135
2136 if (waitStreamEnd) {
2137 // FIXME: Instead of blocking in proxy->waitStreamEndDone(), Callback thread
2138 // should wait on proxy futex and handle CBLK_STREAM_END_DONE within this function
2139 // (and make sure we don't callback for more data while we're stopping).
2140 // This helps with position, marker notifications, and track invalidation.
2141 struct timespec timeout;
2142 timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
2143 timeout.tv_nsec = 0;
2144
2145 status_t status = proxy->waitStreamEndDone(&timeout);
2146 switch (status) {
2147 case NO_ERROR:
2148 case DEAD_OBJECT:
2149 case TIMED_OUT:
2150 if (status != DEAD_OBJECT) {
2151                 // for DEAD_OBJECT, we do not send an EVENT_STREAM_END after stop();
2152 // instead, the application should handle the EVENT_NEW_IAUDIOTRACK.
2153 mCbf(EVENT_STREAM_END, mUserData, NULL);
2154 }
2155 {
2156 AutoMutex lock(mLock);
2157 // The previously assigned value of waitStreamEnd is no longer valid,
2158 // since the mutex has been unlocked and either the callback handler
2159 // or another thread could have re-started the AudioTrack during that time.
2160 waitStreamEnd = mState == STATE_STOPPING;
2161 if (waitStreamEnd) {
2162 mState = STATE_STOPPED;
2163 mReleased = 0;
2164 }
2165 }
2166 if (waitStreamEnd && status != DEAD_OBJECT) {
2167 return NS_INACTIVE;
2168 }
2169 break;
2170 }
2171 return 0;
2172 }
2173
2174 // perform callbacks while unlocked
2175 if (newUnderrun) {
2176 mCbf(EVENT_UNDERRUN, mUserData, NULL);
2177 }
2178 while (loopCountNotifications > 0) {
2179 mCbf(EVENT_LOOP_END, mUserData, NULL);
2180 --loopCountNotifications;
2181 }
2182 if (flags & CBLK_BUFFER_END) {
2183 mCbf(EVENT_BUFFER_END, mUserData, NULL);
2184 }
2185 if (markerReached) {
2186 mCbf(EVENT_MARKER, mUserData, &markerPosition);
2187 }
2188 while (newPosCount > 0) {
2189 size_t temp = newPosition.value(); // FIXME size_t != uint32_t
2190 mCbf(EVENT_NEW_POS, mUserData, &temp);
2191 newPosition += updatePeriod;
2192 newPosCount--;
2193 }
2194
2195 if (mObservedSequence != sequence) {
2196 mObservedSequence = sequence;
2197 mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
2198 // for offloaded tracks, just wait for the upper layers to recreate the track
2199 if (isOffloadedOrDirect()) {
2200 return NS_INACTIVE;
2201 }
2202 }
2203
2204 // if inactive, then don't run me again until re-started
2205 if (!active) {
2206 return NS_INACTIVE;
2207 }
2208
2209 // Compute the estimated time until the next timed event (position, markers, loops)
2210 // FIXME only for non-compressed audio
2211 uint32_t minFrames = ~0;
2212 if (!markerReached && position < markerPosition) {
2213 minFrames = (markerPosition - position).value();
2214 }
2215 if (loopPeriod > 0 && loopPeriod < minFrames) {
2216 // loopPeriod is already adjusted for actual position.
2217 minFrames = loopPeriod;
2218 }
2219 if (updatePeriod > 0) {
2220 minFrames = min(minFrames, (newPosition - position).value());
2221 }
2222
2223 // If > 0, poll periodically to recover from a stuck server. A good value is 2.
2224 static const uint32_t kPoll = 0;
2225 if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
2226 minFrames = kPoll * notificationFrames;
2227 }
2228
2229 // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
2230 static const nsecs_t kWaitPeriodNs = WAIT_PERIOD_MS * 1000000LL;
2231 const nsecs_t timeAfterCallbacks = systemTime();
2232
2233 // Convert frame units to time units
2234 nsecs_t ns = NS_WHENEVER;
2235 if (minFrames != (uint32_t) ~0) {
2236 // AudioFlinger consumption of client data may be irregular when coming out of device
2237 // standby since the kernel buffers require filling. This is throttled to no more than 2x
2238 // the expected rate in the MixerThread. Hence, we reduce the estimated time to wait by one
2239 // half (but no more than half a second) to improve callback accuracy during these temporary
2240 // data surges.
2241 const nsecs_t estimatedNs = framesToNanoseconds(minFrames, sampleRate, speed);
2242 constexpr nsecs_t maxThrottleCompensationNs = 500000000LL;
2243 ns = estimatedNs - min(estimatedNs / 2, maxThrottleCompensationNs) + kWaitPeriodNs;
2244 ns -= (timeAfterCallbacks - timeBeforeCallbacks); // account for callback time
2245 // TODO: Should we warn if the callback time is too long?
2246 if (ns < 0) ns = 0;
2247 }
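    // Worked example with illustrative numbers: if minFrames corresponds to 100 ms of audio
    // (4800 frames at 48 kHz, speed 1.0), then estimatedNs = 100 ms, half of that (50 ms,
    // below the 500 ms cap) is subtracted, and the 10 ms wait period is added back, so the
    // callback thread sleeps roughly 60 ms minus the time spent in the callbacks above.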
2248
2249 // If not supplying data by EVENT_MORE_DATA or EVENT_CAN_WRITE_MORE_DATA, then we're done
2250 if (mTransfer != TRANSFER_CALLBACK && mTransfer != TRANSFER_SYNC_NOTIF_CALLBACK) {
2251 return ns;
2252 }
2253
2254 // EVENT_MORE_DATA callback handling.
2255 // Timing for linear pcm audio data formats can be derived directly from the
2256 // buffer fill level.
2257 // Timing for compressed data is not directly available from the buffer fill level,
2258 // rather indirectly from waiting for blocking mode callbacks or waiting for obtain()
2259 // to return a certain fill level.
2260
2261 struct timespec timeout;
2262 const struct timespec *requested = &ClientProxy::kForever;
2263 if (ns != NS_WHENEVER) {
2264 timeout.tv_sec = ns / 1000000000LL;
2265 timeout.tv_nsec = ns % 1000000000LL;
2266 ALOGV("%s(%d): timeout %ld.%03d",
2267 __func__, mPortId, timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
2268 requested = &timeout;
2269 }
2270
2271 size_t writtenFrames = 0;
2272 while (mRemainingFrames > 0) {
2273
2274 Buffer audioBuffer;
2275 audioBuffer.frameCount = mRemainingFrames;
2276 size_t nonContig;
2277 status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
2278 LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
2279 "%s(%d): obtainBuffer() err=%d frameCount=%zu",
2280 __func__, mPortId, err, audioBuffer.frameCount);
2281 requested = &ClientProxy::kNonBlocking;
2282 size_t avail = audioBuffer.frameCount + nonContig;
2283 ALOGV("%s(%d): obtainBuffer(%u) returned %zu = %zu + %zu err %d",
2284 __func__, mPortId, mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
2285 if (err != NO_ERROR) {
2286 if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
2287 (isOffloaded() && (err == DEAD_OBJECT))) {
2288 // FIXME bug 25195759
2289 return 1000000;
2290 }
2291 ALOGE("%s(%d): Error %d obtaining an audio buffer, giving up.",
2292 __func__, mPortId, err);
2293 return NS_NEVER;
2294 }
2295
2296 if (mRetryOnPartialBuffer && audio_has_proportional_frames(mFormat)) {
2297 mRetryOnPartialBuffer = false;
2298 if (avail < mRemainingFrames) {
2299 if (ns > 0) { // account for obtain time
2300 const nsecs_t timeNow = systemTime();
2301 ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2302 }
2303
2304                 // delayNs is first computed from the additional frames required in the buffer.
2305 nsecs_t delayNs = framesToNanoseconds(
2306 mRemainingFrames - avail, sampleRate, speed);
2307
2308 // afNs is the AudioFlinger mixer period in ns.
2309 const nsecs_t afNs = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
2310
2311 // If the AudioTrack is double buffered based on the AudioFlinger mixer period,
2312 // we may have a race if we wait based on the number of frames desired.
2313 // This is a possible issue with resampling and AAudio.
2314 //
2315 // The granularity of audioflinger processing is one mixer period; if
2316 // our wait time is less than one mixer period, wait at most half the period.
2317 if (delayNs < afNs) {
2318 delayNs = std::min(delayNs, afNs / 2);
2319 }
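                // Worked example with illustrative numbers: with mAfFrameCount = 960 at
                // mAfSampleRate = 48000 (afNs = 20 ms), a computed delayNs of 15 ms is
                // clamped to 10 ms so we re-poll within one mixer period.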
2320
2321 // adjust our ns wait by delayNs.
2322 if (ns < 0 /* NS_WHENEVER */ || delayNs < ns) {
2323 ns = delayNs;
2324 }
2325 return ns;
2326 }
2327 }
2328
2329 size_t reqSize = audioBuffer.size;
2330 if (mTransfer == TRANSFER_SYNC_NOTIF_CALLBACK) {
2331 // when notifying client it can write more data, pass the total size that can be
2332 // written in the next write() call, since it's not passed through the callback
2333 audioBuffer.size += nonContig;
2334 }
2335 mCbf(mTransfer == TRANSFER_CALLBACK ? EVENT_MORE_DATA : EVENT_CAN_WRITE_MORE_DATA,
2336 mUserData, &audioBuffer);
2337 size_t writtenSize = audioBuffer.size;
2338
2339 // Sanity check on returned size
2340 if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
2341 ALOGE("%s(%d): EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
2342 __func__, mPortId, reqSize, ssize_t(writtenSize));
2343 return NS_NEVER;
2344 }
2345
2346 if (writtenSize == 0) {
2347 if (mTransfer == TRANSFER_SYNC_NOTIF_CALLBACK) {
2348                 // The callback EVENT_CAN_WRITE_MORE_DATA was processed in the JNI of
2349                 // android.media.AudioTrack. The JNI does not use the callback to provide data;
2350                 // it only signals to the Java client that it can provide more data, which
2351                 // this track is ready to accept now.
2352                 // The playback thread will be woken up at the next ::write().
2353 return NS_WHENEVER;
2354 }
2355 // The callback is done filling buffers
2356 // Keep this thread going to handle timed events and
2357 // still try to get more data in intervals of WAIT_PERIOD_MS
2358 // but don't just loop and block the CPU, so wait
2359
2360 // mCbf(EVENT_MORE_DATA, ...) might either
2361 // (1) Block until it can fill the buffer, returning 0 size on EOS.
2362 // (2) Block until it can fill the buffer, returning 0 data (silence) on EOS.
2363 // (3) Return 0 size when no data is available, does not wait for more data.
2364 //
2365             // (1) and (2) occur with AudioPlayer/AwesomePlayer; (3) occurs with NuPlayer.
2366 // We try to compute the wait time to avoid a tight sleep-wait cycle,
2367 // especially for case (3).
2368 //
2369             // The decision to support (1) and (2) affects the sizing of mRemainingFrames
2370 // and this loop; whereas for case (3) we could simply check once with the full
2371 // buffer size and skip the loop entirely.
2372
2373 nsecs_t myns;
2374 if (audio_has_proportional_frames(mFormat)) {
2375 // time to wait based on buffer occupancy
2376 const nsecs_t datans = mRemainingFrames <= avail ? 0 :
2377 framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
2378 // audio flinger thread buffer size (TODO: adjust for fast tracks)
2379 // FIXME: use mAfFrameCountHAL instead of mAfFrameCount below for fast tracks.
2380 const nsecs_t afns = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
2381                 // add half the AudioFlinger buffer time to avoid soaking CPU if datans is 0.
2382 myns = datans + (afns / 2);
2383 } else {
2384 // FIXME: This could ping quite a bit if the buffer isn't full.
2385 // Note that when mState is stopping we waitStreamEnd, so it never gets here.
2386 myns = kWaitPeriodNs;
2387 }
2388 if (ns > 0) { // account for obtain and callback time
2389 const nsecs_t timeNow = systemTime();
2390 ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2391 }
2392 if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
2393 ns = myns;
2394 }
2395 return ns;
2396 }
2397
2398 size_t releasedFrames = writtenSize / mFrameSize;
2399 audioBuffer.frameCount = releasedFrames;
2400 mRemainingFrames -= releasedFrames;
2401 if (misalignment >= releasedFrames) {
2402 misalignment -= releasedFrames;
2403 } else {
2404 misalignment = 0;
2405 }
2406
2407 releaseBuffer(&audioBuffer);
2408 writtenFrames += releasedFrames;
2409
2410 // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
2411 // if callback doesn't like to accept the full chunk
2412 if (writtenSize < reqSize) {
2413 continue;
2414 }
2415
2416 // There could be enough non-contiguous frames available to satisfy the remaining request
2417 if (mRemainingFrames <= nonContig) {
2418 continue;
2419 }
2420
2421 #if 0
2422 // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
2423 // sum <= notificationFrames. It replaces that series by at most two EVENT_MORE_DATA
2424 // that total to a sum == notificationFrames.
2425 if (0 < misalignment && misalignment <= mRemainingFrames) {
2426 mRemainingFrames = misalignment;
2427 return ((double)mRemainingFrames * 1100000000) / ((double)sampleRate * speed);
2428 }
2429 #endif
2430
2431 }
2432 if (writtenFrames > 0) {
2433 AutoMutex lock(mLock);
2434 mFramesWritten += writtenFrames;
2435 }
2436 mRemainingFrames = notificationFrames;
2437 mRetryOnPartialBuffer = true;
2438
2439 // A lot has transpired since ns was calculated, so run again immediately and re-calculate
2440 return 0;
2441 }
2442
2443 status_t AudioTrack::restoreTrack_l(const char *from)
2444 {
2445 status_t result = NO_ERROR; // logged: make sure to set this before returning.
2446 const int64_t beginNs = systemTime();
2447 mediametrics::Defer defer([&] {
2448 mediametrics::LogItem(mMetricsId)
2449 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_RESTORE)
2450 .set(AMEDIAMETRICS_PROP_EXECUTIONTIMENS, (int64_t)(systemTime() - beginNs))
2451 .set(AMEDIAMETRICS_PROP_STATE, stateToString(mState))
2452 .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)result)
2453 .set(AMEDIAMETRICS_PROP_WHERE, from)
2454 .record(); });
2455
2456 ALOGW("%s(%d): dead IAudioTrack, %s, creating a new one from %s()",
2457 __func__, mPortId, isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
2458 ++mSequence;
2459
2460 // refresh the audio configuration cache in this process to make sure we get new
2461 // output parameters and new IAudioFlinger in createTrack_l()
2462 AudioSystem::clearAudioConfigCache();
2463
2464 if (isOffloadedOrDirect_l() || mDoNotReconnect) {
2465 // FIXME re-creation of offloaded and direct tracks is not yet implemented;
2466 // reconsider enabling for linear PCM encodings when position can be preserved.
2467 result = DEAD_OBJECT;
2468 return result;
2469 }
2470
2471 // Save so we can return count since creation.
2472 mUnderrunCountOffset = getUnderrunCount_l();
2473
2474 // save the old static buffer position
2475 uint32_t staticPosition = 0;
2476 size_t bufferPosition = 0;
2477 int loopCount = 0;
2478 if (mStaticProxy != 0) {
2479 mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
2480 staticPosition = mStaticProxy->getPosition().unsignedValue();
2481 }
2482
2483 // See b/74409267. Connecting to a BT A2DP device supporting multiple codecs
2484 // causes a lot of churn on the service side, and it can reject starting
2485 // playback of a previously created track. May also apply to other cases.
2486 const int INITIAL_RETRIES = 3;
2487 int retries = INITIAL_RETRIES;
2488 retry:
2489 if (retries < INITIAL_RETRIES) {
2490 // See the comment for clearAudioConfigCache at the start of the function.
2491 AudioSystem::clearAudioConfigCache();
2492 }
2493 mFlags = mOrigFlags;
2494
2495 // If a new IAudioTrack is successfully created, createTrack_l() will modify the
2496 // following member variables: mAudioTrack, mCblkMemory and mCblk.
2497 // It will also delete the strong references on previous IAudioTrack and IMemory.
2498 // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
2499 result = createTrack_l();
2500
2501 if (result == NO_ERROR) {
2502 // take the frames that will be lost by track recreation into account in saved position
2503 // For streaming tracks, this is the amount we obtained from the user/client
2504 // (not the number actually consumed at the server - those are already lost).
2505 if (mStaticProxy == 0) {
2506 mPosition = mReleased;
2507 }
2508 // Continue playback from last known position and restore loop.
2509 if (mStaticProxy != 0) {
2510 if (loopCount != 0) {
2511 mStaticProxy->setBufferPositionAndLoop(bufferPosition,
2512 mLoopStart, mLoopEnd, loopCount);
2513 } else {
2514 mStaticProxy->setBufferPosition(bufferPosition);
2515 if (bufferPosition == mFrameCount) {
2516 ALOGD("%s(%d): restoring track at end of static buffer", __func__, mPortId);
2517 }
2518 }
2519 }
2520 // restore volume handler
2521 mVolumeHandler->forall([this](const VolumeShaper &shaper) -> VolumeShaper::Status {
2522 sp<VolumeShaper::Operation> operationToEnd =
2523 new VolumeShaper::Operation(shaper.mOperation);
2524 // TODO: Ideally we would restore to the exact xOffset position
2525 // as returned by getVolumeShaperState(), but we don't have that
2526 // information when restoring at the client unless we periodically poll
2527 // the server or create shared memory state.
2528 //
2529 // For now, we simply advance to the end of the VolumeShaper effect
2530 // if it has been started.
2531 if (shaper.isStarted()) {
2532 operationToEnd->setNormalizedTime(1.f);
2533 }
2534 return mAudioTrack->applyVolumeShaper(shaper.mConfiguration, operationToEnd);
2535 });
2536
2537 if (mState == STATE_ACTIVE) {
2538 result = mAudioTrack->start();
2539 }
2540 // server resets to zero so we offset
2541 mFramesWrittenServerOffset =
2542 mStaticProxy.get() != nullptr ? staticPosition : mFramesWritten;
2543 mFramesWrittenAtRestore = mFramesWrittenServerOffset;
2544 }
2545 if (result != NO_ERROR) {
2546 ALOGW("%s(%d): failed status %d, retries %d", __func__, mPortId, result, retries);
2547 if (--retries > 0) {
2548             // leave time for a possible race condition to clear before retrying
2549 usleep(500000);
2550 goto retry;
2551 }
2552 // if no retries left, set invalid bit to force restoring at next occasion
2553 // and avoid inconsistent active state on client and server sides
2554 if (mCblk != nullptr) {
2555 android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
2556 }
2557 }
2558 return result;
2559 }
2560
2561 Modulo<uint32_t> AudioTrack::updateAndGetPosition_l()
2562 {
2563 // This is the sole place to read server consumed frames
2564 Modulo<uint32_t> newServer(mProxy->getPosition());
2565 const int32_t delta = (newServer - mServer).signedValue();
2566 // TODO There is controversy about whether there can be "negative jitter" in server position.
2567 // This should be investigated further, and if possible, it should be addressed.
2568 // A more definite failure mode is infrequent polling by client.
2569 // One could call (void)getPosition_l() in releaseBuffer(),
2570 // so mReleased and mPosition are always lock-step as best possible.
2571 // That should ensure delta never goes negative for infrequent polling
2572 // unless the server has more than 2^31 frames in its buffer,
2573 // in which case the use of uint32_t for these counters has bigger issues.
2574 ALOGE_IF(delta < 0,
2575 "%s(%d): detected illegal retrograde motion by the server: mServer advanced by %d",
2576 __func__, mPortId, delta);
2577 mServer = newServer;
2578 if (delta > 0) { // avoid retrograde
2579 mPosition += delta;
2580 }
2581 return mPosition;
2582 }
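// Worked example with illustrative numbers: because Modulo<uint32_t> arithmetic is used,
// a server position wrapping from 0xFFFFFF00 to 0x00000100 yields a small positive delta
// of 0x200 = 512 frames, which is accumulated into mPosition instead of being misread as
// retrograde motion.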
2583
2584 bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed)
2585 {
2586 updateLatency_l();
2587 // applicable for mixing tracks only (not offloaded or direct)
2588 if (mStaticProxy != 0) {
2589 return true; // static tracks do not have issues with buffer sizing.
2590 }
2591 const size_t minFrameCount =
2592 AudioSystem::calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate,
2593 sampleRate, speed /*, 0 mNotificationsPerBufferReq*/);
2594 const bool allowed = mFrameCount >= minFrameCount;
2595 ALOGD_IF(!allowed,
2596 "%s(%d): denied "
2597 "mAfLatency:%u mAfFrameCount:%zu mAfSampleRate:%u sampleRate:%u speed:%f "
2598 "mFrameCount:%zu < minFrameCount:%zu",
2599 __func__, mPortId,
2600 mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed,
2601 mFrameCount, minFrameCount);
2602 return allowed;
2603 }
2604
2605 status_t AudioTrack::setParameters(const String8& keyValuePairs)
2606 {
2607 AutoMutex lock(mLock);
2608 return mAudioTrack->setParameters(keyValuePairs);
2609 }
2610
2611 status_t AudioTrack::selectPresentation(int presentationId, int programId)
2612 {
2613 AutoMutex lock(mLock);
2614 AudioParameter param = AudioParameter();
2615 param.addInt(String8(AudioParameter::keyPresentationId), presentationId);
2616 param.addInt(String8(AudioParameter::keyProgramId), programId);
2617 ALOGV("%s(%d): PresentationId/ProgramId[%s]",
2618 __func__, mPortId, param.toString().string());
2619
2620 return mAudioTrack->setParameters(param.toString());
2621 }
2622
2623 VolumeShaper::Status AudioTrack::applyVolumeShaper(
2624 const sp<VolumeShaper::Configuration>& configuration,
2625 const sp<VolumeShaper::Operation>& operation)
2626 {
2627 AutoMutex lock(mLock);
2628 mVolumeHandler->setIdIfNecessary(configuration);
2629 VolumeShaper::Status status = mAudioTrack->applyVolumeShaper(configuration, operation);
2630
2631 if (status == DEAD_OBJECT) {
2632 if (restoreTrack_l("applyVolumeShaper") == OK) {
2633 status = mAudioTrack->applyVolumeShaper(configuration, operation);
2634 }
2635 }
2636 if (status >= 0) {
2637 // save VolumeShaper for restore
2638 mVolumeHandler->applyVolumeShaper(configuration, operation);
2639 if (mState == STATE_ACTIVE || mState == STATE_STOPPING) {
2640 mVolumeHandler->setStarted();
2641 }
2642 } else {
2643 // warn only if not an expected restore failure.
2644 ALOGW_IF(!((isOffloadedOrDirect_l() || mDoNotReconnect) && status == DEAD_OBJECT),
2645 "%s(%d): applyVolumeShaper failed: %d", __func__, mPortId, status);
2646 }
2647 return status;
2648 }
2649
2650 sp<VolumeShaper::State> AudioTrack::getVolumeShaperState(int id)
2651 {
2652 AutoMutex lock(mLock);
2653 sp<VolumeShaper::State> state = mAudioTrack->getVolumeShaperState(id);
2654 if (state.get() == nullptr && (mCblk->mFlags & CBLK_INVALID) != 0) {
2655 if (restoreTrack_l("getVolumeShaperState") == OK) {
2656 state = mAudioTrack->getVolumeShaperState(id);
2657 }
2658 }
2659 return state;
2660 }
2661
2662 status_t AudioTrack::getTimestamp(ExtendedTimestamp *timestamp)
2663 {
2664 if (timestamp == nullptr) {
2665 return BAD_VALUE;
2666 }
2667 AutoMutex lock(mLock);
2668 return getTimestamp_l(timestamp);
2669 }
2670
2671 status_t AudioTrack::getTimestamp_l(ExtendedTimestamp *timestamp)
2672 {
2673 if (mCblk->mFlags & CBLK_INVALID) {
2674 const status_t status = restoreTrack_l("getTimestampExtended");
2675 if (status != OK) {
2676 // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2677 // recommending that the track be recreated.
2678 return DEAD_OBJECT;
2679 }
2680 }
2681 // check for offloaded/direct here in case restoring somehow changed those flags.
2682 if (isOffloadedOrDirect_l()) {
2683 return INVALID_OPERATION; // not supported
2684 }
2685 status_t status = mProxy->getTimestamp(timestamp);
2686 LOG_ALWAYS_FATAL_IF(status != OK, "%s(%d): status %d not allowed from proxy getTimestamp",
2687 __func__, mPortId, status);
2688 bool found = false;
2689 timestamp->mPosition[ExtendedTimestamp::LOCATION_CLIENT] = mFramesWritten;
2690 timestamp->mTimeNs[ExtendedTimestamp::LOCATION_CLIENT] = 0;
2691 // server side frame offset in case AudioTrack has been restored.
2692 for (int i = ExtendedTimestamp::LOCATION_SERVER;
2693 i < ExtendedTimestamp::LOCATION_MAX; ++i) {
2694 if (timestamp->mTimeNs[i] >= 0) {
2695 // apply server offset (frames flushed is ignored
2696 // so we don't report the jump when the flush occurs).
2697 timestamp->mPosition[i] += mFramesWrittenServerOffset;
2698 found = true;
2699 }
2700 }
2701 return found ? OK : WOULD_BLOCK;
2702 }
2703
2704 status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
2705 {
2706 AutoMutex lock(mLock);
2707 return getTimestamp_l(timestamp);
2708 }
2709
2710 status_t AudioTrack::getTimestamp_l(AudioTimestamp& timestamp)
2711 {
2712 bool previousTimestampValid = mPreviousTimestampValid;
2713 // Set false here to cover all the error return cases.
2714 mPreviousTimestampValid = false;
2715
2716 switch (mState) {
2717 case STATE_ACTIVE:
2718 case STATE_PAUSED:
2719 break; // handle below
2720 case STATE_FLUSHED:
2721 case STATE_STOPPED:
2722 return WOULD_BLOCK;
2723 case STATE_STOPPING:
2724 case STATE_PAUSED_STOPPING:
2725 if (!isOffloaded_l()) {
2726 return INVALID_OPERATION;
2727 }
2728 break; // offloaded tracks handled below
2729 default:
2730 LOG_ALWAYS_FATAL("%s(%d): Invalid mState in getTimestamp(): %d",
2731 __func__, mPortId, mState);
2732 break;
2733 }
2734
2735 if (mCblk->mFlags & CBLK_INVALID) {
2736 const status_t status = restoreTrack_l("getTimestamp");
2737 if (status != OK) {
2738 // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2739 // recommending that the track be recreated.
2740 return DEAD_OBJECT;
2741 }
2742 }
2743
2744 // The presented frame count must always lag behind the consumed frame count.
2745 // To avoid a race, read the presented frames first. This ensures that presented <= consumed.
2746
2747 status_t status;
2748 if (isOffloadedOrDirect_l()) {
2749 // use Binder to get timestamp
2750 status = mAudioTrack->getTimestamp(timestamp);
2751 } else {
2752 // read timestamp from shared memory
2753 ExtendedTimestamp ets;
2754 status = mProxy->getTimestamp(&ets);
2755 if (status == OK) {
2756 ExtendedTimestamp::Location location;
2757             status = ets.getBestTimestamp(&timestamp, &location);
2758
2759 if (status == OK) {
2760 updateLatency_l();
2761 // It is possible that the best location has moved from the kernel to the server.
2762 // In this case we adjust the position from the previous computed latency.
2763 if (location == ExtendedTimestamp::LOCATION_SERVER) {
2764 ALOGW_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_KERNEL,
2765 "%s(%d): location moved from kernel to server",
2766 __func__, mPortId);
2767 // check that the last kernel OK time info exists and the positions
2768 // are valid (if they predate the current track, the positions may
2769 // be zero or negative).
2770 const int64_t frames =
2771 (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
2772 ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0 ||
2773 ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] <= 0 ||
2774 ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] <= 0)
2775 ?
2776 int64_t((double)mAfLatency * mSampleRate * mPlaybackRate.mSpeed
2777 / 1000)
2778 :
2779 (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
2780 - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK]);
2781 ALOGV("%s(%d): frame adjustment:%lld timestamp:%s",
2782 __func__, mPortId, (long long)frames, ets.toString().c_str());
2783 if (frames >= ets.mPosition[location]) {
2784 timestamp.mPosition = 0;
2785 } else {
2786 timestamp.mPosition = (uint32_t)(ets.mPosition[location] - frames);
2787 }
2788 } else if (location == ExtendedTimestamp::LOCATION_KERNEL) {
2789 ALOGV_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_SERVER,
2790 "%s(%d): location moved from server to kernel",
2791 __func__, mPortId);
2792
2793 if (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER] ==
2794 ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL]) {
2795 // In Q, we don't return errors as an invalid time
2796 // but instead we leave the last kernel good timestamp alone.
2797 //
2798 // If server is identical to kernel, the device data pipeline is idle.
2799 // A better start time is now. The retrograde check ensures
2800 // timestamp monotonicity.
2801 const int64_t nowNs = systemTime();
2802 if (!mTimestampStallReported) {
2803 ALOGD("%s(%d): device stall time corrected using current time %lld",
2804 __func__, mPortId, (long long)nowNs);
2805 mTimestampStallReported = true;
2806 }
2807 timestamp.mTime = convertNsToTimespec(nowNs);
2808 } else {
2809 mTimestampStallReported = false;
2810 }
2811 }
2812
2813 // We update the timestamp time even when paused.
2814 if (mState == STATE_PAUSED /* not needed: STATE_PAUSED_STOPPING */) {
2815 const int64_t now = systemTime();
2816                     const int64_t at = audio_utils_ns_from_timespec(&timestamp.mTime);
2817 const int64_t lag =
2818 (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
2819 ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0)
2820 ? int64_t(mAfLatency * 1000000LL)
2821 : (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
2822 - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK])
2823 * NANOS_PER_SECOND / mSampleRate;
2824 const int64_t limit = now - lag; // no earlier than this limit
2825 if (at < limit) {
2826 ALOGV("timestamp pause lag:%lld adjusting from %lld to %lld",
2827 (long long)lag, (long long)at, (long long)limit);
2828 timestamp.mTime = convertNsToTimespec(limit);
2829 }
2830 }
2831 mPreviousLocation = location;
2832 } else {
2833 // right after AudioTrack is started, one may not find a timestamp
2834 ALOGV("%s(%d): getBestTimestamp did not find timestamp", __func__, mPortId);
2835 }
2836 }
2837 if (status == INVALID_OPERATION) {
2838 // INVALID_OPERATION occurs when no timestamp has been issued by the server;
2839 // other failures are signaled by a negative time.
2840 // If we come out of FLUSHED or STOPPED where the position is known
2841 // to be zero we convert this to WOULD_BLOCK (with the implicit meaning of
2842 // "zero" for NuPlayer). We don't convert for track restoration as position
2843 // does not reset.
2844 ALOGV("%s(%d): timestamp server offset:%lld restore frames:%lld",
2845 __func__, mPortId,
2846 (long long)mFramesWrittenServerOffset, (long long)mFramesWrittenAtRestore);
2847 if (mFramesWrittenServerOffset != mFramesWrittenAtRestore) {
2848 status = WOULD_BLOCK;
2849 }
2850 }
2851 }
2852 if (status != NO_ERROR) {
2853 ALOGV_IF(status != WOULD_BLOCK, "%s(%d): getTimestamp error:%#x", __func__, mPortId, status);
2854 return status;
2855 }
2856 if (isOffloadedOrDirect_l()) {
2857 if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
2858 // use cached paused position in case another offloaded track is running.
2859 timestamp.mPosition = mPausedPosition;
2860             clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
2861 // TODO: adjust for delay
2862 return NO_ERROR;
2863 }
2864
2865 // Check whether a pending flush or stop has completed, as those commands may
2866 // be asynchronous or return near finish or exhibit glitchy behavior.
2867 //
2868 // Originally this showed up as the first timestamp being a continuation of
2869 // the previous song under gapless playback.
2870 // However, we sometimes see zero timestamps, then a glitch of
2871 // the previous song's position, and then correct timestamps afterwards.
2872 if (mStartFromZeroUs != 0 && mSampleRate != 0) {
2873 static const int kTimeJitterUs = 100000; // 100 ms
2874 static const int k1SecUs = 1000000;
2875
2876 const int64_t timeNow = getNowUs();
2877
2878 if (timeNow < mStartFromZeroUs + k1SecUs) { // within first second of starting
2879 const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
2880 if (timestampTimeUs < mStartFromZeroUs) {
2881 return WOULD_BLOCK; // stale timestamp time, occurs before start.
2882 }
2883 const int64_t deltaTimeUs = timestampTimeUs - mStartFromZeroUs;
2884 const int64_t deltaPositionByUs = (double)timestamp.mPosition * 1000000
2885 / ((double)mSampleRate * mPlaybackRate.mSpeed);
2886
2887 if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
2888 // Verify that the counter can't count faster than the sample rate
2889 // since the start time. If greater, then that means we may have failed
2890 // to completely flush or stop the previous playing track.
2891 ALOGW_IF(!mTimestampStartupGlitchReported,
2892 "%s(%d): startup glitch detected"
2893 " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
2894 __func__, mPortId,
2895 (long long)deltaTimeUs, (long long)deltaPositionByUs,
2896 timestamp.mPosition);
2897 mTimestampStartupGlitchReported = true;
2898 if (previousTimestampValid
2899 && mPreviousTimestamp.mPosition == 0 /* should be true if valid */) {
2900 timestamp = mPreviousTimestamp;
2901 mPreviousTimestampValid = true;
2902 return NO_ERROR;
2903 }
2904 return WOULD_BLOCK;
2905 }
2906 if (deltaPositionByUs != 0) {
2907 mStartFromZeroUs = 0; // don't check again, we got valid nonzero position.
2908 }
2909 } else {
2910 mStartFromZeroUs = 0; // don't check again, start time expired.
2911 }
2912 mTimestampStartupGlitchReported = false;
2913 }
2914 } else {
2915 // Update the mapping between local consumed (mPosition) and server consumed (mServer)
2916 (void) updateAndGetPosition_l();
2917 // Server consumed (mServer) and presented both use the same server time base,
2918 // and server consumed is always >= presented.
2919 // The delta between these represents the number of frames in the buffer pipeline.
2920         // If this delta is greater than the client position, it means that
2921 // actually presented is still stuck at the starting line (figuratively speaking),
2922 // waiting for the first frame to go by. So we can't report a valid timestamp yet.
2923 // Note: We explicitly use non-Modulo comparison here - potential wrap issue when
2924 // mPosition exceeds 32 bits.
2925 // TODO Remove when timestamp is updated to contain pipeline status info.
2926 const int32_t pipelineDepthInFrames = (mServer - timestamp.mPosition).signedValue();
2927 if (pipelineDepthInFrames > 0 /* should be true, but we check anyways */
2928 && (uint32_t)pipelineDepthInFrames > mPosition.value()) {
2929 return INVALID_OPERATION;
2930 }
2931 // Convert timestamp position from server time base to client time base.
2932 // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
2933 // But if we change it to 64-bit then this could fail.
2934 // Use Modulo computation here.
2935 timestamp.mPosition = (mPosition - mServer + timestamp.mPosition).value();
2936 // Immediately after a call to getPosition_l(), mPosition and
2937 // mServer both represent the same frame position. mPosition is
2938 // in client's point of view, and mServer is in server's point of
2939 // view. So the difference between them is the "fudge factor"
2940 // between client and server views due to stop() and/or new
2941 // IAudioTrack. And timestamp.mPosition is initially in server's
2942 // point of view, so we need to apply the same fudge factor to it.
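        // Worked example with illustrative numbers: if mPosition = 100000, mServer = 90000
        // and the server reports timestamp.mPosition = 89000, the position returned to the
        // client becomes 100000 - 90000 + 89000 = 99000 frames.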
2943 }
2944
2945 // Prevent retrograde motion in timestamp.
2946 // This is sometimes caused by erratic reports of the available space in the ALSA drivers.
2947 if (status == NO_ERROR) {
2948 // Fix stale time when checking timestamp right after start().
2949 // The position is at the last reported location but the time can be stale
2950 // due to pause or standby or cold start latency.
2951 //
2952 // We keep advancing the time (but not the position) to ensure that the
2953 // stale value does not confuse the application.
2954 //
2955 // For offload compatibility, use a default lag value here.
2956 // Any time discrepancy between this update and the pause timestamp is handled
2957 // by the retrograde check afterwards.
2958         int64_t currentTimeNanos = audio_utils_ns_from_timespec(&timestamp.mTime);
2959 const int64_t lagNs = int64_t(mAfLatency * 1000000LL);
2960 const int64_t limitNs = mStartNs - lagNs;
2961 if (currentTimeNanos < limitNs) {
2962 if (!mTimestampStaleTimeReported) {
2963 ALOGD("%s(%d): stale timestamp time corrected, "
2964 "currentTimeNanos: %lld < limitNs: %lld < mStartNs: %lld",
2965 __func__, mPortId,
2966 (long long)currentTimeNanos, (long long)limitNs, (long long)mStartNs);
2967 mTimestampStaleTimeReported = true;
2968 }
2969 timestamp.mTime = convertNsToTimespec(limitNs);
2970 currentTimeNanos = limitNs;
2971 } else {
2972 mTimestampStaleTimeReported = false;
2973 }
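        // Illustrative numbers for the stale-time clamp (hypothetical): with
        // mStartNs = 5.000 s and mAfLatency = 80 ms, limitNs = 4.920 s.  Any timestamp
        // time earlier than that (e.g. left over from a pause or standby) is advanced
        // to 4.920 s, so the application never observes a time older than start()
        // minus one mixer-latency worth of lag.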
2974
2975 // previousTimestampValid is set to false when starting after a stop or flush.
2976 if (previousTimestampValid) {
2977 const int64_t previousTimeNanos =
2978 audio_utils_ns_from_timespec(&mPreviousTimestamp.mTime);
2979
2980 // retrograde check
2981 if (currentTimeNanos < previousTimeNanos) {
2982 if (!mTimestampRetrogradeTimeReported) {
2983 ALOGW("%s(%d): retrograde timestamp time corrected, %lld < %lld",
2984 __func__, mPortId,
2985 (long long)currentTimeNanos, (long long)previousTimeNanos);
2986 mTimestampRetrogradeTimeReported = true;
2987 }
2988 timestamp.mTime = mPreviousTimestamp.mTime;
2989 } else {
2990 mTimestampRetrogradeTimeReported = false;
2991 }
2992
2993 // Looking at signed delta will work even when the timestamps
2994 // are wrapping around.
2995 int32_t deltaPosition = (Modulo<uint32_t>(timestamp.mPosition)
2996 - mPreviousTimestamp.mPosition).signedValue();
2997 if (deltaPosition < 0) {
2998 // Only report once per position instead of spamming the log.
2999 if (!mTimestampRetrogradePositionReported) {
3000 ALOGW("%s(%d): retrograde timestamp position corrected, %d = %u - %u",
3001 __func__, mPortId,
3002 deltaPosition,
3003 timestamp.mPosition,
3004 mPreviousTimestamp.mPosition);
3005 mTimestampRetrogradePositionReported = true;
3006 }
3007 } else {
3008 mTimestampRetrogradePositionReported = false;
3009 }
3010 if (deltaPosition < 0) {
3011 timestamp.mPosition = mPreviousTimestamp.mPosition;
3012 deltaPosition = 0;
3013 }
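        // Example of the retrograde position clamp (hypothetical values): if the
        // previous timestamp reported position 48000 and the new report says 47900,
        // deltaPosition = -100, so the position is pinned at 48000 and the delta is
        // treated as zero; the warning above fires once per retrograde episode.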
3014 #if 0
3015 // Uncomment this to verify audio timestamp rate.
3016 const int64_t deltaTime =
3017 audio_utils_ns_from_timespec(&timestamp.mTime) - previousTimeNanos;
3018 if (deltaTime != 0) {
3019 const int64_t computedSampleRate =
3020 deltaPosition * (long long)NANOS_PER_SECOND / deltaTime;
3021 ALOGD("%s(%d): computedSampleRate:%u sampleRate:%u",
3022 __func__, mPortId,
3023 (unsigned)computedSampleRate, mSampleRate);
3024 }
3025 #endif
3026 }
3027 mPreviousTimestamp = timestamp;
3028 mPreviousTimestampValid = true;
3029 }
3030
3031 return status;
3032 }
3033
3034 String8 AudioTrack::getParameters(const String8& keys)
3035 {
3036 audio_io_handle_t output = getOutput();
3037 if (output != AUDIO_IO_HANDLE_NONE) {
3038 return AudioSystem::getParameters(output, keys);
3039 } else {
3040 return String8::empty();
3041 }
3042 }
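// Usage sketch (illustrative only; "routing" is assumed to be a key the output
// understands - actual key support depends on the HAL):
//
//   sp<AudioTrack> track = ...;  // an already-initialized track
//   String8 reply = track->getParameters(String8("routing"));
//   // reply is the empty string if the track has no valid output yet.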
3043
3044 bool AudioTrack::isOffloaded() const
3045 {
3046 AutoMutex lock(mLock);
3047 return isOffloaded_l();
3048 }
3049
3050 bool AudioTrack::isDirect() const
3051 {
3052 AutoMutex lock(mLock);
3053 return isDirect_l();
3054 }
3055
3056 bool AudioTrack::isOffloadedOrDirect() const
3057 {
3058 AutoMutex lock(mLock);
3059 return isOffloadedOrDirect_l();
3060 }
3061
3062
3063 status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
3064 {
3065 String8 result;
3066
3067 result.append(" AudioTrack::dump\n");
3068 result.appendFormat(" id(%d) status(%d), state(%d), session Id(%d), flags(%#x)\n",
3069 mPortId, mStatus, mState, mSessionId, mFlags);
3070 result.appendFormat(" stream type(%d), left - right volume(%f, %f)\n",
3071 (mStreamType == AUDIO_STREAM_DEFAULT) ?
3072 AudioSystem::attributesToStreamType(mAttributes) :
3073 mStreamType,
3074 mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
3075 result.appendFormat(" format(%#x), channel mask(%#x), channel count(%u)\n",
3076 mFormat, mChannelMask, mChannelCount);
3077 result.appendFormat(" sample rate(%u), original sample rate(%u), speed(%f)\n",
3078 mSampleRate, mOriginalSampleRate, mPlaybackRate.mSpeed);
3079 result.appendFormat(" frame count(%zu), req. frame count(%zu)\n",
3080 mFrameCount, mReqFrameCount);
3081 result.appendFormat(" notif. frame count(%u), req. notif. frame count(%u),"
3082 " req. notif. per buff(%u)\n",
3083 mNotificationFramesAct, mNotificationFramesReq, mNotificationsPerBufferReq);
3084 result.appendFormat(" latency (%d), selected device Id(%d), routed device Id(%d)\n",
3085 mLatency, mSelectedDeviceId, mRoutedDeviceId);
3086 result.appendFormat(" output(%d) AF latency (%u) AF frame count(%zu) AF SampleRate(%u)\n",
3087 mOutput, mAfLatency, mAfFrameCount, mAfSampleRate);
3088 ::write(fd, result.string(), result.size());
3089 return NO_ERROR;
3090 }
3091
3092 uint32_t AudioTrack::getUnderrunCount() const
3093 {
3094 AutoMutex lock(mLock);
3095 return getUnderrunCount_l();
3096 }
3097
3098 uint32_t AudioTrack::getUnderrunCount_l() const
3099 {
3100 return mProxy->getUnderrunCount() + mUnderrunCountOffset;
3101 }
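// Note: mUnderrunCountOffset accumulates the count from any previous IAudioTrack
// (updated during track restore), so the value returned here stays monotonic across
// a track re-creation instead of resetting to the new proxy's count.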
3102
3103 uint32_t AudioTrack::getUnderrunFrames() const
3104 {
3105 AutoMutex lock(mLock);
3106 return mProxy->getUnderrunFrames();
3107 }
3108
3109 status_t AudioTrack::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback)
3110 {
3111
3112 if (callback == 0) {
3113 ALOGW("%s(%d): adding NULL callback!", __func__, mPortId);
3114 return BAD_VALUE;
3115 }
3116 AutoMutex lock(mLock);
3117 if (mDeviceCallback.unsafe_get() == callback.get()) {
3118 ALOGW("%s(%d): adding same callback!", __func__, mPortId);
3119 return INVALID_OPERATION;
3120 }
3121 status_t status = NO_ERROR;
3122 if (mOutput != AUDIO_IO_HANDLE_NONE) {
3123 if (mDeviceCallback != 0) {
3124 ALOGW("%s(%d): callback already present!", __func__, mPortId);
3125 AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
3126 }
3127 status = AudioSystem::addAudioDeviceCallback(this, mOutput, mPortId);
3128 }
3129 mDeviceCallback = callback;
3130 return status;
3131 }
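// Usage sketch (illustrative only; MyDeviceCallback is a hypothetical client class):
//
//   struct MyDeviceCallback : public AudioSystem::AudioDeviceCallback {
//       void onAudioDeviceUpdate(audio_io_handle_t audioIo,
//                                audio_port_handle_t deviceId) override {
//           ALOGD("track routed: io %d device %d", audioIo, deviceId);
//       }
//   };
//   sp<MyDeviceCallback> cb = new MyDeviceCallback();
//   track->addAudioDeviceCallback(cb);   // NO_ERROR on first registration
//   track->addAudioDeviceCallback(cb);   // INVALID_OPERATION: same callback twice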
3132
3133 status_t AudioTrack::removeAudioDeviceCallback(
3134 const sp<AudioSystem::AudioDeviceCallback>& callback)
3135 {
3136 if (callback == 0) {
3137 ALOGW("%s(%d): removing NULL callback!", __func__, mPortId);
3138 return BAD_VALUE;
3139 }
3140 AutoMutex lock(mLock);
3141 if (mDeviceCallback.unsafe_get() != callback.get()) {
3142 ALOGW("%s(%d): removing different callback!", __func__, mPortId);
3143 return INVALID_OPERATION;
3144 }
3145 mDeviceCallback.clear();
3146 if (mOutput != AUDIO_IO_HANDLE_NONE) {
3147 AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
3148 }
3149 return NO_ERROR;
3150 }
3151
3152
3153 void AudioTrack::onAudioDeviceUpdate(audio_io_handle_t audioIo,
3154 audio_port_handle_t deviceId)
3155 {
3156 sp<AudioSystem::AudioDeviceCallback> callback;
3157 {
3158 AutoMutex lock(mLock);
3159 if (audioIo != mOutput) {
3160 return;
3161 }
3162 callback = mDeviceCallback.promote();
3163 // Only update the routed device if the track is active, as route changes due to
3164 // other use cases are irrelevant for this client.
3165 if (mState == STATE_ACTIVE) {
3166 mRoutedDeviceId = deviceId;
3167 }
3168 }
3169
3170 if (callback.get() != nullptr) {
3171 callback->onAudioDeviceUpdate(mOutput, mRoutedDeviceId);
3172 }
3173 }
3174
3175 status_t AudioTrack::pendingDuration(int32_t *msec, ExtendedTimestamp::Location location)
3176 {
3177 if (msec == nullptr ||
3178 (location != ExtendedTimestamp::LOCATION_SERVER
3179 && location != ExtendedTimestamp::LOCATION_KERNEL)) {
3180 return BAD_VALUE;
3181 }
3182 AutoMutex lock(mLock);
3183 // inclusive of offloaded and direct tracks.
3184 //
3185 // It would be possible, but is not currently enabled, to allow duration computation
3186 // for non-PCM audio_has_proportional_frames() formats, because they presently drain
3187 // at a rate equivalent to the PCM sample rate * frame size.
3188 if (!isPurePcmData_l()) {
3189 return INVALID_OPERATION;
3190 }
3191 ExtendedTimestamp ets;
3192 if (getTimestamp_l(&ets) == OK
3193 && ets.mTimeNs[location] > 0) {
3194 int64_t diff = ets.mPosition[ExtendedTimestamp::LOCATION_CLIENT]
3195 - ets.mPosition[location];
3196 if (diff < 0) {
3197 *msec = 0;
3198 } else {
3199 // ms is the remaining playback time computed from the frame count
3200 int64_t ms = (int64_t)((double)diff * 1000 /
3201 ((double)mSampleRate * mPlaybackRate.mSpeed));
3202 // clockdiff is the timestamp age (negative)
3203 int64_t clockdiff = (mState != STATE_ACTIVE) ? 0 :
3204 ets.mTimeNs[location]
3205 + ets.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_MONOTONIC]
3206 - systemTime(SYSTEM_TIME_MONOTONIC);
3207
3208 //ALOGV("ms: %lld clockdiff: %lld", (long long)ms, (long long)clockdiff);
3209 static const int NANOS_PER_MILLIS = 1000000;
3210 *msec = (int32_t)(ms + clockdiff / NANOS_PER_MILLIS);
3211 }
3212 return NO_ERROR;
3213 }
3214 if (location != ExtendedTimestamp::LOCATION_SERVER) {
3215 return INVALID_OPERATION; // LOCATION_KERNEL is not available
3216 }
3217 // use server position directly (offloaded and direct arrive here)
3218 updateAndGetPosition_l();
3219 int32_t diff = (Modulo<uint32_t>(mFramesWritten) - mPosition).signedValue();
3220 *msec = (diff <= 0) ? 0
3221 : (int32_t)((double)diff * 1000 / ((double)mSampleRate * mPlaybackRate.mSpeed));
3222 return NO_ERROR;
3223 }
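// Worked example for pendingDuration() (hypothetical values): with 4800 frames still
// ahead of LOCATION_SERVER, mSampleRate = 48000 and speed 1.0, ms = 100.  If the
// timestamp is 20 ms old, clockdiff is about -20,000,000 ns, so the reported pending
// duration is roughly 80 ms.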
3224
3225 bool AudioTrack::hasStarted()
3226 {
3227 AutoMutex lock(mLock);
3228 switch (mState) {
3229 case STATE_STOPPED:
3230 if (isOffloadedOrDirect_l()) {
3231 // check if we have started in the past to return true.
3232 return mStartFromZeroUs > 0;
3233 }
3234 // A normal audio track may still be draining, so
3235 // check if stream has ended. This covers fasttrack position
3236 // instability and start/stop without any data written.
3237 if (mProxy->getStreamEndDone()) {
3238 return true;
3239 }
3240 FALLTHROUGH_INTENDED;
3241 case STATE_ACTIVE:
3242 case STATE_STOPPING:
3243 break;
3244 case STATE_PAUSED:
3245 case STATE_PAUSED_STOPPING:
3246 case STATE_FLUSHED:
3247 return false; // we're not active
3248 default:
3249 LOG_ALWAYS_FATAL("%s(%d): Invalid mState in hasStarted(): %d", __func__, mPortId, mState);
3250 break;
3251 }
3252
3253 // wait indicates whether we need to wait for a timestamp.
3254 // This is determined conservatively - if we encounter an unexpected error
3255 // then we will not wait.
3256 bool wait = false;
3257 if (isOffloadedOrDirect_l()) {
3258 AudioTimestamp ts;
3259 status_t status = getTimestamp_l(ts);
3260 if (status == WOULD_BLOCK) {
3261 wait = true;
3262 } else if (status == OK) {
3263 wait = (ts.mPosition == 0 || ts.mPosition == mStartTs.mPosition);
3264 }
3265 ALOGV("%s(%d): hasStarted wait:%d ts:%u start position:%lld",
3266 __func__, mPortId,
3267 (int)wait,
3268 ts.mPosition,
3269 (long long)mStartTs.mPosition);
3270 } else {
3271 int location = ExtendedTimestamp::LOCATION_SERVER; // for ALOG
3272 ExtendedTimestamp ets;
3273 status_t status = getTimestamp_l(&ets);
3274 if (status == WOULD_BLOCK) { // no SERVER or KERNEL frame info in ets
3275 wait = true;
3276 } else if (status == OK) {
3277 for (location = ExtendedTimestamp::LOCATION_KERNEL;
3278 location >= ExtendedTimestamp::LOCATION_SERVER; --location) {
3279 if (ets.mTimeNs[location] < 0 || mStartEts.mTimeNs[location] < 0) {
3280 continue;
3281 }
3282 wait = ets.mPosition[location] == 0
3283 || ets.mPosition[location] == mStartEts.mPosition[location];
3284 break;
3285 }
3286 }
3287 ALOGV("%s(%d): hasStarted wait:%d ets:%lld start position:%lld",
3288 __func__, mPortId,
3289 (int)wait,
3290 (long long)ets.mPosition[location],
3291 (long long)mStartEts.mPosition[location]);
3292 }
3293 return !wait;
3294 }
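// In short: hasStarted() reports true once a timestamp shows the presented position
// has moved past the snapshot captured at start() (mStartTs / mStartEts); a position
// that is still 0 or equal to the start snapshot means output has not yet begun.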
3295
3296 // =========================================================================
3297
3298 void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
3299 {
3300 sp<AudioTrack> audioTrack = mAudioTrack.promote();
3301 if (audioTrack != 0) {
3302 AutoMutex lock(audioTrack->mLock);
3303 audioTrack->mProxy->binderDied();
3304 }
3305 }
3306
3307 // =========================================================================
3308
3309 AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver)
3310 : Thread(true /* bCanCallJava */) // binder recursion on restoreTrack_l() may call Java.
3311 , mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
3312 mIgnoreNextPausedInt(false)
3313 {
3314 }
3315
3316 AudioTrack::AudioTrackThread::~AudioTrackThread()
3317 {
3318 }
3319
3320 bool AudioTrack::AudioTrackThread::threadLoop()
3321 {
3322 {
3323 AutoMutex _l(mMyLock);
3324 if (mPaused) {
3325 // TODO check return value and handle or log
3326 mMyCond.wait(mMyLock);
3327 // caller will check for exitPending()
3328 return true;
3329 }
3330 if (mIgnoreNextPausedInt) {
3331 mIgnoreNextPausedInt = false;
3332 mPausedInt = false;
3333 }
3334 if (mPausedInt) {
3335 // TODO use futex instead of condition, for event flag "or"
3336 if (mPausedNs > 0) {
3337 // TODO check return value and handle or log
3338 (void) mMyCond.waitRelative(mMyLock, mPausedNs);
3339 } else {
3340 // TODO check return value and handle or log
3341 mMyCond.wait(mMyLock);
3342 }
3343 mPausedInt = false;
3344 return true;
3345 }
3346 }
3347 if (exitPending()) {
3348 return false;
3349 }
3350 nsecs_t ns = mReceiver.processAudioBuffer();
3351 switch (ns) {
3352 case 0:
3353 return true;
3354 case NS_INACTIVE:
3355 pauseInternal();
3356 return true;
3357 case NS_NEVER:
3358 return false;
3359 case NS_WHENEVER:
3360 // Event driven: call wake() when callback notification conditions change.
3361 ns = INT64_MAX;
3362 FALLTHROUGH_INTENDED;
3363 default:
3364 LOG_ALWAYS_FATAL_IF(ns < 0, "%s(%d): processAudioBuffer() returned %lld",
3365 __func__, mReceiver.mPortId, (long long)ns);
3366 pauseInternal(ns);
3367 return true;
3368 }
3369 }
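// Summary of the processAudioBuffer() return values as interpreted by the loop above:
//   0           - run the loop again immediately
//   NS_INACTIVE - pause the thread until resume() is called
//   NS_NEVER    - exit the thread loop
//   NS_WHENEVER - sleep until wake() (treated as an effectively infinite timeout)
//   > 0         - sleep for that many nanoseconds, or until wake()/resume()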
3370
3371 void AudioTrack::AudioTrackThread::requestExit()
3372 {
3373 // must be in this order to avoid a race condition
3374 Thread::requestExit();
3375 resume();
3376 }
3377
3378 void AudioTrack::AudioTrackThread::pause()
3379 {
3380 AutoMutex _l(mMyLock);
3381 mPaused = true;
3382 }
3383
3384 void AudioTrack::AudioTrackThread::resume()
3385 {
3386 AutoMutex _l(mMyLock);
3387 mIgnoreNextPausedInt = true;
3388 if (mPaused || mPausedInt) {
3389 mPaused = false;
3390 mPausedInt = false;
3391 mMyCond.signal();
3392 }
3393 }
3394
3395 void AudioTrack::AudioTrackThread::wake()
3396 {
3397 AutoMutex _l(mMyLock);
3398 if (!mPaused) {
3399 // wake() might be called while servicing a callback - ignore the next
3400 // pause time and call processAudioBuffer.
3401 mIgnoreNextPausedInt = true;
3402 if (mPausedInt && mPausedNs > 0) {
3403 // audio track is active and internally paused with timeout.
3404 mPausedInt = false;
3405 mMyCond.signal();
3406 }
3407 }
3408 }
3409
3410 void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
3411 {
3412 AutoMutex _l(mMyLock);
3413 mPausedInt = true;
3414 mPausedNs = ns;
3415 }
3416
3417 binder::Status AudioTrack::AudioTrackCallback::onCodecFormatChanged(
3418 const std::vector<uint8_t>& audioMetadata)
3419 {
3420 AutoMutex _l(mAudioTrackCbLock);
3421 sp<media::IAudioTrackCallback> callback = mCallback.promote();
3422 if (callback.get() != nullptr) {
3423 callback->onCodecFormatChanged(audioMetadata);
3424 } else {
3425 mCallback.clear();
3426 }
3427 return binder::Status::ok();
3428 }
3429
3430 void AudioTrack::AudioTrackCallback::setAudioTrackCallback(
3431 const sp<media::IAudioTrackCallback> &callback) {
3432 AutoMutex lock(mAudioTrackCbLock);
3433 mCallback = callback;
3434 }
3435
3436 } // namespace android
3437