1 /*
2 **
3 ** Copyright 2007, The Android Open Source Project
4 **
5 ** Licensed under the Apache License, Version 2.0 (the "License");
6 ** you may not use this file except in compliance with the License.
7 ** You may obtain a copy of the License at
8 **
9 ** http://www.apache.org/licenses/LICENSE-2.0
10 **
11 ** Unless required by applicable law or agreed to in writing, software
12 ** distributed under the License is distributed on an "AS IS" BASIS,
13 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 ** See the License for the specific language governing permissions and
15 ** limitations under the License.
16 */
17
18 //#define LOG_NDEBUG 0
19 #define LOG_TAG "AudioTrack"
20
21 #include <inttypes.h>
22 #include <math.h>
23 #include <sys/resource.h>
24 #include <thread>
25
26 #include <android/media/IAudioPolicyService.h>
27 #include <android-base/macros.h>
28 #include <android-base/stringprintf.h>
29 #include <audio_utils/clock.h>
30 #include <audio_utils/primitives.h>
31 #include <binder/IPCThreadState.h>
32 #include <binder/IServiceManager.h>
33 #include <media/AudioTrack.h>
34 #include <utils/Log.h>
35 #include <private/media/AudioTrackShared.h>
36 #include <processgroup/sched_policy.h>
37 #include <media/IAudioFlinger.h>
38 #include <media/AudioParameter.h>
39 #include <media/AudioResamplerPublic.h>
40 #include <media/AudioSystem.h>
41 #include <media/MediaMetricsItem.h>
42 #include <media/TypeConverter.h>
43
44 #define WAIT_PERIOD_MS 10
45 #define WAIT_STREAM_END_TIMEOUT_SEC 120
46
47 static const int kMaxLoopCountNotifications = 32;
48 static constexpr char kAudioServiceName[] = "audio";
49
50 using ::android::aidl_utils::statusTFromBinderStatus;
51 using ::android::base::StringPrintf;
52
53 namespace android {
54 // ---------------------------------------------------------------------------
55
56 using media::VolumeShaper;
57 using android::content::AttributionSourceState;
58
59 // TODO: Move to a separate .h
60
61 template <typename T>
min(const T & x,const T & y)62 static inline const T &min(const T &x, const T &y) {
63 return x < y ? x : y;
64 }
65
66 template <typename T>
max(const T & x,const T & y)67 static inline const T &max(const T &x, const T &y) {
68 return x > y ? x : y;
69 }
70
framesToNanoseconds(ssize_t frames,uint32_t sampleRate,float speed)71 static inline nsecs_t framesToNanoseconds(ssize_t frames, uint32_t sampleRate, float speed)
72 {
73 return ((double)frames * 1000000000) / ((double)sampleRate * speed);
74 }
75
convertTimespecToUs(const struct timespec & tv)76 static int64_t convertTimespecToUs(const struct timespec &tv)
77 {
78 return tv.tv_sec * 1000000LL + tv.tv_nsec / 1000;
79 }
80
81 // TODO move to audio_utils.
convertNsToTimespec(int64_t ns)82 static inline struct timespec convertNsToTimespec(int64_t ns) {
83 struct timespec tv;
84 tv.tv_sec = static_cast<time_t>(ns / NANOS_PER_SECOND);
85 tv.tv_nsec = static_cast<int64_t>(ns % NANOS_PER_SECOND);
86 return tv;
87 }
88
89 // current monotonic time in microseconds.
getNowUs()90 static int64_t getNowUs()
91 {
92 struct timespec tv;
93 (void) clock_gettime(CLOCK_MONOTONIC, &tv);
94 return convertTimespecToUs(tv);
95 }
96
97 // FIXME: we don't use the pitch setting in the time stretcher (not working);
98 // instead we emulate it using our sample rate converter.
99 static const bool kFixPitch = true; // enable pitch fix
adjustSampleRate(uint32_t sampleRate,float pitch)100 static inline uint32_t adjustSampleRate(uint32_t sampleRate, float pitch)
101 {
102 return kFixPitch ? (sampleRate * pitch + 0.5) : sampleRate;
103 }
104
adjustSpeed(float speed,float pitch)105 static inline float adjustSpeed(float speed, float pitch)
106 {
107 return kFixPitch ? speed / max(pitch, AUDIO_TIMESTRETCH_PITCH_MIN_DELTA) : speed;
108 }
109
adjustPitch(float pitch)110 static inline float adjustPitch(float pitch)
111 {
112 return kFixPitch ? AUDIO_TIMESTRETCH_PITCH_NORMAL : pitch;
113 }
114
115 // static
getMinFrameCount(size_t * frameCount,audio_stream_type_t streamType,uint32_t sampleRate)116 status_t AudioTrack::getMinFrameCount(
117 size_t* frameCount,
118 audio_stream_type_t streamType,
119 uint32_t sampleRate)
120 {
121 if (frameCount == NULL) {
122 return BAD_VALUE;
123 }
124
125 // FIXME handle in server, like createTrack_l(), possible missing info:
126 // audio_io_handle_t output
127 // audio_format_t format
128 // audio_channel_mask_t channelMask
129 // audio_output_flags_t flags (FAST)
130 uint32_t afSampleRate;
131 status_t status;
132 status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
133 if (status != NO_ERROR) {
134 ALOGE("%s(): Unable to query output sample rate for stream type %d; status %d",
135 __func__, streamType, status);
136 return status;
137 }
138 size_t afFrameCount;
139 status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
140 if (status != NO_ERROR) {
141 ALOGE("%s(): Unable to query output frame count for stream type %d; status %d",
142 __func__, streamType, status);
143 return status;
144 }
145 uint32_t afLatency;
146 status = AudioSystem::getOutputLatency(&afLatency, streamType);
147 if (status != NO_ERROR) {
148 ALOGE("%s(): Unable to query output latency for stream type %d; status %d",
149 __func__, streamType, status);
150 return status;
151 }
152
153 // When called from createTrack, speed is 1.0f (normal speed).
154 // This is rechecked again on setting playback rate (TODO: on setting sample rate, too).
155 *frameCount = AudioSystem::calculateMinFrameCount(afLatency, afFrameCount, afSampleRate,
156 sampleRate, 1.0f /*, 0 notificationsPerBufferReq*/);
157
158 // The formula above should always produce a non-zero value under normal circumstances:
159 // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
160 // Return error in the unlikely event that it does not, as that's part of the API contract.
161 if (*frameCount == 0) {
162 ALOGE("%s(): failed for streamType %d, sampleRate %u",
163 __func__, streamType, sampleRate);
164 return BAD_VALUE;
165 }
166 ALOGV("%s(): getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
167 __func__, *frameCount, afFrameCount, afSampleRate, afLatency);
168 return NO_ERROR;
169 }
170
171 // static
isDirectOutputSupported(const audio_config_base_t & config,const audio_attributes_t & attributes)172 bool AudioTrack::isDirectOutputSupported(const audio_config_base_t& config,
173 const audio_attributes_t& attributes) {
174 ALOGV("%s()", __FUNCTION__);
175 const sp<media::IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
176 if (aps == 0) return false;
177
178 auto result = [&]() -> ConversionResult<bool> {
179 media::audio::common::AudioConfigBase configAidl = VALUE_OR_RETURN(
180 legacy2aidl_audio_config_base_t_AudioConfigBase(config, false /*isInput*/));
181 media::audio::common::AudioAttributes attributesAidl = VALUE_OR_RETURN(
182 legacy2aidl_audio_attributes_t_AudioAttributes(attributes));
183 bool retAidl;
184 RETURN_IF_ERROR(aidl_utils::statusTFromBinderStatus(
185 aps->isDirectOutputSupported(configAidl, attributesAidl, &retAidl)));
186 return retAidl;
187 }();
188 return result.value_or(false);
189 }
190
logIfErrorAndReturnStatus(status_t status,const std::string & errorMessage)191 status_t AudioTrack::logIfErrorAndReturnStatus(status_t status, const std::string& errorMessage) {
192 if (status != NO_ERROR) {
193 ALOGE_IF(!errorMessage.empty(), "%s", errorMessage.c_str());
194 reportError(status, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE, errorMessage.c_str());
195 }
196 mStatus = status;
197 return mStatus;
198 }
199 // ---------------------------------------------------------------------------
200
gather(const AudioTrack * track)201 void AudioTrack::MediaMetrics::gather(const AudioTrack *track)
202 {
203 // only if we're in a good state...
204 // XXX: shall we gather alternative info if failing?
205 const status_t lstatus = track->initCheck();
206 if (lstatus != NO_ERROR) {
207 ALOGD("%s(): no metrics gathered, track status=%d", __func__, (int) lstatus);
208 return;
209 }
210
211 #define MM_PREFIX "android.media.audiotrack." // avoid cut-n-paste errors.
212
213 // Do not change this without changing the MediaMetricsService side.
214 // Java API 28 entries, do not change.
215 mMetricsItem->setCString(MM_PREFIX "streamtype", toString(track->streamType()).c_str());
216 mMetricsItem->setCString(MM_PREFIX "type",
217 toString(track->mAttributes.content_type).c_str());
218 mMetricsItem->setCString(MM_PREFIX "usage", toString(track->mAttributes.usage).c_str());
219
220 // Non-API entries, these can change due to a Java string mistake.
221 mMetricsItem->setInt32(MM_PREFIX "sampleRate", (int32_t)track->mSampleRate);
222 mMetricsItem->setInt64(MM_PREFIX "channelMask", (int64_t)track->mChannelMask);
223 // Non-API entries, these can change.
224 mMetricsItem->setInt32(MM_PREFIX "portId", (int32_t)track->mPortId);
225 mMetricsItem->setCString(MM_PREFIX "encoding", toString(track->mFormat).c_str());
226 mMetricsItem->setInt32(MM_PREFIX "frameCount", (int32_t)track->mFrameCount);
227 mMetricsItem->setCString(MM_PREFIX "attributes", toString(track->mAttributes).c_str());
228 mMetricsItem->setCString(MM_PREFIX "logSessionId", track->mLogSessionId.c_str());
229 mMetricsItem->setInt32(MM_PREFIX "underrunFrames", (int32_t)track->getUnderrunFrames());
230 }
231
232 // hand the user a snapshot of the metrics.
getMetrics(mediametrics::Item * & item)233 status_t AudioTrack::getMetrics(mediametrics::Item * &item)
234 {
235 mMediaMetrics.gather(this);
236 mediametrics::Item *tmp = mMediaMetrics.dup();
237 if (tmp == nullptr) {
238 return BAD_VALUE;
239 }
240 item = tmp;
241 return NO_ERROR;
242 }
243
AudioTrack(const AttributionSourceState & attributionSource)244 AudioTrack::AudioTrack(const AttributionSourceState& attributionSource)
245 : mClientAttributionSource(attributionSource)
246 {
247 }
248
AudioTrack(audio_stream_type_t streamType,uint32_t sampleRate,audio_format_t format,audio_channel_mask_t channelMask,size_t frameCount,audio_output_flags_t flags,const wp<IAudioTrackCallback> & callback,int32_t notificationFrames,audio_session_t sessionId,transfer_type transferType,const audio_offload_info_t * offloadInfo,const AttributionSourceState & attributionSource,const audio_attributes_t * pAttributes,bool doNotReconnect,float maxRequiredSpeed,audio_port_handle_t selectedDeviceId)249 AudioTrack::AudioTrack(
250 audio_stream_type_t streamType,
251 uint32_t sampleRate,
252 audio_format_t format,
253 audio_channel_mask_t channelMask,
254 size_t frameCount,
255 audio_output_flags_t flags,
256 const wp<IAudioTrackCallback> & callback,
257 int32_t notificationFrames,
258 audio_session_t sessionId,
259 transfer_type transferType,
260 const audio_offload_info_t *offloadInfo,
261 const AttributionSourceState& attributionSource,
262 const audio_attributes_t* pAttributes,
263 bool doNotReconnect,
264 float maxRequiredSpeed,
265 audio_port_handle_t selectedDeviceId)
266 {
267 mSetParams = std::make_unique<SetParams>(
268 streamType, sampleRate, format, channelMask, frameCount, flags, callback,
269 notificationFrames, nullptr /*sharedBuffer*/, false /*threadCanCallJava*/,
270 sessionId, transferType, offloadInfo, attributionSource, pAttributes,
271 doNotReconnect, maxRequiredSpeed, selectedDeviceId);
272 }
273
AudioTrack(audio_stream_type_t streamType,uint32_t sampleRate,audio_format_t format,audio_channel_mask_t channelMask,const sp<IMemory> & sharedBuffer,audio_output_flags_t flags,const wp<IAudioTrackCallback> & callback,int32_t notificationFrames,audio_session_t sessionId,transfer_type transferType,const audio_offload_info_t * offloadInfo,const AttributionSourceState & attributionSource,const audio_attributes_t * pAttributes,bool doNotReconnect,float maxRequiredSpeed)274 AudioTrack::AudioTrack(
275 audio_stream_type_t streamType,
276 uint32_t sampleRate,
277 audio_format_t format,
278 audio_channel_mask_t channelMask,
279 const sp<IMemory>& sharedBuffer,
280 audio_output_flags_t flags,
281 const wp<IAudioTrackCallback>& callback,
282 int32_t notificationFrames,
283 audio_session_t sessionId,
284 transfer_type transferType,
285 const audio_offload_info_t *offloadInfo,
286 const AttributionSourceState& attributionSource,
287 const audio_attributes_t* pAttributes,
288 bool doNotReconnect,
289 float maxRequiredSpeed)
290 {
291 mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
292
293 mSetParams = std::unique_ptr<SetParams>{
294 new SetParams{streamType, sampleRate, format, channelMask, 0 /*frameCount*/, flags,
295 callback, notificationFrames, sharedBuffer, false /*threadCanCallJava*/,
296 sessionId, transferType, offloadInfo, attributionSource, pAttributes,
297 doNotReconnect, maxRequiredSpeed, AUDIO_PORT_HANDLE_NONE}};
298 }
299
onFirstRef()300 void AudioTrack::onFirstRef() {
301 if (mSetParams) {
302 set(*mSetParams);
303 mSetParams.reset();
304 }
305 }
306
~AudioTrack()307 AudioTrack::~AudioTrack()
308 {
309 // pull together the numbers, before we clean up our structures
310 mMediaMetrics.gather(this);
311
312 mediametrics::LogItem(mMetricsId)
313 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_DTOR)
314 .set(AMEDIAMETRICS_PROP_CALLERNAME,
315 mCallerName.empty()
316 ? AMEDIAMETRICS_PROP_CALLERNAME_VALUE_UNKNOWN
317 : mCallerName.c_str())
318 .set(AMEDIAMETRICS_PROP_STATE, stateToString(mState))
319 .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)mStatus)
320 .record();
321
322 stopAndJoinCallbacks(); // checks mStatus
323
324 if (mStatus == NO_ERROR) {
325 IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
326 mAudioTrack.clear();
327 mCblkMemory.clear();
328 mSharedBuffer.clear();
329 IPCThreadState::self()->flushCommands();
330 pid_t clientPid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mClientAttributionSource.pid));
331 ALOGV("%s(%d), releasing session id %d from %d on behalf of %d",
332 __func__, mPortId,
333 mSessionId, IPCThreadState::self()->getCallingPid(), clientPid);
334 AudioSystem::releaseAudioSessionId(mSessionId, clientPid);
335 }
336
337 if (mOutput != AUDIO_IO_HANDLE_NONE) {
338 AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
339 }
340 }
341
stopAndJoinCallbacks()342 void AudioTrack::stopAndJoinCallbacks() {
343 // Make sure that callback function exits in the case where
344 // it is looping on buffer full condition in obtainBuffer().
345 // Otherwise the callback thread will never exit.
346 stop();
347 if (mAudioTrackThread != 0) { // not thread safe
348 mAudioTrackThread->requestExit(); // see comment in AudioTrack.h
349 mProxy->interrupt();
350 mAudioTrackThread->requestExitAndWait();
351 mAudioTrackThread.clear();
352 }
353
354 AutoMutex lock(mLock);
355 if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
356 // This may not stop all of these device callbacks!
357 // TODO: Add some sort of protection.
358 AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
359 mDeviceCallback.clear();
360 }
361 }
set(audio_stream_type_t streamType,uint32_t sampleRate,audio_format_t format,audio_channel_mask_t channelMask,size_t frameCount,audio_output_flags_t flags,const wp<IAudioTrackCallback> & callback,int32_t notificationFrames,const sp<IMemory> & sharedBuffer,bool threadCanCallJava,audio_session_t sessionId,transfer_type transferType,const audio_offload_info_t * offloadInfo,const AttributionSourceState & attributionSource,const audio_attributes_t * pAttributes,bool doNotReconnect,float maxRequiredSpeed,audio_port_handle_t selectedDeviceId)362 status_t AudioTrack::set(
363 audio_stream_type_t streamType,
364 uint32_t sampleRate,
365 audio_format_t format,
366 audio_channel_mask_t channelMask,
367 size_t frameCount,
368 audio_output_flags_t flags,
369 const wp<IAudioTrackCallback>& callback,
370 int32_t notificationFrames,
371 const sp<IMemory>& sharedBuffer,
372 bool threadCanCallJava,
373 audio_session_t sessionId,
374 transfer_type transferType,
375 const audio_offload_info_t *offloadInfo,
376 const AttributionSourceState& attributionSource,
377 const audio_attributes_t* pAttributes,
378 bool doNotReconnect,
379 float maxRequiredSpeed,
380 audio_port_handle_t selectedDeviceId)
381 {
382 LOG_ALWAYS_FATAL_IF(mInitialized, "%s: should not be called twice", __func__);
383 mInitialized = true;
384 status_t status;
385 uint32_t channelCount;
386 pid_t callingPid;
387 pid_t myPid;
388 auto uid = aidl2legacy_int32_t_uid_t(attributionSource.uid);
389 auto pid = aidl2legacy_int32_t_pid_t(attributionSource.pid);
390 if (!uid.ok()) {
391 return logIfErrorAndReturnStatus(
392 BAD_VALUE, StringPrintf("%s: received invalid attribution source uid", __func__));
393 }
394 if (!pid.ok()) {
395 return logIfErrorAndReturnStatus(
396 BAD_VALUE, StringPrintf("%s: received invalid attribution source pid", __func__));
397 }
398 // Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
399 ALOGV("%s(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
400 "flags %#x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
401 __func__,
402 streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
403 sessionId, transferType, attributionSource.uid, attributionSource.pid);
404
405 mThreadCanCallJava = threadCanCallJava;
406
407 // These variables are pulled in an error report, so we initialize them early.
408 mSelectedDeviceId = selectedDeviceId;
409 mSessionId = sessionId;
410 mChannelMask = channelMask;
411 mReqFrameCount = mFrameCount = frameCount;
412 mSampleRate = sampleRate;
413 mOriginalSampleRate = sampleRate;
414 mAttributes = pAttributes != nullptr ? *pAttributes : AUDIO_ATTRIBUTES_INITIALIZER;
415 mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
416
417 // update format and flags before storing them in mFormat, mOrigFlags and mFlags
418 if (pAttributes != NULL) {
419 // stream type shouldn't be looked at, this track has audio attributes
420 ALOGV("%s(): Building AudioTrack with attributes:"
421 " usage=%d content=%d flags=0x%x tags=[%s]",
422 __func__,
423 mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
424 audio_flags_to_audio_output_flags(mAttributes.flags, &flags);
425 }
426
427 // these below should probably come from the audioFlinger too...
428 if (format == AUDIO_FORMAT_DEFAULT) {
429 format = AUDIO_FORMAT_PCM_16_BIT;
430 } else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
431 flags = static_cast<audio_output_flags_t>(flags | AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO);
432 }
433
434 // force direct flag if format is not linear PCM
435 // or offload was requested
436 if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
437 || !audio_is_linear_pcm(format)) {
438 ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
439 ? "%s(): Offload request, forcing to Direct Output"
440 : "%s(): Not linear PCM, forcing to Direct Output",
441 __func__);
442 flags = (audio_output_flags_t)
443 // FIXME why can't we allow direct AND fast?
444 ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
445 }
446
447 // force direct flag if HW A/V sync requested
448 if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
449 flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
450 }
451
452 mFormat = format;
453 mOrigFlags = mFlags = flags;
454
455 switch (transferType) {
456 case TRANSFER_DEFAULT:
457 if (sharedBuffer != 0) {
458 transferType = TRANSFER_SHARED;
459 } else if (callback == nullptr|| threadCanCallJava) {
460 transferType = TRANSFER_SYNC;
461 } else {
462 transferType = TRANSFER_CALLBACK;
463 }
464 break;
465 case TRANSFER_CALLBACK:
466 case TRANSFER_SYNC_NOTIF_CALLBACK:
467 if (callback == nullptr || sharedBuffer != 0) {
468 return logIfErrorAndReturnStatus(
469 BAD_VALUE,
470 StringPrintf(
471 "%s: Transfer type %s but callback == nullptr || sharedBuffer != 0",
472 convertTransferToText(transferType), __func__));
473 }
474 break;
475 case TRANSFER_OBTAIN:
476 case TRANSFER_SYNC:
477 if (sharedBuffer != 0) {
478 return logIfErrorAndReturnStatus(
479 BAD_VALUE,
480 StringPrintf("%s: Transfer type TRANSFER_OBTAIN but sharedBuffer != 0",
481 __func__));
482 }
483 break;
484 case TRANSFER_SHARED:
485 if (sharedBuffer == 0) {
486 return logIfErrorAndReturnStatus(
487 BAD_VALUE,
488 StringPrintf("%s: Transfer type TRANSFER_SHARED but sharedBuffer == 0",
489 __func__));
490 }
491 break;
492 default:
493 return logIfErrorAndReturnStatus(
494 BAD_VALUE, StringPrintf("%s: Invalid transfer type %d", __func__, transferType));
495 }
496 mSharedBuffer = sharedBuffer;
497 mTransfer = transferType;
498 mDoNotReconnect = doNotReconnect;
499
500 ALOGV_IF(sharedBuffer != 0, "%s(): sharedBuffer: %p, size: %zu",
501 __func__, sharedBuffer->unsecurePointer(), sharedBuffer->size());
502
503 // invariant that mAudioTrack != 0 is true only after set() returns successfully
504 if (mAudioTrack != 0) {
505 return logIfErrorAndReturnStatus(INVALID_OPERATION,
506 StringPrintf("%s: Track already in use", __func__));
507 }
508
509 // handle default values first.
510 if (streamType == AUDIO_STREAM_DEFAULT) {
511 streamType = AUDIO_STREAM_MUSIC;
512 }
513 if (pAttributes == NULL) {
514 if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
515 return logIfErrorAndReturnStatus(
516 BAD_VALUE, StringPrintf("%s: Invalid stream type %d", __func__, streamType));
517 }
518 mOriginalStreamType = streamType;
519 } else {
520 mOriginalStreamType = AUDIO_STREAM_DEFAULT;
521 }
522
523 // validate parameters
524 if (!audio_is_valid_format(format)) {
525 return logIfErrorAndReturnStatus(BAD_VALUE,
526 StringPrintf("%s: Invalid format %#x", __func__, format));
527 }
528
529 if (!audio_is_output_channel(channelMask)) {
530 return logIfErrorAndReturnStatus(
531 BAD_VALUE, StringPrintf("%s: Invalid channel mask %#x", __func__, channelMask));
532 }
533 channelCount = audio_channel_count_from_out_mask(channelMask);
534 mChannelCount = channelCount;
535
536 if (!(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
537 // createTrack will return an error if PCM format is not supported by server,
538 // so no need to check for specific PCM formats here
539 ALOGW_IF(!audio_has_proportional_frames(format), "%s(): no direct flag for format 0x%x",
540 __func__, format);
541 }
542 mFrameSize = audio_bytes_per_frame(channelCount, format);
543
544 // sampling rate must be specified for direct outputs
545 if (sampleRate == 0 && (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
546 return logIfErrorAndReturnStatus(
547 BAD_VALUE,
548 StringPrintf("%s: sample rate must be specified for direct outputs", __func__));
549 }
550 // 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
551 mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);
552
553 // Make copy of input parameter offloadInfo so that in the future:
554 // (a) createTrack_l doesn't need it as an input parameter
555 // (b) we can support re-creation of offloaded tracks
556 if (offloadInfo != NULL) {
557 mOffloadInfoCopy = *offloadInfo;
558 } else {
559 memset(&mOffloadInfoCopy, 0, sizeof(audio_offload_info_t));
560 mOffloadInfoCopy = AUDIO_INFO_INITIALIZER;
561 mOffloadInfoCopy.format = format;
562 mOffloadInfoCopy.sample_rate = sampleRate;
563 mOffloadInfoCopy.channel_mask = channelMask;
564 mOffloadInfoCopy.stream_type = streamType;
565 mOffloadInfoCopy.usage = mAttributes.usage;
566 mOffloadInfoCopy.bit_width = audio_bytes_per_sample(format) * 8;
567 }
568
569 mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
570 mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
571 mSendLevel = 0.0f;
572 // mFrameCount is initialized in createTrack_l
573 if (notificationFrames >= 0) {
574 mNotificationFramesReq = notificationFrames;
575 mNotificationsPerBufferReq = 0;
576 } else {
577 if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
578 return logIfErrorAndReturnStatus(
579 BAD_VALUE,
580 StringPrintf("%s: notificationFrames=%d not permitted for non-fast track",
581 __func__, notificationFrames));
582 }
583 if (frameCount > 0) {
584 return logIfErrorAndReturnStatus(
585 BAD_VALUE, StringPrintf("%s(): notificationFrames=%d not permitted "
586 "with non-zero frameCount=%zu",
587 __func__, notificationFrames, frameCount));
588 }
589 mNotificationFramesReq = 0;
590 const uint32_t minNotificationsPerBuffer = 1;
591 const uint32_t maxNotificationsPerBuffer = 8;
592 mNotificationsPerBufferReq = min(maxNotificationsPerBuffer,
593 max((uint32_t) -notificationFrames, minNotificationsPerBuffer));
594 ALOGW_IF(mNotificationsPerBufferReq != (uint32_t) -notificationFrames,
595 "%s(): notificationFrames=%d clamped to the range -%u to -%u",
596 __func__,
597 notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
598 }
599 mNotificationFramesAct = 0;
600 // TODO b/182392553: refactor or remove
601 mClientAttributionSource = AttributionSourceState(attributionSource);
602 callingPid = IPCThreadState::self()->getCallingPid();
603 myPid = getpid();
604 if (uid.value() == -1 || (callingPid != myPid)) {
605 auto clientAttributionSourceUid =
606 legacy2aidl_uid_t_int32_t(IPCThreadState::self()->getCallingUid());
607 if (!clientAttributionSourceUid.ok()) {
608 return logIfErrorAndReturnStatus(
609 BAD_VALUE,
610 StringPrintf("%s: received invalid client attribution source uid", __func__));
611 }
612 mClientAttributionSource.uid = clientAttributionSourceUid.value();
613 }
614 if (pid.value() == (pid_t)-1 || (callingPid != myPid)) {
615 auto clientAttributionSourcePid = legacy2aidl_uid_t_int32_t(callingPid);
616 if (!clientAttributionSourcePid.ok()) {
617 return logIfErrorAndReturnStatus(
618 BAD_VALUE,
619 StringPrintf("%s: received invalid client attribution source pid", __func__));
620 }
621 mClientAttributionSource.pid = clientAttributionSourcePid.value();
622 }
623 mAuxEffectId = 0;
624 mCallback = callback;
625
626 if (callback != nullptr) {
627 mAudioTrackThread = sp<AudioTrackThread>::make(*this);
628 mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
629 // thread begins in paused state, and will not reference us until start()
630 }
631
632 // create the IAudioTrack
633 {
634 AutoMutex lock(mLock);
635 status = createTrack_l();
636 }
637 if (status != NO_ERROR) {
638 if (mAudioTrackThread != 0) {
639 mAudioTrackThread->requestExit(); // see comment in AudioTrack.h
640 mAudioTrackThread->requestExitAndWait();
641 mAudioTrackThread.clear();
642 }
643 // We do not goto error to prevent double-logging errors.
644 mStatus = status;
645 return mStatus;
646 }
647
648 mLoopCount = 0;
649 mLoopStart = 0;
650 mLoopEnd = 0;
651 mLoopCountNotified = 0;
652 mMarkerPosition = 0;
653 mMarkerReached = false;
654 mNewPosition = 0;
655 mUpdatePeriod = 0;
656 mPosition = 0;
657 mReleased = 0;
658 mStartNs = 0;
659 mStartFromZeroUs = 0;
660 AudioSystem::acquireAudioSessionId(mSessionId, pid.value(), uid.value());
661 mSequence = 1;
662 mObservedSequence = mSequence;
663 mInUnderrun = false;
664 mPreviousTimestampValid = false;
665 mTimestampStartupGlitchReported = false;
666 mTimestampRetrogradePositionReported = false;
667 mTimestampRetrogradeTimeReported = false;
668 mTimestampStallReported = false;
669 mTimestampStaleTimeReported = false;
670 mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
671 mStartTs.mPosition = 0;
672 mUnderrunCountOffset = 0;
673 mFramesWritten = 0;
674 mFramesWrittenServerOffset = 0;
675 mFramesWrittenAtRestore = -1; // -1 is a unique initializer.
676 mVolumeHandler = new media::VolumeHandler();
677
678 return logIfErrorAndReturnStatus(status, "");
679 }
680
681 // -------------------------------------------------------------------------
682
start()683 status_t AudioTrack::start()
684 {
685 AutoMutex lock(mLock);
686
687 if (mState == STATE_ACTIVE) {
688 return INVALID_OPERATION;
689 }
690
691 ALOGV("%s(%d): prior state:%s", __func__, mPortId, stateToString(mState));
692
693 // Defer logging here due to OpenSL ES repeated start calls.
694 // TODO(b/154868033) after fix, restore this logging back to the beginning of start().
695 const int64_t beginNs = systemTime();
696 status_t status = NO_ERROR; // logged: make sure to set this before returning.
697 mediametrics::Defer defer([&] {
698 mediametrics::LogItem(mMetricsId)
699 .set(AMEDIAMETRICS_PROP_CALLERNAME,
700 mCallerName.empty()
701 ? AMEDIAMETRICS_PROP_CALLERNAME_VALUE_UNKNOWN
702 : mCallerName.c_str())
703 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_START)
704 .set(AMEDIAMETRICS_PROP_EXECUTIONTIMENS, (int64_t)(systemTime() - beginNs))
705 .set(AMEDIAMETRICS_PROP_STATE, stateToString(mState))
706 .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)status)
707 .record(); });
708
709
710 mInUnderrun = true;
711
712 State previousState = mState;
713 if (previousState == STATE_PAUSED_STOPPING) {
714 mState = STATE_STOPPING;
715 } else {
716 mState = STATE_ACTIVE;
717 }
718 (void) updateAndGetPosition_l();
719
720 // save start timestamp
721 if (isAfTrackOffloadedOrDirect_l()) {
722 if (getTimestamp_l(mStartTs) != OK) {
723 mStartTs.mPosition = 0;
724 }
725 } else {
726 if (getTimestamp_l(&mStartEts) != OK) {
727 mStartEts.clear();
728 }
729 }
730 mStartNs = systemTime(); // save this for timestamp adjustment after starting.
731 if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
732 // reset current position as seen by client to 0
733 mPosition = 0;
734 mPreviousTimestampValid = false;
735 mTimestampStartupGlitchReported = false;
736 mTimestampRetrogradePositionReported = false;
737 mTimestampRetrogradeTimeReported = false;
738 mTimestampStallReported = false;
739 mTimestampStaleTimeReported = false;
740 mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
741
742 if (!isAfTrackOffloadedOrDirect_l()
743 && mStartEts.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] > 0) {
744 // Server side has consumed something, but is it finished consuming?
745 // It is possible since flush and stop are asynchronous that the server
746 // is still active at this point.
747 ALOGV("%s(%d): server read:%lld cumulative flushed:%lld client written:%lld",
748 __func__, mPortId,
749 (long long)(mFramesWrittenServerOffset
750 + mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER]),
751 (long long)mStartEts.mFlushed,
752 (long long)mFramesWritten);
753 // mStartEts is already adjusted by mFramesWrittenServerOffset, so we delta adjust.
754 mFramesWrittenServerOffset -= mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER];
755 }
756 mFramesWritten = 0;
757 mProxy->clearTimestamp(); // need new server push for valid timestamp
758 mMarkerReached = false;
759
760 // For offloaded tracks, we don't know if the hardware counters are really zero here,
761 // since the flush is asynchronous and stop may not fully drain.
762 // We save the time when the track is started to later verify whether
763 // the counters are realistic (i.e. start from zero after this time).
764 mStartFromZeroUs = mStartNs / 1000;
765
766 // force refresh of remaining frames by processAudioBuffer() as last
767 // write before stop could be partial.
768 mRefreshRemaining = true;
769
770 // for static track, clear the old flags when starting from stopped state
771 if (mSharedBuffer != 0) {
772 android_atomic_and(
773 ~(CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
774 &mCblk->mFlags);
775 }
776 }
777 mNewPosition = mPosition + mUpdatePeriod;
778 int32_t flags = android_atomic_and(~(CBLK_STREAM_END_DONE | CBLK_DISABLED), &mCblk->mFlags);
779
780 if (!(flags & CBLK_INVALID)) {
781 mAudioTrack->start(&status);
782 if (status == DEAD_OBJECT) {
783 flags |= CBLK_INVALID;
784 }
785 }
786 if (flags & CBLK_INVALID) {
787 status = restoreTrack_l("start");
788 }
789
790 // resume or pause the callback thread as needed.
791 sp<AudioTrackThread> t = mAudioTrackThread;
792 if (status == NO_ERROR) {
793 if (t != 0) {
794 if (previousState == STATE_STOPPING) {
795 mProxy->interrupt();
796 } else {
797 t->resume();
798 }
799 } else {
800 mPreviousPriority = getpriority(PRIO_PROCESS, 0);
801 get_sched_policy(0, &mPreviousSchedulingGroup);
802 androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
803 }
804
805 // Start our local VolumeHandler for restoration purposes.
806 mVolumeHandler->setStarted();
807 } else {
808 ALOGE("%s(%d): status %d", __func__, mPortId, status);
809 mState = previousState;
810 if (t != 0) {
811 if (previousState != STATE_STOPPING) {
812 t->pause();
813 }
814 } else {
815 setpriority(PRIO_PROCESS, 0, mPreviousPriority);
816 set_sched_policy(0, mPreviousSchedulingGroup);
817 }
818 }
819
820 return status;
821 }
822
stop()823 void AudioTrack::stop()
824 {
825 const int64_t beginNs = systemTime();
826
827 AutoMutex lock(mLock);
828 if (mProxy == nullptr) return; // not successfully initialized.
829 mediametrics::Defer defer([&]() {
830 mediametrics::LogItem(mMetricsId)
831 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_STOP)
832 .set(AMEDIAMETRICS_PROP_EXECUTIONTIMENS, (int64_t)(systemTime() - beginNs))
833 .set(AMEDIAMETRICS_PROP_STATE, stateToString(mState))
834 .set(AMEDIAMETRICS_PROP_BUFFERSIZEFRAMES, (int32_t)mProxy->getBufferSizeInFrames())
835 .set(AMEDIAMETRICS_PROP_UNDERRUN, (int32_t) getUnderrunCount_l())
836 .record();
837 });
838
839 ALOGV("%s(%d): prior state:%s", __func__, mPortId, stateToString(mState));
840
841 if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
842 return;
843 }
844
845 if (isOffloaded_l()) {
846 mState = STATE_STOPPING;
847 } else {
848 mState = STATE_STOPPED;
849 ALOGD_IF(mSharedBuffer == nullptr,
850 "%s(%d): called with %u frames delivered", __func__, mPortId, mReleased.value());
851 mReleased = 0;
852 }
853
854 mProxy->stop(); // notify server not to read beyond current client position until start().
855 mProxy->interrupt();
856 mAudioTrack->stop();
857
858 // Note: legacy handling - stop does not clear playback marker
859 // and periodic update counter, but flush does for streaming tracks.
860
861 if (mSharedBuffer != 0) {
862 // clear buffer position and loop count.
863 mStaticProxy->setBufferPositionAndLoop(0 /* position */,
864 0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
865 }
866
867 sp<AudioTrackThread> t = mAudioTrackThread;
868 if (t != 0) {
869 if (!isOffloaded_l()) {
870 t->pause();
871 } else if (mTransfer == TRANSFER_SYNC_NOTIF_CALLBACK) {
872 // causes wake up of the playback thread, that will callback the client for
873 // EVENT_STREAM_END in processAudioBuffer()
874 t->wake();
875 }
876 } else {
877 setpriority(PRIO_PROCESS, 0, mPreviousPriority);
878 set_sched_policy(0, mPreviousSchedulingGroup);
879 }
880 }
881
stopped() const882 bool AudioTrack::stopped() const
883 {
884 AutoMutex lock(mLock);
885 return mState != STATE_ACTIVE;
886 }
887
flush()888 void AudioTrack::flush()
889 {
890 const int64_t beginNs = systemTime();
891 AutoMutex lock(mLock);
892 mediametrics::Defer defer([&]() {
893 mediametrics::LogItem(mMetricsId)
894 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_FLUSH)
895 .set(AMEDIAMETRICS_PROP_EXECUTIONTIMENS, (int64_t)(systemTime() - beginNs))
896 .set(AMEDIAMETRICS_PROP_STATE, stateToString(mState))
897 .record(); });
898
899 ALOGV("%s(%d): prior state:%s", __func__, mPortId, stateToString(mState));
900
901 if (mSharedBuffer != 0) {
902 return;
903 }
904 if (mState == STATE_ACTIVE) {
905 return;
906 }
907 flush_l();
908 }
909
flush_l()910 void AudioTrack::flush_l()
911 {
912 ALOG_ASSERT(mState != STATE_ACTIVE);
913
914 // clear playback marker and periodic update counter
915 mMarkerPosition = 0;
916 mMarkerReached = false;
917 mUpdatePeriod = 0;
918 mRefreshRemaining = true;
919
920 mState = STATE_FLUSHED;
921 mReleased = 0;
922 if (isOffloaded_l()) {
923 mProxy->interrupt();
924 }
925 mProxy->flush();
926 mAudioTrack->flush();
927 }
928
pauseAndWait(const std::chrono::milliseconds & timeout)929 bool AudioTrack::pauseAndWait(const std::chrono::milliseconds& timeout)
930 {
931 using namespace std::chrono_literals;
932
933 // We use atomic access here for state variables - these are used as hints
934 // to ensure we have ramped down audio.
935 const int priorState = mProxy->getState();
936 const uint32_t priorPosition = mProxy->getPosition().unsignedValue();
937
938 pause();
939
940 // Only if we were previously active, do we wait to ramp down the audio.
941 if (priorState != CBLK_STATE_ACTIVE) return true;
942
943 AutoMutex lock(mLock);
944 // offload and direct tracks do not wait because pause volume ramp is handled by hardware.
945 if (isOffloadedOrDirect_l()) return true;
946
947 // Wait for the track state to be anything besides pausing.
948 // This ensures that the volume has ramped down.
949 constexpr auto SLEEP_INTERVAL_MS = 10ms;
950 constexpr auto POSITION_TIMEOUT_MS = 40ms; // don't wait longer than this for position change.
951 auto begin = std::chrono::steady_clock::now();
952 while (true) {
953 // Wait for state and position to change.
954 // After pause() the server state should be PAUSING, but that may immediately
955 // convert to PAUSED by prepareTracks before data is read into the mixer.
956 // Hence we check that the state is not PAUSING and that the server position
957 // has advanced to be a more reliable estimate that the volume ramp has completed.
958 const int state = mProxy->getState();
959 const uint32_t position = mProxy->getPosition().unsignedValue();
960
961 mLock.unlock(); // only local variables accessed until lock.
962 auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
963 std::chrono::steady_clock::now() - begin);
964 if (state != CBLK_STATE_PAUSING &&
965 (elapsed >= POSITION_TIMEOUT_MS || position != priorPosition)) {
966 ALOGV("%s: success state:%d, position:%u after %lld ms"
967 " (prior state:%d prior position:%u)",
968 __func__, state, position, elapsed.count(), priorState, priorPosition);
969 return true;
970 }
971 std::chrono::milliseconds remaining = timeout - elapsed;
972 if (remaining.count() <= 0) {
973 ALOGW("%s: timeout expired state:%d still pausing:%d after %lld ms",
974 __func__, state, CBLK_STATE_PAUSING, elapsed.count());
975 return false;
976 }
977 // It is conceivable that the track is restored while sleeping;
978 // as this logic is advisory, we allow that.
979 std::this_thread::sleep_for(std::min(remaining, SLEEP_INTERVAL_MS));
980 mLock.lock();
981 }
982 }
983
pause()984 void AudioTrack::pause()
985 {
986 const int64_t beginNs = systemTime();
987 AutoMutex lock(mLock);
988 mediametrics::Defer defer([&]() {
989 mediametrics::LogItem(mMetricsId)
990 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_PAUSE)
991 .set(AMEDIAMETRICS_PROP_EXECUTIONTIMENS, (int64_t)(systemTime() - beginNs))
992 .set(AMEDIAMETRICS_PROP_STATE, stateToString(mState))
993 .record(); });
994
995 ALOGV("%s(%d): prior state:%s", __func__, mPortId, stateToString(mState));
996
997 if (mState == STATE_ACTIVE) {
998 mState = STATE_PAUSED;
999 } else if (mState == STATE_STOPPING) {
1000 mState = STATE_PAUSED_STOPPING;
1001 } else {
1002 return;
1003 }
1004 mProxy->interrupt();
1005 mAudioTrack->pause();
1006
1007 if (isOffloaded_l()) {
1008 if (mOutput != AUDIO_IO_HANDLE_NONE) {
1009 // An offload output can be re-used between two audio tracks having
1010 // the same configuration. A timestamp query for a paused track
1011 // while the other is running would return an incorrect time.
1012 // To fix this, cache the playback position on a pause() and return
1013 // this time when requested until the track is resumed.
1014
1015 // OffloadThread sends HAL pause in its threadLoop. Time saved
1016 // here can be slightly off.
1017
1018 // TODO: check return code for getRenderPosition.
1019
1020 uint32_t halFrames;
1021 AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
1022 ALOGV("%s(%d): for offload, cache current position %u",
1023 __func__, mPortId, mPausedPosition);
1024 }
1025 }
1026 }
1027
setVolume(float left,float right)1028 status_t AudioTrack::setVolume(float left, float right)
1029 {
1030 // This duplicates a test by AudioTrack JNI, but that is not the only caller
1031 if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
1032 isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
1033 return BAD_VALUE;
1034 }
1035
1036 mediametrics::LogItem(mMetricsId)
1037 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_SETVOLUME)
1038 .set(AMEDIAMETRICS_PROP_VOLUME_LEFT, (double)left)
1039 .set(AMEDIAMETRICS_PROP_VOLUME_RIGHT, (double)right)
1040 .record();
1041
1042 AutoMutex lock(mLock);
1043 mVolume[AUDIO_INTERLEAVE_LEFT] = left;
1044 mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
1045
1046 mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));
1047
1048 if (isOffloaded_l()) {
1049 mAudioTrack->signal();
1050 }
1051 return NO_ERROR;
1052 }
1053
setVolume(float volume)1054 status_t AudioTrack::setVolume(float volume)
1055 {
1056 return setVolume(volume, volume);
1057 }
1058
setAuxEffectSendLevel(float level)1059 status_t AudioTrack::setAuxEffectSendLevel(float level)
1060 {
1061 // This duplicates a test by AudioTrack JNI, but that is not the only caller
1062 if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
1063 return BAD_VALUE;
1064 }
1065
1066 AutoMutex lock(mLock);
1067 mSendLevel = level;
1068 mProxy->setSendLevel(level);
1069
1070 return NO_ERROR;
1071 }
1072
getAuxEffectSendLevel(float * level) const1073 void AudioTrack::getAuxEffectSendLevel(float* level) const
1074 {
1075 if (level != NULL) {
1076 *level = mSendLevel;
1077 }
1078 }
1079
setSampleRate(uint32_t rate)1080 status_t AudioTrack::setSampleRate(uint32_t rate)
1081 {
1082 AutoMutex lock(mLock);
1083 ALOGV("%s(%d): prior state:%s rate:%u", __func__, mPortId, stateToString(mState), rate);
1084
1085 if (rate == mSampleRate) {
1086 return NO_ERROR;
1087 }
1088 if (isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)
1089 || (mChannelMask & AUDIO_CHANNEL_HAPTIC_ALL)) {
1090 return INVALID_OPERATION;
1091 }
1092 if (mOutput == AUDIO_IO_HANDLE_NONE) {
1093 return NO_INIT;
1094 }
1095 // NOTE: it is theoretically possible, but highly unlikely, that a device change
1096 // could mean a previously allowed sampling rate is no longer allowed.
1097 uint32_t afSamplingRate;
1098 if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
1099 return NO_INIT;
1100 }
1101 // pitch is emulated by adjusting speed and sampleRate
1102 const uint32_t effectiveSampleRate = adjustSampleRate(rate, mPlaybackRate.mPitch);
1103 if (rate == 0 || effectiveSampleRate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
1104 return BAD_VALUE;
1105 }
1106 // TODO: Should we also check if the buffer size is compatible?
1107
1108 mSampleRate = rate;
1109 mProxy->setSampleRate(effectiveSampleRate);
1110
1111 mediametrics::LogItem(mMetricsId)
1112 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_SETSAMPLERATE)
1113 .set(AMEDIAMETRICS_PROP_PREFIX_EFFECTIVE AMEDIAMETRICS_PROP_SAMPLERATE,
1114 static_cast<int32_t>(effectiveSampleRate))
1115 .set(AMEDIAMETRICS_PROP_SAMPLERATE, static_cast<int32_t>(rate))
1116 .record();
1117
1118 return NO_ERROR;
1119 }
1120
getSampleRate() const1121 uint32_t AudioTrack::getSampleRate() const
1122 {
1123 AutoMutex lock(mLock);
1124
1125 // sample rate can be updated during playback by the offloaded decoder so we need to
1126 // query the HAL and update if needed.
1127 // FIXME use Proxy return channel to update the rate from server and avoid polling here
1128 if (isOffloadedOrDirect_l()) {
1129 if (mOutput != AUDIO_IO_HANDLE_NONE) {
1130 uint32_t sampleRate = 0;
1131 status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
1132 if (status == NO_ERROR) {
1133 mSampleRate = sampleRate;
1134 }
1135 }
1136 }
1137 return mSampleRate;
1138 }
1139
getOriginalSampleRate() const1140 uint32_t AudioTrack::getOriginalSampleRate() const
1141 {
1142 return mOriginalSampleRate;
1143 }
1144
getHalSampleRate() const1145 uint32_t AudioTrack::getHalSampleRate() const
1146 {
1147 return mAfSampleRate;
1148 }
1149
getHalChannelCount() const1150 uint32_t AudioTrack::getHalChannelCount() const
1151 {
1152 return mAfChannelCount;
1153 }
1154
getHalFormat() const1155 audio_format_t AudioTrack::getHalFormat() const
1156 {
1157 return mAfFormat;
1158 }
1159
setDualMonoMode(audio_dual_mono_mode_t mode)1160 status_t AudioTrack::setDualMonoMode(audio_dual_mono_mode_t mode)
1161 {
1162 AutoMutex lock(mLock);
1163 return setDualMonoMode_l(mode);
1164 }
1165
setDualMonoMode_l(audio_dual_mono_mode_t mode)1166 status_t AudioTrack::setDualMonoMode_l(audio_dual_mono_mode_t mode)
1167 {
1168 const status_t status = statusTFromBinderStatus(
1169 mAudioTrack->setDualMonoMode(VALUE_OR_RETURN_STATUS(
1170 legacy2aidl_audio_dual_mono_mode_t_AudioDualMonoMode(mode))));
1171 if (status == NO_ERROR) mDualMonoMode = mode;
1172 return status;
1173 }
1174
getDualMonoMode(audio_dual_mono_mode_t * mode) const1175 status_t AudioTrack::getDualMonoMode(audio_dual_mono_mode_t* mode) const
1176 {
1177 AutoMutex lock(mLock);
1178 media::audio::common::AudioDualMonoMode mediaMode;
1179 const status_t status = statusTFromBinderStatus(mAudioTrack->getDualMonoMode(&mediaMode));
1180 if (status == NO_ERROR) {
1181 *mode = VALUE_OR_RETURN_STATUS(
1182 aidl2legacy_AudioDualMonoMode_audio_dual_mono_mode_t(mediaMode));
1183 }
1184 return status;
1185 }
1186
setAudioDescriptionMixLevel(float leveldB)1187 status_t AudioTrack::setAudioDescriptionMixLevel(float leveldB)
1188 {
1189 AutoMutex lock(mLock);
1190 return setAudioDescriptionMixLevel_l(leveldB);
1191 }
1192
setAudioDescriptionMixLevel_l(float leveldB)1193 status_t AudioTrack::setAudioDescriptionMixLevel_l(float leveldB)
1194 {
1195 const status_t status = statusTFromBinderStatus(
1196 mAudioTrack->setAudioDescriptionMixLevel(leveldB));
1197 if (status == NO_ERROR) mAudioDescriptionMixLeveldB = leveldB;
1198 return status;
1199 }
1200
getAudioDescriptionMixLevel(float * leveldB) const1201 status_t AudioTrack::getAudioDescriptionMixLevel(float* leveldB) const
1202 {
1203 AutoMutex lock(mLock);
1204 return statusTFromBinderStatus(mAudioTrack->getAudioDescriptionMixLevel(leveldB));
1205 }
1206
setPlaybackRate(const AudioPlaybackRate & playbackRate)1207 status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
1208 {
1209 AutoMutex lock(mLock);
1210 if (isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
1211 return NO_ERROR;
1212 }
1213 if (isAfTrackOffloadedOrDirect_l()) {
1214 const status_t status = statusTFromBinderStatus(mAudioTrack->setPlaybackRateParameters(
1215 VALUE_OR_RETURN_STATUS(
1216 legacy2aidl_audio_playback_rate_t_AudioPlaybackRate(playbackRate))));
1217 if (status == NO_ERROR) {
1218 mPlaybackRate = playbackRate;
1219 } else if (status == INVALID_OPERATION
1220 && playbackRate.mSpeed == 1.0f && mPlaybackRate.mPitch == 1.0f) {
1221 mPlaybackRate = playbackRate;
1222 return NO_ERROR;
1223 }
1224 return status;
1225 }
1226 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1227 return INVALID_OPERATION;
1228 }
1229
1230 ALOGV("%s(%d): mSampleRate:%u mSpeed:%f mPitch:%f",
1231 __func__, mPortId, mSampleRate, playbackRate.mSpeed, playbackRate.mPitch);
1232 // pitch is emulated by adjusting speed and sampleRate
1233 const uint32_t effectiveRate = adjustSampleRate(mSampleRate, playbackRate.mPitch);
1234 const float effectiveSpeed = adjustSpeed(playbackRate.mSpeed, playbackRate.mPitch);
1235 const float effectivePitch = adjustPitch(playbackRate.mPitch);
1236 AudioPlaybackRate playbackRateTemp = playbackRate;
1237 playbackRateTemp.mSpeed = effectiveSpeed;
1238 playbackRateTemp.mPitch = effectivePitch;
1239
1240 ALOGV("%s(%d) (effective) mSampleRate:%u mSpeed:%f mPitch:%f",
1241 __func__, mPortId, effectiveRate, effectiveSpeed, effectivePitch);
1242
1243 if (!isAudioPlaybackRateValid(playbackRateTemp)) {
1244 ALOGW("%s(%d) (%f, %f) failed (effective rate out of bounds)",
1245 __func__, mPortId, playbackRate.mSpeed, playbackRate.mPitch);
1246 return BAD_VALUE;
1247 }
1248 // Check if the buffer size is compatible.
1249 if (!isSampleRateSpeedAllowed_l(effectiveRate, effectiveSpeed)) {
1250 ALOGW("%s(%d) (%f, %f) failed (buffer size)",
1251 __func__, mPortId, playbackRate.mSpeed, playbackRate.mPitch);
1252 return BAD_VALUE;
1253 }
1254
1255 // Check resampler ratios are within bounds
1256 if ((uint64_t)effectiveRate > (uint64_t)mSampleRate *
1257 (uint64_t)AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
1258 ALOGW("%s(%d) (%f, %f) failed. Resample rate exceeds max accepted value",
1259 __func__, mPortId, playbackRate.mSpeed, playbackRate.mPitch);
1260 return BAD_VALUE;
1261 }
1262
1263 if ((uint64_t)effectiveRate * (uint64_t)AUDIO_RESAMPLER_UP_RATIO_MAX < (uint64_t)mSampleRate) {
1264 ALOGW("%s(%d) (%f, %f) failed. Resample rate below min accepted value",
1265 __func__, mPortId, playbackRate.mSpeed, playbackRate.mPitch);
1266 return BAD_VALUE;
1267 }
1268 mPlaybackRate = playbackRate;
1269 //set effective rates
1270 mProxy->setPlaybackRate(playbackRateTemp);
1271 mProxy->setSampleRate(effectiveRate); // FIXME: not quite "atomic" with setPlaybackRate
1272
1273 mediametrics::LogItem(mMetricsId)
1274 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_SETPLAYBACKPARAM)
1275 .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
1276 .set(AMEDIAMETRICS_PROP_PLAYBACK_SPEED, (double)mPlaybackRate.mSpeed)
1277 .set(AMEDIAMETRICS_PROP_PLAYBACK_PITCH, (double)mPlaybackRate.mPitch)
1278 .set(AMEDIAMETRICS_PROP_PREFIX_EFFECTIVE
1279 AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)effectiveRate)
1280 .set(AMEDIAMETRICS_PROP_PREFIX_EFFECTIVE
1281 AMEDIAMETRICS_PROP_PLAYBACK_SPEED, (double)playbackRateTemp.mSpeed)
1282 .set(AMEDIAMETRICS_PROP_PREFIX_EFFECTIVE
1283 AMEDIAMETRICS_PROP_PLAYBACK_PITCH, (double)playbackRateTemp.mPitch)
1284 .record();
1285
1286 return NO_ERROR;
1287 }
1288
getPlaybackRate()1289 const AudioPlaybackRate& AudioTrack::getPlaybackRate()
1290 {
1291 AutoMutex lock(mLock);
1292 if (isOffloadedOrDirect_l()) {
1293 media::audio::common::AudioPlaybackRate playbackRateTemp;
1294 const status_t status = statusTFromBinderStatus(
1295 mAudioTrack->getPlaybackRateParameters(&playbackRateTemp));
1296 if (status == NO_ERROR) { // update local version if changed.
1297 mPlaybackRate =
1298 aidl2legacy_AudioPlaybackRate_audio_playback_rate_t(playbackRateTemp).value();
1299 }
1300 }
1301 return mPlaybackRate;
1302 }
1303
getBufferSizeInFrames()1304 ssize_t AudioTrack::getBufferSizeInFrames()
1305 {
1306 AutoMutex lock(mLock);
1307 if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
1308 return NO_INIT;
1309 }
1310
1311 return (ssize_t) mProxy->getBufferSizeInFrames();
1312 }
1313
getBufferDurationInUs(int64_t * duration)1314 status_t AudioTrack::getBufferDurationInUs(int64_t *duration)
1315 {
1316 if (duration == nullptr) {
1317 return BAD_VALUE;
1318 }
1319 AutoMutex lock(mLock);
1320 if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
1321 return NO_INIT;
1322 }
1323 ssize_t bufferSizeInFrames = (ssize_t) mProxy->getBufferSizeInFrames();
1324 if (bufferSizeInFrames < 0) {
1325 return (status_t)bufferSizeInFrames;
1326 }
1327 *duration = (int64_t)((double)bufferSizeInFrames * 1000000
1328 / ((double)mSampleRate * mPlaybackRate.mSpeed));
1329 return NO_ERROR;
1330 }
1331
setBufferSizeInFrames(size_t bufferSizeInFrames)1332 ssize_t AudioTrack::setBufferSizeInFrames(size_t bufferSizeInFrames)
1333 {
1334 AutoMutex lock(mLock);
1335 if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
1336 return NO_INIT;
1337 }
1338
1339 ssize_t originalBufferSize = mProxy->getBufferSizeInFrames();
1340 ssize_t finalBufferSize = mProxy->setBufferSizeInFrames((uint32_t) bufferSizeInFrames);
1341 if (originalBufferSize != finalBufferSize) {
1342 android::mediametrics::LogItem(mMetricsId)
1343 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_SETBUFFERSIZE)
1344 .set(AMEDIAMETRICS_PROP_BUFFERSIZEFRAMES, (int32_t)mProxy->getBufferSizeInFrames())
1345 .set(AMEDIAMETRICS_PROP_UNDERRUN, (int32_t)getUnderrunCount_l())
1346 .record();
1347 }
1348 return finalBufferSize;
1349 }
1350
getStartThresholdInFrames() const1351 ssize_t AudioTrack::getStartThresholdInFrames() const
1352 {
1353 AutoMutex lock(mLock);
1354 if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
1355 return NO_INIT;
1356 }
1357 return (ssize_t) mProxy->getStartThresholdInFrames();
1358 }
1359
setStartThresholdInFrames(size_t startThresholdInFrames)1360 ssize_t AudioTrack::setStartThresholdInFrames(size_t startThresholdInFrames)
1361 {
1362 if (startThresholdInFrames > INT32_MAX || startThresholdInFrames == 0) {
1363 // contractually we could simply return the current threshold in frames
1364 // to indicate the request was ignored, but we return an error here.
1365 return BAD_VALUE;
1366 }
1367 AutoMutex lock(mLock);
1368 // We do not permit calling setStartThresholdInFrames() between the AudioTrack
1369 // default ctor AudioTrack() and set(...) but rather fail such an attempt.
1370 // (To do so would require a cached mOrigStartThresholdInFrames and we may
1371 // not have proper validation for the actual set value).
1372 if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
1373 return NO_INIT;
1374 }
1375 const uint32_t original = mProxy->getStartThresholdInFrames();
1376 const uint32_t final = mProxy->setStartThresholdInFrames(startThresholdInFrames);
1377 if (original != final) {
1378 android::mediametrics::LogItem(mMetricsId)
1379 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_SETSTARTTHRESHOLD)
1380 .set(AMEDIAMETRICS_PROP_STARTTHRESHOLDFRAMES, (int32_t)final)
1381 .record();
1382 if (original > final) {
1383 // restart track if it was disabled by audioflinger due to previous underrun
1384 // and we reduced the number of frames for the threshold.
1385 restartIfDisabled();
1386 }
1387 }
1388 return final;
1389 }
1390
setLoop(uint32_t loopStart,uint32_t loopEnd,int loopCount)1391 status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
1392 {
1393 if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
1394 return INVALID_OPERATION;
1395 }
1396
1397 if (loopCount == 0) {
1398 ;
1399 } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
1400 loopEnd - loopStart >= MIN_LOOP) {
1401 ;
1402 } else {
1403 return BAD_VALUE;
1404 }
1405
1406 AutoMutex lock(mLock);
1407 // See setPosition() regarding setting parameters such as loop points or position while active
1408 if (mState == STATE_ACTIVE) {
1409 return INVALID_OPERATION;
1410 }
1411 setLoop_l(loopStart, loopEnd, loopCount);
1412 return NO_ERROR;
1413 }
1414
setLoop_l(uint32_t loopStart,uint32_t loopEnd,int loopCount)1415 void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
1416 {
1417 // We do not update the periodic notification point.
1418 // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
1419 mLoopCount = loopCount;
1420 mLoopEnd = loopEnd;
1421 mLoopStart = loopStart;
1422 mLoopCountNotified = loopCount;
1423 mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
1424
1425 // Waking the AudioTrackThread is not needed as this cannot be called when active.
1426 }
1427
setMarkerPosition(uint32_t marker)1428 status_t AudioTrack::setMarkerPosition(uint32_t marker)
1429 {
1430 AutoMutex lock(mLock);
1431 // The only purpose of setting marker position is to get a callback
1432 if (!mCallback.promote() || isOffloadedOrDirect_l()) {
1433 return INVALID_OPERATION;
1434 }
1435
1436 mMarkerPosition = marker;
1437 mMarkerReached = false;
1438
1439 sp<AudioTrackThread> t = mAudioTrackThread;
1440 if (t != 0) {
1441 t->wake();
1442 }
1443 return NO_ERROR;
1444 }
1445
getMarkerPosition(uint32_t * marker) const1446 status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
1447 {
1448 if (isOffloadedOrDirect()) {
1449 return INVALID_OPERATION;
1450 }
1451 if (marker == NULL) {
1452 return BAD_VALUE;
1453 }
1454
1455 AutoMutex lock(mLock);
1456 mMarkerPosition.getValue(marker);
1457
1458 return NO_ERROR;
1459 }
1460
setPositionUpdatePeriod(uint32_t updatePeriod)1461 status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
1462 {
1463 AutoMutex lock(mLock);
1464 // The only purpose of setting position update period is to get a callback
1465 if (!mCallback.promote() || isOffloadedOrDirect_l()) {
1466 return INVALID_OPERATION;
1467 }
1468
1469 mNewPosition = updateAndGetPosition_l() + updatePeriod;
1470 mUpdatePeriod = updatePeriod;
1471
1472 sp<AudioTrackThread> t = mAudioTrackThread;
1473 if (t != 0) {
1474 t->wake();
1475 }
1476 return NO_ERROR;
1477 }
1478
getPositionUpdatePeriod(uint32_t * updatePeriod) const1479 status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
1480 {
1481 if (isOffloadedOrDirect()) {
1482 return INVALID_OPERATION;
1483 }
1484 if (updatePeriod == NULL) {
1485 return BAD_VALUE;
1486 }
1487
1488 AutoMutex lock(mLock);
1489 *updatePeriod = mUpdatePeriod;
1490
1491 return NO_ERROR;
1492 }
1493
1494 status_t AudioTrack::setPosition(uint32_t position)
1495 {
1496 if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
1497 return INVALID_OPERATION;
1498 }
1499 if (position > mFrameCount) {
1500 return BAD_VALUE;
1501 }
1502
1503 AutoMutex lock(mLock);
1504 // Currently we require that the player is inactive before setting parameters such as position
1505 // or loop points. Otherwise, there could be a race condition: the application could read the
1506 // current position, compute a new position or loop parameters, and then set that position or
1507 // loop parameters, but it would do the "wrong" thing since the position has continued to advance
1508 // in the meantime. If we ever provide a sequencer in the server, we could allow a way for the app
1509 // to specify how it wants to handle such scenarios.
1510 if (mState == STATE_ACTIVE) {
1511 return INVALID_OPERATION;
1512 }
1513 // After setting the position, use full update period before notification.
1514 mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
1515 mStaticProxy->setBufferPosition(position);
1516
1517 // Waking the AudioTrackThread is not needed as this cannot be called when active.
1518 return NO_ERROR;
1519 }
1520
1521 status_t AudioTrack::getPosition(uint32_t *position)
1522 {
1523 if (position == NULL) {
1524 return BAD_VALUE;
1525 }
1526
1527 AutoMutex lock(mLock);
1528 // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
1529 if (mState == STATE_STOPPED || mState == STATE_FLUSHED) {
1530 *position = 0;
1531 return NO_ERROR;
1532 }
1533 // FIXME: offloaded and direct tracks call into the HAL for render positions
1534 // for compressed/synced data; however, we use proxy position for pure linear pcm data
1535 // as we do not know the capability of the HAL for pcm position support and standby.
1536 // There may be some latency differences between the HAL position and the proxy position.
1537 if (isOffloadedOrDirect_l() && !isPurePcmData_l()) {
1538 if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
1539 ALOGV("%s(%d): called in paused state, return cached position %u",
1540 __func__, mPortId, mPausedPosition);
1541 *position = mPausedPosition;
1542 return NO_ERROR;
1543 }
1544
1545 uint32_t dspFrames = 0;
1546 if (mOutput != AUDIO_IO_HANDLE_NONE) {
1547 uint32_t halFrames; // actually unused
1548 // FIXME: on getRenderPosition() error, we return OK with frame position 0.
1549 if (AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames) != NO_ERROR) {
1550 *position = 0;
1551 return NO_ERROR;
1552 }
1553 }
1554 *position = dspFrames;
1555 } else {
1556 if (mCblk->mFlags & CBLK_INVALID) {
1557 (void) restoreTrack_l("getPosition");
1558 // FIXME: for compatibility with the Java API we ignore the restoreTrack_l()
1559 // error here (e.g. DEAD_OBJECT) and return OK with the last recorded server position.
1560 }
1561 *position = updateAndGetPosition_l().value();
1562 }
1563
1564 return NO_ERROR;
1565 }
1566
1567 status_t AudioTrack::getBufferPosition(uint32_t *position)
1568 {
1569 if (mSharedBuffer == 0) {
1570 return INVALID_OPERATION;
1571 }
1572 if (position == NULL) {
1573 return BAD_VALUE;
1574 }
1575
1576 AutoMutex lock(mLock);
1577 *position = mStaticProxy->getBufferPosition();
1578 return NO_ERROR;
1579 }
1580
1581 status_t AudioTrack::reload()
1582 {
1583 if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
1584 return INVALID_OPERATION;
1585 }
1586
1587 AutoMutex lock(mLock);
1588 // See setPosition() regarding setting parameters such as loop points or position while active
1589 if (mState == STATE_ACTIVE) {
1590 return INVALID_OPERATION;
1591 }
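// Rewind both the client-side bookkeeping and the server-side buffer position so
// playback restarts from the beginning of the static buffer.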
1592 mNewPosition = mUpdatePeriod;
1593 (void) updateAndGetPosition_l();
1594 mPosition = 0;
1595 mPreviousTimestampValid = false;
1596 #if 0
1597 // The documentation is not clear on the behavior of reload() and the restoration
1598 // of loop count. Historically we have not restored loop count, start, end,
1599 // but it makes sense if one desires to repeat playing a particular sound.
1600 if (mLoopCount != 0) {
1601 mLoopCountNotified = mLoopCount;
1602 mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
1603 }
1604 #endif
1605 mStaticProxy->setBufferPosition(0);
1606 return NO_ERROR;
1607 }
1608
1609 audio_io_handle_t AudioTrack::getOutput() const
1610 {
1611 AutoMutex lock(mLock);
1612 return mOutput;
1613 }
1614
1615 status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
1616 status_t result = NO_ERROR;
1617 AutoMutex lock(mLock);
1618 ALOGV("%s(%d): deviceId=%d mSelectedDeviceId=%d",
1619 __func__, mPortId, deviceId, mSelectedDeviceId);
1620 const int64_t beginNs = systemTime();
1621 mediametrics::Defer defer([&] {
1622 mediametrics::LogItem(mMetricsId)
1623 .set(AMEDIAMETRICS_PROP_CALLERNAME,
1624 mCallerName.empty()
1625 ? AMEDIAMETRICS_PROP_CALLERNAME_VALUE_UNKNOWN
1626 : mCallerName.c_str())
1627 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_SETPREFERREDDEVICE)
1628 .set(AMEDIAMETRICS_PROP_EXECUTIONTIMENS, (int64_t)(systemTime() - beginNs))
1629 .set(AMEDIAMETRICS_PROP_SELECTEDDEVICEID, (int32_t)deviceId)
1630 .record(); });
1631
1632 if (mSelectedDeviceId != deviceId) {
1633 mSelectedDeviceId = deviceId;
1634 if (mStatus == NO_ERROR) {
1635 if (isOffloadedOrDirect_l()) {
1636 if (isPlaying_l()) {
1637 ALOGW("%s(%d). Offloaded or Direct track is not STOPPED or FLUSHED. "
1638 "State: %s.",
1639 __func__, mPortId, stateToString(mState));
1640 result = INVALID_OPERATION;
1641 } else {
1642 ALOGD("%s(%d): creating a new AudioTrack", __func__, mPortId);
1643 result = restoreTrack_l("setOutputDevice", true /* forceRestore */);
1644 }
1645 } else {
1646 // allow track invalidation when track is not playing to propagate
1647 // the updated mSelectedDeviceId
1648 if (isPlaying_l()) {
1649 if (getFirstDeviceId(mRoutedDeviceIds) != mSelectedDeviceId) {
1650 android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
1651 mProxy->interrupt();
1652 }
1653 } else {
1654 // if the track is idle, try to restore now and
1655 // defer to next start if not possible
1656 if (restoreTrack_l("setOutputDevice") != OK) {
1657 android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
1658 }
1659 }
1660 }
1661 }
1662 }
1663 return result;
1664 }
1665
1666 audio_port_handle_t AudioTrack::getOutputDevice() {
1667 AutoMutex lock(mLock);
1668 return mSelectedDeviceId;
1669 }
1670
1671 // must be called with mLock held
1672 void AudioTrack::updateRoutedDeviceIds_l()
1673 {
1674 // if the track is inactive, do not update the actual device, as the output stream may be routed
1675 // to a device not relevant to this client because of other active use cases.
1676 if (mState != STATE_ACTIVE) {
1677 return;
1678 }
1679 if (mOutput != AUDIO_IO_HANDLE_NONE) {
1680 DeviceIdVector deviceIds;
1681 status_t result = AudioSystem::getDeviceIdsForIo(mOutput, deviceIds);
1682 if (result != OK) {
1683 ALOGW("%s: getDeviceIdsForIo returned: %d", __func__, result);
1684 }
1685 if (!deviceIds.empty()) {
1686 mRoutedDeviceIds = deviceIds;
1687 }
1688 }
1689 }
1690
1691 DeviceIdVector AudioTrack::getRoutedDeviceIds() {
1692 AutoMutex lock(mLock);
1693 updateRoutedDeviceIds_l();
1694 return mRoutedDeviceIds;
1695 }
1696
1697 status_t AudioTrack::attachAuxEffect(int effectId)
1698 {
1699 AutoMutex lock(mLock);
1700 status_t status;
1701 mAudioTrack->attachAuxEffect(effectId, &status);
1702 if (status == NO_ERROR) {
1703 mAuxEffectId = effectId;
1704 }
1705 return status;
1706 }
1707
1708 audio_stream_type_t AudioTrack::streamType() const
1709 {
1710 return mStreamType;
1711 }
1712
1713 uint32_t AudioTrack::latency()
1714 {
1715 AutoMutex lock(mLock);
1716 updateLatency_l();
1717 return mLatency;
1718 }
1719
1720 // -------------------------------------------------------------------------
1721
1722 // must be called with mLock held
1723 void AudioTrack::updateLatency_l()
1724 {
1725 status_t status = AudioSystem::getLatency(mOutput, &mAfLatency);
1726 if (status != NO_ERROR) {
1727 ALOGW("%s(%d): getLatency(%d) failed status %d", __func__, mPortId, mOutput, status);
1728 } else {
1729 // FIXME don't believe this lie
1730 mLatency = mAfLatency + (1000LL * mFrameCount) / mSampleRate;
1731 }
1732 }
1733
1734 // TODO Move this macro to a common header file for enum to string conversion in audio framework.
1735 #define MEDIA_CASE_ENUM(name) case name: return #name
1736 const char * AudioTrack::convertTransferToText(transfer_type transferType) {
1737 switch (transferType) {
1738 MEDIA_CASE_ENUM(TRANSFER_DEFAULT);
1739 MEDIA_CASE_ENUM(TRANSFER_CALLBACK);
1740 MEDIA_CASE_ENUM(TRANSFER_OBTAIN);
1741 MEDIA_CASE_ENUM(TRANSFER_SYNC);
1742 MEDIA_CASE_ENUM(TRANSFER_SHARED);
1743 MEDIA_CASE_ENUM(TRANSFER_SYNC_NOTIF_CALLBACK);
1744 default:
1745 return "UNRECOGNIZED";
1746 }
1747 }
1748
1749 status_t AudioTrack::createTrack_l()
1750 {
1751 status_t status;
1752
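// A live connection to AudioFlinger is required; everything derived from any
// previous IAudioTrack (control block, shared memory, proxies) is replaced below.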
1753 const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
1754 if (audioFlinger == 0) {
1755 return logIfErrorAndReturnStatus(
1756 DEAD_OBJECT, StringPrintf("%s(%d): Could not get audioflinger", __func__, mPortId));
1757 }
1758
1759 {
1760 // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
1761 // After fast request is denied, we will request again if IAudioTrack is re-created.
1762 // Client can only express a preference for FAST. Server will perform additional tests.
1763 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1764 // either of these use cases:
1765 // use case 1: shared buffer
1766 bool sharedBuffer = mSharedBuffer != 0;
1767 bool transferAllowed =
1768 // use case 2: callback transfer mode
1769 (mTransfer == TRANSFER_CALLBACK) ||
1770 // use case 3: obtain/release mode
1771 (mTransfer == TRANSFER_OBTAIN) ||
1772 // use case 4: synchronous write
1773 ((mTransfer == TRANSFER_SYNC || mTransfer == TRANSFER_SYNC_NOTIF_CALLBACK)
1774 && mThreadCanCallJava);
1775
1776 bool fastAllowed = sharedBuffer || transferAllowed;
1777 if (!fastAllowed) {
1778 ALOGW("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by client,"
1779 " not shared buffer and transfer = %s",
1780 __func__, mPortId,
1781 convertTransferToText(mTransfer));
1782 mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1783 }
1784 }
1785
1786 IAudioFlinger::CreateTrackInput input;
1787 if (mOriginalStreamType != AUDIO_STREAM_DEFAULT) {
1788 // Legacy: This is based on original parameters even if the track is recreated.
1789 input.attr = AudioSystem::streamTypeToAttributes(mOriginalStreamType);
1790 } else {
1791 input.attr = mAttributes;
1792 }
1793 input.config = AUDIO_CONFIG_INITIALIZER;
1794 input.config.sample_rate = mSampleRate;
1795 input.config.channel_mask = mChannelMask;
1796 input.config.format = mFormat;
1797 input.config.offload_info = mOffloadInfoCopy;
1798 input.clientInfo.attributionSource = mClientAttributionSource;
1799 input.clientInfo.clientTid = -1;
1800 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1801 // It is currently meaningless to request SCHED_FIFO for a Java thread. Even if the
1802 // application-level code follows all non-blocking design rules, the language runtime
1803 // doesn't also follow those rules, so the thread will not benefit overall.
1804 if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
1805 input.clientInfo.clientTid = mAudioTrackThread->getTid();
1806 }
1807 }
1808 input.sharedBuffer = mSharedBuffer;
1809 input.notificationsPerBuffer = mNotificationsPerBufferReq;
1810 input.speed = 1.0;
1811 if (audio_has_proportional_frames(mFormat) && mSharedBuffer == 0 &&
1812 (mFlags & AUDIO_OUTPUT_FLAG_FAST) == 0) {
1813 input.speed = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
1814 max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
1815 }
1816 input.flags = mFlags;
1817 input.frameCount = mReqFrameCount;
1818 input.notificationFrameCount = mNotificationFramesReq;
1819 input.selectedDeviceId = mSelectedDeviceId;
1820 input.sessionId = mSessionId;
1821 input.audioTrackCallback = mAudioTrackCallback;
1822
1823 media::CreateTrackResponse response;
1824 auto aidlInput = input.toAidl();
1825 if (!aidlInput.ok()) {
1826 return logIfErrorAndReturnStatus(
1827 BAD_VALUE, StringPrintf("%s(%d): Could not create track due to invalid input",
1828 __func__, mPortId));
1829 }
1830 status = audioFlinger->createTrack(aidlInput.value(), response);
1831
1832 IAudioFlinger::CreateTrackOutput output{};
1833 if (status == NO_ERROR) {
1834 auto trackOutput = IAudioFlinger::CreateTrackOutput::fromAidl(response);
1835 if (!trackOutput.ok()) {
1836 return logIfErrorAndReturnStatus(
1837 BAD_VALUE,
1838 StringPrintf("%s(%d): Could not create track output due to invalid response",
1839 __func__, mPortId));
1840 }
1841 output = trackOutput.value();
1842 }
1843
1844 if (status != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
1845 return logIfErrorAndReturnStatus(
1846 status == NO_ERROR ? INVALID_OPERATION : status, // device not ready
1847 StringPrintf("%s(%d): AudioFlinger could not create track, status: %d output %d",
1848 __func__, mPortId, status, output.outputId));
1849 }
1850 ALOG_ASSERT(output.audioTrack != 0);
1851
1852 mFrameCount = output.frameCount;
1853 mNotificationFramesAct = (uint32_t)output.notificationFrameCount;
1854 mRoutedDeviceIds = output.selectedDeviceIds;
1855 mSessionId = output.sessionId;
1856 mStreamType = output.streamType;
1857
1858 mSampleRate = output.sampleRate;
1859 if (mOriginalSampleRate == 0) {
1860 mOriginalSampleRate = mSampleRate;
1861 }
1862
1863 mAfFrameCount = output.afFrameCount;
1864 mAfSampleRate = output.afSampleRate;
1865 mAfChannelCount = audio_channel_count_from_out_mask(output.afChannelMask);
1866 mAfFormat = output.afFormat;
1867 mAfLatency = output.afLatencyMs;
1868 mAfTrackFlags = output.afTrackFlags;
1869
1870 mLatency = mAfLatency + (1000LL * mFrameCount) / mSampleRate;
1871
1872 // AudioFlinger now owns the reference to the I/O handle,
1873 // so we are no longer responsible for releasing it.
1874
1875 // FIXME compare to AudioRecord
1876 std::optional<media::SharedFileRegion> sfr;
1877 output.audioTrack->getCblk(&sfr);
1878 auto iMemory = aidl2legacy_NullableSharedFileRegion_IMemory(sfr);
1879 if (!iMemory.ok() || iMemory.value() == 0) {
1880 return logIfErrorAndReturnStatus(
1881 FAILED_TRANSACTION,
1882 StringPrintf("%s(%d): Could not get control block", __func__, mPortId));
1883 }
1884 sp<IMemory> iMem = iMemory.value();
1885 // TODO: Using unsecurePointer() has some associated security pitfalls
1886 // (see declaration for details).
1887 // Either document why it is safe in this case or address the
1888 // issue (e.g. by copying).
1889 void *iMemPointer = iMem->unsecurePointer();
1890 if (iMemPointer == NULL) {
1891 return logIfErrorAndReturnStatus(
1892 FAILED_TRANSACTION,
1893 StringPrintf("%s(%d): Could not get control block pointer", __func__, mPortId));
1894 }
1895 // invariant that mAudioTrack != 0 is true only after set() returns successfully
1896 if (mAudioTrack != 0) {
1897 IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
1898 mDeathNotifier.clear();
1899 }
1900 mAudioTrack = output.audioTrack;
1901 mCblkMemory = iMem;
1902 IPCThreadState::self()->flushCommands();
1903
1904 audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1905 mCblk = cblk;
1906
1907 mAwaitBoost = false;
1908 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1909 if (output.flags & AUDIO_OUTPUT_FLAG_FAST) {
1910 ALOGI("%s(%d): AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu -> %zu",
1911 __func__, mPortId, mReqFrameCount, mFrameCount);
1912 if (!mThreadCanCallJava) {
1913 mAwaitBoost = true;
1914 }
1915 } else {
1916 ALOGV("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu",
1917 __func__, mPortId, mReqFrameCount, mFrameCount);
1918 }
1919 }
1920 mFlags = output.flags;
1921
1922 //mOutput != output includes the case where mOutput == AUDIO_IO_HANDLE_NONE for first creation
1923 if (mDeviceCallback != 0) {
1924 if (mOutput != AUDIO_IO_HANDLE_NONE) {
1925 AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
1926 }
1927 AudioSystem::addAudioDeviceCallback(this, output.outputId, output.portId);
1928 }
1929
1930 mPortId = output.portId;
1931 // notify the upper layers about the new portId
1932 triggerPortIdUpdate_l();
1933
1934 // We retain a copy of the I/O handle, but don't own the reference
1935 mOutput = output.outputId;
1936 mRefreshRemaining = true;
1937
1938 // Starting address of buffers in shared memory. If there is a shared buffer, buffers
1939 // is the value of pointer() for the shared buffer, otherwise buffers points
1940 // immediately after the control block. This address is for the mapping within client
1941 // address space. AudioFlinger::TrackBase::mBuffer is for the server address space.
1942 void* buffers;
1943 if (mSharedBuffer == 0) {
1944 buffers = cblk + 1;
1945 } else {
1946 // TODO: Using unsecurePointer() has some associated security pitfalls
1947 // (see declaration for details).
1948 // Either document why it is safe in this case or address the
1949 // issue (e.g. by copying).
1950 buffers = mSharedBuffer->unsecurePointer();
1951 if (buffers == NULL) {
1952 return logIfErrorAndReturnStatus(
1953 FAILED_TRANSACTION,
1954 StringPrintf("%s(%d): Could not get buffer pointer", __func__, mPortId));
1955 }
1956 }
1957
1958 mAudioTrack->attachAuxEffect(mAuxEffectId, &status);
1959
1960 // If IAudioTrack is re-created, don't let the requested frameCount
1961 // decrease. This can confuse clients that cache frameCount().
1962 if (mFrameCount > mReqFrameCount) {
1963 mReqFrameCount = mFrameCount;
1964 }
1965
1966 // reset server position to 0 as we have new cblk.
1967 mServer = 0;
1968
1969 // update proxy
1970 if (mSharedBuffer == 0) {
1971 mStaticProxy.clear();
1972 mProxy = new AudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
1973 } else {
1974 mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
1975 mProxy = mStaticProxy;
1976 }
1977
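// Re-apply the cached client-side volume, send level and pitch-adjusted sample
// rate / playback rate to the newly created proxy.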
1978 mProxy->setVolumeLR(gain_minifloat_pack(
1979 gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
1980 gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));
1981
1982 mProxy->setSendLevel(mSendLevel);
1983 const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
1984 const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
1985 const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
1986 mProxy->setSampleRate(effectiveSampleRate);
1987
1988 AudioPlaybackRate playbackRateTemp = mPlaybackRate;
1989 playbackRateTemp.mSpeed = effectiveSpeed;
1990 playbackRateTemp.mPitch = effectivePitch;
1991 mProxy->setPlaybackRate(playbackRateTemp);
1992 mProxy->setMinimum(mNotificationFramesAct);
1993
1994 if (mDualMonoMode != AUDIO_DUAL_MONO_MODE_OFF) {
1995 setDualMonoMode_l(mDualMonoMode);
1996 }
1997 if (mAudioDescriptionMixLeveldB != -std::numeric_limits<float>::infinity()) {
1998 setAudioDescriptionMixLevel_l(mAudioDescriptionMixLeveldB);
1999 }
2000
2001 mDeathNotifier = new DeathNotifier(this);
2002 IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
2003
2004 // This is the first log sent from the AudioTrack client.
2005 // The creation of the audio track by AudioFlinger (in the code above)
2006 // is the first log of the AudioTrack and must be present before
2007 // any AudioTrack client logs will be accepted.
2008
2009 mMetricsId = std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK) + std::to_string(mPortId);
2010 mediametrics::LogItem(mMetricsId)
2011 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE)
2012 // the following are immutable
2013 .set(AMEDIAMETRICS_PROP_FLAGS, toString(mFlags).c_str())
2014 .set(AMEDIAMETRICS_PROP_ORIGINALFLAGS, toString(mOrigFlags).c_str())
2015 .set(AMEDIAMETRICS_PROP_SESSIONID, (int32_t)mSessionId)
2016 .set(AMEDIAMETRICS_PROP_LOGSESSIONID, mLogSessionId)
2017 .set(AMEDIAMETRICS_PROP_PLAYERIID, mPlayerIId)
2018 .set(AMEDIAMETRICS_PROP_TRACKID, mPortId) // dup from key
2019 .set(AMEDIAMETRICS_PROP_CONTENTTYPE, toString(mAttributes.content_type).c_str())
2020 .set(AMEDIAMETRICS_PROP_USAGE, toString(mAttributes.usage).c_str())
2021 .set(AMEDIAMETRICS_PROP_THREADID, (int32_t)output.outputId)
2022 .set(AMEDIAMETRICS_PROP_SELECTEDDEVICEID, (int32_t)mSelectedDeviceId)
2023 .set(AMEDIAMETRICS_PROP_ROUTEDDEVICEID, (int32_t)(getFirstDeviceId(mRoutedDeviceIds)))
2024 .set(AMEDIAMETRICS_PROP_ROUTEDDEVICEIDS, toString(mRoutedDeviceIds).c_str())
2025 .set(AMEDIAMETRICS_PROP_ENCODING, toString(mFormat).c_str())
2026 .set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
2027 .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mFrameCount)
2028 // the following are NOT immutable
2029 .set(AMEDIAMETRICS_PROP_VOLUME_LEFT, (double)mVolume[AUDIO_INTERLEAVE_LEFT])
2030 .set(AMEDIAMETRICS_PROP_VOLUME_RIGHT, (double)mVolume[AUDIO_INTERLEAVE_RIGHT])
2031 .set(AMEDIAMETRICS_PROP_STATE, stateToString(mState))
2032 .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)NO_ERROR)
2033 .set(AMEDIAMETRICS_PROP_AUXEFFECTID, (int32_t)mAuxEffectId)
2034 .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
2035 .set(AMEDIAMETRICS_PROP_PLAYBACK_SPEED, (double)mPlaybackRate.mSpeed)
2036 .set(AMEDIAMETRICS_PROP_PLAYBACK_PITCH, (double)mPlaybackRate.mPitch)
2037 .set(AMEDIAMETRICS_PROP_PREFIX_EFFECTIVE
2038 AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)effectiveSampleRate)
2039 .set(AMEDIAMETRICS_PROP_PREFIX_EFFECTIVE
2040 AMEDIAMETRICS_PROP_PLAYBACK_SPEED, (double)effectiveSpeed)
2041 .set(AMEDIAMETRICS_PROP_PREFIX_EFFECTIVE
2042 AMEDIAMETRICS_PROP_PLAYBACK_PITCH, (double)effectivePitch)
2043 .record();
2044
2045 // mSendLevel
2046 // mReqFrameCount?
2047 // mNotificationFramesAct, mNotificationFramesReq, mNotificationsPerBufferReq
2048 // mLatency, mAfLatency, mAfFrameCount, mAfSampleRate
2049
2050 }
2051
2052 // sp<IAudioTrack> track destructor will cause releaseOutput() to be called by AudioFlinger
2053 return logIfErrorAndReturnStatus(status, "");
2054 }
2055
2056 void AudioTrack::reportError(status_t status, const char *event, const char *message) const
2057 {
2058 if (status == NO_ERROR) return;
2059 // We report error on the native side because some callers do not come
2060 // from Java.
2061 // Ensure these variables are initialized in set().
2062 mediametrics::LogItem(AMEDIAMETRICS_KEY_AUDIO_TRACK_ERROR)
2063 .set(AMEDIAMETRICS_PROP_EVENT, event)
2064 .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)status)
2065 .set(AMEDIAMETRICS_PROP_STATUSMESSAGE, message)
2066 .set(AMEDIAMETRICS_PROP_ORIGINALFLAGS, toString(mOrigFlags).c_str())
2067 .set(AMEDIAMETRICS_PROP_SESSIONID, (int32_t)mSessionId)
2068 .set(AMEDIAMETRICS_PROP_CONTENTTYPE, toString(mAttributes.content_type).c_str())
2069 .set(AMEDIAMETRICS_PROP_USAGE, toString(mAttributes.usage).c_str())
2070 .set(AMEDIAMETRICS_PROP_SELECTEDDEVICEID, (int32_t)mSelectedDeviceId)
2071 .set(AMEDIAMETRICS_PROP_ENCODING, toString(mFormat).c_str())
2072 .set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
2073 // the following are NOT immutable
2074 // frame count is initially the requested frame count, but may be adjusted
2075 // by AudioFlinger after creation.
2076 .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mFrameCount)
2077 .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
2078 .set(AMEDIAMETRICS_PROP_PLAYBACK_SPEED, (double)mPlaybackRate.mSpeed)
2079 .set(AMEDIAMETRICS_PROP_PLAYBACK_PITCH, (double)mPlaybackRate.mPitch)
2080 .record();
2081 }
2082
2083 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
2084 {
2085 if (audioBuffer == NULL) {
2086 if (nonContig != NULL) {
2087 *nonContig = 0;
2088 }
2089 return BAD_VALUE;
2090 }
2091 if (mTransfer != TRANSFER_OBTAIN) {
2092 audioBuffer->frameCount = 0;
2093 audioBuffer->mSize = 0;
2094 audioBuffer->raw = NULL;
2095 if (nonContig != NULL) {
2096 *nonContig = 0;
2097 }
2098 return INVALID_OPERATION;
2099 }
2100
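// Translate waitCount into a timespec: -1 blocks indefinitely, 0 is non-blocking,
// and a positive count waits up to waitCount * WAIT_PERIOD_MS milliseconds in total.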
2101 const struct timespec *requested;
2102 struct timespec timeout;
2103 if (waitCount == -1) {
2104 requested = &ClientProxy::kForever;
2105 } else if (waitCount == 0) {
2106 requested = &ClientProxy::kNonBlocking;
2107 } else if (waitCount > 0) {
2108 time_t ms = WAIT_PERIOD_MS * (time_t) waitCount;
2109 timeout.tv_sec = ms / 1000;
2110 timeout.tv_nsec = (ms % 1000) * 1000000;
2111 requested = &timeout;
2112 } else {
2113 ALOGE("%s(%d): invalid waitCount %d", __func__, mPortId, waitCount);
2114 requested = NULL;
2115 }
2116 return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
2117 }
2118
2119 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
2120 struct timespec *elapsed, size_t *nonContig)
2121 {
2122 // previous and new IAudioTrack sequence numbers are used to detect track re-creation
2123 uint32_t oldSequence = 0;
2124
2125 Proxy::Buffer buffer;
2126 status_t status = NO_ERROR;
2127
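// Retry a bounded number of times: DEAD_OBJECT triggers track re-creation and
// NOT_ENOUGH_DATA triggers a restart before the buffer is requested again.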
2128 static const int32_t kMaxTries = 5;
2129 int32_t tryCounter = kMaxTries;
2130
2131 do {
2132 // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
2133 // keep them from going away if another thread re-creates the track during obtainBuffer()
2134 sp<AudioTrackClientProxy> proxy;
2135
2136 { // start of lock scope
2137 AutoMutex lock(mLock);
2138
2139 // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
2140 if (status == DEAD_OBJECT) {
2141 // re-create track, unless someone else has already done so
2142 if (mSequence == oldSequence) {
2143 status = restoreTrack_l("obtainBuffer");
2144 if (status != NO_ERROR) {
2145 buffer.mFrameCount = 0;
2146 buffer.mRaw = NULL;
2147 buffer.mNonContig = 0;
2148 break;
2149 }
2150 }
2151 }
2152 oldSequence = mSequence;
2153
2154 if (status == NOT_ENOUGH_DATA) {
2155 restartIfDisabled();
2156 }
2157
2158 // Keep the extra references
2159 mProxyObtainBufferRef = mProxy;
2160 proxy = mProxy;
2161 mCblkMemoryObtainBufferRef = mCblkMemory;
2162
2163 if (mState == STATE_STOPPING) {
2164 status = -EINTR;
2165 buffer.mFrameCount = 0;
2166 buffer.mRaw = NULL;
2167 buffer.mNonContig = 0;
2168 break;
2169 }
2170
2171 // Non-blocking if track is stopped or paused
2172 if (mState != STATE_ACTIVE) {
2173 requested = &ClientProxy::kNonBlocking;
2174 }
2175
2176 } // end of lock scope
2177
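// Ask the proxy for a buffer outside the lock; this may block according to the
// requested timeout (indefinitely, for a bounded time, or not at all) and is
// forced non-blocking when the track is not ACTIVE.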
2178 buffer.mFrameCount = audioBuffer->frameCount;
2179 // FIXME starts the requested timeout and elapsed over from scratch
2180 status = proxy->obtainBuffer(&buffer, requested, elapsed);
2181 } while (((status == DEAD_OBJECT) || (status == NOT_ENOUGH_DATA)) && (tryCounter-- > 0));
2182
2183 audioBuffer->frameCount = buffer.mFrameCount;
2184 audioBuffer->mSize = buffer.mFrameCount * mFrameSize;
2185 audioBuffer->raw = buffer.mRaw;
2186 audioBuffer->sequence = oldSequence;
2187 if (nonContig != NULL) {
2188 *nonContig = buffer.mNonContig;
2189 }
2190 return status;
2191 }
2192
2193 void AudioTrack::releaseBuffer(const Buffer* audioBuffer)
2194 {
2195 // FIXME add error checking on mode, by adding an internal version
2196 if (mTransfer == TRANSFER_SHARED) {
2197 return;
2198 }
2199
2200 size_t stepCount = audioBuffer->mSize / mFrameSize;
2201 if (stepCount == 0) {
2202 return;
2203 }
2204
2205 Proxy::Buffer buffer;
2206 buffer.mFrameCount = stepCount;
2207 buffer.mRaw = audioBuffer->raw;
2208
2209 sp<IMemory> tempMemory;
2210 sp<AudioTrackClientProxy> tempProxy;
2211 AutoMutex lock(mLock);
2212 if (audioBuffer->sequence != mSequence) {
2213 // This Buffer came from a different IAudioTrack instance, so ignore the releaseBuffer
2214 ALOGD("%s is no-op due to IAudioTrack sequence mismatch %u != %u",
2215 __func__, audioBuffer->sequence, mSequence);
2216 return;
2217 }
2218 mReleased += stepCount;
2219 mInUnderrun = false;
2220 mProxyObtainBufferRef->releaseBuffer(&buffer);
2221 // The extra reference of shared memory and proxy from `obtainBuffer` is not used after
2222 // calling `releaseBuffer`. Move the extra reference to a temp strong pointer so that it
2223 // will be cleared outside `releaseBuffer`.
2224 tempMemory = std::move(mCblkMemoryObtainBufferRef);
2225 tempProxy = std::move(mProxyObtainBufferRef);
2226
2227 // restart track if it was disabled by audioflinger due to previous underrun
2228 restartIfDisabled();
2229 }
2230
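// Clear CBLK_DISABLED and restart the server-side track if AudioFlinger disabled it
// after an underrun while the client still considers itself ACTIVE.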
2231 void AudioTrack::restartIfDisabled()
2232 {
2233 int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
2234 if ((mState == STATE_ACTIVE) && (flags & CBLK_DISABLED)) {
2235 ALOGW("%s(%d): releaseBuffer() track %p disabled due to previous underrun, restarting",
2236 __func__, mPortId, this);
2237 // FIXME ignoring status
2238 status_t status;
2239 mAudioTrack->start(&status);
2240 }
2241 }
2242
2243 // -------------------------------------------------------------------------
2244
2245 ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
2246 {
2247 if (mTransfer != TRANSFER_SYNC && mTransfer != TRANSFER_SYNC_NOTIF_CALLBACK) {
2248 return INVALID_OPERATION;
2249 }
2250
2251 if (isDirect()) {
2252 AutoMutex lock(mLock);
2253 int32_t flags = android_atomic_and(
2254 ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
2255 &mCblk->mFlags);
2256 if (flags & CBLK_INVALID) {
2257 return DEAD_OBJECT;
2258 }
2259 }
2260
2261 if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
2262 // Validation: the user is most likely passing an error code, and it would
2263 // make the return value ambiguous (actualSize vs error).
2264 ALOGE("%s(%d): AudioTrack::write(buffer=%p, size=%zu (%zd)",
2265 __func__, mPortId, buffer, userSize, userSize);
2266 return BAD_VALUE;
2267 }
2268
2269 size_t written = 0;
2270 Buffer audioBuffer;
2271
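// Copy the caller's data into the track buffer one obtainBuffer()/releaseBuffer()
// cycle at a time; stop early on error or, in non-blocking mode, when no space is
// currently available.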
2272 while (userSize >= mFrameSize) {
2273 audioBuffer.frameCount = userSize / mFrameSize;
2274
2275 status_t err = obtainBuffer(&audioBuffer,
2276 blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
2277 if (err < 0) {
2278 if (written > 0) {
2279 break;
2280 }
2281 if (err == TIMED_OUT || err == -EINTR) {
2282 err = WOULD_BLOCK;
2283 }
2284 return ssize_t(err);
2285 }
2286
2287 size_t toWrite = audioBuffer.size();
2288 memcpy(audioBuffer.raw, buffer, toWrite);
2289 buffer = ((const char *) buffer) + toWrite;
2290 userSize -= toWrite;
2291 written += toWrite;
2292
2293 releaseBuffer(&audioBuffer);
2294 }
2295
2296 if (written > 0) {
2297 mFramesWritten += written / mFrameSize;
2298
2299 if (mTransfer == TRANSFER_SYNC_NOTIF_CALLBACK) {
2300 const sp<AudioTrackThread> t = mAudioTrackThread;
2301 if (t != 0) {
2302 // wakes up the playback thread, which will call back the client for
2303 // more data (with EVENT_CAN_WRITE_MORE_DATA) in processAudioBuffer()
2304 t->wake();
2305 }
2306 }
2307 }
2308
2309 return written;
2310 }
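// A minimal usage sketch (hypothetical client code, not part of this file): for a
// streaming track `track` created with TRANSFER_SYNC and a PCM buffer `pcm` of
// `bytes` bytes, a blocking write returns the number of bytes consumed or a
// negative status.
//
//     ssize_t written = track->write(pcm, bytes, true /* blocking */);
//     if (written < 0) { /* handle WOULD_BLOCK, DEAD_OBJECT, BAD_VALUE, ... */ }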
2311
2312 // -------------------------------------------------------------------------
2313
2314 nsecs_t AudioTrack::processAudioBuffer()
2315 {
2316 // Currently the AudioTrack thread is not created if there are no callbacks.
2317 // Would it ever make sense to run the thread, even without callbacks?
2318 // If so, then replace this by checks at each use for mCallback != NULL.
2319 LOG_ALWAYS_FATAL_IF(mCblk == NULL);
2320 mLock.lock();
2321 sp<IAudioTrackCallback> callback = mCallback.promote();
2322 if (!callback) {
2323 mCallback = nullptr;
2324 mLock.unlock();
2325 return NS_NEVER;
2326 }
2327 if (mAwaitBoost) {
2328 mAwaitBoost = false;
2329 mLock.unlock();
2330 static const int32_t kMaxTries = 5;
2331 int32_t tryCounter = kMaxTries;
2332 uint32_t pollUs = 10000;
2333 do {
2334 int policy = sched_getscheduler(0) & ~SCHED_RESET_ON_FORK;
2335 if (policy == SCHED_FIFO || policy == SCHED_RR) {
2336 break;
2337 }
2338 usleep(pollUs);
2339 pollUs <<= 1;
2340 } while (tryCounter-- > 0);
2341 if (tryCounter < 0) {
2342 ALOGE("%s(%d): did not receive expected priority boost on time",
2343 __func__, mPortId);
2344 }
2345 // Run again immediately
2346 return 0;
2347 }
2348
2349 // Can only reference mCblk while locked
2350 int32_t flags = android_atomic_and(
2351 ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
2352
2353 const bool isOffloaded = isOffloaded_l();
2354 const bool isOffloadedOrDirect = isOffloadedOrDirect_l();
2355 // Check for track invalidation
2356 if (flags & CBLK_INVALID) {
2357 // for offloaded tracks restoreTrack_l() will just update the sequence and clear
2358 // AudioSystem cache. We should not exit here but after calling the callback so
2359 // that the upper layers can recreate the track
2360 if (!isOffloadedOrDirect || (mSequence == mObservedSequence)) {
2361 status_t status __unused = restoreTrack_l("processAudioBuffer");
2362 // FIXME unused status
2363 // after restoration, continue below to make sure that the loop and buffer events
2364 // are notified because they have been cleared from mCblk->mFlags above.
2365 }
2366 }
2367
2368 bool waitStreamEnd = mState == STATE_STOPPING;
2369 bool active = mState == STATE_ACTIVE;
2370
2371 // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
2372 bool newUnderrun = false;
2373 if (flags & CBLK_UNDERRUN) {
2374 #if 0
2375 // Currently in shared buffer mode, when the server reaches the end of buffer,
2376 // the track stays active in continuous underrun state. It's up to the application
2377 // to pause or stop the track, or set the position to a new offset within buffer.
2378 // This was some experimental code to auto-pause on underrun. Keeping it here
2379 // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
2380 if (mTransfer == TRANSFER_SHARED) {
2381 mState = STATE_PAUSED;
2382 active = false;
2383 }
2384 #endif
2385 if (!mInUnderrun) {
2386 mInUnderrun = true;
2387 newUnderrun = true;
2388 }
2389 }
2390
2391 // Get current position of server
2392 Modulo<uint32_t> position(updateAndGetPosition_l());
2393
2394 // Manage marker callback
2395 bool markerReached = false;
2396 Modulo<uint32_t> markerPosition(mMarkerPosition);
2397 // uses 32 bit wraparound for comparison with position.
2398 if (!mMarkerReached && markerPosition.value() > 0 && position >= markerPosition) {
2399 mMarkerReached = markerReached = true;
2400 }
2401
2402 // Determine number of new position callback(s) that will be needed, while locked
2403 size_t newPosCount = 0;
2404 Modulo<uint32_t> newPosition(mNewPosition);
2405 uint32_t updatePeriod = mUpdatePeriod;
2406 // FIXME fails for wraparound, need 64 bits
2407 if (updatePeriod > 0 && position >= newPosition) {
2408 newPosCount = ((position - newPosition).value() / updatePeriod) + 1;
2409 mNewPosition += updatePeriod * newPosCount;
2410 }
2411
2412 // Cache other fields that will be needed soon
2413 uint32_t sampleRate = mSampleRate;
2414 float speed = mPlaybackRate.mSpeed;
2415 const uint32_t notificationFrames = mNotificationFramesAct;
2416 if (mRefreshRemaining) {
2417 mRefreshRemaining = false;
2418 mRemainingFrames = notificationFrames;
2419 mRetryOnPartialBuffer = false;
2420 }
2421 size_t misalignment = mProxy->getMisalignment();
2422 uint32_t sequence = mSequence;
2423 sp<AudioTrackClientProxy> proxy = mProxy;
2424
2425 // Determine the number of new loop callback(s) that will be needed, while locked.
2426 uint32_t loopCountNotifications = 0;
2427 uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
2428
2429 if (mLoopCount > 0) {
2430 int loopCount;
2431 size_t bufferPosition;
2432 mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
2433 loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
2434 loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
2435 mLoopCountNotified = loopCount; // discard any excess notifications
2436 } else if (mLoopCount < 0) {
2437 // FIXME: We're not accurate with notification count and position with infinite looping
2438 // since loopCount from server side will always return -1 (we could decrement it).
2439 size_t bufferPosition = mStaticProxy->getBufferPosition();
2440 loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
2441 loopPeriod = mLoopEnd - bufferPosition;
2442 } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
2443 size_t bufferPosition = mStaticProxy->getBufferPosition();
2444 loopPeriod = mFrameCount - bufferPosition;
2445 }
2446
2447 // These fields don't need to be cached, because they are assigned only by set():
2448 // mTransfer, mCallback, mUserData, mFormat, mFrameSize, mFlags
2449 // mFlags is also assigned by createTrack_l(), but not the bit we care about.
2450
2451 mLock.unlock();
2452
2453 // get anchor time to account for callbacks.
2454 const nsecs_t timeBeforeCallbacks = systemTime();
2455
2456 if (waitStreamEnd) {
2457 // FIXME: Instead of blocking in proxy->waitStreamEndDone(), Callback thread
2458 // should wait on proxy futex and handle CBLK_STREAM_END_DONE within this function
2459 // (and make sure we don't call back for more data while we're stopping).
2460 // This helps with position, marker notifications, and track invalidation.
2461 struct timespec timeout;
2462 timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
2463 timeout.tv_nsec = 0;
2464
2465 // Use timestamp progress to safeguard we don't falsely time out.
2466 AudioTimestamp timestamp{};
2467 const bool isTimestampValid = getTimestamp(timestamp) == OK;
2468 const auto frameCount = isTimestampValid ? timestamp.mPosition : 0;
2469
2470 status_t status = proxy->waitStreamEndDone(&timeout);
2471 switch (status) {
2472 case TIMED_OUT:
2473 if (isTimestampValid
2474 && getTimestamp(timestamp) == OK && frameCount != timestamp.mPosition) {
2475 ALOGD("%s: waitStreamEndDone retrying", __func__);
2476 break; // we retry again (and recheck possible state change).
2477 }
2478 [[fallthrough]];
2479 case NO_ERROR:
2480 case DEAD_OBJECT:
2481 if (status != DEAD_OBJECT) {
2482 // for DEAD_OBJECT, we do not send an EVENT_STREAM_END after stop();
2483 // instead, the application should handle the EVENT_NEW_IAUDIOTRACK.
2484 callback->onStreamEnd();
2485 }
2486 {
2487 AutoMutex lock(mLock);
2488 // The previously assigned value of waitStreamEnd is no longer valid,
2489 // since the mutex has been unlocked and either the callback handler
2490 // or another thread could have re-started the AudioTrack during that time.
2491 waitStreamEnd = mState == STATE_STOPPING;
2492 if (waitStreamEnd) {
2493 mState = STATE_STOPPED;
2494 mReleased = 0;
2495 }
2496 }
2497 if (waitStreamEnd && status != DEAD_OBJECT) {
2498 ALOGV("%s: waitStreamEndDone complete", __func__);
2499 return NS_INACTIVE;
2500 }
2501 break;
2502 }
2503 return 0;
2504 }
2505
2506 // perform callbacks while unlocked
2507 if (newUnderrun) {
2508 callback->onUnderrun();
2509 }
2510 while (loopCountNotifications > 0) {
2511 --loopCountNotifications;
2512 callback->onLoopEnd(mLoopCount > 0 ? loopCountNotifications + mLoopCountNotified : -1);
2513 }
2514 if (flags & CBLK_BUFFER_END) {
2515 callback->onBufferEnd();
2516 }
2517 if (markerReached) {
2518 callback->onMarker(markerPosition.value());
2519 }
2520 while (newPosCount > 0) {
2521 callback->onNewPos(newPosition.value());
2522 newPosition += updatePeriod;
2523 newPosCount--;
2524 }
2525
2526 if (mObservedSequence != sequence) {
2527 mObservedSequence = sequence;
2528 callback->onNewIAudioTrack();
2529 // for offloaded tracks, just wait for the upper layers to recreate the track
2530 if (isOffloadedOrDirect) {
2531 return NS_INACTIVE;
2532 }
2533 }
2534
2535 // if inactive, then don't run me again until re-started
2536 if (!active) {
2537 return NS_INACTIVE;
2538 }
2539
2540 // Compute the estimated time until the next timed event (position, markers, loops)
2541 // FIXME only for non-compressed audio
2542 uint32_t minFrames = ~0;
2543 if (!markerReached && position < markerPosition) {
2544 minFrames = (markerPosition - position).value();
2545 }
2546 if (loopPeriod > 0 && loopPeriod < minFrames) {
2547 // loopPeriod is already adjusted for actual position.
2548 minFrames = loopPeriod;
2549 }
2550 if (updatePeriod > 0) {
2551 minFrames = min(minFrames, (newPosition - position).value());
2552 }
2553
2554 // If > 0, poll periodically to recover from a stuck server. A good value is 2.
2555 static const uint32_t kPoll = 0;
2556 if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
2557 minFrames = kPoll * notificationFrames;
2558 }
2559
2560 // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
2561 static const nsecs_t kWaitPeriodNs = WAIT_PERIOD_MS * 1000000LL;
2562 const nsecs_t timeAfterCallbacks = systemTime();
2563
2564 // Convert frame units to time units
2565 nsecs_t ns = NS_WHENEVER;
2566 if (minFrames != (uint32_t) ~0) {
2567 // AudioFlinger consumption of client data may be irregular when coming out of device
2568 // standby since the kernel buffers require filling. This is throttled to no more than 2x
2569 // the expected rate in the MixerThread. Hence, we reduce the estimated time to wait by one
2570 // half (but no more than half a second) to improve callback accuracy during these temporary
2571 // data surges.
2572 const nsecs_t estimatedNs = framesToNanoseconds(minFrames, sampleRate, speed);
2573 constexpr nsecs_t maxThrottleCompensationNs = 500000000LL;
2574 ns = estimatedNs - min(estimatedNs / 2, maxThrottleCompensationNs) + kWaitPeriodNs;
2575 ns -= (timeAfterCallbacks - timeBeforeCallbacks); // account for callback time
2576 // TODO: Should we warn if the callback time is too long?
2577 if (ns < 0) ns = 0;
2578 }
2579
2580 // If not supplying data by EVENT_MORE_DATA or EVENT_CAN_WRITE_MORE_DATA, then we're done
2581 if (mTransfer != TRANSFER_CALLBACK && mTransfer != TRANSFER_SYNC_NOTIF_CALLBACK) {
2582 return ns;
2583 }
2584
2585 // EVENT_MORE_DATA callback handling.
2586 // Timing for linear pcm audio data formats can be derived directly from the
2587 // buffer fill level.
2588 // Timing for compressed data is not directly available from the buffer fill level,
2589 // rather indirectly from waiting for blocking mode callbacks or waiting for obtain()
2590 // to return a certain fill level.
2591
2592 struct timespec timeout;
2593 const struct timespec *requested = &ClientProxy::kForever;
2594 if (ns != NS_WHENEVER) {
2595 timeout.tv_sec = ns / 1000000000LL;
2596 timeout.tv_nsec = ns % 1000000000LL;
2597 ALOGV("%s(%d): timeout %ld.%03d",
2598 __func__, mPortId, timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
2599 requested = &timeout;
2600 }
2601
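// Fill up to mRemainingFrames by repeatedly obtaining a buffer and asking the
// callback (EVENT_MORE_DATA / EVENT_CAN_WRITE_MORE_DATA) to provide data for it.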
2602 size_t writtenFrames = 0;
2603 while (mRemainingFrames > 0) {
2604
2605 Buffer audioBuffer;
2606 audioBuffer.frameCount = mRemainingFrames;
2607 size_t nonContig;
2608 status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
2609 LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
2610 "%s(%d): obtainBuffer() err=%d frameCount=%zu",
2611 __func__, mPortId, err, audioBuffer.frameCount);
2612 requested = &ClientProxy::kNonBlocking;
2613 size_t avail = audioBuffer.frameCount + nonContig;
2614 ALOGV("%s(%d): obtainBuffer(%u) returned %zu = %zu + %zu err %d",
2615 __func__, mPortId, mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
2616 if (err != NO_ERROR) {
2617 if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
2618 (isOffloaded && (err == DEAD_OBJECT))) {
2619 // FIXME bug 25195759
2620 return 1000000;
2621 }
2622 ALOGE("%s(%d): Error %d obtaining an audio buffer, giving up.",
2623 __func__, mPortId, err);
2624 return NS_NEVER;
2625 }
2626
2627 if (mRetryOnPartialBuffer && audio_has_proportional_frames(mFormat)) {
2628 mRetryOnPartialBuffer = false;
2629 if (avail < mRemainingFrames) {
2630 if (ns > 0) { // account for obtain time
2631 const nsecs_t timeNow = systemTime();
2632 ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2633 }
2634
2635 // delayNs is first computed by the additional frames required in the buffer.
2636 nsecs_t delayNs = framesToNanoseconds(
2637 mRemainingFrames - avail, sampleRate, speed);
2638
2639 // afNs is the AudioFlinger mixer period in ns.
2640 const nsecs_t afNs = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
2641
2642 // If the AudioTrack is double buffered based on the AudioFlinger mixer period,
2643 // we may have a race if we wait based on the number of frames desired.
2644 // This is a possible issue with resampling and AAudio.
2645 //
2646 // The granularity of audioflinger processing is one mixer period; if
2647 // our wait time is less than one mixer period, wait at most half the period.
2648 if (delayNs < afNs) {
2649 delayNs = std::min(delayNs, afNs / 2);
2650 }
2651
2652 // adjust our ns wait by delayNs.
2653 if (ns < 0 /* NS_WHENEVER */ || delayNs < ns) {
2654 ns = delayNs;
2655 }
2656 return ns;
2657 }
2658 }
2659
2660 size_t reqSize = audioBuffer.size();
2661 if (mTransfer == TRANSFER_SYNC_NOTIF_CALLBACK) {
2662 // when notifying client it can write more data, pass the total size that can be
2663 // written in the next write() call, since it's not passed through the callback
2664 audioBuffer.mSize += nonContig;
2665 }
2666 const size_t writtenSize = (mTransfer == TRANSFER_CALLBACK)
2667 ? callback->onMoreData(audioBuffer)
2668 : callback->onCanWriteMoreData(audioBuffer);
2669 // Validate on returned size
2670 if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
2671 ALOGE("%s(%d): EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
2672 __func__, mPortId, reqSize, ssize_t(writtenSize));
2673 return NS_NEVER;
2674 }
2675
2676 if (writtenSize == 0) {
2677 if (mTransfer == TRANSFER_SYNC_NOTIF_CALLBACK) {
2678 // The callback EVENT_CAN_WRITE_MORE_DATA was processed in the JNI of
2679 // android.media.AudioTrack. The JNI is not using the callback to provide data,
2680 // it only signals to the Java client that it can provide more data, which
2681 // this track is ready to accept now.
2682 // The playback thread will be awakened at the next ::write()
2683 return NS_WHENEVER;
2684 }
2685 // The callback is done filling buffers
2686 // Keep this thread going to handle timed events and
2687 // still try to get more data in intervals of WAIT_PERIOD_MS
2688 // but don't just loop and block the CPU, so wait
2689
2690 // mCbf(EVENT_MORE_DATA, ...) might either
2691 // (1) Block until it can fill the buffer, returning 0 size on EOS.
2692 // (2) Block until it can fill the buffer, returning 0 data (silence) on EOS.
2693 // (3) Return 0 size when no data is available, does not wait for more data.
2694 //
2695 // (1) and (2) occurs with AudioPlayer/AwesomePlayer; (3) occurs with NuPlayer.
2696 // We try to compute the wait time to avoid a tight sleep-wait cycle,
2697 // especially for case (3).
2698 //
2699 // The decision to support (1) and (2) affect the sizing of mRemainingFrames
2700 // and this loop; whereas for case (3) we could simply check once with the full
2701 // buffer size and skip the loop entirely.
2702
2703 nsecs_t myns;
2704 if (!isOffloaded && audio_has_proportional_frames(mFormat)) {
2705 // time to wait based on buffer occupancy
2706 const nsecs_t datans = mRemainingFrames <= avail ? 0 :
2707 framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
2708 // audio flinger thread buffer size (TODO: adjust for fast tracks)
2709 // FIXME: use mAfFrameCountHAL instead of mAfFrameCount below for fast tracks.
2710 const nsecs_t afns = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
2711 // add half the AudioFlinger buffer time to avoid soaking CPU if datans is 0.
2712 myns = datans + (afns / 2);
2713 } else {
2714 // FIXME: This could ping quite a bit if the buffer isn't full.
2715 // Note that when mState is stopping we waitStreamEnd, so it never gets here.
2716 myns = kWaitPeriodNs;
2717 }
2718 if (ns > 0) { // account for obtain and callback time
2719 const nsecs_t timeNow = systemTime();
2720 ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2721 }
2722 if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
2723 ns = myns;
2724 }
2725 return ns;
2726 }
2727
2728 // releaseBuffer reads from audioBuffer.size
2729 audioBuffer.mSize = writtenSize;
2730
2731 size_t releasedFrames = writtenSize / mFrameSize;
2732 audioBuffer.frameCount = releasedFrames;
2733 mRemainingFrames -= releasedFrames;
2734 if (misalignment >= releasedFrames) {
2735 misalignment -= releasedFrames;
2736 } else {
2737 misalignment = 0;
2738 }
2739
2740 releaseBuffer(&audioBuffer);
2741 writtenFrames += releasedFrames;
2742
2743 // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
2744 // if the callback does not accept the full chunk
2745 if (writtenSize < reqSize) {
2746 continue;
2747 }
2748
2749 // There could be enough non-contiguous frames available to satisfy the remaining request
2750 if (mRemainingFrames <= nonContig) {
2751 continue;
2752 }
2753
2754 #if 0
2755 // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
2756 // sum <= notificationFrames. It replaces that series by at most two EVENT_MORE_DATA
2757 // that total to a sum == notificationFrames.
2758 if (0 < misalignment && misalignment <= mRemainingFrames) {
2759 mRemainingFrames = misalignment;
2760 return ((double)mRemainingFrames * 1100000000) / ((double)sampleRate * speed);
2761 }
2762 #endif
2763
2764 }
2765 if (writtenFrames > 0) {
2766 AutoMutex lock(mLock);
2767 mFramesWritten += writtenFrames;
2768 }
2769 mRemainingFrames = notificationFrames;
2770 mRetryOnPartialBuffer = true;
2771
2772 // A lot has transpired since ns was calculated, so run again immediately and re-calculate
2773 return 0;
2774 }
2775
2776 status_t AudioTrack::restoreTrack_l(const char *from, bool forceRestore)
2777 {
2778 status_t result = NO_ERROR; // logged: make sure to set this before returning.
2779 const int64_t beginNs = systemTime();
2780 mediametrics::Defer defer([&] {
2781 mediametrics::LogItem(mMetricsId)
2782 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_RESTORE)
2783 .set(AMEDIAMETRICS_PROP_EXECUTIONTIMENS, (int64_t)(systemTime() - beginNs))
2784 .set(AMEDIAMETRICS_PROP_STATE, stateToString(mState))
2785 .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)result)
2786 .set(AMEDIAMETRICS_PROP_WHERE, from)
2787 .record(); });
2788
2789 ALOGW("%s(%d): dead IAudioTrack, %s, creating a new one from %s()",
2790 __func__, mPortId, isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
2791 ++mSequence;
2792
2793 if (!forceRestore &&
2794 (isOffloadedOrDirect_l() || mDoNotReconnect)) {
2795 // FIXME re-creation of offloaded and direct tracks is not yet implemented;
2796 // Disabled since (1) timestamp correction is not implemented for non-PCM and
2797 // (2) We pre-empt existing direct tracks on resource constraint, so these tracks
2798 // shouldn't reconnect.
2799 result = DEAD_OBJECT;
2800 return result;
2801 }
2802
2803 // Save so we can return count since creation.
2804 mUnderrunCountOffset = getUnderrunCount_l();
2805
2806 // save the old static buffer position
2807 uint32_t staticPosition = 0;
2808 size_t bufferPosition = 0;
2809 int loopCount = 0;
2810 if (mStaticProxy != 0) {
2811 mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
2812 staticPosition = mStaticProxy->getPosition().unsignedValue();
2813 }
2814
2815 // save the old startThreshold and framecount
2816 const uint32_t originalStartThresholdInFrames = mProxy->getStartThresholdInFrames();
2817 const uint32_t originalFrameCount = mProxy->frameCount();
2818
2819 // See b/74409267. Connecting to a BT A2DP device supporting multiple codecs
2820 // causes a lot of churn on the service side, and it can reject starting
2821 // playback of a previously created track. May also apply to other cases.
2822 const int INITIAL_RETRIES = 10;
2823 const uint32_t RETRY_DELAY_US = 150000;
2824 int retries = INITIAL_RETRIES;
2825 retry:
2826 mFlags = mOrigFlags;
2827
2828 // If a new IAudioTrack is successfully created, createTrack_l() will modify the
2829 // following member variables: mAudioTrack, mCblkMemory and mCblk.
2830 // It will also delete the strong references on previous IAudioTrack and IMemory.
2831 // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
2832 result = createTrack_l();
2833
2834 if (result == NO_ERROR) {
2835 // take the frames that will be lost by track recreation into account in saved position
2836 // For streaming tracks, this is the amount we obtained from the user/client
2837 // (not the number actually consumed at the server - those are already lost).
2838 if (mStaticProxy == 0) {
2839 mPosition = mReleased;
2840 }
2841 // Continue playback from last known position and restore loop.
2842 if (mStaticProxy != 0) {
2843 if (loopCount != 0) {
2844 mStaticProxy->setBufferPositionAndLoop(bufferPosition,
2845 mLoopStart, mLoopEnd, loopCount);
2846 } else {
2847 mStaticProxy->setBufferPosition(bufferPosition);
2848 if (bufferPosition == mFrameCount) {
2849 ALOGD("%s(%d): restoring track at end of static buffer", __func__, mPortId);
2850 }
2851 }
2852 }
2853 // restore volume handler
2854 mVolumeHandler->forall([this](const VolumeShaper &shaper) -> VolumeShaper::Status {
2855 sp<VolumeShaper::Operation> operationToEnd =
2856 new VolumeShaper::Operation(shaper.mOperation);
2857 // TODO: Ideally we would restore to the exact xOffset position
2858 // as returned by getVolumeShaperState(), but we don't have that
2859 // information when restoring at the client unless we periodically poll
2860 // the server or create shared memory state.
2861 //
2862 // For now, we simply advance to the end of the VolumeShaper effect
2863 // if it has been started.
2864 if (shaper.isStarted()) {
2865 operationToEnd->setNormalizedTime(1.f);
2866 }
2867 media::VolumeShaperConfiguration config;
2868 shaper.mConfiguration->writeToParcelable(&config);
2869 media::VolumeShaperOperation operation;
2870 operationToEnd->writeToParcelable(&operation);
2871 status_t status;
2872 mAudioTrack->applyVolumeShaper(config, operation, &status);
2873 return status;
2874 });
2875
2876 // restore the original start threshold if different than frameCount.
2877 if (originalStartThresholdInFrames != originalFrameCount) {
2878 // Note: mProxy->setStartThresholdInFrames() call is in the Proxy
2879 // and does not trigger a restart.
2880 // (Also CBLK_DISABLED is not set, buffers are empty after track recreation).
2881 // Any start would be triggered on the mState == ACTIVE check below.
2882 const uint32_t currentThreshold =
2883 mProxy->setStartThresholdInFrames(originalStartThresholdInFrames);
2884 ALOGD_IF(originalStartThresholdInFrames != currentThreshold,
2885 "%s(%d) startThresholdInFrames changing from %u to %u",
2886 __func__, mPortId, originalStartThresholdInFrames, currentThreshold);
2887 }
2888 if (mState == STATE_ACTIVE) {
2889 mAudioTrack->start(&result);
2890 }
2891 // server resets to zero so we offset
2892 mFramesWrittenServerOffset =
2893 mStaticProxy.get() != nullptr ? staticPosition : mFramesWritten;
2894 mFramesWrittenAtRestore = mFramesWrittenServerOffset;
2895 }
2896 if (result != NO_ERROR) {
2897 ALOGW("%s(%d): failed status %d, retries %d", __func__, mPortId, result, retries);
2898 if (--retries > 0) {
2899 // leave time for an eventual race condition to clear before retrying
2900 usleep(RETRY_DELAY_US);
2901 goto retry;
2902 }
2903 // if no retries left, set invalid bit to force restoring at next occasion
2904 // and avoid inconsistent active state on client and server sides
2905 if (mCblk != nullptr) {
2906 android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
2907 }
2908 }
2909 return result;
2910 }
2911
2912 Modulo<uint32_t> AudioTrack::updateAndGetPosition_l()
2913 {
2914 // This is the sole place to read server consumed frames
2915 Modulo<uint32_t> newServer(mProxy->getPosition());
2916 const int32_t delta = (newServer - mServer).signedValue();
2917 // TODO There is controversy about whether there can be "negative jitter" in server position.
2918 // This should be investigated further, and if possible, it should be addressed.
2919 // A more definite failure mode is infrequent polling by client.
2920 // One could call (void)getPosition_l() in releaseBuffer(),
2921     // so mReleased and mPosition stay in lock-step as closely as possible.
2922 // That should ensure delta never goes negative for infrequent polling
2923 // unless the server has more than 2^31 frames in its buffer,
2924 // in which case the use of uint32_t for these counters has bigger issues.
2925 ALOGE_IF(delta < 0,
2926 "%s(%d): detected illegal retrograde motion by the server: mServer advanced by %d",
2927 __func__, mPortId, delta);
2928 mServer = newServer;
2929 if (delta > 0) { // avoid retrograde
2930 mPosition += delta;
2931 }
2932 return mPosition;
2933 }
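// Illustrative example of the Modulo arithmetic above (hypothetical values): if mServer
// was 0xFFFFFF00 and the proxy now reports 0x00000010, (newServer - mServer).signedValue()
// is 0x110, i.e. positive despite the 32-bit wrap, so mPosition advances by 0x110 frames;
// a negative signed delta would be logged and dropped.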
2934
2935 bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed)
2936 {
2937 updateLatency_l();
2938 // applicable for mixing tracks only (not offloaded or direct)
2939 if (mStaticProxy != 0) {
2940 return true; // static tracks do not have issues with buffer sizing.
2941 }
2942 const size_t minFrameCount =
2943 AudioSystem::calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate,
2944 sampleRate, speed /*, 0 mNotificationsPerBufferReq*/);
2945 const bool allowed = mFrameCount >= minFrameCount;
2946 ALOGD_IF(!allowed,
2947 "%s(%d): denied "
2948 "mAfLatency:%u mAfFrameCount:%zu mAfSampleRate:%u sampleRate:%u speed:%f "
2949 "mFrameCount:%zu < minFrameCount:%zu",
2950 __func__, mPortId,
2951 mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed,
2952 mFrameCount, minFrameCount);
2953 return allowed;
2954 }
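// Informal rationale for isSampleRateSpeedAllowed_l(): the required minimum frame count
// grows roughly in proportion to sampleRate * speed relative to the sink rate, so, for
// example, doubling the playback speed on a track created with a minimum-sized buffer is
// likely to be denied here unless the buffer was allocated with headroom for fast playback.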
2955
2956 status_t AudioTrack::setParameters(const String8& keyValuePairs)
2957 {
2958 AutoMutex lock(mLock);
2959 status_t status;
2960 mAudioTrack->setParameters(keyValuePairs.c_str(), &status);
2961 return status;
2962 }
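// The keyValuePairs string passed to setParameters() follows the AudioParameter convention
// of semicolon-separated "key=value" pairs, e.g. (illustrative only) "key1=value1;key2=value2".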
2963
2964 status_t AudioTrack::selectPresentation(int presentationId, int programId)
2965 {
2966 AutoMutex lock(mLock);
2967 AudioParameter param = AudioParameter();
2968 param.addInt(String8(AudioParameter::keyPresentationId), presentationId);
2969 param.addInt(String8(AudioParameter::keyProgramId), programId);
2970 ALOGV("%s(%d): PresentationId/ProgramId[%s]",
2971 __func__, mPortId, param.toString().c_str());
2972
2973 status_t status;
2974 mAudioTrack->setParameters(param.toString().c_str(), &status);
2975 return status;
2976 }
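// Example for selectPresentation() (illustrative): with presentationId == 2 and
// programId == 1, the parameter string sent to the server carries the two key/value
// pairs built above; the exact key names are defined by AudioParameter::keyPresentationId
// and AudioParameter::keyProgramId.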
2977
2978 VolumeShaper::Status AudioTrack::applyVolumeShaper(
2979 const sp<VolumeShaper::Configuration>& configuration,
2980 const sp<VolumeShaper::Operation>& operation)
2981 {
2982 const int64_t beginNs = systemTime();
2983 AutoMutex lock(mLock);
2984 mVolumeHandler->setIdIfNecessary(configuration);
2985 media::VolumeShaperConfiguration config;
2986 configuration->writeToParcelable(&config);
2987 media::VolumeShaperOperation op;
2988 operation->writeToParcelable(&op);
2989 VolumeShaper::Status status;
2990
2991 mediametrics::Defer defer([&] {
2992 mediametrics::LogItem(mMetricsId)
2993 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_APPLYVOLUMESHAPER)
2994 .set(AMEDIAMETRICS_PROP_EXECUTIONTIMENS, (int64_t)(systemTime() - beginNs))
2995 .set(AMEDIAMETRICS_PROP_STATE, stateToString(mState))
2996 .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)status)
2997 .set(AMEDIAMETRICS_PROP_TOSTRING, configuration->toString()
2998 .append(" ")
2999 .append(operation->toString()))
3000 .record(); });
3001
3002 mAudioTrack->applyVolumeShaper(config, op, &status);
3003
3004 if (status == DEAD_OBJECT) {
3005 if (restoreTrack_l("applyVolumeShaper") == OK) {
3006 mAudioTrack->applyVolumeShaper(config, op, &status);
3007 }
3008 }
3009 if (status >= 0) {
3010 // save VolumeShaper for restore
3011 mVolumeHandler->applyVolumeShaper(configuration, operation);
3012 if (mState == STATE_ACTIVE || mState == STATE_STOPPING) {
3013 mVolumeHandler->setStarted();
3014 }
3015 } else {
3016 // warn only if not an expected restore failure.
3017 ALOGW_IF(!((isOffloadedOrDirect_l() || mDoNotReconnect) && status == DEAD_OBJECT),
3018 "%s(%d): applyVolumeShaper failed: %d", __func__, mPortId, status);
3019 }
3020 return status;
3021 }
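// Note: on success the configuration/operation pair is cached in mVolumeHandler so that
// restoreTrack_l() can replay it onto a re-created IAudioTrack (see the forall() loop in
// restoreTrack_l() above, which fast-forwards already-started shapers to their end state).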
3022
3023 sp<VolumeShaper::State> AudioTrack::getVolumeShaperState(int id)
3024 {
3025 AutoMutex lock(mLock);
3026 std::optional<media::VolumeShaperState> vss;
3027 mAudioTrack->getVolumeShaperState(id, &vss);
3028 sp<VolumeShaper::State> state;
3029 if (vss.has_value()) {
3030 state = new VolumeShaper::State();
3031 state->readFromParcelable(vss.value());
3032 }
3033 if (state.get() == nullptr && (mCblk->mFlags & CBLK_INVALID) != 0) {
3034 if (restoreTrack_l("getVolumeShaperState") == OK) {
3035 mAudioTrack->getVolumeShaperState(id, &vss);
3036 if (vss.has_value()) {
3037 state = new VolumeShaper::State();
3038 state->readFromParcelable(vss.value());
3039 }
3040 }
3041 }
3042 return state;
3043 }
3044
3045 status_t AudioTrack::getTimestamp(ExtendedTimestamp *timestamp)
3046 {
3047 if (timestamp == nullptr) {
3048 return BAD_VALUE;
3049 }
3050 AutoMutex lock(mLock);
3051 return getTimestamp_l(timestamp);
3052 }
3053
3054 status_t AudioTrack::getTimestamp_l(ExtendedTimestamp *timestamp)
3055 {
3056 if (mCblk->mFlags & CBLK_INVALID) {
3057 const status_t status = restoreTrack_l("getTimestampExtended");
3058 if (status != OK) {
3059 // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
3060 // recommending that the track be recreated.
3061 return DEAD_OBJECT;
3062 }
3063 }
3064 // check for offloaded/direct here in case restoring somehow changed those flags.
3065 if (isOffloadedOrDirect_l()) {
3066 return INVALID_OPERATION; // not supported
3067 }
3068 status_t status = mProxy->getTimestamp(timestamp);
3069 LOG_ALWAYS_FATAL_IF(status != OK, "%s(%d): status %d not allowed from proxy getTimestamp",
3070 __func__, mPortId, status);
3071 bool found = false;
3072 timestamp->mPosition[ExtendedTimestamp::LOCATION_CLIENT] = mFramesWritten;
3073 timestamp->mTimeNs[ExtendedTimestamp::LOCATION_CLIENT] = 0;
3074 // server side frame offset in case AudioTrack has been restored.
3075 for (int i = ExtendedTimestamp::LOCATION_SERVER;
3076 i < ExtendedTimestamp::LOCATION_MAX; ++i) {
3077 if (timestamp->mTimeNs[i] >= 0) {
3078             // apply the server offset (the flushed frame count is ignored
3079             // so we don't report a position jump when a flush occurs).
3080 timestamp->mPosition[i] += mFramesWrittenServerOffset;
3081 found = true;
3082 }
3083 }
3084 return found ? OK : WOULD_BLOCK;
3085 }
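// The ExtendedTimestamp filled in by getTimestamp_l(ExtendedTimestamp*) carries one
// position/time pair per location: LOCATION_CLIENT (frames written by the app, set here)
// plus the server/kernel-side locations filled in by the proxy. Server/kernel positions
// are shifted by mFramesWrittenServerOffset so they remain comparable to the client count
// across a track restore, where the server restarts counting from zero.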
3086
3087 status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
3088 {
3089 AutoMutex lock(mLock);
3090 return getTimestamp_l(timestamp);
3091 }
3092
3093 status_t AudioTrack::getTimestamp_l(AudioTimestamp& timestamp)
3094 {
3095 bool previousTimestampValid = mPreviousTimestampValid;
3096 // Set false here to cover all the error return cases.
3097 mPreviousTimestampValid = false;
3098
3099 switch (mState) {
3100 case STATE_ACTIVE:
3101 case STATE_PAUSED:
3102 break; // handle below
3103 case STATE_FLUSHED:
3104 case STATE_STOPPED:
3105 return WOULD_BLOCK;
3106 case STATE_STOPPING:
3107 case STATE_PAUSED_STOPPING:
3108 if (!isOffloaded_l()) {
3109 return INVALID_OPERATION;
3110 }
3111 break; // offloaded tracks handled below
3112 default:
3113 LOG_ALWAYS_FATAL("%s(%d): Invalid mState in getTimestamp(): %d",
3114 __func__, mPortId, mState);
3115 break;
3116 }
3117
3118 if (mCblk->mFlags & CBLK_INVALID) {
3119 const status_t status = restoreTrack_l("getTimestamp");
3120 if (status != OK) {
3121 // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
3122 // recommending that the track be recreated.
3123 return DEAD_OBJECT;
3124 }
3125 }
3126
3127 // The presented frame count must always lag behind the consumed frame count.
3128 // To avoid a race, read the presented frames first. This ensures that presented <= consumed.
3129
3130 status_t status;
3131 if (isAfTrackOffloadedOrDirect_l()) {
3132 // use Binder to get timestamp
3133 media::AudioTimestampInternal ts;
3134 mAudioTrack->getTimestamp(&ts, &status);
3135 if (status == OK) {
3136 auto legacyTs = aidl2legacy_AudioTimestampInternal_AudioTimestamp(ts);
3137 if (!legacyTs.ok()) {
3138 return logIfErrorAndReturnStatus(
3139 BAD_VALUE, StringPrintf("%s: received invalid audio timestamp", __func__));
3140 }
3141 timestamp = legacyTs.value();
3142 }
3143 } else {
3144 // read timestamp from shared memory
3145 ExtendedTimestamp ets;
3146 status = mProxy->getTimestamp(&ets);
3147 if (status == OK) {
3148 ExtendedTimestamp::Location location;
3149             status = ets.getBestTimestamp(&timestamp, &location);
3150
3151 if (status == OK) {
3152 updateLatency_l();
3153 // It is possible that the best location has moved from the kernel to the server.
3154 // In this case we adjust the position from the previous computed latency.
3155 if (location == ExtendedTimestamp::LOCATION_SERVER) {
3156 ALOGW_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_KERNEL,
3157 "%s(%d): location moved from kernel to server",
3158 __func__, mPortId);
3159 // check that the last kernel OK time info exists and the positions
3160 // are valid (if they predate the current track, the positions may
3161 // be zero or negative).
3162 const int64_t frames =
3163 (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
3164 ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0 ||
3165 ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] <= 0 ||
3166 ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] <= 0)
3167 ?
3168 int64_t((double)mAfLatency * mSampleRate * mPlaybackRate.mSpeed
3169 / 1000)
3170 :
3171 (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
3172 - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK]);
3173 ALOGV("%s(%d): frame adjustment:%lld timestamp:%s",
3174 __func__, mPortId, (long long)frames, ets.toString().c_str());
3175 if (frames >= ets.mPosition[location]) {
3176 timestamp.mPosition = 0;
3177 } else {
3178 timestamp.mPosition = (uint32_t)(ets.mPosition[location] - frames);
3179 }
3180 } else if (location == ExtendedTimestamp::LOCATION_KERNEL) {
3181 ALOGV_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_SERVER,
3182 "%s(%d): location moved from server to kernel",
3183 __func__, mPortId);
3184
3185 if (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER] ==
3186 ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL]) {
3187 // In Q, we don't return errors as an invalid time
3188 // but instead we leave the last kernel good timestamp alone.
3189 //
3190 // If server is identical to kernel, the device data pipeline is idle.
3191 // A better start time is now. The retrograde check ensures
3192 // timestamp monotonicity.
3193 const int64_t nowNs = systemTime();
3194 if (!mTimestampStallReported) {
3195 ALOGD("%s(%d): device stall time corrected using current time %lld",
3196 __func__, mPortId, (long long)nowNs);
3197 mTimestampStallReported = true;
3198 }
3199 timestamp.mTime = convertNsToTimespec(nowNs);
3200 } else {
3201 mTimestampStallReported = false;
3202 }
3203 }
3204
3205 // We update the timestamp time even when paused.
3206 if (mState == STATE_PAUSED /* not needed: STATE_PAUSED_STOPPING */) {
3207 const int64_t now = systemTime();
3208                 const int64_t at = audio_utils_ns_from_timespec(&timestamp.mTime);
3209 const int64_t lag =
3210 (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
3211 ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0)
3212 ? int64_t(mAfLatency * 1000000LL)
3213 : (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
3214 - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK])
3215 * NANOS_PER_SECOND / mSampleRate;
3216 const int64_t limit = now - lag; // no earlier than this limit
3217 if (at < limit) {
3218 ALOGV("timestamp pause lag:%lld adjusting from %lld to %lld",
3219 (long long)lag, (long long)at, (long long)limit);
3220 timestamp.mTime = convertNsToTimespec(limit);
3221 }
3222 }
3223 mPreviousLocation = location;
3224 } else {
3225 // right after AudioTrack is started, one may not find a timestamp
3226 ALOGV("%s(%d): getBestTimestamp did not find timestamp", __func__, mPortId);
3227 }
3228 }
3229 if (status == INVALID_OPERATION) {
3230 // INVALID_OPERATION occurs when no timestamp has been issued by the server;
3231 // other failures are signaled by a negative time.
3232 // If we come out of FLUSHED or STOPPED where the position is known
3233 // to be zero we convert this to WOULD_BLOCK (with the implicit meaning of
3234 // "zero" for NuPlayer). We don't convert for track restoration as position
3235 // does not reset.
3236 ALOGV("%s(%d): timestamp server offset:%lld restore frames:%lld",
3237 __func__, mPortId,
3238 (long long)mFramesWrittenServerOffset, (long long)mFramesWrittenAtRestore);
3239 if (mFramesWrittenServerOffset != mFramesWrittenAtRestore) {
3240 status = WOULD_BLOCK;
3241 }
3242 }
3243 }
3244 if (status != NO_ERROR) {
3245 ALOGV_IF(status != WOULD_BLOCK, "%s(%d): getTimestamp error:%#x", __func__, mPortId, status);
3246 return status;
3247 }
3248 if (isAfTrackOffloadedOrDirect_l()) {
3249 if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
3250 // use cached paused position in case another offloaded track is running.
3251 timestamp.mPosition = mPausedPosition;
3252             clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
3253 // TODO: adjust for delay
3254 return NO_ERROR;
3255 }
3256
3257 // Check whether a pending flush or stop has completed, as those commands may
3258         // be asynchronous, return shortly before finishing, or exhibit glitchy behavior.
3259 //
3260 // Originally this showed up as the first timestamp being a continuation of
3261 // the previous song under gapless playback.
3262 // However, we sometimes see zero timestamps, then a glitch of
3263 // the previous song's position, and then correct timestamps afterwards.
3264 if (mStartFromZeroUs != 0 && mSampleRate != 0) {
3265 static const int kTimeJitterUs = 100000; // 100 ms
3266 static const int k1SecUs = 1000000;
3267
3268 const int64_t timeNow = getNowUs();
3269
3270 if (timeNow < mStartFromZeroUs + k1SecUs) { // within first second of starting
3271 const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
3272 if (timestampTimeUs < mStartFromZeroUs) {
3273 return WOULD_BLOCK; // stale timestamp time, occurs before start.
3274 }
3275 const int64_t deltaTimeUs = timestampTimeUs - mStartFromZeroUs;
3276 const int64_t deltaPositionByUs = (double)timestamp.mPosition * 1000000
3277 / ((double)mSampleRate * mPlaybackRate.mSpeed);
3278
3279 if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
3280 // Verify that the counter can't count faster than the sample rate
3281 // since the start time. If greater, then that means we may have failed
3282 // to completely flush or stop the previous playing track.
3283 ALOGW_IF(!mTimestampStartupGlitchReported,
3284 "%s(%d): startup glitch detected"
3285 " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
3286 __func__, mPortId,
3287 (long long)deltaTimeUs, (long long)deltaPositionByUs,
3288 timestamp.mPosition);
3289 mTimestampStartupGlitchReported = true;
3290 if (previousTimestampValid
3291 && mPreviousTimestamp.mPosition == 0 /* should be true if valid */) {
3292 timestamp = mPreviousTimestamp;
3293 mPreviousTimestampValid = true;
3294 return NO_ERROR;
3295 }
3296 return WOULD_BLOCK;
3297 }
3298 if (deltaPositionByUs != 0) {
3299 mStartFromZeroUs = 0; // don't check again, we got valid nonzero position.
3300 }
3301 } else {
3302 mStartFromZeroUs = 0; // don't check again, start time expired.
3303 }
3304 mTimestampStartupGlitchReported = false;
3305 }
3306 } else {
3307 // Update the mapping between local consumed (mPosition) and server consumed (mServer)
3308 (void) updateAndGetPosition_l();
3309 // Server consumed (mServer) and presented both use the same server time base,
3310 // and server consumed is always >= presented.
3311 // The delta between these represents the number of frames in the buffer pipeline.
3312         // If this delta is greater than the client position, the presented position is
3313         // still stuck at the starting line (figuratively speaking), waiting for the first
3314         // frame to go by. So we can't report a valid timestamp yet.
3315 // Note: We explicitly use non-Modulo comparison here - potential wrap issue when
3316 // mPosition exceeds 32 bits.
3317 // TODO Remove when timestamp is updated to contain pipeline status info.
3318 const int32_t pipelineDepthInFrames = (mServer - timestamp.mPosition).signedValue();
3319 if (pipelineDepthInFrames > 0 /* should be true, but we check anyways */
3320 && (uint32_t)pipelineDepthInFrames > mPosition.value()) {
3321 return INVALID_OPERATION;
3322 }
3323 // Convert timestamp position from server time base to client time base.
3324 // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
3325 // But if we change it to 64-bit then this could fail.
3326 // Use Modulo computation here.
3327 timestamp.mPosition = (mPosition - mServer + timestamp.mPosition).value();
3328 // Immediately after a call to getPosition_l(), mPosition and
3329 // mServer both represent the same frame position. mPosition is
3330 // in client's point of view, and mServer is in server's point of
3331 // view. So the difference between them is the "fudge factor"
3332 // between client and server views due to stop() and/or new
3333 // IAudioTrack. And timestamp.mPosition is initially in server's
3334 // point of view, so we need to apply the same fudge factor to it.
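        // Worked example with hypothetical numbers: mPosition == 1000, mServer == 1600,
        // server-base timestamp.mPosition == 1500. The pipeline depth is 1600 - 1500 == 100
        // frames, and the client-base position becomes 1000 - 1600 + 1500 == 900, i.e. the
        // presented position trails the client consumed count by the same 100 frames.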
3335 }
3336
3337 // Prevent retrograde motion in timestamp.
3338 // This is sometimes caused by erratic reports of the available space in the ALSA drivers.
3339 if (status == NO_ERROR) {
3340 // Fix stale time when checking timestamp right after start().
3341 // The position is at the last reported location but the time can be stale
3342 // due to pause or standby or cold start latency.
3343 //
3344 // We keep advancing the time (but not the position) to ensure that the
3345 // stale value does not confuse the application.
3346 //
3347 // For offload compatibility, use a default lag value here.
3348 // Any time discrepancy between this update and the pause timestamp is handled
3349 // by the retrograde check afterwards.
3350         int64_t currentTimeNanos = audio_utils_ns_from_timespec(&timestamp.mTime);
3351 const int64_t lagNs = int64_t(mAfLatency * 1000000LL);
3352 const int64_t limitNs = mStartNs - lagNs;
3353 if (currentTimeNanos < limitNs) {
3354 if (!mTimestampStaleTimeReported) {
3355 ALOGD("%s(%d): stale timestamp time corrected, "
3356 "currentTimeNanos: %lld < limitNs: %lld < mStartNs: %lld",
3357 __func__, mPortId,
3358 (long long)currentTimeNanos, (long long)limitNs, (long long)mStartNs);
3359 mTimestampStaleTimeReported = true;
3360 }
3361 timestamp.mTime = convertNsToTimespec(limitNs);
3362 currentTimeNanos = limitNs;
3363 } else {
3364 mTimestampStaleTimeReported = false;
3365 }
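        // Example of the clamp above (hypothetical numbers): with mAfLatency == 40 ms,
        // lagNs is 40,000,000 ns, so a timestamp whose time predates mStartNs by more than
        // 40 ms is pulled forward to mStartNs - 40 ms; the position is left untouched.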
3366
3367 // previousTimestampValid is set to false when starting after a stop or flush.
3368 if (previousTimestampValid) {
3369 const int64_t previousTimeNanos =
3370 audio_utils_ns_from_timespec(&mPreviousTimestamp.mTime);
3371
3372 // retrograde check
3373 if (currentTimeNanos < previousTimeNanos) {
3374 if (!mTimestampRetrogradeTimeReported) {
3375 ALOGW("%s(%d): retrograde timestamp time corrected, %lld < %lld",
3376 __func__, mPortId,
3377 (long long)currentTimeNanos, (long long)previousTimeNanos);
3378 mTimestampRetrogradeTimeReported = true;
3379 }
3380 timestamp.mTime = mPreviousTimestamp.mTime;
3381 } else {
3382 mTimestampRetrogradeTimeReported = false;
3383 }
3384
3385 // Looking at signed delta will work even when the timestamps
3386 // are wrapping around.
3387 int32_t deltaPosition = (Modulo<uint32_t>(timestamp.mPosition)
3388 - mPreviousTimestamp.mPosition).signedValue();
3389 if (deltaPosition < 0) {
3390 // Only report once per position instead of spamming the log.
3391 if (!mTimestampRetrogradePositionReported) {
3392 ALOGW("%s(%d): retrograde timestamp position corrected, %d = %u - %u",
3393 __func__, mPortId,
3394 deltaPosition,
3395 timestamp.mPosition,
3396 mPreviousTimestamp.mPosition);
3397 mTimestampRetrogradePositionReported = true;
3398 }
3399 } else {
3400 mTimestampRetrogradePositionReported = false;
3401 }
3402 if (deltaPosition < 0) {
3403 timestamp.mPosition = mPreviousTimestamp.mPosition;
3404 deltaPosition = 0;
3405 }
3406 #if 0
3407 // Uncomment this to verify audio timestamp rate.
3408 const int64_t deltaTime =
3409                     audio_utils_ns_from_timespec(&timestamp.mTime) - previousTimeNanos;
3410 if (deltaTime != 0) {
3411 const int64_t computedSampleRate =
3412 deltaPosition * (long long)NANOS_PER_SECOND / deltaTime;
3413 ALOGD("%s(%d): computedSampleRate:%u sampleRate:%u",
3414 __func__, mPortId,
3415 (unsigned)computedSampleRate, mSampleRate);
3416 }
3417 #endif
3418 }
3419 mPreviousTimestamp = timestamp;
3420 mPreviousTimestampValid = true;
3421 }
3422
3423 return status;
3424 }
3425
3426 String8 AudioTrack::getParameters(const String8& keys)
3427 {
3428 audio_io_handle_t output = getOutput();
3429 if (output != AUDIO_IO_HANDLE_NONE) {
3430 return AudioSystem::getParameters(output, keys);
3431 } else {
3432 return String8();
3433 }
3434 }
3435
3436 bool AudioTrack::isOffloaded() const
3437 {
3438 AutoMutex lock(mLock);
3439 return isOffloaded_l();
3440 }
3441
3442 bool AudioTrack::isDirect() const
3443 {
3444 AutoMutex lock(mLock);
3445 return isDirect_l();
3446 }
3447
3448 bool AudioTrack::isOffloadedOrDirect() const
3449 {
3450 AutoMutex lock(mLock);
3451 return isOffloadedOrDirect_l();
3452 }
3453
3454
3455 status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
3456 {
3457 String8 result;
3458
3459 result.append(" AudioTrack::dump\n");
3460 result.appendFormat(" id(%d) status(%d), state(%d), session Id(%d), flags(%#x)\n",
3461 mPortId, mStatus, mState, mSessionId, mFlags);
3462 result.appendFormat(" stream type(%d), left - right volume(%f, %f)\n",
3463 mStreamType,
3464 mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
3465 result.appendFormat(" format(%#x), channel mask(%#x), channel count(%u)\n",
3466 mFormat, mChannelMask, mChannelCount);
3467 result.appendFormat(" sample rate(%u), original sample rate(%u), speed(%f)\n",
3468 mSampleRate, mOriginalSampleRate, mPlaybackRate.mSpeed);
3469 result.appendFormat(" frame count(%zu), req. frame count(%zu)\n",
3470 mFrameCount, mReqFrameCount);
3471 result.appendFormat(" notif. frame count(%u), req. notif. frame count(%u),"
3472 " req. notif. per buff(%u)\n",
3473 mNotificationFramesAct, mNotificationFramesReq, mNotificationsPerBufferReq);
3474 result.appendFormat(" latency (%d), selected device Id(%d), routed device Ids(%s)\n",
3475 mLatency, mSelectedDeviceId, toString(mRoutedDeviceIds).c_str());
3476 result.appendFormat(" output(%d) AF latency (%u) AF frame count(%zu) AF SampleRate(%u)\n",
3477 mOutput, mAfLatency, mAfFrameCount, mAfSampleRate);
3478 ::write(fd, result.c_str(), result.size());
3479 return NO_ERROR;
3480 }
3481
3482 uint32_t AudioTrack::getUnderrunCount() const
3483 {
3484 AutoMutex lock(mLock);
3485 return getUnderrunCount_l();
3486 }
3487
3488 uint32_t AudioTrack::getUnderrunCount_l() const
3489 {
3490 return mProxy->getUnderrunCount() + mUnderrunCountOffset;
3491 }
3492
3493 uint32_t AudioTrack::getUnderrunFrames() const
3494 {
3495 AutoMutex lock(mLock);
3496 return mProxy->getUnderrunFrames();
3497 }
3498
3499 void AudioTrack::setLogSessionId(const char *logSessionId)
3500 {
3501 AutoMutex lock(mLock);
3502 if (logSessionId == nullptr) logSessionId = ""; // an empty string is an unset session id.
3503 if (mLogSessionId == logSessionId) return;
3504
3505 mLogSessionId = logSessionId;
3506 mediametrics::LogItem(mMetricsId)
3507 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_SETLOGSESSIONID)
3508 .set(AMEDIAMETRICS_PROP_LOGSESSIONID, logSessionId)
3509 .record();
3510 }
3511
3512 void AudioTrack::setPlayerIId(int playerIId)
3513 {
3514 AutoMutex lock(mLock);
3515 if (mPlayerIId == playerIId) return;
3516
3517 mPlayerIId = playerIId;
3518 triggerPortIdUpdate_l();
3519 mediametrics::LogItem(mMetricsId)
3520 .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_SETPLAYERIID)
3521 .set(AMEDIAMETRICS_PROP_PLAYERIID, playerIId)
3522 .record();
3523 }
3524
3525 void AudioTrack::triggerPortIdUpdate_l() {
3526 if (mAudioManager == nullptr) {
3527 // use checkService() to avoid blocking if audio service is not up yet
3528 sp<IBinder> binder =
3529 defaultServiceManager()->checkService(String16(kAudioServiceName));
3530 if (binder == nullptr) {
3531 ALOGE("%s(%d): binding to audio service failed.",
3532 __func__,
3533 mPlayerIId);
3534 return;
3535 }
3536
3537 mAudioManager = interface_cast<IAudioManager>(binder);
3538 }
3539
3540     // the first time the track is created we do not yet have a valid piid
3541 if (mPlayerIId != PLAYER_PIID_INVALID) {
3542 mAudioManager->playerEvent(mPlayerIId, PLAYER_UPDATE_PORT_ID, {mPortId});
3543 }
3544 }
3545
3546 status_t AudioTrack::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback)
3547 {
3548
3549 if (callback == 0) {
3550 ALOGW("%s(%d): adding NULL callback!", __func__, mPortId);
3551 return BAD_VALUE;
3552 }
3553 AutoMutex lock(mLock);
3554 if (mDeviceCallback.unsafe_get() == callback.get()) {
3555 ALOGW("%s(%d): adding same callback!", __func__, mPortId);
3556 return INVALID_OPERATION;
3557 }
3558 status_t status = NO_ERROR;
3559 if (mOutput != AUDIO_IO_HANDLE_NONE) {
3560 if (mDeviceCallback != 0) {
3561 ALOGW("%s(%d): callback already present!", __func__, mPortId);
3562 AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
3563 }
3564 status = AudioSystem::addAudioDeviceCallback(this, mOutput, mPortId);
3565 }
3566 mDeviceCallback = callback;
3567 return status;
3568 }
3569
3570 status_t AudioTrack::removeAudioDeviceCallback(
3571 const sp<AudioSystem::AudioDeviceCallback>& callback)
3572 {
3573 if (callback == 0) {
3574 ALOGW("%s(%d): removing NULL callback!", __func__, mPortId);
3575 return BAD_VALUE;
3576 }
3577 AutoMutex lock(mLock);
3578 if (mDeviceCallback.unsafe_get() != callback.get()) {
3579 ALOGW("%s removing different callback!", __FUNCTION__);
3580 return INVALID_OPERATION;
3581 }
3582 mDeviceCallback.clear();
3583 if (mOutput != AUDIO_IO_HANDLE_NONE) {
3584 AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
3585 }
3586 return NO_ERROR;
3587 }
3588
3589
3590 void AudioTrack::onAudioDeviceUpdate(audio_io_handle_t audioIo,
3591 const DeviceIdVector& deviceIds)
3592 {
3593 sp<AudioSystem::AudioDeviceCallback> callback;
3594 {
3595 AutoMutex lock(mLock);
3596 if (audioIo != mOutput) {
3597 return;
3598 }
3599 callback = mDeviceCallback.promote();
3600         // only update the device if the track is active, as route changes due to other
3601         // use cases are irrelevant for this client
3602 if (mState == STATE_ACTIVE) {
3603 mRoutedDeviceIds = deviceIds;
3604 }
3605 }
3606
3607 if (callback.get() != nullptr) {
3608 callback->onAudioDeviceUpdate(mOutput, mRoutedDeviceIds);
3609 }
3610 }
3611
3612 status_t AudioTrack::pendingDuration(int32_t *msec, ExtendedTimestamp::Location location)
3613 {
3614 if (msec == nullptr ||
3615 (location != ExtendedTimestamp::LOCATION_SERVER
3616 && location != ExtendedTimestamp::LOCATION_KERNEL)) {
3617 return BAD_VALUE;
3618 }
3619 AutoMutex lock(mLock);
3620 // inclusive of offloaded and direct tracks.
3621 //
3622 // It is possible, but not enabled, to allow duration computation for non-pcm
3623 // audio_has_proportional_frames() formats because currently they have
3624 // the drain rate equivalent to the pcm sample rate * framesize.
3625 if (!isPurePcmData_l()) {
3626 return INVALID_OPERATION;
3627 }
3628 ExtendedTimestamp ets;
3629 if (getTimestamp_l(&ets) == OK
3630 && ets.mTimeNs[location] > 0) {
3631 int64_t diff = ets.mPosition[ExtendedTimestamp::LOCATION_CLIENT]
3632 - ets.mPosition[location];
3633 if (diff < 0) {
3634 *msec = 0;
3635 } else {
3636 // ms is the playback time by frames
3637 int64_t ms = (int64_t)((double)diff * 1000 /
3638 ((double)mSampleRate * mPlaybackRate.mSpeed));
3639 // clockdiff is the timestamp age (negative)
3640 int64_t clockdiff = (mState != STATE_ACTIVE) ? 0 :
3641 ets.mTimeNs[location]
3642 + ets.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_MONOTONIC]
3643 - systemTime(SYSTEM_TIME_MONOTONIC);
3644
3645 //ALOGV("ms: %lld clockdiff: %lld", (long long)ms, (long long)clockdiff);
3646 static const int NANOS_PER_MILLIS = 1000000;
3647 *msec = (int32_t)(ms + clockdiff / NANOS_PER_MILLIS);
3648 }
3649 return NO_ERROR;
3650 }
3651 if (location != ExtendedTimestamp::LOCATION_SERVER) {
3652 return INVALID_OPERATION; // LOCATION_KERNEL is not available
3653 }
3654 // use server position directly (offloaded and direct arrive here)
3655 updateAndGetPosition_l();
3656 int32_t diff = (Modulo<uint32_t>(mFramesWritten) - mPosition).signedValue();
3657 *msec = (diff <= 0) ? 0
3658 : (int32_t)((double)diff * 1000 / ((double)mSampleRate * mPlaybackRate.mSpeed));
3659 return NO_ERROR;
3660 }
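// Worked example for pendingDuration() (hypothetical numbers): diff == 4800 frames at
// mSampleRate == 48000 and speed 1.0 gives ms == 100; if the timestamp is 20 ms old,
// clockdiff is about -20,000,000 ns, so the reported pending duration is roughly 80 ms.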
3661
3662 bool AudioTrack::hasStarted()
3663 {
3664 AutoMutex lock(mLock);
3665 switch (mState) {
3666 case STATE_STOPPED:
3667 if (isOffloadedOrDirect_l()) {
3668 // check if we have started in the past to return true.
3669 return mStartFromZeroUs > 0;
3670 }
3671 // A normal audio track may still be draining, so
3672 // check if stream has ended. This covers fasttrack position
3673 // instability and start/stop without any data written.
3674 if (mProxy->getStreamEndDone()) {
3675 return true;
3676 }
3677 FALLTHROUGH_INTENDED;
3678 case STATE_ACTIVE:
3679 case STATE_STOPPING:
3680 break;
3681 case STATE_PAUSED:
3682 case STATE_PAUSED_STOPPING:
3683 case STATE_FLUSHED:
3684 return false; // we're not active
3685 default:
3686 LOG_ALWAYS_FATAL("%s(%d): Invalid mState in hasStarted(): %d", __func__, mPortId, mState);
3687 break;
3688 }
3689
3690 // wait indicates whether we need to wait for a timestamp.
3691 // This is conservatively figured - if we encounter an unexpected error
3692 // then we will not wait.
3693 bool wait = false;
3694 if (isAfTrackOffloadedOrDirect_l()) {
3695 AudioTimestamp ts;
3696 status_t status = getTimestamp_l(ts);
3697 if (status == WOULD_BLOCK) {
3698 wait = true;
3699 } else if (status == OK) {
3700 wait = (ts.mPosition == 0 || ts.mPosition == mStartTs.mPosition);
3701 }
3702 ALOGV("%s(%d): hasStarted wait:%d ts:%u start position:%lld",
3703 __func__, mPortId,
3704 (int)wait,
3705 ts.mPosition,
3706 (long long)mStartTs.mPosition);
3707 } else {
3708 int location = ExtendedTimestamp::LOCATION_SERVER; // for ALOG
3709 ExtendedTimestamp ets;
3710 status_t status = getTimestamp_l(&ets);
3711 if (status == WOULD_BLOCK) { // no SERVER or KERNEL frame info in ets
3712 wait = true;
3713 } else if (status == OK) {
3714 for (location = ExtendedTimestamp::LOCATION_KERNEL;
3715 location >= ExtendedTimestamp::LOCATION_SERVER; --location) {
3716 if (ets.mTimeNs[location] < 0 || mStartEts.mTimeNs[location] < 0) {
3717 continue;
3718 }
3719 wait = ets.mPosition[location] == 0
3720 || ets.mPosition[location] == mStartEts.mPosition[location];
3721 break;
3722 }
3723 }
3724 ALOGV("%s(%d): hasStarted wait:%d ets:%lld start position:%lld",
3725 __func__, mPortId,
3726 (int)wait,
3727 (long long)ets.mPosition[location],
3728 (long long)mStartEts.mPosition[location]);
3729 }
3730 return !wait;
3731 }
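// Summary of hasStarted(): it reports true once a timestamp shows the presentation position
// has advanced beyond zero and beyond the position recorded at start (mStartTs / mStartEts).
// For stopped tracks it may also report true if the stream end has been reached, or, for
// offloaded/direct tracks, if playback was ever started. WOULD_BLOCK from getTimestamp_l()
// simply means "no timestamp yet", so we keep waiting (return false).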
3732
3733 // =========================================================================
3734
3735 void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
3736 {
3737 sp<AudioTrack> audioTrack = mAudioTrack.promote();
3738 if (audioTrack != 0) {
3739 AutoMutex lock(audioTrack->mLock);
3740 audioTrack->mProxy->binderDied();
3741 }
3742 }
3743
3744 // =========================================================================
3745
3746 AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver)
3747 : Thread(true /* bCanCallJava */) // binder recursion on restoreTrack_l() may call Java.
3748 , mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
3749 mIgnoreNextPausedInt(false)
3750 {
3751 }
3752
3753 AudioTrack::AudioTrackThread::~AudioTrackThread()
3754 {
3755 }
3756
3757 bool AudioTrack::AudioTrackThread::threadLoop()
3758 {
3759 {
3760 AutoMutex _l(mMyLock);
3761 if (mPaused) {
3762 // TODO check return value and handle or log
3763 mMyCond.wait(mMyLock);
3764 // caller will check for exitPending()
3765 return true;
3766 }
3767 if (mIgnoreNextPausedInt) {
3768 mIgnoreNextPausedInt = false;
3769 mPausedInt = false;
3770 }
3771 if (mPausedInt) {
3772 // TODO use futex instead of condition, for event flag "or"
3773 if (mPausedNs > 0) {
3774 // TODO check return value and handle or log
3775 (void) mMyCond.waitRelative(mMyLock, mPausedNs);
3776 } else {
3777 // TODO check return value and handle or log
3778 mMyCond.wait(mMyLock);
3779 }
3780 mPausedInt = false;
3781 return true;
3782 }
3783 }
3784 if (exitPending()) {
3785 return false;
3786 }
3787 nsecs_t ns = mReceiver.processAudioBuffer();
3788 switch (ns) {
3789 case 0:
3790 return true;
3791 case NS_INACTIVE:
3792 pauseInternal();
3793 return true;
3794 case NS_NEVER:
3795 return false;
3796 case NS_WHENEVER:
3797 // Event driven: call wake() when callback notifications conditions change.
3798 ns = INT64_MAX;
3799 FALLTHROUGH_INTENDED;
3800 default:
3801 LOG_ALWAYS_FATAL_IF(ns < 0, "%s(%d): processAudioBuffer() returned %lld",
3802 __func__, mReceiver.mPortId, (long long)ns);
3803 pauseInternal(ns);
3804 return true;
3805 }
3806 }
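// processAudioBuffer() return values as interpreted above: 0 means run the loop again
// immediately, NS_INACTIVE pauses the thread until resume() is called, NS_NEVER exits the
// thread, NS_WHENEVER sleeps until wake() is called, and any positive nanosecond value
// sleeps for (up to) that long before the next callback pass.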
3807
3808 void AudioTrack::AudioTrackThread::requestExit()
3809 {
3810 // must be in this order to avoid a race condition
3811 Thread::requestExit();
3812 resume();
3813 }
3814
3815 void AudioTrack::AudioTrackThread::pause()
3816 {
3817 AutoMutex _l(mMyLock);
3818 mPaused = true;
3819 }
3820
3821 void AudioTrack::AudioTrackThread::resume()
3822 {
3823 AutoMutex _l(mMyLock);
3824 mIgnoreNextPausedInt = true;
3825 if (mPaused || mPausedInt) {
3826 mPaused = false;
3827 mPausedInt = false;
3828 mMyCond.signal();
3829 }
3830 }
3831
3832 void AudioTrack::AudioTrackThread::wake()
3833 {
3834 AutoMutex _l(mMyLock);
3835 if (!mPaused) {
3836 // wake() might be called while servicing a callback - ignore the next
3837 // pause time and call processAudioBuffer.
3838 mIgnoreNextPausedInt = true;
3839 if (mPausedInt && mPausedNs > 0) {
3840 // audio track is active and internally paused with timeout.
3841 mPausedInt = false;
3842 mMyCond.signal();
3843 }
3844 }
3845 }
3846
3847 void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
3848 {
3849 AutoMutex _l(mMyLock);
3850 mPausedInt = true;
3851 mPausedNs = ns;
3852 }
3853
3854 binder::Status AudioTrack::AudioTrackCallback::onCodecFormatChanged(
3855 const std::vector<uint8_t>& audioMetadata)
3856 {
3857 AutoMutex _l(mAudioTrackCbLock);
3858 sp<media::IAudioTrackCallback> callback = mCallback.promote();
3859 if (callback.get() != nullptr) {
3860 callback->onCodecFormatChanged(audioMetadata);
3861 } else {
3862 mCallback.clear();
3863 }
3864 return binder::Status::ok();
3865 }
3866
3867 void AudioTrack::AudioTrackCallback::setAudioTrackCallback(
3868 const sp<media::IAudioTrackCallback> &callback) {
3869 AutoMutex lock(mAudioTrackCbLock);
3870 mCallback = callback;
3871 }
3872
3873 } // namespace android
3874