1 /*
2 **
3 ** Copyright 2012, The Android Open Source Project
4 **
5 ** Licensed under the Apache License, Version 2.0 (the "License");
6 ** you may not use this file except in compliance with the License.
7 ** You may obtain a copy of the License at
8 **
9 ** http://www.apache.org/licenses/LICENSE-2.0
10 **
11 ** Unless required by applicable law or agreed to in writing, software
12 ** distributed under the License is distributed on an "AS IS" BASIS,
13 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 ** See the License for the specific language governing permissions and
15 ** limitations under the License.
16 */
17
18
19 #define LOG_TAG "AudioFlinger"
20 //#define LOG_NDEBUG 0
21 #define ATRACE_TAG ATRACE_TAG_AUDIO
22
23 #include "Configuration.h"
24 #include <linux/futex.h>
25 #include <math.h>
26 #include <sys/syscall.h>
27 #include <utils/Log.h>
28 #include <utils/Trace.h>
29
30 #include <private/media/AudioTrackShared.h>
31
32 #include "AudioFlinger.h"
33
34 #include <media/nbaio/Pipe.h>
35 #include <media/nbaio/PipeReader.h>
36 #include <media/AudioValidator.h>
37 #include <media/RecordBufferConverter.h>
38 #include <mediautils/ServiceUtilities.h>
39 #include <audio_utils/minifloat.h>
40
41 // ----------------------------------------------------------------------------
42
43 // Note: the following macro is used for extremely verbose logging messages. In
44 // order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
45 // 0; but one side effect of this is to turn on all ALOGVs as well. Some messages
46 // are so verbose that we want to suppress them even when we have ALOG_ASSERT
47 // turned on. Do not uncomment the #define below unless you really know what you
48 // are doing and want to see all of the extremely verbose messages.
49 //#define VERY_VERY_VERBOSE_LOGGING
50 #ifdef VERY_VERY_VERBOSE_LOGGING
51 #define ALOGVV ALOGV
52 #else
53 #define ALOGVV(a...) do { } while(0)
54 #endif
55
56 // TODO: Remove when this is put into AidlConversionUtil.h
57 #define VALUE_OR_RETURN_BINDER_STATUS(x) \
58 ({ \
59 auto _tmp = (x); \
60 if (!_tmp.ok()) return ::android::aidl_utils::binderStatusFromStatusT(_tmp.error()); \
61 std::move(_tmp.value()); \
62 })
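// Typical usage (see e.g. setDualMonoMode() below):
//     const auto localMonoMode = VALUE_OR_RETURN_BINDER_STATUS(
//             aidl2legacy_AudioDualMonoMode_audio_dual_mono_mode_t(mode));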
63
64 namespace android {
65
66 using ::android::aidl_utils::binderStatusFromStatusT;
67 using binder::Status;
68 using content::AttributionSourceState;
69 using media::VolumeShaper;
70 // ----------------------------------------------------------------------------
71 // TrackBase
72 // ----------------------------------------------------------------------------
73 #undef LOG_TAG
74 #define LOG_TAG "AF::TrackBase"
75
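// Track IDs are assigned by atomically incrementing this counter; see the mId initializer
// in the TrackBase constructor below.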
76 static volatile int32_t nextTrackId = 55;
77
78 // TrackBase constructor must be called with AudioFlinger::mLock held
79 AudioFlinger::ThreadBase::TrackBase::TrackBase(
80 ThreadBase *thread,
81 const sp<Client>& client,
82 const audio_attributes_t& attr,
83 uint32_t sampleRate,
84 audio_format_t format,
85 audio_channel_mask_t channelMask,
86 size_t frameCount,
87 void *buffer,
88 size_t bufferSize,
89 audio_session_t sessionId,
90 pid_t creatorPid,
91 uid_t clientUid,
92 bool isOut,
93 alloc_type alloc,
94 track_type type,
95 audio_port_handle_t portId,
96 std::string metricsId)
97 : RefBase(),
98 mThread(thread),
99 mClient(client),
100 mCblk(NULL),
101 // mBuffer, mBufferSize
102 mState(IDLE),
103 mAttr(attr),
104 mSampleRate(sampleRate),
105 mFormat(format),
106 mChannelMask(channelMask),
107 mChannelCount(isOut ?
108 audio_channel_count_from_out_mask(channelMask) :
109 audio_channel_count_from_in_mask(channelMask)),
110 mFrameSize(audio_has_proportional_frames(format) ?
111 mChannelCount * audio_bytes_per_sample(format) : sizeof(int8_t)),
112 mFrameCount(frameCount),
113 mSessionId(sessionId),
114 mIsOut(isOut),
115 mId(android_atomic_inc(&nextTrackId)),
116 mTerminated(false),
117 mType(type),
118 mThreadIoHandle(thread ? thread->id() : AUDIO_IO_HANDLE_NONE),
119 mPortId(portId),
120 mIsInvalid(false),
121 mTrackMetrics(std::move(metricsId), isOut),
122 mCreatorPid(creatorPid)
123 {
124 const uid_t callingUid = IPCThreadState::self()->getCallingUid();
125 if (!isAudioServerOrMediaServerUid(callingUid) || clientUid == AUDIO_UID_INVALID) {
126 ALOGW_IF(clientUid != AUDIO_UID_INVALID && clientUid != callingUid,
127 "%s(%d): uid %d tried to pass itself off as %d",
128 __func__, mId, callingUid, clientUid);
129 clientUid = callingUid;
130 }
131 // clientUid contains the uid of the app that is responsible for this track, so we can blame
132 // battery usage on it.
133 mUid = clientUid;
134
135 // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
136
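// For streaming tracks (no caller-supplied buffer) the server allocates the buffer itself,
// so round the requested frame count up before the overflow checks below; otherwise the
// caller-supplied frameCount is used as-is.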
137 size_t minBufferSize = buffer == NULL ? roundup(frameCount) : frameCount;
138 // check overflow when computing bufferSize due to multiplication by mFrameSize.
139 if (minBufferSize < frameCount // roundup rounds down for values above UINT_MAX / 2
140 || mFrameSize == 0 // format needs to be correct
141 || minBufferSize > SIZE_MAX / mFrameSize) {
142 android_errorWriteLog(0x534e4554, "34749571");
143 return;
144 }
145 minBufferSize *= mFrameSize;
146
147 if (buffer == nullptr) {
148 bufferSize = minBufferSize; // allocated here.
149 } else if (minBufferSize > bufferSize) {
150 android_errorWriteLog(0x534e4554, "38340117");
151 return;
152 }
153
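// For ALLOC_CBLK streaming tracks the control block and the audio data share a single
// allocation: the audio_track_cblk_t header is immediately followed by the buffer
// (see the ALLOC_CBLK case below).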
154 size_t size = sizeof(audio_track_cblk_t);
155 if (buffer == NULL && alloc == ALLOC_CBLK) {
156 // check overflow when computing allocation size for streaming tracks.
157 if (size > SIZE_MAX - bufferSize) {
158 android_errorWriteLog(0x534e4554, "34749571");
159 return;
160 }
161 size += bufferSize;
162 }
163
164 if (client != 0) {
165 mCblkMemory = client->heap()->allocate(size);
166 if (mCblkMemory == 0 ||
167 (mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->unsecurePointer())) == NULL) {
168 ALOGE("%s(%d): not enough memory for AudioTrack size=%zu", __func__, mId, size);
169 client->heap()->dump("AudioTrack");
170 mCblkMemory.clear();
171 return;
172 }
173 } else {
174 mCblk = (audio_track_cblk_t *) malloc(size);
175 if (mCblk == NULL) {
176 ALOGE("%s(%d): not enough memory for AudioTrack size=%zu", __func__, mId, size);
177 return;
178 }
179 }
180
181 // construct the shared structure in-place.
182 if (mCblk != NULL) {
183 new(mCblk) audio_track_cblk_t();
184 switch (alloc) {
185 case ALLOC_READONLY: {
186 const sp<MemoryDealer> roHeap(thread->readOnlyHeap());
187 if (roHeap == 0 ||
188 (mBufferMemory = roHeap->allocate(bufferSize)) == 0 ||
189 (mBuffer = mBufferMemory->unsecurePointer()) == NULL) {
190 ALOGE("%s(%d): not enough memory for read-only buffer size=%zu",
191 __func__, mId, bufferSize);
192 if (roHeap != 0) {
193 roHeap->dump("buffer");
194 }
195 mCblkMemory.clear();
196 mBufferMemory.clear();
197 return;
198 }
199 memset(mBuffer, 0, bufferSize);
200 } break;
201 case ALLOC_PIPE:
202 mBufferMemory = thread->pipeMemory();
203 // mBuffer is the virtual address as seen from the current process (mediaserver),
204 // and should normally come from mBufferMemory->unsecurePointer().
205 // However, in this case the TrackBase does not reference the buffer directly.
206 // It should reference the buffer via the pipe.
207 // Therefore, to detect incorrect usage of the buffer, we set mBuffer to NULL.
208 mBuffer = NULL;
209 bufferSize = 0;
210 break;
211 case ALLOC_CBLK:
212 // clear all buffers
213 if (buffer == NULL) {
214 mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
215 memset(mBuffer, 0, bufferSize);
216 } else {
217 mBuffer = buffer;
218 #if 0
219 mCblk->mFlags = CBLK_FORCEREADY; // FIXME hack, need to fix the track ready logic
220 #endif
221 }
222 break;
223 case ALLOC_LOCAL:
224 mBuffer = calloc(1, bufferSize);
225 break;
226 case ALLOC_NONE:
227 mBuffer = buffer;
228 break;
229 default:
230 LOG_ALWAYS_FATAL("%s(%d): invalid allocation type: %d", __func__, mId, (int)alloc);
231 }
232 mBufferSize = bufferSize;
233
234 #ifdef TEE_SINK
235 mTee.set(sampleRate, mChannelCount, format, NBAIO_Tee::TEE_FLAG_TRACK);
236 #endif
237
238 }
239 }
240
241 // TODO b/182392769: use attribution source util
242 static AttributionSourceState audioServerAttributionSource(pid_t pid) {
243 AttributionSourceState attributionSource{};
244 attributionSource.uid = AID_AUDIOSERVER;
245 attributionSource.pid = pid;
246 attributionSource.token = sp<BBinder>::make();
247 return attributionSource;
248 }
249
250 status_t AudioFlinger::ThreadBase::TrackBase::initCheck() const
251 {
252 status_t status;
253 if (mType == TYPE_OUTPUT || mType == TYPE_PATCH) {
254 status = cblk() != NULL ? NO_ERROR : NO_MEMORY;
255 } else {
256 status = getCblk() != 0 ? NO_ERROR : NO_MEMORY;
257 }
258 return status;
259 }
260
261 AudioFlinger::ThreadBase::TrackBase::~TrackBase()
262 {
263 // delete the proxy before deleting the shared memory it refers to, to avoid dangling reference
264 mServerProxy.clear();
265 releaseCblk();
266 mCblkMemory.clear(); // free the shared memory before releasing the heap it belongs to
267 if (mClient != 0) {
268 // Client destructor must run with AudioFlinger client mutex locked
269 Mutex::Autolock _l(mClient->audioFlinger()->mClientLock);
270 // If the client's reference count drops to zero, the associated destructor
271 // must run with AudioFlinger lock held. Thus the explicit clear() rather than
272 // relying on the automatic clear() at end of scope.
273 mClient.clear();
274 }
275 // flush the binder command buffer
276 IPCThreadState::self()->flushCommands();
277 }
278
279 // AudioBufferProvider interface
280 // getNextBuffer() = 0;
281 // This implementation of releaseBuffer() is used by Track and RecordTrack
282 void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
283 {
284 #ifdef TEE_SINK
285 mTee.write(buffer->raw, buffer->frameCount);
286 #endif
287
288 ServerProxy::Buffer buf;
289 buf.mFrameCount = buffer->frameCount;
290 buf.mRaw = buffer->raw;
291 buffer->frameCount = 0;
292 buffer->raw = NULL;
293 mServerProxy->releaseBuffer(&buf);
294 }
295
296 status_t AudioFlinger::ThreadBase::TrackBase::setSyncEvent(const sp<SyncEvent>& event)
297 {
298 mSyncEvents.add(event);
299 return NO_ERROR;
300 }
301
302 AudioFlinger::ThreadBase::PatchTrackBase::PatchTrackBase(sp<ClientProxy> proxy,
303 const ThreadBase& thread,
304 const Timeout& timeout)
305 : mProxy(proxy)
306 {
307 if (timeout) {
308 setPeerTimeout(*timeout);
309 } else {
310 // Double buffer mixer
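// i.e. use two mixer periods (2 * frameCount / sampleRate, expressed in ns) as the
// peer timeout when no explicit timeout was supplied.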
311 uint64_t mixBufferNs = ((uint64_t)2 * thread.frameCount() * 1000000000) /
312 thread.sampleRate();
313 setPeerTimeout(std::chrono::nanoseconds{mixBufferNs});
314 }
315 }
316
317 void AudioFlinger::ThreadBase::PatchTrackBase::setPeerTimeout(std::chrono::nanoseconds timeout) {
318 mPeerTimeout.tv_sec = timeout.count() / std::nano::den;
319 mPeerTimeout.tv_nsec = timeout.count() % std::nano::den;
320 }
321
322
323 // ----------------------------------------------------------------------------
324 // Playback
325 // ----------------------------------------------------------------------------
326 #undef LOG_TAG
327 #define LOG_TAG "AF::TrackHandle"
328
329 AudioFlinger::TrackHandle::TrackHandle(const sp<AudioFlinger::PlaybackThread::Track>& track)
330 : BnAudioTrack(),
331 mTrack(track)
332 {
333 }
334
335 AudioFlinger::TrackHandle::~TrackHandle() {
336 // just stop the track on deletion; associated resources
337 // will be freed from the main thread once all pending buffers have
338 // been played, unless the track is not in the active track list, in which
339 // case we free everything now...
340 mTrack->destroy();
341 }
342
343 Status AudioFlinger::TrackHandle::getCblk(
344 std::optional<media::SharedFileRegion>* _aidl_return) {
345 *_aidl_return = legacy2aidl_NullableIMemory_SharedFileRegion(mTrack->getCblk()).value();
346 return Status::ok();
347 }
348
349 Status AudioFlinger::TrackHandle::start(int32_t* _aidl_return) {
350 *_aidl_return = mTrack->start();
351 return Status::ok();
352 }
353
354 Status AudioFlinger::TrackHandle::stop() {
355 mTrack->stop();
356 return Status::ok();
357 }
358
359 Status AudioFlinger::TrackHandle::flush() {
360 mTrack->flush();
361 return Status::ok();
362 }
363
364 Status AudioFlinger::TrackHandle::pause() {
365 mTrack->pause();
366 return Status::ok();
367 }
368
369 Status AudioFlinger::TrackHandle::attachAuxEffect(int32_t effectId,
370 int32_t* _aidl_return) {
371 *_aidl_return = mTrack->attachAuxEffect(effectId);
372 return Status::ok();
373 }
374
375 Status AudioFlinger::TrackHandle::setParameters(const std::string& keyValuePairs,
376 int32_t* _aidl_return) {
377 *_aidl_return = mTrack->setParameters(String8(keyValuePairs.c_str()));
378 return Status::ok();
379 }
380
381 Status AudioFlinger::TrackHandle::selectPresentation(int32_t presentationId, int32_t programId,
382 int32_t* _aidl_return) {
383 *_aidl_return = mTrack->selectPresentation(presentationId, programId);
384 return Status::ok();
385 }
386
387 Status AudioFlinger::TrackHandle::getTimestamp(media::AudioTimestampInternal* timestamp,
388 int32_t* _aidl_return) {
389 AudioTimestamp legacy;
390 *_aidl_return = mTrack->getTimestamp(legacy);
391 if (*_aidl_return != OK) {
392 return Status::ok();
393 }
394 *timestamp = legacy2aidl_AudioTimestamp_AudioTimestampInternal(legacy).value();
395 return Status::ok();
396 }
397
398 Status AudioFlinger::TrackHandle::signal() {
399 mTrack->signal();
400 return Status::ok();
401 }
402
403 Status AudioFlinger::TrackHandle::applyVolumeShaper(
404 const media::VolumeShaperConfiguration& configuration,
405 const media::VolumeShaperOperation& operation,
406 int32_t* _aidl_return) {
407 sp<VolumeShaper::Configuration> conf = new VolumeShaper::Configuration();
408 *_aidl_return = conf->readFromParcelable(configuration);
409 if (*_aidl_return != OK) {
410 return Status::ok();
411 }
412
413 sp<VolumeShaper::Operation> op = new VolumeShaper::Operation();
414 *_aidl_return = op->readFromParcelable(operation);
415 if (*_aidl_return != OK) {
416 return Status::ok();
417 }
418
419 *_aidl_return = mTrack->applyVolumeShaper(conf, op);
420 return Status::ok();
421 }
422
423 Status AudioFlinger::TrackHandle::getVolumeShaperState(
424 int32_t id,
425 std::optional<media::VolumeShaperState>* _aidl_return) {
426 sp<VolumeShaper::State> legacy = mTrack->getVolumeShaperState(id);
427 if (legacy == nullptr) {
428 _aidl_return->reset();
429 return Status::ok();
430 }
431 media::VolumeShaperState aidl;
432 legacy->writeToParcelable(&aidl);
433 *_aidl_return = aidl;
434 return Status::ok();
435 }
436
437 Status AudioFlinger::TrackHandle::getDualMonoMode(media::AudioDualMonoMode* _aidl_return)
438 {
439 audio_dual_mono_mode_t mode = AUDIO_DUAL_MONO_MODE_OFF;
440 const status_t status = mTrack->getDualMonoMode(&mode)
441 ?: AudioValidator::validateDualMonoMode(mode);
442 if (status == OK) {
443 *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
444 legacy2aidl_audio_dual_mono_mode_t_AudioDualMonoMode(mode));
445 }
446 return binderStatusFromStatusT(status);
447 }
448
449 Status AudioFlinger::TrackHandle::setDualMonoMode(
450 media::AudioDualMonoMode mode)
451 {
452 const auto localMonoMode = VALUE_OR_RETURN_BINDER_STATUS(
453 aidl2legacy_AudioDualMonoMode_audio_dual_mono_mode_t(mode));
454 return binderStatusFromStatusT(AudioValidator::validateDualMonoMode(localMonoMode)
455 ?: mTrack->setDualMonoMode(localMonoMode));
456 }
457
458 Status AudioFlinger::TrackHandle::getAudioDescriptionMixLevel(float* _aidl_return)
459 {
460 float leveldB = -std::numeric_limits<float>::infinity();
461 const status_t status = mTrack->getAudioDescriptionMixLevel(&leveldB)
462 ?: AudioValidator::validateAudioDescriptionMixLevel(leveldB);
463 if (status == OK) *_aidl_return = leveldB;
464 return binderStatusFromStatusT(status);
465 }
466
467 Status AudioFlinger::TrackHandle::setAudioDescriptionMixLevel(float leveldB)
468 {
469 return binderStatusFromStatusT(AudioValidator::validateAudioDescriptionMixLevel(leveldB)
470 ?: mTrack->setAudioDescriptionMixLevel(leveldB));
471 }
472
473 Status AudioFlinger::TrackHandle::getPlaybackRateParameters(
474 media::AudioPlaybackRate* _aidl_return)
475 {
476 audio_playback_rate_t localPlaybackRate{};
477 status_t status = mTrack->getPlaybackRateParameters(&localPlaybackRate)
478 ?: AudioValidator::validatePlaybackRate(localPlaybackRate);
479 if (status == NO_ERROR) {
480 *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
481 legacy2aidl_audio_playback_rate_t_AudioPlaybackRate(localPlaybackRate));
482 }
483 return binderStatusFromStatusT(status);
484 }
485
486 Status AudioFlinger::TrackHandle::setPlaybackRateParameters(
487 const media::AudioPlaybackRate& playbackRate)
488 {
489 const audio_playback_rate_t localPlaybackRate = VALUE_OR_RETURN_BINDER_STATUS(
490 aidl2legacy_AudioPlaybackRate_audio_playback_rate_t(playbackRate));
491 return binderStatusFromStatusT(AudioValidator::validatePlaybackRate(localPlaybackRate)
492 ?: mTrack->setPlaybackRateParameters(localPlaybackRate));
493 }
494
495 // ----------------------------------------------------------------------------
496 // AppOp for audio playback
497 // -------------------------------
498
499 // static
500 sp<AudioFlinger::PlaybackThread::OpPlayAudioMonitor>
501 AudioFlinger::PlaybackThread::OpPlayAudioMonitor::createIfNeeded(
502 const AttributionSourceState& attributionSource, const audio_attributes_t& attr, int id,
503 audio_stream_type_t streamType)
504 {
505 Vector <String16> packages;
506 uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
507 getPackagesForUid(uid, packages);
508 if (isServiceUid(uid)) {
509 if (packages.isEmpty()) {
510 ALOGD("OpPlayAudio: not muting track:%d usage:%d for service UID %d",
511 id,
512 attr.usage,
513 uid);
514 return nullptr;
515 }
516 }
517 // stream type has been filtered by audio policy to indicate whether it can be muted
518 if (streamType == AUDIO_STREAM_ENFORCED_AUDIBLE) {
519 ALOGD("OpPlayAudio: not muting track:%d usage:%d ENFORCED_AUDIBLE", id, attr.usage);
520 return nullptr;
521 }
522 if ((attr.flags & AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY)
523 == AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY) {
524 ALOGD("OpPlayAudio: not muting track:%d flags %#x have FLAG_BYPASS_INTERRUPTION_POLICY",
525 id, attr.flags);
526 return nullptr;
527 }
528
529 AttributionSourceState checkedAttributionSource = AudioFlinger::checkAttributionSourcePackage(
530 attributionSource);
531 return new OpPlayAudioMonitor(checkedAttributionSource, attr.usage, id);
532 }
533
534 AudioFlinger::PlaybackThread::OpPlayAudioMonitor::OpPlayAudioMonitor(
535 const AttributionSourceState& attributionSource, audio_usage_t usage, int id)
536 : mHasOpPlayAudio(true), mAttributionSource(attributionSource), mUsage((int32_t) usage),
537 mId(id)
538 {
539 }
540
541 AudioFlinger::PlaybackThread::OpPlayAudioMonitor::~OpPlayAudioMonitor()
542 {
543 if (mOpCallback != 0) {
544 mAppOpsManager.stopWatchingMode(mOpCallback);
545 }
546 mOpCallback.clear();
547 }
548
549 void AudioFlinger::PlaybackThread::OpPlayAudioMonitor::onFirstRef()
550 {
551 checkPlayAudioForUsage();
552 if (mAttributionSource.packageName.has_value()) {
553 mOpCallback = new PlayAudioOpCallback(this);
554 mAppOpsManager.startWatchingMode(AppOpsManager::OP_PLAY_AUDIO,
555 VALUE_OR_FATAL(aidl2legacy_string_view_String16(
556 mAttributionSource.packageName.value_or("")))
557 , mOpCallback);
558 }
559 }
560
561 bool AudioFlinger::PlaybackThread::OpPlayAudioMonitor::hasOpPlayAudio() const {
562 return mHasOpPlayAudio.load();
563 }
564
565 // Note this method is never called (and never will be) for audio server / patch record tracks
566 // - not called from constructor due to check on UID,
567 // - not called from PlayAudioOpCallback because the callback is not installed in this case
568 void AudioFlinger::PlaybackThread::OpPlayAudioMonitor::checkPlayAudioForUsage()
569 {
570 if (!mAttributionSource.packageName.has_value()) {
571 mHasOpPlayAudio.store(false);
572 } else {
573 uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mAttributionSource.uid));
574 String16 packageName = VALUE_OR_FATAL(
575 aidl2legacy_string_view_String16(mAttributionSource.packageName.value_or("")));
576 bool hasIt = mAppOpsManager.checkAudioOpNoThrow(AppOpsManager::OP_PLAY_AUDIO,
577 mUsage, uid, packageName) == AppOpsManager::MODE_ALLOWED;
578 ALOGD("OpPlayAudio: track:%d usage:%d %smuted", mId, mUsage, hasIt ? "not " : "");
579 mHasOpPlayAudio.store(hasIt);
580 }
581 }
582
583 AudioFlinger::PlaybackThread::OpPlayAudioMonitor::PlayAudioOpCallback::PlayAudioOpCallback(
584 const wp<OpPlayAudioMonitor>& monitor) : mMonitor(monitor)
585 { }
586
587 void AudioFlinger::PlaybackThread::OpPlayAudioMonitor::PlayAudioOpCallback::opChanged(int32_t op,
588 const String16& packageName) {
589 // we only have uid, so we need to check all package names anyway
590 UNUSED(packageName);
591 if (op != AppOpsManager::OP_PLAY_AUDIO) {
592 return;
593 }
594 sp<OpPlayAudioMonitor> monitor = mMonitor.promote();
595 if (monitor != NULL) {
596 monitor->checkPlayAudioForUsage();
597 }
598 }
599
600 // static
601 void AudioFlinger::PlaybackThread::OpPlayAudioMonitor::getPackagesForUid(
602 uid_t uid, Vector<String16>& packages)
603 {
604 PermissionController permissionController;
605 permissionController.getPackagesForUid(uid, packages);
606 }
607
608 // ----------------------------------------------------------------------------
609 #undef LOG_TAG
610 #define LOG_TAG "AF::Track"
611
612 // Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
613 AudioFlinger::PlaybackThread::Track::Track(
614 PlaybackThread *thread,
615 const sp<Client>& client,
616 audio_stream_type_t streamType,
617 const audio_attributes_t& attr,
618 uint32_t sampleRate,
619 audio_format_t format,
620 audio_channel_mask_t channelMask,
621 size_t frameCount,
622 void *buffer,
623 size_t bufferSize,
624 const sp<IMemory>& sharedBuffer,
625 audio_session_t sessionId,
626 pid_t creatorPid,
627 const AttributionSourceState& attributionSource,
628 audio_output_flags_t flags,
629 track_type type,
630 audio_port_handle_t portId,
631 size_t frameCountToBeReady,
632 float speed)
633 : TrackBase(thread, client, attr, sampleRate, format, channelMask, frameCount,
634 // TODO: Using unsecurePointer() has some associated security pitfalls
635 // (see declaration for details).
636 // Either document why it is safe in this case or address the
637 // issue (e.g. by copying).
638 (sharedBuffer != 0) ? sharedBuffer->unsecurePointer() : buffer,
639 (sharedBuffer != 0) ? sharedBuffer->size() : bufferSize,
640 sessionId, creatorPid,
641 VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)), true /*isOut*/,
642 (type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,
643 type,
644 portId,
645 std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK) + std::to_string(portId)),
646 mFillingUpStatus(FS_INVALID),
647 // mRetryCount initialized later when needed
648 mSharedBuffer(sharedBuffer),
649 mStreamType(streamType),
650 mMainBuffer(thread->sinkBuffer()),
651 mAuxBuffer(NULL),
652 mAuxEffectId(0), mHasVolumeController(false),
653 mFrameMap(16 /* sink-frame-to-track-frame map memory */),
654 mVolumeHandler(new media::VolumeHandler(sampleRate)),
655 mOpPlayAudioMonitor(OpPlayAudioMonitor::createIfNeeded(attributionSource, attr, id(),
656 streamType)),
657 // mSinkTimestamp
658 mFastIndex(-1),
659 mCachedVolume(1.0),
660 /* The track might not play immediately after becoming active, as if its volume were 0.
661 * When the track starts playing, its volume will be computed. */
662 mFinalVolume(0.f),
663 mResumeToStopping(false),
664 mFlushHwPending(false),
665 mFlags(flags),
666 mSpeed(speed)
667 {
668 // client == 0 implies sharedBuffer == 0
669 ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
670
671 ALOGV_IF(sharedBuffer != 0, "%s(%d): sharedBuffer: %p, size: %zu",
672 __func__, mId, sharedBuffer->unsecurePointer(), sharedBuffer->size());
673
674 if (mCblk == NULL) {
675 return;
676 }
677
678 uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
679 if (!thread->isTrackAllowed_l(channelMask, format, sessionId, uid)) {
680 ALOGE("%s(%d): no more tracks available", __func__, mId);
681 releaseCblk(); // this makes the track invalid.
682 return;
683 }
684
685 if (sharedBuffer == 0) {
686 mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
687 mFrameSize, !isExternalTrack(), sampleRate);
688 } else {
689 mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
690 mFrameSize, sampleRate);
691 }
692 mServerProxy = mAudioTrackServerProxy;
693 mServerProxy->setStartThresholdInFrames(frameCountToBeReady); // update the Cblk value
694
695 // only allocate a fast track index if we were able to allocate a normal track name
696 if (flags & AUDIO_OUTPUT_FLAG_FAST) {
697 // FIXME: Not calling framesReadyIsCalledByMultipleThreads() exposes a potential
698 // race with setSyncEvent(). However, if we call it, we cannot properly start
699 // static fast tracks (SoundPool) immediately after stopping.
700 //mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();
701 ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
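// __builtin_ctz() yields the index of the lowest set bit, i.e. the first free slot
// in the fast track availability mask.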
702 int i = __builtin_ctz(thread->mFastTrackAvailMask);
703 ALOG_ASSERT(0 < i && i < (int)FastMixerState::sMaxFastTracks);
704 // FIXME This is too eager. We allocate a fast track index before the
705 // fast track becomes active. Since fast tracks are a scarce resource,
706 // this means we are potentially denying other more important fast tracks from
707 // being created. It would be better to allocate the index dynamically.
708 mFastIndex = i;
709 thread->mFastTrackAvailMask &= ~(1 << i);
710 }
711
712 mServerLatencySupported = thread->type() == ThreadBase::MIXER
713 || thread->type() == ThreadBase::DUPLICATING;
714 #ifdef TEE_SINK
715 mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
716 + "_" + std::to_string(mId) + "_T");
717 #endif
718
719 if (thread->supportsHapticPlayback()) {
720 // If the track is attached to a haptic playback thread, it may have a
721 // HapticGenerator effect, which will generate haptic data, on the track. In that case,
722 // an external vibration is always created for all tracks attached to the haptic playback thread.
723 mAudioVibrationController = new AudioVibrationController(this);
724 std::string packageName = attributionSource.packageName.has_value() ?
725 attributionSource.packageName.value() : "";
726 mExternalVibration = new os::ExternalVibration(
727 mUid, packageName, mAttr, mAudioVibrationController);
728 }
729
730 // Once this item is logged by the server, the client can add properties.
731 const char * const traits = sharedBuffer == 0 ? "" : "static";
732 mTrackMetrics.logConstructor(creatorPid, uid, id(), traits, streamType);
733 }
734
735 AudioFlinger::PlaybackThread::Track::~Track()
736 {
737 ALOGV("%s(%d)", __func__, mId);
738
739 // The destructor would clear mSharedBuffer,
740 // but it will not push the decremented reference count,
741 // leaving the client's IMemory dangling indefinitely.
742 // This prevents that leak.
743 if (mSharedBuffer != 0) {
744 mSharedBuffer.clear();
745 }
746 }
747
748 status_t AudioFlinger::PlaybackThread::Track::initCheck() const
749 {
750 status_t status = TrackBase::initCheck();
751 if (status == NO_ERROR && mCblk == nullptr) {
752 status = NO_MEMORY;
753 }
754 return status;
755 }
756
757 void AudioFlinger::PlaybackThread::Track::destroy()
758 {
759 // NOTE: destroyTrack_l() can remove a strong reference to this Track
760 // by removing it from the mTracks vector, so there is a risk that this Track's
761 // destructor is called. As the destructor needs to lock mLock,
762 // we must acquire a strong reference on this Track before locking mLock
763 // here so that the destructor is called only when exiting this function.
764 // On the other hand, as long as Track::destroy() is only called by
765 // TrackHandle destructor, the TrackHandle still holds a strong ref on
766 // this Track with its member mTrack.
767 sp<Track> keep(this);
768 { // scope for mLock
769 bool wasActive = false;
770 sp<ThreadBase> thread = mThread.promote();
771 if (thread != 0) {
772 Mutex::Autolock _l(thread->mLock);
773 PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
774 wasActive = playbackThread->destroyTrack_l(this);
775 }
776 if (isExternalTrack() && !wasActive) {
777 AudioSystem::releaseOutput(mPortId);
778 }
779 }
780 forEachTeePatchTrack([](auto patchTrack) { patchTrack->destroy(); });
781 }
782
783 void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
784 {
785 result.appendFormat("Type Id Active Client Session Port Id S Flags "
786 " Format Chn mask SRate "
787 "ST Usg CT "
788 " G db L dB R dB VS dB "
789 " Server FrmCnt FrmRdy F Underruns Flushed"
790 "%s\n",
791 isServerLatencySupported() ? " Latency" : "");
792 }
793
794 void AudioFlinger::PlaybackThread::Track::appendDump(String8& result, bool active)
795 {
796 char trackType;
797 switch (mType) {
798 case TYPE_DEFAULT:
799 case TYPE_OUTPUT:
800 if (isStatic()) {
801 trackType = 'S'; // static
802 } else {
803 trackType = ' '; // normal
804 }
805 break;
806 case TYPE_PATCH:
807 trackType = 'P';
808 break;
809 default:
810 trackType = '?';
811 }
812
813 if (isFastTrack()) {
814 result.appendFormat("F%d %c %6d", mFastIndex, trackType, mId);
815 } else {
816 result.appendFormat(" %c %6d", trackType, mId);
817 }
818
819 char nowInUnderrun;
820 switch (mObservedUnderruns.mBitFields.mMostRecent) {
821 case UNDERRUN_FULL:
822 nowInUnderrun = ' ';
823 break;
824 case UNDERRUN_PARTIAL:
825 nowInUnderrun = '<';
826 break;
827 case UNDERRUN_EMPTY:
828 nowInUnderrun = '*';
829 break;
830 default:
831 nowInUnderrun = '?';
832 break;
833 }
834
835 char fillingStatus;
836 switch (mFillingUpStatus) {
837 case FS_INVALID:
838 fillingStatus = 'I';
839 break;
840 case FS_FILLING:
841 fillingStatus = 'f';
842 break;
843 case FS_FILLED:
844 fillingStatus = 'F';
845 break;
846 case FS_ACTIVE:
847 fillingStatus = 'A';
848 break;
849 default:
850 fillingStatus = '?';
851 break;
852 }
853
854 // clip framesReadySafe to max representation in dump
855 const size_t framesReadySafe =
856 std::min(mAudioTrackServerProxy->framesReadySafe(), (size_t)99999999);
857
858 // obtain volumes
859 const gain_minifloat_packed_t vlr = mAudioTrackServerProxy->getVolumeLR();
860 const std::pair<float /* volume */, bool /* active */> vsVolume =
861 mVolumeHandler->getLastVolume();
862
863 // Our effective frame count is obtained by ServerProxy::getBufferSizeInFrames()
864 // as it may be reduced by the application.
865 const size_t bufferSizeInFrames = (size_t)mAudioTrackServerProxy->getBufferSizeInFrames();
866 // Check whether the buffer size has been modified by the app.
867 const char modifiedBufferChar = bufferSizeInFrames < mFrameCount
868 ? 'r' /* buffer reduced */: bufferSizeInFrames > mFrameCount
869 ? 'e' /* error */ : ' ' /* identical */;
870
871 result.appendFormat("%7s %6u %7u %7u %2s 0x%03X "
872 "%08X %08X %6u "
873 "%2u %3x %2x "
874 "%5.2g %5.2g %5.2g %5.2g%c "
875 "%08X %6zu%c %6zu %c %9u%c %7u",
876 active ? "yes" : "no",
877 (mClient == 0) ? getpid() : mClient->pid(),
878 mSessionId,
879 mPortId,
880 getTrackStateAsCodedString(),
881 mCblk->mFlags,
882
883 mFormat,
884 mChannelMask,
885 sampleRate(),
886
887 mStreamType,
888 mAttr.usage,
889 mAttr.content_type,
890
891 20.0 * log10(mFinalVolume),
892 20.0 * log10(float_from_gain(gain_minifloat_unpack_left(vlr))),
893 20.0 * log10(float_from_gain(gain_minifloat_unpack_right(vlr))),
894 20.0 * log10(vsVolume.first), // VolumeShaper(s) total volume
895 vsVolume.second ? 'A' : ' ', // if any VolumeShapers active
896
897 mCblk->mServer,
898 bufferSizeInFrames,
899 modifiedBufferChar,
900 framesReadySafe,
901 fillingStatus,
902 mAudioTrackServerProxy->getUnderrunFrames(),
903 nowInUnderrun,
904 (unsigned)mAudioTrackServerProxy->framesFlushed() % 10000000
905 );
906
907 if (isServerLatencySupported()) {
908 double latencyMs;
909 bool fromTrack;
910 if (getTrackLatencyMs(&latencyMs, &fromTrack) == OK) {
911 // Show latency in msec, followed by 't' if from track timestamp (the most accurate)
912 // or 'k' if estimated from kernel because track frames haven't been presented yet.
913 result.appendFormat(" %7.2lf %c", latencyMs, fromTrack ? 't' : 'k');
914 } else {
915 result.appendFormat("%10s", mCblk->mServer != 0 ? "unavail" : "new");
916 }
917 }
918 result.append("\n");
919 }
920
921 uint32_t AudioFlinger::PlaybackThread::Track::sampleRate() const {
922 return mAudioTrackServerProxy->getSampleRate();
923 }
924
925 // AudioBufferProvider interface
926 status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(AudioBufferProvider::Buffer* buffer)
927 {
928 ServerProxy::Buffer buf;
929 size_t desiredFrames = buffer->frameCount;
930 buf.mFrameCount = desiredFrames;
931 status_t status = mServerProxy->obtainBuffer(&buf);
932 buffer->frameCount = buf.mFrameCount;
933 buffer->raw = buf.mRaw;
934 if (buf.mFrameCount == 0 && !isStopping() && !isStopped() && !isPaused() && !isOffloaded()) {
935 ALOGV("%s(%d): underrun, framesReady(%zu) < framesDesired(%zd), state: %d",
936 __func__, mId, buf.mFrameCount, desiredFrames, mState);
937 mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
938 } else {
939 mAudioTrackServerProxy->tallyUnderrunFrames(0);
940 }
941 return status;
942 }
943
944 void AudioFlinger::PlaybackThread::Track::releaseBuffer(AudioBufferProvider::Buffer* buffer)
945 {
946 interceptBuffer(*buffer);
947 TrackBase::releaseBuffer(buffer);
948 }
949
950 // TODO: compensate for time shift between HW modules.
951 void AudioFlinger::PlaybackThread::Track::interceptBuffer(
952 const AudioBufferProvider::Buffer& sourceBuffer) {
953 auto start = std::chrono::steady_clock::now();
954 const size_t frameCount = sourceBuffer.frameCount;
955 if (frameCount == 0) {
956 return; // No audio to intercept.
957 // Additionally, PatchProxyBufferProvider::obtainBuffer (called by PatchTrack::getNextBuffer)
958 // does not allow a 0 frame size request, contrary to getNextBuffer
959 }
960 for (auto& teePatch : mTeePatches) {
961 RecordThread::PatchRecord* patchRecord = teePatch.patchRecord.get();
962 const size_t framesWritten = patchRecord->writeFrames(
963 sourceBuffer.i8, frameCount, mFrameSize);
964 const size_t framesLeft = frameCount - framesWritten;
965 ALOGW_IF(framesLeft != 0, "%s(%d) PatchRecord %d can not provide big enough "
966 "buffer %zu/%zu, dropping %zu frames", __func__, mId, patchRecord->mId,
967 framesWritten, frameCount, framesLeft);
968 }
969 auto spent = ceil<std::chrono::microseconds>(std::chrono::steady_clock::now() - start);
970 using namespace std::chrono_literals;
971 // Average is ~20us per track; this should virtually never be logged (logging takes >200us)
972 ALOGD_IF(spent > 500us, "%s: took %lldus to intercept %zu tracks", __func__,
973 spent.count(), mTeePatches.size());
974 }
975
976 // ExtendedAudioBufferProvider interface
977
978 // framesReady() may return an approximation of the number of frames if called
979 // from a different thread than the one calling Proxy->obtainBuffer() and
980 // Proxy->releaseBuffer(). Also note there is no mutual exclusion in the
981 // AudioTrackServerProxy so be especially careful calling with FastTracks.
982 size_t AudioFlinger::PlaybackThread::Track::framesReady() const {
983 if (mSharedBuffer != 0 && (isStopped() || isStopping())) {
984 // Static tracks return zero frames immediately upon stopping (for FastTracks).
985 // The remainder of the buffer is not drained.
986 return 0;
987 }
988 return mAudioTrackServerProxy->framesReady();
989 }
990
991 int64_t AudioFlinger::PlaybackThread::Track::framesReleased() const
992 {
993 return mAudioTrackServerProxy->framesReleased();
994 }
995
996 void AudioFlinger::PlaybackThread::Track::onTimestamp(const ExtendedTimestamp &timestamp)
997 {
998 // This call comes from a FastTrack and should be kept lockless.
999 // The server side frames are already translated to client frames.
1000 mAudioTrackServerProxy->setTimestamp(timestamp);
1001
1002 // We do not set drained here, as the FastTrack timestamp may not go to the very last frame.
1003
1004 // Compute latency.
1005 // TODO: Consider whether the server latency may be passed in by FastMixer
1006 // as a constant for all active FastTracks.
1007 const double latencyMs = timestamp.getOutputServerLatencyMs(sampleRate());
1008 mServerLatencyFromTrack.store(true);
1009 mServerLatencyMs.store(latencyMs);
1010 }
1011
1012 // Don't call for fast tracks; framesReady() could result in priority inversion
1013 bool AudioFlinger::PlaybackThread::Track::isReady() const {
1014 if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) {
1015 return true;
1016 }
1017
1018 if (isStopping()) {
1019 if (framesReady() > 0) {
1020 mFillingUpStatus = FS_FILLED;
1021 }
1022 return true;
1023 }
1024
1025 size_t bufferSizeInFrames = mServerProxy->getBufferSizeInFrames();
1026 // Note: mServerProxy->getStartThresholdInFrames() is clamped.
1027 const size_t startThresholdInFrames = mServerProxy->getStartThresholdInFrames();
1028 const size_t framesToBeReady = std::clamp( // clamp again to validate client values.
1029 std::min(startThresholdInFrames, bufferSizeInFrames), size_t(1), mFrameCount);
1030
1031 if (framesReady() >= framesToBeReady || (mCblk->mFlags & CBLK_FORCEREADY)) {
1032 ALOGV("%s(%d): consider track ready with %zu/%zu, target was %zu)",
1033 __func__, mId, framesReady(), bufferSizeInFrames, framesToBeReady);
1034 mFillingUpStatus = FS_FILLED;
1035 android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
1036 return true;
1037 }
1038 return false;
1039 }
1040
1041 status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event __unused,
1042 audio_session_t triggerSession __unused)
1043 {
1044 status_t status = NO_ERROR;
1045 ALOGV("%s(%d): calling pid %d session %d",
1046 __func__, mId, IPCThreadState::self()->getCallingPid(), mSessionId);
1047
1048 sp<ThreadBase> thread = mThread.promote();
1049 if (thread != 0) {
1050 if (isOffloaded()) {
1051 Mutex::Autolock _laf(thread->mAudioFlinger->mLock);
1052 Mutex::Autolock _lth(thread->mLock);
1053 sp<EffectChain> ec = thread->getEffectChain_l(mSessionId);
1054 if (thread->mAudioFlinger->isNonOffloadableGlobalEffectEnabled_l() ||
1055 (ec != 0 && ec->isNonOffloadableEnabled())) {
1056 invalidate();
1057 return PERMISSION_DENIED;
1058 }
1059 }
1060 Mutex::Autolock _lth(thread->mLock);
1061 track_state state = mState;
1062 // here the track could be either new or restarted;
1063 // in both cases "unstop" the track
1064
1065 // initial state: stopping. next state: pausing.
1066 // What if resume is called?
1067
1068 if (state == FLUSHED) {
1069 // avoid underrun glitches when starting after flush
1070 reset();
1071 }
1072
1073 // clear mPauseHwPending because of pause (and possibly flush) during underrun.
1074 mPauseHwPending = false;
1075 if (state == PAUSED || state == PAUSING) {
1076 if (mResumeToStopping) {
1077 // a stop was requested while paused; resume into STOPPING_1 so the stop completes
1078 mState = TrackBase::STOPPING_1;
1079 ALOGV("%s(%d): PAUSED => STOPPING_1 on thread %d",
1080 __func__, mId, (int)mThreadIoHandle);
1081 } else {
1082 mState = TrackBase::RESUMING;
1083 ALOGV("%s(%d): PAUSED => RESUMING on thread %d",
1084 __func__, mId, (int)mThreadIoHandle);
1085 }
1086 } else {
1087 mState = TrackBase::ACTIVE;
1088 ALOGV("%s(%d): ? => ACTIVE on thread %d",
1089 __func__, mId, (int)mThreadIoHandle);
1090 }
1091
1092 // states to reset position info for non-offloaded/direct tracks
1093 if (!isOffloaded() && !isDirect()
1094 && (state == IDLE || state == STOPPED || state == FLUSHED)) {
1095 mFrameMap.reset();
1096 }
1097 PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1098 if (isFastTrack()) {
1099 // refresh fast track underruns on start because that field is never cleared
1100 // by the fast mixer; furthermore, the same track can be recycled, i.e. start
1101 // after stop.
1102 mObservedUnderruns = playbackThread->getFastTrackUnderruns(mFastIndex);
1103 }
1104 status = playbackThread->addTrack_l(this);
1105 if (status == INVALID_OPERATION || status == PERMISSION_DENIED) {
1106 triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
1107 // restore previous state if start was rejected by policy manager
1108 if (status == PERMISSION_DENIED) {
1109 mState = state;
1110 }
1111 }
1112
1113 // Audio timing metrics are computed a few mix cycles after starting.
1114 {
1115 mLogStartCountdown = LOG_START_COUNTDOWN;
1116 mLogStartTimeNs = systemTime();
1117 mLogStartFrames = mAudioTrackServerProxy->getTimestamp()
1118 .mPosition[ExtendedTimestamp::LOCATION_KERNEL];
1119 mLogLatencyMs = 0.;
1120 }
1121
1122 if (status == NO_ERROR || status == ALREADY_EXISTS) {
1123 // for streaming tracks, remove the buffer read stop limit.
1124 mAudioTrackServerProxy->start();
1125 }
1126
1127 // track was already in the active list, not a problem
1128 if (status == ALREADY_EXISTS) {
1129 status = NO_ERROR;
1130 } else {
1131 // Acknowledge any pending flush(), so that subsequent new data isn't discarded.
1132 // It is usually unsafe to access the server proxy from a binder thread.
1133 // But in this case we know the mixer thread (whether normal mixer or fast mixer)
1134 // isn't looking at this track yet: we still hold the normal mixer thread lock,
1135 // and for fast tracks the track is not yet in the fast mixer thread's active set.
1136 // For static tracks, this is used to acknowledge change in position or loop.
1137 ServerProxy::Buffer buffer;
1138 buffer.mFrameCount = 1;
1139 (void) mAudioTrackServerProxy->obtainBuffer(&buffer, true /*ackFlush*/);
1140 }
1141 } else {
1142 status = BAD_VALUE;
1143 }
1144 if (status == NO_ERROR) {
1145 forEachTeePatchTrack([](auto patchTrack) { patchTrack->start(); });
1146 }
1147 return status;
1148 }
1149
1150 void AudioFlinger::PlaybackThread::Track::stop()
1151 {
1152 ALOGV("%s(%d): calling pid %d", __func__, mId, IPCThreadState::self()->getCallingPid());
1153 sp<ThreadBase> thread = mThread.promote();
1154 if (thread != 0) {
1155 Mutex::Autolock _l(thread->mLock);
1156 track_state state = mState;
1157 if (state == RESUMING || state == ACTIVE || state == PAUSING || state == PAUSED) {
1158 // If the track is not active (PAUSED and buffers full), flush buffers
1159 PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1160 if (playbackThread->mActiveTracks.indexOf(this) < 0) {
1161 reset();
1162 mState = STOPPED;
1163 } else if (!isFastTrack() && !isOffloaded() && !isDirect()) {
1164 mState = STOPPED;
1165 } else {
1166 // For fast tracks prepareTracks_l() will set state to STOPPING_2
1167 // presentation is complete
1168 // For an offloaded track this starts a drain and state will
1169 // move to STOPPING_2 when drain completes and then STOPPED
1170 mState = STOPPING_1;
1171 if (isOffloaded()) {
1172 mRetryCount = PlaybackThread::kMaxTrackStopRetriesOffload;
1173 }
1174 }
1175 playbackThread->broadcast_l();
1176 ALOGV("%s(%d): not stopping/stopped => stopping/stopped on thread %d",
1177 __func__, mId, (int)mThreadIoHandle);
1178 }
1179 }
1180 forEachTeePatchTrack([](auto patchTrack) { patchTrack->stop(); });
1181 }
1182
1183 void AudioFlinger::PlaybackThread::Track::pause()
1184 {
1185 ALOGV("%s(%d): calling pid %d", __func__, mId, IPCThreadState::self()->getCallingPid());
1186 sp<ThreadBase> thread = mThread.promote();
1187 if (thread != 0) {
1188 Mutex::Autolock _l(thread->mLock);
1189 PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1190 switch (mState) {
1191 case STOPPING_1:
1192 case STOPPING_2:
1193 if (!isOffloaded()) {
1194 /* nothing to do if track is not offloaded */
1195 break;
1196 }
1197
1198 // Offloaded track was draining, we need to carry on draining when resumed
1199 mResumeToStopping = true;
1200 FALLTHROUGH_INTENDED;
1201 case ACTIVE:
1202 case RESUMING:
1203 mState = PAUSING;
1204 ALOGV("%s(%d): ACTIVE/RESUMING => PAUSING on thread %d",
1205 __func__, mId, (int)mThreadIoHandle);
1206 if (isOffloadedOrDirect()) {
1207 mPauseHwPending = true;
1208 }
1209 playbackThread->broadcast_l();
1210 break;
1211
1212 default:
1213 break;
1214 }
1215 }
1216 // Pause the TeePatch to avoid a glitch on underrun, at the cost of losing buffered audio.
1217 forEachTeePatchTrack([](auto patchTrack) { patchTrack->pause(); });
1218 }
1219
1220 void AudioFlinger::PlaybackThread::Track::flush()
1221 {
1222 ALOGV("%s(%d)", __func__, mId);
1223 sp<ThreadBase> thread = mThread.promote();
1224 if (thread != 0) {
1225 Mutex::Autolock _l(thread->mLock);
1226 PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1227
1228 // Flush the ring buffer now if the track is not active in the PlaybackThread.
1229 // Otherwise the flush would not be done until the track is resumed.
1230 // Requires FastTrack removal be BLOCK_UNTIL_ACKED
1231 if (playbackThread->mActiveTracks.indexOf(this) < 0) {
1232 (void)mServerProxy->flushBufferIfNeeded();
1233 }
1234
1235 if (isOffloaded()) {
1236 // If offloaded we allow flush during any state except terminated
1237 // and keep the track active to avoid problems if user is seeking
1238 // rapidly and underlying hardware has a significant delay handling
1239 // a pause
1240 if (isTerminated()) {
1241 return;
1242 }
1243
1244 ALOGV("%s(%d): offload flush", __func__, mId);
1245 reset();
1246
1247 if (mState == STOPPING_1 || mState == STOPPING_2) {
1248 ALOGV("%s(%d): flushed in STOPPING_1 or 2 state, change state to ACTIVE",
1249 __func__, mId);
1250 mState = ACTIVE;
1251 }
1252
1253 mFlushHwPending = true;
1254 mResumeToStopping = false;
1255 } else {
1256 if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED &&
1257 mState != PAUSED && mState != PAUSING && mState != IDLE && mState != FLUSHED) {
1258 return;
1259 }
1260 // No point remaining in PAUSED state after a flush => go to
1261 // FLUSHED state
1262 mState = FLUSHED;
1263 // do not reset the track if it is still in the process of being stopped or paused.
1264 // this will be done by prepareTracks_l() when the track is stopped.
1265 // prepareTracks_l() will see mState == FLUSHED, then
1266 // remove from active track list, reset(), and trigger presentation complete
1267 if (isDirect()) {
1268 mFlushHwPending = true;
1269 }
1270 if (playbackThread->mActiveTracks.indexOf(this) < 0) {
1271 reset();
1272 }
1273 }
1274 // Prevent flush being lost if the track is flushed and then resumed
1275 // before mixer thread can run. This is important when offloading
1276 // because the hardware buffer could hold a large amount of audio
1277 playbackThread->broadcast_l();
1278 }
1279 // Flush the Tee to avoid playing old data on resume and glitching on the transition to new data
1280 forEachTeePatchTrack([](auto patchTrack) { patchTrack->flush(); });
1281 }
1282
1283 // must be called with thread lock held
1284 void AudioFlinger::PlaybackThread::Track::flushAck()
1285 {
1286 if (!isOffloaded() && !isDirect())
1287 return;
1288
1289 // Clear the client ring buffer so that the app can prime the buffer while paused.
1290 // Otherwise it might not get cleared until playback is resumed and obtainBuffer() is called.
1291 mServerProxy->flushBufferIfNeeded();
1292
1293 mFlushHwPending = false;
1294 }
1295
1296 void AudioFlinger::PlaybackThread::Track::pauseAck()
1297 {
1298 mPauseHwPending = false;
1299 }
1300
1301 void AudioFlinger::PlaybackThread::Track::reset()
1302 {
1303 // Do not reset twice to avoid discarding data written just after a flush and before
1304 // the audioflinger thread detects the track is stopped.
1305 if (!mResetDone) {
1306 // Force underrun condition to avoid false underrun callback until first data is
1307 // written to buffer
1308 android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
1309 mFillingUpStatus = FS_FILLING;
1310 mResetDone = true;
1311 if (mState == FLUSHED) {
1312 mState = IDLE;
1313 }
1314 }
1315 }
1316
1317 status_t AudioFlinger::PlaybackThread::Track::setParameters(const String8& keyValuePairs)
1318 {
1319 sp<ThreadBase> thread = mThread.promote();
1320 if (thread == 0) {
1321 ALOGE("%s(%d): thread is dead", __func__, mId);
1322 return FAILED_TRANSACTION;
1323 } else if ((thread->type() == ThreadBase::DIRECT) ||
1324 (thread->type() == ThreadBase::OFFLOAD)) {
1325 return thread->setParameters(keyValuePairs);
1326 } else {
1327 return PERMISSION_DENIED;
1328 }
1329 }
1330
1331 status_t AudioFlinger::PlaybackThread::Track::selectPresentation(int presentationId,
1332 int programId) {
1333 sp<ThreadBase> thread = mThread.promote();
1334 if (thread == 0) {
1335 ALOGE("thread is dead");
1336 return FAILED_TRANSACTION;
1337 } else if ((thread->type() == ThreadBase::DIRECT) || (thread->type() == ThreadBase::OFFLOAD)) {
1338 DirectOutputThread *directOutputThread = static_cast<DirectOutputThread*>(thread.get());
1339 return directOutputThread->selectPresentation(presentationId, programId);
1340 }
1341 return INVALID_OPERATION;
1342 }
1343
1344 VolumeShaper::Status AudioFlinger::PlaybackThread::Track::applyVolumeShaper(
1345 const sp<VolumeShaper::Configuration>& configuration,
1346 const sp<VolumeShaper::Operation>& operation)
1347 {
1348 sp<VolumeShaper::Configuration> newConfiguration;
1349
1350 if (isOffloadedOrDirect()) {
1351 const VolumeShaper::Configuration::OptionFlag optionFlag
1352 = configuration->getOptionFlags();
1353 if ((optionFlag & VolumeShaper::Configuration::OPTION_FLAG_CLOCK_TIME) == 0) {
1354 ALOGW("%s(%d): %s tracks do not support frame counted VolumeShaper,"
1355 " using clock time instead",
1356 __func__, mId,
1357 isOffloaded() ? "Offload" : "Direct");
1358 newConfiguration = new VolumeShaper::Configuration(*configuration);
1359 newConfiguration->setOptionFlags(
1360 VolumeShaper::Configuration::OptionFlag(optionFlag
1361 | VolumeShaper::Configuration::OPTION_FLAG_CLOCK_TIME));
1362 }
1363 }
1364
1365 VolumeShaper::Status status = mVolumeHandler->applyVolumeShaper(
1366 (newConfiguration.get() != nullptr ? newConfiguration : configuration), operation);
1367
1368 if (isOffloadedOrDirect()) {
1369 // Signal thread to fetch new volume.
1370 sp<ThreadBase> thread = mThread.promote();
1371 if (thread != 0) {
1372 Mutex::Autolock _l(thread->mLock);
1373 thread->broadcast_l();
1374 }
1375 }
1376 return status;
1377 }
1378
1379 sp<VolumeShaper::State> AudioFlinger::PlaybackThread::Track::getVolumeShaperState(int id)
1380 {
1381 // Note: We don't check if Thread exists.
1382
1383 // mVolumeHandler is thread safe.
1384 return mVolumeHandler->getVolumeShaperState(id);
1385 }
1386
1387 void AudioFlinger::PlaybackThread::Track::setFinalVolume(float volume)
1388 {
1389 if (mFinalVolume != volume) { // Compare to an epsilon if too many meaningless updates
1390 mFinalVolume = volume;
1391 setMetadataHasChanged();
1392 mTrackMetrics.logVolume(volume);
1393 }
1394 }
1395
1396 void AudioFlinger::PlaybackThread::Track::copyMetadataTo(MetadataInserter& backInserter) const
1397 {
1398 playback_track_metadata_v7_t metadata;
1399 metadata.base = {
1400 .usage = mAttr.usage,
1401 .content_type = mAttr.content_type,
1402 .gain = mFinalVolume,
1403 };
1404 metadata.channel_mask = mChannelMask;
1405 strncpy(metadata.tags, mAttr.tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);
1406 *backInserter++ = metadata;
1407 }
1408
1409 void AudioFlinger::PlaybackThread::Track::setTeePatches(TeePatches teePatches) {
1410 forEachTeePatchTrack([](auto patchTrack) { patchTrack->destroy(); });
1411 mTeePatches = std::move(teePatches);
1412 if (mState == TrackBase::ACTIVE || mState == TrackBase::RESUMING ||
1413 mState == TrackBase::STOPPING_1) {
1414 forEachTeePatchTrack([](auto patchTrack) { patchTrack->start(); });
1415 }
1416 }
1417
1418 status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& timestamp)
1419 {
1420 if (!isOffloaded() && !isDirect()) {
1421 return INVALID_OPERATION; // normal tracks handled through SSQ
1422 }
1423 sp<ThreadBase> thread = mThread.promote();
1424 if (thread == 0) {
1425 return INVALID_OPERATION;
1426 }
1427
1428 Mutex::Autolock _l(thread->mLock);
1429 PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1430 return playbackThread->getTimestamp_l(timestamp);
1431 }
1432
1433 status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
1434 {
1435 sp<ThreadBase> thread = mThread.promote();
1436 if (thread == nullptr) {
1437 return DEAD_OBJECT;
1438 }
1439
1440 sp<PlaybackThread> dstThread = (PlaybackThread *)thread.get();
1441 sp<PlaybackThread> srcThread; // srcThread is initialized by call to moveAuxEffectToIo()
1442 sp<AudioFlinger> af = mClient->audioFlinger();
1443 status_t status = af->moveAuxEffectToIo(EffectId, dstThread, &srcThread);
1444
1445 if (EffectId != 0 && status == NO_ERROR) {
1446 status = dstThread->attachAuxEffect(this, EffectId);
1447 if (status == NO_ERROR) {
1448 AudioSystem::moveEffectsToIo(std::vector<int>{EffectId}, dstThread->id());
1449 }
1450 }
1451
1452 if (status != NO_ERROR && srcThread != nullptr) {
1453 af->moveAuxEffectToIo(EffectId, srcThread, &dstThread);
1454 }
1455 return status;
1456 }
1457
1458 void AudioFlinger::PlaybackThread::Track::setAuxBuffer(int EffectId, int32_t *buffer)
1459 {
1460 mAuxEffectId = EffectId;
1461 mAuxBuffer = buffer;
1462 }
1463
1464 // presentationComplete verified by frames, used by Mixed tracks.
1465 bool AudioFlinger::PlaybackThread::Track::presentationComplete(
1466 int64_t framesWritten, size_t audioHalFrames)
1467 {
1468 // TODO: improve this based on FrameMap if it exists, to ensure full drain.
1469 // This assists in proper timestamp computation as well as wakelock management.
1470
1471 // a track is considered presented when the total number of frames written to audio HAL
1472 // corresponds to the number of frames written when presentationComplete() is called for the
1473 // first time (mPresentationCompleteFrames == 0) plus the buffer filling status at that time.
1474 // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
1475 // to detect when all frames have been played. In this case framesWritten isn't
1476 // useful because it doesn't always reflect whether there is data in the h/w
1477 // buffers, particularly if a track has been paused and resumed during draining
1478 ALOGV("%s(%d): presentationComplete() mPresentationCompleteFrames %lld framesWritten %lld",
1479 __func__, mId,
1480 (long long)mPresentationCompleteFrames, (long long)framesWritten);
1481 if (mPresentationCompleteFrames == 0) {
1482 mPresentationCompleteFrames = framesWritten + audioHalFrames;
1483 ALOGV("%s(%d): set:"
1484 " mPresentationCompleteFrames %lld audioHalFrames %zu",
1485 __func__, mId,
1486 (long long)mPresentationCompleteFrames, audioHalFrames);
1487 }
1488
1489 bool complete;
1490 if (isFastTrack()) { // does not go through linear map
1491 complete = framesWritten >= (int64_t) mPresentationCompleteFrames;
1492 ALOGV("%s(%d): %s framesWritten:%lld mPresentationCompleteFrames:%lld",
1493 __func__, mId, (complete ? "complete" : "waiting"),
1494 (long long) framesWritten, (long long) mPresentationCompleteFrames);
1495 } else { // Normal tracks, OutputTracks, and PatchTracks
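// In addition to the frame count, these tracks require the server proxy to report drained;
// that flag is updated from the latest valid pipeline position in updateTrackFrameInfo().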
1496 complete = framesWritten >= (int64_t) mPresentationCompleteFrames
1497 && mAudioTrackServerProxy->isDrained();
1498 }
1499
1500 if (complete) {
1501 notifyPresentationComplete();
1502 return true;
1503 }
1504 return false;
1505 }
1506
1507 // presentationComplete checked by time, used by DirectTracks.
1508 bool AudioFlinger::PlaybackThread::Track::presentationComplete(uint32_t latencyMs)
1509 {
1510 // For Offloaded or Direct tracks.
1511
1512 // For a direct track, time-based testing is used to decide presentationComplete.
1513
1514 // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
1515 // to detect when all frames have been played. In this case latencyMs isn't
1516 // useful because it doesn't always reflect whether there is data in the h/w
1517 // buffers, particularly if a track has been paused and resumed during draining
1518
1519 constexpr float MIN_SPEED = 0.125f; // min speed scaling allowed for timely response.
1520 if (mPresentationCompleteTimeNs == 0) {
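// First call: convert the latency estimate into an absolute deadline. Dividing by the
// playback speed (clamped at MIN_SPEED) gives slowed-down tracks proportionally more time,
// e.g. latencyMs = 40 with mSpeed = 0.5 yields a deadline 80 ms from now.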
1521 mPresentationCompleteTimeNs = systemTime() + latencyMs * 1e6 / fmax(mSpeed, MIN_SPEED);
1522 ALOGV("%s(%d): set: latencyMs %u mPresentationCompleteTimeNs:%lld",
1523 __func__, mId, latencyMs, (long long) mPresentationCompleteTimeNs);
1524 }
1525
1526 bool complete;
1527 if (isOffloaded()) {
1528 complete = true;
1529 } else { // Direct
1530 complete = systemTime() >= mPresentationCompleteTimeNs;
1531 ALOGV("%s(%d): %s", __func__, mId, (complete ? "complete" : "waiting"));
1532 }
1533 if (complete) {
1534 notifyPresentationComplete();
1535 return true;
1536 }
1537 return false;
1538 }
1539
1540 void AudioFlinger::PlaybackThread::Track::notifyPresentationComplete()
1541 {
1542 // This only triggers once. TODO: should we enforce this?
1543 triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
1544 mAudioTrackServerProxy->setStreamEndDone();
1545 }
1546
1547 void AudioFlinger::PlaybackThread::Track::triggerEvents(AudioSystem::sync_event_t type)
1548 {
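// Trigger and remove every queued sync event of the requested type. removeAt() compacts
// the vector, so the index only advances when no element was removed at this position.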
1549 for (size_t i = 0; i < mSyncEvents.size();) {
1550 if (mSyncEvents[i]->type() == type) {
1551 mSyncEvents[i]->trigger();
1552 mSyncEvents.removeAt(i);
1553 } else {
1554 ++i;
1555 }
1556 }
1557 }
1558
1559 // implement VolumeBufferProvider interface
1560
1561 gain_minifloat_packed_t AudioFlinger::PlaybackThread::Track::getVolumeLR()
1562 {
1563 // called by FastMixer, so not allowed to take any locks, block, or do I/O including logs
1564 ALOG_ASSERT(isFastTrack() && (mCblk != NULL));
1565 gain_minifloat_packed_t vlr = mAudioTrackServerProxy->getVolumeLR();
1566 float vl = float_from_gain(gain_minifloat_unpack_left(vlr));
1567 float vr = float_from_gain(gain_minifloat_unpack_right(vlr));
1568 // track volumes come from shared memory, so can't be trusted and must be clamped
1569 if (vl > GAIN_FLOAT_UNITY) {
1570 vl = GAIN_FLOAT_UNITY;
1571 }
1572 if (vr > GAIN_FLOAT_UNITY) {
1573 vr = GAIN_FLOAT_UNITY;
1574 }
1575 // now apply the cached master volume and stream type volume;
1576 // this is trusted but lacks any synchronization or barrier so may be stale
1577 float v = mCachedVolume;
1578 vl *= v;
1579 vr *= v;
1580 // re-combine into packed minifloat
1581 vlr = gain_minifloat_pack(gain_from_float(vl), gain_from_float(vr));
1582 // FIXME look at mute, pause, and stop flags
1583 return vlr;
1584 }
1585
1586 status_t AudioFlinger::PlaybackThread::Track::setSyncEvent(const sp<SyncEvent>& event)
1587 {
1588 if (isTerminated() || mState == PAUSED ||
1589 ((framesReady() == 0) && ((mSharedBuffer != 0) ||
1590 (mState == STOPPED)))) {
1591 ALOGW("%s(%d): in invalid state %d on session %d %s mode, framesReady %zu",
1592 __func__, mId,
1593 mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
1594 event->cancel();
1595 return INVALID_OPERATION;
1596 }
1597 (void) TrackBase::setSyncEvent(event);
1598 return NO_ERROR;
1599 }
1600
1601 void AudioFlinger::PlaybackThread::Track::invalidate()
1602 {
1603 TrackBase::invalidate();
1604 signalClientFlag(CBLK_INVALID);
1605 }
1606
1607 void AudioFlinger::PlaybackThread::Track::disable()
1608 {
1609 // TODO(b/142394888): the filling status should also be reset to filling
1610 signalClientFlag(CBLK_DISABLED);
1611 }
1612
1613 void AudioFlinger::PlaybackThread::Track::signalClientFlag(int32_t flag)
1614 {
1615 // FIXME should use proxy, and needs work
1616 audio_track_cblk_t* cblk = mCblk;
1617 android_atomic_or(flag, &cblk->mFlags);
1618 android_atomic_release_store(0x40000000, &cblk->mFutex);
1619 // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
1620 (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
1621 }
1622
1623 void AudioFlinger::PlaybackThread::Track::signal()
1624 {
1625 sp<ThreadBase> thread = mThread.promote();
1626 if (thread != 0) {
1627 PlaybackThread *t = (PlaybackThread *)thread.get();
1628 Mutex::Autolock _l(t->mLock);
1629 t->broadcast_l();
1630 }
1631 }
1632
1633 status_t AudioFlinger::PlaybackThread::Track::getDualMonoMode(audio_dual_mono_mode_t* mode)
1634 {
1635 status_t status = INVALID_OPERATION;
1636 if (isOffloadedOrDirect()) {
1637 sp<ThreadBase> thread = mThread.promote();
1638 if (thread != nullptr) {
1639 PlaybackThread *t = (PlaybackThread *)thread.get();
1640 Mutex::Autolock _l(t->mLock);
1641 status = t->mOutput->stream->getDualMonoMode(mode);
1642 ALOGD_IF((status == NO_ERROR) && (mDualMonoMode != *mode),
1643 "%s: mode %d inconsistent", __func__, mDualMonoMode);
1644 }
1645 }
1646 return status;
1647 }
1648
1649 status_t AudioFlinger::PlaybackThread::Track::setDualMonoMode(audio_dual_mono_mode_t mode)
1650 {
1651 status_t status = INVALID_OPERATION;
1652 if (isOffloadedOrDirect()) {
1653 sp<ThreadBase> thread = mThread.promote();
1654 if (thread != nullptr) {
1655 auto t = static_cast<PlaybackThread *>(thread.get());
1656 Mutex::Autolock lock(t->mLock);
1657 status = t->mOutput->stream->setDualMonoMode(mode);
1658 if (status == NO_ERROR) {
1659 mDualMonoMode = mode;
1660 }
1661 }
1662 }
1663 return status;
1664 }
1665
1666 status_t AudioFlinger::PlaybackThread::Track::getAudioDescriptionMixLevel(float* leveldB)
1667 {
1668 status_t status = INVALID_OPERATION;
1669 if (isOffloadedOrDirect()) {
1670 sp<ThreadBase> thread = mThread.promote();
1671 if (thread != nullptr) {
1672 auto t = static_cast<PlaybackThread *>(thread.get());
1673 Mutex::Autolock lock(t->mLock);
1674 status = t->mOutput->stream->getAudioDescriptionMixLevel(leveldB);
1675 ALOGD_IF((status == NO_ERROR) && (mAudioDescriptionMixLevel != *leveldB),
1676 "%s: level %.3f inconsistent", __func__, mAudioDescriptionMixLevel);
1677 }
1678 }
1679 return status;
1680 }
1681
1682 status_t AudioFlinger::PlaybackThread::Track::setAudioDescriptionMixLevel(float leveldB)
1683 {
1684 status_t status = INVALID_OPERATION;
1685 if (isOffloadedOrDirect()) {
1686 sp<ThreadBase> thread = mThread.promote();
1687 if (thread != nullptr) {
1688 auto t = static_cast<PlaybackThread *>(thread.get());
1689 Mutex::Autolock lock(t->mLock);
1690 status = t->mOutput->stream->setAudioDescriptionMixLevel(leveldB);
1691 if (status == NO_ERROR) {
1692 mAudioDescriptionMixLevel = leveldB;
1693 }
1694 }
1695 }
1696 return status;
1697 }
1698
1699 status_t AudioFlinger::PlaybackThread::Track::getPlaybackRateParameters(
1700 audio_playback_rate_t* playbackRate)
1701 {
1702 status_t status = INVALID_OPERATION;
1703 if (isOffloadedOrDirect()) {
1704 sp<ThreadBase> thread = mThread.promote();
1705 if (thread != nullptr) {
1706 auto t = static_cast<PlaybackThread *>(thread.get());
1707 Mutex::Autolock lock(t->mLock);
1708 status = t->mOutput->stream->getPlaybackRateParameters(playbackRate);
1709 ALOGD_IF((status == NO_ERROR) &&
1710 !isAudioPlaybackRateEqual(mPlaybackRateParameters, *playbackRate),
1711 "%s: playbackRate inconsistent", __func__);
1712 }
1713 }
1714 return status;
1715 }
1716
1717 status_t AudioFlinger::PlaybackThread::Track::setPlaybackRateParameters(
1718 const audio_playback_rate_t& playbackRate)
1719 {
1720 status_t status = INVALID_OPERATION;
1721 if (isOffloadedOrDirect()) {
1722 sp<ThreadBase> thread = mThread.promote();
1723 if (thread != nullptr) {
1724 auto t = static_cast<PlaybackThread *>(thread.get());
1725 Mutex::Autolock lock(t->mLock);
1726 status = t->mOutput->stream->setPlaybackRateParameters(playbackRate);
1727 if (status == NO_ERROR) {
1728 mPlaybackRateParameters = playbackRate;
1729 }
1730 }
1731 }
1732 return status;
1733 }
1734
1735 // To be called with thread lock held
1736 bool AudioFlinger::PlaybackThread::Track::isResumePending() {
1737
1738 if (mState == RESUMING)
1739 return true;
1740 /* Resume is pending if track was stopping before pause was called */
1741 if (mState == STOPPING_1 &&
1742 mResumeToStopping)
1743 return true;
1744
1745 return false;
1746 }
1747
1748 // To be called with thread lock held
1749 void AudioFlinger::PlaybackThread::Track::resumeAck() {
1750
1751
1752 if (mState == RESUMING)
1753 mState = ACTIVE;
1754
1755 // The other possibility of a pending resume is the STOPPING_1 state.
1756 // Do not update the state from stopping, as this would prevent
1757 // drain from being called.
1758 if (mState == STOPPING_1) {
1759 mResumeToStopping = false;
1760 }
1761 }
1762
1763 // To be called with thread lock held
1764 void AudioFlinger::PlaybackThread::Track::updateTrackFrameInfo(
1765 int64_t trackFramesReleased, int64_t sinkFramesWritten,
1766 uint32_t halSampleRate, const ExtendedTimestamp &timeStamp) {
1767 // Make the kernel frametime available.
1768 const FrameTime ft{
1769 timeStamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
1770 timeStamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]};
1771 // ALOGD("FrameTime: %lld %lld", (long long)ft.frames, (long long)ft.timeNs);
1772 mKernelFrameTime.store(ft);
1773 if (!audio_is_linear_pcm(mFormat)) {
1774 return;
1775 }
1776
1777 // update frame map
1778 mFrameMap.push(trackFramesReleased, sinkFramesWritten);
1779
1780 // adjust server times and set drained state.
1781 //
1782 // Our timestamps are only updated when the track is on the Thread active list.
1783 // We need to ensure that tracks are not removed before full drain.
1784 ExtendedTimestamp local = timeStamp;
1785 bool drained = true; // default assume drained, if no server info found
1786 bool checked = false;
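// Walk the pipeline stages from the latest one back to the server stage, translating each
// sink (thread) frame position into this track's frame position via mFrameMap. The track is
// considered drained once the translated position at the latest valid stage at or below the
// kernel location has caught up with the frames released by the track.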
1787 for (int i = ExtendedTimestamp::LOCATION_MAX - 1;
1788 i >= ExtendedTimestamp::LOCATION_SERVER; --i) {
1789 // Lookup the track frame corresponding to the sink frame position.
1790 if (local.mTimeNs[i] > 0) {
1791 local.mPosition[i] = mFrameMap.findX(local.mPosition[i]);
1792 // check drain state from the latest stage in the pipeline.
1793 if (!checked && i <= ExtendedTimestamp::LOCATION_KERNEL) {
1794 drained = local.mPosition[i] >= mAudioTrackServerProxy->framesReleased();
1795 checked = true;
1796 }
1797 }
1798 }
1799
1800 mAudioTrackServerProxy->setDrained(drained);
1801 // Set correction for flushed frames that are not accounted for in released.
1802 local.mFlushed = mAudioTrackServerProxy->framesFlushed();
1803 mServerProxy->setTimestamp(local);
1804
1805 // Compute latency info.
1806 const bool useTrackTimestamp = !drained;
1807 const double latencyMs = useTrackTimestamp
1808 ? local.getOutputServerLatencyMs(sampleRate())
1809 : timeStamp.getOutputServerLatencyMs(halSampleRate);
1810
1811 mServerLatencyFromTrack.store(useTrackTimestamp);
1812 mServerLatencyMs.store(latencyMs);
1813
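// One-shot startup metrics: once kernel timestamps and positions are valid, skip a few
// updates (mLogStartCountdown), then log latency and startup time the first time the
// latency dips below the previous reading, i.e. once it appears to have stabilized.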
1814 if (mLogStartCountdown > 0
1815 && local.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] > 0
1816 && local.mPosition[ExtendedTimestamp::LOCATION_KERNEL] > 0)
1817 {
1818 if (mLogStartCountdown > 1) {
1819 --mLogStartCountdown;
1820 } else if (latencyMs < mLogLatencyMs) { // wait for latency to stabilize (dip)
1821 mLogStartCountdown = 0;
1822 // startup is the difference in times for the current timestamp and our start
1823 double startUpMs =
1824 (local.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] - mLogStartTimeNs) * 1e-6;
1825 // adjust for frames played.
1826 startUpMs -= (local.mPosition[ExtendedTimestamp::LOCATION_KERNEL] - mLogStartFrames)
1827 * 1e3 / mSampleRate;
1828 ALOGV("%s: latencyMs:%lf startUpMs:%lf"
1829 " localTime:%lld startTime:%lld"
1830 " localPosition:%lld startPosition:%lld",
1831 __func__, latencyMs, startUpMs,
1832 (long long)local.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
1833 (long long)mLogStartTimeNs,
1834 (long long)local.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
1835 (long long)mLogStartFrames);
1836 mTrackMetrics.logLatencyAndStartup(latencyMs, startUpMs);
1837 }
1838 mLogLatencyMs = latencyMs;
1839 }
1840 }
1841
1842 binder::Status AudioFlinger::PlaybackThread::Track::AudioVibrationController::mute(
1843 /*out*/ bool *ret) {
1844 *ret = false;
1845 sp<ThreadBase> thread = mTrack->mThread.promote();
1846 if (thread != 0) {
1847 // Lock for updating mHapticPlaybackEnabled.
1848 Mutex::Autolock _l(thread->mLock);
1849 PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1850 if ((mTrack->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
1851 && playbackThread->mHapticChannelCount > 0) {
1852 mTrack->setHapticPlaybackEnabled(false);
1853 *ret = true;
1854 }
1855 }
1856 return binder::Status::ok();
1857 }
1858
1859 binder::Status AudioFlinger::PlaybackThread::Track::AudioVibrationController::unmute(
1860 /*out*/ bool *ret) {
1861 *ret = false;
1862 sp<ThreadBase> thread = mTrack->mThread.promote();
1863 if (thread != 0) {
1864 // Lock for updating mHapticPlaybackEnabled.
1865 Mutex::Autolock _l(thread->mLock);
1866 PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1867 if ((mTrack->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
1868 && playbackThread->mHapticChannelCount > 0) {
1869 mTrack->setHapticPlaybackEnabled(true);
1870 *ret = true;
1871 }
1872 }
1873 return binder::Status::ok();
1874 }
1875
1876 // ----------------------------------------------------------------------------
1877 #undef LOG_TAG
1878 #define LOG_TAG "AF::OutputTrack"
1879
1880 AudioFlinger::PlaybackThread::OutputTrack::OutputTrack(
1881 PlaybackThread *playbackThread,
1882 DuplicatingThread *sourceThread,
1883 uint32_t sampleRate,
1884 audio_format_t format,
1885 audio_channel_mask_t channelMask,
1886 size_t frameCount,
1887 const AttributionSourceState& attributionSource)
1888 : Track(playbackThread, NULL, AUDIO_STREAM_PATCH,
1889 audio_attributes_t{} /* currently unused for output track */,
1890 sampleRate, format, channelMask, frameCount,
1891 nullptr /* buffer */, (size_t)0 /* bufferSize */, nullptr /* sharedBuffer */,
1892 AUDIO_SESSION_NONE, getpid(), attributionSource, AUDIO_OUTPUT_FLAG_NONE,
1893 TYPE_OUTPUT),
1894 mActive(false), mSourceThread(sourceThread)
1895 {
1896
1897 if (mCblk != NULL) {
1898 mOutBuffer.frameCount = 0;
1899 playbackThread->mTracks.add(this);
1900 ALOGV("%s(): mCblk %p, mBuffer %p, "
1901 "frameCount %zu, mChannelMask 0x%08x",
1902 __func__, mCblk, mBuffer,
1903 frameCount, mChannelMask);
1904 // since client and server are in the same process,
1905 // the buffer has the same virtual address on both sides
1906 mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize,
1907 true /*clientInServer*/);
1908 mClientProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY);
1909 mClientProxy->setSendLevel(0.0);
1910 mClientProxy->setSampleRate(sampleRate);
1911 } else {
1912 ALOGW("%s(%d): Error creating output track on thread %d",
1913 __func__, mId, (int)mThreadIoHandle);
1914 }
1915 }
1916
1917 AudioFlinger::PlaybackThread::OutputTrack::~OutputTrack()
1918 {
1919 clearBufferQueue();
1920 // superclass destructor will now delete the server proxy and shared memory both refer to
1921 }
1922
1923 status_t AudioFlinger::PlaybackThread::OutputTrack::start(AudioSystem::sync_event_t event,
1924 audio_session_t triggerSession)
1925 {
1926 status_t status = Track::start(event, triggerSession);
1927 if (status != NO_ERROR) {
1928 return status;
1929 }
1930
1931 mActive = true;
1932 mRetryCount = 127;
1933 return status;
1934 }
1935
1936 void AudioFlinger::PlaybackThread::OutputTrack::stop()
1937 {
1938 Track::stop();
1939 clearBufferQueue();
1940 mOutBuffer.frameCount = 0;
1941 mActive = false;
1942 }
1943
1944 ssize_t AudioFlinger::PlaybackThread::OutputTrack::write(void* data, uint32_t frames)
1945 {
1946 Buffer *pInBuffer;
1947 Buffer inBuffer;
1948 bool outputBufferFull = false;
1949 inBuffer.frameCount = frames;
1950 inBuffer.raw = data;
1951
1952 uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs();
1953
1954 if (!mActive && frames != 0) {
1955 (void) start();
1956 }
1957
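// The duplicating (source) thread grants a total wait budget (waitTimeLeftMs). Each
// obtainBuffer() below consumes part of it, and anything not written before the budget
// expires is queued in mBufferQueue to be retried on the next write.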
1958 while (waitTimeLeftMs) {
1959 // First write pending buffers, then new data
1960 if (mBufferQueue.size()) {
1961 pInBuffer = mBufferQueue.itemAt(0);
1962 } else {
1963 pInBuffer = &inBuffer;
1964 }
1965
1966 if (pInBuffer->frameCount == 0) {
1967 break;
1968 }
1969
1970 if (mOutBuffer.frameCount == 0) {
1971 mOutBuffer.frameCount = pInBuffer->frameCount;
1972 nsecs_t startTime = systemTime();
1973 status_t status = obtainBuffer(&mOutBuffer, waitTimeLeftMs);
1974 if (status != NO_ERROR && status != NOT_ENOUGH_DATA) {
1975 ALOGV("%s(%d): thread %d no more output buffers; status %d",
1976 __func__, mId,
1977 (int)mThreadIoHandle, status);
1978 outputBufferFull = true;
1979 break;
1980 }
1981 uint32_t waitTimeMs = (uint32_t)ns2ms(systemTime() - startTime);
1982 if (waitTimeLeftMs >= waitTimeMs) {
1983 waitTimeLeftMs -= waitTimeMs;
1984 } else {
1985 waitTimeLeftMs = 0;
1986 }
1987 if (status == NOT_ENOUGH_DATA) {
1988 restartIfDisabled();
1989 continue;
1990 }
1991 }
1992
1993 uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
1994 pInBuffer->frameCount;
1995 memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * mFrameSize);
1996 Proxy::Buffer buf;
1997 buf.mFrameCount = outFrames;
1998 buf.mRaw = NULL;
1999 mClientProxy->releaseBuffer(&buf);
2000 restartIfDisabled();
2001 pInBuffer->frameCount -= outFrames;
2002 pInBuffer->raw = (int8_t *)pInBuffer->raw + outFrames * mFrameSize;
2003 mOutBuffer.frameCount -= outFrames;
2004 mOutBuffer.raw = (int8_t *)mOutBuffer.raw + outFrames * mFrameSize;
2005
2006 if (pInBuffer->frameCount == 0) {
2007 if (mBufferQueue.size()) {
2008 mBufferQueue.removeAt(0);
2009 free(pInBuffer->mBuffer);
2010 if (pInBuffer != &inBuffer) {
2011 delete pInBuffer;
2012 }
2013 ALOGV("%s(%d): thread %d released overflow buffer %zu",
2014 __func__, mId,
2015 (int)mThreadIoHandle, mBufferQueue.size());
2016 } else {
2017 break;
2018 }
2019 }
2020 }
2021
2022 // If we could not write all frames, allocate a buffer and queue it for next time.
2023 if (inBuffer.frameCount) {
2024 sp<ThreadBase> thread = mThread.promote();
2025 if (thread != 0 && !thread->standby()) {
2026 if (mBufferQueue.size() < kMaxOverFlowBuffers) {
2027 pInBuffer = new Buffer;
2028 pInBuffer->mBuffer = malloc(inBuffer.frameCount * mFrameSize);
2029 pInBuffer->frameCount = inBuffer.frameCount;
2030 pInBuffer->raw = pInBuffer->mBuffer;
2031 memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * mFrameSize);
2032 mBufferQueue.add(pInBuffer);
2033 ALOGV("%s(%d): thread %d adding overflow buffer %zu", __func__, mId,
2034 (int)mThreadIoHandle, mBufferQueue.size());
2035 // audio data is consumed (stored locally); set frameCount to 0.
2036 inBuffer.frameCount = 0;
2037 } else {
2038 ALOGW("%s(%d): thread %d no more overflow buffers",
2039 __func__, mId, (int)mThreadIoHandle);
2040 // TODO: return error for this.
2041 }
2042 }
2043 }
2044
2045 // Calling write() with a 0 length buffer means that no more data will be written:
2046 // We rely on stop() to set the appropriate flags to allow the remaining frames to play out.
2047 if (frames == 0 && mBufferQueue.size() == 0 && mActive) {
2048 stop();
2049 }
2050
2051 return frames - inBuffer.frameCount; // number of frames consumed.
2052 }
2053
2054 void AudioFlinger::PlaybackThread::OutputTrack::copyMetadataTo(MetadataInserter& backInserter) const
2055 {
2056 std::lock_guard<std::mutex> lock(mTrackMetadatasMutex);
2057 backInserter = std::copy(mTrackMetadatas.begin(), mTrackMetadatas.end(), backInserter);
2058 }
2059
2060 void AudioFlinger::PlaybackThread::OutputTrack::setMetadatas(const SourceMetadatas& metadatas) {
2061 {
2062 std::lock_guard<std::mutex> lock(mTrackMetadatasMutex);
2063 mTrackMetadatas = metadatas;
2064 }
2065 // No need to adjust metadata track volumes as OutputTrack volumes are always 0dBFS.
2066 setMetadataHasChanged();
2067 }
2068
2069 status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(
2070 AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
2071 {
2072 ClientProxy::Buffer buf;
2073 buf.mFrameCount = buffer->frameCount;
2074 struct timespec timeout;
2075 timeout.tv_sec = waitTimeMs / 1000;
2076 timeout.tv_nsec = (int) (waitTimeMs % 1000) * 1000000;
2077 status_t status = mClientProxy->obtainBuffer(&buf, &timeout);
2078 buffer->frameCount = buf.mFrameCount;
2079 buffer->raw = buf.mRaw;
2080 return status;
2081 }
2082
2083 void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue()
2084 {
2085 size_t size = mBufferQueue.size();
2086
2087 for (size_t i = 0; i < size; i++) {
2088 Buffer *pBuffer = mBufferQueue.itemAt(i);
2089 free(pBuffer->mBuffer);
2090 delete pBuffer;
2091 }
2092 mBufferQueue.clear();
2093 }
2094
2095 void AudioFlinger::PlaybackThread::OutputTrack::restartIfDisabled()
2096 {
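// android_atomic_and() returns the previous flags, so CBLK_DISABLED is cleared atomically
// and the track is restarted only if it had actually been disabled (typically after an
// underrun) while still active.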
2097 int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
2098 if (mActive && (flags & CBLK_DISABLED)) {
2099 start();
2100 }
2101 }
2102
2103 // ----------------------------------------------------------------------------
2104 #undef LOG_TAG
2105 #define LOG_TAG "AF::PatchTrack"
2106
2107 AudioFlinger::PlaybackThread::PatchTrack::PatchTrack(PlaybackThread *playbackThread,
2108 audio_stream_type_t streamType,
2109 uint32_t sampleRate,
2110 audio_channel_mask_t channelMask,
2111 audio_format_t format,
2112 size_t frameCount,
2113 void *buffer,
2114 size_t bufferSize,
2115 audio_output_flags_t flags,
2116 const Timeout& timeout,
2117 size_t frameCountToBeReady)
2118 : Track(playbackThread, NULL, streamType,
2119 audio_attributes_t{} /* currently unused for patch track */,
2120 sampleRate, format, channelMask, frameCount,
2121 buffer, bufferSize, nullptr /* sharedBuffer */,
2122 AUDIO_SESSION_NONE, getpid(), audioServerAttributionSource(getpid()), flags,
2123 TYPE_PATCH, AUDIO_PORT_HANDLE_NONE, frameCountToBeReady),
2124 PatchTrackBase(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, true, true),
2125 *playbackThread, timeout)
2126 {
2127 ALOGV("%s(%d): sampleRate %d mPeerTimeout %d.%03d sec",
2128 __func__, mId, sampleRate,
2129 (int)mPeerTimeout.tv_sec,
2130 (int)(mPeerTimeout.tv_nsec / 1000000));
2131 }
2132
2133 AudioFlinger::PlaybackThread::PatchTrack::~PatchTrack()
2134 {
2135 ALOGV("%s(%d)", __func__, mId);
2136 }
2137
2138 size_t AudioFlinger::PlaybackThread::PatchTrack::framesReady() const
2139 {
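// A peer proxy that produces buffers on demand has no fixed backlog, so report an
// effectively unlimited number of ready frames rather than the proxy's own count.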
2140 if (mPeerProxy && mPeerProxy->producesBufferOnDemand()) {
2141 return std::numeric_limits<size_t>::max();
2142 } else {
2143 return Track::framesReady();
2144 }
2145 }
2146
2147 status_t AudioFlinger::PlaybackThread::PatchTrack::start(AudioSystem::sync_event_t event,
2148 audio_session_t triggerSession)
2149 {
2150 status_t status = Track::start(event, triggerSession);
2151 if (status != NO_ERROR) {
2152 return status;
2153 }
2154 android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
2155 return status;
2156 }
2157
2158 // AudioBufferProvider interface
2159 status_t AudioFlinger::PlaybackThread::PatchTrack::getNextBuffer(
2160 AudioBufferProvider::Buffer* buffer)
2161 {
2162 ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
2163 Proxy::Buffer buf;
2164 buf.mFrameCount = buffer->frameCount;
2165 if (ATRACE_ENABLED()) {
2166 std::string traceName("PTnReq");
2167 traceName += std::to_string(id());
2168 ATRACE_INT(traceName.c_str(), buf.mFrameCount);
2169 }
2170 status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
2171 ALOGV_IF(status != NO_ERROR, "%s(%d): getNextBuffer status %d", __func__, mId, status);
2172 buffer->frameCount = buf.mFrameCount;
2173 if (ATRACE_ENABLED()) {
2174 std::string traceName("PTnObt");
2175 traceName += std::to_string(id());
2176 ATRACE_INT(traceName.c_str(), buf.mFrameCount);
2177 }
2178 if (buf.mFrameCount == 0) {
2179 return WOULD_BLOCK;
2180 }
2181 status = Track::getNextBuffer(buffer);
2182 return status;
2183 }
2184
2185 void AudioFlinger::PlaybackThread::PatchTrack::releaseBuffer(AudioBufferProvider::Buffer* buffer)
2186 {
2187 ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
2188 Proxy::Buffer buf;
2189 buf.mFrameCount = buffer->frameCount;
2190 buf.mRaw = buffer->raw;
2191 mPeerProxy->releaseBuffer(&buf);
2192 TrackBase::releaseBuffer(buffer);
2193 }
2194
2195 status_t AudioFlinger::PlaybackThread::PatchTrack::obtainBuffer(Proxy::Buffer* buffer,
2196 const struct timespec *timeOut)
2197 {
2198 status_t status = NO_ERROR;
2199 static const int32_t kMaxTries = 5;
2200 int32_t tryCounter = kMaxTries;
2201 const size_t originalFrameCount = buffer->mFrameCount;
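// The proxy clears the buffer's frame count when obtainBuffer() fails, so it must be restored
// before retrying. NOT_ENOUGH_DATA usually means the track was disabled by an underrun;
// clear that state and retry a bounded number of times.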
2202 do {
2203 if (status == NOT_ENOUGH_DATA) {
2204 restartIfDisabled();
2205 buffer->mFrameCount = originalFrameCount; // cleared on error, must be restored.
2206 }
2207 status = mProxy->obtainBuffer(buffer, timeOut);
2208 } while ((status == NOT_ENOUGH_DATA) && (tryCounter-- > 0));
2209 return status;
2210 }
2211
2212 void AudioFlinger::PlaybackThread::PatchTrack::releaseBuffer(Proxy::Buffer* buffer)
2213 {
2214 mProxy->releaseBuffer(buffer);
2215 restartIfDisabled();
2216
2217 // Check if the PatchTrack has enough data to write once in releaseBuffer().
2218 // If not, prevent an underrun from occurring by moving the track into FS_FILLING;
2219 // this logic avoids glitches when suspending A2DP with AudioPlaybackCapture.
2220 // TODO: perhaps underrun avoidance could be a track property checked in isReady() instead.
2221 if (mFillingUpStatus == FS_ACTIVE
2222 && audio_is_linear_pcm(mFormat)
2223 && !isOffloadedOrDirect()) {
2224 if (sp<ThreadBase> thread = mThread.promote();
2225 thread != 0) {
2226 PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
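// One playback-thread buffer, rescaled to this track's sample rate; e.g. 960 thread frames
// at 48 kHz with a 24 kHz track gives 480 frames.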
2227 const size_t frameCount = playbackThread->frameCount() * sampleRate()
2228 / playbackThread->sampleRate();
2229 if (framesReady() < frameCount) {
2230 ALOGD("%s(%d) Not enough data, wait for buffer to fill", __func__, mId);
2231 mFillingUpStatus = FS_FILLING;
2232 }
2233 }
2234 }
2235 }
2236
2237 void AudioFlinger::PlaybackThread::PatchTrack::restartIfDisabled()
2238 {
2239 if (android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags) & CBLK_DISABLED) {
2240 ALOGW("%s(%d): disabled due to previous underrun, restarting", __func__, mId);
2241 start();
2242 }
2243 }
2244
2245 // ----------------------------------------------------------------------------
2246 // Record
2247 // ----------------------------------------------------------------------------
2248
2249
2250 #undef LOG_TAG
2251 #define LOG_TAG "AF::RecordHandle"
2252
2253 AudioFlinger::RecordHandle::RecordHandle(
2254 const sp<AudioFlinger::RecordThread::RecordTrack>& recordTrack)
2255 : BnAudioRecord(),
2256 mRecordTrack(recordTrack)
2257 {
2258 }
2259
2260 AudioFlinger::RecordHandle::~RecordHandle() {
2261 stop_nonvirtual();
2262 mRecordTrack->destroy();
2263 }
2264
2265 binder::Status AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
2266 int /*audio_session_t*/ triggerSession) {
2267 ALOGV("%s()", __func__);
2268 return binderStatusFromStatusT(
2269 mRecordTrack->start((AudioSystem::sync_event_t)event, (audio_session_t) triggerSession));
2270 }
2271
2272 binder::Status AudioFlinger::RecordHandle::stop() {
2273 stop_nonvirtual();
2274 return binder::Status::ok();
2275 }
2276
2277 void AudioFlinger::RecordHandle::stop_nonvirtual() {
2278 ALOGV("%s()", __func__);
2279 mRecordTrack->stop();
2280 }
2281
2282 binder::Status AudioFlinger::RecordHandle::getActiveMicrophones(
2283 std::vector<media::MicrophoneInfoData>* activeMicrophones) {
2284 ALOGV("%s()", __func__);
2285 std::vector<media::MicrophoneInfo> mics;
2286 status_t status = mRecordTrack->getActiveMicrophones(&mics);
2287 activeMicrophones->resize(mics.size());
2288 for (size_t i = 0; status == OK && i < mics.size(); ++i) {
2289 status = mics[i].writeToParcelable(&activeMicrophones->at(i));
2290 }
2291 return binderStatusFromStatusT(status);
2292 }
2293
2294 binder::Status AudioFlinger::RecordHandle::setPreferredMicrophoneDirection(
2295 int /*audio_microphone_direction_t*/ direction) {
2296 ALOGV("%s()", __func__);
2297 return binderStatusFromStatusT(mRecordTrack->setPreferredMicrophoneDirection(
2298 static_cast<audio_microphone_direction_t>(direction)));
2299 }
2300
2301 binder::Status AudioFlinger::RecordHandle::setPreferredMicrophoneFieldDimension(float zoom) {
2302 ALOGV("%s()", __func__);
2303 return binderStatusFromStatusT(mRecordTrack->setPreferredMicrophoneFieldDimension(zoom));
2304 }
2305
2306 binder::Status AudioFlinger::RecordHandle::shareAudioHistory(
2307 const std::string& sharedAudioPackageName, int64_t sharedAudioStartMs) {
2308 return binderStatusFromStatusT(
2309 mRecordTrack->shareAudioHistory(sharedAudioPackageName, sharedAudioStartMs));
2310 }
2311
2312 // ----------------------------------------------------------------------------
2313 #undef LOG_TAG
2314 #define LOG_TAG "AF::RecordTrack"
2315
2316 // RecordTrack constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
2317 AudioFlinger::RecordThread::RecordTrack::RecordTrack(
2318 RecordThread *thread,
2319 const sp<Client>& client,
2320 const audio_attributes_t& attr,
2321 uint32_t sampleRate,
2322 audio_format_t format,
2323 audio_channel_mask_t channelMask,
2324 size_t frameCount,
2325 void *buffer,
2326 size_t bufferSize,
2327 audio_session_t sessionId,
2328 pid_t creatorPid,
2329 const AttributionSourceState& attributionSource,
2330 audio_input_flags_t flags,
2331 track_type type,
2332 audio_port_handle_t portId,
2333 int32_t startFrames)
2334 : TrackBase(thread, client, attr, sampleRate, format,
2335 channelMask, frameCount, buffer, bufferSize, sessionId,
2336 creatorPid,
2337 VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)),
2338 false /*isOut*/,
2339 (type == TYPE_DEFAULT) ?
2340 ((flags & AUDIO_INPUT_FLAG_FAST) ? ALLOC_PIPE : ALLOC_CBLK) :
2341 ((buffer == NULL) ? ALLOC_LOCAL : ALLOC_NONE),
2342 type, portId,
2343 std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_RECORD) + std::to_string(portId)),
2344 mOverflow(false),
2345 mFramesToDrop(0),
2346 mResamplerBufferProvider(NULL), // initialize in case of early constructor exit
2347 mRecordBufferConverter(NULL),
2348 mFlags(flags),
2349 mSilenced(false),
2350 mStartFrames(startFrames)
2351 {
2352 if (mCblk == NULL) {
2353 return;
2354 }
2355
2356 if (!isDirect()) {
2357 mRecordBufferConverter = new RecordBufferConverter(
2358 thread->mChannelMask, thread->mFormat, thread->mSampleRate,
2359 channelMask, format, sampleRate);
2360 // Check if the RecordBufferConverter construction was successful.
2361 // If not, don't continue with construction.
2362 //
2363 // NOTE: It would be extremely rare that the record track cannot be created
2364 // for the current device, but a pending or future device change would make
2365 // the record track configuration valid.
2366 if (mRecordBufferConverter->initCheck() != NO_ERROR) {
2367 ALOGE("%s(%d): RecordTrack unable to create record buffer converter", __func__, mId);
2368 return;
2369 }
2370 }
2371
2372 mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
2373 mFrameSize, !isExternalTrack());
2374
2375 mResamplerBufferProvider = new ResamplerBufferProvider(this);
2376
2377 if (flags & AUDIO_INPUT_FLAG_FAST) {
2378 ALOG_ASSERT(thread->mFastTrackAvail);
2379 thread->mFastTrackAvail = false;
2380 } else {
2381 // TODO: only Normal Record has timestamps (Fast Record does not).
2382 mServerLatencySupported = checkServerLatencySupported(mFormat, flags);
2383 }
2384 #ifdef TEE_SINK
2385 mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
2386 + "_" + std::to_string(mId)
2387 + "_R");
2388 #endif
2389
2390 // Once this item is logged by the server, the client can add properties.
2391 mTrackMetrics.logConstructor(creatorPid, uid(), id());
2392 }
2393
2394 AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
2395 {
2396 ALOGV("%s()", __func__);
2397 delete mRecordBufferConverter;
2398 delete mResamplerBufferProvider;
2399 }
2400
2401 status_t AudioFlinger::RecordThread::RecordTrack::initCheck() const
2402 {
2403 status_t status = TrackBase::initCheck();
2404 if (status == NO_ERROR && mServerProxy == 0) {
2405 status = BAD_VALUE;
2406 }
2407 return status;
2408 }
2409
2410 // AudioBufferProvider interface
2411 status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer)
2412 {
2413 ServerProxy::Buffer buf;
2414 buf.mFrameCount = buffer->frameCount;
2415 status_t status = mServerProxy->obtainBuffer(&buf);
2416 buffer->frameCount = buf.mFrameCount;
2417 buffer->raw = buf.mRaw;
2418 if (buf.mFrameCount == 0) {
2419 // FIXME also wake futex so that overrun is noticed more quickly
2420 (void) android_atomic_or(CBLK_OVERRUN, &mCblk->mFlags);
2421 }
2422 return status;
2423 }
2424
2425 status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event,
2426 audio_session_t triggerSession)
2427 {
2428 sp<ThreadBase> thread = mThread.promote();
2429 if (thread != 0) {
2430 RecordThread *recordThread = (RecordThread *)thread.get();
2431 return recordThread->start(this, event, triggerSession);
2432 } else {
2433 ALOGW("%s track %d: thread was destroyed", __func__, portId());
2434 return DEAD_OBJECT;
2435 }
2436 }
2437
2438 void AudioFlinger::RecordThread::RecordTrack::stop()
2439 {
2440 sp<ThreadBase> thread = mThread.promote();
2441 if (thread != 0) {
2442 RecordThread *recordThread = (RecordThread *)thread.get();
2443 if (recordThread->stop(this) && isExternalTrack()) {
2444 AudioSystem::stopInput(mPortId);
2445 }
2446 }
2447 }
2448
2449 void AudioFlinger::RecordThread::RecordTrack::destroy()
2450 {
2451 // see comments at AudioFlinger::PlaybackThread::Track::destroy()
2452 sp<RecordTrack> keep(this);
2453 {
2454 track_state priorState = mState;
2455 sp<ThreadBase> thread = mThread.promote();
2456 if (thread != 0) {
2457 Mutex::Autolock _l(thread->mLock);
2458 RecordThread *recordThread = (RecordThread *) thread.get();
2459 priorState = mState;
2460 if (!mSharedAudioPackageName.empty()) {
2461 recordThread->resetAudioHistory_l();
2462 }
2463 recordThread->destroyTrack_l(this); // move mState to STOPPED, terminate
2464 }
2465 // APM portid/client management done outside of lock.
2466 // NOTE: if thread doesn't exist, the input descriptor probably doesn't either.
2467 if (isExternalTrack()) {
2468 switch (priorState) {
2469 case ACTIVE: // invalidated while still active
2470 case STARTING_2: // invalidated/start-aborted after startInput successfully called
2471 case PAUSING: // invalidated while in the middle of stop() pausing (still active)
2472 AudioSystem::stopInput(mPortId);
2473 break;
2474
2475 case STARTING_1: // invalidated/start-aborted and startInput not successful
2476 case PAUSED: // OK, not active
2477 case IDLE: // OK, not active
2478 break;
2479
2480 case STOPPED: // unexpected (destroyed)
2481 default:
2482 LOG_ALWAYS_FATAL("%s(%d): invalid prior state: %d", __func__, mId, priorState);
2483 }
2484 AudioSystem::releaseInput(mPortId);
2485 }
2486 }
2487 }
2488
2489 void AudioFlinger::RecordThread::RecordTrack::invalidate()
2490 {
2491 TrackBase::invalidate();
2492 // FIXME should use proxy, and needs work
2493 audio_track_cblk_t* cblk = mCblk;
2494 android_atomic_or(CBLK_INVALID, &cblk->mFlags);
2495 android_atomic_release_store(0x40000000, &cblk->mFutex);
2496 // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
2497 (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
2498 }
2499
2500
2501 void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
2502 {
2503 result.appendFormat("Active Id Client Session Port Id S Flags "
2504 " Format Chn mask SRate Source "
2505 " Server FrmCnt FrmRdy Sil%s\n",
2506 isServerLatencySupported() ? " Latency" : "");
2507 }
2508
2509 void AudioFlinger::RecordThread::RecordTrack::appendDump(String8& result, bool active)
2510 {
2511 result.appendFormat("%c%5s %6d %6u %7u %7u %2s 0x%03X "
2512 "%08X %08X %6u %6X "
2513 "%08X %6zu %6zu %3c",
2514 isFastTrack() ? 'F' : ' ',
2515 active ? "yes" : "no",
2516 mId,
2517 (mClient == 0) ? getpid() : mClient->pid(),
2518 mSessionId,
2519 mPortId,
2520 getTrackStateAsCodedString(),
2521 mCblk->mFlags,
2522
2523 mFormat,
2524 mChannelMask,
2525 mSampleRate,
2526 mAttr.source,
2527
2528 mCblk->mServer,
2529 mFrameCount,
2530 mServerProxy->framesReadySafe(),
2531 isSilenced() ? 's' : 'n'
2532 );
2533 if (isServerLatencySupported()) {
2534 double latencyMs;
2535 bool fromTrack;
2536 if (getTrackLatencyMs(&latencyMs, &fromTrack) == OK) {
2537 // Show latency in msec, followed by 't' if from track timestamp (the most accurate)
2538 // or 'k' if estimated from kernel (usually for debugging).
2539 result.appendFormat(" %7.2lf %c", latencyMs, fromTrack ? 't' : 'k');
2540 } else {
2541 result.appendFormat("%10s", mCblk->mServer != 0 ? "unavail" : "new");
2542 }
2543 }
2544 result.append("\n");
2545 }
2546
2547 void AudioFlinger::RecordThread::RecordTrack::handleSyncStartEvent(const sp<SyncEvent>& event)
2548 {
2549 if (event == mSyncStartEvent) {
2550 ssize_t framesToDrop = 0;
2551 sp<ThreadBase> threadBase = mThread.promote();
2552 if (threadBase != 0) {
2553 // TODO: use actual buffer filling status instead of 2 buffers when info is available
2554 // from audio HAL
2555 framesToDrop = threadBase->mFrameCount * 2;
2556 }
2557 mFramesToDrop = framesToDrop;
2558 }
2559 }
2560
2561 void AudioFlinger::RecordThread::RecordTrack::clearSyncStartEvent()
2562 {
2563 if (mSyncStartEvent != 0) {
2564 mSyncStartEvent->cancel();
2565 mSyncStartEvent.clear();
2566 }
2567 mFramesToDrop = 0;
2568 }
2569
2570 void AudioFlinger::RecordThread::RecordTrack::updateTrackFrameInfo(
2571 int64_t trackFramesReleased, int64_t sourceFramesRead,
2572 uint32_t halSampleRate, const ExtendedTimestamp &timestamp)
2573 {
2574 // Make the kernel frametime available.
2575 const FrameTime ft{
2576 timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
2577 timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]};
2578 // ALOGD("FrameTime: %lld %lld", (long long)ft.frames, (long long)ft.timeNs);
2579 mKernelFrameTime.store(ft);
2580 if (!audio_is_linear_pcm(mFormat)) {
2581 return;
2582 }
2583
2584 ExtendedTimestamp local = timestamp;
2585
2586 // Convert HAL frames to server-side track frames at track sample rate.
2587 // We use trackFramesReleased and sourceFramesRead as an anchor point.
2588 for (int i = ExtendedTimestamp::LOCATION_SERVER; i < ExtendedTimestamp::LOCATION_MAX; ++i) {
2589 if (local.mTimeNs[i] != 0) {
2590 const int64_t relativeServerFrames = local.mPosition[i] - sourceFramesRead;
2591 const int64_t relativeTrackFrames = relativeServerFrames
2592 * mSampleRate / halSampleRate; // TODO: potential computation overflow
2593 local.mPosition[i] = relativeTrackFrames + trackFramesReleased;
2594 }
2595 }
2596 mServerProxy->setTimestamp(local);
2597
2598 // Compute latency info.
2599 const bool useTrackTimestamp = true; // use track unless debugging.
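// Note the negation: getOutputServerLatencyMs() follows the output (playback) sign
// convention, which is reversed on the capture path, so the result is negated to report a
// positive record latency.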
2600 const double latencyMs = - (useTrackTimestamp
2601 ? local.getOutputServerLatencyMs(sampleRate())
2602 : timestamp.getOutputServerLatencyMs(halSampleRate));
2603
2604 mServerLatencyFromTrack.store(useTrackTimestamp);
2605 mServerLatencyMs.store(latencyMs);
2606 }
2607
2608 status_t AudioFlinger::RecordThread::RecordTrack::getActiveMicrophones(
2609 std::vector<media::MicrophoneInfo>* activeMicrophones)
2610 {
2611 sp<ThreadBase> thread = mThread.promote();
2612 if (thread != 0) {
2613 RecordThread *recordThread = (RecordThread *)thread.get();
2614 return recordThread->getActiveMicrophones(activeMicrophones);
2615 } else {
2616 return BAD_VALUE;
2617 }
2618 }
2619
2620 status_t AudioFlinger::RecordThread::RecordTrack::setPreferredMicrophoneDirection(
2621 audio_microphone_direction_t direction) {
2622 sp<ThreadBase> thread = mThread.promote();
2623 if (thread != 0) {
2624 RecordThread *recordThread = (RecordThread *)thread.get();
2625 return recordThread->setPreferredMicrophoneDirection(direction);
2626 } else {
2627 return BAD_VALUE;
2628 }
2629 }
2630
2631 status_t AudioFlinger::RecordThread::RecordTrack::setPreferredMicrophoneFieldDimension(float zoom) {
2632 sp<ThreadBase> thread = mThread.promote();
2633 if (thread != 0) {
2634 RecordThread *recordThread = (RecordThread *)thread.get();
2635 return recordThread->setPreferredMicrophoneFieldDimension(zoom);
2636 } else {
2637 return BAD_VALUE;
2638 }
2639 }
2640
2641 status_t AudioFlinger::RecordThread::RecordTrack::shareAudioHistory(
2642 const std::string& sharedAudioPackageName, int64_t sharedAudioStartMs) {
2643
2644 const uid_t callingUid = IPCThreadState::self()->getCallingUid();
2645 const pid_t callingPid = IPCThreadState::self()->getCallingPid();
2646 if (callingUid != mUid || callingPid != mCreatorPid) {
2647 return PERMISSION_DENIED;
2648 }
2649
2650 AttributionSourceState attributionSource{};
2651 attributionSource.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingUid));
2652 attributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingPid));
2653 attributionSource.token = sp<BBinder>::make();
2654 if (!captureHotwordAllowed(attributionSource)) {
2655 return PERMISSION_DENIED;
2656 }
2657
2658 sp<ThreadBase> thread = mThread.promote();
2659 if (thread != 0) {
2660 RecordThread *recordThread = (RecordThread *)thread.get();
2661 status_t status = recordThread->shareAudioHistory(
2662 sharedAudioPackageName, mSessionId, sharedAudioStartMs);
2663 if (status == NO_ERROR) {
2664 mSharedAudioPackageName = sharedAudioPackageName;
2665 }
2666 return status;
2667 } else {
2668 return BAD_VALUE;
2669 }
2670 }
2671
2672
2673 // ----------------------------------------------------------------------------
2674 #undef LOG_TAG
2675 #define LOG_TAG "AF::PatchRecord"
2676
2677 AudioFlinger::RecordThread::PatchRecord::PatchRecord(RecordThread *recordThread,
2678 uint32_t sampleRate,
2679 audio_channel_mask_t channelMask,
2680 audio_format_t format,
2681 size_t frameCount,
2682 void *buffer,
2683 size_t bufferSize,
2684 audio_input_flags_t flags,
2685 const Timeout& timeout)
2686 : RecordTrack(recordThread, NULL,
2687 audio_attributes_t{} /* currently unused for patch track */,
2688 sampleRate, format, channelMask, frameCount,
2689 buffer, bufferSize, AUDIO_SESSION_NONE, getpid(),
2690 audioServerAttributionSource(getpid()), flags, TYPE_PATCH),
2691 PatchTrackBase(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true),
2692 *recordThread, timeout)
2693 {
2694 ALOGV("%s(%d): sampleRate %d mPeerTimeout %d.%03d sec",
2695 __func__, mId, sampleRate,
2696 (int)mPeerTimeout.tv_sec,
2697 (int)(mPeerTimeout.tv_nsec / 1000000));
2698 }
2699
2700 AudioFlinger::RecordThread::PatchRecord::~PatchRecord()
2701 {
2702 ALOGV("%s(%d)", __func__, mId);
2703 }
2704
2705 static size_t writeFramesHelper(
2706 AudioBufferProvider* dest, const void* src, size_t frameCount, size_t frameSize)
2707 {
2708 AudioBufferProvider::Buffer patchBuffer;
2709 patchBuffer.frameCount = frameCount;
2710 auto status = dest->getNextBuffer(&patchBuffer);
2711 if (status != NO_ERROR) {
2712 ALOGW("%s PathRecord getNextBuffer failed with error %d: %s",
2713 __func__, status, strerror(-status));
2714 return 0;
2715 }
2716 ALOG_ASSERT(patchBuffer.frameCount <= frameCount);
2717 memcpy(patchBuffer.raw, src, patchBuffer.frameCount * frameSize);
2718 size_t framesWritten = patchBuffer.frameCount;
2719 dest->releaseBuffer(&patchBuffer);
2720 return framesWritten;
2721 }
2722
2723 // static
2724 size_t AudioFlinger::RecordThread::PatchRecord::writeFrames(
2725 AudioBufferProvider* dest, const void* src, size_t frameCount, size_t frameSize)
2726 {
2727 size_t framesWritten = writeFramesHelper(dest, src, frameCount, frameSize);
2728 // On buffer wrap, the buffer frame count will be less than requested;
2729 // when this happens, a second buffer is needed to write the leftover audio.
2730 const size_t framesLeft = frameCount - framesWritten;
2731 if (framesWritten != 0 && framesLeft != 0) {
2732 framesWritten += writeFramesHelper(dest, (const char*)src + framesWritten * frameSize,
2733 framesLeft, frameSize);
2734 }
2735 return framesWritten;
2736 }
2737
2738 // AudioBufferProvider interface
2739 status_t AudioFlinger::RecordThread::PatchRecord::getNextBuffer(
2740 AudioBufferProvider::Buffer* buffer)
2741 {
2742 ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
2743 Proxy::Buffer buf;
2744 buf.mFrameCount = buffer->frameCount;
2745 status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
2746 ALOGV_IF(status != NO_ERROR,
2747 "%s(%d): mPeerProxy->obtainBuffer status %d", __func__, mId, status);
2748 buffer->frameCount = buf.mFrameCount;
2749 if (ATRACE_ENABLED()) {
2750 std::string traceName("PRnObt");
2751 traceName += std::to_string(id());
2752 ATRACE_INT(traceName.c_str(), buf.mFrameCount);
2753 }
2754 if (buf.mFrameCount == 0) {
2755 return WOULD_BLOCK;
2756 }
2757 status = RecordTrack::getNextBuffer(buffer);
2758 return status;
2759 }
2760
2761 void AudioFlinger::RecordThread::PatchRecord::releaseBuffer(AudioBufferProvider::Buffer* buffer)
2762 {
2763 ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
2764 Proxy::Buffer buf;
2765 buf.mFrameCount = buffer->frameCount;
2766 buf.mRaw = buffer->raw;
2767 mPeerProxy->releaseBuffer(&buf);
2768 TrackBase::releaseBuffer(buffer);
2769 }
2770
2771 status_t AudioFlinger::RecordThread::PatchRecord::obtainBuffer(Proxy::Buffer* buffer,
2772 const struct timespec *timeOut)
2773 {
2774 return mProxy->obtainBuffer(buffer, timeOut);
2775 }
2776
2777 void AudioFlinger::RecordThread::PatchRecord::releaseBuffer(Proxy::Buffer* buffer)
2778 {
2779 mProxy->releaseBuffer(buffer);
2780 }
2781
2782 #undef LOG_TAG
2783 #define LOG_TAG "AF::PthrPatchRecord"
2784
2785 static std::unique_ptr<void, decltype(free)*> allocAligned(size_t alignment, size_t size)
2786 {
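// The allocation is wrapped in a unique_ptr with free() as its deleter. The posix_memalign()
// result code is intentionally ignored; ptr is initialized to nullptr, so a failed
// allocation is expected to yield an empty unique_ptr.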
2787 void *ptr = nullptr;
2788 (void)posix_memalign(&ptr, alignment, size);
2789 return std::unique_ptr<void, decltype(free)*>(ptr, free);
2790 }
2791
2792 AudioFlinger::RecordThread::PassthruPatchRecord::PassthruPatchRecord(
2793 RecordThread *recordThread,
2794 uint32_t sampleRate,
2795 audio_channel_mask_t channelMask,
2796 audio_format_t format,
2797 size_t frameCount,
2798 audio_input_flags_t flags)
2799 : PatchRecord(recordThread, sampleRate, channelMask, format, frameCount,
2800 nullptr /*buffer*/, 0 /*bufferSize*/, flags),
2801 mPatchRecordAudioBufferProvider(*this),
2802 mSinkBuffer(allocAligned(32, mFrameCount * mFrameSize)),
2803 mStubBuffer(allocAligned(32, mFrameCount * mFrameSize))
2804 {
2805 memset(mStubBuffer.get(), 0, mFrameCount * mFrameSize);
2806 }
2807
2808 sp<StreamInHalInterface> AudioFlinger::RecordThread::PassthruPatchRecord::obtainStream(
2809 sp<ThreadBase>* thread)
2810 {
2811 *thread = mThread.promote();
2812 if (!*thread) return nullptr;
2813 RecordThread *recordThread = static_cast<RecordThread*>((*thread).get());
2814 Mutex::Autolock _l(recordThread->mLock);
2815 return recordThread->mInput ? recordThread->mInput->stream : nullptr;
2816 }
2817
2818 // PatchProxyBufferProvider methods are called on DirectOutputThread
2819 status_t AudioFlinger::RecordThread::PassthruPatchRecord::obtainBuffer(
2820 Proxy::Buffer* buffer, const struct timespec* timeOut)
2821 {
2822 if (mUnconsumedFrames) {
2823 buffer->mFrameCount = std::min(buffer->mFrameCount, mUnconsumedFrames);
2824 // mUnconsumedFrames is decreased in releaseBuffer to use actual frame consumption figure.
2825 return PatchRecord::obtainBuffer(buffer, timeOut);
2826 }
2827
2828 // Otherwise, execute a read from HAL and write into the buffer.
2829 nsecs_t startTimeNs = 0;
2830 if (timeOut && (timeOut->tv_sec != 0 || timeOut->tv_nsec != 0) && timeOut->tv_sec != INT_MAX) {
2831 // Will need to correct timeOut by elapsed time.
2832 startTimeNs = systemTime();
2833 }
2834 const size_t framesToRead = std::min(buffer->mFrameCount, mFrameCount);
2835 buffer->mFrameCount = 0;
2836 buffer->mRaw = nullptr;
2837 sp<ThreadBase> thread;
2838 sp<StreamInHalInterface> stream = obtainStream(&thread);
2839 if (!stream) return NO_INIT; // If there is no stream, RecordThread is not reading.
2840
2841 status_t result = NO_ERROR;
2842 size_t bytesRead = 0;
2843 {
2844 ATRACE_NAME("read");
2845 result = stream->read(mSinkBuffer.get(), framesToRead * mFrameSize, &bytesRead);
2846 if (result != NO_ERROR) goto stream_error;
2847 if (bytesRead == 0) return NO_ERROR;
2848 }
2849
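// Publish the bytes just read so that read(), blocked on mReadCV on the RecordThread side,
// can wake up and consume a matching amount of stub data.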
2850 {
2851 std::lock_guard<std::mutex> lock(mReadLock);
2852 mReadBytes += bytesRead;
2853 mReadError = NO_ERROR;
2854 }
2855 mReadCV.notify_one();
2856 // writeFrames handles wraparound and should write all the provided frames.
2857 // If it couldn't, there is something wrong with the client/server buffer of the software patch.
2858 buffer->mFrameCount = writeFrames(
2859 &mPatchRecordAudioBufferProvider,
2860 mSinkBuffer.get(), bytesRead / mFrameSize, mFrameSize);
2861 ALOGW_IF(buffer->mFrameCount < bytesRead / mFrameSize,
2862 "Lost %zu frames obtained from HAL", bytesRead / mFrameSize - buffer->mFrameCount);
2863 mUnconsumedFrames = buffer->mFrameCount;
2864 struct timespec newTimeOut;
2865 if (startTimeNs) {
2866 // Correct the timeout by elapsed time.
2867 nsecs_t newTimeOutNs = audio_utils_ns_from_timespec(timeOut) - (systemTime() - startTimeNs);
2868 if (newTimeOutNs < 0) newTimeOutNs = 0;
2869 newTimeOut.tv_sec = newTimeOutNs / NANOS_PER_SECOND;
2870 newTimeOut.tv_nsec = newTimeOutNs - newTimeOut.tv_sec * NANOS_PER_SECOND;
2871 timeOut = &newTimeOut;
2872 }
2873 return PatchRecord::obtainBuffer(buffer, timeOut);
2874
2875 stream_error:
2876 stream->standby();
2877 {
2878 std::lock_guard<std::mutex> lock(mReadLock);
2879 mReadError = result;
2880 }
2881 mReadCV.notify_one();
2882 return result;
2883 }
2884
2885 void AudioFlinger::RecordThread::PassthruPatchRecord::releaseBuffer(Proxy::Buffer* buffer)
2886 {
2887 if (buffer->mFrameCount <= mUnconsumedFrames) {
2888 mUnconsumedFrames -= buffer->mFrameCount;
2889 } else {
2890 ALOGW("Write side has consumed more frames than we had: %zu > %zu",
2891 buffer->mFrameCount, mUnconsumedFrames);
2892 mUnconsumedFrames = 0;
2893 }
2894 PatchRecord::releaseBuffer(buffer);
2895 }
2896
2897 // AudioBufferProvider and Source methods are called on RecordThread
2898 // 'read' emulates actual audio data with 0's. This is OK as 'getNextBuffer'
2899 // and 'releaseBuffer' are stubbed out and ignore their input.
2900 // It's not possible to retrieve actual data here w/o blocking 'obtainBuffer'
2901 // until we copy it.
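// The wait below pairs with the notify_one calls in 'obtainBuffer': it blocks
// until either data has been read from the HAL or a read error has been recorded.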
status_t AudioFlinger::RecordThread::PassthruPatchRecord::read(
        void* buffer, size_t bytes, size_t* read)
{
    bytes = std::min(bytes, mFrameCount * mFrameSize);
    {
        std::unique_lock<std::mutex> lock(mReadLock);
        mReadCV.wait(lock, [&]{ return mReadError != NO_ERROR || mReadBytes != 0; });
        if (mReadError != NO_ERROR) {
            mLastReadFrames = 0;
            return mReadError;
        }
        *read = std::min(bytes, mReadBytes);
        mReadBytes -= *read;
    }
    mLastReadFrames = *read / mFrameSize;
    memset(buffer, 0, *read);
    return 0;
}

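// Delegates to the HAL input stream; returns NO_INIT if the RecordThread or its
// input stream has already gone away.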
status_t AudioFlinger::RecordThread::PassthruPatchRecord::getCapturePosition(
        int64_t* frames, int64_t* time)
{
    sp<ThreadBase> thread;
    sp<StreamInHalInterface> stream = obtainStream(&thread);
    return stream ? stream->getCapturePosition(frames, time) : NO_INIT;
}

status_t AudioFlinger::RecordThread::PassthruPatchRecord::standby()
{
    // RecordThread issues 'standby' command in two major cases:
    // 1. Error on read--this case is handled in 'obtainBuffer'.
    // 2. Track is stopping--as PassthruPatchRecord assumes continuous
    //    output, this can only happen when the software patch
    //    is being torn down. In this case, the RecordThread
    //    will terminate and close the HAL stream.
    return 0;
}

// As the buffer gets filled in obtainBuffer, here we only simulate data consumption.
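// The stub buffer handed out here contains only zeros (it is cleared in the
// constructor); the real audio data was already written into the patch buffer
// by 'obtainBuffer'.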
status_t AudioFlinger::RecordThread::PassthruPatchRecord::getNextBuffer(
        AudioBufferProvider::Buffer* buffer)
{
    buffer->frameCount = mLastReadFrames;
    buffer->raw = buffer->frameCount != 0 ? mStubBuffer.get() : nullptr;
    return NO_ERROR;
}

void AudioFlinger::RecordThread::PassthruPatchRecord::releaseBuffer(
        AudioBufferProvider::Buffer* buffer)
{
    buffer->frameCount = 0;
    buffer->raw = nullptr;
}

// ----------------------------------------------------------------------------
#undef LOG_TAG
#define LOG_TAG "AF::MmapTrack"

AudioFlinger::MmapThread::MmapTrack::MmapTrack(ThreadBase *thread,
        const audio_attributes_t& attr,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        audio_session_t sessionId,
        bool isOut,
        const AttributionSourceState& attributionSource,
        pid_t creatorPid,
        audio_port_handle_t portId)
    :   TrackBase(thread, NULL, attr, sampleRate, format,
                  channelMask, (size_t)0 /* frameCount */,
                  nullptr /* buffer */, (size_t)0 /* bufferSize */,
                  sessionId, creatorPid,
                  VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)),
                  isOut,
                  ALLOC_NONE,
                  TYPE_DEFAULT, portId,
                  std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_MMAP) + std::to_string(portId)),
        mPid(VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid))),
        mSilenced(false), mSilencedNotified(false)
{
    // Once this item is logged by the server, the client can add properties.
    mTrackMetrics.logConstructor(creatorPid, uid(), id());
}

AudioFlinger::MmapThread::MmapTrack::~MmapTrack()
{
}

status_t AudioFlinger::MmapThread::MmapTrack::initCheck() const
{
    return NO_ERROR;
}

status_t AudioFlinger::MmapThread::MmapTrack::start(AudioSystem::sync_event_t event __unused,
                                                    audio_session_t triggerSession __unused)
{
    return NO_ERROR;
}

void AudioFlinger::MmapThread::MmapTrack::stop()
{
}

// AudioBufferProvider interface
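// MMAP clients exchange audio data with the HAL directly through a shared memory
// buffer, so this provider intentionally hands out no data.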
status_t AudioFlinger::MmapThread::MmapTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer)
{
    buffer->frameCount = 0;
    buffer->raw = nullptr;
    return INVALID_OPERATION;
}

// ExtendedAudioBufferProvider interface
size_t AudioFlinger::MmapThread::MmapTrack::framesReady() const {
    return 0;
}

int64_t AudioFlinger::MmapThread::MmapTrack::framesReleased() const
{
    return 0;
}

void AudioFlinger::MmapThread::MmapTrack::onTimestamp(const ExtendedTimestamp &timestamp __unused)
{
}

void AudioFlinger::MmapThread::MmapTrack::appendDumpHeader(String8& result)
{
    result.appendFormat("Client Session Port Id Format Chn mask SRate Flags %s\n",
            isOut() ? "Usg CT" : "Source");
}

void AudioFlinger::MmapThread::MmapTrack::appendDump(String8& result, bool active __unused)
{
    result.appendFormat("%6u %7u %7u %08X %08X %6u 0x%03X ",
            mPid,
            mSessionId,
            mPortId,
            mFormat,
            mChannelMask,
            mSampleRate,
            mAttr.flags);
    if (isOut()) {
        result.appendFormat("%3x %2x", mAttr.usage, mAttr.content_type);
    } else {
        result.appendFormat("%6x", mAttr.source);
    }
    result.append("\n");
}

} // namespace android