1 /*
2 **
3 ** Copyright 2012, The Android Open Source Project
4 **
5 ** Licensed under the Apache License, Version 2.0 (the "License");
6 ** you may not use this file except in compliance with the License.
7 ** You may obtain a copy of the License at
8 **
9 **     http://www.apache.org/licenses/LICENSE-2.0
10 **
11 ** Unless required by applicable law or agreed to in writing, software
12 ** distributed under the License is distributed on an "AS IS" BASIS,
13 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 ** See the License for the specific language governing permissions and
15 ** limitations under the License.
16 */
17 
18 
19 #define LOG_TAG "AudioFlinger"
20 //#define LOG_NDEBUG 0
21 #define ATRACE_TAG ATRACE_TAG_AUDIO
22 
23 #include "Configuration.h"
24 #include <linux/futex.h>
25 #include <math.h>
26 #include <sys/syscall.h>
27 #include <utils/Log.h>
28 #include <utils/Trace.h>
29 
30 #include <private/media/AudioTrackShared.h>
31 
32 #include "AudioFlinger.h"
33 
34 #include <media/nbaio/Pipe.h>
35 #include <media/nbaio/PipeReader.h>
36 #include <media/AudioValidator.h>
37 #include <media/RecordBufferConverter.h>
38 #include <mediautils/ServiceUtilities.h>
39 #include <audio_utils/minifloat.h>
40 
41 // ----------------------------------------------------------------------------
42 
43 // Note: the following macro is used for extremely verbose logging messages.  In
44 // order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
45 // 0; but one side effect of this is to turn on all LOGVs as well.  Some messages
46 // are so verbose that we want to suppress them even when we have ALOG_ASSERT
47 // turned on.  Do not uncomment the #define below unless you really know what you
48 // are doing and want to see all of the extremely verbose messages.
49 //#define VERY_VERY_VERBOSE_LOGGING
50 #ifdef VERY_VERY_VERBOSE_LOGGING
51 #define ALOGVV ALOGV
52 #else
53 #define ALOGVV(a...) do { } while(0)
54 #endif
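// ALOGVV() statements therefore compile to no-ops unless VERY_VERY_VERBOSE_LOGGING is defined above.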
55 
56 // TODO: Remove when this is put into AidlConversionUtil.h
57 #define VALUE_OR_RETURN_BINDER_STATUS(x)    \
58     ({                                      \
59        auto _tmp = (x);                     \
60        if (!_tmp.ok()) return ::android::aidl_utils::binderStatusFromStatusT(_tmp.error()); \
61        std::move(_tmp.value());             \
62      })
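// Usage note: VALUE_OR_RETURN_BINDER_STATUS(x) evaluates x, returns an error Status from the
// enclosing function if x failed, and otherwise yields the unwrapped value, e.g. (as in
// getDualMonoMode() below):
//   *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
//           legacy2aidl_audio_dual_mono_mode_t_AudioDualMonoMode(mode));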
63 
64 namespace android {
65 
66 using ::android::aidl_utils::binderStatusFromStatusT;
67 using binder::Status;
68 using content::AttributionSourceState;
69 using media::VolumeShaper;
70 // ----------------------------------------------------------------------------
71 //      TrackBase
72 // ----------------------------------------------------------------------------
73 #undef LOG_TAG
74 #define LOG_TAG "AF::TrackBase"
75 
76 static volatile int32_t nextTrackId = 55;
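// Incremented atomically in the TrackBase constructor to generate a unique mId per track.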
77 
78 // TrackBase constructor must be called with AudioFlinger::mLock held
79 AudioFlinger::ThreadBase::TrackBase::TrackBase(
80             ThreadBase *thread,
81             const sp<Client>& client,
82             const audio_attributes_t& attr,
83             uint32_t sampleRate,
84             audio_format_t format,
85             audio_channel_mask_t channelMask,
86             size_t frameCount,
87             void *buffer,
88             size_t bufferSize,
89             audio_session_t sessionId,
90             pid_t creatorPid,
91             uid_t clientUid,
92             bool isOut,
93             alloc_type alloc,
94             track_type type,
95             audio_port_handle_t portId,
96             std::string metricsId)
97     :   RefBase(),
98         mThread(thread),
99         mClient(client),
100         mCblk(NULL),
101         // mBuffer, mBufferSize
102         mState(IDLE),
103         mAttr(attr),
104         mSampleRate(sampleRate),
105         mFormat(format),
106         mChannelMask(channelMask),
107         mChannelCount(isOut ?
108                 audio_channel_count_from_out_mask(channelMask) :
109                 audio_channel_count_from_in_mask(channelMask)),
110         mFrameSize(audio_has_proportional_frames(format) ?
111                 mChannelCount * audio_bytes_per_sample(format) : sizeof(int8_t)),
112         mFrameCount(frameCount),
113         mSessionId(sessionId),
114         mIsOut(isOut),
115         mId(android_atomic_inc(&nextTrackId)),
116         mTerminated(false),
117         mType(type),
118         mThreadIoHandle(thread ? thread->id() : AUDIO_IO_HANDLE_NONE),
119         mPortId(portId),
120         mIsInvalid(false),
121         mTrackMetrics(std::move(metricsId), isOut, clientUid),
122         mCreatorPid(creatorPid)
123 {
124     const uid_t callingUid = IPCThreadState::self()->getCallingUid();
125     if (!isAudioServerOrMediaServerUid(callingUid) || clientUid == AUDIO_UID_INVALID) {
126         ALOGW_IF(clientUid != AUDIO_UID_INVALID && clientUid != callingUid,
127                 "%s(%d): uid %d tried to pass itself off as %d",
128                  __func__, mId, callingUid, clientUid);
129         clientUid = callingUid;
130     }
131     // clientUid contains the uid of the app that is responsible for this track, so we can blame
132     // battery usage on it.
133     mUid = clientUid;
134 
135     // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
136 
137     size_t minBufferSize = buffer == NULL ? roundup(frameCount) : frameCount;
138     // check overflow when computing bufferSize due to multiplication by mFrameSize.
139     if (minBufferSize < frameCount  // roundup rounds down for values above UINT_MAX / 2
140             || mFrameSize == 0   // format needs to be correct
141             || minBufferSize > SIZE_MAX / mFrameSize) {
142         android_errorWriteLog(0x534e4554, "34749571");
143         return;
144     }
145     minBufferSize *= mFrameSize;
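    // minBufferSize is now expressed in bytes (frames * bytes per frame).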
146 
147     if (buffer == nullptr) {
148         bufferSize = minBufferSize; // allocated here.
149     } else if (minBufferSize > bufferSize) {
150         android_errorWriteLog(0x534e4554, "38340117");
151         return;
152     }
153 
154     size_t size = sizeof(audio_track_cblk_t);
155     if (buffer == NULL && alloc == ALLOC_CBLK) {
156         // check overflow when computing allocation size for streaming tracks.
157         if (size > SIZE_MAX - bufferSize) {
158             android_errorWriteLog(0x534e4554, "34749571");
159             return;
160         }
161         size += bufferSize;
162     }
163 
164     if (client != 0) {
165         mCblkMemory = client->allocator().allocate(mediautils::NamedAllocRequest{{size},
166                 std::string("Track ID: ").append(std::to_string(mId))});
167         if (mCblkMemory == 0 ||
168                 (mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->unsecurePointer())) == NULL) {
169             ALOGE("%s(%d): not enough memory for AudioTrack size=%zu", __func__, mId, size);
170             ALOGE("%s", client->allocator().dump().c_str());
171             mCblkMemory.clear();
172             return;
173         }
174     } else {
175         mCblk = (audio_track_cblk_t *) malloc(size);
176         if (mCblk == NULL) {
177             ALOGE("%s(%d): not enough memory for AudioTrack size=%zu", __func__, mId, size);
178             return;
179         }
180     }
181 
182     // construct the shared structure in-place.
183     if (mCblk != NULL) {
184         new(mCblk) audio_track_cblk_t();
185         switch (alloc) {
186         case ALLOC_READONLY: {
187             const sp<MemoryDealer> roHeap(thread->readOnlyHeap());
188             if (roHeap == 0 ||
189                     (mBufferMemory = roHeap->allocate(bufferSize)) == 0 ||
190                     (mBuffer = mBufferMemory->unsecurePointer()) == NULL) {
191                 ALOGE("%s(%d): not enough memory for read-only buffer size=%zu",
192                         __func__, mId, bufferSize);
193                 if (roHeap != 0) {
194                     roHeap->dump("buffer");
195                 }
196                 mCblkMemory.clear();
197                 mBufferMemory.clear();
198                 return;
199             }
200             memset(mBuffer, 0, bufferSize);
201             } break;
202         case ALLOC_PIPE:
203             mBufferMemory = thread->pipeMemory();
204             // mBuffer is the virtual address as seen from current process (mediaserver),
205             // and should normally be coming from mBufferMemory->unsecurePointer().
206             // However, in this case the TrackBase does not reference the buffer directly.
207             // It should reference the buffer via the pipe instead.
208             // Therefore, to detect incorrect usage of the buffer, we set mBuffer to NULL.
209             mBuffer = NULL;
210             bufferSize = 0;
211             break;
212         case ALLOC_CBLK:
213             // clear all buffers
214             if (buffer == NULL) {
215                 mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
216                 memset(mBuffer, 0, bufferSize);
217             } else {
218                 mBuffer = buffer;
219 #if 0
220                 mCblk->mFlags = CBLK_FORCEREADY;    // FIXME hack, need to fix the track ready logic
221 #endif
222             }
223             break;
224         case ALLOC_LOCAL:
225             mBuffer = calloc(1, bufferSize);
226             break;
227         case ALLOC_NONE:
228             mBuffer = buffer;
229             break;
230         default:
231             LOG_ALWAYS_FATAL("%s(%d): invalid allocation type: %d", __func__, mId, (int)alloc);
232         }
233         mBufferSize = bufferSize;
234 
235 #ifdef TEE_SINK
236         mTee.set(sampleRate, mChannelCount, format, NBAIO_Tee::TEE_FLAG_TRACK);
237 #endif
238         // mState is mirrored for the client to read.
239         mState.setMirror(&mCblk->mState);
240         // ensure our state matches up until we consolidate the enumeration.
241         static_assert(CBLK_STATE_IDLE == IDLE);
242         static_assert(CBLK_STATE_PAUSING == PAUSING);
243     }
244 }
245 
246 // TODO b/182392769: use attribution source util
247 static AttributionSourceState audioServerAttributionSource(pid_t pid) {
248    AttributionSourceState attributionSource{};
249    attributionSource.uid = AID_AUDIOSERVER;
250    attributionSource.pid = pid;
251    attributionSource.token = sp<BBinder>::make();
252    return attributionSource;
253 }
254 
255 status_t AudioFlinger::ThreadBase::TrackBase::initCheck() const
256 {
257     status_t status;
258     if (mType == TYPE_OUTPUT || mType == TYPE_PATCH) {
259         status = cblk() != NULL ? NO_ERROR : NO_MEMORY;
260     } else {
261         status = getCblk() != 0 ? NO_ERROR : NO_MEMORY;
262     }
263     return status;
264 }
265 
266 AudioFlinger::ThreadBase::TrackBase::~TrackBase()
267 {
268     // delete the proxy before deleting the shared memory it refers to, to avoid dangling reference
269     mServerProxy.clear();
270     releaseCblk();
271     mCblkMemory.clear();    // free the shared memory before releasing the heap it belongs to
272     if (mClient != 0) {
273         // Client destructor must run with AudioFlinger client mutex locked
274         Mutex::Autolock _l(mClient->audioFlinger()->mClientLock);
275         // If the client's reference count drops to zero, the associated destructor
276         // must run with AudioFlinger lock held. Thus the explicit clear() rather than
277         // relying on the automatic clear() at end of scope.
278         mClient.clear();
279     }
280     // flush the binder command buffer
281     IPCThreadState::self()->flushCommands();
282 }
283 
284 // AudioBufferProvider interface
285 // getNextBuffer() = 0;
286 // This implementation of releaseBuffer() is used by Track and RecordTrack
287 void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
288 {
289 #ifdef TEE_SINK
290     mTee.write(buffer->raw, buffer->frameCount);
291 #endif
292 
293     ServerProxy::Buffer buf;
294     buf.mFrameCount = buffer->frameCount;
295     buf.mRaw = buffer->raw;
296     buffer->frameCount = 0;
297     buffer->raw = NULL;
298     mServerProxy->releaseBuffer(&buf);
299 }
300 
301 status_t AudioFlinger::ThreadBase::TrackBase::setSyncEvent(const sp<SyncEvent>& event)
302 {
303     mSyncEvents.add(event);
304     return NO_ERROR;
305 }
306 
307 AudioFlinger::ThreadBase::PatchTrackBase::PatchTrackBase(const sp<ClientProxy>& proxy,
308                                                          const ThreadBase& thread,
309                                                          const Timeout& timeout)
310     : mProxy(proxy)
311 {
312     if (timeout) {
313         setPeerTimeout(*timeout);
314     } else {
315         // Double buffer mixer
316         uint64_t mixBufferNs = ((uint64_t)2 * thread.frameCount() * 1000000000) /
317                                               thread.sampleRate();
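        // For example (hypothetical values): a 960-frame buffer at 48 kHz gives
        // 2 * 960 * 1e9 / 48000 = 40,000,000 ns, i.e. a 40 ms peer timeout.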
318         setPeerTimeout(std::chrono::nanoseconds{mixBufferNs});
319     }
320 }
321 
322 void AudioFlinger::ThreadBase::PatchTrackBase::setPeerTimeout(std::chrono::nanoseconds timeout) {
323     mPeerTimeout.tv_sec = timeout.count() / std::nano::den;
324     mPeerTimeout.tv_nsec = timeout.count() % std::nano::den;
325 }
326 
327 
328 // ----------------------------------------------------------------------------
329 //      Playback
330 // ----------------------------------------------------------------------------
331 #undef LOG_TAG
332 #define LOG_TAG "AF::TrackHandle"
333 
334 AudioFlinger::TrackHandle::TrackHandle(const sp<AudioFlinger::PlaybackThread::Track>& track)
335     : BnAudioTrack(),
336       mTrack(track)
337 {
338     setMinSchedulerPolicy(SCHED_NORMAL, ANDROID_PRIORITY_AUDIO);
339 }
340 
341 AudioFlinger::TrackHandle::~TrackHandle() {
342     // Just stop the track on deletion; associated resources
343     // will be freed from the main thread once all pending buffers have
344     // been played. If the track is not in the active track list,
345     // everything is freed immediately instead.
346     mTrack->destroy();
347 }
348 
349 Status AudioFlinger::TrackHandle::getCblk(
350         std::optional<media::SharedFileRegion>* _aidl_return) {
351     *_aidl_return = legacy2aidl_NullableIMemory_SharedFileRegion(mTrack->getCblk()).value();
352     return Status::ok();
353 }
354 
355 Status AudioFlinger::TrackHandle::start(int32_t* _aidl_return) {
356     *_aidl_return = mTrack->start();
357     return Status::ok();
358 }
359 
360 Status AudioFlinger::TrackHandle::stop() {
361     mTrack->stop();
362     return Status::ok();
363 }
364 
365 Status AudioFlinger::TrackHandle::flush() {
366     mTrack->flush();
367     return Status::ok();
368 }
369 
370 Status AudioFlinger::TrackHandle::pause() {
371     mTrack->pause();
372     return Status::ok();
373 }
374 
375 Status AudioFlinger::TrackHandle::attachAuxEffect(int32_t effectId,
376                                                   int32_t* _aidl_return) {
377     *_aidl_return = mTrack->attachAuxEffect(effectId);
378     return Status::ok();
379 }
380 
381 Status AudioFlinger::TrackHandle::setParameters(const std::string& keyValuePairs,
382                                                 int32_t* _aidl_return) {
383     *_aidl_return = mTrack->setParameters(String8(keyValuePairs.c_str()));
384     return Status::ok();
385 }
386 
387 Status AudioFlinger::TrackHandle::selectPresentation(int32_t presentationId, int32_t programId,
388                                                      int32_t* _aidl_return) {
389     *_aidl_return = mTrack->selectPresentation(presentationId, programId);
390     return Status::ok();
391 }
392 
393 Status AudioFlinger::TrackHandle::getTimestamp(media::AudioTimestampInternal* timestamp,
394                                                int32_t* _aidl_return) {
395     AudioTimestamp legacy;
396     *_aidl_return = mTrack->getTimestamp(legacy);
397     if (*_aidl_return != OK) {
398         return Status::ok();
399     }
400     *timestamp = legacy2aidl_AudioTimestamp_AudioTimestampInternal(legacy).value();
401     return Status::ok();
402 }
403 
404 Status AudioFlinger::TrackHandle::signal() {
405     mTrack->signal();
406     return Status::ok();
407 }
408 
409 Status AudioFlinger::TrackHandle::applyVolumeShaper(
410         const media::VolumeShaperConfiguration& configuration,
411         const media::VolumeShaperOperation& operation,
412         int32_t* _aidl_return) {
413     sp<VolumeShaper::Configuration> conf = new VolumeShaper::Configuration();
414     *_aidl_return = conf->readFromParcelable(configuration);
415     if (*_aidl_return != OK) {
416         return Status::ok();
417     }
418 
419     sp<VolumeShaper::Operation> op = new VolumeShaper::Operation();
420     *_aidl_return = op->readFromParcelable(operation);
421     if (*_aidl_return != OK) {
422         return Status::ok();
423     }
424 
425     *_aidl_return = mTrack->applyVolumeShaper(conf, op);
426     return Status::ok();
427 }
428 
429 Status AudioFlinger::TrackHandle::getVolumeShaperState(
430         int32_t id,
431         std::optional<media::VolumeShaperState>* _aidl_return) {
432     sp<VolumeShaper::State> legacy = mTrack->getVolumeShaperState(id);
433     if (legacy == nullptr) {
434         _aidl_return->reset();
435         return Status::ok();
436     }
437     media::VolumeShaperState aidl;
438     legacy->writeToParcelable(&aidl);
439     *_aidl_return = aidl;
440     return Status::ok();
441 }
442 
443 Status AudioFlinger::TrackHandle::getDualMonoMode(
444         media::audio::common::AudioDualMonoMode* _aidl_return)
445 {
446     audio_dual_mono_mode_t mode = AUDIO_DUAL_MONO_MODE_OFF;
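    // The GNU "?:" chain below yields the first non-zero (error) status, so the
    // validation step only runs when the preceding getter succeeded (OK == 0).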
447     const status_t status = mTrack->getDualMonoMode(&mode)
448             ?: AudioValidator::validateDualMonoMode(mode);
449     if (status == OK) {
450         *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
451                 legacy2aidl_audio_dual_mono_mode_t_AudioDualMonoMode(mode));
452     }
453     return binderStatusFromStatusT(status);
454 }
455 
456 Status AudioFlinger::TrackHandle::setDualMonoMode(
457         media::audio::common::AudioDualMonoMode mode)
458 {
459     const auto localMonoMode = VALUE_OR_RETURN_BINDER_STATUS(
460             aidl2legacy_AudioDualMonoMode_audio_dual_mono_mode_t(mode));
461     return binderStatusFromStatusT(AudioValidator::validateDualMonoMode(localMonoMode)
462             ?: mTrack->setDualMonoMode(localMonoMode));
463 }
464 
465 Status AudioFlinger::TrackHandle::getAudioDescriptionMixLevel(float* _aidl_return)
466 {
467     float leveldB = -std::numeric_limits<float>::infinity();
468     const status_t status = mTrack->getAudioDescriptionMixLevel(&leveldB)
469             ?: AudioValidator::validateAudioDescriptionMixLevel(leveldB);
470     if (status == OK) *_aidl_return = leveldB;
471     return binderStatusFromStatusT(status);
472 }
473 
474 Status AudioFlinger::TrackHandle::setAudioDescriptionMixLevel(float leveldB)
475 {
476     return binderStatusFromStatusT(AudioValidator::validateAudioDescriptionMixLevel(leveldB)
477              ?: mTrack->setAudioDescriptionMixLevel(leveldB));
478 }
479 
480 Status AudioFlinger::TrackHandle::getPlaybackRateParameters(
481         media::audio::common::AudioPlaybackRate* _aidl_return)
482 {
483     audio_playback_rate_t localPlaybackRate{};
484     status_t status = mTrack->getPlaybackRateParameters(&localPlaybackRate)
485             ?: AudioValidator::validatePlaybackRate(localPlaybackRate);
486     if (status == NO_ERROR) {
487         *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
488                 legacy2aidl_audio_playback_rate_t_AudioPlaybackRate(localPlaybackRate));
489     }
490     return binderStatusFromStatusT(status);
491 }
492 
493 Status AudioFlinger::TrackHandle::setPlaybackRateParameters(
494         const media::audio::common::AudioPlaybackRate& playbackRate)
495 {
496     const audio_playback_rate_t localPlaybackRate = VALUE_OR_RETURN_BINDER_STATUS(
497             aidl2legacy_AudioPlaybackRate_audio_playback_rate_t(playbackRate));
498     return binderStatusFromStatusT(AudioValidator::validatePlaybackRate(localPlaybackRate)
499             ?: mTrack->setPlaybackRateParameters(localPlaybackRate));
500 }
501 
502 // ----------------------------------------------------------------------------
503 //      AppOp for audio playback
504 // -------------------------------
505 
506 // static
507 sp<AudioFlinger::PlaybackThread::OpPlayAudioMonitor>
508 AudioFlinger::PlaybackThread::OpPlayAudioMonitor::createIfNeeded(
509             AudioFlinger::ThreadBase* thread,
510             const AttributionSourceState& attributionSource, const audio_attributes_t& attr, int id,
511             audio_stream_type_t streamType)
512 {
513     Vector<String16> packages;
514     const uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
515     getPackagesForUid(uid, packages);
516     if (isServiceUid(uid)) {
517         if (packages.isEmpty()) {
518             ALOGD("OpPlayAudio: not muting track:%d usage:%d for service UID %d",
519                   id,
520                   attr.usage,
521                   uid);
522             return nullptr;
523         }
524     }
525     // stream type has been filtered by audio policy to indicate whether it can be muted
526     if (streamType == AUDIO_STREAM_ENFORCED_AUDIBLE) {
527         ALOGD("OpPlayAudio: not muting track:%d usage:%d ENFORCED_AUDIBLE", id, attr.usage);
528         return nullptr;
529     }
530     if ((attr.flags & AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY)
531             == AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY) {
532         ALOGD("OpPlayAudio: not muting track:%d flags %#x have FLAG_BYPASS_INTERRUPTION_POLICY",
533             id, attr.flags);
534         return nullptr;
535     }
536     return sp<OpPlayAudioMonitor>::make(thread, attributionSource, attr.usage, id, uid);
537 }
538 
539 AudioFlinger::PlaybackThread::OpPlayAudioMonitor::OpPlayAudioMonitor(
540         AudioFlinger::ThreadBase* thread,
541         const AttributionSourceState& attributionSource,
542         audio_usage_t usage, int id, uid_t uid)
543     : mThread(wp<AudioFlinger::ThreadBase>::fromExisting(thread)),
544       mHasOpPlayAudio(true),
545       mAttributionSource(attributionSource),
546       mUsage((int32_t)usage),
547       mId(id),
548       mUid(uid),
549       mPackageName(VALUE_OR_FATAL(aidl2legacy_string_view_String16(
550                   attributionSource.packageName.value_or("")))) {}
551 
552 AudioFlinger::PlaybackThread::OpPlayAudioMonitor::~OpPlayAudioMonitor()
553 {
554     if (mOpCallback != 0) {
555         mAppOpsManager.stopWatchingMode(mOpCallback);
556     }
557     mOpCallback.clear();
558 }
559 
560 void AudioFlinger::PlaybackThread::OpPlayAudioMonitor::onFirstRef()
561 {
562     // Make sure not to broadcast the initial state: it is not needed, and it could
563     // cause a deadlock because this method can be called with the mThread->mLock held.
564     checkPlayAudioForUsage(/*doBroadcast=*/false);
565     if (mAttributionSource.packageName.has_value()) {
566         mOpCallback = new PlayAudioOpCallback(this);
567         mAppOpsManager.startWatchingMode(AppOpsManager::OP_PLAY_AUDIO,
568                 mPackageName, mOpCallback);
569     }
570 }
571 
572 bool AudioFlinger::PlaybackThread::OpPlayAudioMonitor::hasOpPlayAudio() const {
573     return mHasOpPlayAudio.load();
574 }
575 
576 // Note: this method is never called (and never will be) for audio server / patch record tracks
577 // - not called from constructor due to check on UID,
578 // - not called from PlayAudioOpCallback because the callback is not installed in this case
579 void AudioFlinger::PlaybackThread::OpPlayAudioMonitor::checkPlayAudioForUsage(bool doBroadcast)
580 {
581     const bool hasAppOps = mAttributionSource.packageName.has_value()
582         && mAppOpsManager.checkAudioOpNoThrow(
583                 AppOpsManager::OP_PLAY_AUDIO, mUsage, mUid, mPackageName) ==
584                         AppOpsManager::MODE_ALLOWED;
585 
586     bool shouldChange = !hasAppOps;  // check if we need to update.
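    // compare_exchange_strong() succeeds, and atomically stores the new value, only when
    // mHasOpPlayAudio currently differs from hasAppOps, i.e. when the mute state actually changes.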
587     if (mHasOpPlayAudio.compare_exchange_strong(shouldChange, hasAppOps)) {
588         ALOGD("OpPlayAudio: track:%d usage:%d %smuted", mId, mUsage, hasAppOps ? "not " : "");
589         if (doBroadcast) {
590             auto thread = mThread.promote();
591             if (thread != nullptr && thread->type() == AudioFlinger::ThreadBase::OFFLOAD) {
592                 // Wake up Thread if offloaded, otherwise it may be several seconds for update.
593                 Mutex::Autolock _l(thread->mLock);
594                 thread->broadcast_l();
595             }
596         }
597     }
598 }
599 
600 AudioFlinger::PlaybackThread::OpPlayAudioMonitor::PlayAudioOpCallback::PlayAudioOpCallback(
601         const wp<OpPlayAudioMonitor>& monitor) : mMonitor(monitor)
602 { }
603 
604 void AudioFlinger::PlaybackThread::OpPlayAudioMonitor::PlayAudioOpCallback::opChanged(int32_t op,
605             const String16& packageName) {
606     // we only have uid, so we need to check all package names anyway
607     UNUSED(packageName);
608     if (op != AppOpsManager::OP_PLAY_AUDIO) {
609         return;
610     }
611     sp<OpPlayAudioMonitor> monitor = mMonitor.promote();
612     if (monitor != NULL) {
613         monitor->checkPlayAudioForUsage(/*doBroadcast=*/true);
614     }
615 }
616 
617 // static
618 void AudioFlinger::PlaybackThread::OpPlayAudioMonitor::getPackagesForUid(
619     uid_t uid, Vector<String16>& packages)
620 {
621     PermissionController permissionController;
622     permissionController.getPackagesForUid(uid, packages);
623 }
624 
625 // ----------------------------------------------------------------------------
626 #undef LOG_TAG
627 #define LOG_TAG "AF::Track"
628 
629 // Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
630 AudioFlinger::PlaybackThread::Track::Track(
631             PlaybackThread *thread,
632             const sp<Client>& client,
633             audio_stream_type_t streamType,
634             const audio_attributes_t& attr,
635             uint32_t sampleRate,
636             audio_format_t format,
637             audio_channel_mask_t channelMask,
638             size_t frameCount,
639             void *buffer,
640             size_t bufferSize,
641             const sp<IMemory>& sharedBuffer,
642             audio_session_t sessionId,
643             pid_t creatorPid,
644             const AttributionSourceState& attributionSource,
645             audio_output_flags_t flags,
646             track_type type,
647             audio_port_handle_t portId,
648             size_t frameCountToBeReady,
649             float speed,
650             bool isSpatialized,
651             bool isBitPerfect)
652     :   TrackBase(thread, client, attr, sampleRate, format, channelMask, frameCount,
653                   // TODO: Using unsecurePointer() has some associated security pitfalls
654                   //       (see declaration for details).
655                   //       Either document why it is safe in this case or address the
656                   //       issue (e.g. by copying).
657                   (sharedBuffer != 0) ? sharedBuffer->unsecurePointer() : buffer,
658                   (sharedBuffer != 0) ? sharedBuffer->size() : bufferSize,
659                   sessionId, creatorPid,
660                   VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)), true /*isOut*/,
661                   (type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,
662                   type,
663                   portId,
664                   std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK) + std::to_string(portId)),
665     mFillingUpStatus(FS_INVALID),
666     // mRetryCount initialized later when needed
667     mSharedBuffer(sharedBuffer),
668     mStreamType(streamType),
669     mMainBuffer(thread->sinkBuffer()),
670     mAuxBuffer(NULL),
671     mAuxEffectId(0), mHasVolumeController(false),
672     mFrameMap(16 /* sink-frame-to-track-frame map memory */),
673     mVolumeHandler(new media::VolumeHandler(sampleRate)),
674     mOpPlayAudioMonitor(OpPlayAudioMonitor::createIfNeeded(thread, attributionSource, attr, id(),
675         streamType)),
676     // mSinkTimestamp
677     mFastIndex(-1),
678     mCachedVolume(1.0),
679     /* The track might not play immediately after becoming active, as if its volume were 0.
680      * Its volume will be computed when the track starts playing. */
681     mFinalVolume(0.f),
682     mResumeToStopping(false),
683     mFlushHwPending(false),
684     mFlags(flags),
685     mSpeed(speed),
686     mIsSpatialized(isSpatialized),
687     mIsBitPerfect(isBitPerfect)
688 {
689     // client == 0 implies sharedBuffer == 0
690     ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
691 
692     ALOGV_IF(sharedBuffer != 0, "%s(%d): sharedBuffer: %p, size: %zu",
693             __func__, mId, sharedBuffer->unsecurePointer(), sharedBuffer->size());
694 
695     if (mCblk == NULL) {
696         return;
697     }
698 
699     uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
700     if (!thread->isTrackAllowed_l(channelMask, format, sessionId, uid)) {
701         ALOGE("%s(%d): no more tracks available", __func__, mId);
702         releaseCblk(); // this makes the track invalid.
703         return;
704     }
705 
706     if (sharedBuffer == 0) {
707         mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
708                 mFrameSize, !isExternalTrack(), sampleRate);
709     } else {
710         mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
711                 mFrameSize, sampleRate);
712     }
713     mServerProxy = mAudioTrackServerProxy;
714     mServerProxy->setStartThresholdInFrames(frameCountToBeReady); // update the Cblk value
715 
716     // only allocate a fast track index if we were able to allocate a normal track name
717     if (flags & AUDIO_OUTPUT_FLAG_FAST) {
718         // FIXME: Not calling framesReadyIsCalledByMultipleThreads() exposes a potential
719         // race with setSyncEvent(). However, if we call it, we cannot properly start
720         // static fast tracks (SoundPool) immediately after stopping.
721         //mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();
722         ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
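        // __builtin_ctz() returns the index of the lowest set bit of the availability mask,
        // i.e. the lowest free fast track slot (slot 0 is never handed out here, per the assert below).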
723         int i = __builtin_ctz(thread->mFastTrackAvailMask);
724         ALOG_ASSERT(0 < i && i < (int)FastMixerState::sMaxFastTracks);
725         // FIXME This is too eager.  We allocate a fast track index before the
726         //       fast track becomes active.  Since fast tracks are a scarce resource,
727         //       this means we are potentially denying other more important fast tracks from
728         //       being created.  It would be better to allocate the index dynamically.
729         mFastIndex = i;
730         thread->mFastTrackAvailMask &= ~(1 << i);
731     }
732 
733     mServerLatencySupported = checkServerLatencySupported(format, flags);
734 #ifdef TEE_SINK
735     mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
736             + "_" + std::to_string(mId) + "_T");
737 #endif
738 
739     if (thread->supportsHapticPlayback()) {
740         // If the track is attached to a haptic playback thread, it may carry a
741         // HapticGenerator effect, which generates haptic data on the track. In that case,
742         // an external vibration is always created for all tracks attached to the haptic playback thread.
743         mAudioVibrationController = new AudioVibrationController(this);
744         std::string packageName = attributionSource.packageName.has_value() ?
745             attributionSource.packageName.value() : "";
746         mExternalVibration = new os::ExternalVibration(
747                 mUid, packageName, mAttr, mAudioVibrationController);
748     }
749 
750     // Once this item is logged by the server, the client can add properties.
751     const char * const traits = sharedBuffer == 0 ? "" : "static";
752     mTrackMetrics.logConstructor(creatorPid, uid, id(), traits, streamType);
753 }
754 
755 AudioFlinger::PlaybackThread::Track::~Track()
756 {
757     ALOGV("%s(%d)", __func__, mId);
758 
759     // The destructor would clear mSharedBuffer,
760     // but it will not push the decremented reference count,
761     // leaving the client's IMemory dangling indefinitely.
762     // This prevents that leak.
763     if (mSharedBuffer != 0) {
764         mSharedBuffer.clear();
765     }
766 }
767 
768 status_t AudioFlinger::PlaybackThread::Track::initCheck() const
769 {
770     status_t status = TrackBase::initCheck();
771     if (status == NO_ERROR && mCblk == nullptr) {
772         status = NO_MEMORY;
773     }
774     return status;
775 }
776 
777 void AudioFlinger::PlaybackThread::Track::destroy()
778 {
779     // NOTE: destroyTrack_l() can remove a strong reference to this Track
780     // by removing it from mTracks vector, so there is a risk that this Track's
781     // destructor is called. As the destructor needs to lock mLock,
782     // we must acquire a strong reference on this Track before locking mLock
783     // here so that the destructor is called only when exiting this function.
784     // On the other hand, as long as Track::destroy() is only called by
785     // TrackHandle destructor, the TrackHandle still holds a strong ref on
786     // this Track with its member mTrack.
787     sp<Track> keep(this);
788     { // scope for mLock
789         bool wasActive = false;
790         sp<ThreadBase> thread = mThread.promote();
791         if (thread != 0) {
792             Mutex::Autolock _l(thread->mLock);
793             PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
794             wasActive = playbackThread->destroyTrack_l(this);
795             forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->destroy(); });
796         }
797         if (isExternalTrack() && !wasActive) {
798             AudioSystem::releaseOutput(mPortId);
799         }
800     }
801 }
802 
803 void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
804 {
805     result.appendFormat("Type     Id Active Client Session Port Id S  Flags "
806                         "  Format Chn mask  SRate "
807                         "ST Usg CT "
808                         " G db  L dB  R dB  VS dB "
809                         "  Server FrmCnt  FrmRdy F Underruns  Flushed BitPerfect"
810                         "%s\n",
811                         isServerLatencySupported() ? "   Latency" : "");
812 }
813 
814 void AudioFlinger::PlaybackThread::Track::appendDump(String8& result, bool active)
815 {
816     char trackType;
817     switch (mType) {
818     case TYPE_DEFAULT:
819     case TYPE_OUTPUT:
820         if (isStatic()) {
821             trackType = 'S'; // static
822         } else {
823             trackType = ' '; // normal
824         }
825         break;
826     case TYPE_PATCH:
827         trackType = 'P';
828         break;
829     default:
830         trackType = '?';
831     }
832 
833     if (isFastTrack()) {
834         result.appendFormat("F%d %c %6d", mFastIndex, trackType, mId);
835     } else {
836         result.appendFormat("   %c %6d", trackType, mId);
837     }
838 
839     char nowInUnderrun;
840     switch (mObservedUnderruns.mBitFields.mMostRecent) {
841     case UNDERRUN_FULL:
842         nowInUnderrun = ' ';
843         break;
844     case UNDERRUN_PARTIAL:
845         nowInUnderrun = '<';
846         break;
847     case UNDERRUN_EMPTY:
848         nowInUnderrun = '*';
849         break;
850     default:
851         nowInUnderrun = '?';
852         break;
853     }
854 
855     char fillingStatus;
856     switch (mFillingUpStatus) {
857     case FS_INVALID:
858         fillingStatus = 'I';
859         break;
860     case FS_FILLING:
861         fillingStatus = 'f';
862         break;
863     case FS_FILLED:
864         fillingStatus = 'F';
865         break;
866     case FS_ACTIVE:
867         fillingStatus = 'A';
868         break;
869     default:
870         fillingStatus = '?';
871         break;
872     }
873 
874     // clip framesReadySafe to max representation in dump
875     const size_t framesReadySafe =
876             std::min(mAudioTrackServerProxy->framesReadySafe(), (size_t)99999999);
877 
878     // obtain volumes
879     const gain_minifloat_packed_t vlr = mAudioTrackServerProxy->getVolumeLR();
880     const std::pair<float /* volume */, bool /* active */> vsVolume =
881             mVolumeHandler->getLastVolume();
882 
883     // Our effective frame count is obtained by ServerProxy::getBufferSizeInFrames()
884     // as it may be reduced by the application.
885     const size_t bufferSizeInFrames = (size_t)mAudioTrackServerProxy->getBufferSizeInFrames();
886     // Check whether the buffer size has been modified by the app.
887     const char modifiedBufferChar = bufferSizeInFrames < mFrameCount
888             ? 'r' /* buffer reduced */: bufferSizeInFrames > mFrameCount
889                     ? 'e' /* error */ : ' ' /* identical */;
890 
891     result.appendFormat("%7s %6u %7u %7u %2s 0x%03X "
892                         "%08X %08X %6u "
893                         "%2u %3x %2x "
894                         "%5.2g %5.2g %5.2g %5.2g%c "
895                         "%08X %6zu%c %6zu %c %9u%c %7u %10s",
896             active ? "yes" : "no",
897             (mClient == 0) ? getpid() : mClient->pid(),
898             mSessionId,
899             mPortId,
900             getTrackStateAsCodedString(),
901             mCblk->mFlags,
902 
903             mFormat,
904             mChannelMask,
905             sampleRate(),
906 
907             mStreamType,
908             mAttr.usage,
909             mAttr.content_type,
910 
911             20.0 * log10(mFinalVolume),
912             20.0 * log10(float_from_gain(gain_minifloat_unpack_left(vlr))),
913             20.0 * log10(float_from_gain(gain_minifloat_unpack_right(vlr))),
914             20.0 * log10(vsVolume.first), // VolumeShaper(s) total volume
915             vsVolume.second ? 'A' : ' ',  // if any VolumeShapers active
916 
917             mCblk->mServer,
918             bufferSizeInFrames,
919             modifiedBufferChar,
920             framesReadySafe,
921             fillingStatus,
922             mAudioTrackServerProxy->getUnderrunFrames(),
923             nowInUnderrun,
924             (unsigned)mAudioTrackServerProxy->framesFlushed() % 10000000,
925             isBitPerfect() ? "true" : "false"
926             );
927 
928     if (isServerLatencySupported()) {
929         double latencyMs;
930         bool fromTrack;
931         if (getTrackLatencyMs(&latencyMs, &fromTrack) == OK) {
932             // Show latency in msec, followed by 't' if from track timestamp (the most accurate)
933             // or 'k' if estimated from kernel because track frames haven't been presented yet.
934             result.appendFormat(" %7.2lf %c", latencyMs, fromTrack ? 't' : 'k');
935         } else {
936             result.appendFormat("%10s", mCblk->mServer != 0 ? "unavail" : "new");
937         }
938     }
939     result.append("\n");
940 }
941 
942 uint32_t AudioFlinger::PlaybackThread::Track::sampleRate() const {
943     return mAudioTrackServerProxy->getSampleRate();
944 }
945 
946 // AudioBufferProvider interface
947 status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(AudioBufferProvider::Buffer* buffer)
948 {
949     ServerProxy::Buffer buf;
950     size_t desiredFrames = buffer->frameCount;
951     buf.mFrameCount = desiredFrames;
952     status_t status = mServerProxy->obtainBuffer(&buf);
953     buffer->frameCount = buf.mFrameCount;
954     buffer->raw = buf.mRaw;
955     if (buf.mFrameCount == 0 && !isStopping() && !isStopped() && !isPaused() && !isOffloaded()) {
956         ALOGV("%s(%d): underrun,  framesReady(%zu) < framesDesired(%zd), state: %d",
957                 __func__, mId, buf.mFrameCount, desiredFrames, (int)mState);
958         mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
959     } else {
960         mAudioTrackServerProxy->tallyUnderrunFrames(0);
961     }
962     return status;
963 }
964 
965 void AudioFlinger::PlaybackThread::Track::releaseBuffer(AudioBufferProvider::Buffer* buffer)
966 {
967     interceptBuffer(*buffer);
968     TrackBase::releaseBuffer(buffer);
969 }
970 
971 // TODO: compensate for time shift between HW modules.
972 void AudioFlinger::PlaybackThread::Track::interceptBuffer(
973         const AudioBufferProvider::Buffer& sourceBuffer) {
974     auto start = std::chrono::steady_clock::now();
975     const size_t frameCount = sourceBuffer.frameCount;
976     if (frameCount == 0) {
977         return;  // No audio to intercept.
978         // Additionally, PatchProxyBufferProvider::obtainBuffer (called by PatchTrack::getNextBuffer)
979         // does not allow a 0-frame request, unlike getNextBuffer.
980     }
981     for (auto& teePatch : mTeePatches) {
982         RecordThread::PatchRecord* patchRecord = teePatch.patchRecord.get();
983         const size_t framesWritten = patchRecord->writeFrames(
984                 sourceBuffer.i8, frameCount, mFrameSize);
985         const size_t framesLeft = frameCount - framesWritten;
986         ALOGW_IF(framesLeft != 0, "%s(%d) PatchRecord %d cannot provide a big enough "
987                  "buffer %zu/%zu, dropping %zu frames", __func__, mId, patchRecord->mId,
988                  framesWritten, frameCount, framesLeft);
989     }
990     auto spent = ceil<std::chrono::microseconds>(std::chrono::steady_clock::now() - start);
991     using namespace std::chrono_literals;
992     // Average is ~20us per track; this should virtually never be logged (logging takes >200us).
993     ALOGD_IF(spent > 500us, "%s: took %lldus to intercept %zu tracks", __func__,
994              spent.count(), mTeePatches.size());
995 }
996 
997 // ExtendedAudioBufferProvider interface
998 
999 // framesReady() may return an approximation of the number of frames if called
1000 // from a different thread than the one calling Proxy->obtainBuffer() and
1001 // Proxy->releaseBuffer(). Also note there is no mutual exclusion in the
1002 // AudioTrackServerProxy so be especially careful calling with FastTracks.
1003 size_t AudioFlinger::PlaybackThread::Track::framesReady() const {
1004     if (mSharedBuffer != 0 && (isStopped() || isStopping())) {
1005         // Static tracks return zero frames immediately upon stopping (for FastTracks).
1006         // The remainder of the buffer is not drained.
1007         return 0;
1008     }
1009     return mAudioTrackServerProxy->framesReady();
1010 }
1011 
1012 int64_t AudioFlinger::PlaybackThread::Track::framesReleased() const
1013 {
1014     return mAudioTrackServerProxy->framesReleased();
1015 }
1016 
1017 void AudioFlinger::PlaybackThread::Track::onTimestamp(const ExtendedTimestamp &timestamp)
1018 {
1019     // This call comes from a FastTrack and should be kept lockless.
1020     // The server side frames are already translated to client frames.
1021     mAudioTrackServerProxy->setTimestamp(timestamp);
1022 
1023     // We do not set drained here, as FastTrack timestamp may not go to very last frame.
1024 
1025     // Compute latency.
1026     // TODO: Consider whether the server latency may be passed in by FastMixer
1027     // as a constant for all active FastTracks.
1028     const double latencyMs = timestamp.getOutputServerLatencyMs(sampleRate());
1029     mServerLatencyFromTrack.store(true);
1030     mServerLatencyMs.store(latencyMs);
1031 }
1032 
1033 // Don't call for fast tracks; the framesReady() could result in priority inversion
1034 bool AudioFlinger::PlaybackThread::Track::isReady() const {
1035     if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) {
1036         return true;
1037     }
1038 
1039     if (isStopping()) {
1040         if (framesReady() > 0) {
1041             mFillingUpStatus = FS_FILLED;
1042         }
1043         return true;
1044     }
1045 
1046     size_t bufferSizeInFrames = mServerProxy->getBufferSizeInFrames();
1047     // Note: mServerProxy->getStartThresholdInFrames() is clamped.
1048     const size_t startThresholdInFrames = mServerProxy->getStartThresholdInFrames();
1049     const size_t framesToBeReady = std::clamp(  // clamp again to validate client values.
1050             std::min(startThresholdInFrames, bufferSizeInFrames), size_t(1), mFrameCount);
1051 
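    // The track is considered ready once at least framesToBeReady frames are queued,
    // or immediately if the client has set CBLK_FORCEREADY.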
1052     if (framesReady() >= framesToBeReady || (mCblk->mFlags & CBLK_FORCEREADY)) {
1053         ALOGV("%s(%d): consider track ready with %zu/%zu, target was %zu)",
1054               __func__, mId, framesReady(), bufferSizeInFrames, framesToBeReady);
1055         mFillingUpStatus = FS_FILLED;
1056         android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
1057         return true;
1058     }
1059     return false;
1060 }
1061 
1062 status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event __unused,
1063                                                     audio_session_t triggerSession __unused)
1064 {
1065     status_t status = NO_ERROR;
1066     ALOGV("%s(%d): calling pid %d session %d",
1067             __func__, mId, IPCThreadState::self()->getCallingPid(), mSessionId);
1068 
1069     sp<ThreadBase> thread = mThread.promote();
1070     if (thread != 0) {
1071         if (isOffloaded()) {
1072             Mutex::Autolock _laf(thread->mAudioFlinger->mLock);
1073             Mutex::Autolock _lth(thread->mLock);
1074             sp<EffectChain> ec = thread->getEffectChain_l(mSessionId);
1075             if (thread->mAudioFlinger->isNonOffloadableGlobalEffectEnabled_l() ||
1076                     (ec != 0 && ec->isNonOffloadableEnabled())) {
1077                 invalidate();
1078                 return PERMISSION_DENIED;
1079             }
1080         }
1081         Mutex::Autolock _lth(thread->mLock);
1082         track_state state = mState;
1083         // here the track could be either new, or restarted
1084         // in both cases "unstop" the track
1085 
1086         // initial state-stopping. next state-pausing.
1087         // What if resume is called?
1088 
1089         if (state == FLUSHED) {
1090             // avoid underrun glitches when starting after flush
1091             reset();
1092         }
1093 
1094         // clear mPauseHwPending because of pause (and possibly flush) during underrun.
1095         mPauseHwPending = false;
1096         if (state == PAUSED || state == PAUSING) {
1097             if (mResumeToStopping) {
1098                 // a pause happened while stopping; we need to resume to STOPPING_1
1099                 mState = TrackBase::STOPPING_1;
1100                 ALOGV("%s(%d): PAUSED => STOPPING_1 on thread %d",
1101                         __func__, mId, (int)mThreadIoHandle);
1102             } else {
1103                 mState = TrackBase::RESUMING;
1104                 ALOGV("%s(%d): PAUSED => RESUMING on thread %d",
1105                         __func__,  mId, (int)mThreadIoHandle);
1106             }
1107         } else {
1108             mState = TrackBase::ACTIVE;
1109             ALOGV("%s(%d): ? => ACTIVE on thread %d",
1110                     __func__, mId, (int)mThreadIoHandle);
1111         }
1112 
1113         PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1114 
1115         // states to reset position info for pcm tracks
1116         if (audio_is_linear_pcm(mFormat)
1117                 && (state == IDLE || state == STOPPED || state == FLUSHED)) {
1118             mFrameMap.reset();
1119 
1120             if (!isFastTrack() && (isDirect() || isOffloaded())) {
1121                 // Start point of track -> sink frame map. If the HAL returns a
1122                 // frame position smaller than the first written frame in
1123                 // updateTrackFrameInfo, the timestamp can be interpolated
1124                 // instead of using a larger value.
1125                 mFrameMap.push(mAudioTrackServerProxy->framesReleased(),
1126                                playbackThread->framesWritten());
1127             }
1128         }
1129         if (isFastTrack()) {
1130             // refresh fast track underruns on start because that field is never cleared
1131             // by the fast mixer; furthermore, the same track can be recycled, i.e. start
1132             // after stop.
1133             mObservedUnderruns = playbackThread->getFastTrackUnderruns(mFastIndex);
1134         }
1135         status = playbackThread->addTrack_l(this);
1136         if (status == INVALID_OPERATION || status == PERMISSION_DENIED || status == DEAD_OBJECT) {
1137             triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
1138             //  restore previous state if start was rejected by policy manager
1139             if (status == PERMISSION_DENIED || status == DEAD_OBJECT) {
1140                 mState = state;
1141             }
1142         }
1143 
1144         // Audio timing metrics are computed a few mix cycles after starting.
1145         {
1146             mLogStartCountdown = LOG_START_COUNTDOWN;
1147             mLogStartTimeNs = systemTime();
1148             mLogStartFrames = mAudioTrackServerProxy->getTimestamp()
1149                     .mPosition[ExtendedTimestamp::LOCATION_KERNEL];
1150             mLogLatencyMs = 0.;
1151         }
1152         mLogForceVolumeUpdate = true;  // at least one volume logged for metrics when starting.
1153 
1154         if (status == NO_ERROR || status == ALREADY_EXISTS) {
1155             // for streaming tracks, remove the buffer read stop limit.
1156             mAudioTrackServerProxy->start();
1157         }
1158 
1159         // track was already in the active list, not a problem
1160         if (status == ALREADY_EXISTS) {
1161             status = NO_ERROR;
1162         } else {
1163             // Acknowledge any pending flush(), so that subsequent new data isn't discarded.
1164             // It is usually unsafe to access the server proxy from a binder thread.
1165             // But in this case we know the mixer thread (whether normal mixer or fast mixer)
1166             // isn't looking at this track yet:  we still hold the normal mixer thread lock,
1167             // and for fast tracks the track is not yet in the fast mixer thread's active set.
1168             // For static tracks, this is used to acknowledge change in position or loop.
1169             ServerProxy::Buffer buffer;
1170             buffer.mFrameCount = 1;
1171             (void) mAudioTrackServerProxy->obtainBuffer(&buffer, true /*ackFlush*/);
1172         }
1173         if (status == NO_ERROR) {
1174             forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->start(); });
1175         }
1176     } else {
1177         status = BAD_VALUE;
1178     }
1179     if (status == NO_ERROR) {
1180         // send format to AudioManager for playback activity monitoring
1181         sp<IAudioManager> audioManager = thread->mAudioFlinger->getOrCreateAudioManager();
1182         if (audioManager && mPortId != AUDIO_PORT_HANDLE_NONE) {
1183             std::unique_ptr<os::PersistableBundle> bundle =
1184                     std::make_unique<os::PersistableBundle>();
1185             bundle->putBoolean(String16(kExtraPlayerEventSpatializedKey),
1186                                isSpatialized());
1187             bundle->putInt(String16(kExtraPlayerEventSampleRateKey), mSampleRate);
1188             bundle->putInt(String16(kExtraPlayerEventChannelMaskKey), mChannelMask);
1189             status_t result = audioManager->portEvent(mPortId,
1190                                                       PLAYER_UPDATE_FORMAT, bundle);
1191             if (result != OK) {
1192                 ALOGE("%s: unable to send playback format for port ID %d, status error %d",
1193                       __func__, mPortId, result);
1194             }
1195         }
1196     }
1197     return status;
1198 }
1199 
1200 void AudioFlinger::PlaybackThread::Track::stop()
1201 {
1202     ALOGV("%s(%d): calling pid %d", __func__, mId, IPCThreadState::self()->getCallingPid());
1203     sp<ThreadBase> thread = mThread.promote();
1204     if (thread != 0) {
1205         Mutex::Autolock _l(thread->mLock);
1206         track_state state = mState;
1207         if (state == RESUMING || state == ACTIVE || state == PAUSING || state == PAUSED) {
1208             // If the track is not active (PAUSED and buffers full), flush buffers
1209             PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1210             if (playbackThread->mActiveTracks.indexOf(this) < 0) {
1211                 reset();
1212                 mState = STOPPED;
1213             } else if (!isFastTrack() && !isOffloaded() && !isDirect()) {
1214                 mState = STOPPED;
1215             } else {
1216                 // For fast tracks prepareTracks_l() will set state to STOPPING_2
1217                 // presentation is complete
1218                 // For an offloaded track this starts a drain and state will
1219                 // move to STOPPING_2 when drain completes and then STOPPED
1220                 mState = STOPPING_1;
1221                 if (isOffloaded()) {
1222                     mRetryCount = PlaybackThread::kMaxTrackStopRetriesOffload;
1223                 }
1224             }
1225             playbackThread->broadcast_l();
1226             ALOGV("%s(%d): not stopping/stopped => stopping/stopped on thread %d",
1227                     __func__, mId, (int)mThreadIoHandle);
1228         }
1229         forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->stop(); });
1230     }
1231 }
1232 
1233 void AudioFlinger::PlaybackThread::Track::pause()
1234 {
1235     ALOGV("%s(%d): calling pid %d", __func__, mId, IPCThreadState::self()->getCallingPid());
1236     sp<ThreadBase> thread = mThread.promote();
1237     if (thread != 0) {
1238         Mutex::Autolock _l(thread->mLock);
1239         PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1240         switch (mState) {
1241         case STOPPING_1:
1242         case STOPPING_2:
1243             if (!isOffloaded()) {
1244                 /* nothing to do if track is not offloaded */
1245                 break;
1246             }
1247 
1248             // Offloaded track was draining, we need to carry on draining when resumed
1249             mResumeToStopping = true;
1250             FALLTHROUGH_INTENDED;
1251         case ACTIVE:
1252         case RESUMING:
1253             mState = PAUSING;
1254             ALOGV("%s(%d): ACTIVE/RESUMING => PAUSING on thread %d",
1255                     __func__, mId, (int)mThreadIoHandle);
1256             if (isOffloadedOrDirect()) {
1257                 mPauseHwPending = true;
1258             }
1259             playbackThread->broadcast_l();
1260             break;
1261 
1262         default:
1263             break;
1264         }
1265         // Pausing the TeePatch to avoid a glitch on underrun, at the cost of buffered audio loss.
1266         forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->pause(); });
1267     }
1268 }
1269 
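// Discards audio pending in the track's buffer. Offloaded tracks are reset but kept
// active (the HAL flush is deferred via mFlushHwPending) so that rapid seeking is not
// penalized by a slow hardware pause; other tracks move to FLUSHED and are reset here
// only if they are no longer on the active list, otherwise prepareTracks_l() completes
// the flush.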
1270 void AudioFlinger::PlaybackThread::Track::flush()
1271 {
1272     ALOGV("%s(%d)", __func__, mId);
1273     sp<ThreadBase> thread = mThread.promote();
1274     if (thread != 0) {
1275         Mutex::Autolock _l(thread->mLock);
1276         PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1277 
1278         // Flush the ring buffer now if the track is not active in the PlaybackThread.
1279         // Otherwise the flush would not be done until the track is resumed.
1280         // Requires that FastTrack removal be BLOCK_UNTIL_ACKED.
1281         if (playbackThread->mActiveTracks.indexOf(this) < 0) {
1282             (void)mServerProxy->flushBufferIfNeeded();
1283         }
1284 
1285         if (isOffloaded()) {
1286             // If offloaded we allow flush during any state except terminated
1287             // and keep the track active to avoid problems if user is seeking
1288             // rapidly and underlying hardware has a significant delay handling
1289             // a pause
1290             if (isTerminated()) {
1291                 return;
1292             }
1293 
1294             ALOGV("%s(%d): offload flush", __func__, mId);
1295             reset();
1296 
1297             if (mState == STOPPING_1 || mState == STOPPING_2) {
1298                 ALOGV("%s(%d): flushed in STOPPING_1 or 2 state, change state to ACTIVE",
1299                         __func__, mId);
1300                 mState = ACTIVE;
1301             }
1302 
1303             mFlushHwPending = true;
1304             mResumeToStopping = false;
1305         } else {
1306             if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED &&
1307                     mState != PAUSED && mState != PAUSING && mState != IDLE && mState != FLUSHED) {
1308                 return;
1309             }
1310             // No point remaining in PAUSED state after a flush => go to
1311             // FLUSHED state
1312             mState = FLUSHED;
1313             // do not reset the track if it is still in the process of being stopped or paused.
1314             // this will be done by prepareTracks_l() when the track is stopped.
1315             // prepareTracks_l() will see mState == FLUSHED, then
1316             // remove from active track list, reset(), and trigger presentation complete
1317             if (isDirect()) {
1318                 mFlushHwPending = true;
1319             }
1320             if (playbackThread->mActiveTracks.indexOf(this) < 0) {
1321                 reset();
1322             }
1323         }
1324         // Prevent flush being lost if the track is flushed and then resumed
1325         // before mixer thread can run. This is important when offloading
1326         // because the hardware buffer could hold a large amount of audio
1327         playbackThread->broadcast_l();
1328         // Flush the Tee to avoid playing old data on resume and glitching on the
1329         // transition to new data.
1330         forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->flush(); });
1331     }
1332 }
1333 
1334 // must be called with thread lock held
1335 void AudioFlinger::PlaybackThread::Track::flushAck()
1336 {
1337     if (!isOffloaded() && !isDirect()) {
1338         return;
1339     }
1340 
1341     // Clear the client ring buffer so that the app can prime the buffer while paused.
1342     // Otherwise it might not get cleared until playback is resumed and obtainBuffer() is called.
1343     mServerProxy->flushBufferIfNeeded();
1344 
1345     mFlushHwPending = false;
1346 }
1347 
1348 void AudioFlinger::PlaybackThread::Track::pauseAck()
1349 {
1350     mPauseHwPending = false;
1351 }
1352 
1353 void AudioFlinger::PlaybackThread::Track::reset()
1354 {
1355     // Do not reset twice to avoid discarding data written just after a flush and before
1356     // the audioflinger thread detects the track is stopped.
1357     if (!mResetDone) {
1358         // Force underrun condition to avoid false underrun callback until first data is
1359         // written to buffer
1360         android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
1361         mFillingUpStatus = FS_FILLING;
1362         mResetDone = true;
1363         if (mState == FLUSHED) {
1364             mState = IDLE;
1365         }
1366     }
1367 }
1368 
1369 status_t AudioFlinger::PlaybackThread::Track::setParameters(const String8& keyValuePairs)
1370 {
1371     sp<ThreadBase> thread = mThread.promote();
1372     if (thread == 0) {
1373         ALOGE("%s(%d): thread is dead", __func__, mId);
1374         return FAILED_TRANSACTION;
1375     } else if ((thread->type() == ThreadBase::DIRECT) ||
1376                     (thread->type() == ThreadBase::OFFLOAD)) {
1377         return thread->setParameters(keyValuePairs);
1378     } else {
1379         return PERMISSION_DENIED;
1380     }
1381 }
1382 
1383 status_t AudioFlinger::PlaybackThread::Track::selectPresentation(int presentationId,
1384         int programId) {
1385     sp<ThreadBase> thread = mThread.promote();
1386     if (thread == 0) {
1387         ALOGE("thread is dead");
1388         return FAILED_TRANSACTION;
1389     } else if ((thread->type() == ThreadBase::DIRECT) || (thread->type() == ThreadBase::OFFLOAD)) {
1390         DirectOutputThread *directOutputThread = static_cast<DirectOutputThread*>(thread.get());
1391         return directOutputThread->selectPresentation(presentationId, programId);
1392     }
1393     return INVALID_OPERATION;
1394 }
1395 
1396 VolumeShaper::Status AudioFlinger::PlaybackThread::Track::applyVolumeShaper(
1397         const sp<VolumeShaper::Configuration>& configuration,
1398         const sp<VolumeShaper::Operation>& operation)
1399 {
1400     VolumeShaper::Status status = mVolumeHandler->applyVolumeShaper(configuration, operation);
1401 
1402     if (isOffloadedOrDirect()) {
1403         // Signal thread to fetch new volume.
1404         sp<ThreadBase> thread = mThread.promote();
1405         if (thread != 0) {
1406             Mutex::Autolock _l(thread->mLock);
1407             thread->broadcast_l();
1408         }
1409     }
1410     return status;
1411 }
1412 
1413 sp<VolumeShaper::State> AudioFlinger::PlaybackThread::Track::getVolumeShaperState(int id)
1414 {
1415     // Note: We don't check if Thread exists.
1416 
1417     // mVolumeHandler is thread safe.
1418     return mVolumeHandler->getVolumeShaperState(id);
1419 }
1420 
1421 void AudioFlinger::PlaybackThread::Track::setFinalVolume(float volumeLeft, float volumeRight)
1422 {
1423     mFinalVolumeLeft = volumeLeft;
1424     mFinalVolumeRight = volumeRight;
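    // mFinalVolume caches the mono average of the two channels; it is the gain reported
    // in the track's metadata (see copyMetadataTo) and logged in the track metrics.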
1425     const float volume = (volumeLeft + volumeRight) * 0.5f;
1426     if (mFinalVolume != volume) { // Compare to an epsilon if too many meaningless updates
1427         mFinalVolume = volume;
1428         setMetadataHasChanged();
1429         mLogForceVolumeUpdate = true;
1430     }
1431     if (mLogForceVolumeUpdate) {
1432         mLogForceVolumeUpdate = false;
1433         mTrackMetrics.logVolume(mFinalVolume);
1434     }
1435 }
1436 
1437 void AudioFlinger::PlaybackThread::Track::copyMetadataTo(MetadataInserter& backInserter) const
1438 {
1439     // Do not forward metadata for PatchTrack with unspecified stream type
1440     if (mStreamType == AUDIO_STREAM_PATCH) {
1441         return;
1442     }
1443 
1444     playback_track_metadata_v7_t metadata;
1445     metadata.base = {
1446             .usage = mAttr.usage,
1447             .content_type = mAttr.content_type,
1448             .gain = mFinalVolume,
1449     };
1450 
1451     // When attributes are undefined, derive default values from stream type.
1452     // See AudioAttributes.java, usageForStreamType() and Builder.setInternalLegacyStreamType()
1453     if (mAttr.usage == AUDIO_USAGE_UNKNOWN) {
1454         switch (mStreamType) {
1455         case AUDIO_STREAM_VOICE_CALL:
1456             metadata.base.usage = AUDIO_USAGE_VOICE_COMMUNICATION;
1457             metadata.base.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1458             break;
1459         case AUDIO_STREAM_SYSTEM:
1460             metadata.base.usage = AUDIO_USAGE_ASSISTANCE_SONIFICATION;
1461             metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1462             break;
1463         case AUDIO_STREAM_RING:
1464             metadata.base.usage = AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
1465             metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1466             break;
1467         case AUDIO_STREAM_MUSIC:
1468             metadata.base.usage = AUDIO_USAGE_MEDIA;
1469             metadata.base.content_type = AUDIO_CONTENT_TYPE_MUSIC;
1470             break;
1471         case AUDIO_STREAM_ALARM:
1472             metadata.base.usage = AUDIO_USAGE_ALARM;
1473             metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1474             break;
1475         case AUDIO_STREAM_NOTIFICATION:
1476             metadata.base.usage = AUDIO_USAGE_NOTIFICATION;
1477             metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1478             break;
1479         case AUDIO_STREAM_DTMF:
1480             metadata.base.usage = AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
1481             metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1482             break;
1483         case AUDIO_STREAM_ACCESSIBILITY:
1484             metadata.base.usage = AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
1485             metadata.base.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1486             break;
1487         case AUDIO_STREAM_ASSISTANT:
1488             metadata.base.usage = AUDIO_USAGE_ASSISTANT;
1489             metadata.base.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1490             break;
1491         case AUDIO_STREAM_REROUTING:
1492             metadata.base.usage = AUDIO_USAGE_VIRTUAL_SOURCE;
1493             // unknown content type
1494             break;
1495         case AUDIO_STREAM_CALL_ASSISTANT:
1496             metadata.base.usage = AUDIO_USAGE_CALL_ASSISTANT;
1497             metadata.base.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1498             break;
1499         default:
1500             break;
1501         }
1502     }
1503 
1504     metadata.channel_mask = mChannelMask;
1505     strncpy(metadata.tags, mAttr.tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);
1506     *backInserter++ = metadata;
1507 }
1508 
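// Applies a pending tee patch update: the previous patch tracks are destroyed, the new
// set is installed, and the new patch tracks are started only if this track is currently
// playing (ACTIVE/RESUMING) or still draining (STOPPING_1).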
1509 void AudioFlinger::PlaybackThread::Track::updateTeePatches_l() {
1510     if (mTeePatchesToUpdate.has_value()) {
1511         forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->destroy(); });
1512         mTeePatches = mTeePatchesToUpdate.value();
1513         if (mState == TrackBase::ACTIVE || mState == TrackBase::RESUMING ||
1514                 mState == TrackBase::STOPPING_1) {
1515             forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->start(); });
1516         }
1517         mTeePatchesToUpdate.reset();
1518     }
1519 }
1520 
1521 void AudioFlinger::PlaybackThread::Track::setTeePatchesToUpdate_l(TeePatches teePatchesToUpdate) {
1522     ALOGW_IF(mTeePatchesToUpdate.has_value(),
1523              "%s, existing tee patches to update will be ignored", __func__);
1524     mTeePatchesToUpdate = std::move(teePatchesToUpdate);
1525 }
1526 
1527 // must be called with player thread lock held
1528 void AudioFlinger::PlaybackThread::Track::processMuteEvent_l(
1529         const sp<IAudioManager>& audioManager, mute_state_t muteState)
1530 {
1531     if (mMuteState == muteState) {
1532         // mute state did not change, do nothing
1533         return;
1534     }
1535 
1536     status_t result = UNKNOWN_ERROR;
1537     if (audioManager && mPortId != AUDIO_PORT_HANDLE_NONE) {
1538         if (mMuteEventExtras == nullptr) {
1539             mMuteEventExtras = std::make_unique<os::PersistableBundle>();
1540         }
1541         mMuteEventExtras->putInt(String16(kExtraPlayerEventMuteKey),
1542                                  static_cast<int>(muteState));
1543 
1544         result = audioManager->portEvent(mPortId,
1545                                          PLAYER_UPDATE_MUTED,
1546                                          mMuteEventExtras);
1547     }
1548 
1549     if (result == OK) {
1550         mMuteState = muteState;
1551     } else {
1552         ALOGW("%s(%d): cannot process mute state for port ID %d, status error %d",
1553               __func__,
1554               id(),
1555               mPortId,
1556               result);
1557     }
1558 }
1559 
1560 status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& timestamp)
1561 {
1562     if (!isOffloaded() && !isDirect()) {
1563         return INVALID_OPERATION; // normal tracks handled through SSQ
1564     }
1565     sp<ThreadBase> thread = mThread.promote();
1566     if (thread == 0) {
1567         return INVALID_OPERATION;
1568     }
1569 
1570     Mutex::Autolock _l(thread->mLock);
1571     PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1572     return playbackThread->getTimestamp_l(timestamp);
1573 }
1574 
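// Attaches the auxiliary effect identified by EffectId to this track. The effect is
// first moved to this track's output thread; if the attach fails, the effect is moved
// back to the thread it came from (when one was found).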
1575 status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
1576 {
1577     sp<ThreadBase> thread = mThread.promote();
1578     if (thread == nullptr) {
1579         return DEAD_OBJECT;
1580     }
1581 
1582     sp<PlaybackThread> dstThread = (PlaybackThread *)thread.get();
1583     sp<PlaybackThread> srcThread; // srcThread is initialized by call to moveAuxEffectToIo()
1584     sp<AudioFlinger> af = mClient->audioFlinger();
1585     status_t status = af->moveAuxEffectToIo(EffectId, dstThread, &srcThread);
1586 
1587     if (EffectId != 0 && status == NO_ERROR) {
1588         status = dstThread->attachAuxEffect(this, EffectId);
1589         if (status == NO_ERROR) {
1590             AudioSystem::moveEffectsToIo(std::vector<int>{EffectId}, dstThread->id());
1591         }
1592     }
1593 
1594     if (status != NO_ERROR && srcThread != nullptr) {
1595         af->moveAuxEffectToIo(EffectId, srcThread, &dstThread);
1596     }
1597     return status;
1598 }
1599 
1600 void AudioFlinger::PlaybackThread::Track::setAuxBuffer(int EffectId, int32_t *buffer)
1601 {
1602     mAuxEffectId = EffectId;
1603     mAuxBuffer = buffer;
1604 }
1605 
1606 // presentationComplete verified by frames, used by Mixed tracks.
1607 bool AudioFlinger::PlaybackThread::Track::presentationComplete(
1608         int64_t framesWritten, size_t audioHalFrames)
1609 {
1610     // TODO: improve this based on FrameMap if it exists, to ensure full drain.
1611     // This assists in proper timestamp computation as well as wakelock management.
1612 
1613     // a track is considered presented when the total number of frames written to audio HAL
1614     // corresponds to the number of frames written when presentationComplete() is called for the
1615     // first time (mPresentationCompleteFrames == 0) plus the buffer filling status at that time.
1616     // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
1617     // to detect when all frames have been played. In this case framesWritten isn't
1618     // useful because it doesn't always reflect whether there is data in the h/w
1619     // buffers, particularly if a track has been paused and resumed during draining
1620     ALOGV("%s(%d): presentationComplete() mPresentationCompleteFrames %lld framesWritten %lld",
1621             __func__, mId,
1622             (long long)mPresentationCompleteFrames, (long long)framesWritten);
1623     if (mPresentationCompleteFrames == 0) {
1624         mPresentationCompleteFrames = framesWritten + audioHalFrames;
1625         ALOGV("%s(%d): set:"
1626                 " mPresentationCompleteFrames %lld audioHalFrames %zu",
1627                 __func__, mId,
1628                 (long long)mPresentationCompleteFrames, audioHalFrames);
1629     }
1630 
1631     bool complete;
1632     if (isFastTrack()) { // does not go through linear map
1633         complete = framesWritten >= (int64_t) mPresentationCompleteFrames;
1634         ALOGV("%s(%d): %s framesWritten:%lld  mPresentationCompleteFrames:%lld",
1635                 __func__, mId, (complete ? "complete" : "waiting"),
1636                 (long long) framesWritten, (long long) mPresentationCompleteFrames);
1637     } else {  // Normal tracks, OutputTracks, and PatchTracks
1638         complete = framesWritten >= (int64_t) mPresentationCompleteFrames
1639                 && mAudioTrackServerProxy->isDrained();
1640     }
1641 
1642     if (complete) {
1643         notifyPresentationComplete();
1644         return true;
1645     }
1646     return false;
1647 }
1648 
1649 // presentationComplete checked by time, used by DirectTracks.
1650 bool AudioFlinger::PlaybackThread::Track::presentationComplete(uint32_t latencyMs)
1651 {
1652     // For Offloaded or Direct tracks.
1653 
1654     // For a direct track, presentation completion is determined by a time-based check.
1655 
1656     // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
1657     // to detect when all frames have been played. In this case latencyMs isn't
1658     // useful because it doesn't always reflect whether there is data in the h/w
1659     // buffers, particularly if a track has been paused and resumed during draining
1660 
1661     constexpr float MIN_SPEED = 0.125f; // min speed scaling allowed for timely response.
1662     if (mPresentationCompleteTimeNs == 0) {
1663         mPresentationCompleteTimeNs = systemTime() + latencyMs * 1e6 / fmax(mSpeed, MIN_SPEED);
1664         ALOGV("%s(%d): set: latencyMs %u  mPresentationCompleteTimeNs:%lld",
1665                 __func__, mId, latencyMs, (long long) mPresentationCompleteTimeNs);
1666     }
1667 
1668     bool complete;
1669     if (isOffloaded()) {
1670         complete = true;
1671     } else { // Direct
1672         complete = systemTime() >= mPresentationCompleteTimeNs;
1673         ALOGV("%s(%d): %s", __func__, mId, (complete ? "complete" : "waiting"));
1674     }
1675     if (complete) {
1676         notifyPresentationComplete();
1677         return true;
1678     }
1679     return false;
1680 }
1681 
1682 void AudioFlinger::PlaybackThread::Track::notifyPresentationComplete()
1683 {
1684     // This only triggers once. TODO: should we enforce this?
1685     triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
1686     mAudioTrackServerProxy->setStreamEndDone();
1687 }
1688 
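// Fires and removes every queued sync event of the given type; events of other types
// remain queued.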
1689 void AudioFlinger::PlaybackThread::Track::triggerEvents(AudioSystem::sync_event_t type)
1690 {
1691     for (size_t i = 0; i < mSyncEvents.size();) {
1692         if (mSyncEvents[i]->type() == type) {
1693             mSyncEvents[i]->trigger();
1694             mSyncEvents.removeAt(i);
1695         } else {
1696             ++i;
1697         }
1698     }
1699 }
1700 
1701 // implement VolumeBufferProvider interface
1702 
1703 gain_minifloat_packed_t AudioFlinger::PlaybackThread::Track::getVolumeLR()
1704 {
1705     // called by FastMixer, so not allowed to take any locks, block, or do I/O including logs
1706     ALOG_ASSERT(isFastTrack() && (mCblk != NULL));
1707     gain_minifloat_packed_t vlr = mAudioTrackServerProxy->getVolumeLR();
1708     float vl = float_from_gain(gain_minifloat_unpack_left(vlr));
1709     float vr = float_from_gain(gain_minifloat_unpack_right(vlr));
1710     // track volumes come from shared memory, so can't be trusted and must be clamped
1711     if (vl > GAIN_FLOAT_UNITY) {
1712         vl = GAIN_FLOAT_UNITY;
1713     }
1714     if (vr > GAIN_FLOAT_UNITY) {
1715         vr = GAIN_FLOAT_UNITY;
1716     }
1717     // now apply the cached master volume and stream type volume;
1718     // this is trusted but lacks any synchronization or barrier so may be stale
1719     float v = mCachedVolume;
1720     vl *= v;
1721     vr *= v;
1722     // re-combine into packed minifloat
1723     vlr = gain_minifloat_pack(gain_from_float(vl), gain_from_float(vr));
1724     // FIXME look at mute, pause, and stop flags
1725     return vlr;
1726 }
1727 
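// A sync event can only be armed on a track that can still deliver audio: terminated or
// paused tracks, and static or stopped tracks with no frames ready, cancel the event and
// return INVALID_OPERATION.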
1728 status_t AudioFlinger::PlaybackThread::Track::setSyncEvent(const sp<SyncEvent>& event)
1729 {
1730     if (isTerminated() || mState == PAUSED ||
1731             ((framesReady() == 0) && ((mSharedBuffer != 0) ||
1732                                       (mState == STOPPED)))) {
1733         ALOGW("%s(%d): in invalid state %d on session %d %s mode, framesReady %zu",
1734               __func__, mId,
1735               (int)mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
1736         event->cancel();
1737         return INVALID_OPERATION;
1738     }
1739     (void) TrackBase::setSyncEvent(event);
1740     return NO_ERROR;
1741 }
1742 
1743 void AudioFlinger::PlaybackThread::Track::invalidate()
1744 {
1745     TrackBase::invalidate();
1746     signalClientFlag(CBLK_INVALID);
1747 }
1748 
1749 void AudioFlinger::PlaybackThread::Track::disable()
1750 {
1751     // TODO(b/142394888): the filling status should also be reset to filling
1752     signalClientFlag(CBLK_DISABLED);
1753 }
1754 
1755 void AudioFlinger::PlaybackThread::Track::signalClientFlag(int32_t flag)
1756 {
1757     // FIXME should use proxy, and needs work
1758     audio_track_cblk_t* cblk = mCblk;
1759     android_atomic_or(flag, &cblk->mFlags);
1760     android_atomic_release_store(0x40000000, &cblk->mFutex);
1761     // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
1762     (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
1763 }
1764 
1765 void AudioFlinger::PlaybackThread::Track::signal()
1766 {
1767     sp<ThreadBase> thread = mThread.promote();
1768     if (thread != 0) {
1769         PlaybackThread *t = (PlaybackThread *)thread.get();
1770         Mutex::Autolock _l(t->mLock);
1771         t->broadcast_l();
1772     }
1773 }
1774 
1775 status_t AudioFlinger::PlaybackThread::Track::getDualMonoMode(audio_dual_mono_mode_t* mode)
1776 {
1777     status_t status = INVALID_OPERATION;
1778     if (isOffloadedOrDirect()) {
1779         sp<ThreadBase> thread = mThread.promote();
1780         if (thread != nullptr) {
1781             PlaybackThread *t = (PlaybackThread *)thread.get();
1782             Mutex::Autolock _l(t->mLock);
1783             status = t->mOutput->stream->getDualMonoMode(mode);
1784             ALOGD_IF((status == NO_ERROR) && (mDualMonoMode != *mode),
1785                     "%s: mode %d inconsistent", __func__, mDualMonoMode);
1786         }
1787     }
1788     return status;
1789 }
1790 
1791 status_t AudioFlinger::PlaybackThread::Track::setDualMonoMode(audio_dual_mono_mode_t mode)
1792 {
1793     status_t status = INVALID_OPERATION;
1794     if (isOffloadedOrDirect()) {
1795         sp<ThreadBase> thread = mThread.promote();
1796         if (thread != nullptr) {
1797             auto t = static_cast<PlaybackThread *>(thread.get());
1798             Mutex::Autolock lock(t->mLock);
1799             status = t->mOutput->stream->setDualMonoMode(mode);
1800             if (status == NO_ERROR) {
1801                 mDualMonoMode = mode;
1802             }
1803         }
1804     }
1805     return status;
1806 }
1807 
1808 status_t AudioFlinger::PlaybackThread::Track::getAudioDescriptionMixLevel(float* leveldB)
1809 {
1810     status_t status = INVALID_OPERATION;
1811     if (isOffloadedOrDirect()) {
1812         sp<ThreadBase> thread = mThread.promote();
1813         if (thread != nullptr) {
1814             auto t = static_cast<PlaybackThread *>(thread.get());
1815             Mutex::Autolock lock(t->mLock);
1816             status = t->mOutput->stream->getAudioDescriptionMixLevel(leveldB);
1817             ALOGD_IF((status == NO_ERROR) && (mAudioDescriptionMixLevel != *leveldB),
1818                     "%s: level %.3f inconsistent", __func__, mAudioDescriptionMixLevel);
1819         }
1820     }
1821     return status;
1822 }
1823 
1824 status_t AudioFlinger::PlaybackThread::Track::setAudioDescriptionMixLevel(float leveldB)
1825 {
1826     status_t status = INVALID_OPERATION;
1827     if (isOffloadedOrDirect()) {
1828         sp<ThreadBase> thread = mThread.promote();
1829         if (thread != nullptr) {
1830             auto t = static_cast<PlaybackThread *>(thread.get());
1831             Mutex::Autolock lock(t->mLock);
1832             status = t->mOutput->stream->setAudioDescriptionMixLevel(leveldB);
1833             if (status == NO_ERROR) {
1834                 mAudioDescriptionMixLevel = leveldB;
1835             }
1836         }
1837     }
1838     return status;
1839 }
1840 
1841 status_t AudioFlinger::PlaybackThread::Track::getPlaybackRateParameters(
1842         audio_playback_rate_t* playbackRate)
1843 {
1844     status_t status = INVALID_OPERATION;
1845     if (isOffloadedOrDirect()) {
1846         sp<ThreadBase> thread = mThread.promote();
1847         if (thread != nullptr) {
1848             auto t = static_cast<PlaybackThread *>(thread.get());
1849             Mutex::Autolock lock(t->mLock);
1850             status = t->mOutput->stream->getPlaybackRateParameters(playbackRate);
1851             ALOGD_IF((status == NO_ERROR) &&
1852                     !isAudioPlaybackRateEqual(mPlaybackRateParameters, *playbackRate),
1853                     "%s: playbackRate inconsistent", __func__);
1854         }
1855     }
1856     return status;
1857 }
1858 
1859 status_t AudioFlinger::PlaybackThread::Track::setPlaybackRateParameters(
1860         const audio_playback_rate_t& playbackRate)
1861 {
1862     status_t status = INVALID_OPERATION;
1863     if (isOffloadedOrDirect()) {
1864         sp<ThreadBase> thread = mThread.promote();
1865         if (thread != nullptr) {
1866             auto t = static_cast<PlaybackThread *>(thread.get());
1867             Mutex::Autolock lock(t->mLock);
1868             status = t->mOutput->stream->setPlaybackRateParameters(playbackRate);
1869             if (status == NO_ERROR) {
1870                 mPlaybackRateParameters = playbackRate;
1871             }
1872         }
1873     }
1874     return status;
1875 }
1876 
1877 // To be called with thread lock held
1878 bool AudioFlinger::PlaybackThread::Track::isResumePending() {
1879     if (mState == RESUMING) {
1880         return true;
1881     }
1882     /* Resume is pending if track was stopping before pause was called */
1883     if (mState == STOPPING_1 &&
1884         mResumeToStopping) {
1885         return true;
1886     }
1887 
1888     return false;
1889 }
1890 
1891 // To be called with thread lock held
1892 void AudioFlinger::PlaybackThread::Track::resumeAck() {
1893     if (mState == RESUMING) {
1894         mState = ACTIVE;
1895     }
1896 
1897     // The other possible pending-resume state is STOPPING_1.
1898     // Do not change the state away from STOPPING_1, as doing so would prevent
1899     // drain from being called.
1900     if (mState == STOPPING_1) {
1901         mResumeToStopping = false;
1902     }
1903 }
1904 
1905 // To be called with thread lock held
1906 void AudioFlinger::PlaybackThread::Track::updateTrackFrameInfo(
1907         int64_t trackFramesReleased, int64_t sinkFramesWritten,
1908         uint32_t halSampleRate, const ExtendedTimestamp &timeStamp) {
1909     // Make the kernel frame time available.
1910     const FrameTime ft{
1911             timeStamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
1912             timeStamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]};
1913     // ALOGD("FrameTime: %lld %lld", (long long)ft.frames, (long long)ft.timeNs);
1914     mKernelFrameTime.store(ft);
1915     if (!audio_is_linear_pcm(mFormat)) {
1916         return;
1917     }
1918 
1919     // Update the frame map.
1920     mFrameMap.push(trackFramesReleased, sinkFramesWritten);
1921 
1922     // adjust server times and set drained state.
1923     //
1924     // Our timestamps are only updated when the track is on the Thread active list.
1925     // We need to ensure that tracks are not removed before full drain.
1926     ExtendedTimestamp local = timeStamp;
1927     bool drained = true; // default assume drained, if no server info found
1928     bool checked = false;
1929     for (int i = ExtendedTimestamp::LOCATION_MAX - 1;
1930             i >= ExtendedTimestamp::LOCATION_SERVER; --i) {
1931         // Lookup the track frame corresponding to the sink frame position.
1932         if (local.mTimeNs[i] > 0) {
1933             local.mPosition[i] = mFrameMap.findX(local.mPosition[i]);
1934             // check drain state from the latest stage in the pipeline.
1935             if (!checked && i <= ExtendedTimestamp::LOCATION_KERNEL) {
1936                 drained = local.mPosition[i] >= mAudioTrackServerProxy->framesReleased();
1937                 checked = true;
1938             }
1939         }
1940     }
1941 
1942     mAudioTrackServerProxy->setDrained(drained);
1943     // Set correction for flushed frames that are not accounted for in released.
1944     local.mFlushed = mAudioTrackServerProxy->framesFlushed();
1945     mServerProxy->setTimestamp(local);
1946 
1947     // Compute latency info.
1948     const bool useTrackTimestamp = !drained;
1949     const double latencyMs = useTrackTimestamp
1950             ? local.getOutputServerLatencyMs(sampleRate())
1951             : timeStamp.getOutputServerLatencyMs(halSampleRate);
1952 
1953     mServerLatencyFromTrack.store(useTrackTimestamp);
1954     mServerLatencyMs.store(latencyMs);
1955 
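    // One-shot startup logging: once a valid kernel timestamp is available and the
    // countdown has expired, wait for the reported latency to dip below the previous
    // value, then log the settled latency together with the startup time (time from the
    // logged start to the kernel timestamp, minus the duration of the frames already
    // played).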
1956     if (mLogStartCountdown > 0
1957             && local.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] > 0
1958             && local.mPosition[ExtendedTimestamp::LOCATION_KERNEL] > 0)
1959     {
1960         if (mLogStartCountdown > 1) {
1961             --mLogStartCountdown;
1962         } else if (latencyMs < mLogLatencyMs) { // wait for latency to stabilize (dip)
1963             mLogStartCountdown = 0;
1964             // startup is the difference in times for the current timestamp and our start
1965             double startUpMs =
1966                     (local.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] - mLogStartTimeNs) * 1e-6;
1967             // adjust for frames played.
1968             startUpMs -= (local.mPosition[ExtendedTimestamp::LOCATION_KERNEL] - mLogStartFrames)
1969                     * 1e3 / mSampleRate;
1970             ALOGV("%s: latencyMs:%lf startUpMs:%lf"
1971                     " localTime:%lld startTime:%lld"
1972                     " localPosition:%lld startPosition:%lld",
1973                     __func__, latencyMs, startUpMs,
1974                     (long long)local.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
1975                     (long long)mLogStartTimeNs,
1976                     (long long)local.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
1977                     (long long)mLogStartFrames);
1978             mTrackMetrics.logLatencyAndStartup(latencyMs, startUpMs);
1979         }
1980         mLogLatencyMs = latencyMs;
1981     }
1982 }
1983 
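// Enables or disables haptic playback on the owning track. The request is applied only
// when the track carries haptic channels and the output thread exposes haptic channels;
// the return value reports whether the setting was applied.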
1984 bool AudioFlinger::PlaybackThread::Track::AudioVibrationController::setMute(bool muted) {
1985     sp<ThreadBase> thread = mTrack->mThread.promote();
1986     if (thread != 0) {
1987         // Lock for updating mHapticPlaybackEnabled.
1988         Mutex::Autolock _l(thread->mLock);
1989         PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1990         if ((mTrack->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
1991                 && playbackThread->mHapticChannelCount > 0) {
1992             ALOGD("%s, haptic playback was %s for track %d",
1993                     __func__, muted ? "muted" : "unmuted", mTrack->id());
1994             mTrack->setHapticPlaybackEnabled(!muted);
1995             return true;
1996         }
1997     }
1998     return false;
1999 }
2000 
2001 binder::Status AudioFlinger::PlaybackThread::Track::AudioVibrationController::mute(
2002         /*out*/ bool *ret) {
2003     *ret = setMute(true);
2004     return binder::Status::ok();
2005 }
2006 
2007 binder::Status AudioFlinger::PlaybackThread::Track::AudioVibrationController::unmute(
2008         /*out*/ bool *ret) {
2009     *ret = setMute(false);
2010     return binder::Status::ok();
2011 }
2012 
2013 // ----------------------------------------------------------------------------
2014 #undef LOG_TAG
2015 #define LOG_TAG "AF::OutputTrack"
2016 
2017 AudioFlinger::PlaybackThread::OutputTrack::OutputTrack(
2018             PlaybackThread *playbackThread,
2019             DuplicatingThread *sourceThread,
2020             uint32_t sampleRate,
2021             audio_format_t format,
2022             audio_channel_mask_t channelMask,
2023             size_t frameCount,
2024             const AttributionSourceState& attributionSource)
2025     :   Track(playbackThread, NULL, AUDIO_STREAM_PATCH,
2026               audio_attributes_t{} /* currently unused for output track */,
2027               sampleRate, format, channelMask, frameCount,
2028               nullptr /* buffer */, (size_t)0 /* bufferSize */, nullptr /* sharedBuffer */,
2029               AUDIO_SESSION_NONE, getpid(), attributionSource, AUDIO_OUTPUT_FLAG_NONE,
2030               TYPE_OUTPUT),
2031     mActive(false), mSourceThread(sourceThread)
2032 {
2033 
2034     if (mCblk != NULL) {
2035         mOutBuffer.frameCount = 0;
2036         playbackThread->mTracks.add(this);
2037         ALOGV("%s(): mCblk %p, mBuffer %p, "
2038                 "frameCount %zu, mChannelMask 0x%08x",
2039                 __func__, mCblk, mBuffer,
2040                 frameCount, mChannelMask);
2041         // since client and server are in the same process,
2042         // the buffer has the same virtual address on both sides
2043         mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize,
2044                 true /*clientInServer*/);
2045         mClientProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY);
2046         mClientProxy->setSendLevel(0.0);
2047         mClientProxy->setSampleRate(sampleRate);
2048     } else {
2049         ALOGW("%s(%d): Error creating output track on thread %d",
2050                 __func__, mId, (int)mThreadIoHandle);
2051     }
2052 }
2053 
2054 AudioFlinger::PlaybackThread::OutputTrack::~OutputTrack()
2055 {
2056     clearBufferQueue();
2057     // The superclass destructor will now delete the server proxy and the shared memory that both refer to.
2058 }
2059 
2060 status_t AudioFlinger::PlaybackThread::OutputTrack::start(AudioSystem::sync_event_t event,
2061                                                           audio_session_t triggerSession)
2062 {
2063     status_t status = Track::start(event, triggerSession);
2064     if (status != NO_ERROR) {
2065         return status;
2066     }
2067 
2068     mActive = true;
2069     mRetryCount = 127;
2070     return status;
2071 }
2072 
2073 void AudioFlinger::PlaybackThread::OutputTrack::stop()
2074 {
2075     Track::stop();
2076     clearBufferQueue();
2077     mOutBuffer.frameCount = 0;
2078     mActive = false;
2079 }
2080 
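// Writes "frames" frames from "data" into this duplicated output and returns the number
// of frames actually consumed. On the first write while the sink thread is in standby,
// the track primes the sink with silence, starts, waits for the HAL to leave standby and
// queues the incoming data, so that all duplicated outputs start together.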
2081 ssize_t AudioFlinger::PlaybackThread::OutputTrack::write(void* data, uint32_t frames)
2082 {
2083     if (!mActive && frames != 0) {
2084         sp<ThreadBase> thread = mThread.promote();
2085         if (thread != nullptr && thread->standby()) {
2086             // preload one silent buffer to trigger mixer on start()
2087             ClientProxy::Buffer buf { .mFrameCount = mClientProxy->getStartThresholdInFrames() };
2088             status_t status = mClientProxy->obtainBuffer(&buf);
2089             if (status != NO_ERROR && status != NOT_ENOUGH_DATA && status != WOULD_BLOCK) {
2090                 ALOGE("%s(%d): could not obtain buffer on start", __func__, mId);
2091                 return 0;
2092             }
2093             memset(buf.mRaw, 0, buf.mFrameCount * mFrameSize);
2094             mClientProxy->releaseBuffer(&buf);
2095 
2096             (void) start();
2097 
2098             // Wait for the HAL stream to start before sending actual audio. Doing this on
2099             // each OutputTrack ensures that playback start is synchronized on all output
2100             // streams. If another OutputTrack has already started it can underrun, but this
2101             // is OK as only silence has been played so far and the retry count is very high
2102             // on OutputTrack.
2103             auto pt = static_cast<PlaybackThread *>(thread.get());
2104             if (!pt->waitForHalStart()) {
2105                 ALOGW("%s(%d): timeout waiting for thread to exit standby", __func__, mId);
2106                 stop();
2107                 return 0;
2108             }
2109 
2110             // Enqueue the first buffer and exit so that other OutputTracks also start
2111             // before write() is called again and this buffer is actually consumed.
2112             Buffer firstBuffer;
2113             firstBuffer.frameCount = frames;
2114             firstBuffer.raw = data;
2115             queueBuffer(firstBuffer);
2116             return frames;
2117         } else {
2118             (void) start();
2119         }
2120     }
2121 
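    // Drain previously queued overflow buffers first, then the new data, copying into
    // the sink as long as output buffers can be obtained within the source thread's wait
    // budget; whatever cannot be written now is queued below for a later write().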
2122     Buffer *pInBuffer;
2123     Buffer inBuffer;
2124     inBuffer.frameCount = frames;
2125     inBuffer.raw = data;
2126     uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs();
2127     while (waitTimeLeftMs) {
2128         // First write pending buffers, then new data
2129         if (mBufferQueue.size()) {
2130             pInBuffer = mBufferQueue.itemAt(0);
2131         } else {
2132             pInBuffer = &inBuffer;
2133         }
2134 
2135         if (pInBuffer->frameCount == 0) {
2136             break;
2137         }
2138 
2139         if (mOutBuffer.frameCount == 0) {
2140             mOutBuffer.frameCount = pInBuffer->frameCount;
2141             nsecs_t startTime = systemTime();
2142             status_t status = obtainBuffer(&mOutBuffer, waitTimeLeftMs);
2143             if (status != NO_ERROR && status != NOT_ENOUGH_DATA) {
2144                 ALOGV("%s(%d): thread %d no more output buffers; status %d",
2145                         __func__, mId,
2146                         (int)mThreadIoHandle, status);
2147                 break;
2148             }
2149             uint32_t waitTimeMs = (uint32_t)ns2ms(systemTime() - startTime);
2150             if (waitTimeLeftMs >= waitTimeMs) {
2151                 waitTimeLeftMs -= waitTimeMs;
2152             } else {
2153                 waitTimeLeftMs = 0;
2154             }
2155             if (status == NOT_ENOUGH_DATA) {
2156                 restartIfDisabled();
2157                 continue;
2158             }
2159         }
2160 
2161         uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
2162                 pInBuffer->frameCount;
2163         memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * mFrameSize);
2164         Proxy::Buffer buf;
2165         buf.mFrameCount = outFrames;
2166         buf.mRaw = NULL;
2167         mClientProxy->releaseBuffer(&buf);
2168         restartIfDisabled();
2169         pInBuffer->frameCount -= outFrames;
2170         pInBuffer->raw = (int8_t *)pInBuffer->raw + outFrames * mFrameSize;
2171         mOutBuffer.frameCount -= outFrames;
2172         mOutBuffer.raw = (int8_t *)mOutBuffer.raw + outFrames * mFrameSize;
2173 
2174         if (pInBuffer->frameCount == 0) {
2175             if (mBufferQueue.size()) {
2176                 mBufferQueue.removeAt(0);
2177                 free(pInBuffer->mBuffer);
2178                 if (pInBuffer != &inBuffer) {
2179                     delete pInBuffer;
2180                 }
2181                 ALOGV("%s(%d): thread %d released overflow buffer %zu",
2182                         __func__, mId,
2183                         (int)mThreadIoHandle, mBufferQueue.size());
2184             } else {
2185                 break;
2186             }
2187         }
2188     }
2189 
2190     // If we could not write all frames, allocate a buffer and queue it for next time.
2191     if (inBuffer.frameCount) {
2192         sp<ThreadBase> thread = mThread.promote();
2193         if (thread != 0 && !thread->standby()) {
2194             queueBuffer(inBuffer);
2195         }
2196     }
2197 
2198     // Calling write() with a 0 length buffer means that no more data will be written:
2199     // We rely on stop() to set the appropriate flags to allow the remaining frames to play out.
2200     if (frames == 0 && mBufferQueue.size() == 0 && mActive) {
2201         stop();
2202     }
2203 
2204     return frames - inBuffer.frameCount;  // number of frames consumed.
2205 }
2206 
2207 void AudioFlinger::PlaybackThread::OutputTrack::queueBuffer(Buffer& inBuffer) {
2208 
2209     if (mBufferQueue.size() < kMaxOverFlowBuffers) {
2210         Buffer *pInBuffer = new Buffer;
2211         const size_t bufferSize = inBuffer.frameCount * mFrameSize;
2212         pInBuffer->mBuffer = malloc(bufferSize);
2213         LOG_ALWAYS_FATAL_IF(pInBuffer->mBuffer == nullptr,
2214                 "%s: Unable to malloc size %zu", __func__, bufferSize);
2215         pInBuffer->frameCount = inBuffer.frameCount;
2216         pInBuffer->raw = pInBuffer->mBuffer;
2217         memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * mFrameSize);
2218         mBufferQueue.add(pInBuffer);
2219         ALOGV("%s(%d): thread %d adding overflow buffer %zu", __func__, mId,
2220                 (int)mThreadIoHandle, mBufferQueue.size());
2221         // audio data is consumed (stored locally); set frameCount to 0.
2222         inBuffer.frameCount = 0;
2223     } else {
2224         ALOGW("%s(%d): thread %d no more overflow buffers",
2225                 __func__, mId, (int)mThreadIoHandle);
2226         // TODO: return error for this.
2227     }
2228 }
2229 
2230 void AudioFlinger::PlaybackThread::OutputTrack::copyMetadataTo(MetadataInserter& backInserter) const
2231 {
2232     std::lock_guard<std::mutex> lock(mTrackMetadatasMutex);
2233     backInserter = std::copy(mTrackMetadatas.begin(), mTrackMetadatas.end(), backInserter);
2234 }
2235 
2236 void AudioFlinger::PlaybackThread::OutputTrack::setMetadatas(const SourceMetadatas& metadatas) {
2237     {
2238         std::lock_guard<std::mutex> lock(mTrackMetadatasMutex);
2239         mTrackMetadatas = metadatas;
2240     }
2241     // No need to adjust metadata track volumes as OutputTrack volumes are always 0dBFS.
2242     setMetadataHasChanged();
2243 }
2244 
2245 status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(
2246         AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
2247 {
2248     ClientProxy::Buffer buf;
2249     buf.mFrameCount = buffer->frameCount;
2250     struct timespec timeout;
2251     timeout.tv_sec = waitTimeMs / 1000;
2252     timeout.tv_nsec = (int) (waitTimeMs % 1000) * 1000000;
2253     status_t status = mClientProxy->obtainBuffer(&buf, &timeout);
2254     buffer->frameCount = buf.mFrameCount;
2255     buffer->raw = buf.mRaw;
2256     return status;
2257 }
2258 
2259 void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue()
2260 {
2261     size_t size = mBufferQueue.size();
2262 
2263     for (size_t i = 0; i < size; i++) {
2264         Buffer *pBuffer = mBufferQueue.itemAt(i);
2265         free(pBuffer->mBuffer);
2266         delete pBuffer;
2267     }
2268     mBufferQueue.clear();
2269 }
2270 
2271 void AudioFlinger::PlaybackThread::OutputTrack::restartIfDisabled()
2272 {
2273     int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
2274     if (mActive && (flags & CBLK_DISABLED)) {
2275         start();
2276     }
2277 }
2278 
2279 // ----------------------------------------------------------------------------
2280 #undef LOG_TAG
2281 #define LOG_TAG "AF::PatchTrack"
2282 
2283 AudioFlinger::PlaybackThread::PatchTrack::PatchTrack(PlaybackThread *playbackThread,
2284                                                      audio_stream_type_t streamType,
2285                                                      uint32_t sampleRate,
2286                                                      audio_channel_mask_t channelMask,
2287                                                      audio_format_t format,
2288                                                      size_t frameCount,
2289                                                      void *buffer,
2290                                                      size_t bufferSize,
2291                                                      audio_output_flags_t flags,
2292                                                      const Timeout& timeout,
2293                                                      size_t frameCountToBeReady)
2294     :   Track(playbackThread, NULL, streamType,
2295               audio_attributes_t{} /* currently unused for patch track */,
2296               sampleRate, format, channelMask, frameCount,
2297               buffer, bufferSize, nullptr /* sharedBuffer */,
2298               AUDIO_SESSION_NONE, getpid(), audioServerAttributionSource(getpid()), flags,
2299               TYPE_PATCH, AUDIO_PORT_HANDLE_NONE, frameCountToBeReady),
2300         PatchTrackBase(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, true, true),
2301                        *playbackThread, timeout)
2302 {
2303     ALOGV("%s(%d): sampleRate %d mPeerTimeout %d.%03d sec",
2304                                       __func__, mId, sampleRate,
2305                                       (int)mPeerTimeout.tv_sec,
2306                                       (int)(mPeerTimeout.tv_nsec / 1000000));
2307 }
2308 
2309 AudioFlinger::PlaybackThread::PatchTrack::~PatchTrack()
2310 {
2311     ALOGV("%s(%d)", __func__, mId);
2312 }
2313 
2314 size_t AudioFlinger::PlaybackThread::PatchTrack::framesReady() const
2315 {
2316     if (mPeerProxy && mPeerProxy->producesBufferOnDemand()) {
2317         return std::numeric_limits<size_t>::max();
2318     } else {
2319         return Track::framesReady();
2320     }
2321 }
2322 
2323 status_t AudioFlinger::PlaybackThread::PatchTrack::start(AudioSystem::sync_event_t event,
2324                                                          audio_session_t triggerSession)
2325 {
2326     status_t status = Track::start(event, triggerSession);
2327     if (status != NO_ERROR) {
2328         return status;
2329     }
2330     android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
2331     return status;
2332 }
2333 
2334 // AudioBufferProvider interface
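// Frames are obtained from the peer proxy first (bounded by mPeerTimeout) and only then
// is the request forwarded to Track::getNextBuffer(); the optional ATRACE counters record
// the requested versus obtained frame counts.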
2335 status_t AudioFlinger::PlaybackThread::PatchTrack::getNextBuffer(
2336         AudioBufferProvider::Buffer* buffer)
2337 {
2338     ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
2339     Proxy::Buffer buf;
2340     buf.mFrameCount = buffer->frameCount;
2341     if (ATRACE_ENABLED()) {
2342         std::string traceName("PTnReq");
2343         traceName += std::to_string(id());
2344         ATRACE_INT(traceName.c_str(), buf.mFrameCount);
2345     }
2346     status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
2347     ALOGV_IF(status != NO_ERROR, "%s(%d): getNextBuffer status %d", __func__, mId, status);
2348     buffer->frameCount = buf.mFrameCount;
2349     if (ATRACE_ENABLED()) {
2350         std::string traceName("PTnObt");
2351         traceName += std::to_string(id());
2352         ATRACE_INT(traceName.c_str(), buf.mFrameCount);
2353     }
2354     if (buf.mFrameCount == 0) {
2355         return WOULD_BLOCK;
2356     }
2357     status = Track::getNextBuffer(buffer);
2358     return status;
2359 }
2360 
2361 void AudioFlinger::PlaybackThread::PatchTrack::releaseBuffer(AudioBufferProvider::Buffer* buffer)
2362 {
2363     ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
2364     Proxy::Buffer buf;
2365     buf.mFrameCount = buffer->frameCount;
2366     buf.mRaw = buffer->raw;
2367     mPeerProxy->releaseBuffer(&buf);
2368     TrackBase::releaseBuffer(buffer); // Note: this is the base class.
2369 }
2370 
2371 status_t AudioFlinger::PlaybackThread::PatchTrack::obtainBuffer(Proxy::Buffer* buffer,
2372                                                                 const struct timespec *timeOut)
2373 {
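    // On NOT_ENOUGH_DATA the proxy clears the requested frame count, so restore it,
    // re-enable the track if a previous underrun disabled it, and retry a bounded number
    // of times.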
2374     status_t status = NO_ERROR;
2375     static const int32_t kMaxTries = 5;
2376     int32_t tryCounter = kMaxTries;
2377     const size_t originalFrameCount = buffer->mFrameCount;
2378     do {
2379         if (status == NOT_ENOUGH_DATA) {
2380             restartIfDisabled();
2381             buffer->mFrameCount = originalFrameCount; // cleared on error, must be restored.
2382         }
2383         status = mProxy->obtainBuffer(buffer, timeOut);
2384     } while ((status == NOT_ENOUGH_DATA) && (tryCounter-- > 0));
2385     return status;
2386 }
2387 
2388 void AudioFlinger::PlaybackThread::PatchTrack::releaseBuffer(Proxy::Buffer* buffer)
2389 {
2390     mProxy->releaseBuffer(buffer);
2391     restartIfDisabled();
2392 
2393     // Check if the PatchTrack has enough data to write once in releaseBuffer().
2394     // If not, prevent an underrun from occurring by moving the track into FS_FILLING;
2395     // this logic avoids glitches when suspending A2DP with AudioPlaybackCapture.
2396     // TODO: perhaps underrun avoidance could be a track property checked in isReady() instead.
2397     if (mFillingUpStatus == FS_ACTIVE
2398             && audio_is_linear_pcm(mFormat)
2399             && !isOffloadedOrDirect()) {
2400         if (sp<ThreadBase> thread = mThread.promote();
2401             thread != 0) {
2402             PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
2403             const size_t frameCount = playbackThread->frameCount() * sampleRate()
2404                     / playbackThread->sampleRate();
2405             if (framesReady() < frameCount) {
2406                 ALOGD("%s(%d) Not enough data, wait for buffer to fill", __func__, mId);
2407                 mFillingUpStatus = FS_FILLING;
2408             }
2409         }
2410     }
2411 }
2412 
2413 void AudioFlinger::PlaybackThread::PatchTrack::restartIfDisabled()
2414 {
2415     if (android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags) & CBLK_DISABLED) {
2416         ALOGW("%s(%d): disabled due to previous underrun, restarting", __func__, mId);
2417         start();
2418     }
2419 }
2420 
2421 // ----------------------------------------------------------------------------
2422 //      Record
2423 // ----------------------------------------------------------------------------
2424 
2425 
2426 #undef LOG_TAG
2427 #define LOG_TAG "AF::RecordHandle"
2428 
2429 AudioFlinger::RecordHandle::RecordHandle(
2430         const sp<AudioFlinger::RecordThread::RecordTrack>& recordTrack)
2431     : BnAudioRecord(),
2432     mRecordTrack(recordTrack)
2433 {
2434     setMinSchedulerPolicy(SCHED_NORMAL, ANDROID_PRIORITY_AUDIO);
2435 }
2436 
2437 AudioFlinger::RecordHandle::~RecordHandle() {
2438     stop_nonvirtual();
2439     mRecordTrack->destroy();
2440 }
2441 
2442 binder::Status AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
2443         int /*audio_session_t*/ triggerSession) {
2444     ALOGV("%s()", __func__);
2445     return binderStatusFromStatusT(
2446         mRecordTrack->start((AudioSystem::sync_event_t)event, (audio_session_t) triggerSession));
2447 }
2448 
2449 binder::Status AudioFlinger::RecordHandle::stop() {
2450     stop_nonvirtual();
2451     return binder::Status::ok();
2452 }
2453 
2454 void AudioFlinger::RecordHandle::stop_nonvirtual() {
2455     ALOGV("%s()", __func__);
2456     mRecordTrack->stop();
2457 }
2458 
2459 binder::Status AudioFlinger::RecordHandle::getActiveMicrophones(
2460         std::vector<media::MicrophoneInfoFw>* activeMicrophones) {
2461     ALOGV("%s()", __func__);
2462     return binderStatusFromStatusT(mRecordTrack->getActiveMicrophones(activeMicrophones));
2463 }
2464 
2465 binder::Status AudioFlinger::RecordHandle::setPreferredMicrophoneDirection(
2466         int /*audio_microphone_direction_t*/ direction) {
2467     ALOGV("%s()", __func__);
2468     return binderStatusFromStatusT(mRecordTrack->setPreferredMicrophoneDirection(
2469             static_cast<audio_microphone_direction_t>(direction)));
2470 }
2471 
2472 binder::Status AudioFlinger::RecordHandle::setPreferredMicrophoneFieldDimension(float zoom) {
2473     ALOGV("%s()", __func__);
2474     return binderStatusFromStatusT(mRecordTrack->setPreferredMicrophoneFieldDimension(zoom));
2475 }
2476 
2477 binder::Status AudioFlinger::RecordHandle::shareAudioHistory(
2478         const std::string& sharedAudioPackageName, int64_t sharedAudioStartMs) {
2479     return binderStatusFromStatusT(
2480             mRecordTrack->shareAudioHistory(sharedAudioPackageName, sharedAudioStartMs));
2481 }
2482 
2483 // ----------------------------------------------------------------------------
2484 #undef LOG_TAG
2485 #define LOG_TAG "AF::RecordTrack"
2486 
2487 // RecordTrack constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
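// Buffer allocation policy passed to TrackBase below: a default-type fast capture track
// uses a pipe (ALLOC_PIPE), a default-type normal capture track uses the shared control
// block (ALLOC_CBLK), and other track types (e.g. patch records) either allocate locally
// (ALLOC_LOCAL) when no buffer is supplied or wrap the caller-provided buffer (ALLOC_NONE).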
2488 AudioFlinger::RecordThread::RecordTrack::RecordTrack(
2489             RecordThread *thread,
2490             const sp<Client>& client,
2491             const audio_attributes_t& attr,
2492             uint32_t sampleRate,
2493             audio_format_t format,
2494             audio_channel_mask_t channelMask,
2495             size_t frameCount,
2496             void *buffer,
2497             size_t bufferSize,
2498             audio_session_t sessionId,
2499             pid_t creatorPid,
2500             const AttributionSourceState& attributionSource,
2501             audio_input_flags_t flags,
2502             track_type type,
2503             audio_port_handle_t portId,
2504             int32_t startFrames)
2505     :   TrackBase(thread, client, attr, sampleRate, format,
2506                   channelMask, frameCount, buffer, bufferSize, sessionId,
2507                   creatorPid,
2508                   VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)),
2509                   false /*isOut*/,
2510                   (type == TYPE_DEFAULT) ?
2511                           ((flags & AUDIO_INPUT_FLAG_FAST) ? ALLOC_PIPE : ALLOC_CBLK) :
2512                           ((buffer == NULL) ? ALLOC_LOCAL : ALLOC_NONE),
2513                   type, portId,
2514                   std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_RECORD) + std::to_string(portId)),
2515         mOverflow(false),
2516         mFramesToDrop(0),
2517         mResamplerBufferProvider(NULL), // initialize in case of early constructor exit
2518         mRecordBufferConverter(NULL),
2519         mFlags(flags),
2520         mSilenced(false),
2521         mStartFrames(startFrames)
2522 {
2523     if (mCblk == NULL) {
2524         return;
2525     }
2526 
2527     if (!isDirect()) {
2528         mRecordBufferConverter = new RecordBufferConverter(
2529                 thread->mChannelMask, thread->mFormat, thread->mSampleRate,
2530                 channelMask, format, sampleRate);
2531         // Check if the RecordBufferConverter construction was successful.
2532         // If not, don't continue with construction.
2533         //
2534         // NOTE: It would be extremely rare that the record track cannot be created
2535         // for the current device, but a pending or future device change would make
2536         // the record track configuration valid.
2537         if (mRecordBufferConverter->initCheck() != NO_ERROR) {
2538             ALOGE("%s(%d): RecordTrack unable to create record buffer converter", __func__, mId);
2539             return;
2540         }
2541     }
2542 
2543     mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
2544             mFrameSize, !isExternalTrack());
2545 
2546     mResamplerBufferProvider = new ResamplerBufferProvider(this);
2547 
2548     if (flags & AUDIO_INPUT_FLAG_FAST) {
2549         ALOG_ASSERT(thread->mFastTrackAvail);
2550         thread->mFastTrackAvail = false;
2551     } else {
2552         // TODO: only Normal Record has timestamps (Fast Record does not).
2553         mServerLatencySupported = checkServerLatencySupported(mFormat, flags);
2554     }
2555 #ifdef TEE_SINK
2556     mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
2557             + "_" + std::to_string(mId)
2558             + "_R");
2559 #endif
2560 
2561     // Once this item is logged by the server, the client can add properties.
2562     mTrackMetrics.logConstructor(creatorPid, uid(), id());
2563 }
2564 
2565 AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
2566 {
2567     ALOGV("%s()", __func__);
2568     delete mRecordBufferConverter;
2569     delete mResamplerBufferProvider;
2570 }
2571 
2572 status_t AudioFlinger::RecordThread::RecordTrack::initCheck() const
2573 {
2574     status_t status = TrackBase::initCheck();
2575     if (status == NO_ERROR && mServerProxy == 0) {
2576         status = BAD_VALUE;
2577     }
2578     return status;
2579 }
2580 
2581 // AudioBufferProvider interface
2582 status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer)
2583 {
2584     ServerProxy::Buffer buf;
2585     buf.mFrameCount = buffer->frameCount;
2586     status_t status = mServerProxy->obtainBuffer(&buf);
2587     buffer->frameCount = buf.mFrameCount;
2588     buffer->raw = buf.mRaw;
2589     if (buf.mFrameCount == 0) {
2590         // FIXME also wake futex so that overrun is noticed more quickly
2591         (void) android_atomic_or(CBLK_OVERRUN, &mCblk->mFlags);
2592     }
2593     return status;
2594 }
2595 
2596 status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event,
2597                                                         audio_session_t triggerSession)
2598 {
2599     sp<ThreadBase> thread = mThread.promote();
2600     if (thread != 0) {
2601         RecordThread *recordThread = (RecordThread *)thread.get();
2602         return recordThread->start(this, event, triggerSession);
2603     } else {
2604         ALOGW("%s track %d: thread was destroyed", __func__, portId());
2605         return DEAD_OBJECT;
2606     }
2607 }
2608 
2609 void AudioFlinger::RecordThread::RecordTrack::stop()
2610 {
2611     sp<ThreadBase> thread = mThread.promote();
2612     if (thread != 0) {
2613         RecordThread *recordThread = (RecordThread *)thread.get();
2614         if (recordThread->stop(this) && isExternalTrack()) {
2615             AudioSystem::stopInput(mPortId);
2616         }
2617     }
2618 }
2619 
2620 void AudioFlinger::RecordThread::RecordTrack::destroy()
2621 {
2622     // see comments at AudioFlinger::PlaybackThread::Track::destroy()
2623     sp<RecordTrack> keep(this);
2624     {
2625         track_state priorState = mState;
2626         sp<ThreadBase> thread = mThread.promote();
2627         if (thread != 0) {
2628             Mutex::Autolock _l(thread->mLock);
2629             RecordThread *recordThread = (RecordThread *) thread.get();
2630             priorState = mState;
2631             if (!mSharedAudioPackageName.empty()) {
2632                 recordThread->resetAudioHistory_l();
2633             }
2634             recordThread->destroyTrack_l(this); // move mState to STOPPED, terminate
2635         }
2636         // APM port ID / client management is done outside of the lock.
2637         // NOTE: if thread doesn't exist, the input descriptor probably doesn't either.
2638         if (isExternalTrack()) {
2639             switch (priorState) {
2640             case ACTIVE:     // invalidated while still active
2641             case STARTING_2: // invalidated/start-aborted after startInput successfully called
2642             case PAUSING:    // invalidated while in the middle of stop() pausing (still active)
2643                 AudioSystem::stopInput(mPortId);
2644                 break;
2645 
2646             case STARTING_1: // invalidated/start-aborted and startInput not successful
2647             case PAUSED:     // OK, not active
2648             case IDLE:       // OK, not active
2649                 break;
2650 
2651             case STOPPED:    // unexpected (destroyed)
2652             default:
2653                 LOG_ALWAYS_FATAL("%s(%d): invalid prior state: %d", __func__, mId, priorState);
2654             }
2655             AudioSystem::releaseInput(mPortId);
2656         }
2657     }
2658 }
2659 
2660 void AudioFlinger::RecordThread::RecordTrack::invalidate()
2661 {
2662     TrackBase::invalidate();
2663     // FIXME should use proxy, and needs work
2664     audio_track_cblk_t* cblk = mCblk;
2665     android_atomic_or(CBLK_INVALID, &cblk->mFlags);
2666     android_atomic_release_store(0x40000000, &cblk->mFutex);
2667     // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
2668     (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
2669 }
2670 
2671 
2672 void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
2673 {
2674     result.appendFormat("Active     Id Client Session Port Id  S  Flags  "
2675                         " Format Chn mask  SRate Source  "
2676                         " Server FrmCnt FrmRdy Sil%s\n",
2677                         isServerLatencySupported() ? "   Latency" : "");
2678 }
2679 
2680 void AudioFlinger::RecordThread::RecordTrack::appendDump(String8& result, bool active)
2681 {
2682     result.appendFormat("%c%5s %6d %6u %7u %7u  %2s 0x%03X "
2683             "%08X %08X %6u %6X "
2684             "%08X %6zu %6zu %3c",
2685             isFastTrack() ? 'F' : ' ',
2686             active ? "yes" : "no",
2687             mId,
2688             (mClient == 0) ? getpid() : mClient->pid(),
2689             mSessionId,
2690             mPortId,
2691             getTrackStateAsCodedString(),
2692             mCblk->mFlags,
2693 
2694             mFormat,
2695             mChannelMask,
2696             mSampleRate,
2697             mAttr.source,
2698 
2699             mCblk->mServer,
2700             mFrameCount,
2701             mServerProxy->framesReadySafe(),
2702             isSilenced() ? 's' : 'n'
2703             );
2704     if (isServerLatencySupported()) {
2705         double latencyMs;
2706         bool fromTrack;
2707         if (getTrackLatencyMs(&latencyMs, &fromTrack) == OK) {
2708             // Show latency in msec, followed by 't' if from track timestamp (the most accurate)
2709             // or 'k' if estimated from kernel (usually for debugging).
2710             result.appendFormat(" %7.2lf %c", latencyMs, fromTrack ? 't' : 'k');
2711         } else {
2712             result.appendFormat("%10s", mCblk->mServer != 0 ? "unavail" : "new");
2713         }
2714     }
2715     result.append("\n");
2716 }
2717 
2718 void AudioFlinger::RecordThread::RecordTrack::handleSyncStartEvent(const sp<SyncEvent>& event)
2719 {
2720     if (event == mSyncStartEvent) {
2721         ssize_t framesToDrop = 0;
2722         sp<ThreadBase> threadBase = mThread.promote();
2723         if (threadBase != 0) {
2724             // TODO: use actual buffer filling status instead of 2 buffers when info is available
2725             // from audio HAL
2726             framesToDrop = threadBase->mFrameCount * 2;
2727         }
2728         mFramesToDrop = framesToDrop;
2729     }
2730 }
2731 
2732 void AudioFlinger::RecordThread::RecordTrack::clearSyncStartEvent()
2733 {
2734     if (mSyncStartEvent != 0) {
2735         mSyncStartEvent->cancel();
2736         mSyncStartEvent.clear();
2737     }
2738     mFramesToDrop = 0;
2739 }
2740 
2741 void AudioFlinger::RecordThread::RecordTrack::updateTrackFrameInfo(
2742         int64_t trackFramesReleased, int64_t sourceFramesRead,
2743         uint32_t halSampleRate, const ExtendedTimestamp &timestamp)
2744 {
2745     // Make the kernel frame time available.
2746     const FrameTime ft{
2747             timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
2748             timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]};
2749     // ALOGD("FrameTime: %lld %lld", (long long)ft.frames, (long long)ft.timeNs);
2750     mKernelFrameTime.store(ft);
2751     if (!audio_is_linear_pcm(mFormat)) {
2752         // Stream is direct, return provided timestamp with no conversion
2753         mServerProxy->setTimestamp(timestamp);
2754         return;
2755     }
2756 
2757     ExtendedTimestamp local = timestamp;
2758 
2759     // Convert HAL frames to server-side track frames at track sample rate.
2760     // We use trackFramesReleased and sourceFramesRead as an anchor point.
2761     for (int i = ExtendedTimestamp::LOCATION_SERVER; i < ExtendedTimestamp::LOCATION_MAX; ++i) {
2762         if (local.mTimeNs[i] != 0) {
2763             const int64_t relativeServerFrames = local.mPosition[i] - sourceFramesRead;
2764             const int64_t relativeTrackFrames = relativeServerFrames
2765                     * mSampleRate / halSampleRate; // TODO: potential computation overflow
2766             local.mPosition[i] = relativeTrackFrames + trackFramesReleased;
2767         }
2768     }
2769     mServerProxy->setTimestamp(local);
2770 
2771     // Compute latency info.
2772     const bool useTrackTimestamp = true; // use track unless debugging.
2773     const double latencyMs = - (useTrackTimestamp
2774             ? local.getOutputServerLatencyMs(sampleRate())
2775             : timestamp.getOutputServerLatencyMs(halSampleRate));
2776 
2777     mServerLatencyFromTrack.store(useTrackTimestamp);
2778     mServerLatencyMs.store(latencyMs);
2779 }
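// Illustrative conversion (hypothetical rates): with halSampleRate == 48000 and a track
// sample rate of 16000, a timestamp position 4800 HAL frames past the sourceFramesRead
// anchor maps to 4800 * 16000 / 48000 = 1600 track frames past trackFramesReleased.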
2780 
2781 status_t AudioFlinger::RecordThread::RecordTrack::getActiveMicrophones(
2782         std::vector<media::MicrophoneInfoFw>* activeMicrophones)
2783 {
2784     sp<ThreadBase> thread = mThread.promote();
2785     if (thread != 0) {
2786         RecordThread *recordThread = (RecordThread *)thread.get();
2787         return recordThread->getActiveMicrophones(activeMicrophones);
2788     } else {
2789         return BAD_VALUE;
2790     }
2791 }
2792 
2793 status_t AudioFlinger::RecordThread::RecordTrack::setPreferredMicrophoneDirection(
2794         audio_microphone_direction_t direction) {
2795     sp<ThreadBase> thread = mThread.promote();
2796     if (thread != 0) {
2797         RecordThread *recordThread = (RecordThread *)thread.get();
2798         return recordThread->setPreferredMicrophoneDirection(direction);
2799     } else {
2800         return BAD_VALUE;
2801     }
2802 }
2803 
2804 status_t AudioFlinger::RecordThread::RecordTrack::setPreferredMicrophoneFieldDimension(float zoom) {
2805     sp<ThreadBase> thread = mThread.promote();
2806     if (thread != 0) {
2807         RecordThread *recordThread = (RecordThread *)thread.get();
2808         return recordThread->setPreferredMicrophoneFieldDimension(zoom);
2809     } else {
2810         return BAD_VALUE;
2811     }
2812 }
2813 
2814 status_t AudioFlinger::RecordThread::RecordTrack::shareAudioHistory(
2815         const std::string& sharedAudioPackageName, int64_t sharedAudioStartMs) {
2816 
2817     const uid_t callingUid = IPCThreadState::self()->getCallingUid();
2818     const pid_t callingPid = IPCThreadState::self()->getCallingPid();
2819     if (callingUid != mUid || callingPid != mCreatorPid) {
2820         return PERMISSION_DENIED;
2821     }
2822 
2823     AttributionSourceState attributionSource{};
2824     attributionSource.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingUid));
2825     attributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingPid));
2826     attributionSource.token = sp<BBinder>::make();
2827     if (!captureHotwordAllowed(attributionSource)) {
2828         return PERMISSION_DENIED;
2829     }
2830 
2831     sp<ThreadBase> thread = mThread.promote();
2832     if (thread != 0) {
2833         RecordThread *recordThread = (RecordThread *)thread.get();
2834         status_t status = recordThread->shareAudioHistory(
2835                 sharedAudioPackageName, mSessionId, sharedAudioStartMs);
2836         if (status == NO_ERROR) {
2837             mSharedAudioPackageName = sharedAudioPackageName;
2838         }
2839         return status;
2840     } else {
2841         return BAD_VALUE;
2842     }
2843 }
2844 
2845 void AudioFlinger::RecordThread::RecordTrack::copyMetadataTo(MetadataInserter& backInserter) const
2846 {
2847 
2848     // Do not forward PatchRecord metadata with unspecified audio source
2849     if (mAttr.source == AUDIO_SOURCE_DEFAULT) {
2850         return;
2851     }
2852 
2853     // The track cannot be invalid here because this is called after prepareTrack_l in the same critical section.
2854     record_track_metadata_v7_t metadata;
2855     metadata.base = {
2856             .source = mAttr.source,
2857             .gain = 1, // capture tracks do not have volumes
2858     };
2859     metadata.channel_mask = mChannelMask;
2860     strncpy(metadata.tags, mAttr.tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);
2861 
2862     *backInserter++ = metadata;
2863 }
2864 
2865 // ----------------------------------------------------------------------------
2866 #undef LOG_TAG
2867 #define LOG_TAG "AF::PatchRecord"
2868 
2869 AudioFlinger::RecordThread::PatchRecord::PatchRecord(RecordThread *recordThread,
2870                                                      uint32_t sampleRate,
2871                                                      audio_channel_mask_t channelMask,
2872                                                      audio_format_t format,
2873                                                      size_t frameCount,
2874                                                      void *buffer,
2875                                                      size_t bufferSize,
2876                                                      audio_input_flags_t flags,
2877                                                      const Timeout& timeout,
2878                                                      audio_source_t source)
2879     :   RecordTrack(recordThread, NULL,
2880                 audio_attributes_t{ .source = source } ,
2881                 sampleRate, format, channelMask, frameCount,
2882                 buffer, bufferSize, AUDIO_SESSION_NONE, getpid(),
2883                 audioServerAttributionSource(getpid()), flags, TYPE_PATCH),
2884         PatchTrackBase(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true),
2885                        *recordThread, timeout)
2886 {
2887     ALOGV("%s(%d): sampleRate %d mPeerTimeout %d.%03d sec",
2888                                       __func__, mId, sampleRate,
2889                                       (int)mPeerTimeout.tv_sec,
2890                                       (int)(mPeerTimeout.tv_nsec / 1000000));
2891 }
2892 
2893 AudioFlinger::RecordThread::PatchRecord::~PatchRecord()
2894 {
2895     ALOGV("%s(%d)", __func__, mId);
2896 }
2897 
2898 static size_t writeFramesHelper(
2899         AudioBufferProvider* dest, const void* src, size_t frameCount, size_t frameSize)
2900 {
2901     AudioBufferProvider::Buffer patchBuffer;
2902     patchBuffer.frameCount = frameCount;
2903     auto status = dest->getNextBuffer(&patchBuffer);
2904     if (status != NO_ERROR) {
2905        ALOGW("%s PatchRecord getNextBuffer failed with error %d: %s",
2906              __func__, status, strerror(-status));
2907        return 0;
2908     }
2909     ALOG_ASSERT(patchBuffer.frameCount <= frameCount);
2910     memcpy(patchBuffer.raw, src, patchBuffer.frameCount * frameSize);
2911     size_t framesWritten = patchBuffer.frameCount;
2912     dest->releaseBuffer(&patchBuffer);
2913     return framesWritten;
2914 }
2915 
2916 // static
2917 size_t AudioFlinger::RecordThread::PatchRecord::writeFrames(
2918         AudioBufferProvider* dest, const void* src, size_t frameCount, size_t frameSize)
2919 {
2920     size_t framesWritten = writeFramesHelper(dest, src, frameCount, frameSize);
2921     // On buffer wrap, the buffer frame count will be less than requested;
2922     // when this happens, a second buffer is used to write the leftover audio.
2923     const size_t framesLeft = frameCount - framesWritten;
2924     if (framesWritten != 0 && framesLeft != 0) {
2925         framesWritten += writeFramesHelper(dest, (const char*)src + framesWritten * frameSize,
2926                         framesLeft, frameSize);
2927     }
2928     return framesWritten;
2929 }
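// Example of the wraparound handling above (hypothetical sizes): if the destination buffer
// wraps after 100 of 160 requested frames, the first writeFramesHelper() call copies 100
// frames and the second copies the remaining 60 starting at src + 100 * frameSize.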
2930 
2931 // AudioBufferProvider interface
2932 status_t AudioFlinger::RecordThread::PatchRecord::getNextBuffer(
2933                                                   AudioBufferProvider::Buffer* buffer)
2934 {
2935     ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
2936     Proxy::Buffer buf;
2937     buf.mFrameCount = buffer->frameCount;
2938     status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
2939     ALOGV_IF(status != NO_ERROR,
2940              "%s(%d): mPeerProxy->obtainBuffer status %d", __func__, mId, status);
2941     buffer->frameCount = buf.mFrameCount;
2942     if (ATRACE_ENABLED()) {
2943         std::string traceName("PRnObt");
2944         traceName += std::to_string(id());
2945         ATRACE_INT(traceName.c_str(), buf.mFrameCount);
2946     }
2947     if (buf.mFrameCount == 0) {
2948         return WOULD_BLOCK;
2949     }
2950     status = RecordTrack::getNextBuffer(buffer);
2951     return status;
2952 }
2953 
2954 void AudioFlinger::RecordThread::PatchRecord::releaseBuffer(AudioBufferProvider::Buffer* buffer)
2955 {
2956     ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
2957     Proxy::Buffer buf;
2958     buf.mFrameCount = buffer->frameCount;
2959     buf.mRaw = buffer->raw;
2960     mPeerProxy->releaseBuffer(&buf);
2961     TrackBase::releaseBuffer(buffer);
2962 }
2963 
2964 status_t AudioFlinger::RecordThread::PatchRecord::obtainBuffer(Proxy::Buffer* buffer,
2965                                                                const struct timespec *timeOut)
2966 {
2967     return mProxy->obtainBuffer(buffer, timeOut);
2968 }
2969 
2970 void AudioFlinger::RecordThread::PatchRecord::releaseBuffer(Proxy::Buffer* buffer)
2971 {
2972     mProxy->releaseBuffer(buffer);
2973 }
2974 
2975 #undef LOG_TAG
2976 #define LOG_TAG "AF::PthrPatchRecord"
2977 
2978 static std::unique_ptr<void, decltype(free)*> allocAligned(size_t alignment, size_t size)
2979 {
2980     void *ptr = nullptr;
2981     (void)posix_memalign(&ptr, alignment, size);
2982     return {ptr, free};
2983 }
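// The aligned block is wrapped in std::unique_ptr<void, decltype(free)*> so it is released
// with free(). The posix_memalign() return code is deliberately ignored; ptr is
// pre-initialized to nullptr, so a failed allocation yields a null buffer for the caller.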
2984 
2985 AudioFlinger::RecordThread::PassthruPatchRecord::PassthruPatchRecord(
2986         RecordThread *recordThread,
2987         uint32_t sampleRate,
2988         audio_channel_mask_t channelMask,
2989         audio_format_t format,
2990         size_t frameCount,
2991         audio_input_flags_t flags,
2992         audio_source_t source)
2993         : PatchRecord(recordThread, sampleRate, channelMask, format, frameCount,
2994                 nullptr /*buffer*/, 0 /*bufferSize*/, flags, {} /* timeout */, source),
2995           mPatchRecordAudioBufferProvider(*this),
2996           mSinkBuffer(allocAligned(32, mFrameCount * mFrameSize)),
2997           mStubBuffer(allocAligned(32, mFrameCount * mFrameSize))
2998 {
2999     memset(mStubBuffer.get(), 0, mFrameCount * mFrameSize);
3000 }
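// mSinkBuffer receives the data read from the HAL in obtainBuffer(), while mStubBuffer is
// the zero-filled block handed back to the RecordThread through getNextBuffer(); the real
// samples are consumed by the playback side of the software patch.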
3001 
3002 sp<StreamInHalInterface> AudioFlinger::RecordThread::PassthruPatchRecord::obtainStream(
3003         sp<ThreadBase>* thread)
3004 {
3005     *thread = mThread.promote();
3006     if (!*thread) return nullptr;
3007     RecordThread *recordThread = static_cast<RecordThread*>((*thread).get());
3008     Mutex::Autolock _l(recordThread->mLock);
3009     return recordThread->mInput ? recordThread->mInput->stream : nullptr;
3010 }
3011 
3012 // PatchProxyBufferProvider methods are called on DirectOutputThread
3013 status_t AudioFlinger::RecordThread::PassthruPatchRecord::obtainBuffer(
3014         Proxy::Buffer* buffer, const struct timespec* timeOut)
3015 {
3016     if (mUnconsumedFrames) {
3017         buffer->mFrameCount = std::min(buffer->mFrameCount, mUnconsumedFrames);
3018         // mUnconsumedFrames is decreased in releaseBuffer to use actual frame consumption figure.
3019         return PatchRecord::obtainBuffer(buffer, timeOut);
3020     }
3021 
3022     // Otherwise, execute a read from HAL and write into the buffer.
3023     nsecs_t startTimeNs = 0;
3024     if (timeOut && (timeOut->tv_sec != 0 || timeOut->tv_nsec != 0) && timeOut->tv_sec != INT_MAX) {
3025         // Will need to correct timeOut by elapsed time.
3026         startTimeNs = systemTime();
3027     }
3028     const size_t framesToRead = std::min(buffer->mFrameCount, mFrameCount);
3029     buffer->mFrameCount = 0;
3030     buffer->mRaw = nullptr;
3031     sp<ThreadBase> thread;
3032     sp<StreamInHalInterface> stream = obtainStream(&thread);
3033     if (!stream) return NO_INIT;  // If there is no stream, RecordThread is not reading.
3034 
3035     status_t result = NO_ERROR;
3036     size_t bytesRead = 0;
3037     {
3038         ATRACE_NAME("read");
3039         result = stream->read(mSinkBuffer.get(), framesToRead * mFrameSize, &bytesRead);
3040         if (result != NO_ERROR) goto stream_error;
3041         if (bytesRead == 0) return NO_ERROR;
3042     }
3043 
3044     {
3045         std::lock_guard<std::mutex> lock(mReadLock);
3046         mReadBytes += bytesRead;
3047         mReadError = NO_ERROR;
3048     }
3049     mReadCV.notify_one();
3050     // writeFrames handles wraparound and should write all the provided frames.
3051     // If it cannot, something is wrong with the client/server buffer of the software patch.
3052     buffer->mFrameCount = writeFrames(
3053             &mPatchRecordAudioBufferProvider,
3054             mSinkBuffer.get(), bytesRead / mFrameSize, mFrameSize);
3055     ALOGW_IF(buffer->mFrameCount < bytesRead / mFrameSize,
3056             "Lost %zu frames obtained from HAL", bytesRead / mFrameSize - buffer->mFrameCount);
3057     mUnconsumedFrames = buffer->mFrameCount;
3058     struct timespec newTimeOut;
3059     if (startTimeNs) {
3060         // Correct the timeout by elapsed time.
3061         nsecs_t newTimeOutNs = audio_utils_ns_from_timespec(timeOut) - (systemTime() - startTimeNs);
3062         if (newTimeOutNs < 0) newTimeOutNs = 0;
3063         newTimeOut.tv_sec = newTimeOutNs / NANOS_PER_SECOND;
3064         newTimeOut.tv_nsec = newTimeOutNs - newTimeOut.tv_sec * NANOS_PER_SECOND;
3065         timeOut = &newTimeOut;
3066     }
3067     return PatchRecord::obtainBuffer(buffer, timeOut);
3068 
3069 stream_error:
3070     stream->standby();
3071     {
3072         std::lock_guard<std::mutex> lock(mReadLock);
3073         mReadError = result;
3074     }
3075     mReadCV.notify_one();
3076     return result;
3077 }
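// Summary of the flow above: data is read from the HAL stream into mSinkBuffer, pushed into
// the patch buffer via writeFrames(), and the RecordThread blocked in read() is woken
// through mReadCV; any remaining wait budget in timeOut is reduced by the elapsed time
// before delegating to PatchRecord::obtainBuffer().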
3078 
3079 void AudioFlinger::RecordThread::PassthruPatchRecord::releaseBuffer(Proxy::Buffer* buffer)
3080 {
3081     if (buffer->mFrameCount <= mUnconsumedFrames) {
3082         mUnconsumedFrames -= buffer->mFrameCount;
3083     } else {
3084         ALOGW("Write side has consumed more frames than we had: %zu > %zu",
3085                 buffer->mFrameCount, mUnconsumedFrames);
3086         mUnconsumedFrames = 0;
3087     }
3088     PatchRecord::releaseBuffer(buffer);
3089 }
3090 
3091 // AudioBufferProvider and Source methods are called on RecordThread
3092 // 'read' emulates actual audio data with 0's. This is OK as 'getNextBuffer'
3093 // and 'releaseBuffer' are stubbed out and ignore their input.
3094 // It's not possible to retrieve actual data here w/o blocking 'obtainBuffer'
3095 // until we copy it.
3096 status_t AudioFlinger::RecordThread::PassthruPatchRecord::read(
3097         void* buffer, size_t bytes, size_t* read)
3098 {
3099     bytes = std::min(bytes, mFrameCount * mFrameSize);
3100     {
3101         std::unique_lock<std::mutex> lock(mReadLock);
3102         mReadCV.wait(lock, [&]{ return mReadError != NO_ERROR || mReadBytes != 0; });
3103         if (mReadError != NO_ERROR) {
3104             mLastReadFrames = 0;
3105             return mReadError;
3106         }
3107         *read = std::min(bytes, mReadBytes);
3108         mReadBytes -= *read;
3109     }
3110     mLastReadFrames = *read / mFrameSize;
3111     memset(buffer, 0, *read);
3112     return 0;
3113 }
3114 
3115 status_t AudioFlinger::RecordThread::PassthruPatchRecord::getCapturePosition(
3116         int64_t* frames, int64_t* time)
3117 {
3118     sp<ThreadBase> thread;
3119     sp<StreamInHalInterface> stream = obtainStream(&thread);
3120     return stream ? stream->getCapturePosition(frames, time) : NO_INIT;
3121 }
3122 
3123 status_t AudioFlinger::RecordThread::PassthruPatchRecord::standby()
3124 {
3125     // RecordThread issues 'standby' command in two major cases:
3126     // 1. Error on read--this case is handled in 'obtainBuffer'.
3127     // 2. Track is stopping--as PassthruPatchRecord assumes continuous
3128     //    output, this can only happen when the software patch
3129     //    is being torn down. In this case, the RecordThread
3130     //    will terminate and close the HAL stream.
3131     return 0;
3132 }
3133 
3134 // As the buffer gets filled in obtainBuffer, here we only simulate data consumption.
3135 status_t AudioFlinger::RecordThread::PassthruPatchRecord::getNextBuffer(
3136         AudioBufferProvider::Buffer* buffer)
3137 {
3138     buffer->frameCount = mLastReadFrames;
3139     buffer->raw = buffer->frameCount != 0 ? mStubBuffer.get() : nullptr;
3140     return NO_ERROR;
3141 }
3142 
3143 void AudioFlinger::RecordThread::PassthruPatchRecord::releaseBuffer(
3144         AudioBufferProvider::Buffer* buffer)
3145 {
3146     buffer->frameCount = 0;
3147     buffer->raw = nullptr;
3148 }
3149 
3150 // ----------------------------------------------------------------------------
3151 #undef LOG_TAG
3152 #define LOG_TAG "AF::MmapTrack"
3153 
3154 AudioFlinger::MmapThread::MmapTrack::MmapTrack(ThreadBase *thread,
3155         const audio_attributes_t& attr,
3156         uint32_t sampleRate,
3157         audio_format_t format,
3158         audio_channel_mask_t channelMask,
3159         audio_session_t sessionId,
3160         bool isOut,
3161         const AttributionSourceState& attributionSource,
3162         pid_t creatorPid,
3163         audio_port_handle_t portId)
3164     :   TrackBase(thread, NULL, attr, sampleRate, format,
3165                   channelMask, (size_t)0 /* frameCount */,
3166                   nullptr /* buffer */, (size_t)0 /* bufferSize */,
3167                   sessionId, creatorPid,
3168                   VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)),
3169                   isOut,
3170                   ALLOC_NONE,
3171                   TYPE_DEFAULT, portId,
3172                   std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_MMAP) + std::to_string(portId)),
3173         mPid(VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.pid))),
3174             mSilenced(false), mSilencedNotified(false)
3175 {
3176     // Once this item is logged by the server, the client can add properties.
3177     mTrackMetrics.logConstructor(creatorPid, uid(), id());
3178 }
3179 
3180 AudioFlinger::MmapThread::MmapTrack::~MmapTrack()
3181 {
3182 }
3183 
3184 status_t AudioFlinger::MmapThread::MmapTrack::initCheck() const
3185 {
3186     return NO_ERROR;
3187 }
3188 
3189 status_t AudioFlinger::MmapThread::MmapTrack::start(AudioSystem::sync_event_t event __unused,
3190                                                     audio_session_t triggerSession __unused)
3191 {
3192     return NO_ERROR;
3193 }
3194 
3195 void AudioFlinger::MmapThread::MmapTrack::stop()
3196 {
3197 }
3198 
3199 // AudioBufferProvider interface
3200 status_t AudioFlinger::MmapThread::MmapTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer)
3201 {
3202     buffer->frameCount = 0;
3203     buffer->raw = nullptr;
3204     return INVALID_OPERATION;
3205 }
3206 
3207 // ExtendedAudioBufferProvider interface
3208 size_t AudioFlinger::MmapThread::MmapTrack::framesReady() const {
3209     return 0;
3210 }
3211 
3212 int64_t AudioFlinger::MmapThread::MmapTrack::framesReleased() const
3213 {
3214     return 0;
3215 }
3216 
3217 void AudioFlinger::MmapThread::MmapTrack::onTimestamp(const ExtendedTimestamp &timestamp __unused)
3218 {
3219 }
3220 
3221 void AudioFlinger::MmapThread::MmapTrack::processMuteEvent_l(
3222         const sp<IAudioManager>& audioManager, mute_state_t muteState)
3223 {
3224     if (mMuteState == muteState) {
3225         // mute state did not change, do nothing
3226         return;
3227     }
3228 
3229     status_t result = UNKNOWN_ERROR;
3230     if (audioManager && mPortId != AUDIO_PORT_HANDLE_NONE) {
3231         if (mMuteEventExtras == nullptr) {
3232             mMuteEventExtras = std::make_unique<os::PersistableBundle>();
3233         }
3234         mMuteEventExtras->putInt(String16(kExtraPlayerEventMuteKey),
3235                                  static_cast<int>(muteState));
3236 
3237         result = audioManager->portEvent(mPortId,
3238                                          PLAYER_UPDATE_MUTED,
3239                                          mMuteEventExtras);
3240     }
3241 
3242     if (result == OK) {
3243         mMuteState = muteState;
3244     } else {
3245         ALOGW("%s(%d): cannot process mute state for port ID %d, status error %d",
3246               __func__,
3247               id(),
3248               mPortId,
3249               result);
3250     }
3251 }
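// The new mute state is forwarded to the audio service as a PLAYER_UPDATE_MUTED port event
// carrying kExtraPlayerEventMuteKey; mMuteState is committed only when delivery succeeds,
// so a failed delivery is retried on the next call even for the same state.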
3252 
3253 void AudioFlinger::MmapThread::MmapTrack::appendDumpHeader(String8& result)
3254 {
3255     result.appendFormat("Client Session Port Id  Format Chn mask  SRate Flags %s\n",
3256                         isOut() ? "Usg CT": "Source");
3257 }
3258 
3259 void AudioFlinger::MmapThread::MmapTrack::appendDump(String8& result, bool active __unused)
3260 {
3261     result.appendFormat("%6u %7u %7u %08X %08X %6u 0x%03X ",
3262             mPid,
3263             mSessionId,
3264             mPortId,
3265             mFormat,
3266             mChannelMask,
3267             mSampleRate,
3268             mAttr.flags);
3269     if (isOut()) {
3270         result.appendFormat("%3x %2x", mAttr.usage, mAttr.content_type);
3271     } else {
3272         result.appendFormat("%6x", mAttr.source);
3273     }
3274     result.append("\n");
3275 }
3276 
3277 } // namespace android
3278