1 /*
2 **
3 ** Copyright 2012, The Android Open Source Project
4 **
5 ** Licensed under the Apache License, Version 2.0 (the "License");
6 ** you may not use this file except in compliance with the License.
7 ** You may obtain a copy of the License at
8 **
9 **     http://www.apache.org/licenses/LICENSE-2.0
10 **
11 ** Unless required by applicable law or agreed to in writing, software
12 ** distributed under the License is distributed on an "AS IS" BASIS,
13 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 ** See the License for the specific language governing permissions and
15 ** limitations under the License.
16 */
17 
18 
19 #define LOG_TAG "AudioFlinger"
20 //#define LOG_NDEBUG 0
21 #define ATRACE_TAG ATRACE_TAG_AUDIO
22 
23 #include "Configuration.h"
24 #include <linux/futex.h>
25 #include <math.h>
26 #include <sys/syscall.h>
27 #include <utils/Log.h>
28 #include <utils/Trace.h>
29 
30 #include <private/media/AudioTrackShared.h>
31 
32 #include "AudioFlinger.h"
33 
34 #include <media/nbaio/Pipe.h>
35 #include <media/nbaio/PipeReader.h>
36 #include <media/AudioValidator.h>
37 #include <media/RecordBufferConverter.h>
38 #include <mediautils/ServiceUtilities.h>
39 #include <audio_utils/minifloat.h>
40 
41 // ----------------------------------------------------------------------------
42 
43 // Note: the following macro is used for extremely verbose logging messages.  In
44 // order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
45 // 0; but one side effect of this is to turn on all LOGV's as well.  Some messages
46 // are so verbose that we want to suppress them even when we have ALOG_ASSERT
47 // turned on.  Do not uncomment the #define below unless you really know what you
48 // are doing and want to see all of the extremely verbose messages.
49 //#define VERY_VERY_VERBOSE_LOGGING
50 #ifdef VERY_VERY_VERBOSE_LOGGING
51 #define ALOGVV ALOGV
52 #else
53 #define ALOGVV(a...) do { } while(0)
54 #endif
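// Usage sketch (illustrative): with the default build, a statement such as
//     ALOGVV("releaseBuffer: %zu frames", buf.mFrameCount);
// compiles to a no-op; defining VERY_VERY_VERBOSE_LOGGING above turns it into
// a regular ALOGV call.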
55 
56 // TODO: Remove when this is put into AidlConversionUtil.h
57 #define VALUE_OR_RETURN_BINDER_STATUS(x)    \
58     ({                                      \
59        auto _tmp = (x);                     \
60        if (!_tmp.ok()) return ::android::aidl_utils::binderStatusFromStatusT(_tmp.error()); \
61        std::move(_tmp.value());             \
62      })
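// Usage sketch (mirrors getDualMonoMode() later in this file): the macro
// evaluates a ConversionResult-style expression, returns early with a binder
// error Status if it failed, and otherwise yields the unwrapped value:
//     *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
//             legacy2aidl_audio_dual_mono_mode_t_AudioDualMonoMode(mode));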
63 
64 namespace android {
65 
66 using ::android::aidl_utils::binderStatusFromStatusT;
67 using binder::Status;
68 using content::AttributionSourceState;
69 using media::VolumeShaper;
70 // ----------------------------------------------------------------------------
71 //      TrackBase
72 // ----------------------------------------------------------------------------
73 #undef LOG_TAG
74 #define LOG_TAG "AF::TrackBase"
75 
76 static volatile int32_t nextTrackId = 55;
77 
78 // TrackBase constructor must be called with AudioFlinger::mLock held
79 AudioFlinger::ThreadBase::TrackBase::TrackBase(
80             ThreadBase *thread,
81             const sp<Client>& client,
82             const audio_attributes_t& attr,
83             uint32_t sampleRate,
84             audio_format_t format,
85             audio_channel_mask_t channelMask,
86             size_t frameCount,
87             void *buffer,
88             size_t bufferSize,
89             audio_session_t sessionId,
90             pid_t creatorPid,
91             uid_t clientUid,
92             bool isOut,
93             alloc_type alloc,
94             track_type type,
95             audio_port_handle_t portId,
96             std::string metricsId)
97     :   RefBase(),
98         mThread(thread),
99         mClient(client),
100         mCblk(NULL),
101         // mBuffer, mBufferSize
102         mState(IDLE),
103         mAttr(attr),
104         mSampleRate(sampleRate),
105         mFormat(format),
106         mChannelMask(channelMask),
107         mChannelCount(isOut ?
108                 audio_channel_count_from_out_mask(channelMask) :
109                 audio_channel_count_from_in_mask(channelMask)),
110         mFrameSize(audio_has_proportional_frames(format) ?
111                 mChannelCount * audio_bytes_per_sample(format) : sizeof(int8_t)),
112         mFrameCount(frameCount),
113         mSessionId(sessionId),
114         mIsOut(isOut),
115         mId(android_atomic_inc(&nextTrackId)),
116         mTerminated(false),
117         mType(type),
118         mThreadIoHandle(thread ? thread->id() : AUDIO_IO_HANDLE_NONE),
119         mPortId(portId),
120         mIsInvalid(false),
121         mTrackMetrics(std::move(metricsId), isOut),
122         mCreatorPid(creatorPid)
123 {
124     const uid_t callingUid = IPCThreadState::self()->getCallingUid();
125     if (!isAudioServerOrMediaServerUid(callingUid) || clientUid == AUDIO_UID_INVALID) {
126         ALOGW_IF(clientUid != AUDIO_UID_INVALID && clientUid != callingUid,
127                 "%s(%d): uid %d tried to pass itself off as %d",
128                  __func__, mId, callingUid, clientUid);
129         clientUid = callingUid;
130     }
131     // clientUid contains the uid of the app that is responsible for this track, so we can blame
132     // battery usage on it.
133     mUid = clientUid;
134 
135     // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
136 
137     size_t minBufferSize = buffer == NULL ? roundup(frameCount) : frameCount;
138     // check overflow when computing bufferSize due to multiplication by mFrameSize.
139     if (minBufferSize < frameCount  // roundup rounds down for values above UINT_MAX / 2
140             || mFrameSize == 0   // format needs to be correct
141             || minBufferSize > SIZE_MAX / mFrameSize) {
142         android_errorWriteLog(0x534e4554, "34749571");
143         return;
144     }
145     minBufferSize *= mFrameSize;
146 
147     if (buffer == nullptr) {
148         bufferSize = minBufferSize; // allocated here.
149     } else if (minBufferSize > bufferSize) {
150         android_errorWriteLog(0x534e4554, "38340117");
151         return;
152     }
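    // Illustrative sizing (assumed values, not from the code above): for a
    // streaming track with frameCount = 1024 and stereo 16-bit PCM
    // (mFrameSize = 4), minBufferSize = roundup(1024) * 4 = 4096 bytes; the
    // SIZE_MAX / mFrameSize guard rejects frame counts whose byte size would
    // overflow size_t.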
153 
154     size_t size = sizeof(audio_track_cblk_t);
155     if (buffer == NULL && alloc == ALLOC_CBLK) {
156         // check overflow when computing allocation size for streaming tracks.
157         if (size > SIZE_MAX - bufferSize) {
158             android_errorWriteLog(0x534e4554, "34749571");
159             return;
160         }
161         size += bufferSize;
162     }
163 
164     if (client != 0) {
165         mCblkMemory = client->heap()->allocate(size);
166         if (mCblkMemory == 0 ||
167                 (mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->unsecurePointer())) == NULL) {
168             ALOGE("%s(%d): not enough memory for AudioTrack size=%zu", __func__, mId, size);
169             client->heap()->dump("AudioTrack");
170             mCblkMemory.clear();
171             return;
172         }
173     } else {
174         mCblk = (audio_track_cblk_t *) malloc(size);
175         if (mCblk == NULL) {
176             ALOGE("%s(%d): not enough memory for AudioTrack size=%zu", __func__, mId, size);
177             return;
178         }
179     }
180 
181     // construct the shared structure in-place.
182     if (mCblk != NULL) {
183         new(mCblk) audio_track_cblk_t();
184         switch (alloc) {
185         case ALLOC_READONLY: {
186             const sp<MemoryDealer> roHeap(thread->readOnlyHeap());
187             if (roHeap == 0 ||
188                     (mBufferMemory = roHeap->allocate(bufferSize)) == 0 ||
189                     (mBuffer = mBufferMemory->unsecurePointer()) == NULL) {
190                 ALOGE("%s(%d): not enough memory for read-only buffer size=%zu",
191                         __func__, mId, bufferSize);
192                 if (roHeap != 0) {
193                     roHeap->dump("buffer");
194                 }
195                 mCblkMemory.clear();
196                 mBufferMemory.clear();
197                 return;
198             }
199             memset(mBuffer, 0, bufferSize);
200             } break;
201         case ALLOC_PIPE:
202             mBufferMemory = thread->pipeMemory();
203             // mBuffer is the virtual address as seen from current process (mediaserver),
204             // and should normally be coming from mBufferMemory->unsecurePointer().
205             // However in this case the TrackBase does not reference the buffer directly.
206             // It should reference the buffer via the pipe.
207             // Therefore, to detect incorrect usage of the buffer, we set mBuffer to NULL.
208             mBuffer = NULL;
209             bufferSize = 0;
210             break;
211         case ALLOC_CBLK:
212             // clear all buffers
213             if (buffer == NULL) {
214                 mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
215                 memset(mBuffer, 0, bufferSize);
216             } else {
217                 mBuffer = buffer;
218 #if 0
219                 mCblk->mFlags = CBLK_FORCEREADY;    // FIXME hack, need to fix the track ready logic
220 #endif
221             }
222             break;
223         case ALLOC_LOCAL:
224             mBuffer = calloc(1, bufferSize);
225             break;
226         case ALLOC_NONE:
227             mBuffer = buffer;
228             break;
229         default:
230             LOG_ALWAYS_FATAL("%s(%d): invalid allocation type: %d", __func__, mId, (int)alloc);
231         }
232         mBufferSize = bufferSize;
233 
234 #ifdef TEE_SINK
235         mTee.set(sampleRate, mChannelCount, format, NBAIO_Tee::TEE_FLAG_TRACK);
236 #endif
237         // mState is mirrored for the client to read.
238         mState.setMirror(&mCblk->mState);
239         // ensure our state matches up until we consolidate the enumeration.
240         static_assert(CBLK_STATE_IDLE == IDLE);
241         static_assert(CBLK_STATE_PAUSING == PAUSING);
242     }
243 }
244 
245 // TODO b/182392769: use attribution source util
246 static AttributionSourceState audioServerAttributionSource(pid_t pid) {
247    AttributionSourceState attributionSource{};
248    attributionSource.uid = AID_AUDIOSERVER;
249    attributionSource.pid = pid;
250    attributionSource.token = sp<BBinder>::make();
251    return attributionSource;
252 }
253 
254 status_t AudioFlinger::ThreadBase::TrackBase::initCheck() const
255 {
256     status_t status;
257     if (mType == TYPE_OUTPUT || mType == TYPE_PATCH) {
258         status = cblk() != NULL ? NO_ERROR : NO_MEMORY;
259     } else {
260         status = getCblk() != 0 ? NO_ERROR : NO_MEMORY;
261     }
262     return status;
263 }
264 
265 AudioFlinger::ThreadBase::TrackBase::~TrackBase()
266 {
267     // delete the proxy before deleting the shared memory it refers to, to avoid dangling reference
268     mServerProxy.clear();
269     releaseCblk();
270     mCblkMemory.clear();    // free the shared memory before releasing the heap it belongs to
271     if (mClient != 0) {
272         // Client destructor must run with AudioFlinger client mutex locked
273         Mutex::Autolock _l(mClient->audioFlinger()->mClientLock);
274         // If the client's reference count drops to zero, the associated destructor
275         // must run with AudioFlinger lock held. Thus the explicit clear() rather than
276         // relying on the automatic clear() at end of scope.
277         mClient.clear();
278     }
279     // flush the binder command buffer
280     IPCThreadState::self()->flushCommands();
281 }
282 
283 // AudioBufferProvider interface
284 // getNextBuffer() = 0;
285 // This implementation of releaseBuffer() is used by Track and RecordTrack
286 void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
287 {
288 #ifdef TEE_SINK
289     mTee.write(buffer->raw, buffer->frameCount);
290 #endif
291 
292     ServerProxy::Buffer buf;
293     buf.mFrameCount = buffer->frameCount;
294     buf.mRaw = buffer->raw;
295     buffer->frameCount = 0;
296     buffer->raw = NULL;
297     mServerProxy->releaseBuffer(&buf);
298 }
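// Typical consumer pattern for the AudioBufferProvider interface above (a
// sketch, not a definitive caller): obtain a buffer, consume buffer.raw for
// buffer.frameCount frames, then return it via releaseBuffer(), which forwards
// the consumed frame count to the ServerProxy.
//     AudioBufferProvider::Buffer buffer;
//     buffer.frameCount = framesWanted;              // framesWanted is hypothetical
//     if (track->getNextBuffer(&buffer) == NO_ERROR) {
//         // mix or copy buffer.frameCount frames from buffer.raw
//         track->releaseBuffer(&buffer);
//     }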
299 
300 status_t AudioFlinger::ThreadBase::TrackBase::setSyncEvent(const sp<SyncEvent>& event)
301 {
302     mSyncEvents.add(event);
303     return NO_ERROR;
304 }
305 
306 AudioFlinger::ThreadBase::PatchTrackBase::PatchTrackBase(sp<ClientProxy> proxy,
307                                                          const ThreadBase& thread,
308                                                          const Timeout& timeout)
309     : mProxy(proxy)
310 {
311     if (timeout) {
312         setPeerTimeout(*timeout);
313     } else {
314         // Double buffer mixer
315         uint64_t mixBufferNs = ((uint64_t)2 * thread.frameCount() * 1000000000) /
316                                               thread.sampleRate();
317         setPeerTimeout(std::chrono::nanoseconds{mixBufferNs});
318     }
319 }
320 
321 void AudioFlinger::ThreadBase::PatchTrackBase::setPeerTimeout(std::chrono::nanoseconds timeout) {
322     mPeerTimeout.tv_sec = timeout.count() / std::nano::den;
323     mPeerTimeout.tv_nsec = timeout.count() % std::nano::den;
324 }
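// Worked example for the default peer timeout above (illustrative numbers):
// with thread.frameCount() = 960 and thread.sampleRate() = 48000, mixBufferNs
// = 2 * 960 * 1e9 / 48000 = 40,000,000 ns, i.e. two 20 ms mix buffers, which
// setPeerTimeout() stores as tv_sec = 0, tv_nsec = 40000000.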
325 
326 
327 // ----------------------------------------------------------------------------
328 //      Playback
329 // ----------------------------------------------------------------------------
330 #undef LOG_TAG
331 #define LOG_TAG "AF::TrackHandle"
332 
333 AudioFlinger::TrackHandle::TrackHandle(const sp<AudioFlinger::PlaybackThread::Track>& track)
334     : BnAudioTrack(),
335       mTrack(track)
336 {
337     setMinSchedulerPolicy(SCHED_NORMAL, ANDROID_PRIORITY_AUDIO);
338 }
339 
340 AudioFlinger::TrackHandle::~TrackHandle() {
341     // just stop the track on deletion, associated resources
342     // will be freed from the main thread once all pending buffers have
343     // been played. Unless it's not in the active track list, in which
344     // case we free everything now...
345     mTrack->destroy();
346 }
347 
348 Status AudioFlinger::TrackHandle::getCblk(
349         std::optional<media::SharedFileRegion>* _aidl_return) {
350     *_aidl_return = legacy2aidl_NullableIMemory_SharedFileRegion(mTrack->getCblk()).value();
351     return Status::ok();
352 }
353 
354 Status AudioFlinger::TrackHandle::start(int32_t* _aidl_return) {
355     *_aidl_return = mTrack->start();
356     return Status::ok();
357 }
358 
359 Status AudioFlinger::TrackHandle::stop() {
360     mTrack->stop();
361     return Status::ok();
362 }
363 
364 Status AudioFlinger::TrackHandle::flush() {
365     mTrack->flush();
366     return Status::ok();
367 }
368 
369 Status AudioFlinger::TrackHandle::pause() {
370     mTrack->pause();
371     return Status::ok();
372 }
373 
374 Status AudioFlinger::TrackHandle::attachAuxEffect(int32_t effectId,
375                                                   int32_t* _aidl_return) {
376     *_aidl_return = mTrack->attachAuxEffect(effectId);
377     return Status::ok();
378 }
379 
380 Status AudioFlinger::TrackHandle::setParameters(const std::string& keyValuePairs,
381                                                 int32_t* _aidl_return) {
382     *_aidl_return = mTrack->setParameters(String8(keyValuePairs.c_str()));
383     return Status::ok();
384 }
385 
386 Status AudioFlinger::TrackHandle::selectPresentation(int32_t presentationId, int32_t programId,
387                                                      int32_t* _aidl_return) {
388     *_aidl_return = mTrack->selectPresentation(presentationId, programId);
389     return Status::ok();
390 }
391 
392 Status AudioFlinger::TrackHandle::getTimestamp(media::AudioTimestampInternal* timestamp,
393                                                int32_t* _aidl_return) {
394     AudioTimestamp legacy;
395     *_aidl_return = mTrack->getTimestamp(legacy);
396     if (*_aidl_return != OK) {
397         return Status::ok();
398     }
399     *timestamp = legacy2aidl_AudioTimestamp_AudioTimestampInternal(legacy).value();
400     return Status::ok();
401 }
402 
403 Status AudioFlinger::TrackHandle::signal() {
404     mTrack->signal();
405     return Status::ok();
406 }
407 
408 Status AudioFlinger::TrackHandle::applyVolumeShaper(
409         const media::VolumeShaperConfiguration& configuration,
410         const media::VolumeShaperOperation& operation,
411         int32_t* _aidl_return) {
412     sp<VolumeShaper::Configuration> conf = new VolumeShaper::Configuration();
413     *_aidl_return = conf->readFromParcelable(configuration);
414     if (*_aidl_return != OK) {
415         return Status::ok();
416     }
417 
418     sp<VolumeShaper::Operation> op = new VolumeShaper::Operation();
419     *_aidl_return = op->readFromParcelable(operation);
420     if (*_aidl_return != OK) {
421         return Status::ok();
422     }
423 
424     *_aidl_return = mTrack->applyVolumeShaper(conf, op);
425     return Status::ok();
426 }
427 
428 Status AudioFlinger::TrackHandle::getVolumeShaperState(
429         int32_t id,
430         std::optional<media::VolumeShaperState>* _aidl_return) {
431     sp<VolumeShaper::State> legacy = mTrack->getVolumeShaperState(id);
432     if (legacy == nullptr) {
433         _aidl_return->reset();
434         return Status::ok();
435     }
436     media::VolumeShaperState aidl;
437     legacy->writeToParcelable(&aidl);
438     *_aidl_return = aidl;
439     return Status::ok();
440 }
441 
442 Status AudioFlinger::TrackHandle::getDualMonoMode(media::AudioDualMonoMode* _aidl_return)
443 {
444     audio_dual_mono_mode_t mode = AUDIO_DUAL_MONO_MODE_OFF;
445     const status_t status = mTrack->getDualMonoMode(&mode)
446             ?: AudioValidator::validateDualMonoMode(mode);
447     if (status == OK) {
448         *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
449                 legacy2aidl_audio_dual_mono_mode_t_AudioDualMonoMode(mode));
450     }
451     return binderStatusFromStatusT(status);
452 }
453 
454 Status AudioFlinger::TrackHandle::setDualMonoMode(
455         media::AudioDualMonoMode mode)
456 {
457     const auto localMonoMode = VALUE_OR_RETURN_BINDER_STATUS(
458             aidl2legacy_AudioDualMonoMode_audio_dual_mono_mode_t(mode));
459     return binderStatusFromStatusT(AudioValidator::validateDualMonoMode(localMonoMode)
460             ?: mTrack->setDualMonoMode(localMonoMode));
461 }
462 
463 Status AudioFlinger::TrackHandle::getAudioDescriptionMixLevel(float* _aidl_return)
464 {
465     float leveldB = -std::numeric_limits<float>::infinity();
466     const status_t status = mTrack->getAudioDescriptionMixLevel(&leveldB)
467             ?: AudioValidator::validateAudioDescriptionMixLevel(leveldB);
468     if (status == OK) *_aidl_return = leveldB;
469     return binderStatusFromStatusT(status);
470 }
471 
472 Status AudioFlinger::TrackHandle::setAudioDescriptionMixLevel(float leveldB)
473 {
474     return binderStatusFromStatusT(AudioValidator::validateAudioDescriptionMixLevel(leveldB)
475              ?: mTrack->setAudioDescriptionMixLevel(leveldB));
476 }
477 
478 Status AudioFlinger::TrackHandle::getPlaybackRateParameters(
479         media::AudioPlaybackRate* _aidl_return)
480 {
481     audio_playback_rate_t localPlaybackRate{};
482     status_t status = mTrack->getPlaybackRateParameters(&localPlaybackRate)
483             ?: AudioValidator::validatePlaybackRate(localPlaybackRate);
484     if (status == NO_ERROR) {
485         *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
486                 legacy2aidl_audio_playback_rate_t_AudioPlaybackRate(localPlaybackRate));
487     }
488     return binderStatusFromStatusT(status);
489 }
490 
491 Status AudioFlinger::TrackHandle::setPlaybackRateParameters(
492         const media::AudioPlaybackRate& playbackRate)
493 {
494     const audio_playback_rate_t localPlaybackRate = VALUE_OR_RETURN_BINDER_STATUS(
495             aidl2legacy_AudioPlaybackRate_audio_playback_rate_t(playbackRate));
496     return binderStatusFromStatusT(AudioValidator::validatePlaybackRate(localPlaybackRate)
497             ?: mTrack->setPlaybackRateParameters(localPlaybackRate));
498 }
499 
500 // ----------------------------------------------------------------------------
501 //      AppOp for audio playback
502 // -------------------------------
503 
504 // static
505 sp<AudioFlinger::PlaybackThread::OpPlayAudioMonitor>
506 AudioFlinger::PlaybackThread::OpPlayAudioMonitor::createIfNeeded(
507             const AttributionSourceState& attributionSource, const audio_attributes_t& attr, int id,
508             audio_stream_type_t streamType)
509 {
510     Vector <String16> packages;
511     uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
512     getPackagesForUid(uid, packages);
513     if (isServiceUid(uid)) {
514         if (packages.isEmpty()) {
515             ALOGD("OpPlayAudio: not muting track:%d usage:%d for service UID %d",
516                   id,
517                   attr.usage,
518                   uid);
519             return nullptr;
520         }
521     }
522     // stream type has been filtered by audio policy to indicate whether it can be muted
523     if (streamType == AUDIO_STREAM_ENFORCED_AUDIBLE) {
524         ALOGD("OpPlayAudio: not muting track:%d usage:%d ENFORCED_AUDIBLE", id, attr.usage);
525         return nullptr;
526     }
527     if ((attr.flags & AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY)
528             == AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY) {
529         ALOGD("OpPlayAudio: not muting track:%d flags %#x have FLAG_BYPASS_INTERRUPTION_POLICY",
530             id, attr.flags);
531         return nullptr;
532     }
533     return new OpPlayAudioMonitor(attributionSource, attr.usage, id);
534 }
535 
536 AudioFlinger::PlaybackThread::OpPlayAudioMonitor::OpPlayAudioMonitor(
537         const AttributionSourceState& attributionSource, audio_usage_t usage, int id)
538         : mHasOpPlayAudio(true), mAttributionSource(attributionSource), mUsage((int32_t) usage),
539         mId(id)
540 {
541 }
542 
543 AudioFlinger::PlaybackThread::OpPlayAudioMonitor::~OpPlayAudioMonitor()
544 {
545     if (mOpCallback != 0) {
546         mAppOpsManager.stopWatchingMode(mOpCallback);
547     }
548     mOpCallback.clear();
549 }
550 
551 void AudioFlinger::PlaybackThread::OpPlayAudioMonitor::onFirstRef()
552 {
553     checkPlayAudioForUsage();
554     if (mAttributionSource.packageName.has_value()) {
555         mOpCallback = new PlayAudioOpCallback(this);
556         mAppOpsManager.startWatchingMode(AppOpsManager::OP_PLAY_AUDIO,
557             VALUE_OR_FATAL(aidl2legacy_string_view_String16(
558             mAttributionSource.packageName.value_or("")))
559             , mOpCallback);
560     }
561 }
562 
563 bool AudioFlinger::PlaybackThread::OpPlayAudioMonitor::hasOpPlayAudio() const {
564     return mHasOpPlayAudio.load();
565 }
566 
567 // Note this method is never called (and never to be) for audio server / patch record track
568 // - not called from constructor due to check on UID,
569 // - not called from PlayAudioOpCallback because the callback is not installed in this case
570 void AudioFlinger::PlaybackThread::OpPlayAudioMonitor::checkPlayAudioForUsage()
571 {
572     if (!mAttributionSource.packageName.has_value()) {
573         mHasOpPlayAudio.store(false);
574     } else {
575         uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mAttributionSource.uid));
576         String16 packageName = VALUE_OR_FATAL(
577             aidl2legacy_string_view_String16(mAttributionSource.packageName.value_or("")));
578         bool hasIt = mAppOpsManager.checkAudioOpNoThrow(AppOpsManager::OP_PLAY_AUDIO,
579                     mUsage, uid, packageName) == AppOpsManager::MODE_ALLOWED;
580         ALOGD("OpPlayAudio: track:%d usage:%d %smuted", mId, mUsage, hasIt ? "not " : "");
581         mHasOpPlayAudio.store(hasIt);
582     }
583 }
584 
585 AudioFlinger::PlaybackThread::OpPlayAudioMonitor::PlayAudioOpCallback::PlayAudioOpCallback(
586         const wp<OpPlayAudioMonitor>& monitor) : mMonitor(monitor)
587 { }
588 
589 void AudioFlinger::PlaybackThread::OpPlayAudioMonitor::PlayAudioOpCallback::opChanged(int32_t op,
590             const String16& packageName) {
591     // we only have uid, so we need to check all package names anyway
592     UNUSED(packageName);
593     if (op != AppOpsManager::OP_PLAY_AUDIO) {
594         return;
595     }
596     sp<OpPlayAudioMonitor> monitor = mMonitor.promote();
597     if (monitor != NULL) {
598         monitor->checkPlayAudioForUsage();
599     }
600 }
601 
602 // static
603 void AudioFlinger::PlaybackThread::OpPlayAudioMonitor::getPackagesForUid(
604     uid_t uid, Vector<String16>& packages)
605 {
606     PermissionController permissionController;
607     permissionController.getPackagesForUid(uid, packages);
608 }
609 
610 // ----------------------------------------------------------------------------
611 #undef LOG_TAG
612 #define LOG_TAG "AF::Track"
613 
614 // Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
615 AudioFlinger::PlaybackThread::Track::Track(
616             PlaybackThread *thread,
617             const sp<Client>& client,
618             audio_stream_type_t streamType,
619             const audio_attributes_t& attr,
620             uint32_t sampleRate,
621             audio_format_t format,
622             audio_channel_mask_t channelMask,
623             size_t frameCount,
624             void *buffer,
625             size_t bufferSize,
626             const sp<IMemory>& sharedBuffer,
627             audio_session_t sessionId,
628             pid_t creatorPid,
629             const AttributionSourceState& attributionSource,
630             audio_output_flags_t flags,
631             track_type type,
632             audio_port_handle_t portId,
633             size_t frameCountToBeReady,
634             float speed,
635             bool isSpatialized)
636     :   TrackBase(thread, client, attr, sampleRate, format, channelMask, frameCount,
637                   // TODO: Using unsecurePointer() has some associated security pitfalls
638                   //       (see declaration for details).
639                   //       Either document why it is safe in this case or address the
640                   //       issue (e.g. by copying).
641                   (sharedBuffer != 0) ? sharedBuffer->unsecurePointer() : buffer,
642                   (sharedBuffer != 0) ? sharedBuffer->size() : bufferSize,
643                   sessionId, creatorPid,
644                   VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)), true /*isOut*/,
645                   (type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,
646                   type,
647                   portId,
648                   std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK) + std::to_string(portId)),
649     mFillingUpStatus(FS_INVALID),
650     // mRetryCount initialized later when needed
651     mSharedBuffer(sharedBuffer),
652     mStreamType(streamType),
653     mMainBuffer(thread->sinkBuffer()),
654     mAuxBuffer(NULL),
655     mAuxEffectId(0), mHasVolumeController(false),
656     mFrameMap(16 /* sink-frame-to-track-frame map memory */),
657     mVolumeHandler(new media::VolumeHandler(sampleRate)),
658     mOpPlayAudioMonitor(OpPlayAudioMonitor::createIfNeeded(attributionSource, attr, id(),
659         streamType)),
660     // mSinkTimestamp
661     mFastIndex(-1),
662     mCachedVolume(1.0),
663     /* The track might not play immediately after becoming active, similar to the case where its
664      * volume is 0. Its volume will be computed when the track starts playing. */
665     mFinalVolume(0.f),
666     mResumeToStopping(false),
667     mFlushHwPending(false),
668     mFlags(flags),
669     mSpeed(speed),
670     mIsSpatialized(isSpatialized)
671 {
672     // client == 0 implies sharedBuffer == 0
673     ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
674 
675     ALOGV_IF(sharedBuffer != 0, "%s(%d): sharedBuffer: %p, size: %zu",
676             __func__, mId, sharedBuffer->unsecurePointer(), sharedBuffer->size());
677 
678     if (mCblk == NULL) {
679         return;
680     }
681 
682     uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
683     if (!thread->isTrackAllowed_l(channelMask, format, sessionId, uid)) {
684         ALOGE("%s(%d): no more tracks available", __func__, mId);
685         releaseCblk(); // this makes the track invalid.
686         return;
687     }
688 
689     if (sharedBuffer == 0) {
690         mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
691                 mFrameSize, !isExternalTrack(), sampleRate);
692     } else {
693         mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
694                 mFrameSize, sampleRate);
695     }
696     mServerProxy = mAudioTrackServerProxy;
697     mServerProxy->setStartThresholdInFrames(frameCountToBeReady); // update the Cblk value
698 
699     // only allocate a fast track index if we were able to allocate a normal track name
700     if (flags & AUDIO_OUTPUT_FLAG_FAST) {
701         // FIXME: Not calling framesReadyIsCalledByMultipleThreads() exposes a potential
702         // race with setSyncEvent(). However, if we call it, we cannot properly start
703         // static fast tracks (SoundPool) immediately after stopping.
704         //mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();
705         ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
706         int i = __builtin_ctz(thread->mFastTrackAvailMask);
707         ALOG_ASSERT(0 < i && i < (int)FastMixerState::sMaxFastTracks);
708         // FIXME This is too eager.  We allocate a fast track index before the
709         //       fast track becomes active.  Since fast tracks are a scarce resource,
710         //       this means we are potentially denying other more important fast tracks from
711         //       being created.  It would be better to allocate the index dynamically.
712         mFastIndex = i;
713         thread->mFastTrackAvailMask &= ~(1 << i);
714     }
715 
716     mServerLatencySupported = checkServerLatencySupported(format, flags);
717 #ifdef TEE_SINK
718     mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
719             + "_" + std::to_string(mId) + "_T");
720 #endif
721 
722     if (thread->supportsHapticPlayback()) {
723         // If the track is attached to a haptic playback thread, it may have a HapticGenerator
724         // effect, which generates haptic data, on the track. In that case, an external vibration
725         // is always created for all tracks attached to the haptic playback thread.
726         mAudioVibrationController = new AudioVibrationController(this);
727         std::string packageName = attributionSource.packageName.has_value() ?
728             attributionSource.packageName.value() : "";
729         mExternalVibration = new os::ExternalVibration(
730                 mUid, packageName, mAttr, mAudioVibrationController);
731     }
732 
733     // Once this item is logged by the server, the client can add properties.
734     const char * const traits = sharedBuffer == 0 ? "" : "static";
735     mTrackMetrics.logConstructor(creatorPid, uid, id(), traits, streamType);
736 }
737 
738 AudioFlinger::PlaybackThread::Track::~Track()
739 {
740     ALOGV("%s(%d)", __func__, mId);
741 
742     // The destructor would clear mSharedBuffer,
743     // but it will not push the decremented reference count,
744     // leaving the client's IMemory dangling indefinitely.
745     // This prevents that leak.
746     if (mSharedBuffer != 0) {
747         mSharedBuffer.clear();
748     }
749 }
750 
751 status_t AudioFlinger::PlaybackThread::Track::initCheck() const
752 {
753     status_t status = TrackBase::initCheck();
754     if (status == NO_ERROR && mCblk == nullptr) {
755         status = NO_MEMORY;
756     }
757     return status;
758 }
759 
760 void AudioFlinger::PlaybackThread::Track::destroy()
761 {
762     // NOTE: destroyTrack_l() can remove a strong reference to this Track
763     // by removing it from mTracks vector, so there is a risk that this Tracks's
764     // destructor is called. As the destructor needs to lock mLock,
765     // we must acquire a strong reference on this Track before locking mLock
766     // here so that the destructor is called only when exiting this function.
767     // On the other hand, as long as Track::destroy() is only called by
768     // TrackHandle destructor, the TrackHandle still holds a strong ref on
769     // this Track with its member mTrack.
770     sp<Track> keep(this);
771     { // scope for mLock
772         bool wasActive = false;
773         sp<ThreadBase> thread = mThread.promote();
774         if (thread != 0) {
775             Mutex::Autolock _l(thread->mLock);
776             PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
777             wasActive = playbackThread->destroyTrack_l(this);
778         }
779         if (isExternalTrack() && !wasActive) {
780             AudioSystem::releaseOutput(mPortId);
781         }
782     }
783     forEachTeePatchTrack([](auto patchTrack) { patchTrack->destroy(); });
784 }
785 
786 void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
787 {
788     result.appendFormat("Type     Id Active Client Session Port Id S  Flags "
789                         "  Format Chn mask  SRate "
790                         "ST Usg CT "
791                         " G db  L dB  R dB  VS dB "
792                         "  Server FrmCnt  FrmRdy F Underruns  Flushed"
793                         "%s\n",
794                         isServerLatencySupported() ? "   Latency" : "");
795 }
796 
797 void AudioFlinger::PlaybackThread::Track::appendDump(String8& result, bool active)
798 {
799     char trackType;
800     switch (mType) {
801     case TYPE_DEFAULT:
802     case TYPE_OUTPUT:
803         if (isStatic()) {
804             trackType = 'S'; // static
805         } else {
806             trackType = ' '; // normal
807         }
808         break;
809     case TYPE_PATCH:
810         trackType = 'P';
811         break;
812     default:
813         trackType = '?';
814     }
815 
816     if (isFastTrack()) {
817         result.appendFormat("F%d %c %6d", mFastIndex, trackType, mId);
818     } else {
819         result.appendFormat("   %c %6d", trackType, mId);
820     }
821 
822     char nowInUnderrun;
823     switch (mObservedUnderruns.mBitFields.mMostRecent) {
824     case UNDERRUN_FULL:
825         nowInUnderrun = ' ';
826         break;
827     case UNDERRUN_PARTIAL:
828         nowInUnderrun = '<';
829         break;
830     case UNDERRUN_EMPTY:
831         nowInUnderrun = '*';
832         break;
833     default:
834         nowInUnderrun = '?';
835         break;
836     }
837 
838     char fillingStatus;
839     switch (mFillingUpStatus) {
840     case FS_INVALID:
841         fillingStatus = 'I';
842         break;
843     case FS_FILLING:
844         fillingStatus = 'f';
845         break;
846     case FS_FILLED:
847         fillingStatus = 'F';
848         break;
849     case FS_ACTIVE:
850         fillingStatus = 'A';
851         break;
852     default:
853         fillingStatus = '?';
854         break;
855     }
856 
857     // clip framesReadySafe to max representation in dump
858     const size_t framesReadySafe =
859             std::min(mAudioTrackServerProxy->framesReadySafe(), (size_t)99999999);
860 
861     // obtain volumes
862     const gain_minifloat_packed_t vlr = mAudioTrackServerProxy->getVolumeLR();
863     const std::pair<float /* volume */, bool /* active */> vsVolume =
864             mVolumeHandler->getLastVolume();
865 
866     // Our effective frame count is obtained by ServerProxy::getBufferSizeInFrames()
867     // as it may be reduced by the application.
868     const size_t bufferSizeInFrames = (size_t)mAudioTrackServerProxy->getBufferSizeInFrames();
869     // Check whether the buffer size has been modified by the app.
870     const char modifiedBufferChar = bufferSizeInFrames < mFrameCount
871             ? 'r' /* buffer reduced */: bufferSizeInFrames > mFrameCount
872                     ? 'e' /* error */ : ' ' /* identical */;
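    // Illustrative reading (assumed values): with mFrameCount = 960, a
    // getBufferSizeInFrames() of 480 dumps as 'r' (app reduced the buffer),
    // 960 as ' ' (unchanged), and anything larger as 'e' (unexpected).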
873 
874     result.appendFormat("%7s %6u %7u %7u %2s 0x%03X "
875                         "%08X %08X %6u "
876                         "%2u %3x %2x "
877                         "%5.2g %5.2g %5.2g %5.2g%c "
878                         "%08X %6zu%c %6zu %c %9u%c %7u",
879             active ? "yes" : "no",
880             (mClient == 0) ? getpid() : mClient->pid(),
881             mSessionId,
882             mPortId,
883             getTrackStateAsCodedString(),
884             mCblk->mFlags,
885 
886             mFormat,
887             mChannelMask,
888             sampleRate(),
889 
890             mStreamType,
891             mAttr.usage,
892             mAttr.content_type,
893 
894             20.0 * log10(mFinalVolume),
895             20.0 * log10(float_from_gain(gain_minifloat_unpack_left(vlr))),
896             20.0 * log10(float_from_gain(gain_minifloat_unpack_right(vlr))),
897             20.0 * log10(vsVolume.first), // VolumeShaper(s) total volume
898             vsVolume.second ? 'A' : ' ',  // if any VolumeShapers active
899 
900             mCblk->mServer,
901             bufferSizeInFrames,
902             modifiedBufferChar,
903             framesReadySafe,
904             fillingStatus,
905             mAudioTrackServerProxy->getUnderrunFrames(),
906             nowInUnderrun,
907             (unsigned)mAudioTrackServerProxy->framesFlushed() % 10000000
908             );
909 
910     if (isServerLatencySupported()) {
911         double latencyMs;
912         bool fromTrack;
913         if (getTrackLatencyMs(&latencyMs, &fromTrack) == OK) {
914             // Show latency in msec, followed by 't' if from track timestamp (the most accurate)
915             // or 'k' if estimated from kernel because track frames haven't been presented yet.
916             result.appendFormat(" %7.2lf %c", latencyMs, fromTrack ? 't' : 'k');
917         } else {
918             result.appendFormat("%10s", mCblk->mServer != 0 ? "unavail" : "new");
919         }
920     }
921     result.append("\n");
922 }
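// dB conversion used in the dump above (for reference): gains are printed as
// 20 * log10(gain), so a linear gain of 1.0 shows as 0 dB, 0.5 as roughly
// -6.02 dB, and 0.0 as -inf.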
923 
924 uint32_t AudioFlinger::PlaybackThread::Track::sampleRate() const {
925     return mAudioTrackServerProxy->getSampleRate();
926 }
927 
928 // AudioBufferProvider interface
929 status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(AudioBufferProvider::Buffer* buffer)
930 {
931     ServerProxy::Buffer buf;
932     size_t desiredFrames = buffer->frameCount;
933     buf.mFrameCount = desiredFrames;
934     status_t status = mServerProxy->obtainBuffer(&buf);
935     buffer->frameCount = buf.mFrameCount;
936     buffer->raw = buf.mRaw;
937     if (buf.mFrameCount == 0 && !isStopping() && !isStopped() && !isPaused() && !isOffloaded()) {
938         ALOGV("%s(%d): underrun,  framesReady(%zu) < framesDesired(%zd), state: %d",
939                 __func__, mId, buf.mFrameCount, desiredFrames, (int)mState);
940         mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
941     } else {
942         mAudioTrackServerProxy->tallyUnderrunFrames(0);
943     }
944     return status;
945 }
946 
947 void AudioFlinger::PlaybackThread::Track::releaseBuffer(AudioBufferProvider::Buffer* buffer)
948 {
949     interceptBuffer(*buffer);
950     TrackBase::releaseBuffer(buffer);
951 }
952 
953 // TODO: compensate for time shift between HW modules.
954 void AudioFlinger::PlaybackThread::Track::interceptBuffer(
955         const AudioBufferProvider::Buffer& sourceBuffer) {
956     auto start = std::chrono::steady_clock::now();
957     const size_t frameCount = sourceBuffer.frameCount;
958     if (frameCount == 0) {
959         return;  // No audio to intercept.
960         // Additionally PatchProxyBufferProvider::obtainBuffer (called by PatchTrack::getNextBuffer)
961         // does not allow a 0 frame size request, contrary to getNextBuffer
962     }
963     for (auto& teePatch : mTeePatches) {
964         RecordThread::PatchRecord* patchRecord = teePatch.patchRecord.get();
965         const size_t framesWritten = patchRecord->writeFrames(
966                 sourceBuffer.i8, frameCount, mFrameSize);
967         const size_t framesLeft = frameCount - framesWritten;
968         ALOGW_IF(framesLeft != 0, "%s(%d) PatchRecord %d can not provide big enough "
969                  "buffer %zu/%zu, dropping %zu frames", __func__, mId, patchRecord->mId,
970                  framesWritten, frameCount, framesLeft);
971     }
972     auto spent = ceil<std::chrono::microseconds>(std::chrono::steady_clock::now() - start);
973     using namespace std::chrono_literals;
974     // Average is ~20us per track, this should virtually never be logged (Logging takes >200us)
975     ALOGD_IF(spent > 500us, "%s: took %lldus to intercept %zu tracks", __func__,
976              spent.count(), mTeePatches.size());
977 }
978 
979 // ExtendedAudioBufferProvider interface
980 
981 // framesReady() may return an approximation of the number of frames if called
982 // from a different thread than the one calling Proxy->obtainBuffer() and
983 // Proxy->releaseBuffer(). Also note there is no mutual exclusion in the
984 // AudioTrackServerProxy so be especially careful calling with FastTracks.
985 size_t AudioFlinger::PlaybackThread::Track::framesReady() const {
986     if (mSharedBuffer != 0 && (isStopped() || isStopping())) {
987         // Static tracks return zero frames immediately upon stopping (for FastTracks).
988         // The remainder of the buffer is not drained.
989         return 0;
990     }
991     return mAudioTrackServerProxy->framesReady();
992 }
993 
994 int64_t AudioFlinger::PlaybackThread::Track::framesReleased() const
995 {
996     return mAudioTrackServerProxy->framesReleased();
997 }
998 
999 void AudioFlinger::PlaybackThread::Track::onTimestamp(const ExtendedTimestamp &timestamp)
1000 {
1001     // This call comes from a FastTrack and should be kept lockless.
1002     // The server side frames are already translated to client frames.
1003     mAudioTrackServerProxy->setTimestamp(timestamp);
1004 
1005     // We do not set drained here, as FastTrack timestamp may not go to very last frame.
1006 
1007     // Compute latency.
1008     // TODO: Consider whether the server latency may be passed in by FastMixer
1009     // as a constant for all active FastTracks.
1010     const double latencyMs = timestamp.getOutputServerLatencyMs(sampleRate());
1011     mServerLatencyFromTrack.store(true);
1012     mServerLatencyMs.store(latencyMs);
1013 }
1014 
1015 // Don't call for fast tracks; the framesReady() could result in priority inversion
1016 bool AudioFlinger::PlaybackThread::Track::isReady() const {
1017     if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) {
1018         return true;
1019     }
1020 
1021     if (isStopping()) {
1022         if (framesReady() > 0) {
1023             mFillingUpStatus = FS_FILLED;
1024         }
1025         return true;
1026     }
1027 
1028     size_t bufferSizeInFrames = mServerProxy->getBufferSizeInFrames();
1029     // Note: mServerProxy->getStartThresholdInFrames() is clamped.
1030     const size_t startThresholdInFrames = mServerProxy->getStartThresholdInFrames();
1031     const size_t framesToBeReady = std::clamp(  // clamp again to validate client values.
1032             std::min(startThresholdInFrames, bufferSizeInFrames), size_t(1), mFrameCount);
1033 
1034     if (framesReady() >= framesToBeReady || (mCblk->mFlags & CBLK_FORCEREADY)) {
1035         ALOGV("%s(%d): consider track ready with %zu/%zu, target was %zu)",
1036               __func__, mId, framesReady(), bufferSizeInFrames, framesToBeReady);
1037         mFillingUpStatus = FS_FILLED;
1038         android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
1039         return true;
1040     }
1041     return false;
1042 }
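// Worked example for the readiness threshold above (illustrative values): with
// mFrameCount = 4800, a client start threshold of 9600 and a buffer size of
// 4800 frames, std::min picks 4800 and the clamp keeps it within [1, 4800], so
// the track reports ready once framesReady() >= 4800 (or CBLK_FORCEREADY is set).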
1043 
1044 status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event __unused,
1045                                                     audio_session_t triggerSession __unused)
1046 {
1047     status_t status = NO_ERROR;
1048     ALOGV("%s(%d): calling pid %d session %d",
1049             __func__, mId, IPCThreadState::self()->getCallingPid(), mSessionId);
1050 
1051     sp<ThreadBase> thread = mThread.promote();
1052     if (thread != 0) {
1053         if (isOffloaded()) {
1054             Mutex::Autolock _laf(thread->mAudioFlinger->mLock);
1055             Mutex::Autolock _lth(thread->mLock);
1056             sp<EffectChain> ec = thread->getEffectChain_l(mSessionId);
1057             if (thread->mAudioFlinger->isNonOffloadableGlobalEffectEnabled_l() ||
1058                     (ec != 0 && ec->isNonOffloadableEnabled())) {
1059                 invalidate();
1060                 return PERMISSION_DENIED;
1061             }
1062         }
1063         Mutex::Autolock _lth(thread->mLock);
1064         track_state state = mState;
1065         // here the track could be either new, or restarted
1066         // in both cases "unstop" the track
1067 
1068         // initial state-stopping. next state-pausing.
1069         // What if resume is called ?
1070 
1071         if (state == FLUSHED) {
1072             // avoid underrun glitches when starting after flush
1073             reset();
1074         }
1075 
1076         // clear mPauseHwPending because of pause (and possibly flush) during underrun.
1077         mPauseHwPending = false;
1078         if (state == PAUSED || state == PAUSING) {
1079             if (mResumeToStopping) {
1080                 // the track was stopping (draining) when paused: resume back to STOPPING_1
1081                 mState = TrackBase::STOPPING_1;
1082                 ALOGV("%s(%d): PAUSED => STOPPING_1 on thread %d",
1083                         __func__, mId, (int)mThreadIoHandle);
1084             } else {
1085                 mState = TrackBase::RESUMING;
1086                 ALOGV("%s(%d): PAUSED => RESUMING on thread %d",
1087                         __func__,  mId, (int)mThreadIoHandle);
1088             }
1089         } else {
1090             mState = TrackBase::ACTIVE;
1091             ALOGV("%s(%d): ? => ACTIVE on thread %d",
1092                     __func__, mId, (int)mThreadIoHandle);
1093         }
1094 
1095         PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1096 
1097         // states to reset position info for pcm tracks
1098         if (audio_is_linear_pcm(mFormat)
1099                 && (state == IDLE || state == STOPPED || state == FLUSHED)) {
1100             mFrameMap.reset();
1101 
1102             if (!isFastTrack() && (isDirect() || isOffloaded())) {
1103                 // Start point of track -> sink frame map. If the HAL returns a
1104                 // frame position smaller than the first written frame in
1105                 // updateTrackFrameInfo, the timestamp can be interpolated
1106                 // instead of using a larger value.
1107                 mFrameMap.push(mAudioTrackServerProxy->framesReleased(),
1108                                playbackThread->framesWritten());
1109             }
1110         }
1111         if (isFastTrack()) {
1112             // refresh fast track underruns on start because that field is never cleared
1113             // by the fast mixer; furthermore, the same track can be recycled, i.e. start
1114             // after stop.
1115             mObservedUnderruns = playbackThread->getFastTrackUnderruns(mFastIndex);
1116         }
1117         status = playbackThread->addTrack_l(this);
1118         if (status == INVALID_OPERATION || status == PERMISSION_DENIED) {
1119             triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
1120             //  restore previous state if start was rejected by policy manager
1121             if (status == PERMISSION_DENIED) {
1122                 mState = state;
1123             }
1124         }
1125 
1126         // Audio timing metrics are computed a few mix cycles after starting.
1127         {
1128             mLogStartCountdown = LOG_START_COUNTDOWN;
1129             mLogStartTimeNs = systemTime();
1130             mLogStartFrames = mAudioTrackServerProxy->getTimestamp()
1131                     .mPosition[ExtendedTimestamp::LOCATION_KERNEL];
1132             mLogLatencyMs = 0.;
1133         }
1134         mLogForceVolumeUpdate = true;  // at least one volume logged for metrics when starting.
1135 
1136         if (status == NO_ERROR || status == ALREADY_EXISTS) {
1137             // for streaming tracks, remove the buffer read stop limit.
1138             mAudioTrackServerProxy->start();
1139         }
1140 
1141         // track was already in the active list, not a problem
1142         if (status == ALREADY_EXISTS) {
1143             status = NO_ERROR;
1144         } else {
1145             // Acknowledge any pending flush(), so that subsequent new data isn't discarded.
1146             // It is usually unsafe to access the server proxy from a binder thread.
1147             // But in this case we know the mixer thread (whether normal mixer or fast mixer)
1148             // isn't looking at this track yet:  we still hold the normal mixer thread lock,
1149             // and for fast tracks the track is not yet in the fast mixer thread's active set.
1150             // For static tracks, this is used to acknowledge change in position or loop.
1151             ServerProxy::Buffer buffer;
1152             buffer.mFrameCount = 1;
1153             (void) mAudioTrackServerProxy->obtainBuffer(&buffer, true /*ackFlush*/);
1154         }
1155     } else {
1156         status = BAD_VALUE;
1157     }
1158     if (status == NO_ERROR) {
1159         forEachTeePatchTrack([](auto patchTrack) { patchTrack->start(); });
1160     }
1161     return status;
1162 }
1163 
1164 void AudioFlinger::PlaybackThread::Track::stop()
1165 {
1166     ALOGV("%s(%d): calling pid %d", __func__, mId, IPCThreadState::self()->getCallingPid());
1167     sp<ThreadBase> thread = mThread.promote();
1168     if (thread != 0) {
1169         Mutex::Autolock _l(thread->mLock);
1170         track_state state = mState;
1171         if (state == RESUMING || state == ACTIVE || state == PAUSING || state == PAUSED) {
1172             // If the track is not active (PAUSED and buffers full), flush buffers
1173             PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1174             if (playbackThread->mActiveTracks.indexOf(this) < 0) {
1175                 reset();
1176                 mState = STOPPED;
1177             } else if (!isFastTrack() && !isOffloaded() && !isDirect()) {
1178                 mState = STOPPED;
1179             } else {
1180                 // For fast tracks prepareTracks_l() will set state to STOPPING_2
1181                 // presentation is complete
1182                 // For an offloaded track this starts a drain and state will
1183                 // move to STOPPING_2 when drain completes and then STOPPED
1184                 mState = STOPPING_1;
1185                 if (isOffloaded()) {
1186                     mRetryCount = PlaybackThread::kMaxTrackStopRetriesOffload;
1187                 }
1188             }
1189             playbackThread->broadcast_l();
1190             ALOGV("%s(%d): not stopping/stopped => stopping/stopped on thread %d",
1191                     __func__, mId, (int)mThreadIoHandle);
1192         }
1193     }
1194     forEachTeePatchTrack([](auto patchTrack) { patchTrack->stop(); });
1195 }
1196 
1197 void AudioFlinger::PlaybackThread::Track::pause()
1198 {
1199     ALOGV("%s(%d): calling pid %d", __func__, mId, IPCThreadState::self()->getCallingPid());
1200     sp<ThreadBase> thread = mThread.promote();
1201     if (thread != 0) {
1202         Mutex::Autolock _l(thread->mLock);
1203         PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1204         switch (mState) {
1205         case STOPPING_1:
1206         case STOPPING_2:
1207             if (!isOffloaded()) {
1208                 /* nothing to do if track is not offloaded */
1209                 break;
1210             }
1211 
1212             // Offloaded track was draining, we need to carry on draining when resumed
1213             mResumeToStopping = true;
1214             FALLTHROUGH_INTENDED;
1215         case ACTIVE:
1216         case RESUMING:
1217             mState = PAUSING;
1218             ALOGV("%s(%d): ACTIVE/RESUMING => PAUSING on thread %d",
1219                     __func__, mId, (int)mThreadIoHandle);
1220             if (isOffloadedOrDirect()) {
1221                 mPauseHwPending = true;
1222             }
1223             playbackThread->broadcast_l();
1224             break;
1225 
1226         default:
1227             break;
1228         }
1229     }
1230     // Pausing the TeePatch to avoid a glitch on underrun, at the cost of buffered audio loss.
1231     forEachTeePatchTrack([](auto patchTrack) { patchTrack->pause(); });
1232 }
1233 
1234 void AudioFlinger::PlaybackThread::Track::flush()
1235 {
1236     ALOGV("%s(%d)", __func__, mId);
1237     sp<ThreadBase> thread = mThread.promote();
1238     if (thread != 0) {
1239         Mutex::Autolock _l(thread->mLock);
1240         PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1241 
1242         // Flush the ring buffer now if the track is not active in the PlaybackThread.
1243         // Otherwise the flush would not be done until the track is resumed.
1244         // Requires FastTrack removal be BLOCK_UNTIL_ACKED
1245         if (playbackThread->mActiveTracks.indexOf(this) < 0) {
1246             (void)mServerProxy->flushBufferIfNeeded();
1247         }
1248 
1249         if (isOffloaded()) {
1250             // If offloaded we allow flush during any state except terminated
1251             // and keep the track active to avoid problems if user is seeking
1252             // rapidly and underlying hardware has a significant delay handling
1253             // a pause
1254             if (isTerminated()) {
1255                 return;
1256             }
1257 
1258             ALOGV("%s(%d): offload flush", __func__, mId);
1259             reset();
1260 
1261             if (mState == STOPPING_1 || mState == STOPPING_2) {
1262                 ALOGV("%s(%d): flushed in STOPPING_1 or 2 state, change state to ACTIVE",
1263                         __func__, mId);
1264                 mState = ACTIVE;
1265             }
1266 
1267             mFlushHwPending = true;
1268             mResumeToStopping = false;
1269         } else {
1270             if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED &&
1271                     mState != PAUSED && mState != PAUSING && mState != IDLE && mState != FLUSHED) {
1272                 return;
1273             }
1274             // No point remaining in PAUSED state after a flush => go to
1275             // FLUSHED state
1276             mState = FLUSHED;
1277             // do not reset the track if it is still in the process of being stopped or paused.
1278             // this will be done by prepareTracks_l() when the track is stopped.
1279             // prepareTracks_l() will see mState == FLUSHED, then
1280             // remove from active track list, reset(), and trigger presentation complete
1281             if (isDirect()) {
1282                 mFlushHwPending = true;
1283             }
1284             if (playbackThread->mActiveTracks.indexOf(this) < 0) {
1285                 reset();
1286             }
1287         }
1288         // Prevent flush being lost if the track is flushed and then resumed
1289         // before mixer thread can run. This is important when offloading
1290         // because the hardware buffer could hold a large amount of audio
1291         playbackThread->broadcast_l();
1292     }
1293     // Flush the Tee so that, on resume, it does not play stale data and glitch on the transition to new data.
1294     forEachTeePatchTrack([](auto patchTrack) { patchTrack->flush(); });
1295 }
1296 
1297 // must be called with thread lock held
1298 void AudioFlinger::PlaybackThread::Track::flushAck()
1299 {
1300     if (!isOffloaded() && !isDirect())
1301         return;
1302 
1303     // Clear the client ring buffer so that the app can prime the buffer while paused.
1304     // Otherwise it might not get cleared until playback is resumed and obtainBuffer() is called.
1305     mServerProxy->flushBufferIfNeeded();
1306 
1307     mFlushHwPending = false;
1308 }
1309 
1310 void AudioFlinger::PlaybackThread::Track::pauseAck()
1311 {
1312     mPauseHwPending = false;
1313 }
1314 
1315 void AudioFlinger::PlaybackThread::Track::reset()
1316 {
1317     // Do not reset twice to avoid discarding data written just after a flush and before
1318     // the audioflinger thread detects the track is stopped.
1319     if (!mResetDone) {
1320         // Force underrun condition to avoid false underrun callback until first data is
1321         // written to buffer
1322         android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
1323         mFillingUpStatus = FS_FILLING;
1324         mResetDone = true;
1325         if (mState == FLUSHED) {
1326             mState = IDLE;
1327         }
1328     }
1329 }
1330 
1331 status_t AudioFlinger::PlaybackThread::Track::setParameters(const String8& keyValuePairs)
1332 {
1333     sp<ThreadBase> thread = mThread.promote();
1334     if (thread == 0) {
1335         ALOGE("%s(%d): thread is dead", __func__, mId);
1336         return FAILED_TRANSACTION;
1337     } else if ((thread->type() == ThreadBase::DIRECT) ||
1338                     (thread->type() == ThreadBase::OFFLOAD)) {
1339         return thread->setParameters(keyValuePairs);
1340     } else {
1341         return PERMISSION_DENIED;
1342     }
1343 }
1344 
1345 status_t AudioFlinger::PlaybackThread::Track::selectPresentation(int presentationId,
1346         int programId) {
1347     sp<ThreadBase> thread = mThread.promote();
1348     if (thread == 0) {
1349         ALOGE("%s(%d): thread is dead", __func__, mId);
1350         return FAILED_TRANSACTION;
1351     } else if ((thread->type() == ThreadBase::DIRECT) || (thread->type() == ThreadBase::OFFLOAD)) {
1352         DirectOutputThread *directOutputThread = static_cast<DirectOutputThread*>(thread.get());
1353         return directOutputThread->selectPresentation(presentationId, programId);
1354     }
1355     return INVALID_OPERATION;
1356 }
1357 
1358 VolumeShaper::Status AudioFlinger::PlaybackThread::Track::applyVolumeShaper(
1359         const sp<VolumeShaper::Configuration>& configuration,
1360         const sp<VolumeShaper::Operation>& operation)
1361 {
1362     sp<VolumeShaper::Configuration> newConfiguration;
1363 
1364     if (isOffloadedOrDirect()) {
1365         const VolumeShaper::Configuration::OptionFlag optionFlag
1366             = configuration->getOptionFlags();
1367         if ((optionFlag & VolumeShaper::Configuration::OPTION_FLAG_CLOCK_TIME) == 0) {
1368             ALOGW("%s(%d): %s tracks do not support frame counted VolumeShaper,"
1369                     " using clock time instead",
1370                     __func__, mId,
1371                     isOffloaded() ? "Offload" : "Direct");
1372             newConfiguration = new VolumeShaper::Configuration(*configuration);
1373             newConfiguration->setOptionFlags(
1374                 VolumeShaper::Configuration::OptionFlag(optionFlag
1375                         | VolumeShaper::Configuration::OPTION_FLAG_CLOCK_TIME));
1376         }
1377     }
1378 
1379     VolumeShaper::Status status = mVolumeHandler->applyVolumeShaper(
1380             (newConfiguration.get() != nullptr ? newConfiguration : configuration), operation);
1381 
1382     if (isOffloadedOrDirect()) {
1383         // Signal thread to fetch new volume.
1384         sp<ThreadBase> thread = mThread.promote();
1385         if (thread != 0) {
1386             Mutex::Autolock _l(thread->mLock);
1387             thread->broadcast_l();
1388         }
1389     }
1390     return status;
1391 }
1392 
1393 sp<VolumeShaper::State> AudioFlinger::PlaybackThread::Track::getVolumeShaperState(int id)
1394 {
1395     // Note: We don't check if Thread exists.
1396 
1397     // mVolumeHandler is thread safe.
1398     return mVolumeHandler->getVolumeShaperState(id);
1399 }
1400 
1401 void AudioFlinger::PlaybackThread::Track::setFinalVolume(float volume)
1402 {
1403     if (mFinalVolume != volume) { // Compare to an epsilon if too many meaningless updates
1404         mFinalVolume = volume;
1405         setMetadataHasChanged();
1406         mLogForceVolumeUpdate = true;
1407     }
1408     if (mLogForceVolumeUpdate) {
1409         mLogForceVolumeUpdate = false;
1410         mTrackMetrics.logVolume(mFinalVolume);
1411     }
1412 }
1413 
1414 void AudioFlinger::PlaybackThread::Track::copyMetadataTo(MetadataInserter& backInserter) const
1415 {
1416     // Do not forward metadata for PatchTrack with unspecified stream type
1417     if (mStreamType == AUDIO_STREAM_PATCH) {
1418         return;
1419     }
1420 
1421     playback_track_metadata_v7_t metadata;
1422     metadata.base = {
1423             .usage = mAttr.usage,
1424             .content_type = mAttr.content_type,
1425             .gain = mFinalVolume,
1426     };
1427 
1428     // When attributes are undefined, derive default values from stream type.
1429     // See AudioAttributes.java, usageForStreamType() and Builder.setInternalLegacyStreamType()
1430     if (mAttr.usage == AUDIO_USAGE_UNKNOWN) {
1431         switch (mStreamType) {
1432         case AUDIO_STREAM_VOICE_CALL:
1433             metadata.base.usage = AUDIO_USAGE_VOICE_COMMUNICATION;
1434             metadata.base.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1435             break;
1436         case AUDIO_STREAM_SYSTEM:
1437             metadata.base.usage = AUDIO_USAGE_ASSISTANCE_SONIFICATION;
1438             metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1439             break;
1440         case AUDIO_STREAM_RING:
1441             metadata.base.usage = AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
1442             metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1443             break;
1444         case AUDIO_STREAM_MUSIC:
1445             metadata.base.usage = AUDIO_USAGE_MEDIA;
1446             metadata.base.content_type = AUDIO_CONTENT_TYPE_MUSIC;
1447             break;
1448         case AUDIO_STREAM_ALARM:
1449             metadata.base.usage = AUDIO_USAGE_ALARM;
1450             metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1451             break;
1452         case AUDIO_STREAM_NOTIFICATION:
1453             metadata.base.usage = AUDIO_USAGE_NOTIFICATION;
1454             metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1455             break;
1456         case AUDIO_STREAM_DTMF:
1457             metadata.base.usage = AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
1458             metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1459             break;
1460         case AUDIO_STREAM_ACCESSIBILITY:
1461             metadata.base.usage = AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
1462             metadata.base.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1463             break;
1464         case AUDIO_STREAM_ASSISTANT:
1465             metadata.base.usage = AUDIO_USAGE_ASSISTANT;
1466             metadata.base.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1467             break;
1468         case AUDIO_STREAM_REROUTING:
1469             metadata.base.usage = AUDIO_USAGE_VIRTUAL_SOURCE;
1470             // unknown content type
1471             break;
1472         case AUDIO_STREAM_CALL_ASSISTANT:
1473             metadata.base.usage = AUDIO_USAGE_CALL_ASSISTANT;
1474             metadata.base.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1475             break;
1476         default:
1477             break;
1478         }
1479     }
1480 
1481     metadata.channel_mask = mChannelMask;
1482     strncpy(metadata.tags, mAttr.tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);
1483     *backInserter++ = metadata;
1484 }
1485 
1486 void AudioFlinger::PlaybackThread::Track::setTeePatches(TeePatches teePatches) {
1487     forEachTeePatchTrack([](auto patchTrack) { patchTrack->destroy(); });
1488     mTeePatches = std::move(teePatches);
1489     if (mState == TrackBase::ACTIVE || mState == TrackBase::RESUMING ||
1490             mState == TrackBase::STOPPING_1) {
1491         forEachTeePatchTrack([](auto patchTrack) { patchTrack->start(); });
1492     }
1493 }
1494 
1495 status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& timestamp)
1496 {
1497     if (!isOffloaded() && !isDirect()) {
1498         return INVALID_OPERATION; // normal tracks handled through SSQ
1499     }
1500     sp<ThreadBase> thread = mThread.promote();
1501     if (thread == 0) {
1502         return INVALID_OPERATION;
1503     }
1504 
1505     Mutex::Autolock _l(thread->mLock);
1506     PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1507     return playbackThread->getTimestamp_l(timestamp);
1508 }
1509 
1510 status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
1511 {
1512     sp<ThreadBase> thread = mThread.promote();
1513     if (thread == nullptr) {
1514         return DEAD_OBJECT;
1515     }
1516 
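    // Move the aux effect onto this track's output thread before attaching it; if the
    // attach fails, the effect is moved back to the thread it came from (srcThread).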
1517     sp<PlaybackThread> dstThread = (PlaybackThread *)thread.get();
1518     sp<PlaybackThread> srcThread; // srcThread is initialized by call to moveAuxEffectToIo()
1519     sp<AudioFlinger> af = mClient->audioFlinger();
1520     status_t status = af->moveAuxEffectToIo(EffectId, dstThread, &srcThread);
1521 
1522     if (EffectId != 0 && status == NO_ERROR) {
1523         status = dstThread->attachAuxEffect(this, EffectId);
1524         if (status == NO_ERROR) {
1525             AudioSystem::moveEffectsToIo(std::vector<int>{EffectId}, dstThread->id());
1526         }
1527     }
1528 
1529     if (status != NO_ERROR && srcThread != nullptr) {
1530         af->moveAuxEffectToIo(EffectId, srcThread, &dstThread);
1531     }
1532     return status;
1533 }
1534 
1535 void AudioFlinger::PlaybackThread::Track::setAuxBuffer(int EffectId, int32_t *buffer)
1536 {
1537     mAuxEffectId = EffectId;
1538     mAuxBuffer = buffer;
1539 }
1540 
1541 // presentationComplete verified by frames, used by Mixed tracks.
1542 bool AudioFlinger::PlaybackThread::Track::presentationComplete(
1543         int64_t framesWritten, size_t audioHalFrames)
1544 {
1545     // TODO: improve this based on FrameMap if it exists, to ensure full drain.
1546     // This assists in proper timestamp computation as well as wakelock management.
1547 
1548     // a track is considered presented when the total number of frames written to audio HAL
1549     // corresponds to the number of frames written when presentationComplete() is called for the
1550     // first time (mPresentationCompleteFrames == 0) plus the buffer filling status at that time.
1551     // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
1552     // to detect when all frames have been played. In this case framesWritten isn't
1553     // useful because it doesn't always reflect whether there is data in the h/w
1554     // buffers, particularly if a track has been paused and resumed during draining
1555     ALOGV("%s(%d): presentationComplete() mPresentationCompleteFrames %lld framesWritten %lld",
1556             __func__, mId,
1557             (long long)mPresentationCompleteFrames, (long long)framesWritten);
1558     if (mPresentationCompleteFrames == 0) {
1559         mPresentationCompleteFrames = framesWritten + audioHalFrames;
1560         ALOGV("%s(%d): set:"
1561                 " mPresentationCompleteFrames %lld audioHalFrames %zu",
1562                 __func__, mId,
1563                 (long long)mPresentationCompleteFrames, audioHalFrames);
1564     }
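    // Illustrative example (added note): if the first call observes framesWritten = 48000 and
    // audioHalFrames = 1920, the target becomes 49920; the track is considered presented once
    // framesWritten reaches that target (and, for non-fast tracks, the proxy reports drained).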
1565 
1566     bool complete;
1567     if (isFastTrack()) { // does not go through linear map
1568         complete = framesWritten >= (int64_t) mPresentationCompleteFrames;
1569         ALOGV("%s(%d): %s framesWritten:%lld  mPresentationCompleteFrames:%lld",
1570                 __func__, mId, (complete ? "complete" : "waiting"),
1571                 (long long) framesWritten, (long long) mPresentationCompleteFrames);
1572     } else {  // Normal tracks, OutputTracks, and PatchTracks
1573         complete = framesWritten >= (int64_t) mPresentationCompleteFrames
1574                 && mAudioTrackServerProxy->isDrained();
1575     }
1576 
1577     if (complete) {
1578         notifyPresentationComplete();
1579         return true;
1580     }
1581     return false;
1582 }
1583 
1584 // presentationComplete checked by time, used by DirectTracks.
1585 bool AudioFlinger::PlaybackThread::Track::presentationComplete(uint32_t latencyMs)
1586 {
1587     // For Offloaded or Direct tracks.
1588 
1589     // For a direct track, presentationComplete is determined by elapsed time.
1590 
1591     // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
1592     // to detect when all frames have been played. In this case latencyMs isn't
1593     // useful because it doesn't always reflect whether there is data in the h/w
1594     // buffers, particularly if a track has been paused and resumed during draining
1595 
1596     constexpr float MIN_SPEED = 0.125f; // min speed scaling allowed for timely response.
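    // Illustrative example (added note): latencyMs = 100 with mSpeed = 0.5 yields a deadline of
    // systemTime() + 200 ms; clamping at MIN_SPEED bounds the wait to 8x the reported latency.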
1597     if (mPresentationCompleteTimeNs == 0) {
1598         mPresentationCompleteTimeNs = systemTime() + latencyMs * 1e6 / fmax(mSpeed, MIN_SPEED);
1599         ALOGV("%s(%d): set: latencyMs %u  mPresentationCompleteTimeNs:%lld",
1600                 __func__, mId, latencyMs, (long long) mPresentationCompleteTimeNs);
1601     }
1602 
1603     bool complete;
1604     if (isOffloaded()) {
1605         complete = true;
1606     } else { // Direct
1607         complete = systemTime() >= mPresentationCompleteTimeNs;
1608         ALOGV("%s(%d): %s", __func__, mId, (complete ? "complete" : "waiting"));
1609     }
1610     if (complete) {
1611         notifyPresentationComplete();
1612         return true;
1613     }
1614     return false;
1615 }
1616 
1617 void AudioFlinger::PlaybackThread::Track::notifyPresentationComplete()
1618 {
1619     // This only triggers once. TODO: should we enforce this?
1620     triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
1621     mAudioTrackServerProxy->setStreamEndDone();
1622 }
1623 
1624 void AudioFlinger::PlaybackThread::Track::triggerEvents(AudioSystem::sync_event_t type)
1625 {
1626     for (size_t i = 0; i < mSyncEvents.size();) {
1627         if (mSyncEvents[i]->type() == type) {
1628             mSyncEvents[i]->trigger();
1629             mSyncEvents.removeAt(i);
1630         } else {
1631             ++i;
1632         }
1633     }
1634 }
1635 
1636 // implement VolumeBufferProvider interface
1637 
1638 gain_minifloat_packed_t AudioFlinger::PlaybackThread::Track::getVolumeLR()
1639 {
1640     // called by FastMixer, so not allowed to take any locks, block, or do I/O including logs
1641     ALOG_ASSERT(isFastTrack() && (mCblk != NULL));
1642     gain_minifloat_packed_t vlr = mAudioTrackServerProxy->getVolumeLR();
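    // vlr packs the left and right gains as two 16-bit minifloats in one 32-bit word;
    // they are unpacked, clamped, scaled by the cached volume, and re-packed below.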
1643     float vl = float_from_gain(gain_minifloat_unpack_left(vlr));
1644     float vr = float_from_gain(gain_minifloat_unpack_right(vlr));
1645     // track volumes come from shared memory, so can't be trusted and must be clamped
1646     if (vl > GAIN_FLOAT_UNITY) {
1647         vl = GAIN_FLOAT_UNITY;
1648     }
1649     if (vr > GAIN_FLOAT_UNITY) {
1650         vr = GAIN_FLOAT_UNITY;
1651     }
1652     // now apply the cached master volume and stream type volume;
1653     // this is trusted but lacks any synchronization or barrier so may be stale
1654     float v = mCachedVolume;
1655     vl *= v;
1656     vr *= v;
1657     // re-combine into packed minifloat
1658     vlr = gain_minifloat_pack(gain_from_float(vl), gain_from_float(vr));
1659     // FIXME look at mute, pause, and stop flags
1660     return vlr;
1661 }
1662 
1663 status_t AudioFlinger::PlaybackThread::Track::setSyncEvent(const sp<SyncEvent>& event)
1664 {
1665     if (isTerminated() || mState == PAUSED ||
1666             ((framesReady() == 0) && ((mSharedBuffer != 0) ||
1667                                       (mState == STOPPED)))) {
1668         ALOGW("%s(%d): in invalid state %d on session %d %s mode, framesReady %zu",
1669               __func__, mId,
1670               (int)mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
1671         event->cancel();
1672         return INVALID_OPERATION;
1673     }
1674     (void) TrackBase::setSyncEvent(event);
1675     return NO_ERROR;
1676 }
1677 
1678 void AudioFlinger::PlaybackThread::Track::invalidate()
1679 {
1680     TrackBase::invalidate();
1681     signalClientFlag(CBLK_INVALID);
1682 }
1683 
1684 void AudioFlinger::PlaybackThread::Track::disable()
1685 {
1686     // TODO(b/142394888): the filling status should also be reset to filling
1687     signalClientFlag(CBLK_DISABLED);
1688 }
1689 
1690 void AudioFlinger::PlaybackThread::Track::signalClientFlag(int32_t flag)
1691 {
1692     // FIXME should use proxy, and needs work
1693     audio_track_cblk_t* cblk = mCblk;
1694     android_atomic_or(flag, &cblk->mFlags);
1695     android_atomic_release_store(0x40000000, &cblk->mFutex);
1696     // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
1697     (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
1698 }
1699 
1700 void AudioFlinger::PlaybackThread::Track::signal()
1701 {
1702     sp<ThreadBase> thread = mThread.promote();
1703     if (thread != 0) {
1704         PlaybackThread *t = (PlaybackThread *)thread.get();
1705         Mutex::Autolock _l(t->mLock);
1706         t->broadcast_l();
1707     }
1708 }
1709 
1710 status_t AudioFlinger::PlaybackThread::Track::getDualMonoMode(audio_dual_mono_mode_t* mode)
1711 {
1712     status_t status = INVALID_OPERATION;
1713     if (isOffloadedOrDirect()) {
1714         sp<ThreadBase> thread = mThread.promote();
1715         if (thread != nullptr) {
1716             PlaybackThread *t = (PlaybackThread *)thread.get();
1717             Mutex::Autolock _l(t->mLock);
1718             status = t->mOutput->stream->getDualMonoMode(mode);
1719             ALOGD_IF((status == NO_ERROR) && (mDualMonoMode != *mode),
1720                     "%s: mode %d inconsistent", __func__, mDualMonoMode);
1721         }
1722     }
1723     return status;
1724 }
1725 
1726 status_t AudioFlinger::PlaybackThread::Track::setDualMonoMode(audio_dual_mono_mode_t mode)
1727 {
1728     status_t status = INVALID_OPERATION;
1729     if (isOffloadedOrDirect()) {
1730         sp<ThreadBase> thread = mThread.promote();
1731         if (thread != nullptr) {
1732             auto t = static_cast<PlaybackThread *>(thread.get());
1733             Mutex::Autolock lock(t->mLock);
1734             status = t->mOutput->stream->setDualMonoMode(mode);
1735             if (status == NO_ERROR) {
1736                 mDualMonoMode = mode;
1737             }
1738         }
1739     }
1740     return status;
1741 }
1742 
1743 status_t AudioFlinger::PlaybackThread::Track::getAudioDescriptionMixLevel(float* leveldB)
1744 {
1745     status_t status = INVALID_OPERATION;
1746     if (isOffloadedOrDirect()) {
1747         sp<ThreadBase> thread = mThread.promote();
1748         if (thread != nullptr) {
1749             auto t = static_cast<PlaybackThread *>(thread.get());
1750             Mutex::Autolock lock(t->mLock);
1751             status = t->mOutput->stream->getAudioDescriptionMixLevel(leveldB);
1752             ALOGD_IF((status == NO_ERROR) && (mAudioDescriptionMixLevel != *leveldB),
1753                     "%s: level %.3f inconsistent", __func__, mAudioDescriptionMixLevel);
1754         }
1755     }
1756     return status;
1757 }
1758 
1759 status_t AudioFlinger::PlaybackThread::Track::setAudioDescriptionMixLevel(float leveldB)
1760 {
1761     status_t status = INVALID_OPERATION;
1762     if (isOffloadedOrDirect()) {
1763         sp<ThreadBase> thread = mThread.promote();
1764         if (thread != nullptr) {
1765             auto t = static_cast<PlaybackThread *>(thread.get());
1766             Mutex::Autolock lock(t->mLock);
1767             status = t->mOutput->stream->setAudioDescriptionMixLevel(leveldB);
1768             if (status == NO_ERROR) {
1769                 mAudioDescriptionMixLevel = leveldB;
1770             }
1771         }
1772     }
1773     return status;
1774 }
1775 
1776 status_t AudioFlinger::PlaybackThread::Track::getPlaybackRateParameters(
1777         audio_playback_rate_t* playbackRate)
1778 {
1779     status_t status = INVALID_OPERATION;
1780     if (isOffloadedOrDirect()) {
1781         sp<ThreadBase> thread = mThread.promote();
1782         if (thread != nullptr) {
1783             auto t = static_cast<PlaybackThread *>(thread.get());
1784             Mutex::Autolock lock(t->mLock);
1785             status = t->mOutput->stream->getPlaybackRateParameters(playbackRate);
1786             ALOGD_IF((status == NO_ERROR) &&
1787                     !isAudioPlaybackRateEqual(mPlaybackRateParameters, *playbackRate),
1788                     "%s: playbackRate inconsistent", __func__);
1789         }
1790     }
1791     return status;
1792 }
1793 
1794 status_t AudioFlinger::PlaybackThread::Track::setPlaybackRateParameters(
1795         const audio_playback_rate_t& playbackRate)
1796 {
1797     status_t status = INVALID_OPERATION;
1798     if (isOffloadedOrDirect()) {
1799         sp<ThreadBase> thread = mThread.promote();
1800         if (thread != nullptr) {
1801             auto t = static_cast<PlaybackThread *>(thread.get());
1802             Mutex::Autolock lock(t->mLock);
1803             status = t->mOutput->stream->setPlaybackRateParameters(playbackRate);
1804             if (status == NO_ERROR) {
1805                 mPlaybackRateParameters = playbackRate;
1806             }
1807         }
1808     }
1809     return status;
1810 }
1811 
1812 // To be called with thread lock held
1813 bool AudioFlinger::PlaybackThread::Track::isResumePending() {
1814 
1815     if (mState == RESUMING)
1816         return true;
1817     /* Resume is pending if track was stopping before pause was called */
1818     if (mState == STOPPING_1 &&
1819         mResumeToStopping)
1820         return true;
1821 
1822     return false;
1823 }
1824 
1825 // To be called with thread lock held
1826 void AudioFlinger::PlaybackThread::Track::resumeAck() {
1827 
1828 
1829     if (mState == RESUMING)
1830         mState = ACTIVE;
1831 
1832     // The other possibility of a pending resume is the STOPPING_1 state.
1833     // Do not update the state away from stopping, as that would prevent
1834     // drain from being called.
1835     if (mState == STOPPING_1) {
1836         mResumeToStopping = false;
1837     }
1838 }
1839 
1840 // To be called with thread lock held
1841 void AudioFlinger::PlaybackThread::Track::updateTrackFrameInfo(
1842         int64_t trackFramesReleased, int64_t sinkFramesWritten,
1843         uint32_t halSampleRate, const ExtendedTimestamp &timeStamp) {
1844     // Make the kernel frame time available.
1845     const FrameTime ft{
1846             timeStamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
1847             timeStamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]};
1848     // ALOGD("FrameTime: %lld %lld", (long long)ft.frames, (long long)ft.timeNs);
1849     mKernelFrameTime.store(ft);
1850     if (!audio_is_linear_pcm(mFormat)) {
1851         return;
1852     }
1853 
1854     // update frame map
1855     mFrameMap.push(trackFramesReleased, sinkFramesWritten);
1856 
1857     // adjust server times and set drained state.
1858     //
1859     // Our timestamps are only updated when the track is on the Thread active list.
1860     // We need to ensure that tracks are not removed before full drain.
1861     ExtendedTimestamp local = timeStamp;
1862     bool drained = true; // default assume drained, if no server info found
1863     bool checked = false;
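    // Walk the pipeline stages from the last one back to the server stage, translating each
    // valid sink-frame position into the corresponding track-frame position via mFrameMap.
    // The drain decision is taken from the deepest stage available (kernel, else server).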
1864     for (int i = ExtendedTimestamp::LOCATION_MAX - 1;
1865             i >= ExtendedTimestamp::LOCATION_SERVER; --i) {
1866         // Lookup the track frame corresponding to the sink frame position.
1867         if (local.mTimeNs[i] > 0) {
1868             local.mPosition[i] = mFrameMap.findX(local.mPosition[i]);
1869             // check drain state from the latest stage in the pipeline.
1870             if (!checked && i <= ExtendedTimestamp::LOCATION_KERNEL) {
1871                 drained = local.mPosition[i] >= mAudioTrackServerProxy->framesReleased();
1872                 checked = true;
1873             }
1874         }
1875     }
1876 
1877     mAudioTrackServerProxy->setDrained(drained);
1878     // Set correction for flushed frames that are not accounted for in released.
1879     local.mFlushed = mAudioTrackServerProxy->framesFlushed();
1880     mServerProxy->setTimestamp(local);
1881 
1882     // Compute latency info.
1883     const bool useTrackTimestamp = !drained;
1884     const double latencyMs = useTrackTimestamp
1885             ? local.getOutputServerLatencyMs(sampleRate())
1886             : timeStamp.getOutputServerLatencyMs(halSampleRate);
1887 
1888     mServerLatencyFromTrack.store(useTrackTimestamp);
1889     mServerLatencyMs.store(latencyMs);
1890 
1891     if (mLogStartCountdown > 0
1892             && local.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] > 0
1893             && local.mPosition[ExtendedTimestamp::LOCATION_KERNEL] > 0)
1894     {
1895         if (mLogStartCountdown > 1) {
1896             --mLogStartCountdown;
1897         } else if (latencyMs < mLogLatencyMs) { // wait for latency to stabilize (dip)
1898             mLogStartCountdown = 0;
1899             // startup is the difference in times for the current timestamp and our start
1900             double startUpMs =
1901                     (local.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] - mLogStartTimeNs) * 1e-6;
1902             // adjust for frames played.
1903             startUpMs -= (local.mPosition[ExtendedTimestamp::LOCATION_KERNEL] - mLogStartFrames)
1904                     * 1e3 / mSampleRate;
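            // Illustrative example (added note): a kernel timestamp 500 ms after mLogStartTimeNs
            // with 3840 frames played at 48 kHz gives startUpMs = 500 - 80 = 420 ms.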
1905             ALOGV("%s: latencyMs:%lf startUpMs:%lf"
1906                     " localTime:%lld startTime:%lld"
1907                     " localPosition:%lld startPosition:%lld",
1908                     __func__, latencyMs, startUpMs,
1909                     (long long)local.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
1910                     (long long)mLogStartTimeNs,
1911                     (long long)local.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
1912                     (long long)mLogStartFrames);
1913             mTrackMetrics.logLatencyAndStartup(latencyMs, startUpMs);
1914         }
1915         mLogLatencyMs = latencyMs;
1916     }
1917 }
1918 
1919 binder::Status AudioFlinger::PlaybackThread::Track::AudioVibrationController::mute(
1920         /*out*/ bool *ret) {
1921     *ret = false;
1922     sp<ThreadBase> thread = mTrack->mThread.promote();
1923     if (thread != 0) {
1924         // Lock for updating mHapticPlaybackEnabled.
1925         Mutex::Autolock _l(thread->mLock);
1926         PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1927         if ((mTrack->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
1928                 && playbackThread->mHapticChannelCount > 0) {
1929             mTrack->setHapticPlaybackEnabled(false);
1930             *ret = true;
1931         }
1932     }
1933     return binder::Status::ok();
1934 }
1935 
1936 binder::Status AudioFlinger::PlaybackThread::Track::AudioVibrationController::unmute(
1937         /*out*/ bool *ret) {
1938     *ret = false;
1939     sp<ThreadBase> thread = mTrack->mThread.promote();
1940     if (thread != 0) {
1941         // Lock for updating mHapticPlaybackEnabled.
1942         Mutex::Autolock _l(thread->mLock);
1943         PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1944         if ((mTrack->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
1945                 && playbackThread->mHapticChannelCount > 0) {
1946             mTrack->setHapticPlaybackEnabled(true);
1947             *ret = true;
1948         }
1949     }
1950     return binder::Status::ok();
1951 }
1952 
1953 // ----------------------------------------------------------------------------
1954 #undef LOG_TAG
1955 #define LOG_TAG "AF::OutputTrack"
1956 
1957 AudioFlinger::PlaybackThread::OutputTrack::OutputTrack(
1958             PlaybackThread *playbackThread,
1959             DuplicatingThread *sourceThread,
1960             uint32_t sampleRate,
1961             audio_format_t format,
1962             audio_channel_mask_t channelMask,
1963             size_t frameCount,
1964             const AttributionSourceState& attributionSource)
1965     :   Track(playbackThread, NULL, AUDIO_STREAM_PATCH,
1966               audio_attributes_t{} /* currently unused for output track */,
1967               sampleRate, format, channelMask, frameCount,
1968               nullptr /* buffer */, (size_t)0 /* bufferSize */, nullptr /* sharedBuffer */,
1969               AUDIO_SESSION_NONE, getpid(), attributionSource, AUDIO_OUTPUT_FLAG_NONE,
1970               TYPE_OUTPUT),
1971     mActive(false), mSourceThread(sourceThread)
1972 {
1973 
1974     if (mCblk != NULL) {
1975         mOutBuffer.frameCount = 0;
1976         playbackThread->mTracks.add(this);
1977         ALOGV("%s(): mCblk %p, mBuffer %p, "
1978                 "frameCount %zu, mChannelMask 0x%08x",
1979                 __func__, mCblk, mBuffer,
1980                 frameCount, mChannelMask);
1981         // since client and server are in the same process,
1982         // the buffer has the same virtual address on both sides
1983         mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize,
1984                 true /*clientInServer*/);
1985         mClientProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY);
1986         mClientProxy->setSendLevel(0.0);
1987         mClientProxy->setSampleRate(sampleRate);
1988     } else {
1989         ALOGW("%s(%d): Error creating output track on thread %d",
1990                 __func__, mId, (int)mThreadIoHandle);
1991     }
1992 }
1993 
1994 AudioFlinger::PlaybackThread::OutputTrack::~OutputTrack()
1995 {
1996     clearBufferQueue();
1997     // the superclass destructor will now delete the server proxy and the shared memory that both proxies refer to
1998 }
1999 
2000 status_t AudioFlinger::PlaybackThread::OutputTrack::start(AudioSystem::sync_event_t event,
2001                                                           audio_session_t triggerSession)
2002 {
2003     status_t status = Track::start(event, triggerSession);
2004     if (status != NO_ERROR) {
2005         return status;
2006     }
2007 
2008     mActive = true;
2009     mRetryCount = 127;
2010     return status;
2011 }
2012 
2013 void AudioFlinger::PlaybackThread::OutputTrack::stop()
2014 {
2015     Track::stop();
2016     clearBufferQueue();
2017     mOutBuffer.frameCount = 0;
2018     mActive = false;
2019 }
2020 
2021 ssize_t AudioFlinger::PlaybackThread::OutputTrack::write(void* data, uint32_t frames)
2022 {
2023     Buffer *pInBuffer;
2024     Buffer inBuffer;
2025     inBuffer.frameCount = frames;
2026     inBuffer.raw = data;
2027 
2028     uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs();
2029 
2030     if (!mActive && frames != 0) {
2031         (void) start();
2032     }
2033 
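    // Drain any buffers queued by previous overflows first, then the new data. Frames that
    // cannot be delivered within the source thread's wait budget are queued in mBufferQueue
    // (up to kMaxOverFlowBuffers) and retried on the next write().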
2034     while (waitTimeLeftMs) {
2035         // First write pending buffers, then new data
2036         if (mBufferQueue.size()) {
2037             pInBuffer = mBufferQueue.itemAt(0);
2038         } else {
2039             pInBuffer = &inBuffer;
2040         }
2041 
2042         if (pInBuffer->frameCount == 0) {
2043             break;
2044         }
2045 
2046         if (mOutBuffer.frameCount == 0) {
2047             mOutBuffer.frameCount = pInBuffer->frameCount;
2048             nsecs_t startTime = systemTime();
2049             status_t status = obtainBuffer(&mOutBuffer, waitTimeLeftMs);
2050             if (status != NO_ERROR && status != NOT_ENOUGH_DATA) {
2051                 ALOGV("%s(%d): thread %d no more output buffers; status %d",
2052                         __func__, mId,
2053                         (int)mThreadIoHandle, status);
2054                 break;
2055             }
2056             uint32_t waitTimeMs = (uint32_t)ns2ms(systemTime() - startTime);
2057             if (waitTimeLeftMs >= waitTimeMs) {
2058                 waitTimeLeftMs -= waitTimeMs;
2059             } else {
2060                 waitTimeLeftMs = 0;
2061             }
2062             if (status == NOT_ENOUGH_DATA) {
2063                 restartIfDisabled();
2064                 continue;
2065             }
2066         }
2067 
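        // Copy as many frames as both sides allow: the smaller of the pending input frames
        // and the frames available in the obtained output buffer.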
2068         uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
2069                 pInBuffer->frameCount;
2070         memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * mFrameSize);
2071         Proxy::Buffer buf;
2072         buf.mFrameCount = outFrames;
2073         buf.mRaw = NULL;
2074         mClientProxy->releaseBuffer(&buf);
2075         restartIfDisabled();
2076         pInBuffer->frameCount -= outFrames;
2077         pInBuffer->raw = (int8_t *)pInBuffer->raw + outFrames * mFrameSize;
2078         mOutBuffer.frameCount -= outFrames;
2079         mOutBuffer.raw = (int8_t *)mOutBuffer.raw + outFrames * mFrameSize;
2080 
2081         if (pInBuffer->frameCount == 0) {
2082             if (mBufferQueue.size()) {
2083                 mBufferQueue.removeAt(0);
2084                 free(pInBuffer->mBuffer);
2085                 if (pInBuffer != &inBuffer) {
2086                     delete pInBuffer;
2087                 }
2088                 ALOGV("%s(%d): thread %d released overflow buffer %zu",
2089                         __func__, mId,
2090                         (int)mThreadIoHandle, mBufferQueue.size());
2091             } else {
2092                 break;
2093             }
2094         }
2095     }
2096 
2097     // If we could not write all frames, allocate a buffer and queue it for next time.
2098     if (inBuffer.frameCount) {
2099         sp<ThreadBase> thread = mThread.promote();
2100         if (thread != 0 && !thread->standby()) {
2101             if (mBufferQueue.size() < kMaxOverFlowBuffers) {
2102                 pInBuffer = new Buffer;
2103                 pInBuffer->mBuffer = malloc(inBuffer.frameCount * mFrameSize);
2104                 pInBuffer->frameCount = inBuffer.frameCount;
2105                 pInBuffer->raw = pInBuffer->mBuffer;
2106                 memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * mFrameSize);
2107                 mBufferQueue.add(pInBuffer);
2108                 ALOGV("%s(%d): thread %d adding overflow buffer %zu", __func__, mId,
2109                         (int)mThreadIoHandle, mBufferQueue.size());
2110                 // audio data is consumed (stored locally); set frameCount to 0.
2111                 inBuffer.frameCount = 0;
2112             } else {
2113                 ALOGW("%s(%d): thread %d no more overflow buffers",
2114                         __func__, mId, (int)mThreadIoHandle);
2115                 // TODO: return error for this.
2116             }
2117         }
2118     }
2119 
2120     // Calling write() with a 0 length buffer means that no more data will be written:
2121     // We rely on stop() to set the appropriate flags to allow the remaining frames to play out.
2122     if (frames == 0 && mBufferQueue.size() == 0 && mActive) {
2123         stop();
2124     }
2125 
2126     return frames - inBuffer.frameCount;  // number of frames consumed.
2127 }
2128 
2129 void AudioFlinger::PlaybackThread::OutputTrack::copyMetadataTo(MetadataInserter& backInserter) const
2130 {
2131     std::lock_guard<std::mutex> lock(mTrackMetadatasMutex);
2132     backInserter = std::copy(mTrackMetadatas.begin(), mTrackMetadatas.end(), backInserter);
2133 }
2134 
2135 void AudioFlinger::PlaybackThread::OutputTrack::setMetadatas(const SourceMetadatas& metadatas) {
2136     {
2137         std::lock_guard<std::mutex> lock(mTrackMetadatasMutex);
2138         mTrackMetadatas = metadatas;
2139     }
2140     // No need to adjust metadata track volumes as OutputTrack volumes are always 0dBFS.
2141     setMetadataHasChanged();
2142 }
2143 
2144 status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(
2145         AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
2146 {
2147     ClientProxy::Buffer buf;
2148     buf.mFrameCount = buffer->frameCount;
2149     struct timespec timeout;
2150     timeout.tv_sec = waitTimeMs / 1000;
2151     timeout.tv_nsec = (int) (waitTimeMs % 1000) * 1000000;
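    // e.g. waitTimeMs = 2500 becomes tv_sec = 2 and tv_nsec = 500000000 (added illustrative note).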
2152     status_t status = mClientProxy->obtainBuffer(&buf, &timeout);
2153     buffer->frameCount = buf.mFrameCount;
2154     buffer->raw = buf.mRaw;
2155     return status;
2156 }
2157 
2158 void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue()
2159 {
2160     size_t size = mBufferQueue.size();
2161 
2162     for (size_t i = 0; i < size; i++) {
2163         Buffer *pBuffer = mBufferQueue.itemAt(i);
2164         free(pBuffer->mBuffer);
2165         delete pBuffer;
2166     }
2167     mBufferQueue.clear();
2168 }
2169 
2170 void AudioFlinger::PlaybackThread::OutputTrack::restartIfDisabled()
2171 {
2172     int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
2173     if (mActive && (flags & CBLK_DISABLED)) {
2174         start();
2175     }
2176 }
2177 
2178 // ----------------------------------------------------------------------------
2179 #undef LOG_TAG
2180 #define LOG_TAG "AF::PatchTrack"
2181 
2182 AudioFlinger::PlaybackThread::PatchTrack::PatchTrack(PlaybackThread *playbackThread,
2183                                                      audio_stream_type_t streamType,
2184                                                      uint32_t sampleRate,
2185                                                      audio_channel_mask_t channelMask,
2186                                                      audio_format_t format,
2187                                                      size_t frameCount,
2188                                                      void *buffer,
2189                                                      size_t bufferSize,
2190                                                      audio_output_flags_t flags,
2191                                                      const Timeout& timeout,
2192                                                      size_t frameCountToBeReady)
2193     :   Track(playbackThread, NULL, streamType,
2194               audio_attributes_t{} /* currently unused for patch track */,
2195               sampleRate, format, channelMask, frameCount,
2196               buffer, bufferSize, nullptr /* sharedBuffer */,
2197               AUDIO_SESSION_NONE, getpid(), audioServerAttributionSource(getpid()), flags,
2198               TYPE_PATCH, AUDIO_PORT_HANDLE_NONE, frameCountToBeReady),
2199         PatchTrackBase(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, true, true),
2200                        *playbackThread, timeout)
2201 {
2202     ALOGV("%s(%d): sampleRate %d mPeerTimeout %d.%03d sec",
2203                                       __func__, mId, sampleRate,
2204                                       (int)mPeerTimeout.tv_sec,
2205                                       (int)(mPeerTimeout.tv_nsec / 1000000));
2206 }
2207 
2208 AudioFlinger::PlaybackThread::PatchTrack::~PatchTrack()
2209 {
2210     ALOGV("%s(%d)", __func__, mId);
2211 }
2212 
2213 size_t AudioFlinger::PlaybackThread::PatchTrack::framesReady() const
2214 {
2215     if (mPeerProxy && mPeerProxy->producesBufferOnDemand()) {
2216         return std::numeric_limits<size_t>::max();
2217     } else {
2218         return Track::framesReady();
2219     }
2220 }
2221 
2222 status_t AudioFlinger::PlaybackThread::PatchTrack::start(AudioSystem::sync_event_t event,
2223                                                          audio_session_t triggerSession)
2224 {
2225     status_t status = Track::start(event, triggerSession);
2226     if (status != NO_ERROR) {
2227         return status;
2228     }
2229     android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
2230     return status;
2231 }
2232 
2233 // AudioBufferProvider interface
2234 status_t AudioFlinger::PlaybackThread::PatchTrack::getNextBuffer(
2235         AudioBufferProvider::Buffer* buffer)
2236 {
2237     ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
2238     Proxy::Buffer buf;
2239     buf.mFrameCount = buffer->frameCount;
2240     if (ATRACE_ENABLED()) {
2241         std::string traceName("PTnReq");
2242         traceName += std::to_string(id());
2243         ATRACE_INT(traceName.c_str(), buf.mFrameCount);
2244     }
2245     status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
2246     ALOGV_IF(status != NO_ERROR, "%s(%d): getNextBuffer status %d", __func__, mId, status);
2247     buffer->frameCount = buf.mFrameCount;
2248     if (ATRACE_ENABLED()) {
2249         std::string traceName("PTnObt");
2250         traceName += std::to_string(id());
2251         ATRACE_INT(traceName.c_str(), buf.mFrameCount);
2252     }
2253     if (buf.mFrameCount == 0) {
2254         return WOULD_BLOCK;
2255     }
2256     status = Track::getNextBuffer(buffer);
2257     return status;
2258 }
2259 
2260 void AudioFlinger::PlaybackThread::PatchTrack::releaseBuffer(AudioBufferProvider::Buffer* buffer)
2261 {
2262     ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
2263     Proxy::Buffer buf;
2264     buf.mFrameCount = buffer->frameCount;
2265     buf.mRaw = buffer->raw;
2266     mPeerProxy->releaseBuffer(&buf);
2267     TrackBase::releaseBuffer(buffer);
2268 }
2269 
2270 status_t AudioFlinger::PlaybackThread::PatchTrack::obtainBuffer(Proxy::Buffer* buffer,
2271                                                                 const struct timespec *timeOut)
2272 {
2273     status_t status = NO_ERROR;
2274     static const int32_t kMaxTries = 5;
2275     int32_t tryCounter = kMaxTries;
2276     const size_t originalFrameCount = buffer->mFrameCount;
2277     do {
2278         if (status == NOT_ENOUGH_DATA) {
2279             restartIfDisabled();
2280             buffer->mFrameCount = originalFrameCount; // cleared on error, must be restored.
2281         }
2282         status = mProxy->obtainBuffer(buffer, timeOut);
2283     } while ((status == NOT_ENOUGH_DATA) && (tryCounter-- > 0));
2284     return status;
2285 }
2286 
2287 void AudioFlinger::PlaybackThread::PatchTrack::releaseBuffer(Proxy::Buffer* buffer)
2288 {
2289     mProxy->releaseBuffer(buffer);
2290     restartIfDisabled();
2291 
2292     // Check if the PatchTrack has enough data to write once in releaseBuffer().
2293     // If not, prevent an underrun from occurring by moving the track into FS_FILLING;
2294     // this logic avoids glitches when suspending A2DP with AudioPlaybackCapture.
2295     // TODO: perhaps underrun avoidance could be a track property checked in isReady() instead.
2296     if (mFillingUpStatus == FS_ACTIVE
2297             && audio_is_linear_pcm(mFormat)
2298             && !isOffloadedOrDirect()) {
2299         if (sp<ThreadBase> thread = mThread.promote();
2300             thread != 0) {
2301             PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
2302             const size_t frameCount = playbackThread->frameCount() * sampleRate()
2303                     / playbackThread->sampleRate();
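            // Illustrative example (added note): a 960-frame mixer buffer at 48 kHz with a
            // 24 kHz patch track gives a threshold of 480 track frames, i.e. one thread
            // buffer's worth of audio at the track's sample rate.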
2304             if (framesReady() < frameCount) {
2305                 ALOGD("%s(%d) Not enough data, wait for buffer to fill", __func__, mId);
2306                 mFillingUpStatus = FS_FILLING;
2307             }
2308         }
2309     }
2310 }
2311 
2312 void AudioFlinger::PlaybackThread::PatchTrack::restartIfDisabled()
2313 {
2314     if (android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags) & CBLK_DISABLED) {
2315         ALOGW("%s(%d): disabled due to previous underrun, restarting", __func__, mId);
2316         start();
2317     }
2318 }
2319 
2320 // ----------------------------------------------------------------------------
2321 //      Record
2322 // ----------------------------------------------------------------------------
2323 
2324 
2325 #undef LOG_TAG
2326 #define LOG_TAG "AF::RecordHandle"
2327 
2328 AudioFlinger::RecordHandle::RecordHandle(
2329         const sp<AudioFlinger::RecordThread::RecordTrack>& recordTrack)
2330     : BnAudioRecord(),
2331     mRecordTrack(recordTrack)
2332 {
2333     setMinSchedulerPolicy(SCHED_NORMAL, ANDROID_PRIORITY_AUDIO);
2334 }
2335 
2336 AudioFlinger::RecordHandle::~RecordHandle() {
2337     stop_nonvirtual();
2338     mRecordTrack->destroy();
2339 }
2340 
2341 binder::Status AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
2342         int /*audio_session_t*/ triggerSession) {
2343     ALOGV("%s()", __func__);
2344     return binderStatusFromStatusT(
2345         mRecordTrack->start((AudioSystem::sync_event_t)event, (audio_session_t) triggerSession));
2346 }
2347 
2348 binder::Status AudioFlinger::RecordHandle::stop() {
2349     stop_nonvirtual();
2350     return binder::Status::ok();
2351 }
2352 
2353 void AudioFlinger::RecordHandle::stop_nonvirtual() {
2354     ALOGV("%s()", __func__);
2355     mRecordTrack->stop();
2356 }
2357 
2358 binder::Status AudioFlinger::RecordHandle::getActiveMicrophones(
2359         std::vector<media::MicrophoneInfoData>* activeMicrophones) {
2360     ALOGV("%s()", __func__);
2361     std::vector<media::MicrophoneInfo> mics;
2362     status_t status = mRecordTrack->getActiveMicrophones(&mics);
2363     activeMicrophones->resize(mics.size());
2364     for (size_t i = 0; status == OK && i < mics.size(); ++i) {
2365        status = mics[i].writeToParcelable(&activeMicrophones->at(i));
2366     }
2367     return binderStatusFromStatusT(status);
2368 }
2369 
2370 binder::Status AudioFlinger::RecordHandle::setPreferredMicrophoneDirection(
2371         int /*audio_microphone_direction_t*/ direction) {
2372     ALOGV("%s()", __func__);
2373     return binderStatusFromStatusT(mRecordTrack->setPreferredMicrophoneDirection(
2374             static_cast<audio_microphone_direction_t>(direction)));
2375 }
2376 
2377 binder::Status AudioFlinger::RecordHandle::setPreferredMicrophoneFieldDimension(float zoom) {
2378     ALOGV("%s()", __func__);
2379     return binderStatusFromStatusT(mRecordTrack->setPreferredMicrophoneFieldDimension(zoom));
2380 }
2381 
2382 binder::Status AudioFlinger::RecordHandle::shareAudioHistory(
2383         const std::string& sharedAudioPackageName, int64_t sharedAudioStartMs) {
2384     return binderStatusFromStatusT(
2385             mRecordTrack->shareAudioHistory(sharedAudioPackageName, sharedAudioStartMs));
2386 }
2387 
2388 // ----------------------------------------------------------------------------
2389 #undef LOG_TAG
2390 #define LOG_TAG "AF::RecordTrack"
2391 
2392 // RecordTrack constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
2393 AudioFlinger::RecordThread::RecordTrack::RecordTrack(
2394             RecordThread *thread,
2395             const sp<Client>& client,
2396             const audio_attributes_t& attr,
2397             uint32_t sampleRate,
2398             audio_format_t format,
2399             audio_channel_mask_t channelMask,
2400             size_t frameCount,
2401             void *buffer,
2402             size_t bufferSize,
2403             audio_session_t sessionId,
2404             pid_t creatorPid,
2405             const AttributionSourceState& attributionSource,
2406             audio_input_flags_t flags,
2407             track_type type,
2408             audio_port_handle_t portId,
2409             int32_t startFrames)
2410     :   TrackBase(thread, client, attr, sampleRate, format,
2411                   channelMask, frameCount, buffer, bufferSize, sessionId,
2412                   creatorPid,
2413                   VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)),
2414                   false /*isOut*/,
2415                   (type == TYPE_DEFAULT) ?
2416                           ((flags & AUDIO_INPUT_FLAG_FAST) ? ALLOC_PIPE : ALLOC_CBLK) :
2417                           ((buffer == NULL) ? ALLOC_LOCAL : ALLOC_NONE),
2418                   type, portId,
2419                   std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_RECORD) + std::to_string(portId)),
2420         mOverflow(false),
2421         mFramesToDrop(0),
2422         mResamplerBufferProvider(NULL), // initialize in case of early constructor exit
2423         mRecordBufferConverter(NULL),
2424         mFlags(flags),
2425         mSilenced(false),
2426         mStartFrames(startFrames)
2427 {
2428     if (mCblk == NULL) {
2429         return;
2430     }
2431 
2432     if (!isDirect()) {
2433         mRecordBufferConverter = new RecordBufferConverter(
2434                 thread->mChannelMask, thread->mFormat, thread->mSampleRate,
2435                 channelMask, format, sampleRate);
2436         // Check if the RecordBufferConverter construction was successful.
2437         // If not, don't continue with construction.
2438         //
2439         // NOTE: It would be extremely rare that the record track cannot be created
2440         // for the current device, but a pending or future device change would make
2441         // the record track configuration valid.
2442         if (mRecordBufferConverter->initCheck() != NO_ERROR) {
2443             ALOGE("%s(%d): RecordTrack unable to create record buffer converter", __func__, mId);
2444             return;
2445         }
2446     }
2447 
2448     mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
2449             mFrameSize, !isExternalTrack());
2450 
2451     mResamplerBufferProvider = new ResamplerBufferProvider(this);
2452 
2453     if (flags & AUDIO_INPUT_FLAG_FAST) {
2454         ALOG_ASSERT(thread->mFastTrackAvail);
2455         thread->mFastTrackAvail = false;
2456     } else {
2457         // TODO: only Normal Record has timestamps (Fast Record does not).
2458         mServerLatencySupported = checkServerLatencySupported(mFormat, flags);
2459     }
2460 #ifdef TEE_SINK
2461     mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
2462             + "_" + std::to_string(mId)
2463             + "_R");
2464 #endif
2465 
2466     // Once this item is logged by the server, the client can add properties.
2467     mTrackMetrics.logConstructor(creatorPid, uid(), id());
2468 }
2469 
2470 AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
2471 {
2472     ALOGV("%s()", __func__);
2473     delete mRecordBufferConverter;
2474     delete mResamplerBufferProvider;
2475 }
2476 
2477 status_t AudioFlinger::RecordThread::RecordTrack::initCheck() const
2478 {
2479     status_t status = TrackBase::initCheck();
2480     if (status == NO_ERROR && mServerProxy == 0) {
2481         status = BAD_VALUE;
2482     }
2483     return status;
2484 }
2485 
2486 // AudioBufferProvider interface
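// If the proxy returns an empty buffer, the client has not released space in time, so the
// overrun flag is raised in the control block for the client side to observe and report.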
status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer)
{
    ServerProxy::Buffer buf;
    buf.mFrameCount = buffer->frameCount;
    status_t status = mServerProxy->obtainBuffer(&buf);
    buffer->frameCount = buf.mFrameCount;
    buffer->raw = buf.mRaw;
    if (buf.mFrameCount == 0) {
        // FIXME also wake futex so that overrun is noticed more quickly
        (void) android_atomic_or(CBLK_OVERRUN, &mCblk->mFlags);
    }
    return status;
}

status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event,
                                                        audio_session_t triggerSession)
{
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        RecordThread *recordThread = (RecordThread *)thread.get();
        return recordThread->start(this, event, triggerSession);
    } else {
        ALOGW("%s track %d: thread was destroyed", __func__, portId());
        return DEAD_OBJECT;
    }
}

void AudioFlinger::RecordThread::RecordTrack::stop()
{
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        RecordThread *recordThread = (RecordThread *)thread.get();
        if (recordThread->stop(this) && isExternalTrack()) {
            AudioSystem::stopInput(mPortId);
        }
    }
}

void AudioFlinger::RecordThread::RecordTrack::destroy()
{
    // see comments at AudioFlinger::PlaybackThread::Track::destroy()
    sp<RecordTrack> keep(this);
    {
        track_state priorState = mState;
        sp<ThreadBase> thread = mThread.promote();
        if (thread != 0) {
            Mutex::Autolock _l(thread->mLock);
            RecordThread *recordThread = (RecordThread *) thread.get();
            priorState = mState;
            if (!mSharedAudioPackageName.empty()) {
                recordThread->resetAudioHistory_l();
            }
            recordThread->destroyTrack_l(this); // move mState to STOPPED, terminate
        }
        // APM portid/client management done outside of lock.
        // NOTE: if thread doesn't exist, the input descriptor probably doesn't either.
        if (isExternalTrack()) {
            switch (priorState) {
            case ACTIVE:     // invalidated while still active
            case STARTING_2: // invalidated/start-aborted after startInput successfully called
            case PAUSING:    // invalidated while in the middle of stop() pausing (still active)
                AudioSystem::stopInput(mPortId);
                break;

            case STARTING_1: // invalidated/start-aborted and startInput not successful
            case PAUSED:     // OK, not active
            case IDLE:       // OK, not active
                break;

            case STOPPED:    // unexpected (destroyed)
            default:
                LOG_ALWAYS_FATAL("%s(%d): invalid prior state: %d", __func__, mId, priorState);
            }
            AudioSystem::releaseInput(mPortId);
        }
    }
}

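// Invalidation marks the control block so the client recreates the track. Storing a
// non-zero value in the futex word and issuing FUTEX_WAKE forces a client blocked in
// obtainBuffer() to wake up and notice CBLK_INVALID.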
void AudioFlinger::RecordThread::RecordTrack::invalidate()
{
    TrackBase::invalidate();
    // FIXME should use proxy, and needs work
    audio_track_cblk_t* cblk = mCblk;
    android_atomic_or(CBLK_INVALID, &cblk->mFlags);
    android_atomic_release_store(0x40000000, &cblk->mFutex);
    // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
    (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
}


void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
{
    result.appendFormat("Active     Id Client Session Port Id  S  Flags  "
                        " Format Chn mask  SRate Source  "
                        " Server FrmCnt FrmRdy Sil%s\n",
                        isServerLatencySupported() ? "   Latency" : "");
}

void AudioFlinger::RecordThread::RecordTrack::appendDump(String8& result, bool active)
{
    result.appendFormat("%c%5s %6d %6u %7u %7u  %2s 0x%03X "
            "%08X %08X %6u %6X "
            "%08X %6zu %6zu %3c",
            isFastTrack() ? 'F' : ' ',
            active ? "yes" : "no",
            mId,
            (mClient == 0) ? getpid() : mClient->pid(),
            mSessionId,
            mPortId,
            getTrackStateAsCodedString(),
            mCblk->mFlags,

            mFormat,
            mChannelMask,
            mSampleRate,
            mAttr.source,

            mCblk->mServer,
            mFrameCount,
            mServerProxy->framesReadySafe(),
            isSilenced() ? 's' : 'n'
            );
    if (isServerLatencySupported()) {
        double latencyMs;
        bool fromTrack;
        if (getTrackLatencyMs(&latencyMs, &fromTrack) == OK) {
            // Show latency in msec, followed by 't' if from track timestamp (the most accurate)
            // or 'k' if estimated from kernel (usually for debugging).
            result.appendFormat(" %7.2lf %c", latencyMs, fromTrack ? 't' : 'k');
        } else {
            result.appendFormat("%10s", mCblk->mServer != 0 ? "unavail" : "new");
        }
    }
    result.append("\n");
}

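// Called when the sync event this track was started against fires. The track then drops
// roughly two thread buffers' worth of frames, an approximation (see the TODO below) of
// the audio that was already in flight when the trigger fired.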
void AudioFlinger::RecordThread::RecordTrack::handleSyncStartEvent(const sp<SyncEvent>& event)
{
    if (event == mSyncStartEvent) {
        ssize_t framesToDrop = 0;
        sp<ThreadBase> threadBase = mThread.promote();
        if (threadBase != 0) {
            // TODO: use actual buffer filling status instead of 2 buffers when info is available
            // from audio HAL
            framesToDrop = threadBase->mFrameCount * 2;
        }
        mFramesToDrop = framesToDrop;
    }
}

void AudioFlinger::RecordThread::RecordTrack::clearSyncStartEvent()
{
    if (mSyncStartEvent != 0) {
        mSyncStartEvent->cancel();
        mSyncStartEvent.clear();
    }
    mFramesToDrop = 0;
}

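// Translates a HAL-side extended timestamp into this track's frame domain. The anchor point
// is (sourceFramesRead, trackFramesReleased): a HAL position is first made relative to the
// frames the thread has read, rescaled from the HAL rate to the track rate, and then offset
// by the frames already released to the track.
// Worked example (illustrative values): with halSampleRate = 48000, track mSampleRate = 16000,
// sourceFramesRead = 960 and a kernel position of 1440, the relative server position is 480
// frames, which maps to 480 * 16000 / 48000 = 160 track frames past trackFramesReleased.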
void AudioFlinger::RecordThread::RecordTrack::updateTrackFrameInfo(
        int64_t trackFramesReleased, int64_t sourceFramesRead,
        uint32_t halSampleRate, const ExtendedTimestamp &timestamp)
{
    // Make the kernel frametime available.
    const FrameTime ft{
            timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
            timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]};
    // ALOGD("FrameTime: %lld %lld", (long long)ft.frames, (long long)ft.timeNs);
    mKernelFrameTime.store(ft);
    if (!audio_is_linear_pcm(mFormat)) {
        // Stream is direct, return provided timestamp with no conversion
        mServerProxy->setTimestamp(timestamp);
        return;
    }

    ExtendedTimestamp local = timestamp;

    // Convert HAL frames to server-side track frames at track sample rate.
    // We use trackFramesReleased and sourceFramesRead as an anchor point.
    for (int i = ExtendedTimestamp::LOCATION_SERVER; i < ExtendedTimestamp::LOCATION_MAX; ++i) {
        if (local.mTimeNs[i] != 0) {
            const int64_t relativeServerFrames = local.mPosition[i] - sourceFramesRead;
            const int64_t relativeTrackFrames = relativeServerFrames
                    * mSampleRate / halSampleRate; // TODO: potential computation overflow
            local.mPosition[i] = relativeTrackFrames + trackFramesReleased;
        }
    }
    mServerProxy->setTimestamp(local);

    // Compute latency info.
    const bool useTrackTimestamp = true; // use track unless debugging.
    const double latencyMs = - (useTrackTimestamp
            ? local.getOutputServerLatencyMs(sampleRate())
            : timestamp.getOutputServerLatencyMs(halSampleRate));

    mServerLatencyFromTrack.store(useTrackTimestamp);
    mServerLatencyMs.store(latencyMs);
}

status_t AudioFlinger::RecordThread::RecordTrack::getActiveMicrophones(
        std::vector<media::MicrophoneInfo>* activeMicrophones)
{
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        RecordThread *recordThread = (RecordThread *)thread.get();
        return recordThread->getActiveMicrophones(activeMicrophones);
    } else {
        return BAD_VALUE;
    }
}

status_t AudioFlinger::RecordThread::RecordTrack::setPreferredMicrophoneDirection(
        audio_microphone_direction_t direction) {
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        RecordThread *recordThread = (RecordThread *)thread.get();
        return recordThread->setPreferredMicrophoneDirection(direction);
    } else {
        return BAD_VALUE;
    }
}

status_t AudioFlinger::RecordThread::RecordTrack::setPreferredMicrophoneFieldDimension(float zoom) {
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        RecordThread *recordThread = (RecordThread *)thread.get();
        return recordThread->setPreferredMicrophoneFieldDimension(zoom);
    } else {
        return BAD_VALUE;
    }
}

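// Sharing the capture history is restricted to the uid/pid that created the track and
// additionally requires the hotword capture privilege; the package name is remembered so
// the history can be reset when this track is destroyed.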
status_t AudioFlinger::RecordThread::RecordTrack::shareAudioHistory(
        const std::string& sharedAudioPackageName, int64_t sharedAudioStartMs) {

    const uid_t callingUid = IPCThreadState::self()->getCallingUid();
    const pid_t callingPid = IPCThreadState::self()->getCallingPid();
    if (callingUid != mUid || callingPid != mCreatorPid) {
        return PERMISSION_DENIED;
    }

    AttributionSourceState attributionSource{};
    attributionSource.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingUid));
    attributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingPid));
    attributionSource.token = sp<BBinder>::make();
    if (!captureHotwordAllowed(attributionSource)) {
        return PERMISSION_DENIED;
    }

    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        RecordThread *recordThread = (RecordThread *)thread.get();
        status_t status = recordThread->shareAudioHistory(
                sharedAudioPackageName, mSessionId, sharedAudioStartMs);
        if (status == NO_ERROR) {
            mSharedAudioPackageName = sharedAudioPackageName;
        }
        return status;
    } else {
        return BAD_VALUE;
    }
}

void AudioFlinger::RecordThread::RecordTrack::copyMetadataTo(MetadataInserter& backInserter) const
{

    // Do not forward PatchRecord metadata with unspecified audio source
    if (mAttr.source == AUDIO_SOURCE_DEFAULT) {
        return;
    }

    // No track is invalid as this is called after prepareTrack_l in the same critical section
    record_track_metadata_v7_t metadata;
    metadata.base = {
            .source = mAttr.source,
            .gain = 1, // capture tracks do not have volumes
    };
    metadata.channel_mask = mChannelMask;
    strncpy(metadata.tags, mAttr.tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);

    *backInserter++ = metadata;
}

// ----------------------------------------------------------------------------
#undef LOG_TAG
#define LOG_TAG "AF::PatchRecord"

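// A PatchRecord is the capture end of a software patch: it is a RecordTrack owned by the
// audioserver process (note the audioServerAttributionSource) whose buffer is consumed
// through a ClientProxy by the playback side of the patch rather than by an application.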
AudioFlinger::RecordThread::PatchRecord::PatchRecord(RecordThread *recordThread,
                                                     uint32_t sampleRate,
                                                     audio_channel_mask_t channelMask,
                                                     audio_format_t format,
                                                     size_t frameCount,
                                                     void *buffer,
                                                     size_t bufferSize,
                                                     audio_input_flags_t flags,
                                                     const Timeout& timeout,
                                                     audio_source_t source)
    :   RecordTrack(recordThread, NULL,
                audio_attributes_t{ .source = source } ,
                sampleRate, format, channelMask, frameCount,
                buffer, bufferSize, AUDIO_SESSION_NONE, getpid(),
                audioServerAttributionSource(getpid()), flags, TYPE_PATCH),
        PatchTrackBase(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true),
                       *recordThread, timeout)
{
    ALOGV("%s(%d): sampleRate %d mPeerTimeout %d.%03d sec",
                                      __func__, mId, sampleRate,
                                      (int)mPeerTimeout.tv_sec,
                                      (int)(mPeerTimeout.tv_nsec / 1000000));
}

AudioFlinger::RecordThread::PatchRecord::~PatchRecord()
{
    ALOGV("%s(%d)", __func__, mId);
}

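// Copies up to frameCount frames from a contiguous source buffer into the destination
// provider's next buffer and returns the number of frames actually written; the count can
// be short when the destination wraps, which writeFrames() below compensates for.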
static size_t writeFramesHelper(
        AudioBufferProvider* dest, const void* src, size_t frameCount, size_t frameSize)
{
    AudioBufferProvider::Buffer patchBuffer;
    patchBuffer.frameCount = frameCount;
    auto status = dest->getNextBuffer(&patchBuffer);
    if (status != NO_ERROR) {
        ALOGW("%s PatchRecord getNextBuffer failed with error %d: %s",
              __func__, status, strerror(-status));
        return 0;
    }
    ALOG_ASSERT(patchBuffer.frameCount <= frameCount);
    memcpy(patchBuffer.raw, src, patchBuffer.frameCount * frameSize);
    size_t framesWritten = patchBuffer.frameCount;
    dest->releaseBuffer(&patchBuffer);
    return framesWritten;
}

// static
size_t AudioFlinger::RecordThread::PatchRecord::writeFrames(
        AudioBufferProvider* dest, const void* src, size_t frameCount, size_t frameSize)
{
    size_t framesWritten = writeFramesHelper(dest, src, frameCount, frameSize);
    // On a buffer wrap, the buffer frame count will be less than requested; when this
    // happens, a second buffer is needed to write the leftover audio.
    const size_t framesLeft = frameCount - framesWritten;
    if (framesWritten != 0 && framesLeft != 0) {
        framesWritten += writeFramesHelper(dest, (const char*)src + framesWritten * frameSize,
                        framesLeft, frameSize);
    }
    return framesWritten;
}

// AudioBufferProvider interface
status_t AudioFlinger::RecordThread::PatchRecord::getNextBuffer(
                                                  AudioBufferProvider::Buffer* buffer)
{
    ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
    Proxy::Buffer buf;
    buf.mFrameCount = buffer->frameCount;
    status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
    ALOGV_IF(status != NO_ERROR,
             "%s(%d): mPeerProxy->obtainBuffer status %d", __func__, mId, status);
    buffer->frameCount = buf.mFrameCount;
    if (ATRACE_ENABLED()) {
        std::string traceName("PRnObt");
        traceName += std::to_string(id());
        ATRACE_INT(traceName.c_str(), buf.mFrameCount);
    }
    if (buf.mFrameCount == 0) {
        return WOULD_BLOCK;
    }
    status = RecordTrack::getNextBuffer(buffer);
    return status;
}

void AudioFlinger::RecordThread::PatchRecord::releaseBuffer(AudioBufferProvider::Buffer* buffer)
{
    ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
    Proxy::Buffer buf;
    buf.mFrameCount = buffer->frameCount;
    buf.mRaw = buffer->raw;
    mPeerProxy->releaseBuffer(&buf);
    TrackBase::releaseBuffer(buffer);
}

status_t AudioFlinger::RecordThread::PatchRecord::obtainBuffer(Proxy::Buffer* buffer,
                                                               const struct timespec *timeOut)
{
    return mProxy->obtainBuffer(buffer, timeOut);
}

void AudioFlinger::RecordThread::PatchRecord::releaseBuffer(Proxy::Buffer* buffer)
{
    mProxy->releaseBuffer(buffer);
}

#undef LOG_TAG
#define LOG_TAG "AF::PthrPatchRecord"

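// Local helper returning heap memory aligned to 'alignment' bytes, wrapped in a unique_ptr
// that releases it with free(); a failed allocation leaves the pointer null.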
static std::unique_ptr<void, decltype(free)*> allocAligned(size_t alignment, size_t size)
{
    void *ptr = nullptr;
    (void)posix_memalign(&ptr, alignment, size);
    return std::unique_ptr<void, decltype(free)*>(ptr, free);
}

AudioFlinger::RecordThread::PassthruPatchRecord::PassthruPatchRecord(
        RecordThread *recordThread,
        uint32_t sampleRate,
        audio_channel_mask_t channelMask,
        audio_format_t format,
        size_t frameCount,
        audio_input_flags_t flags,
        audio_source_t source)
        : PatchRecord(recordThread, sampleRate, channelMask, format, frameCount,
                nullptr /*buffer*/, 0 /*bufferSize*/, flags, {} /* timeout */, source),
          mPatchRecordAudioBufferProvider(*this),
          mSinkBuffer(allocAligned(32, mFrameCount * mFrameSize)),
          mStubBuffer(allocAligned(32, mFrameCount * mFrameSize))
{
    memset(mStubBuffer.get(), 0, mFrameCount * mFrameSize);
}

sp<StreamInHalInterface> AudioFlinger::RecordThread::PassthruPatchRecord::obtainStream(
        sp<ThreadBase>* thread)
{
    *thread = mThread.promote();
    if (!*thread) return nullptr;
    RecordThread *recordThread = static_cast<RecordThread*>((*thread).get());
    Mutex::Autolock _l(recordThread->mLock);
    return recordThread->mInput ? recordThread->mInput->stream : nullptr;
}

// PatchProxyBufferProvider methods are called on DirectOutputThread
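// Data flow for the passthrough patch: this method runs on the playback side of the software
// patch. It reads directly from the input HAL stream into mSinkBuffer, publishes the frames
// into the patch buffer via writeFrames(), and then wakes read(), which runs on the
// RecordThread and only consumes the byte count (the data handed to the RecordThread is
// zero-filled, see the comment above read()).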
status_t AudioFlinger::RecordThread::PassthruPatchRecord::obtainBuffer(
        Proxy::Buffer* buffer, const struct timespec* timeOut)
{
    if (mUnconsumedFrames) {
        buffer->mFrameCount = std::min(buffer->mFrameCount, mUnconsumedFrames);
        // mUnconsumedFrames is decreased in releaseBuffer to reflect the actual consumption.
        return PatchRecord::obtainBuffer(buffer, timeOut);
    }

    // Otherwise, execute a read from HAL and write into the buffer.
    nsecs_t startTimeNs = 0;
    if (timeOut && (timeOut->tv_sec != 0 || timeOut->tv_nsec != 0) && timeOut->tv_sec != INT_MAX) {
        // Will need to correct timeOut by elapsed time.
        startTimeNs = systemTime();
    }
    const size_t framesToRead = std::min(buffer->mFrameCount, mFrameCount);
    buffer->mFrameCount = 0;
    buffer->mRaw = nullptr;
    sp<ThreadBase> thread;
    sp<StreamInHalInterface> stream = obtainStream(&thread);
    if (!stream) return NO_INIT;  // If there is no stream, RecordThread is not reading.

    status_t result = NO_ERROR;
    size_t bytesRead = 0;
    {
        ATRACE_NAME("read");
        result = stream->read(mSinkBuffer.get(), framesToRead * mFrameSize, &bytesRead);
        if (result != NO_ERROR) goto stream_error;
        if (bytesRead == 0) return NO_ERROR;
    }

    {
        std::lock_guard<std::mutex> lock(mReadLock);
        mReadBytes += bytesRead;
        mReadError = NO_ERROR;
    }
    mReadCV.notify_one();
    // writeFrames handles wraparound and should write all the provided frames.
    // If it couldn't, there is something wrong with the client/server buffer of the software patch.
    buffer->mFrameCount = writeFrames(
            &mPatchRecordAudioBufferProvider,
            mSinkBuffer.get(), bytesRead / mFrameSize, mFrameSize);
    ALOGW_IF(buffer->mFrameCount < bytesRead / mFrameSize,
            "Lost %zu frames obtained from HAL", bytesRead / mFrameSize - buffer->mFrameCount);
    mUnconsumedFrames = buffer->mFrameCount;
    struct timespec newTimeOut;
    if (startTimeNs) {
        // Correct the timeout by elapsed time.
        nsecs_t newTimeOutNs = audio_utils_ns_from_timespec(timeOut) - (systemTime() - startTimeNs);
        if (newTimeOutNs < 0) newTimeOutNs = 0;
        newTimeOut.tv_sec = newTimeOutNs / NANOS_PER_SECOND;
        newTimeOut.tv_nsec = newTimeOutNs - newTimeOut.tv_sec * NANOS_PER_SECOND;
        timeOut = &newTimeOut;
    }
    return PatchRecord::obtainBuffer(buffer, timeOut);

stream_error:
    stream->standby();
    {
        std::lock_guard<std::mutex> lock(mReadLock);
        mReadError = result;
    }
    mReadCV.notify_one();
    return result;
}

void AudioFlinger::RecordThread::PassthruPatchRecord::releaseBuffer(Proxy::Buffer* buffer)
{
    if (buffer->mFrameCount <= mUnconsumedFrames) {
        mUnconsumedFrames -= buffer->mFrameCount;
    } else {
        ALOGW("Write side has consumed more frames than we had: %zu > %zu",
                buffer->mFrameCount, mUnconsumedFrames);
        mUnconsumedFrames = 0;
    }
    PatchRecord::releaseBuffer(buffer);
}

// AudioBufferProvider and Source methods are called on RecordThread
// 'read' emulates actual audio data with 0's. This is OK as 'getNextBuffer'
// and 'releaseBuffer' are stubbed out and ignore their input.
// It's not possible to retrieve actual data here w/o blocking 'obtainBuffer'
// until we copy it.
status_t AudioFlinger::RecordThread::PassthruPatchRecord::read(
        void* buffer, size_t bytes, size_t* read)
{
    bytes = std::min(bytes, mFrameCount * mFrameSize);
    {
        std::unique_lock<std::mutex> lock(mReadLock);
        mReadCV.wait(lock, [&]{ return mReadError != NO_ERROR || mReadBytes != 0; });
        if (mReadError != NO_ERROR) {
            mLastReadFrames = 0;
            return mReadError;
        }
        *read = std::min(bytes, mReadBytes);
        mReadBytes -= *read;
    }
    mLastReadFrames = *read / mFrameSize;
    memset(buffer, 0, *read);
    return 0;
}

status_t AudioFlinger::RecordThread::PassthruPatchRecord::getCapturePosition(
        int64_t* frames, int64_t* time)
{
    sp<ThreadBase> thread;
    sp<StreamInHalInterface> stream = obtainStream(&thread);
    return stream ? stream->getCapturePosition(frames, time) : NO_INIT;
}

status_t AudioFlinger::RecordThread::PassthruPatchRecord::standby()
{
    // RecordThread issues 'standby' command in two major cases:
    // 1. Error on read--this case is handled in 'obtainBuffer'.
    // 2. Track is stopping--as PassthruPatchRecord assumes continuous
    //    output, this can only happen when the software patch
    //    is being torn down. In this case, the RecordThread
    //    will terminate and close the HAL stream.
    return 0;
}

// As the buffer gets filled in obtainBuffer, here we only simulate data consumption.
status_t AudioFlinger::RecordThread::PassthruPatchRecord::getNextBuffer(
        AudioBufferProvider::Buffer* buffer)
{
    buffer->frameCount = mLastReadFrames;
    buffer->raw = buffer->frameCount != 0 ? mStubBuffer.get() : nullptr;
    return NO_ERROR;
}

void AudioFlinger::RecordThread::PassthruPatchRecord::releaseBuffer(
        AudioBufferProvider::Buffer* buffer)
{
    buffer->frameCount = 0;
    buffer->raw = nullptr;
}

// ----------------------------------------------------------------------------
#undef LOG_TAG
#define LOG_TAG "AF::MmapTrack"

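// MMAP tracks exchange audio through a buffer that is memory-mapped directly between the
// client and the HAL, so no control block or server-side buffer is allocated here
// (frameCount 0, ALLOC_NONE); this class mostly carries attribution and dump state.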
AudioFlinger::MmapThread::MmapTrack::MmapTrack(ThreadBase *thread,
        const audio_attributes_t& attr,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        audio_session_t sessionId,
        bool isOut,
        const AttributionSourceState& attributionSource,
        pid_t creatorPid,
        audio_port_handle_t portId)
    :   TrackBase(thread, NULL, attr, sampleRate, format,
                  channelMask, (size_t)0 /* frameCount */,
                  nullptr /* buffer */, (size_t)0 /* bufferSize */,
                  sessionId, creatorPid,
                  VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)),
                  isOut,
                  ALLOC_NONE,
                  TYPE_DEFAULT, portId,
                  std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_MMAP) + std::to_string(portId)),
        mPid(VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.pid))),
        mSilenced(false), mSilencedNotified(false)
{
    // Once this item is logged by the server, the client can add properties.
    mTrackMetrics.logConstructor(creatorPid, uid(), id());
}

AudioFlinger::MmapThread::MmapTrack::~MmapTrack()
{
}

status_t AudioFlinger::MmapThread::MmapTrack::initCheck() const
{
    return NO_ERROR;
}

status_t AudioFlinger::MmapThread::MmapTrack::start(AudioSystem::sync_event_t event __unused,
                                                    audio_session_t triggerSession __unused)
{
    return NO_ERROR;
}

void AudioFlinger::MmapThread::MmapTrack::stop()
{
}

// AudioBufferProvider interface
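// Data for MMAP tracks flows through the shared memory-mapped buffer, not through the
// AudioBufferProvider/ExtendedAudioBufferProvider paths, so these methods are intentionally
// no-ops that report nothing available.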
status_t AudioFlinger::MmapThread::MmapTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer)
{
    buffer->frameCount = 0;
    buffer->raw = nullptr;
    return INVALID_OPERATION;
}

// ExtendedAudioBufferProvider interface
size_t AudioFlinger::MmapThread::MmapTrack::framesReady() const {
    return 0;
}

int64_t AudioFlinger::MmapThread::MmapTrack::framesReleased() const
{
    return 0;
}

void AudioFlinger::MmapThread::MmapTrack::onTimestamp(const ExtendedTimestamp &timestamp __unused)
{
}

void AudioFlinger::MmapThread::MmapTrack::appendDumpHeader(String8& result)
{
    result.appendFormat("Client Session Port Id  Format Chn mask  SRate Flags %s\n",
                        isOut() ? "Usg CT": "Source");
}

void AudioFlinger::MmapThread::MmapTrack::appendDump(String8& result, bool active __unused)
{
    result.appendFormat("%6u %7u %7u %08X %08X %6u 0x%03X ",
            mPid,
            mSessionId,
            mPortId,
            mFormat,
            mChannelMask,
            mSampleRate,
            mAttr.flags);
    if (isOut()) {
        result.appendFormat("%3x %2x", mAttr.usage, mAttr.content_type);
    } else {
        result.appendFormat("%6x", mAttr.source);
    }
    result.append("\n");
}

} // namespace android