1 /*
2 **
3 ** Copyright 2012, The Android Open Source Project
4 **
5 ** Licensed under the Apache License, Version 2.0 (the "License");
6 ** you may not use this file except in compliance with the License.
7 ** You may obtain a copy of the License at
8 **
9 ** http://www.apache.org/licenses/LICENSE-2.0
10 **
11 ** Unless required by applicable law or agreed to in writing, software
12 ** distributed under the License is distributed on an "AS IS" BASIS,
13 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 ** See the License for the specific language governing permissions and
15 ** limitations under the License.
16 */
17
18 #define LOG_TAG "AudioFlinger"
19 //#define LOG_NDEBUG 0
20 #define ATRACE_TAG ATRACE_TAG_AUDIO
21
22 #include "MmapTracks.h"
23 #include "PlaybackTracks.h"
24 #include "RecordTracks.h"
25
26 #include "Client.h"
27 #include "IAfEffect.h"
28 #include "IAfThread.h"
29 #include "ResamplerBufferProvider.h"
30
31 #include <audio_utils/StringUtils.h>
32 #include <audio_utils/minifloat.h>
33 #include <com_android_media_audio.h>
34 #include <com_android_media_audioserver.h>
35 #include <media/AppOpsSession.h>
36 #include <media/AudioPermissionPolicy.h>
37 #include <media/AudioValidator.h>
38 #include <media/IPermissionProvider.h>
39 #include <media/RecordBufferConverter.h>
40 #include <media/nbaio/Pipe.h>
41 #include <media/nbaio/PipeReader.h>
42 #include <mediautils/Runnable.h>
43 #include <mediautils/ServiceUtilities.h>
44 #include <mediautils/SharedMemoryAllocator.h>
45 #include <private/media/AudioTrackShared.h>
46 #include <utils/Log.h>
47 #include <utils/Trace.h>
48
49 #include <linux/futex.h>
50 #include <math.h>
51 #include <sys/syscall.h>
52
53 // ----------------------------------------------------------------------------
54
55 // Note: the following macro is used for extremely verbose logging messages. In
56 // order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
57 // 0; but one side effect of this is to turn on all LOGV's as well. Some messages
58 // are so verbose that we want to suppress them even when we have ALOG_ASSERT
59 // turned on. Do not uncomment the #def below unless you really know what you
60 // are doing and want to see all of the extremely verbose messages.
61 //#define VERY_VERY_VERBOSE_LOGGING
62 #ifdef VERY_VERY_VERBOSE_LOGGING
63 #define ALOGVV ALOGV
64 #else
65 #define ALOGVV(a...) do { } while(0)
66 #endif
67
68 // TODO: Remove when this is put into AidlConversionUtil.h
69 #define VALUE_OR_RETURN_BINDER_STATUS(x) \
70 ({ \
71 auto _tmp = (x); \
72 if (!_tmp.ok()) return ::android::aidl_utils::binderStatusFromStatusT(_tmp.error()); \
73 std::move(_tmp.value()); \
74 })
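// A minimal usage sketch of the macro above (hypothetical callsite, for illustration
// only): inside a binder method returning binder::Status, unwrap a ConversionResult<T>
// or return the converted error immediately:
//
//   binder::Status Example::setFormat(
//           const media::audio::common::AudioFormatDescription& aidlFormat) {
//       const audio_format_t format = VALUE_OR_RETURN_BINDER_STATUS(
//               aidl2legacy_AudioFormatDescription_audio_format_t(aidlFormat));
//       // ... use format ...
//       return binder::Status::ok();
//   }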
75
76 namespace audioserver_flags = com::android::media::audioserver;
77
78 namespace android {
79
80 using ::android::aidl_utils::binderStatusFromStatusT;
81 using ::com::android::media::audio::hardening_impl;
82 using ::com::android::media::audio::hardening_partial;
83 using ::com::android::media::audio::hardening_strict;
84 using binder::Status;
85 using com::android::media::audio::audioserver_permissions;
86 using com::android::media::permission::PermissionEnum::CAPTURE_AUDIO_HOTWORD;
87 using content::AttributionSourceState;
88 using media::VolumeShaper;
89
90 // ----------------------------------------------------------------------------
91 // TrackBase
92 // ----------------------------------------------------------------------------
93 #undef LOG_TAG
94 #define LOG_TAG "AF::TrackBase"
95
96 static volatile int32_t nextTrackId = 55;
97
98 // TrackBase constructor must be called with AudioFlinger::mLock held
99 TrackBase::TrackBase(
100 IAfThreadBase *thread,
101 const sp<Client>& client,
102 const audio_attributes_t& attr,
103 uint32_t sampleRate,
104 audio_format_t format,
105 audio_channel_mask_t channelMask,
106 size_t frameCount,
107 void *buffer,
108 size_t bufferSize,
109 audio_session_t sessionId,
110 pid_t creatorPid,
111 uid_t clientUid,
112 bool isOut,
113 const alloc_type alloc,
114 track_type type,
115 audio_port_handle_t portId,
116 std::string metricsId)
117 :
118 mThread(thread),
119 mAllocType(alloc),
120 mClient(client),
121 mCblk(NULL),
122 // mBuffer, mBufferSize
123 mState(IDLE),
124 mAttr(attr),
125 mSampleRate(sampleRate),
126 mFormat(format),
127 mChannelMask(channelMask),
128 mChannelCount(isOut ?
129 audio_channel_count_from_out_mask(channelMask) :
130 audio_channel_count_from_in_mask(channelMask)),
131 mFrameSize(audio_bytes_per_frame(mChannelCount, format)),
132 mFrameCount(frameCount),
133 mSessionId(sessionId),
134 mIsOut(isOut),
135 mId(android_atomic_inc(&nextTrackId)),
136 mTerminated(false),
137 mType(type),
138 mThreadIoHandle(thread ? thread->id() : AUDIO_IO_HANDLE_NONE),
139 mPortId(portId),
140 mIsInvalid(false),
141 mTrackMetrics(std::move(metricsId), isOut, clientUid),
142 mCreatorPid(creatorPid),
143 mTraceSuffix{std::to_string(mPortId).append(".").append(std::to_string(mId))
144 .append(".").append(std::to_string(mThreadIoHandle))},
145 mTraceActionId{std::string(AUDIO_TRACE_PREFIX_AUDIO_TRACK_ACTION).append(mTraceSuffix)},
146 mTraceIntervalId{std::string(AUDIO_TRACE_PREFIX_AUDIO_TRACK_INTERVAL)
147 .append(mTraceSuffix)}
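// Illustrative values: portId 101, track id 56 and io handle 13 give mTraceSuffix
// "101.56.13"; mTraceActionId and mTraceIntervalId are that suffix with the
// corresponding AUDIO_TRACE_PREFIX_AUDIO_TRACK_* constant prepended.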
148 {
149 const uid_t callingUid = IPCThreadState::self()->getCallingUid();
150 if (!isAudioServerOrMediaServerUid(callingUid) || clientUid == AUDIO_UID_INVALID) {
151 ALOGW_IF(clientUid != AUDIO_UID_INVALID && clientUid != callingUid,
152 "%s(%d): uid %d tried to pass itself off as %d",
153 __func__, mId, callingUid, clientUid);
154 clientUid = callingUid;
155 }
156 // clientUid contains the uid of the app that is responsible for this track, so we can blame
157 // battery usage on it.
158 mUid = clientUid;
159
160 // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
161
162 size_t minBufferSize = buffer == NULL ? roundup(frameCount) : frameCount;
163 // check overflow when computing bufferSize due to multiplication by mFrameSize.
164 if (minBufferSize < frameCount // roundup rounds down for values above UINT_MAX / 2
165 || mFrameSize == 0 // format needs to be correct
166 || minBufferSize > SIZE_MAX / mFrameSize) {
167 android_errorWriteLog(0x534e4554, "34749571");
168 return;
169 }
170 minBufferSize *= mFrameSize;
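// Worked example of the guard above (illustrative numbers, 32-bit size_t): roundup()
// returns the next power of two, so a frameCount just above UINT_MAX / 2 wraps and
// yields a result smaller than frameCount, tripping the first check. The last check
// rejects any frameCount whose product with mFrameSize would exceed SIZE_MAX, e.g.
// frameCount = SIZE_MAX / 4 + 1 with a 4-byte frame size.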
171
172 if (buffer == nullptr) {
173 bufferSize = minBufferSize; // allocated here.
174 } else if (minBufferSize > bufferSize) {
175 android_errorWriteLog(0x534e4554, "38340117");
176 return;
177 }
178
179 size_t size = sizeof(audio_track_cblk_t);
180 if (buffer == NULL && alloc == ALLOC_CBLK) {
181 // check overflow when computing allocation size for streaming tracks.
182 if (size > SIZE_MAX - bufferSize) {
183 android_errorWriteLog(0x534e4554, "34749571");
184 return;
185 }
186 size += bufferSize;
187 }
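// For ALLOC_CBLK streaming tracks the single allocation computed above is laid out as
// the control block immediately followed by the audio data (the ALLOC_CBLK case below
// points mBuffer just past the audio_track_cblk_t header):
//
//   +--------------------+---------------------------+
//   | audio_track_cblk_t | bufferSize bytes of audio |
//   +--------------------+---------------------------+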
188
189 if (client != 0) {
190 mCblkMemory = client->allocator().allocate(mediautils::NamedAllocRequest{{size},
191 std::string("Track ID: ").append(std::to_string(mId))});
192 if (mCblkMemory == 0 ||
193 (mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->unsecurePointer())) == NULL) {
194 ALOGE("%s(%d): not enough memory for AudioTrack size=%zu", __func__, mId, size);
195 ALOGE("%s", client->allocator().dump().c_str());
196 mCblkMemory.clear();
197 return;
198 }
199 } else {
200 mCblk = (audio_track_cblk_t *) malloc(size);
201 if (mCblk == NULL) {
202 ALOGE("%s(%d): not enough memory for AudioTrack size=%zu", __func__, mId, size);
203 return;
204 }
205 }
206
207 // construct the shared structure in-place.
208 if (mCblk != NULL) {
209 new(mCblk) audio_track_cblk_t();
210 switch (alloc) {
211 case ALLOC_READONLY: {
212 const sp<MemoryDealer> roHeap(thread->readOnlyHeap());
213 if (roHeap == 0 ||
214 (mBufferMemory = roHeap->allocate(bufferSize)) == 0 ||
215 (mBuffer = mBufferMemory->unsecurePointer()) == NULL) {
216 ALOGE("%s(%d): not enough memory for read-only buffer size=%zu",
217 __func__, mId, bufferSize);
218 if (roHeap != 0) {
219 roHeap->dump("buffer");
220 }
221 mCblkMemory.clear();
222 mBufferMemory.clear();
223 return;
224 }
225 memset(mBuffer, 0, bufferSize);
226 } break;
227 case ALLOC_PIPE:
228 mBufferMemory = thread->pipeMemory();
229 // mBuffer is the virtual address as seen from current process (mediaserver),
230 // and should normally be coming from mBufferMemory->unsecurePointer().
231 // However in this case the TrackBase does not reference the buffer directly.
232 // It should reference the buffer via the pipe.
233 // Therefore, to detect incorrect usage of the buffer, we set mBuffer to NULL.
234 mBuffer = NULL;
235 bufferSize = 0;
236 break;
237 case ALLOC_CBLK:
238 // clear all buffers
239 if (buffer == NULL) {
240 mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
241 memset(mBuffer, 0, bufferSize);
242 } else {
243 mBuffer = buffer;
244 #if 0
245 mCblk->mFlags = CBLK_FORCEREADY; // FIXME hack, need to fix the track ready logic
246 #endif
247 }
248 break;
249 case ALLOC_LOCAL:
250 mBuffer = calloc(1, bufferSize);
251 break;
252 case ALLOC_NONE:
253 mBuffer = buffer;
254 break;
255 default:
256 LOG_ALWAYS_FATAL("%s(%d): invalid allocation type: %d", __func__, mId, (int)alloc);
257 }
258 mBufferSize = bufferSize;
259
260 #ifdef TEE_SINK
261 mTee.set(sampleRate, mChannelCount, format, NBAIO_Tee::TEE_FLAG_TRACK);
262 #endif
263 // mState is mirrored for the client to read.
264 mState.setMirror(&mCblk->mState);
265 // ensure our state matches up until we consolidate the enumeration.
266 static_assert(CBLK_STATE_IDLE == IDLE);
267 static_assert(CBLK_STATE_PAUSING == PAUSING);
268 }
269 }
270
271 // TODO b/182392769: use attribution source util
272 static AttributionSourceState audioServerAttributionSource(pid_t pid) {
273 AttributionSourceState attributionSource{};
274 attributionSource.uid = AID_AUDIOSERVER;
275 attributionSource.pid = pid;
276 attributionSource.token = sp<BBinder>::make();
277 return attributionSource;
278 }
279
280 status_t TrackBase::initCheck() const
281 {
282 status_t status;
283 if (mType == TYPE_OUTPUT || mType == TYPE_PATCH) {
284 status = cblk() != NULL ? NO_ERROR : NO_MEMORY;
285 } else {
286 status = getCblk() != 0 ? NO_ERROR : NO_MEMORY;
287 }
288 return status;
289 }
290
291 TrackBase::~TrackBase()
292 {
293 // delete the proxy before deleting the shared memory it refers to, to avoid dangling reference
294 mServerProxy.clear();
295 releaseCblk();
296 mCblkMemory.clear(); // free the shared memory before releasing the heap it belongs to
297 if (mClient != 0) {
298 // Client destructor must run with AudioFlinger client mutex locked
299 audio_utils::lock_guard _l(mClient->afClientCallback()->clientMutex());
300 // If the client's reference count drops to zero, the associated destructor
301 // must run with AudioFlinger lock held. Thus the explicit clear() rather than
302 // relying on the automatic clear() at end of scope.
303 mClient.clear();
304 }
305 if (mAllocType == ALLOC_LOCAL) {
306 free(mBuffer);
307 mBuffer = nullptr;
308 }
309 // flush the binder command buffer
310 IPCThreadState::self()->flushCommands();
311 }
312
313 // AudioBufferProvider interface
314 // getNextBuffer() = 0;
315 // This implementation of releaseBuffer() is used by Track and RecordTrack
316 void TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
317 {
318 #ifdef TEE_SINK
319 mTee.write(buffer->raw, buffer->frameCount);
320 #endif
321
322 ServerProxy::Buffer buf;
323 buf.mFrameCount = buffer->frameCount;
324 buf.mRaw = buffer->raw;
325 buffer->frameCount = 0;
326 buffer->raw = NULL;
327 mServerProxy->releaseBuffer(&buf);
328 }
329
330 status_t TrackBase::setSyncEvent(
331 const sp<audioflinger::SyncEvent>& event)
332 {
333 mSyncEvents.emplace_back(event);
334 return NO_ERROR;
335 }
336
337 void TrackBase::deferRestartIfDisabled()
338 {
339 const auto thread = mThread.promote();
340 if (thread == nullptr) return;
341 auto weakTrack = wp<TrackBase>::fromExisting(this);
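// Holding only a weak reference means the deferred task neither extends this track's
// lifetime nor calls restartIfDisabled() on a track already destroyed by the time the
// threadloop executor drains its queue.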
342 thread->getThreadloopExecutor().defer([weakTrack] {
343 const auto actual = weakTrack.promote();
344 if (actual) actual->restartIfDisabled();
345 });
346 }
347
348 void TrackBase::beginBatteryAttribution() {
349 mBatteryStatsHolder.emplace(uid());
350 if (media::psh_utils::AudioPowerManager::enabled()) {
351 mTrackToken = media::psh_utils::createAudioTrackToken(uid());
352 }
353 }
354
355 void TrackBase::endBatteryAttribution() {
356 mBatteryStatsHolder.reset();
357 mTrackToken.reset();
358 }
359
360 audio_utils::trace::Object TrackBase::createDeviceIntervalTrace(const std::string& devices) {
361 audio_utils::trace::Object trace;
362
363 // Please do not modify any items without approval (look at git blame).
364 // Sanitize the device string to remove addresses.
365 std::string plainDevices;
366 if (devices.find(")") != std::string::npos) {
367 auto deviceAddrVector = audio_utils::stringutils::getDeviceAddressPairs(devices);
368 for (const auto& deviceAddr : deviceAddrVector) {
369 // "|" not compatible with ATRACE filtering so we use "+".
370 if (!plainDevices.empty()) plainDevices.append("+");
371 plainDevices.append(deviceAddr.first);
372 }
373 } else {
374 plainDevices = devices;
375 }
376
377 trace // the following key, value pairs should be alphabetical
378 .set(AUDIO_TRACE_OBJECT_KEY_CHANNEL_MASK, static_cast<int32_t>(mChannelMask))
379 .set(AUDIO_TRACE_OBJECT_KEY_CONTENT_TYPE, toString(mAttr.content_type))
380 .set(AUDIO_TRACE_OBJECT_KEY_DEVICES, plainDevices)
381 .set(AUDIO_TRACE_OBJECT_KEY_FLAGS, trackFlagsAsString())
382 .set(AUDIO_TRACE_OBJECT_KEY_FORMAT, IAfThreadBase::formatToString(mFormat))
383 .set(AUDIO_TRACE_OBJECT_KEY_FRAMECOUNT, static_cast<int64_t>(mFrameCount))
384 .set(AUDIO_TRACE_OBJECT_KEY_PID, static_cast<int32_t>(
385 mClient ? mClient->pid() : getpid()))
386 .set(AUDIO_TRACE_OBJECT_KEY_SAMPLE_RATE, static_cast<int32_t>(sampleRate()));
387 if (const auto thread = mThread.promote()) {
388 trace // continue in alphabetical order
389 .set(AUDIO_TRACE_PREFIX_THREAD AUDIO_TRACE_OBJECT_KEY_CHANNEL_MASK,
390 static_cast<int32_t>(thread->channelMask()))
391 .set(AUDIO_TRACE_PREFIX_THREAD AUDIO_TRACE_OBJECT_KEY_FLAGS,
392 thread->flagsAsString())
393 .set(AUDIO_TRACE_PREFIX_THREAD AUDIO_TRACE_OBJECT_KEY_FORMAT,
394 IAfThreadBase::formatToString(thread->format()))
395 .set(AUDIO_TRACE_PREFIX_THREAD AUDIO_TRACE_OBJECT_KEY_FRAMECOUNT,
396 static_cast<int64_t>(thread->frameCount()))
397 .set(AUDIO_TRACE_PREFIX_THREAD AUDIO_TRACE_OBJECT_KEY_ID,
398 static_cast<int32_t>(mThreadIoHandle))
399 .set(AUDIO_TRACE_PREFIX_THREAD AUDIO_TRACE_OBJECT_KEY_SAMPLE_RATE,
400 static_cast<int32_t>(thread->sampleRate()))
401 .set(AUDIO_TRACE_PREFIX_THREAD AUDIO_TRACE_OBJECT_KEY_TYPE,
402 IAfThreadBase::threadTypeToString(thread->type()));
403 }
404 trace // continue in alphabetical order
405 .set(AUDIO_TRACE_OBJECT_KEY_UID, static_cast<int32_t>(uid()))
406 .set(AUDIO_TRACE_OBJECT_KEY_USAGE, toString(mAttr.usage));
407 return trace;
408 }
409
410 void TrackBase::logBeginInterval(const std::string& devices) {
411 mTrackMetrics.logBeginInterval(devices);
412
413 if (ATRACE_ENABLED()) [[unlikely]] {
414 auto trace = createDeviceIntervalTrace(devices);
415 mLastTrace = trace;
416 ATRACE_INSTANT_FOR_TRACK(mTraceIntervalId.c_str(),
417 trace.set(AUDIO_TRACE_OBJECT_KEY_EVENT, AUDIO_TRACE_EVENT_BEGIN_INTERVAL)
418 .toTrace().c_str());
419 }
420 }
421
422 void TrackBase::logEndInterval() {
423 if (!mLastTrace.empty()) {
424 if (ATRACE_ENABLED()) [[unlikely]] {
425 ATRACE_INSTANT_FOR_TRACK(mTraceIntervalId.c_str(),
426 mLastTrace.set(AUDIO_TRACE_OBJECT_KEY_EVENT, AUDIO_TRACE_EVENT_END_INTERVAL)
427 .toTrace().c_str());
428 }
429 mLastTrace.clear();
430 }
431 mTrackMetrics.logEndInterval();
432 }
433
434 void TrackBase::logRefreshInterval(const std::string& devices) {
435 if (ATRACE_ENABLED()) [[unlikely]] {
436 if (mLastTrace.empty()) mLastTrace = createDeviceIntervalTrace(devices);
437 auto trace = mLastTrace;
438 ATRACE_INSTANT_FOR_TRACK(mTraceIntervalId.c_str(),
439 trace.set(AUDIO_TRACE_OBJECT_KEY_EVENT,
440 AUDIO_TRACE_EVENT_REFRESH_INTERVAL)
441 .toTrace().c_str());
442 }
443 }
444
445 void TrackBase::signal() {
446 const sp<IAfThreadBase> thread = mThread.promote();
447 if (thread != nullptr) {
448 audio_utils::lock_guard _l(thread->mutex());
449 thread->broadcast_l();
450 }
451 }
452
453 PatchTrackBase::PatchTrackBase(const sp<ClientProxy>& proxy,
454 IAfThreadBase* thread, const Timeout& timeout)
455 : mProxy(proxy)
456 {
457 if (timeout) {
458 setPeerTimeout(*timeout);
459 } else {
460 // Double buffer mixer
461 uint64_t mixBufferNs = ((uint64_t)2 * thread->frameCount() * 1000000000) /
462 thread->sampleRate();
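// For example (illustrative values), a 48 kHz mixer with a 960-frame buffer gives
// 2 * 960 * 1e9 / 48000 ns = 40 ms as the peer timeout.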
463 setPeerTimeout(std::chrono::nanoseconds{mixBufferNs});
464 }
465 }
466
467 void PatchTrackBase::setPeerTimeout(std::chrono::nanoseconds timeout) {
468 mPeerTimeout.tv_sec = timeout.count() / std::nano::den;
469 mPeerTimeout.tv_nsec = timeout.count() % std::nano::den;
470 }
471
472
473 // ----------------------------------------------------------------------------
474 // Playback
475 // ----------------------------------------------------------------------------
476 #undef LOG_TAG
477 #define LOG_TAG "AF::TrackHandle"
478
479 class TrackHandle : public android::media::BnAudioTrack {
480 public:
481 explicit TrackHandle(const sp<IAfTrack>& track);
482 ~TrackHandle() override;
483
484 binder::Status getCblk(std::optional<media::SharedFileRegion>* _aidl_return) final;
485 binder::Status start(int32_t* _aidl_return) final;
486 binder::Status stop() final;
487 binder::Status flush() final;
488 binder::Status pause() final;
489 binder::Status attachAuxEffect(int32_t effectId, int32_t* _aidl_return) final;
490 binder::Status setParameters(const std::string& keyValuePairs,
491 int32_t* _aidl_return) final;
492 binder::Status selectPresentation(int32_t presentationId, int32_t programId,
493 int32_t* _aidl_return) final;
494 binder::Status getTimestamp(media::AudioTimestampInternal* timestamp,
495 int32_t* _aidl_return) final;
496 binder::Status signal() final;
497 binder::Status applyVolumeShaper(const media::VolumeShaperConfiguration& configuration,
498 const media::VolumeShaperOperation& operation,
499 int32_t* _aidl_return) final;
500 binder::Status getVolumeShaperState(
501 int32_t id,
502 std::optional<media::VolumeShaperState>* _aidl_return) final;
503 binder::Status getDualMonoMode(
504 media::audio::common::AudioDualMonoMode* _aidl_return) final;
505 binder::Status setDualMonoMode(
506 media::audio::common::AudioDualMonoMode mode) final;
507 binder::Status getAudioDescriptionMixLevel(float* _aidl_return) final;
508 binder::Status setAudioDescriptionMixLevel(float leveldB) final;
509 binder::Status getPlaybackRateParameters(
510 media::audio::common::AudioPlaybackRate* _aidl_return) final;
511 binder::Status setPlaybackRateParameters(
512 const media::audio::common::AudioPlaybackRate& playbackRate) final;
513
514 private:
515 const sp<IAfTrack> mTrack;
516 };
517
518 /* static */
519 sp<media::IAudioTrack> IAfTrack::createIAudioTrackAdapter(const sp<IAfTrack>& track) {
520 return sp<TrackHandle>::make(track);
521 }
522
523 TrackHandle::TrackHandle(const sp<IAfTrack>& track)
524 : BnAudioTrack(),
525 mTrack(track)
526 {
527 setMinSchedulerPolicy(SCHED_NORMAL, ANDROID_PRIORITY_AUDIO);
528 setInheritRt(true);
529 }
530
531 TrackHandle::~TrackHandle() {
532 // just stop the track on deletion; associated resources
533 // will be freed from the main thread once all pending buffers have
534 // been played. If the track is not in the active track list,
535 // we free everything now...
536 mTrack->destroy();
537 }
538
539 Status TrackHandle::getCblk(
540 std::optional<media::SharedFileRegion>* _aidl_return) {
541 *_aidl_return = legacy2aidl_NullableIMemory_SharedFileRegion(mTrack->getCblk()).value();
542 return Status::ok();
543 }
544
545 Status TrackHandle::start(int32_t* _aidl_return) {
546 *_aidl_return = mTrack->start();
547 return Status::ok();
548 }
549
550 Status TrackHandle::stop() {
551 mTrack->stop();
552 return Status::ok();
553 }
554
555 Status TrackHandle::flush() {
556 mTrack->flush();
557 return Status::ok();
558 }
559
560 Status TrackHandle::pause() {
561 mTrack->pause();
562 return Status::ok();
563 }
564
565 Status TrackHandle::attachAuxEffect(int32_t effectId,
566 int32_t* _aidl_return) {
567 *_aidl_return = mTrack->attachAuxEffect(effectId);
568 return Status::ok();
569 }
570
571 Status TrackHandle::setParameters(const std::string& keyValuePairs,
572 int32_t* _aidl_return) {
573 *_aidl_return = mTrack->setParameters(String8(keyValuePairs.c_str()));
574 return Status::ok();
575 }
576
577 Status TrackHandle::selectPresentation(int32_t presentationId, int32_t programId,
578 int32_t* _aidl_return) {
579 *_aidl_return = mTrack->selectPresentation(presentationId, programId);
580 return Status::ok();
581 }
582
583 Status TrackHandle::getTimestamp(media::AudioTimestampInternal* timestamp,
584 int32_t* _aidl_return) {
585 AudioTimestamp legacy;
586 *_aidl_return = mTrack->getTimestamp(legacy);
587 if (*_aidl_return != OK) {
588 return Status::ok();
589 }
590
591 // restrict position modulo INT_MAX to avoid integer sanitization abort
592 legacy.mPosition &= INT_MAX;
593
594 *timestamp = legacy2aidl_AudioTimestamp_AudioTimestampInternal(legacy).value();
595 return Status::ok();
596 }
597
598 Status TrackHandle::signal() {
599 mTrack->signal();
600 return Status::ok();
601 }
602
603 Status TrackHandle::applyVolumeShaper(
604 const media::VolumeShaperConfiguration& configuration,
605 const media::VolumeShaperOperation& operation,
606 int32_t* _aidl_return) {
607 sp<VolumeShaper::Configuration> conf = new VolumeShaper::Configuration();
608 *_aidl_return = conf->readFromParcelable(configuration);
609 if (*_aidl_return != OK) {
610 return Status::ok();
611 }
612
613 sp<VolumeShaper::Operation> op = new VolumeShaper::Operation();
614 *_aidl_return = op->readFromParcelable(operation);
615 if (*_aidl_return != OK) {
616 return Status::ok();
617 }
618
619 *_aidl_return = mTrack->applyVolumeShaper(conf, op);
620 return Status::ok();
621 }
622
623 Status TrackHandle::getVolumeShaperState(
624 int32_t id,
625 std::optional<media::VolumeShaperState>* _aidl_return) {
626 sp<VolumeShaper::State> legacy = mTrack->getVolumeShaperState(id);
627 if (legacy == nullptr) {
628 _aidl_return->reset();
629 return Status::ok();
630 }
631 media::VolumeShaperState aidl;
632 legacy->writeToParcelable(&aidl);
633 *_aidl_return = aidl;
634 return Status::ok();
635 }
636
637 Status TrackHandle::getDualMonoMode(
638 media::audio::common::AudioDualMonoMode* _aidl_return)
639 {
640 audio_dual_mono_mode_t mode = AUDIO_DUAL_MONO_MODE_OFF;
641 const status_t status = mTrack->getDualMonoMode(&mode)
642 ?: AudioValidator::validateDualMonoMode(mode);
643 if (status == OK) {
644 *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
645 legacy2aidl_audio_dual_mono_mode_t_AudioDualMonoMode(mode));
646 }
647 return binderStatusFromStatusT(status);
648 }
649
650 Status TrackHandle::setDualMonoMode(
651 media::audio::common::AudioDualMonoMode mode)
652 {
653 const auto localMonoMode = VALUE_OR_RETURN_BINDER_STATUS(
654 aidl2legacy_AudioDualMonoMode_audio_dual_mono_mode_t(mode));
655 return binderStatusFromStatusT(AudioValidator::validateDualMonoMode(localMonoMode)
656 ?: mTrack->setDualMonoMode(localMonoMode));
657 }
658
659 Status TrackHandle::getAudioDescriptionMixLevel(float* _aidl_return)
660 {
661 float leveldB = -std::numeric_limits<float>::infinity();
662 const status_t status = mTrack->getAudioDescriptionMixLevel(&leveldB)
663 ?: AudioValidator::validateAudioDescriptionMixLevel(leveldB);
664 if (status == OK) *_aidl_return = leveldB;
665 return binderStatusFromStatusT(status);
666 }
667
668 Status TrackHandle::setAudioDescriptionMixLevel(float leveldB)
669 {
670 return binderStatusFromStatusT(AudioValidator::validateAudioDescriptionMixLevel(leveldB)
671 ?: mTrack->setAudioDescriptionMixLevel(leveldB));
672 }
673
674 Status TrackHandle::getPlaybackRateParameters(
675 media::audio::common::AudioPlaybackRate* _aidl_return)
676 {
677 audio_playback_rate_t localPlaybackRate{};
678 status_t status = mTrack->getPlaybackRateParameters(&localPlaybackRate)
679 ?: AudioValidator::validatePlaybackRate(localPlaybackRate);
680 if (status == NO_ERROR) {
681 *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
682 legacy2aidl_audio_playback_rate_t_AudioPlaybackRate(localPlaybackRate));
683 }
684 return binderStatusFromStatusT(status);
685 }
686
687 Status TrackHandle::setPlaybackRateParameters(
688 const media::audio::common::AudioPlaybackRate& playbackRate)
689 {
690 const audio_playback_rate_t localPlaybackRate = VALUE_OR_RETURN_BINDER_STATUS(
691 aidl2legacy_AudioPlaybackRate_audio_playback_rate_t(playbackRate));
692 return binderStatusFromStatusT(AudioValidator::validatePlaybackRate(localPlaybackRate)
693 ?: mTrack->setPlaybackRateParameters(localPlaybackRate));
694 }
695
696 // ----------------------------------------------------------------------------
697 // AppOp for audio playback
698 // -------------------------------
699
700 // static
701 sp<OpPlayAudioMonitor> OpPlayAudioMonitor::createIfNeeded(
702 IAfThreadBase* thread,
703 const AttributionSourceState& attributionSource, const audio_attributes_t& attr, int id,
704 audio_stream_type_t streamType)
705 {
706 const uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
707 if (isServiceUid(uid)) {
708 ALOGW("OpPlayAudio: not muting track:%d usage:%d for service UID %d", id, attr.usage,
709 uid);
710 return nullptr;
711 }
712 // stream type has been filtered by audio policy to indicate whether it can be muted
713 if (streamType == AUDIO_STREAM_ENFORCED_AUDIBLE) {
714 ALOGD("OpPlayAudio: not muting track:%d usage:%d ENFORCED_AUDIBLE", id, attr.usage);
715 return nullptr;
716 }
717 if ((attr.flags & AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY)
718 == AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY) {
719 ALOGD("OpPlayAudio: not muting track:%d flags %#x have FLAG_BYPASS_INTERRUPTION_POLICY",
720 id, attr.flags);
721 return nullptr;
722 }
723 return sp<OpPlayAudioMonitor>::make(thread, attributionSource, attr.usage, id, uid);
724 }
725
726 OpPlayAudioMonitor::OpPlayAudioMonitor(IAfThreadBase* thread,
727 const AttributionSourceState& attributionSource,
728 audio_usage_t usage, int id, uid_t uid)
729 : mThread(wp<IAfThreadBase>::fromExisting(thread)),
730 mHasOpPlayAudio(true),
731 mUsage((int32_t)usage),
732 mId(id),
733 mUid(uid),
734 mPackageName(VALUE_OR_FATAL(aidl2legacy_string_view_String16(
735 attributionSource.packageName.value_or("")))) {}
736
737 OpPlayAudioMonitor::~OpPlayAudioMonitor()
738 {
739 if (mOpCallback != 0) {
740 mAppOpsManager.stopWatchingMode(mOpCallback);
741 }
742 mOpCallback.clear();
743 }
744
745 void OpPlayAudioMonitor::onFirstRef()
746 {
747 // make sure not to broadcast the initial state since it is not needed and could
748 // cause a deadlock, because this method can be called with the mThread->mLock held
749 checkPlayAudioForUsage(/*doBroadcast=*/false);
750 if (mPackageName.size()) {
751 mOpCallback = new PlayAudioOpCallback(this);
752 mAppOpsManager.startWatchingMode(AppOpsManager::OP_PLAY_AUDIO, mPackageName, mOpCallback);
753 } else {
754 ALOGW("Skipping OpPlayAudioMonitor due to null package name");
755 }
756 }
757
758 bool OpPlayAudioMonitor::hasOpPlayAudio() const {
759 return mHasOpPlayAudio.load();
760 }
761
762 // Note: this method is never called (and must never be) for audio server / patch record tracks
763 // - not called from constructor due to check on UID,
764 // - not called from PlayAudioOpCallback because the callback is not installed in this case
765 void OpPlayAudioMonitor::checkPlayAudioForUsage(bool doBroadcast) {
766 const bool hasAppOps =
767 mPackageName.size() &&
768 mAppOpsManager.checkAudioOpNoThrow(AppOpsManager::OP_PLAY_AUDIO, mUsage, mUid,
769 mPackageName) == AppOpsManager::MODE_ALLOWED;
770
771 bool shouldChange = !hasAppOps; // check if we need to update.
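// compare_exchange_strong succeeds only when the stored value equals the expected value
// (!hasAppOps), i.e. only when the op state actually transitioned; it then stores the new
// state, so the log and broadcast below run exactly once per transition.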
772 if (mHasOpPlayAudio.compare_exchange_strong(shouldChange, hasAppOps)) {
773 ALOGI("OpPlayAudio: track:%d package:%s usage:%d %smuted", mId,
774 String8(mPackageName).c_str(), mUsage, hasAppOps ? "not " : "");
775 if (doBroadcast) {
776 auto thread = mThread.promote();
777 if (thread != nullptr && thread->type() == IAfThreadBase::OFFLOAD) {
778 // Wake up Thread if offloaded, otherwise it may be several seconds for update.
779 audio_utils::lock_guard _l(thread->mutex());
780 thread->broadcast_l();
781 }
782 }
783 }
784 }
785
786 OpPlayAudioMonitor::PlayAudioOpCallback::PlayAudioOpCallback(
787 const wp<OpPlayAudioMonitor>& monitor) : mMonitor(monitor)
788 { }
789
790 binder::Status OpPlayAudioMonitor::PlayAudioOpCallback::opChanged(int32_t op, int32_t,
791 const String16& packageName, const String16&) {
792 if (op != AppOpsManager::OP_PLAY_AUDIO) {
793 return binder::Status::ok();
794 }
795
796 ALOGI("%s OP_PLAY_AUDIO callback received for %s", __func__, String8(packageName).c_str());
797 sp<OpPlayAudioMonitor> monitor = mMonitor.promote();
798 if (monitor != NULL) {
799 monitor->checkPlayAudioForUsage(/*doBroadcast=*/true);
800 }
801 return binder::Status::ok();
802 }
803
804 // ----------------------------------------------------------------------------
805 #undef LOG_TAG
806 #define LOG_TAG "AF::Track"
807
808 /* static */
809 sp<IAfTrack> IAfTrack::create(
810 IAfPlaybackThread* thread,
811 const sp<Client>& client,
812 audio_stream_type_t streamType,
813 const audio_attributes_t& attr,
814 uint32_t sampleRate,
815 audio_format_t format,
816 audio_channel_mask_t channelMask,
817 size_t frameCount,
818 void *buffer,
819 size_t bufferSize,
820 const sp<IMemory>& sharedBuffer,
821 audio_session_t sessionId,
822 pid_t creatorPid,
823 const AttributionSourceState& attributionSource,
824 audio_output_flags_t flags,
825 track_type type,
826 audio_port_handle_t portId,
827 /** default behaviour is to start when there are as many frames
828 * ready as possible (i.e., the buffer is full). */
829 size_t frameCountToBeReady,
830 float speed,
831 bool isSpatialized,
832 bool isBitPerfect,
833 float volume,
834 bool muted) {
835 return sp<Track>::make(thread,
836 client,
837 streamType,
838 attr,
839 sampleRate,
840 format,
841 channelMask,
842 frameCount,
843 buffer,
844 bufferSize,
845 sharedBuffer,
846 sessionId,
847 creatorPid,
848 attributionSource,
849 flags,
850 type,
851 portId,
852 frameCountToBeReady,
853 speed,
854 isSpatialized,
855 isBitPerfect,
856 volume,
857 muted);
858 }
859
860 // Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
861 Track::Track(
862 IAfPlaybackThread* thread,
863 const sp<Client>& client,
864 audio_stream_type_t streamType,
865 const audio_attributes_t& attr,
866 uint32_t sampleRate,
867 audio_format_t format,
868 audio_channel_mask_t channelMask,
869 size_t frameCount,
870 void *buffer,
871 size_t bufferSize,
872 const sp<IMemory>& sharedBuffer,
873 audio_session_t sessionId,
874 pid_t creatorPid,
875 const AttributionSourceState& attributionSource,
876 audio_output_flags_t flags,
877 track_type type,
878 audio_port_handle_t portId,
879 size_t frameCountToBeReady,
880 float speed,
881 bool isSpatialized,
882 bool isBitPerfect,
883 float volume,
884 bool muted)
885 :
886 AfPlaybackCommon(*this, *thread, volume, muted,
887 attr, attributionSource, thread->isOffloadOrMmap(), type != TYPE_PATCH),
888 TrackBase(thread, client, attr, sampleRate, format, channelMask, frameCount,
889 // TODO: Using unsecurePointer() has some associated security pitfalls
890 // (see declaration for details).
891 // Either document why it is safe in this case or address the
892 // issue (e.g. by copying).
893 (sharedBuffer != 0) ? sharedBuffer->unsecurePointer() : buffer,
894 (sharedBuffer != 0) ? sharedBuffer->size() : bufferSize,
895 sessionId, creatorPid,
896 VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)), true /*isOut*/,
897 (type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,
898 type,
899 portId,
900 std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK) + std::to_string(portId)),
901 mFillingStatus(FS_INVALID),
902 // mRetryCount initialized later when needed
903 mSharedBuffer(sharedBuffer),
904 mStreamType(streamType),
905 mMainBuffer(thread->sinkBuffer()),
906 mAuxBuffer(NULL),
907 mAuxEffectId(0), mHasVolumeController(false),
908 mFrameMap(16 /* sink-frame-to-track-frame map memory */),
909 mVolumeHandler(new media::VolumeHandler(sampleRate)),
910 mOpPlayAudioMonitor(OpPlayAudioMonitor::createIfNeeded(thread, attributionSource, attr, id(),
911 streamType)),
912 // mSinkTimestamp
913 mFastIndex(-1),
914 mCachedVolume(1.0),
915 /* The track might not play immediately after becoming active, much as if its volume were 0.
916 * When the track starts playing, its volume will be computed. */
917 mFinalVolume(0.f),
918 mResumeToStopping(false),
919 mFlushHwPending(false),
920 mFlags(flags),
921 mSpeed(speed),
922 mIsSpatialized(isSpatialized),
923 mIsBitPerfect(isBitPerfect)
924 {
925 // client == 0 implies sharedBuffer == 0
926 ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
927
928 ALOGV_IF(sharedBuffer != 0, "%s(%d): sharedBuffer: %p, size: %zu",
929 __func__, mId, sharedBuffer->unsecurePointer(), sharedBuffer->size());
930
931 if (mCblk == NULL) {
932 return;
933 }
934
935 uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
936 if (!thread->isTrackAllowed_l(channelMask, format, sessionId, uid)) {
937 ALOGE("%s(%d): no more tracks available", __func__, mId);
938 releaseCblk(); // this makes the track invalid.
939 return;
940 }
941
942 if (sharedBuffer == 0) {
943 mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
944 mFrameSize, !isExternalTrack(), sampleRate);
945 } else {
946 mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
947 mFrameSize, sampleRate);
948 }
949 mServerProxy = mAudioTrackServerProxy;
950 mServerProxy->setStartThresholdInFrames(frameCountToBeReady); // update the Cblk value
951
952 // only allocate a fast track index if we were able to allocate a normal track name
953 if (flags & AUDIO_OUTPUT_FLAG_FAST) {
954 // FIXME: Not calling framesReadyIsCalledByMultipleThreads() exposes a potential
955 // race with setSyncEvent(). However, if we call it, we cannot properly start
956 // static fast tracks (SoundPool) immediately after stopping.
957 //mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();
958 ALOG_ASSERT(thread->fastTrackAvailMask_l() != 0);
959 const int i = __builtin_ctz(thread->fastTrackAvailMask_l());
960 ALOG_ASSERT(0 < i && i < (int)FastMixerState::sMaxFastTracks);
961 // FIXME This is too eager. We allocate a fast track index before the
962 // fast track becomes active. Since fast tracks are a scarce resource,
963 // this means we are potentially denying other more important fast tracks from
964 // being created. It would be better to allocate the index dynamically.
965 mFastIndex = i;
966 thread->fastTrackAvailMask_l() &= ~(1 << i);
967 }
968
969 populateUsageAndContentTypeFromStreamType();
970
971 // Audio patch and call assistant volume are always max
972 if (mAttr.usage == AUDIO_USAGE_CALL_ASSISTANT
973 || mAttr.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
974 setPortVolume(1.0f);
975 setPortMute(false);
976 }
977
978 mServerLatencySupported = checkServerLatencySupported(format, flags);
979 #ifdef TEE_SINK
980 mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
981 + "_" + std::to_string(mId) + "_T");
982 #endif
983
984 if (thread->supportsHapticPlayback()) {
985 // If the track is attached to a haptic playback thread, it may have a HapticGenerator
986 // effect on it that generates haptic data. In that case, an external vibration is
987 // always created for all tracks attached to the haptic playback thread.
988 mAudioVibrationController = new AudioVibrationController(this);
989 std::string packageName = attributionSource.packageName.has_value() ?
990 attributionSource.packageName.value() : "";
991 mExternalVibration = new os::ExternalVibration(
992 mUid, packageName, mAttr, mAudioVibrationController);
993 }
994
995 // Once this item is logged by the server, the client can add properties.
996 const char * const traits = sharedBuffer == 0 ? "" : "static";
997 mTrackMetrics.logConstructor(creatorPid, uid, id(), traits, streamType);
998 }
999
1000 // When attributes are undefined, derive default values from stream type.
1001 // See AudioAttributes.java, usageForStreamType() and Builder.setInternalLegacyStreamType()
1002 void Track::populateUsageAndContentTypeFromStreamType() {
1003 if (mAttr.usage == AUDIO_USAGE_UNKNOWN) {
1004 switch (mStreamType) {
1005 case AUDIO_STREAM_VOICE_CALL:
1006 mAttr.usage = AUDIO_USAGE_VOICE_COMMUNICATION;
1007 mAttr.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1008 break;
1009 case AUDIO_STREAM_SYSTEM:
1010 mAttr.usage = AUDIO_USAGE_ASSISTANCE_SONIFICATION;
1011 mAttr.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1012 break;
1013 case AUDIO_STREAM_RING:
1014 mAttr.usage = AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
1015 mAttr.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1016 break;
1017 case AUDIO_STREAM_MUSIC:
1018 mAttr.usage = AUDIO_USAGE_MEDIA;
1019 mAttr.content_type = AUDIO_CONTENT_TYPE_MUSIC;
1020 break;
1021 case AUDIO_STREAM_ALARM:
1022 mAttr.usage = AUDIO_USAGE_ALARM;
1023 mAttr.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1024 break;
1025 case AUDIO_STREAM_NOTIFICATION:
1026 mAttr.usage = AUDIO_USAGE_NOTIFICATION;
1027 mAttr.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1028 break;
1029 case AUDIO_STREAM_DTMF:
1030 mAttr.usage = AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
1031 mAttr.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1032 break;
1033 case AUDIO_STREAM_ACCESSIBILITY:
1034 mAttr.usage = AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
1035 mAttr.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1036 break;
1037 case AUDIO_STREAM_ASSISTANT:
1038 mAttr.usage = AUDIO_USAGE_ASSISTANT;
1039 mAttr.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1040 break;
1041 case AUDIO_STREAM_REROUTING:
1042 case AUDIO_STREAM_PATCH:
1043 mAttr.usage = AUDIO_USAGE_VIRTUAL_SOURCE;
1044 // unknown content type
1045 break;
1046 case AUDIO_STREAM_CALL_ASSISTANT:
1047 mAttr.usage = AUDIO_USAGE_CALL_ASSISTANT;
1048 mAttr.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1049 break;
1050 default:
1051 break;
1052 }
1053 }
1054 }
1055
1056 Track::~Track()
1057 {
1058 ALOGV("%s(%d)", __func__, mId);
1059
1060 // The implicit member destructor would clear mSharedBuffer,
1061 // but it would not push the decremented reference count,
1062 // leaving the client's IMemory dangling indefinitely.
1063 // Clearing it explicitly here prevents that leak.
1064 if (mSharedBuffer != 0) {
1065 mSharedBuffer.clear();
1066 }
1067 }
1068
1069 status_t Track::initCheck() const
1070 {
1071 status_t status = TrackBase::initCheck();
1072 if (status == NO_ERROR && mCblk == nullptr) {
1073 status = NO_MEMORY;
1074 }
1075 return status;
1076 }
1077
1078 void Track::destroy()
1079 {
1080 // NOTE: destroyTrack_l() can remove a strong reference to this Track
1081 // by removing it from mTracks vector, so there is a risk that this Track's
1082 // destructor is called. As the destructor needs to lock mLock,
1083 // we must acquire a strong reference on this Track before locking mLock
1084 // here so that the destructor is called only when exiting this function.
1085 // On the other hand, as long as Track::destroy() is only called by
1086 // TrackHandle destructor, the TrackHandle still holds a strong ref on
1087 // this Track with its member mTrack.
1088 sp<Track> keep(this);
1089 { // scope for mLock
1090 bool wasActive = false;
1091 const sp<IAfThreadBase> thread = mThread.promote();
1092 if (thread != 0) {
1093 audio_utils::unique_lock ul(thread->mutex());
1094 thread->waitWhileThreadBusy_l(ul);
1095
1096 auto* const playbackThread = thread->asIAfPlaybackThread().get();
1097 wasActive = playbackThread->destroyTrack_l(this);
1098 forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->destroy(); });
1099 }
1100 if (isExternalTrack() && !wasActive) {
1101 // If the track is not active, the TrackHandle is responsible for
1102 // releasing the port id, not the ThreadBase::threadLoop().
1103 // At this point, there is no concurrency issue as the track is going away.
1104 AudioSystem::releaseOutput(mPortId);
1105 }
1106 }
1107 }
1108
1109 void Track::appendDumpHeader(String8& result) const
1110 {
1111 const auto res = IAfTrack::getLogHeader();
1112 result.append(res.data(), res.size());
1113 }
1114
1115 void Track::appendDump(String8& result, bool active) const
1116 {
1117 char trackType;
1118 switch (mType) {
1119 case TYPE_DEFAULT:
1120 case TYPE_OUTPUT:
1121 if (isStatic()) {
1122 trackType = 'S'; // static
1123 } else {
1124 trackType = ' '; // normal
1125 }
1126 break;
1127 case TYPE_PATCH:
1128 trackType = 'P';
1129 break;
1130 default:
1131 trackType = '?';
1132 }
1133
1134 if (isFastTrack()) {
1135 result.appendFormat("F%d %c %6d", mFastIndex, trackType, mId);
1136 } else {
1137 result.appendFormat(" %c %6d", trackType, mId);
1138 }
1139
1140 char nowInUnderrun;
1141 switch (mObservedUnderruns.mBitFields.mMostRecent) {
1142 case UNDERRUN_FULL:
1143 nowInUnderrun = ' ';
1144 break;
1145 case UNDERRUN_PARTIAL:
1146 nowInUnderrun = '<';
1147 break;
1148 case UNDERRUN_EMPTY:
1149 nowInUnderrun = '*';
1150 break;
1151 default:
1152 nowInUnderrun = '?';
1153 break;
1154 }
1155
1156 char fillingStatus;
1157 switch (mFillingStatus) {
1158 case FS_INVALID:
1159 fillingStatus = 'I';
1160 break;
1161 case FS_FILLING:
1162 fillingStatus = 'f';
1163 break;
1164 case FS_FILLED:
1165 fillingStatus = 'F';
1166 break;
1167 case FS_ACTIVE:
1168 fillingStatus = 'A';
1169 break;
1170 default:
1171 fillingStatus = '?';
1172 break;
1173 }
1174
1175 // clip framesReadySafe to max representation in dump
1176 const size_t framesReadySafe =
1177 std::min(mAudioTrackServerProxy->framesReadySafe(), (size_t)99999999);
1178
1179 // obtain volumes
1180 const gain_minifloat_packed_t vlr = mAudioTrackServerProxy->getVolumeLR();
1181 const std::pair<float /* volume */, bool /* active */> vsVolume =
1182 mVolumeHandler->getLastVolume();
1183
1184 // Our effective frame count is obtained by ServerProxy::getBufferSizeInFrames()
1185 // as it may be reduced by the application.
1186 const size_t bufferSizeInFrames = (size_t)mAudioTrackServerProxy->getBufferSizeInFrames();
1187 // Check whether the buffer size has been modified by the app.
1188 const char modifiedBufferChar = bufferSizeInFrames < mFrameCount
1189 ? 'r' /* buffer reduced */: bufferSizeInFrames > mFrameCount
1190 ? 'e' /* error */ : ' ' /* identical */;
1191
1192 result.appendFormat("%7s %7u/%7u %7u %7u %2s 0x%03X "
1193 "%08X %08X %6u "
1194 "%2u %3x %2x "
1195 "%5.2g %5.2g %5.2g %5.2g%c %11.2g %10s "
1196 "%08X %6zu%c %6zu %c %9u%c %7u %10s %12s",
1197 active ? "yes" : "no",
1198 mClient ? mClient->pid() : getpid() ,
1199 mClient ? mClient->uid() : getuid(),
1200 mSessionId,
1201 mPortId,
1202 getTrackStateAsCodedString(),
1203 mCblk->mFlags,
1204
1205 mFormat,
1206 mChannelMask,
1207 sampleRate(),
1208
1209 mStreamType,
1210 mAttr.usage,
1211 mAttr.content_type,
1212
1213 20.0 * log10(mFinalVolume),
1214 20.0 * log10(float_from_gain(gain_minifloat_unpack_left(vlr))),
1215 20.0 * log10(float_from_gain(gain_minifloat_unpack_right(vlr))),
1216 20.0 * log10(vsVolume.first), // VolumeShaper(s) total volume
1217 vsVolume.second ? 'A' : ' ', // if any VolumeShapers active
1218 20.0 * log10(getPortVolume()),
1219 getPortMute() ? "true" : "false",
1220
1221 mCblk->mServer,
1222 bufferSizeInFrames,
1223 modifiedBufferChar,
1224 framesReadySafe,
1225 fillingStatus,
1226 mAudioTrackServerProxy->getUnderrunFrames(),
1227 nowInUnderrun,
1228 (unsigned)mAudioTrackServerProxy->framesFlushed() % 10000000,
1229 isBitPerfect() ? "true" : "false",
1230 getInternalMute() ? "true" : "false"
1231 );
1232
1233 if (isServerLatencySupported()) {
1234 double latencyMs;
1235 bool fromTrack;
1236 if (getTrackLatencyMs(&latencyMs, &fromTrack) == OK) {
1237 // Show latency in msec, followed by 't' if from track timestamp (the most accurate)
1238 // or 'k' if estimated from kernel because track frames haven't been presented yet.
1239 result.appendFormat(" %7.2lf %c", latencyMs, fromTrack ? 't' : 'k');
1240 } else {
1241 result.appendFormat("%10s", mCblk->mServer != 0 ? "unavail" : "new");
1242 }
1243 }
1244 result.append("\n");
1245 }
1246
1247 uint32_t Track::sampleRate() const {
1248 return mAudioTrackServerProxy->getSampleRate();
1249 }
1250
1251 // AudioBufferProvider interface
1252 status_t Track::getNextBuffer(AudioBufferProvider::Buffer* buffer)
1253 {
1254 ServerProxy::Buffer buf;
1255 size_t desiredFrames = buffer->frameCount;
1256 buf.mFrameCount = desiredFrames;
1257 status_t status = mServerProxy->obtainBuffer(&buf);
1258 buffer->frameCount = buf.mFrameCount;
1259 buffer->raw = buf.mRaw;
1260 if (buf.mFrameCount == 0 && !isStopping() && !isPausing()
1261 && !isStopped() && !isPaused() && !isOffloaded()) {
1262 ALOGV("%s(%d): underrun, framesReady(%zu) < framesDesired(%zd), state: %d",
1263 __func__, mId, buf.mFrameCount, desiredFrames, (int)mState);
1264 mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
1265 if (ATRACE_ENABLED()) [[unlikely]] {
1266 ATRACE_INSTANT_FOR_TRACK(mTraceActionId.c_str(), audio_utils::trace::Object{}
1267 .set(AUDIO_TRACE_OBJECT_KEY_EVENT, AUDIO_TRACE_EVENT_UNDERRUN)
1268 .set(AUDIO_TRACE_OBJECT_KEY_FRAMECOUNT, desiredFrames)
1269 .toTrace().c_str());
1270 }
1271 } else {
1272 mAudioTrackServerProxy->tallyUnderrunFrames(0);
1273 }
1274 return status;
1275 }
1276
1277 void Track::releaseBuffer(AudioBufferProvider::Buffer* buffer)
1278 {
1279 interceptBuffer(*buffer);
1280 TrackBase::releaseBuffer(buffer);
1281 }
1282
1283 // TODO: compensate for time shift between HW modules.
1284 void Track::interceptBuffer(
1285 const AudioBufferProvider::Buffer& sourceBuffer) {
1286 auto start = std::chrono::steady_clock::now();
1287 const size_t frameCount = sourceBuffer.frameCount;
1288 if (frameCount == 0) {
1289 return; // No audio to intercept.
1290 // Additionally, PatchProxyBufferProvider::obtainBuffer (called by PatchTrack::getNextBuffer)
1291 // does not allow a 0 frame size request, contrary to getNextBuffer
1292 }
1293 TeePatches teePatches;
1294 if (mTeePatchesRWLock.tryReadLock() == NO_ERROR) {
1295 // Cache a copy of tee patches in case it is updated while using.
1296 teePatches = mTeePatches;
1297 mTeePatchesRWLock.unlock();
1298 }
1299 for (auto& teePatch : teePatches) {
1300 IAfPatchRecord* patchRecord = teePatch.patchRecord.get();
1301 const size_t framesWritten = patchRecord->writeFrames(
1302 sourceBuffer.i8, frameCount, mFrameSize);
1303 const size_t framesLeft = frameCount - framesWritten;
1304 ALOGW_IF(framesLeft != 0, "%s(%d) PatchRecord %d can not provide big enough "
1305 "buffer %zu/%zu, dropping %zu frames", __func__, mId, patchRecord->id(),
1306 framesWritten, frameCount, framesLeft);
1307 }
1308 auto spent = ceil<std::chrono::microseconds>(std::chrono::steady_clock::now() - start);
1309 using namespace std::chrono_literals;
1310 // Average is ~20us per track; this should virtually never be logged (logging takes >200us)
1311 ALOGD_IF(spent > 500us, "%s: took %lldus to intercept %zu tracks", __func__,
1312 spent.count(), teePatches.size());
1313 }
1314
1315 // ExtendedAudioBufferProvider interface
1316
1317 // framesReady() may return an approximation of the number of frames if called
1318 // from a different thread than the one calling Proxy->obtainBuffer() and
1319 // Proxy->releaseBuffer(). Also note there is no mutual exclusion in the
1320 // AudioTrackServerProxy so be especially careful calling with FastTracks.
1321 size_t Track::framesReady() const {
1322 if (mSharedBuffer != 0 && (isStopped() || isStopping())) {
1323 // Static tracks return zero frames immediately upon stopping (for FastTracks).
1324 // The remainder of the buffer is not drained.
1325 return 0;
1326 }
1327 return mAudioTrackServerProxy->framesReady();
1328 }
1329
1330 int64_t Track::framesReleased() const
1331 {
1332 return mAudioTrackServerProxy->framesReleased();
1333 }
1334
1335 void Track::onTimestamp(const ExtendedTimestamp &timestamp)
1336 {
1337 // This call comes from a FastTrack and should be kept lockless.
1338 // The server side frames are already translated to client frames.
1339 mAudioTrackServerProxy->setTimestamp(timestamp);
1340
1341 // We do not set drained here, as the FastTrack timestamp may not go to the very last frame.
1342
1343 // Compute latency.
1344 // TODO: Consider whether the server latency may be passed in by FastMixer
1345 // as a constant for all active FastTracks.
1346 const double latencyMs = timestamp.getOutputServerLatencyMs(sampleRate());
1347 mServerLatencyFromTrack.store(true);
1348 mServerLatencyMs.store(latencyMs);
1349 }
1350
1351 // Don't call for fast tracks; the framesReady() could result in priority inversion
1352 bool Track::isReady() const {
1353 if (mFillingStatus != FS_FILLING || isStopped() || isPausing()) {
1354 return true;
1355 }
1356
1357 if (isStopping()) {
1358 if (framesReady() > 0) {
1359 mFillingStatus = FS_FILLED;
1360 }
1361 return true;
1362 }
1363
1364 size_t bufferSizeInFrames = mServerProxy->getBufferSizeInFrames();
1365 // Note: mServerProxy->getStartThresholdInFrames() is clamped.
1366 const size_t startThresholdInFrames = mServerProxy->getStartThresholdInFrames();
1367 const size_t framesToBeReady = std::clamp( // clamp again to validate client values.
1368 std::min(startThresholdInFrames, bufferSizeInFrames), size_t(1), mFrameCount);
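// For example (illustrative values), a client that passed frameCountToBeReady == 1 at
// creation is considered ready as soon as one frame is buffered, whereas the default
// start threshold (a full buffer, see IAfTrack::create) requires framesReady() to reach
// the effective buffer size.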
1369
1370 if (framesReady() >= framesToBeReady || (mCblk->mFlags & CBLK_FORCEREADY)) {
1371 ALOGV("%s(%d): consider track ready with %zu/%zu, target was %zu)",
1372 __func__, mId, framesReady(), bufferSizeInFrames, framesToBeReady);
1373 mFillingStatus = FS_FILLED;
1374 android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
1375 return true;
1376 }
1377 return false;
1378 }
1379
1380 status_t Track::start(AudioSystem::sync_event_t event __unused,
1381 audio_session_t triggerSession __unused)
1382 {
1383 if (ATRACE_ENABLED()) [[unlikely]] {
1384 ATRACE_INSTANT_FOR_TRACK(mTraceActionId.c_str(), audio_utils::trace::Object{}
1385 .set(AUDIO_TRACE_OBJECT_KEY_EVENT, AUDIO_TRACE_EVENT_START)
1386 .toTrace().c_str());
1387 }
1388 status_t status = NO_ERROR;
1389 ALOGV("%s(%d): calling pid %d session %d",
1390 __func__, mId, IPCThreadState::self()->getCallingPid(), mSessionId);
1391
1392 const sp<IAfThreadBase> thread = mThread.promote();
1393 if (thread != 0) {
1394 if (isOffloaded()) {
1395 audio_utils::lock_guard _laf(thread->afThreadCallback()->mutex());
1396 const bool nonOffloadableGlobalEffectEnabled =
1397 thread->afThreadCallback()->isNonOffloadableGlobalEffectEnabled_l();
1398 audio_utils::lock_guard _lth(thread->mutex());
1399 sp<IAfEffectChain> ec = thread->getEffectChain_l(mSessionId);
1400 if (nonOffloadableGlobalEffectEnabled ||
1401 (ec != 0 && ec->isNonOffloadableEnabled())) {
1402 invalidate();
1403 return PERMISSION_DENIED;
1404 }
1405 }
1406 audio_utils::unique_lock ul(thread->mutex());
1407 thread->waitWhileThreadBusy_l(ul);
1408
1409 track_state state = mState;
1410 // here the track could be either new, or restarted
1411 // in both cases "unstop" the track
1412
1413 // initial state-stopping. next state-pausing.
1414 // What if resume is called ?
1415
1416 if (state == FLUSHED) {
1417 // avoid underrun glitches when starting after flush
1418 reset();
1419 }
1420
1421 // clear mPauseHwPending because of pause (and possibly flush) during underrun.
1422 mPauseHwPending = false;
1423 if (state == PAUSED || state == PAUSING) {
1424 if (mResumeToStopping) {
1425 // we were stopping when paused, so resume to STOPPING_1
1426 mState = TrackBase::STOPPING_1;
1427 ALOGV("%s(%d): PAUSED => STOPPING_1 on thread %d",
1428 __func__, mId, (int)mThreadIoHandle);
1429 } else {
1430 mState = TrackBase::RESUMING;
1431 ALOGV("%s(%d): PAUSED => RESUMING on thread %d",
1432 __func__, mId, (int)mThreadIoHandle);
1433 }
1434 } else {
1435 mState = TrackBase::ACTIVE;
1436 ALOGV("%s(%d): ? => ACTIVE on thread %d",
1437 __func__, mId, (int)mThreadIoHandle);
1438 }
1439
1440 auto* const playbackThread = thread->asIAfPlaybackThread().get();
1441
1442 // states to reset position info for pcm tracks
1443 if (audio_is_linear_pcm(mFormat)
1444 && (state == IDLE || state == STOPPED || state == FLUSHED
1445 || state == PAUSED)) {
1446 mFrameMap.reset();
1447
1448 if (!isFastTrack()) {
1449 // Start point of track -> sink frame map. If the HAL returns a
1450 // frame position smaller than the first written frame in
1451 // updateTrackFrameInfo, the timestamp can be interpolated
1452 // instead of using a larger value.
1453 mFrameMap.push(mAudioTrackServerProxy->framesReleased(),
1454 playbackThread->framesWritten());
1455 }
1456 }
1457 if (isFastTrack()) {
1458 // refresh fast track underruns on start because that field is never cleared
1459 // by the fast mixer; furthermore, the same track can be recycled, i.e. start
1460 // after stop.
1461 mObservedUnderruns = playbackThread->getFastTrackUnderruns(mFastIndex);
1462 }
1463 status = playbackThread->addTrack_l(this);
1464 if (status == INVALID_OPERATION || status == PERMISSION_DENIED || status == DEAD_OBJECT) {
1465 triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
1466 // restore previous state if start was rejected by policy manager
1467 if (status == PERMISSION_DENIED || status == DEAD_OBJECT) {
1468 mState = state;
1469 }
1470 }
1471
1472 // Audio timing metrics are computed a few mix cycles after starting.
1473 {
1474 mLogStartCountdown = LOG_START_COUNTDOWN;
1475 mLogStartTimeNs = systemTime();
1476 mLogStartFrames = mAudioTrackServerProxy->getTimestamp()
1477 .mPosition[ExtendedTimestamp::LOCATION_KERNEL];
1478 mLogLatencyMs = 0.;
1479 }
1480 mLogForceVolumeUpdate = true; // at least one volume logged for metrics when starting.
1481
1482 if (status == NO_ERROR || status == ALREADY_EXISTS) {
1483 // for streaming tracks, remove the buffer read stop limit.
1484 mAudioTrackServerProxy->start();
1485 }
1486
1487 // track was already in the active list, not a problem
1488 if (status == ALREADY_EXISTS) {
1489 status = NO_ERROR;
1490 } else {
1491 // Acknowledge any pending flush(), so that subsequent new data isn't discarded.
1492 // It is usually unsafe to access the server proxy from a binder thread.
1493 // But in this case we know the mixer thread (whether normal mixer or fast mixer)
1494 // isn't looking at this track yet: we still hold the normal mixer thread lock,
1495 // and for fast tracks the track is not yet in the fast mixer thread's active set.
1496 // For static tracks, this is used to acknowledge change in position or loop.
1497 ServerProxy::Buffer buffer;
1498 buffer.mFrameCount = 1;
1499 (void) mAudioTrackServerProxy->obtainBuffer(&buffer, true /*ackFlush*/);
1500 }
1501 if (status == NO_ERROR) {
1502 forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->start(); });
1503 }
1504 } else {
1505 status = BAD_VALUE;
1506 }
1507 if (status == NO_ERROR) {
1508 startPlaybackDelivery();
1509 // send format to AudioManager for playback activity monitoring
1510 const sp<IAudioManager> audioManager =
1511 thread->afThreadCallback()->getOrCreateAudioManager();
1512 if (audioManager && mPortId != AUDIO_PORT_HANDLE_NONE) {
1513 std::unique_ptr<os::PersistableBundle> bundle =
1514 std::make_unique<os::PersistableBundle>();
1515 bundle->putBoolean(String16(kExtraPlayerEventSpatializedKey),
1516 isSpatialized());
1517 bundle->putInt(String16(kExtraPlayerEventSampleRateKey), mSampleRate);
1518 bundle->putInt(String16(kExtraPlayerEventChannelMaskKey), mChannelMask);
1519 status_t result = audioManager->portEvent(mPortId,
1520 PLAYER_UPDATE_FORMAT, bundle);
1521 if (result != OK) {
1522 ALOGE("%s: unable to send playback format for port ID %d, status error %d",
1523 __func__, mPortId, result);
1524 }
1525 }
1526 }
1527 return status;
1528 }
1529
1530 void Track::stop()
1531 {
1532 ALOGV("%s(%d): calling pid %d", __func__, mId, IPCThreadState::self()->getCallingPid());
1533 if (ATRACE_ENABLED()) [[unlikely]] {
1534 ATRACE_INSTANT_FOR_TRACK(mTraceActionId.c_str(), audio_utils::trace::Object{}
1535 .set(AUDIO_TRACE_OBJECT_KEY_EVENT, AUDIO_TRACE_EVENT_STOP)
1536 .toTrace().c_str());
1537 }
1538 const sp<IAfThreadBase> thread = mThread.promote();
1539 if (thread != 0) {
1540 audio_utils::unique_lock ul(thread->mutex());
1541 thread->waitWhileThreadBusy_l(ul);
1542
1543 track_state state = mState;
1544 if (state == RESUMING || state == ACTIVE || state == PAUSING || state == PAUSED) {
1545 // If the track is not active (PAUSED and buffers full), flush buffers
1546 auto* const playbackThread = thread->asIAfPlaybackThread().get();
1547 if (!playbackThread->isTrackActive(this)) {
1548 reset();
1549 mState = STOPPED;
1550 } else if (isPatchTrack() || (!isFastTrack() && !isOffloaded() && !isDirect())) {
1551             // for a PatchTrack (whether fast or not), do not drain but move directly
1552 // to STOPPED to avoid closing while active.
1553 mState = STOPPED;
1554 } else {
1555 // For fast tracks prepareTracks_l() will set state to STOPPING_2
1556 // presentation is complete
1557 // For an offloaded track this starts a drain and state will
1558 // move to STOPPING_2 when drain completes and then STOPPED
1559 mState = STOPPING_1;
1560 if (isOffloaded()) {
1561 mRetryCount = IAfPlaybackThread::kMaxTrackStopRetriesOffload;
1562 }
1563 }
1564 playbackThread->broadcast_l();
1565 ALOGV("%s(%d): not stopping/stopped => stopping/stopped on thread %d",
1566 __func__, mId, (int)mThreadIoHandle);
1567 }
1568 forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->stop(); });
1569 }
1570 // TODO(b/385417236)
1571 // Due to the complexity of state management for offload we do not call endDeliveryRequest().
1572 // For offload tracks, sonification may continue significantly after the STOP
1573 // phase begins. Leave the session on-going until the track is eventually
1574 // destroyed. We continue to allow appop callbacks during STOPPING and STOPPED state.
1575 // This is suboptimal but harmless.
1576 if (!isOffloaded()) {
1577 endPlaybackDelivery();
1578 }
1579 }
1580
1581 void Track::pause()
1582 {
1583 ALOGV("%s(%d): calling pid %d", __func__, mId, IPCThreadState::self()->getCallingPid());
1584 if (ATRACE_ENABLED()) [[unlikely]] {
1585 ATRACE_INSTANT_FOR_TRACK(mTraceActionId.c_str(), audio_utils::trace::Object{}
1586 .set(AUDIO_TRACE_OBJECT_KEY_EVENT, AUDIO_TRACE_EVENT_PAUSE)
1587 .toTrace().c_str());
1588 }
1589 const sp<IAfThreadBase> thread = mThread.promote();
1590 if (thread != 0) {
1591 audio_utils::unique_lock ul(thread->mutex());
1592 thread->waitWhileThreadBusy_l(ul);
1593
1594 auto* const playbackThread = thread->asIAfPlaybackThread().get();
1595 switch (mState) {
1596 case STOPPING_1:
1597 case STOPPING_2:
1598 if (!isOffloaded()) {
1599 /* nothing to do if track is not offloaded */
1600 break;
1601 }
1602
1603 // Offloaded track was draining, we need to carry on draining when resumed
1604 mResumeToStopping = true;
1605 FALLTHROUGH_INTENDED;
1606 case ACTIVE:
1607 case RESUMING:
1608 mState = PAUSING;
1609 ALOGV("%s(%d): ACTIVE/RESUMING => PAUSING on thread %d",
1610 __func__, mId, (int)mThreadIoHandle);
1611 if (isOffloadedOrDirect()) {
1612 mPauseHwPending = true;
1613 }
1614 playbackThread->broadcast_l();
1615 break;
1616
1617 default:
1618 break;
1619 }
1620 // Pausing the TeePatch to avoid a glitch on underrun, at the cost of buffered audio loss.
1621 forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->pause(); });
1622 }
1623 // When stopping a paused track, there will be two endDeliveryRequests. This is tolerated by
1624 // the implementation.
1625 endPlaybackDelivery();
1626 }
1627
1628 void Track::flush()
1629 {
1630 ALOGV("%s(%d)", __func__, mId);
1631 if (ATRACE_ENABLED()) [[unlikely]] {
1632 ATRACE_INSTANT_FOR_TRACK(mTraceActionId.c_str(), audio_utils::trace::Object{}
1633 .set(AUDIO_TRACE_OBJECT_KEY_EVENT, AUDIO_TRACE_EVENT_FLUSH)
1634 .toTrace().c_str());
1635 }
1636 const sp<IAfThreadBase> thread = mThread.promote();
1637 if (thread != 0) {
1638 audio_utils::unique_lock ul(thread->mutex());
1639 thread->waitWhileThreadBusy_l(ul);
1640
1641 auto* const playbackThread = thread->asIAfPlaybackThread().get();
1642
1643 // Flush the ring buffer now if the track is not active in the PlaybackThread.
1644 // Otherwise the flush would not be done until the track is resumed.
1645         // Requires FastTrack removal to be BLOCK_UNTIL_ACKED
1646 if (!playbackThread->isTrackActive(this)) {
1647 (void)mServerProxy->flushBufferIfNeeded();
1648 }
1649
1650 if (isOffloaded()) {
1651 // If offloaded we allow flush during any state except terminated
1652 // and keep the track active to avoid problems if user is seeking
1653 // rapidly and underlying hardware has a significant delay handling
1654 // a pause
1655 if (isTerminated()) {
1656 return;
1657 }
1658
1659 ALOGV("%s(%d): offload flush", __func__, mId);
1660 reset();
1661
1662 if (mState == STOPPING_1 || mState == STOPPING_2) {
1663 ALOGV("%s(%d): flushed in STOPPING_1 or 2 state, change state to ACTIVE",
1664 __func__, mId);
1665 mState = ACTIVE;
1666 }
1667
1668 mFlushHwPending = true;
1669 mResumeToStopping = false;
1670 } else {
1671 if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED &&
1672 mState != PAUSED && mState != PAUSING && mState != IDLE && mState != FLUSHED) {
1673 return;
1674 }
1675 // No point remaining in PAUSED state after a flush => go to
1676 // FLUSHED state
1677 mState = FLUSHED;
1678 // do not reset the track if it is still in the process of being stopped or paused.
1679 // this will be done by prepareTracks_l() when the track is stopped.
1680 // prepareTracks_l() will see mState == FLUSHED, then
1681 // remove from active track list, reset(), and trigger presentation complete
1682 if (isDirect()) {
1683 mFlushHwPending = true;
1684 }
1685 if (!playbackThread->isTrackActive(this)) {
1686 reset();
1687 }
1688 }
1689 // Prevent flush being lost if the track is flushed and then resumed
1690 // before mixer thread can run. This is important when offloading
1691 // because the hardware buffer could hold a large amount of audio
1692 playbackThread->broadcast_l();
1693 // Flush the Tee to avoid on resume playing old data and glitching on the transition to
1694 // new data
1695 forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->flush(); });
1696 }
1697 }
1698
1699 // must be called with thread lock held
1700 void Track::flushAck()
1701 {
1702 if (!isOffloaded() && !isDirect()) {
1703 return;
1704 }
1705
1706 // Clear the client ring buffer so that the app can prime the buffer while paused.
1707 // Otherwise it might not get cleared until playback is resumed and obtainBuffer() is called.
1708 mServerProxy->flushBufferIfNeeded();
1709
1710 mFlushHwPending = false;
1711 }
1712
1713 void Track::pauseAck()
1714 {
1715 mPauseHwPending = false;
1716 }
1717
1718 void Track::reset()
1719 {
1720 // Do not reset twice to avoid discarding data written just after a flush and before
1721 // the audioflinger thread detects the track is stopped.
1722 if (!mResetDone) {
1723 // Force underrun condition to avoid false underrun callback until first data is
1724 // written to buffer
1725 android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
1726 mFillingStatus = FS_FILLING;
1727 mResetDone = true;
1728 if (mState == FLUSHED) {
1729 mState = IDLE;
1730 }
1731 }
1732 }
1733
1734 status_t Track::setParameters(const String8& keyValuePairs)
1735 {
1736 const sp<IAfThreadBase> thread = mThread.promote();
1737 if (thread == 0) {
1738 ALOGE("%s(%d): thread is dead", __func__, mId);
1739 return FAILED_TRANSACTION;
1740 } else if (thread->type() == IAfThreadBase::DIRECT
1741 || thread->type() == IAfThreadBase::OFFLOAD) {
1742 return thread->setParameters(keyValuePairs);
1743 } else {
1744 return PERMISSION_DENIED;
1745 }
1746 }
1747
1748 status_t Track::selectPresentation(int presentationId,
1749 int programId) {
1750 const sp<IAfThreadBase> thread = mThread.promote();
1751 if (thread == 0) {
1752 ALOGE("thread is dead");
1753 return FAILED_TRANSACTION;
1754 } else if (thread->type() == IAfThreadBase::DIRECT
1755 || thread->type() == IAfThreadBase::OFFLOAD) {
1756 auto directOutputThread = thread->asIAfDirectOutputThread().get();
1757 return directOutputThread->selectPresentation(presentationId, programId);
1758 }
1759 return INVALID_OPERATION;
1760 }
1761
1762 VolumeShaper::Status Track::applyVolumeShaper(
1763 const sp<VolumeShaper::Configuration>& configuration,
1764 const sp<VolumeShaper::Operation>& operation)
1765 {
1766 VolumeShaper::Status status = mVolumeHandler->applyVolumeShaper(configuration, operation);
1767
1768 if (isOffloadedOrDirect()) {
1769 // Signal thread to fetch new volume.
1770 const sp<IAfThreadBase> thread = mThread.promote();
1771 if (thread != 0) {
1772 audio_utils::lock_guard _l(thread->mutex());
1773 thread->broadcast_l();
1774 }
1775 }
1776 return status;
1777 }
1778
1779 sp<VolumeShaper::State> Track::getVolumeShaperState(int id) const
1780 {
1781 // Note: We don't check if Thread exists.
1782
1783 // mVolumeHandler is thread safe.
1784 return mVolumeHandler->getVolumeShaperState(id);
1785 }
1786
1787 void Track::setFinalVolume(float volumeLeft, float volumeRight)
1788 {
1789 mFinalVolumeLeft = volumeLeft;
1790 mFinalVolumeRight = volumeRight;
1791 const float volume = (volumeLeft + volumeRight) * 0.5f;
1792 if (mFinalVolume != volume) { // Compare to an epsilon if too many meaningless updates
1793 mFinalVolume = volume;
1794 setMetadataHasChanged();
1795 mLogForceVolumeUpdate = true;
1796 }
1797 if (mLogForceVolumeUpdate) {
1798 mLogForceVolumeUpdate = false;
1799 mTrackMetrics.logVolume(mFinalVolume);
1800 }
1801 }
1802
1803 void Track::copyMetadataTo(MetadataInserter& backInserter) const
1804 {
1805 // Do not forward metadata for PatchTrack with unspecified stream type
1806 if (mStreamType == AUDIO_STREAM_PATCH) {
1807 return;
1808 }
1809
1810 playback_track_metadata_v7_t metadata;
1811 metadata.base = {
1812 .usage = mAttr.usage,
1813 .content_type = mAttr.content_type,
1814 .gain = mFinalVolume,
1815 };
1816
1817 metadata.channel_mask = mChannelMask;
1818
1819 std::string tagStr(mAttr.tags);
1820 const sp<IAfThreadBase> thread = mThread.promote();
1821 if (audioserver_flags::enable_gmap_mode() && mAttr.usage == AUDIO_USAGE_GAME
1822 && thread != nullptr && thread->afThreadCallback()->hasAlreadyCaptured(uid())
1823 && (tagStr.size() + strlen(AUDIO_ATTRIBUTES_TAG_GMAP_BIDIRECTIONAL)
1824 + (tagStr.size() ? 1 : 0))
1825 < AUDIO_ATTRIBUTES_TAGS_MAX_SIZE) {
1826 if (tagStr.size() != 0) {
1827 tagStr.append(1, AUDIO_ATTRIBUTES_TAGS_SEPARATOR);
1828 }
1829 tagStr.append(AUDIO_ATTRIBUTES_TAG_GMAP_BIDIRECTIONAL);
1830 }
1831 strncpy(metadata.tags, tagStr.c_str(), AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);
1832 metadata.tags[AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1] = '\0';
1833 *backInserter++ = metadata;
1834 }
1835
1836 void Track::updateTeePatches_l() {
1837 if (mTeePatchesToUpdate.has_value()) {
1838 forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->destroy(); });
1839 {
1840 RWLock::AutoWLock writeLock(mTeePatchesRWLock);
1841 mTeePatches = std::move(mTeePatchesToUpdate.value());
1842 }
1843 if (mState == TrackBase::ACTIVE || mState == TrackBase::RESUMING ||
1844 mState == TrackBase::STOPPING_1) {
1845 forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->start(); });
1846 }
1847 mTeePatchesToUpdate.reset();
1848 }
1849 }
1850
1851 void Track::setTeePatchesToUpdate_l(TeePatches teePatchesToUpdate) {
1852 ALOGW_IF(mTeePatchesToUpdate.has_value(),
1853 "%s, existing tee patches to update will be ignored", __func__);
1854 mTeePatchesToUpdate = std::move(teePatchesToUpdate);
1855 }
1856
1857 status_t Track::getTimestamp(AudioTimestamp& timestamp)
1858 {
1859 if (!isOffloaded() && !isDirect()) {
1860 return INVALID_OPERATION; // normal tracks handled through SSQ
1861 }
1862 const sp<IAfThreadBase> thread = mThread.promote();
1863 if (thread == 0) {
1864 return INVALID_OPERATION;
1865 }
1866
1867 audio_utils::lock_guard _l(thread->mutex());
1868 auto* const playbackThread = thread->asIAfPlaybackThread().get();
1869 return playbackThread->getTimestamp_l(timestamp);
1870 }
1871
1872 status_t Track::attachAuxEffect(int EffectId)
1873 {
1874 const sp<IAfThreadBase> thread = mThread.promote();
1875 if (thread == nullptr) {
1876 return DEAD_OBJECT;
1877 }
1878
1879 auto dstThread = thread->asIAfPlaybackThread();
1880 // srcThread is initialized by call to moveAuxEffectToIo()
1881 sp<IAfPlaybackThread> srcThread;
1882 const auto& af = mClient->afClientCallback();
1883 status_t status = af->moveAuxEffectToIo(EffectId, dstThread, &srcThread);
1884
1885 if (EffectId != 0 && status == NO_ERROR) {
1886 status = dstThread->attachAuxEffect(this, EffectId);
1887 if (status == NO_ERROR) {
1888             AudioSystem::moveEffectsToIo(std::vector<int>{EffectId}, dstThread->id()); // one-element vector
1889 }
1890 }
1891
1892 if (status != NO_ERROR && srcThread != nullptr) {
1893 af->moveAuxEffectToIo(EffectId, srcThread, &dstThread);
1894 }
1895 return status;
1896 }
1897
1898 void Track::setAuxBuffer(int EffectId, int32_t *buffer)
1899 {
1900 mAuxEffectId = EffectId;
1901 mAuxBuffer = buffer;
1902 }
1903
1904 // presentationComplete verified by frames, used by Mixed tracks.
1905 bool Track::presentationComplete(
1906 int64_t framesWritten, size_t audioHalFrames)
1907 {
1908 // TODO: improve this based on FrameMap if it exists, to ensure full drain.
1909 // This assists in proper timestamp computation as well as wakelock management.
1910
1911 // a track is considered presented when the total number of frames written to audio HAL
1912 // corresponds to the number of frames written when presentationComplete() is called for the
1913 // first time (mPresentationCompleteFrames == 0) plus the buffer filling status at that time.
1914 // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
1915 // to detect when all frames have been played. In this case framesWritten isn't
1916 // useful because it doesn't always reflect whether there is data in the h/w
1917 // buffers, particularly if a track has been paused and resumed during draining
1918 ALOGV("%s(%d): presentationComplete() mPresentationCompleteFrames %lld framesWritten %lld",
1919 __func__, mId,
1920 (long long)mPresentationCompleteFrames, (long long)framesWritten);
1921 if (mPresentationCompleteFrames == 0) {
1922 mPresentationCompleteFrames = framesWritten + audioHalFrames;
1923 ALOGV("%s(%d): set:"
1924 " mPresentationCompleteFrames %lld audioHalFrames %zu",
1925 __func__, mId,
1926 (long long)mPresentationCompleteFrames, audioHalFrames);
1927 }
1928
1929 bool complete;
1930 if (isFastTrack()) { // does not go through linear map
1931 complete = framesWritten >= (int64_t) mPresentationCompleteFrames;
1932 ALOGV("%s(%d): %s framesWritten:%lld mPresentationCompleteFrames:%lld",
1933 __func__, mId, (complete ? "complete" : "waiting"),
1934 (long long) framesWritten, (long long) mPresentationCompleteFrames);
1935 } else { // Normal tracks, OutputTracks, and PatchTracks
1936 complete = framesWritten >= (int64_t) mPresentationCompleteFrames
1937 && mAudioTrackServerProxy->isDrained();
1938 }
1939
1940 if (complete) {
1941 notifyPresentationComplete();
1942 return true;
1943 }
1944 return false;
1945 }
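// Illustrative sketch of the frame-based completion check above, with assumed values:
//
//     // first call: framesWritten = 48000, audioHalFrames = 1024
//     mPresentationCompleteFrames = 48000 + 1024;   // target latched at 49024
//     // later call on a fast track: framesWritten = 49152
//     complete = 49152 >= 49024;                    // true -> notifyPresentationComplete()
//     // a normal track additionally requires mAudioTrackServerProxy->isDrained()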
1946
1947 // presentationComplete checked by time, used by DirectTracks.
1948 bool Track::presentationComplete(uint32_t latencyMs)
1949 {
1950 // For Offloaded or Direct tracks.
1951
1952     // For a direct track, we use time-based testing for presentationComplete.
1953
1954 // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
1955 // to detect when all frames have been played. In this case latencyMs isn't
1956 // useful because it doesn't always reflect whether there is data in the h/w
1957 // buffers, particularly if a track has been paused and resumed during draining
1958
1959 constexpr float MIN_SPEED = 0.125f; // min speed scaling allowed for timely response.
1960 if (mPresentationCompleteTimeNs == 0) {
1961 mPresentationCompleteTimeNs = systemTime() + latencyMs * 1e6 / fmax(mSpeed, MIN_SPEED);
1962 ALOGV("%s(%d): set: latencyMs %u mPresentationCompleteTimeNs:%lld",
1963 __func__, mId, latencyMs, (long long) mPresentationCompleteTimeNs);
1964 }
1965
1966 bool complete;
1967 if (isOffloaded()) {
1968 complete = true;
1969 } else { // Direct
1970 complete = systemTime() >= mPresentationCompleteTimeNs;
1971 ALOGV("%s(%d): %s", __func__, mId, (complete ? "complete" : "waiting"));
1972 }
1973 if (complete) {
1974 notifyPresentationComplete();
1975 return true;
1976 }
1977 return false;
1978 }
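// Illustrative sketch of the time-based deadline above, with assumed values
// latencyMs = 80 and mSpeed = 0.5 (the 0.125 floor caps the stretch at 8x):
//
//     mPresentationCompleteTimeNs = systemTime() + 80 * 1e6 / fmax(0.5f, 0.125f);
//     // 80 ms of reported latency at half speed -> deadline 160 ms from now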
1979
1980 void Track::notifyPresentationComplete()
1981 {
1982 // This only triggers once. TODO: should we enforce this?
1983 triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
1984 mAudioTrackServerProxy->setStreamEndDone();
1985 }
1986
1987 void Track::triggerEvents(AudioSystem::sync_event_t type)
1988 {
1989 for (auto it = mSyncEvents.begin(); it != mSyncEvents.end();) {
1990 if ((*it)->type() == type) {
1991 ALOGV("%s: triggering SyncEvent type %d", __func__, type);
1992 (*it)->trigger();
1993 it = mSyncEvents.erase(it);
1994 } else {
1995 ++it;
1996 }
1997 }
1998 }
1999
2000 // implement VolumeBufferProvider interface
2001
2002 gain_minifloat_packed_t Track::getVolumeLR() const
2003 {
2004 // called by FastMixer, so not allowed to take any locks, block, or do I/O including logs
2005 ALOG_ASSERT(isFastTrack() && (mCblk != NULL));
2006 gain_minifloat_packed_t vlr = mAudioTrackServerProxy->getVolumeLR();
2007 float vl = float_from_gain(gain_minifloat_unpack_left(vlr));
2008 float vr = float_from_gain(gain_minifloat_unpack_right(vlr));
2009 // track volumes come from shared memory, so can't be trusted and must be clamped
2010 if (vl > GAIN_FLOAT_UNITY) {
2011 vl = GAIN_FLOAT_UNITY;
2012 }
2013 if (vr > GAIN_FLOAT_UNITY) {
2014 vr = GAIN_FLOAT_UNITY;
2015 }
2016 // now apply the cached master volume and stream type volume;
2017 // this is trusted but lacks any synchronization or barrier so may be stale
2018 float v = mCachedVolume;
2019 vl *= v;
2020 vr *= v;
2021 // re-combine into packed minifloat
2022 vlr = gain_minifloat_pack(gain_from_float(vl), gain_from_float(vr));
2023 // FIXME look at mute, pause, and stop flags
2024 return vlr;
2025 }
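// Illustrative sketch of the clamp-then-scale step above, with an assumed
// (untrusted) left gain of 1.75 read from shared memory and mCachedVolume = 0.5:
//
//     float vl = 1.75f;                        // unpacked from the minifloat pair
//     vl = std::min(vl, GAIN_FLOAT_UNITY);     // clamp untrusted value -> 1.0
//     vl *= 0.5f;                              // apply cached master/stream volume -> 0.5
//     // left and right are then re-packed with gain_minifloat_pack()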
2026
2027 status_t Track::setSyncEvent(
2028 const sp<audioflinger::SyncEvent>& event)
2029 {
2030 if (isTerminated() || mState == PAUSED ||
2031 ((framesReady() == 0) && ((mSharedBuffer != 0) ||
2032 (mState == STOPPED)))) {
2033 ALOGW("%s(%d): in invalid state %d on session %d %s mode, framesReady %zu",
2034 __func__, mId,
2035 (int)mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
2036 event->cancel();
2037 return INVALID_OPERATION;
2038 }
2039 (void) TrackBase::setSyncEvent(event);
2040 return NO_ERROR;
2041 }
2042
2043 void Track::invalidate()
2044 {
2045 TrackBase::invalidate();
2046 signalClientFlag(CBLK_INVALID);
2047 }
2048
2049 void Track::disable()
2050 {
2051 // TODO(b/142394888): the filling status should also be reset to filling
2052 signalClientFlag(CBLK_DISABLED);
2053 }
2054
2055 bool Track::isDisabled() const {
2056 audio_track_cblk_t* cblk = mCblk;
2057 return (cblk != nullptr)
2058 && ((android_atomic_release_load(&cblk->mFlags) & CBLK_DISABLED) != 0);
2059 }
2060
2061 void Track::signalClientFlag(int32_t flag)
2062 {
2063 // FIXME should use proxy, and needs work
2064 audio_track_cblk_t* cblk = mCblk;
2065 android_atomic_or(flag, &cblk->mFlags);
2066 android_atomic_release_store(0x40000000, &cblk->mFutex);
2067 // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
2068 (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
2069 }
2070
2071 status_t Track::getDualMonoMode(audio_dual_mono_mode_t* mode) const
2072 {
2073 status_t status = INVALID_OPERATION;
2074 if (isOffloadedOrDirect()) {
2075 const sp<IAfThreadBase> thread = mThread.promote();
2076 if (thread != nullptr) {
2077 auto* const t = thread->asIAfPlaybackThread().get();
2078 audio_utils::lock_guard _l(t->mutex());
2079 status = t->getOutput_l()->stream->getDualMonoMode(mode);
2080 ALOGD_IF((status == NO_ERROR) && (mDualMonoMode != *mode),
2081 "%s: mode %d inconsistent", __func__, mDualMonoMode);
2082 }
2083 }
2084 return status;
2085 }
2086
2087 status_t Track::setDualMonoMode(audio_dual_mono_mode_t mode)
2088 {
2089 status_t status = INVALID_OPERATION;
2090 if (isOffloadedOrDirect()) {
2091 const sp<IAfThreadBase> thread = mThread.promote();
2092 if (thread != nullptr) {
2093 auto* const t = thread->asIAfPlaybackThread().get();
2094 audio_utils::lock_guard lock(t->mutex());
2095 status = t->getOutput_l()->stream->setDualMonoMode(mode);
2096 if (status == NO_ERROR) {
2097 mDualMonoMode = mode;
2098 }
2099 }
2100 }
2101 return status;
2102 }
2103
2104 status_t Track::getAudioDescriptionMixLevel(float* leveldB) const
2105 {
2106 status_t status = INVALID_OPERATION;
2107 if (isOffloadedOrDirect()) {
2108 sp<IAfThreadBase> thread = mThread.promote();
2109 if (thread != nullptr) {
2110 auto* const t = thread->asIAfPlaybackThread().get();
2111 audio_utils::lock_guard lock(t->mutex());
2112 status = t->getOutput_l()->stream->getAudioDescriptionMixLevel(leveldB);
2113 ALOGD_IF((status == NO_ERROR) && (mAudioDescriptionMixLevel != *leveldB),
2114 "%s: level %.3f inconsistent", __func__, mAudioDescriptionMixLevel);
2115 }
2116 }
2117 return status;
2118 }
2119
2120 status_t Track::setAudioDescriptionMixLevel(float leveldB)
2121 {
2122 status_t status = INVALID_OPERATION;
2123 if (isOffloadedOrDirect()) {
2124 const sp<IAfThreadBase> thread = mThread.promote();
2125 if (thread != nullptr) {
2126 auto* const t = thread->asIAfPlaybackThread().get();
2127 audio_utils::lock_guard lock(t->mutex());
2128 status = t->getOutput_l()->stream->setAudioDescriptionMixLevel(leveldB);
2129 if (status == NO_ERROR) {
2130 mAudioDescriptionMixLevel = leveldB;
2131 }
2132 }
2133 }
2134 return status;
2135 }
2136
2137 status_t Track::getPlaybackRateParameters(
2138 audio_playback_rate_t* playbackRate) const
2139 {
2140 status_t status = INVALID_OPERATION;
2141 if (isOffloadedOrDirect()) {
2142 const sp<IAfThreadBase> thread = mThread.promote();
2143 if (thread != nullptr) {
2144 auto* const t = thread->asIAfPlaybackThread().get();
2145 audio_utils::lock_guard lock(t->mutex());
2146 if (auto* const output = t->getOutput_l()) {
2147 status = output->stream->getPlaybackRateParameters(playbackRate);
2148 }
2149 ALOGD_IF((status == NO_ERROR) &&
2150 !isAudioPlaybackRateEqual(mPlaybackRateParameters, *playbackRate),
2151 "%s: playbackRate inconsistent", __func__);
2152 }
2153 }
2154 return status;
2155 }
2156
2157 status_t Track::setPlaybackRateParameters(
2158 const audio_playback_rate_t& playbackRate)
2159 {
2160 status_t status = INVALID_OPERATION;
2161 if (isOffloadedOrDirect()) {
2162 const sp<IAfThreadBase> thread = mThread.promote();
2163 if (thread != nullptr) {
2164 auto* const t = thread->asIAfPlaybackThread().get();
2165 audio_utils::lock_guard lock(t->mutex());
2166 if (auto* const output = t->getOutput_l()) {
2167 status = output->stream->setPlaybackRateParameters(playbackRate);
2168 if (status == NO_ERROR) {
2169 mPlaybackRateParameters = playbackRate;
2170 }
2171 }
2172 }
2173 }
2174 return status;
2175 }
2176
2177 //To be called with thread lock held
2178 bool Track::isResumePending() const {
2179 if (mState == RESUMING) {
2180 return true;
2181 }
2182 /* Resume is pending if track was stopping before pause was called */
2183 if (mState == STOPPING_1 &&
2184 mResumeToStopping) {
2185 return true;
2186 }
2187
2188 return false;
2189 }
2190
2191 //To be called with thread lock held
2192 void Track::resumeAck() {
2193 if (mState == RESUMING) {
2194 mState = ACTIVE;
2195 }
2196
2197 // Other possibility of pending resume is stopping_1 state
2198 // Do not update the state from stopping as this prevents
2199 // drain being called.
2200 if (mState == STOPPING_1) {
2201 mResumeToStopping = false;
2202 }
2203 }
2204
2205 //To be called with thread lock held
2206 void Track::updateTrackFrameInfo(
2207 int64_t trackFramesReleased, int64_t sinkFramesWritten,
2208 uint32_t halSampleRate, const ExtendedTimestamp &timeStamp) {
2209 // Make the kernel frametime available.
2210 const FrameTime ft{
2211 timeStamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
2212 timeStamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]};
2213 // ALOGD("FrameTime: %lld %lld", (long long)ft.frames, (long long)ft.timeNs);
2214 mKernelFrameTime.store(ft);
2215 if (!audio_is_linear_pcm(mFormat)) {
2216 return;
2217 }
2218
2219 //update frame map
2220 mFrameMap.push(trackFramesReleased, sinkFramesWritten);
2221
2222 // adjust server times and set drained state.
2223 //
2224 // Our timestamps are only updated when the track is on the Thread active list.
2225 // We need to ensure that tracks are not removed before full drain.
2226 ExtendedTimestamp local = timeStamp;
2227 bool drained = true; // default assume drained, if no server info found
2228 bool checked = false;
2229 for (int i = ExtendedTimestamp::LOCATION_MAX - 1;
2230 i >= ExtendedTimestamp::LOCATION_SERVER; --i) {
2231 // Lookup the track frame corresponding to the sink frame position.
2232 if (local.mTimeNs[i] > 0) {
2233 local.mPosition[i] = mFrameMap.findX(local.mPosition[i]);
2234 // check drain state from the latest stage in the pipeline.
2235 if (!checked && i <= ExtendedTimestamp::LOCATION_KERNEL) {
2236 drained = local.mPosition[i] >= mAudioTrackServerProxy->framesReleased();
2237 checked = true;
2238 }
2239 }
2240 }
2241
2242 ALOGV("%s: trackFramesReleased:%lld sinkFramesWritten:%lld setDrained: %d",
2243 __func__, (long long)trackFramesReleased, (long long)sinkFramesWritten, drained);
2244 mAudioTrackServerProxy->setDrained(drained);
2245 // Set correction for flushed frames that are not accounted for in released.
2246 local.mFlushed = mAudioTrackServerProxy->framesFlushed();
2247 mServerProxy->setTimestamp(local);
2248
2249 // Compute latency info.
2250 const bool useTrackTimestamp = !drained;
2251 const double latencyMs = useTrackTimestamp
2252 ? local.getOutputServerLatencyMs(sampleRate())
2253 : timeStamp.getOutputServerLatencyMs(halSampleRate);
2254
2255 mServerLatencyFromTrack.store(useTrackTimestamp);
2256 mServerLatencyMs.store(latencyMs);
2257
2258 if (mLogStartCountdown > 0
2259 && local.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] > 0
2260 && local.mPosition[ExtendedTimestamp::LOCATION_KERNEL] > 0)
2261 {
2262 if (mLogStartCountdown > 1) {
2263 --mLogStartCountdown;
2264 } else if (latencyMs < mLogLatencyMs) { // wait for latency to stabilize (dip)
2265 mLogStartCountdown = 0;
2266 // startup is the difference in times for the current timestamp and our start
2267 double startUpMs =
2268 (local.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] - mLogStartTimeNs) * 1e-6;
2269 // adjust for frames played.
2270 startUpMs -= (local.mPosition[ExtendedTimestamp::LOCATION_KERNEL] - mLogStartFrames)
2271 * 1e3 / mSampleRate;
2272 ALOGV("%s: latencyMs:%lf startUpMs:%lf"
2273 " localTime:%lld startTime:%lld"
2274 " localPosition:%lld startPosition:%lld",
2275 __func__, latencyMs, startUpMs,
2276 (long long)local.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
2277 (long long)mLogStartTimeNs,
2278 (long long)local.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
2279 (long long)mLogStartFrames);
2280 mTrackMetrics.logLatencyAndStartup(latencyMs, startUpMs);
2281 }
2282 mLogLatencyMs = latencyMs;
2283 }
2284 }
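// Illustrative sketch of the startup computation above, with assumed values:
// the kernel timestamp arrives 500 ms after mLogStartTimeNs and 19200 frames
// have been presented at mSampleRate = 48000:
//
//     double startUpMs = 500.0;                // elapsed wall-clock time
//     startUpMs -= 19200 * 1e3 / 48000.0;      // minus 400 ms of audio actually played
//     // -> ~100 ms elapsed before frames started flowing to the kernel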
2285
2286 bool Track::AudioVibrationController::setMute(bool muted) {
2287 const sp<IAfThreadBase> thread = mTrack->mThread.promote();
2288 if (thread != 0) {
2289 // Lock for updating mHapticPlaybackEnabled.
2290 audio_utils::lock_guard _l(thread->mutex());
2291 auto* const playbackThread = thread->asIAfPlaybackThread().get();
2292 if ((mTrack->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
2293 && playbackThread->hapticChannelCount() > 0) {
2294 ALOGD("%s, haptic playback was %s for track %d",
2295 __func__, muted ? "muted" : "unmuted", mTrack->id());
2296 mTrack->setHapticPlaybackEnabled(!muted);
2297 return true;
2298 }
2299 }
2300 return false;
2301 }
2302
2303 binder::Status Track::AudioVibrationController::mute(
2304 /*out*/ bool *ret) {
2305 *ret = setMute(true);
2306 return binder::Status::ok();
2307 }
2308
2309 binder::Status Track::AudioVibrationController::unmute(
2310 /*out*/ bool *ret) {
2311 *ret = setMute(false);
2312 return binder::Status::ok();
2313 }
2314
2315 // ----------------------------------------------------------------------------
2316 #undef LOG_TAG
2317 #define LOG_TAG "AF::OutputTrack"
2318
2319 /* static */
2320 sp<IAfOutputTrack> IAfOutputTrack::create(
2321 IAfPlaybackThread* playbackThread,
2322 IAfDuplicatingThread* sourceThread,
2323 uint32_t sampleRate,
2324 audio_format_t format,
2325 audio_channel_mask_t channelMask,
2326 size_t frameCount,
2327 const AttributionSourceState& attributionSource) {
2328 return sp<OutputTrack>::make(
2329 playbackThread,
2330 sourceThread,
2331 sampleRate,
2332 format,
2333 channelMask,
2334 frameCount,
2335 attributionSource);
2336 }
2337
2338 OutputTrack::OutputTrack(
2339 IAfPlaybackThread* playbackThread,
2340 IAfDuplicatingThread* sourceThread,
2341 uint32_t sampleRate,
2342 audio_format_t format,
2343 audio_channel_mask_t channelMask,
2344 size_t frameCount,
2345 const AttributionSourceState& attributionSource)
2346 :
2347 AfPlaybackCommon(*this, *playbackThread, /* volume= */ 0.0f,
2348 /* muted= */ false,
2349 AUDIO_ATTRIBUTES_INITIALIZER, attributionSource, /* isOffloadOrMmap= */ false,
2350 /* shouldPlaybackHarden= */ false),
2351 Track(playbackThread, NULL, AUDIO_STREAM_PATCH,
2352           AUDIO_ATTRIBUTES_INITIALIZER,
2353 sampleRate, format, channelMask, frameCount,
2354 nullptr /* buffer */, (size_t)0 /* bufferSize */, nullptr /* sharedBuffer */,
2355 AUDIO_SESSION_NONE, getpid(), attributionSource, AUDIO_OUTPUT_FLAG_NONE,
2356 TYPE_OUTPUT),
2357 mActive(false), mSourceThread(sourceThread)
2358 {
2359 if (mCblk != NULL) {
2360 mOutBuffer.frameCount = 0;
2361 playbackThread->addOutputTrack_l(this);
2362 ALOGV("%s(): mCblk %p, mBuffer %p, "
2363 "frameCount %zu, mChannelMask 0x%08x",
2364 __func__, mCblk, mBuffer,
2365 frameCount, mChannelMask);
2366 // since client and server are in the same process,
2367 // the buffer has the same virtual address on both sides
2368 mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize,
2369 true /*clientInServer*/);
2370 mClientProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY);
2371 mClientProxy->setSendLevel(0.0);
2372 mClientProxy->setSampleRate(sampleRate);
2373 } else {
2374 ALOGW("%s(%d): Error creating output track on thread %d",
2375 __func__, mId, (int)mThreadIoHandle);
2376 }
2377 }
2378
2379 OutputTrack::~OutputTrack()
2380 {
2381 clearBufferQueue();
2382 // superclass destructor will now delete the server proxy and shared memory both refer to
2383 }
2384
2385 status_t OutputTrack::start(AudioSystem::sync_event_t event,
2386 audio_session_t triggerSession)
2387 {
2388 status_t status = Track::start(event, triggerSession);
2389 if (status != NO_ERROR) {
2390 return status;
2391 }
2392
2393 mActive = true;
2394 mRetryCount = 127;
2395 return status;
2396 }
2397
2398 void OutputTrack::stop()
2399 {
2400 Track::stop();
2401 clearBufferQueue();
2402 mOutBuffer.frameCount = 0;
2403 mActive = false;
2404 }
2405
2406 ssize_t OutputTrack::write(void* data, uint32_t frames)
2407 {
2408 if (!mActive && frames != 0) {
2409 const sp<IAfThreadBase> thread = mThread.promote();
2410 if (thread != nullptr && thread->inStandby()) {
2411 // preload one silent buffer to trigger mixer on start()
2412 ClientProxy::Buffer buf { .mFrameCount = mClientProxy->getStartThresholdInFrames() };
2413 status_t status = mClientProxy->obtainBuffer(&buf);
2414 if (status != NO_ERROR && status != NOT_ENOUGH_DATA && status != WOULD_BLOCK) {
2415 ALOGE("%s(%d): could not obtain buffer on start", __func__, mId);
2416 return 0;
2417 }
2418 memset(buf.mRaw, 0, buf.mFrameCount * mFrameSize);
2419 mClientProxy->releaseBuffer(&buf);
2420
2421 (void) start();
2422
2423 // wait for HAL stream to start before sending actual audio. Doing this on each
2424             // OutputTrack ensures that playback start is synchronized across all output streams.
2425             // If another OutputTrack has already started it can underrun, but this is OK
2426 // as only silence has been played so far and the retry count is very high on
2427 // OutputTrack.
2428 auto* const pt = thread->asIAfPlaybackThread().get();
2429 if (!pt->waitForHalStart()) {
2430 ALOGW("%s(%d): timeout waiting for thread to exit standby", __func__, mId);
2431 stop();
2432 return 0;
2433 }
2434
2435 // enqueue the first buffer and exit so that other OutputTracks will also start before
2436             // write() is called again and this buffer is actually consumed.
2437 Buffer firstBuffer;
2438 firstBuffer.frameCount = frames;
2439 firstBuffer.raw = data;
2440 queueBuffer(firstBuffer);
2441 return frames;
2442 } else {
2443 (void) start();
2444 }
2445 }
2446
2447 Buffer *pInBuffer;
2448 Buffer inBuffer;
2449 inBuffer.frameCount = frames;
2450 inBuffer.raw = data;
2451 uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs();
2452 while (waitTimeLeftMs) {
2453 // First write pending buffers, then new data
2454 if (mBufferQueue.size()) {
2455 pInBuffer = mBufferQueue.itemAt(0);
2456 } else {
2457 pInBuffer = &inBuffer;
2458 }
2459
2460 if (pInBuffer->frameCount == 0) {
2461 break;
2462 }
2463
2464 if (mOutBuffer.frameCount == 0) {
2465 mOutBuffer.frameCount = pInBuffer->frameCount;
2466 nsecs_t startTime = systemTime();
2467 status_t status = obtainBuffer(&mOutBuffer, waitTimeLeftMs);
2468 if (status != NO_ERROR && status != NOT_ENOUGH_DATA) {
2469 ALOGV("%s(%d): thread %d no more output buffers; status %d",
2470 __func__, mId,
2471 (int)mThreadIoHandle, status);
2472 break;
2473 }
2474 uint32_t waitTimeMs = (uint32_t)ns2ms(systemTime() - startTime);
2475 if (waitTimeLeftMs >= waitTimeMs) {
2476 waitTimeLeftMs -= waitTimeMs;
2477 } else {
2478 waitTimeLeftMs = 0;
2479 }
2480 if (status == NOT_ENOUGH_DATA) {
2481 deferRestartIfDisabled();
2482 continue;
2483 }
2484 }
2485
2486 uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
2487 pInBuffer->frameCount;
2488 memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * mFrameSize);
2489 Proxy::Buffer buf;
2490 buf.mFrameCount = outFrames;
2491 buf.mRaw = NULL;
2492 mClientProxy->releaseBuffer(&buf);
2493 deferRestartIfDisabled();
2494 pInBuffer->frameCount -= outFrames;
2495 pInBuffer->raw = (int8_t *)pInBuffer->raw + outFrames * mFrameSize;
2496 mOutBuffer.frameCount -= outFrames;
2497 mOutBuffer.raw = (int8_t *)mOutBuffer.raw + outFrames * mFrameSize;
2498
2499 if (pInBuffer->frameCount == 0) {
2500 if (mBufferQueue.size()) {
2501 mBufferQueue.removeAt(0);
2502 free(pInBuffer->mBuffer);
2503 if (pInBuffer != &inBuffer) {
2504 delete pInBuffer;
2505 }
2506 ALOGV("%s(%d): thread %d released overflow buffer %zu",
2507 __func__, mId,
2508 (int)mThreadIoHandle, mBufferQueue.size());
2509 } else {
2510 break;
2511 }
2512 }
2513 }
2514
2515 // If we could not write all frames, allocate a buffer and queue it for next time.
2516 if (inBuffer.frameCount) {
2517 const sp<IAfThreadBase> thread = mThread.promote();
2518 if (thread != nullptr && !thread->inStandby()) {
2519 queueBuffer(inBuffer);
2520 }
2521 }
2522
2523 // Calling write() with a 0 length buffer means that no more data will be written:
2524 // We rely on stop() to set the appropriate flags to allow the remaining frames to play out.
2525 if (frames == 0 && mBufferQueue.size() == 0 && mActive) {
2526 stop();
2527 }
2528
2529 return frames - inBuffer.frameCount; // number of frames consumed.
2530 }
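// Illustrative sketch of the return value above, with assumed numbers: frames
// copied into overflow buffers by queueBuffer() count as consumed (it zeroes
// inBuffer.frameCount), so only truly dropped frames reduce the result.
//
//     // write(data, 1024) with 256 frames left over and the overflow queue full:
//     // inBuffer.frameCount stays at 256, so the call returns 1024 - 256 = 768.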
2531
2532 void OutputTrack::queueBuffer(Buffer& inBuffer) {
2533
2534 if (mBufferQueue.size() < kMaxOverFlowBuffers) {
2535 Buffer *pInBuffer = new Buffer;
2536 const size_t bufferSize = inBuffer.frameCount * mFrameSize;
2537 pInBuffer->mBuffer = malloc(bufferSize);
2538 LOG_ALWAYS_FATAL_IF(pInBuffer->mBuffer == nullptr,
2539 "%s: Unable to malloc size %zu", __func__, bufferSize);
2540 pInBuffer->frameCount = inBuffer.frameCount;
2541 pInBuffer->raw = pInBuffer->mBuffer;
2542 memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * mFrameSize);
2543 mBufferQueue.add(pInBuffer);
2544 ALOGV("%s(%d): thread %d adding overflow buffer %zu", __func__, mId,
2545 (int)mThreadIoHandle, mBufferQueue.size());
2546 // audio data is consumed (stored locally); set frameCount to 0.
2547 inBuffer.frameCount = 0;
2548 } else {
2549 ALOGW("%s(%d): thread %d no more overflow buffers",
2550 __func__, mId, (int)mThreadIoHandle);
2551 // TODO: return error for this.
2552 }
2553 }
2554
2555 void OutputTrack::copyMetadataTo(MetadataInserter& backInserter) const
2556 {
2557 audio_utils::lock_guard lock(trackMetadataMutex());
2558 backInserter = std::copy(mTrackMetadatas.begin(), mTrackMetadatas.end(), backInserter);
2559 }
2560
2561 void OutputTrack::setMetadatas(const SourceMetadatas& metadatas) {
2562 {
2563 audio_utils::lock_guard lock(trackMetadataMutex());
2564 mTrackMetadatas = metadatas;
2565 }
2566 // No need to adjust metadata track volumes as OutputTrack volumes are always 0dBFS.
2567 setMetadataHasChanged();
2568 }
2569
2570 status_t OutputTrack::obtainBuffer(
2571 AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
2572 {
2573 ClientProxy::Buffer buf;
2574 buf.mFrameCount = buffer->frameCount;
2575 struct timespec timeout;
2576 timeout.tv_sec = waitTimeMs / 1000;
2577 timeout.tv_nsec = (int) (waitTimeMs % 1000) * 1000000;
2578 status_t status = mClientProxy->obtainBuffer(&buf, &timeout);
2579 buffer->frameCount = buf.mFrameCount;
2580 buffer->raw = buf.mRaw;
2581 return status;
2582 }
2583
2584 void OutputTrack::clearBufferQueue()
2585 {
2586 size_t size = mBufferQueue.size();
2587
2588 for (size_t i = 0; i < size; i++) {
2589 Buffer *pBuffer = mBufferQueue.itemAt(i);
2590 free(pBuffer->mBuffer);
2591 delete pBuffer;
2592 }
2593 mBufferQueue.clear();
2594 }
2595
2596 void OutputTrack::restartIfDisabled()
2597 {
2598 int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
2599 if (mActive && (flags & CBLK_DISABLED)) {
2600 start();
2601 }
2602 }
2603
2604 // ----------------------------------------------------------------------------
2605 #undef LOG_TAG
2606 #define LOG_TAG "AF::PatchTrack"
2607
2608 /* static */
2609 sp<IAfPatchTrack> IAfPatchTrack::create(
2610 IAfPlaybackThread* playbackThread,
2611 audio_stream_type_t streamType,
2612 uint32_t sampleRate,
2613 audio_channel_mask_t channelMask,
2614 audio_format_t format,
2615 size_t frameCount,
2616 void* buffer,
2617 size_t bufferSize,
2618 audio_output_flags_t flags,
2619 const Timeout& timeout,
2620 size_t frameCountToBeReady, /** Default behaviour is to start
2621 * as soon as possible to have
2622 * the lowest possible latency
2623 * even if it might glitch. */
2624 float speed,
2625 float volume,
2626 bool muted)
2627 {
2628 return sp<PatchTrack>::make(
2629 playbackThread,
2630 streamType,
2631 sampleRate,
2632 channelMask,
2633 format,
2634 frameCount,
2635 buffer,
2636 bufferSize,
2637 flags,
2638 timeout,
2639 frameCountToBeReady,
2640 speed,
2641 volume,
2642 muted);
2643 }
2644
2645 PatchTrack::PatchTrack(IAfPlaybackThread* playbackThread,
2646 audio_stream_type_t streamType,
2647 uint32_t sampleRate,
2648 audio_channel_mask_t channelMask,
2649 audio_format_t format,
2650 size_t frameCount,
2651 void *buffer,
2652 size_t bufferSize,
2653 audio_output_flags_t flags,
2654 const Timeout& timeout,
2655 size_t frameCountToBeReady,
2656 float speed,
2657 float volume,
2658 bool muted)
2659 : AfPlaybackCommon(*this, *playbackThread, volume, muted,
2660 AUDIO_ATTRIBUTES_INITIALIZER,
2661 audioServerAttributionSource(getpid()),
2662 /* isOffloadOrMmap= */ false,
2663 /* shouldPlaybackHarden= */ false),
2664 Track(playbackThread, NULL, streamType,
2665 AUDIO_ATTRIBUTES_INITIALIZER,
2666 sampleRate, format, channelMask, frameCount,
2667 buffer, bufferSize, nullptr /* sharedBuffer */,
2668 AUDIO_SESSION_NONE, getpid(), audioServerAttributionSource(getpid()), flags,
2669 TYPE_PATCH, AUDIO_PORT_HANDLE_NONE, frameCountToBeReady, speed,
2670 false /*isSpatialized*/, false /*isBitPerfect*/, volume, muted),
2671 PatchTrackBase(mCblk ? new AudioTrackClientProxy(mCblk, mBuffer, frameCount, mFrameSize,
2672 true /*clientInServer*/) : nullptr,
2673 playbackThread, timeout)
2674 {
2675 if (mProxy != nullptr) {
2676 sp<AudioTrackClientProxy>::cast(mProxy)->setPlaybackRate({
2677 /* .mSpeed = */ speed,
2678 /* .mPitch = */ AUDIO_TIMESTRETCH_PITCH_NORMAL,
2679 /* .mStretchMode = */ AUDIO_TIMESTRETCH_STRETCH_DEFAULT,
2680 /* .mFallbackMode = */ AUDIO_TIMESTRETCH_FALLBACK_FAIL
2681 });
2682 }
2683 ALOGV("%s(%d): sampleRate %d mPeerTimeout %d.%03d sec",
2684 __func__, mId, sampleRate,
2685 (int)mPeerTimeout.tv_sec,
2686 (int)(mPeerTimeout.tv_nsec / 1000000));
2687 }
2688
2689 PatchTrack::~PatchTrack()
2690 {
2691 ALOGV("%s(%d)", __func__, mId);
2692 }
2693
2694 size_t PatchTrack::framesReady() const
2695 {
2696 if (mPeerProxy && mPeerProxy->producesBufferOnDemand()) {
2697 return std::numeric_limits<size_t>::max();
2698 } else {
2699 return Track::framesReady();
2700 }
2701 }
2702
2703 status_t PatchTrack::start(AudioSystem::sync_event_t event,
2704 audio_session_t triggerSession)
2705 {
2706 status_t status = Track::start(event, triggerSession);
2707 if (status != NO_ERROR) {
2708 return status;
2709 }
2710 android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
2711 return status;
2712 }
2713
2714 // AudioBufferProvider interface
2715 status_t PatchTrack::getNextBuffer(
2716 AudioBufferProvider::Buffer* buffer)
2717 {
2718 ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
2719 Proxy::Buffer buf;
2720 buf.mFrameCount = buffer->frameCount;
2721 if (ATRACE_ENABLED()) {
2722 std::string traceName("PTnReq");
2723 traceName += std::to_string(id());
2724 ATRACE_INT(traceName.c_str(), buf.mFrameCount);
2725 }
2726 status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
2727 ALOGV_IF(status != NO_ERROR, "%s(%d): getNextBuffer status %d", __func__, mId, status);
2728 buffer->frameCount = buf.mFrameCount;
2729 if (ATRACE_ENABLED()) {
2730 std::string traceName("PTnObt");
2731 traceName += std::to_string(id());
2732 ATRACE_INT(traceName.c_str(), buf.mFrameCount);
2733 }
2734 if (buf.mFrameCount == 0) {
2735 return WOULD_BLOCK;
2736 }
2737 status = Track::getNextBuffer(buffer);
2738 return status;
2739 }
2740
2741 void PatchTrack::releaseBuffer(AudioBufferProvider::Buffer* buffer)
2742 {
2743 ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
2744 Proxy::Buffer buf;
2745 buf.mFrameCount = buffer->frameCount;
2746 buf.mRaw = buffer->raw;
2747 mPeerProxy->releaseBuffer(&buf);
2748 TrackBase::releaseBuffer(buffer); // Note: this is the base class.
2749 }
2750
2751 status_t PatchTrack::obtainBuffer(Proxy::Buffer* buffer,
2752 const struct timespec *timeOut)
2753 {
2754 status_t status = NO_ERROR;
2755 static const int32_t kMaxTries = 5;
2756 int32_t tryCounter = kMaxTries;
2757 const size_t originalFrameCount = buffer->mFrameCount;
2758 do {
2759 if (status == NOT_ENOUGH_DATA) {
2760 deferRestartIfDisabled();
2761 buffer->mFrameCount = originalFrameCount; // cleared on error, must be restored.
2762 }
2763 status = mProxy->obtainBuffer(buffer, timeOut);
2764 } while ((status == NOT_ENOUGH_DATA) && (tryCounter-- > 0));
2765 return status;
2766 }
2767
2768 void PatchTrack::releaseBuffer(Proxy::Buffer* buffer)
2769 {
2770 mProxy->releaseBuffer(buffer);
2771 deferRestartIfDisabled();
2772
2773 // Check if the PatchTrack has enough data to write once in releaseBuffer().
2774 // If not, prevent an underrun from occurring by moving the track into FS_FILLING;
2775 // this logic avoids glitches when suspending A2DP with AudioPlaybackCapture.
2776 // TODO: perhaps underrun avoidance could be a track property checked in isReady() instead.
2777 if (mFillingStatus == FS_ACTIVE
2778 && audio_is_linear_pcm(mFormat)
2779 && !isOffloadedOrDirect()) {
2780 if (const sp<IAfThreadBase> thread = mThread.promote();
2781 thread != 0) {
2782 auto* const playbackThread = thread->asIAfPlaybackThread().get();
2783 const size_t frameCount = playbackThread->frameCount() * sampleRate()
2784 / playbackThread->sampleRate();
2785 if (framesReady() < frameCount) {
2786 ALOGD("%s(%d) Not enough data, wait for buffer to fill", __func__, mId);
2787 mFillingStatus = FS_FILLING;
2788 }
2789 }
2790 }
2791 }
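// Illustrative sketch of the threshold above, with an assumed playback thread
// buffer of 960 frames at 48000 Hz feeding a 44100 Hz patch track:
//
//     const size_t frameCount = 960 * 44100 / 48000;   // = 882 track frames
//     // framesReady() < 882 sends the track back to FS_FILLING until it refills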
2792
2793 void PatchTrack::restartIfDisabled()
2794 {
2795 if (android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags) & CBLK_DISABLED) {
2796 ALOGW("%s(%d): disabled due to previous underrun, restarting", __func__, mId);
2797 start();
2798 }
2799 }
2800
2801 // ----------------------------------------------------------------------------
2802 // Record
2803 // ----------------------------------------------------------------------------
2804
2805
2806 #undef LOG_TAG
2807 #define LOG_TAG "AF::RecordHandle"
2808
2809 class RecordHandle : public android::media::BnAudioRecord {
2810 public:
2811 explicit RecordHandle(const sp<IAfRecordTrack>& recordTrack);
2812 ~RecordHandle() override;
2813 binder::Status start(int /*AudioSystem::sync_event_t*/ event,
2814 int /*audio_session_t*/ triggerSession) final;
2815 binder::Status stop() final;
2816 binder::Status getActiveMicrophones(
2817 std::vector<media::MicrophoneInfoFw>* activeMicrophones) final;
2818 binder::Status setPreferredMicrophoneDirection(
2819 int /*audio_microphone_direction_t*/ direction) final;
2820 binder::Status setPreferredMicrophoneFieldDimension(float zoom) final;
2821 binder::Status shareAudioHistory(
2822 const std::string& sharedAudioPackageName, int64_t sharedAudioStartMs) final;
2823 binder::Status setParameters(const ::std::string& keyValuePairs) final;
2824
2825 private:
2826 const sp<IAfRecordTrack> mRecordTrack;
2827
2828 // for use from destructor
2829 void stop_nonvirtual();
2830 };
2831
2832 /* static */
2833 sp<media::IAudioRecord> IAfRecordTrack::createIAudioRecordAdapter(
2834 const sp<IAfRecordTrack>& recordTrack) {
2835 return sp<RecordHandle>::make(recordTrack);
2836 }
2837
2838 RecordHandle::RecordHandle(
2839 const sp<IAfRecordTrack>& recordTrack)
2840 : BnAudioRecord(),
2841 mRecordTrack(recordTrack)
2842 {
2843 setMinSchedulerPolicy(SCHED_NORMAL, ANDROID_PRIORITY_AUDIO);
2844 setInheritRt(true);
2845 }
2846
2847 RecordHandle::~RecordHandle() {
2848 stop_nonvirtual();
2849 mRecordTrack->destroy();
2850 }
2851
2852 binder::Status RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
2853 int /*audio_session_t*/ triggerSession) {
2854 ALOGV("%s()", __func__);
2855 return binderStatusFromStatusT(
2856 mRecordTrack->start((AudioSystem::sync_event_t)event, (audio_session_t) triggerSession));
2857 }
2858
2859 binder::Status RecordHandle::stop() {
2860 stop_nonvirtual();
2861 return binder::Status::ok();
2862 }
2863
2864 void RecordHandle::stop_nonvirtual() {
2865 ALOGV("%s()", __func__);
2866 mRecordTrack->stop();
2867 }
2868
2869 binder::Status RecordHandle::getActiveMicrophones(
2870 std::vector<media::MicrophoneInfoFw>* activeMicrophones) {
2871 ALOGV("%s()", __func__);
2872 return binderStatusFromStatusT(mRecordTrack->getActiveMicrophones(activeMicrophones));
2873 }
2874
2875 binder::Status RecordHandle::setPreferredMicrophoneDirection(
2876 int /*audio_microphone_direction_t*/ direction) {
2877 ALOGV("%s()", __func__);
2878 return binderStatusFromStatusT(mRecordTrack->setPreferredMicrophoneDirection(
2879 static_cast<audio_microphone_direction_t>(direction)));
2880 }
2881
2882 binder::Status RecordHandle::setPreferredMicrophoneFieldDimension(float zoom) {
2883 ALOGV("%s()", __func__);
2884 return binderStatusFromStatusT(mRecordTrack->setPreferredMicrophoneFieldDimension(zoom));
2885 }
2886
2887 binder::Status RecordHandle::shareAudioHistory(
2888 const std::string& sharedAudioPackageName, int64_t sharedAudioStartMs) {
2889 return binderStatusFromStatusT(
2890 mRecordTrack->shareAudioHistory(sharedAudioPackageName, sharedAudioStartMs));
2891 }
2892
2893 binder::Status RecordHandle::setParameters(const ::std::string& keyValuePairs) {
2894 return binderStatusFromStatusT(mRecordTrack->setParameters(
2895 String8(keyValuePairs.c_str())));
2896 }
2897
2898 // ----------------------------------------------------------------------------
2899 #undef LOG_TAG
2900 #define LOG_TAG "AF::RecordTrack"
2901
2902
2903 /* static */
2904 sp<IAfRecordTrack> IAfRecordTrack::create(IAfRecordThread* thread,
2905 const sp<Client>& client,
2906 const audio_attributes_t& attr,
2907 uint32_t sampleRate,
2908 audio_format_t format,
2909 audio_channel_mask_t channelMask,
2910 size_t frameCount,
2911 void* buffer,
2912 size_t bufferSize,
2913 audio_session_t sessionId,
2914 pid_t creatorPid,
2915 const AttributionSourceState& attributionSource,
2916 audio_input_flags_t flags,
2917 track_type type,
2918 audio_port_handle_t portId,
2919 int32_t startFrames)
2920 {
2921 return sp<RecordTrack>::make(
2922 thread,
2923 client,
2924 attr,
2925 sampleRate,
2926 format,
2927 channelMask,
2928 frameCount,
2929 buffer,
2930 bufferSize,
2931 sessionId,
2932 creatorPid,
2933 attributionSource,
2934 flags,
2935 type,
2936 portId,
2937 startFrames);
2938 }
2939
2940 // RecordTrack constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
2941 RecordTrack::RecordTrack(
2942 IAfRecordThread* thread,
2943 const sp<Client>& client,
2944 const audio_attributes_t& attr,
2945 uint32_t sampleRate,
2946 audio_format_t format,
2947 audio_channel_mask_t channelMask,
2948 size_t frameCount,
2949 void *buffer,
2950 size_t bufferSize,
2951 audio_session_t sessionId,
2952 pid_t creatorPid,
2953 const AttributionSourceState& attributionSource,
2954 audio_input_flags_t flags,
2955 track_type type,
2956 audio_port_handle_t portId,
2957 int32_t startFrames)
2958 : TrackBase(thread, client, attr, sampleRate, format,
2959 channelMask, frameCount, buffer, bufferSize, sessionId,
2960 creatorPid,
2961 VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)),
2962 false /*isOut*/,
2963 (type == TYPE_DEFAULT) ?
2964 ((flags & AUDIO_INPUT_FLAG_FAST) ? ALLOC_PIPE : ALLOC_CBLK) :
2965 ((buffer == NULL) ? ALLOC_LOCAL : ALLOC_NONE),
2966 type, portId,
2967 std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_RECORD) + std::to_string(portId)),
2968 mOverflow(false),
2969 mResamplerBufferProvider(NULL), // initialize in case of early constructor exit
2970 mRecordBufferConverter(NULL),
2971 mFlags(flags),
2972 mSilenced(false),
2973 mStartFrames(startFrames)
2974 {
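    // If TrackBase could not allocate the control block, bail out early;
    // initCheck() reports the failure to the caller.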
2975 if (mCblk == NULL) {
2976 return;
2977 }
2978
2979 if (!isDirect()) {
2980 mRecordBufferConverter = new RecordBufferConverter(
2981 thread->channelMask(), thread->format(), thread->sampleRate(),
2982 channelMask, format, sampleRate);
2983 // Check if the RecordBufferConverter construction was successful.
2984 // If not, don't continue with construction.
2985 //
2986 // NOTE: It would be extremely rare that the record track cannot be created
2987 // for the current device, but a pending or future device change would make
2988 // the record track configuration valid.
2989 if (mRecordBufferConverter->initCheck() != NO_ERROR) {
2990 ALOGE("%s(%d): RecordTrack unable to create record buffer converter", __func__, mId);
2991 return;
2992 }
2993 }
2994
2995 mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
2996 mFrameSize, !isExternalTrack());
2997
2998 mResamplerBufferProvider = new ResamplerBufferProvider(this);
2999
3000 if (flags & AUDIO_INPUT_FLAG_FAST) {
3001 ALOG_ASSERT(thread->fastTrackAvailable());
3002 thread->setFastTrackAvailable(false);
3003 } else {
3004 // TODO: only Normal Record has timestamps (Fast Record does not).
3005 mServerLatencySupported = checkServerLatencySupported(mFormat, flags);
3006 }
3007 #ifdef TEE_SINK
3008 mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
3009 + "_" + std::to_string(mId)
3010 + "_R");
3011 #endif
3012
3013 // Once this item is logged by the server, the client can add properties.
3014 mTrackMetrics.logConstructor(creatorPid, uid(), id());
3015 }
3016
3017 RecordTrack::~RecordTrack()
3018 {
3019 ALOGV("%s()", __func__);
3020 delete mRecordBufferConverter;
3021 delete mResamplerBufferProvider;
3022 }
3023
3024 status_t RecordTrack::initCheck() const
3025 {
3026 status_t status = TrackBase::initCheck();
3027 if (status == NO_ERROR && mServerProxy == 0) {
3028 status = BAD_VALUE;
3029 }
3030 return status;
3031 }
3032
3033 // AudioBufferProvider interface
3034 status_t RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer)
3035 {
3036 ServerProxy::Buffer buf;
3037 buf.mFrameCount = buffer->frameCount;
3038 status_t status = mServerProxy->obtainBuffer(&buf);
3039 buffer->frameCount = buf.mFrameCount;
3040 buffer->raw = buf.mRaw;
3041 if (buf.mFrameCount == 0) {
3042 // FIXME also wake futex so that overrun is noticed more quickly
3043 (void) android_atomic_or(CBLK_OVERRUN, &mCblk->mFlags);
3044 }
3045 return status;
3046 }
3047
3048 status_t RecordTrack::start(AudioSystem::sync_event_t event,
3049 audio_session_t triggerSession)
3050 {
3051 if (ATRACE_ENABLED()) [[unlikely]] {
3052 ATRACE_INSTANT_FOR_TRACK(mTraceActionId.c_str(), audio_utils::trace::Object{}
3053 .set(AUDIO_TRACE_OBJECT_KEY_EVENT, AUDIO_TRACE_EVENT_START)
3054 .toTrace().c_str());
3055 }
3056 const sp<IAfThreadBase> thread = mThread.promote();
3057 if (thread != 0) {
3058 auto* const recordThread = thread->asIAfRecordThread().get();
3059 return recordThread->start(this, event, triggerSession);
3060 } else {
3061 ALOGW("%s track %d: thread was destroyed", __func__, portId());
3062 return DEAD_OBJECT;
3063 }
3064 }
3065
3066 void RecordTrack::stop()
3067 {
3068 if (ATRACE_ENABLED()) [[unlikely]] {
3069 ATRACE_INSTANT_FOR_TRACK(mTraceActionId.c_str(), audio_utils::trace::Object{}
3070 .set(AUDIO_TRACE_OBJECT_KEY_EVENT, AUDIO_TRACE_EVENT_STOP)
3071 .toTrace().c_str());
3072 }
3073 const sp<IAfThreadBase> thread = mThread.promote();
3074 if (thread != 0) {
3075 auto* const recordThread = thread->asIAfRecordThread().get();
3076 if (recordThread->stop(this) && isExternalTrack()) {
3077 AudioSystem::stopInput(mPortId);
3078 }
3079 }
3080 }
3081
3082 void RecordTrack::destroy()
3083 {
3084 // see comments at Track::destroy()
3085 sp<RecordTrack> keep(this);
3086 {
3087 track_state priorState = mState;
3088 const sp<IAfThreadBase> thread = mThread.promote();
3089 if (thread != 0) {
3090 audio_utils::lock_guard _l(thread->mutex());
3091 auto* const recordThread = thread->asIAfRecordThread().get();
3092 priorState = mState;
3093 if (!mSharedAudioPackageName.empty()) {
3094 recordThread->resetAudioHistory_l();
3095 }
3096 recordThread->destroyTrack_l(this); // move mState to STOPPED, terminate
3097 }
3098 // APM portid/client management done outside of lock.
3099 // NOTE: if thread doesn't exist, the input descriptor probably doesn't either.
3100 if (isExternalTrack()) {
3101 switch (priorState) {
3102 case ACTIVE: // invalidated while still active
3103 case STARTING_2: // invalidated/start-aborted after startInput successfully called
3104 case PAUSING: // invalidated while in the middle of stop() pausing (still active)
3105 AudioSystem::stopInput(mPortId);
3106 break;
3107
3108 case STARTING_1: // invalidated/start-aborted and startInput not successful
3109 case PAUSED: // OK, not active
3110 case IDLE: // OK, not active
3111 break;
3112
3113 case STOPPED: // unexpected (destroyed)
3114 default:
3115 LOG_ALWAYS_FATAL("%s(%d): invalid prior state: %d", __func__, mId, priorState);
3116 }
3117 AudioSystem::releaseInput(mPortId);
3118 }
3119 }
3120 }
3121
3122 void RecordTrack::invalidate()
3123 {
3124 TrackBase::invalidate();
3125 // FIXME should use proxy, and needs work
3126 audio_track_cblk_t* cblk = mCblk;
3127 android_atomic_or(CBLK_INVALID, &cblk->mFlags);
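    // Store an arbitrary nonzero value so a client blocked on the futex observes
    // a changed futex word when it wakes.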
3128 android_atomic_release_store(0x40000000, &cblk->mFutex);
3129 // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
3130 (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
3131 }
3132
3133
3134 void RecordTrack::appendDumpHeader(String8& result) const
3135 {
3136 const auto res = IAfRecordTrack::getLogHeader();
3137 result.append(res.data(), res.size());
3138 }
3139
3140 void RecordTrack::appendDump(String8& result, bool active) const
3141 {
3142 result.appendFormat("%c%5s %6d %7u/%7u %7u %7u %2s 0x%03X "
3143 "%08X %08X %6u %6X "
3144 "%08X %6zu %6zu %3c",
3145 isFastTrack() ? 'F' : ' ',
3146 active ? "yes" : "no",
3147 mId,
3148 mClient ? mClient->pid() : getpid(),
3149 mClient ? mClient->uid() : getuid(),
3150 mSessionId,
3151 mPortId,
3152 getTrackStateAsCodedString(),
3153 mCblk->mFlags,
3154
3155 mFormat,
3156 mChannelMask,
3157 mSampleRate,
3158 mAttr.source,
3159
3160 mCblk->mServer,
3161 mFrameCount,
3162 mServerProxy->framesReadySafe(),
3163 isSilenced() ? 's' : 'n'
3164 );
3165 if (isServerLatencySupported()) {
3166 double latencyMs;
3167 bool fromTrack;
3168 if (getTrackLatencyMs(&latencyMs, &fromTrack) == OK) {
3169 // Show latency in msec, followed by 't' if from track timestamp (the most accurate)
3170 // or 'k' if estimated from kernel (usually for debugging).
3171 result.appendFormat(" %7.2lf %c", latencyMs, fromTrack ? 't' : 'k');
3172 } else {
3173 result.appendFormat("%10s", mCblk->mServer != 0 ? "unavail" : "new");
3174 }
3175 }
3176 result.append("\n");
3177 }
3178
3179 // This is invoked by SyncEvent callback.
3180 void RecordTrack::handleSyncStartEvent(
3181 const sp<audioflinger::SyncEvent>& event)
3182 {
3183 size_t framesToDrop = 0;
3184 const sp<IAfThreadBase> threadBase = mThread.promote();
3185 if (threadBase != 0) {
3186 // TODO: use actual buffer filling status instead of 2 buffers when info is available
3187 // from audio HAL
3188 framesToDrop = threadBase->frameCount() * 2;
3189 }
3190
3191 mSynchronizedRecordState.onPlaybackFinished(event, framesToDrop);
3192 }
3193
3194 void RecordTrack::clearSyncStartEvent()
3195 {
3196 mSynchronizedRecordState.clear();
3197 }
3198
3199 void RecordTrack::updateTrackFrameInfo(
3200 int64_t trackFramesReleased, int64_t sourceFramesRead,
3201         uint32_t halSampleRate, const ExtendedTimestamp &timestamp)
3202 {
3203 // Make the kernel frametime available.
3204 const FrameTime ft{
3205 timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
3206 timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]};
3207 // ALOGD("FrameTime: %lld %lld", (long long)ft.frames, (long long)ft.timeNs);
3208 mKernelFrameTime.store(ft);
3209 if (!audio_is_linear_pcm(mFormat)) {
3210 // Stream is direct, return provided timestamp with no conversion
3211 mServerProxy->setTimestamp(timestamp);
3212 return;
3213 }
3214
3215 ExtendedTimestamp local = timestamp;
3216
3217 // Convert HAL frames to server-side track frames at track sample rate.
3218 // We use trackFramesReleased and sourceFramesRead as an anchor point.
3219 for (int i = ExtendedTimestamp::LOCATION_SERVER; i < ExtendedTimestamp::LOCATION_MAX; ++i) {
3220 if (local.mTimeNs[i] != 0) {
3221 const int64_t relativeServerFrames = local.mPosition[i] - sourceFramesRead;
3222 const int64_t relativeTrackFrames = relativeServerFrames
3223 * mSampleRate / halSampleRate; // TODO: potential computation overflow
3224 local.mPosition[i] = relativeTrackFrames + trackFramesReleased;
3225 }
3226 }
3227 mServerProxy->setTimestamp(local);
3228
3229 // Compute latency info.
3230 const bool useTrackTimestamp = true; // use track unless debugging.
3231 const double latencyMs = - (useTrackTimestamp
3232 ? local.getOutputServerLatencyMs(sampleRate())
3233 : timestamp.getOutputServerLatencyMs(halSampleRate));
3234
3235 mServerLatencyFromTrack.store(useTrackTimestamp);
3236 mServerLatencyMs.store(latencyMs);
3237 }
3238
3239 status_t RecordTrack::getActiveMicrophones(
3240 std::vector<media::MicrophoneInfoFw>* activeMicrophones) const
3241 {
3242 const sp<IAfThreadBase> thread = mThread.promote();
3243 if (thread != 0) {
3244 auto* const recordThread = thread->asIAfRecordThread().get();
3245 return recordThread->getActiveMicrophones(activeMicrophones);
3246 } else {
3247 return BAD_VALUE;
3248 }
3249 }
3250
3251 status_t RecordTrack::setPreferredMicrophoneDirection(
3252 audio_microphone_direction_t direction) {
3253 const sp<IAfThreadBase> thread = mThread.promote();
3254 if (thread != 0) {
3255 auto* const recordThread = thread->asIAfRecordThread().get();
3256 return recordThread->setPreferredMicrophoneDirection(direction);
3257 } else {
3258 return BAD_VALUE;
3259 }
3260 }
3261
3262 status_t RecordTrack::setPreferredMicrophoneFieldDimension(float zoom) {
3263 const sp<IAfThreadBase> thread = mThread.promote();
3264 if (thread != 0) {
3265 auto* const recordThread = thread->asIAfRecordThread().get();
3266 return recordThread->setPreferredMicrophoneFieldDimension(zoom);
3267 } else {
3268 return BAD_VALUE;
3269 }
3270 }
3271
3272 status_t RecordTrack::shareAudioHistory(
3273 const std::string& sharedAudioPackageName, int64_t sharedAudioStartMs) {
3274
3275 const uid_t callingUid = IPCThreadState::self()->getCallingUid();
3276 const pid_t callingPid = IPCThreadState::self()->getCallingPid();
3277 if (callingUid != mUid || callingPid != mCreatorPid) {
3278 return PERMISSION_DENIED;
3279 }
3280
3281 AttributionSourceState attributionSource{};
3282 attributionSource.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingUid));
3283 attributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingPid));
3284 attributionSource.token = sp<BBinder>::make();
3285 const sp<IAfThreadBase> thread = mThread.promote();
3286 if (audioserver_permissions()) {
3287 const auto res = thread->afThreadCallback()->getPermissionProvider().checkPermission(
3288 CAPTURE_AUDIO_HOTWORD,
3289 attributionSource.uid);
3290 if (!res.ok()) {
3291 return aidl_utils::statusTFromBinderStatus(res.error());
3292 }
3293 if (!res.value()) {
3294 return PERMISSION_DENIED;
3295 }
3296 } else {
3297 if (!captureHotwordAllowed(attributionSource)) {
3298 return PERMISSION_DENIED;
3299 }
3300 }
3301
3302 if (thread != 0) {
3303 auto* const recordThread = thread->asIAfRecordThread().get();
3304 status_t status = recordThread->shareAudioHistory(
3305 sharedAudioPackageName, mSessionId, sharedAudioStartMs);
3306 if (status == NO_ERROR) {
3307 mSharedAudioPackageName = sharedAudioPackageName;
3308 }
3309 return status;
3310 } else {
3311 return BAD_VALUE;
3312 }
3313 }
3314
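// Track-level key/value parameters are only honored for direct record threads;
// other capture paths reject them.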
3315 status_t RecordTrack::setParameters(const String8& keyValuePairs) {
3316 const sp<IAfThreadBase> thread = mThread.promote();
3317 if (thread == nullptr) {
3318 ALOGE("%s(%d): thread is dead", __func__, mId);
3319 return FAILED_TRANSACTION;
3320 } else if (thread->type() == IAfThreadBase::DIRECT_RECORD) {
3321 return thread->setParameters(keyValuePairs);
3322 } else {
3323 return PERMISSION_DENIED;
3324 }
3325 }
3326
3327 void RecordTrack::copyMetadataTo(MetadataInserter& backInserter) const
3328 {
3329
3330 // Do not forward PatchRecord metadata with unspecified audio source
3331 if (mAttr.source == AUDIO_SOURCE_DEFAULT) {
3332 return;
3333 }
3334
3335 // No track is invalid as this is called after prepareTrack_l in the same critical section
3336 record_track_metadata_v7_t metadata;
3337 metadata.base = {
3338 .source = mAttr.source,
3339 .gain = 1, // capture tracks do not have volumes
3340 };
3341 metadata.channel_mask = mChannelMask;
3342 strncpy(metadata.tags, mAttr.tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);
3343 metadata.tags[AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1] = '\0';
3344 *backInserter++ = metadata;
3345 }
3346
3347 void RecordTrack::setSilenced(bool silenced) {
3348 if (!isPatchTrack() && mSilenced != silenced) {
3349 mSilenced = silenced;
3350 ALOGD("%s: track with port id: %d, (%s)", __func__, mPortId,
3351 mSilenced ? "silenced" : "unsilenced");
3352 }
3353 }
3354
3355 // ----------------------------------------------------------------------------
3356 #undef LOG_TAG
3357 #define LOG_TAG "AF::PatchRecord"
3358
3359 /* static */
3360 sp<IAfPatchRecord> IAfPatchRecord::create(
3361 IAfRecordThread* recordThread,
3362 uint32_t sampleRate,
3363 audio_channel_mask_t channelMask,
3364 audio_format_t format,
3365 size_t frameCount,
3366 void *buffer,
3367 size_t bufferSize,
3368 audio_input_flags_t flags,
3369 const Timeout& timeout,
3370 audio_source_t source)
3371 {
3372 return sp<PatchRecord>::make(
3373 recordThread,
3374 sampleRate,
3375 channelMask,
3376 format,
3377 frameCount,
3378 buffer,
3379 bufferSize,
3380 flags,
3381 timeout,
3382 source);
3383 }
3384
3385 PatchRecord::PatchRecord(IAfRecordThread* recordThread,
3386 uint32_t sampleRate,
3387 audio_channel_mask_t channelMask,
3388 audio_format_t format,
3389 size_t frameCount,
3390 void *buffer,
3391 size_t bufferSize,
3392 audio_input_flags_t flags,
3393 const Timeout& timeout,
3394 audio_source_t source)
3395 : RecordTrack(recordThread, NULL,
3396 audio_attributes_t{ .source = source } ,
3397 sampleRate, format, channelMask, frameCount,
3398 buffer, bufferSize, AUDIO_SESSION_NONE, getpid(),
3399 audioServerAttributionSource(getpid()), flags, TYPE_PATCH),
3400 PatchTrackBase(mCblk ? new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true)
3401 : nullptr,
3402 recordThread, timeout)
3403 {
3404 ALOGV("%s(%d): sampleRate %d mPeerTimeout %d.%03d sec",
3405 __func__, mId, sampleRate,
3406 (int)mPeerTimeout.tv_sec,
3407 (int)(mPeerTimeout.tv_nsec / 1000000));
3408 }
3409
3410 PatchRecord::~PatchRecord()
3411 {
3412 ALOGV("%s(%d)", __func__, mId);
3413 }
3414
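// Copies up to frameCount frames from src into a single buffer obtained from dest.
// Returns the number of frames actually written, which may be less than requested
// when the destination buffer wraps or obtainBuffer fails.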
3415 static size_t writeFramesHelper(
3416 AudioBufferProvider* dest, const void* src, size_t frameCount, size_t frameSize)
3417 {
3418 AudioBufferProvider::Buffer patchBuffer;
3419 patchBuffer.frameCount = frameCount;
3420 auto status = dest->getNextBuffer(&patchBuffer);
3421 if (status != NO_ERROR) {
3422         ALOGW("%s PatchRecord getNextBuffer failed with error %d: %s",
3423 __func__, status, strerror(-status));
3424 return 0;
3425 }
3426 ALOG_ASSERT(patchBuffer.frameCount <= frameCount);
3427 memcpy(patchBuffer.raw, src, patchBuffer.frameCount * frameSize);
3428 size_t framesWritten = patchBuffer.frameCount;
3429 dest->releaseBuffer(&patchBuffer);
3430 return framesWritten;
3431 }
3432
3433 // static
3434 size_t PatchRecord::writeFrames(
3435 AudioBufferProvider* dest, const void* src, size_t frameCount, size_t frameSize)
3436 {
3437 size_t framesWritten = writeFramesHelper(dest, src, frameCount, frameSize);
3438 // On buffer wrap, the buffer frame count will be less than requested,
3439 // when this happens a second buffer needs to be used to write the leftover audio
3440 const size_t framesLeft = frameCount - framesWritten;
3441 if (framesWritten != 0 && framesLeft != 0) {
3442 framesWritten += writeFramesHelper(dest, (const char*)src + framesWritten * frameSize,
3443 framesLeft, frameSize);
3444 }
3445 return framesWritten;
3446 }
3447
3448 // AudioBufferProvider interface
3449 status_t PatchRecord::getNextBuffer(
3450 AudioBufferProvider::Buffer* buffer)
3451 {
3452 ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
3453 Proxy::Buffer buf;
3454 buf.mFrameCount = buffer->frameCount;
3455 status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
3456 ALOGV_IF(status != NO_ERROR,
3457 "%s(%d): mPeerProxy->obtainBuffer status %d", __func__, mId, status);
3458 buffer->frameCount = buf.mFrameCount;
3459 if (ATRACE_ENABLED()) {
3460 std::string traceName("PRnObt");
3461 traceName += std::to_string(id());
3462 ATRACE_INT(traceName.c_str(), buf.mFrameCount);
3463 }
3464 if (buf.mFrameCount == 0) {
3465 return WOULD_BLOCK;
3466 }
3467 status = RecordTrack::getNextBuffer(buffer);
3468 return status;
3469 }
3470
3471 void PatchRecord::releaseBuffer(AudioBufferProvider::Buffer* buffer)
3472 {
3473 ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
3474 Proxy::Buffer buf;
3475 buf.mFrameCount = buffer->frameCount;
3476 buf.mRaw = buffer->raw;
3477 mPeerProxy->releaseBuffer(&buf);
3478 TrackBase::releaseBuffer(buffer);
3479 }
3480
3481 status_t PatchRecord::obtainBuffer(Proxy::Buffer* buffer,
3482 const struct timespec *timeOut)
3483 {
3484 return mProxy->obtainBuffer(buffer, timeOut);
3485 }
3486
3487 void PatchRecord::releaseBuffer(Proxy::Buffer* buffer)
3488 {
3489 mProxy->releaseBuffer(buffer);
3490 }
3491
3492 #undef LOG_TAG
3493 #define LOG_TAG "AF::PthrPatchRecord"
3494
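// Returns a heap allocation with the requested alignment, wrapped so it is released
// with free(). posix_memalign may fail, in which case the returned pointer is null.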
3495 static std::unique_ptr<void, decltype(free)*> allocAligned(size_t alignment, size_t size)
3496 {
3497 void *ptr = nullptr;
3498 (void)posix_memalign(&ptr, alignment, size);
3499 return {ptr, free};
3500 }
3501
3502 /* static */
3503 sp<IAfPatchRecord> IAfPatchRecord::createPassThru(
3504 IAfRecordThread* recordThread,
3505 uint32_t sampleRate,
3506 audio_channel_mask_t channelMask,
3507 audio_format_t format,
3508 size_t frameCount,
3509 audio_input_flags_t flags,
3510 audio_source_t source)
3511 {
3512 return sp<PassthruPatchRecord>::make(
3513 recordThread,
3514 sampleRate,
3515 channelMask,
3516 format,
3517 frameCount,
3518 flags,
3519 source);
3520 }
3521
3522 PassthruPatchRecord::PassthruPatchRecord(
3523 IAfRecordThread* recordThread,
3524 uint32_t sampleRate,
3525 audio_channel_mask_t channelMask,
3526 audio_format_t format,
3527 size_t frameCount,
3528 audio_input_flags_t flags,
3529 audio_source_t source)
3530 : PatchRecord(recordThread, sampleRate, channelMask, format, frameCount,
3531 nullptr /*buffer*/, 0 /*bufferSize*/, flags, {} /* timeout */, source),
3532 mPatchRecordAudioBufferProvider(*this),
3533 mSinkBuffer(allocAligned(32, mFrameCount * mFrameSize)),
3534 mStubBuffer(allocAligned(32, mFrameCount * mFrameSize))
3535 {
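    // The stub buffer is zero-filled once; getNextBuffer() hands it out in place of
    // real data, since the actual audio is consumed from mSinkBuffer in obtainBuffer().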
3536 memset(mStubBuffer.get(), 0, mFrameCount * mFrameSize);
3537 }
3538
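// Promotes the owning record thread and returns its current HAL input stream,
// or nullptr if the thread or its input is gone.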
3539 sp<StreamInHalInterface> PassthruPatchRecord::obtainStream(
3540 sp<IAfThreadBase>* thread)
3541 {
3542 *thread = mThread.promote();
3543 if (!*thread) return nullptr;
3544 auto* const recordThread = (*thread)->asIAfRecordThread().get();
3545 audio_utils::lock_guard _l(recordThread->mutex());
3546 return recordThread->getInput() ? recordThread->getInput()->stream : nullptr;
3547 }
3548
3549 // PatchProxyBufferProvider methods are called on DirectOutputThread
3550 status_t PassthruPatchRecord::obtainBuffer(
3551 Proxy::Buffer* buffer, const struct timespec* timeOut)
3552 {
3553 if (mUnconsumedFrames) {
3554 buffer->mFrameCount = std::min(buffer->mFrameCount, mUnconsumedFrames);
3555 // mUnconsumedFrames is decreased in releaseBuffer to use actual frame consumption figure.
3556 return PatchRecord::obtainBuffer(buffer, timeOut);
3557 }
3558
3559 // Otherwise, execute a read from HAL and write into the buffer.
3560 nsecs_t startTimeNs = 0;
3561 if (timeOut && (timeOut->tv_sec != 0 || timeOut->tv_nsec != 0) && timeOut->tv_sec != INT_MAX) {
3562 // Will need to correct timeOut by elapsed time.
3563 startTimeNs = systemTime();
3564 }
3565 const size_t framesToRead = std::min(buffer->mFrameCount, mFrameCount);
3566 buffer->mFrameCount = 0;
3567 buffer->mRaw = nullptr;
3568 sp<IAfThreadBase> thread;
3569 sp<StreamInHalInterface> stream = obtainStream(&thread);
3570 if (!stream) return NO_INIT; // If there is no stream, RecordThread is not reading.
3571
3572 status_t result = NO_ERROR;
3573 size_t bytesRead = 0;
3574 {
3575 ATRACE_NAME("read");
3576 result = stream->read(mSinkBuffer.get(), framesToRead * mFrameSize, &bytesRead);
3577 if (result != NO_ERROR) goto stream_error;
3578 if (bytesRead == 0) return NO_ERROR;
3579 }
3580
3581 {
3582 audio_utils::lock_guard lock(readMutex());
3583 mReadBytes += bytesRead;
3584 mReadError = NO_ERROR;
3585 }
3586 mReadCV.notify_one();
3587 // writeFrames handles wraparound and should write all the provided frames.
3588 // If it couldn't, there is something wrong with the client/server buffer of the software patch.
3589 buffer->mFrameCount = writeFrames(
3590 &mPatchRecordAudioBufferProvider,
3591 mSinkBuffer.get(), bytesRead / mFrameSize, mFrameSize);
3592 ALOGW_IF(buffer->mFrameCount < bytesRead / mFrameSize,
3593 "Lost %zu frames obtained from HAL", bytesRead / mFrameSize - buffer->mFrameCount);
3594 mUnconsumedFrames = buffer->mFrameCount;
3595 struct timespec newTimeOut;
3596 if (startTimeNs) {
3597 // Correct the timeout by elapsed time.
3598 nsecs_t newTimeOutNs = audio_utils_ns_from_timespec(timeOut) - (systemTime() - startTimeNs);
3599 if (newTimeOutNs < 0) newTimeOutNs = 0;
3600 newTimeOut.tv_sec = newTimeOutNs / NANOS_PER_SECOND;
3601 newTimeOut.tv_nsec = newTimeOutNs - newTimeOut.tv_sec * NANOS_PER_SECOND;
3602 timeOut = &newTimeOut;
3603 }
3604 return PatchRecord::obtainBuffer(buffer, timeOut);
3605
3606 stream_error:
3607 stream->standby();
3608 {
3609 audio_utils::lock_guard lock(readMutex());
3610 mReadError = result;
3611 }
3612 mReadCV.notify_one();
3613 return result;
3614 }
3615
3616 void PassthruPatchRecord::releaseBuffer(Proxy::Buffer* buffer)
3617 {
3618 if (buffer->mFrameCount <= mUnconsumedFrames) {
3619 mUnconsumedFrames -= buffer->mFrameCount;
3620 } else {
3621 ALOGW("Write side has consumed more frames than we had: %zu > %zu",
3622 buffer->mFrameCount, mUnconsumedFrames);
3623 mUnconsumedFrames = 0;
3624 }
3625 PatchRecord::releaseBuffer(buffer);
3626 }
3627
3628 // AudioBufferProvider and Source methods are called on RecordThread
3629 // 'read' emulates actual audio data with 0's. This is OK as 'getNextBuffer'
3630 // and 'releaseBuffer' are stubbed out and ignore their input.
3631 // It's not possible to retrieve actual data here w/o blocking 'obtainBuffer'
3632 // until we copy it.
3633 status_t PassthruPatchRecord::read(
3634 void* buffer, size_t bytes, size_t* read)
3635 {
3636 bytes = std::min(bytes, mFrameCount * mFrameSize);
3637 {
3638 audio_utils::unique_lock lock(readMutex());
3639 mReadCV.wait(lock, [&]{ return mReadError != NO_ERROR || mReadBytes != 0; });
3640 if (mReadError != NO_ERROR) {
3641 mLastReadFrames = 0;
3642 return mReadError;
3643 }
3644 *read = std::min(bytes, mReadBytes);
3645 mReadBytes -= *read;
3646 }
3647 mLastReadFrames = *read / mFrameSize;
3648 memset(buffer, 0, *read);
3649 return 0;
3650 }
3651
3652 status_t PassthruPatchRecord::getCapturePosition(
3653 int64_t* frames, int64_t* time)
3654 {
3655 sp<IAfThreadBase> thread;
3656 sp<StreamInHalInterface> stream = obtainStream(&thread);
3657 return stream ? stream->getCapturePosition(frames, time) : NO_INIT;
3658 }
3659
3660 status_t PassthruPatchRecord::standby()
3661 {
3662 // RecordThread issues 'standby' command in two major cases:
3663 // 1. Error on read--this case is handled in 'obtainBuffer'.
3664 // 2. Track is stopping--as PassthruPatchRecord assumes continuous
3665 // output, this can only happen when the software patch
3666 // is being torn down. In this case, the RecordThread
3667 // will terminate and close the HAL stream.
3668 return 0;
3669 }
3670
3671 // As the buffer gets filled in obtainBuffer, here we only simulate data consumption.
3672 status_t PassthruPatchRecord::getNextBuffer(
3673 AudioBufferProvider::Buffer* buffer)
3674 {
3675 buffer->frameCount = mLastReadFrames;
3676 buffer->raw = buffer->frameCount != 0 ? mStubBuffer.get() : nullptr;
3677 return NO_ERROR;
3678 }
3679
3680 void PassthruPatchRecord::releaseBuffer(
3681 AudioBufferProvider::Buffer* buffer)
3682 {
3683 buffer->frameCount = 0;
3684 buffer->raw = nullptr;
3685 }
3686
3687 // ----------------------------------------------------------------------------
3688 // AfPlaybackCommon
3689
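// Chooses how strongly audio-control hardening is enforced for a track: a hardening
// override forces FULL; system and virtual-source usages are otherwise exempt; else the
// strict/partial feature flags select FULL, PARTIAL, or NONE.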
3690 static AfPlaybackCommon::EnforcementLevel getOpControlEnforcementLevel(audio_usage_t usage,
3691 IAfThreadCallback& cb) {
3692 using enum AfPlaybackCommon::EnforcementLevel;
3693 if (cb.isHardeningOverrideEnabled()) {
3694 return FULL;
3695 }
3696 if (usage == AUDIO_USAGE_VIRTUAL_SOURCE || media::permission::isSystemUsage(usage)) {
3697 return NONE;
3698 }
3699 if (hardening_strict()) {
3700 return FULL;
3701 } else if (hardening_partial()) {
3702 return PARTIAL;
3703 } else {
3704 return NONE;
3705 }
3706 }
3707
3708 AfPlaybackCommon::AfPlaybackCommon(IAfTrackBase& self, IAfThreadBase& thread, float volume,
3709 bool muted, const audio_attributes_t& attr,
3710 const AttributionSourceState& attributionSource,
3711 bool isOffloadOrMmap,
3712 bool shouldPlaybackHarden)
3713 : mSelf(self),
3714 mMutedFromPort(muted),
3715 mVolume(volume),
3716 mEnforcementLevel(getOpControlEnforcementLevel(attr.usage, *thread.afThreadCallback())) {
3717 ALOGI("creating track with enforcement level %d", mEnforcementLevel);
3718 using AppOpsManager::OP_CONTROL_AUDIO_PARTIAL;
3719 using AppOpsManager::OP_CONTROL_AUDIO;
3720 using media::permission::Ops;
3721 using media::permission::skipOpsForUid;
3722 using media::permission::ValidatedAttributionSourceState;
3723
3724 if (hardening_impl()) {
3725 // Don't bother for trusted uids
3726 if (!skipOpsForUid(attributionSource.uid) && shouldPlaybackHarden) {
3727 if (isOffloadOrMmap) {
3728 mExecutor.emplace();
3729 }
3730 auto thread_wp = wp<IAfThreadBase>::fromExisting(&thread);
3731 mOpControlPartialSession.emplace(
3732 ValidatedAttributionSourceState::createFromTrustedSource(attributionSource),
3733 Ops{.attributedOp = OP_CONTROL_AUDIO_PARTIAL},
3734 [this, isOffloadOrMmap, thread_wp](bool isPermitted) {
3735 mHasOpControlPartial.store(isPermitted, std::memory_order_release);
3736 if (isOffloadOrMmap) {
3737 mExecutor->enqueue(mediautils::Runnable{[thread_wp]() {
3738 auto thread = thread_wp.promote();
3739 if (thread != nullptr) {
3740 audio_utils::lock_guard l {thread->mutex()};
3741 thread->broadcast_l();
3742 }
3743 }});
3744 }
3745 }
3746 );
3747 // Same as previous but for mHasOpControlFull, OP_CONTROL_AUDIO
3748 mOpControlFullSession.emplace(
3749 ValidatedAttributionSourceState::createFromTrustedSource(attributionSource),
3750 Ops{.attributedOp = OP_CONTROL_AUDIO},
3751 [this, isOffloadOrMmap, thread_wp](bool isPermitted) {
3752 mHasOpControlFull.store(isPermitted, std::memory_order_release);
3753 if (isOffloadOrMmap) {
3754 mExecutor->enqueue(mediautils::Runnable{[thread_wp]() {
3755 auto thread = thread_wp.promote();
3756 if (thread != nullptr) {
3757 audio_utils::lock_guard l {thread->mutex()};
3758 thread->broadcast_l();
3759 }
3760 }});
3761 }
3762 }
3763 );
3764 }
3765 }
3766 }
3767
3768 void AfPlaybackCommon::maybeLogPlaybackHardening(media::IAudioManagerNative& am) const {
3769 using media::IAudioManagerNative::HardeningType::PARTIAL;
3770 using media::IAudioManagerNative::HardeningType::FULL;
3771 // The op state deviates from if the track is actually muted if the playback was exempted for
3772 // some compat reason.
3773 // The state could have technically TOCTOU, but this is for metrics and that is very unlikely
3774 if (!hasOpControlPartial()) {
3775 if (!mPlaybackHardeningLogged.exchange(true, std::memory_order_acq_rel)) {
3776 am.playbackHardeningEvent(mSelf.uid(), PARTIAL,
3777 /* bypassed= */
3778 !isPlaybackRestrictedControl());
3779 }
3780 } else if (!hasOpControlFull()) {
3781 if (!mPlaybackHardeningLogged.exchange(true, std::memory_order_acq_rel)) {
3782 am.playbackHardeningEvent(mSelf.uid(), FULL,
3783 /* bypassed= */
3784 !isPlaybackRestrictedControl());
3785 }
3786 }
3787 }
3788
3789 void AfPlaybackCommon::processMuteEvent(media::IAudioManagerNative& am, mute_state_t muteState) {
3790 const auto trackId = mSelf.id();
3791 const auto portId = mSelf.portId();
3792 if (mMuteState == muteState) {
3793 // mute state did not change, do nothing
3794 return;
3795 }
3796
3797 const auto result = portId != AUDIO_PORT_HANDLE_NONE
3798 ? am.portMuteEvent(portId, static_cast<int>(muteState))
3799 : Status::fromExceptionCode(Status::EX_ILLEGAL_STATE);
3800 if (result.isOk()) {
3801 ALOGI("%s(%d): processed mute state for port ID %d from %#x to %#x", __func__, trackId,
3802 portId, static_cast<int>(mMuteState.load()), static_cast<int>(muteState));
3803 mMuteState = muteState;
3804 } else {
3805 ALOGW("%s(%d): cannot process mute state for port ID %d, status error %s", __func__,
3806 trackId, portId, result.toString8().c_str());
3807 }
3808 }
3809
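// Delivery start/stop notifications keep the app-op sessions in sync with whether audio
// is actually being delivered; the cached op state is refreshed from the session on start.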
3810 void AfPlaybackCommon::startPlaybackDelivery() {
3811 if (mOpControlPartialSession) {
3812 mHasOpControlPartial.store(mOpControlPartialSession->beginDeliveryRequest(),
3813 std::memory_order_release);
3814 }
3815 if (mOpControlFullSession) {
3816 mHasOpControlFull.store(mOpControlFullSession->beginDeliveryRequest(),
3817 std::memory_order_release);
3818 }
3819 }
3820
3821 void AfPlaybackCommon::endPlaybackDelivery() {
3822 if (mOpControlPartialSession) {
3823 mOpControlPartialSession->endDeliveryRequest();
3824 }
3825 if (mOpControlFullSession) {
3826 mOpControlFullSession->endDeliveryRequest();
3827 }
3828 }
3829
3830 // ----------------------------------------------------------------------------
3831 #undef LOG_TAG
3832 #define LOG_TAG "AF::MmapTrack"
3833
3834 /* static */
3835 sp<IAfMmapTrack> IAfMmapTrack::create(IAfThreadBase* thread,
3836 const audio_attributes_t& attr,
3837 uint32_t sampleRate,
3838 audio_format_t format,
3839 audio_channel_mask_t channelMask,
3840 audio_session_t sessionId,
3841 bool isOut,
3842 const android::content::AttributionSourceState& attributionSource,
3843 pid_t creatorPid,
3844 audio_port_handle_t portId,
3845 float volume,
3846 bool muted)
3847 {
3848 return sp<MmapTrack>::make(
3849 thread,
3850 attr,
3851 sampleRate,
3852 format,
3853 channelMask,
3854 sessionId,
3855 isOut,
3856 attributionSource,
3857 creatorPid,
3858 portId,
3859 volume,
3860 muted);
3861 }
3862
3863 MmapTrack::MmapTrack(IAfThreadBase* thread,
3864 const audio_attributes_t& attr,
3865 uint32_t sampleRate,
3866 audio_format_t format,
3867 audio_channel_mask_t channelMask,
3868 audio_session_t sessionId,
3869 bool isOut,
3870 const AttributionSourceState& attributionSource,
3871 pid_t creatorPid,
3872 audio_port_handle_t portId,
3873 float volume,
3874 bool muted)
3875 : AfPlaybackCommon(*this, *thread,
3876 volume, muted, attr, attributionSource, /* isOffloadOrMmap */ true),
3877 TrackBase(thread, NULL, attr, sampleRate, format,
3878 channelMask, (size_t)0 /* frameCount */,
3879 nullptr /* buffer */, (size_t)0 /* bufferSize */,
3880 sessionId, creatorPid,
3881 VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)),
3882 isOut,
3883 ALLOC_NONE,
3884 TYPE_DEFAULT, portId,
3885 std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_MMAP) + std::to_string(portId)),
3886 mPid(VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.pid))),
3887 mUid(VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid))),
3888 mSilenced(false), mSilencedNotified(false)
3889 {
3890 // Once this item is logged by the server, the client can add properties.
3891 mTrackMetrics.logConstructor(creatorPid, uid(), id());
3892 if (isOut && (attr.usage == AUDIO_USAGE_CALL_ASSISTANT
3893 || attr.usage == AUDIO_USAGE_VIRTUAL_SOURCE)) {
3894 // Audio patch and call assistant volume are always max
3895 setPortVolume(1.0f);
3896 setPortMute(false);
3897 }
3898 }
3899
3900 MmapTrack::~MmapTrack()
3901 {
3902 }
3903
3904 status_t MmapTrack::initCheck() const
3905 {
3906 return NO_ERROR;
3907 }
3908
3909 status_t MmapTrack::start(AudioSystem::sync_event_t event __unused,
3910 audio_session_t triggerSession __unused)
3911 {
3912 startPlaybackDelivery();
3913 if (ATRACE_ENABLED()) [[unlikely]] {
3914 ATRACE_INSTANT_FOR_TRACK(mTraceActionId.c_str(), audio_utils::trace::Object{}
3915 .set(AUDIO_TRACE_OBJECT_KEY_EVENT, AUDIO_TRACE_EVENT_START)
3916 .toTrace().c_str());
3917 }
3918 return NO_ERROR;
3919 }
3920
3921 void MmapTrack::stop()
3922 {
3923 endPlaybackDelivery();
3924 if (ATRACE_ENABLED()) [[unlikely]] {
3925 ATRACE_INSTANT_FOR_TRACK(mTraceActionId.c_str(), audio_utils::trace::Object{}
3926 .set(AUDIO_TRACE_OBJECT_KEY_EVENT, AUDIO_TRACE_EVENT_STOP)
3927 .toTrace().c_str());
3928 }
3929 }
3930
3931 // AudioBufferProvider interface
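// MMAP tracks exchange audio directly through the shared memory region set up with the
// HAL, so the buffer provider interface is intentionally a no-op for them.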
3932 status_t MmapTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer)
3933 {
3934 buffer->frameCount = 0;
3935 buffer->raw = nullptr;
3936 return INVALID_OPERATION;
3937 }
3938
3939 // ExtendedAudioBufferProvider interface
3940 size_t MmapTrack::framesReady() const {
3941 return 0;
3942 }
3943
3944 int64_t MmapTrack::framesReleased() const
3945 {
3946 return 0;
3947 }
3948
3949 void MmapTrack::onTimestamp(const ExtendedTimestamp& timestamp __unused)
3950 {
3951 }
3952
3953 void MmapTrack::appendDumpHeader(String8& result) const
3954 {
3955 const auto res = IAfMmapTrack::getLogHeader();
3956 result.append(res.data(), res.size());
3957 }
3958
3959 void MmapTrack::appendDump(String8& result, bool active __unused) const
3960 {
3961 result.appendFormat("%7u/%7u %7u %7u %08X %08X %6u 0x%03X ",
3962 mPid,
3963 mUid,
3964 mSessionId,
3965 mPortId,
3966 mFormat,
3967 mChannelMask,
3968 mSampleRate,
3969 mAttr.flags);
3970 if (isOut()) {
3971 result.appendFormat("%4x %2x", mAttr.usage, mAttr.content_type);
3972 result.appendFormat("%11.2g", 20.0 * log10(getPortVolume()));
3973 result.appendFormat("%12s", getPortMute() ? "true" : "false");
3974 } else {
3975 result.appendFormat("%7x", mAttr.source);
3976 }
3977 result.append("\n");
3978 }
3979
3980 } // namespace android
3981