/*
**
** Copyright 2012, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/


#define LOG_TAG "AudioFlinger"
// #define LOG_NDEBUG 0
#define ATRACE_TAG ATRACE_TAG_AUDIO

#include "Configuration.h"
#include <math.h>
#include <fcntl.h>
#include <memory>
#include <sstream>
#include <string>
#include <linux/futex.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <cutils/bitops.h>
#include <cutils/properties.h>
#include <binder/PersistableBundle.h>
#include <media/AudioContainers.h>
#include <media/AudioDeviceTypeAddr.h>
#include <media/AudioParameter.h>
#include <media/AudioResamplerPublic.h>
#include <media/RecordBufferConverter.h>
#include <media/TypeConverter.h>
#include <utils/Log.h>
#include <utils/Trace.h>

#include <private/media/AudioTrackShared.h>
#include <private/android_filesystem_config.h>
#include <audio_utils/Balance.h>
#include <audio_utils/MelProcessor.h>
#include <audio_utils/Metadata.h>
#include <audio_utils/channels.h>
#include <audio_utils/mono_blend.h>
#include <audio_utils/primitives.h>
#include <audio_utils/format.h>
#include <audio_utils/minifloat.h>
#include <audio_utils/safe_math.h>
#include <system/audio_effects/effect_aec.h>
#include <system/audio_effects/effect_downmix.h>
#include <system/audio_effects/effect_ns.h>
#include <system/audio_effects/effect_spatializer.h>
#include <system/audio.h>

// NBAIO implementations
#include <media/nbaio/AudioStreamInSource.h>
#include <media/nbaio/AudioStreamOutSink.h>
#include <media/nbaio/MonoPipe.h>
#include <media/nbaio/MonoPipeReader.h>
#include <media/nbaio/Pipe.h>
#include <media/nbaio/PipeReader.h>
#include <media/nbaio/SourceAudioBufferProvider.h>
#include <mediautils/BatteryNotifier.h>
#include <mediautils/Process.h>

#include <audiomanager/AudioManager.h>
#include <powermanager/PowerManager.h>

#include <media/audiohal/EffectsFactoryHalInterface.h>
#include <media/audiohal/StreamHalInterface.h>

#include "AudioFlinger.h"
#include "FastMixer.h"
#include "FastCapture.h"
#include <mediautils/SchedulingPolicyService.h>
#include <mediautils/ServiceUtilities.h>

#ifdef ADD_BATTERY_DATA
#include <media/IMediaPlayerService.h>
#include <media/IMediaDeathNotifier.h>
#endif

#ifdef DEBUG_CPU_USAGE
#include <audio_utils/Statistics.h>
#include <cpustats/ThreadCpuUsage.h>
#endif

#include "AutoPark.h"

#include <pthread.h>
#include "TypedLogger.h"

// ----------------------------------------------------------------------------

// Note: the following macro is used for extremely verbose logging messages. In
// order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
// 0; but one side effect of this is to turn on all LOGV's as well. Some messages
// are so verbose that we want to suppress them even when we have ALOG_ASSERT
// turned on. Do not uncomment the #define below unless you really know what you
// are doing and want to see all of the extremely verbose messages.
//#define VERY_VERY_VERBOSE_LOGGING
#ifdef VERY_VERY_VERBOSE_LOGGING
#define ALOGVV ALOGV
#else
#define ALOGVV(a...) do { } while(0)
#endif

// TODO: Move these macro/inlines to a header file.
#define max(a, b) ((a) > (b) ? (a) : (b))

template <typename T>
static inline T min(const T& a, const T& b)
{
    return a < b ? a : b;
}
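
// Caution: unlike the inline min() above, the max() macro evaluates its arguments
// twice, so avoid passing expressions with side effects until these helpers are
// moved to a header (see TODO above) or replaced with std::min/std::max.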

namespace android {

using media::IEffectClient;
using content::AttributionSourceState;

// retry counts for buffer fill timeout
// 50 * ~20msecs = 1 second
static const int8_t kMaxTrackRetries = 50;
static const int8_t kMaxTrackStartupRetries = 50;

// allow fewer retry attempts on direct output thread.
// direct outputs can be a scarce resource in audio hardware and should
// be released as quickly as possible.
// Notes:
// 1) The retry duration kMaxTrackRetriesDirectMs may be increased
//    in case the data write is bursty for the AudioTrack. The application
//    should endeavor to write at least once every kMaxTrackRetriesDirectMs
//    to prevent an underrun situation. If the data is bursty, then
//    the application can also throttle the data sent to be even.
// 2) For compressed audio data, any data present in the AudioTrack buffer
//    will be sent, and the retry count reset. This delivers data as
//    it arrives, with approximately kDirectMinSleepTimeUs = 10ms between checks.
// 3) For linear PCM or proportional PCM, we wait one period for a period's worth
//    of data to be available, then any remaining data is delivered.
//    This is required to ensure the last bit of data is delivered before underrun.
//
// Sleep time per cycle is kDirectMinSleepTimeUs for compressed tracks
// or the size of the HAL period for proportional / linear PCM tracks.
static const int32_t kMaxTrackRetriesDirectMs = 200;

// don't warn about blocked writes or record buffer overflows more often than this
static const nsecs_t kWarningThrottleNs = seconds(5);

// RecordThread loop sleep time upon application overrun or audio HAL read error
static const int kRecordThreadSleepUs = 5000;

// maximum time to wait in sendConfigEvent_l() for a status to be received
static const nsecs_t kConfigEventTimeoutNs = seconds(2);

// minimum sleep time for the mixer thread loop when tracks are active but in underrun
static const uint32_t kMinThreadSleepTimeUs = 5000;
// maximum divider applied to the active sleep time in the mixer thread loop
static const uint32_t kMaxThreadSleepTimeShift = 2;

// minimum normal sink buffer size, expressed in milliseconds rather than frames
// FIXME This should be based on experimentally observed scheduling jitter
static const uint32_t kMinNormalSinkBufferSizeMs = 20;
// maximum normal sink buffer size
static const uint32_t kMaxNormalSinkBufferSizeMs = 24;

// minimum capture buffer size in milliseconds to _not_ need a fast capture thread
// FIXME This should be based on experimentally observed scheduling jitter
static const uint32_t kMinNormalCaptureBufferSizeMs = 12;

// Offloaded output thread standby delay: allows track transition without going to standby
static const nsecs_t kOffloadStandbyDelayNs = seconds(1);

// Direct output thread minimum sleep time in idle or active(underrun) state
static const nsecs_t kDirectMinSleepTimeUs = 10000;

// Minimum amount of time between checking to see if the timestamp is advancing
// for underrun detection. If we check too frequently, we may not detect a
// timestamp update and will falsely detect underrun.
static const nsecs_t kMinimumTimeBetweenTimestampChecksNs = 150 /* ms */ * 1000;

// The universal constant for the ubiquitous 20 ms value. The value of 20 ms seems to provide a
// good balance between power consumption and latency, and allows threads to be scheduled
// reliably by the CFS scheduler.
// FIXME Express other hardcoded references to 20ms with references to this constant and move
// it appropriately.
#define FMS_20 20

// Whether to use fast mixer
static const enum {
    FastMixer_Never,    // never initialize or use: for debugging only
    FastMixer_Always,   // always initialize and use, even if not needed: for debugging only
                        // normal mixer multiplier is 1
    FastMixer_Static,   // initialize if needed, then use all the time if initialized,
                        // multiplier is calculated based on min & max normal mixer buffer size
    FastMixer_Dynamic,  // initialize if needed, then use dynamically depending on track load,
                        // multiplier is calculated based on min & max normal mixer buffer size
    // FIXME for FastMixer_Dynamic:
    //  Supporting this option will require fixing HALs that can't handle large writes.
    //  For example, one HAL implementation returns an error from a large write,
    //  and another HAL implementation corrupts memory, possibly in the sample rate converter.
    //  We could either fix the HAL implementations, or provide a wrapper that breaks
    //  up large writes into smaller ones, and the wrapper would need to deal with the scheduler.
} kUseFastMixer = FastMixer_Static;

// Whether to use fast capture
static const enum {
    FastCapture_Never,  // never initialize or use: for debugging only
    FastCapture_Always, // always initialize and use, even if not needed: for debugging only
    FastCapture_Static, // initialize if needed, then use all the time if initialized
} kUseFastCapture = FastCapture_Static;

// Priorities for requestPriority
static const int kPriorityAudioApp = 2;
static const int kPriorityFastMixer = 3;
static const int kPriorityFastCapture = 3;

// IAudioFlinger::createTrack() has an in/out parameter 'pFrameCount' for the total size of the
// track buffer in shared memory. Zero on input means to use a default value. For fast tracks,
// AudioFlinger derives the default from HAL buffer size and 'fast track multiplier'.

// This is the default value, if not specified by property.
static const int kFastTrackMultiplier = 2;

// The minimum and maximum allowed values
static const int kFastTrackMultiplierMin = 1;
static const int kFastTrackMultiplierMax = 2;

// The actual value to use, which can be specified per-device via property af.fast_track_multiplier.
static int sFastTrackMultiplier = kFastTrackMultiplier;
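// For example, a device could shrink fast track client buffers to a single HAL period with
// "adb shell setprop af.fast_track_multiplier 1"; sFastTrackMultiplierInit() below reads the
// property and ignores values outside [kFastTrackMultiplierMin, kFastTrackMultiplierMax].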

// See Thread::readOnlyHeap().
// Initially this heap is used to allocate client buffers for "fast" AudioRecord.
// Eventually it will be the single buffer that FastCapture writes into via HAL read(),
// and that all "fast" AudioRecord clients read from. In either case, the size can be small.
static const size_t kRecordThreadReadOnlyHeapSize = 0xD000;
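// 0xD000 == 53,248 bytes (13 pages of 4 KiB).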

// ----------------------------------------------------------------------------

// TODO: move all toString helpers to audio.h
// under #ifdef __cplusplus #endif
static std::string patchSinksToString(const struct audio_patch *patch)
{
    std::stringstream ss;
    for (size_t i = 0; i < patch->num_sinks; ++i) {
        if (i > 0) {
            ss << "|";
        }
        ss << "(" << toString(patch->sinks[i].ext.device.type)
                << ", " << patch->sinks[i].ext.device.address << ")";
    }
    return ss.str();
}

static std::string patchSourcesToString(const struct audio_patch *patch)
{
    std::stringstream ss;
    for (size_t i = 0; i < patch->num_sources; ++i) {
        if (i > 0) {
            ss << "|";
        }
        ss << "(" << toString(patch->sources[i].ext.device.type)
                << ", " << patch->sources[i].ext.device.address << ")";
    }
    return ss.str();
}

static std::string toString(audio_latency_mode_t mode) {
    // We convert to the AIDL type to print (eventually the legacy type will be removed).
    const auto result = legacy2aidl_audio_latency_mode_t_AudioLatencyMode(mode);
    return result.has_value() ? media::audio::common::toString(*result) : "UNKNOWN";
}

// Could be made a template, but other toString overloads for std::vector are confused.
static std::string toString(const std::vector<audio_latency_mode_t>& elements) {
    std::string s("{ ");
    for (const auto& e : elements) {
        s.append(toString(e));
        s.append(" ");
    }
    s.append("}");
    return s;
}

static pthread_once_t sFastTrackMultiplierOnce = PTHREAD_ONCE_INIT;

static void sFastTrackMultiplierInit()
{
    char value[PROPERTY_VALUE_MAX];
    if (property_get("af.fast_track_multiplier", value, NULL) > 0) {
        char *endptr;
        unsigned long ul = strtoul(value, &endptr, 0);
        if (*endptr == '\0' && kFastTrackMultiplierMin <= ul && ul <= kFastTrackMultiplierMax) {
            sFastTrackMultiplier = (int) ul;
        }
    }
}
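// Presumably run exactly once via pthread_once(&sFastTrackMultiplierOnce, sFastTrackMultiplierInit)
// where the multiplier is first needed; note that strtoul() with base 0 accepts decimal,
// hex ("0x...") and octal property values.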

// ----------------------------------------------------------------------------

#ifdef ADD_BATTERY_DATA
// To collect the amplifier usage
static void addBatteryData(uint32_t params) {
    sp<IMediaPlayerService> service = IMediaDeathNotifier::getMediaPlayerService();
    if (service == NULL) {
        // getMediaPlayerService() already logged the failure
        return;
    }

    service->addBatteryData(params);
}
#endif

// Track the CLOCK_BOOTTIME versus CLOCK_MONOTONIC timebase offset
struct {
    // call when you acquire a partial wakelock
    void acquire(const sp<IBinder> &wakeLockToken) {
        pthread_mutex_lock(&mLock);
        if (wakeLockToken.get() == nullptr) {
            adjustTimebaseOffset(&mBoottimeOffset, ExtendedTimestamp::TIMEBASE_BOOTTIME);
        } else {
            if (mCount == 0) {
                adjustTimebaseOffset(&mBoottimeOffset, ExtendedTimestamp::TIMEBASE_BOOTTIME);
            }
            ++mCount;
        }
        pthread_mutex_unlock(&mLock);
    }

    // call when you release a partial wakelock.
    void release(const sp<IBinder> &wakeLockToken) {
        if (wakeLockToken.get() == nullptr) {
            return;
        }
        pthread_mutex_lock(&mLock);
        if (--mCount < 0) {
            ALOGE("negative wakelock count");
            mCount = 0;
        }
        pthread_mutex_unlock(&mLock);
    }

    // retrieves the boottime timebase offset from monotonic.
    int64_t getBoottimeOffset() {
        pthread_mutex_lock(&mLock);
        int64_t boottimeOffset = mBoottimeOffset;
        pthread_mutex_unlock(&mLock);
        return boottimeOffset;
    }

    // Adjusts the timebase offset between TIMEBASE_MONOTONIC
    // and the selected timebase.
    // Currently only TIMEBASE_BOOTTIME is allowed.
    //
    // This only needs to be called upon acquiring the first partial wakelock
    // after all other partial wakelocks are released.
    //
    // We do an empirical measurement of the offset rather than parsing
    // /proc/timer_list since the latter is not a formal kernel ABI.
    static void adjustTimebaseOffset(int64_t *offset, ExtendedTimestamp::Timebase timebase) {
        int clockbase;
        switch (timebase) {
        case ExtendedTimestamp::TIMEBASE_BOOTTIME:
            clockbase = SYSTEM_TIME_BOOTTIME;
            break;
        default:
            LOG_ALWAYS_FATAL("invalid timebase %d", timebase);
            break;
        }
        // try three times to get the clock offset, choose the one
        // with the minimum gap in measurements.
        const int tries = 3;
        nsecs_t bestGap = 0, measured = 0; // not required, initialized for clang-tidy
        for (int i = 0; i < tries; ++i) {
            const nsecs_t tmono = systemTime(SYSTEM_TIME_MONOTONIC);
            const nsecs_t tbase = systemTime(clockbase);
            const nsecs_t tmono2 = systemTime(SYSTEM_TIME_MONOTONIC);
            const nsecs_t gap = tmono2 - tmono;
            if (i == 0 || gap < bestGap) {
                bestGap = gap;
                measured = tbase - ((tmono + tmono2) >> 1);
            }
        }
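        // 'measured' estimates (boottime - monotonic) by sampling the boottime clock between two
        // monotonic reads and subtracting their midpoint; keeping the attempt with the smallest
        // read-to-read gap bounds the sampling error most tightly.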

        // to avoid micro-adjusting, we don't change the timebase
        // unless it is significantly different.
        //
        // Assumption: It probably takes more than toleranceNs to
        // suspend and resume the device.
        static int64_t toleranceNs = 10000; // 10 us
        if (llabs(*offset - measured) > toleranceNs) {
            ALOGV("Adjusting timebase offset old: %lld new: %lld",
                    (long long)*offset, (long long)measured);
            *offset = measured;
        }
    }

    pthread_mutex_t mLock;
    int32_t mCount;
    int64_t mBoottimeOffset;
} gBoottime = { PTHREAD_MUTEX_INITIALIZER, 0, 0 }; // static, so use POD initialization
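
// acquireWakeLock_l() and releaseWakeLock_l() below pair gBoottime.acquire()/release(), and copy
// getBoottimeOffset() into mTimestamp.mTimebaseOffset[TIMEBASE_BOOTTIME], so the offset is
// refreshed whenever the first partial wakelock is taken.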

// ----------------------------------------------------------------------------
//      CPU Stats
// ----------------------------------------------------------------------------

class CpuStats {
public:
    CpuStats();
    void sample(const String8 &title);
#ifdef DEBUG_CPU_USAGE
private:
    ThreadCpuUsage mCpuUsage;                  // instantaneous thread CPU usage in wall clock ns
    audio_utils::Statistics<double> mWcStats;  // statistics on thread CPU usage in wall clock ns

    audio_utils::Statistics<double> mHzStats;  // statistics on thread CPU usage in cycles

    int mCpuNum;                               // thread's current CPU number
    int mCpukHz;                               // frequency of thread's current CPU in kHz
#endif
};

CpuStats::CpuStats()
#ifdef DEBUG_CPU_USAGE
    : mCpuNum(-1), mCpukHz(-1)
#endif
{
}

void CpuStats::sample(const String8 &title
#ifndef DEBUG_CPU_USAGE
                __unused
#endif
        ) {
#ifdef DEBUG_CPU_USAGE
    // get current thread's delta CPU time in wall clock ns
    double wcNs;
    bool valid = mCpuUsage.sampleAndEnable(wcNs);

    // record sample for wall clock statistics
    if (valid) {
        mWcStats.add(wcNs);
    }

    // get the current CPU number
    int cpuNum = sched_getcpu();

    // get the current CPU frequency in kHz
    int cpukHz = mCpuUsage.getCpukHz(cpuNum);

    // check if either CPU number or frequency changed
    if (cpuNum != mCpuNum || cpukHz != mCpukHz) {
        mCpuNum = cpuNum;
        mCpukHz = cpukHz;
        // ignore sample for purposes of cycles
        valid = false;
    }

    // if no change in CPU number or frequency, then record sample for cycle statistics
    if (valid && mCpukHz > 0) {
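        // wcNs [ns] * cpukHz [1000 cycles/s] * 1e-6 yields the CPU cycles consumed this loop.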
        const double cycles = wcNs * cpukHz * 0.000001;
        mHzStats.add(cycles);
    }

    const unsigned n = mWcStats.getN();
    // mCpuUsage.elapsed() is expensive, so don't call it every loop
    if ((n & 127) == 1) {
        const long long elapsed = mCpuUsage.elapsed();
        if (elapsed >= DEBUG_CPU_USAGE * 1000000000LL) {
            const double perLoop = elapsed / (double) n;
            const double perLoop100 = perLoop * 0.01;
            const double perLoop1k = perLoop * 0.001;
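            // Dividing ns by perLoop100 (1% of the ns per loop) gives percent of wall-clock time,
            // and dividing cycles by perLoop1k (ns per loop / 1000) gives an average rate in MHz.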
            const double mean = mWcStats.getMean();
            const double stddev = mWcStats.getStdDev();
            const double minimum = mWcStats.getMin();
            const double maximum = mWcStats.getMax();
            const double meanCycles = mHzStats.getMean();
            const double stddevCycles = mHzStats.getStdDev();
            const double minCycles = mHzStats.getMin();
            const double maxCycles = mHzStats.getMax();
            mCpuUsage.resetElapsed();
            mWcStats.reset();
            mHzStats.reset();
            ALOGD("CPU usage for %s over past %.1f secs\n"
                " (%u mixer loops at %.1f mean ms per loop):\n"
                " us per mix loop: mean=%.0f stddev=%.0f min=%.0f max=%.0f\n"
                " %% of wall: mean=%.1f stddev=%.1f min=%.1f max=%.1f\n"
                " MHz: mean=%.1f, stddev=%.1f, min=%.1f max=%.1f",
                    title.string(),
                    elapsed * .000000001, n, perLoop * .000001,
                    mean * .001,
                    stddev * .001,
                    minimum * .001,
                    maximum * .001,
                    mean / perLoop100,
                    stddev / perLoop100,
                    minimum / perLoop100,
                    maximum / perLoop100,
                    meanCycles / perLoop1k,
                    stddevCycles / perLoop1k,
                    minCycles / perLoop1k,
                    maxCycles / perLoop1k);

        }
    }
#endif
}

// ----------------------------------------------------------------------------
//      ThreadBase
// ----------------------------------------------------------------------------

// static
const char *AudioFlinger::ThreadBase::threadTypeToString(AudioFlinger::ThreadBase::type_t type)
{
    switch (type) {
    case MIXER:
        return "MIXER";
    case DIRECT:
        return "DIRECT";
    case DUPLICATING:
        return "DUPLICATING";
    case RECORD:
        return "RECORD";
    case OFFLOAD:
        return "OFFLOAD";
    case MMAP_PLAYBACK:
        return "MMAP_PLAYBACK";
    case MMAP_CAPTURE:
        return "MMAP_CAPTURE";
    case SPATIALIZER:
        return "SPATIALIZER";
    case BIT_PERFECT:
        return "BIT_PERFECT";
    default:
        return "unknown";
    }
}

AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
        type_t type, bool systemReady, bool isOut)
    :   Thread(false /*canCallJava*/),
        mType(type),
        mAudioFlinger(audioFlinger),
        mThreadMetrics(std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_THREAD) + std::to_string(id),
                isOut),
        mIsOut(isOut),
        // mSampleRate, mFrameCount, mChannelMask, mChannelCount, mFrameSize, mFormat, mBufferSize
        // are set by PlaybackThread::readOutputParameters_l() or
        // RecordThread::readInputParameters_l()
        //FIXME: mStandby should be true here. Is this some kind of hack?
        mStandby(false),
        mAudioSource(AUDIO_SOURCE_DEFAULT), mId(id),
        // mName will be set by concrete (non-virtual) subclass
        mDeathRecipient(new PMDeathRecipient(this)),
        mSystemReady(systemReady),
        mSignalPending(false)
{
    mThreadMetrics.logConstructor(getpid(), threadTypeToString(type), id);
    memset(&mPatch, 0, sizeof(struct audio_patch));
}

AudioFlinger::ThreadBase::~ThreadBase()
{
    // mConfigEvents should be empty, but just in case it isn't, free the memory it owns
    mConfigEvents.clear();

    // do not lock the mutex in destructor
    releaseWakeLock_l();
    if (mPowerManager != 0) {
        sp<IBinder> binder = IInterface::asBinder(mPowerManager);
        binder->unlinkToDeath(mDeathRecipient);
    }

    sendStatistics(true /* force */);
}

status_t AudioFlinger::ThreadBase::readyToRun()
{
    status_t status = initCheck();
    if (status == NO_ERROR) {
        ALOGI("AudioFlinger's thread %p tid=%d ready to run", this, getTid());
    } else {
        ALOGE("No working audio driver found.");
    }
    return status;
}

void AudioFlinger::ThreadBase::exit()
{
    ALOGV("ThreadBase::exit");
    // do any cleanup required for exit to succeed
    preExit();
    {
        // This lock prevents the following race in thread (uniprocessor for illustration):
        //  if (!exitPending()) {
        //      // context switch from here to exit()
        //      // exit() calls requestExit(), what exitPending() observes
        //      // exit() calls signal(), which is dropped since no waiters
        //      // context switch back from exit() to here
        //      mWaitWorkCV.wait(...);
        //      // now thread is hung
        //  }
        AutoMutex lock(mLock);
        requestExit();
        mWaitWorkCV.broadcast();
    }
    // When Thread::requestExitAndWait is made virtual and this method is renamed to
    // "virtual status_t requestExitAndWait()", replace by "return Thread::requestExitAndWait();"
    requestExitAndWait();
}

status_t AudioFlinger::ThreadBase::setParameters(const String8& keyValuePairs)
{
    ALOGV("ThreadBase::setParameters() %s", keyValuePairs.string());
    Mutex::Autolock _l(mLock);

    return sendSetParameterConfigEvent_l(keyValuePairs);
}

// sendConfigEvent_l() must be called with ThreadBase::mLock held
// Can temporarily release the lock if waiting for a reply from processConfigEvents_l().
status_t AudioFlinger::ThreadBase::sendConfigEvent_l(sp<ConfigEvent>& event)
NO_THREAD_SAFETY_ANALYSIS  // condition variable
{
    status_t status = NO_ERROR;

    if (event->mRequiresSystemReady && !mSystemReady) {
        event->mWaitStatus = false;
        mPendingConfigEvents.add(event);
        return status;
    }
    mConfigEvents.add(event);
    ALOGV("sendConfigEvent_l() num events %zu event %d", mConfigEvents.size(), event->mType);
    mWaitWorkCV.signal();
    mLock.unlock();
    {
        Mutex::Autolock _l(event->mLock);
        while (event->mWaitStatus) {
            if (event->mCond.waitRelative(event->mLock, kConfigEventTimeoutNs) != NO_ERROR) {
                event->mStatus = TIMED_OUT;
                event->mWaitStatus = false;
            }
        }
        status = event->mStatus;
    }
    mLock.lock();
    return status;
}

void AudioFlinger::ThreadBase::sendIoConfigEvent(audio_io_config_event_t event, pid_t pid,
                                                 audio_port_handle_t portId)
{
    Mutex::Autolock _l(mLock);
    sendIoConfigEvent_l(event, pid, portId);
}

// sendIoConfigEvent_l() must be called with ThreadBase::mLock held
void AudioFlinger::ThreadBase::sendIoConfigEvent_l(audio_io_config_event_t event, pid_t pid,
                                                   audio_port_handle_t portId)
{
    // The audio statistics history is exponentially weighted to forget events
    // about five or more seconds in the past. In order to have
    // crisper statistics for mediametrics, we reset the statistics on
    // an IoConfigEvent, to reflect different properties for a new device.
    mIoJitterMs.reset();
    mLatencyMs.reset();
    mProcessTimeMs.reset();
    mMonopipePipeDepthStats.reset();
    mTimestampVerifier.discontinuity(mTimestampVerifier.DISCONTINUITY_MODE_CONTINUOUS);

    sp<ConfigEvent> configEvent = (ConfigEvent *)new IoConfigEvent(event, pid, portId);
    sendConfigEvent_l(configEvent);
}

void AudioFlinger::ThreadBase::sendPrioConfigEvent(pid_t pid, pid_t tid, int32_t prio, bool forApp)
{
    Mutex::Autolock _l(mLock);
    sendPrioConfigEvent_l(pid, tid, prio, forApp);
}

// sendPrioConfigEvent_l() must be called with ThreadBase::mLock held
void AudioFlinger::ThreadBase::sendPrioConfigEvent_l(
        pid_t pid, pid_t tid, int32_t prio, bool forApp)
{
    sp<ConfigEvent> configEvent = (ConfigEvent *)new PrioConfigEvent(pid, tid, prio, forApp);
    sendConfigEvent_l(configEvent);
}

// sendSetParameterConfigEvent_l() must be called with ThreadBase::mLock held
status_t AudioFlinger::ThreadBase::sendSetParameterConfigEvent_l(const String8& keyValuePair)
{
    sp<ConfigEvent> configEvent;
    AudioParameter param(keyValuePair);
    int value;
    if (param.getInt(String8(AudioParameter::keyMonoOutput), value) == NO_ERROR) {
        setMasterMono_l(value != 0);
        if (param.size() == 1) {
            return NO_ERROR; // should be a solo parameter - we don't pass down
        }
        param.remove(String8(AudioParameter::keyMonoOutput));
        configEvent = new SetParameterConfigEvent(param.toString());
    } else {
        configEvent = new SetParameterConfigEvent(keyValuePair);
    }
    return sendConfigEvent_l(configEvent);
}

status_t AudioFlinger::ThreadBase::sendCreateAudioPatchConfigEvent(
        const struct audio_patch *patch,
        audio_patch_handle_t *handle)
{
    Mutex::Autolock _l(mLock);
    sp<ConfigEvent> configEvent = (ConfigEvent *)new CreateAudioPatchConfigEvent(*patch, *handle);
    status_t status = sendConfigEvent_l(configEvent);
    if (status == NO_ERROR) {
        CreateAudioPatchConfigEventData *data =
                (CreateAudioPatchConfigEventData *)configEvent->mData.get();
        *handle = data->mHandle;
    }
    return status;
}

status_t AudioFlinger::ThreadBase::sendReleaseAudioPatchConfigEvent(
        const audio_patch_handle_t handle)
{
    Mutex::Autolock _l(mLock);
    sp<ConfigEvent> configEvent = (ConfigEvent *)new ReleaseAudioPatchConfigEvent(handle);
    return sendConfigEvent_l(configEvent);
}

status_t AudioFlinger::ThreadBase::sendUpdateOutDeviceConfigEvent(
        const DeviceDescriptorBaseVector& outDevices)
{
    if (type() != RECORD) {
        // The update out device operation is only for record thread.
        return INVALID_OPERATION;
    }
    Mutex::Autolock _l(mLock);
    sp<ConfigEvent> configEvent = (ConfigEvent *)new UpdateOutDevicesConfigEvent(outDevices);
    return sendConfigEvent_l(configEvent);
}

void AudioFlinger::ThreadBase::sendResizeBufferConfigEvent_l(int32_t maxSharedAudioHistoryMs)
{
    ALOG_ASSERT(type() == RECORD, "sendResizeBufferConfigEvent_l() called on non record thread");
    sp<ConfigEvent> configEvent =
            (ConfigEvent *)new ResizeBufferConfigEvent(maxSharedAudioHistoryMs);
    sendConfigEvent_l(configEvent);
}

void AudioFlinger::ThreadBase::sendCheckOutputStageEffectsEvent()
{
    Mutex::Autolock _l(mLock);
    sendCheckOutputStageEffectsEvent_l();
}

void AudioFlinger::ThreadBase::sendCheckOutputStageEffectsEvent_l()
{
    sp<ConfigEvent> configEvent =
            (ConfigEvent *)new CheckOutputStageEffectsEvent();
    sendConfigEvent_l(configEvent);
}

void AudioFlinger::ThreadBase::sendHalLatencyModesChangedEvent_l()
{
    sp<ConfigEvent> configEvent = sp<HalLatencyModesChangedEvent>::make();
    sendConfigEvent_l(configEvent);
}

// post condition: mConfigEvents.isEmpty()
void AudioFlinger::ThreadBase::processConfigEvents_l()
{
    bool configChanged = false;

    while (!mConfigEvents.isEmpty()) {
        ALOGV("processConfigEvents_l() remaining events %zu", mConfigEvents.size());
        sp<ConfigEvent> event = mConfigEvents[0];
        mConfigEvents.removeAt(0);
        switch (event->mType) {
        case CFG_EVENT_PRIO: {
            PrioConfigEventData *data = (PrioConfigEventData *)event->mData.get();
            // FIXME Need to understand why this has to be done asynchronously
            int err = requestPriority(data->mPid, data->mTid, data->mPrio, data->mForApp,
                    true /*asynchronous*/);
            if (err != 0) {
                ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d",
                      data->mPrio, data->mPid, data->mTid, err);
            }
        } break;
        case CFG_EVENT_IO: {
            IoConfigEventData *data = (IoConfigEventData *)event->mData.get();
            ioConfigChanged(data->mEvent, data->mPid, data->mPortId);
        } break;
        case CFG_EVENT_SET_PARAMETER: {
            SetParameterConfigEventData *data = (SetParameterConfigEventData *)event->mData.get();
            if (checkForNewParameter_l(data->mKeyValuePairs, event->mStatus)) {
                configChanged = true;
                mLocalLog.log("CFG_EVENT_SET_PARAMETER: (%s) configuration changed",
                        data->mKeyValuePairs.string());
            }
        } break;
        case CFG_EVENT_CREATE_AUDIO_PATCH: {
            const DeviceTypeSet oldDevices = getDeviceTypes();
            CreateAudioPatchConfigEventData *data =
                    (CreateAudioPatchConfigEventData *)event->mData.get();
            event->mStatus = createAudioPatch_l(&data->mPatch, &data->mHandle);
            const DeviceTypeSet newDevices = getDeviceTypes();
            configChanged = oldDevices != newDevices;
            mLocalLog.log("CFG_EVENT_CREATE_AUDIO_PATCH: old device %s (%s) new device %s (%s)",
                    dumpDeviceTypes(oldDevices).c_str(), toString(oldDevices).c_str(),
                    dumpDeviceTypes(newDevices).c_str(), toString(newDevices).c_str());
        } break;
        case CFG_EVENT_RELEASE_AUDIO_PATCH: {
            const DeviceTypeSet oldDevices = getDeviceTypes();
            ReleaseAudioPatchConfigEventData *data =
                    (ReleaseAudioPatchConfigEventData *)event->mData.get();
            event->mStatus = releaseAudioPatch_l(data->mHandle);
            const DeviceTypeSet newDevices = getDeviceTypes();
            configChanged = oldDevices != newDevices;
            mLocalLog.log("CFG_EVENT_RELEASE_AUDIO_PATCH: old device %s (%s) new device %s (%s)",
                    dumpDeviceTypes(oldDevices).c_str(), toString(oldDevices).c_str(),
                    dumpDeviceTypes(newDevices).c_str(), toString(newDevices).c_str());
        } break;
        case CFG_EVENT_UPDATE_OUT_DEVICE: {
            UpdateOutDevicesConfigEventData *data =
                    (UpdateOutDevicesConfigEventData *)event->mData.get();
            updateOutDevices(data->mOutDevices);
        } break;
        case CFG_EVENT_RESIZE_BUFFER: {
            ResizeBufferConfigEventData *data =
                    (ResizeBufferConfigEventData *)event->mData.get();
            resizeInputBuffer_l(data->mMaxSharedAudioHistoryMs);
        } break;

        case CFG_EVENT_CHECK_OUTPUT_STAGE_EFFECTS: {
            setCheckOutputStageEffects();
        } break;

        case CFG_EVENT_HAL_LATENCY_MODES_CHANGED: {
            onHalLatencyModesChanged_l();
        } break;

        default:
            ALOG_ASSERT(false, "processConfigEvents_l() unknown event type %d", event->mType);
            break;
        }
        {
            Mutex::Autolock _l(event->mLock);
            if (event->mWaitStatus) {
                event->mWaitStatus = false;
                event->mCond.signal();
            }
        }
        ALOGV_IF(mConfigEvents.isEmpty(), "processConfigEvents_l() DONE thread %p", this);
    }

    if (configChanged) {
        cacheParameters_l();
    }
}

String8 channelMaskToString(audio_channel_mask_t mask, bool output) {
    String8 s;
    const audio_channel_representation_t representation =
            audio_channel_mask_get_representation(mask);

    switch (representation) {
    // Traverse all single-bit channels to convert the channel mask to a string.
    case AUDIO_CHANNEL_REPRESENTATION_POSITION: {
        if (output) {
            if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT) s.append("front-left, ");
            if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT) s.append("front-right, ");
            if (mask & AUDIO_CHANNEL_OUT_FRONT_CENTER) s.append("front-center, ");
            if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY) s.append("low-frequency, ");
            if (mask & AUDIO_CHANNEL_OUT_BACK_LEFT) s.append("back-left, ");
            if (mask & AUDIO_CHANNEL_OUT_BACK_RIGHT) s.append("back-right, ");
            if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER) s.append("front-left-of-center, ");
            if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER) s.append("front-right-of-center, ");
            if (mask & AUDIO_CHANNEL_OUT_BACK_CENTER) s.append("back-center, ");
            if (mask & AUDIO_CHANNEL_OUT_SIDE_LEFT) s.append("side-left, ");
            if (mask & AUDIO_CHANNEL_OUT_SIDE_RIGHT) s.append("side-right, ");
            if (mask & AUDIO_CHANNEL_OUT_TOP_CENTER) s.append("top-center, ");
            if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT) s.append("top-front-left, ");
            if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER) s.append("top-front-center, ");
            if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT) s.append("top-front-right, ");
            if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_LEFT) s.append("top-back-left, ");
            if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_CENTER) s.append("top-back-center, ");
            if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) s.append("top-back-right, ");
            if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT) s.append("top-side-left, ");
            if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT) s.append("top-side-right, ");
            if (mask & AUDIO_CHANNEL_OUT_BOTTOM_FRONT_LEFT) s.append("bottom-front-left, ");
            if (mask & AUDIO_CHANNEL_OUT_BOTTOM_FRONT_CENTER) s.append("bottom-front-center, ");
            if (mask & AUDIO_CHANNEL_OUT_BOTTOM_FRONT_RIGHT) s.append("bottom-front-right, ");
            if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) s.append("low-frequency-2, ");
            if (mask & AUDIO_CHANNEL_OUT_HAPTIC_B) s.append("haptic-B, ");
            if (mask & AUDIO_CHANNEL_OUT_HAPTIC_A) s.append("haptic-A, ");
            if (mask & ~AUDIO_CHANNEL_OUT_ALL) s.append("unknown, ");
        } else {
            if (mask & AUDIO_CHANNEL_IN_LEFT) s.append("left, ");
            if (mask & AUDIO_CHANNEL_IN_RIGHT) s.append("right, ");
            if (mask & AUDIO_CHANNEL_IN_FRONT) s.append("front, ");
            if (mask & AUDIO_CHANNEL_IN_BACK) s.append("back, ");
            if (mask & AUDIO_CHANNEL_IN_LEFT_PROCESSED) s.append("left-processed, ");
            if (mask & AUDIO_CHANNEL_IN_RIGHT_PROCESSED) s.append("right-processed, ");
            if (mask & AUDIO_CHANNEL_IN_FRONT_PROCESSED) s.append("front-processed, ");
            if (mask & AUDIO_CHANNEL_IN_BACK_PROCESSED) s.append("back-processed, ");
            if (mask & AUDIO_CHANNEL_IN_PRESSURE) s.append("pressure, ");
            if (mask & AUDIO_CHANNEL_IN_X_AXIS) s.append("X, ");
            if (mask & AUDIO_CHANNEL_IN_Y_AXIS) s.append("Y, ");
            if (mask & AUDIO_CHANNEL_IN_Z_AXIS) s.append("Z, ");
            if (mask & AUDIO_CHANNEL_IN_BACK_LEFT) s.append("back-left, ");
            if (mask & AUDIO_CHANNEL_IN_BACK_RIGHT) s.append("back-right, ");
            if (mask & AUDIO_CHANNEL_IN_CENTER) s.append("center, ");
            if (mask & AUDIO_CHANNEL_IN_LOW_FREQUENCY) s.append("low-frequency, ");
            if (mask & AUDIO_CHANNEL_IN_TOP_LEFT) s.append("top-left, ");
            if (mask & AUDIO_CHANNEL_IN_TOP_RIGHT) s.append("top-right, ");
            if (mask & AUDIO_CHANNEL_IN_VOICE_UPLINK) s.append("voice-uplink, ");
            if (mask & AUDIO_CHANNEL_IN_VOICE_DNLINK) s.append("voice-dnlink, ");
            if (mask & ~AUDIO_CHANNEL_IN_ALL) s.append("unknown, ");
        }
        const int len = s.length();
        if (len > 2) {
            (void) s.lockBuffer(len);      // needed?
            s.unlockBuffer(len - 2);       // remove trailing ", "
        }
        return s;
    }
    case AUDIO_CHANNEL_REPRESENTATION_INDEX:
        s.appendFormat("index mask, bits:%#x", audio_channel_mask_get_bits(mask));
        return s;
    default:
        s.appendFormat("unknown mask, representation:%d bits:%#x",
                representation, audio_channel_mask_get_bits(mask));
        return s;
    }
}

void AudioFlinger::ThreadBase::dump(int fd, const Vector<String16>& args)
NO_THREAD_SAFETY_ANALYSIS  // conditional try lock
{
    dprintf(fd, "\n%s thread %p, name %s, tid %d, type %d (%s):\n", isOutput() ? "Output" : "Input",
            this, mThreadName, getTid(), type(), threadTypeToString(type()));

    bool locked = AudioFlinger::dumpTryLock(mLock);
    if (!locked) {
        dprintf(fd, " Thread may be deadlocked\n");
    }

    dumpBase_l(fd, args);
    dumpInternals_l(fd, args);
    dumpTracks_l(fd, args);
    dumpEffectChains_l(fd, args);

    if (locked) {
        mLock.unlock();
    }

    dprintf(fd, " Local log:\n");
    mLocalLog.dump(fd, " " /* prefix */, 40 /* lines */);

    // --all does the statistics
    bool dumpAll = false;
    for (const auto &arg : args) {
        if (arg == String16("--all")) {
            dumpAll = true;
        }
    }
    if (dumpAll || type() == SPATIALIZER) {
        const std::string sched = mThreadSnapshot.toString();
        if (!sched.empty()) {
            (void)write(fd, sched.c_str(), sched.size());
        }
    }
}

void AudioFlinger::ThreadBase::dumpBase_l(int fd, const Vector<String16>& args __unused)
{
    dprintf(fd, " I/O handle: %d\n", mId);
    dprintf(fd, " Standby: %s\n", mStandby ? "yes" : "no");
    dprintf(fd, " Sample rate: %u Hz\n", mSampleRate);
    dprintf(fd, " HAL frame count: %zu\n", mFrameCount);
    dprintf(fd, " HAL format: 0x%x (%s)\n", mHALFormat, formatToString(mHALFormat).c_str());
    dprintf(fd, " HAL buffer size: %zu bytes\n", mBufferSize);
    dprintf(fd, " Channel count: %u\n", mChannelCount);
    dprintf(fd, " Channel mask: 0x%08x (%s)\n", mChannelMask,
            channelMaskToString(mChannelMask, mType != RECORD).string());
    dprintf(fd, " Processing format: 0x%x (%s)\n", mFormat, formatToString(mFormat).c_str());
    dprintf(fd, " Processing frame size: %zu bytes\n", mFrameSize);
    dprintf(fd, " Pending config events:");
    size_t numConfig = mConfigEvents.size();
    if (numConfig) {
        const size_t SIZE = 256;
        char buffer[SIZE];
        for (size_t i = 0; i < numConfig; i++) {
            mConfigEvents[i]->dump(buffer, SIZE);
            dprintf(fd, "\n %s", buffer);
        }
        dprintf(fd, "\n");
    } else {
        dprintf(fd, " none\n");
    }
    // Note: output device may be used by capture threads for effects such as AEC.
    dprintf(fd, " Output devices: %s (%s)\n",
            dumpDeviceTypes(outDeviceTypes()).c_str(), toString(outDeviceTypes()).c_str());
    dprintf(fd, " Input device: %#x (%s)\n",
            inDeviceType(), toString(inDeviceType()).c_str());
    dprintf(fd, " Audio source: %d (%s)\n", mAudioSource, toString(mAudioSource).c_str());

    // Dump timestamp statistics for the Thread types that support it.
    if (mType == RECORD
            || mType == MIXER
            || mType == DUPLICATING
            || mType == DIRECT
            || mType == OFFLOAD
            || mType == SPATIALIZER) {
        dprintf(fd, " Timestamp stats: %s\n", mTimestampVerifier.toString().c_str());
        dprintf(fd, " Timestamp corrected: %s\n", isTimestampCorrectionEnabled() ? "yes" : "no");
    }

    if (mLastIoBeginNs > 0) { // MMAP may not set this
        dprintf(fd, " Last %s occurred (msecs): %lld\n",
                isOutput() ? "write" : "read",
                (long long) (systemTime() - mLastIoBeginNs) / NANOS_PER_MILLISECOND);
    }

    if (mProcessTimeMs.getN() > 0) {
        dprintf(fd, " Process time ms stats: %s\n", mProcessTimeMs.toString().c_str());
    }

    if (mIoJitterMs.getN() > 0) {
        dprintf(fd, " Hal %s jitter ms stats: %s\n",
                isOutput() ? "write" : "read",
                mIoJitterMs.toString().c_str());
    }

    if (mLatencyMs.getN() > 0) {
        dprintf(fd, " Threadloop %s latency stats: %s\n",
                isOutput() ? "write" : "read",
                mLatencyMs.toString().c_str());
    }

    if (mMonopipePipeDepthStats.getN() > 0) {
        dprintf(fd, " Monopipe %s pipe depth stats: %s\n",
                isOutput() ? "write" : "read",
                mMonopipePipeDepthStats.toString().c_str());
    }
}

void AudioFlinger::ThreadBase::dumpEffectChains_l(int fd, const Vector<String16>& args)
{
    const size_t SIZE = 256;
    char buffer[SIZE];

    size_t numEffectChains = mEffectChains.size();
    snprintf(buffer, SIZE, " %zu Effect Chains\n", numEffectChains);
    write(fd, buffer, strlen(buffer));

    for (size_t i = 0; i < numEffectChains; ++i) {
        sp<EffectChain> chain = mEffectChains[i];
        if (chain != 0) {
            chain->dump(fd, args);
        }
    }
}

void AudioFlinger::ThreadBase::acquireWakeLock()
{
    Mutex::Autolock _l(mLock);
    acquireWakeLock_l();
}

String16 AudioFlinger::ThreadBase::getWakeLockTag()
{
    switch (mType) {
    case MIXER:
        return String16("AudioMix");
    case DIRECT:
        return String16("AudioDirectOut");
    case DUPLICATING:
        return String16("AudioDup");
    case RECORD:
        return String16("AudioIn");
    case OFFLOAD:
        return String16("AudioOffload");
    case MMAP_PLAYBACK:
        return String16("MmapPlayback");
    case MMAP_CAPTURE:
        return String16("MmapCapture");
    case SPATIALIZER:
        return String16("AudioSpatial");
    default:
        ALOG_ASSERT(false);
        return String16("AudioUnknown");
    }
}

void AudioFlinger::ThreadBase::acquireWakeLock_l()
{
    getPowerManager_l();
    if (mPowerManager != 0) {
        sp<IBinder> binder = new BBinder();
        // Uses AID_AUDIOSERVER for wakelock. updateWakeLockUids_l() updates with client uids.
        binder::Status status = mPowerManager->acquireWakeLockAsync(binder,
                    POWERMANAGER_PARTIAL_WAKE_LOCK,
                    getWakeLockTag(),
                    String16("audioserver"),
                    {} /* workSource */,
                    {} /* historyTag */);
        if (status.isOk()) {
            mWakeLockToken = binder;
        }
        ALOGV("acquireWakeLock_l() %s status %d", mThreadName, status.exceptionCode());
    }

    gBoottime.acquire(mWakeLockToken);
    mTimestamp.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_BOOTTIME] =
            gBoottime.getBoottimeOffset();
}

void AudioFlinger::ThreadBase::releaseWakeLock()
{
    Mutex::Autolock _l(mLock);
    releaseWakeLock_l();
}

void AudioFlinger::ThreadBase::releaseWakeLock_l()
{
    gBoottime.release(mWakeLockToken);
    if (mWakeLockToken != 0) {
        ALOGV("releaseWakeLock_l() %s", mThreadName);
        if (mPowerManager != 0) {
            mPowerManager->releaseWakeLockAsync(mWakeLockToken, 0);
        }
        mWakeLockToken.clear();
    }
}

void AudioFlinger::ThreadBase::getPowerManager_l() {
    if (mSystemReady && mPowerManager == 0) {
        // use checkService() to avoid blocking if power service is not up yet
        sp<IBinder> binder =
                defaultServiceManager()->checkService(String16("power"));
        if (binder == 0) {
            ALOGW("Thread %s cannot connect to the power manager service", mThreadName);
        } else {
            mPowerManager = interface_cast<os::IPowerManager>(binder);
            binder->linkToDeath(mDeathRecipient);
        }
    }
}

void AudioFlinger::ThreadBase::updateWakeLockUids_l(const SortedVector<uid_t> &uids) {
    getPowerManager_l();

#if !LOG_NDEBUG
    std::stringstream s;
    for (uid_t uid : uids) {
        s << uid << " ";
    }
    ALOGD("updateWakeLockUids_l %s uids:%s", mThreadName, s.str().c_str());
#endif

    if (mWakeLockToken == NULL) { // token may be NULL if AudioFlinger::systemReady() not called.
        if (mSystemReady) {
            ALOGE("no wake lock to update, but system ready!");
        } else {
            ALOGW("no wake lock to update, system not ready yet");
        }
        return;
    }
    if (mPowerManager != 0) {
        std::vector<int> uidsAsInt(uids.begin(), uids.end()); // powermanager expects uids as ints
        binder::Status status = mPowerManager->updateWakeLockUidsAsync(
                mWakeLockToken, uidsAsInt);
        ALOGV("updateWakeLockUids_l() %s status %d", mThreadName, status.exceptionCode());
    }
}

void AudioFlinger::ThreadBase::clearPowerManager()
{
    Mutex::Autolock _l(mLock);
    releaseWakeLock_l();
    mPowerManager.clear();
}

void AudioFlinger::ThreadBase::updateOutDevices(
        const DeviceDescriptorBaseVector& outDevices __unused)
{
    ALOGE("%s should only be called in RecordThread", __func__);
}

void AudioFlinger::ThreadBase::resizeInputBuffer_l(int32_t maxSharedAudioHistoryMs __unused)
{
    ALOGE("%s should only be called in RecordThread", __func__);
}

void AudioFlinger::ThreadBase::PMDeathRecipient::binderDied(const wp<IBinder>& who __unused)
{
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        thread->clearPowerManager();
    }
    ALOGW("power manager service died !!!");
}

void AudioFlinger::ThreadBase::setEffectSuspended_l(
        const effect_uuid_t *type, bool suspend, audio_session_t sessionId)
{
    sp<EffectChain> chain = getEffectChain_l(sessionId);
    if (chain != 0) {
        if (type != NULL) {
            chain->setEffectSuspended_l(type, suspend);
        } else {
            chain->setEffectSuspendedAll_l(suspend);
        }
    }

    updateSuspendedSessions_l(type, suspend, sessionId);
}

void AudioFlinger::ThreadBase::checkSuspendOnAddEffectChain_l(const sp<EffectChain>& chain)
{
    ssize_t index = mSuspendedSessions.indexOfKey(chain->sessionId());
    if (index < 0) {
        return;
    }

    const KeyedVector <int, sp<SuspendedSessionDesc> >& sessionEffects =
            mSuspendedSessions.valueAt(index);

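    // Re-apply each suspend request once per outstanding reference below, so the chain's own
    // suspend reference counts end up matching what updateSuspendedSessions_l() has recorded.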
    for (size_t i = 0; i < sessionEffects.size(); i++) {
        const sp<SuspendedSessionDesc>& desc = sessionEffects.valueAt(i);
        for (int j = 0; j < desc->mRefCount; j++) {
            if (sessionEffects.keyAt(i) == EffectChain::kKeyForSuspendAll) {
                chain->setEffectSuspendedAll_l(true);
            } else {
                ALOGV("checkSuspendOnAddEffectChain_l() suspending effects %08x",
                        desc->mType.timeLow);
                chain->setEffectSuspended_l(&desc->mType, true);
            }
        }
    }
}

void AudioFlinger::ThreadBase::updateSuspendedSessions_l(const effect_uuid_t *type,
                                                         bool suspend,
                                                         audio_session_t sessionId)
{
    ssize_t index = mSuspendedSessions.indexOfKey(sessionId);

    KeyedVector <int, sp<SuspendedSessionDesc> > sessionEffects;

    if (suspend) {
        if (index >= 0) {
            sessionEffects = mSuspendedSessions.valueAt(index);
        } else {
            mSuspendedSessions.add(sessionId, sessionEffects);
        }
    } else {
        if (index < 0) {
            return;
        }
        sessionEffects = mSuspendedSessions.valueAt(index);
    }


    int key = EffectChain::kKeyForSuspendAll;
    if (type != NULL) {
        key = type->timeLow;
    }
    index = sessionEffects.indexOfKey(key);

    sp<SuspendedSessionDesc> desc;
    if (suspend) {
        if (index >= 0) {
            desc = sessionEffects.valueAt(index);
        } else {
            desc = new SuspendedSessionDesc();
            if (type != NULL) {
                desc->mType = *type;
            }
            sessionEffects.add(key, desc);
            ALOGV("updateSuspendedSessions_l() suspend adding effect %08x", key);
        }
        desc->mRefCount++;
    } else {
        if (index < 0) {
            return;
        }
        desc = sessionEffects.valueAt(index);
        if (--desc->mRefCount == 0) {
            ALOGV("updateSuspendedSessions_l() restore removing effect %08x", key);
            sessionEffects.removeItemsAt(index);
            if (sessionEffects.isEmpty()) {
                ALOGV("updateSuspendedSessions_l() restore removing session %d",
                        sessionId);
                mSuspendedSessions.removeItem(sessionId);
            }
        }
    }
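    // sessionEffects is a local copy of the KeyedVector, so any additions or ref-count changes
    // made above must be written back to mSuspendedSessions for them to take effect.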
    if (!sessionEffects.isEmpty()) {
        mSuspendedSessions.replaceValueFor(sessionId, sessionEffects);
    }
}

void AudioFlinger::ThreadBase::checkSuspendOnEffectEnabled(bool enabled,
                                                           audio_session_t sessionId,
                                                           bool threadLocked)
NO_THREAD_SAFETY_ANALYSIS  // manual locking
{
    if (!threadLocked) {
        mLock.lock();
    }

    if (mType != RECORD) {
        // suspend all effects in AUDIO_SESSION_OUTPUT_MIX when enabling any effect on
        // another session. This gives the priority to well behaved effect control panels
        // and applications not using global effects.
        // Enabling post processing in AUDIO_SESSION_OUTPUT_STAGE session does not affect
        // global effects
        if (!audio_is_global_session(sessionId)) {
            setEffectSuspended_l(NULL, enabled, AUDIO_SESSION_OUTPUT_MIX);
        }
    }

    if (!threadLocked) {
        mLock.unlock();
    }
}

// checkEffectCompatibility_l() must be called with ThreadBase::mLock held
status_t AudioFlinger::RecordThread::checkEffectCompatibility_l(
        const effect_descriptor_t *desc, audio_session_t sessionId)
{
    // No global output effect sessions on record threads
    if (sessionId == AUDIO_SESSION_OUTPUT_MIX
            || sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
        ALOGW("checkEffectCompatibility_l(): global effect %s on record thread %s",
                desc->name, mThreadName);
        return BAD_VALUE;
    }
    // only pre processing effects on record thread
    if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_PRE_PROC) {
        ALOGW("checkEffectCompatibility_l(): non pre processing effect %s on record thread %s",
                desc->name, mThreadName);
        return BAD_VALUE;
    }

    // always allow effects without processing load or latency
    if ((desc->flags & EFFECT_FLAG_NO_PROCESS_MASK) == EFFECT_FLAG_NO_PROCESS) {
        return NO_ERROR;
    }

    audio_input_flags_t flags = mInput->flags;
    if (hasFastCapture() || (flags & AUDIO_INPUT_FLAG_FAST)) {
        if (flags & AUDIO_INPUT_FLAG_RAW) {
            ALOGW("checkEffectCompatibility_l(): effect %s on record thread %s in raw mode",
                    desc->name, mThreadName);
            return BAD_VALUE;
        }
        if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) == 0) {
            ALOGW("checkEffectCompatibility_l(): non HW effect %s on record thread %s in fast mode",
                    desc->name, mThreadName);
            return BAD_VALUE;
        }
    }

    if (EffectModule::isHapticGenerator(&desc->type)) {
        ALOGE("%s(): HapticGenerator is not supported in RecordThread", __func__);
        return BAD_VALUE;
    }
    return NO_ERROR;
}

// checkEffectCompatibility_l() must be called with ThreadBase::mLock held
status_t AudioFlinger::PlaybackThread::checkEffectCompatibility_l(
        const effect_descriptor_t *desc, audio_session_t sessionId)
{
    // no preprocessing on playback threads
    if ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC) {
        ALOGW("%s: pre processing effect %s created on playback"
                " thread %s", __func__, desc->name, mThreadName);
        return BAD_VALUE;
    }

    // always allow effects without processing load or latency
    if ((desc->flags & EFFECT_FLAG_NO_PROCESS_MASK) == EFFECT_FLAG_NO_PROCESS) {
        return NO_ERROR;
    }

    if (EffectModule::isHapticGenerator(&desc->type) && mHapticChannelCount == 0) {
        ALOGW("%s: thread doesn't support haptic playback while the effect is HapticGenerator",
                __func__);
        return BAD_VALUE;
    }

    if (memcmp(&desc->type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0
            && mType != SPATIALIZER) {
        ALOGW("%s: attempt to create a spatializer effect on a thread of type %d",
                __func__, mType);
        return BAD_VALUE;
    }

    switch (mType) {
    case MIXER: {
#ifndef MULTICHANNEL_EFFECT_CHAIN
        // Reject any effect on mixer multichannel sinks.
        // TODO: fix both format and multichannel issues with effects.
        if (mChannelCount != FCC_2) {
            ALOGW("%s: effect %s for multichannel(%d) on MIXER thread %s",
                    __func__, desc->name, mChannelCount, mThreadName);
            return BAD_VALUE;
        }
#endif
        audio_output_flags_t flags = mOutput->flags;
        if (hasFastMixer() || (flags & AUDIO_OUTPUT_FLAG_FAST)) {
            if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
                // global effects are applied only to non fast tracks if they are SW
                if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) == 0) {
                    break;
                }
            } else if (sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
                // only post processing on output stage session
                if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
                    ALOGW("%s: non post processing effect %s not allowed on output stage session",
                            __func__, desc->name);
                    return BAD_VALUE;
                }
            } else if (sessionId == AUDIO_SESSION_DEVICE) {
                // only post processing on device session
                if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
                    ALOGW("%s: non post processing effect %s not allowed on device session",
                            __func__, desc->name);
                    return BAD_VALUE;
                }
            } else {
                // no restriction on effects applied on non fast tracks
                if ((hasAudioSession_l(sessionId) & ThreadBase::FAST_SESSION) == 0) {
                    break;
                }
            }

            if (flags & AUDIO_OUTPUT_FLAG_RAW) {
                ALOGW("%s: effect %s on playback thread in raw mode", __func__, desc->name);
                return BAD_VALUE;
            }
            if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) == 0) {
                ALOGW("%s: non HW effect %s on playback thread in fast mode",
                        __func__, desc->name);
                return BAD_VALUE;
            }
        }
    } break;
    case OFFLOAD:
        // nothing actionable on offload threads, if the effect:
        //   - is offloadable: the effect can be created
        //   - is NOT offloadable: the effect should still be created, but EffectHandle::enable()
        //     will take care of invalidating the tracks of the thread
        break;
    case DIRECT:
        // Reject any effect on Direct output threads for now, since the format of
        // mSinkBuffer is not guaranteed to be compatible with effect processing (PCM 16 stereo).
        ALOGW("%s: effect %s on DIRECT output thread %s",
                __func__, desc->name, mThreadName);
        return BAD_VALUE;
    case DUPLICATING:
#ifndef MULTICHANNEL_EFFECT_CHAIN
        // Reject any effect on mixer multichannel sinks.
        // TODO: fix both format and multichannel issues with effects.
        if (mChannelCount != FCC_2) {
            ALOGW("%s: effect %s for multichannel(%d) on DUPLICATING thread %s",
                    __func__, desc->name, mChannelCount, mThreadName);
            return BAD_VALUE;
        }
#endif
        if (audio_is_global_session(sessionId)) {
            ALOGW("%s: global effect %s on DUPLICATING thread %s",
                    __func__, desc->name, mThreadName);
            return BAD_VALUE;
        }
        if ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
            ALOGW("%s: post processing effect %s on DUPLICATING thread %s",
                    __func__, desc->name, mThreadName);
            return BAD_VALUE;
        }
        if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) != 0) {
            ALOGW("%s: HW tunneled effect %s on DUPLICATING thread %s",
                    __func__, desc->name, mThreadName);
            return BAD_VALUE;
        }
        break;
    case SPATIALIZER:
1500 // Global effects (AUDIO_SESSION_OUTPUT_MIX) are not supported on spatializer mixer
1501         // as there is no common accumulation buffer for spatialized and non spatialized tracks.
1502 // Post processing effects (AUDIO_SESSION_OUTPUT_STAGE or AUDIO_SESSION_DEVICE)
1503 // are supported and added after the spatializer.
1504 if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
1505 ALOGW("%s: global effect %s not supported on spatializer thread %s",
1506 __func__, desc->name, mThreadName);
1507 return BAD_VALUE;
1508 } else if (sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
1509             // only post processing, downmixer or spatializer effects on output stage session
1510 if (memcmp(&desc->type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0
1511 || memcmp(&desc->type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
1512 break;
1513 }
1514 if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
1515 ALOGW("%s: non post processing effect %s not allowed on output stage session",
1516 __func__, desc->name);
1517 return BAD_VALUE;
1518 }
1519 } else if (sessionId == AUDIO_SESSION_DEVICE) {
1520             // only post processing on device session
1521 if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
1522 ALOGW("%s: non post processing effect %s not allowed on device session",
1523 __func__, desc->name);
1524 return BAD_VALUE;
1525 }
1526 }
1527 break;
1528 case BIT_PERFECT:
1529 if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) != 0) {
1530 // Allow HW accelerated effects of tunnel type
1531 break;
1532 }
1533         // Bit-perfect tracks are not allowed to apply audio effects that touch the audio
1534         // data. Hence, effects are rejected 1) for global sessions (AUDIO_SESSION_OUTPUT_MIX),
1535         // 2) for post-processing sessions (AUDIO_SESSION_OUTPUT_STAGE or AUDIO_SESSION_DEVICE) and
1536         // 3) when there is any bit-perfect track with the given session id.
1537 if (sessionId == AUDIO_SESSION_OUTPUT_MIX || sessionId == AUDIO_SESSION_OUTPUT_STAGE ||
1538 sessionId == AUDIO_SESSION_DEVICE) {
1539 ALOGW("%s: effect %s not supported on bit-perfect thread %s",
1540 __func__, desc->name, mThreadName);
1541 return BAD_VALUE;
1542 } else if ((hasAudioSession_l(sessionId) & ThreadBase::BIT_PERFECT_SESSION) != 0) {
1543 ALOGW("%s: effect %s not supported as there is a bit-perfect track with session as %d",
1544 __func__, desc->name, sessionId);
1545 return BAD_VALUE;
1546 }
1547 break;
1548 default:
1549 LOG_ALWAYS_FATAL("checkEffectCompatibility_l(): wrong thread type %d", mType);
1550 }
1551
1552 return NO_ERROR;
1553 }
1554
1555 // ThreadBase::createEffect_l() must be called with AudioFlinger::mLock held
1556 sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l(
1557 const sp<AudioFlinger::Client>& client,
1558 const sp<IEffectClient>& effectClient,
1559 int32_t priority,
1560 audio_session_t sessionId,
1561 effect_descriptor_t *desc,
1562 int *enabled,
1563 status_t *status,
1564 bool pinned,
1565 bool probe,
1566 bool notifyFramesProcessed)
1567 {
1568 sp<EffectModule> effect;
1569 sp<EffectHandle> handle;
1570 status_t lStatus;
1571 sp<EffectChain> chain;
1572 bool chainCreated = false;
1573 bool effectCreated = false;
1574 audio_unique_id_t effectId = AUDIO_UNIQUE_ID_USE_UNSPECIFIED;
1575
1576 lStatus = initCheck();
1577 if (lStatus != NO_ERROR) {
1578 ALOGW("createEffect_l() Audio driver not initialized.");
1579 goto Exit;
1580 }
1581
1582 ALOGV("createEffect_l() thread %p effect %s on session %d", this, desc->name, sessionId);
1583
1584 { // scope for mLock
1585 Mutex::Autolock _l(mLock);
1586
1587 lStatus = checkEffectCompatibility_l(desc, sessionId);
1588 if (probe || lStatus != NO_ERROR) {
1589 goto Exit;
1590 }
1591
1592 // check for existing effect chain with the requested audio session
1593 chain = getEffectChain_l(sessionId);
1594 if (chain == 0) {
1595 // create a new chain for this session
1596 ALOGV("createEffect_l() new effect chain for session %d", sessionId);
1597 chain = new EffectChain(this, sessionId);
1598 addEffectChain_l(chain);
1599 chain->setStrategy(getStrategyForSession_l(sessionId));
1600 chainCreated = true;
1601 } else {
1602 effect = chain->getEffectFromDesc_l(desc);
1603 }
1604
1605 ALOGV("createEffect_l() got effect %p on chain %p", effect.get(), chain.get());
1606
1607 if (effect == 0) {
1608 effectId = mAudioFlinger->nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT);
1609 // create a new effect module if none present in the chain
1610 lStatus = chain->createEffect_l(effect, desc, effectId, sessionId, pinned);
1611 if (lStatus != NO_ERROR) {
1612 goto Exit;
1613 }
1614 effectCreated = true;
1615
1616 // FIXME: use vector of device and address when effect interface is ready.
1617 effect->setDevices(outDeviceTypeAddrs());
1618 effect->setInputDevice(inDeviceTypeAddr());
1619 effect->setMode(mAudioFlinger->getMode());
1620 effect->setAudioSource(mAudioSource);
1621 }
1622 if (effect->isHapticGenerator()) {
1623 // TODO(b/184194057): Use the vibrator information from the vibrator that will be used
1624 // for the HapticGenerator.
1625 const std::optional<media::AudioVibratorInfo> defaultVibratorInfo =
1626 std::move(mAudioFlinger->getDefaultVibratorInfo_l());
1627 if (defaultVibratorInfo) {
1628 // Only set the vibrator info when it is a valid one.
1629 effect->setVibratorInfo(*defaultVibratorInfo);
1630 }
1631 }
1632 // create effect handle and connect it to effect module
1633 handle = new EffectHandle(effect, client, effectClient, priority, notifyFramesProcessed);
1634 lStatus = handle->initCheck();
1635 if (lStatus == OK) {
1636 lStatus = effect->addHandle(handle.get());
1637 sendCheckOutputStageEffectsEvent_l();
1638 }
1639 if (enabled != NULL) {
1640 *enabled = (int)effect->isEnabled();
1641 }
1642 }
1643
1644 Exit:
1645 if (!probe && lStatus != NO_ERROR && lStatus != ALREADY_EXISTS) {
1646 Mutex::Autolock _l(mLock);
1647 if (effectCreated) {
1648 chain->removeEffect_l(effect);
1649 }
1650 if (chainCreated) {
1651 removeEffectChain_l(chain);
1652 }
1653 // handle must be cleared by caller to avoid deadlock.
1654 }
1655
1656 *status = lStatus;
1657 return handle;
1658 }
1659
1660 void AudioFlinger::ThreadBase::disconnectEffectHandle(EffectHandle *handle,
1661 bool unpinIfLast)
1662 {
1663 bool remove = false;
1664 sp<EffectModule> effect;
1665 {
1666 Mutex::Autolock _l(mLock);
1667 sp<EffectBase> effectBase = handle->effect().promote();
1668 if (effectBase == nullptr) {
1669 return;
1670 }
1671 effect = effectBase->asEffectModule();
1672 if (effect == nullptr) {
1673 return;
1674 }
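        // Note: the effect module itself is torn down below only when the handle being removed
        // is the last one on the effect and the effect is either not pinned or unpinIfLast is set.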
1675 // restore suspended effects if the disconnected handle was enabled and the last one.
1676 remove = (effect->removeHandle(handle) == 0) && (!effect->isPinned() || unpinIfLast);
1677 if (remove) {
1678 removeEffect_l(effect, true);
1679 }
1680 sendCheckOutputStageEffectsEvent_l();
1681 }
1682 if (remove) {
1683 mAudioFlinger->updateOrphanEffectChains(effect);
1684 if (handle->enabled()) {
1685 effect->checkSuspendOnEffectEnabled(false, false /*threadLocked*/);
1686 }
1687 }
1688 }
1689
1690 void AudioFlinger::ThreadBase::onEffectEnable(const sp<EffectModule>& effect) {
1691 if (isOffloadOrMmap()) {
1692 Mutex::Autolock _l(mLock);
1693 broadcast_l();
1694 }
1695 if (!effect->isOffloadable()) {
1696 if (mType == ThreadBase::OFFLOAD) {
1697 PlaybackThread *t = (PlaybackThread *)this;
1698 t->invalidateTracks(AUDIO_STREAM_MUSIC);
1699 }
1700 if (effect->sessionId() == AUDIO_SESSION_OUTPUT_MIX) {
1701 mAudioFlinger->onNonOffloadableGlobalEffectEnable();
1702 }
1703 }
1704 }
1705
1706 void AudioFlinger::ThreadBase::onEffectDisable() {
1707 if (isOffloadOrMmap()) {
1708 Mutex::Autolock _l(mLock);
1709 broadcast_l();
1710 }
1711 }
1712
1713 sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect(audio_session_t sessionId,
1714 int effectId)
1715 {
1716 Mutex::Autolock _l(mLock);
1717 return getEffect_l(sessionId, effectId);
1718 }
1719
1720 sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect_l(audio_session_t sessionId,
1721 int effectId)
1722 {
1723 sp<EffectChain> chain = getEffectChain_l(sessionId);
1724 return chain != 0 ? chain->getEffectFromId_l(effectId) : 0;
1725 }
1726
1727 std::vector<int> AudioFlinger::ThreadBase::getEffectIds_l(audio_session_t sessionId)
1728 {
1729 sp<EffectChain> chain = getEffectChain_l(sessionId);
1730 return chain != nullptr ? chain->getEffectIds() : std::vector<int>{};
1731 }
1732
1733 // PlaybackThread::addEffect_l() must be called with AudioFlinger::mLock and
1734 // PlaybackThread::mLock held
1735 status_t AudioFlinger::ThreadBase::addEffect_l(const sp<EffectModule>& effect)
1736 {
1737 // check for existing effect chain with the requested audio session
1738 audio_session_t sessionId = effect->sessionId();
1739 sp<EffectChain> chain = getEffectChain_l(sessionId);
1740 bool chainCreated = false;
1741
1742 ALOGD_IF((mType == OFFLOAD) && !effect->isOffloadable(),
1743 "addEffect_l() on offloaded thread %p: effect %s does not support offload flags %#x",
1744 this, effect->desc().name, effect->desc().flags);
1745
1746 if (chain == 0) {
1747 // create a new chain for this session
1748 ALOGV("addEffect_l() new effect chain for session %d", sessionId);
1749 chain = new EffectChain(this, sessionId);
1750 addEffectChain_l(chain);
1751 chain->setStrategy(getStrategyForSession_l(sessionId));
1752 chainCreated = true;
1753 }
1754 ALOGV("addEffect_l() %p chain %p effect %p", this, chain.get(), effect.get());
1755
1756 if (chain->getEffectFromId_l(effect->id()) != 0) {
1757 ALOGW("addEffect_l() %p effect %s already present in chain %p",
1758 this, effect->desc().name, chain.get());
1759 return BAD_VALUE;
1760 }
1761
1762 effect->setOffloaded(mType == OFFLOAD, mId);
1763
1764 status_t status = chain->addEffect_l(effect);
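    // If the chain rejects the effect, roll back any chain that was just created above so the
    // thread is left unchanged.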
1765 if (status != NO_ERROR) {
1766 if (chainCreated) {
1767 removeEffectChain_l(chain);
1768 }
1769 return status;
1770 }
1771
1772 effect->setDevices(outDeviceTypeAddrs());
1773 effect->setInputDevice(inDeviceTypeAddr());
1774 effect->setMode(mAudioFlinger->getMode());
1775 effect->setAudioSource(mAudioSource);
1776
1777 return NO_ERROR;
1778 }
1779
1780 void AudioFlinger::ThreadBase::removeEffect_l(const sp<EffectModule>& effect, bool release) {
1781
1782 ALOGV("%s %p effect %p", __FUNCTION__, this, effect.get());
1783 effect_descriptor_t desc = effect->desc();
1784 if ((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
1785 detachAuxEffect_l(effect->id());
1786 }
1787
1788 sp<EffectChain> chain = effect->getCallback()->chain().promote();
1789 if (chain != 0) {
1790 // remove effect chain if removing last effect
1791 if (chain->removeEffect_l(effect, release) == 0) {
1792 removeEffectChain_l(chain);
1793 }
1794 } else {
1795 ALOGW("removeEffect_l() %p cannot promote chain for effect %p", this, effect.get());
1796 }
1797 }
1798
1799 void AudioFlinger::ThreadBase::lockEffectChains_l(
1800 Vector< sp<AudioFlinger::EffectChain> >& effectChains)
1801 NO_THREAD_SAFETY_ANALYSIS // calls EffectChain::lock()
1802 {
1803 effectChains = mEffectChains;
1804 for (size_t i = 0; i < mEffectChains.size(); i++) {
1805 mEffectChains[i]->lock();
1806 }
1807 }
1808
1809 void AudioFlinger::ThreadBase::unlockEffectChains(
1810 const Vector< sp<AudioFlinger::EffectChain> >& effectChains)
1811 NO_THREAD_SAFETY_ANALYSIS // calls EffectChain::unlock()
1812 {
1813 for (size_t i = 0; i < effectChains.size(); i++) {
1814 effectChains[i]->unlock();
1815 }
1816 }
1817
1818 sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain(audio_session_t sessionId)
1819 {
1820 Mutex::Autolock _l(mLock);
1821 return getEffectChain_l(sessionId);
1822 }
1823
1824 sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain_l(audio_session_t sessionId)
1825 const
1826 {
1827 size_t size = mEffectChains.size();
1828 for (size_t i = 0; i < size; i++) {
1829 if (mEffectChains[i]->sessionId() == sessionId) {
1830 return mEffectChains[i];
1831 }
1832 }
1833 return 0;
1834 }
1835
1836 void AudioFlinger::ThreadBase::setMode(audio_mode_t mode)
1837 {
1838 Mutex::Autolock _l(mLock);
1839 size_t size = mEffectChains.size();
1840 for (size_t i = 0; i < size; i++) {
1841 mEffectChains[i]->setMode_l(mode);
1842 }
1843 }
1844
1845 void AudioFlinger::ThreadBase::toAudioPortConfig(struct audio_port_config *config)
1846 {
1847 config->type = AUDIO_PORT_TYPE_MIX;
1848 config->ext.mix.handle = mId;
1849 config->sample_rate = mSampleRate;
1850 config->format = mHALFormat;
1851 config->channel_mask = mChannelMask;
1852 config->config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE|AUDIO_PORT_CONFIG_CHANNEL_MASK|
1853 AUDIO_PORT_CONFIG_FORMAT;
1854 }
1855
1856 void AudioFlinger::ThreadBase::systemReady()
1857 {
1858 Mutex::Autolock _l(mLock);
1859 if (mSystemReady) {
1860 return;
1861 }
1862 mSystemReady = true;
1863
1864 for (size_t i = 0; i < mPendingConfigEvents.size(); i++) {
1865 sendConfigEvent_l(mPendingConfigEvents.editItemAt(i));
1866 }
1867 mPendingConfigEvents.clear();
1868 }
1869
1870 template <typename T>
1871 ssize_t AudioFlinger::ThreadBase::ActiveTracks<T>::add(const sp<T> &track) {
1872 ssize_t index = mActiveTracks.indexOf(track);
1873 if (index >= 0) {
1874 ALOGW("ActiveTracks<T>::add track %p already there", track.get());
1875 return index;
1876 }
1877 logTrack("add", track);
1878 mActiveTracksGeneration++;
1879 mLatestActiveTrack = track;
1880 track->beginBatteryAttribution();
1881 mHasChanged = true;
1882 return mActiveTracks.add(track);
1883 }
1884
1885 template <typename T>
1886 ssize_t AudioFlinger::ThreadBase::ActiveTracks<T>::remove(const sp<T> &track) {
1887 ssize_t index = mActiveTracks.remove(track);
1888 if (index < 0) {
1889 ALOGW("ActiveTracks<T>::remove nonexistent track %p", track.get());
1890 return index;
1891 }
1892 logTrack("remove", track);
1893 mActiveTracksGeneration++;
1894 track->endBatteryAttribution();
1895 // mLatestActiveTrack is not cleared even if is the same as track.
1896 mHasChanged = true;
1897 #ifdef TEE_SINK
1898 track->dumpTee(-1 /* fd */, "_REMOVE");
1899 #endif
1900 track->logEndInterval(); // log to MediaMetrics
1901 return index;
1902 }
1903
1904 template <typename T>
1905 void AudioFlinger::ThreadBase::ActiveTracks<T>::clear() {
1906 for (const sp<T> &track : mActiveTracks) {
1907 track->endBatteryAttribution();
1908 logTrack("clear", track);
1909 }
1910 mLastActiveTracksGeneration = mActiveTracksGeneration;
1911 if (!mActiveTracks.empty()) { mHasChanged = true; }
1912 mActiveTracks.clear();
1913 mLatestActiveTrack.clear();
1914 }
1915
1916 template <typename T>
1917 void AudioFlinger::ThreadBase::ActiveTracks<T>::updatePowerState(
1918 const sp<ThreadBase>& thread, bool force) {
1919 // Updates ActiveTracks client uids to the thread wakelock.
1920 if (mActiveTracksGeneration != mLastActiveTracksGeneration || force) {
1921 thread->updateWakeLockUids_l(getWakeLockUids());
1922 mLastActiveTracksGeneration = mActiveTracksGeneration;
1923 }
1924 }
1925
1926 template <typename T>
1927 bool AudioFlinger::ThreadBase::ActiveTracks<T>::readAndClearHasChanged() {
1928 bool hasChanged = mHasChanged;
1929 mHasChanged = false;
1930
1931 for (const sp<T> &track : mActiveTracks) {
1932 // Do not short-circuit as all hasChanged states must be reset
1933 // as all the metadata are going to be sent
1934 hasChanged |= track->readAndClearHasChanged();
1935 }
1936 return hasChanged;
1937 }
1938
1939 template <typename T>
1940 void AudioFlinger::ThreadBase::ActiveTracks<T>::logTrack(
1941 const char *funcName, const sp<T> &track) const {
1942 if (mLocalLog != nullptr) {
1943 String8 result;
1944 track->appendDump(result, false /* active */);
1945 mLocalLog->log("AT::%-10s(%p) %s", funcName, track.get(), result.string());
1946 }
1947 }
1948
1949 void AudioFlinger::ThreadBase::broadcast_l()
1950 {
1951 // Thread could be blocked waiting for async
1952 // so signal it to handle state changes immediately
1953     // If threadLoop is currently unlocked, a signal on mWaitWorkCV would
1954     // be lost, so we also set a flag to prevent it from blocking on mWaitWorkCV
1955 mSignalPending = true;
1956 mWaitWorkCV.broadcast();
1957 }
1958
1959 // Call only from threadLoop() or when it is idle.
1960 // Do not call from high performance code as this may do binder rpc to the MediaMetrics service.
1961 void AudioFlinger::ThreadBase::sendStatistics(bool force)
1962 {
1963 // Do not log if we have no stats.
1964 // We choose the timestamp verifier because it is the most likely item to be present.
1965 const int64_t nstats = mTimestampVerifier.getN() - mLastRecordedTimestampVerifierN;
1966 if (nstats == 0) {
1967 return;
1968 }
1969
1970 // Don't log more frequently than once per 12 hours.
1971 // We use BOOTTIME to include suspend time.
1972 const int64_t timeNs = systemTime(SYSTEM_TIME_BOOTTIME);
1973 const int64_t sinceNs = timeNs - mLastRecordedTimeNs; // ok if mLastRecordedTimeNs = 0
1974 if (!force && sinceNs <= 12 * NANOS_PER_HOUR) {
1975 return;
1976 }
1977
1978 mLastRecordedTimestampVerifierN = mTimestampVerifier.getN();
1979 mLastRecordedTimeNs = timeNs;
1980
1981 std::unique_ptr<mediametrics::Item> item(mediametrics::Item::create("audiothread"));
1982
1983 #define MM_PREFIX "android.media.audiothread." // avoid cut-n-paste errors.
1984
1985 // thread configuration
1986 item->setInt32(MM_PREFIX "id", (int32_t)mId); // IO handle
1987 // item->setInt32(MM_PREFIX "portId", (int32_t)mPortId);
1988 item->setCString(MM_PREFIX "type", threadTypeToString(mType));
1989 item->setInt32(MM_PREFIX "sampleRate", (int32_t)mSampleRate);
1990 item->setInt64(MM_PREFIX "channelMask", (int64_t)mChannelMask);
1991 item->setCString(MM_PREFIX "encoding", toString(mFormat).c_str());
1992 item->setInt32(MM_PREFIX "frameCount", (int32_t)mFrameCount);
1993 item->setCString(MM_PREFIX "outDevice", toString(outDeviceTypes()).c_str());
1994 item->setCString(MM_PREFIX "inDevice", toString(inDeviceType()).c_str());
1995
1996 // thread statistics
1997 if (mIoJitterMs.getN() > 0) {
1998 item->setDouble(MM_PREFIX "ioJitterMs.mean", mIoJitterMs.getMean());
1999 item->setDouble(MM_PREFIX "ioJitterMs.std", mIoJitterMs.getStdDev());
2000 }
2001 if (mProcessTimeMs.getN() > 0) {
2002 item->setDouble(MM_PREFIX "processTimeMs.mean", mProcessTimeMs.getMean());
2003 item->setDouble(MM_PREFIX "processTimeMs.std", mProcessTimeMs.getStdDev());
2004 }
2005 const auto tsjitter = mTimestampVerifier.getJitterMs();
2006 if (tsjitter.getN() > 0) {
2007 item->setDouble(MM_PREFIX "timestampJitterMs.mean", tsjitter.getMean());
2008 item->setDouble(MM_PREFIX "timestampJitterMs.std", tsjitter.getStdDev());
2009 }
2010 if (mLatencyMs.getN() > 0) {
2011 item->setDouble(MM_PREFIX "latencyMs.mean", mLatencyMs.getMean());
2012 item->setDouble(MM_PREFIX "latencyMs.std", mLatencyMs.getStdDev());
2013 }
2014 if (mMonopipePipeDepthStats.getN() > 0) {
2015 item->setDouble(MM_PREFIX "monopipePipeDepthStats.mean",
2016 mMonopipePipeDepthStats.getMean());
2017 item->setDouble(MM_PREFIX "monopipePipeDepthStats.std",
2018 mMonopipePipeDepthStats.getStdDev());
2019 }
2020
2021 item->selfrecord();
2022 }
2023
2024 product_strategy_t AudioFlinger::ThreadBase::getStrategyForStream(audio_stream_type_t stream) const
2025 {
2026 if (!mAudioFlinger->isAudioPolicyReady()) {
2027 return PRODUCT_STRATEGY_NONE;
2028 }
2029 return AudioSystem::getStrategyForStream(stream);
2030 }
2031
2032 // startMelComputation_l() must be called with AudioFlinger::mLock held
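// Note: CSD below is assumed to stand for computed sound dose; subclasses that support sound
// dose measurement override these hooks, so the ThreadBase defaults only log a warning.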
2033 void AudioFlinger::ThreadBase::startMelComputation_l(
2034 const sp<audio_utils::MelProcessor>& /*processor*/)
2035 {
2036 // Do nothing
2037 ALOGW("%s: ThreadBase does not support CSD", __func__);
2038 }
2039
2040 // stopMelComputation_l() must be called with AudioFlinger::mLock held
2041 void AudioFlinger::ThreadBase::stopMelComputation_l()
2042 {
2043 // Do nothing
2044 ALOGW("%s: ThreadBase does not support CSD", __func__);
2045 }
2046
2047 // ----------------------------------------------------------------------------
2048 // Playback
2049 // ----------------------------------------------------------------------------
2050
2051 AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinger,
2052 AudioStreamOut* output,
2053 audio_io_handle_t id,
2054 type_t type,
2055 bool systemReady,
2056 audio_config_base_t *mixerConfig)
2057 : ThreadBase(audioFlinger, id, type, systemReady, true /* isOut */),
2058 mNormalFrameCount(0), mSinkBuffer(NULL),
2059 mMixerBufferEnabled(AudioFlinger::kEnableExtendedPrecision || type == SPATIALIZER),
2060 mMixerBuffer(NULL),
2061 mMixerBufferSize(0),
2062 mMixerBufferFormat(AUDIO_FORMAT_INVALID),
2063 mMixerBufferValid(false),
2064 mEffectBufferEnabled(AudioFlinger::kEnableExtendedPrecision || type == SPATIALIZER),
2065 mEffectBuffer(NULL),
2066 mEffectBufferSize(0),
2067 mEffectBufferFormat(AUDIO_FORMAT_INVALID),
2068 mEffectBufferValid(false),
2069 mSuspended(0), mBytesWritten(0),
2070 mFramesWritten(0),
2071 mSuspendedFrames(0),
2072 mActiveTracks(&this->mLocalLog),
2073 // mStreamTypes[] initialized in constructor body
2074 mTracks(type == MIXER),
2075 mOutput(output),
2076 mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
2077 mMixerStatus(MIXER_IDLE),
2078 mMixerStatusIgnoringFastTracks(MIXER_IDLE),
2079 mStandbyDelayNs(AudioFlinger::mStandbyTimeInNsecs),
2080 mBytesRemaining(0),
2081 mCurrentWriteLength(0),
2082 mUseAsyncWrite(false),
2083 mWriteAckSequence(0),
2084 mDrainSequence(0),
2085 mScreenState(AudioFlinger::mScreenState),
2086 // index 0 is reserved for normal mixer's submix
2087 mFastTrackAvailMask(((1 << FastMixerState::sMaxFastTracks) - 1) & ~1),
2088 mHwSupportsPause(false), mHwPaused(false), mFlushPending(false),
2089 mLeftVolFloat(-1.0), mRightVolFloat(-1.0),
2090 mDownStreamPatch{},
2091 mIsTimestampAdvancing(kMinimumTimeBetweenTimestampChecksNs)
2092 {
2093 snprintf(mThreadName, kThreadNameLength, "AudioOut_%X", id);
2094 mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mThreadName);
2095
2096     // Assumes the constructor is called by AudioFlinger with its mLock held, but
2097 // it would be safer to explicitly pass initial masterVolume/masterMute as
2098 // parameter.
2099 //
2100 // If the HAL we are using has support for master volume or master mute,
2101 // then do not attenuate or mute during mixing (just leave the volume at 1.0
2102 // and the mute set to false).
2103 mMasterVolume = audioFlinger->masterVolume_l();
2104 mMasterMute = audioFlinger->masterMute_l();
2105 if (mOutput->audioHwDev) {
2106 if (mOutput->audioHwDev->canSetMasterVolume()) {
2107 mMasterVolume = 1.0;
2108 }
2109
2110 if (mOutput->audioHwDev->canSetMasterMute()) {
2111 mMasterMute = false;
2112 }
2113 mIsMsdDevice = strcmp(
2114 mOutput->audioHwDev->moduleName(), AUDIO_HARDWARE_MODULE_ID_MSD) == 0;
2115 }
2116
2117 if (mixerConfig != nullptr && mixerConfig->channel_mask != AUDIO_CHANNEL_NONE) {
2118 mMixerChannelMask = mixerConfig->channel_mask;
2119 }
2120
2121 readOutputParameters_l();
2122
2123 if (mType != SPATIALIZER
2124 && mMixerChannelMask != mChannelMask) {
2125 LOG_ALWAYS_FATAL("HAL channel mask %#x does not match mixer channel mask %#x",
2126 mChannelMask, mMixerChannelMask);
2127 }
2128
2129 // TODO: We may also match on address as well as device type for
2130 // AUDIO_DEVICE_OUT_BUS, AUDIO_DEVICE_OUT_ALL_A2DP, AUDIO_DEVICE_OUT_REMOTE_SUBMIX
2131 if (type == MIXER || type == DIRECT || type == OFFLOAD) {
2132         // TODO: This property should ensure that it only contains a single device type.
2133 mTimestampCorrectedDevice = (audio_devices_t)property_get_int64(
2134 "audio.timestamp.corrected_output_device",
2135 (int64_t)(mIsMsdDevice ? AUDIO_DEVICE_OUT_BUS // turn on by default for MSD
2136 : AUDIO_DEVICE_NONE));
2137 }
2138
2139 for (int i = AUDIO_STREAM_MIN; i < AUDIO_STREAM_FOR_POLICY_CNT; ++i) {
2140 const audio_stream_type_t stream{static_cast<audio_stream_type_t>(i)};
2141 mStreamTypes[stream].volume = 0.0f;
2142 mStreamTypes[stream].mute = mAudioFlinger->streamMute_l(stream);
2143 }
2144 // Audio patch and call assistant volume are always max
2145 mStreamTypes[AUDIO_STREAM_PATCH].volume = 1.0f;
2146 mStreamTypes[AUDIO_STREAM_PATCH].mute = false;
2147 mStreamTypes[AUDIO_STREAM_CALL_ASSISTANT].volume = 1.0f;
2148 mStreamTypes[AUDIO_STREAM_CALL_ASSISTANT].mute = false;
2149 }
2150
2151 AudioFlinger::PlaybackThread::~PlaybackThread()
2152 {
2153 mAudioFlinger->unregisterWriter(mNBLogWriter);
2154 free(mSinkBuffer);
2155 free(mMixerBuffer);
2156 free(mEffectBuffer);
2157 free(mPostSpatializerBuffer);
2158 }
2159
2160 // Thread virtuals
2161
2162 void AudioFlinger::PlaybackThread::onFirstRef()
2163 {
2164 if (!isStreamInitialized()) {
2165 ALOGE("The stream is not open yet"); // This should not happen.
2166 } else {
2167 // Callbacks take strong or weak pointers as a parameter.
2168 // Since PlaybackThread passes itself as a callback handler, it can only
2169 // be done outside of the constructor. Creating weak and especially strong
2170 // pointers to a refcounted object in its own constructor is strongly
2171 // discouraged, see comments in system/core/libutils/include/utils/RefBase.h.
2172 // Even if a function takes a weak pointer, it is possible that it will
2173 // need to convert it to a strong pointer down the line.
2174 if (mOutput->flags & AUDIO_OUTPUT_FLAG_NON_BLOCKING &&
2175 mOutput->stream->setCallback(this) == OK) {
2176 mUseAsyncWrite = true;
2177 mCallbackThread = new AudioFlinger::AsyncCallbackThread(this);
2178 }
2179
2180 if (mOutput->stream->setEventCallback(this) != OK) {
2181 ALOGD("Failed to add event callback");
2182 }
2183 }
2184 run(mThreadName, ANDROID_PRIORITY_URGENT_AUDIO);
2185 mThreadSnapshot.setTid(getTid());
2186 }
2187
2188 // ThreadBase virtuals
2189 void AudioFlinger::PlaybackThread::preExit()
2190 {
2191 ALOGV(" preExit()");
2192 status_t result = mOutput->stream->exit();
2193 ALOGE_IF(result != OK, "Error when calling exit(): %d", result);
2194 }
2195
2196 void AudioFlinger::PlaybackThread::dumpTracks_l(int fd, const Vector<String16>& args __unused)
2197 {
2198 String8 result;
2199
2200 result.appendFormat(" Stream volumes in dB: ");
2201 for (int i = 0; i < AUDIO_STREAM_CNT; ++i) {
2202 const stream_type_t *st = &mStreamTypes[i];
2203 if (i > 0) {
2204 result.appendFormat(", ");
2205 }
2206 result.appendFormat("%d:%.2g", i, 20.0 * log10(st->volume));
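        // (the stored volume is linear; 20 * log10() converts it to dB, e.g. 1.0 -> 0 dB,
        // 0.5 -> about -6 dB)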
2207 if (st->mute) {
2208 result.append("M");
2209 }
2210 }
2211 result.append("\n");
2212 write(fd, result.string(), result.length());
2213 result.clear();
2214
2215 // These values are "raw"; they will wrap around. See prepareTracks_l() for a better way.
2216 FastTrackUnderruns underruns = getFastTrackUnderruns(0);
2217 dprintf(fd, " Normal mixer raw underrun counters: partial=%u empty=%u\n",
2218 underruns.mBitFields.mPartial, underruns.mBitFields.mEmpty);
2219
2220 size_t numtracks = mTracks.size();
2221 size_t numactive = mActiveTracks.size();
2222 dprintf(fd, " %zu Tracks", numtracks);
2223 size_t numactiveseen = 0;
2224 const char *prefix = " ";
2225 if (numtracks) {
2226 dprintf(fd, " of which %zu are active\n", numactive);
2227 result.append(prefix);
2228 mTracks[0]->appendDumpHeader(result);
2229 for (size_t i = 0; i < numtracks; ++i) {
2230 sp<Track> track = mTracks[i];
2231 if (track != 0) {
2232 bool active = mActiveTracks.indexOf(track) >= 0;
2233 if (active) {
2234 numactiveseen++;
2235 }
2236 result.append(prefix);
2237 track->appendDump(result, active);
2238 }
2239 }
2240 } else {
2241 result.append("\n");
2242 }
2243 if (numactiveseen != numactive) {
2244 // some tracks in the active list were not in the tracks list
2245 result.append(" The following tracks are in the active list but"
2246 " not in the track list\n");
2247 result.append(prefix);
2248 mActiveTracks[0]->appendDumpHeader(result);
2249 for (size_t i = 0; i < numactive; ++i) {
2250 sp<Track> track = mActiveTracks[i];
2251 if (mTracks.indexOf(track) < 0) {
2252 result.append(prefix);
2253 track->appendDump(result, true /* active */);
2254 }
2255 }
2256 }
2257
2258 write(fd, result.string(), result.size());
2259 }
2260
2261 void AudioFlinger::PlaybackThread::dumpInternals_l(int fd, const Vector<String16>& args)
2262 {
2263 dprintf(fd, " Master volume: %f\n", mMasterVolume);
2264 dprintf(fd, " Master mute: %s\n", mMasterMute ? "on" : "off");
2265 dprintf(fd, " Mixer channel Mask: %#x (%s)\n",
2266 mMixerChannelMask, channelMaskToString(mMixerChannelMask, true /* output */).c_str());
2267 if (mHapticChannelMask != AUDIO_CHANNEL_NONE) {
2268 dprintf(fd, " Haptic channel mask: %#x (%s)\n", mHapticChannelMask,
2269 channelMaskToString(mHapticChannelMask, true /* output */).c_str());
2270 }
2271 dprintf(fd, " Normal frame count: %zu\n", mNormalFrameCount);
2272 dprintf(fd, " Total writes: %d\n", mNumWrites);
2273 dprintf(fd, " Delayed writes: %d\n", mNumDelayedWrites);
2274 dprintf(fd, " Blocked in write: %s\n", mInWrite ? "yes" : "no");
2275 dprintf(fd, " Suspend count: %d\n", mSuspended);
2276 dprintf(fd, " Sink buffer : %p\n", mSinkBuffer);
2277 dprintf(fd, " Mixer buffer: %p\n", mMixerBuffer);
2278 dprintf(fd, " Effect buffer: %p\n", mEffectBuffer);
2279 dprintf(fd, " Fast track availMask=%#x\n", mFastTrackAvailMask);
2280 dprintf(fd, " Standby delay ns=%lld\n", (long long)mStandbyDelayNs);
2281 AudioStreamOut *output = mOutput;
2282 audio_output_flags_t flags = output != NULL ? output->flags : AUDIO_OUTPUT_FLAG_NONE;
2283 dprintf(fd, " AudioStreamOut: %p flags %#x (%s)\n",
2284 output, flags, toString(flags).c_str());
2285 dprintf(fd, " Frames written: %lld\n", (long long)mFramesWritten);
2286 dprintf(fd, " Suspended frames: %lld\n", (long long)mSuspendedFrames);
2287 if (mPipeSink.get() != nullptr) {
2288 dprintf(fd, " PipeSink frames written: %lld\n", (long long)mPipeSink->framesWritten());
2289 }
2290 if (output != nullptr) {
2291 dprintf(fd, " Hal stream dump:\n");
2292 (void)output->stream->dump(fd, args);
2293 }
2294 }
2295
2296 // PlaybackThread::createTrack_l() must be called with AudioFlinger::mLock held
2297 sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrack_l(
2298 const sp<AudioFlinger::Client>& client,
2299 audio_stream_type_t streamType,
2300 const audio_attributes_t& attr,
2301 uint32_t *pSampleRate,
2302 audio_format_t format,
2303 audio_channel_mask_t channelMask,
2304 size_t *pFrameCount,
2305 size_t *pNotificationFrameCount,
2306 uint32_t notificationsPerBuffer,
2307 float speed,
2308 const sp<IMemory>& sharedBuffer,
2309 audio_session_t sessionId,
2310 audio_output_flags_t *flags,
2311 pid_t creatorPid,
2312 const AttributionSourceState& attributionSource,
2313 pid_t tid,
2314 status_t *status,
2315 audio_port_handle_t portId,
2316 const sp<media::IAudioTrackCallback>& callback,
2317 bool isSpatialized,
2318 bool isBitPerfect)
2319 {
2320 size_t frameCount = *pFrameCount;
2321 size_t notificationFrameCount = *pNotificationFrameCount;
2322 sp<Track> track;
2323 status_t lStatus;
2324 audio_output_flags_t outputFlags = mOutput->flags;
2325 audio_output_flags_t requestedFlags = *flags;
2326 uint32_t sampleRate;
2327
2328 if (sharedBuffer != 0 && checkIMemory(sharedBuffer) != NO_ERROR) {
2329 lStatus = BAD_VALUE;
2330 goto Exit;
2331 }
2332
2333 if (*pSampleRate == 0) {
2334 *pSampleRate = mSampleRate;
2335 }
2336 sampleRate = *pSampleRate;
2337
2338 // special case for FAST flag considered OK if fast mixer is present
2339 if (hasFastMixer()) {
2340 outputFlags = (audio_output_flags_t)(outputFlags | AUDIO_OUTPUT_FLAG_FAST);
2341 }
2342
2343 // Check if requested flags are compatible with output stream flags
2344 if ((*flags & outputFlags) != *flags) {
2345 ALOGW("createTrack_l(): mismatch between requested flags (%08x) and output flags (%08x)",
2346 *flags, outputFlags);
2347 *flags = (audio_output_flags_t)(*flags & outputFlags);
2348 }
2349
2350 if (isBitPerfect) {
2351 sp<EffectChain> chain = getEffectChain_l(sessionId);
2352 if (chain.get() != nullptr) {
2353             // Bit-perfect is required according to the configuration and preferred mixer
2354             // attributes, but it is not in the output flags from the client's request.
2355             // Explicitly add the bit-perfect flag to check compatibility.
2356 audio_output_flags_t flagsToCheck =
2357 (audio_output_flags_t)(*flags & AUDIO_OUTPUT_FLAG_BIT_PERFECT);
2358 chain->checkOutputFlagCompatibility(&flagsToCheck);
2359 if ((flagsToCheck & AUDIO_OUTPUT_FLAG_BIT_PERFECT) == AUDIO_OUTPUT_FLAG_NONE) {
2360 ALOGE("%s cannot create track as there is data-processing effect attached to "
2361 "given session id(%d)", __func__, sessionId);
2362 lStatus = BAD_VALUE;
2363 goto Exit;
2364 }
2365 *flags = flagsToCheck;
2366 }
2367 }
2368
2369 // client expresses a preference for FAST, but we get the final say
2370 if (*flags & AUDIO_OUTPUT_FLAG_FAST) {
2371 if (
2372 // PCM data
2373 audio_is_linear_pcm(format) &&
2374 // TODO: extract as a data library function that checks that a computationally
2375 // expensive downmixer is not required: isFastOutputChannelConversion()
2376 (channelMask == (mChannelMask | mHapticChannelMask) ||
2377 mChannelMask != AUDIO_CHANNEL_OUT_STEREO ||
2378 (channelMask == AUDIO_CHANNEL_OUT_MONO
2379 /* && mChannelMask == AUDIO_CHANNEL_OUT_STEREO */)) &&
2380 // hardware sample rate
2381 (sampleRate == mSampleRate) &&
2382 // normal mixer has an associated fast mixer
2383 hasFastMixer() &&
2384 // there are sufficient fast track slots available
2385 (mFastTrackAvailMask != 0)
2386 // FIXME test that MixerThread for this fast track has a capable output HAL
2387 // FIXME add a permission test also?
2388 ) {
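        // All static fast-track criteria above are satisfied; the FAST flag may still be
        // stripped below if an effect chain on a relevant session is incompatible with it.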
2389 // static tracks can have any nonzero framecount, streaming tracks check against minimum.
2390 if (sharedBuffer == 0) {
2391 // read the fast track multiplier property the first time it is needed
2392 int ok = pthread_once(&sFastTrackMultiplierOnce, sFastTrackMultiplierInit);
2393 if (ok != 0) {
2394 ALOGE("%s pthread_once failed: %d", __func__, ok);
2395 }
2396 frameCount = max(frameCount, mFrameCount * sFastTrackMultiplier); // incl framecount 0
2397 }
2398
2399 // check compatibility with audio effects.
2400 { // scope for mLock
2401 Mutex::Autolock _l(mLock);
2402 for (audio_session_t session : {
2403 AUDIO_SESSION_DEVICE,
2404 AUDIO_SESSION_OUTPUT_STAGE,
2405 AUDIO_SESSION_OUTPUT_MIX,
2406 sessionId,
2407 }) {
2408 sp<EffectChain> chain = getEffectChain_l(session);
2409 if (chain.get() != nullptr) {
2410 audio_output_flags_t old = *flags;
2411 chain->checkOutputFlagCompatibility(flags);
2412 if (old != *flags) {
2413 ALOGV("AUDIO_OUTPUT_FLAGS denied by effect, session=%d old=%#x new=%#x",
2414 (int)session, (int)old, (int)*flags);
2415 }
2416 }
2417 }
2418 }
2419 ALOGV_IF((*flags & AUDIO_OUTPUT_FLAG_FAST) != 0,
2420 "AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
2421 frameCount, mFrameCount);
2422 } else {
2423 ALOGD("AUDIO_OUTPUT_FLAG_FAST denied: sharedBuffer=%p frameCount=%zu "
2424 "mFrameCount=%zu format=%#x mFormat=%#x isLinear=%d channelMask=%#x "
2425 "sampleRate=%u mSampleRate=%u "
2426 "hasFastMixer=%d tid=%d fastTrackAvailMask=%#x",
2427 sharedBuffer.get(), frameCount, mFrameCount, format, mFormat,
2428 audio_is_linear_pcm(format), channelMask, sampleRate,
2429 mSampleRate, hasFastMixer(), tid, mFastTrackAvailMask);
2430 *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_FAST);
2431 }
2432 }
2433
2434 if (!audio_has_proportional_frames(format)) {
2435 if (sharedBuffer != 0) {
2436 // Same comment as below about ignoring frameCount parameter for set()
2437 frameCount = sharedBuffer->size();
2438 } else if (frameCount == 0) {
2439 frameCount = mNormalFrameCount;
2440 }
2441 if (notificationFrameCount != frameCount) {
2442 notificationFrameCount = frameCount;
2443 }
2444 } else if (sharedBuffer != 0) {
2445 // FIXME: Ensure client side memory buffers need
2446 // not have additional alignment beyond sample
2447 // (e.g. 16 bit stereo accessed as 32 bit frame).
2448 size_t alignment = audio_bytes_per_sample(format);
2449 if (alignment & 1) {
2450 // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
2451 alignment = 1;
2452 }
2453 uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
2454 size_t frameSize = channelCount * audio_bytes_per_sample(format);
2455 if (channelCount > 1) {
2456 // More than 2 channels does not require stronger alignment than stereo
2457 alignment <<= 1;
2458 }
2459 if (((uintptr_t)sharedBuffer->unsecurePointer() & (alignment - 1)) != 0) {
2460 ALOGE("Invalid buffer alignment: address %p, channel count %u",
2461 sharedBuffer->unsecurePointer(), channelCount);
2462 lStatus = BAD_VALUE;
2463 goto Exit;
2464 }
2465
2466 // When initializing a shared buffer AudioTrack via constructors,
2467 // there's no frameCount parameter.
2468 // But when initializing a shared buffer AudioTrack via set(),
2469 // there _is_ a frameCount parameter. We silently ignore it.
2470 frameCount = sharedBuffer->size() / frameSize;
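        // e.g. a 32768 byte shared buffer of 16-bit stereo PCM (frameSize == 4) yields a
        // frameCount of 8192.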
2471 } else {
2472 size_t minFrameCount = 0;
2473 // For fast tracks we try to respect the application's request for notifications per buffer.
2474 if (*flags & AUDIO_OUTPUT_FLAG_FAST) {
2475 if (notificationsPerBuffer > 0) {
2476 // Avoid possible arithmetic overflow during multiplication.
2477 if (notificationsPerBuffer > SIZE_MAX / mFrameCount) {
2478 ALOGE("Requested notificationPerBuffer=%u ignored for HAL frameCount=%zu",
2479 notificationsPerBuffer, mFrameCount);
2480 } else {
2481 minFrameCount = mFrameCount * notificationsPerBuffer;
2482 }
2483 }
2484 } else {
2485 // For normal PCM streaming tracks, update minimum frame count.
2486 // Buffer depth is forced to be at least 2 x the normal mixer frame count and
2487 // cover audio hardware latency.
2488 // This is probably too conservative, but legacy application code may depend on it.
2489 // If you change this calculation, also review the start threshold which is related.
2490 uint32_t latencyMs = latency_l();
2491 if (latencyMs == 0) {
2492 ALOGE("Error when retrieving output stream latency");
2493 lStatus = UNKNOWN_ERROR;
2494 goto Exit;
2495 }
2496
2497 minFrameCount = AudioSystem::calculateMinFrameCount(latencyMs, mNormalFrameCount,
2498 mSampleRate, sampleRate, speed /*, 0 mNotificationsPerBufferReq*/);
2499
2500 }
2501 if (frameCount < minFrameCount) {
2502 frameCount = minFrameCount;
2503 }
2504 }
2505
2506 // Make sure that application is notified with sufficient margin before underrun.
2507 // The client can divide the AudioTrack buffer into sub-buffers,
2508     // and expresses its desire to the server as the notification frame count.
2509 if (sharedBuffer == 0 && audio_is_linear_pcm(format)) {
2510 size_t maxNotificationFrames;
2511 if (*flags & AUDIO_OUTPUT_FLAG_FAST) {
2512 // notify every HAL buffer, regardless of the size of the track buffer
2513 maxNotificationFrames = mFrameCount;
2514 } else {
2515 // Triple buffer the notification period for a triple buffered mixer period;
2516 // otherwise, double buffering for the notification period is fine.
2517 //
2518 // TODO: This should be moved to AudioTrack to modify the notification period
2519 // on AudioTrack::setBufferSizeInFrames() changes.
2520 const int nBuffering =
2521 (uint64_t{frameCount} * mSampleRate)
2522 / (uint64_t{mNormalFrameCount} * sampleRate) == 3 ? 3 : 2;
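            // e.g. with sampleRate == mSampleRate and frameCount == 3 * mNormalFrameCount the
            // ratio is exactly 3 and the notification period is triple buffered; any other
            // ratio falls back to double buffering.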
2523
2524 maxNotificationFrames = frameCount / nBuffering;
2525 // If client requested a fast track but this was denied, then use the smaller maximum.
2526 if (requestedFlags & AUDIO_OUTPUT_FLAG_FAST) {
2527 size_t maxNotificationFramesFastDenied = FMS_20 * sampleRate / 1000;
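                // (assuming FMS_20 denotes a 20 ms period, at 48 kHz this caps the
                // notification period at 960 frames)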
2528 if (maxNotificationFrames > maxNotificationFramesFastDenied) {
2529 maxNotificationFrames = maxNotificationFramesFastDenied;
2530 }
2531 }
2532 }
2533 if (notificationFrameCount == 0 || notificationFrameCount > maxNotificationFrames) {
2534 if (notificationFrameCount == 0) {
2535 ALOGD("Client defaulted notificationFrames to %zu for frameCount %zu",
2536 maxNotificationFrames, frameCount);
2537 } else {
2538 ALOGW("Client adjusted notificationFrames from %zu to %zu for frameCount %zu",
2539 notificationFrameCount, maxNotificationFrames, frameCount);
2540 }
2541 notificationFrameCount = maxNotificationFrames;
2542 }
2543 }
2544
2545 *pFrameCount = frameCount;
2546 *pNotificationFrameCount = notificationFrameCount;
2547
2548 switch (mType) {
2549 case BIT_PERFECT:
2550 if (isBitPerfect) {
2551 if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
2552 ALOGE("%s, bad parameter when request streaming bit-perfect, sampleRate=%u, "
2553 "format=%#x, channelMask=%#x, mSampleRate=%u, mFormat=%#x, mChannelMask=%#x",
2554 __func__, sampleRate, format, channelMask, mSampleRate, mFormat,
2555 mChannelMask);
2556 lStatus = BAD_VALUE;
2557 goto Exit;
2558 }
2559 }
2560 break;
2561
2562 case DIRECT:
2563 if (audio_is_linear_pcm(format)) { // TODO maybe use audio_has_proportional_frames()?
2564 if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
2565 ALOGE("createTrack_l() Bad parameter: sampleRate %u format %#x, channelMask 0x%08x "
2566 "for output %p with format %#x",
2567 sampleRate, format, channelMask, mOutput, mFormat);
2568 lStatus = BAD_VALUE;
2569 goto Exit;
2570 }
2571 }
2572 break;
2573
2574 case OFFLOAD:
2575 if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
2576 ALOGE("createTrack_l() Bad parameter: sampleRate %d format %#x, channelMask 0x%08x \""
2577 "for output %p with format %#x",
2578 sampleRate, format, channelMask, mOutput, mFormat);
2579 lStatus = BAD_VALUE;
2580 goto Exit;
2581 }
2582 break;
2583
2584 default:
2585 if (!audio_is_linear_pcm(format)) {
2586 ALOGE("createTrack_l() Bad parameter: format %#x \""
2587 "for output %p with format %#x",
2588 format, mOutput, mFormat);
2589 lStatus = BAD_VALUE;
2590 goto Exit;
2591 }
2592 if (sampleRate > mSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
2593 ALOGE("Sample rate out of range: %u mSampleRate %u", sampleRate, mSampleRate);
2594 lStatus = BAD_VALUE;
2595 goto Exit;
2596 }
2597 break;
2598
2599 }
2600
2601 lStatus = initCheck();
2602 if (lStatus != NO_ERROR) {
2603 ALOGE("createTrack_l() audio driver not initialized");
2604 goto Exit;
2605 }
2606
2607 { // scope for mLock
2608 Mutex::Autolock _l(mLock);
2609
2610 // all tracks in same audio session must share the same routing strategy otherwise
2611 // conflicts will happen when tracks are moved from one output to another by audio policy
2612 // manager
2613 product_strategy_t strategy = getStrategyForStream(streamType);
2614 for (size_t i = 0; i < mTracks.size(); ++i) {
2615 sp<Track> t = mTracks[i];
2616 if (t != 0 && t->isExternalTrack()) {
2617 product_strategy_t actual = getStrategyForStream(t->streamType());
2618 if (sessionId == t->sessionId() && strategy != actual) {
2619 ALOGE("createTrack_l() mismatched strategy; expected %u but found %u",
2620 strategy, actual);
2621 lStatus = BAD_VALUE;
2622 goto Exit;
2623 }
2624 }
2625 }
2626
2627 // Set DIRECT flag if current thread is DirectOutputThread. This can
2628 // happen when the playback is rerouted to direct output thread by
2629 // dynamic audio policy.
2630 // Do NOT report the flag changes back to client, since the client
2631 // doesn't explicitly request a direct flag.
2632 audio_output_flags_t trackFlags = *flags;
2633 if (mType == DIRECT) {
2634 trackFlags = static_cast<audio_output_flags_t>(trackFlags | AUDIO_OUTPUT_FLAG_DIRECT);
2635 }
2636
2637 track = new Track(this, client, streamType, attr, sampleRate, format,
2638 channelMask, frameCount,
2639 nullptr /* buffer */, (size_t)0 /* bufferSize */, sharedBuffer,
2640 sessionId, creatorPid, attributionSource, trackFlags,
2641 TrackBase::TYPE_DEFAULT, portId, SIZE_MAX /*frameCountToBeReady*/,
2642 speed, isSpatialized, isBitPerfect);
2643
2644 lStatus = track != 0 ? track->initCheck() : (status_t) NO_MEMORY;
2645 if (lStatus != NO_ERROR) {
2646 ALOGE("createTrack_l() initCheck failed %d; no control block?", lStatus);
2647 // track must be cleared from the caller as the caller has the AF lock
2648 goto Exit;
2649 }
2650 mTracks.add(track);
2651 {
2652 Mutex::Autolock _atCbL(mAudioTrackCbLock);
2653 if (callback.get() != nullptr) {
2654 mAudioTrackCallbacks.emplace(track, callback);
2655 }
2656 }
2657
2658 sp<EffectChain> chain = getEffectChain_l(sessionId);
2659 if (chain != 0) {
2660 ALOGV("createTrack_l() setting main buffer %p", chain->inBuffer());
2661 track->setMainBuffer(chain->inBuffer());
2662 chain->setStrategy(getStrategyForStream(track->streamType()));
2663 chain->incTrackCnt();
2664 }
2665
2666 if ((*flags & AUDIO_OUTPUT_FLAG_FAST) && (tid != -1)) {
2667 pid_t callingPid = IPCThreadState::self()->getCallingPid();
2668 // we don't have CAP_SYS_NICE, nor do we want to have it as it's too powerful,
2669 // so ask activity manager to do this on our behalf
2670 sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp, true /*forApp*/);
2671 }
2672 }
2673
2674 lStatus = NO_ERROR;
2675
2676 Exit:
2677 *status = lStatus;
2678 return track;
2679 }
2680
2681 template<typename T>
2682 ssize_t AudioFlinger::PlaybackThread::Tracks<T>::remove(const sp<T> &track)
2683 {
2684 const int trackId = track->id();
2685 const ssize_t index = mTracks.remove(track);
2686 if (index >= 0) {
2687 if (mSaveDeletedTrackIds) {
2688 // We can't directly access mAudioMixer since the caller may be outside of threadLoop.
2689 // Instead, we add to mDeletedTrackIds which is solely used for mAudioMixer update,
2690 // to be handled when MixerThread::prepareTracks_l() next changes mAudioMixer.
2691 mDeletedTrackIds.emplace(trackId);
2692 }
2693 }
2694 return index;
2695 }
2696
2697 uint32_t AudioFlinger::PlaybackThread::correctLatency_l(uint32_t latency) const
2698 {
2699 return latency;
2700 }
2701
2702 uint32_t AudioFlinger::PlaybackThread::latency() const
2703 {
2704 Mutex::Autolock _l(mLock);
2705 return latency_l();
2706 }
2707 uint32_t AudioFlinger::PlaybackThread::latency_l() const
2708 {
2709 uint32_t latency;
2710 if (initCheck() == NO_ERROR && mOutput->stream->getLatency(&latency) == OK) {
2711 return correctLatency_l(latency);
2712 }
2713 return 0;
2714 }
2715
2716 void AudioFlinger::PlaybackThread::setMasterVolume(float value)
2717 {
2718 Mutex::Autolock _l(mLock);
2719 // Don't apply master volume in SW if our HAL can do it for us.
2720 if (mOutput && mOutput->audioHwDev &&
2721 mOutput->audioHwDev->canSetMasterVolume()) {
2722 mMasterVolume = 1.0;
2723 } else {
2724 mMasterVolume = value;
2725 }
2726 }
2727
2728 void AudioFlinger::PlaybackThread::setMasterBalance(float balance)
2729 {
2730 mMasterBalance.store(balance);
2731 }
2732
2733 void AudioFlinger::PlaybackThread::setMasterMute(bool muted)
2734 {
2735 if (isDuplicating()) {
2736 return;
2737 }
2738 Mutex::Autolock _l(mLock);
2739 // Don't apply master mute in SW if our HAL can do it for us.
2740 if (mOutput && mOutput->audioHwDev &&
2741 mOutput->audioHwDev->canSetMasterMute()) {
2742 mMasterMute = false;
2743 } else {
2744 mMasterMute = muted;
2745 }
2746 }
2747
2748 void AudioFlinger::PlaybackThread::setStreamVolume(audio_stream_type_t stream, float value)
2749 {
2750 Mutex::Autolock _l(mLock);
2751 mStreamTypes[stream].volume = value;
2752 broadcast_l();
2753 }
2754
2755 void AudioFlinger::PlaybackThread::setStreamMute(audio_stream_type_t stream, bool muted)
2756 {
2757 Mutex::Autolock _l(mLock);
2758 mStreamTypes[stream].mute = muted;
2759 broadcast_l();
2760 }
2761
2762 float AudioFlinger::PlaybackThread::streamVolume(audio_stream_type_t stream) const
2763 {
2764 Mutex::Autolock _l(mLock);
2765 return mStreamTypes[stream].volume;
2766 }
2767
2768 void AudioFlinger::PlaybackThread::setVolumeForOutput_l(float left, float right) const
2769 {
2770 mOutput->stream->setVolume(left, right);
2771 }
2772
2773 // addTrack_l() must be called with ThreadBase::mLock held
2774 status_t AudioFlinger::PlaybackThread::addTrack_l(const sp<Track>& track)
2775 NO_THREAD_SAFETY_ANALYSIS // release and re-acquire mLock
2776 {
2777 status_t status = ALREADY_EXISTS;
2778
2779 if (mActiveTracks.indexOf(track) < 0) {
2780 // the track is newly added, make sure it fills up all its
2781 // buffers before playing. This is to ensure the client will
2782 // effectively get the latency it requested.
2783 if (track->isExternalTrack()) {
2784 TrackBase::track_state state = track->mState;
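            // mLock is dropped around the AudioSystem::startOutput() call below; the state
            // snapshot taken above is used afterwards to detect whether the track was stopped
            // or paused while the lock was released.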
2785 mLock.unlock();
2786 status = AudioSystem::startOutput(track->portId());
2787 mLock.lock();
2788             // abort if the track was stopped/paused while we released the lock
2789 if (state != track->mState) {
2790 if (status == NO_ERROR) {
2791 mLock.unlock();
2792 AudioSystem::stopOutput(track->portId());
2793 mLock.lock();
2794 }
2795 return INVALID_OPERATION;
2796 }
2797 // abort if start is rejected by audio policy manager
2798 if (status != NO_ERROR) {
2799 // Do not replace the error if it is DEAD_OBJECT. When this happens, it indicates
2800 // current playback thread is reopened, which may happen when clients set preferred
2801 // mixer configuration. Returning DEAD_OBJECT will make the client restore track
2802 // immediately.
2803 return status == DEAD_OBJECT ? status : PERMISSION_DENIED;
2804 }
2805 #ifdef ADD_BATTERY_DATA
2806 // to track the speaker usage
2807 addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStart);
2808 #endif
2809 sendIoConfigEvent_l(AUDIO_CLIENT_STARTED, track->creatorPid(), track->portId());
2810 }
2811
2812 // set retry count for buffer fill
2813 if (track->isOffloaded()) {
2814 if (track->isStopping_1()) {
2815 track->mRetryCount = kMaxTrackStopRetriesOffload;
2816 } else {
2817 track->mRetryCount = kMaxTrackStartupRetriesOffload;
2818 }
2819 track->mFillingUpStatus = mStandby ? Track::FS_FILLING : Track::FS_FILLED;
2820 } else {
2821 track->mRetryCount = kMaxTrackStartupRetries;
2822 track->mFillingUpStatus =
2823 track->sharedBuffer() != 0 ? Track::FS_FILLED : Track::FS_FILLING;
2824 }
2825
2826 sp<EffectChain> chain = getEffectChain_l(track->sessionId());
2827 if (mHapticChannelMask != AUDIO_CHANNEL_NONE
2828 && ((track->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
2829 || (chain != nullptr && chain->containsHapticGeneratingEffect_l()))) {
2830             // Unlock because VibratorService will take a lock for this call and will
2831             // call Track mute/unmute, which also requires the thread's lock.
2832 mLock.unlock();
2833 const os::HapticScale intensity = AudioFlinger::onExternalVibrationStart(
2834 track->getExternalVibration());
2835 std::optional<media::AudioVibratorInfo> vibratorInfo;
2836 {
2837 // TODO(b/184194780): Use the vibrator information from the vibrator that will be
2838 // used to play this track.
2839 Mutex::Autolock _l(mAudioFlinger->mLock);
2840 vibratorInfo = std::move(mAudioFlinger->getDefaultVibratorInfo_l());
2841 }
2842 mLock.lock();
2843 track->setHapticIntensity(intensity);
2844 if (vibratorInfo) {
2845 track->setHapticMaxAmplitude(vibratorInfo->maxAmplitude);
2846 }
2847
2848 // Haptic playback should be enabled by vibrator service.
2849 if (track->getHapticPlaybackEnabled()) {
2850                 // Disable haptic playback of all active tracks to ensure that only
2851                 // one track plays haptics when the current track should play haptics.
2852 for (const auto &t : mActiveTracks) {
2853 t->setHapticPlaybackEnabled(false);
2854 }
2855 }
2856
2857 // Set haptic intensity for effect
2858 if (chain != nullptr) {
2859 chain->setHapticIntensity_l(track->id(), intensity);
2860 }
2861 }
2862
2863 track->mResetDone = false;
2864 track->resetPresentationComplete();
2865 mActiveTracks.add(track);
2866 if (chain != 0) {
2867 ALOGV("addTrack_l() starting track on chain %p for session %d", chain.get(),
2868 track->sessionId());
2869 chain->incActiveTrackCnt();
2870 }
2871
2872 track->logBeginInterval(patchSinksToString(&mPatch)); // log to MediaMetrics
2873 status = NO_ERROR;
2874 }
2875
2876 onAddNewTrack_l();
2877 return status;
2878 }
2879
2880 bool AudioFlinger::PlaybackThread::destroyTrack_l(const sp<Track>& track)
2881 {
2882 track->terminate();
2883 // active tracks are removed by threadLoop()
2884 bool trackActive = (mActiveTracks.indexOf(track) >= 0);
2885 track->mState = TrackBase::STOPPED;
2886 if (!trackActive) {
2887 removeTrack_l(track);
2888 } else if (track->isFastTrack() || track->isOffloaded() || track->isDirect()) {
2889 if (track->isPausePending()) {
2890 track->pauseAck();
2891 }
2892 track->mState = TrackBase::STOPPING_1;
2893 }
2894
2895 return trackActive;
2896 }
2897
2898 void AudioFlinger::PlaybackThread::removeTrack_l(const sp<Track>& track)
2899 {
2900 track->triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
2901
2902 String8 result;
2903 track->appendDump(result, false /* active */);
2904 mLocalLog.log("removeTrack_l (%p) %s", track.get(), result.string());
2905
2906 mTracks.remove(track);
2907 {
2908 Mutex::Autolock _atCbL(mAudioTrackCbLock);
2909 mAudioTrackCallbacks.erase(track);
2910 }
2911 if (track->isFastTrack()) {
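        // Return this track's fast mixer slot to the available mask; index 0 never appears
        // here because it is reserved for the normal mixer's submix.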
2912 int index = track->mFastIndex;
2913 ALOG_ASSERT(0 < index && index < (int)FastMixerState::sMaxFastTracks);
2914 ALOG_ASSERT(!(mFastTrackAvailMask & (1 << index)));
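// Return this slot to the pool of available fast mixer track indices.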
2915 mFastTrackAvailMask |= 1 << index;
2916 // redundant as track is about to be destroyed, for dumpsys only
2917 track->mFastIndex = -1;
2918 }
2919 sp<EffectChain> chain = getEffectChain_l(track->sessionId());
2920 if (chain != 0) {
2921 chain->decTrackCnt();
2922 }
2923 }
2924
2925 String8 AudioFlinger::PlaybackThread::getParameters(const String8& keys)
2926 {
2927 Mutex::Autolock _l(mLock);
2928 String8 out_s8;
2929 if (initCheck() == NO_ERROR && mOutput->stream->getParameters(keys, &out_s8) == OK) {
2930 return out_s8;
2931 }
2932 return {};
2933 }
2934
2935 status_t AudioFlinger::DirectOutputThread::selectPresentation(int presentationId, int programId) {
2936 Mutex::Autolock _l(mLock);
2937 if (!isStreamInitialized()) {
2938 return NO_INIT;
2939 }
2940 return mOutput->stream->selectPresentation(presentationId, programId);
2941 }
2942
2943 void AudioFlinger::PlaybackThread::ioConfigChanged(audio_io_config_event_t event, pid_t pid,
2944 audio_port_handle_t portId) {
2945 ALOGV("PlaybackThread::ioConfigChanged, thread %p, event %d", this, event);
2946 sp<AudioIoDescriptor> desc;
2947 const struct audio_patch patch = isMsdDevice() ? mDownStreamPatch : mPatch;
2948 switch (event) {
2949 case AUDIO_OUTPUT_OPENED:
2950 case AUDIO_OUTPUT_REGISTERED:
2951 case AUDIO_OUTPUT_CONFIG_CHANGED:
2952 desc = sp<AudioIoDescriptor>::make(mId, patch, false /*isInput*/,
2953 mSampleRate, mFormat, mChannelMask,
2954 // FIXME AudioFlinger::frameCount(audio_io_handle_t) instead of mNormalFrameCount?
2955 mNormalFrameCount, mFrameCount, latency_l());
2956 break;
2957 case AUDIO_CLIENT_STARTED:
2958 desc = sp<AudioIoDescriptor>::make(mId, patch, portId);
2959 break;
2960 case AUDIO_OUTPUT_CLOSED:
2961 default:
2962 desc = sp<AudioIoDescriptor>::make(mId);
2963 break;
2964 }
2965 mAudioFlinger->ioConfigChanged(event, desc, pid);
2966 }
2967
2968 void AudioFlinger::PlaybackThread::onWriteReady()
2969 {
2970 mCallbackThread->resetWriteBlocked();
2971 }
2972
2973 void AudioFlinger::PlaybackThread::onDrainReady()
2974 {
2975 mCallbackThread->resetDraining();
2976 }
2977
2978 void AudioFlinger::PlaybackThread::onError()
2979 {
2980 mCallbackThread->setAsyncError();
2981 }
2982
2983 void AudioFlinger::PlaybackThread::onCodecFormatChanged(
2984 const std::basic_string<uint8_t>& metadataBs)
2985 {
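// Capture only a weak reference for the detached worker thread so it does not
// extend this PlaybackThread's lifetime; promote() below fails harmlessly if
// the thread has already been destroyed.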
2986 wp<AudioFlinger::PlaybackThread> weakPointerThis = this;
2987 std::thread([this, metadataBs, weakPointerThis]() {
2988 sp<AudioFlinger::PlaybackThread> playbackThread = weakPointerThis.promote();
2989 if (playbackThread == nullptr) {
2990 ALOGW("PlaybackThread was destroyed, skip codec format change event");
2991 return;
2992 }
2993
2994 audio_utils::metadata::Data metadata =
2995 audio_utils::metadata::dataFromByteString(metadataBs);
2996 if (metadata.empty()) {
2997 ALOGW("Can not transform the buffer to audio metadata, %s, %d",
2998 reinterpret_cast<char*>(const_cast<uint8_t*>(metadataBs.data())),
2999 (int)metadataBs.size());
3000 return;
3001 }
3002
3003 audio_utils::metadata::ByteString metaDataStr =
3004 audio_utils::metadata::byteStringFromData(metadata);
3005 std::vector metadataVec(metaDataStr.begin(), metaDataStr.end());
3006 Mutex::Autolock _l(mAudioTrackCbLock);
3007 for (const auto& callbackPair : mAudioTrackCallbacks) {
3008 callbackPair.second->onCodecFormatChanged(metadataVec);
3009 }
3010 }).detach();
3011 }
3012
3013 void AudioFlinger::PlaybackThread::resetWriteBlocked(uint32_t sequence)
3014 {
3015 Mutex::Autolock _l(mLock);
3016 // reject out of sequence requests
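// The low bit of mWriteAckSequence acts as a "write pending" flag and the upper
// bits form a sequence counter (see threadLoop_write()); a callback carrying a
// stale sequence is simply ignored here.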
3017 if ((mWriteAckSequence & 1) && (sequence == mWriteAckSequence)) {
3018 mWriteAckSequence &= ~1;
3019 mWaitWorkCV.signal();
3020 }
3021 }
3022
3023 void AudioFlinger::PlaybackThread::resetDraining(uint32_t sequence)
3024 {
3025 Mutex::Autolock _l(mLock);
3026 // reject out of sequence requests
3027 if ((mDrainSequence & 1) && (sequence == mDrainSequence)) {
3028 // Register discontinuity when HW drain is completed because that can cause
3029 // the timestamp frame position to reset to 0 for direct and offload threads.
3030 // (Out of sequence requests are ignored, since the discontinuity would be handled
3031 // elsewhere, e.g. in flush).
3032 mTimestampVerifier.discontinuity(mTimestampVerifier.DISCONTINUITY_MODE_ZERO);
3033 mDrainSequence &= ~1;
3034 mWaitWorkCV.signal();
3035 }
3036 }
3037
3038 void AudioFlinger::PlaybackThread::readOutputParameters_l()
3039 {
3040 // unfortunately we have no way of recovering from errors here, hence the LOG_ALWAYS_FATAL
3041 const audio_config_base_t audioConfig = mOutput->getAudioProperties();
3042 mSampleRate = audioConfig.sample_rate;
3043 mChannelMask = audioConfig.channel_mask;
3044 if (!audio_is_output_channel(mChannelMask)) {
3045 LOG_ALWAYS_FATAL("HAL channel mask %#x not valid for output", mChannelMask);
3046 }
3047 if (hasMixer() && !isValidPcmSinkChannelMask(mChannelMask)) {
3048 LOG_ALWAYS_FATAL("HAL channel mask %#x not supported for mixed output",
3049 mChannelMask);
3050 }
3051
3052 if (mMixerChannelMask == AUDIO_CHANNEL_NONE) {
3053 mMixerChannelMask = mChannelMask;
3054 }
3055
3056 mChannelCount = audio_channel_count_from_out_mask(mChannelMask);
3057 mBalance.setChannelMask(mChannelMask);
3058
3059 uint32_t mixerChannelCount = audio_channel_count_from_out_mask(mMixerChannelMask);
3060
3061 // Get actual HAL format.
3062 status_t result = mOutput->stream->getAudioProperties(nullptr, nullptr, &mHALFormat);
3063 LOG_ALWAYS_FATAL_IF(result != OK, "Error when retrieving output stream format: %d", result);
3064 // Get format from the shim, which will be different than the HAL format
3065 // if playing compressed audio over HDMI passthrough.
3066 mFormat = audioConfig.format;
3067 if (!audio_is_valid_format(mFormat)) {
3068 LOG_ALWAYS_FATAL("HAL format %#x not valid for output", mFormat);
3069 }
3070 if (hasMixer() && !isValidPcmSinkFormat(mFormat)) {
3071 LOG_FATAL("HAL format %#x not supported for mixed output",
3072 mFormat);
3073 }
3074 mFrameSize = mOutput->getFrameSize();
3075 result = mOutput->stream->getBufferSize(&mBufferSize);
3076 LOG_ALWAYS_FATAL_IF(result != OK,
3077 "Error when retrieving output stream buffer size: %d", result);
3078 mFrameCount = mBufferSize / mFrameSize;
3079 if (hasMixer() && (mFrameCount & 15)) {
3080 ALOGW("HAL output buffer size is %zu frames but AudioMixer requires multiples of 16 frames",
3081 mFrameCount);
3082 }
3083
3084 mHwSupportsPause = false;
3085 if (mOutput->flags & AUDIO_OUTPUT_FLAG_DIRECT) {
3086 bool supportsPause = false, supportsResume = false;
3087 if (mOutput->stream->supportsPauseAndResume(&supportsPause, &supportsResume) == OK) {
3088 if (supportsPause && supportsResume) {
3089 mHwSupportsPause = true;
3090 } else if (supportsPause) {
3091 ALOGW("direct output implements pause but not resume");
3092 } else if (supportsResume) {
3093 ALOGW("direct output implements resume but not pause");
3094 }
3095 }
3096 }
3097 if (!mHwSupportsPause && mOutput->flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) {
3098 LOG_ALWAYS_FATAL("HW_AV_SYNC requested but HAL does not implement pause and resume");
3099 }
3100
3101 if (mType == DUPLICATING && mMixerBufferEnabled && mEffectBufferEnabled) {
3102 // For best precision, we use float instead of the associated output
3103 // device format (typically PCM 16 bit).
3104
3105 mFormat = AUDIO_FORMAT_PCM_FLOAT;
3106 mFrameSize = mChannelCount * audio_bytes_per_sample(mFormat);
3107 mBufferSize = mFrameSize * mFrameCount;
3108
3109 // TODO: We currently use the associated output device channel mask and sample rate.
3110 // (1) Perhaps use the ORed channel mask of all downstream MixerThreads
3111 // (if a valid mask) to avoid premature downmix.
3112 // (2) Perhaps use the maximum sample rate of all downstream MixerThreads
3113 // instead of the output device sample rate to avoid loss of high frequency information.
3114 // This may need to be updated as MixerThread/OutputTracks are added and not here.
3115 }
3116
3117 // Calculate size of normal sink buffer relative to the HAL output buffer size
3118 double multiplier = 1.0;
3119 // Note: mType == SPATIALIZER does not support FastMixer.
3120 if (mType == MIXER && (kUseFastMixer == FastMixer_Static ||
3121 kUseFastMixer == FastMixer_Dynamic)) {
3122 size_t minNormalFrameCount = (kMinNormalSinkBufferSizeMs * mSampleRate) / 1000;
3123 size_t maxNormalFrameCount = (kMaxNormalSinkBufferSizeMs * mSampleRate) / 1000;
3124
3125 // round up minimum and round down maximum to nearest 16 frames to satisfy AudioMixer
3126 minNormalFrameCount = (minNormalFrameCount + 15) & ~15;
3127 maxNormalFrameCount = maxNormalFrameCount & ~15;
3128 if (maxNormalFrameCount < minNormalFrameCount) {
3129 maxNormalFrameCount = minNormalFrameCount;
3130 }
3131 multiplier = (double) minNormalFrameCount / (double) mFrameCount;
3132 if (multiplier <= 1.0) {
3133 multiplier = 1.0;
3134 } else if (multiplier <= 2.0) {
3135 if (2 * mFrameCount <= maxNormalFrameCount) {
3136 multiplier = 2.0;
3137 } else {
3138 multiplier = (double) maxNormalFrameCount / (double) mFrameCount;
3139 }
3140 } else {
3141 multiplier = floor(multiplier);
3142 }
3143 }
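// Illustrative example (constants assumed, not taken from this file): with
// mSampleRate = 48000, a HAL buffer of mFrameCount = 192 frames (4 ms) and a
// 20 ms minimum normal sink buffer, minNormalFrameCount = 960, so the
// multiplier is 960 / 192 = 5.0 and mNormalFrameCount below becomes 960 frames.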
3144 mNormalFrameCount = multiplier * mFrameCount;
3145 // round up to nearest 16 frames to satisfy AudioMixer
3146 if (hasMixer()) {
3147 mNormalFrameCount = (mNormalFrameCount + 15) & ~15;
3148 }
3149 ALOGI("HAL output buffer size %zu frames, normal sink buffer size %zu frames", mFrameCount,
3150 mNormalFrameCount);
3151
3152 // Check if we want to throttle the processing to no more than 2x normal rate
3153 mThreadThrottle = property_get_bool("af.thread.throttle", true /* default_value */);
3154 mThreadThrottleTimeMs = 0;
3155 mThreadThrottleEndMs = 0;
3156 mHalfBufferMs = mNormalFrameCount * 1000 / (2 * mSampleRate);
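// e.g. a 960-frame normal sink buffer at 48 kHz spans 20 ms, giving
// mHalfBufferMs = 10; this bounds the throttle sleep applied in threadLoop().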
3157
3158 // mSinkBuffer is the sink buffer. Size is always multiple-of-16 frames.
3159 // Originally this was int16_t[] array, need to remove legacy implications.
3160 free(mSinkBuffer);
3161 mSinkBuffer = NULL;
3162
3163 // For sink buffer size, we use the frame size from the downstream sink to avoid problems
3164 // with non PCM formats for compressed music, e.g. AAC, and Offload threads.
3165 const size_t sinkBufferSize = mNormalFrameCount * mFrameSize;
3166 (void)posix_memalign(&mSinkBuffer, 32, sinkBufferSize);
3167
3168 // We resize the mMixerBuffer according to the requirements of the sink buffer which
3169 // drives the output.
3170 free(mMixerBuffer);
3171 mMixerBuffer = NULL;
3172 if (mMixerBufferEnabled) {
3173 mMixerBufferFormat = AUDIO_FORMAT_PCM_FLOAT; // no longer valid: AUDIO_FORMAT_PCM_16_BIT.
3174 mMixerBufferSize = mNormalFrameCount * mixerChannelCount
3175 * audio_bytes_per_sample(mMixerBufferFormat);
3176 (void)posix_memalign(&mMixerBuffer, 32, mMixerBufferSize);
3177 }
3178 free(mEffectBuffer);
3179 mEffectBuffer = NULL;
3180 if (mEffectBufferEnabled) {
3181 mEffectBufferFormat = EFFECT_BUFFER_FORMAT;
3182 mEffectBufferSize = mNormalFrameCount * mixerChannelCount
3183 * audio_bytes_per_sample(mEffectBufferFormat);
3184 (void)posix_memalign(&mEffectBuffer, 32, mEffectBufferSize);
3185 }
3186
3187 if (mType == SPATIALIZER) {
3188 free(mPostSpatializerBuffer);
3189 mPostSpatializerBuffer = nullptr;
3190 mPostSpatializerBufferSize = mNormalFrameCount * mChannelCount
3191 * audio_bytes_per_sample(mEffectBufferFormat);
3192 (void)posix_memalign(&mPostSpatializerBuffer, 32, mPostSpatializerBufferSize);
3193 }
3194
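// Split the haptic channels out of the channel mask: from here on mChannelMask /
// mChannelCount describe only the audio channels, while the haptic samples live
// in a separate region after the audio data (the "partially interleaved" layout
// that threadLoop() adjusts before writing to the sink).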
3195 mHapticChannelMask = static_cast<audio_channel_mask_t>(mChannelMask & AUDIO_CHANNEL_HAPTIC_ALL);
3196 mChannelMask = static_cast<audio_channel_mask_t>(mChannelMask & ~mHapticChannelMask);
3197 mHapticChannelCount = audio_channel_count_from_out_mask(mHapticChannelMask);
3198 mChannelCount -= mHapticChannelCount;
3199 mMixerChannelMask = static_cast<audio_channel_mask_t>(mMixerChannelMask & ~mHapticChannelMask);
3200
3201 // force reconfiguration of effect chains and engines to take new buffer size and audio
3202 // parameters into account
3203 // Note that mLock is not held when readOutputParameters_l() is called from the constructor
3204 // but in this case nothing is done below as no audio sessions have effect yet so it doesn't
3205 // matter.
3206 // create a copy of mEffectChains as calling moveEffectChain_l() can reorder some effect chains
3207 Vector< sp<EffectChain> > effectChains = mEffectChains;
3208 for (size_t i = 0; i < effectChains.size(); i ++) {
3209 mAudioFlinger->moveEffectChain_l(effectChains[i]->sessionId(),
3210 this/* srcThread */, this/* dstThread */);
3211 }
3212
3213 audio_output_flags_t flags = mOutput->flags;
3214 mediametrics::LogItem item(mThreadMetrics.getMetricsId()); // TODO: method in ThreadMetrics?
3215 item.set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_READPARAMETERS)
3216 .set(AMEDIAMETRICS_PROP_ENCODING, formatToString(mFormat).c_str())
3217 .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
3218 .set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
3219 .set(AMEDIAMETRICS_PROP_CHANNELCOUNT, (int32_t)mChannelCount)
3220 .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mNormalFrameCount)
3221 .set(AMEDIAMETRICS_PROP_FLAGS, toString(flags).c_str())
3222 .set(AMEDIAMETRICS_PROP_PREFIX_HAPTIC AMEDIAMETRICS_PROP_CHANNELMASK,
3223 (int32_t)mHapticChannelMask)
3224 .set(AMEDIAMETRICS_PROP_PREFIX_HAPTIC AMEDIAMETRICS_PROP_CHANNELCOUNT,
3225 (int32_t)mHapticChannelCount)
3226 .set(AMEDIAMETRICS_PROP_PREFIX_HAL AMEDIAMETRICS_PROP_ENCODING,
3227 formatToString(mHALFormat).c_str())
3228 .set(AMEDIAMETRICS_PROP_PREFIX_HAL AMEDIAMETRICS_PROP_FRAMECOUNT,
3229 (int32_t)mFrameCount) // sic - added HAL
3230 ;
3231 uint32_t latencyMs;
3232 if (mOutput->stream->getLatency(&latencyMs) == NO_ERROR) {
3233 item.set(AMEDIAMETRICS_PROP_PREFIX_HAL AMEDIAMETRICS_PROP_LATENCYMS, (double)latencyMs);
3234 }
3235 item.record();
3236 }
3237
3238 AudioFlinger::ThreadBase::MetadataUpdate AudioFlinger::PlaybackThread::updateMetadata_l()
3239 {
3240 if (!isStreamInitialized() || !mActiveTracks.readAndClearHasChanged()) {
3241 return {}; // nothing to do
3242 }
3243 StreamOutHalInterface::SourceMetadata metadata;
3244 auto backInserter = std::back_inserter(metadata.tracks);
3245 for (const sp<Track> &track : mActiveTracks) {
3246 // No track is invalid as this is called after prepareTracks_l in the same critical section
3247 track->copyMetadataTo(backInserter);
3248 }
3249 sendMetadataToBackend_l(metadata);
3250 MetadataUpdate change;
3251 change.playbackMetadataUpdate = metadata.tracks;
3252 return change;
3253 }
3254
3255 void AudioFlinger::PlaybackThread::sendMetadataToBackend_l(
3256 const StreamOutHalInterface::SourceMetadata& metadata)
3257 {
3258 mOutput->stream->updateSourceMetadata(metadata);
3259 };
3260
3261 status_t AudioFlinger::PlaybackThread::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames)
3262 {
3263 if (halFrames == NULL || dspFrames == NULL) {
3264 return BAD_VALUE;
3265 }
3266 Mutex::Autolock _l(mLock);
3267 if (initCheck() != NO_ERROR) {
3268 return INVALID_OPERATION;
3269 }
3270 int64_t framesWritten = mBytesWritten / mFrameSize;
3271 *halFrames = framesWritten;
3272
3273 if (isSuspended()) {
3274 // return an estimation of rendered frames when the output is suspended
3275 size_t latencyFrames = (latency_l() * mSampleRate) / 1000;
3276 *dspFrames = (uint32_t)
3277 (framesWritten >= (int64_t)latencyFrames ? framesWritten - latencyFrames : 0);
3278 return NO_ERROR;
3279 } else {
3280 status_t status;
3281 uint32_t frames;
3282 status = mOutput->getRenderPosition(&frames);
3283 *dspFrames = (size_t)frames;
3284 return status;
3285 }
3286 }
3287
3288 product_strategy_t AudioFlinger::PlaybackThread::getStrategyForSession_l(audio_session_t sessionId)
3289 {
3290 // session AUDIO_SESSION_OUTPUT_MIX is placed in same strategy as MUSIC stream so that
3291 // it is moved to correct output by audio policy manager when A2DP is connected or disconnected
3292 if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
3293 return getStrategyForStream(AUDIO_STREAM_MUSIC);
3294 }
3295 for (size_t i = 0; i < mTracks.size(); i++) {
3296 sp<Track> track = mTracks[i];
3297 if (sessionId == track->sessionId() && !track->isInvalid()) {
3298 return getStrategyForStream(track->streamType());
3299 }
3300 }
3301 return getStrategyForStream(AUDIO_STREAM_MUSIC);
3302 }
3303
3304
3305 AudioStreamOut* AudioFlinger::PlaybackThread::getOutput() const
3306 {
3307 Mutex::Autolock _l(mLock);
3308 return mOutput;
3309 }
3310
3311 AudioStreamOut* AudioFlinger::PlaybackThread::clearOutput()
3312 {
3313 Mutex::Autolock _l(mLock);
3314 AudioStreamOut *output = mOutput;
3315 mOutput = NULL;
3316 // FIXME FastMixer might also have a raw ptr to mOutputSink;
3317 // must push a NULL and wait for ack
3318 mOutputSink.clear();
3319 mPipeSink.clear();
3320 mNormalSink.clear();
3321 return output;
3322 }
3323
3324 // this method must always be called either with ThreadBase mLock held or inside the thread loop
3325 sp<StreamHalInterface> AudioFlinger::PlaybackThread::stream() const
3326 {
3327 if (mOutput == NULL) {
3328 return NULL;
3329 }
3330 return mOutput->stream;
3331 }
3332
3333 uint32_t AudioFlinger::PlaybackThread::activeSleepTimeUs() const
3334 {
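// Duration of one normal sink buffer, in microseconds:
// (mNormalFrameCount / mSampleRate) seconds scaled to us with integer math.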
3335 return (uint32_t)((uint32_t)((mNormalFrameCount * 1000) / mSampleRate) * 1000);
3336 }
3337
3338 status_t AudioFlinger::PlaybackThread::setSyncEvent(const sp<SyncEvent>& event)
3339 {
3340 if (!isValidSyncEvent(event)) {
3341 return BAD_VALUE;
3342 }
3343
3344 Mutex::Autolock _l(mLock);
3345
3346 for (size_t i = 0; i < mTracks.size(); ++i) {
3347 sp<Track> track = mTracks[i];
3348 if (event->triggerSession() == track->sessionId()) {
3349 (void) track->setSyncEvent(event);
3350 return NO_ERROR;
3351 }
3352 }
3353
3354 return NAME_NOT_FOUND;
3355 }
3356
3357 bool AudioFlinger::PlaybackThread::isValidSyncEvent(const sp<SyncEvent>& event) const
3358 {
3359 return event->type() == AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE;
3360 }
3361
3362 void AudioFlinger::PlaybackThread::threadLoop_removeTracks(
3363 [[maybe_unused]] const Vector< sp<Track> >& tracksToRemove)
3364 {
3365 // Miscellaneous track cleanup when removed from the active list,
3366 // called without Thread lock but synchronized with threadLoop processing.
3367 #ifdef ADD_BATTERY_DATA
3368 for (const auto& track : tracksToRemove) {
3369 if (track->isExternalTrack()) {
3370 // to track the speaker usage
3371 addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
3372 }
3373 }
3374 #endif
3375 }
3376
3377 void AudioFlinger::PlaybackThread::checkSilentMode_l()
3378 {
3379 if (!mMasterMute) {
3380 char value[PROPERTY_VALUE_MAX];
3381 if (mOutDeviceTypeAddrs.empty()) {
3382 ALOGD("ro.audio.silent is ignored since no output device is set");
3383 return;
3384 }
3385 if (isSingleDeviceType(outDeviceTypes(), AUDIO_DEVICE_OUT_REMOTE_SUBMIX)) {
3386 ALOGD("ro.audio.silent will be ignored for threads on AUDIO_DEVICE_OUT_REMOTE_SUBMIX");
3387 return;
3388 }
3389 if (property_get("ro.audio.silent", value, "0") > 0) {
3390 char *endptr;
3391 unsigned long ul = strtoul(value, &endptr, 0);
3392 if (*endptr == '\0' && ul != 0) {
3393 ALOGD("Silence is golden");
3394 // The setprop command will not allow a property to be changed after
3395 // the first time it is set, so we don't have to worry about un-muting.
3396 setMasterMute_l(true);
3397 }
3398 }
3399 }
3400 }
3401
3402 // shared by MIXER and DIRECT, overridden by DUPLICATING
3403 ssize_t AudioFlinger::PlaybackThread::threadLoop_write()
3404 {
3405 LOG_HIST_TS();
3406 mInWrite = true;
3407 ssize_t bytesWritten;
3408 const size_t offset = mCurrentWriteLength - mBytesRemaining;
3409
3410 // If an NBAIO sink is present, use it to write the normal mixer's submix
3411 if (mNormalSink != 0) {
3412
3413 const size_t count = mBytesRemaining / mFrameSize;
3414
3415 ATRACE_BEGIN("write");
3416 // update the setpoint when AudioFlinger::mScreenState changes
3417 uint32_t screenState = AudioFlinger::mScreenState;
3418 if (screenState != mScreenState) {
3419 mScreenState = screenState;
3420 MonoPipe *pipe = (MonoPipe *)mPipeSink.get();
3421 if (pipe != NULL) {
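// Adjust the pipe's fill setpoint when the screen state changes: one state
// buffers deeply (7/8 of the pipe) to favor power, the other keeps only about
// two normal buffers queued to favor latency.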
3422 pipe->setAvgFrames((mScreenState & 1) ?
3423 (pipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2);
3424 }
3425 }
3426 ssize_t framesWritten = mNormalSink->write((char *)mSinkBuffer + offset, count);
3427 ATRACE_END();
3428
3429 if (framesWritten > 0) {
3430 bytesWritten = framesWritten * mFrameSize;
3431
3432 #ifdef TEE_SINK
3433 mTee.write((char *)mSinkBuffer + offset, framesWritten);
3434 #endif
3435 } else {
3436 bytesWritten = framesWritten;
3437 }
3438 // otherwise use the HAL / AudioStreamOut directly
3439 } else {
3440 // Direct output and offload threads
3441
3442 if (mUseAsyncWrite) {
3443 ALOGW_IF(mWriteAckSequence & 1, "threadLoop_write(): out of sequence write request");
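// Mark a write in flight: bump the sequence counter (upper bits) and set the
// low bit; the async callback clears it via resetWriteBlocked() with the same
// sequence value.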
3444 mWriteAckSequence += 2;
3445 mWriteAckSequence |= 1;
3446 ALOG_ASSERT(mCallbackThread != 0);
3447 mCallbackThread->setWriteBlocked(mWriteAckSequence);
3448 }
3449 ATRACE_BEGIN("write");
3450 // FIXME We should have an implementation of timestamps for direct output threads.
3451 // They are used e.g for multichannel PCM playback over HDMI.
3452 bytesWritten = mOutput->write((char *)mSinkBuffer + offset, mBytesRemaining);
3453 ATRACE_END();
3454
3455 if (mUseAsyncWrite &&
3456 ((bytesWritten < 0) || (bytesWritten == (ssize_t)mBytesRemaining))) {
3457 // do not wait for the async callback in case of error or full write
3458 mWriteAckSequence &= ~1;
3459 ALOG_ASSERT(mCallbackThread != 0);
3460 mCallbackThread->setWriteBlocked(mWriteAckSequence);
3461 }
3462 }
3463
3464 mNumWrites++;
3465 mInWrite = false;
3466 if (mStandby) {
3467 mThreadMetrics.logBeginInterval();
3468 mThreadSnapshot.onBegin();
3469 mStandby = false;
3470 }
3471 return bytesWritten;
3472 }
3473
3474 // startMelComputation_l() must be called with AudioFlinger::mLock held
3475 void AudioFlinger::PlaybackThread::startMelComputation_l(
3476 const sp<audio_utils::MelProcessor>& processor)
3477 {
3478 auto outputSink = static_cast<AudioStreamOutSink*>(mOutputSink.get());
3479 if (outputSink != nullptr) {
3480 outputSink->startMelComputation(processor);
3481 }
3482 }
3483
3484 // stopMelComputation_l() must be called with AudioFlinger::mLock held
3485 void AudioFlinger::PlaybackThread::stopMelComputation_l()
3486 {
3487 auto outputSink = static_cast<AudioStreamOutSink*>(mOutputSink.get());
3488 if (outputSink != nullptr) {
3489 outputSink->stopMelComputation();
3490 }
3491 }
3492
3493 void AudioFlinger::PlaybackThread::threadLoop_drain()
3494 {
3495 bool supportsDrain = false;
3496 if (mOutput->stream->supportsDrain(&supportsDrain) == OK && supportsDrain) {
3497 ALOGV("draining %s", (mMixerStatus == MIXER_DRAIN_TRACK) ? "early" : "full");
3498 if (mUseAsyncWrite) {
3499 ALOGW_IF(mDrainSequence & 1, "threadLoop_drain(): out of sequence drain request");
3500 mDrainSequence |= 1;
3501 ALOG_ASSERT(mCallbackThread != 0);
3502 mCallbackThread->setDraining(mDrainSequence);
3503 }
3504 status_t result = mOutput->stream->drain(mMixerStatus == MIXER_DRAIN_TRACK);
3505 ALOGE_IF(result != OK, "Error when draining stream: %d", result);
3506 }
3507 }
3508
3509 void AudioFlinger::PlaybackThread::threadLoop_exit()
3510 {
3511 {
3512 Mutex::Autolock _l(mLock);
3513 for (size_t i = 0; i < mTracks.size(); i++) {
3514 sp<Track> track = mTracks[i];
3515 track->invalidate();
3516 }
3517 // Clear ActiveTracks to update BatteryNotifier in case active tracks remain.
3518 // After we exit there are no more track changes sent to BatteryNotifier
3519 // because that requires an active threadLoop.
3520 // TODO: should we decActiveTrackCnt() of the cleared track effect chain?
3521 mActiveTracks.clear();
3522 }
3523 }
3524
3525 /*
3526 The derived values that are cached:
3527 - mSinkBufferSize from frame count * frame size
3528 - mActiveSleepTimeUs from activeSleepTimeUs()
3529 - mIdleSleepTimeUs from idleSleepTimeUs()
3530 - mStandbyDelayNs from mActiveSleepTimeUs (DIRECT only) or forced to at least
3531 kDefaultStandbyTimeInNsecs when connected to an A2DP device.
3532 - maxPeriod from frame count and sample rate (MIXER only)
3533
3534 The parameters that affect these derived values are:
3535 - frame count
3536 - frame size
3537 - sample rate
3538 - device type: A2DP or not
3539 - device latency
3540 - format: PCM or not
3541 - active sleep time
3542 - idle sleep time
3543 */
3544
3545 void AudioFlinger::PlaybackThread::cacheParameters_l()
3546 {
3547 mSinkBufferSize = mNormalFrameCount * mFrameSize;
3548 mActiveSleepTimeUs = activeSleepTimeUs();
3549 mIdleSleepTimeUs = idleSleepTimeUs();
3550
3551 mStandbyDelayNs = AudioFlinger::mStandbyTimeInNsecs;
3552
3553 // make sure standby delay is not too short when connected to an A2DP sink to avoid
3554 // truncating audio when going to standby.
3555 if (!Intersection(outDeviceTypes(), getAudioDeviceOutAllA2dpSet()).empty()) {
3556 if (mStandbyDelayNs < kDefaultStandbyTimeInNsecs) {
3557 mStandbyDelayNs = kDefaultStandbyTimeInNsecs;
3558 }
3559 }
3560 }
3561
3562 bool AudioFlinger::PlaybackThread::invalidateTracks_l(audio_stream_type_t streamType)
3563 {
3564 ALOGV("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %zu",
3565 this, streamType, mTracks.size());
3566 bool trackMatch = false;
3567 size_t size = mTracks.size();
3568 for (size_t i = 0; i < size; i++) {
3569 sp<Track> t = mTracks[i];
3570 if (t->streamType() == streamType && t->isExternalTrack()) {
3571 t->invalidate();
3572 trackMatch = true;
3573 }
3574 }
3575 return trackMatch;
3576 }
3577
3578 void AudioFlinger::PlaybackThread::invalidateTracks(audio_stream_type_t streamType)
3579 {
3580 Mutex::Autolock _l(mLock);
3581 invalidateTracks_l(streamType);
3582 }
3583
3584 void AudioFlinger::PlaybackThread::invalidateTracks(std::set<audio_port_handle_t>& portIds) {
3585 Mutex::Autolock _l(mLock);
3586 invalidateTracks_l(portIds);
3587 }
3588
3589 bool AudioFlinger::PlaybackThread::invalidateTracks_l(std::set<audio_port_handle_t>& portIds) {
3590 bool trackMatch = false;
3591 const size_t size = mTracks.size();
3592 for (size_t i = 0; i < size; i++) {
3593 sp<Track> t = mTracks[i];
3594 if (t->isExternalTrack() && portIds.find(t->portId()) != portIds.end()) {
3595 t->invalidate();
3596 portIds.erase(t->portId());
3597 trackMatch = true;
3598 }
3599 if (portIds.empty()) {
3600 break;
3601 }
3602 }
3603 return trackMatch;
3604 }
3605
3606 // getTrackById_l must be called with holding thread lock
3607 AudioFlinger::PlaybackThread::Track* AudioFlinger::PlaybackThread::getTrackById_l(
3608 audio_port_handle_t trackPortId) {
3609 for (size_t i = 0; i < mTracks.size(); i++) {
3610 if (mTracks[i]->portId() == trackPortId) {
3611 return mTracks[i].get();
3612 }
3613 }
3614 return nullptr;
3615 }
3616
3617 status_t AudioFlinger::PlaybackThread::addEffectChain_l(const sp<EffectChain>& chain)
3618 {
3619 audio_session_t session = chain->sessionId();
3620 sp<EffectBufferHalInterface> halInBuffer, halOutBuffer;
3621 effect_buffer_t *buffer = nullptr; // only used for non global sessions
3622
3623 if (mType == SPATIALIZER) {
3624 if (!audio_is_global_session(session)) {
3625 // player sessions on a spatializer output will use a dedicated input buffer and
3626 // will either output multi channel to mEffectBuffer if the track is spatialized
3627 // or stereo to mPostSpatializerBuffer if not spatialized.
3628 uint32_t channelMask;
3629 bool isSessionSpatialized =
3630 (hasAudioSession_l(session) & ThreadBase::SPATIALIZED_SESSION) != 0;
3631 if (isSessionSpatialized) {
3632 channelMask = mMixerChannelMask;
3633 } else {
3634 channelMask = mChannelMask;
3635 }
3636 size_t numSamples = mNormalFrameCount
3637 * (audio_channel_count_from_out_mask(channelMask) + mHapticChannelCount);
3638 status_t result = mAudioFlinger->mEffectsFactoryHal->allocateBuffer(
3639 numSamples * sizeof(effect_buffer_t),
3640 &halInBuffer);
3641 if (result != OK) return result;
3642
3643 result = mAudioFlinger->mEffectsFactoryHal->mirrorBuffer(
3644 isSessionSpatialized ? mEffectBuffer : mPostSpatializerBuffer,
3645 isSessionSpatialized ? mEffectBufferSize : mPostSpatializerBufferSize,
3646 &halOutBuffer);
3647 if (result != OK) return result;
3648
3649 #ifdef FLOAT_EFFECT_CHAIN
3650 buffer = halInBuffer ? halInBuffer->audioBuffer()->f32 : buffer;
3651 #else
3652 buffer = halInBuffer ? halInBuffer->audioBuffer()->s16 : buffer;
3653 #endif
3654 ALOGV("addEffectChain_l() creating new input buffer %p session %d",
3655 buffer, session);
3656 } else {
3657 // A global session on a SPATIALIZER thread is either OUTPUT_STAGE or DEVICE
3658 // - OUTPUT_STAGE session uses the mEffectBuffer as input buffer and
3659 // mPostSpatializerBuffer as output buffer
3660 // - DEVICE session uses the mPostSpatializerBuffer as input and output buffer.
3661 status_t result = mAudioFlinger->mEffectsFactoryHal->mirrorBuffer(
3662 mEffectBuffer, mEffectBufferSize, &halInBuffer);
3663 if (result != OK) return result;
3664 result = mAudioFlinger->mEffectsFactoryHal->mirrorBuffer(
3665 mPostSpatializerBuffer, mPostSpatializerBufferSize, &halOutBuffer);
3666 if (result != OK) return result;
3667
3668 if (session == AUDIO_SESSION_DEVICE) {
3669 halInBuffer = halOutBuffer;
3670 }
3671 }
3672 } else {
3673 status_t result = mAudioFlinger->mEffectsFactoryHal->mirrorBuffer(
3674 mEffectBufferEnabled ? mEffectBuffer : mSinkBuffer,
3675 mEffectBufferEnabled ? mEffectBufferSize : mSinkBufferSize,
3676 &halInBuffer);
3677 if (result != OK) return result;
3678 halOutBuffer = halInBuffer;
3679 ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session);
3680 if (!audio_is_global_session(session)) {
3681 buffer = halInBuffer ? reinterpret_cast<effect_buffer_t*>(halInBuffer->externalData())
3682 : buffer;
3683 // Only one effect chain can be present in direct output thread and it uses
3684 // the sink buffer as input
3685 if (mType != DIRECT) {
3686 size_t numSamples = mNormalFrameCount
3687 * (audio_channel_count_from_out_mask(mMixerChannelMask)
3688 + mHapticChannelCount);
3689 const status_t allocateStatus = mAudioFlinger->mEffectsFactoryHal->allocateBuffer(
3690 numSamples * sizeof(effect_buffer_t),
3691 &halInBuffer);
3692 if (allocateStatus != OK) return allocateStatus;
3693 #ifdef FLOAT_EFFECT_CHAIN
3694 buffer = halInBuffer ? halInBuffer->audioBuffer()->f32 : buffer;
3695 #else
3696 buffer = halInBuffer ? halInBuffer->audioBuffer()->s16 : buffer;
3697 #endif
3698 ALOGV("addEffectChain_l() creating new input buffer %p session %d",
3699 buffer, session);
3700 }
3701 }
3702 }
3703
3704 if (!audio_is_global_session(session)) {
3705 // Attach all tracks with same session ID to this chain.
3706 for (size_t i = 0; i < mTracks.size(); ++i) {
3707 sp<Track> track = mTracks[i];
3708 if (session == track->sessionId()) {
3709 ALOGV("addEffectChain_l() track->setMainBuffer track %p buffer %p",
3710 track.get(), buffer);
3711 track->setMainBuffer(buffer);
3712 chain->incTrackCnt();
3713 }
3714 }
3715
3716 // indicate all active tracks in the chain
3717 for (const sp<Track> &track : mActiveTracks) {
3718 if (session == track->sessionId()) {
3719 ALOGV("addEffectChain_l() activating track %p on session %d",
3720 track.get(), session);
3721 chain->incActiveTrackCnt();
3722 }
3723 }
3724 }
3725
3726 chain->setThread(this);
3727 chain->setInBuffer(halInBuffer);
3728 chain->setOutBuffer(halOutBuffer);
3729 // Effect chain for session AUDIO_SESSION_DEVICE is inserted at end of effect
3730 // chains list in order to be processed last as it contains output device effects.
3731 // Effect chain for session AUDIO_SESSION_OUTPUT_STAGE is inserted just before to apply post
3732 // processing effects specific to an output stream before effects applied to all streams
3733 // routed to a given device.
3734 // Effect chain for session AUDIO_SESSION_OUTPUT_MIX is inserted before
3735 // session AUDIO_SESSION_OUTPUT_STAGE to be processed
3736 // after track specific effects and before output stage.
3737 // It is therefore mandatory that AUDIO_SESSION_OUTPUT_MIX == 0 and
3738 // that AUDIO_SESSION_OUTPUT_STAGE < AUDIO_SESSION_OUTPUT_MIX.
3739 // Effect chain for other sessions are inserted at beginning of effect
3740 // chains list to be processed before output mix effects. Relative order between other
3741 // sessions is not important.
3742 static_assert(AUDIO_SESSION_OUTPUT_MIX == 0 &&
3743 AUDIO_SESSION_OUTPUT_STAGE < AUDIO_SESSION_OUTPUT_MIX &&
3744 AUDIO_SESSION_DEVICE < AUDIO_SESSION_OUTPUT_STAGE,
3745 "audio_session_t constants misdefined");
3746 size_t size = mEffectChains.size();
3747 size_t i = 0;
3748 for (i = 0; i < size; i++) {
3749 if (mEffectChains[i]->sessionId() < session) {
3750 break;
3751 }
3752 }
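// i.e. mEffectChains stays sorted by decreasing session id, so the global
// sessions (OUTPUT_MIX, OUTPUT_STAGE, DEVICE) end up processed last, in that order.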
3753 mEffectChains.insertAt(chain, i);
3754 checkSuspendOnAddEffectChain_l(chain);
3755
3756 return NO_ERROR;
3757 }
3758
3759 size_t AudioFlinger::PlaybackThread::removeEffectChain_l(const sp<EffectChain>& chain)
3760 {
3761 audio_session_t session = chain->sessionId();
3762
3763 ALOGV("removeEffectChain_l() %p from thread %p for session %d", chain.get(), this, session);
3764
3765 for (size_t i = 0; i < mEffectChains.size(); i++) {
3766 if (chain == mEffectChains[i]) {
3767 mEffectChains.removeAt(i);
3768 // detach all active tracks from the chain
3769 for (const sp<Track> &track : mActiveTracks) {
3770 if (session == track->sessionId()) {
3771 ALOGV("removeEffectChain_l(): stopping track on chain %p for session Id: %d",
3772 chain.get(), session);
3773 chain->decActiveTrackCnt();
3774 }
3775 }
3776
3777 // detach all tracks with same session ID from this chain
3778 for (size_t j = 0; j < mTracks.size(); ++j) {
3779 sp<Track> track = mTracks[j];
3780 if (session == track->sessionId()) {
3781 track->setMainBuffer(reinterpret_cast<effect_buffer_t*>(mSinkBuffer));
3782 chain->decTrackCnt();
3783 }
3784 }
3785 break;
3786 }
3787 }
3788 return mEffectChains.size();
3789 }
3790
3791 status_t AudioFlinger::PlaybackThread::attachAuxEffect(
3792 const sp<AudioFlinger::PlaybackThread::Track>& track, int EffectId)
3793 {
3794 Mutex::Autolock _l(mLock);
3795 return attachAuxEffect_l(track, EffectId);
3796 }
3797
3798 status_t AudioFlinger::PlaybackThread::attachAuxEffect_l(
3799 const sp<AudioFlinger::PlaybackThread::Track>& track, int EffectId)
3800 {
3801 status_t status = NO_ERROR;
3802
3803 if (EffectId == 0) {
3804 track->setAuxBuffer(0, NULL);
3805 } else {
3806 // Auxiliary effects are always in audio session AUDIO_SESSION_OUTPUT_MIX
3807 sp<EffectModule> effect = getEffect_l(AUDIO_SESSION_OUTPUT_MIX, EffectId);
3808 if (effect != 0) {
3809 if ((effect->desc().flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
3810 track->setAuxBuffer(EffectId, (int32_t *)effect->inBuffer());
3811 } else {
3812 status = INVALID_OPERATION;
3813 }
3814 } else {
3815 status = BAD_VALUE;
3816 }
3817 }
3818 return status;
3819 }
3820
3821 void AudioFlinger::PlaybackThread::detachAuxEffect_l(int effectId)
3822 {
3823 for (size_t i = 0; i < mTracks.size(); ++i) {
3824 sp<Track> track = mTracks[i];
3825 if (track->auxEffectId() == effectId) {
3826 attachAuxEffect_l(track, 0);
3827 }
3828 }
3829 }
3830
3831 bool AudioFlinger::PlaybackThread::threadLoop()
3832 NO_THREAD_SAFETY_ANALYSIS // manual locking of AudioFlinger
3833 {
3834 tlNBLogWriter = mNBLogWriter.get();
3835
3836 Vector< sp<Track> > tracksToRemove;
3837
3838 mStandbyTimeNs = systemTime();
3839 int64_t lastLoopCountWritten = -2; // never matches "previous" loop, when loopCount = 0.
3840
3841 // MIXER
3842 nsecs_t lastWarning = 0;
3843
3844 // DUPLICATING
3845 // FIXME could this be made local to while loop?
3846 writeFrames = 0;
3847
3848 cacheParameters_l();
3849 mSleepTimeUs = mIdleSleepTimeUs;
3850
3851 if (mType == MIXER || mType == SPATIALIZER) {
3852 sleepTimeShift = 0;
3853 }
3854
3855 CpuStats cpuStats;
3856 const String8 myName(String8::format("thread %p type %d TID %d", this, mType, gettid()));
3857
3858 acquireWakeLock();
3859
3860 // mNBLogWriter logging APIs can only be called by a single thread, typically the
3861 // thread associated with this PlaybackThread.
3862 // If you want to share the mNBLogWriter with other threads (for example, binder threads)
3863 // then all such threads must agree to hold a common mutex before logging.
3864 // So if you need to log when mutex is unlocked, set logString to a non-NULL string,
3865 // and then that string will be logged at the next convenient opportunity.
3866 // See reference to logString below.
3867 const char *logString = NULL;
3868
3869 // Estimated time for the next buffer to be written to the HAL. This is used only in
3870 // suspended mode (for now) to help schedule the wait time until next iteration.
3871 nsecs_t timeLoopNextNs = 0;
3872
3873 checkSilentMode_l();
3874
3875 audio_patch_handle_t lastDownstreamPatchHandle = AUDIO_PATCH_HANDLE_NONE;
3876
3877 sendCheckOutputStageEffectsEvent();
3878
3879 // loopCount is used for statistics and diagnostics.
3880 for (int64_t loopCount = 0; !exitPending(); ++loopCount)
3881 {
3882 // Log merge requests are performed during AudioFlinger binder transactions, but
3883 // that does not cover audio playback. It's requested here for that reason.
3884 mAudioFlinger->requestLogMerge();
3885
3886 cpuStats.sample(myName);
3887
3888 Vector< sp<EffectChain> > effectChains;
3889 audio_session_t activeHapticSessionId = AUDIO_SESSION_NONE;
3890 bool isHapticSessionSpatialized = false;
3891 std::vector<sp<Track>> activeTracks;
3892
3893 // If the device is AUDIO_DEVICE_OUT_BUS, check for downstream latency.
3894 //
3895 // Note: we access outDeviceTypes() outside of mLock.
3896 if (isMsdDevice() && outDeviceTypes().count(AUDIO_DEVICE_OUT_BUS) != 0) {
3897 // Here, we try for the AF lock, but do not block on it as the latency
3898 // is more informational.
3899 if (mAudioFlinger->mLock.tryLock() == NO_ERROR) {
3900 std::vector<PatchPanel::SoftwarePatch> swPatches;
3901 double latencyMs = 0.; // not required; initialized for clang-tidy
3902 status_t status = INVALID_OPERATION;
3903 audio_patch_handle_t downstreamPatchHandle = AUDIO_PATCH_HANDLE_NONE;
3904 if (mAudioFlinger->mPatchPanel.getDownstreamSoftwarePatches(id(), &swPatches) == OK
3905 && swPatches.size() > 0) {
3906 status = swPatches[0].getLatencyMs_l(&latencyMs);
3907 downstreamPatchHandle = swPatches[0].getPatchHandle();
3908 }
3909 if (downstreamPatchHandle != lastDownstreamPatchHandle) {
3910 mDownstreamLatencyStatMs.reset();
3911 lastDownstreamPatchHandle = downstreamPatchHandle;
3912 }
3913 if (status == OK) {
3914 // verify downstream latency (we assume a max reasonable
3915 // latency of 5 seconds).
3916 const double minLatency = 0., maxLatency = 5000.;
3917 if (latencyMs >= minLatency && latencyMs <= maxLatency) {
3918 ALOGVV("new downstream latency %lf ms", latencyMs);
3919 } else {
3920 ALOGD("out of range downstream latency %lf ms", latencyMs);
3921 latencyMs = std::clamp(latencyMs, minLatency, maxLatency);
3922 }
3923 mDownstreamLatencyStatMs.add(latencyMs);
3924 }
3925 mAudioFlinger->mLock.unlock();
3926 }
3927 } else {
3928 if (lastDownstreamPatchHandle != AUDIO_PATCH_HANDLE_NONE) {
3929 // our device is no longer AUDIO_DEVICE_OUT_BUS, reset patch handle and stats.
3930 mDownstreamLatencyStatMs.reset();
3931 lastDownstreamPatchHandle = AUDIO_PATCH_HANDLE_NONE;
3932 }
3933 }
3934
3935 if (mCheckOutputStageEffects.exchange(false)) {
3936 checkOutputStageEffects();
3937 }
3938
3939 MetadataUpdate metadataUpdate;
3940 { // scope for mLock
3941
3942 Mutex::Autolock _l(mLock);
3943
3944 processConfigEvents_l();
3945 if (mCheckOutputStageEffects.load()) {
3946 continue;
3947 }
3948
3949 // See comment at declaration of logString for why this is done under mLock
3950 if (logString != NULL) {
3951 mNBLogWriter->logTimestamp();
3952 mNBLogWriter->log(logString);
3953 logString = NULL;
3954 }
3955
3956 collectTimestamps_l();
3957
3958 saveOutputTracks();
3959 if (mSignalPending) {
3960 // A signal was raised while we were unlocked
3961 mSignalPending = false;
3962 } else if (waitingAsyncCallback_l()) {
3963 if (exitPending()) {
3964 break;
3965 }
3966 bool released = false;
3967 if (!keepWakeLock()) {
3968 releaseWakeLock_l();
3969 released = true;
3970 }
3971
3972 const int64_t waitNs = computeWaitTimeNs_l();
3973 ALOGV("wait async completion (wait time: %lld)", (long long)waitNs);
3974 status_t status = mWaitWorkCV.waitRelative(mLock, waitNs);
3975 if (status == TIMED_OUT) {
3976 mSignalPending = true; // if timeout recheck everything
3977 }
3978 ALOGV("async completion/wake");
3979 if (released) {
3980 acquireWakeLock_l();
3981 }
3982 mStandbyTimeNs = systemTime() + mStandbyDelayNs;
3983 mSleepTimeUs = 0;
3984
3985 continue;
3986 }
3987 if ((mActiveTracks.isEmpty() && systemTime() > mStandbyTimeNs) ||
3988 isSuspended()) {
3989 // put audio hardware into standby after short delay
3990 if (shouldStandby_l()) {
3991
3992 threadLoop_standby();
3993
3994 // This is where we go into standby
3995 if (!mStandby) {
3996 LOG_AUDIO_STATE();
3997 mThreadMetrics.logEndInterval();
3998 mThreadSnapshot.onEnd();
3999 setStandby_l();
4000 }
4001 sendStatistics(false /* force */);
4002 }
4003
4004 if (mActiveTracks.isEmpty() && mConfigEvents.isEmpty()) {
4005 // we're about to wait, flush the binder command buffer
4006 IPCThreadState::self()->flushCommands();
4007
4008 clearOutputTracks();
4009
4010 if (exitPending()) {
4011 break;
4012 }
4013
4014 releaseWakeLock_l();
4015 // wait until we have something to do...
4016 ALOGV("%s going to sleep", myName.string());
4017 mWaitWorkCV.wait(mLock);
4018 ALOGV("%s waking up", myName.string());
4019 acquireWakeLock_l();
4020
4021 mMixerStatus = MIXER_IDLE;
4022 mMixerStatusIgnoringFastTracks = MIXER_IDLE;
4023 mBytesWritten = 0;
4024 mBytesRemaining = 0;
4025 checkSilentMode_l();
4026
4027 mStandbyTimeNs = systemTime() + mStandbyDelayNs;
4028 mSleepTimeUs = mIdleSleepTimeUs;
4029 if (mType == MIXER || mType == SPATIALIZER) {
4030 sleepTimeShift = 0;
4031 }
4032
4033 continue;
4034 }
4035 }
4036 // mMixerStatusIgnoringFastTracks is also updated internally
4037 mMixerStatus = prepareTracks_l(&tracksToRemove);
4038
4039 mActiveTracks.updatePowerState(this);
4040
4041 metadataUpdate = updateMetadata_l();
4042
4043 // prevent any changes in effect chain list and in each effect chain
4044 // during mixing and effect process as the audio buffers could be deleted
4045 // or modified if an effect is created or deleted
4046 lockEffectChains_l(effectChains);
4047
4048 // Determine which session to pick up haptic data.
4049 // This must be done under the same lock as prepareTracks_l().
4050 // The haptic data from the effect takes priority over the haptic data from the track.
4051 // TODO: Write haptic data directly to sink buffer when mixing.
4052 if (mHapticChannelCount > 0) {
4053 for (const auto& track : mActiveTracks) {
4054 sp<EffectChain> effectChain = getEffectChain_l(track->sessionId());
4055 if (effectChain != nullptr
4056 && effectChain->containsHapticGeneratingEffect_l()) {
4057 activeHapticSessionId = track->sessionId();
4058 isHapticSessionSpatialized =
4059 mType == SPATIALIZER && track->isSpatialized();
4060 break;
4061 }
4062 if (activeHapticSessionId == AUDIO_SESSION_NONE
4063 && track->getHapticPlaybackEnabled()) {
4064 activeHapticSessionId = track->sessionId();
4065 isHapticSessionSpatialized =
4066 mType == SPATIALIZER && track->isSpatialized();
4067 }
4068 }
4069 }
4070
4071 // Acquire a local copy of active tracks with lock (release w/o lock).
4072 //
4073 // Control methods on the track acquire the ThreadBase lock (e.g. start()
4074 // stop(), pause(), etc.), but the threadLoop is entitled to call audio
4075 // data / buffer methods on tracks from activeTracks without the ThreadBase lock.
4076 activeTracks.insert(activeTracks.end(), mActiveTracks.begin(), mActiveTracks.end());
4077
4078 setHalLatencyMode_l();
4079
4080 for (const auto &track : mActiveTracks ) {
4081 track->updateTeePatches_l();
4082 }
4083
4084 // signal actual start of output stream when the render position reported by the kernel
4085 // starts moving.
4086 if (!mHalStarted && ((isSuspended() && (mBytesWritten != 0)) || (!mStandby
4087 && (mKernelPositionOnStandby
4088 != mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL])))) {
4089 mHalStarted = true;
4090 mWaitHalStartCV.broadcast();
4091 }
4092 } // mLock scope ends
4093
4094 if (mBytesRemaining == 0) {
4095 mCurrentWriteLength = 0;
4096 if (mMixerStatus == MIXER_TRACKS_READY) {
4097 // threadLoop_mix() sets mCurrentWriteLength
4098 threadLoop_mix();
4099 } else if ((mMixerStatus != MIXER_DRAIN_TRACK)
4100 && (mMixerStatus != MIXER_DRAIN_ALL)) {
4101 // threadLoop_sleepTime sets mSleepTimeUs to 0 if data
4102 // must be written to HAL
4103 threadLoop_sleepTime();
4104 if (mSleepTimeUs == 0) {
4105 mCurrentWriteLength = mSinkBufferSize;
4106
4107 // Tally underrun frames as we are inserting 0s here.
4108 for (const auto& track : activeTracks) {
4109 if (track->mFillingUpStatus == Track::FS_ACTIVE
4110 && !track->isStopped()
4111 && !track->isPaused()
4112 && !track->isTerminated()) {
4113 ALOGV("%s: track(%d) %s underrun due to thread sleep of %zu frames",
4114 __func__, track->id(), track->getTrackStateAsString(),
4115 mNormalFrameCount);
4116 track->mAudioTrackServerProxy->tallyUnderrunFrames(mNormalFrameCount);
4117 }
4118 }
4119 }
4120 }
4121 // Either threadLoop_mix() or threadLoop_sleepTime() should have set
4122 // mMixerBuffer with data if mMixerBufferValid is true and mSleepTimeUs == 0.
4123 // Merge mMixerBuffer data into mEffectBuffer (if any effects are valid)
4124 // or mSinkBuffer (if there are no effects and there is no data already copied to
4125 // mSinkBuffer).
4126 //
4127 // This is done pre-effects computation; if effects change to
4128 // support higher precision, this needs to move.
4129 //
4130 // mMixerBufferValid is only set true by MixerThread::prepareTracks_l().
4131 // TODO use mSleepTimeUs == 0 as an additional condition.
4132 uint32_t mixerChannelCount = mEffectBufferValid ?
4133 audio_channel_count_from_out_mask(mMixerChannelMask) : mChannelCount;
4134 if (mMixerBufferValid && (mEffectBufferValid || !mHasDataCopiedToSinkBuffer)) {
4135 void *buffer = mEffectBufferValid ? mEffectBuffer : mSinkBuffer;
4136 audio_format_t format = mEffectBufferValid ? mEffectBufferFormat : mFormat;
4137
4138 // Apply mono blending and balancing if the effect buffer is not valid. Otherwise,
4139 // do these processes after effects are applied.
4140 if (!mEffectBufferValid) {
4141 // mono blend occurs for mixer threads only (not direct or offloaded)
4142 // and is handled here if we're going directly to the sink.
4143 if (requireMonoBlend()) {
4144 mono_blend(mMixerBuffer, mMixerBufferFormat, mChannelCount,
4145 mNormalFrameCount, true /*limit*/);
4146 }
4147
4148 if (!hasFastMixer()) {
4149 // Balance must take effect after mono conversion.
4150 // We do it here if there is no FastMixer.
4151 // mBalance detects zero balance within the class for speed
4152 // (not needed here).
4153 mBalance.setBalance(mMasterBalance.load());
4154 mBalance.process((float *)mMixerBuffer, mNormalFrameCount);
4155 }
4156 }
4157
4158 memcpy_by_audio_format(buffer, format, mMixerBuffer, mMixerBufferFormat,
4159 mNormalFrameCount * (mixerChannelCount + mHapticChannelCount));
4160
4161 // If we're going directly to the sink and there are haptic channels,
4162 // we should adjust channels as the sample data is partially interleaved
4163 // in this case.
4164 if (!mEffectBufferValid && mHapticChannelCount > 0) {
4165 adjust_channels_non_destructive(buffer, mChannelCount, buffer,
4166 mChannelCount + mHapticChannelCount,
4167 audio_bytes_per_sample(format),
4168 audio_bytes_per_frame(mChannelCount, format) * mNormalFrameCount);
4169 }
4170 }
4171
4172 mBytesRemaining = mCurrentWriteLength;
4173 if (isSuspended()) {
4174 // Simulate write to HAL when suspended (e.g. BT SCO phone call).
4175 mSleepTimeUs = suspendSleepTimeUs(); // assumes full buffer.
4176 const size_t framesRemaining = mBytesRemaining / mFrameSize;
4177 mBytesWritten += mBytesRemaining;
4178 mFramesWritten += framesRemaining;
4179 mSuspendedFrames += framesRemaining; // to adjust kernel HAL position
4180 mBytesRemaining = 0;
4181 }
4182
4183 // only process effects if we're going to write
4184 if (mSleepTimeUs == 0 && mType != OFFLOAD) {
4185 for (size_t i = 0; i < effectChains.size(); i ++) {
4186 effectChains[i]->process_l();
4187 // TODO: Write haptic data directly to sink buffer when mixing.
4188 if (activeHapticSessionId != AUDIO_SESSION_NONE
4189 && activeHapticSessionId == effectChains[i]->sessionId()) {
4190 // Haptic data is active in this case, copy it directly from
4191 // in buffer to out buffer.
4192 uint32_t hapticSessionChannelCount = mEffectBufferValid ?
4193 audio_channel_count_from_out_mask(mMixerChannelMask) :
4194 mChannelCount;
4195 if (mType == SPATIALIZER && !isHapticSessionSpatialized) {
4196 hapticSessionChannelCount = mChannelCount;
4197 }
4198
4199 const size_t audioBufferSize = mNormalFrameCount
4200 * audio_bytes_per_frame(hapticSessionChannelCount,
4201 EFFECT_BUFFER_FORMAT);
4202 memcpy_by_audio_format(
4203 (uint8_t*)effectChains[i]->outBuffer() + audioBufferSize,
4204 EFFECT_BUFFER_FORMAT,
4205 (const uint8_t*)effectChains[i]->inBuffer() + audioBufferSize,
4206 EFFECT_BUFFER_FORMAT, mNormalFrameCount * mHapticChannelCount);
4207 }
4208 }
4209 }
4210 }
4211 // Process effect chains for offloaded thread even if no audio
4212 // was read from audio track: process only updates effect state
4213 // and thus does not have to be synchronized with audio writes but may have
4214 // to be called while waiting for async write callback
4215 if (mType == OFFLOAD) {
4216 for (size_t i = 0; i < effectChains.size(); i ++) {
4217 effectChains[i]->process_l();
4218 }
4219 }
4220
4221 // Only if the effects buffer is enabled and there is valid data in the
4222 // effects buffer (buffer valid) do we need to
4223 // copy it into the sink buffer.
4224 // TODO use mSleepTimeUs == 0 as an additional condition.
4225 if (mEffectBufferValid && !mHasDataCopiedToSinkBuffer) {
4226 //ALOGV("writing effect buffer to sink buffer format %#x", mFormat);
4227 void *effectBuffer = (mType == SPATIALIZER) ? mPostSpatializerBuffer : mEffectBuffer;
4228 if (requireMonoBlend()) {
4229 mono_blend(effectBuffer, mEffectBufferFormat, mChannelCount, mNormalFrameCount,
4230 true /*limit*/);
4231 }
4232
4233 if (!hasFastMixer()) {
4234 // Balance must take effect after mono conversion.
4235 // We do it here if there is no FastMixer.
4236 // mBalance detects zero balance within the class for speed (not needed here).
4237 mBalance.setBalance(mMasterBalance.load());
4238 mBalance.process((float *)effectBuffer, mNormalFrameCount);
4239 }
4240
4241 // For SPATIALIZER threads, move haptic channels from mEffectBuffer to
4242 // mPostSpatializerBuffer if the haptics track is spatialized.
4243 // Otherwise, the haptics channels are already in mPostSpatializerBuffer.
4244 // For other thread types, the haptics channels are already in mEffectBuffer.
4245 if (mType == SPATIALIZER && isHapticSessionSpatialized) {
4246 const size_t srcBufferSize = mNormalFrameCount *
4247 audio_bytes_per_frame(audio_channel_count_from_out_mask(mMixerChannelMask),
4248 mEffectBufferFormat);
4249 const size_t dstBufferSize = mNormalFrameCount
4250 * audio_bytes_per_frame(mChannelCount, mEffectBufferFormat);
4251
4252 memcpy_by_audio_format((uint8_t*)mPostSpatializerBuffer + dstBufferSize,
4253 mEffectBufferFormat,
4254 (uint8_t*)mEffectBuffer + srcBufferSize,
4255 mEffectBufferFormat,
4256 mNormalFrameCount * mHapticChannelCount);
4257 }
4258 const size_t framesToCopy = mNormalFrameCount * (mChannelCount + mHapticChannelCount);
4259 if (mFormat == AUDIO_FORMAT_PCM_FLOAT &&
4260 mEffectBufferFormat == AUDIO_FORMAT_PCM_FLOAT) {
4261 // Clamp PCM float values more than this distance from 0 to insulate
4262 // a HAL which doesn't handle NaN correctly.
4263 static constexpr float HAL_FLOAT_SAMPLE_LIMIT = 2.0f;
4264 memcpy_to_float_from_float_with_clamping(static_cast<float*>(mSinkBuffer),
4265 static_cast<const float*>(effectBuffer),
4266 framesToCopy, HAL_FLOAT_SAMPLE_LIMIT /* absMax */);
4267 } else {
4268 memcpy_by_audio_format(mSinkBuffer, mFormat,
4269 effectBuffer, mEffectBufferFormat, framesToCopy);
4270 }
4271 // The sample data is partially interleaved when haptic channels exist,
4272 // we need to adjust channels here.
4273 if (mHapticChannelCount > 0) {
4274 adjust_channels_non_destructive(mSinkBuffer, mChannelCount, mSinkBuffer,
4275 mChannelCount + mHapticChannelCount,
4276 audio_bytes_per_sample(mFormat),
4277 audio_bytes_per_frame(mChannelCount, mFormat) * mNormalFrameCount);
4278 }
4279 }
4280
4281 // enable changes in effect chain
4282 unlockEffectChains(effectChains);
4283
4284 if (!metadataUpdate.playbackMetadataUpdate.empty()) {
4285 mAudioFlinger->mMelReporter->updateMetadataForCsd(id(),
4286 metadataUpdate.playbackMetadataUpdate);
4287 }
4288
4289 if (!waitingAsyncCallback()) {
4290 // mSleepTimeUs == 0 means we must write to audio hardware
4291 if (mSleepTimeUs == 0) {
4292 ssize_t ret = 0;
4293 // writePeriodNs is updated >= 0 when ret > 0.
4294 int64_t writePeriodNs = -1;
4295 if (mBytesRemaining) {
4296 // FIXME rewrite to reduce number of system calls
4297 const int64_t lastIoBeginNs = systemTime();
4298 ret = threadLoop_write();
4299 const int64_t lastIoEndNs = systemTime();
4300 if (ret < 0) {
4301 mBytesRemaining = 0;
4302 } else if (ret > 0) {
4303 mBytesWritten += ret;
4304 mBytesRemaining -= ret;
4305 const int64_t frames = ret / mFrameSize;
4306 mFramesWritten += frames;
4307
4308 writePeriodNs = lastIoEndNs - mLastIoEndNs;
4309 // process information relating to write time.
4310 if (audio_has_proportional_frames(mFormat)) {
4311 // we are in a continuous mixing cycle
4312 if (mMixerStatus == MIXER_TRACKS_READY &&
4313 loopCount == lastLoopCountWritten + 1) {
4314
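// Roughly: jitter is how far the measured write period (writePeriodNs) deviates
// from the nominal duration of 'frames' frames at mSampleRate.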
4315 const double jitterMs =
4316 TimestampVerifier<int64_t, int64_t>::computeJitterMs(
4317 {frames, writePeriodNs},
4318 {0, 0} /* lastTimestamp */, mSampleRate);
4319 const double processMs =
4320 (lastIoBeginNs - mLastIoEndNs) * 1e-6;
4321
4322 Mutex::Autolock _l(mLock);
4323 mIoJitterMs.add(jitterMs);
4324 mProcessTimeMs.add(processMs);
4325
4326 if (mPipeSink.get() != nullptr) {
4327 // Using the Monopipe availableToWrite, we estimate the current
4328 // buffer size.
4329 MonoPipe* monoPipe = static_cast<MonoPipe*>(mPipeSink.get());
4330 const ssize_t
4331 availableToWrite = mPipeSink->availableToWrite();
4332 const size_t pipeFrames = monoPipe->maxFrames();
4333 const size_t
4334 remainingFrames = pipeFrames - max(availableToWrite, 0);
4335 mMonopipePipeDepthStats.add(remainingFrames);
4336 }
4337 }
4338
4339 // write blocked detection
4340 const int64_t deltaWriteNs = lastIoEndNs - lastIoBeginNs;
4341 if ((mType == MIXER || mType == SPATIALIZER)
4342 && deltaWriteNs > maxPeriod) {
4343 mNumDelayedWrites++;
4344 if ((lastIoEndNs - lastWarning) > kWarningThrottleNs) {
4345 ATRACE_NAME("underrun");
4346 ALOGW("write blocked for %lld msecs, "
4347 "%d delayed writes, thread %d",
4348 (long long)deltaWriteNs / NANOS_PER_MILLISECOND,
4349 mNumDelayedWrites, mId);
4350 lastWarning = lastIoEndNs;
4351 }
4352 }
4353 }
4354 // update timing info.
4355 mLastIoBeginNs = lastIoBeginNs;
4356 mLastIoEndNs = lastIoEndNs;
4357 lastLoopCountWritten = loopCount;
4358 }
4359 } else if ((mMixerStatus == MIXER_DRAIN_TRACK) ||
4360 (mMixerStatus == MIXER_DRAIN_ALL)) {
4361 threadLoop_drain();
4362 }
4363 if ((mType == MIXER || mType == SPATIALIZER) && !mStandby) {
4364
4365 if (mThreadThrottle
4366 && mMixerStatus == MIXER_TRACKS_READY // we are mixing (active tracks)
4367 && writePeriodNs > 0) { // we have write period info
4368 // Limit MixerThread data processing to no more than twice the
4369 // expected processing rate.
4370 //
4371 // This helps prevent underruns with NuPlayer and other applications
4372 // which may set up buffers that are close to the minimum size, or use
4373 // deep buffers, and rely on a double-buffering sleep strategy to fill.
4374 //
4375 // The throttle smooths out sudden large data drains from the device,
4376 // e.g. when it comes out of standby, which often causes problems with
4377 // (1) mixer threads without a fast mixer (which has its own warm-up)
4378 // (2) minimum buffer sized tracks (even if the track is full,
4379 // the app won't fill fast enough to handle the sudden draw).
4380 //
4381 // Total time spent in last processing cycle equals time spent in
4382 // 1. threadLoop_write, as well as time spent in
4383 // 2. threadLoop_mix (significant for heavy mixing, especially
4384 // on low tier processors)
4385
4386 // it's OK if deltaMs is an overestimate.
4387
4388 const int32_t deltaMs = writePeriodNs / NANOS_PER_MILLISECOND;
4389
4390 const int32_t throttleMs = (int32_t)mHalfBufferMs - deltaMs;
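// (Illustrative numbers only: with mHalfBufferMs == 20 and a 5 ms write period,
// deltaMs == 5 and throttleMs == 15, so the loop sleeps 15 ms and each mix/write cycle
// takes at least half a buffer duration, capping the processing rate at roughly twice
// real time.)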
4391 if ((signed)mHalfBufferMs >= throttleMs && throttleMs > 0) {
4392 mThreadMetrics.logThrottleMs((double)throttleMs);
4393
4394 usleep(throttleMs * 1000);
4395 // notify of throttle start on verbose log
4396 ALOGV_IF(mThreadThrottleEndMs == mThreadThrottleTimeMs,
4397 "mixer(%p) throttle begin:"
4398 " ret(%zd) deltaMs(%d) requires sleep %d ms",
4399 this, ret, deltaMs, throttleMs);
4400 mThreadThrottleTimeMs += throttleMs;
4401 // Throttle must be attributed to the previous mixer loop's write time
4402 // to allow back-to-back throttling.
4403 // This also ensures proper timing statistics.
4404 mLastIoEndNs = systemTime(); // we fetch the write end time again.
4405 } else {
4406 uint32_t diff = mThreadThrottleTimeMs - mThreadThrottleEndMs;
4407 if (diff > 0) {
4408 // notify of throttle end on debug log
4409 // but prevent spamming for bluetooth
4410 ALOGD_IF(!isSingleDeviceType(
4411 outDeviceTypes(), audio_is_a2dp_out_device) &&
4412 !isSingleDeviceType(
4413 outDeviceTypes(), audio_is_hearing_aid_out_device),
4414 "mixer(%p) throttle end: throttle time(%u)", this, diff);
4415 mThreadThrottleEndMs = mThreadThrottleTimeMs;
4416 }
4417 }
4418 }
4419 }
4420
4421 } else {
4422 ATRACE_BEGIN("sleep");
4423 Mutex::Autolock _l(mLock);
4424 // suspended requires accurate metering of sleep time.
4425 if (isSuspended()) {
4426 // advance by expected sleepTime
4427 timeLoopNextNs += microseconds((nsecs_t)mSleepTimeUs);
4428 const nsecs_t nowNs = systemTime();
4429
4430 // compute expected next time vs current time.
4431 // (negative deltas are treated as delays).
4432 nsecs_t deltaNs = timeLoopNextNs - nowNs;
4433 if (deltaNs < -kMaxNextBufferDelayNs) {
4434 // Delays longer than the max allowed trigger a reset.
4435 ALOGV("DelayNs: %lld, resetting timeLoopNextNs", (long long) deltaNs);
4436 deltaNs = microseconds((nsecs_t)mSleepTimeUs);
4437 timeLoopNextNs = nowNs + deltaNs;
4438 } else if (deltaNs < 0) {
4439 // Delays within the max delay allowed: zero the delta/sleepTime
4440 // to help the system catch up in the next iteration(s)
4441 ALOGV("DelayNs: %lld, catching-up", (long long) deltaNs);
4442 deltaNs = 0;
4443 }
4444 // update sleep time (which is >= 0)
4445 mSleepTimeUs = deltaNs / 1000;
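// (Example with a hypothetical 10 ms nominal sleep: if the loop wakes 3 ms late,
// deltaNs is 7 ms and mSleepTimeUs becomes 7000, so subsequent wakes converge back
// onto the timeLoopNextNs grid; only delays beyond kMaxNextBufferDelayNs reset the
// grid entirely.)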
4446 }
4447 if (!mSignalPending && mConfigEvents.isEmpty() && !exitPending()) {
4448 mWaitWorkCV.waitRelative(mLock, microseconds((nsecs_t)mSleepTimeUs));
4449 }
4450 ATRACE_END();
4451 }
4452 }
4453
4454 // Finally let go of removed track(s), without the lock held
4455 // since we can't guarantee the destructors won't acquire that
4456 // same lock. This will also mutate and push a new fast mixer state.
4457 threadLoop_removeTracks(tracksToRemove);
4458 tracksToRemove.clear();
4459
4460 // FIXME I don't understand the need for this here;
4461 // it was in the original code but maybe the
4462 // assignment in saveOutputTracks() makes this unnecessary?
4463 clearOutputTracks();
4464
4465 // Effect chains will be actually deleted here if they were removed from
4466 // mEffectChains list during mixing or effects processing
4467 effectChains.clear();
4468
4469 // FIXME Note that the above .clear() is no longer necessary since effectChains
4470 // is now local to this block, but will keep it for now (at least until merge done).
4471 }
4472
4473 threadLoop_exit();
4474
4475 if (!mStandby) {
4476 threadLoop_standby();
4477 setStandby();
4478 }
4479
4480 releaseWakeLock();
4481
4482 ALOGV("Thread %p type %d exiting", this, mType);
4483 return false;
4484 }
4485
4486 void AudioFlinger::PlaybackThread::collectTimestamps_l()
4487 {
4488 if (mStandby) {
4489 mTimestampVerifier.discontinuity(discontinuityForStandbyOrFlush());
4490 return;
4491 } else if (mHwPaused) {
4492 mTimestampVerifier.discontinuity(mTimestampVerifier.DISCONTINUITY_MODE_CONTINUOUS);
4493 return;
4494 }
4495
4496 // Gather the framesReleased counters for all active tracks,
4497 // and associate with the sink frames written out. We need
4498 // this to convert the sink timestamp to the track timestamp.
4499 bool kernelLocationUpdate = false;
4500 ExtendedTimestamp timestamp; // use private copy to fetch
4501
4502 // Always query the HAL timestamp and update the timestamp verifier. In standby or pause,
4503 // the HAL may still be draining a small amount of buffered data for fade-out.
4504 if (threadloop_getHalTimestamp_l(&timestamp) == OK) {
4505 mTimestampVerifier.add(timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
4506 timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
4507 mSampleRate);
4508
4509 if (isTimestampCorrectionEnabled()) {
4510 ALOGVV("TS_BEFORE: %d %lld %lld", id(),
4511 (long long)timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
4512 (long long)timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]);
4513 auto correctedTimestamp = mTimestampVerifier.getLastCorrectedTimestamp();
4514 timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]
4515 = correctedTimestamp.mFrames;
4516 timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]
4517 = correctedTimestamp.mTimeNs;
4518 ALOGVV("TS_AFTER: %d %lld %lld", id(),
4519 (long long)timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
4520 (long long)timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]);
4521
4522 // Note: Downstream latency only added if timestamp correction enabled.
4523 if (mDownstreamLatencyStatMs.getN() > 0) { // we have latency info.
4524 const int64_t newPosition =
4525 timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]
4526 - int64_t(mDownstreamLatencyStatMs.getMean() * mSampleRate * 1e-3);
4527 // prevent retrograde
4528 timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = max(
4529 newPosition,
4530 (mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]
4531 - mSuspendedFrames));
4532 }
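// (Arithmetic sketch: with a hypothetical mean downstream latency of 40 ms at 48 kHz,
// the kernel position is pulled back by 0.040 * 48000 = 1920 frames, and the max()
// above keeps it from moving behind the previously reported kernel position, less the
// suspended frames.)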
4533 }
4534
4535 // We always fetch the timestamp here because often the downstream
4536 // sink will block while writing.
4537
4538 // We keep track of the last valid kernel position in case we are in underrun
4539 // and the normal mixer period is the same as the fast mixer period, or there
4540 // is some error from the HAL.
4541 if (mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] >= 0) {
4542 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] =
4543 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
4544 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] =
4545 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
4546
4547 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] =
4548 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER];
4549 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] =
4550 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER];
4551 }
4552
4553 if (timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] >= 0) {
4554 kernelLocationUpdate = true;
4555 } else {
4556 ALOGVV("getTimestamp error - no valid kernel position");
4557 }
4558
4559 // copy over kernel info
4560 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] =
4561 timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]
4562 + mSuspendedFrames; // add frames discarded when suspended
4563 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
4564 timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
4565 } else {
4566 mTimestampVerifier.error();
4567 }
4568
4569 // mFramesWritten for non-offloaded tracks are contiguous
4570 // even after standby() is called. This is useful for the track frame
4571 // to sink frame mapping.
4572 bool serverLocationUpdate = false;
4573 if (mFramesWritten != mLastFramesWritten) {
4574 serverLocationUpdate = true;
4575 mLastFramesWritten = mFramesWritten;
4576 }
4577 // Only update timestamps if there is a meaningful change.
4578 // Either the kernel timestamp must be valid or we have written something.
4579 if (kernelLocationUpdate || serverLocationUpdate) {
4580 if (serverLocationUpdate) {
4581 // use the time before we called the HAL write - it is a bit more accurate
4582 // to when the server last read data than the current time here.
4583 //
4584 // If we haven't written anything, mLastIoBeginNs will be -1
4585 // and we use systemTime().
4586 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] = mFramesWritten;
4587 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = mLastIoBeginNs == -1
4588 ? systemTime() : mLastIoBeginNs;
4589 }
4590
4591 for (const sp<Track> &t : mActiveTracks) {
4592 if (!t->isFastTrack()) {
4593 t->updateTrackFrameInfo(
4594 t->mAudioTrackServerProxy->framesReleased(),
4595 mFramesWritten,
4596 mSampleRate,
4597 mTimestamp);
4598 }
4599 }
4600 }
4601
4602 if (audio_has_proportional_frames(mFormat)) {
4603 const double latencyMs = mTimestamp.getOutputServerLatencyMs(mSampleRate);
4604 if (latencyMs != 0.) { // note 0. means timestamp is empty.
4605 mLatencyMs.add(latencyMs);
4606 }
4607 }
4608 #if 0
4609 // logFormat example
4610 if (z % 100 == 0) {
4611 timespec ts;
4612 clock_gettime(CLOCK_MONOTONIC, &ts);
4613 LOGT("This is an integer %d, this is a float %f, this is my "
4614 "pid %p %% %s %t", 42, 3.14, "and this is a timestamp", ts);
4615 LOGT("A deceptive null-terminated string %\0");
4616 }
4617 ++z;
4618 #endif
4619 }
4620
4621 // removeTracks_l() must be called with ThreadBase::mLock held
4622 void AudioFlinger::PlaybackThread::removeTracks_l(const Vector< sp<Track> >& tracksToRemove)
4623 NO_THREAD_SAFETY_ANALYSIS // release and re-acquire mLock
4624 {
4625 for (const auto& track : tracksToRemove) {
4626 mActiveTracks.remove(track);
4627 ALOGV("%s(%d): removing track on session %d", __func__, track->id(), track->sessionId());
4628 sp<EffectChain> chain = getEffectChain_l(track->sessionId());
4629 if (chain != 0) {
4630 ALOGV("%s(%d): stopping track on chain %p for session Id: %d",
4631 __func__, track->id(), chain.get(), track->sessionId());
4632 chain->decActiveTrackCnt();
4633 }
4634 // If an external client track, inform APM we're no longer active, and remove if needed.
4635 // We do this under lock so that the state is consistent if the Track is destroyed.
4636 if (track->isExternalTrack()) {
4637 AudioSystem::stopOutput(track->portId());
4638 if (track->isTerminated()) {
4639 AudioSystem::releaseOutput(track->portId());
4640 }
4641 }
4642 if (track->isTerminated()) {
4643 // remove from our tracks vector
4644 removeTrack_l(track);
4645 }
4646 if (mHapticChannelCount > 0 &&
4647 ((track->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
4648 || (chain != nullptr && chain->containsHapticGeneratingEffect_l()))) {
4649 mLock.unlock();
4650 // Unlock because VibratorService will take its own lock for this call and will
4651 // call Tracks.mute/unmute, which also requires the thread's lock.
4652 AudioFlinger::onExternalVibrationStop(track->getExternalVibration());
4653 mLock.lock();
4654
4655 // When the track is stopped, set the haptic intensity to MUTE
4656 // for the HapticGenerator effect.
4657 if (chain != nullptr) {
4658 chain->setHapticIntensity_l(track->id(), os::HapticScale::MUTE);
4659 }
4660 }
4661 }
4662 }
4663
4664 status_t AudioFlinger::PlaybackThread::getTimestamp_l(AudioTimestamp& timestamp)
4665 {
4666 if (mNormalSink != 0) {
4667 ExtendedTimestamp ets;
4668 status_t status = mNormalSink->getTimestamp(ets);
4669 if (status == NO_ERROR) {
4670 status = ets.getBestTimestamp(&timestamp);
4671 }
4672 return status;
4673 }
4674 if ((mType == OFFLOAD || mType == DIRECT) && mOutput != NULL) {
4675 collectTimestamps_l();
4676 if (mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] <= 0) {
4677 return INVALID_OPERATION;
4678 }
4679 timestamp.mPosition = mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
4680 const int64_t timeNs = mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
4681 timestamp.mTime.tv_sec = timeNs / NANOS_PER_SECOND;
4682 timestamp.mTime.tv_nsec = timeNs - (timestamp.mTime.tv_sec * NANOS_PER_SECOND);
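// (e.g. timeNs == 2'500'000'000 yields tv_sec == 2 and tv_nsec == 500'000'000.)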
4683 return NO_ERROR;
4684 }
4685 return INVALID_OPERATION;
4686 }
4687
4688 // For dedicated VoIP outputs, let the HAL apply the stream volume. Track volume is
4689 // still applied by the mixer.
4690 // All tracks attached to a mixer with flag VOIP_RX are tied to the same
4691 // stream type STREAM_VOICE_CALL, so this will only change the HAL volume once even
4692 // if more than one track is active.
4693 status_t AudioFlinger::PlaybackThread::handleVoipVolume_l(float *volume)
4694 {
4695 status_t result = NO_ERROR;
4696 if ((mOutput->flags & AUDIO_OUTPUT_FLAG_VOIP_RX) != 0) {
4697 if (*volume != mLeftVolFloat) {
4698 result = mOutput->stream->setVolume(*volume, *volume);
4699 ALOGE_IF(result != OK,
4700 "Error when setting output stream volume: %d", result);
4701 if (result == NO_ERROR) {
4702 mLeftVolFloat = *volume;
4703 }
4704 }
4705 // if the stream volume was successfully sent to the HAL, mLeftVolFloat == *volume here
4706 // and we remove the stream volume contribution from the software volume.
4707 if (mLeftVolFloat == *volume) {
4708 *volume = 1.0f;
4709 }
4710 }
4711 return result;
4712 }
4713
4714 status_t AudioFlinger::MixerThread::createAudioPatch_l(const struct audio_patch *patch,
4715 audio_patch_handle_t *handle)
4716 {
4717 status_t status;
4718 if (property_get_bool("af.patch_park", false /* default_value */)) {
4719 // Park FastMixer to avoid potential DOS issues with writing to the HAL
4720 // or if HAL does not properly lock against access.
4721 AutoPark<FastMixer> park(mFastMixer);
4722 status = PlaybackThread::createAudioPatch_l(patch, handle);
4723 } else {
4724 status = PlaybackThread::createAudioPatch_l(patch, handle);
4725 }
4726
4727 updateHalSupportedLatencyModes_l();
4728 return status;
4729 }
4730
4731 status_t AudioFlinger::PlaybackThread::createAudioPatch_l(const struct audio_patch *patch,
4732 audio_patch_handle_t *handle)
4733 {
4734 status_t status = NO_ERROR;
4735
4736 // store new device and send to effects
4737 audio_devices_t type = AUDIO_DEVICE_NONE;
4738 AudioDeviceTypeAddrVector deviceTypeAddrs;
4739 for (unsigned int i = 0; i < patch->num_sinks; i++) {
4740 LOG_ALWAYS_FATAL_IF(popcount(patch->sinks[i].ext.device.type) > 1
4741 && !mOutput->audioHwDev->supportsAudioPatches(),
4742 "Enumerated device type(%#x) must not be used "
4743 "as it does not support audio patches",
4744 patch->sinks[i].ext.device.type);
4745 type = static_cast<audio_devices_t>(type | patch->sinks[i].ext.device.type);
4746 deviceTypeAddrs.emplace_back(patch->sinks[i].ext.device.type,
4747 patch->sinks[i].ext.device.address);
4748 }
4749
4750 audio_port_handle_t sinkPortId = patch->sinks[0].id;
4751 #ifdef ADD_BATTERY_DATA
4752 // when changing the audio output device, call addBatteryData to notify
4753 // the change
4754 if (outDeviceTypes() != deviceTypes) {
4755 uint32_t params = 0;
4756 // check whether speaker is on
4757 if (deviceTypes.count(AUDIO_DEVICE_OUT_SPEAKER) > 0) {
4758 params |= IMediaPlayerService::kBatteryDataSpeakerOn;
4759 }
4760
4761 // check if any other device (except speaker) is on
4762 if (!isSingleDeviceType(deviceTypes, AUDIO_DEVICE_OUT_SPEAKER)) {
4763 params |= IMediaPlayerService::kBatteryDataOtherAudioDeviceOn;
4764 }
4765
4766 if (params != 0) {
4767 addBatteryData(params);
4768 }
4769 }
4770 #endif
4771
4772 for (size_t i = 0; i < mEffectChains.size(); i++) {
4773 mEffectChains[i]->setDevices_l(deviceTypeAddrs);
4774 }
4775
4776 // mPatch.num_sinks is not set when the thread is created so that
4777 // the first patch creation triggers an ioConfigChanged callback
4778 bool configChanged = (mPatch.num_sinks == 0) ||
4779 (mPatch.sinks[0].id != sinkPortId);
4780 mPatch = *patch;
4781 mOutDeviceTypeAddrs = deviceTypeAddrs;
4782 checkSilentMode_l();
4783
4784 if (mOutput->audioHwDev->supportsAudioPatches()) {
4785 sp<DeviceHalInterface> hwDevice = mOutput->audioHwDev->hwDevice();
4786 status = hwDevice->createAudioPatch(patch->num_sources,
4787 patch->sources,
4788 patch->num_sinks,
4789 patch->sinks,
4790 handle);
4791 } else {
4792 status = mOutput->stream->legacyCreateAudioPatch(patch->sinks[0], std::nullopt, type);
4793 *handle = AUDIO_PATCH_HANDLE_NONE;
4794 }
4795 const std::string patchSinksAsString = patchSinksToString(patch);
4796
4797 mThreadMetrics.logEndInterval();
4798 mThreadMetrics.logCreatePatch(/* inDevices */ {}, patchSinksAsString);
4799 mThreadMetrics.logBeginInterval();
4800 // also dispatch to active AudioTracks for MediaMetrics
4801 for (const auto &track : mActiveTracks) {
4802 track->logEndInterval();
4803 track->logBeginInterval(patchSinksAsString);
4804 }
4805
4806 if (configChanged) {
4807 sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
4808 }
4809 // Force metadata update after a route change
4810 mActiveTracks.setHasChanged();
4811
4812 return status;
4813 }
4814
4815 status_t AudioFlinger::MixerThread::releaseAudioPatch_l(const audio_patch_handle_t handle)
4816 {
4817 status_t status;
4818 if (property_get_bool("af.patch_park", false /* default_value */)) {
4819 // Park FastMixer to avoid potential DOS issues with writing to the HAL
4820 // or if HAL does not properly lock against access.
4821 AutoPark<FastMixer> park(mFastMixer);
4822 status = PlaybackThread::releaseAudioPatch_l(handle);
4823 } else {
4824 status = PlaybackThread::releaseAudioPatch_l(handle);
4825 }
4826 return status;
4827 }
4828
4829 status_t AudioFlinger::PlaybackThread::releaseAudioPatch_l(const audio_patch_handle_t handle)
4830 {
4831 status_t status = NO_ERROR;
4832
4833 mPatch = audio_patch{};
4834 mOutDeviceTypeAddrs.clear();
4835
4836 if (mOutput->audioHwDev->supportsAudioPatches()) {
4837 sp<DeviceHalInterface> hwDevice = mOutput->audioHwDev->hwDevice();
4838 status = hwDevice->releaseAudioPatch(handle);
4839 } else {
4840 status = mOutput->stream->legacyReleaseAudioPatch();
4841 }
4842 // Force metadata update after a route change
4843 mActiveTracks.setHasChanged();
4844
4845 return status;
4846 }
4847
4848 void AudioFlinger::PlaybackThread::addPatchTrack(const sp<PatchTrack>& track)
4849 {
4850 Mutex::Autolock _l(mLock);
4851 mTracks.add(track);
4852 }
4853
4854 void AudioFlinger::PlaybackThread::deletePatchTrack(const sp<PatchTrack>& track)
4855 {
4856 Mutex::Autolock _l(mLock);
4857 destroyTrack_l(track);
4858 }
4859
4860 void AudioFlinger::PlaybackThread::toAudioPortConfig(struct audio_port_config *config)
4861 {
4862 ThreadBase::toAudioPortConfig(config);
4863 config->role = AUDIO_PORT_ROLE_SOURCE;
4864 config->ext.mix.hw_module = mOutput->audioHwDev->handle();
4865 config->ext.mix.usecase.stream = AUDIO_STREAM_DEFAULT;
4866 if (mOutput && mOutput->flags != AUDIO_OUTPUT_FLAG_NONE) {
4867 config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
4868 config->flags.output = mOutput->flags;
4869 }
4870 }
4871
4872 // ----------------------------------------------------------------------------
4873
4874 AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
4875 audio_io_handle_t id, bool systemReady, type_t type, audio_config_base_t *mixerConfig)
4876 : PlaybackThread(audioFlinger, output, id, type, systemReady, mixerConfig),
4877 // mAudioMixer below
4878 // mFastMixer below
4879 mBluetoothLatencyModesEnabled(false),
4880 mFastMixerFutex(0),
4881 mMasterMono(false)
4882 // mOutputSink below
4883 // mPipeSink below
4884 // mNormalSink below
4885 {
4886 setMasterBalance(audioFlinger->getMasterBalance_l());
4887 ALOGV("MixerThread() id=%d type=%d", id, type);
4888 ALOGV("mSampleRate=%u, mChannelMask=%#x, mChannelCount=%u, mFormat=%#x, mFrameSize=%zu, "
4889 "mFrameCount=%zu, mNormalFrameCount=%zu",
4890 mSampleRate, mChannelMask, mChannelCount, mFormat, mFrameSize, mFrameCount,
4891 mNormalFrameCount);
4892 mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
4893
4894 if (type == DUPLICATING) {
4895 // The Duplicating thread uses the AudioMixer and delivers data to OutputTracks
4896 // (downstream MixerThreads) in DuplicatingThread::threadLoop_write().
4897 // Do not create or use mFastMixer, mOutputSink, mPipeSink, or mNormalSink.
4898 return;
4899 }
4900 // create an NBAIO sink for the HAL output stream, and negotiate
4901 mOutputSink = new AudioStreamOutSink(output->stream);
4902 size_t numCounterOffers = 0;
4903 const NBAIO_Format offers[1] = {Format_from_SR_C(
4904 mSampleRate, mChannelCount + mHapticChannelCount, mFormat)};
4905 #if !LOG_NDEBUG
4906 ssize_t index =
4907 #else
4908 (void)
4909 #endif
4910 mOutputSink->negotiate(offers, 1, NULL, numCounterOffers);
4911 ALOG_ASSERT(index == 0);
4912
4913 // initialize fast mixer depending on configuration
4914 bool initFastMixer;
4915 if (mType == SPATIALIZER || mType == BIT_PERFECT) {
4916 initFastMixer = false;
4917 } else {
4918 switch (kUseFastMixer) {
4919 case FastMixer_Never:
4920 initFastMixer = false;
4921 break;
4922 case FastMixer_Always:
4923 initFastMixer = true;
4924 break;
4925 case FastMixer_Static:
4926 case FastMixer_Dynamic:
4927 initFastMixer = mFrameCount < mNormalFrameCount;
4928 break;
4929 }
4930 ALOGW_IF(initFastMixer == false && mFrameCount < mNormalFrameCount,
4931 "FastMixer is preferred for this sink as frameCount %zu is less than threshold %zu",
4932 mFrameCount, mNormalFrameCount);
4933 }
4934 if (initFastMixer) {
4935 audio_format_t fastMixerFormat;
4936 if (mMixerBufferEnabled && mEffectBufferEnabled) {
4937 fastMixerFormat = AUDIO_FORMAT_PCM_FLOAT;
4938 } else {
4939 fastMixerFormat = AUDIO_FORMAT_PCM_16_BIT;
4940 }
4941 if (mFormat != fastMixerFormat) {
4942 // change our Sink format to accept our intermediate precision
4943 mFormat = fastMixerFormat;
4944 free(mSinkBuffer);
4945 mFrameSize = audio_bytes_per_frame(mChannelCount + mHapticChannelCount, mFormat);
4946 const size_t sinkBufferSize = mNormalFrameCount * mFrameSize;
4947 (void)posix_memalign(&mSinkBuffer, 32, sinkBufferSize);
4948 }
4949
4950 // create a MonoPipe to connect our submix to FastMixer
4951 NBAIO_Format format = mOutputSink->format();
4952
4953 // adjust format to match that of the Fast Mixer
4954 ALOGV("format changed from %#x to %#x", format.mFormat, fastMixerFormat);
4955 format.mFormat = fastMixerFormat;
4956 format.mFrameSize = audio_bytes_per_sample(format.mFormat) * format.mChannelCount;
4957
4958 // This pipe depth compensates for scheduling latency of the normal mixer thread.
4959 // When it wakes up after a maximum latency, it runs a few cycles quickly before
4960 // finally blocking. Note the pipe implementation rounds up the request to a power of 2.
4961 MonoPipe *monoPipe = new MonoPipe(mNormalFrameCount * 4, format, true /*writeCanBlock*/);
4962 const NBAIO_Format offersFast[1] = {format};
4963 size_t numCounterOffersFast = 0;
4964 #if !LOG_NDEBUG
4965 index =
4966 #else
4967 (void)
4968 #endif
4969 monoPipe->negotiate(offersFast, std::size(offersFast),
4970 nullptr /* counterOffers */, numCounterOffersFast);
4971 ALOG_ASSERT(index == 0);
4972 monoPipe->setAvgFrames((mScreenState & 1) ?
4973 (monoPipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2);
4974 mPipeSink = monoPipe;
4975
4976 // create fast mixer and configure it initially with just one fast track for our submix
4977 mFastMixer = new FastMixer(mId);
4978 FastMixerStateQueue *sq = mFastMixer->sq();
4979 #ifdef STATE_QUEUE_DUMP
4980 sq->setObserverDump(&mStateQueueObserverDump);
4981 sq->setMutatorDump(&mStateQueueMutatorDump);
4982 #endif
4983 FastMixerState *state = sq->begin();
4984 FastTrack *fastTrack = &state->mFastTracks[0];
4985 // wrap the source side of the MonoPipe to make it an AudioBufferProvider
4986 fastTrack->mBufferProvider = new SourceAudioBufferProvider(new MonoPipeReader(monoPipe));
4987 fastTrack->mVolumeProvider = NULL;
4988 fastTrack->mChannelMask = static_cast<audio_channel_mask_t>(
4989 mChannelMask | mHapticChannelMask); // mPipeSink channel mask for
4990 // audio to FastMixer
4991 fastTrack->mFormat = mFormat; // mPipeSink format for audio to FastMixer
4992 fastTrack->mHapticPlaybackEnabled = mHapticChannelMask != AUDIO_CHANNEL_NONE;
4993 fastTrack->mHapticIntensity = os::HapticScale::NONE;
4994 fastTrack->mHapticMaxAmplitude = NAN;
4995 fastTrack->mGeneration++;
4996 state->mFastTracksGen++;
4997 state->mTrackMask = 1;
4998 // fast mixer will use the HAL output sink
4999 state->mOutputSink = mOutputSink.get();
5000 state->mOutputSinkGen++;
5001 state->mFrameCount = mFrameCount;
5002 // specify the sink channel mask when a haptic channel mask is present, as it cannot
5003 // be calculated directly from the channel count
5004 state->mSinkChannelMask = mHapticChannelMask == AUDIO_CHANNEL_NONE
5005 ? AUDIO_CHANNEL_NONE
5006 : static_cast<audio_channel_mask_t>(mChannelMask | mHapticChannelMask);
5007 state->mCommand = FastMixerState::COLD_IDLE;
5008 // already done in constructor initialization list
5009 //mFastMixerFutex = 0;
5010 state->mColdFutexAddr = &mFastMixerFutex;
5011 state->mColdGen++;
5012 state->mDumpState = &mFastMixerDumpState;
5013 mFastMixerNBLogWriter = audioFlinger->newWriter_l(kFastMixerLogSize, "FastMixer");
5014 state->mNBLogWriter = mFastMixerNBLogWriter.get();
5015 sq->end();
5016 sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
5017
5018 NBLog::thread_info_t info;
5019 info.id = mId;
5020 info.type = NBLog::FASTMIXER;
5021 mFastMixerNBLogWriter->log<NBLog::EVENT_THREAD_INFO>(info);
5022
5023 // start the fast mixer
5024 mFastMixer->run("FastMixer", PRIORITY_URGENT_AUDIO);
5025 pid_t tid = mFastMixer->getTid();
5026 sendPrioConfigEvent(getpid(), tid, kPriorityFastMixer, false /*forApp*/);
5027 stream()->setHalThreadPriority(kPriorityFastMixer);
5028
5029 #ifdef AUDIO_WATCHDOG
5030 // create and start the watchdog
5031 mAudioWatchdog = new AudioWatchdog();
5032 mAudioWatchdog->setDump(&mAudioWatchdogDump);
5033 mAudioWatchdog->run("AudioWatchdog", PRIORITY_URGENT_AUDIO);
5034 tid = mAudioWatchdog->getTid();
5035 sendPrioConfigEvent(getpid(), tid, kPriorityFastMixer, false /*forApp*/);
5036 #endif
5037 } else {
5038 #ifdef TEE_SINK
5039 // Only use the MixerThread tee if there is no FastMixer.
5040 mTee.set(mOutputSink->format(), NBAIO_Tee::TEE_FLAG_OUTPUT_THREAD);
5041 mTee.setId(std::string("_") + std::to_string(mId) + "_M");
5042 #endif
5043 }
5044
5045 switch (kUseFastMixer) {
5046 case FastMixer_Never:
5047 case FastMixer_Dynamic:
5048 mNormalSink = mOutputSink;
5049 break;
5050 case FastMixer_Always:
5051 mNormalSink = mPipeSink;
5052 break;
5053 case FastMixer_Static:
5054 mNormalSink = initFastMixer ? mPipeSink : mOutputSink;
5055 break;
5056 }
5057 }
5058
5059 AudioFlinger::MixerThread::~MixerThread()
5060 {
5061 if (mFastMixer != 0) {
5062 FastMixerStateQueue *sq = mFastMixer->sq();
5063 FastMixerState *state = sq->begin();
5064 if (state->mCommand == FastMixerState::COLD_IDLE) {
5065 int32_t old = android_atomic_inc(&mFastMixerFutex);
5066 if (old == -1) {
5067 (void) syscall(__NR_futex, &mFastMixerFutex, FUTEX_WAKE_PRIVATE, 1);
5068 }
5069 }
5070 state->mCommand = FastMixerState::EXIT;
5071 sq->end();
5072 sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
5073 mFastMixer->join();
5074 // Though the fast mixer thread has exited, its state queue is still valid.
5075 // We'll use it to extract the final state, which contains one remaining fast track
5076 // corresponding to our sub-mix.
5077 state = sq->begin();
5078 ALOG_ASSERT(state->mTrackMask == 1);
5079 FastTrack *fastTrack = &state->mFastTracks[0];
5080 ALOG_ASSERT(fastTrack->mBufferProvider != NULL);
5081 delete fastTrack->mBufferProvider;
5082 sq->end(false /*didModify*/);
5083 mFastMixer.clear();
5084 #ifdef AUDIO_WATCHDOG
5085 if (mAudioWatchdog != 0) {
5086 mAudioWatchdog->requestExit();
5087 mAudioWatchdog->requestExitAndWait();
5088 mAudioWatchdog.clear();
5089 }
5090 #endif
5091 }
5092 mAudioFlinger->unregisterWriter(mFastMixerNBLogWriter);
5093 delete mAudioMixer;
5094 }
5095
5096 void AudioFlinger::MixerThread::onFirstRef() {
5097 PlaybackThread::onFirstRef();
5098
5099 Mutex::Autolock _l(mLock);
5100 if (mOutput != nullptr && mOutput->stream != nullptr) {
5101 status_t status = mOutput->stream->setLatencyModeCallback(this);
5102 if (status != INVALID_OPERATION) {
5103 updateHalSupportedLatencyModes_l();
5104 }
5105 // Default to enabled if the HAL supports it. This can be changed by AudioFlinger after
5106 // thread construction, according to AudioFlinger::mBluetoothLatencyModesEnabled.
5107 mBluetoothLatencyModesEnabled.store(
5108 mOutput->audioHwDev->supportsBluetoothVariableLatency());
5109 }
5110 }
5111
5112 uint32_t AudioFlinger::MixerThread::correctLatency_l(uint32_t latency) const
5113 {
5114 if (mFastMixer != 0) {
5115 MonoPipe *pipe = (MonoPipe *)mPipeSink.get();
5116 latency += (pipe->getAvgFrames() * 1000) / mSampleRate;
5117 }
5118 return latency;
5119 }
5120
5121 ssize_t AudioFlinger::MixerThread::threadLoop_write()
5122 {
5123 // FIXME we should only do one push per cycle; confirm this is true
5124 // Start the fast mixer if it's not already running
5125 if (mFastMixer != 0) {
5126 FastMixerStateQueue *sq = mFastMixer->sq();
5127 FastMixerState *state = sq->begin();
5128 if (state->mCommand != FastMixerState::MIX_WRITE &&
5129 (kUseFastMixer != FastMixer_Dynamic || state->mTrackMask > 1)) {
5130 if (state->mCommand == FastMixerState::COLD_IDLE) {
5131
5132 // FIXME workaround for first HAL write being CPU bound on some devices
5133 ATRACE_BEGIN("write");
5134 mOutput->write((char *)mSinkBuffer, 0);
5135 ATRACE_END();
5136
5137 int32_t old = android_atomic_inc(&mFastMixerFutex);
5138 if (old == -1) {
5139 (void) syscall(__NR_futex, &mFastMixerFutex, FUTEX_WAKE_PRIVATE, 1);
5140 }
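// (The futex is the FastMixer's parking spot: while cold-idling the fast thread is
// expected to decrement it and futex-wait, so seeing an old value of -1 after the
// increment means it is blocked and needs this FUTEX_WAKE_PRIVATE.)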
5141 #ifdef AUDIO_WATCHDOG
5142 if (mAudioWatchdog != 0) {
5143 mAudioWatchdog->resume();
5144 }
5145 #endif
5146 }
5147 state->mCommand = FastMixerState::MIX_WRITE;
5148 #ifdef FAST_THREAD_STATISTICS
5149 mFastMixerDumpState.increaseSamplingN(mAudioFlinger->isLowRamDevice() ?
5150 FastThreadDumpState::kSamplingNforLowRamDevice : FastThreadDumpState::kSamplingN);
5151 #endif
5152 sq->end();
5153 sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
5154 if (kUseFastMixer == FastMixer_Dynamic) {
5155 mNormalSink = mPipeSink;
5156 }
5157 } else {
5158 sq->end(false /*didModify*/);
5159 }
5160 }
5161 return PlaybackThread::threadLoop_write();
5162 }
5163
5164 void AudioFlinger::MixerThread::threadLoop_standby()
5165 {
5166 // Idle the fast mixer if it's currently running
5167 if (mFastMixer != 0) {
5168 FastMixerStateQueue *sq = mFastMixer->sq();
5169 FastMixerState *state = sq->begin();
5170 if (!(state->mCommand & FastMixerState::IDLE)) {
5171 // Report any frames trapped in the Monopipe
5172 MonoPipe *monoPipe = (MonoPipe *)mPipeSink.get();
5173 const long long pipeFrames = monoPipe->maxFrames() - monoPipe->availableToWrite();
5174 mLocalLog.log("threadLoop_standby: framesWritten:%lld suspendedFrames:%lld "
5175 "monoPipeWritten:%lld monoPipeLeft:%lld",
5176 (long long)mFramesWritten, (long long)mSuspendedFrames,
5177 (long long)mPipeSink->framesWritten(), pipeFrames);
5178 mLocalLog.log("threadLoop_standby: %s", mTimestamp.toString().c_str());
5179
5180 state->mCommand = FastMixerState::COLD_IDLE;
5181 state->mColdFutexAddr = &mFastMixerFutex;
5182 state->mColdGen++;
5183 mFastMixerFutex = 0;
5184 sq->end();
5185 // BLOCK_UNTIL_PUSHED would be insufficient, as we need it to stop doing I/O now
5186 sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED);
5187 if (kUseFastMixer == FastMixer_Dynamic) {
5188 mNormalSink = mOutputSink;
5189 }
5190 #ifdef AUDIO_WATCHDOG
5191 if (mAudioWatchdog != 0) {
5192 mAudioWatchdog->pause();
5193 }
5194 #endif
5195 } else {
5196 sq->end(false /*didModify*/);
5197 }
5198 }
5199 PlaybackThread::threadLoop_standby();
5200 }
5201
5202 bool AudioFlinger::PlaybackThread::waitingAsyncCallback_l()
5203 {
5204 return false;
5205 }
5206
5207 bool AudioFlinger::PlaybackThread::shouldStandby_l()
5208 {
5209 return !mStandby;
5210 }
5211
5212 bool AudioFlinger::PlaybackThread::waitingAsyncCallback()
5213 {
5214 Mutex::Autolock _l(mLock);
5215 return waitingAsyncCallback_l();
5216 }
5217
5218 // shared by MIXER and DIRECT, overridden by DUPLICATING
5219 void AudioFlinger::PlaybackThread::threadLoop_standby()
5220 {
5221 ALOGV("Audio hardware entering standby, mixer %p, suspend count %d", this, mSuspended);
5222 mOutput->standby();
5223 if (mUseAsyncWrite != 0) {
5224 // discard any pending drain or write ack by incrementing sequence
5225 mWriteAckSequence = (mWriteAckSequence + 2) & ~1;
5226 mDrainSequence = (mDrainSequence + 2) & ~1;
5227 ALOG_ASSERT(mCallbackThread != 0);
5228 mCallbackThread->setWriteBlocked(mWriteAckSequence);
5229 mCallbackThread->setDraining(mDrainSequence);
5230 }
5231 mHwPaused = false;
5232 setHalLatencyMode_l();
5233 }
5234
5235 void AudioFlinger::PlaybackThread::onAddNewTrack_l()
5236 {
5237 ALOGV("signal playback thread");
5238 broadcast_l();
5239 }
5240
5241 void AudioFlinger::PlaybackThread::onAsyncError()
5242 {
5243 for (int i = AUDIO_STREAM_SYSTEM; i < (int)AUDIO_STREAM_CNT; i++) {
5244 invalidateTracks((audio_stream_type_t)i);
5245 }
5246 }
5247
5248 void AudioFlinger::MixerThread::threadLoop_mix()
5249 {
5250 // mix buffers...
5251 mAudioMixer->process();
5252 mCurrentWriteLength = mSinkBufferSize;
5253 // Increase sleep time progressively when the application underrun condition clears.
5254 // Only increase the sleep time if the mixer has been ready for two consecutive cycles,
5255 // to avoid a steady state of alternating ready/not-ready conditions keeping the sleep
5256 // time at a level that would underrun the audio HAL.
5257 if ((mSleepTimeUs == 0) && (sleepTimeShift > 0)) {
5258 sleepTimeShift--;
5259 }
5260 mSleepTimeUs = 0;
5261 mStandbyTimeNs = systemTime() + mStandbyDelayNs;
5262 //TODO: delay standby when effects have a tail
5263
5264 }
5265
5266 void AudioFlinger::MixerThread::threadLoop_sleepTime()
5267 {
5268 // If no tracks are ready, sleep once for the duration of an output
5269 // buffer size, then write 0s to the output
5270 if (mSleepTimeUs == 0) {
5271 if (mMixerStatus == MIXER_TRACKS_ENABLED) {
5272 if (mPipeSink.get() != nullptr && mPipeSink == mNormalSink) {
5273 // Using the Monopipe availableToWrite, we estimate the
5274 // sleep time to retry for more data (before we underrun).
5275 MonoPipe *monoPipe = static_cast<MonoPipe *>(mPipeSink.get());
5276 const ssize_t availableToWrite = mPipeSink->availableToWrite();
5277 const size_t pipeFrames = monoPipe->maxFrames();
5278 const size_t framesLeft = pipeFrames - max(availableToWrite, 0);
5279 // HAL_framecount <= framesDelay ~ framesLeft / 2 <= Normal_Mixer_framecount
5280 const size_t framesDelay = std::min(
5281 mNormalFrameCount, max(framesLeft / 2, mFrameCount));
5282 ALOGV("pipeFrames:%zu framesLeft:%zu framesDelay:%zu",
5283 pipeFrames, framesLeft, framesDelay);
5284 mSleepTimeUs = framesDelay * MICROS_PER_SECOND / mSampleRate;
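// (Illustrative numbers: at mSampleRate == 48000 with framesDelay == 480,
// mSleepTimeUs == 480 * 1000000 / 48000 == 10000 us, i.e. wait roughly one 10 ms
// burst for the app to refill the pipe before retrying.)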
5285 } else {
5286 mSleepTimeUs = mActiveSleepTimeUs >> sleepTimeShift;
5287 if (mSleepTimeUs < kMinThreadSleepTimeUs) {
5288 mSleepTimeUs = kMinThreadSleepTimeUs;
5289 }
5290 // reduce sleep time in case of consecutive application underruns to avoid
5291 // starving the audio HAL. As activeSleepTimeUs() is larger than a buffer
5292 // duration we would end up writing less data than needed by the audio HAL if
5293 // the condition persists.
5294 if (sleepTimeShift < kMaxThreadSleepTimeShift) {
5295 sleepTimeShift++;
5296 }
5297 }
5298 } else {
5299 mSleepTimeUs = mIdleSleepTimeUs;
5300 }
5301 } else if (mBytesWritten != 0 || (mMixerStatus == MIXER_TRACKS_ENABLED)) {
5302 // clear out mMixerBuffer or mSinkBuffer, to ensure buffers are cleared
5303 // before effects processing or output.
5304 if (mMixerBufferValid) {
5305 memset(mMixerBuffer, 0, mMixerBufferSize);
5306 if (mType == SPATIALIZER) {
5307 memset(mSinkBuffer, 0, mSinkBufferSize);
5308 }
5309 } else {
5310 memset(mSinkBuffer, 0, mSinkBufferSize);
5311 }
5312 mSleepTimeUs = 0;
5313 ALOGV_IF(mBytesWritten == 0 && (mMixerStatus == MIXER_TRACKS_ENABLED),
5314 "anticipated start");
5315 }
5316 // TODO add standby time extension as a function of effect tail
5317 }
5318
5319 // prepareTracks_l() must be called with ThreadBase::mLock held
5320 AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTracks_l(
5321 Vector< sp<Track> > *tracksToRemove)
5322 {
5323 // clean up deleted track ids in AudioMixer before allocating new tracks
5324 (void)mTracks.processDeletedTrackIds([this](int trackId) {
5325 // for each trackId, destroy it in the AudioMixer
5326 if (mAudioMixer->exists(trackId)) {
5327 mAudioMixer->destroy(trackId);
5328 }
5329 });
5330 mTracks.clearDeletedTrackIds();
5331
5332 mixer_state mixerStatus = MIXER_IDLE;
5333 // find out which tracks need to be processed
5334 size_t count = mActiveTracks.size();
5335 size_t mixedTracks = 0;
5336 size_t tracksWithEffect = 0;
5337 // counts only _active_ fast tracks
5338 size_t fastTracks = 0;
5339 uint32_t resetMask = 0; // bit mask of fast tracks that need to be reset
5340
5341 float masterVolume = mMasterVolume;
5342 bool masterMute = mMasterMute;
5343
5344 if (masterMute) {
5345 masterVolume = 0;
5346 }
5347 // Delegate master volume control to effect in output mix effect chain if needed
5348 sp<EffectChain> chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX);
5349 if (chain != 0) {
5350 uint32_t v = (uint32_t)(masterVolume * (1 << 24));
5351 chain->setVolume_l(&v, &v);
5352 masterVolume = (float)((v + (1 << 23)) >> 24);
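// (Fixed-point sketch: the float master volume is scaled by 1 << 24 into U8.24 for the
// effect chain, and on the way back adding 1 << 23 before the >> 24 rounds to nearest
// instead of truncating.)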
5353 chain.clear();
5354 }
5355
5356 // prepare a new state to push
5357 FastMixerStateQueue *sq = NULL;
5358 FastMixerState *state = NULL;
5359 bool didModify = false;
5360 FastMixerStateQueue::block_t block = FastMixerStateQueue::BLOCK_UNTIL_PUSHED;
5361 bool coldIdle = false;
5362 if (mFastMixer != 0) {
5363 sq = mFastMixer->sq();
5364 state = sq->begin();
5365 coldIdle = state->mCommand == FastMixerState::COLD_IDLE;
5366 }
5367
5368 mMixerBufferValid = false; // mMixerBuffer has no valid data until appropriate tracks found.
5369 mEffectBufferValid = false; // mEffectBuffer has no valid data until tracks found.
5370
5371 // DeferredOperations handles statistics after setting mixerStatus.
5372 class DeferredOperations {
5373 public:
5374 DeferredOperations(mixer_state *mixerStatus, ThreadMetrics *threadMetrics)
5375 : mMixerStatus(mixerStatus)
5376 , mThreadMetrics(threadMetrics) {}
5377
5378 // when leaving scope, tally frames properly.
5379 ~DeferredOperations() {
5380 // Tally underrun frames only if we are actually mixing (MIXER_TRACKS_READY)
5381 // because that is when the underrun occurs.
5382 // We do not distinguish between FastTracks and NormalTracks here.
5383 size_t maxUnderrunFrames = 0;
5384 if (*mMixerStatus == MIXER_TRACKS_READY && mUnderrunFrames.size() > 0) {
5385 for (const auto &underrun : mUnderrunFrames) {
5386 underrun.first->tallyUnderrunFrames(underrun.second);
5387 maxUnderrunFrames = max(underrun.second, maxUnderrunFrames);
5388 }
5389 }
5390 // send the max underrun frames for this mixer period
5391 mThreadMetrics->logUnderrunFrames(maxUnderrunFrames);
5392 }
5393
5394 // tallyUnderrunFrames() is called to update the track counters
5395 // with the number of underrun frames for a particular mixer period.
5396 // We defer tallying until we know the final mixer status.
5397 void tallyUnderrunFrames(const sp<Track>& track, size_t underrunFrames) {
5398 mUnderrunFrames.emplace_back(track, underrunFrames);
5399 }
5400
5401 private:
5402 const mixer_state * const mMixerStatus;
5403 ThreadMetrics * const mThreadMetrics;
5404 std::vector<std::pair<sp<Track>, size_t>> mUnderrunFrames;
5405 } deferredOperations(&mixerStatus, &mThreadMetrics);
5406 // implicit nested scope for variable capture
5407
5408 bool noFastHapticTrack = true;
5409 for (size_t i=0 ; i<count ; i++) {
5410 const sp<Track> t = mActiveTracks[i];
5411
5412 // this const just means the local variable doesn't change
5413 Track* const track = t.get();
5414
5415 // process fast tracks
5416 if (track->isFastTrack()) {
5417 LOG_ALWAYS_FATAL_IF(mFastMixer.get() == nullptr,
5418 "%s(%d): FastTrack(%d) present without FastMixer",
5419 __func__, id(), track->id());
5420
5421 if (track->getHapticPlaybackEnabled()) {
5422 noFastHapticTrack = false;
5423 }
5424
5425 // It's theoretically possible (though unlikely) for a fast track to be created
5426 // and then removed within the same normal mix cycle. This is not a problem, as
5427 // the track never becomes active, so its fast mixer slot is never touched.
5428 // The converse, of removing an (active) track and then creating a new track
5429 // at the identical fast mixer slot within the same normal mix cycle,
5430 // is impossible because the slot isn't marked available until the end of each cycle.
5431 int j = track->mFastIndex;
5432 ALOG_ASSERT(0 < j && j < (int)FastMixerState::sMaxFastTracks);
5433 ALOG_ASSERT(!(mFastTrackAvailMask & (1 << j)));
5434 FastTrack *fastTrack = &state->mFastTracks[j];
5435
5436 // Determine whether the track is currently in underrun condition,
5437 // and whether it had a recent underrun.
5438 FastTrackDump *ftDump = &mFastMixerDumpState.mTracks[j];
5439 FastTrackUnderruns underruns = ftDump->mUnderruns;
5440 uint32_t recentFull = (underruns.mBitFields.mFull -
5441 track->mObservedUnderruns.mBitFields.mFull) & UNDERRUN_MASK;
5442 uint32_t recentPartial = (underruns.mBitFields.mPartial -
5443 track->mObservedUnderruns.mBitFields.mPartial) & UNDERRUN_MASK;
5444 uint32_t recentEmpty = (underruns.mBitFields.mEmpty -
5445 track->mObservedUnderruns.mBitFields.mEmpty) & UNDERRUN_MASK;
5446 uint32_t recentUnderruns = recentPartial + recentEmpty;
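// (The mFull/mPartial/mEmpty counters are narrow wrapping bit fields, so each
// subtraction is masked with UNDERRUN_MASK to recover the number of events since the
// last observation even across wrap-around.)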
5447 track->mObservedUnderruns = underruns;
5448 // don't count underruns that occur while stopping or pausing, or while stopped
5449 // (which can occur when flush() is called while active)
5450 size_t underrunFrames = 0;
5451 if (!(track->isStopping() || track->isPausing() || track->isStopped()) &&
5452 recentUnderruns > 0) {
5453 // FIXME fast mixer will pull & mix partial buffers, but we count as a full underrun
5454 underrunFrames = recentUnderruns * mFrameCount;
5455 }
5456 // Immediately account for FastTrack underruns.
5457 track->mAudioTrackServerProxy->tallyUnderrunFrames(underrunFrames);
5458
5459 // This is similar to the state machine for normal tracks,
5460 // with a few modifications for fast tracks.
5461 bool isActive = true;
5462 switch (track->mState) {
5463 case TrackBase::STOPPING_1:
5464 // track stays active in STOPPING_1 state until first underrun
5465 if (recentUnderruns > 0 || track->isTerminated()) {
5466 track->mState = TrackBase::STOPPING_2;
5467 }
5468 break;
5469 case TrackBase::PAUSING:
5470 // ramp down is not yet implemented
5471 track->setPaused();
5472 break;
5473 case TrackBase::RESUMING:
5474 // ramp up is not yet implemented
5475 track->mState = TrackBase::ACTIVE;
5476 break;
5477 case TrackBase::ACTIVE:
5478 if (recentFull > 0 || recentPartial > 0) {
5479 // track has provided at least some frames recently: reset retry count
5480 track->mRetryCount = kMaxTrackRetries;
5481 }
5482 if (recentUnderruns == 0) {
5483 // no recent underruns: stay active
5484 break;
5485 }
5486 // there has recently been an underrun of some kind
5487 if (track->sharedBuffer() == 0) {
5488 // were any of the recent underruns "empty" (no frames available)?
5489 if (recentEmpty == 0) {
5490 // no, then ignore the partial underruns as they are allowed indefinitely
5491 break;
5492 }
5493 // there has recently been an "empty" underrun: decrement the retry counter
5494 if (--(track->mRetryCount) > 0) {
5495 break;
5496 }
5497 // indicate to client process that the track was disabled because of underrun;
5498 // it will then automatically call start() when data is available
5499 track->disable();
5500 // remove from active list, but state remains ACTIVE [confusing but true]
5501 isActive = false;
5502 break;
5503 }
5504 FALLTHROUGH_INTENDED;
5505 case TrackBase::STOPPING_2:
5506 case TrackBase::PAUSED:
5507 case TrackBase::STOPPED:
5508 case TrackBase::FLUSHED: // flush() while active
5509 // Check for presentation complete if track is inactive
5510 // We have consumed all the buffers of this track.
5511 // This would be incomplete if we auto-paused on underrun
5512 {
5513 uint32_t latency = 0;
5514 status_t result = mOutput->stream->getLatency(&latency);
5515 ALOGE_IF(result != OK,
5516 "Error when retrieving output stream latency: %d", result);
5517 size_t audioHALFrames = (latency * mSampleRate) / 1000;
5518 int64_t framesWritten = mBytesWritten / mFrameSize;
5519 if (!(mStandby || track->presentationComplete(framesWritten, audioHALFrames))) {
5520 // track stays in active list until presentation is complete
5521 break;
5522 }
5523 }
5524 if (track->isStopping_2()) {
5525 track->mState = TrackBase::STOPPED;
5526 }
5527 if (track->isStopped()) {
5528 // Can't reset directly, as fast mixer is still polling this track
5529 // track->reset();
5530 // So instead mark this track as needing to be reset after push with ack
5531 resetMask |= 1 << i;
5532 }
5533 isActive = false;
5534 break;
5535 case TrackBase::IDLE:
5536 default:
5537 LOG_ALWAYS_FATAL("unexpected track state %d", (int)track->mState);
5538 }
5539
5540 if (isActive) {
5541 // was it previously inactive?
5542 if (!(state->mTrackMask & (1 << j))) {
5543 ExtendedAudioBufferProvider *eabp = track;
5544 VolumeProvider *vp = track;
5545 fastTrack->mBufferProvider = eabp;
5546 fastTrack->mVolumeProvider = vp;
5547 fastTrack->mChannelMask = track->mChannelMask;
5548 fastTrack->mFormat = track->mFormat;
5549 fastTrack->mHapticPlaybackEnabled = track->getHapticPlaybackEnabled();
5550 fastTrack->mHapticIntensity = track->getHapticIntensity();
5551 fastTrack->mHapticMaxAmplitude = track->getHapticMaxAmplitude();
5552 fastTrack->mGeneration++;
5553 state->mTrackMask |= 1 << j;
5554 didModify = true;
5555 // no acknowledgement required for newly active tracks
5556 }
5557 sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
5558 float volume;
5559 if (track->isPlaybackRestricted() || mStreamTypes[track->streamType()].mute) {
5560 volume = 0.f;
5561 } else {
5562 volume = masterVolume * mStreamTypes[track->streamType()].volume;
5563 }
5564
5565 handleVoipVolume_l(&volume);
5566
5567 // cache the combined master volume and stream type volume for fast mixer; this
5568 // lacks any synchronization or barrier so VolumeProvider may read a stale value
5569 const float vh = track->getVolumeHandler()->getVolume(
5570 proxy->framesReleased()).first;
5571 volume *= vh;
5572 track->mCachedVolume = volume;
5573 gain_minifloat_packed_t vlr = proxy->getVolumeLR();
5574 float vlf = float_from_gain(gain_minifloat_unpack_left(vlr));
5575 float vrf = float_from_gain(gain_minifloat_unpack_right(vlr));
5576
5577 track->processMuteEvent_l(mAudioFlinger->getOrCreateAudioManager(),
5578 /*muteState=*/{masterVolume == 0.f,
5579 mStreamTypes[track->streamType()].volume == 0.f,
5580 mStreamTypes[track->streamType()].mute,
5581 track->isPlaybackRestricted(),
5582 vlf == 0.f && vrf == 0.f,
5583 vh == 0.f});
5584
5585 vlf *= volume;
5586 vrf *= volume;
5587
5588 track->setFinalVolume(vlf, vrf);
5589 ++fastTracks;
5590 } else {
5591 // was it previously active?
5592 if (state->mTrackMask & (1 << j)) {
5593 fastTrack->mBufferProvider = NULL;
5594 fastTrack->mGeneration++;
5595 state->mTrackMask &= ~(1 << j);
5596 didModify = true;
5597 // If any fast tracks were removed, we must wait for acknowledgement
5598 // because we're about to decrement the last sp<> on those tracks.
5599 block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
5600 } else {
5601 // ALOGW rather than LOG_ALWAYS_FATAL because it seems there are cases where an
5602 // AudioTrack may start (which may not be with a start() but with a write()
5603 // after underrun) and immediately paused or released. In that case the
5604 // FastTrack state hasn't had time to update.
5605 // TODO Remove the ALOGW when this theory is confirmed.
5606 ALOGW("fast track %d should have been active; "
5607 "mState=%d, mTrackMask=%#x, recentUnderruns=%u, isShared=%d",
5608 j, (int)track->mState, state->mTrackMask, recentUnderruns,
5609 track->sharedBuffer() != 0);
5610 // Since the FastMixer state already has the track inactive, do nothing here.
5611 }
5612 tracksToRemove->add(track);
5613 // Avoids a misleading display in dumpsys
5614 track->mObservedUnderruns.mBitFields.mMostRecent = UNDERRUN_FULL;
5615 }
5616 if (fastTrack->mHapticPlaybackEnabled != track->getHapticPlaybackEnabled()) {
5617 fastTrack->mHapticPlaybackEnabled = track->getHapticPlaybackEnabled();
5618 didModify = true;
5619 }
5620 continue;
5621 }
5622
5623 { // local variable scope to avoid goto warning
5624
5625 audio_track_cblk_t* cblk = track->cblk();
5626
5627 // The first time a track is added we wait
5628 // for all its buffers to be filled before processing it
5629 const int trackId = track->id();
5630
5631 // if an active track doesn't exist in the AudioMixer, create it.
5632 // use the trackId as the AudioMixer name.
5633 if (!mAudioMixer->exists(trackId)) {
5634 status_t status = mAudioMixer->create(
5635 trackId,
5636 track->mChannelMask,
5637 track->mFormat,
5638 track->mSessionId);
5639 if (status != OK) {
5640 ALOGW("%s(): AudioMixer cannot create track(%d)"
5641 " mask %#x, format %#x, sessionId %d",
5642 __func__, trackId,
5643 track->mChannelMask, track->mFormat, track->mSessionId);
5644 tracksToRemove->add(track);
5645 track->invalidate(); // consider it dead.
5646 continue;
5647 }
5648 }
5649
5650 // make sure that we have enough frames to mix one full buffer.
5651 // enforce this condition only once to enable draining the buffer in case the client
5652 // app does not call stop() and relies on underrun to stop:
5653 // hence the test on (mMixerStatus == MIXER_TRACKS_READY) meaning the track was mixed
5654 // during last round
5655 size_t desiredFrames;
5656 const uint32_t sampleRate = track->mAudioTrackServerProxy->getSampleRate();
5657 const AudioPlaybackRate playbackRate = track->mAudioTrackServerProxy->getPlaybackRate();
5658
5659 desiredFrames = sourceFramesNeededWithTimestretch(
5660 sampleRate, mNormalFrameCount, mSampleRate, playbackRate.mSpeed);
5661 // TODO: ONLY USED FOR LEGACY RESAMPLERS, remove when they are removed.
5662 // add frames already consumed but not yet released by the resampler
5663 // because mAudioTrackServerProxy->framesReady() will include these frames
5664 desiredFrames += mAudioMixer->getUnreleasedFrames(trackId);
5665
5666 uint32_t minFrames = 1;
5667 if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing() &&
5668 (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY)) {
5669 minFrames = desiredFrames;
5670 }
5671
5672 size_t framesReady = track->framesReady();
5673 if (ATRACE_ENABLED()) {
5674 // I wish we had formatted trace names
5675 std::string traceName("nRdy");
5676 traceName += std::to_string(trackId);
5677 ATRACE_INT(traceName.c_str(), framesReady);
5678 }
5679 if ((framesReady >= minFrames) && track->isReady() &&
5680 !track->isPaused() && !track->isTerminated())
5681 {
5682 ALOGVV("track(%d) s=%08x [OK] on thread %p", trackId, cblk->mServer, this);
5683
5684 mixedTracks++;
5685
5686 // track->mainBuffer() != mSinkBuffer or mMixerBuffer means
5687 // there is an effect chain connected to the track
5688 chain.clear();
5689 if (track->mainBuffer() != mSinkBuffer &&
5690 track->mainBuffer() != mMixerBuffer) {
5691 if (mEffectBufferEnabled) {
5692 mEffectBufferValid = true; // Later can set directly.
5693 }
5694 chain = getEffectChain_l(track->sessionId());
5695 // Delegate volume control to effect in track effect chain if needed
5696 if (chain != 0) {
5697 tracksWithEffect++;
5698 } else {
5699 ALOGW("prepareTracks_l(): track(%d) attached to effect but no chain found on "
5700 "session %d",
5701 trackId, track->sessionId());
5702 }
5703 }
5704
5705
5706 int param = AudioMixer::VOLUME;
5707 if (track->mFillingUpStatus == Track::FS_FILLED) {
5708 // no ramp for the first volume setting
5709 track->mFillingUpStatus = Track::FS_ACTIVE;
5710 if (track->mState == TrackBase::RESUMING) {
5711 track->mState = TrackBase::ACTIVE;
5712 // If a new track is paused immediately after start, do not ramp on resume.
5713 if (cblk->mServer != 0) {
5714 param = AudioMixer::RAMP_VOLUME;
5715 }
5716 }
5717 mAudioMixer->setParameter(trackId, AudioMixer::RESAMPLE, AudioMixer::RESET, NULL);
5718 mLeftVolFloat = -1.0;
5719 // FIXME should not make a decision based on mServer
5720 } else if (cblk->mServer != 0) {
5721 // If the track is stopped before the first frame was mixed,
5722 // do not apply ramp
5723 param = AudioMixer::RAMP_VOLUME;
5724 }
5725
5726 // compute volume for this track
5727 uint32_t vl, vr; // in U8.24 integer format
5728 float vlf, vrf, vaf; // in [0.0, 1.0] float format
5729 // read original volumes with volume control
5730 float v = masterVolume * mStreamTypes[track->streamType()].volume;
5731 // Always fetch volumeshaper volume to ensure state is updated.
5732 const sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
5733 const float vh = track->getVolumeHandler()->getVolume(
5734 track->mAudioTrackServerProxy->framesReleased()).first;
5735
5736 if (mStreamTypes[track->streamType()].mute || track->isPlaybackRestricted()) {
5737 v = 0;
5738 }
5739
5740 handleVoipVolume_l(&v);
5741
5742 if (track->isPausing()) {
5743 vl = vr = 0;
5744 vlf = vrf = vaf = 0.;
5745 track->setPaused();
5746 } else {
5747 gain_minifloat_packed_t vlr = proxy->getVolumeLR();
5748 vlf = float_from_gain(gain_minifloat_unpack_left(vlr));
5749 vrf = float_from_gain(gain_minifloat_unpack_right(vlr));
5750 // track volumes come from shared memory, so can't be trusted and must be clamped
5751 if (vlf > GAIN_FLOAT_UNITY) {
5752 ALOGV("Track left volume out of range: %.3g", vlf);
5753 vlf = GAIN_FLOAT_UNITY;
5754 }
5755 if (vrf > GAIN_FLOAT_UNITY) {
5756 ALOGV("Track right volume out of range: %.3g", vrf);
5757 vrf = GAIN_FLOAT_UNITY;
5758 }
5759
5760 track->processMuteEvent_l(mAudioFlinger->getOrCreateAudioManager(),
5761 /*muteState=*/{masterVolume == 0.f,
5762 mStreamTypes[track->streamType()].volume == 0.f,
5763 mStreamTypes[track->streamType()].mute,
5764 track->isPlaybackRestricted(),
5765 vlf == 0.f && vrf == 0.f,
5766 vh == 0.f});
5767
5768 // now apply the master volume and stream type volume and shaper volume
5769 vlf *= v * vh;
5770 vrf *= v * vh;
5771 // assuming master volume and stream type volume each go up to 1.0,
5772 // then derive vl and vr as U8.24 versions for the effect chain
5773 const float scaleto8_24 = MAX_GAIN_INT * MAX_GAIN_INT;
5774 vl = (uint32_t) (scaleto8_24 * vlf);
5775 vr = (uint32_t) (scaleto8_24 * vrf);
5776 // vl and vr are now in U8.24 format
5777 uint16_t sendLevel = proxy->getSendLevel_U4_12();
5778 // send level comes from shared memory and so may be corrupt
5779 if (sendLevel > MAX_GAIN_INT) {
5780 ALOGV("Track send level out of range: %04X", sendLevel);
5781 sendLevel = MAX_GAIN_INT;
5782 }
5783 // vaf is represented as [0.0, 1.0] float by rescaling sendLevel
5784 vaf = v * sendLevel * (1. / MAX_GAIN_INT);
5785 }
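// Illustrative note (not in the original logic): two fixed-point gain formats meet here.
// U8.24 places unity at 1 << 24, and the send level is U4.12 with unity at MAX_GAIN_INT
// (0x1000, consistent with scaleto8_24 == 1 << 24). A rough worked example, assuming
// vlf == 0.25f and sendLevel == 0x0800:
//   vl  = (uint32_t)(scaleto8_24 * 0.25f)    // == 0x00400000, 0.25 in U8.24
//   vaf = v * 0x0800 * (1. / MAX_GAIN_INT)   // == v * 0.5, half send level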
5786
5787 track->setFinalVolume(vlf, vrf);
5788
5789 // Delegate volume control to effect in track effect chain if needed
5790 if (chain != 0 && chain->setVolume_l(&vl, &vr)) {
5791 // Do not ramp volume if volume is controlled by effect
5792 param = AudioMixer::VOLUME;
5793 // Update remaining floating point volume levels
5794 vlf = (float)vl / (1 << 24);
5795 vrf = (float)vr / (1 << 24);
5796 track->mHasVolumeController = true;
5797 } else {
5798 // force no volume ramp when volume controller was just disabled or removed
5799 // from effect chain to avoid volume spike
5800 if (track->mHasVolumeController) {
5801 param = AudioMixer::VOLUME;
5802 }
5803 track->mHasVolumeController = false;
5804 }
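// Illustrative note: when a volume-controller effect on the track's session claims volume,
// chain->setVolume_l() returns true and rewrites vl/vr to the residual gain the mixer should
// still apply (the effect applies the rest). A hedged sketch of that contract:
//   uint32_t vl = 0x00800000, vr = 0x00800000;   // 0.5 in U8.24
//   if (chain->setVolume_l(&vl, &vr)) {          // effect absorbs some or all of the gain
//       vlf = (float)vl / (1 << 24);             // remaining gain for the mixer
//   }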
5805
5806 // XXX: these things DON'T need to be done each time
5807 mAudioMixer->setBufferProvider(trackId, track);
5808 mAudioMixer->enable(trackId);
5809
5810 mAudioMixer->setParameter(trackId, param, AudioMixer::VOLUME0, &vlf);
5811 mAudioMixer->setParameter(trackId, param, AudioMixer::VOLUME1, &vrf);
5812 mAudioMixer->setParameter(trackId, param, AudioMixer::AUXLEVEL, &vaf);
5813 mAudioMixer->setParameter(
5814 trackId,
5815 AudioMixer::TRACK,
5816 AudioMixer::FORMAT, (void *)track->format());
5817 mAudioMixer->setParameter(
5818 trackId,
5819 AudioMixer::TRACK,
5820 AudioMixer::CHANNEL_MASK, (void *)(uintptr_t)track->channelMask());
5821
5822 if (mType == SPATIALIZER && !track->isSpatialized()) {
5823 mAudioMixer->setParameter(
5824 trackId,
5825 AudioMixer::TRACK,
5826 AudioMixer::MIXER_CHANNEL_MASK,
5827 (void *)(uintptr_t)(mChannelMask | mHapticChannelMask));
5828 } else {
5829 mAudioMixer->setParameter(
5830 trackId,
5831 AudioMixer::TRACK,
5832 AudioMixer::MIXER_CHANNEL_MASK,
5833 (void *)(uintptr_t)(mMixerChannelMask | mHapticChannelMask));
5834 }
5835
5836 // limit track sample rate to at most AUDIO_RESAMPLER_DOWN_RATIO_MAX x the output sample rate, which changes at re-configuration
5837 uint32_t maxSampleRate = mSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX;
5838 uint32_t reqSampleRate = proxy->getSampleRate();
5839 if (reqSampleRate == 0) {
5840 reqSampleRate = mSampleRate;
5841 } else if (reqSampleRate > maxSampleRate) {
5842 reqSampleRate = maxSampleRate;
5843 }
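// Illustrative note: the requested rate comes from client shared memory, so it is sanitized
// here. For example, with mSampleRate == 48000 a request of 0 falls back to 48000, and a
// request above 48000 * AUDIO_RESAMPLER_DOWN_RATIO_MAX is clamped to that maximum before
// being handed to the resampler.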
5844 mAudioMixer->setParameter(
5845 trackId,
5846 AudioMixer::RESAMPLE,
5847 AudioMixer::SAMPLE_RATE,
5848 (void *)(uintptr_t)reqSampleRate);
5849
5850 mAudioMixer->setParameter(
5851 trackId,
5852 AudioMixer::TIMESTRETCH,
5853 AudioMixer::PLAYBACK_RATE,
5854 // cast away constness for this generic API.
5855 const_cast<void *>(reinterpret_cast<const void *>(&playbackRate)));
5856
5857 /*
5858 * Select the appropriate output buffer for the track.
5859 *
5860 * Tracks with effects go into their own effects chain buffer
5861 * and from there into either mEffectBuffer or mSinkBuffer.
5862 *
5863 * Other tracks can use mMixerBuffer for higher precision
5864 * channel accumulation. If this buffer is enabled
5865 * (mMixerBufferEnabled true), then selected tracks will accumulate
5866 * into it.
5867 *
5868 */
5869 if (mMixerBufferEnabled
5870 && (track->mainBuffer() == mSinkBuffer
5871 || track->mainBuffer() == mMixerBuffer)) {
5872 if (mType == SPATIALIZER && !track->isSpatialized()) {
5873 mAudioMixer->setParameter(
5874 trackId,
5875 AudioMixer::TRACK,
5876 AudioMixer::MIXER_FORMAT, (void *)mEffectBufferFormat);
5877 mAudioMixer->setParameter(
5878 trackId,
5879 AudioMixer::TRACK,
5880 AudioMixer::MAIN_BUFFER, (void *)mPostSpatializerBuffer);
5881 } else {
5882 mAudioMixer->setParameter(
5883 trackId,
5884 AudioMixer::TRACK,
5885 AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat);
5886 mAudioMixer->setParameter(
5887 trackId,
5888 AudioMixer::TRACK,
5889 AudioMixer::MAIN_BUFFER, (void *)mMixerBuffer);
5890 // TODO: override track->mainBuffer()?
5891 mMixerBufferValid = true;
5892 }
5893 } else {
5894 mAudioMixer->setParameter(
5895 trackId,
5896 AudioMixer::TRACK,
5897 AudioMixer::MIXER_FORMAT, (void *)EFFECT_BUFFER_FORMAT);
5898 mAudioMixer->setParameter(
5899 trackId,
5900 AudioMixer::TRACK,
5901 AudioMixer::MAIN_BUFFER, (void *)track->mainBuffer());
5902 }
5903 mAudioMixer->setParameter(
5904 trackId,
5905 AudioMixer::TRACK,
5906 AudioMixer::AUX_BUFFER, (void *)track->auxBuffer());
5907 mAudioMixer->setParameter(
5908 trackId,
5909 AudioMixer::TRACK,
5910 AudioMixer::HAPTIC_ENABLED, (void *)(uintptr_t)track->getHapticPlaybackEnabled());
5911 mAudioMixer->setParameter(
5912 trackId,
5913 AudioMixer::TRACK,
5914 AudioMixer::HAPTIC_INTENSITY, (void *)(uintptr_t)track->getHapticIntensity());
5915 mAudioMixer->setParameter(
5916 trackId,
5917 AudioMixer::TRACK,
5918 AudioMixer::HAPTIC_MAX_AMPLITUDE, (void *)(&(track->mHapticMaxAmplitude)));
5919
5920 // reset retry count
5921 track->mRetryCount = kMaxTrackRetries;
5922
5923 // If one track is ready, set the mixer ready if:
5924 // - the mixer was not ready during previous round OR
5925 // - no other track is not ready
5926 if (mMixerStatusIgnoringFastTracks != MIXER_TRACKS_READY ||
5927 mixerStatus != MIXER_TRACKS_ENABLED) {
5928 mixerStatus = MIXER_TRACKS_READY;
5929 }
5930
5931 // Enable the next few lines to instrument a test for underrun log handling.
5932 // TODO: Remove when we have a better way of testing the underrun log.
5933 #if 0
5934 static int i;
5935 if ((++i & 0xf) == 0) {
5936 deferredOperations.tallyUnderrunFrames(track, 10 /* underrunFrames */);
5937 }
5938 #endif
5939 } else {
5940 size_t underrunFrames = 0;
5941 if (framesReady < desiredFrames && !track->isStopped() && !track->isPaused()) {
5942 ALOGV("track(%d) underrun, track state %s framesReady(%zu) < framesDesired(%zd)",
5943 trackId, track->getTrackStateAsString(), framesReady, desiredFrames);
5944 underrunFrames = desiredFrames;
5945 }
5946 deferredOperations.tallyUnderrunFrames(track, underrunFrames);
5947
5948 // clear effect chain input buffer if an active track underruns to avoid sending
5949 // previous audio buffer again to effects
5950 chain = getEffectChain_l(track->sessionId());
5951 if (chain != 0) {
5952 chain->clearInputBuffer();
5953 }
5954
5955 ALOGVV("track(%d) s=%08x [NOT READY] on thread %p", trackId, cblk->mServer, this);
5956 if ((track->sharedBuffer() != 0) || track->isTerminated() ||
5957 track->isStopped() || track->isPaused()) {
5958 // We have consumed all the buffers of this track.
5959 // Remove it from the list of active tracks.
5960 // TODO: use actual buffer filling status instead of latency when available from
5961 // audio HAL
5962 size_t audioHALFrames = (latency_l() * mSampleRate) / 1000;
5963 int64_t framesWritten = mBytesWritten / mFrameSize;
5964 if (mStandby || track->presentationComplete(framesWritten, audioHALFrames)) {
5965 if (track->isStopped()) {
5966 track->reset();
5967 }
5968 tracksToRemove->add(track);
5969 }
5970 } else {
5971 // No buffers for this track. Give it a few chances to
5972 // fill a buffer, then remove it from active list.
5973 if (--(track->mRetryCount) <= 0) {
5974 ALOGI("BUFFER TIMEOUT: remove(%d) from active list on thread %p",
5975 trackId, this);
5976 tracksToRemove->add(track);
5977 // indicate to client process that the track was disabled because of underrun;
5978 // it will then automatically call start() when data is available
5979 track->disable();
5980 // If one track is not ready, mark the mixer also not ready if:
5981 // - the mixer was ready during previous round OR
5982 // - no other track is ready
5983 } else if (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY ||
5984 mixerStatus != MIXER_TRACKS_READY) {
5985 mixerStatus = MIXER_TRACKS_ENABLED;
5986 }
5987 }
5988 mAudioMixer->disable(trackId);
5989 }
5990
5991 } // local variable scope to avoid goto warning
5992
5993 }
5994
5995 if (mHapticChannelMask != AUDIO_CHANNEL_NONE && sq != NULL) {
5996 // When no fast track is playing haptics and a FastMixer exists,
5997 // enable the first FastTrack, which carries the mixed data from the normal
5998 // tracks, to play the haptic data.
5999 FastTrack *fastTrack = &state->mFastTracks[0];
6000 if (fastTrack->mHapticPlaybackEnabled != noFastHapticTrack) {
6001 fastTrack->mHapticPlaybackEnabled = noFastHapticTrack;
6002 didModify = true;
6003 }
6004 }
6005
6006 // Push the new FastMixer state if necessary
6007 [[maybe_unused]] bool pauseAudioWatchdog = false;
6008 if (didModify) {
6009 state->mFastTracksGen++;
6010 // if the fast mixer was active, but now there are no fast tracks, then put it in cold idle
6011 if (kUseFastMixer == FastMixer_Dynamic &&
6012 state->mCommand == FastMixerState::MIX_WRITE && state->mTrackMask <= 1) {
6013 state->mCommand = FastMixerState::COLD_IDLE;
6014 state->mColdFutexAddr = &mFastMixerFutex;
6015 state->mColdGen++;
6016 mFastMixerFutex = 0;
6017 if (kUseFastMixer == FastMixer_Dynamic) {
6018 mNormalSink = mOutputSink;
6019 }
6020 // If we go into cold idle, need to wait for acknowledgement
6021 // so that fast mixer stops doing I/O.
6022 block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
6023 pauseAudioWatchdog = true;
6024 }
6025 }
6026 if (sq != NULL) {
6027 sq->end(didModify);
6028 // No need to block if the FastMixer is in COLD_IDLE as the FastThread
6029 // is not active. (We BLOCK_UNTIL_ACKED when entering COLD_IDLE
6030 // when bringing the output sink into standby.)
6031 //
6032 // We will get the latest FastMixer state when we come out of COLD_IDLE.
6033 //
6034 // This occurs with BT suspend when we idle the FastMixer with
6035 // active tracks, which may be added or removed.
6036 sq->push(coldIdle ? FastMixerStateQueue::BLOCK_NEVER : block);
6037 }
6038 #ifdef AUDIO_WATCHDOG
6039 if (pauseAudioWatchdog && mAudioWatchdog != 0) {
6040 mAudioWatchdog->pause();
6041 }
6042 #endif
6043
6044 // Now perform the deferred reset on fast tracks that have stopped
6045 while (resetMask != 0) {
6046 size_t i = __builtin_ctz(resetMask);
6047 ALOG_ASSERT(i < count);
6048 resetMask &= ~(1 << i);
6049 sp<Track> track = mActiveTracks[i];
6050 ALOG_ASSERT(track->isFastTrack() && track->isStopped());
6051 track->reset();
6052 }
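// Illustrative note: resetMask is walked with __builtin_ctz (count trailing zeros), visiting
// each set bit from lowest to highest. For example, resetMask == 0b1010 yields i == 1 and
// then i == 3, so mActiveTracks[1] and mActiveTracks[3] get the deferred reset().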
6053
6054 // Track destruction may occur outside of threadLoop once it is removed from active tracks.
6055 // Ensure the AudioMixer doesn't have a raw "buffer provider" pointer to the track if
6056 // it ceases to be active, to allow safe removal from the AudioMixer at the start
6057 // of prepareTracks_l(); this releases any outstanding buffer back to the track.
6058 // See also the implementation of destroyTrack_l().
6059 for (const auto &track : *tracksToRemove) {
6060 const int trackId = track->id();
6061 if (mAudioMixer->exists(trackId)) { // Normal tracks here, fast tracks in FastMixer.
6062 mAudioMixer->setBufferProvider(trackId, nullptr /* bufferProvider */);
6063 }
6064 }
6065
6066 // remove all the tracks that need to be...
6067 removeTracks_l(*tracksToRemove);
6068
6069 if (getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX) != 0 ||
6070 getEffectChain_l(AUDIO_SESSION_OUTPUT_STAGE) != 0) {
6071 mEffectBufferValid = true;
6072 }
6073
6074 if (mEffectBufferValid) {
6075 // as long as there are effects we should clear the effects buffer, to avoid
6076 // passing a non-clean buffer to the effect chain
6077 memset(mEffectBuffer, 0, mEffectBufferSize);
6078 if (mType == SPATIALIZER) {
6079 memset(mPostSpatializerBuffer, 0, mPostSpatializerBufferSize);
6080 }
6081 }
6082 // sink or mix buffer must be cleared if all tracks are connected to an
6083 // effect chain as in this case the mixer will not write to the sink or mix buffer
6084 // and track effects will accumulate into it
6085 // always clear sink buffer for spatializer output as the output of the spatializer
6086 // effect will be accumulated into it
6087 if ((mBytesRemaining == 0) && (((mixedTracks != 0 && mixedTracks == tracksWithEffect) ||
6088 (mixedTracks == 0 && fastTracks > 0)) || (mType == SPATIALIZER))) {
6089 // FIXME as a performance optimization, should remember previous zero status
6090 if (mMixerBufferValid) {
6091 memset(mMixerBuffer, 0, mMixerBufferSize);
6092 // TODO: In testing, mSinkBuffer below need not be cleared because
6093 // the PlaybackThread::threadLoop() copies mMixerBuffer into mSinkBuffer
6094 // after mixing.
6095 //
6096 // To enforce this guarantee:
6097 // ((mixedTracks != 0 && mixedTracks == tracksWithEffect) ||
6098 // (mixedTracks == 0 && fastTracks > 0))
6099 // must imply MIXER_TRACKS_READY.
6100 // Later, we may clear buffers regardless, and skip much of this logic.
6101 }
6102 // FIXME as a performance optimization, should remember previous zero status
6103 memset(mSinkBuffer, 0, mNormalFrameCount * mFrameSize);
6104 }
6105
6106 // if any fast tracks, then status is ready
6107 mMixerStatusIgnoringFastTracks = mixerStatus;
6108 if (fastTracks > 0) {
6109 mixerStatus = MIXER_TRACKS_READY;
6110 }
6111 return mixerStatus;
6112 }
6113
6114 // trackCountForUid_l() must be called with ThreadBase::mLock held
6115 uint32_t AudioFlinger::PlaybackThread::trackCountForUid_l(uid_t uid) const
6116 {
6117 uint32_t trackCount = 0;
6118 for (size_t i = 0; i < mTracks.size() ; i++) {
6119 if (mTracks[i]->uid() == uid) {
6120 trackCount++;
6121 }
6122 }
6123 return trackCount;
6124 }
6125
6126 bool AudioFlinger::PlaybackThread::IsTimestampAdvancing::check(AudioStreamOut * output)
6127 {
6128 // Check the timestamp to see if it's advancing once every 150ms. If we check too frequently, we
6129 // could falsely detect that the frame position has stalled due to underrun because we haven't
6130 // given the Audio HAL enough time to update.
6131 const nsecs_t nowNs = systemTime();
6132 if (nowNs - mPreviousNs < mMinimumTimeBetweenChecksNs) {
6133 return mLatchedValue;
6134 }
6135 mPreviousNs = nowNs;
6136 mLatchedValue = false;
6137 // Determine if the presentation position is still advancing.
6138 uint64_t position = 0;
6139 struct timespec unused;
6140 const status_t ret = output->getPresentationPosition(&position, &unused);
6141 if (ret == NO_ERROR) {
6142 if (position != mPreviousPosition) {
6143 mPreviousPosition = position;
6144 mLatchedValue = true;
6145 }
6146 }
6147 return mLatchedValue;
6148 }
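// Illustrative note: check() is rate limited, so calls within mMinimumTimeBetweenChecksNs
// (150ms per the comment above) return the latched result. A rough, hypothetical call trace:
//   t = 0ms    check() -> queries the HAL, position moved     -> latches and returns true
//   t = 60ms   check() -> too soon, returns latched true
//   t = 160ms  check() -> queries again, position unchanged   -> latches and returns false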
6149
6150 void AudioFlinger::PlaybackThread::IsTimestampAdvancing::clear()
6151 {
6152 mLatchedValue = true;
6153 mPreviousPosition = 0;
6154 mPreviousNs = 0;
6155 }
6156
6157 // isTrackAllowed_l() must be called with ThreadBase::mLock held
6158 bool AudioFlinger::MixerThread::isTrackAllowed_l(
6159 audio_channel_mask_t channelMask, audio_format_t format,
6160 audio_session_t sessionId, uid_t uid) const
6161 {
6162 if (!PlaybackThread::isTrackAllowed_l(channelMask, format, sessionId, uid)) {
6163 return false;
6164 }
6165 // Check validity as we don't call AudioMixer::create() here.
6166 if (!mAudioMixer->isValidFormat(format)) {
6167 ALOGW("%s: invalid format: %#x", __func__, format);
6168 return false;
6169 }
6170 if (!mAudioMixer->isValidChannelMask(channelMask)) {
6171 ALOGW("%s: invalid channelMask: %#x", __func__, channelMask);
6172 return false;
6173 }
6174 return true;
6175 }
6176
6177 // checkForNewParameter_l() must be called with ThreadBase::mLock held
6178 bool AudioFlinger::MixerThread::checkForNewParameter_l(const String8& keyValuePair,
6179 status_t& status)
6180 {
6181 bool reconfig = false;
6182 status = NO_ERROR;
6183
6184 AutoPark<FastMixer> park(mFastMixer);
6185
6186 AudioParameter param = AudioParameter(keyValuePair);
6187 int value;
6188 if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) {
6189 reconfig = true;
6190 }
6191 if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) {
6192 if (!isValidPcmSinkFormat((audio_format_t) value)) {
6193 status = BAD_VALUE;
6194 } else {
6195 // no need to save value, since it's constant
6196 reconfig = true;
6197 }
6198 }
6199 if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
6200 if (!isValidPcmSinkChannelMask((audio_channel_mask_t) value)) {
6201 status = BAD_VALUE;
6202 } else {
6203 // no need to save value, since it's constant
6204 reconfig = true;
6205 }
6206 }
6207 if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
6208 // do not accept frame count changes if tracks are open as the track buffer
6209 // size depends on frame count and correct behavior would not be guaranteed
6210 // if frame count is changed after track creation
6211 if (!mTracks.isEmpty()) {
6212 status = INVALID_OPERATION;
6213 } else {
6214 reconfig = true;
6215 }
6216 }
6217 if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
6218 LOG_FATAL("Should not set routing device in MixerThread");
6219 }
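// Illustrative note: keyValuePair is an AudioParameter-style string such as
// "sampling_rate=48000" or "frame_count=960" (the exact key strings are defined by
// AudioParameter, not here). A hedged usage sketch:
//   AudioParameter param(String8("sampling_rate=48000"));
//   int value;
//   param.getInt(String8(AudioParameter::keySamplingRate), value);  // value == 48000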
6220
6221 if (status == NO_ERROR) {
6222 status = mOutput->stream->setParameters(keyValuePair);
6223 if (!mStandby && status == INVALID_OPERATION) {
6224 ALOGW("%s: setParameters failed with keyValuePair %s, entering standby",
6225 __func__, keyValuePair.c_str());
6226 mOutput->standby();
6227 mThreadMetrics.logEndInterval();
6228 mThreadSnapshot.onEnd();
6229 setStandby_l();
6230 mBytesWritten = 0;
6231 status = mOutput->stream->setParameters(keyValuePair);
6232 }
6233 if (status == NO_ERROR && reconfig) {
6234 readOutputParameters_l();
6235 delete mAudioMixer;
6236 mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
6237 for (const auto &track : mTracks) {
6238 const int trackId = track->id();
6239 const status_t createStatus = mAudioMixer->create(
6240 trackId,
6241 track->mChannelMask,
6242 track->mFormat,
6243 track->mSessionId);
6244 ALOGW_IF(createStatus != NO_ERROR,
6245 "%s(): AudioMixer cannot create track(%d)"
6246 " mask %#x, format %#x, sessionId %d",
6247 __func__,
6248 trackId, track->mChannelMask, track->mFormat, track->mSessionId);
6249 }
6250 sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
6251 }
6252 }
6253
6254 return reconfig;
6255 }
6256
6257
6258 void AudioFlinger::MixerThread::dumpInternals_l(int fd, const Vector<String16>& args)
6259 {
6260 PlaybackThread::dumpInternals_l(fd, args);
6261 dprintf(fd, " Thread throttle time (msecs): %u\n", mThreadThrottleTimeMs);
6262 dprintf(fd, " AudioMixer tracks: %s\n", mAudioMixer->trackNames().c_str());
6263 dprintf(fd, " Master mono: %s\n", mMasterMono ? "on" : "off");
6264 dprintf(fd, " Master balance: %f (%s)\n", mMasterBalance.load(),
6265 (hasFastMixer() ? std::to_string(mFastMixer->getMasterBalance())
6266 : mBalance.toString()).c_str());
6267 if (hasFastMixer()) {
6268 dprintf(fd, " FastMixer thread %p tid=%d", mFastMixer.get(), mFastMixer->getTid());
6269
6270 // Make a non-atomic copy of fast mixer dump state so it won't change underneath us
6271 // while we are dumping it. It may be inconsistent, but it won't mutate!
6272 // This is a large object so we place it on the heap.
6273 // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
6274 const std::unique_ptr<FastMixerDumpState> copy =
6275 std::make_unique<FastMixerDumpState>(mFastMixerDumpState);
6276 copy->dump(fd);
6277
6278 #ifdef STATE_QUEUE_DUMP
6279 // Similar for state queue
6280 StateQueueObserverDump observerCopy = mStateQueueObserverDump;
6281 observerCopy.dump(fd);
6282 StateQueueMutatorDump mutatorCopy = mStateQueueMutatorDump;
6283 mutatorCopy.dump(fd);
6284 #endif
6285
6286 #ifdef AUDIO_WATCHDOG
6287 if (mAudioWatchdog != 0) {
6288 // Make a non-atomic copy of audio watchdog dump so it won't change underneath us
6289 AudioWatchdogDump wdCopy = mAudioWatchdogDump;
6290 wdCopy.dump(fd);
6291 }
6292 #endif
6293
6294 } else {
6295 dprintf(fd, " No FastMixer\n");
6296 }
6297
6298 dprintf(fd, "Bluetooth latency modes are %senabled\n",
6299 mBluetoothLatencyModesEnabled ? "" : "not ");
6300 dprintf(fd, "HAL does %ssupport Bluetooth latency modes\n", mOutput != nullptr &&
6301 mOutput->audioHwDev->supportsBluetoothVariableLatency() ? "" : "not ");
6302 dprintf(fd, "Supported latency modes: %s\n", toString(mSupportedLatencyModes).c_str());
6303 }
6304
6305 uint32_t AudioFlinger::MixerThread::idleSleepTimeUs() const
6306 {
6307 return (uint32_t)(((mNormalFrameCount * 1000) / mSampleRate) * 1000) / 2;
6308 }
6309
6310 uint32_t AudioFlinger::MixerThread::suspendSleepTimeUs() const
6311 {
6312 return (uint32_t)(((mNormalFrameCount * 1000) / mSampleRate) * 1000);
6313 }
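// Illustrative note: both sleep times derive from the normal mix period. For example, with
// mNormalFrameCount == 960 and mSampleRate == 48000 the period is 20ms, so idleSleepTimeUs()
// returns 10000 (half a period) and suspendSleepTimeUs() returns 20000.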
6314
6315 void AudioFlinger::MixerThread::cacheParameters_l()
6316 {
6317 PlaybackThread::cacheParameters_l();
6318
6319 // FIXME: Relaxed timing because of a certain device that can't meet latency
6320 // Should be reduced to 2x after the vendor fixes the driver issue
6321 // increase threshold again due to low power audio mode. The way this warning
6322 // threshold is calculated and its usefulness should be reconsidered anyway.
6323 maxPeriod = seconds(mNormalFrameCount) / mSampleRate * 15;
6324 }
6325
6326 void AudioFlinger::MixerThread::onHalLatencyModesChanged_l() {
6327 mAudioFlinger->onSupportedLatencyModesChanged(mId, mSupportedLatencyModes);
6328 }
6329
6330 void AudioFlinger::MixerThread::setHalLatencyMode_l() {
6331 // Only handle latency mode if:
6332 // - mBluetoothLatencyModesEnabled is true
6333 // - the HAL supports latency modes
6334 // - the selected device is Bluetooth LE or A2DP
6335 if (!mBluetoothLatencyModesEnabled.load() || mSupportedLatencyModes.empty()) {
6336 return;
6337 }
6338 if (mOutDeviceTypeAddrs.size() != 1
6339 || !(audio_is_a2dp_out_device(mOutDeviceTypeAddrs[0].mType)
6340 || audio_is_ble_out_device(mOutDeviceTypeAddrs[0].mType))) {
6341 return;
6342 }
6343
6344 audio_latency_mode_t latencyMode = AUDIO_LATENCY_MODE_FREE;
6345 if (mSupportedLatencyModes.size() == 1) {
6346 // If the HAL currently supports only one latency mode, confirm the choice
6347 latencyMode = mSupportedLatencyModes[0];
6348 } else if (mSupportedLatencyModes.size() > 1) {
6349 // Request low latency if:
6350 // - At least one active track is either:
6351 // - a fast track with gaming usage or
6352 // - a track with accessibility usage
6353 for (const auto& track : mActiveTracks) {
6354 if ((track->isFastTrack() && track->attributes().usage == AUDIO_USAGE_GAME)
6355 || track->attributes().usage == AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY) {
6356 latencyMode = AUDIO_LATENCY_MODE_LOW;
6357 break;
6358 }
6359 }
6360 }
6361
6362 if (latencyMode != mSetLatencyMode) {
6363 status_t status = mOutput->stream->setLatencyMode(latencyMode);
6364 ALOGD("%s: thread(%d) setLatencyMode(%s) returned %d",
6365 __func__, mId, toString(latencyMode).c_str(), status);
6366 if (status == NO_ERROR) {
6367 mSetLatencyMode = latencyMode;
6368 }
6369 }
6370 }
6371
6372 void AudioFlinger::MixerThread::updateHalSupportedLatencyModes_l() {
6373
6374 if (mOutput == nullptr || mOutput->stream == nullptr) {
6375 return;
6376 }
6377 std::vector<audio_latency_mode_t> latencyModes;
6378 const status_t status = mOutput->stream->getRecommendedLatencyModes(&latencyModes);
6379 if (status != NO_ERROR) {
6380 latencyModes.clear();
6381 }
6382 if (latencyModes != mSupportedLatencyModes) {
6383 ALOGD("%s: thread(%d) status %d supported latency modes: %s",
6384 __func__, mId, status, toString(latencyModes).c_str());
6385 mSupportedLatencyModes.swap(latencyModes);
6386 sendHalLatencyModesChangedEvent_l();
6387 }
6388 }
6389
6390 status_t AudioFlinger::MixerThread::getSupportedLatencyModes(
6391 std::vector<audio_latency_mode_t>* modes) {
6392 if (modes == nullptr) {
6393 return BAD_VALUE;
6394 }
6395 Mutex::Autolock _l(mLock);
6396 *modes = mSupportedLatencyModes;
6397 return NO_ERROR;
6398 }
6399
6400 void AudioFlinger::MixerThread::onRecommendedLatencyModeChanged(
6401 std::vector<audio_latency_mode_t> modes) {
6402 Mutex::Autolock _l(mLock);
6403 if (modes != mSupportedLatencyModes) {
6404 ALOGD("%s: thread(%d) supported latency modes: %s",
6405 __func__, mId, toString(modes).c_str());
6406 mSupportedLatencyModes.swap(modes);
6407 sendHalLatencyModesChangedEvent_l();
6408 }
6409 }
6410
6411 status_t AudioFlinger::MixerThread::setBluetoothVariableLatencyEnabled(bool enabled) {
6412 if (mOutput == nullptr || mOutput->audioHwDev == nullptr
6413 || !mOutput->audioHwDev->supportsBluetoothVariableLatency()) {
6414 return INVALID_OPERATION;
6415 }
6416 mBluetoothLatencyModesEnabled.store(enabled);
6417 return NO_ERROR;
6418 }
6419
6420 // ----------------------------------------------------------------------------
6421
6422 AudioFlinger::DirectOutputThread::DirectOutputThread(const sp<AudioFlinger>& audioFlinger,
6423 AudioStreamOut* output, audio_io_handle_t id, ThreadBase::type_t type, bool systemReady,
6424 const audio_offload_info_t& offloadInfo)
6425 : PlaybackThread(audioFlinger, output, id, type, systemReady)
6426 , mOffloadInfo(offloadInfo)
6427 {
6428 setMasterBalance(audioFlinger->getMasterBalance_l());
6429 }
6430
6431 AudioFlinger::DirectOutputThread::~DirectOutputThread()
6432 {
6433 }
6434
6435 void AudioFlinger::DirectOutputThread::dumpInternals_l(int fd, const Vector<String16>& args)
6436 {
6437 PlaybackThread::dumpInternals_l(fd, args);
6438 dprintf(fd, " Master balance: %f Left: %f Right: %f\n",
6439 mMasterBalance.load(), mMasterBalanceLeft, mMasterBalanceRight);
6440 }
6441
6442 void AudioFlinger::DirectOutputThread::setMasterBalance(float balance)
6443 {
6444 Mutex::Autolock _l(mLock);
6445 if (mMasterBalance != balance) {
6446 mMasterBalance.store(balance);
6447 mBalance.computeStereoBalance(balance, &mMasterBalanceLeft, &mMasterBalanceRight);
6448 broadcast_l();
6449 }
6450 }
6451
6452 void AudioFlinger::DirectOutputThread::processVolume_l(Track *track, bool lastTrack)
6453 {
6454 float left, right;
6455
6456 // Ensure volumeshaper state always advances even when muted.
6457 const sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
6458
6459 const size_t framesReleased = proxy->framesReleased();
6460 const int64_t frames = mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
6461 const int64_t time = mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
6462
6463 ALOGV("%s: Direct/Offload bufferConsumed:%zu timestamp frames:%lld time:%lld",
6464 __func__, framesReleased, (long long)frames, (long long)time);
6465
6466 const int64_t volumeShaperFrames =
6467 mMonotonicFrameCounter.updateAndGetMonotonicFrameCount(frames, time);
6468 const auto [shaperVolume, shaperActive] =
6469 track->getVolumeHandler()->getVolume(volumeShaperFrames);
6470 mVolumeShaperActive = shaperActive;
6471
6472 gain_minifloat_packed_t vlr = proxy->getVolumeLR();
6473 left = float_from_gain(gain_minifloat_unpack_left(vlr));
6474 right = float_from_gain(gain_minifloat_unpack_right(vlr));
6475
6476 const bool clientVolumeMute = (left == 0.f && right == 0.f);
6477
6478 if (mMasterMute || mStreamTypes[track->streamType()].mute || track->isPlaybackRestricted()) {
6479 left = right = 0;
6480 } else {
6481 float typeVolume = mStreamTypes[track->streamType()].volume;
6482 const float v = mMasterVolume * typeVolume * shaperVolume;
6483
6484 if (left > GAIN_FLOAT_UNITY) {
6485 left = GAIN_FLOAT_UNITY;
6486 }
6487 if (right > GAIN_FLOAT_UNITY) {
6488 right = GAIN_FLOAT_UNITY;
6489 }
6490 left *= v;
6491 right *= v;
6492 if (mAudioFlinger->getMode() != AUDIO_MODE_IN_COMMUNICATION
6493 || audio_channel_count_from_out_mask(mChannelMask) > 1) {
6494 left *= mMasterBalanceLeft; // DirectOutputThread balance applied as track volume
6495 right *= mMasterBalanceRight;
6496 }
6497 }
6498
6499 track->processMuteEvent_l(mAudioFlinger->getOrCreateAudioManager(),
6500 /*muteState=*/{mMasterMute,
6501 mStreamTypes[track->streamType()].volume == 0.f,
6502 mStreamTypes[track->streamType()].mute,
6503 track->isPlaybackRestricted(),
6504 clientVolumeMute,
6505 shaperVolume == 0.f});
6506
6507 if (lastTrack) {
6508 track->setFinalVolume(left, right);
6509 if (left != mLeftVolFloat || right != mRightVolFloat) {
6510 mLeftVolFloat = left;
6511 mRightVolFloat = right;
6512
6513 // Delegate volume control to effect in track effect chain if needed
6514 // only one effect chain can be present on DirectOutputThread, so if
6515 // there is one, the track is connected to it
6516 if (!mEffectChains.isEmpty()) {
6517 // if effect chain exists, volume is handled by it.
6518 // Convert volumes from float to 8.24
6519 uint32_t vl = (uint32_t)(left * (1 << 24));
6520 uint32_t vr = (uint32_t)(right * (1 << 24));
6521 // Direct/Offload effect chains set output volume in setVolume_l().
6522 (void)mEffectChains[0]->setVolume_l(&vl, &vr);
6523 } else {
6524 // otherwise we directly set the volume.
6525 setVolumeForOutput_l(left, right);
6526 }
6527 }
6528 }
6529 }
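// Illustrative note: in the effect-chain branch above, the float volumes are converted to
// 8.24 fixed point before setVolume_l(); e.g. left == 0.25f becomes vl == 0x00400000 and
// unity (1.0f) becomes 0x01000000 (1 << 24).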
6530
6531 void AudioFlinger::DirectOutputThread::onAddNewTrack_l()
6532 {
6533 sp<Track> previousTrack = mPreviousTrack.promote();
6534 sp<Track> latestTrack = mActiveTracks.getLatest();
6535
6536 if (previousTrack != 0 && latestTrack != 0) {
6537 if (mType == DIRECT) {
6538 if (previousTrack.get() != latestTrack.get()) {
6539 mFlushPending = true;
6540 }
6541 } else /* mType == OFFLOAD */ {
6542 if (previousTrack->sessionId() != latestTrack->sessionId() ||
6543 previousTrack->isFlushPending()) {
6544 mFlushPending = true;
6545 }
6546 }
6547 } else if (previousTrack == 0) {
6548 // An old track could be added back during the track transition for direct
6549 // output, so always issue a flush to discard data of the previous track if it
6550 // was already destroyed with the HAL paused; the flush can then resume playback.
6551 mFlushPending = true;
6552 }
6553 PlaybackThread::onAddNewTrack_l();
6554 }
6555
6556 AudioFlinger::PlaybackThread::mixer_state AudioFlinger::DirectOutputThread::prepareTracks_l(
6557 Vector< sp<Track> > *tracksToRemove
6558 )
6559 {
6560 size_t count = mActiveTracks.size();
6561 mixer_state mixerStatus = MIXER_IDLE;
6562 bool doHwPause = false;
6563 bool doHwResume = false;
6564
6565 // find out which tracks need to be processed
6566 for (const sp<Track> &t : mActiveTracks) {
6567 if (t->isInvalid()) {
6568 ALOGW("An invalidated track shouldn't be in active list");
6569 tracksToRemove->add(t);
6570 continue;
6571 }
6572
6573 Track* const track = t.get();
6574 #ifdef VERY_VERY_VERBOSE_LOGGING
6575 audio_track_cblk_t* cblk = track->cblk();
6576 #endif
6577 // Only consider last track started for volume and mixer state control.
6578 // In theory an older track could underrun and restart after the new one starts
6579 // but as we only care about the transition phase between two tracks on a
6580 // direct output, it is not a problem to ignore the underrun case.
6581 sp<Track> l = mActiveTracks.getLatest();
6582 bool last = l.get() == track;
6583
6584 if (track->isPausePending()) {
6585 track->pauseAck();
6586 // It is possible a track might have been flushed or stopped.
6587 // Other operations such as flush pending might occur on the next prepare.
6588 if (track->isPausing()) {
6589 track->setPaused();
6590 }
6591 // Always perform pause, as an immediate flush will change
6592 // the pause state to be no longer isPausing().
6593 if (mHwSupportsPause && last && !mHwPaused) {
6594 doHwPause = true;
6595 mHwPaused = true;
6596 }
6597 } else if (track->isFlushPending()) {
6598 track->flushAck();
6599 if (last) {
6600 mFlushPending = true;
6601 }
6602 } else if (track->isResumePending()) {
6603 track->resumeAck();
6604 if (last) {
6605 mLeftVolFloat = mRightVolFloat = -1.0;
6606 if (mHwPaused) {
6607 doHwResume = true;
6608 mHwPaused = false;
6609 }
6610 }
6611 }
6612
6613 // The first time a track is added we wait
6614 // for all its buffers to be filled before processing it.
6615 // Allow draining the buffer in case the client
6616 // app does not call stop() and relies on underrun to stop:
6617 // hence the test on (track->mRetryCount > 1).
6618 // If track->mRetryCount <= 1 then track is about to be disabled, paused, removed,
6619 // so we accept any nonzero amount of data delivered by the AudioTrack (which will
6620 // reset the retry counter).
6621 // Do not use a high threshold for compressed audio.
6622
6623 // target retry count that we will use is based on the time we wait for retries.
6624 const int32_t targetRetryCount = kMaxTrackRetriesDirectMs * 1000 / mActiveSleepTimeUs;
6625 // the retry threshold is when we accept any size for PCM data. This is slightly
6626 // smaller than the retry count so we can push small bits of data without a glitch.
6627 const int32_t retryThreshold = targetRetryCount > 2 ? targetRetryCount - 1 : 1;
6628 uint32_t minFrames;
6629 if ((track->sharedBuffer() == 0) && !track->isStopping_1() && !track->isPausing()
6630 && (track->mRetryCount > retryThreshold) && audio_has_proportional_frames(mFormat)) {
6631 minFrames = mNormalFrameCount;
6632 } else {
6633 minFrames = 1;
6634 }
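// Illustrative note: the retry budget scales with the sleep period. Assuming (hypothetical
// values) kMaxTrackRetriesDirectMs == 200 and mActiveSleepTimeUs == 10000, targetRetryCount
// == 20 and retryThreshold == 19, so a full buffer (minFrames == mNormalFrameCount) is only
// required while mRetryCount is still at its full budget; once retries begin, any nonzero
// amount of PCM (minFrames == 1) is accepted.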
6635
6636 const size_t framesReady = track->framesReady();
6637 const int trackId = track->id();
6638 if (ATRACE_ENABLED()) {
6639 std::string traceName("nRdy");
6640 traceName += std::to_string(trackId);
6641 ATRACE_INT(traceName.c_str(), framesReady);
6642 }
6643 if ((framesReady >= minFrames) && track->isReady() && !track->isPaused() &&
6644 !track->isStopping_2() && !track->isStopped())
6645 {
6646 ALOGVV("track(%d) s=%08x [OK]", trackId, cblk->mServer);
6647
6648 if (track->mFillingUpStatus == Track::FS_FILLED) {
6649 track->mFillingUpStatus = Track::FS_ACTIVE;
6650 if (last) {
6651 // make sure processVolume_l() will apply new volume even if 0
6652 mLeftVolFloat = mRightVolFloat = -1.0;
6653 }
6654 if (!mHwSupportsPause) {
6655 track->resumeAck();
6656 }
6657 }
6658
6659 // compute volume for this track
6660 processVolume_l(track, last);
6661 if (last) {
6662 sp<Track> previousTrack = mPreviousTrack.promote();
6663 if (previousTrack != 0) {
6664 if (track != previousTrack.get()) {
6665 // Flush any data still being written from last track
6666 mBytesRemaining = 0;
6667 // Invalidate previous track to force a seek when resuming.
6668 previousTrack->invalidate();
6669 }
6670 }
6671 mPreviousTrack = track;
6672
6673 // reset retry count
6674 track->mRetryCount = targetRetryCount;
6675 mActiveTrack = t;
6676 mixerStatus = MIXER_TRACKS_READY;
6677 if (mHwPaused) {
6678 doHwResume = true;
6679 mHwPaused = false;
6680 }
6681 }
6682 } else {
6683 // clear effect chain input buffer if the last active track started underruns
6684 // to avoid sending previous audio buffer again to effects
6685 if (!mEffectChains.isEmpty() && last) {
6686 mEffectChains[0]->clearInputBuffer();
6687 }
6688 if (track->isStopping_1()) {
6689 track->mState = TrackBase::STOPPING_2;
6690 if (last && mHwPaused) {
6691 doHwResume = true;
6692 mHwPaused = false;
6693 }
6694 }
6695 if ((track->sharedBuffer() != 0) || track->isStopped() ||
6696 track->isStopping_2() || track->isPaused()) {
6697 // We have consumed all the buffers of this track.
6698 // Remove it from the list of active tracks.
6699 bool presComplete = false;
6700 if (mStandby || !last ||
6701 (presComplete = track->presentationComplete(latency_l())) ||
6702 track->isPaused() || mHwPaused) {
6703 if (presComplete) {
6704 mOutput->presentationComplete();
6705 }
6706 if (track->isStopping_2()) {
6707 track->mState = TrackBase::STOPPED;
6708 }
6709 if (track->isStopped()) {
6710 track->reset();
6711 }
6712 tracksToRemove->add(track);
6713 }
6714 } else {
6715 // No buffers for this track. Give it a few chances to
6716 // fill a buffer, then remove it from active list.
6717 // Only consider last track started for mixer state control
6718 bool isTimestampAdvancing = mIsTimestampAdvancing.check(mOutput);
6719 if (!isTunerStream() // tuner streams remain active in underrun
6720 && --(track->mRetryCount) <= 0) {
6721 if (isTimestampAdvancing) { // HAL is still playing audio, give us more time.
6722 track->mRetryCount = kMaxTrackRetriesOffload;
6723 } else {
6724 ALOGV("BUFFER TIMEOUT: remove track(%d) from active list", trackId);
6725 tracksToRemove->add(track);
6726 // indicate to client process that the track was disabled because of
6727 // underrun; it will then automatically call start() when data is available
6728 track->disable();
6729 // only do hw pause when track is going to be removed due to BUFFER TIMEOUT.
6730 // unlike mixerthread, HAL can be paused for direct output
6731 ALOGW("pause because of UNDERRUN, framesReady = %zu,"
6732 "minFrames = %u, mFormat = %#x",
6733 framesReady, minFrames, mFormat);
6734 if (last && mHwSupportsPause && !mHwPaused && !mStandby) {
6735 doHwPause = true;
6736 mHwPaused = true;
6737 }
6738 }
6739 } else if (last) {
6740 mixerStatus = MIXER_TRACKS_ENABLED;
6741 }
6742 }
6743 }
6744 }
6745
6746 // if an active track did not command a flush, check for pending flush on stopped tracks
6747 if (!mFlushPending) {
6748 for (size_t i = 0; i < mTracks.size(); i++) {
6749 if (mTracks[i]->isFlushPending()) {
6750 mTracks[i]->flushAck();
6751 mFlushPending = true;
6752 }
6753 }
6754 }
6755
6756 // make sure the pause/flush/resume sequence is executed in the right order.
6757 // If a flush is pending and a track is active but the HW is not paused, force a HW pause
6758 // before flush and then resume HW. This can happen in case of pause/flush/resume
6759 // if resume is received before pause is executed.
6760 if (mHwSupportsPause && !mStandby &&
6761 (doHwPause || (mFlushPending && !mHwPaused && (count != 0)))) {
6762 status_t result = mOutput->stream->pause();
6763 ALOGE_IF(result != OK, "Error when pausing output stream: %d", result);
6764 doHwResume = !doHwPause; // resume if pause is due to flush.
6765 }
6766 if (mFlushPending) {
6767 flushHw_l();
6768 }
6769 if (mHwSupportsPause && !mStandby && doHwResume) {
6770 status_t result = mOutput->stream->resume();
6771 ALOGE_IF(result != OK, "Error when resuming output stream: %d", result);
6772 }
6773 // remove all the tracks that need to be...
6774 removeTracks_l(*tracksToRemove);
6775
6776 return mixerStatus;
6777 }
6778
6779 void AudioFlinger::DirectOutputThread::threadLoop_mix()
6780 {
6781 size_t frameCount = mFrameCount;
6782 int8_t *curBuf = (int8_t *)mSinkBuffer;
6783 // output audio to hardware
6784 while (frameCount) {
6785 AudioBufferProvider::Buffer buffer;
6786 buffer.frameCount = frameCount;
6787 status_t status = mActiveTrack->getNextBuffer(&buffer);
6788 if (status != NO_ERROR || buffer.raw == NULL) {
6789 // no need to pad with 0 for compressed audio
6790 if (audio_has_proportional_frames(mFormat)) {
6791 memset(curBuf, 0, frameCount * mFrameSize);
6792 }
6793 break;
6794 }
6795 memcpy(curBuf, buffer.raw, buffer.frameCount * mFrameSize);
6796 frameCount -= buffer.frameCount;
6797 curBuf += buffer.frameCount * mFrameSize;
6798 mActiveTrack->releaseBuffer(&buffer);
6799 }
6800 mCurrentWriteLength = curBuf - (int8_t *)mSinkBuffer;
6801 mSleepTimeUs = 0;
6802 mStandbyTimeNs = systemTime() + mStandbyDelayNs;
6803 mActiveTrack.clear();
6804 }
6805
6806 void AudioFlinger::DirectOutputThread::threadLoop_sleepTime()
6807 {
6808 // do not write to HAL when paused
6809 if (mHwPaused || (usesHwAvSync() && mStandby)) {
6810 mSleepTimeUs = mIdleSleepTimeUs;
6811 return;
6812 }
6813 if (mMixerStatus == MIXER_TRACKS_ENABLED) {
6814 mSleepTimeUs = mActiveSleepTimeUs;
6815 } else {
6816 mSleepTimeUs = mIdleSleepTimeUs;
6817 }
6818 // Note: In S or later, we do not write zeroes for
6819 // linear or proportional PCM direct tracks in underrun.
6820 }
6821
6822 void AudioFlinger::DirectOutputThread::threadLoop_exit()
6823 {
6824 {
6825 Mutex::Autolock _l(mLock);
6826 for (size_t i = 0; i < mTracks.size(); i++) {
6827 if (mTracks[i]->isFlushPending()) {
6828 mTracks[i]->flushAck();
6829 mFlushPending = true;
6830 }
6831 }
6832 if (mFlushPending) {
6833 flushHw_l();
6834 }
6835 }
6836 PlaybackThread::threadLoop_exit();
6837 }
6838
6839 // must be called with thread mutex locked
6840 bool AudioFlinger::DirectOutputThread::shouldStandby_l()
6841 {
6842 bool trackPaused = false;
6843 bool trackStopped = false;
6844
6845 // do not put the HAL in standby when paused. AwesomePlayer clears the offloaded AudioTrack
6846 // after a timeout and we will enter standby then.
6847 if (mTracks.size() > 0) {
6848 trackPaused = mTracks[mTracks.size() - 1]->isPaused();
6849 trackStopped = mTracks[mTracks.size() - 1]->isStopped() ||
6850 mTracks[mTracks.size() - 1]->mState == TrackBase::IDLE;
6851 }
6852
6853 return !mStandby && !(trackPaused || (mHwPaused && !trackStopped));
6854 }
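// Illustrative note: the expression above keeps the HAL out of standby while the most recent
// track is paused, or while the HAL itself is paused and that track has not stopped; and it
// always returns false once mStandby is set, since the thread is already in standby.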
6855
6856 // checkForNewParameter_l() must be called with ThreadBase::mLock held
6857 bool AudioFlinger::DirectOutputThread::checkForNewParameter_l(const String8& keyValuePair,
6858 status_t& status)
6859 {
6860 bool reconfig = false;
6861 status = NO_ERROR;
6862
6863 AudioParameter param = AudioParameter(keyValuePair);
6864 int value;
6865 if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
6866 LOG_FATAL("Should not set routing device in DirectOutputThread");
6867 }
6868 if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
6869 // do not accept frame count changes if tracks are open as the track buffer
6870 // size depends on frame count and correct behavior would not be guaranteed
6871 // if frame count is changed after track creation
6872 if (!mTracks.isEmpty()) {
6873 status = INVALID_OPERATION;
6874 } else {
6875 reconfig = true;
6876 }
6877 }
6878 if (status == NO_ERROR) {
6879 status = mOutput->stream->setParameters(keyValuePair);
6880 if (!mStandby && status == INVALID_OPERATION) {
6881 mOutput->standby();
6882 if (!mStandby) {
6883 mThreadMetrics.logEndInterval();
6884 mThreadSnapshot.onEnd();
6885 setStandby_l();
6886 }
6887 mBytesWritten = 0;
6888 status = mOutput->stream->setParameters(keyValuePair);
6889 }
6890 if (status == NO_ERROR && reconfig) {
6891 readOutputParameters_l();
6892 sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
6893 }
6894 }
6895
6896 return reconfig;
6897 }
6898
6899 uint32_t AudioFlinger::DirectOutputThread::activeSleepTimeUs() const
6900 {
6901 uint32_t time;
6902 if (audio_has_proportional_frames(mFormat)) {
6903 time = PlaybackThread::activeSleepTimeUs();
6904 } else {
6905 time = kDirectMinSleepTimeUs;
6906 }
6907 return time;
6908 }
6909
6910 uint32_t AudioFlinger::DirectOutputThread::idleSleepTimeUs() const
6911 {
6912 uint32_t time;
6913 if (audio_has_proportional_frames(mFormat)) {
6914 time = (uint32_t)(((mFrameCount * 1000) / mSampleRate) * 1000) / 2;
6915 } else {
6916 time = kDirectMinSleepTimeUs;
6917 }
6918 return time;
6919 }
6920
6921 uint32_t AudioFlinger::DirectOutputThread::suspendSleepTimeUs() const
6922 {
6923 uint32_t time;
6924 if (audio_has_proportional_frames(mFormat)) {
6925 time = (uint32_t)(((mFrameCount * 1000) / mSampleRate) * 1000);
6926 } else {
6927 time = kDirectMinSleepTimeUs;
6928 }
6929 return time;
6930 }
6931
6932 void AudioFlinger::DirectOutputThread::cacheParameters_l()
6933 {
6934 PlaybackThread::cacheParameters_l();
6935
6936 // use shorter standby delay as on normal output to release
6937 // hardware resources as soon as possible
6938 // no delay on outputs with HW A/V sync
6939 if (usesHwAvSync()) {
6940 mStandbyDelayNs = 0;
6941 } else if ((mType == OFFLOAD) && !audio_has_proportional_frames(mFormat)) {
6942 mStandbyDelayNs = kOffloadStandbyDelayNs;
6943 } else {
6944 mStandbyDelayNs = microseconds(mActiveSleepTimeUs*2);
6945 }
6946 }
6947
6948 void AudioFlinger::DirectOutputThread::flushHw_l()
6949 {
6950 PlaybackThread::flushHw_l();
6951 mOutput->flush();
6952 mHwPaused = false;
6953 mFlushPending = false;
6954 mTimestampVerifier.discontinuity(discontinuityForStandbyOrFlush());
6955 mTimestamp.clear();
6956 mMonotonicFrameCounter.onFlush();
6957 }
6958
6959 int64_t AudioFlinger::DirectOutputThread::computeWaitTimeNs_l() const {
6960 // If a VolumeShaper is active, we must wake up periodically to update volume.
6961 const int64_t NS_PER_MS = 1000000;
6962 return mVolumeShaperActive ?
6963 kMinNormalSinkBufferSizeMs * NS_PER_MS : PlaybackThread::computeWaitTimeNs_l();
6964 }
6965
6966 // ----------------------------------------------------------------------------
6967
6968 AudioFlinger::AsyncCallbackThread::AsyncCallbackThread(
6969 const wp<AudioFlinger::PlaybackThread>& playbackThread)
6970 : Thread(false /*canCallJava*/),
6971 mPlaybackThread(playbackThread),
6972 mWriteAckSequence(0),
6973 mDrainSequence(0),
6974 mAsyncError(false)
6975 {
6976 }
6977
6978 AudioFlinger::AsyncCallbackThread::~AsyncCallbackThread()
6979 {
6980 }
6981
6982 void AudioFlinger::AsyncCallbackThread::onFirstRef()
6983 {
6984 run("Offload Cbk", ANDROID_PRIORITY_URGENT_AUDIO);
6985 }
6986
6987 bool AudioFlinger::AsyncCallbackThread::threadLoop()
6988 {
6989 while (!exitPending()) {
6990 uint32_t writeAckSequence;
6991 uint32_t drainSequence;
6992 bool asyncError;
6993
6994 {
6995 Mutex::Autolock _l(mLock);
6996 while (!((mWriteAckSequence & 1) ||
6997 (mDrainSequence & 1) ||
6998 mAsyncError ||
6999 exitPending())) {
7000 mWaitWorkCV.wait(mLock);
7001 }
7002
7003 if (exitPending()) {
7004 break;
7005 }
7006 ALOGV("AsyncCallbackThread mWriteAckSequence %d mDrainSequence %d",
7007 mWriteAckSequence, mDrainSequence);
7008 writeAckSequence = mWriteAckSequence;
7009 mWriteAckSequence &= ~1;
7010 drainSequence = mDrainSequence;
7011 mDrainSequence &= ~1;
7012 asyncError = mAsyncError;
7013 mAsyncError = false;
7014 }
7015 {
7016 sp<AudioFlinger::PlaybackThread> playbackThread = mPlaybackThread.promote();
7017 if (playbackThread != 0) {
7018 if (writeAckSequence & 1) {
7019 playbackThread->resetWriteBlocked(writeAckSequence >> 1);
7020 }
7021 if (drainSequence & 1) {
7022 playbackThread->resetDraining(drainSequence >> 1);
7023 }
7024 if (asyncError) {
7025 playbackThread->onAsyncError();
7026 }
7027 }
7028 }
7029 }
7030 return false;
7031 }
7032
7033 void AudioFlinger::AsyncCallbackThread::exit()
7034 {
7035 ALOGV("AsyncCallbackThread::exit");
7036 Mutex::Autolock _l(mLock);
7037 requestExit();
7038 mWaitWorkCV.broadcast();
7039 }
7040
7041 void AudioFlinger::AsyncCallbackThread::setWriteBlocked(uint32_t sequence)
7042 {
7043 Mutex::Autolock _l(mLock);
7044 // bit 0 is cleared
7045 mWriteAckSequence = sequence << 1;
7046 }
7047
7048 void AudioFlinger::AsyncCallbackThread::resetWriteBlocked()
7049 {
7050 Mutex::Autolock _l(mLock);
7051 // ignore unexpected callbacks
7052 if (mWriteAckSequence & 2) {
7053 mWriteAckSequence |= 1;
7054 mWaitWorkCV.signal();
7055 }
7056 }
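// Illustrative note on the sequence encoding used by setWriteBlocked()/resetWriteBlocked()
// (and likewise setDraining()/resetDraining()): the caller's sequence is stored shifted left
// by one, and bit 0 is set only once the HAL callback has arrived. A rough trace with a
// hypothetical sequence value of 5:
//   setWriteBlocked(5)   -> mWriteAckSequence = 5 << 1 = 0b1010   (callback not yet received)
//   resetWriteBlocked()  -> bit 1 is set, so mWriteAckSequence |= 1 -> 0b1011, signal
//   threadLoop()         -> sees bit 0, forwards 0b1011 >> 1 == 5 to the PlaybackThread's
//                           resetWriteBlocked(5)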
7057
7058 void AudioFlinger::AsyncCallbackThread::setDraining(uint32_t sequence)
7059 {
7060 Mutex::Autolock _l(mLock);
7061 // bit 0 is cleared
7062 mDrainSequence = sequence << 1;
7063 }
7064
7065 void AudioFlinger::AsyncCallbackThread::resetDraining()
7066 {
7067 Mutex::Autolock _l(mLock);
7068 // ignore unexpected callbacks
7069 if (mDrainSequence & 2) {
7070 mDrainSequence |= 1;
7071 mWaitWorkCV.signal();
7072 }
7073 }
7074
7075 void AudioFlinger::AsyncCallbackThread::setAsyncError()
7076 {
7077 Mutex::Autolock _l(mLock);
7078 mAsyncError = true;
7079 mWaitWorkCV.signal();
7080 }
7081
7082
7083 // ----------------------------------------------------------------------------
7084 AudioFlinger::OffloadThread::OffloadThread(const sp<AudioFlinger>& audioFlinger,
7085 AudioStreamOut* output, audio_io_handle_t id, bool systemReady,
7086 const audio_offload_info_t& offloadInfo)
7087 : DirectOutputThread(audioFlinger, output, id, OFFLOAD, systemReady, offloadInfo),
7088 mPausedWriteLength(0), mPausedBytesRemaining(0), mKeepWakeLock(true)
7089 {
7090 // FIXME: mStandby should be set to true by the ThreadBase constructor
7091 mStandby = true;
7092 mKeepWakeLock = property_get_bool("ro.audio.offload_wakelock", true /* default_value */);
7093 }
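// Illustrative note: the wake lock behavior can be disabled at boot by setting the read-only
// property "ro.audio.offload_wakelock" to false; property_get_bool() returns the supplied
// default (true here) when the property is not set.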
7094
7095 void AudioFlinger::OffloadThread::threadLoop_exit()
7096 {
7097 if (mFlushPending || mHwPaused) {
7098 // If a flush is pending or track was paused, just discard buffered data
7099 flushHw_l();
7100 } else {
7101 mMixerStatus = MIXER_DRAIN_ALL;
7102 threadLoop_drain();
7103 }
7104 if (mUseAsyncWrite) {
7105 ALOG_ASSERT(mCallbackThread != 0);
7106 mCallbackThread->exit();
7107 }
7108 PlaybackThread::threadLoop_exit();
7109 }
7110
7111 AudioFlinger::PlaybackThread::mixer_state AudioFlinger::OffloadThread::prepareTracks_l(
7112 Vector< sp<Track> > *tracksToRemove
7113 )
7114 {
7115 size_t count = mActiveTracks.size();
7116
7117 mixer_state mixerStatus = MIXER_IDLE;
7118 bool doHwPause = false;
7119 bool doHwResume = false;
7120
7121 ALOGV("OffloadThread::prepareTracks_l active tracks %zu", count);
7122
7123 // find out which tracks need to be processed
7124 for (const sp<Track> &t : mActiveTracks) {
7125 Track* const track = t.get();
7126 #ifdef VERY_VERY_VERBOSE_LOGGING
7127 audio_track_cblk_t* cblk = track->cblk();
7128 #endif
7129 // Only consider last track started for volume and mixer state control.
7130 // In theory an older track could underrun and restart after the new one starts
7131 // but as we only care about the transition phase between two tracks on a
7132 // direct output, it is not a problem to ignore the underrun case.
7133 sp<Track> l = mActiveTracks.getLatest();
7134 bool last = l.get() == track;
7135
7136 if (track->isInvalid()) {
7137 ALOGW("An invalidated track shouldn't be in active list");
7138 tracksToRemove->add(track);
7139 continue;
7140 }
7141
7142 if (track->mState == TrackBase::IDLE) {
7143 ALOGW("An idle track shouldn't be in active list");
7144 continue;
7145 }
7146
7147 if (track->isPausePending()) {
7148 track->pauseAck();
7149 // It is possible a track might have been flushed or stopped.
7150 // Other operations such as flush pending might occur on the next prepare.
7151 if (track->isPausing()) {
7152 track->setPaused();
7153 }
7154 // Always perform pause if last, as an immediate flush will change
7155 // the pause state to be no longer isPausing().
7156 if (last) {
7157 if (mHwSupportsPause && !mHwPaused) {
7158 doHwPause = true;
7159 mHwPaused = true;
7160 }
7161 // If we were part way through writing the mixbuffer to
7162 // the HAL we must save this until we resume
7163 // BUG - this will be wrong if a different track is made active,
7164 // in that case we want to discard the pending data in the
7165 // mixbuffer and tell the client to present it again when the
7166 // track is resumed
7167 mPausedWriteLength = mCurrentWriteLength;
7168 mPausedBytesRemaining = mBytesRemaining;
7169 mBytesRemaining = 0; // stop writing
7170 }
7171 tracksToRemove->add(track);
7172 } else if (track->isFlushPending()) {
7173 if (track->isStopping_1()) {
7174 track->mRetryCount = kMaxTrackStopRetriesOffload;
7175 } else {
7176 track->mRetryCount = kMaxTrackRetriesOffload;
7177 }
7178 track->flushAck();
7179 if (last) {
7180 mFlushPending = true;
7181 }
7182 } else if (track->isResumePending()){
7183 track->resumeAck();
7184 if (last) {
7185 if (mPausedBytesRemaining) {
7186 // Need to continue write that was interrupted
7187 mCurrentWriteLength = mPausedWriteLength;
7188 mBytesRemaining = mPausedBytesRemaining;
7189 mPausedBytesRemaining = 0;
7190 }
7191 if (mHwPaused) {
7192 doHwResume = true;
7193 mHwPaused = false;
7194 // threadLoop_mix() will handle the case that we need to
7195 // resume an interrupted write
7196 }
7197 // enable write to audio HAL
7198 mSleepTimeUs = 0;
7199
7200 mLeftVolFloat = mRightVolFloat = -1.0;
7201
7202 // Do not handle new data in this iteration even if track->framesReady()
7203 mixerStatus = MIXER_TRACKS_ENABLED;
7204 }
7205 } else if (track->framesReady() && track->isReady() &&
7206 !track->isPaused() && !track->isTerminated() && !track->isStopping_2()) {
7207 ALOGVV("OffloadThread: track(%d) s=%08x [OK]", track->id(), cblk->mServer);
7208 if (track->mFillingUpStatus == Track::FS_FILLED) {
7209 track->mFillingUpStatus = Track::FS_ACTIVE;
7210 if (last) {
7211 // make sure processVolume_l() will apply new volume even if 0
7212 mLeftVolFloat = mRightVolFloat = -1.0;
7213 }
7214 }
7215
7216 if (last) {
7217 sp<Track> previousTrack = mPreviousTrack.promote();
7218 if (previousTrack != 0) {
7219 if (track != previousTrack.get()) {
7220 // Flush any data still being written from last track
7221 mBytesRemaining = 0;
7222 if (mPausedBytesRemaining) {
7223 // Last track was paused so we also need to flush saved
7224 // mixbuffer state and invalidate track so that it will
7225 // re-submit that unwritten data when it is next resumed
7226 mPausedBytesRemaining = 0;
7227 // Invalidate is a bit drastic - would be more efficient
7228 // to have a flag to tell client that some of the
7229 // previously written data was lost
7230 previousTrack->invalidate();
7231 }
7232 // flush data already sent to the DSP if changing audio session as audio
7233 // comes from a different source. Also invalidate previous track to force a
7234 // seek when resuming.
7235 if (previousTrack->sessionId() != track->sessionId()) {
7236 previousTrack->invalidate();
7237 }
7238 }
7239 }
7240 mPreviousTrack = track;
7241 // reset retry count
7242 if (track->isStopping_1()) {
7243 track->mRetryCount = kMaxTrackStopRetriesOffload;
7244 } else {
7245 track->mRetryCount = kMaxTrackRetriesOffload;
7246 }
7247 mActiveTrack = t;
7248 mixerStatus = MIXER_TRACKS_READY;
7249 }
7250 } else {
7251 ALOGVV("OffloadThread: track(%d) s=%08x [NOT READY]", track->id(), cblk->mServer);
7252 if (track->isStopping_1()) {
7253 if (--(track->mRetryCount) <= 0) {
7254 // Hardware buffer can hold a large amount of audio so we must
7255 // wait for all current track's data to drain before we say
7256 // that the track is stopped.
7257 if (mBytesRemaining == 0) {
7258 // Only start draining when all data in mixbuffer
7259 // has been written
7260 ALOGV("OffloadThread: underrun and STOPPING_1 -> draining, STOPPING_2");
7261 track->mState = TrackBase::STOPPING_2; // so presentation completes after drain
7262 // do not drain if no data was ever sent to HAL (mStandby == true)
7263 if (last && !mStandby) {
7264 // do not modify drain sequence if we are already draining. This happens
7265 // when resuming from pause after drain.
7266 if ((mDrainSequence & 1) == 0) {
7267 mSleepTimeUs = 0;
7268 mStandbyTimeNs = systemTime() + mStandbyDelayNs;
7269 mixerStatus = MIXER_DRAIN_TRACK;
7270 mDrainSequence += 2;
7271 }
7272 if (mHwPaused) {
7273 // It is possible to move from PAUSED to STOPPING_1 without
7274 // a resume so we must ensure hardware is running
7275 doHwResume = true;
7276 mHwPaused = false;
7277 }
7278 }
7279 }
7280 } else if (last) {
7281 ALOGV("stopping1 underrun retries left %d", track->mRetryCount);
7282 mixerStatus = MIXER_TRACKS_ENABLED;
7283 }
7284 } else if (track->isStopping_2()) {
7285 // Drain has completed or we are in standby, signal presentation complete
7286 if (!(mDrainSequence & 1) || !last || mStandby) {
7287 track->mState = TrackBase::STOPPED;
7288 mOutput->presentationComplete();
7289 track->presentationComplete(latency_l()); // always returns true
7290 track->reset();
7291 tracksToRemove->add(track);
7292 // OFFLOADED stop resets frame counts.
7293 if (!mUseAsyncWrite) {
7294 // If we don't get explicit drain notification we must
7295 // register discontinuity regardless of whether this is
7296 // the previous (!last) or the upcoming (last) track
7297 // to avoid skipping the discontinuity.
7298 mTimestampVerifier.discontinuity(
7299 mTimestampVerifier.DISCONTINUITY_MODE_ZERO);
7300 }
7301 }
7302 } else {
7303 // No buffers for this track. Give it a few chances to
7304 // fill a buffer, then remove it from active list.
7305 bool isTimestampAdvancing = mIsTimestampAdvancing.check(mOutput);
7306 if (!isTunerStream() // tuner streams remain active in underrun
7307 && --(track->mRetryCount) <= 0) {
7308 if (isTimestampAdvancing) { // HAL is still playing audio, give us more time.
7309 track->mRetryCount = kMaxTrackRetriesOffload;
7310 } else {
7311 ALOGV("OffloadThread: BUFFER TIMEOUT: remove track(%d) from active list",
7312 track->id());
7313 tracksToRemove->add(track);
7314 // tell client process that the track was disabled because of underrun;
7315 // it will then automatically call start() when data is available
7316 track->disable();
7317 }
7318 } else if (last) {
7319 mixerStatus = MIXER_TRACKS_ENABLED;
7320 }
7321 }
7322 }
7323 // compute volume for this track
7324 if (track->isReady()) { // check ready to prevent premature start.
7325 processVolume_l(track, last);
7326 }
7327 }
7328
7329 // make sure the pause/flush/resume sequence is executed in the right order.
7330 // If a flush is pending and a track is active but the HW is not paused, force a HW pause
7331 // before flush and then resume HW. This can happen in case of pause/flush/resume
7332 // if resume is received before pause is executed.
7333 if (!mStandby && (doHwPause || (mFlushPending && !mHwPaused && (count != 0)))) {
7334 status_t result = mOutput->stream->pause();
7335 ALOGE_IF(result != OK, "Error when pausing output stream: %d", result);
7336 doHwResume = !doHwPause; // resume if pause is due to flush.
7337 }
7338 if (mFlushPending) {
7339 flushHw_l();
7340 }
7341 if (!mStandby && doHwResume) {
7342 status_t result = mOutput->stream->resume();
7343 ALOGE_IF(result != OK, "Error when resuming output stream: %d", result);
7344 }
7345
7346 // remove all the tracks that need to be...
7347 removeTracks_l(*tracksToRemove);
7348
7349 return mixerStatus;
7350 }
7351
7352 // must be called with thread mutex locked
7353 bool AudioFlinger::OffloadThread::waitingAsyncCallback_l()
7354 {
7355 ALOGVV("waitingAsyncCallback_l mWriteAckSequence %d mDrainSequence %d",
7356 mWriteAckSequence, mDrainSequence);
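    // Note: mWriteAckSequence and mDrainSequence use their least-significant bit as a
    // "callback pending" flag, so an odd value means a write or drain has been issued to
    // the HAL and its async callback has not yet arrived.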
7357 if (mUseAsyncWrite && ((mWriteAckSequence & 1) || (mDrainSequence & 1))) {
7358 return true;
7359 }
7360 return false;
7361 }
7362
7363 bool AudioFlinger::OffloadThread::waitingAsyncCallback()
7364 {
7365 Mutex::Autolock _l(mLock);
7366 return waitingAsyncCallback_l();
7367 }
7368
7369 void AudioFlinger::OffloadThread::flushHw_l()
7370 {
7371 DirectOutputThread::flushHw_l();
7372 // Flush anything still waiting in the mixbuffer
7373 mCurrentWriteLength = 0;
7374 mBytesRemaining = 0;
7375 mPausedWriteLength = 0;
7376 mPausedBytesRemaining = 0;
7377 // reset bytes written count to reflect that DSP buffers are empty after flush.
7378 mBytesWritten = 0;
7379
7380 if (mUseAsyncWrite) {
7381 // discard any pending drain or write ack by incrementing sequence
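        // Adding 2 bumps the sequence past any in-flight request, and clearing the low bit
        // (the "callback pending" flag) tells waitingAsyncCallback_l() that we are no longer
        // waiting; the callback thread is updated so stale callbacks are ignored.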
7382 mWriteAckSequence = (mWriteAckSequence + 2) & ~1;
7383 mDrainSequence = (mDrainSequence + 2) & ~1;
7384 ALOG_ASSERT(mCallbackThread != 0);
7385 mCallbackThread->setWriteBlocked(mWriteAckSequence);
7386 mCallbackThread->setDraining(mDrainSequence);
7387 }
7388 }
7389
7390 void AudioFlinger::OffloadThread::invalidateTracks(audio_stream_type_t streamType)
7391 {
7392 Mutex::Autolock _l(mLock);
7393 if (PlaybackThread::invalidateTracks_l(streamType)) {
7394 mFlushPending = true;
7395 }
7396 }
7397
7398 void AudioFlinger::OffloadThread::invalidateTracks(std::set<audio_port_handle_t>& portIds) {
7399 Mutex::Autolock _l(mLock);
7400 if (PlaybackThread::invalidateTracks_l(portIds)) {
7401 mFlushPending = true;
7402 }
7403 }
7404
7405 // ----------------------------------------------------------------------------
7406
7407 AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger,
7408 AudioFlinger::MixerThread* mainThread, audio_io_handle_t id, bool systemReady)
7409 : MixerThread(audioFlinger, mainThread->getOutput(), id,
7410 systemReady, DUPLICATING),
7411 mWaitTimeMs(UINT_MAX)
7412 {
7413 addOutputTrack(mainThread);
7414 }
7415
7416 AudioFlinger::DuplicatingThread::~DuplicatingThread()
7417 {
7418 for (size_t i = 0; i < mOutputTracks.size(); i++) {
7419 mOutputTracks[i]->destroy();
7420 }
7421 }
7422
7423 void AudioFlinger::DuplicatingThread::threadLoop_mix()
7424 {
7425 // mix buffers...
7426 if (outputsReady()) {
7427 mAudioMixer->process();
7428 } else {
7429 if (mMixerBufferValid) {
7430 memset(mMixerBuffer, 0, mMixerBufferSize);
7431 } else {
7432 memset(mSinkBuffer, 0, mSinkBufferSize);
7433 }
7434 }
7435 mSleepTimeUs = 0;
7436 writeFrames = mNormalFrameCount;
7437 mCurrentWriteLength = mSinkBufferSize;
7438 mStandbyTimeNs = systemTime() + mStandbyDelayNs;
7439 }
7440
7441 void AudioFlinger::DuplicatingThread::threadLoop_sleepTime()
7442 {
7443 if (mSleepTimeUs == 0) {
7444 if (mMixerStatus == MIXER_TRACKS_ENABLED) {
7445 mSleepTimeUs = mActiveSleepTimeUs;
7446 } else {
7447 mSleepTimeUs = mIdleSleepTimeUs;
7448 }
7449 } else if (mBytesWritten != 0) {
7450 if (mMixerStatus == MIXER_TRACKS_ENABLED) {
7451 writeFrames = mNormalFrameCount;
7452 memset(mSinkBuffer, 0, mSinkBufferSize);
7453 } else {
7454 // flush remaining overflow buffers in output tracks
7455 writeFrames = 0;
7456 }
7457 mSleepTimeUs = 0;
7458 }
7459 }
7460
7461 ssize_t AudioFlinger::DuplicatingThread::threadLoop_write()
7462 {
7463 for (size_t i = 0; i < outputTracks.size(); i++) {
7464 const ssize_t actualWritten = outputTracks[i]->write(mSinkBuffer, writeFrames);
7465
7466 // Consider the first OutputTrack for timestamp and frame counting.
7467
7468 // The threadLoop() generally assumes writing a full sink buffer size at a time.
7469 // Here, we correct for writeFrames of 0 (a stop) or underruns because
7470 // we always claim success.
7471 if (i == 0) {
7472 const ssize_t correction = mSinkBufferSize / mFrameSize - actualWritten;
7473 ALOGD_IF(correction != 0 && writeFrames != 0,
7474 "%s: writeFrames:%u actualWritten:%zd correction:%zd mFramesWritten:%lld",
7475 __func__, writeFrames, actualWritten, correction, (long long)mFramesWritten);
7476 mFramesWritten -= correction;
7477 }
7478
7479 // TODO: Report correction for the other output tracks and show in the dump.
7480 }
7481 if (mStandby) {
7482 mThreadMetrics.logBeginInterval();
7483 mThreadSnapshot.onBegin();
7484 mStandby = false;
7485 }
7486 return (ssize_t)mSinkBufferSize;
7487 }
7488
7489 void AudioFlinger::DuplicatingThread::threadLoop_standby()
7490 {
7491 // DuplicatingThread implements standby by stopping all tracks
7492 for (size_t i = 0; i < outputTracks.size(); i++) {
7493 outputTracks[i]->stop();
7494 }
7495 }
7496
7497 void AudioFlinger::DuplicatingThread::dumpInternals_l(int fd, const Vector<String16>& args)
7498 {
7499 MixerThread::dumpInternals_l(fd, args);
7500
7501 std::stringstream ss;
7502 const size_t numTracks = mOutputTracks.size();
7503 ss << " " << numTracks << " OutputTracks";
7504 if (numTracks > 0) {
7505 ss << ":";
7506 for (const auto &track : mOutputTracks) {
7507 const sp<ThreadBase> thread = track->thread().promote();
7508 ss << " (" << track->id() << " : ";
7509 if (thread.get() != nullptr) {
7510 ss << thread.get() << ", " << thread->id();
7511 } else {
7512 ss << "null";
7513 }
7514 ss << ")";
7515 }
7516 }
7517 ss << "\n";
7518 std::string result = ss.str();
7519 write(fd, result.c_str(), result.size());
7520 }
7521
7522 void AudioFlinger::DuplicatingThread::saveOutputTracks()
7523 {
7524 outputTracks = mOutputTracks;
7525 }
7526
7527 void AudioFlinger::DuplicatingThread::clearOutputTracks()
7528 {
7529 outputTracks.clear();
7530 }
7531
7532 void AudioFlinger::DuplicatingThread::addOutputTrack(MixerThread *thread)
7533 {
7534 Mutex::Autolock _l(mLock);
7535 // The downstream MixerThread consumes thread->frameCount() amount of frames per mix pass.
7536 // Adjust for thread->sampleRate() to determine minimum buffer frame count.
7537 // Then triple buffer because Threads do not run synchronously and may not be clock locked.
7538 const size_t frameCount =
7539 3 * sourceFramesNeeded(mSampleRate, thread->frameCount(), thread->sampleRate());
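    // For illustration: duplicating at 48 kHz into a MixerThread running at 44.1 kHz with a
    // 1024-frame mix buffer needs roughly 1024 * 48000 / 44100 ~= 1115 source frames per
    // downstream mix pass (plus any resampler margin), so frameCount ends up around 3 * 1115.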
7540 // TODO: Consider asynchronous sample rate conversion to handle clock disparity
7541 // from different OutputTracks and their associated MixerThreads (e.g. one may be
7542 // nearly empty and the other may be dropping data).
7543
7544 // TODO b/182392769: use attribution source util, move to server edge
7545 AttributionSourceState attributionSource = AttributionSourceState();
7546 attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(
7547 IPCThreadState::self()->getCallingUid()));
7548 attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(
7549 IPCThreadState::self()->getCallingPid()));
7550 attributionSource.token = sp<BBinder>::make();
7551 sp<OutputTrack> outputTrack = new OutputTrack(thread,
7552 this,
7553 mSampleRate,
7554 mFormat,
7555 mChannelMask,
7556 frameCount,
7557 attributionSource);
7558 status_t status = outputTrack != 0 ? outputTrack->initCheck() : (status_t) NO_MEMORY;
7559 if (status != NO_ERROR) {
7560 ALOGE("addOutputTrack() initCheck failed %d", status);
7561 return;
7562 }
7563 thread->setStreamVolume(AUDIO_STREAM_PATCH, 1.0f);
7564 mOutputTracks.add(outputTrack);
7565 ALOGV("addOutputTrack() track %p, on thread %p", outputTrack.get(), thread);
7566 updateWaitTime_l();
7567 }
7568
7569 void AudioFlinger::DuplicatingThread::removeOutputTrack(MixerThread *thread)
7570 {
7571 Mutex::Autolock _l(mLock);
7572 for (size_t i = 0; i < mOutputTracks.size(); i++) {
7573 if (mOutputTracks[i]->thread() == thread) {
7574 mOutputTracks[i]->destroy();
7575 mOutputTracks.removeAt(i);
7576 updateWaitTime_l();
7577 if (thread->getOutput() == mOutput) {
7578 mOutput = NULL;
7579 }
7580 return;
7581 }
7582 }
7583 ALOGV("removeOutputTrack(): unknown thread: %p", thread);
7584 }
7585
7586 // caller must hold mLock
7587 void AudioFlinger::DuplicatingThread::updateWaitTime_l()
7588 {
7589 mWaitTimeMs = UINT_MAX;
7590 for (size_t i = 0; i < mOutputTracks.size(); i++) {
7591 sp<ThreadBase> strong = mOutputTracks[i]->thread().promote();
7592 if (strong != 0) {
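            // Wait time is two mix periods of the downstream thread, expressed in ms;
            // the minimum across all output threads is kept.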
7593 uint32_t waitTimeMs = (strong->frameCount() * 2 * 1000) / strong->sampleRate();
7594 if (waitTimeMs < mWaitTimeMs) {
7595 mWaitTimeMs = waitTimeMs;
7596 }
7597 }
7598 }
7599 }
7600
7601 bool AudioFlinger::DuplicatingThread::outputsReady()
7602 {
7603 for (size_t i = 0; i < outputTracks.size(); i++) {
7604 sp<ThreadBase> thread = outputTracks[i]->thread().promote();
7605 if (thread == 0) {
7606 ALOGW("DuplicatingThread::outputsReady() could not promote thread on output track %p",
7607 outputTracks[i].get());
7608 return false;
7609 }
7610 PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
7611 // see note at standby() declaration
7612 if (playbackThread->standby() && !playbackThread->isSuspended()) {
7613 ALOGV("DuplicatingThread output track %p on thread %p Not Ready", outputTracks[i].get(),
7614 thread.get());
7615 return false;
7616 }
7617 }
7618 return true;
7619 }
7620
7621 void AudioFlinger::DuplicatingThread::sendMetadataToBackend_l(
7622 const StreamOutHalInterface::SourceMetadata& metadata)
7623 {
7624 for (auto& outputTrack : outputTracks) { // not mOutputTracks
7625 outputTrack->setMetadatas(metadata.tracks);
7626 }
7627 }
7628
7629 uint32_t AudioFlinger::DuplicatingThread::activeSleepTimeUs() const
7630 {
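    // Sleep for half of mWaitTimeMs (roughly one mix period of the fastest downstream
    // thread), converted from milliseconds to microseconds.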
7631 return (mWaitTimeMs * 1000) / 2;
7632 }
7633
7634 void AudioFlinger::DuplicatingThread::cacheParameters_l()
7635 {
7636 // updateWaitTime_l() sets mWaitTimeMs, which affects activeSleepTimeUs(), so call it first
7637 updateWaitTime_l();
7638
7639 MixerThread::cacheParameters_l();
7640 }
7641
7642 // ----------------------------------------------------------------------------
7643
7644 AudioFlinger::SpatializerThread::SpatializerThread(const sp<AudioFlinger>& audioFlinger,
7645 AudioStreamOut* output,
7646 audio_io_handle_t id,
7647 bool systemReady,
7648 audio_config_base_t *mixerConfig)
7649 : MixerThread(audioFlinger, output, id, systemReady, SPATIALIZER, mixerConfig)
7650 {
7651 }
7652
7653 void AudioFlinger::SpatializerThread::onFirstRef() {
7654 MixerThread::onFirstRef();
7655
7656 const pid_t tid = getTid();
7657 if (tid == -1) {
7658 // Unusual: PlaybackThread::onFirstRef() should set the threadLoop running.
7659 ALOGW("%s: Cannot update Spatializer mixer thread priority, not running", __func__);
7660 } else {
7661 const int priorityBoost = requestSpatializerPriority(getpid(), tid);
7662 if (priorityBoost > 0) {
7663 stream()->setHalThreadPriority(priorityBoost);
7664 }
7665 }
7666 }
7667
7668 void AudioFlinger::SpatializerThread::setHalLatencyMode_l() {
7669 // if mSupportedLatencyModes is empty, the HAL stream does not support
7670 // latency mode control and we can exit.
7671 if (mSupportedLatencyModes.empty()) {
7672 return;
7673 }
7674 audio_latency_mode_t latencyMode = AUDIO_LATENCY_MODE_FREE;
7675 if (mSupportedLatencyModes.size() == 1) {
7676 // If the HAL currently supports only one latency mode, confirm the choice
7677 latencyMode = mSupportedLatencyModes[0];
7678 } else if (mSupportedLatencyModes.size() > 1) {
7679 // Request low latency if:
7680 // - The low latency mode is requested by the spatializer controller
7681 // (mRequestedLatencyMode = AUDIO_LATENCY_MODE_LOW)
7682 // AND
7683 // - At least one active track is spatialized
7684 bool hasSpatializedActiveTrack = false;
7685 for (const auto& track : mActiveTracks) {
7686 if (track->isSpatialized()) {
7687 hasSpatializedActiveTrack = true;
7688 break;
7689 }
7690 }
7691 if (hasSpatializedActiveTrack && mRequestedLatencyMode == AUDIO_LATENCY_MODE_LOW) {
7692 latencyMode = AUDIO_LATENCY_MODE_LOW;
7693 }
7694 }
7695
7696 if (latencyMode != mSetLatencyMode) {
7697 status_t status = mOutput->stream->setLatencyMode(latencyMode);
7698 ALOGD("%s: thread(%d) setLatencyMode(%s) returned %d",
7699 __func__, mId, toString(latencyMode).c_str(), status);
7700 if (status == NO_ERROR) {
7701 mSetLatencyMode = latencyMode;
7702 }
7703 }
7704 }
7705
7706 status_t AudioFlinger::SpatializerThread::setRequestedLatencyMode(audio_latency_mode_t mode) {
7707 if (mode != AUDIO_LATENCY_MODE_LOW && mode != AUDIO_LATENCY_MODE_FREE) {
7708 return BAD_VALUE;
7709 }
7710 Mutex::Autolock _l(mLock);
7711 mRequestedLatencyMode = mode;
7712 return NO_ERROR;
7713 }
7714
7715 void AudioFlinger::SpatializerThread::checkOutputStageEffects()
7716 {
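    // General flow: snapshot the output stage effect chain state while holding mLock,
    // then create/enable or disable the final downmix effect with the lock released
    // (effect creation may block), and finally publish the result back under mLock.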
7717 bool hasVirtualizer = false;
7718 bool hasDownMixer = false;
7719 sp<EffectHandle> finalDownMixer;
7720 {
7721 Mutex::Autolock _l(mLock);
7722 sp<EffectChain> chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_STAGE);
7723 if (chain != 0) {
7724 hasVirtualizer = chain->getEffectFromType_l(FX_IID_SPATIALIZER) != nullptr;
7725 hasDownMixer = chain->getEffectFromType_l(EFFECT_UIID_DOWNMIX) != nullptr;
7726 }
7727
7728 finalDownMixer = mFinalDownMixer;
7729 mFinalDownMixer.clear();
7730 }
7731
7732 if (hasVirtualizer) {
7733 if (finalDownMixer != nullptr) {
7734 int32_t ret;
7735 finalDownMixer->disable(&ret);
7736 }
7737 finalDownMixer.clear();
7738 } else if (!hasDownMixer) {
7739 std::vector<effect_descriptor_t> descriptors;
7740 status_t status = mAudioFlinger->mEffectsFactoryHal->getDescriptors(
7741 EFFECT_UIID_DOWNMIX, &descriptors);
7742 if (status != NO_ERROR) {
7743 return;
7744 }
7745 ALOG_ASSERT(!descriptors.empty(),
7746 "%s getDescriptors() returned no error but empty list", __func__);
7747
7748 finalDownMixer = createEffect_l(nullptr /*client*/, nullptr /*effectClient*/,
7749 0 /*priority*/, AUDIO_SESSION_OUTPUT_STAGE, &descriptors[0], nullptr /*enabled*/,
7750 &status, false /*pinned*/, false /*probe*/, false /*notifyFramesProcessed*/);
7751
7752 if (finalDownMixer == nullptr || (status != NO_ERROR && status != ALREADY_EXISTS)) {
7753 ALOGW("%s error creating downmixer %d", __func__, status);
7754 finalDownMixer.clear();
7755 } else {
7756 int32_t ret;
7757 finalDownMixer->enable(&ret);
7758 }
7759 }
7760
7761 {
7762 Mutex::Autolock _l(mLock);
7763 mFinalDownMixer = finalDownMixer;
7764 }
7765 }
7766
7767 // ----------------------------------------------------------------------------
7768 // Record
7769 // ----------------------------------------------------------------------------
7770
7771 AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger,
7772 AudioStreamIn *input,
7773 audio_io_handle_t id,
7774 bool systemReady
7775 ) :
7776 ThreadBase(audioFlinger, id, RECORD, systemReady, false /* isOut */),
7777 mInput(input),
7778 mSource(mInput),
7779 mActiveTracks(&this->mLocalLog),
7780 mRsmpInBuffer(NULL),
7781 // mRsmpInFrames, mRsmpInFramesP2, and mRsmpInFramesOA are set by readInputParameters_l()
7782 mRsmpInRear(0)
7783 , mReadOnlyHeap(new MemoryDealer(kRecordThreadReadOnlyHeapSize,
7784 "RecordThreadRO", MemoryHeapBase::READ_ONLY))
7785 // mFastCapture below
7786 , mFastCaptureFutex(0)
7787 // mInputSource
7788 // mPipeSink
7789 // mPipeSource
7790 , mPipeFramesP2(0)
7791 // mPipeMemory
7792 // mFastCaptureNBLogWriter
7793 , mFastTrackAvail(false)
7794 , mBtNrecSuspended(false)
7795 {
7796 snprintf(mThreadName, kThreadNameLength, "AudioIn_%X", id);
7797 mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mThreadName);
7798
7799 if (mInput->audioHwDev != nullptr) {
7800 mIsMsdDevice = strcmp(
7801 mInput->audioHwDev->moduleName(), AUDIO_HARDWARE_MODULE_ID_MSD) == 0;
7802 }
7803
7804 readInputParameters_l();
7805
7806 // TODO: We may also match on address as well as device type for
7807 // AUDIO_DEVICE_IN_BUS, AUDIO_DEVICE_IN_BLUETOOTH_A2DP, AUDIO_DEVICE_IN_REMOTE_SUBMIX
7808 // TODO: This property should be checked to ensure that it only contains a single device type.
7809 mTimestampCorrectedDevice = (audio_devices_t)property_get_int64(
7810 "audio.timestamp.corrected_input_device",
7811 (int64_t)(mIsMsdDevice ? AUDIO_DEVICE_IN_BUS // turn on by default for MSD
7812 : AUDIO_DEVICE_NONE));
7813
7814 // create an NBAIO source for the HAL input stream, and negotiate
7815 mInputSource = new AudioStreamInSource(input->stream);
7816 size_t numCounterOffers = 0;
7817 const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount, mFormat)};
7818 #if !LOG_NDEBUG
7819 [[maybe_unused]] ssize_t index =
7820 #else
7821 (void)
7822 #endif
7823 mInputSource->negotiate(offers, 1, NULL, numCounterOffers);
7824 ALOG_ASSERT(index == 0);
7825
7826 // initialize fast capture depending on configuration
7827 bool initFastCapture;
7828 switch (kUseFastCapture) {
7829 case FastCapture_Never:
7830 initFastCapture = false;
7831 ALOGV("%p kUseFastCapture = Never, initFastCapture = false", this);
7832 break;
7833 case FastCapture_Always:
7834 initFastCapture = true;
7835 ALOGV("%p kUseFastCapture = Always, initFastCapture = true", this);
7836 break;
7837 case FastCapture_Static:
7838 initFastCapture = !mIsMsdDevice // Disable fast capture for MSD BUS devices.
7839 && (mFrameCount * 1000) / mSampleRate < kMinNormalCaptureBufferSizeMs;
7840 ALOGV("%p kUseFastCapture = Static, (%lld * 1000) / %u vs %u, initFastCapture = %d "
7841 "mIsMsdDevice = %d", this, (long long)mFrameCount, mSampleRate,
7842 kMinNormalCaptureBufferSizeMs, initFastCapture, mIsMsdDevice);
7843 break;
7844 // case FastCapture_Dynamic:
7845 }
7846
7847 if (initFastCapture) {
7848 // create a Pipe for FastCapture to write to, and for us and fast tracks to read from
7849 NBAIO_Format format = mInputSource->format();
7850 // quadruple-buffering of 20 ms each; this ensures we can sleep for 20ms in RecordThread
7851 size_t pipeFramesP2 = roundup(4 * FMS_20 * mSampleRate / 1000);
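        // For example, at 48 kHz (with FMS_20 denoting 20 ms) this is
        // roundup(4 * 20 * 48) = roundup(3840) = 4096 frames.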
7852 size_t pipeSize = pipeFramesP2 * Format_frameSize(format);
7853 void *pipeBuffer = nullptr;
7854 const sp<MemoryDealer> roHeap(readOnlyHeap());
7855 sp<IMemory> pipeMemory;
7856 if ((roHeap == 0) ||
7857 (pipeMemory = roHeap->allocate(pipeSize)) == 0 ||
7858 (pipeBuffer = pipeMemory->unsecurePointer()) == nullptr) {
7859 ALOGE("not enough memory for pipe buffer size=%zu; "
7860 "roHeap=%p, pipeMemory=%p, pipeBuffer=%p; roHeapSize: %lld",
7861 pipeSize, roHeap.get(), pipeMemory.get(), pipeBuffer,
7862 (long long)kRecordThreadReadOnlyHeapSize);
7863 goto failed;
7864 }
7865 // pipe will be shared directly with fast clients, so clear to avoid leaking old information
7866 memset(pipeBuffer, 0, pipeSize);
7867 Pipe *pipe = new Pipe(pipeFramesP2, format, pipeBuffer);
7868 const NBAIO_Format offersFast[1] = {format};
7869 size_t numCounterOffersFast = 0;
7870 [[maybe_unused]] ssize_t index2 = pipe->negotiate(offersFast, std::size(offersFast),
7871 nullptr /* counterOffers */, numCounterOffersFast);
7872 ALOG_ASSERT(index2 == 0);
7873 mPipeSink = pipe;
7874 PipeReader *pipeReader = new PipeReader(*pipe);
7875 numCounterOffersFast = 0;
7876 index2 = pipeReader->negotiate(offersFast, std::size(offersFast),
7877 nullptr /* counterOffers */, numCounterOffersFast);
7878 ALOG_ASSERT(index2 == 0);
7879 mPipeSource = pipeReader;
7880 mPipeFramesP2 = pipeFramesP2;
7881 mPipeMemory = pipeMemory;
7882
7883 // create fast capture
7884 mFastCapture = new FastCapture();
7885 FastCaptureStateQueue *sq = mFastCapture->sq();
7886 #ifdef STATE_QUEUE_DUMP
7887 // FIXME
7888 #endif
7889 FastCaptureState *state = sq->begin();
7890 state->mCblk = NULL;
7891 state->mInputSource = mInputSource.get();
7892 state->mInputSourceGen++;
7893 state->mPipeSink = pipe;
7894 state->mPipeSinkGen++;
7895 state->mFrameCount = mFrameCount;
7896 state->mCommand = FastCaptureState::COLD_IDLE;
7897 // already done in constructor initialization list
7898 //mFastCaptureFutex = 0;
7899 state->mColdFutexAddr = &mFastCaptureFutex;
7900 state->mColdGen++;
7901 state->mDumpState = &mFastCaptureDumpState;
7902 #ifdef TEE_SINK
7903 // FIXME
7904 #endif
7905 mFastCaptureNBLogWriter = audioFlinger->newWriter_l(kFastCaptureLogSize, "FastCapture");
7906 state->mNBLogWriter = mFastCaptureNBLogWriter.get();
7907 sq->end();
7908 sq->push(FastCaptureStateQueue::BLOCK_UNTIL_PUSHED);
7909
7910 // start the fast capture
7911 mFastCapture->run("FastCapture", ANDROID_PRIORITY_URGENT_AUDIO);
7912 pid_t tid = mFastCapture->getTid();
7913 sendPrioConfigEvent(getpid(), tid, kPriorityFastCapture, false /*forApp*/);
7914 stream()->setHalThreadPriority(kPriorityFastCapture);
7915 #ifdef AUDIO_WATCHDOG
7916 // FIXME
7917 #endif
7918
7919 mFastTrackAvail = true;
7920 }
7921 #ifdef TEE_SINK
7922 mTee.set(mInputSource->format(), NBAIO_Tee::TEE_FLAG_INPUT_THREAD);
7923 mTee.setId(std::string("_") + std::to_string(mId) + "_C");
7924 #endif
7925 failed: ;
7926
7927 // FIXME mNormalSource
7928 }
7929
7930 AudioFlinger::RecordThread::~RecordThread()
7931 {
7932 if (mFastCapture != 0) {
7933 FastCaptureStateQueue *sq = mFastCapture->sq();
7934 FastCaptureState *state = sq->begin();
7935 if (state->mCommand == FastCaptureState::COLD_IDLE) {
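            // Cold futex handshake: the fast capture thread decrements this futex before
            // blocking in cold idle, so an old value of -1 here means it is (or is about
            // to be) asleep and needs an explicit FUTEX_WAKE.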
7936 int32_t old = android_atomic_inc(&mFastCaptureFutex);
7937 if (old == -1) {
7938 (void) syscall(__NR_futex, &mFastCaptureFutex, FUTEX_WAKE_PRIVATE, 1);
7939 }
7940 }
7941 state->mCommand = FastCaptureState::EXIT;
7942 sq->end();
7943 sq->push(FastCaptureStateQueue::BLOCK_UNTIL_PUSHED);
7944 mFastCapture->join();
7945 mFastCapture.clear();
7946 }
7947 mAudioFlinger->unregisterWriter(mFastCaptureNBLogWriter);
7948 mAudioFlinger->unregisterWriter(mNBLogWriter);
7949 free(mRsmpInBuffer);
7950 }
7951
7952 void AudioFlinger::RecordThread::onFirstRef()
7953 {
7954 run(mThreadName, PRIORITY_URGENT_AUDIO);
7955 }
7956
7957 void AudioFlinger::RecordThread::preExit()
7958 {
7959 ALOGV(" preExit()");
7960 Mutex::Autolock _l(mLock);
7961 for (size_t i = 0; i < mTracks.size(); i++) {
7962 sp<RecordTrack> track = mTracks[i];
7963 track->invalidate();
7964 }
7965 mActiveTracks.clear();
7966 mStartStopCond.broadcast();
7967 }
7968
7969 bool AudioFlinger::RecordThread::threadLoop()
7970 {
7971 nsecs_t lastWarning = 0;
7972
7973 inputStandBy();
7974
7975 reacquire_wakelock:
7976 sp<RecordTrack> activeTrack;
7977 {
7978 Mutex::Autolock _l(mLock);
7979 acquireWakeLock_l();
7980 }
7981
7982 // used to request a deferred sleep, to be executed later while mutex is unlocked
7983 uint32_t sleepUs = 0;
7984
7985 int64_t lastLoopCountRead = -2; // never matches "previous" loop, when loopCount = 0.
7986
7987 // loop while there is work to do
7988 for (int64_t loopCount = 0;; ++loopCount) { // loopCount used for statistics tracking
7989 Vector< sp<EffectChain> > effectChains;
7990
7991 // activeTracks accumulates a copy of a subset of mActiveTracks
7992 Vector< sp<RecordTrack> > activeTracks;
7993
7994 // reference to the (first and only) active fast track
7995 sp<RecordTrack> fastTrack;
7996
7997 // reference to a fast track which is about to be removed
7998 sp<RecordTrack> fastTrackToRemove;
7999
8000 bool silenceFastCapture = false;
8001
8002 { // scope for mLock
8003 Mutex::Autolock _l(mLock);
8004
8005 processConfigEvents_l();
8006
8007 // check exitPending here because processConfigEvents_l() and
8008 // checkForNewParameters_l() can temporarily release mLock
8009 if (exitPending()) {
8010 break;
8011 }
8012
8013 // sleep with mutex unlocked
8014 if (sleepUs > 0) {
8015 ATRACE_BEGIN("sleepC");
8016 mWaitWorkCV.waitRelative(mLock, microseconds((nsecs_t)sleepUs));
8017 ATRACE_END();
8018 sleepUs = 0;
8019 continue;
8020 }
8021
8022 // if no active track(s), then standby and release wakelock
8023 size_t size = mActiveTracks.size();
8024 if (size == 0) {
8025 standbyIfNotAlreadyInStandby();
8026 // exitPending() can't become true here
8027 releaseWakeLock_l();
8028 ALOGV("RecordThread: loop stopping");
8029 // go to sleep
8030 mWaitWorkCV.wait(mLock);
8031 ALOGV("RecordThread: loop starting");
8032 goto reacquire_wakelock;
8033 }
8034
8035 bool doBroadcast = false;
8036 bool allStopped = true;
8037 for (size_t i = 0; i < size; ) {
8038
8039 activeTrack = mActiveTracks[i];
8040 if (activeTrack->isTerminated()) {
8041 if (activeTrack->isFastTrack()) {
8042 ALOG_ASSERT(fastTrackToRemove == 0);
8043 fastTrackToRemove = activeTrack;
8044 }
8045 removeTrack_l(activeTrack);
8046 mActiveTracks.remove(activeTrack);
8047 size--;
8048 continue;
8049 }
8050
8051 TrackBase::track_state activeTrackState = activeTrack->mState;
8052 switch (activeTrackState) {
8053
8054 case TrackBase::PAUSING:
8055 mActiveTracks.remove(activeTrack);
8056 activeTrack->mState = TrackBase::PAUSED;
8057 doBroadcast = true;
8058 size--;
8059 continue;
8060
8061 case TrackBase::STARTING_1:
8062 sleepUs = 10000;
8063 i++;
8064 allStopped = false;
8065 continue;
8066
8067 case TrackBase::STARTING_2:
8068 doBroadcast = true;
8069 if (mStandby) {
8070 mThreadMetrics.logBeginInterval();
8071 mThreadSnapshot.onBegin();
8072 mStandby = false;
8073 }
8074 activeTrack->mState = TrackBase::ACTIVE;
8075 allStopped = false;
8076 break;
8077
8078 case TrackBase::ACTIVE:
8079 allStopped = false;
8080 break;
8081
8082 case TrackBase::IDLE: // cannot be on ActiveTracks if idle
8083 case TrackBase::PAUSED: // cannot be on ActiveTracks if paused
8084 case TrackBase::STOPPED: // cannot be on ActiveTracks if destroyed/terminated
8085 default:
8086 LOG_ALWAYS_FATAL("%s: Unexpected active track state:%d, id:%d, tracks:%zu",
8087 __func__, activeTrackState, activeTrack->id(), size);
8088 }
8089
8090 if (activeTrack->isFastTrack()) {
8091 ALOG_ASSERT(!mFastTrackAvail);
8092 ALOG_ASSERT(fastTrack == 0);
8093 // if the active fast track is silenced either:
8094 // 1) silence the whole capture from fast capture buffer if this is
8095 // the only active track
8096 // 2) invalidate this track: this will cause the client to reconnect and possibly
8097 // be invalidated again until unsilenced
8098 bool invalidate = false;
8099 if (activeTrack->isSilenced()) {
8100 if (size > 1) {
8101 invalidate = true;
8102 } else {
8103 silenceFastCapture = true;
8104 }
8105 }
8106 // Invalidate fast tracks if access to audio history is required as this is not
8107 // possible with fast tracks. Once the fast track has been invalidated, no new
8108 // fast track will be created until mMaxSharedAudioHistoryMs is cleared.
8109 if (mMaxSharedAudioHistoryMs != 0) {
8110 invalidate = true;
8111 }
8112 if (invalidate) {
8113 activeTrack->invalidate();
8114 ALOG_ASSERT(fastTrackToRemove == 0);
8115 fastTrackToRemove = activeTrack;
8116 removeTrack_l(activeTrack);
8117 mActiveTracks.remove(activeTrack);
8118 size--;
8119 continue;
8120 }
8121 fastTrack = activeTrack;
8122 }
8123
8124 activeTracks.add(activeTrack);
8125 i++;
8126
8127 }
8128
8129 mActiveTracks.updatePowerState(this);
8130
8131 updateMetadata_l();
8132
8133 if (allStopped) {
8134 standbyIfNotAlreadyInStandby();
8135 }
8136 if (doBroadcast) {
8137 mStartStopCond.broadcast();
8138 }
8139
8140 // sleep if there are no active tracks to process
8141 if (activeTracks.isEmpty()) {
8142 if (sleepUs == 0) {
8143 sleepUs = kRecordThreadSleepUs;
8144 }
8145 continue;
8146 }
8147 sleepUs = 0;
8148
8149 lockEffectChains_l(effectChains);
8150 }
8151
8152 // thread mutex is now unlocked, mActiveTracks unknown, activeTracks.size() > 0
8153
8154 size_t size = effectChains.size();
8155 for (size_t i = 0; i < size; i++) {
8156 // thread mutex is not locked, but effect chain is locked
8157 effectChains[i]->process_l();
8158 }
8159
8160 // Push a new fast capture state if fast capture is not already running, or cblk change
8161 if (mFastCapture != 0) {
8162 FastCaptureStateQueue *sq = mFastCapture->sq();
8163 FastCaptureState *state = sq->begin();
8164 bool didModify = false;
8165 FastCaptureStateQueue::block_t block = FastCaptureStateQueue::BLOCK_UNTIL_PUSHED;
8166 if (state->mCommand != FastCaptureState::READ_WRITE /* FIXME &&
8167 (kUseFastMixer != FastMixer_Dynamic || state->mTrackMask > 1)*/) {
8168 if (state->mCommand == FastCaptureState::COLD_IDLE) {
8169 int32_t old = android_atomic_inc(&mFastCaptureFutex);
8170 if (old == -1) {
8171 (void) syscall(__NR_futex, &mFastCaptureFutex, FUTEX_WAKE_PRIVATE, 1);
8172 }
8173 }
8174 state->mCommand = FastCaptureState::READ_WRITE;
8175 #if 0 // FIXME
8176 mFastCaptureDumpState.increaseSamplingN(mAudioFlinger->isLowRamDevice() ?
8177 FastThreadDumpState::kSamplingNforLowRamDevice :
8178 FastThreadDumpState::kSamplingN);
8179 #endif
8180 didModify = true;
8181 }
8182 audio_track_cblk_t *cblkOld = state->mCblk;
8183 audio_track_cblk_t *cblkNew = fastTrack != 0 ? fastTrack->cblk() : NULL;
8184 if (cblkNew != cblkOld) {
8185 state->mCblk = cblkNew;
8186 // block until acked if removing a fast track
8187 if (cblkOld != NULL) {
8188 block = FastCaptureStateQueue::BLOCK_UNTIL_ACKED;
8189 }
8190 didModify = true;
8191 }
8192 AudioBufferProvider* abp = (fastTrack != 0 && fastTrack->isPatchTrack()) ?
8193 reinterpret_cast<AudioBufferProvider*>(fastTrack.get()) : nullptr;
8194 if (state->mFastPatchRecordBufferProvider != abp) {
8195 state->mFastPatchRecordBufferProvider = abp;
8196 state->mFastPatchRecordFormat = fastTrack == 0 ?
8197 AUDIO_FORMAT_INVALID : fastTrack->format();
8198 didModify = true;
8199 }
8200 if (state->mSilenceCapture != silenceFastCapture) {
8201 state->mSilenceCapture = silenceFastCapture;
8202 didModify = true;
8203 }
8204 sq->end(didModify);
8205 if (didModify) {
8206 sq->push(block);
8207 #if 0
8208 if (kUseFastCapture == FastCapture_Dynamic) {
8209 mNormalSource = mPipeSource;
8210 }
8211 #endif
8212 }
8213 }
8214
8215 // now run the fast track destructor with thread mutex unlocked
8216 fastTrackToRemove.clear();
8217
8218 // Read from HAL to keep up with fastest client if multiple active tracks, not slowest one.
8219 // Only the client(s) that are too slow will overrun. But if even the fastest client is too
8220 // slow, then this RecordThread will overrun by not calling HAL read often enough.
8221 // If destination is non-contiguous, first read past the nominal end of buffer, then
8222 // copy to the right place. Permitted because mRsmpInBuffer was over-allocated.
8223
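        // mRsmpInFramesP2 is a power of two, so masking mRsmpInRear with (P2 - 1) yields the
        // wrapped write index into the circular mRsmpInBuffer.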
8224 int32_t rear = mRsmpInRear & (mRsmpInFramesP2 - 1);
8225 ssize_t framesRead = 0; // not needed, remove clang-tidy warning.
8226 const int64_t lastIoBeginNs = systemTime(); // start IO timing
8227
8228 // If an NBAIO source is present, use it to read the normal capture's data
8229 if (mPipeSource != 0) {
8230 size_t framesToRead = min(mRsmpInFramesOA - rear, mRsmpInFramesP2 / 2);
8231
8232 // The audio fifo read() returns OVERRUN on overflow, and advances the read pointer
8233 // to the full buffer point (clearing the overflow condition). Upon OVERRUN error,
8234 // we immediately retry the read() to get data and prevent another overflow.
8235 for (int retries = 0; retries <= 2; ++retries) {
8236 ALOGW_IF(retries > 0, "overrun on read from pipe, retry #%d", retries);
8237 framesRead = mPipeSource->read((uint8_t*)mRsmpInBuffer + rear * mFrameSize,
8238 framesToRead);
8239 if (framesRead != OVERRUN) break;
8240 }
8241
8242 const ssize_t availableToRead = mPipeSource->availableToRead();
8243 if (availableToRead >= 0) {
8244 mMonopipePipeDepthStats.add(availableToRead);
8245 // PipeSource is the primary clock. It is up to the AudioRecord client to keep up.
8246 LOG_ALWAYS_FATAL_IF((size_t)availableToRead > mPipeFramesP2,
8247 "more frames to read than fifo size, %zd > %zu",
8248 availableToRead, mPipeFramesP2);
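            // Sleep long enough for about half of the smaller of the free pipe space and the
            // resampler input buffer to fill, so we neither busy-poll nor risk a pipe overrun.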
8249 const size_t pipeFramesFree = mPipeFramesP2 - availableToRead;
8250 const size_t sleepFrames = min(pipeFramesFree, mRsmpInFramesP2) / 2;
8251 ALOGVV("mPipeFramesP2:%zu mRsmpInFramesP2:%zu sleepFrames:%zu availableToRead:%zd",
8252 mPipeFramesP2, mRsmpInFramesP2, sleepFrames, availableToRead);
8253 sleepUs = (sleepFrames * 1000000LL) / mSampleRate;
8254 }
8255 if (framesRead < 0) {
8256 status_t status = (status_t) framesRead;
8257 switch (status) {
8258 case OVERRUN:
8259 ALOGW("overrun on read from pipe");
8260 framesRead = 0;
8261 break;
8262 case NEGOTIATE:
8263 ALOGE("re-negotiation is needed");
8264 framesRead = -1; // Will cause an attempt to recover.
8265 break;
8266 default:
8267 ALOGE("unknown error %d on read from pipe", status);
8268 break;
8269 }
8270 }
8271 // otherwise use the HAL / AudioStreamIn directly
8272 } else {
8273 ATRACE_BEGIN("read");
8274 size_t bytesRead;
8275 status_t result = mSource->read(
8276 (uint8_t*)mRsmpInBuffer + rear * mFrameSize, mBufferSize, &bytesRead);
8277 ATRACE_END();
8278 if (result < 0) {
8279 framesRead = result;
8280 } else {
8281 framesRead = bytesRead / mFrameSize;
8282 }
8283 }
8284
8285 const int64_t lastIoEndNs = systemTime(); // end IO timing
8286
8287 // Update server timestamp with server stats
8288 // systemTime() is optional if the hardware supports timestamps.
8289 if (framesRead >= 0) {
8290 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] += framesRead;
8291 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = lastIoEndNs;
8292 }
8293
8294 // Update server timestamp with kernel stats
8295 if (mPipeSource.get() == nullptr /* don't obtain for FastCapture, could block */) {
8296 int64_t position, time;
8297 if (mStandby) {
8298 mTimestampVerifier.discontinuity(audio_is_linear_pcm(mFormat) ?
8299 mTimestampVerifier.DISCONTINUITY_MODE_CONTINUOUS :
8300 mTimestampVerifier.DISCONTINUITY_MODE_ZERO);
8301 } else if (mSource->getCapturePosition(&position, &time) == NO_ERROR
8302 && time > mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]) {
8303
8304 mTimestampVerifier.add(position, time, mSampleRate);
8305
8306 // Correct timestamps
8307 if (isTimestampCorrectionEnabled()) {
8308 ALOGVV("TS_BEFORE: %d %lld %lld",
8309 id(), (long long)time, (long long)position);
8310 auto correctedTimestamp = mTimestampVerifier.getLastCorrectedTimestamp();
8311 position = correctedTimestamp.mFrames;
8312 time = correctedTimestamp.mTimeNs;
8313 ALOGVV("TS_AFTER: %d %lld %lld",
8314 id(), (long long)time, (long long)position);
8315 }
8316
8317 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = position;
8318 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = time;
8319 // Note: In general record buffers should tend to be empty in
8320 // a properly running pipeline.
8321 //
8322 // Also, it is not advantageous to call get_presentation_position during the read
8323 // as the read obtains a lock, preventing the timestamp call from executing.
8324 } else {
8325 mTimestampVerifier.error();
8326 }
8327 }
8328
8329 // From the timestamp, input read latency is negative output write latency.
8330 const audio_input_flags_t flags = mInput != NULL ? mInput->flags : AUDIO_INPUT_FLAG_NONE;
8331 const double latencyMs = RecordTrack::checkServerLatencySupported(mFormat, flags)
8332 ? - mTimestamp.getOutputServerLatencyMs(mSampleRate) : 0.;
8333 if (latencyMs != 0.) { // note 0. means timestamp is empty.
8334 mLatencyMs.add(latencyMs);
8335 }
8336
8337 // Use this to track timestamp information
8338 // ALOGD("%s", mTimestamp.toString().c_str());
8339
8340 if (framesRead < 0 || (framesRead == 0 && mPipeSource == 0)) {
8341 ALOGE("read failed: framesRead=%zd", framesRead);
8342 // Force input into standby so that it tries to recover at next read attempt
8343 inputStandBy();
8344 sleepUs = kRecordThreadSleepUs;
8345 }
8346 if (framesRead <= 0) {
8347 goto unlock;
8348 }
8349 ALOG_ASSERT(framesRead > 0);
8350 mFramesRead += framesRead;
8351
8352 #ifdef TEE_SINK
8353 (void)mTee.write((uint8_t*)mRsmpInBuffer + rear * mFrameSize, framesRead);
8354 #endif
8355 // If destination is non-contiguous, we now correct for reading past end of buffer.
8356 {
8357 size_t part1 = mRsmpInFramesP2 - rear;
8358 if ((size_t) framesRead > part1) {
8359 memcpy(mRsmpInBuffer, (uint8_t*)mRsmpInBuffer + mRsmpInFramesP2 * mFrameSize,
8360 (framesRead - part1) * mFrameSize);
8361 }
8362 }
8363 mRsmpInRear = audio_utils::safe_add_overflow(mRsmpInRear, (int32_t)framesRead);
8364
8365 size = activeTracks.size();
8366
8367 // loop over each active track
8368 for (size_t i = 0; i < size; i++) {
8369 activeTrack = activeTracks[i];
8370
8371 // skip fast tracks, as those are handled directly by FastCapture
8372 if (activeTrack->isFastTrack()) {
8373 continue;
8374 }
8375
8376 // TODO: This code probably should be moved to RecordTrack.
8377 // TODO: Update the activeTrack buffer converter in case of reconfigure.
8378
8379 enum {
8380 OVERRUN_UNKNOWN,
8381 OVERRUN_TRUE,
8382 OVERRUN_FALSE
8383 } overrun = OVERRUN_UNKNOWN;
8384
8385 // loop over getNextBuffer to handle circular sink
8386 for (;;) {
8387
8388 activeTrack->mSink.frameCount = ~0;
8389 status_t status = activeTrack->getNextBuffer(&activeTrack->mSink);
8390 size_t framesOut = activeTrack->mSink.frameCount;
8391 LOG_ALWAYS_FATAL_IF((status == OK) != (framesOut > 0));
8392
8393 // check available frames and handle overrun conditions
8394 // if the record track isn't draining fast enough.
8395 bool hasOverrun;
8396 size_t framesIn;
8397 activeTrack->mResamplerBufferProvider->sync(&framesIn, &hasOverrun);
8398 if (hasOverrun) {
8399 overrun = OVERRUN_TRUE;
8400 }
8401 if (framesOut == 0 || framesIn == 0) {
8402 break;
8403 }
8404
8405 // Don't allow framesOut to be larger than what is possible with resampling
8406 // from framesIn.
8407 // This isn't strictly necessary but helps limit buffer resizing in
8408 // RecordBufferConverter. TODO: remove when no longer needed.
8409 framesOut = min(framesOut,
8410 destinationFramesPossible(
8411 framesIn, mSampleRate, activeTrack->mSampleRate));
8412
8413 if (activeTrack->isDirect()) {
8414 // No RecordBufferConverter used for direct streams. Pass
8415 // straight from RecordThread buffer to RecordTrack buffer.
8416 AudioBufferProvider::Buffer buffer;
8417 buffer.frameCount = framesOut;
8418 const status_t getNextBufferStatus =
8419 activeTrack->mResamplerBufferProvider->getNextBuffer(&buffer);
8420 if (getNextBufferStatus == OK && buffer.frameCount != 0) {
8421 ALOGV_IF(buffer.frameCount != framesOut,
8422 "%s() read less than expected (%zu vs %zu)",
8423 __func__, buffer.frameCount, framesOut);
8424 framesOut = buffer.frameCount;
8425 memcpy(activeTrack->mSink.raw, buffer.raw, buffer.frameCount * mFrameSize);
8426 activeTrack->mResamplerBufferProvider->releaseBuffer(&buffer);
8427 } else {
8428 framesOut = 0;
8429 ALOGE("%s() cannot fill request, status: %d, frameCount: %zu",
8430 __func__, getNextBufferStatus, buffer.frameCount);
8431 }
8432 } else {
8433 // process frames from the RecordThread buffer provider to the RecordTrack
8434 // buffer
8435 framesOut = activeTrack->mRecordBufferConverter->convert(
8436 activeTrack->mSink.raw,
8437 activeTrack->mResamplerBufferProvider,
8438 framesOut);
8439 }
8440
8441 if (framesOut > 0 && (overrun == OVERRUN_UNKNOWN)) {
8442 overrun = OVERRUN_FALSE;
8443 }
8444
8445 if (activeTrack->mFramesToDrop == 0) {
8446 if (framesOut > 0) {
8447 activeTrack->mSink.frameCount = framesOut;
8448 // Sanitize before releasing if the track has no access to the source data
8449 // An idle UID receives silence from non virtual devices until active
8450 if (activeTrack->isSilenced()) {
8451 memset(activeTrack->mSink.raw, 0, framesOut * activeTrack->frameSize());
8452 }
8453 activeTrack->releaseBuffer(&activeTrack->mSink);
8454 }
8455 } else {
8456 // FIXME could do a partial drop of framesOut
8457 if (activeTrack->mFramesToDrop > 0) {
8458 activeTrack->mFramesToDrop -= (ssize_t)framesOut;
8459 if (activeTrack->mFramesToDrop <= 0) {
8460 activeTrack->clearSyncStartEvent();
8461 }
8462 } else {
8463 activeTrack->mFramesToDrop += framesOut;
8464 if (activeTrack->mFramesToDrop >= 0 || activeTrack->mSyncStartEvent == 0 ||
8465 activeTrack->mSyncStartEvent->isCancelled()) {
8466 ALOGW("Synced record %s, session %d, trigger session %d",
8467 (activeTrack->mFramesToDrop >= 0) ? "timed out" : "cancelled",
8468 activeTrack->sessionId(),
8469 (activeTrack->mSyncStartEvent != 0) ?
8470 activeTrack->mSyncStartEvent->triggerSession() :
8471 AUDIO_SESSION_NONE);
8472 activeTrack->clearSyncStartEvent();
8473 }
8474 }
8475 }
8476
8477 if (framesOut == 0) {
8478 break;
8479 }
8480 }
8481
8482 switch (overrun) {
8483 case OVERRUN_TRUE:
8484 // client isn't retrieving buffers fast enough
8485 if (!activeTrack->setOverflow()) {
8486 nsecs_t now = systemTime();
8487 // FIXME should lastWarning be per track?
8488 if ((now - lastWarning) > kWarningThrottleNs) {
8489 ALOGW("RecordThread: buffer overflow");
8490 lastWarning = now;
8491 }
8492 }
8493 break;
8494 case OVERRUN_FALSE:
8495 activeTrack->clearOverflow();
8496 break;
8497 case OVERRUN_UNKNOWN:
8498 break;
8499 }
8500
8501 // update frame information and push timestamp out
8502 activeTrack->updateTrackFrameInfo(
8503 activeTrack->mServerProxy->framesReleased(),
8504 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER],
8505 mSampleRate, mTimestamp);
8506 }
8507
8508 unlock:
8509 // enable changes in effect chain
8510 unlockEffectChains(effectChains);
8511 // effectChains doesn't need to be cleared, since it is cleared by destructor at scope end
8512 if (audio_has_proportional_frames(mFormat)
8513 && loopCount == lastLoopCountRead + 1) {
8514 const int64_t readPeriodNs = lastIoEndNs - mLastIoEndNs;
8515 const double jitterMs =
8516 TimestampVerifier<int64_t, int64_t>::computeJitterMs(
8517 {framesRead, readPeriodNs},
8518 {0, 0} /* lastTimestamp */, mSampleRate);
8519 const double processMs = (lastIoBeginNs - mLastIoEndNs) * 1e-6;
8520
8521 Mutex::Autolock _l(mLock);
8522 mIoJitterMs.add(jitterMs);
8523 mProcessTimeMs.add(processMs);
8524 }
8525 // update timing info.
8526 mLastIoBeginNs = lastIoBeginNs;
8527 mLastIoEndNs = lastIoEndNs;
8528 lastLoopCountRead = loopCount;
8529 }
8530
8531 standbyIfNotAlreadyInStandby();
8532
8533 {
8534 Mutex::Autolock _l(mLock);
8535 for (size_t i = 0; i < mTracks.size(); i++) {
8536 sp<RecordTrack> track = mTracks[i];
8537 track->invalidate();
8538 }
8539 mActiveTracks.clear();
8540 mStartStopCond.broadcast();
8541 }
8542
8543 releaseWakeLock();
8544
8545 ALOGV("RecordThread %p exiting", this);
8546 return false;
8547 }
8548
8549 void AudioFlinger::RecordThread::standbyIfNotAlreadyInStandby()
8550 {
8551 if (!mStandby) {
8552 inputStandBy();
8553 mThreadMetrics.logEndInterval();
8554 mThreadSnapshot.onEnd();
8555 mStandby = true;
8556 }
8557 }
8558
8559 void AudioFlinger::RecordThread::inputStandBy()
8560 {
8561 // Idle the fast capture if it's currently running
8562 if (mFastCapture != 0) {
8563 FastCaptureStateQueue *sq = mFastCapture->sq();
8564 FastCaptureState *state = sq->begin();
8565 if (!(state->mCommand & FastCaptureState::IDLE)) {
8566 state->mCommand = FastCaptureState::COLD_IDLE;
8567 state->mColdFutexAddr = &mFastCaptureFutex;
8568 state->mColdGen++;
8569 mFastCaptureFutex = 0;
8570 sq->end();
8571 // BLOCK_UNTIL_PUSHED would be insufficient, as we need it to stop doing I/O now
8572 sq->push(FastCaptureStateQueue::BLOCK_UNTIL_ACKED);
8573 #if 0
8574 if (kUseFastCapture == FastCapture_Dynamic) {
8575 // FIXME
8576 }
8577 #endif
8578 #ifdef AUDIO_WATCHDOG
8579 // FIXME
8580 #endif
8581 } else {
8582 sq->end(false /*didModify*/);
8583 }
8584 }
8585 status_t result = mSource->standby();
8586 ALOGE_IF(result != OK, "Error when putting input stream into standby: %d", result);
8587
8588 // If going into standby, flush the pipe source.
8589 if (mPipeSource.get() != nullptr) {
8590 const ssize_t flushed = mPipeSource->flush();
8591 if (flushed > 0) {
8592 ALOGV("Input standby flushed PipeSource %zd frames", flushed);
8593 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] += flushed;
8594 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = systemTime();
8595 }
8596 }
8597 }
8598
8599 // RecordThread::createRecordTrack_l() must be called with AudioFlinger::mLock held
8600 sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createRecordTrack_l(
8601 const sp<AudioFlinger::Client>& client,
8602 const audio_attributes_t& attr,
8603 uint32_t *pSampleRate,
8604 audio_format_t format,
8605 audio_channel_mask_t channelMask,
8606 size_t *pFrameCount,
8607 audio_session_t sessionId,
8608 size_t *pNotificationFrameCount,
8609 pid_t creatorPid,
8610 const AttributionSourceState& attributionSource,
8611 audio_input_flags_t *flags,
8612 pid_t tid,
8613 status_t *status,
8614 audio_port_handle_t portId,
8615 int32_t maxSharedAudioHistoryMs)
8616 {
8617 size_t frameCount = *pFrameCount;
8618 size_t notificationFrameCount = *pNotificationFrameCount;
8619 sp<RecordTrack> track;
8620 status_t lStatus;
8621 audio_input_flags_t inputFlags = mInput->flags;
8622 audio_input_flags_t requestedFlags = *flags;
8623 uint32_t sampleRate;
8624
8625 lStatus = initCheck();
8626 if (lStatus != NO_ERROR) {
8627 ALOGE("createRecordTrack_l() audio driver not initialized");
8628 goto Exit;
8629 }
8630
8631 if (!audio_is_linear_pcm(mFormat) && (*flags & AUDIO_INPUT_FLAG_DIRECT) == 0) {
8632 ALOGE("createRecordTrack_l() on an encoded stream requires AUDIO_INPUT_FLAG_DIRECT");
8633 lStatus = BAD_VALUE;
8634 goto Exit;
8635 }
8636
8637 if (maxSharedAudioHistoryMs != 0) {
8638 if (!captureHotwordAllowed(attributionSource)) {
8639 lStatus = PERMISSION_DENIED;
8640 goto Exit;
8641 }
8642 if (maxSharedAudioHistoryMs < 0
8643 || maxSharedAudioHistoryMs > AudioFlinger::kMaxSharedAudioHistoryMs) {
8644 lStatus = BAD_VALUE;
8645 goto Exit;
8646 }
8647 }
8648 if (*pSampleRate == 0) {
8649 *pSampleRate = mSampleRate;
8650 }
8651 sampleRate = *pSampleRate;
8652
8653 // special case for FAST flag considered OK if fast capture is present and access to
8654 // audio history is not required
8655 if (hasFastCapture() && mMaxSharedAudioHistoryMs == 0) {
8656 inputFlags = (audio_input_flags_t)(inputFlags | AUDIO_INPUT_FLAG_FAST);
8657 }
8658
8659 // Check if requested flags are compatible with input stream flags
8660 if ((*flags & inputFlags) != *flags) {
8661 ALOGW("createRecordTrack_l(): mismatch between requested flags (%08x) and"
8662 " input flags (%08x)",
8663 *flags, inputFlags);
8664 *flags = (audio_input_flags_t)(*flags & inputFlags);
8665 }
8666
8667 // client expresses a preference for FAST and no access to audio history,
8668 // but we get the final say
8669 if (*flags & AUDIO_INPUT_FLAG_FAST && maxSharedAudioHistoryMs == 0) {
8670 if (
8671 // we formerly checked for a callback handler (non-0 tid),
8672 // but that is no longer required for TRANSFER_OBTAIN mode
8673 // No need to match hardware format, format conversion will be done in client side.
8674 //
8675 // Frame count is not specified (0), or is less than or equal the pipe depth.
8676 // It is OK to provide a higher capacity than requested.
8677 // We will force it to mPipeFramesP2 below.
8678 (frameCount <= mPipeFramesP2) &&
8679 // PCM data
8680 audio_is_linear_pcm(format) &&
8681 // hardware channel mask
8682 (channelMask == mChannelMask) &&
8683 // hardware sample rate
8684 (sampleRate == mSampleRate) &&
8685 // record thread has an associated fast capture
8686 hasFastCapture() &&
8687 // there are sufficient fast track slots available
8688 mFastTrackAvail
8689 ) {
8690 // check compatibility with audio effects.
8691 Mutex::Autolock _l(mLock);
8692 // Do not accept FAST flag if the session has software effects
8693 sp<EffectChain> chain = getEffectChain_l(sessionId);
8694 if (chain != 0) {
8695 audio_input_flags_t old = *flags;
8696 chain->checkInputFlagCompatibility(flags);
8697 if (old != *flags) {
8698 ALOGV("%p AUDIO_INPUT_FLAGS denied by effect old=%#x new=%#x",
8699 this, (int)old, (int)*flags);
8700 }
8701 }
8702 ALOGV_IF((*flags & AUDIO_INPUT_FLAG_FAST) != 0,
8703 "%p AUDIO_INPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
8704 this, frameCount, mFrameCount);
8705 } else {
8706 ALOGV("%p AUDIO_INPUT_FLAG_FAST denied: frameCount=%zu mFrameCount=%zu mPipeFramesP2=%zu "
8707 "format=%#x isLinear=%d mFormat=%#x channelMask=%#x sampleRate=%u mSampleRate=%u "
8708 "hasFastCapture=%d tid=%d mFastTrackAvail=%d",
8709 this, frameCount, mFrameCount, mPipeFramesP2,
8710 format, audio_is_linear_pcm(format), mFormat, channelMask, sampleRate, mSampleRate,
8711 hasFastCapture(), tid, mFastTrackAvail);
8712 *flags = (audio_input_flags_t)(*flags & ~AUDIO_INPUT_FLAG_FAST);
8713 }
8714 }
8715
8716 // If FAST or RAW flags were corrected, ask caller to request new input from audio policy
8717 if ((*flags & AUDIO_INPUT_FLAG_FAST) !=
8718 (requestedFlags & AUDIO_INPUT_FLAG_FAST)) {
8719 *flags = (audio_input_flags_t) (*flags & ~(AUDIO_INPUT_FLAG_FAST | AUDIO_INPUT_FLAG_RAW));
8720 lStatus = BAD_TYPE;
8721 goto Exit;
8722 }
8723
8724 // compute track buffer size in frames, and suggest the notification frame count
8725 if (*flags & AUDIO_INPUT_FLAG_FAST) {
8726 // fast track: frame count is exactly the pipe depth
8727 frameCount = mPipeFramesP2;
8728 // ignore requested notificationFrames, and always notify exactly once every HAL buffer
8729 notificationFrameCount = mFrameCount;
8730 } else {
8731 // not fast track: max notification period is resampled equivalent of one HAL buffer time
8732 // or 20 ms if there is a fast capture
8733 // TODO This could be a roundupRatio inline, and const
8734 size_t maxNotificationFrames = ((int64_t) (hasFastCapture() ? mSampleRate/50 : mFrameCount)
8735 * sampleRate + mSampleRate - 1) / mSampleRate;
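        // For illustration: with mSampleRate = 48000 and a fast capture present, the base is
        // 48000 / 50 = 960 frames (20 ms); for a client at 44100 Hz this rounds up to
        // 960 * 44100 / 48000 = 882 frames.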
8736 // minimum number of notification periods is at least kMinNotifications,
8737 // and at least kMinMs rounded up to a whole notification period (minNotificationsByMs)
8738 static const size_t kMinNotifications = 3;
8739 static const uint32_t kMinMs = 30;
8740 // TODO This could be a roundupRatio inline
8741 const size_t minFramesByMs = (sampleRate * kMinMs + 1000 - 1) / 1000;
8742 // TODO This could be a roundupRatio inline
8743 const size_t minNotificationsByMs = (minFramesByMs + maxNotificationFrames - 1) /
8744 maxNotificationFrames;
8745 const size_t minFrameCount = maxNotificationFrames *
8746 max(kMinNotifications, minNotificationsByMs);
8747 frameCount = max(frameCount, minFrameCount);
8748 if (notificationFrameCount == 0 || notificationFrameCount > maxNotificationFrames) {
8749 notificationFrameCount = maxNotificationFrames;
8750 }
8751 }
8752 *pFrameCount = frameCount;
8753 *pNotificationFrameCount = notificationFrameCount;
8754
8755 { // scope for mLock
8756 Mutex::Autolock _l(mLock);
8757 int32_t startFrames = -1;
8758 if (!mSharedAudioPackageName.empty()
8759 && mSharedAudioPackageName == attributionSource.packageName
8760 && mSharedAudioSessionId == sessionId
8761 && captureHotwordAllowed(attributionSource)) {
8762 startFrames = mSharedAudioStartFrames;
8763 }
8764
8765 track = new RecordTrack(this, client, attr, sampleRate,
8766 format, channelMask, frameCount,
8767 nullptr /* buffer */, (size_t)0 /* bufferSize */, sessionId, creatorPid,
8768 attributionSource, *flags, TrackBase::TYPE_DEFAULT, portId,
8769 startFrames);
8770
8771 lStatus = track->initCheck();
8772 if (lStatus != NO_ERROR) {
8773 ALOGE("createRecordTrack_l() initCheck failed %d; no control block?", lStatus);
8774 // track must be cleared from the caller as the caller has the AF lock
8775 goto Exit;
8776 }
8777 mTracks.add(track);
8778
8779 if ((*flags & AUDIO_INPUT_FLAG_FAST) && (tid != -1)) {
8780 pid_t callingPid = IPCThreadState::self()->getCallingPid();
8781 // we don't have CAP_SYS_NICE, nor do we want to have it as it's too powerful,
8782 // so ask activity manager to do this on our behalf
8783 sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp, true /*forApp*/);
8784 }
8785
8786 if (maxSharedAudioHistoryMs != 0) {
8787 sendResizeBufferConfigEvent_l(maxSharedAudioHistoryMs);
8788 }
8789 }
8790
8791 lStatus = NO_ERROR;
8792
8793 Exit:
8794 *status = lStatus;
8795 return track;
8796 }
8797
8798 status_t AudioFlinger::RecordThread::start(RecordThread::RecordTrack* recordTrack,
8799 AudioSystem::sync_event_t event,
8800 audio_session_t triggerSession)
8801 {
8802 ALOGV("RecordThread::start event %d, triggerSession %d", event, triggerSession);
8803 sp<ThreadBase> strongMe = this;
8804 status_t status = NO_ERROR;
8805
8806 if (event == AudioSystem::SYNC_EVENT_NONE) {
8807 recordTrack->clearSyncStartEvent();
8808 } else if (event != AudioSystem::SYNC_EVENT_SAME) {
8809 recordTrack->mSyncStartEvent = mAudioFlinger->createSyncEvent(event,
8810 triggerSession,
8811 recordTrack->sessionId(),
8812 syncStartEventCallback,
8813 recordTrack);
8814 // Sync event can be cancelled by the trigger session if the track is not in a
8815 // compatible state in which case we start record immediately
8816 if (recordTrack->mSyncStartEvent->isCancelled()) {
8817 recordTrack->clearSyncStartEvent();
8818 } else {
8819 // do not wait for the event for more than AudioSystem::kSyncRecordStartTimeOutMs
8820 recordTrack->mFramesToDrop = -(ssize_t)
8821 ((AudioSystem::kSyncRecordStartTimeOutMs * recordTrack->mSampleRate) / 1000);
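        // mFramesToDrop is negative while waiting for the sync event: each captured buffer
        // adds framesOut in threadLoop(), and if it reaches zero before the event fires the
        // synced start is treated as timed out.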
8822 }
8823 }
8824
8825 {
8826 // This section is a rendezvous between binder thread executing start() and RecordThread
8827 AutoMutex lock(mLock);
8828 if (recordTrack->isInvalid()) {
8829 recordTrack->clearSyncStartEvent();
8830 ALOGW("%s track %d: invalidated before startInput", __func__, recordTrack->portId());
8831 return DEAD_OBJECT;
8832 }
8833 if (mActiveTracks.indexOf(recordTrack) >= 0) {
8834 if (recordTrack->mState == TrackBase::PAUSING) {
8835 // We haven't stopped yet (moved to PAUSED and not in mActiveTracks)
8836 // so no need to startInput().
8837 ALOGV("active record track PAUSING -> ACTIVE");
8838 recordTrack->mState = TrackBase::ACTIVE;
8839 } else {
8840 ALOGV("active record track state %d", (int)recordTrack->mState);
8841 }
8842 return status;
8843 }
8844
8845 // TODO consider other ways of handling this, such as changing the state to :STARTING and
8846 // adding the track to mActiveTracks after returning from AudioSystem::startInput(),
8847 // or using a separate command thread
8848 recordTrack->mState = TrackBase::STARTING_1;
8849 mActiveTracks.add(recordTrack);
8850 if (recordTrack->isExternalTrack()) {
8851 mLock.unlock();
8852 status = AudioSystem::startInput(recordTrack->portId());
8853 mLock.lock();
8854 if (recordTrack->isInvalid()) {
8855 recordTrack->clearSyncStartEvent();
8856 if (status == NO_ERROR && recordTrack->mState == TrackBase::STARTING_1) {
8857 recordTrack->mState = TrackBase::STARTING_2;
8858 // STARTING_2 forces destroy to call stopInput.
8859 }
8860 ALOGW("%s track %d: invalidated after startInput", __func__, recordTrack->portId());
8861 return DEAD_OBJECT;
8862 }
8863 if (recordTrack->mState != TrackBase::STARTING_1) {
8864 ALOGW("%s(%d): unsynchronized mState:%d change",
8865 __func__, recordTrack->id(), (int)recordTrack->mState);
8866 // Someone else has changed state, let them take over,
8867 // leave mState in the new state.
8868 recordTrack->clearSyncStartEvent();
8869 return INVALID_OPERATION;
8870 }
8871 // we're ok, but perhaps startInput has failed
8872 if (status != NO_ERROR) {
8873 ALOGW("%s(%d): startInput failed, status %d",
8874 __func__, recordTrack->id(), status);
8875 // We are in ActiveTracks if STARTING_1 and valid, so remove from ActiveTracks,
8876 // leave in STARTING_1, so destroy() will not call stopInput.
8877 mActiveTracks.remove(recordTrack);
8878 recordTrack->clearSyncStartEvent();
8879 return status;
8880 }
8881 sendIoConfigEvent_l(
8882 AUDIO_CLIENT_STARTED, recordTrack->creatorPid(), recordTrack->portId());
8883 }
8884
8885 recordTrack->logBeginInterval(patchSourcesToString(&mPatch)); // log to MediaMetrics
8886
8887 // Catch up with current buffer indices if thread is already running.
8888 // This is what makes a new client discard all buffered data. If the track's mRsmpInFront
8889 // was initialized to some value closer to the thread's mRsmpInFront, then the track could
8890 // see previously buffered data before it called start(), but with greater risk of overrun.
8891
8892 recordTrack->mResamplerBufferProvider->reset();
8893 if (!recordTrack->isDirect()) {
8894 // clear any converter state as new data will be discontinuous
8895 recordTrack->mRecordBufferConverter->reset();
8896 }
8897 recordTrack->mState = TrackBase::STARTING_2;
8898 // signal thread to start
8899 mWaitWorkCV.broadcast();
8900 return status;
8901 }
8902 }
8903
8904 void AudioFlinger::RecordThread::syncStartEventCallback(const wp<SyncEvent>& event)
8905 {
8906 sp<SyncEvent> strongEvent = event.promote();
8907
8908 if (strongEvent != 0) {
8909 sp<RefBase> ptr = strongEvent->cookie().promote();
8910 if (ptr != 0) {
8911 RecordTrack *recordTrack = (RecordTrack *)ptr.get();
8912 recordTrack->handleSyncStartEvent(strongEvent);
8913 }
8914 }
8915 }
8916
8917 bool AudioFlinger::RecordThread::stop(RecordThread::RecordTrack* recordTrack) {
8918 ALOGV("RecordThread::stop");
8919 AutoMutex _l(mLock);
8920 // if we're invalid, we can't be on the ActiveTracks.
8921 if (mActiveTracks.indexOf(recordTrack) < 0 || recordTrack->mState == TrackBase::PAUSING) {
8922 return false;
8923 }
8924 // note that threadLoop may still be processing the track at this point [without lock]
8925 recordTrack->mState = TrackBase::PAUSING;
8926
8927 // NOTE: Waiting here is important to keep stop synchronous.
8928 // This is needed for proper patchRecord peer release.
8929 while (recordTrack->mState == TrackBase::PAUSING && !recordTrack->isInvalid()) {
8930 mWaitWorkCV.broadcast(); // signal thread to stop
8931 mStartStopCond.wait(mLock);
8932 }
8933
8934 if (recordTrack->mState == TrackBase::PAUSED) { // successful stop
8935 ALOGV("Record stopped OK");
8936 return true;
8937 }
8938
8939 // don't handle anything - we've been invalidated or restarted and in a different state
8940 ALOGW("%s(%d): unsynchronized stop, state: %d",
8941 __func__, recordTrack->id(), recordTrack->mState);
8942 return false;
8943 }
8944
8945 bool AudioFlinger::RecordThread::isValidSyncEvent(const sp<SyncEvent>& event __unused) const
8946 {
8947 return false;
8948 }
8949
8950 status_t AudioFlinger::RecordThread::setSyncEvent(const sp<SyncEvent>& event __unused)
8951 {
8952 #if 0 // This branch is currently dead code, but is preserved in case it will be needed in future
8953 if (!isValidSyncEvent(event)) {
8954 return BAD_VALUE;
8955 }
8956
8957 audio_session_t eventSession = event->triggerSession();
8958 status_t ret = NAME_NOT_FOUND;
8959
8960 Mutex::Autolock _l(mLock);
8961
8962 for (size_t i = 0; i < mTracks.size(); i++) {
8963 sp<RecordTrack> track = mTracks[i];
8964 if (eventSession == track->sessionId()) {
8965 (void) track->setSyncEvent(event);
8966 ret = NO_ERROR;
8967 }
8968 }
8969 return ret;
8970 #else
8971 return BAD_VALUE;
8972 #endif
8973 }
8974
8975 status_t AudioFlinger::RecordThread::getActiveMicrophones(
8976 std::vector<media::MicrophoneInfoFw>* activeMicrophones)
8977 {
8978 ALOGV("RecordThread::getActiveMicrophones");
8979 AutoMutex _l(mLock);
8980 if (!isStreamInitialized()) {
8981 return NO_INIT;
8982 }
8983 status_t status = mInput->stream->getActiveMicrophones(activeMicrophones);
8984 return status;
8985 }
8986
8987 status_t AudioFlinger::RecordThread::setPreferredMicrophoneDirection(
8988 audio_microphone_direction_t direction)
8989 {
8990 ALOGV("setPreferredMicrophoneDirection(%d)", direction);
8991 AutoMutex _l(mLock);
8992 if (!isStreamInitialized()) {
8993 return NO_INIT;
8994 }
8995 return mInput->stream->setPreferredMicrophoneDirection(direction);
8996 }
8997
8998 status_t AudioFlinger::RecordThread::setPreferredMicrophoneFieldDimension(float zoom)
8999 {
9000 ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
9001 AutoMutex _l(mLock);
9002 if (!isStreamInitialized()) {
9003 return NO_INIT;
9004 }
9005 return mInput->stream->setPreferredMicrophoneFieldDimension(zoom);
9006 }
9007
9008 status_t AudioFlinger::RecordThread::shareAudioHistory(
9009 const std::string& sharedAudioPackageName, audio_session_t sharedSessionId,
9010 int64_t sharedAudioStartMs) {
9011 AutoMutex _l(mLock);
9012 return shareAudioHistory_l(sharedAudioPackageName, sharedSessionId, sharedAudioStartMs);
9013 }
9014
9015 status_t AudioFlinger::RecordThread::shareAudioHistory_l(
9016 const std::string& sharedAudioPackageName, audio_session_t sharedSessionId,
9017 int64_t sharedAudioStartMs) {
9018
9019 if ((hasAudioSession_l(sharedSessionId) & ThreadBase::TRACK_SESSION) == 0) {
9020 return BAD_VALUE;
9021 }
9022
9023 if (sharedAudioStartMs < 0
9024 || sharedAudioStartMs > INT64_MAX / mSampleRate) {
9025 return BAD_VALUE;
9026 }
9027
9028 // Current implementation of the input resampling buffer wraps around indexes at 32 bit.
9029 // As we cannot detect more than one wraparound, only accept values up to the current write
9030 // position after one wraparound.
9031 // We only assume recent wraparounds on mRsmpInRear, given it is unlikely that the requesting
9032 // app waits several hours after the start time was computed.
9033 int64_t sharedAudioStartFrames = sharedAudioStartMs * mSampleRate / 1000;
9034 const int32_t sharedOffset = audio_utils::safe_sub_overflow(mRsmpInRear,
9035 (int32_t)sharedAudioStartFrames);
9036 // Bring the start frame position within the input buffer to match the documented
9037 // "best effort" behavior of the API.
9038 if (sharedOffset < 0) {
9039 sharedAudioStartFrames = mRsmpInRear;
9040 } else if (sharedOffset > static_cast<signed>(mRsmpInFrames)) {
9041 sharedAudioStartFrames =
9042 audio_utils::safe_sub_overflow(mRsmpInRear, (int32_t)mRsmpInFrames);
9043 }
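// Worked example with hypothetical values: mSampleRate = 48000 and sharedAudioStartMs = 100
// give sharedAudioStartFrames = 4800. With mRsmpInRear = 10000 and mRsmpInFrames = 3072,
// sharedOffset = 10000 - 4800 = 5200 > 3072, so the requested start lies before the oldest
// history kept and is clamped to 10000 - 3072 = 6928, the oldest frame still available.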
9044
9045 mSharedAudioPackageName = sharedAudioPackageName;
9046 if (mSharedAudioPackageName.empty()) {
9047 resetAudioHistory_l();
9048 } else {
9049 mSharedAudioSessionId = sharedSessionId;
9050 mSharedAudioStartFrames = (int32_t)sharedAudioStartFrames;
9051 }
9052 return NO_ERROR;
9053 }
9054
9055 void AudioFlinger::RecordThread::resetAudioHistory_l() {
9056 mSharedAudioSessionId = AUDIO_SESSION_NONE;
9057 mSharedAudioStartFrames = -1;
9058 mSharedAudioPackageName = "";
9059 }
9060
9061 AudioFlinger::ThreadBase::MetadataUpdate AudioFlinger::RecordThread::updateMetadata_l()
9062 {
9063 if (!isStreamInitialized() || !mActiveTracks.readAndClearHasChanged()) {
9064 return {}; // nothing to do
9065 }
9066 StreamInHalInterface::SinkMetadata metadata;
9067 auto backInserter = std::back_inserter(metadata.tracks);
9068 for (const sp<RecordTrack> &track : mActiveTracks) {
9069 track->copyMetadataTo(backInserter);
9070 }
9071 mInput->stream->updateSinkMetadata(metadata);
9072 MetadataUpdate change;
9073 change.recordMetadataUpdate = metadata.tracks;
9074 return change;
9075 }
9076
9077 // destroyTrack_l() must be called with ThreadBase::mLock held
9078 void AudioFlinger::RecordThread::destroyTrack_l(const sp<RecordTrack>& track)
9079 {
9080 track->terminate();
9081 track->mState = TrackBase::STOPPED;
9082
9083 // active tracks are removed by threadLoop()
9084 if (mActiveTracks.indexOf(track) < 0) {
9085 removeTrack_l(track);
9086 }
9087 }
9088
9089 void AudioFlinger::RecordThread::removeTrack_l(const sp<RecordTrack>& track)
9090 {
9091 String8 result;
9092 track->appendDump(result, false /* active */);
9093 mLocalLog.log("removeTrack_l (%p) %s", track.get(), result.string());
9094
9095 mTracks.remove(track);
9096 // need anything related to effects here?
9097 if (track->isFastTrack()) {
9098 ALOG_ASSERT(!mFastTrackAvail);
9099 mFastTrackAvail = true;
9100 }
9101 }
9102
9103 void AudioFlinger::RecordThread::dumpInternals_l(int fd, const Vector<String16>& args __unused)
9104 {
9105 AudioStreamIn *input = mInput;
9106 audio_input_flags_t flags = input != NULL ? input->flags : AUDIO_INPUT_FLAG_NONE;
9107 dprintf(fd, " AudioStreamIn: %p flags %#x (%s)\n",
9108 input, flags, toString(flags).c_str());
9109 dprintf(fd, " Frames read: %lld\n", (long long)mFramesRead);
9110 if (mActiveTracks.isEmpty()) {
9111 dprintf(fd, " No active record clients\n");
9112 }
9113
9114 if (input != nullptr) {
9115 dprintf(fd, " Hal stream dump:\n");
9116 (void)input->stream->dump(fd);
9117 }
9118
9119 dprintf(fd, " Fast capture thread: %s\n", hasFastCapture() ? "yes" : "no");
9120 dprintf(fd, " Fast track available: %s\n", mFastTrackAvail ? "yes" : "no");
9121
9122 // Make a non-atomic copy of fast capture dump state so it won't change underneath us
9123 // while we are dumping it. It may be inconsistent, but it won't mutate!
9124 // This is a large object so we place it on the heap.
9125 // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
9126 const std::unique_ptr<FastCaptureDumpState> copy =
9127 std::make_unique<FastCaptureDumpState>(mFastCaptureDumpState);
9128 copy->dump(fd);
9129 }
9130
9131 void AudioFlinger::RecordThread::dumpTracks_l(int fd, const Vector<String16>& args __unused)
9132 {
9133 String8 result;
9134 size_t numtracks = mTracks.size();
9135 size_t numactive = mActiveTracks.size();
9136 size_t numactiveseen = 0;
9137 dprintf(fd, " %zu Tracks", numtracks);
9138 const char *prefix = " ";
9139 if (numtracks) {
9140 dprintf(fd, " of which %zu are active\n", numactive);
9141 result.append(prefix);
9142 mTracks[0]->appendDumpHeader(result);
9143 for (size_t i = 0; i < numtracks ; ++i) {
9144 sp<RecordTrack> track = mTracks[i];
9145 if (track != 0) {
9146 bool active = mActiveTracks.indexOf(track) >= 0;
9147 if (active) {
9148 numactiveseen++;
9149 }
9150 result.append(prefix);
9151 track->appendDump(result, active);
9152 }
9153 }
9154 } else {
9155 dprintf(fd, "\n");
9156 }
9157
9158 if (numactiveseen != numactive) {
9159 result.append(" The following tracks are in the active list but"
9160 " not in the track list\n");
9161 result.append(prefix);
9162 mActiveTracks[0]->appendDumpHeader(result);
9163 for (size_t i = 0; i < numactive; ++i) {
9164 sp<RecordTrack> track = mActiveTracks[i];
9165 if (mTracks.indexOf(track) < 0) {
9166 result.append(prefix);
9167 track->appendDump(result, true /* active */);
9168 }
9169 }
9170
9171 }
9172 write(fd, result.string(), result.size());
9173 }
9174
9175 void AudioFlinger::RecordThread::setRecordSilenced(audio_port_handle_t portId, bool silenced)
9176 {
9177 Mutex::Autolock _l(mLock);
9178 for (size_t i = 0; i < mTracks.size() ; i++) {
9179 sp<RecordTrack> track = mTracks[i];
9180 if (track != 0 && track->portId() == portId) {
9181 track->setSilenced(silenced);
9182 }
9183 }
9184 }
9185
9186 void AudioFlinger::RecordThread::ResamplerBufferProvider::reset()
9187 {
9188 sp<ThreadBase> threadBase = mRecordTrack->mThread.promote();
9189 RecordThread *recordThread = (RecordThread *) threadBase.get();
9190 mRsmpInUnrel = 0;
9191 const int32_t rear = recordThread->mRsmpInRear;
9192 ssize_t deltaFrames = 0;
9193 if (mRecordTrack->startFrames() >= 0) {
9194 int32_t startFrames = mRecordTrack->startFrames();
9195 // Accept a recent wraparound of mRsmpInRear
9196 if (startFrames <= rear) {
9197 deltaFrames = rear - startFrames;
9198 } else {
9199 deltaFrames = (int32_t)((int64_t)rear + UINT32_MAX + 1 - startFrames);
9200 }
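// Worked example with hypothetical values: if startFrames = 0x7FFFFFE0 and rear has since
// wrapped to 0x80000020 (negative as int32_t), startFrames > rear, so the second branch
// computes deltaFrames = rear + UINT32_MAX + 1 - startFrames = 64, the true distance
// measured across the 32-bit wraparound rather than a huge bogus difference.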
9201 // start frame cannot be further in the past than start of resampling buffer
9202 if ((size_t) deltaFrames > recordThread->mRsmpInFrames) {
9203 deltaFrames = recordThread->mRsmpInFrames;
9204 }
9205 }
9206 mRsmpInFront = audio_utils::safe_sub_overflow(rear, static_cast<int32_t>(deltaFrames));
9207 }
9208
9209 void AudioFlinger::RecordThread::ResamplerBufferProvider::sync(
9210 size_t *framesAvailable, bool *hasOverrun)
9211 {
9212 sp<ThreadBase> threadBase = mRecordTrack->mThread.promote();
9213 RecordThread *recordThread = (RecordThread *) threadBase.get();
9214 const int32_t rear = recordThread->mRsmpInRear;
9215 const int32_t front = mRsmpInFront;
9216 const ssize_t filled = audio_utils::safe_sub_overflow(rear, front);
9217
9218 size_t framesIn;
9219 bool overrun = false;
9220 if (filled < 0) {
9221 // should not happen, but treat like a massive overrun and re-sync
9222 framesIn = 0;
9223 mRsmpInFront = rear;
9224 overrun = true;
9225 } else if ((size_t) filled <= recordThread->mRsmpInFrames) {
9226 framesIn = (size_t) filled;
9227 } else {
9228 // client is not keeping up with server, but give it latest data
9229 framesIn = recordThread->mRsmpInFrames;
9230 mRsmpInFront = /* front = */ audio_utils::safe_sub_overflow(
9231 rear, static_cast<int32_t>(framesIn));
9232 overrun = true;
9233 }
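// Worked example with hypothetical values: rear = 5000, front = 2800 and mRsmpInFrames = 2048
// give filled = 2200 > 2048, so the client has fallen behind: framesIn is capped at 2048,
// front is advanced to 5000 - 2048 = 2952, and the caller is told an overrun occurred.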
9234 if (framesAvailable != NULL) {
9235 *framesAvailable = framesIn;
9236 }
9237 if (hasOverrun != NULL) {
9238 *hasOverrun = overrun;
9239 }
9240 }
9241
9242 // AudioBufferProvider interface
9243 status_t AudioFlinger::RecordThread::ResamplerBufferProvider::getNextBuffer(
9244 AudioBufferProvider::Buffer* buffer)
9245 {
9246 sp<ThreadBase> threadBase = mRecordTrack->mThread.promote();
9247 if (threadBase == 0) {
9248 buffer->frameCount = 0;
9249 buffer->raw = NULL;
9250 return NOT_ENOUGH_DATA;
9251 }
9252 RecordThread *recordThread = (RecordThread *) threadBase.get();
9253 int32_t rear = recordThread->mRsmpInRear;
9254 int32_t front = mRsmpInFront;
9255 ssize_t filled = audio_utils::safe_sub_overflow(rear, front);
9256 // FIXME should not be P2 (don't want to increase latency)
9257 // FIXME if client not keeping up, discard
9258 LOG_ALWAYS_FATAL_IF(!(0 <= filled && (size_t) filled <= recordThread->mRsmpInFrames));
9259 // 'filled' may be non-contiguous, so return only the first contiguous chunk
9260
9261 front &= recordThread->mRsmpInFramesP2 - 1;
9262 size_t part1 = recordThread->mRsmpInFramesP2 - front;
9263 if (part1 > (size_t) filled) {
9264 part1 = filled;
9265 }
9266 size_t ask = buffer->frameCount;
9267 ALOG_ASSERT(ask > 0);
9268 if (part1 > ask) {
9269 part1 = ask;
9270 }
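// Worked example with hypothetical values: with mRsmpInFramesP2 = 2048, a masked front of
// 2040 and filled = 100 (and ask >= 8), part1 = 2048 - 2040 = 8, so only the 8 frames up to
// the end of the circular buffer are returned now; the remaining 92 frames are served by a
// later call after releaseBuffer() advances the front index.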
9271 if (part1 == 0) {
9272 // out of data is fine since the resampler will return a short-count.
9273 buffer->raw = NULL;
9274 buffer->frameCount = 0;
9275 mRsmpInUnrel = 0;
9276 return NOT_ENOUGH_DATA;
9277 }
9278
9279 buffer->raw = (uint8_t*)recordThread->mRsmpInBuffer + front * recordThread->mFrameSize;
9280 buffer->frameCount = part1;
9281 mRsmpInUnrel = part1;
9282 return NO_ERROR;
9283 }
9284
9285 // AudioBufferProvider interface
9286 void AudioFlinger::RecordThread::ResamplerBufferProvider::releaseBuffer(
9287 AudioBufferProvider::Buffer* buffer)
9288 {
9289 int32_t stepCount = static_cast<int32_t>(buffer->frameCount);
9290 if (stepCount == 0) {
9291 return;
9292 }
9293 ALOG_ASSERT(stepCount <= (int32_t)mRsmpInUnrel);
9294 mRsmpInUnrel -= stepCount;
9295 mRsmpInFront = audio_utils::safe_add_overflow(mRsmpInFront, stepCount);
9296 buffer->raw = NULL;
9297 buffer->frameCount = 0;
9298 }
9299
9300 void AudioFlinger::RecordThread::checkBtNrec()
9301 {
9302 Mutex::Autolock _l(mLock);
9303 checkBtNrec_l();
9304 }
9305
9306 void AudioFlinger::RecordThread::checkBtNrec_l()
9307 {
9308 // disable AEC and NS if the device is a BT SCO headset supporting those
9309 // pre processings
9310 bool suspend = audio_is_bluetooth_sco_device(inDeviceType()) &&
9311 mAudioFlinger->btNrecIsOff();
9312 if (mBtNrecSuspended.exchange(suspend) != suspend) {
9313 for (size_t i = 0; i < mEffectChains.size(); i++) {
9314 setEffectSuspended_l(FX_IID_AEC, suspend, mEffectChains[i]->sessionId());
9315 setEffectSuspended_l(FX_IID_NS, suspend, mEffectChains[i]->sessionId());
9316 }
9317 }
9318 }
9319
9320
9321 bool AudioFlinger::RecordThread::checkForNewParameter_l(const String8& keyValuePair,
9322 status_t& status)
9323 {
9324 bool reconfig = false;
9325
9326 status = NO_ERROR;
9327
9328 audio_format_t reqFormat = mFormat;
9329 uint32_t samplingRate = mSampleRate;
9330 // TODO this may change if we want to support capture from HDMI PCM multi channel (e.g on TVs).
9331 [[maybe_unused]] audio_channel_mask_t channelMask =
9332 audio_channel_in_mask_from_count(mChannelCount);
9333
9334 AudioParameter param = AudioParameter(keyValuePair);
9335 int value;
9336
9337 // scope for AutoPark extends to end of method
9338 AutoPark<FastCapture> park(mFastCapture);
9339
9340 // TODO Investigate when this code runs. Check with audio policy when a sample rate and
9341 // channel count change can be requested. Do we mandate the first client defines the
9342 // HAL sampling rate and channel count or do we allow changes on the fly?
9343 if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) {
9344 samplingRate = value;
9345 reconfig = true;
9346 }
9347 if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) {
9348 if (!audio_is_linear_pcm((audio_format_t) value)) {
9349 status = BAD_VALUE;
9350 } else {
9351 reqFormat = (audio_format_t) value;
9352 reconfig = true;
9353 }
9354 }
9355 if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
9356 audio_channel_mask_t mask = (audio_channel_mask_t) value;
9357 if (!audio_is_input_channel(mask) ||
9358 audio_channel_count_from_in_mask(mask) > FCC_LIMIT) {
9359 status = BAD_VALUE;
9360 } else {
9361 channelMask = mask;
9362 reconfig = true;
9363 }
9364 }
9365 if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
9366 // do not accept frame count changes if tracks are open as the track buffer
9367 // size depends on frame count and correct behavior would not be guaranteed
9368 // if frame count is changed after track creation
9369 if (mActiveTracks.size() > 0) {
9370 status = INVALID_OPERATION;
9371 } else {
9372 reconfig = true;
9373 }
9374 }
9375 if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
9376 LOG_FATAL("Should not set routing device in RecordThread");
9377 }
9378 if (param.getInt(String8(AudioParameter::keyInputSource), value) == NO_ERROR &&
9379 mAudioSource != (audio_source_t)value) {
9380 LOG_FATAL("Should not set audio source in RecordThread");
9381 }
9382
9383 if (status == NO_ERROR) {
9384 status = mInput->stream->setParameters(keyValuePair);
9385 if (status == INVALID_OPERATION) {
9386 inputStandBy();
9387 status = mInput->stream->setParameters(keyValuePair);
9388 }
9389 if (reconfig) {
9390 if (status == BAD_VALUE) {
9391 audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
9392 if (mInput->stream->getAudioProperties(&config) == OK &&
9393 audio_is_linear_pcm(config.format) && audio_is_linear_pcm(reqFormat) &&
9394 config.sample_rate <= (AUDIO_RESAMPLER_DOWN_RATIO_MAX * samplingRate) &&
9395 audio_channel_count_from_in_mask(config.channel_mask) <= FCC_LIMIT) {
9396 status = NO_ERROR;
9397 }
9398 }
9399 if (status == NO_ERROR) {
9400 readInputParameters_l();
9401 sendIoConfigEvent_l(AUDIO_INPUT_CONFIG_CHANGED);
9402 }
9403 }
9404 }
9405
9406 return reconfig;
9407 }
9408
9409 String8 AudioFlinger::RecordThread::getParameters(const String8& keys)
9410 {
9411 Mutex::Autolock _l(mLock);
9412 if (initCheck() == NO_ERROR) {
9413 String8 out_s8;
9414 if (mInput->stream->getParameters(keys, &out_s8) == OK) {
9415 return out_s8;
9416 }
9417 }
9418 return {};
9419 }
9420
9421 void AudioFlinger::RecordThread::ioConfigChanged(audio_io_config_event_t event, pid_t pid,
9422 audio_port_handle_t portId) {
9423 sp<AudioIoDescriptor> desc;
9424 switch (event) {
9425 case AUDIO_INPUT_OPENED:
9426 case AUDIO_INPUT_REGISTERED:
9427 case AUDIO_INPUT_CONFIG_CHANGED:
9428 desc = sp<AudioIoDescriptor>::make(mId, mPatch, true /*isInput*/,
9429 mSampleRate, mFormat, mChannelMask, mFrameCount, mFrameCount);
9430 break;
9431 case AUDIO_CLIENT_STARTED:
9432 desc = sp<AudioIoDescriptor>::make(mId, mPatch, portId);
9433 break;
9434 case AUDIO_INPUT_CLOSED:
9435 default:
9436 desc = sp<AudioIoDescriptor>::make(mId);
9437 break;
9438 }
9439 mAudioFlinger->ioConfigChanged(event, desc, pid);
9440 }
9441
9442 void AudioFlinger::RecordThread::readInputParameters_l()
9443 {
9444 status_t result = mInput->stream->getAudioProperties(&mSampleRate, &mChannelMask, &mHALFormat);
9445 LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving audio properties from HAL: %d", result);
9446 mFormat = mHALFormat;
9447 mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
9448 if (audio_is_linear_pcm(mFormat)) {
9449 LOG_ALWAYS_FATAL_IF(mChannelCount > FCC_LIMIT, "HAL channel count %d > %d",
9450 mChannelCount, FCC_LIMIT);
9451 } else {
9452 // Can have more than FCC_LIMIT channels in encoded streams.
9453 ALOGI("HAL format %#x is not linear pcm", mFormat);
9454 }
9455 result = mInput->stream->getFrameSize(&mFrameSize);
9456 LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving frame size from HAL: %d", result);
9457 LOG_ALWAYS_FATAL_IF(mFrameSize <= 0, "Error frame size was %zu but must be greater than zero",
9458 mFrameSize);
9459 result = mInput->stream->getBufferSize(&mBufferSize);
9460 LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving buffer size from HAL: %d", result);
9461 mFrameCount = mBufferSize / mFrameSize;
9462 ALOGV("%p RecordThread params: mChannelCount=%u, mFormat=%#x, mFrameSize=%zu, "
9463 "mBufferSize=%zu, mFrameCount=%zu",
9464 this, mChannelCount, mFormat, mFrameSize, mBufferSize, mFrameCount);
9465
9466 // mRsmpInFrames must be 0 before calling resizeInputBuffer_l for the first time
9467 mRsmpInFrames = 0;
9468 resizeInputBuffer_l(0 /*maxSharedAudioHistoryMs*/);
9469
9470 // AudioRecord mSampleRate and mChannelCount are constant due to AudioRecord API constraints.
9471 // But if thread's mSampleRate or mChannelCount changes, how will that affect active tracks?
9472
9473 audio_input_flags_t flags = mInput->flags;
9474 mediametrics::LogItem item(mThreadMetrics.getMetricsId());
9475 item.set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_READPARAMETERS)
9476 .set(AMEDIAMETRICS_PROP_ENCODING, formatToString(mFormat).c_str())
9477 .set(AMEDIAMETRICS_PROP_FLAGS, toString(flags).c_str())
9478 .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
9479 .set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
9480 .set(AMEDIAMETRICS_PROP_CHANNELCOUNT, (int32_t)mChannelCount)
9481 .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mFrameCount)
9482 .record();
9483 }
9484
9485 uint32_t AudioFlinger::RecordThread::getInputFramesLost()
9486 {
9487 Mutex::Autolock _l(mLock);
9488 uint32_t result;
9489 if (initCheck() == NO_ERROR && mInput->stream->getInputFramesLost(&result) == OK) {
9490 return result;
9491 }
9492 return 0;
9493 }
9494
9495 KeyedVector<audio_session_t, bool> AudioFlinger::RecordThread::sessionIds() const
9496 {
9497 KeyedVector<audio_session_t, bool> ids;
9498 Mutex::Autolock _l(mLock);
9499 for (size_t j = 0; j < mTracks.size(); ++j) {
9500 sp<RecordThread::RecordTrack> track = mTracks[j];
9501 audio_session_t sessionId = track->sessionId();
9502 if (ids.indexOfKey(sessionId) < 0) {
9503 ids.add(sessionId, true);
9504 }
9505 }
9506 return ids;
9507 }
9508
9509 AudioFlinger::AudioStreamIn* AudioFlinger::RecordThread::clearInput()
9510 {
9511 Mutex::Autolock _l(mLock);
9512 AudioStreamIn *input = mInput;
9513 mInput = NULL;
9514 mInputSource.clear();
9515 return input;
9516 }
9517
9518 // this method must always be called either with ThreadBase mLock held or inside the thread loop
9519 sp<StreamHalInterface> AudioFlinger::RecordThread::stream() const
9520 {
9521 if (mInput == NULL) {
9522 return NULL;
9523 }
9524 return mInput->stream;
9525 }
9526
9527 status_t AudioFlinger::RecordThread::addEffectChain_l(const sp<EffectChain>& chain)
9528 {
9529 ALOGV("addEffectChain_l() %p on thread %p", chain.get(), this);
9530 chain->setThread(this);
9531 chain->setInBuffer(NULL);
9532 chain->setOutBuffer(NULL);
9533
9534 checkSuspendOnAddEffectChain_l(chain);
9535
9536 // make sure enabled pre processing effects state is communicated to the HAL as we
9537 // just moved them to a new input stream.
9538 chain->syncHalEffectsState();
9539
9540 mEffectChains.add(chain);
9541
9542 return NO_ERROR;
9543 }
9544
9545 size_t AudioFlinger::RecordThread::removeEffectChain_l(const sp<EffectChain>& chain)
9546 {
9547 ALOGV("removeEffectChain_l() %p from thread %p", chain.get(), this);
9548
9549 for (size_t i = 0; i < mEffectChains.size(); i++) {
9550 if (chain == mEffectChains[i]) {
9551 mEffectChains.removeAt(i);
9552 break;
9553 }
9554 }
9555 return mEffectChains.size();
9556 }
9557
9558 status_t AudioFlinger::RecordThread::createAudioPatch_l(const struct audio_patch *patch,
9559 audio_patch_handle_t *handle)
9560 {
9561 status_t status = NO_ERROR;
9562
9563 // store new device and send to effects
9564 mInDeviceTypeAddr.mType = patch->sources[0].ext.device.type;
9565 mInDeviceTypeAddr.setAddress(patch->sources[0].ext.device.address);
9566 audio_port_handle_t deviceId = patch->sources[0].id;
9567 for (size_t i = 0; i < mEffectChains.size(); i++) {
9568 mEffectChains[i]->setInputDevice_l(inDeviceTypeAddr());
9569 }
9570
9571 checkBtNrec_l();
9572
9573 // store new source and send to effects
9574 if (mAudioSource != patch->sinks[0].ext.mix.usecase.source) {
9575 mAudioSource = patch->sinks[0].ext.mix.usecase.source;
9576 for (size_t i = 0; i < mEffectChains.size(); i++) {
9577 mEffectChains[i]->setAudioSource_l(mAudioSource);
9578 }
9579 }
9580
9581 if (mInput->audioHwDev->supportsAudioPatches()) {
9582 sp<DeviceHalInterface> hwDevice = mInput->audioHwDev->hwDevice();
9583 status = hwDevice->createAudioPatch(patch->num_sources,
9584 patch->sources,
9585 patch->num_sinks,
9586 patch->sinks,
9587 handle);
9588 } else {
9589 status = mInput->stream->legacyCreateAudioPatch(patch->sources[0],
9590 patch->sinks[0].ext.mix.usecase.source,
9591 patch->sources[0].ext.device.type);
9592 *handle = AUDIO_PATCH_HANDLE_NONE;
9593 }
9594
9595 if ((mPatch.num_sources == 0) || (mPatch.sources[0].id != deviceId)) {
9596 sendIoConfigEvent_l(AUDIO_INPUT_CONFIG_CHANGED);
9597 mPatch = *patch;
9598 }
9599
9600 const std::string patchSourcesAsString = patchSourcesToString(patch);
9601 mThreadMetrics.logEndInterval();
9602 mThreadMetrics.logCreatePatch(patchSourcesAsString, /* outDevices */ {});
9603 mThreadMetrics.logBeginInterval();
9604 // also dispatch to active AudioRecords
9605 for (const auto &track : mActiveTracks) {
9606 track->logEndInterval();
9607 track->logBeginInterval(patchSourcesAsString);
9608 }
9609 // Force metadata update after a route change
9610 mActiveTracks.setHasChanged();
9611
9612 return status;
9613 }
9614
9615 status_t AudioFlinger::RecordThread::releaseAudioPatch_l(const audio_patch_handle_t handle)
9616 {
9617 status_t status = NO_ERROR;
9618
9619 mPatch = audio_patch{};
9620 mInDeviceTypeAddr.reset();
9621
9622 if (mInput->audioHwDev->supportsAudioPatches()) {
9623 sp<DeviceHalInterface> hwDevice = mInput->audioHwDev->hwDevice();
9624 status = hwDevice->releaseAudioPatch(handle);
9625 } else {
9626 status = mInput->stream->legacyReleaseAudioPatch();
9627 }
9628 // Force metadata update after a route change
9629 mActiveTracks.setHasChanged();
9630
9631 return status;
9632 }
9633
9634 void AudioFlinger::RecordThread::updateOutDevices(const DeviceDescriptorBaseVector& outDevices)
9635 {
9636 Mutex::Autolock _l(mLock);
9637 mOutDevices = outDevices;
9638 mOutDeviceTypeAddrs = deviceTypeAddrsFromDescriptors(mOutDevices);
9639 for (size_t i = 0; i < mEffectChains.size(); i++) {
9640 mEffectChains[i]->setDevices_l(outDeviceTypeAddrs());
9641 }
9642 }
9643
9644 int32_t AudioFlinger::RecordThread::getOldestFront_l()
9645 {
9646 if (mTracks.size() == 0) {
9647 return mRsmpInRear;
9648 }
9649 int32_t oldestFront = mRsmpInRear;
9650 int32_t maxFilled = 0;
9651 for (size_t i = 0; i < mTracks.size(); i++) {
9652 int32_t front = mTracks[i]->mResamplerBufferProvider->getFront();
9653 int32_t filled;
9654 (void)__builtin_sub_overflow(mRsmpInRear, front, &filled);
9655 if (filled > maxFilled) {
9656 oldestFront = front;
9657 maxFilled = filled;
9658 }
9659 }
9660 if (maxFilled > static_cast<signed>(mRsmpInFrames)) {
9661 (void)__builtin_sub_overflow(mRsmpInRear, mRsmpInFrames, &oldestFront);
9662 }
9663 return oldestFront;
9664 }
9665
9666 void AudioFlinger::RecordThread::updateFronts_l(int32_t offset)
9667 {
9668 if (offset == 0) {
9669 return;
9670 }
9671 for (size_t i = 0; i < mTracks.size(); i++) {
9672 int32_t front = mTracks[i]->mResamplerBufferProvider->getFront();
9673 front = audio_utils::safe_sub_overflow(front, offset);
9674 mTracks[i]->mResamplerBufferProvider->setFront(front);
9675 }
9676 }
9677
9678 void AudioFlinger::RecordThread::resizeInputBuffer_l(int32_t maxSharedAudioHistoryMs)
9679 {
9680 // This is the formula for calculating the temporary buffer size.
9681 // With 7 HAL buffers, we can guarantee ability to down-sample the input by ratio of 6:1 to
9682 // 1 full output buffer, regardless of the alignment of the available input.
9683 // The value is somewhat arbitrary, and could probably be even larger.
9684 // A larger value should allow more old data to be read after a track calls start(),
9685 // without increasing latency.
9686 //
9687 // Note this is independent of the maximum downsampling ratio permitted for capture.
9688 size_t minRsmpInFrames = mFrameCount * 7;
9689
9690 // maxSharedAudioHistoryMs != 0 indicates a request to possibly make some part of the audio
9691 // capture history available to another client using the same session ID:
9692 // dimension the resampler input buffer accordingly.
9693
9694 // Get oldest client read position: getOldestFront_l() must be called before altering
9695 // mRsmpInRear, or mRsmpInFrames
9696 int32_t previousFront = getOldestFront_l();
9697 size_t previousRsmpInFramesP2 = mRsmpInFramesP2;
9698 int32_t previousRear = mRsmpInRear;
9699 mRsmpInRear = 0;
9700
9701 ALOG_ASSERT(maxSharedAudioHistoryMs >= 0
9702 && maxSharedAudioHistoryMs <= AudioFlinger::kMaxSharedAudioHistoryMs,
9703 "resizeInputBuffer_l() called with invalid max shared history %d",
9704 maxSharedAudioHistoryMs);
9705 if (maxSharedAudioHistoryMs != 0) {
9706 // resizeInputBuffer_l should never be called with a non zero shared history if the
9707 // buffer was not already allocated
9708 ALOG_ASSERT(mRsmpInBuffer != nullptr && mRsmpInFrames != 0,
9709 "resizeInputBuffer_l() called with shared history and unallocated buffer");
9710 size_t rsmpInFrames = (size_t)maxSharedAudioHistoryMs * mSampleRate / 1000;
9711 // never reduce resampler input buffer size
9712 if (rsmpInFrames <= mRsmpInFrames) {
9713 return;
9714 }
9715 mRsmpInFrames = rsmpInFrames;
9716 }
9717 mMaxSharedAudioHistoryMs = maxSharedAudioHistoryMs;
9718 // Note: mRsmpInFrames is 0 when called with maxSharedAudioHistoryMs equal to 0, so it is
9719 // always initialized
9720 if (mRsmpInFrames < minRsmpInFrames) {
9721 mRsmpInFrames = minRsmpInFrames;
9722 }
9723 mRsmpInFramesP2 = roundup(mRsmpInFrames);
9724
9725 // TODO optimize audio capture buffer sizes ...
9726 // Here we calculate the size of the sliding buffer used as a source
9727 // for resampling. mRsmpInFramesP2 is currently roundup(mFrameCount * 7).
9728 // For current HAL frame counts, this is usually 2048 = 40 ms. It would
9729 // be better to have it derived from the pipe depth in the long term.
9730 // The current value is higher than necessary. However it should not add to latency.
9731
9732 // Over-allocate beyond mRsmpInFramesP2 to permit a HAL read past end of buffer
9733 mRsmpInFramesOA = mRsmpInFramesP2 + mFrameCount - 1;
9734
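// Worked example with hypothetical values: mFrameCount = 240 (5 ms at 48 kHz) gives
// minRsmpInFrames = 240 * 7 = 1680, mRsmpInFramesP2 = roundup(1680) = 2048 and
// mRsmpInFramesOA = 2048 + 240 - 1 = 2287 frames; the extra 239 frames let a full HAL read
// that starts near the end of the power-of-two buffer run past it without wrapping.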
9735 void *rsmpInBuffer;
9736 (void)posix_memalign(&rsmpInBuffer, 32, mRsmpInFramesOA * mFrameSize);
9737 // if posix_memalign fails, will segv here.
9738 memset(rsmpInBuffer, 0, mRsmpInFramesOA * mFrameSize);
9739
9740 // Copy audio history if any from old buffer before freeing it
9741 if (previousRear != 0) {
9742 ALOG_ASSERT(mRsmpInBuffer != nullptr,
9743 "resizeInputBuffer_l() called with null buffer but frames already read from HAL");
9744
9745 ssize_t unread = audio_utils::safe_sub_overflow(previousRear, previousFront);
9746 previousFront &= previousRsmpInFramesP2 - 1;
9747 size_t part1 = previousRsmpInFramesP2 - previousFront;
9748 if (part1 > (size_t) unread) {
9749 part1 = unread;
9750 }
9751 if (part1 != 0) {
9752 memcpy(rsmpInBuffer, (const uint8_t*)mRsmpInBuffer + previousFront * mFrameSize,
9753 part1 * mFrameSize);
9754 mRsmpInRear = part1;
9755 part1 = unread - part1;
9756 if (part1 != 0) {
9757 memcpy((uint8_t*)rsmpInBuffer + mRsmpInRear * mFrameSize,
9758 (const uint8_t*)mRsmpInBuffer, part1 * mFrameSize);
9759 mRsmpInRear += part1;
9760 }
9761 }
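// Worked example with hypothetical values: previousRsmpInFramesP2 = 2048, previousFront
// masked to 2000 and unread = 100 give part1 = 48 frames copied from the tail of the old
// buffer, then the remaining 52 frames copied from its start, leaving mRsmpInRear = 100 in
// the new, linearized buffer.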
9762 // Update front for all clients according to new rear
9763 updateFronts_l(audio_utils::safe_sub_overflow(previousRear, mRsmpInRear));
9764 } else {
9765 mRsmpInRear = 0;
9766 }
9767 free(mRsmpInBuffer);
9768 mRsmpInBuffer = rsmpInBuffer;
9769 }
9770
9771 void AudioFlinger::RecordThread::addPatchTrack(const sp<PatchRecord>& record)
9772 {
9773 Mutex::Autolock _l(mLock);
9774 mTracks.add(record);
9775 if (record->getSource()) {
9776 mSource = record->getSource();
9777 }
9778 }
9779
9780 void AudioFlinger::RecordThread::deletePatchTrack(const sp<PatchRecord>& record)
9781 {
9782 Mutex::Autolock _l(mLock);
9783 if (mSource == record->getSource()) {
9784 mSource = mInput;
9785 }
9786 destroyTrack_l(record);
9787 }
9788
9789 void AudioFlinger::RecordThread::toAudioPortConfig(struct audio_port_config *config)
9790 {
9791 ThreadBase::toAudioPortConfig(config);
9792 config->role = AUDIO_PORT_ROLE_SINK;
9793 config->ext.mix.hw_module = mInput->audioHwDev->handle();
9794 config->ext.mix.usecase.source = mAudioSource;
9795 if (mInput && mInput->flags != AUDIO_INPUT_FLAG_NONE) {
9796 config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
9797 config->flags.input = mInput->flags;
9798 }
9799 }
9800
9801 // ----------------------------------------------------------------------------
9802 // Mmap
9803 // ----------------------------------------------------------------------------
9804
9805 AudioFlinger::MmapThreadHandle::MmapThreadHandle(const sp<MmapThread>& thread)
9806 : mThread(thread)
9807 {
9808 assert(thread != 0); // thread must start non-null and stay non-null
9809 }
9810
9811 AudioFlinger::MmapThreadHandle::~MmapThreadHandle()
9812 {
9813 mThread->disconnect();
9814 }
9815
9816 status_t AudioFlinger::MmapThreadHandle::createMmapBuffer(int32_t minSizeFrames,
9817 struct audio_mmap_buffer_info *info)
9818 {
9819 return mThread->createMmapBuffer(minSizeFrames, info);
9820 }
9821
9822 status_t AudioFlinger::MmapThreadHandle::getMmapPosition(struct audio_mmap_position *position)
9823 {
9824 return mThread->getMmapPosition(position);
9825 }
9826
9827 status_t AudioFlinger::MmapThreadHandle::getExternalPosition(uint64_t *position,
9828 int64_t *timeNanos) {
9829 return mThread->getExternalPosition(position, timeNanos);
9830 }
9831
9832 status_t AudioFlinger::MmapThreadHandle::start(const AudioClient& client,
9833 const audio_attributes_t *attr, audio_port_handle_t *handle)
9834
9835 {
9836 return mThread->start(client, attr, handle);
9837 }
9838
9839 status_t AudioFlinger::MmapThreadHandle::stop(audio_port_handle_t handle)
9840 {
9841 return mThread->stop(handle);
9842 }
9843
9844 status_t AudioFlinger::MmapThreadHandle::standby()
9845 {
9846 return mThread->standby();
9847 }
9848
9849 status_t AudioFlinger::MmapThreadHandle::reportData(const void* buffer, size_t frameCount) {
9850 return mThread->reportData(buffer, frameCount);
9851 }
9852
9853
9854 AudioFlinger::MmapThread::MmapThread(
9855 const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
9856 AudioHwDevice *hwDev, const sp<StreamHalInterface>& stream, bool systemReady, bool isOut)
9857 : ThreadBase(audioFlinger, id, (isOut ? MMAP_PLAYBACK : MMAP_CAPTURE), systemReady, isOut),
9858 mSessionId(AUDIO_SESSION_NONE),
9859 mPortId(AUDIO_PORT_HANDLE_NONE),
9860 mHalStream(stream), mHalDevice(hwDev->hwDevice()), mAudioHwDev(hwDev),
9861 mActiveTracks(&this->mLocalLog),
9862 mHalVolFloat(-1.0f), // Initialize to illegal value so it always gets set properly later.
9863 mNoCallbackWarningCount(0)
9864 {
9865 mStandby = true;
9866 readHalParameters_l();
9867 }
9868
9869 AudioFlinger::MmapThread::~MmapThread()
9870 {
9871 }
9872
9873 void AudioFlinger::MmapThread::onFirstRef()
9874 {
9875 run(mThreadName, ANDROID_PRIORITY_URGENT_AUDIO);
9876 }
9877
9878 void AudioFlinger::MmapThread::disconnect()
9879 {
9880 ActiveTracks<MmapTrack> activeTracks;
9881 {
9882 Mutex::Autolock _l(mLock);
9883 for (const sp<MmapTrack> &t : mActiveTracks) {
9884 activeTracks.add(t);
9885 }
9886 }
9887 for (const sp<MmapTrack> &t : activeTracks) {
9888 stop(t->portId());
9889 }
9890 // This will decrement references and may cause the destruction of this thread.
9891 if (isOutput()) {
9892 AudioSystem::releaseOutput(mPortId);
9893 } else {
9894 AudioSystem::releaseInput(mPortId);
9895 }
9896 }
9897
9898
9899 void AudioFlinger::MmapThread::configure(const audio_attributes_t *attr,
9900 audio_stream_type_t streamType __unused,
9901 audio_session_t sessionId,
9902 const sp<MmapStreamCallback>& callback,
9903 audio_port_handle_t deviceId,
9904 audio_port_handle_t portId)
9905 {
9906 mAttr = *attr;
9907 mSessionId = sessionId;
9908 mCallback = callback;
9909 mDeviceId = deviceId;
9910 mPortId = portId;
9911 }
9912
9913 status_t AudioFlinger::MmapThread::createMmapBuffer(int32_t minSizeFrames,
9914 struct audio_mmap_buffer_info *info)
9915 {
9916 if (mHalStream == 0) {
9917 return NO_INIT;
9918 }
9919 mStandby = true;
9920 return mHalStream->createMmapBuffer(minSizeFrames, info);
9921 }
9922
9923 status_t AudioFlinger::MmapThread::getMmapPosition(struct audio_mmap_position *position)
9924 {
9925 if (mHalStream == 0) {
9926 return NO_INIT;
9927 }
9928 return mHalStream->getMmapPosition(position);
9929 }
9930
9931 status_t AudioFlinger::MmapThread::exitStandby_l()
9932 {
9933 // The HAL must receive track metadata before starting the stream
9934 updateMetadata_l();
9935 status_t ret = mHalStream->start();
9936 if (ret != NO_ERROR) {
9937 ALOGE("%s: error mHalStream->start() = %d for first track", __FUNCTION__, ret);
9938 return ret;
9939 }
9940 if (mStandby) {
9941 mThreadMetrics.logBeginInterval();
9942 mThreadSnapshot.onBegin();
9943 mStandby = false;
9944 }
9945 return NO_ERROR;
9946 }
9947
9948 status_t AudioFlinger::MmapThread::start(const AudioClient& client,
9949 const audio_attributes_t *attr,
9950 audio_port_handle_t *handle)
9951 {
9952 ALOGV("%s clientUid %d mStandby %d mPortId %d *handle %d", __FUNCTION__,
9953 client.attributionSource.uid, mStandby, mPortId, *handle);
9954 if (mHalStream == 0) {
9955 return NO_INIT;
9956 }
9957
9958 status_t ret;
9959
9960 // For the first track, reuse portId and session allocated when the stream was opened.
9961 if (*handle == mPortId) {
9962 acquireWakeLock();
9963 return NO_ERROR;
9964 }
9965
9966 audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
9967
9968 audio_io_handle_t io = mId;
9969 AttributionSourceState adjAttributionSource = AudioFlinger::checkAttributionSourcePackage(
9970 client.attributionSource);
9971
9972 if (isOutput()) {
9973 audio_config_t config = AUDIO_CONFIG_INITIALIZER;
9974 config.sample_rate = mSampleRate;
9975 config.channel_mask = mChannelMask;
9976 config.format = mFormat;
9977 audio_stream_type_t stream = streamType();
9978 audio_output_flags_t flags =
9979 (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_MMAP_NOIRQ | AUDIO_OUTPUT_FLAG_DIRECT);
9980 audio_port_handle_t deviceId = mDeviceId;
9981 std::vector<audio_io_handle_t> secondaryOutputs;
9982 bool isSpatialized;
9983 bool isBitPerfect;
9984 ret = AudioSystem::getOutputForAttr(&mAttr, &io,
9985 mSessionId,
9986 &stream,
9987 adjAttributionSource,
9988 &config,
9989 flags,
9990 &deviceId,
9991 &portId,
9992 &secondaryOutputs,
9993 &isSpatialized,
9994 &isBitPerfect);
9995 ALOGD_IF(!secondaryOutputs.empty(),
9996 "MmapThread::start does not support secondary outputs, ignoring them");
9997 } else {
9998 audio_config_base_t config;
9999 config.sample_rate = mSampleRate;
10000 config.channel_mask = mChannelMask;
10001 config.format = mFormat;
10002 audio_port_handle_t deviceId = mDeviceId;
10003 ret = AudioSystem::getInputForAttr(&mAttr, &io,
10004 RECORD_RIID_INVALID,
10005 mSessionId,
10006 adjAttributionSource,
10007 &config,
10008 AUDIO_INPUT_FLAG_MMAP_NOIRQ,
10009 &deviceId,
10010 &portId);
10011 }
10012 // APM should not choose a different input or output stream for the same set of attributes
10013 // and audio configuration
10014 if (ret != NO_ERROR || io != mId) {
10015 ALOGE("%s: error getting output or input from APM (error %d, io %d expected io %d)",
10016 __FUNCTION__, ret, io, mId);
10017 return BAD_VALUE;
10018 }
10019
10020 if (isOutput()) {
10021 ret = AudioSystem::startOutput(portId);
10022 } else {
10023 {
10024 // Add the track record before starting input so that the silent status for the
10025 // client can be cached.
10026 Mutex::Autolock _l(mLock);
10027 setClientSilencedState_l(portId, false /*silenced*/);
10028 }
10029 ret = AudioSystem::startInput(portId);
10030 }
10031
10032 Mutex::Autolock _l(mLock);
10033 // abort if start is rejected by audio policy manager
10034 if (ret != NO_ERROR) {
10035 ALOGE("%s: error start rejected by AudioPolicyManager = %d", __FUNCTION__, ret);
10036 if (!mActiveTracks.isEmpty()) {
10037 mLock.unlock();
10038 if (isOutput()) {
10039 AudioSystem::releaseOutput(portId);
10040 } else {
10041 AudioSystem::releaseInput(portId);
10042 }
10043 mLock.lock();
10044 } else {
10045 mHalStream->stop();
10046 }
10047 eraseClientSilencedState_l(portId);
10048 return PERMISSION_DENIED;
10049 }
10050
10051 // Given that MmapThread::mAttr is mutable, should a MmapTrack have attributes?
10052 sp<MmapTrack> track = new MmapTrack(this, attr == nullptr ? mAttr : *attr, mSampleRate, mFormat,
10053 mChannelMask, mSessionId, isOutput(),
10054 client.attributionSource,
10055 IPCThreadState::self()->getCallingPid(), portId);
10056 if (!isOutput()) {
10057 track->setSilenced_l(isClientSilenced_l(portId));
10058 }
10059
10060 if (isOutput()) {
10061 // force volume update when a new track is added
10062 mHalVolFloat = -1.0f;
10063 } else if (!track->isSilenced_l()) {
10064 for (const sp<MmapTrack> &t : mActiveTracks) {
10065 if (t->isSilenced_l()
10066 && t->uid() != static_cast<uid_t>(client.attributionSource.uid)) {
10067 t->invalidate();
10068 }
10069 }
10070 }
10071
10072 mActiveTracks.add(track);
10073 sp<EffectChain> chain = getEffectChain_l(mSessionId);
10074 if (chain != 0) {
10075 chain->setStrategy(getStrategyForStream(streamType()));
10076 chain->incTrackCnt();
10077 chain->incActiveTrackCnt();
10078 }
10079
10080 track->logBeginInterval(patchSinksToString(&mPatch)); // log to MediaMetrics
10081 *handle = portId;
10082
10083 if (mActiveTracks.size() == 1) {
10084 ret = exitStandby_l();
10085 }
10086
10087 broadcast_l();
10088
10089 ALOGV("%s DONE status %d handle %d stream %p", __FUNCTION__, ret, *handle, mHalStream.get());
10090
10091 return ret;
10092 }
10093
10094 status_t AudioFlinger::MmapThread::stop(audio_port_handle_t handle)
10095 {
10096 ALOGV("%s handle %d", __FUNCTION__, handle);
10097
10098 if (mHalStream == 0) {
10099 return NO_INIT;
10100 }
10101
10102 if (handle == mPortId) {
10103 releaseWakeLock();
10104 return NO_ERROR;
10105 }
10106
10107 Mutex::Autolock _l(mLock);
10108
10109 sp<MmapTrack> track;
10110 for (const sp<MmapTrack> &t : mActiveTracks) {
10111 if (handle == t->portId()) {
10112 track = t;
10113 break;
10114 }
10115 }
10116 if (track == 0) {
10117 return BAD_VALUE;
10118 }
10119
10120 mActiveTracks.remove(track);
10121 eraseClientSilencedState_l(track->portId());
10122
10123 mLock.unlock();
10124 if (isOutput()) {
10125 AudioSystem::stopOutput(track->portId());
10126 AudioSystem::releaseOutput(track->portId());
10127 } else {
10128 AudioSystem::stopInput(track->portId());
10129 AudioSystem::releaseInput(track->portId());
10130 }
10131 mLock.lock();
10132
10133 sp<EffectChain> chain = getEffectChain_l(track->sessionId());
10134 if (chain != 0) {
10135 chain->decActiveTrackCnt();
10136 chain->decTrackCnt();
10137 }
10138
10139 if (mActiveTracks.isEmpty()) {
10140 mHalStream->stop();
10141 }
10142
10143 broadcast_l();
10144
10145 return NO_ERROR;
10146 }
10147
10148 status_t AudioFlinger::MmapThread::standby()
10149 {
10150 ALOGV("%s", __FUNCTION__);
10151
10152 if (mHalStream == 0) {
10153 return NO_INIT;
10154 }
10155 if (!mActiveTracks.isEmpty()) {
10156 return INVALID_OPERATION;
10157 }
10158 mHalStream->standby();
10159 if (!mStandby) {
10160 mThreadMetrics.logEndInterval();
10161 mThreadSnapshot.onEnd();
10162 mStandby = true;
10163 }
10164 releaseWakeLock();
10165 return NO_ERROR;
10166 }
10167
10168 status_t AudioFlinger::MmapThread::reportData(const void* /*buffer*/, size_t /*frameCount*/) {
10169 // This is a stub implementation. The MmapPlaybackThread overrides this function.
10170 return INVALID_OPERATION;
10171 }
10172
10173 void AudioFlinger::MmapThread::readHalParameters_l()
10174 {
10175 status_t result = mHalStream->getAudioProperties(&mSampleRate, &mChannelMask, &mHALFormat);
10176 LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving audio properties from HAL: %d", result);
10177 mFormat = mHALFormat;
10178 LOG_ALWAYS_FATAL_IF(!audio_is_linear_pcm(mFormat), "HAL format %#x is not linear pcm", mFormat);
10179 result = mHalStream->getFrameSize(&mFrameSize);
10180 LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving frame size from HAL: %d", result);
10181 LOG_ALWAYS_FATAL_IF(mFrameSize <= 0, "Error frame size was %zu but must be greater than zero",
10182 mFrameSize);
10183 result = mHalStream->getBufferSize(&mBufferSize);
10184 LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving buffer size from HAL: %d", result);
10185 mFrameCount = mBufferSize / mFrameSize;
10186
10187 // TODO: make a readHalParameters call?
10188 mediametrics::LogItem item(mThreadMetrics.getMetricsId());
10189 item.set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_READPARAMETERS)
10190 .set(AMEDIAMETRICS_PROP_ENCODING, formatToString(mFormat).c_str())
10191 .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
10192 .set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
10193 .set(AMEDIAMETRICS_PROP_CHANNELCOUNT, (int32_t)mChannelCount)
10194 .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mFrameCount)
10195 /*
10196 .set(AMEDIAMETRICS_PROP_FLAGS, toString(flags).c_str())
10197 .set(AMEDIAMETRICS_PROP_PREFIX_HAPTIC AMEDIAMETRICS_PROP_CHANNELMASK,
10198 (int32_t)mHapticChannelMask)
10199 .set(AMEDIAMETRICS_PROP_PREFIX_HAPTIC AMEDIAMETRICS_PROP_CHANNELCOUNT,
10200 (int32_t)mHapticChannelCount)
10201 */
10202 .set(AMEDIAMETRICS_PROP_PREFIX_HAL AMEDIAMETRICS_PROP_ENCODING,
10203 formatToString(mHALFormat).c_str())
10204 .set(AMEDIAMETRICS_PROP_PREFIX_HAL AMEDIAMETRICS_PROP_FRAMECOUNT,
10205 (int32_t)mFrameCount) // sic - added HAL
10206 .record();
10207 }
10208
10209 bool AudioFlinger::MmapThread::threadLoop()
10210 {
10211 checkSilentMode_l();
10212
10213 const String8 myName(String8::format("thread %p type %d TID %d", this, mType, gettid()));
10214
10215 while (!exitPending())
10216 {
10217 Vector< sp<EffectChain> > effectChains;
10218
10219 { // under Thread lock
10220 Mutex::Autolock _l(mLock);
10221
10222 if (mSignalPending) {
10223 // A signal was raised while we were unlocked
10224 mSignalPending = false;
10225 } else {
10226 if (mConfigEvents.isEmpty()) {
10227 // we're about to wait, flush the binder command buffer
10228 IPCThreadState::self()->flushCommands();
10229
10230 if (exitPending()) {
10231 break;
10232 }
10233
10234 // wait until we have something to do...
10235 ALOGV("%s going to sleep", myName.string());
10236 mWaitWorkCV.wait(mLock);
10237 ALOGV("%s waking up", myName.string());
10238
10239 checkSilentMode_l();
10240
10241 continue;
10242 }
10243 }
10244
10245 processConfigEvents_l();
10246
10247 processVolume_l();
10248
10249 checkInvalidTracks_l();
10250
10251 mActiveTracks.updatePowerState(this);
10252
10253 updateMetadata_l();
10254
10255 lockEffectChains_l(effectChains);
10256 } // release Thread lock
10257
10258 for (size_t i = 0; i < effectChains.size(); i ++) {
10259 effectChains[i]->process_l(); // Thread is not locked, but effect chain is locked
10260 }
10261
10262 // enable changes in effect chain, including moving to another thread.
10263 unlockEffectChains(effectChains);
10264 // Effect chains will be actually deleted here if they were removed from
10265 // mEffectChains list during mixing or effects processing
10266 }
10267
10268 threadLoop_exit();
10269
10270 if (!mStandby) {
10271 threadLoop_standby();
10272 mStandby = true;
10273 }
10274
10275 ALOGV("Thread %p type %d exiting", this, mType);
10276 return false;
10277 }
10278
10279 // checkForNewParameter_l() must be called with ThreadBase::mLock held
10280 bool AudioFlinger::MmapThread::checkForNewParameter_l(const String8& keyValuePair,
10281 status_t& status)
10282 {
10283 AudioParameter param = AudioParameter(keyValuePair);
10284 int value;
10285 bool sendToHal = true;
10286 if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
10287 LOG_FATAL("Should not happen set routing device in MmapThread");
10288 }
10289 if (sendToHal) {
10290 status = mHalStream->setParameters(keyValuePair);
10291 } else {
10292 status = NO_ERROR;
10293 }
10294
10295 return false;
10296 }
10297
10298 String8 AudioFlinger::MmapThread::getParameters(const String8& keys)
10299 {
10300 Mutex::Autolock _l(mLock);
10301 String8 out_s8;
10302 if (initCheck() == NO_ERROR && mHalStream->getParameters(keys, &out_s8) == OK) {
10303 return out_s8;
10304 }
10305 return {};
10306 }
10307
10308 void AudioFlinger::MmapThread::ioConfigChanged(audio_io_config_event_t event, pid_t pid,
10309 audio_port_handle_t portId __unused) {
10310 sp<AudioIoDescriptor> desc;
10311 bool isInput = false;
10312 switch (event) {
10313 case AUDIO_INPUT_OPENED:
10314 case AUDIO_INPUT_REGISTERED:
10315 case AUDIO_INPUT_CONFIG_CHANGED:
10316 isInput = true;
10317 FALLTHROUGH_INTENDED;
10318 case AUDIO_OUTPUT_OPENED:
10319 case AUDIO_OUTPUT_REGISTERED:
10320 case AUDIO_OUTPUT_CONFIG_CHANGED:
10321 desc = sp<AudioIoDescriptor>::make(mId, mPatch, isInput,
10322 mSampleRate, mFormat, mChannelMask, mFrameCount, mFrameCount);
10323 break;
10324 case AUDIO_INPUT_CLOSED:
10325 case AUDIO_OUTPUT_CLOSED:
10326 default:
10327 desc = sp<AudioIoDescriptor>::make(mId);
10328 break;
10329 }
10330 mAudioFlinger->ioConfigChanged(event, desc, pid);
10331 }
10332
10333 status_t AudioFlinger::MmapThread::createAudioPatch_l(const struct audio_patch *patch,
10334 audio_patch_handle_t *handle)
10335 NO_THREAD_SAFETY_ANALYSIS // release and re-acquire mLock
10336 {
10337 status_t status = NO_ERROR;
10338
10339 // store new device and send to effects
10340 audio_devices_t type = AUDIO_DEVICE_NONE;
10341 audio_port_handle_t deviceId;
10342 AudioDeviceTypeAddrVector sinkDeviceTypeAddrs;
10343 AudioDeviceTypeAddr sourceDeviceTypeAddr;
10344 uint32_t numDevices = 0;
10345 if (isOutput()) {
10346 for (unsigned int i = 0; i < patch->num_sinks; i++) {
10347 LOG_ALWAYS_FATAL_IF(popcount(patch->sinks[i].ext.device.type) > 1
10348 && !mAudioHwDev->supportsAudioPatches(),
10349 "Enumerated device type(%#x) must not be used "
10350 "as it does not support audio patches",
10351 patch->sinks[i].ext.device.type);
10352 type = static_cast<audio_devices_t>(type | patch->sinks[i].ext.device.type);
10353 sinkDeviceTypeAddrs.emplace_back(patch->sinks[i].ext.device.type,
10354 patch->sinks[i].ext.device.address);
10355 }
10356 deviceId = patch->sinks[0].id;
10357 numDevices = mPatch.num_sinks;
10358 } else {
10359 type = patch->sources[0].ext.device.type;
10360 deviceId = patch->sources[0].id;
10361 numDevices = mPatch.num_sources;
10362 sourceDeviceTypeAddr.mType = patch->sources[0].ext.device.type;
10363 sourceDeviceTypeAddr.setAddress(patch->sources[0].ext.device.address);
10364 }
10365
10366 for (size_t i = 0; i < mEffectChains.size(); i++) {
10367 if (isOutput()) {
10368 mEffectChains[i]->setDevices_l(sinkDeviceTypeAddrs);
10369 } else {
10370 mEffectChains[i]->setInputDevice_l(sourceDeviceTypeAddr);
10371 }
10372 }
10373
10374 if (!isOutput()) {
10375 // store new source and send to effects
10376 if (mAudioSource != patch->sinks[0].ext.mix.usecase.source) {
10377 mAudioSource = patch->sinks[0].ext.mix.usecase.source;
10378 for (size_t i = 0; i < mEffectChains.size(); i++) {
10379 mEffectChains[i]->setAudioSource_l(mAudioSource);
10380 }
10381 }
10382 }
10383
10384 if (mAudioHwDev->supportsAudioPatches()) {
10385 status = mHalDevice->createAudioPatch(patch->num_sources, patch->sources, patch->num_sinks,
10386 patch->sinks, handle);
10387 } else {
10388 audio_port_config port;
10389 std::optional<audio_source_t> source;
10390 if (isOutput()) {
10391 port = patch->sinks[0];
10392 } else {
10393 port = patch->sources[0];
10394 source = patch->sinks[0].ext.mix.usecase.source;
10395 }
10396 status = mHalStream->legacyCreateAudioPatch(port, source, type);
10397 *handle = AUDIO_PATCH_HANDLE_NONE;
10398 }
10399
10400 if (numDevices == 0 || mDeviceId != deviceId) {
10401 if (isOutput()) {
10402 sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
10403 mOutDeviceTypeAddrs = sinkDeviceTypeAddrs;
10404 checkSilentMode_l();
10405 } else {
10406 sendIoConfigEvent_l(AUDIO_INPUT_CONFIG_CHANGED);
10407 mInDeviceTypeAddr = sourceDeviceTypeAddr;
10408 }
10409 sp<MmapStreamCallback> callback = mCallback.promote();
10410 if (mDeviceId != deviceId && callback != 0) {
10411 mLock.unlock();
10412 callback->onRoutingChanged(deviceId);
10413 mLock.lock();
10414 }
10415 mPatch = *patch;
10416 mDeviceId = deviceId;
10417 }
10418     // Force metadata update after a route change
10419 mActiveTracks.setHasChanged();
10420
10421 return status;
10422 }
10423
10424 status_t AudioFlinger::MmapThread::releaseAudioPatch_l(const audio_patch_handle_t handle)
10425 {
10426 status_t status = NO_ERROR;
10427
10428 mPatch = audio_patch{};
10429 mOutDeviceTypeAddrs.clear();
10430 mInDeviceTypeAddr.reset();
10431
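    // Query HAL patch support; if the query itself fails, conservatively assume patches are
    // unsupported and release the route through the legacy stream API instead.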
10432 bool supportsAudioPatches = mHalDevice->supportsAudioPatches(&supportsAudioPatches) == OK ?
10433 supportsAudioPatches : false;
10434
10435 if (supportsAudioPatches) {
10436 status = mHalDevice->releaseAudioPatch(handle);
10437 } else {
10438 status = mHalStream->legacyReleaseAudioPatch();
10439 }
10440     // Force metadata update after a route change
10441 mActiveTracks.setHasChanged();
10442
10443 return status;
10444 }
10445
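// In the audio port representation, an MMAP output mix port has role AUDIO_PORT_ROLE_SOURCE
// and an MMAP input mix port has role AUDIO_PORT_ROLE_SINK; capture ports also report the
// current audio source.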
10446 void AudioFlinger::MmapThread::toAudioPortConfig(struct audio_port_config *config)
10447 {
10448 ThreadBase::toAudioPortConfig(config);
10449 if (isOutput()) {
10450 config->role = AUDIO_PORT_ROLE_SOURCE;
10451 config->ext.mix.hw_module = mAudioHwDev->handle();
10452 config->ext.mix.usecase.stream = AUDIO_STREAM_DEFAULT;
10453 } else {
10454 config->role = AUDIO_PORT_ROLE_SINK;
10455 config->ext.mix.hw_module = mAudioHwDev->handle();
10456 config->ext.mix.usecase.source = mAudioSource;
10457 }
10458 }
10459
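// Effects on MMAP threads are restricted to hardware-accelerated implementations (see
// checkEffectCompatibility_l()), so the chain is attached with null in/out buffers; the
// track counters only mirror the active tracks sharing the chain's session.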
10460 status_t AudioFlinger::MmapThread::addEffectChain_l(const sp<EffectChain>& chain)
10461 {
10462 audio_session_t session = chain->sessionId();
10463
10464 ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session);
10465 // Attach all tracks with same session ID to this chain.
10466 // indicate all active tracks in the chain
10467 for (const sp<MmapTrack> &track : mActiveTracks) {
10468 if (session == track->sessionId()) {
10469 chain->incTrackCnt();
10470 chain->incActiveTrackCnt();
10471 }
10472 }
10473
10474 chain->setThread(this);
10475 chain->setInBuffer(nullptr);
10476 chain->setOutBuffer(nullptr);
10477 chain->syncHalEffectsState();
10478
10479 mEffectChains.add(chain);
10480 checkSuspendOnAddEffectChain_l(chain);
10481 return NO_ERROR;
10482 }
10483
10484 size_t AudioFlinger::MmapThread::removeEffectChain_l(const sp<EffectChain>& chain)
10485 {
10486 audio_session_t session = chain->sessionId();
10487
10488 ALOGV("removeEffectChain_l() %p from thread %p for session %d", chain.get(), this, session);
10489
10490 for (size_t i = 0; i < mEffectChains.size(); i++) {
10491 if (chain == mEffectChains[i]) {
10492 mEffectChains.removeAt(i);
10493 // detach all active tracks from the chain
10494 // detach all tracks with same session ID from this chain
10495 for (const sp<MmapTrack> &track : mActiveTracks) {
10496 if (session == track->sessionId()) {
10497 chain->decActiveTrackCnt();
10498 chain->decTrackCnt();
10499 }
10500 }
10501 break;
10502 }
10503 }
10504 return mEffectChains.size();
10505 }
10506
10507 void AudioFlinger::MmapThread::threadLoop_standby()
10508 {
10509 mHalStream->standby();
10510 }
10511
10512 void AudioFlinger::MmapThread::threadLoop_exit()
10513 {
10514 // Do not call callback->onTearDown() because it is redundant for thread exit
10515 // and because it can cause a recursive mutex lock on stop().
10516 }
10517
10518 status_t AudioFlinger::MmapThread::setSyncEvent(const sp<SyncEvent>& event __unused)
10519 {
10520 return BAD_VALUE;
10521 }
10522
10523 bool AudioFlinger::MmapThread::isValidSyncEvent(const sp<SyncEvent>& event __unused) const
10524 {
10525 return false;
10526 }
10527
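// Summary of the rules enforced below: no global effect sessions, only pre-processing
// effects on capture threads, no pre-processing effects on playback threads, only effects
// that add no software processing load or latency, and no HapticGenerator.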
10528 status_t AudioFlinger::MmapThread::checkEffectCompatibility_l(
10529 const effect_descriptor_t *desc, audio_session_t sessionId)
10530 {
10531 // No global effect sessions on mmap threads
10532 if (audio_is_global_session(sessionId)) {
10533 ALOGW("checkEffectCompatibility_l(): global effect %s on MMAP thread %s",
10534 desc->name, mThreadName);
10535 return BAD_VALUE;
10536 }
10537
10538 if (!isOutput() && ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_PRE_PROC)) {
10539 ALOGW("checkEffectCompatibility_l(): non pre processing effect %s on capture mmap thread",
10540 desc->name);
10541 return BAD_VALUE;
10542 }
10543 if (isOutput() && ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC)) {
10544 ALOGW("checkEffectCompatibility_l(): pre processing effect %s created on playback mmap "
10545 "thread", desc->name);
10546 return BAD_VALUE;
10547 }
10548
10549 // Only allow effects without processing load or latency
10550 if ((desc->flags & EFFECT_FLAG_NO_PROCESS_MASK) != EFFECT_FLAG_NO_PROCESS) {
10551 return BAD_VALUE;
10552 }
10553
10554 if (EffectModule::isHapticGenerator(&desc->type)) {
10555 ALOGE("%s(): HapticGenerator is not supported for MmapThread", __func__);
10556 return BAD_VALUE;
10557 }
10558
10559 return NO_ERROR;
10560 }
10561
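// If any active track has been invalidated (e.g. by a routing or policy change), ask the
// client to tear down and re-create the stream by reporting AUDIO_PORT_HANDLE_NONE through
// onRoutingChanged(); mLock is released around the callback to avoid deadlocks with the
// client.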
10562 void AudioFlinger::MmapThread::checkInvalidTracks_l()
10563 NO_THREAD_SAFETY_ANALYSIS // release and re-acquire mLock
10564 {
10565 sp<MmapStreamCallback> callback;
10566 for (const sp<MmapTrack> &track : mActiveTracks) {
10567 if (track->isInvalid()) {
10568 callback = mCallback.promote();
10569 if (callback == nullptr && mNoCallbackWarningCount < kMaxNoCallbackWarnings) {
10570 ALOGW("Could not notify MMAP stream tear down: no onRoutingChanged callback!");
10571 mNoCallbackWarningCount++;
10572 }
10573 break;
10574 }
10575 }
10576 if (callback != 0) {
10577 mLock.unlock();
10578 callback->onRoutingChanged(AUDIO_PORT_HANDLE_NONE);
10579 mLock.lock();
10580 }
10581 }
10582
10583 void AudioFlinger::MmapThread::dumpInternals_l(int fd, const Vector<String16>& args __unused)
10584 {
10585 dprintf(fd, " Attributes: content type %d usage %d source %d\n",
10586 mAttr.content_type, mAttr.usage, mAttr.source);
10587 dprintf(fd, " Session: %d port Id: %d\n", mSessionId, mPortId);
10588 if (mActiveTracks.isEmpty()) {
10589 dprintf(fd, " No active clients\n");
10590 }
10591 }
10592
10593 void AudioFlinger::MmapThread::dumpTracks_l(int fd, const Vector<String16>& args __unused)
10594 {
10595 String8 result;
10596 size_t numtracks = mActiveTracks.size();
10597 dprintf(fd, " %zu Tracks\n", numtracks);
10598 const char *prefix = " ";
10599 if (numtracks) {
10600 result.append(prefix);
10601 mActiveTracks[0]->appendDumpHeader(result);
10602 for (size_t i = 0; i < numtracks ; ++i) {
10603 sp<MmapTrack> track = mActiveTracks[i];
10604 result.append(prefix);
10605 track->appendDump(result, true /* active */);
10606 }
10607 } else {
10608 dprintf(fd, "\n");
10609 }
10610 write(fd, result.string(), result.size());
10611 }
10612
10613 AudioFlinger::MmapPlaybackThread::MmapPlaybackThread(
10614 const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
10615 AudioHwDevice *hwDev, AudioStreamOut *output, bool systemReady)
10616 : MmapThread(audioFlinger, id, hwDev, output->stream, systemReady, true /* isOut */),
10617 mStreamType(AUDIO_STREAM_MUSIC),
10618 mOutput(output)
10619 {
10620 snprintf(mThreadName, kThreadNameLength, "AudioMmapOut_%X", id);
10621 mChannelCount = audio_channel_count_from_out_mask(mChannelMask);
10622 mMasterVolume = audioFlinger->masterVolume_l();
10623 mMasterMute = audioFlinger->masterMute_l();
10624
10625 for (int i = AUDIO_STREAM_MIN; i < AUDIO_STREAM_FOR_POLICY_CNT; ++i) {
10626 const audio_stream_type_t stream{static_cast<audio_stream_type_t>(i)};
10627 mStreamTypes[stream].volume = 0.0f;
10628 mStreamTypes[stream].mute = mAudioFlinger->streamMute_l(stream);
10629 }
10630 // Audio patch and call assistant volume are always max
10631 mStreamTypes[AUDIO_STREAM_PATCH].volume = 1.0f;
10632 mStreamTypes[AUDIO_STREAM_PATCH].mute = false;
10633 mStreamTypes[AUDIO_STREAM_CALL_ASSISTANT].volume = 1.0f;
10634 mStreamTypes[AUDIO_STREAM_CALL_ASSISTANT].mute = false;
10635
10636 if (mAudioHwDev) {
10637 if (mAudioHwDev->canSetMasterVolume()) {
10638 mMasterVolume = 1.0;
10639 }
10640
10641 if (mAudioHwDev->canSetMasterMute()) {
10642 mMasterMute = false;
10643 }
10644 }
10645 }
10646
10647 void AudioFlinger::MmapPlaybackThread::configure(const audio_attributes_t *attr,
10648 audio_stream_type_t streamType,
10649 audio_session_t sessionId,
10650 const sp<MmapStreamCallback>& callback,
10651 audio_port_handle_t deviceId,
10652 audio_port_handle_t portId)
10653 {
10654 MmapThread::configure(attr, streamType, sessionId, callback, deviceId, portId);
10655 mStreamType = streamType;
10656 }
10657
10658 AudioStreamOut* AudioFlinger::MmapPlaybackThread::clearOutput()
10659 {
10660 Mutex::Autolock _l(mLock);
10661 AudioStreamOut *output = mOutput;
10662 mOutput = NULL;
10663 return output;
10664 }
10665
10666 void AudioFlinger::MmapPlaybackThread::setMasterVolume(float value)
10667 {
10668 Mutex::Autolock _l(mLock);
10669 // Don't apply master volume in SW if our HAL can do it for us.
10670 if (mAudioHwDev &&
10671 mAudioHwDev->canSetMasterVolume()) {
10672 mMasterVolume = 1.0;
10673 } else {
10674 mMasterVolume = value;
10675 }
10676 }
10677
10678 void AudioFlinger::MmapPlaybackThread::setMasterMute(bool muted)
10679 {
10680 Mutex::Autolock _l(mLock);
10681 // Don't apply master mute in SW if our HAL can do it for us.
10682 if (mAudioHwDev && mAudioHwDev->canSetMasterMute()) {
10683 mMasterMute = false;
10684 } else {
10685 mMasterMute = muted;
10686 }
10687 }
10688
10689 void AudioFlinger::MmapPlaybackThread::setStreamVolume(audio_stream_type_t stream, float value)
10690 {
10691 Mutex::Autolock _l(mLock);
10692 mStreamTypes[stream].volume = value;
10693 if (stream == mStreamType) {
10694 broadcast_l();
10695 }
10696 }
10697
10698 float AudioFlinger::MmapPlaybackThread::streamVolume(audio_stream_type_t stream) const
10699 {
10700 Mutex::Autolock _l(mLock);
10701 return mStreamTypes[stream].volume;
10702 }
10703
10704 void AudioFlinger::MmapPlaybackThread::setStreamMute(audio_stream_type_t stream, bool muted)
10705 {
10706 Mutex::Autolock _l(mLock);
10707 mStreamTypes[stream].mute = muted;
10708 if (stream == mStreamType) {
10709 broadcast_l();
10710 }
10711 }
10712
10713 void AudioFlinger::MmapPlaybackThread::invalidateTracks(audio_stream_type_t streamType)
10714 {
10715 Mutex::Autolock _l(mLock);
10716 if (streamType == mStreamType) {
10717 for (const sp<MmapTrack> &track : mActiveTracks) {
10718 track->invalidate();
10719 }
10720 broadcast_l();
10721 }
10722 }
10723
10724 void AudioFlinger::MmapPlaybackThread::invalidateTracks(std::set<audio_port_handle_t>& portIds)
10725 {
10726 Mutex::Autolock _l(mLock);
10727 bool trackMatch = false;
10728 for (const sp<MmapTrack> &track : mActiveTracks) {
10729 if (portIds.find(track->portId()) != portIds.end()) {
10730 track->invalidate();
10731 trackMatch = true;
10732 portIds.erase(track->portId());
10733 }
10734 if (portIds.empty()) {
10735 break;
10736 }
10737 }
10738 if (trackMatch) {
10739 broadcast_l();
10740 }
10741 }
10742
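// Computes the final playback volume (master volume x stream volume, or 0 when master or
// stream is muted) and applies it only when it has changed:
//  - if an effect chain is present, it is first given the chance to handle the volume (the
//    value is exchanged in 8.24 fixed point, e.g. 0.5f -> 0x00800000),
//  - the HAL stream volume is then tried, and if the HAL does not implement it the client
//    is notified through onVolumeChanged() so it can apply the volume itself.
// Active tracks are finally updated so the change is reflected in metadata and mute event
// reporting.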
10743 void AudioFlinger::MmapPlaybackThread::processVolume_l()
10744 NO_THREAD_SAFETY_ANALYSIS // access of track->processMuteEvent_l
10745 {
10746 float volume;
10747
10748 if (mMasterMute || streamMuted_l()) {
10749 volume = 0;
10750 } else {
10751 volume = mMasterVolume * streamVolume_l();
10752 }
10753
10754 if (volume != mHalVolFloat) {
10755 // Convert volumes from float to 8.24
10756 uint32_t vol = (uint32_t)(volume * (1 << 24));
10757
10758 // Delegate volume control to effect in track effect chain if needed
10759 // only one effect chain can be present on DirectOutputThread, so if
10760 // there is one, the track is connected to it
10761 if (!mEffectChains.isEmpty()) {
10762 mEffectChains[0]->setVolume_l(&vol, &vol);
10763 volume = (float)vol / (1 << 24);
10764 }
10765 // Try to use HW volume control and fall back to SW control if not implemented
10766 if (mOutput->stream->setVolume(volume, volume) == NO_ERROR) {
10767 mHalVolFloat = volume; // HW volume control worked, so update value.
10768 mNoCallbackWarningCount = 0;
10769 } else {
10770 sp<MmapStreamCallback> callback = mCallback.promote();
10771 if (callback != 0) {
10772 mHalVolFloat = volume; // SW volume control worked, so update value.
10773 mNoCallbackWarningCount = 0;
10774 mLock.unlock();
10775 callback->onVolumeChanged(volume);
10776 mLock.lock();
10777 } else {
10778 if (mNoCallbackWarningCount < kMaxNoCallbackWarnings) {
10779 ALOGW("Could not set MMAP stream volume: no volume callback!");
10780 mNoCallbackWarningCount++;
10781 }
10782 }
10783 }
10784 for (const sp<MmapTrack> &track : mActiveTracks) {
10785 track->setMetadataHasChanged();
10786 track->processMuteEvent_l(mAudioFlinger->getOrCreateAudioManager(),
10787 /*muteState=*/{mMasterMute,
10788 streamVolume_l() == 0.f,
10789 streamMuted_l(),
10790 // TODO(b/241533526): adjust logic to include mute from AppOps
10791 false /*muteFromPlaybackRestricted*/,
10792 false /*muteFromClientVolume*/,
10793 false /*muteFromVolumeShaper*/});
10794 }
10795 }
10796 }
10797
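// Rebuilds the source metadata (usage, content type, gain, channel mask, tags) for every
// active track and pushes it to the HAL through updateSourceMetadata(); skipped when the
// stream is not initialized or the set of active tracks has not changed.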
10798 AudioFlinger::ThreadBase::MetadataUpdate AudioFlinger::MmapPlaybackThread::updateMetadata_l()
10799 {
10800 if (!isStreamInitialized() || !mActiveTracks.readAndClearHasChanged()) {
10801 return {}; // nothing to do
10802 }
10803 StreamOutHalInterface::SourceMetadata metadata;
10804 for (const sp<MmapTrack> &track : mActiveTracks) {
10805 // No track is invalid as this is called after prepareTrack_l in the same critical section
10806 playback_track_metadata_v7_t trackMetadata;
10807 trackMetadata.base = {
10808 .usage = track->attributes().usage,
10809 .content_type = track->attributes().content_type,
10810 .gain = mHalVolFloat, // TODO: propagate from aaudio pre-mix volume
10811 };
10812         trackMetadata.channel_mask = track->channelMask();
10813 strncpy(trackMetadata.tags, track->attributes().tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);
10814 metadata.tracks.push_back(trackMetadata);
10815 }
10816 mOutput->stream->updateSourceMetadata(metadata);
10817
10818 MetadataUpdate change;
10819 change.playbackMetadataUpdate = metadata.tracks;
10820 return change;
10821 };
10822
10823 void AudioFlinger::MmapPlaybackThread::checkSilentMode_l()
10824 {
10825 if (!mMasterMute) {
10826 char value[PROPERTY_VALUE_MAX];
10827 if (property_get("ro.audio.silent", value, "0") > 0) {
10828 char *endptr;
10829 unsigned long ul = strtoul(value, &endptr, 0);
10830 if (*endptr == '\0' && ul != 0) {
10831 ALOGD("Silence is golden");
10832 // The setprop command will not allow a property to be changed after
10833 // the first time it is set, so we don't have to worry about un-muting.
10834 setMasterMute_l(true);
10835 }
10836 }
10837 }
10838 }
10839
10840 void AudioFlinger::MmapPlaybackThread::toAudioPortConfig(struct audio_port_config *config)
10841 {
10842 MmapThread::toAudioPortConfig(config);
10843 if (mOutput && mOutput->flags != AUDIO_OUTPUT_FLAG_NONE) {
10844 config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
10845 config->flags.output = mOutput->flags;
10846 }
10847 }
10848
10849 status_t AudioFlinger::MmapPlaybackThread::getExternalPosition(uint64_t *position,
10850 int64_t *timeNanos)
10851 {
10852 if (mOutput == nullptr) {
10853 return NO_INIT;
10854 }
10855 struct timespec timestamp;
10856     status_t status = mOutput->getPresentationPosition(position, &timestamp);
10857 if (status == NO_ERROR) {
10858 *timeNanos = timestamp.tv_sec * NANOS_PER_SECOND + timestamp.tv_nsec;
10859 }
10860 return status;
10861 }
10862
10863 status_t AudioFlinger::MmapPlaybackThread::reportData(const void* buffer, size_t frameCount) {
10864 // Send to MelProcessor for sound dose measurement.
10865 auto processor = mMelProcessor.load();
10866 if (processor) {
10867 processor->process(buffer, frameCount * mFrameSize);
10868 }
10869
10870 return NO_ERROR;
10871 }
10872
10873 // startMelComputation_l() must be called with AudioFlinger::mLock held
10874 void AudioFlinger::MmapPlaybackThread::startMelComputation_l(
10875 const sp<audio_utils::MelProcessor>& processor)
10876 {
10877 ALOGV("%s: starting mel processor for thread %d", __func__, id());
10878 mMelProcessor.store(processor);
10879 if (processor) {
10880 processor->resume();
10881 }
10882
10883     // no need to update output format for MmapPlaybackThread since it is
10884     // fixed for the lifetime of the thread
10885 }
10886
10887 // stopMelComputation_l() must be called with AudioFlinger::mLock held
10888 void AudioFlinger::MmapPlaybackThread::stopMelComputation_l()
10889 {
10890 ALOGV("%s: pausing mel processor for thread %d", __func__, id());
10891 auto melProcessor = mMelProcessor.load();
10892 if (melProcessor != nullptr) {
10893 melProcessor->pause();
10894 }
10895 }
10896
10897 void AudioFlinger::MmapPlaybackThread::dumpInternals_l(int fd, const Vector<String16>& args)
10898 {
10899 MmapThread::dumpInternals_l(fd, args);
10900
10901 dprintf(fd, " Stream type: %d Stream volume: %f HAL volume: %f Stream mute %d\n",
10902 mStreamType, streamVolume_l(), mHalVolFloat, streamMuted_l());
10903 dprintf(fd, " Master volume: %f Master mute %d\n", mMasterVolume, mMasterMute);
10904 }
10905
10906 AudioFlinger::MmapCaptureThread::MmapCaptureThread(
10907 const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
10908 AudioHwDevice *hwDev, AudioStreamIn *input, bool systemReady)
10909 : MmapThread(audioFlinger, id, hwDev, input->stream, systemReady, false /* isOut */),
10910 mInput(input)
10911 {
10912 snprintf(mThreadName, kThreadNameLength, "AudioMmapIn_%X", id);
10913 mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
10914 }
10915
10916 status_t AudioFlinger::MmapCaptureThread::exitStandby_l()
10917 {
10918 {
10919 // mInput might have been cleared by clearInput()
10920 if (mInput != nullptr && mInput->stream != nullptr) {
10921 mInput->stream->setGain(1.0f);
10922 }
10923 }
10924 return MmapThread::exitStandby_l();
10925 }
10926
10927 AudioFlinger::AudioStreamIn* AudioFlinger::MmapCaptureThread::clearInput()
10928 {
10929 Mutex::Autolock _l(mLock);
10930 AudioStreamIn *input = mInput;
10931 mInput = NULL;
10932 return input;
10933 }
10934
10935
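// Capture-side "volume" handling only mutes or unmutes the input in the DSP through
// StreamInHalInterface::setGain() when the silenced state of an active track changes; the
// callback is promoted here solely to warn, at a limited rate, when no client callback is
// registered.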
10936 void AudioFlinger::MmapCaptureThread::processVolume_l()
10937 {
10938 bool changed = false;
10939 bool silenced = false;
10940
10941 sp<MmapStreamCallback> callback = mCallback.promote();
10942 if (callback == 0) {
10943 if (mNoCallbackWarningCount < kMaxNoCallbackWarnings) {
10944 ALOGW("Could not set MMAP stream silenced: no onStreamSilenced callback!");
10945 mNoCallbackWarningCount++;
10946 }
10947 }
10948
10949 // After a change occurred in track silenced state, mute capture in audio DSP if at least one
10950 // track is silenced and unmute otherwise
10951 for (size_t i = 0; i < mActiveTracks.size() && !silenced; i++) {
10952 if (!mActiveTracks[i]->getAndSetSilencedNotified_l()) {
10953 changed = true;
10954 silenced = mActiveTracks[i]->isSilenced_l();
10955 }
10956 }
10957
10958 if (changed) {
10959 mInput->stream->setGain(silenced ? 0.0f: 1.0f);
10960 }
10961 }
10962
10963 AudioFlinger::ThreadBase::MetadataUpdate AudioFlinger::MmapCaptureThread::updateMetadata_l()
10964 {
10965 if (!isStreamInitialized() || !mActiveTracks.readAndClearHasChanged()) {
10966 return {}; // nothing to do
10967 }
10968 StreamInHalInterface::SinkMetadata metadata;
10969 for (const sp<MmapTrack> &track : mActiveTracks) {
10970 // No track is invalid as this is called after prepareTrack_l in the same critical section
10971 record_track_metadata_v7_t trackMetadata;
10972 trackMetadata.base = {
10973 .source = track->attributes().source,
10974 .gain = 1, // capture tracks do not have volumes
10975 };
10976         trackMetadata.channel_mask = track->channelMask();
10977 strncpy(trackMetadata.tags, track->attributes().tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);
10978 metadata.tracks.push_back(trackMetadata);
10979 }
10980 mInput->stream->updateSinkMetadata(metadata);
10981 MetadataUpdate change;
10982 change.recordMetadataUpdate = metadata.tracks;
10983 return change;
10984 }
10985
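// Marks the matching active track(s) as silenced or unsilenced and wakes up the thread loop
// so processVolume_l() can apply the corresponding input gain; the state is also remembered
// per client port in case the corresponding track is not currently active.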
10986 void AudioFlinger::MmapCaptureThread::setRecordSilenced(audio_port_handle_t portId, bool silenced)
10987 {
10988 Mutex::Autolock _l(mLock);
10989 for (size_t i = 0; i < mActiveTracks.size() ; i++) {
10990 if (mActiveTracks[i]->portId() == portId) {
10991 mActiveTracks[i]->setSilenced_l(silenced);
10992 broadcast_l();
10993 }
10994 }
10995 setClientSilencedIfExists_l(portId, silenced);
10996 }
10997
10998 void AudioFlinger::MmapCaptureThread::toAudioPortConfig(struct audio_port_config *config)
10999 {
11000 MmapThread::toAudioPortConfig(config);
11001 if (mInput && mInput->flags != AUDIO_INPUT_FLAG_NONE) {
11002 config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
11003 config->flags.input = mInput->flags;
11004 }
11005 }
11006
11007 status_t AudioFlinger::MmapCaptureThread::getExternalPosition(
11008 uint64_t *position, int64_t *timeNanos)
11009 {
11010 if (mInput == nullptr) {
11011 return NO_INIT;
11012 }
11013 return mInput->getCapturePosition((int64_t*)position, timeNanos);
11014 }
11015
11016 // ----------------------------------------------------------------------------
11017
11018 AudioFlinger::BitPerfectThread::BitPerfectThread(const sp<AudioFlinger> &audioflinger,
11019 AudioStreamOut *output, audio_io_handle_t id, bool systemReady)
11020 : MixerThread(audioflinger, output, id, systemReady, BIT_PERFECT) {}
11021
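// On top of the regular mixer preparation, bit-perfect playback is special-cased: when
// exactly one active track is bit-perfect, its data is copied unmodified into the sink
// buffer through the mixer's tee buffer and its final volume is applied via
// setVolumeForOutput_l(); with multiple active tracks the tee buffers are detached and
// normal mixing applies.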
11022 AudioFlinger::PlaybackThread::mixer_state AudioFlinger::BitPerfectThread::prepareTracks_l(
11023 Vector<sp<Track>> *tracksToRemove) {
11024 mixer_state result = MixerThread::prepareTracks_l(tracksToRemove);
11025 // If there is only one active track and it is bit-perfect, enable tee buffer.
11026 float volumeLeft = 1.0f;
11027 float volumeRight = 1.0f;
11028 if (mActiveTracks.size() == 1 && mActiveTracks[0]->isBitPerfect()) {
11029 const int trackId = mActiveTracks[0]->id();
11030 mAudioMixer->setParameter(
11031 trackId, AudioMixer::TRACK, AudioMixer::TEE_BUFFER, (void *)mSinkBuffer);
11032 mAudioMixer->setParameter(
11033 trackId, AudioMixer::TRACK, AudioMixer::TEE_BUFFER_FRAME_COUNT,
11034 (void *)(uintptr_t)mNormalFrameCount);
11035 mActiveTracks[0]->getFinalVolume(&volumeLeft, &volumeRight);
11036 mIsBitPerfect = true;
11037 } else {
11038 mIsBitPerfect = false;
11039 // No need to copy bit-perfect data directly to sink buffer given there are multiple tracks
11040 // active.
11041 for (const auto& track : mActiveTracks) {
11042 const int trackId = track->id();
11043 mAudioMixer->setParameter(
11044 trackId, AudioMixer::TRACK, AudioMixer::TEE_BUFFER, nullptr);
11045 }
11046 }
11047 if (mVolumeLeft != volumeLeft || mVolumeRight != volumeRight) {
11048 mVolumeLeft = volumeLeft;
11049 mVolumeRight = volumeRight;
11050 setVolumeForOutput_l(volumeLeft, volumeRight);
11051 }
11052 return result;
11053 }
11054
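// After mixing, record whether the sink buffer was already filled with bit-perfect data so
// that the common playback path can avoid overwriting it with the mixed output for this
// cycle.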
11055 void AudioFlinger::BitPerfectThread::threadLoop_mix() {
11056 MixerThread::threadLoop_mix();
11057 mHasDataCopiedToSinkBuffer = mIsBitPerfect;
11058 }
11059
11060 } // namespace android
11061