/*
 * Copyright (C) 2023 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <android-base/thread_annotations.h>
#include <audio_utils/safe_math.h>
#include <audio_utils/threads.h>
#include <utils/Log.h>
#include <utils/Timers.h>

#include <algorithm>
#include <array>
#include <cmath>
#include <map>
#include <memory>
#include <mutex>
#include <sys/syscall.h>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#pragma push_macro("LOG_TAG")
#undef LOG_TAG
#define LOG_TAG "audio_utils::mutex"

namespace android::audio_utils {

// Define global capabilities for thread-safety annotation.
//
// These can be manually modified, or regenerated by compiling
// generate_mutex_order.cpp in the tests directory.

// --- Begin generated section

// Lock order
enum class MutexOrder : uint32_t {
    kSpatializer_Mutex = 0,
    kAudioPolicyEffects_Mutex = 1,
    kEffectHandle_Mutex = 2,
    kEffectBase_PolicyMutex = 3,
    kAudioPolicyService_Mutex = 4,
    kCommandThread_Mutex = 5,
    kAudioCommand_Mutex = 6,
    kUidPolicy_Mutex = 7,
    kAudioFlinger_Mutex = 8,
    kAudioFlinger_HardwareMutex = 9,
    kDeviceEffectManager_Mutex = 10,
    kPatchCommandThread_Mutex = 11,
    kThreadBase_Mutex = 12,
    kAudioFlinger_ClientMutex = 13,
    kMelReporter_Mutex = 14,
    kEffectChain_Mutex = 15,
    kDeviceEffectProxy_ProxyMutex = 16,
    kEffectBase_Mutex = 17,
    kAudioFlinger_UnregisteredWritersMutex = 18,
    kAsyncCallbackThread_Mutex = 19,
    kConfigEvent_Mutex = 20,
    kOutputTrack_TrackMetadataMutex = 21,
    kPassthruPatchRecord_ReadMutex = 22,
    kPatchCommandThread_ListenerMutex = 23,
    kPlaybackThread_AudioTrackCbMutex = 24,
    kAudioPolicyService_NotificationClientsMutex = 25,
    kMediaLogNotifier_Mutex = 26,
    kOtherMutex = 27,
    kSize = 28,
};

// Lock by name
inline constexpr const char* const gMutexNames[] = {
    "Spatializer_Mutex",
    "AudioPolicyEffects_Mutex",
    "EffectHandle_Mutex",
    "EffectBase_PolicyMutex",
    "AudioPolicyService_Mutex",
    "CommandThread_Mutex",
    "AudioCommand_Mutex",
    "UidPolicy_Mutex",
    "AudioFlinger_Mutex",
    "AudioFlinger_HardwareMutex",
    "DeviceEffectManager_Mutex",
    "PatchCommandThread_Mutex",
    "ThreadBase_Mutex",
    "AudioFlinger_ClientMutex",
    "MelReporter_Mutex",
    "EffectChain_Mutex",
    "DeviceEffectProxy_ProxyMutex",
    "EffectBase_Mutex",
    "AudioFlinger_UnregisteredWritersMutex",
    "AsyncCallbackThread_Mutex",
    "ConfigEvent_Mutex",
    "OutputTrack_TrackMetadataMutex",
    "PassthruPatchRecord_ReadMutex",
    "PatchCommandThread_ListenerMutex",
    "PlaybackThread_AudioTrackCbMutex",
    "AudioPolicyService_NotificationClientsMutex",
    "MediaLogNotifier_Mutex",
    "OtherMutex",
};

// Forward declarations
class AudioMutexAttributes;
template <typename T> class mutex_impl;
using mutex = mutex_impl<AudioMutexAttributes>;

// Capabilities in priority order
// (declaration only, value is nullptr)
inline mutex* Spatializer_Mutex;
inline mutex* AudioPolicyEffects_Mutex
        ACQUIRED_AFTER(android::audio_utils::Spatializer_Mutex);
inline mutex* EffectHandle_Mutex
        ACQUIRED_AFTER(android::audio_utils::AudioPolicyEffects_Mutex);
inline mutex* EffectBase_PolicyMutex
        ACQUIRED_AFTER(android::audio_utils::EffectHandle_Mutex);
inline mutex* AudioPolicyService_Mutex
        ACQUIRED_AFTER(android::audio_utils::EffectBase_PolicyMutex);
inline mutex* CommandThread_Mutex
        ACQUIRED_AFTER(android::audio_utils::AudioPolicyService_Mutex);
inline mutex* AudioCommand_Mutex
        ACQUIRED_AFTER(android::audio_utils::CommandThread_Mutex);
inline mutex* UidPolicy_Mutex
        ACQUIRED_AFTER(android::audio_utils::AudioCommand_Mutex);
inline mutex* AudioFlinger_Mutex
        ACQUIRED_AFTER(android::audio_utils::UidPolicy_Mutex);
inline mutex* AudioFlinger_HardwareMutex
        ACQUIRED_AFTER(android::audio_utils::AudioFlinger_Mutex);
inline mutex* DeviceEffectManager_Mutex
        ACQUIRED_AFTER(android::audio_utils::AudioFlinger_HardwareMutex);
inline mutex* PatchCommandThread_Mutex
        ACQUIRED_AFTER(android::audio_utils::DeviceEffectManager_Mutex);
inline mutex* ThreadBase_Mutex
        ACQUIRED_AFTER(android::audio_utils::PatchCommandThread_Mutex);
inline mutex* AudioFlinger_ClientMutex
        ACQUIRED_AFTER(android::audio_utils::ThreadBase_Mutex);
inline mutex* MelReporter_Mutex
        ACQUIRED_AFTER(android::audio_utils::AudioFlinger_ClientMutex);
inline mutex* EffectChain_Mutex
        ACQUIRED_AFTER(android::audio_utils::MelReporter_Mutex);
inline mutex* DeviceEffectProxy_ProxyMutex
        ACQUIRED_AFTER(android::audio_utils::EffectChain_Mutex);
inline mutex* EffectBase_Mutex
        ACQUIRED_AFTER(android::audio_utils::DeviceEffectProxy_ProxyMutex);
inline mutex* AudioFlinger_UnregisteredWritersMutex
        ACQUIRED_AFTER(android::audio_utils::EffectBase_Mutex);
inline mutex* AsyncCallbackThread_Mutex
        ACQUIRED_AFTER(android::audio_utils::AudioFlinger_UnregisteredWritersMutex);
inline mutex* ConfigEvent_Mutex
        ACQUIRED_AFTER(android::audio_utils::AsyncCallbackThread_Mutex);
inline mutex* OutputTrack_TrackMetadataMutex
        ACQUIRED_AFTER(android::audio_utils::ConfigEvent_Mutex);
inline mutex* PassthruPatchRecord_ReadMutex
        ACQUIRED_AFTER(android::audio_utils::OutputTrack_TrackMetadataMutex);
inline mutex* PatchCommandThread_ListenerMutex
        ACQUIRED_AFTER(android::audio_utils::PassthruPatchRecord_ReadMutex);
inline mutex* PlaybackThread_AudioTrackCbMutex
        ACQUIRED_AFTER(android::audio_utils::PatchCommandThread_ListenerMutex);
inline mutex* AudioPolicyService_NotificationClientsMutex
        ACQUIRED_AFTER(android::audio_utils::PlaybackThread_AudioTrackCbMutex);
inline mutex* MediaLogNotifier_Mutex
        ACQUIRED_AFTER(android::audio_utils::AudioPolicyService_NotificationClientsMutex);
inline mutex* OtherMutex
        ACQUIRED_AFTER(android::audio_utils::MediaLogNotifier_Mutex);

// Exclusion by capability
#define EXCLUDES_BELOW_OtherMutex
#define EXCLUDES_OtherMutex \
    EXCLUDES(android::audio_utils::OtherMutex) \
    EXCLUDES_BELOW_OtherMutex

#define EXCLUDES_BELOW_MediaLogNotifier_Mutex \
    EXCLUDES_OtherMutex
#define EXCLUDES_MediaLogNotifier_Mutex \
    EXCLUDES(android::audio_utils::MediaLogNotifier_Mutex) \
    EXCLUDES_BELOW_MediaLogNotifier_Mutex

#define EXCLUDES_BELOW_AudioPolicyService_NotificationClientsMutex \
    EXCLUDES_MediaLogNotifier_Mutex
#define EXCLUDES_AudioPolicyService_NotificationClientsMutex \
    EXCLUDES(android::audio_utils::AudioPolicyService_NotificationClientsMutex) \
    EXCLUDES_BELOW_AudioPolicyService_NotificationClientsMutex

#define EXCLUDES_BELOW_PlaybackThread_AudioTrackCbMutex \
    EXCLUDES_AudioPolicyService_NotificationClientsMutex
#define EXCLUDES_PlaybackThread_AudioTrackCbMutex \
    EXCLUDES(android::audio_utils::PlaybackThread_AudioTrackCbMutex) \
    EXCLUDES_BELOW_PlaybackThread_AudioTrackCbMutex

#define EXCLUDES_BELOW_PatchCommandThread_ListenerMutex \
    EXCLUDES_PlaybackThread_AudioTrackCbMutex
#define EXCLUDES_PatchCommandThread_ListenerMutex \
    EXCLUDES(android::audio_utils::PatchCommandThread_ListenerMutex) \
    EXCLUDES_BELOW_PatchCommandThread_ListenerMutex

#define EXCLUDES_BELOW_PassthruPatchRecord_ReadMutex \
    EXCLUDES_PatchCommandThread_ListenerMutex
#define EXCLUDES_PassthruPatchRecord_ReadMutex \
    EXCLUDES(android::audio_utils::PassthruPatchRecord_ReadMutex) \
    EXCLUDES_BELOW_PassthruPatchRecord_ReadMutex

#define EXCLUDES_BELOW_OutputTrack_TrackMetadataMutex \
    EXCLUDES_PassthruPatchRecord_ReadMutex
#define EXCLUDES_OutputTrack_TrackMetadataMutex \
    EXCLUDES(android::audio_utils::OutputTrack_TrackMetadataMutex) \
    EXCLUDES_BELOW_OutputTrack_TrackMetadataMutex

#define EXCLUDES_BELOW_ConfigEvent_Mutex \
    EXCLUDES_OutputTrack_TrackMetadataMutex
#define EXCLUDES_ConfigEvent_Mutex \
    EXCLUDES(android::audio_utils::ConfigEvent_Mutex) \
    EXCLUDES_BELOW_ConfigEvent_Mutex

#define EXCLUDES_BELOW_AsyncCallbackThread_Mutex \
    EXCLUDES_ConfigEvent_Mutex
#define EXCLUDES_AsyncCallbackThread_Mutex \
    EXCLUDES(android::audio_utils::AsyncCallbackThread_Mutex) \
    EXCLUDES_BELOW_AsyncCallbackThread_Mutex

#define EXCLUDES_BELOW_AudioFlinger_UnregisteredWritersMutex \
    EXCLUDES_AsyncCallbackThread_Mutex
#define EXCLUDES_AudioFlinger_UnregisteredWritersMutex \
    EXCLUDES(android::audio_utils::AudioFlinger_UnregisteredWritersMutex) \
    EXCLUDES_BELOW_AudioFlinger_UnregisteredWritersMutex

#define EXCLUDES_BELOW_EffectBase_Mutex \
    EXCLUDES_AudioFlinger_UnregisteredWritersMutex
#define EXCLUDES_EffectBase_Mutex \
    EXCLUDES(android::audio_utils::EffectBase_Mutex) \
    EXCLUDES_BELOW_EffectBase_Mutex

#define EXCLUDES_BELOW_DeviceEffectProxy_ProxyMutex \
    EXCLUDES_EffectBase_Mutex
#define EXCLUDES_DeviceEffectProxy_ProxyMutex \
    EXCLUDES(android::audio_utils::DeviceEffectProxy_ProxyMutex) \
    EXCLUDES_BELOW_DeviceEffectProxy_ProxyMutex

#define EXCLUDES_BELOW_EffectChain_Mutex \
    EXCLUDES_DeviceEffectProxy_ProxyMutex
#define EXCLUDES_EffectChain_Mutex \
    EXCLUDES(android::audio_utils::EffectChain_Mutex) \
    EXCLUDES_BELOW_EffectChain_Mutex

#define EXCLUDES_BELOW_MelReporter_Mutex \
    EXCLUDES_EffectChain_Mutex
#define EXCLUDES_MelReporter_Mutex \
    EXCLUDES(android::audio_utils::MelReporter_Mutex) \
    EXCLUDES_BELOW_MelReporter_Mutex

#define EXCLUDES_BELOW_AudioFlinger_ClientMutex \
    EXCLUDES_MelReporter_Mutex
#define EXCLUDES_AudioFlinger_ClientMutex \
    EXCLUDES(android::audio_utils::AudioFlinger_ClientMutex) \
    EXCLUDES_BELOW_AudioFlinger_ClientMutex

#define EXCLUDES_BELOW_ThreadBase_Mutex \
    EXCLUDES_AudioFlinger_ClientMutex
#define EXCLUDES_ThreadBase_Mutex \
    EXCLUDES(android::audio_utils::ThreadBase_Mutex) \
    EXCLUDES_BELOW_ThreadBase_Mutex

#define EXCLUDES_BELOW_PatchCommandThread_Mutex \
    EXCLUDES_ThreadBase_Mutex
#define EXCLUDES_PatchCommandThread_Mutex \
    EXCLUDES(android::audio_utils::PatchCommandThread_Mutex) \
    EXCLUDES_BELOW_PatchCommandThread_Mutex

#define EXCLUDES_BELOW_DeviceEffectManager_Mutex \
    EXCLUDES_PatchCommandThread_Mutex
#define EXCLUDES_DeviceEffectManager_Mutex \
    EXCLUDES(android::audio_utils::DeviceEffectManager_Mutex) \
    EXCLUDES_BELOW_DeviceEffectManager_Mutex

#define EXCLUDES_BELOW_AudioFlinger_HardwareMutex \
    EXCLUDES_DeviceEffectManager_Mutex
#define EXCLUDES_AudioFlinger_HardwareMutex \
    EXCLUDES(android::audio_utils::AudioFlinger_HardwareMutex) \
    EXCLUDES_BELOW_AudioFlinger_HardwareMutex

#define EXCLUDES_BELOW_AudioFlinger_Mutex \
    EXCLUDES_AudioFlinger_HardwareMutex
#define EXCLUDES_AudioFlinger_Mutex \
    EXCLUDES(android::audio_utils::AudioFlinger_Mutex) \
    EXCLUDES_BELOW_AudioFlinger_Mutex

#define EXCLUDES_BELOW_UidPolicy_Mutex \
    EXCLUDES_AudioFlinger_Mutex
#define EXCLUDES_UidPolicy_Mutex \
    EXCLUDES(android::audio_utils::UidPolicy_Mutex) \
    EXCLUDES_BELOW_UidPolicy_Mutex

#define EXCLUDES_BELOW_AudioCommand_Mutex \
    EXCLUDES_UidPolicy_Mutex
#define EXCLUDES_AudioCommand_Mutex \
    EXCLUDES(android::audio_utils::AudioCommand_Mutex) \
    EXCLUDES_BELOW_AudioCommand_Mutex

#define EXCLUDES_BELOW_CommandThread_Mutex \
    EXCLUDES_AudioCommand_Mutex
#define EXCLUDES_CommandThread_Mutex \
    EXCLUDES(android::audio_utils::CommandThread_Mutex) \
    EXCLUDES_BELOW_CommandThread_Mutex

#define EXCLUDES_BELOW_AudioPolicyService_Mutex \
    EXCLUDES_CommandThread_Mutex
#define EXCLUDES_AudioPolicyService_Mutex \
    EXCLUDES(android::audio_utils::AudioPolicyService_Mutex) \
    EXCLUDES_BELOW_AudioPolicyService_Mutex

#define EXCLUDES_BELOW_EffectBase_PolicyMutex \
    EXCLUDES_AudioPolicyService_Mutex
#define EXCLUDES_EffectBase_PolicyMutex \
    EXCLUDES(android::audio_utils::EffectBase_PolicyMutex) \
    EXCLUDES_BELOW_EffectBase_PolicyMutex

#define EXCLUDES_BELOW_EffectHandle_Mutex \
    EXCLUDES_EffectBase_PolicyMutex
#define EXCLUDES_EffectHandle_Mutex \
    EXCLUDES(android::audio_utils::EffectHandle_Mutex) \
    EXCLUDES_BELOW_EffectHandle_Mutex

#define EXCLUDES_BELOW_AudioPolicyEffects_Mutex \
    EXCLUDES_EffectHandle_Mutex
#define EXCLUDES_AudioPolicyEffects_Mutex \
    EXCLUDES(android::audio_utils::AudioPolicyEffects_Mutex) \
    EXCLUDES_BELOW_AudioPolicyEffects_Mutex

#define EXCLUDES_BELOW_Spatializer_Mutex \
    EXCLUDES_AudioPolicyEffects_Mutex
#define EXCLUDES_Spatializer_Mutex \
    EXCLUDES(android::audio_utils::Spatializer_Mutex) \
    EXCLUDES_BELOW_Spatializer_Mutex

#define EXCLUDES_AUDIO_ALL \
    EXCLUDES_Spatializer_Mutex

// --- End generated section
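
// Example (hypothetical usage sketch): annotating a method with one of the
// generated exclusion macros.  A method that may acquire the ThreadBase_Mutex
// capability (or any capability ordered after it) can be marked so that the
// thread-safety analysis flags callers that already hold it; the class and
// member names below are illustrative only, not part of this header.
//
//   class MyThread {  // hypothetical
//   public:
//       void setParameters(const std::string& keyValuePairs)
//               EXCLUDES_ThreadBase_Mutex;
//   private:
//       audio_utils::mutex mMutex{
//               android::audio_utils::MutexOrder::kThreadBase_Mutex};
//   };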

/**
 * AudioMutexAttributes is a collection of types and constexpr configuration
 * used for the Android audio mutex.
 *
 * A different AudioMutexAttributes configuration will instantiate a completely
 * independent set of mutex strategies, statics, and thread locals
 * for a different family of mutexes.
 */

class AudioMutexAttributes {
public:
    // Order types, name arrays.
    using order_t = MutexOrder;
    static constexpr auto& order_names_ = gMutexNames;
    static constexpr size_t order_size_ = static_cast<size_t>(MutexOrder::kSize);
    static constexpr order_t order_default_ = MutexOrder::kOtherMutex;

    // verify order information
    static_assert(std::size(order_names_) == order_size_);
    static_assert(static_cast<size_t>(order_default_) < order_size_);

    // Set mutex_tracking_enabled_ to true to enable mutex
    // statistics and debugging (order checking) features.
    static constexpr bool mutex_tracking_enabled_ = true;

    // Control the depth of the mutex stack per thread (the mutexes
    // we track). Set this to the maximum expected
    // number of mutexes held by a thread. If the depth is too small,
    // deadlock detection, order checking, and recursion checking
    // may result in a false negative. This is a static configuration
    // because reallocating memory for the stack requires a lock for
    // the reader.
    static constexpr size_t mutex_stack_depth_ = 16;

    // Enable or disable log always fatal.
    // This also requires the mutex feature flag to be set.
    static constexpr bool abort_on_order_check_ = true;
    static constexpr bool abort_on_recursion_check_ = true;
    static constexpr bool abort_on_invalid_unlock_ = true;
};
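
// Example (hypothetical sketch): a different attributes class instantiates an
// independent mutex domain, as described above.  All names here (MyOrder,
// gMyMutexNames, MyMutexAttributes, my_mutex) are illustrative, not part of
// this header.
//
//   enum class MyOrder : uint32_t { kFirst = 0, kSecond = 1, kSize = 2 };
//   inline constexpr const char* const gMyMutexNames[] = { "First", "Second" };
//   class MyMutexAttributes {
//   public:
//       using order_t = MyOrder;
//       static constexpr auto& order_names_ = gMyMutexNames;
//       static constexpr size_t order_size_ = static_cast<size_t>(MyOrder::kSize);
//       static constexpr order_t order_default_ = MyOrder::kSecond;
//       static constexpr bool mutex_tracking_enabled_ = true;
//       static constexpr size_t mutex_stack_depth_ = 16;
//       static constexpr bool abort_on_order_check_ = true;
//       static constexpr bool abort_on_recursion_check_ = true;
//       static constexpr bool abort_on_invalid_unlock_ = true;
//   };
//   using my_mutex = android::audio_utils::mutex_impl<MyMutexAttributes>;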

// relaxed_atomic implements the same features as std::atomic<T> but using
// std::memory_order_relaxed as default.
//
// This is the minimum consistency for the multiple writer multiple reader case.

template <typename T>
class relaxed_atomic : private std::atomic<T> {
public:
    constexpr relaxed_atomic(T desired = {}) : std::atomic<T>(desired) {}
    operator T() const { return std::atomic<T>::load(std::memory_order_relaxed); }
    T operator=(T desired) {
        std::atomic<T>::store(desired, std::memory_order_relaxed); return desired;
    }

    T operator--() { return std::atomic<T>::fetch_sub(1, std::memory_order_relaxed) - 1; }
    T operator++() { return std::atomic<T>::fetch_add(1, std::memory_order_relaxed) + 1; }
    T operator+=(const T value) {
        return std::atomic<T>::fetch_add(value, std::memory_order_relaxed) + value;
    }

    T load(std::memory_order order = std::memory_order_relaxed) const {
        return std::atomic<T>::load(order);
    }
    T fetch_add(T arg, std::memory_order order = std::memory_order_relaxed) {
        return std::atomic<T>::fetch_add(arg, order);
    }
    bool compare_exchange_weak(
            T& expected, T desired, std::memory_order order = std::memory_order_relaxed) {
        return std::atomic<T>::compare_exchange_weak(expected, desired, order);
    }
};
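
// Example: relaxed_atomic defaults every access to memory_order_relaxed,
// which is sufficient for monotonically accumulated statistics counters
// (a minimal sketch).
//
//   relaxed_atomic<uint64_t> counter;   // multiple writers, multiple readers
//   ++counter;                          // fetch_add(1, memory_order_relaxed)
//   counter += 2;                       // fetch_add(2, memory_order_relaxed)
//   const uint64_t snapshot = counter;  // load(memory_order_relaxed)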

// unordered_atomic implements data storage such that memory reads have a value
// consistent with a memory write in some order, i.e. not having values
// "out of thin air".
//
// Unordered memory reads and writes may not actually take place but be implicitly cached.
// Nevertheless, a memory read should return at least as contemporaneous a value
// as the last memory write before the write thread memory barrier that
// preceded the most recent read thread memory barrier.
//
// This is weaker than relaxed_atomic and has no equivalent C++ terminology.
// unordered_atomic would be used for a single writer, multiple reader case,
// where data access of type T would be implemented by the compiler and
// hw architecture with a single "uninterruptible" memory operation.
// (The current implementation holds true for general realized CPU architectures.)
// Note that multiple writers would cause read-modify-write unordered_atomic
// operations to have inconsistent results.
//
// unordered_atomic is implemented with normal operations such that compiler
// optimizations can take place which would otherwise be discouraged for atomics.
// https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0062r1.html

// VT may be volatile qualified, if desired, or a normal arithmetic type.
template <typename VT>
class unordered_atomic {
    using T = std::decay_t<VT>;
    static_assert(std::atomic<T>::is_always_lock_free);
public:
    constexpr unordered_atomic(T desired = {}) : t_(desired) {}
    operator T() const { return t_; }
    T operator=(T desired) { t_ = desired; return desired; }

    // a volatile ++t_ or t_ += 1 is deprecated in C++20.
    T operator--() { return operator=(t_ - 1); }
    T operator++() { return operator=(t_ + 1); }
    T operator+=(const T value) { return operator=(t_ + value); }

    T load(std::memory_order order = std::memory_order_relaxed) const { (void)order; return t_; }

private:
    VT t_;
};
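
// Example (sketch): unordered_atomic is intended for one writer thread with
// lockless readers; concurrent writers would race on the plain
// read-modify-write operations.
//
//   unordered_atomic<size_t> depth;  // written by a single thread only
//   ++depth;                         // plain increment on the underlying t_
//   const size_t d = depth.load();   // a reader sees some previously written value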

inline constexpr pid_t kInvalidTid = -1;

// While std::atomic with the default std::memory_order_seq_cst
// access could be used, it results in performance loss over less
// restrictive memory access.

// stats_atomic is a multiple writer multiple reader object.
//
// This is normally used to increment statistics counters on
// mutex priority categories.
//
// We use relaxed_atomic instead of std::atomic/memory_order_seq_cst here.
template <typename T>
using stats_atomic = relaxed_atomic<T>;

// thread_atomic is a single writer multiple reader object.
//
// This is normally accessed as a thread local (hence single writer)
// but may be accessed (rarely) by multiple readers on deadlock
// detection, which does not modify the data.
//
// We use unordered_atomic instead of std::atomic/memory_order_seq_cst here.
template <typename T>
using thread_atomic = unordered_atomic<T>;

inline void compiler_memory_barrier() {
    // Reads or writes are not migrated or cached by the compiler across this barrier.
    asm volatile("" ::: "memory");

    // if not using gnu / clang, compare with the compiler-only barrier generated by
    // std::atomic_signal_fence(std::memory_order_seq_cst);
    // https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p0124r7.html
}

// The mutex locking is thread-safe.
//
// However, the mutex metadata (statistics and thread info) updates are not locked
// by an internal mutex for efficiency reasons. Instead, they use atomics, with
// the possibility of false negatives since they are not sampled synchronously.
//
// Unordered and relaxed atomics do not implicitly impose any memory barriers,
// so the compiler may cache the statistics and thread metadata aggressively,
// which makes this asynchronous atomic sampling worse. To limit that,
// we can elect to explicitly issue compiler memory barriers to ensure
// metadata visibility across threads. This is optional, and only useful if
// the compiler does aggressive inlining.
//
inline void metadata_memory_barrier_if_needed() {
    // check the level of atomicity used for thread metadata to alter the
    // use of a barrier here.
    if constexpr (std::is_same_v<thread_atomic<int32_t>, unordered_atomic<int32_t>>
            || std::is_same_v<thread_atomic<int32_t>, relaxed_atomic<int32_t>>) {
        compiler_memory_barrier();
    }
}

/**
 * Helper method to accumulate floating point values to an atomic
 * prior to C++23 support of atomic<float> and atomic<double> accumulation.
 */
template <typename AccumulateType, typename ValueType>
requires std::is_floating_point<AccumulateType>::value
void atomic_add_to(std::atomic<AccumulateType>& dst, ValueType src,
        std::memory_order order = std::memory_order_seq_cst) {
    static_assert(std::atomic<AccumulateType>::is_always_lock_free);
    AccumulateType expected;
    do {
        expected = dst;
    } while (!dst.compare_exchange_weak(expected, expected + src, order));
}

template <typename AccumulateType, typename ValueType>
requires std::is_integral<AccumulateType>::value
void atomic_add_to(std::atomic<AccumulateType>& dst, ValueType src,
        std::memory_order order = std::memory_order_seq_cst) {
    dst.fetch_add(src, order);
}

template <typename AccumulateType, typename ValueType>
requires std::is_floating_point<AccumulateType>::value
void atomic_add_to(relaxed_atomic<AccumulateType>& dst, ValueType src,
        std::memory_order order = std::memory_order_relaxed) {
    AccumulateType expected;
    do {
        expected = dst;
    } while (!dst.compare_exchange_weak(expected, expected + src, order));
}

template <typename AccumulateType, typename ValueType>
requires std::is_integral<AccumulateType>::value
void atomic_add_to(relaxed_atomic<AccumulateType>& dst, ValueType src,
        std::memory_order order = std::memory_order_relaxed) {
    dst.fetch_add(src, order);
}

template <typename AccumulateType, typename ValueType>
void atomic_add_to(unordered_atomic<AccumulateType>& dst, ValueType src,
        std::memory_order order = std::memory_order_relaxed) {
    (void)order;  // unused
    dst = dst + src;
}
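
// Example: since std::atomic<double>::fetch_add() is unavailable before
// C++23, the floating point overloads above emulate accumulation with a
// compare-exchange loop (a minimal sketch).
//
//   std::atomic<double> total{0.};
//   atomic_add_to(total, 1.5);  // retries expected + 1.5 until the CAS succeeds
//   atomic_add_to(total, 2.5);
//   // total.load() == 4.0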

/**
 * mutex_stat is a struct composed of atomic members associated
 * with usage of a particular mutex order.
 *
 * Access of a snapshot of this does not have a global lock, so the reader
 * may experience temporal shear. Use of this by a different reader thread
 * is for informative purposes only.
 */

// CounterType == uint64_t, AccumulatorType == double
template <typename CounterType, typename AccumulatorType>
struct mutex_stat {
    static_assert(std::is_floating_point_v<AccumulatorType>);
    static_assert(std::is_integral_v<CounterType>);
    static_assert(std::atomic<CounterType>::is_always_lock_free);
    static_assert(std::atomic<AccumulatorType>::is_always_lock_free);
    stats_atomic<CounterType> locks = 0;    // number of times locked
    stats_atomic<CounterType> unlocks = 0;  // number of times unlocked
    stats_atomic<CounterType> waits = 0;    // number of locks that waited
    stats_atomic<AccumulatorType> wait_sum_ns = 0.;    // sum of time waited.
    stats_atomic<AccumulatorType> wait_sumsq_ns = 0.;  // sumsq of time waited.

    template <typename WaitTimeType>
    void add_wait_time(WaitTimeType wait_ns) {
        AccumulatorType value_ns = wait_ns;
        atomic_add_to(wait_sum_ns, value_ns);
        atomic_add_to(wait_sumsq_ns, value_ns * value_ns);
    }

    std::string to_string() const {
        CounterType uncontested = locks - waits;
        AccumulatorType recip = waits == 0 ? 0. : 1. / waits;
        AccumulatorType avg_wait_ms = waits == 0 ? 0. : wait_sum_ns * 1e-6 * recip;
        AccumulatorType std_wait_ms = waits < 2 ? 0. :
                std::sqrt(std::max(wait_sumsq_ns * recip * 1e-12 - avg_wait_ms * avg_wait_ms,
                        0.));
        return std::string("locks: ").append(std::to_string(locks))
                .append("\nuncontested: ").append(std::to_string(uncontested))
                .append("\nwaits: ").append(std::to_string(waits))
                .append("\nunlocks: ").append(std::to_string(unlocks))
                .append("\navg_wait_ms: ").append(std::to_string(avg_wait_ms))
                .append("\nstd_wait_ms: ").append(std::to_string(std_wait_ms))
                .append("\n");
    }
};
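
// Example (sketch): recording one contended lock with a 2 ms wait and dumping
// the derived statistics; reads from another thread may see temporal shear.
//
//   mutex_stat<uint64_t, double> stat;
//   ++stat.locks;
//   ++stat.waits;
//   stat.add_wait_time(2'000'000);  // 2 ms expressed in ns
//   ++stat.unlocks;
//   ALOGD("%s", stat.to_string().c_str());  // avg_wait_ms: 2.0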

/**
 * atomic_stack is a single writer, multiple reader object.
 * Readers not on the same thread as the writer may experience temporal shear,
 * but individual members are accessed atomic-safe, i.e. no partial member
 * reads or delayed writes due to caching.
 *
 * For use with mutex checking, the atomic_stack maintains an ordering on
 * P (payload) such that the top item pushed must always be greater than or
 * equal to the P (payload) of items below it.
 *
 * Pushes always go to the top of the stack. Removes can occur
 * from any place in the stack, but typically near the top.
 *
 * The atomic_stack never reallocates beyond its fixed capacity of N.
 * This prevents a lockless reader from accessing invalid memory because
 * the address region does not change.
 *
 * If the number of pushes exceeds the capacity N, then items may be discarded.
 * In that case, the stack is a subset stack of the "true" unlimited
 * capacity stack. Nevertheless, a subset of an ordered stack
 * with items deleted is also ordered.
 *
 * The size() of the atomic_stack is the size of the subset stack of tracked items.
 * The true_size() is the number of items pushed minus the
 * number of items removed (the "true" size if capacity were unlimited).
 * Since the capacity() is constant, the true_size() may include
 * items we don't track except by count. If true_size() == size() then
 * the subset stack is complete.
 *
 * In this single writer, multiple reader model, we could get away with
 * memory_order_relaxed as the reader is purely informative,
 * but we choose memory_order_seq_cst which imposes the most
 * restrictions on the compiler (variable access reordering) and the
 * processor (memory access reordering). This means operations take effect
 * in the order written. However, this isn't strictly needed - as there is
 * only one writer, a read-modify-write operation is safe (no need for special
 * memory instructions), and there isn't the acquire-release semantics with
 * non-atomic memory access needed for a lockless fifo, for example.
 */

/*
 * For audio mutex purposes, one question arises - why don't we use
 * a bitmask to represent the capabilities taken by a thread
 * instead of a stack?
 *
 * A bitmask arrangement works if there exists a one-to-one relationship
 * from a physical mutex to its capability. That may exist for some
 * projects, but not AudioFlinger.
 *
 * As a consequence, we need the actual count and handle:
 *
 * 1) A single thread may hold multiple instances of some capabilities
 *    (e.g. ThreadBase_Mutex and EffectChain_Mutex).
 *    For example there may be multiple effect chains locked during mixing.
 *    There may be multiple PlaybackThreads locked during effect chain movement.
 *    A bit per capability can't count beyond 1.
 *
 * 2) Deadlock detection requires tracking the actual MutexHandle (a void*)
 *    to form a cycle, because there may be many mutexes associated with a
 *    given capability order.
 *    For example, each PlaybackThread or RecordThread will have its own mutex
 *    with the ThreadBase_Mutex capability.
 */

template <typename Item, typename Payload, size_t N>
class atomic_stack {
public:
    using item_payload_pair_t = std::pair<thread_atomic<Item>, thread_atomic<Payload>>;

    /**
     * Puts the item at the top of the stack.
     *
     * If the stack depth is exceeded the item
     * replaces the top.
     *
     * Mutexes when locked are always placed on the top of the stack;
     * however, they may be unlocked in a non last-in-first-out (LIFO)
     * order. It is rare to see a non LIFO order, but it can happen.
     */
    void push(const Item& item, const Payload& payload) {
        size_t location = top_;
        size_t increment = 1;
        if (location >= N) {
            // we exceed the top of stack.
            //
            // although we could ignore this item (keeping the oldest subset),
            // the better solution is to replace the topmost entry as
            // it allows quicker removal.
            location = N - 1;
            increment = 0;
        }
        // issue the operations close together.
        pairs_[location].first = item;
        pairs_[location].second = payload;
        ++true_top_;
        top_ += increment;
    }

    /**
     * Removes the item which is expected at the top of the stack
     * but may be lower. Mutexes are generally unlocked in stack
     * order (LIFO), but this is not a strict requirement.
     */
    bool remove(const Item& item) {
        if (true_top_ == 0) {
            return false;  // cannot remove.
        }
        // there is a temporary benign read race here where true_top_ != top_.
        --true_top_;
        for (size_t i = top_; i > 0; ) {
            if (item == pairs_[--i].first) {
                // We shift to preserve order.
                // A reader may temporarily see a "duplicate" entry
                // but that is preferable to a "missing" entry
                // for the purposes of deadlock detection.
                const size_t limit = top_ - 1;
                while (i < limit) {  // using atomics, we need to assign first, second separately.
                    pairs_[i].first = pairs_[i + 1].first.load();
                    pairs_[i].second = pairs_[i + 1].second.load();
                    ++i;
                }
                --top_;  // now we restrict our range.
                // on relaxed semantics, it might be better to clear out the last
                // pair, but we are seq_cst.
                return true;
            }
        }
        // not found in our subset.
        //
        // we return true upon correct removal (true_top_ must always be >= top_).
        if (true_top_ >= top_) return true;

        // else recover and return false to notify that removal was invalid.
        true_top_ = top_.load();
        return false;
    }

    /**
     * Returns the top of our atomic subset stack
     * or the invalid_ (zero-initialized) entry if it doesn't exist.
     */
    // Using std::optional<> is a possibility,
    // but as std::atomic doesn't have a copy ctor (and one would not make sense),
    // we would want to directly return an optional on the non-atomic values,
    // in a custom pair.
    const item_payload_pair_t& top(size_t offset = 0) const {
        const ssize_t top = static_cast<ssize_t>(top_) - static_cast<ssize_t>(offset);
        if (top > 0 && top <= static_cast<ssize_t>(N)) return pairs_[top - 1];
        return invalid_;  // we don't know anything.
    }

    /**
     * Returns the bottom (or base) of our atomic subset stack
     * or the invalid_ (zero-initialized) entry if it doesn't exist.
     */
    const item_payload_pair_t& bottom(size_t offset = 0) const {
        if (offset < top_) return pairs_[offset];
        return invalid_;  // we don't know anything.
    }

    /**
     * Prints the contents of the stack, base (oldest) entry first.
     *
     * If the thread is not the same as the writer thread, there could be
     * temporal shear in the data printed.
     */
    std::string to_string() const {
        std::string s("size: ");
        s.append(std::to_string(size()))
                .append(" true_size: ").append(std::to_string(true_size()))
                .append(" items: [");
        for (size_t i = 0; i < top_; ++i) {
            s.append("{ ")
                    .append(std::to_string(reinterpret_cast<uintptr_t>(pairs_[i].first.load())))
                    .append(", ")
                    .append(std::to_string(static_cast<size_t>(pairs_[i].second.load())))
                    .append(" } ");
        }
        s.append("]");
        return s;
    }

    /*
     * stack configuration
     */
    static consteval size_t capacity() { return N; }
    size_t true_size() const { return true_top_; }
    size_t size() const { return top_; }
    const auto& invalid() const { return invalid_; }

private:
    thread_atomic<size_t> top_ = 0;       // ranges from 0 to N.
    thread_atomic<size_t> true_top_ = 0;  // always >= top_.
                                          // if true_top_ == top_ the subset stack is complete.

    /*
     * The subset stack entries are a pair of atomics rather than an atomic<pair>
     * to prevent lock requirements if T and P are small enough, i.e. <= sizeof(size_t).
     *
     * As atomics are not composable from smaller atomics, there may be some
     * temporary inconsistencies when reading from a different thread than the writer.
     */
    item_payload_pair_t pairs_[N]{};

    /*
     * The invalid pair is returned when top() is called without a tracked item.
     * This might occur with an empty subset of the "true" stack.
     */
    static inline const item_payload_pair_t invalid_{};  // volatile != constexpr, if so qualified
};
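
// Example (sketch): tracking two mutexes with their capability order as the
// payload; m1 and m2 stand in for arbitrary mutex addresses.  Note the stack
// itself does not enforce ordering; thread_mutex_info::check_held() below
// does that before pushing.
//
//   atomic_stack<void*, MutexOrder, 16> stack;
//   stack.push(&m1, MutexOrder::kAudioFlinger_Mutex);
//   stack.push(&m2, MutexOrder::kThreadBase_Mutex);
//   stack.remove(&m1);  // non-LIFO removal is permitted
//   // stack.size() == 1 && stack.true_size() == 1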

// A list of reasons why we might have an inter-thread wait besides a mutex.
enum class other_wait_reason_t {
    none = 0,
    cv = 1,
    join = 2,
};

inline constexpr const char* reason_to_string(other_wait_reason_t reason) {
    switch (reason) {
        case other_wait_reason_t::none: return "none";
        case other_wait_reason_t::cv: return "cv";
        case other_wait_reason_t::join: return "join";
        default: return "invalid";
    }
}

/**
 * thread_mutex_info is a struct that is associated with every
 * thread the first time a mutex is used on it. Writing will be through
 * a single thread (essentially thread_local), but the thread_registry
 * debug methods may access this through a different reader thread.
 *
 * If the thread does not use the audio_utils mutex, this struct is never
 * allocated, although there is an overhead of approximately 16 bytes for a
 * shared_ptr and 1 byte for a thread_local once-bool.
 *
 * Here, we use for the MutexHandle a void*, which is used as an opaque unique ID
 * representing the mutex.
 *
 * Since there is no global locking, the validity of the mutex* associated to
 * the void* is unknown -- the mutex* could be deallocated in a different
 * thread. Nevertheless the opaque ID can still be used to check deadlocks,
 * realizing there could be a false positive on a potential reader race
 * where a new mutex is created at the same storage location.
 */
template <typename MutexHandle, typename Order, size_t N>
class thread_mutex_info {
public:
    using atomic_stack_t = atomic_stack<MutexHandle, Order, N>;

    class other_wait_info {
    public:
        thread_atomic<pid_t> tid_ = kInvalidTid;
        thread_atomic<other_wait_reason_t> reason_ = other_wait_reason_t::none;
        thread_atomic<Order> order_ = (Order)-1;

        std::string to_string() const {
            const pid_t tid = tid_.load();
            const other_wait_reason_t reason = reason_.load();
            const Order order = order_.load();

            std::string s;
            if (tid != kInvalidTid) {
                switch (reason) {
                    case other_wait_reason_t::none:
                    default:
                        break;
                    case other_wait_reason_t::cv:
                        s.append("cv_tid: ").append(std::to_string(tid))
                                .append(" cv_order: ").append(std::to_string(
                                        static_cast<size_t>(order)));
                        break;
                    case other_wait_reason_t::join:
                        s.append("join_tid: ").append(std::to_string(tid));
                        break;
                }
            }
            return s;
        }
    };

    thread_mutex_info(pid_t tid) : tid_(tid) {}

    // the destructor releases the thread_mutex_info.
    // declared here, defined below due to use of thread_registry.
    ~thread_mutex_info();

    void reset_waiter(MutexHandle waiter = nullptr) {
        mutex_wait_ = waiter;
    }

    /**
     * check_held returns the stack pair that conflicts
     * with the existing mutex handle and order, or the invalid
     * stack pair (empty mutex handle and empty order).
     */
    const typename atomic_stack_t::item_payload_pair_t&
    check_held(MutexHandle mutex, Order order) const {
        // validate mutex order.
        const size_t size = mutexes_held_.size();
        for (size_t i = 0; i < size; ++i) {
            const auto& top = mutexes_held_.top(i);
            const auto top_order = top.second.load();

            if (top_order < order) break;               // ok
            if (top_order > order) return top;          // inverted order
            if (top.first.load() == mutex) return top;  // recursive mutex
        }
        return mutexes_held_.invalid();
    }

    /*
     * This is an unverified push. Use check_held() prior to this to
     * verify no lock inversion or replication.
     */
    void push_held(MutexHandle mutex, Order order) {
        mutexes_held_.push(mutex, order);
    }

    bool remove_held(MutexHandle mutex) {
        return mutexes_held_.remove(mutex);
    }

    // Variants used by condition_variable on wait() that handle
    // hint metadata. This is used by the deadlock detection algorithm to inform
    // that we are waiting on a worker thread identified by notifier_tid.

    void push_held_for_cv(MutexHandle mutex, Order order) {
        push_held(mutex, order);
        // condition wait has expired. always invalidate.
        other_wait_info_.tid_ = kInvalidTid;
    }

    bool remove_held_for_cv(MutexHandle mutex, Order order, pid_t notifier_tid) {
        // last condition on the mutex overwrites.
        other_wait_info_.order_ = order;
        other_wait_info_.reason_ = other_wait_reason_t::cv;
        other_wait_info_.tid_ = notifier_tid;
        return remove_held(mutex);
    }

    // Add waiting state for join.
    void add_wait_join(pid_t waiting_tid) {
        other_wait_info_.reason_ = other_wait_reason_t::join;
        other_wait_info_.tid_ = waiting_tid;
    }

    void remove_wait_join() {
        other_wait_info_.tid_ = kInvalidTid;
    }

    /*
     * Due to the fact that the thread_mutex_info contents are not globally locked,
     * there may be temporal shear. The string representation is
     * informative only.
     */
    std::string to_string() const {
        std::string s;
        s.append("tid: ").append(std::to_string(static_cast<int>(tid_)));
        s.append("\nwaiting: ").append(std::to_string(
                reinterpret_cast<uintptr_t>(mutex_wait_.load())));
        // inform if there is a condition variable wait associated with a known thread.
        if (other_wait_info_.tid_ != kInvalidTid) {
            s.append("\n").append(other_wait_info_.to_string());
        }
        s.append("\nheld: ").append(mutexes_held_.to_string());
        return s;
    }

    /*
     * empty() indicates that the thread is not waiting for or
     * holding any mutexes.
     */
    bool empty() const {
        return mutex_wait_ == nullptr && mutexes_held_.size() == 0;
    }

    const auto& stack() const {
        return mutexes_held_;
    }

    const pid_t tid_;                          // me
    thread_atomic<MutexHandle> mutex_wait_{};  // mutex waiting for
    other_wait_info other_wait_info_;
    atomic_stack_t mutexes_held_;              // mutexes held
};
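
// Example (sketch): the lock path (see lock_scoped_stat_enabled::pre_lock()
// below) consults check_held() before pushing, which is why push_held() itself
// stays unverified and cheap; `m` and `order` here are illustrative.
//
//   auto& tminfo = *mutex::get_thread_mutex_info();
//   const auto& conflict = tminfo.check_held(&m, order);
//   if (conflict.first == nullptr) {  // no inversion or recursion detected.
//       tminfo.push_held(&m, order);
//   }
//   // ... later, on unlock:
//   tminfo.remove_held(&m);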


/**
 * deadlock_info_t encapsulates the mutex wait / cycle information from
 * thread_registry::deadlock_detection().
 *
 * If a cycle is detected, the last element of the vector chain represents
 * a tid that is repeated somewhere earlier in the vector.
 */
struct deadlock_info_t {
public:
    explicit deadlock_info_t(pid_t tid_param) : tid(tid_param) {}

    bool empty() const {
        return chain.empty();
    }

    std::string to_string() const {
        std::string description;

        if (has_cycle) {
            description.append("mutex cycle found (last tid repeated) ");
        } else {
            description.append("mutex wait chain ");
        }
        description.append("[ ").append(std::to_string(tid));
        // Note: when we dump here, we add the timeout tid to the start of the wait chain.
        for (const auto& [ tid2, name ] : chain) {
            description.append(", ").append(std::to_string(tid2))
                    .append(" (by ").append(name).append(")");
        }
        description.append(" ]");
        return description;
    }

    const pid_t tid;         // tid for which the deadlock was checked
    bool has_cycle = false;  // true if there is a cycle detected
    other_wait_reason_t other_wait_reason = other_wait_reason_t::none;
    std::vector<std::pair<pid_t, std::string>> chain;  // wait chain of tids and mutexes.
};
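
// Example (sketch): interpreting a deadlock_detection() result (the static
// helper on mutex_impl below forwards to thread_registry).
//
//   const auto info = android::audio_utils::mutex::deadlock_detection(tid);
//   if (info.has_cycle) {
//       ALOGE("deadlock detected: %s", info.to_string().c_str());
//   } else if (!info.empty()) {
//       ALOGW("blocked wait chain: %s", info.to_string().c_str());
//   }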

/**
 * The thread_registry is a thread-safe locked structure that
 * maintains a list of the threads that contain thread_mutex_info.
 *
 * Only the first mutex access from a new thread and the destruction of that
 * thread will trigger an access to the thread_registry map.
 *
 * The debug methods to_string() and deadlock_detection() will also lock the struct
 * long enough to copy the map and safely obtain the weak pointers,
 * and then deal with the thread local data afterwards.
 *
 * It is recommended to keep a static singleton of the thread_registry for the
 * type desired. The singleton should be associated properly with the object
 * it should be unique for, which in this case is the mutex_impl template.
 * This enables access to the elements as needed.
 */
template <typename ThreadInfo>
class thread_registry {
public:
    bool add_to_registry(const std::shared_ptr<ThreadInfo>& tminfo) EXCLUDES(mutex_) {
        ALOGV("%s: registered for %d", __func__, tminfo->tid_);
        std::lock_guard l(mutex_);
        if (registry_.count(tminfo->tid_) > 0) {
            ALOGW("%s: tid %d already exists", __func__, tminfo->tid_);
            return false;
        }
        registry_[tminfo->tid_] = tminfo;
        return true;
    }

    bool remove_from_registry(pid_t tid) EXCLUDES(mutex_) {
        ALOGV("%s: unregistered for %d", __func__, tid);
        std::lock_guard l(mutex_);
        // don't crash here because it might be a test app.
        const bool success = registry_.erase(tid) == 1;
        ALOGW_IF(!success, "%s: Cannot find entry for tid:%d", __func__, tid);
        return success;
    }

    // Returns a std::unordered_map for easy access on tid.
    auto copy_map() EXCLUDES(mutex_) {
        std::lock_guard l(mutex_);
        return registry_;
    }

    // Returns a std::map sorted on tid for easy debug reading.
    auto copy_ordered_map() EXCLUDES(mutex_) {
        std::lock_guard l(mutex_);
        std::map<pid_t, std::weak_ptr<ThreadInfo>> sorted(registry_.begin(), registry_.end());
        return sorted;
    }

    /**
     * Returns a string containing the thread mutex info for each
     * thread that has accessed the audio_utils mutex.
     */
    std::string to_string() {
        // for debug purposes it is much easier to see the tids in numeric order.
        const auto registry_map = copy_ordered_map();
        ALOGV("%s: dumping tids: %zu", __func__, registry_map.size());
        std::string s("thread count: ");
        s.append(std::to_string(registry_map.size())).append("\n");

        std::vector<pid_t> empty;
        for (const auto& [tid, weak_info] : registry_map) {
            const auto info = weak_info.lock();
            if (info) {
                if (info->empty()) {
                    empty.push_back(tid);
                } else {
                    s.append(info->to_string()).append("\n");
                }
            }
        }

        // dump remaining empty tids out
        s.append("tids without current activity [ ");
        for (const auto tid : empty) {
            s.append(std::to_string(tid)).append(" ");
        }
        s.append("]\n");
        return s;
    }

    /**
     * Returns the thread info for a pid_t.
     *
     * It should use a copy of the registry map, which does not change,
     * as it does not take any lock.
     */
    static std::shared_ptr<ThreadInfo> tid_to_info(
            const std::unordered_map<pid_t, std::weak_ptr<ThreadInfo>>& registry_map,
            pid_t tid) {
        const auto it = registry_map.find(tid);
        if (it == registry_map.end()) return {};  // unmapped tid returns empty shared_ptr.
        const auto& weak_info = it->second;
        return weak_info.lock();
    }

    /**
     * Returns a deadlock_info_t struct describing the mutex wait / cycle information.
     *
     * The deadlock_detection() method is not exceptionally fast
     * and is not designed to be called for every mutex locked (and contended).
     * It is designed to run as a diagnostic routine to enhance
     * dumping for watchdogs, like TimeCheck, when a tid is believed blocked.
     *
     * Access of state is through atomics, so it has minimal overhead on
     * concurrent execution, with the possibility of (mostly) false
     * negatives due to race.
     *
     * \param tid target tid which may be in a cycle or blocked.
     * \param mutex_names a string array of mutex names indexed on capability order.
     * \return a deadlock_info_t struct, which contains whether a cycle was found and
     *         a vector of tids and mutex names in the mutex wait chain.
     */
    template <typename StringArray>
    deadlock_info_t deadlock_detection(pid_t tid, const StringArray& mutex_names) {
        const auto registry_map = copy_map();
        deadlock_info_t deadlock_info{tid};

        // if tid not waiting, return.
        const auto tinfo_original_tid = tid_to_info(registry_map, tid);
        if (tinfo_original_tid == nullptr) return deadlock_info;

        void* m = tinfo_original_tid->mutex_wait_.load();
        pid_t other_wait_tid = tinfo_original_tid->other_wait_info_.tid_.load();
        if (m == nullptr && other_wait_tid == kInvalidTid) return deadlock_info;
        other_wait_reason_t other_wait_reason =
                tinfo_original_tid->other_wait_info_.reason_.load();
        size_t other_wait_order =
                static_cast<size_t>(tinfo_original_tid->other_wait_info_.order_.load());

        bool subset = false;  // do we have missing mutex data per thread?

        // Create a helper map from mutex to tid.
        //
        // The helper map is built up from thread_local info rather than from
        // a global mutex list.
        //
        // There are multiple reasons behind this.
        // 1) There are many mutexes (mostly not held). We don't want to keep and
        //    manage a "global" list of them.
        // 2) The mutex pointer itself may be deallocated from a different thread
        //    from the reader. To keep it alive requires either a mutex, or a
        //    weak_ptr to shared_ptr promotion.
        //    Lifetime management is expensive on a per-mutex basis as there are many
        //    of them, but cheaper on a per-thread basis as the threads are fewer.
        // 3) The thread_local lookup is very inexpensive for thread info (special
        //    acceleration by C++ and the OS), but more complex for a mutex list,
        //    which at best is a static concurrent hash map.
        //
        // Note that the mutex_ptr handle is opaque -- it may be deallocated from
        // a different thread, so we use the tid from the thread registry map.
        //
        using pid_order_index_pair_t = std::pair<pid_t, size_t>;
        std::unordered_map<void*, pid_order_index_pair_t> mutex_to_tid;
        for (const auto& [tid2, weak_info] : registry_map) {
            const auto info = weak_info.lock();
            if (info == nullptr) continue;
            const auto& stack = info->mutexes_held_;
            static constinit size_t capacity = std::decay_t<decltype(stack)>::capacity();
            const size_t size = std::min(stack.size(), capacity);
            subset = subset || size != stack.true_size();
            for (size_t i = 0; i < size; ++i) {
                const auto& mutex_order_pair = stack.bottom(i);
                // if this method is not called by the writer thread
                // it is possible for data to change.
                const auto mutex_ptr = mutex_order_pair.first.load();
                const auto order = static_cast<size_t>(mutex_order_pair.second.load());
                if (mutex_ptr != nullptr) {
                    mutex_to_tid[mutex_ptr] = pid_order_index_pair_t{tid2, order};
                }
            }
        }
        ALOGD_IF(subset, "%s: mutex info only subset, deadlock detection may be inaccurate",
                __func__);

        // traverse from tid -> waiting mutex, then
        //               mutex -> tid holding
        // until we get no more tids, or a tid cycle.
        std::unordered_set<pid_t> visited;
        visited.insert(tid);  // mark the original tid, we start there for cycle detection.
        for (pid_t tid2 = tid; true;) {
            size_t order;
            other_wait_reason_t reason = other_wait_reason_t::none;

            if (m != nullptr && mutex_to_tid.count(m)) {
                // waiting on mutex held by another tid.
                std::tie(tid2, order) = mutex_to_tid[m];
            } else if (other_wait_tid != kInvalidTid) {
                // condition variable waiting on tid.
                tid2 = other_wait_tid;
                order = other_wait_order;
                reason = other_wait_reason;
                deadlock_info.other_wait_reason = reason;
            } else {
                // no mutex or cv info.
                return deadlock_info;
            }

            // add to chain.
            // if waiting through a condition variable, we prefix with "cv-".
            const auto name = order < std::size(mutex_names) ? mutex_names[order] : "unknown";
            deadlock_info.chain.emplace_back(tid2,
                    reason == other_wait_reason_t::cv
                            ? std::string("cv-").append(name).c_str()
                            : reason == other_wait_reason_t::join ? "join" : name);

            // cycle detected
            if (visited.count(tid2)) {
                deadlock_info.has_cycle = true;
                return deadlock_info;
            }
            visited.insert(tid2);

            // if tid not waiting, return (it could be blocked on binder).
            const auto tinfo = tid_to_info(registry_map, tid2);
            if (tinfo == nullptr) return deadlock_info;  // the thread may have exited.
            m = tinfo->mutex_wait_.load();
            other_wait_tid = tinfo->other_wait_info_.tid_.load();
            other_wait_reason = tinfo->other_wait_info_.reason_.load();
            other_wait_order = static_cast<size_t>(tinfo->other_wait_info_.order_.load());
        }
    }

private:
    mutable std::mutex mutex_;
    std::unordered_map<pid_t, std::weak_ptr<ThreadInfo>> registry_ GUARDED_BY(mutex_);
};
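
// Example (sketch): a watchdog would normally dump registry state through the
// static helpers on mutex_impl below rather than touching thread_registry
// directly.
//
//   const std::string dump = android::audio_utils::mutex::all_threads_to_string();
//   ALOGI("%s", dump.c_str());  // per-tid waiter / held-mutex state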

// audio_utils::mutex, audio_utils::lock_guard, audio_utils::unique_lock,
// and audio_utils::condition_variable are method compatible versions
// of std::mutex, std::lock_guard, std::unique_lock, and std::condition_variable
// for optimizing audio thread performance and debugging.
//
// Note: we do not use std::timed_mutex as its Clang library implementation
// is inefficient. One is better off making a custom timed implementation using
// pthread_mutex_timedlock() on the mutex::native_handle().

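// Example (hypothetical sketch): audio_utils::mutex is a drop-in replacement
// for std::mutex in annotated code; lock_guard is the companion RAII type
// referenced above.  mLock and mValue are illustrative member names.
//
//   audio_utils::mutex mLock;
//   int mValue GUARDED_BY(mLock) = 0;
//
//   void increment() EXCLUDES(mLock) {
//       audio_utils::lock_guard lg(mLock);
//       ++mValue;
//   }
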
extern bool mutex_get_enable_flag();

template <typename Attributes>
class CAPABILITY("mutex") [[nodiscard]] mutex_impl {
public:
    using attributes_t = Attributes;

    // We use composition here.
    // No copy/move ctors as the member std::mutex has them deleted.

    // Constructor selects priority inheritance based on the platform default.
    mutex_impl(typename Attributes::order_t order = Attributes::order_default_)
        : mutex_impl(mutex_get_enable_flag(), order)
    {}

    // Constructor selects priority inheritance based on the input argument.
    mutex_impl(bool priority_inheritance,
            typename Attributes::order_t order = Attributes::order_default_)
        : order_(order)
        , stat_{get_mutex_stat_array()[static_cast<size_t>(order)]}
    {
        LOG_ALWAYS_FATAL_IF(static_cast<size_t>(order) >= Attributes::order_size_,
                "mutex order %zu is equal to or greater than order limit:%zu",
                static_cast<size_t>(order), Attributes::order_size_);

        if (!priority_inheritance) return;

        pthread_mutexattr_t attr;
        int ret = pthread_mutexattr_init(&attr);
        if (ret != 0) {
            ALOGW("%s, pthread_mutexattr_init returned %d", __func__, ret);
            return;
        }

        ret = pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
        if (ret != 0) {
            ALOGW("%s, pthread_mutexattr_setprotocol returned %d", __func__, ret);
            return;
        }

        // use of the native_handle() is implementation defined.
        const auto handle = m_.native_handle();
        ret = pthread_mutex_init(handle, &attr);
        if (ret != 0) {
            ALOGW("%s, pthread_mutex_init returned %d", __func__, ret);
        }
        ALOGV("%s: audio_mutex initialized: ret:%d order:%zu",
                __func__, ret, static_cast<size_t>(order_));
    }

    ~mutex_impl() {
        // Note: std::mutex behavior is undefined if released holding ownership.
    }

    auto native_handle() {
        return m_.native_handle();
    }

    void lock() ACQUIRE() {
        lock_scoped_stat_t::pre_lock(*this);
        if (!m_.try_lock()) {  // if we directly use futex, we can optimize this with m_.lock().
            // lock_scoped_stat_t accumulates waiting time for the mutex lock call.
            lock_scoped_stat_t ls(*this);
            m_.lock();
        }
        lock_scoped_stat_t::post_lock(*this);
        metadata_memory_barrier_if_needed();
    }

    void unlock() RELEASE() {
        lock_scoped_stat_t::pre_unlock(*this);
        m_.unlock();
        metadata_memory_barrier_if_needed();
    }

    bool try_lock(int64_t timeout_ns = 0) TRY_ACQUIRE(true) {
        lock_scoped_stat_t::pre_lock(*this);
        if (timeout_ns <= 0) {
            if (!m_.try_lock()) return false;
        } else {
            const int64_t deadline_ns =
                    safe_add_sat(timeout_ns, systemTime(SYSTEM_TIME_REALTIME));
            const struct timespec ts = {
                .tv_sec = static_cast<time_t>(deadline_ns / 1'000'000'000),
                .tv_nsec = static_cast<long>(deadline_ns % 1'000'000'000),
            };
            lock_scoped_stat_t ls(*this);
            if (pthread_mutex_timedlock(m_.native_handle(), &ts) != 0) {
                ls.ignoreWaitTime();  // didn't get lock, don't count wait time
                metadata_memory_barrier_if_needed();
                return false;
            }
        }
        lock_scoped_stat_t::post_lock(*this);
        metadata_memory_barrier_if_needed();
        return true;
    }

    // additional method to obtain the underlying std::mutex.
    std::mutex& std_mutex() {
        return m_;
    }

    using mutex_stat_t = mutex_stat<uint64_t, double>;

    mutex_stat_t& get_stat() const {
        return stat_;
    }
1392
1393 /**
1394 * Returns the locking statistics per mutex capability category.
1395 */
all_stats_to_string()1396 static std::string all_stats_to_string() {
1397 std::string out("mutex stats: priority inheritance ");
1398 out.append(mutex_get_enable_flag() ? "enabled" : "disabled")
1399 .append("\n");
1400 const auto& stat_array = get_mutex_stat_array();
1401 for (size_t i = 0; i < stat_array.size(); ++i) {
1402 if (stat_array[i].locks != 0) {
1403 out.append("Capability: ").append(Attributes::order_names_[i]).append("\n")
1404 .append(stat_array[i].to_string());
1405 }
1406 }
1407 return out;
1408 }
1409
1410 /**
1411 * Returns the thread locks held per tid.
1412 */
all_threads_to_string()1413 static std::string all_threads_to_string() {
1414 return get_registry().to_string();
1415 }
1416
    /**
     * Returns a pair of bool (whether a cycle is detected) and a vector
     * of mutex wait dependencies.
     *
     * If a cycle is detected, the last element of the vector represents
     * a tid that is repeated somewhere earlier in the vector.
     *
     * The deadlock_detection() method is not exceptionally fast
     * and is not designed to be called for every mutex locked (and contended).
     * It is designed to run as a diagnostic routine to enhance
     * dumping for watchdogs, like TimeCheck, when a tid is believed blocked.
     *
     * Access of state is through atomics, so has minimal overhead on
     * concurrent execution, with the possibility of (mostly) false
     * negatives due to race.
     */
    static deadlock_info_t
    deadlock_detection(pid_t tid) {
        return get_registry().deadlock_detection(tid, Attributes::order_names_);
    }

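    // Example usage (sketch): invoked from a watchdog when "suspect_tid" is
    // believed blocked.  The member names below mirror the bool-and-vector
    // result described above and are illustrative.
    //
    //     const auto info = audio_utils::mutex::deadlock_detection(suspect_tid);
    //     if (info.has_cycle) {
    //         // the last element of the chain repeats an earlier tid: report it.
    //     }
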
    using thread_mutex_info_t = thread_mutex_info<
            void* /* mutex handle */, MutexOrder, Attributes::mutex_stack_depth_>;

    // get_thread_mutex_info is a thread-local "singleton".
    //
    // We write it like a Meyers singleton with a single thread_local
    // assignment that is guaranteed to be called on first time initialization.
    // Since the variables are thread_local, there is no thread contention
    // for initialization that would happen with a traditional Meyers singleton,
    // so a simple thread-local bool suffices in place of a once_flag.
    static const std::shared_ptr<thread_mutex_info_t>& get_thread_mutex_info() {
        thread_local std::shared_ptr<thread_mutex_info_t> tminfo = []() {
            auto info = std::make_shared<thread_mutex_info_t>(gettid_wrapper());
            get_registry().add_to_registry(info);
            return info;
        }();
        return tminfo;
    }

    // helper class for registering statistics for a mutex lock.

    class [[nodiscard]] lock_scoped_stat_enabled {
    public:
        explicit lock_scoped_stat_enabled(mutex& m)
            : mutex_(m)
            , time_(systemTime()) {
            ++mutex_.stat_.waits;
            mutex_.get_thread_mutex_info()->reset_waiter(&mutex_);
        }

        ~lock_scoped_stat_enabled() {
            if (!discard_wait_time_) mutex_.stat_.add_wait_time(systemTime() - time_);
            mutex_.get_thread_mutex_info()->reset_waiter();
        }

        void ignoreWaitTime() {
            discard_wait_time_ = true;
        }

        static void pre_unlock(mutex& m) {
            ++m.stat_.unlocks;
            const bool success = m.get_thread_mutex_info()->remove_held(&m);
            LOG_ALWAYS_FATAL_IF(Attributes::abort_on_invalid_unlock_
                    && !success,
                    "%s: invalid mutex unlock when not previously held", __func__);
        }

        // before we lock, we check order and recursion.
        static void pre_lock(mutex& m) {
            if constexpr (!Attributes::abort_on_order_check_ &&
                    !Attributes::abort_on_recursion_check_) return;

            const auto& p = m.get_thread_mutex_info()->check_held(&m, m.order_);
            if (p.first == nullptr) return;  // no problematic mutex.

            // problem!
            const size_t p_order = static_cast<size_t>(p.second.load());
            const size_t m_order = static_cast<size_t>(m.order_);

            // lock inversion
            LOG_ALWAYS_FATAL_IF(Attributes::abort_on_order_check_
                    && p_order > m_order,
                    "%s: invalid mutex order (previous) %zu %s> (new) %zu %s",
                    __func__, p_order, Attributes::order_names_[p_order],
                    m_order, Attributes::order_names_[m_order]);

            // lock recursion
            LOG_ALWAYS_FATAL_IF(Attributes::abort_on_recursion_check_
                    && p_order == m_order,
                    "%s: recursive mutex access detected (order: %zu %s)",
                    __func__, p_order, Attributes::order_names_[p_order]);
        }

        static void post_lock(mutex& m) {
            ++m.stat_.locks;
            m.get_thread_mutex_info()->push_held(&m, m.order_);
        }

    private:
        mutex& mutex_;
        const int64_t time_;
        bool discard_wait_time_ = false;
    };

    // A RAII class that implements thread join wait detection
    // for the deadlock check.
    //
    // During the lifetime of this class object, the current thread
    // is assumed blocked on the thread tid due to a
    // thread join.
    //
    // {
    //     scoped_join_wait_check sjw(tid_of_thread);
    //     thread.join();
    // }
    //
    class [[nodiscard]] scoped_join_wait_check {
    public:
        explicit scoped_join_wait_check(pid_t tid) {
            get_thread_mutex_info()->add_wait_join(tid);
        }
        ~scoped_join_wait_check() {
            get_thread_mutex_info()->remove_wait_join();
        }
    };

    class lock_scoped_stat_disabled {
    public:
        explicit lock_scoped_stat_disabled(mutex&) {}

        void ignoreWaitTime() {}

        static void pre_unlock(mutex&) {}

        static void pre_lock(mutex&) {}

        static void post_lock(mutex&) {}
    };

    using lock_scoped_stat_t = std::conditional_t<Attributes::mutex_tracking_enabled_,
            lock_scoped_stat_enabled, lock_scoped_stat_disabled>;

    // helper class for registering statistics for a cv wait.
    class [[nodiscard]] cv_wait_scoped_stat_enabled {
    public:
        explicit cv_wait_scoped_stat_enabled(mutex& m, pid_t notifier_tid = kInvalidTid)
            : mutex_(m) {
            ++mutex_.stat_.unlocks;
            // metadata that we relinquish lock.
            const bool success = mutex_.get_thread_mutex_info()->remove_held_for_cv(
                    &mutex_, mutex_.order_, notifier_tid);
            LOG_ALWAYS_FATAL_IF(Attributes::abort_on_invalid_unlock_
                    && !success,
                    "%s: invalid mutex unlock when not previously held", __func__);
        }

        ~cv_wait_scoped_stat_enabled() {
            ++mutex_.stat_.locks;
            // metadata that we are reacquiring lock.
            mutex_.get_thread_mutex_info()->push_held_for_cv(&mutex_, mutex_.order_);
        }
    private:
        mutex& mutex_;
    };

    class [[nodiscard]] cv_wait_scoped_stat_disabled {
    public:
        // must be constructible with the same arguments as the enabled variant.
        explicit cv_wait_scoped_stat_disabled(mutex&, pid_t = kInvalidTid) {}
    };

    using cv_wait_scoped_stat_t = std::conditional_t<Attributes::mutex_tracking_enabled_,
            cv_wait_scoped_stat_enabled, cv_wait_scoped_stat_disabled>;

    using thread_registry_t = thread_registry<thread_mutex_info_t>;

    // One per-process thread registry, one instance per template typename.
    // Declared here but must be defined in a .cpp otherwise there will be multiple
    // instances if the header is included into different shared libraries.
    static thread_registry_t& get_registry();

    using stat_array_t = std::array<mutex_stat_t, Attributes::order_size_>;

    // One per-process mutex statistics array, one instance per template typename.
    // Declared here but must be defined in a .cpp otherwise there will be multiple
    // instances if the header is included into different shared libraries.
    static stat_array_t& get_mutex_stat_array();

private:

    std::mutex m_;
    const typename Attributes::order_t order_;
    mutex_stat_t& stat_;  // set in ctor
};

// define the destructor to remove from registry.
template <typename MutexHandle, typename Order, size_t N>
inline thread_mutex_info<MutexHandle, Order, N>::~thread_mutex_info() {
    if (tid_ != 0) {
        mutex::get_registry().remove_from_registry(tid_);
    }
}

// audio_utils::lock_guard only works with the defined mutex.
//
// We add [[nodiscard]] to prevent accidentally ignoring construction.
class [[nodiscard]] SCOPED_CAPABILITY lock_guard {
public:
    explicit lock_guard(mutex& m) ACQUIRE(m)
        : mutex_(m) {
        mutex_.lock();
    }

    ~lock_guard() RELEASE() {
        mutex_.unlock();
    }

    lock_guard(const lock_guard&) = delete;

    // Note: a member reference will also delete this.
    lock_guard& operator=(const lock_guard&) = delete;

private:
    mutex& mutex_;
};

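// Example usage (sketch): "mMutex" is an illustrative audio_utils::mutex member.
//
// {
//     audio_utils::lock_guard lg(mMutex);  // order checked, stats gathered.
//     // ... access state guarded by mMutex ...
// }  // released on scope exit.
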
// audio_utils::unique_lock is based on std::unique_lock<std::mutex>
// because std::condition_variable is optimized for std::unique_lock<std::mutex>.
//
// Note: std::unique_lock **does not** have thread safety annotations.
// We annotate correctly here.  Essentially, this is the same as an annotated
// using unique_lock = std::unique_lock<std::mutex>;
//
// We omit swap(), release() and move methods which don't have thread
// safety annotations.
//
// We add [[nodiscard]] to prevent accidentally ignoring construction.
class [[nodiscard]] SCOPED_CAPABILITY unique_lock {
public:
    explicit unique_lock(mutex& m) ACQUIRE(m)
        : ul_(m.std_mutex(), std::defer_lock)
        , mutex_(m) {
        lock();
    }

    ~unique_lock() RELEASE() {
        if (owns_lock()) unlock();
    }

    void lock() ACQUIRE() {
        mutex::lock_scoped_stat_t::pre_lock(mutex_);
        if (!ul_.try_lock()) {
            mutex::lock_scoped_stat_t ls(mutex_);
            ul_.lock();
        }
        mutex::lock_scoped_stat_t::post_lock(mutex_);
        metadata_memory_barrier_if_needed();
    }

    void unlock() RELEASE() {
        mutex::lock_scoped_stat_t::pre_unlock(mutex_);
        ul_.unlock();
        metadata_memory_barrier_if_needed();
    }

    bool try_lock() TRY_ACQUIRE(true) {
        mutex::lock_scoped_stat_t::pre_lock(mutex_);
        if (!ul_.try_lock()) return false;
        mutex::lock_scoped_stat_t::post_lock(mutex_);
        metadata_memory_barrier_if_needed();
        return true;
    }

    template<class Rep, class Period>
    bool try_lock_for(const std::chrono::duration<Rep, Period>& timeout_duration)
            TRY_ACQUIRE(true) {
        mutex::lock_scoped_stat_t::pre_lock(mutex_);
        if (!ul_.try_lock_for(timeout_duration)) return false;
        mutex::lock_scoped_stat_t::post_lock(mutex_);
        metadata_memory_barrier_if_needed();
        return true;
    }

    template<class Clock, class Duration>
    bool try_lock_until(const std::chrono::time_point<Clock, Duration>& timeout_time)
            TRY_ACQUIRE(true) {
        mutex::lock_scoped_stat_t::pre_lock(mutex_);
        if (!ul_.try_lock_until(timeout_time)) return false;
        mutex::lock_scoped_stat_t::post_lock(mutex_);
        metadata_memory_barrier_if_needed();
        return true;
    }

    bool owns_lock() const {
        return ul_.owns_lock();
    }

    explicit operator bool() const {
        return owns_lock();
    }

    // additional method to obtain the underlying std::unique_lock
    std::unique_lock<std::mutex>& std_unique_lock() {
        return ul_;
    }

    // additional method to obtain the underlying mutex
    mutex& native_mutex() {
        return mutex_;
    }

private:
    std::unique_lock<std::mutex> ul_;
    mutex& mutex_;
};

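// Example usage (sketch): temporarily dropping an illustrative "mMutex"
// around work that must run unlocked (e.g. a client callback).
//
// audio_utils::unique_lock ul(mMutex);
// // ... locked work ...
// ul.unlock();
// // ... unlocked work ...
// ul.lock();
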
// audio_utils::condition_variable uses the optimized version of
// std::condition_variable for std::unique_lock<std::mutex>.
// It is possible to use std::condition_variable_any for a generic mutex type,
// but it is less efficient.

// The audio_utils condition_variable permits specifying a "notifier_tid"
// metadata in the wait() methods, which states the expected tid of the
// notification thread for deadlock / wait detection purposes.
class [[nodiscard]] condition_variable {
public:
    void notify_one() noexcept {
        cv_.notify_one();
    }

    void notify_all() noexcept {
        cv_.notify_all();
    }

    void wait(unique_lock& lock, pid_t notifier_tid = kInvalidTid) {
        mutex::cv_wait_scoped_stat_t ws(lock.native_mutex(), notifier_tid);
        cv_.wait(lock.std_unique_lock());
    }

    template<typename Predicate>
    void wait(unique_lock& lock, Predicate stop_waiting, pid_t notifier_tid = kInvalidTid) {
        mutex::cv_wait_scoped_stat_t ws(lock.native_mutex(), notifier_tid);
        cv_.wait(lock.std_unique_lock(), std::move(stop_waiting));
    }

    template<typename Rep, typename Period>
    std::cv_status wait_for(unique_lock& lock,
            const std::chrono::duration<Rep, Period>& rel_time,
            pid_t notifier_tid = kInvalidTid) {
        mutex::cv_wait_scoped_stat_t ws(lock.native_mutex(), notifier_tid);
        return cv_.wait_for(lock.std_unique_lock(), rel_time);
    }

    template<typename Rep, typename Period, typename Predicate>
    bool wait_for(unique_lock& lock,
            const std::chrono::duration<Rep, Period>& rel_time,
            Predicate stop_waiting, pid_t notifier_tid = kInvalidTid) {
        mutex::cv_wait_scoped_stat_t ws(lock.native_mutex(), notifier_tid);
        return cv_.wait_for(lock.std_unique_lock(), rel_time, std::move(stop_waiting));
    }

    template<typename Clock, typename Duration>
    std::cv_status wait_until(unique_lock& lock,
            const std::chrono::time_point<Clock, Duration>& timeout_time,
            pid_t notifier_tid = kInvalidTid) {
        mutex::cv_wait_scoped_stat_t ws(lock.native_mutex(), notifier_tid);
        return cv_.wait_until(lock.std_unique_lock(), timeout_time);
    }

    template<typename Clock, typename Duration, typename Predicate>
    bool wait_until(unique_lock& lock,
            const std::chrono::time_point<Clock, Duration>& timeout_time,
            Predicate stop_waiting, pid_t notifier_tid = kInvalidTid) {
        mutex::cv_wait_scoped_stat_t ws(lock.native_mutex(), notifier_tid);
        return cv_.wait_until(lock.std_unique_lock(), timeout_time, std::move(stop_waiting));
    }

    auto native_handle() {
        return cv_.native_handle();
    }

private:
    std::condition_variable cv_;
};

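// Example usage (sketch): "mMutex", "mCv", "mQuit" and "mWorkerTid" are
// illustrative members; the predicate form guards against spurious wakeups.
//
// audio_utils::unique_lock ul(mMutex);
// mCv.wait(ul, [this] { return mQuit; }, mWorkerTid /* expected notifier */);
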
// audio_utils::scoped_lock has proper thread safety annotations.
// std::scoped_lock does not have thread safety annotations for greater than 1 lock
// since the variadic template doesn't convert to the variadic macro def.
//
// We add [[nodiscard]] to prevent accidentally ignoring construction.
template <typename ...Mutexes>
class scoped_lock;

template <typename Mutex1>
class [[nodiscard]] SCOPED_CAPABILITY scoped_lock<Mutex1>
    : private std::scoped_lock<Mutex1> {
public:
    explicit scoped_lock(Mutex1& m) ACQUIRE(m) : std::scoped_lock<Mutex1>(m) {}
    ~scoped_lock() RELEASE() = default;
};

template <typename Mutex1, typename Mutex2>
class [[nodiscard]] SCOPED_CAPABILITY scoped_lock<Mutex1, Mutex2>
    : private std::scoped_lock<Mutex1, Mutex2> {
public:
    scoped_lock(Mutex1& m1, Mutex2& m2) ACQUIRE(m1, m2)
        : std::scoped_lock<Mutex1, Mutex2>(m1, m2) {}
    ~scoped_lock() RELEASE() = default;
};

template <typename Mutex1, typename Mutex2, typename Mutex3>
class [[nodiscard]] SCOPED_CAPABILITY scoped_lock<Mutex1, Mutex2, Mutex3>
    : private std::scoped_lock<Mutex1, Mutex2, Mutex3> {
public:
    scoped_lock(Mutex1& m1, Mutex2& m2, Mutex3& m3) ACQUIRE(m1, m2, m3)
        : std::scoped_lock<Mutex1, Mutex2, Mutex3>(m1, m2, m3) {}
    ~scoped_lock() RELEASE() = default;
};

template <typename ...Mutexes>
class [[nodiscard]] scoped_lock : private std::scoped_lock<Mutexes...> {
public:
    scoped_lock(Mutexes&... mutexes)
        : std::scoped_lock<Mutexes...>(mutexes...) {}
};

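// Example usage (sketch): acquiring two illustrative annotated mutexes
// together; thread safety analysis records both capabilities.
//
// audio_utils::scoped_lock sl(mMutex1, mMutex2);
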
// audio_utils::lock_guard_no_thread_safety_analysis is used to lock
// the second mutex when the same global capability is aliased
// to 2 (or more) different mutexes.
//
// Example usage:
//
// // Suppose the interface IAfThreadBase::mutex() returns a global capability
// // ThreadBase_Mutex.
//
// sp<IAfThreadBase> srcThread, dstThread;
//
// lock_guard lg(srcThread->mutex());         // acquires global capability
//                                            // ThreadBase_Mutex;
// ...
// lock_guard_no_thread_safety_analysis lg2(  // lock_guard would fail here as
//         dstThread->mutex());               // the same global capability is assigned to
//                                            // dstThread->mutex().
//                                            // lock_guard_no_thread_safety_analysis
//                                            // prevents a thread safety error.

template<typename Mutex1>
class lock_guard_no_thread_safety_analysis : private std::lock_guard<Mutex1> {
public:
    lock_guard_no_thread_safety_analysis(Mutex1& m) : std::lock_guard<Mutex1>(m) {}
};

} // namespace android::audio_utils

#pragma pop_macro("LOG_TAG")