1 // Copyright 2017 The Abseil Authors.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 #include "absl/synchronization/mutex.h"
16
17 #ifdef _WIN32
18 #include <windows.h>
19 #ifdef ERROR
20 #undef ERROR
21 #endif
22 #else
23 #include <fcntl.h>
24 #include <pthread.h>
25 #include <sched.h>
26 #include <sys/time.h>
27 #endif
28
29 #include <assert.h>
30 #include <errno.h>
31 #include <stdio.h>
32 #include <stdlib.h>
33 #include <string.h>
34 #include <time.h>
35
36 #include <algorithm>
37 #include <atomic>
38 #include <cstddef>
39 #include <cstdlib>
40 #include <cstring>
41 #include <thread> // NOLINT(build/c++11)
42
43 #include "absl/base/attributes.h"
44 #include "absl/base/call_once.h"
45 #include "absl/base/config.h"
46 #include "absl/base/dynamic_annotations.h"
47 #include "absl/base/internal/atomic_hook.h"
48 #include "absl/base/internal/cycleclock.h"
49 #include "absl/base/internal/hide_ptr.h"
50 #include "absl/base/internal/low_level_alloc.h"
51 #include "absl/base/internal/raw_logging.h"
52 #include "absl/base/internal/spinlock.h"
53 #include "absl/base/internal/sysinfo.h"
54 #include "absl/base/internal/thread_identity.h"
55 #include "absl/base/internal/tsan_mutex_interface.h"
56 #include "absl/base/optimization.h"
57 #include "absl/debugging/stacktrace.h"
58 #include "absl/debugging/symbolize.h"
59 #include "absl/synchronization/internal/graphcycles.h"
60 #include "absl/synchronization/internal/per_thread_sem.h"
61 #include "absl/time/time.h"
62
63 using absl::base_internal::CurrentThreadIdentityIfPresent;
64 using absl::base_internal::CycleClock;
65 using absl::base_internal::PerThreadSynch;
66 using absl::base_internal::SchedulingGuard;
67 using absl::base_internal::ThreadIdentity;
68 using absl::synchronization_internal::GetOrCreateCurrentThreadIdentity;
69 using absl::synchronization_internal::GraphCycles;
70 using absl::synchronization_internal::GraphId;
71 using absl::synchronization_internal::InvalidGraphId;
72 using absl::synchronization_internal::KernelTimeout;
73 using absl::synchronization_internal::PerThreadSem;
74
75 extern "C" {
76 ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)() {
77 std::this_thread::yield();
78 }
79 } // extern "C"
80
81 namespace absl {
82 ABSL_NAMESPACE_BEGIN
83
84 namespace {
85
86 #if defined(ABSL_HAVE_THREAD_SANITIZER)
87 constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kIgnore;
88 #else
89 constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kAbort;
90 #endif
91
92 ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
93 kDeadlockDetectionDefault);
94 ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
95
96 ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
97 absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
98 submit_profile_data;
99 ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)(
100 const char* msg, const void* obj, int64_t wait_cycles)>
101 mutex_tracer;
102 ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
103 absl::base_internal::AtomicHook<void (*)(const char* msg, const void* cv)>
104 cond_var_tracer;
105
106 } // namespace
107
108 static inline bool EvalConditionAnnotated(const Condition* cond, Mutex* mu,
109 bool locking, bool trylock,
110 bool read_lock);
111
112 void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)) {
113 submit_profile_data.Store(fn);
114 }
115
116 void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj,
117 int64_t wait_cycles)) {
118 mutex_tracer.Store(fn);
119 }
120
121 void RegisterCondVarTracer(void (*fn)(const char* msg, const void* cv)) {
122 cond_var_tracer.Store(fn);
123 }
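
// Illustrative sketch (not part of this file): an application can install
// these hooks to surface contention data. `ReportMutexWait` below is a
// hypothetical callback supplied by the caller.
//
//   void ReportMutexWait(int64_t wait_cycles) {
//     // e.g. export wait_cycles to a monitoring system
//   }
//   ...
//   absl::RegisterMutexProfiler(&ReportMutexWait);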
124
125 namespace {
126 // Represents the strategy for spin and yield.
127 // See the comment in GetMutexGlobals() for more information.
128 enum DelayMode { AGGRESSIVE, GENTLE };
129
130 struct ABSL_CACHELINE_ALIGNED MutexGlobals {
131 absl::once_flag once;
132 int spinloop_iterations = 0;
133 int32_t mutex_sleep_spins[2] = {};
134 absl::Duration mutex_sleep_time;
135 };
136
137 absl::Duration MeasureTimeToYield() {
138 absl::Time before = absl::Now();
139 ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
140 return absl::Now() - before;
141 }
142
143 const MutexGlobals& GetMutexGlobals() {
144 ABSL_CONST_INIT static MutexGlobals data;
145 absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
146 if (absl::base_internal::NumCPUs() > 1) {
147 // If this is a multiprocessor, allow spinning. If the mode is
148 // aggressive then spin many times before yielding. If the mode is
149 // gentle then spin only a few times before yielding. Aggressive spinning
150 // is used to ensure that an Unlock() call, which must get the spin lock
151 // for any thread to make progress, gets it without undue delay.
152 data.spinloop_iterations = 1500;
153 data.mutex_sleep_spins[AGGRESSIVE] = 5000;
154 data.mutex_sleep_spins[GENTLE] = 250;
155 data.mutex_sleep_time = absl::Microseconds(10);
156 } else {
157 // If this is a uniprocessor, only yield/sleep. Real-time threads are often
158 // unable to yield, so the sleep time needs to be long enough to keep
159 // the calling thread asleep until scheduling happens.
160 data.spinloop_iterations = 0;
161 data.mutex_sleep_spins[AGGRESSIVE] = 0;
162 data.mutex_sleep_spins[GENTLE] = 0;
163 data.mutex_sleep_time = MeasureTimeToYield() * 5;
164 data.mutex_sleep_time =
165 std::min(data.mutex_sleep_time, absl::Milliseconds(1));
166 data.mutex_sleep_time =
167 std::max(data.mutex_sleep_time, absl::Microseconds(10));
168 }
169 });
170 return data;
171 }
172 } // namespace
173
174 namespace synchronization_internal {
175 // Returns the Mutex delay on iteration `c` depending on the given `mode`.
176 // The returned value should be used as `c` for the next call to `MutexDelay`.
177 int MutexDelay(int32_t c, int mode) {
178 const int32_t limit = GetMutexGlobals().mutex_sleep_spins[mode];
179 const absl::Duration sleep_time = GetMutexGlobals().mutex_sleep_time;
180 if (c < limit) {
181 // Spin.
182 c++;
183 } else {
184 SchedulingGuard::ScopedEnable enable_rescheduling;
185 ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
186 if (c == limit) {
187 // Yield once.
188 ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
189 c++;
190 } else {
191 // Then wait.
192 absl::SleepFor(sleep_time);
193 c = 0;
194 }
195 ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
196 }
197 return c;
198 }
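
// A minimal usage sketch of the contract above: the caller threads the
// returned counter back into the next call, as Mutex::Block() does further
// below. `TryAcquireSomething` is hypothetical.
//
//   int c = 0;
//   while (!TryAcquireSomething()) {
//     c = MutexDelay(c, GENTLE);
//   }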
199 } // namespace synchronization_internal
200
201 // --------------------------Generic atomic ops
202 // Ensure that "(*pv & bits) == bits" by doing an atomic update of "*pv" to
203 // "*pv | bits" if necessary. Wait until (*pv & wait_until_clear)==0
204 // before making any change.
205 // This is used to set flags in mutex and condition variable words.
206 static void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
207 intptr_t wait_until_clear) {
208 intptr_t v;
209 do {
210 v = pv->load(std::memory_order_relaxed);
211 } while ((v & bits) != bits &&
212 ((v & wait_until_clear) != 0 ||
213 !pv->compare_exchange_weak(v, v | bits, std::memory_order_release,
214 std::memory_order_relaxed)));
215 }
216
217 // Ensure that "(*pv & bits) == 0" by doing an atomic update of "*pv" to
218 // "*pv & ~bits" if necessary. Wait until (*pv & wait_until_clear)==0
219 // before making any change.
220 // This is used to unset flags in mutex and condition variable words.
221 static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
222 intptr_t wait_until_clear) {
223 intptr_t v;
224 do {
225 v = pv->load(std::memory_order_relaxed);
226 } while ((v & bits) != 0 &&
227 ((v & wait_until_clear) != 0 ||
228 !pv->compare_exchange_weak(v, v & ~bits, std::memory_order_release,
229 std::memory_order_relaxed)));
230 }
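
// Within this file these helpers operate on the mutex/condvar word itself.
// For example, EnsureSynchEvent() (below), when called from EnableDebugLog(),
// effectively performs
//
//   AtomicSetBits(&mu_, kMuEvent, kMuSpin);  // set kMuEvent once kMuSpin is clear
//
// and ForgetSynchEvent() later clears the bit the same way.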
231
232 //------------------------------------------------------------------
233
234 // Data for doing deadlock detection.
235 ABSL_CONST_INIT static absl::base_internal::SpinLock deadlock_graph_mu(
236 absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
237
238 // Graph used to detect deadlocks.
239 ABSL_CONST_INIT static GraphCycles* deadlock_graph
240 ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);
241
242 //------------------------------------------------------------------
243 // An event mechanism for debugging mutex use.
244 // It also allows mutexes to be given names for those who can't handle
245 // addresses, and instead like to give their data structures names like
246 // "Henry", "Fido", or "Rupert IV, King of Yondavia".
247
248 namespace { // to prevent name pollution
249 enum { // Mutex and CondVar events passed as "ev" to PostSynchEvent
250 // Mutex events
251 SYNCH_EV_TRYLOCK_SUCCESS,
252 SYNCH_EV_TRYLOCK_FAILED,
253 SYNCH_EV_READERTRYLOCK_SUCCESS,
254 SYNCH_EV_READERTRYLOCK_FAILED,
255 SYNCH_EV_LOCK,
256 SYNCH_EV_LOCK_RETURNING,
257 SYNCH_EV_READERLOCK,
258 SYNCH_EV_READERLOCK_RETURNING,
259 SYNCH_EV_UNLOCK,
260 SYNCH_EV_READERUNLOCK,
261
262 // CondVar events
263 SYNCH_EV_WAIT,
264 SYNCH_EV_WAIT_RETURNING,
265 SYNCH_EV_SIGNAL,
266 SYNCH_EV_SIGNALALL,
267 };
268
269 enum { // Event flags
270 SYNCH_F_R = 0x01, // reader event
271 SYNCH_F_LCK = 0x02, // PostSynchEvent called with mutex held
272 SYNCH_F_TRY = 0x04, // TryLock or ReaderTryLock
273 SYNCH_F_UNLOCK = 0x08, // Unlock or ReaderUnlock
274
275 SYNCH_F_LCK_W = SYNCH_F_LCK,
276 SYNCH_F_LCK_R = SYNCH_F_LCK | SYNCH_F_R,
277 };
278 } // anonymous namespace
279
280 // Properties of the events.
281 static const struct {
282 int flags;
283 const char* msg;
284 } event_properties[] = {
285 {SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
286 {0, "TryLock failed "},
287 {SYNCH_F_LCK_R | SYNCH_F_TRY, "ReaderTryLock succeeded "},
288 {0, "ReaderTryLock failed "},
289 {0, "Lock blocking "},
290 {SYNCH_F_LCK_W, "Lock returning "},
291 {0, "ReaderLock blocking "},
292 {SYNCH_F_LCK_R, "ReaderLock returning "},
293 {SYNCH_F_LCK_W | SYNCH_F_UNLOCK, "Unlock "},
294 {SYNCH_F_LCK_R | SYNCH_F_UNLOCK, "ReaderUnlock "},
295 {0, "Wait on "},
296 {0, "Wait unblocked "},
297 {0, "Signal on "},
298 {0, "SignalAll on "},
299 };
300
301 ABSL_CONST_INIT static absl::base_internal::SpinLock synch_event_mu(
302 absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
303
304 // Hash table size; should be prime > 2.
305 // Can't be too small, as it's used for deadlock detection information.
306 static constexpr uint32_t kNSynchEvent = 1031;
307
308 static struct SynchEvent { // this is a trivial hash table for the events
309 // struct is freed when refcount reaches 0
310 int refcount ABSL_GUARDED_BY(synch_event_mu);
311
312 // buckets have linear, 0-terminated chains
313 SynchEvent* next ABSL_GUARDED_BY(synch_event_mu);
314
315 // Constant after initialization
316 uintptr_t masked_addr; // object at this address is called "name"
317
318 // No explicit synchronization used. Instead we assume that the
319 // client who enables/disables invariants/logging on a Mutex does so
320 // while the Mutex is not being concurrently accessed by others.
321 void (*invariant)(void* arg); // called on each event
322 void* arg; // first arg to (*invariant)()
323 bool log; // logging turned on
324
325 // Constant after initialization
326 char name[1]; // actually longer---NUL-terminated string
327 }* synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
328
329 // Ensure that the object at "addr" has a SynchEvent struct associated with it,
330 // set "bits" in the word there (waiting until lockbit is clear before doing
331 // so), and return a refcounted reference that will remain valid until
332 // UnrefSynchEvent() is called. If a new SynchEvent is allocated,
333 // the string name is copied into it.
334 // When used with a mutex, the caller should also ensure that kMuEvent
335 // is set in the mutex word, and similarly for condition variables and kCVEvent.
336 static SynchEvent* EnsureSynchEvent(std::atomic<intptr_t>* addr,
337 const char* name, intptr_t bits,
338 intptr_t lockbit) {
339 uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
340 SynchEvent* e;
341 // first look for an existing SynchEvent struct.
342 synch_event_mu.Lock();
343 for (e = synch_event[h];
344 e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
345 e = e->next) {
346 }
347 if (e == nullptr) { // no SynchEvent struct found; make one.
348 if (name == nullptr) {
349 name = "";
350 }
351 size_t l = strlen(name);
352 e = reinterpret_cast<SynchEvent*>(
353 base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));
354 e->refcount = 2; // one for return value, one for linked list
355 e->masked_addr = base_internal::HidePtr(addr);
356 e->invariant = nullptr;
357 e->arg = nullptr;
358 e->log = false;
359 strcpy(e->name, name); // NOLINT(runtime/printf)
360 e->next = synch_event[h];
361 AtomicSetBits(addr, bits, lockbit);
362 synch_event[h] = e;
363 } else {
364 e->refcount++; // for return value
365 }
366 synch_event_mu.Unlock();
367 return e;
368 }
369
370 // Deallocate the SynchEvent *e, whose refcount has fallen to zero.
371 static void DeleteSynchEvent(SynchEvent* e) {
372 base_internal::LowLevelAlloc::Free(e);
373 }
374
375 // Decrement the reference count of *e, or do nothing if e==null.
376 static void UnrefSynchEvent(SynchEvent* e) {
377 if (e != nullptr) {
378 synch_event_mu.Lock();
379 bool del = (--(e->refcount) == 0);
380 synch_event_mu.Unlock();
381 if (del) {
382 DeleteSynchEvent(e);
383 }
384 }
385 }
386
387 // Forget the mapping from the object (Mutex or CondVar) at address addr
388 // to SynchEvent object, and clear "bits" in its word (waiting until lockbit
389 // is clear before doing so).
390 static void ForgetSynchEvent(std::atomic<intptr_t>* addr, intptr_t bits,
391 intptr_t lockbit) {
392 uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
393 SynchEvent** pe;
394 SynchEvent* e;
395 synch_event_mu.Lock();
396 for (pe = &synch_event[h];
397 (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
398 pe = &e->next) {
399 }
400 bool del = false;
401 if (e != nullptr) {
402 *pe = e->next;
403 del = (--(e->refcount) == 0);
404 }
405 AtomicClearBits(addr, bits, lockbit);
406 synch_event_mu.Unlock();
407 if (del) {
408 DeleteSynchEvent(e);
409 }
410 }
411
412 // Return a refcounted reference to the SynchEvent of the object at address
413 // "addr", if any. The pointer returned is valid until the UnrefSynchEvent() is
414 // called.
415 static SynchEvent* GetSynchEvent(const void* addr) {
416 uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
417 SynchEvent* e;
418 synch_event_mu.Lock();
419 for (e = synch_event[h];
420 e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
421 e = e->next) {
422 }
423 if (e != nullptr) {
424 e->refcount++;
425 }
426 synch_event_mu.Unlock();
427 return e;
428 }
429
430 // Called when an event "ev" occurs on a Mutex or CondVar "obj",
431 // if event recording is on.
432 static void PostSynchEvent(void* obj, int ev) {
433 SynchEvent* e = GetSynchEvent(obj);
434 // logging is on if event recording is on and either there's no event struct,
435 // or it explicitly says to log
436 if (e == nullptr || e->log) {
437 void* pcs[40];
438 int n = absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1);
439 // A buffer with enough space for the ASCII for all the PCs, even on a
440 // 64-bit machine.
441 char buffer[ABSL_ARRAYSIZE(pcs) * 24];
442 int pos = snprintf(buffer, sizeof(buffer), " @");
443 for (int i = 0; i != n; i++) {
444 int b = snprintf(&buffer[pos], sizeof(buffer) - static_cast<size_t>(pos),
445 " %p", pcs[i]);
446 if (b < 0 ||
447 static_cast<size_t>(b) >= sizeof(buffer) - static_cast<size_t>(pos)) {
448 break;
449 }
450 pos += b;
451 }
452 ABSL_RAW_LOG(INFO, "%s%p %s %s", event_properties[ev].msg, obj,
453 (e == nullptr ? "" : e->name), buffer);
454 }
455 const int flags = event_properties[ev].flags;
456 if ((flags & SYNCH_F_LCK) != 0 && e != nullptr && e->invariant != nullptr) {
457 // Calling the invariant as is causes problems under ThreadSanitizer.
458 // We are currently inside of Mutex Lock/Unlock and are ignoring all
459 // memory accesses and synchronization. If the invariant transitively
460 // synchronizes something else and we ignore the synchronization, we will
461 // get false positive race reports later.
462 // Reuse EvalConditionAnnotated to properly call into user code.
463 struct local {
464 static bool pred(SynchEvent* ev) {
465 (*ev->invariant)(ev->arg);
466 return false;
467 }
468 };
469 Condition cond(&local::pred, e);
470 Mutex* mu = static_cast<Mutex*>(obj);
471 const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
472 const bool trylock = (flags & SYNCH_F_TRY) != 0;
473 const bool read_lock = (flags & SYNCH_F_R) != 0;
474 EvalConditionAnnotated(&cond, mu, locking, trylock, read_lock);
475 }
476 UnrefSynchEvent(e);
477 }
478
479 //------------------------------------------------------------------
480
481 // The SynchWaitParams struct encapsulates the way in which a thread is waiting:
482 // whether it has a timeout, the condition, exclusive/shared, and whether a
483 // condition variable wait has an associated Mutex (as opposed to another
484 // type of lock). It also points to the PerThreadSynch struct of its thread.
485 // cv_word tells Enqueue() to enqueue on a CondVar using CondVarEnqueue().
486 //
487 // This structure is held on the stack rather than directly in
488 // PerThreadSynch because a thread can be waiting on multiple Mutexes if,
489 // while waiting on one Mutex, the implementation calls a client callback
490 // (such as a Condition function) that acquires another Mutex. We don't
491 // strictly need to allow this, but programmers become confused if we do not
492 // allow them to use functions such as LOG() within Condition functions. The
493 // PerThreadSynch struct points at the most recent SynchWaitParams struct when
494 // the thread is on a Mutex's waiter queue.
495 struct SynchWaitParams {
496 SynchWaitParams(Mutex::MuHow how_arg, const Condition* cond_arg,
497 KernelTimeout timeout_arg, Mutex* cvmu_arg,
498 PerThreadSynch* thread_arg,
499 std::atomic<intptr_t>* cv_word_arg)
500 : how(how_arg),
501 cond(cond_arg),
502 timeout(timeout_arg),
503 cvmu(cvmu_arg),
504 thread(thread_arg),
505 cv_word(cv_word_arg),
506 contention_start_cycles(CycleClock::Now()),
507 should_submit_contention_data(false) {}
508
509 const Mutex::MuHow how; // How this thread needs to wait.
510 const Condition* cond; // The condition that this thread is waiting for.
511 // In Mutex, this field is set to zero if a timeout
512 // expires.
513 KernelTimeout timeout; // timeout expiry---absolute time
514 // In Mutex, this field is set to zero if a timeout
515 // expires.
516 Mutex* const cvmu; // used for transfer from cond var to mutex
517 PerThreadSynch* const thread; // thread that is waiting
518
519 // If not null, thread should be enqueued on the CondVar whose state
520 // word is cv_word instead of queueing normally on the Mutex.
521 std::atomic<intptr_t>* cv_word;
522
523 int64_t contention_start_cycles; // Time (in cycles) when this thread started
524 // to contend for the mutex.
525 bool should_submit_contention_data;
526 };
527
528 struct SynchLocksHeld {
529 int n; // number of valid entries in locks[]
530 bool overflow; // true iff we overflowed the array at some point
531 struct {
532 Mutex* mu; // lock acquired
533 int32_t count; // times acquired
534 GraphId id; // deadlock_graph id of acquired lock
535 } locks[40];
536 // If a thread overfills the array during deadlock detection, we
537 // continue, discarding information as needed. If no overflow has
538 // taken place, we can provide more error checking, such as
539 // detecting when a thread releases a lock it does not hold.
540 };
541
542 // A sentinel value in lists that is not 0.
543 // A 0 value is used to mean "not on a list".
544 static PerThreadSynch* const kPerThreadSynchNull =
545 reinterpret_cast<PerThreadSynch*>(1);
546
547 static SynchLocksHeld* LocksHeldAlloc() {
548 SynchLocksHeld* ret = reinterpret_cast<SynchLocksHeld*>(
549 base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld)));
550 ret->n = 0;
551 ret->overflow = false;
552 return ret;
553 }
554
555 // Return the PerThreadSynch-struct for this thread.
556 static PerThreadSynch* Synch_GetPerThread() {
557 ThreadIdentity* identity = GetOrCreateCurrentThreadIdentity();
558 return &identity->per_thread_synch;
559 }
560
561 static PerThreadSynch* Synch_GetPerThreadAnnotated(Mutex* mu) {
562 if (mu) {
563 ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
564 }
565 PerThreadSynch* w = Synch_GetPerThread();
566 if (mu) {
567 ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
568 }
569 return w;
570 }
571
572 static SynchLocksHeld* Synch_GetAllLocks() {
573 PerThreadSynch* s = Synch_GetPerThread();
574 if (s->all_locks == nullptr) {
575 s->all_locks = LocksHeldAlloc(); // Freed by ReclaimThreadIdentity.
576 }
577 return s->all_locks;
578 }
579
580 // Post on "w"'s associated PerThreadSem.
581 void Mutex::IncrementSynchSem(Mutex* mu, PerThreadSynch* w) {
582 if (mu) {
583 ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
584 // We miss synchronization around passing PerThreadSynch between threads
585 // since it happens inside of the Mutex code, so we need to ignore all
586 // accesses to the object.
587 ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
588 PerThreadSem::Post(w->thread_identity());
589 ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
590 ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
591 } else {
592 PerThreadSem::Post(w->thread_identity());
593 }
594 }
595
596 // Wait on "w"'s associated PerThreadSem; returns false if timeout expired.
597 bool Mutex::DecrementSynchSem(Mutex* mu, PerThreadSynch* w, KernelTimeout t) {
598 if (mu) {
599 ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
600 }
601 assert(w == Synch_GetPerThread());
602 static_cast<void>(w);
603 bool res = PerThreadSem::Wait(t);
604 if (mu) {
605 ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
606 }
607 return res;
608 }
609
610 // We're in a fatal signal handler that hopes to use Mutex and to get
611 // lucky by not deadlocking. We try to improve its chances of success
612 // by effectively disabling some of the consistency checks. This will
613 // prevent certain ABSL_RAW_CHECK() statements from being triggered when
614 // re-entry is detected. The ABSL_RAW_CHECK() statements are those in the
615 // Mutex code checking that the "waitp" field has not been reused.
616 void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
617 // Fix the per-thread state only if it exists.
618 ThreadIdentity* identity = CurrentThreadIdentityIfPresent();
619 if (identity != nullptr) {
620 identity->per_thread_synch.suppress_fatal_errors = true;
621 }
622 // Don't do deadlock detection when we are already failing.
623 synch_deadlock_detection.store(OnDeadlockCycle::kIgnore,
624 std::memory_order_release);
625 }
626
627 // --------------------------Mutexes
628
629 // In the layout below, the msb of the bottom byte is currently unused. Also,
630 // the following constraints were considered in choosing the layout:
631 // o Both the debug allocator's "uninitialized" and "freed" patterns (0xab and
632 // 0xcd) are illegal: reader and writer lock both held.
633 // o kMuWriter and kMuEvent should exceed kMuDesig and kMuWait, to enable the
634 // bit-twiddling trick in Mutex::Unlock().
635 // o kMuWriter / kMuReader == kMuWrWait / kMuWait,
636 // to enable the bit-twiddling trick in CheckForMutexCorruption().
637 static const intptr_t kMuReader = 0x0001L; // a reader holds the lock
638 // There's a designated waker.
639 // INVARIANT1: there's a thread that was blocked on the mutex, is
640 // no longer, yet has not yet acquired the mutex. If there's a
641 // designated waker, all threads can avoid taking the slow path in
642 // unlock because the designated waker will subsequently acquire
643 // the lock and wake someone. To maintain INVARIANT1 the bit is
644 // set when a thread is unblocked (INV1a), and threads that were
645 // unblocked reset the bit when they either acquire or re-block (INV1b).
646 static const intptr_t kMuDesig = 0x0002L;
647 static const intptr_t kMuWait = 0x0004L; // threads are waiting
648 static const intptr_t kMuWriter = 0x0008L; // a writer holds the lock
649 static const intptr_t kMuEvent = 0x0010L; // record this mutex's events
650 // Runnable writer is waiting for a reader.
651 // If set, new readers will not lock the mutex to avoid writer starvation.
652 // Note: if a reader has higher priority than the writer, it will still lock
653 // the mutex ahead of the waiting writer, but in a very inefficient manner:
654 // the reader will first queue itself and block, but then the last unlocking
655 // reader will wake it.
656 static const intptr_t kMuWrWait = 0x0020L;
657 static const intptr_t kMuSpin = 0x0040L; // spinlock protects wait list
658 static const intptr_t kMuLow = 0x00ffL; // mask all mutex bits
659 static const intptr_t kMuHigh = ~kMuLow; // mask pointer/reader count
660
661 // Hack to make constant values available to gdb pretty printer
662 enum {
663 kGdbMuSpin = kMuSpin,
664 kGdbMuEvent = kMuEvent,
665 kGdbMuWait = kMuWait,
666 kGdbMuWriter = kMuWriter,
667 kGdbMuDesig = kMuDesig,
668 kGdbMuWrWait = kMuWrWait,
669 kGdbMuReader = kMuReader,
670 kGdbMuLow = kMuLow,
671 };
672
673 // kMuWrWait implies kMuWait.
674 // kMuReader and kMuWriter are mutually exclusive.
675 // If kMuReader is zero, there are no readers.
676 // Otherwise, if kMuWait is zero, the high order bits contain a count of the
677 // number of readers. Otherwise, the reader count is held in
678 // PerThreadSynch::readers of the most recently queued waiter, again in the
679 // bits above kMuLow.
680 static const intptr_t kMuOne = 0x0100; // a count of one reader
681
682 // flags passed to Enqueue and LockSlow{,WithTimeout,Loop}
683 static const int kMuHasBlocked = 0x01; // already blocked (MUST == 1)
684 static const int kMuIsCond = 0x02; // conditional waiter (CV or Condition)
685
686 static_assert(PerThreadSynch::kAlignment > kMuLow,
687 "PerThreadSynch::kAlignment must be greater than kMuLow");
688
689 // This struct contains various bitmasks to be used in
690 // acquiring and releasing a mutex in a particular mode.
691 struct MuHowS {
692 // if all the bits in fast_need_zero are zero, the lock can be acquired by
693 // adding fast_add and oring fast_or. The bit kMuDesig should be reset iff
694 // this is the designated waker.
695 intptr_t fast_need_zero;
696 intptr_t fast_or;
697 intptr_t fast_add;
698
699 intptr_t slow_need_zero; // fast_need_zero with events (e.g. logging)
700
701 intptr_t slow_inc_need_zero; // if all the bits in slow_inc_need_zero are
702 // zero a reader can acquire a read share by
703 // setting the reader bit and incrementing
704 // the reader count (in last waiter since
705 // we're now slow-path). kMuWrWait may
706 // be ignored if we already waited once.
707 };
708
709 static const MuHowS kSharedS = {
710 // shared or read lock
711 kMuWriter | kMuWait | kMuEvent, // fast_need_zero
712 kMuReader, // fast_or
713 kMuOne, // fast_add
714 kMuWriter | kMuWait, // slow_need_zero
715 kMuSpin | kMuWriter | kMuWrWait, // slow_inc_need_zero
716 };
717 static const MuHowS kExclusiveS = {
718 // exclusive or write lock
719 kMuWriter | kMuReader | kMuEvent, // fast_need_zero
720 kMuWriter, // fast_or
721 0, // fast_add
722 kMuWriter | kMuReader, // slow_need_zero
723 ~static_cast<intptr_t>(0), // slow_inc_need_zero
724 };
725 static const Mutex::MuHow kShared = &kSharedS; // shared lock
726 static const Mutex::MuHow kExclusive = &kExclusiveS; // exclusive lock
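
// Sketch of the intended fast path (illustrative, not the verbatim
// implementation): if none of the bits in fast_need_zero are set, the lock
// can be acquired in mode `how` with a single compare-and-swap.
//
//   intptr_t v = mu_.load(std::memory_order_relaxed);
//   if ((v & how->fast_need_zero) == 0 &&
//       mu_.compare_exchange_strong(v, (v | how->fast_or) + how->fast_add,
//                                   std::memory_order_acquire,
//                                   std::memory_order_relaxed)) {
//     // Acquired: kMuWriter set for kExclusive; kMuReader set and the
//     // reader count bumped by kMuOne for kShared.
//   }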
727
728 #ifdef NDEBUG
729 static constexpr bool kDebugMode = false;
730 #else
731 static constexpr bool kDebugMode = true;
732 #endif
733
734 #ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
735 static unsigned TsanFlags(Mutex::MuHow how) {
736 return how == kShared ? __tsan_mutex_read_lock : 0;
737 }
738 #endif
739
740 static bool DebugOnlyIsExiting() {
741 return false;
742 }
743
744 Mutex::~Mutex() {
745 intptr_t v = mu_.load(std::memory_order_relaxed);
746 if ((v & kMuEvent) != 0 && !DebugOnlyIsExiting()) {
747 ForgetSynchEvent(&this->mu_, kMuEvent, kMuSpin);
748 }
749 if (kDebugMode) {
750 this->ForgetDeadlockInfo();
751 }
752 ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
753 }
754
755 void Mutex::EnableDebugLog(const char* name) {
756 SynchEvent* e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
757 e->log = true;
758 UnrefSynchEvent(e);
759 }
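
// Example (illustrative, application code): name a mutex so that events
// logged via PostSynchEvent() identify it.
//
//   absl::Mutex mu;
//   mu.EnableDebugLog("my_named_mu");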
760
761 void EnableMutexInvariantDebugging(bool enabled) {
762 synch_check_invariants.store(enabled, std::memory_order_release);
763 }
764
765 void Mutex::EnableInvariantDebugging(void (*invariant)(void*), void* arg) {
766 if (synch_check_invariants.load(std::memory_order_acquire) &&
767 invariant != nullptr) {
768 SynchEvent* e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
769 e->invariant = invariant;
770 e->arg = arg;
771 UnrefSynchEvent(e);
772 }
773 }
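
// Example (illustrative): invariant checking must be enabled globally before
// a per-mutex invariant is registered, since registration is a no-op
// otherwise. `CheckQueueInvariant` and `queue` are hypothetical.
//
//   absl::EnableMutexInvariantDebugging(true);
//   queue.mu.EnableInvariantDebugging(&CheckQueueInvariant, &queue);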
774
775 void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
776 synch_deadlock_detection.store(mode, std::memory_order_release);
777 }
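
// Example (illustrative): tests sometimes downgrade the default kAbort
// behavior so that a detected cycle is logged but execution continues.
//
//   absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);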
778
779 // Return true iff threads x and y are part of the same equivalence
780 // class of waiters. An equivalence class is defined as the set of
781 // waiters with the same condition, type of lock, and thread priority.
782 //
783 // Requires that x and y be waiting on the same Mutex queue.
784 static bool MuEquivalentWaiter(PerThreadSynch* x, PerThreadSynch* y) {
785 return x->waitp->how == y->waitp->how && x->priority == y->priority &&
786 Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
787 }
788
789 // Given the contents of a mutex word containing a PerThreadSynch pointer,
790 // return the pointer.
791 static inline PerThreadSynch* GetPerThreadSynch(intptr_t v) {
792 return reinterpret_cast<PerThreadSynch*>(v & kMuHigh);
793 }
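
// Because PerThreadSynch::kAlignment > kMuLow (see the static_assert above),
// a waiter pointer and the low flag bits can share the mutex word.
// Illustrative:
//
//   intptr_t v = reinterpret_cast<intptr_t>(head) | kMuWait | kMuSpin;
//   assert(GetPerThreadSynch(v) == head);  // kMuHigh masks off the flag bits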
794
795 // The next several routines maintain the per-thread next and skip fields
796 // used in the Mutex waiter queue.
797 // The queue is a circular singly-linked list, of which the "head" is the
798 // last element, and head->next is the first element.
799 // The skip field has the invariant:
800 // For thread x, x->skip is one of:
801 // - invalid (iff x is not in a Mutex wait queue),
802 // - null, or
803 // - a pointer to a distinct thread waiting later in the same Mutex queue
804 // such that all threads in [x, x->skip] have the same condition, priority
805 // and lock type (MuEquivalentWaiter() is true for all pairs in [x,
806 // x->skip]).
807 // In addition, if x->skip is valid, (x->may_skip || x->skip == null)
808 //
809 // By the spec of MuEquivalentWaiter(), it is not necessary when removing the
810 // first runnable thread y from the front of a Mutex queue to adjust the skip
811 // field of another thread x because if x->skip==y, x->skip must (have) become
812 // invalid before y is removed. The function TryRemove can remove a specified
813 // thread from an arbitrary position in the queue whether runnable or not, so
814 // it fixes up skip fields that would otherwise be left dangling.
815 // The statement
816 // if (x->may_skip && MuEquivalentWaiter(x, x->next)) { x->skip = x->next; }
817 // maintains the invariant provided x is not the last waiter in a Mutex queue
818 // The statement
819 // if (x->skip != null) { x->skip = x->skip->skip; }
820 // maintains the invariant.
821
822 // Returns the last thread y in a mutex waiter queue such that all threads in
823 // [x, y] inclusive share the same condition. Sets skip fields of some threads
824 // in that range to optimize future evaluation of Skip() on x values in
825 // the range. Requires thread x is in a mutex waiter queue.
826 // The locking is unusual. Skip() is called under these conditions:
827 // - spinlock is held in call from Enqueue(), with maybe_unlocking == false
828 // - Mutex is held in call from UnlockSlow() by last unlocker, with
829 // maybe_unlocking == true
830 // - both Mutex and spinlock are held in call from DequeueAllWakeable() (from
831 // UnlockSlow()) and TryRemove()
832 // These cases are mutually exclusive, so Skip() never runs concurrently
833 // with itself on the same Mutex. The skip chain is used in these other places
834 // that cannot occur concurrently:
835 // - FixSkip() (from TryRemove()) (spinlock and Mutex are held)
836 // - Dequeue() (with spinlock and Mutex held)
837 // - UnlockSlow() (with spinlock and Mutex held)
838 // A more complex case is Enqueue()
839 // - Enqueue() (with spinlock held and maybe_unlocking == false)
840 // This is the first case in which Skip is called, above.
841 // - Enqueue() (without spinlock held; but queue is empty and being freshly
842 // formed)
843 // - Enqueue() (with spinlock held and maybe_unlocking == true)
844 // The first case has mutual exclusion, and the second isolation through
845 // working on an otherwise unreachable data structure.
846 // In the last case, Enqueue() is required to change no skip/next pointers
847 // except those in the added node and the former "head" node. This implies
848 // that the new node is added after head, and so must be the new head or the
849 // new front of the queue.
850 static PerThreadSynch* Skip(PerThreadSynch* x) {
851 PerThreadSynch* x0 = nullptr;
852 PerThreadSynch* x1 = x;
853 PerThreadSynch* x2 = x->skip;
854 if (x2 != nullptr) {
855 // Each iteration attempts to advance sequence (x0,x1,x2) to next sequence
856 // such that x1 == x0->skip && x2 == x1->skip
857 while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
858 x0->skip = x2; // short-circuit skip from x0 to x2
859 }
860 x->skip = x1; // short-circuit skip from x to result
861 }
862 return x1;
863 }
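
// Illustrative trace (hypothetical waiters w1..w4, all MuEquivalentWaiter(),
// queued with w1->skip == w2, w2->skip == w3, w3->skip == w4, w4->skip ==
// null): Skip(w1) returns w4 and path-compresses the chain so that w1->skip,
// w2->skip and w3->skip all point at w4, making later skips over this run
// O(1).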
864
865 // "ancestor" appears before "to_be_removed" in the same Mutex waiter queue.
866 // The latter is going to be removed out of order, because of a timeout.
867 // Check whether "ancestor" has a skip field pointing to "to_be_removed",
868 // and fix it if it does.
869 static void FixSkip(PerThreadSynch* ancestor, PerThreadSynch* to_be_removed) {
870 if (ancestor->skip == to_be_removed) { // ancestor->skip left dangling
871 if (to_be_removed->skip != nullptr) {
872 ancestor->skip = to_be_removed->skip; // can skip past to_be_removed
873 } else if (ancestor->next != to_be_removed) { // they are not adjacent
874 ancestor->skip = ancestor->next; // can skip one past ancestor
875 } else {
876 ancestor->skip = nullptr; // can't skip at all
877 }
878 }
879 }
880
881 static void CondVarEnqueue(SynchWaitParams* waitp);
882
883 // Enqueue thread "waitp->thread" on a waiter queue.
884 // Called with mutex spinlock held if head != nullptr
885 // If head==nullptr and waitp->cv_word==nullptr, then Enqueue() is
886 // idempotent; it alters no state associated with the existing (empty)
887 // queue.
888 //
889 // If waitp->cv_word == nullptr, queue the thread at either the front or
890 // the end (according to its priority) of the circular mutex waiter queue whose
891 // head is "head", and return the new head. mu is the previous mutex state,
892 // which contains the reader count (perhaps adjusted for the operation in
893 // progress) if the list was empty and a read lock held, and the holder hint if
894 // the list was empty and a write lock held. (flags & kMuIsCond) indicates
895 // whether this thread was transferred from a CondVar or is waiting for a
896 // non-trivial condition. In this case, Enqueue() never returns nullptr
897 //
898 // If waitp->cv_word != nullptr, CondVarEnqueue() is called, and "head" is
899 // returned. This mechanism is used by CondVar to queue a thread on the
900 // condition variable queue instead of the mutex queue in implementing Wait().
901 // In this case, Enqueue() can return nullptr (if head==nullptr).
902 static PerThreadSynch* Enqueue(PerThreadSynch* head, SynchWaitParams* waitp,
903 intptr_t mu, int flags) {
904 // If we have been given a cv_word, call CondVarEnqueue() and return
905 // the previous head of the Mutex waiter queue.
906 if (waitp->cv_word != nullptr) {
907 CondVarEnqueue(waitp);
908 return head;
909 }
910
911 PerThreadSynch* s = waitp->thread;
912 ABSL_RAW_CHECK(
913 s->waitp == nullptr || // normal case
914 s->waitp == waitp || // Fer()---transfer from condition variable
915 s->suppress_fatal_errors,
916 "detected illegal recursion into Mutex code");
917 s->waitp = waitp;
918 s->skip = nullptr; // maintain skip invariant (see above)
919 s->may_skip = true; // always true on entering queue
920 s->wake = false; // not being woken
921 s->cond_waiter = ((flags & kMuIsCond) != 0);
922 #ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
923 int64_t now_cycles = CycleClock::Now();
924 if (s->next_priority_read_cycles < now_cycles) {
925 // Every so often, update our idea of the thread's priority.
926 // pthread_getschedparam() is 5% of the block/wakeup time;
927 // CycleClock::Now() is 0.5%.
928 int policy;
929 struct sched_param param;
930 const int err = pthread_getschedparam(pthread_self(), &policy, &param);
931 if (err != 0) {
932 ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
933 } else {
934 s->priority = param.sched_priority;
935 s->next_priority_read_cycles =
936 now_cycles + static_cast<int64_t>(CycleClock::Frequency());
937 }
938 }
939 #endif
940 if (head == nullptr) { // s is the only waiter
941 s->next = s; // it's the only entry in the cycle
942 s->readers = mu; // reader count is from mu word
943 s->maybe_unlocking = false; // no one is searching an empty list
944 head = s; // s is new head
945 } else {
946 PerThreadSynch* enqueue_after = nullptr; // we'll put s after this element
947 #ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
948 if (s->priority > head->priority) { // s's priority is above head's
949 // try to put s in priority-fifo order, or failing that at the front.
950 if (!head->maybe_unlocking) {
951 // No unlocker can be scanning the queue, so we can insert into the
952 // middle of the queue.
953 //
954 // Within a skip chain, all waiters have the same priority, so we can
955 // skip forward through the chains until we find one with a lower
956 // priority than the waiter to be enqueued.
957 PerThreadSynch* advance_to = head; // next value of enqueue_after
958 do {
959 enqueue_after = advance_to;
960 // (side-effect: optimizes skip chain)
961 advance_to = Skip(enqueue_after->next);
962 } while (s->priority <= advance_to->priority);
963 // termination guaranteed because s->priority > head->priority
964 // and head is the end of a skip chain
965 } else if (waitp->how == kExclusive &&
966 Condition::GuaranteedEqual(waitp->cond, nullptr)) {
967 // An unlocker could be scanning the queue, but we know it will recheck
968 // the queue front for writers that have no condition, which is what s
969 // is, so an insert at front is safe.
970 enqueue_after = head; // add after head, at front
971 }
972 }
973 #endif
974 if (enqueue_after != nullptr) {
975 s->next = enqueue_after->next;
976 enqueue_after->next = s;
977
978 // enqueue_after can be: head, Skip(...), or cur.
979 // The first two imply enqueue_after->skip == nullptr, and
980 // the last is used only if MuEquivalentWaiter(s, cur).
981 // We require this because clearing enqueue_after->skip
982 // is impossible; enqueue_after's predecessors might also
983 // incorrectly skip over s if we were to allow other
984 // insertion points.
985 ABSL_RAW_CHECK(enqueue_after->skip == nullptr ||
986 MuEquivalentWaiter(enqueue_after, s),
987 "Mutex Enqueue failure");
988
989 if (enqueue_after != head && enqueue_after->may_skip &&
990 MuEquivalentWaiter(enqueue_after, enqueue_after->next)) {
991 // enqueue_after can skip to its new successor, s
992 enqueue_after->skip = enqueue_after->next;
993 }
994 if (MuEquivalentWaiter(s, s->next)) { // s->may_skip is known to be true
995 s->skip = s->next; // s may skip to its successor
996 }
997 } else { // enqueue not done any other way, so
998 // we're inserting s at the back
999 // s will become new head; copy data from head into it
1000 s->next = head->next; // add s after head
1001 head->next = s;
1002 s->readers = head->readers; // reader count is from previous head
1003 s->maybe_unlocking = head->maybe_unlocking; // same for unlock hint
1004 if (head->may_skip && MuEquivalentWaiter(head, s)) {
1005 // head now has successor; may skip
1006 head->skip = s;
1007 }
1008 head = s; // s is new head
1009 }
1010 }
1011 s->state.store(PerThreadSynch::kQueued, std::memory_order_relaxed);
1012 return head;
1013 }
1014
1015 // Dequeue the successor pw->next of thread pw from the Mutex waiter queue
1016 // whose last element is head. The new head element is returned, or null
1017 // if the list is made empty.
1018 // Dequeue is called with both spinlock and Mutex held.
1019 static PerThreadSynch* Dequeue(PerThreadSynch* head, PerThreadSynch* pw) {
1020 PerThreadSynch* w = pw->next;
1021 pw->next = w->next; // snip w out of list
1022 if (head == w) { // we removed the head
1023 head = (pw == w) ? nullptr : pw; // either emptied list, or pw is new head
1024 } else if (pw != head && MuEquivalentWaiter(pw, pw->next)) {
1025 // pw can skip to its new successor
1026 if (pw->next->skip !=
1027 nullptr) { // either skip to its successor's skip target
1028 pw->skip = pw->next->skip;
1029 } else { // or to pw's successor
1030 pw->skip = pw->next;
1031 }
1032 }
1033 return head;
1034 }
1035
1036 // Traverse the elements [pw->next, head] of the circular list whose last element
1037 // is head.
1038 // Remove all elements with wake==true and place them in the
1039 // singly-linked list wake_list in the order found. Assumes that
1040 // there is only one such element if the element has how == kExclusive.
1041 // Return the new head.
1042 static PerThreadSynch* DequeueAllWakeable(PerThreadSynch* head,
1043 PerThreadSynch* pw,
1044 PerThreadSynch** wake_tail) {
1045 PerThreadSynch* orig_h = head;
1046 PerThreadSynch* w = pw->next;
1047 bool skipped = false;
1048 do {
1049 if (w->wake) { // remove this element
1050 ABSL_RAW_CHECK(pw->skip == nullptr, "bad skip in DequeueAllWakeable");
1051 // we're removing pw's successor so either pw->skip is zero or we should
1052 // already have removed pw since if pw->skip!=null, pw has the same
1053 // condition as w.
1054 head = Dequeue(head, pw);
1055 w->next = *wake_tail; // keep list terminated
1056 *wake_tail = w; // add w to wake_list;
1057 wake_tail = &w->next; // next addition to end
1058 if (w->waitp->how == kExclusive) { // wake at most 1 writer
1059 break;
1060 }
1061 } else { // not waking this one; skip
1062 pw = Skip(w); // skip as much as possible
1063 skipped = true;
1064 }
1065 w = pw->next;
1066 // We want to stop processing after we've considered the original head,
1067 // orig_h. We can't test for w==orig_h in the loop because w may skip over
1068 // it; we are guaranteed only that w's predecessor will not skip over
1069 // orig_h. When we've considered orig_h, either we've processed it and
1070 // removed it (so orig_h != head), or we considered it and skipped it (so
1071 // skipped==true && pw == head because skipping from head always skips by
1072 // just one, leaving pw pointing at head). So we want to
1073 // continue the loop with the negation of that expression.
1074 } while (orig_h == head && (pw != head || !skipped));
1075 return head;
1076 }
1077
1078 // Try to remove thread s from the list of waiters on this mutex.
1079 // Does nothing if s is not on the waiter list.
1080 void Mutex::TryRemove(PerThreadSynch* s) {
1081 SchedulingGuard::ScopedDisable disable_rescheduling;
1082 intptr_t v = mu_.load(std::memory_order_relaxed);
1083 // acquire spinlock & lock
1084 if ((v & (kMuWait | kMuSpin | kMuWriter | kMuReader)) == kMuWait &&
1085 mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter,
1086 std::memory_order_acquire,
1087 std::memory_order_relaxed)) {
1088 PerThreadSynch* h = GetPerThreadSynch(v);
1089 if (h != nullptr) {
1090 PerThreadSynch* pw = h; // pw is w's predecessor
1091 PerThreadSynch* w;
1092 if ((w = pw->next) != s) { // search for thread,
1093 do { // processing at least one element
1094 // If the current element isn't equivalent to the waiter to be
1095 // removed, we can skip the entire chain.
1096 if (!MuEquivalentWaiter(s, w)) {
1097 pw = Skip(w); // so skip all that won't match
1098 // we don't have to worry about dangling skip fields
1099 // in the threads we skipped; none can point to s
1100 // because they are in a different equivalence class.
1101 } else { // seeking same condition
1102 FixSkip(w, s); // fix up any skip pointer from w to s
1103 pw = w;
1104 }
1105 // don't search further if we found the thread, or we're about to
1106 // process the first thread again.
1107 } while ((w = pw->next) != s && pw != h);
1108 }
1109 if (w == s) { // found thread; remove it
1110 // pw->skip may be non-zero here; the loop above ensured that
1111 // no ancestor of s can skip to s, so removal is safe anyway.
1112 h = Dequeue(h, pw);
1113 s->next = nullptr;
1114 s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
1115 }
1116 }
1117 intptr_t nv;
1118 do { // release spinlock and lock
1119 v = mu_.load(std::memory_order_relaxed);
1120 nv = v & (kMuDesig | kMuEvent);
1121 if (h != nullptr) {
1122 nv |= kMuWait | reinterpret_cast<intptr_t>(h);
1123 h->readers = 0; // we hold writer lock
1124 h->maybe_unlocking = false; // finished unlocking
1125 }
1126 } while (!mu_.compare_exchange_weak(v, nv, std::memory_order_release,
1127 std::memory_order_relaxed));
1128 }
1129 }
1130
1131 // Wait until thread "s", which must be the current thread, is removed from
1132 // this mutex's waiter queue. If "s->waitp->timeout" has a timeout, wake up
1133 // if the wait extends past the absolute time specified, even if "s" is still
1134 // on the mutex queue. In this case, remove "s" from the queue and return
1135 // true, otherwise return false.
1136 void Mutex::Block(PerThreadSynch* s) {
1137 while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
1138 if (!DecrementSynchSem(this, s, s->waitp->timeout)) {
1139 // After a timeout, we go into a spin loop until we remove ourselves
1140 // from the queue, or someone else removes us. We can't be sure to be
1141 // able to remove ourselves in a single lock acquisition because this
1142 // mutex may be held, and the holder has the right to read the centre
1143 // of the waiter queue without holding the spinlock.
1144 this->TryRemove(s);
1145 int c = 0;
1146 while (s->next != nullptr) {
1147 c = synchronization_internal::MutexDelay(c, GENTLE);
1148 this->TryRemove(s);
1149 }
1150 if (kDebugMode) {
1151 // This ensures that we test the case that TryRemove() is called when s
1152 // is not on the queue.
1153 this->TryRemove(s);
1154 }
1155 s->waitp->timeout = KernelTimeout::Never(); // timeout is satisfied
1156 s->waitp->cond = nullptr; // condition no longer relevant for wakeups
1157 }
1158 }
1159 ABSL_RAW_CHECK(s->waitp != nullptr || s->suppress_fatal_errors,
1160 "detected illegal recursion in Mutex code");
1161 s->waitp = nullptr;
1162 }
1163
1164 // Wake thread w, and return the next thread in the list.
1165 PerThreadSynch* Mutex::Wakeup(PerThreadSynch* w) {
1166 PerThreadSynch* next = w->next;
1167 w->next = nullptr;
1168 w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
1169 IncrementSynchSem(this, w);
1170
1171 return next;
1172 }
1173
1174 static GraphId GetGraphIdLocked(Mutex* mu)
1175 ABSL_EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
1176 if (!deadlock_graph) { // (re)create the deadlock graph.
1177 deadlock_graph =
1178 new (base_internal::LowLevelAlloc::Alloc(sizeof(*deadlock_graph)))
1179 GraphCycles;
1180 }
1181 return deadlock_graph->GetId(mu);
1182 }
1183
1184 static GraphId GetGraphId(Mutex* mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
1185 deadlock_graph_mu.Lock();
1186 GraphId id = GetGraphIdLocked(mu);
1187 deadlock_graph_mu.Unlock();
1188 return id;
1189 }
1190
1191 // Record a lock acquisition. This is used in debug mode for deadlock
1192 // detection. The held_locks pointer points to the relevant data
1193 // structure for each case.
1194 static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld* held_locks) {
1195 int n = held_locks->n;
1196 int i = 0;
1197 while (i != n && held_locks->locks[i].id != id) {
1198 i++;
1199 }
1200 if (i == n) {
1201 if (n == ABSL_ARRAYSIZE(held_locks->locks)) {
1202 held_locks->overflow = true; // lost some data
1203 } else { // we have room for lock
1204 held_locks->locks[i].mu = mu;
1205 held_locks->locks[i].count = 1;
1206 held_locks->locks[i].id = id;
1207 held_locks->n = n + 1;
1208 }
1209 } else {
1210 held_locks->locks[i].count++;
1211 }
1212 }
1213
1214 // Record a lock release. Each call to LockEnter(mu, id, x) should be
1215 // eventually followed by a call to LockLeave(mu, id, x) by the same thread.
1216 // The event is not processed when it is not needed, i.e., when deadlock
1217 // detection is disabled.
1218 static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld* held_locks) {
1219 int n = held_locks->n;
1220 int i = 0;
1221 while (i != n && held_locks->locks[i].id != id) {
1222 i++;
1223 }
1224 if (i == n) {
1225 if (!held_locks->overflow) {
1226 // The deadlock id may have been reassigned after ForgetDeadlockInfo,
1227 // but in that case mu should still be present.
1228 i = 0;
1229 while (i != n && held_locks->locks[i].mu != mu) {
1230 i++;
1231 }
1232 if (i == n) { // mu missing means releasing unheld lock
1233 SynchEvent* mu_events = GetSynchEvent(mu);
1234 ABSL_RAW_LOG(FATAL,
1235 "thread releasing lock it does not hold: %p %s; "
1236 ,
1237 static_cast<void*>(mu),
1238 mu_events == nullptr ? "" : mu_events->name);
1239 }
1240 }
1241 } else if (held_locks->locks[i].count == 1) {
1242 held_locks->n = n - 1;
1243 held_locks->locks[i] = held_locks->locks[n - 1];
1244 held_locks->locks[n - 1].id = InvalidGraphId();
1245 held_locks->locks[n - 1].mu =
1246 nullptr; // clear mu to please the leak detector.
1247 } else {
1248 assert(held_locks->locks[i].count > 0);
1249 held_locks->locks[i].count--;
1250 }
1251 }
1252
1253 // Call LockEnter() if in debug mode and deadlock detection is enabled.
1254 static inline void DebugOnlyLockEnter(Mutex* mu) {
1255 if (kDebugMode) {
1256 if (synch_deadlock_detection.load(std::memory_order_acquire) !=
1257 OnDeadlockCycle::kIgnore) {
1258 LockEnter(mu, GetGraphId(mu), Synch_GetAllLocks());
1259 }
1260 }
1261 }
1262
1263 // Call LockEnter() if in debug mode and deadlock detection is enabled.
1264 static inline void DebugOnlyLockEnter(Mutex* mu, GraphId id) {
1265 if (kDebugMode) {
1266 if (synch_deadlock_detection.load(std::memory_order_acquire) !=
1267 OnDeadlockCycle::kIgnore) {
1268 LockEnter(mu, id, Synch_GetAllLocks());
1269 }
1270 }
1271 }
1272
1273 // Call LockLeave() if in debug mode and deadlock detection is enabled.
1274 static inline void DebugOnlyLockLeave(Mutex* mu) {
1275 if (kDebugMode) {
1276 if (synch_deadlock_detection.load(std::memory_order_acquire) !=
1277 OnDeadlockCycle::kIgnore) {
1278 LockLeave(mu, GetGraphId(mu), Synch_GetAllLocks());
1279 }
1280 }
1281 }
1282
1283 static char* StackString(void** pcs, int n, char* buf, int maxlen,
1284 bool symbolize) {
1285 static constexpr int kSymLen = 200;
1286 char sym[kSymLen];
1287 int len = 0;
1288 for (int i = 0; i != n; i++) {
1289 if (len >= maxlen)
1290 return buf;
1291 size_t count = static_cast<size_t>(maxlen - len);
1292 if (symbolize) {
1293 if (!absl::Symbolize(pcs[i], sym, kSymLen)) {
1294 sym[0] = '\0';
1295 }
1296 snprintf(buf + len, count, "%s\t@ %p %s\n", (i == 0 ? "\n" : ""), pcs[i],
1297 sym);
1298 } else {
1299 snprintf(buf + len, count, " %p", pcs[i]);
1300 }
1301 len += strlen(&buf[len]);
1302 }
1303 return buf;
1304 }
1305
1306 static char* CurrentStackString(char* buf, int maxlen, bool symbolize) {
1307 void* pcs[40];
1308 return StackString(pcs, absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 2), buf,
1309 maxlen, symbolize);
1310 }
1311
1312 namespace {
1313 enum {
1314 kMaxDeadlockPathLen = 10
1315 }; // maximum length of a deadlock cycle;
1316 // a path this long would be remarkable
1317 // Buffers required to report a deadlock.
1318 // We do not allocate them on stack to avoid large stack frame.
1319 struct DeadlockReportBuffers {
1320 char buf[6100];
1321 GraphId path[kMaxDeadlockPathLen];
1322 };
1323
1324 struct ScopedDeadlockReportBuffers {
1325 ScopedDeadlockReportBuffers() {
1326 b = reinterpret_cast<DeadlockReportBuffers*>(
1327 base_internal::LowLevelAlloc::Alloc(sizeof(*b)));
1328 }
1329 ~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); }
1330 DeadlockReportBuffers* b;
1331 };
1332
1333 // Helper to pass to GraphCycles::UpdateStackTrace.
1334 int GetStack(void** stack, int max_depth) {
1335 return absl::GetStackTrace(stack, max_depth, 3);
1336 }
1337 } // anonymous namespace
1338
1339 // Called in debug mode when a thread is about to acquire a lock in a way that
1340 // may block.
1341 static GraphId DeadlockCheck(Mutex* mu) {
1342 if (synch_deadlock_detection.load(std::memory_order_acquire) ==
1343 OnDeadlockCycle::kIgnore) {
1344 return InvalidGraphId();
1345 }
1346
1347 SynchLocksHeld* all_locks = Synch_GetAllLocks();
1348
1349 absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu);
1350 const GraphId mu_id = GetGraphIdLocked(mu);
1351
1352 if (all_locks->n == 0) {
1353 // There are no other locks held. Return now so that we don't need to
1354 // call GetSynchEvent(). This way we do not record the stack trace
1355 // for this Mutex. It's ok, since if this Mutex is involved in a deadlock,
1356 // it can't always be the first lock acquired by a thread.
1357 return mu_id;
1358 }
1359
1360 // We prefer to keep stack traces that show a thread holding and acquiring
1361 // as many locks as possible. This increases the chances that a given edge
1362 // in the acquires-before graph will be represented in the stack traces
1363 // recorded for the locks.
1364 deadlock_graph->UpdateStackTrace(mu_id, all_locks->n + 1, GetStack);
1365
1366 // For each other mutex already held by this thread:
1367 for (int i = 0; i != all_locks->n; i++) {
1368 const GraphId other_node_id = all_locks->locks[i].id;
1369 const Mutex* other =
1370 static_cast<const Mutex*>(deadlock_graph->Ptr(other_node_id));
1371 if (other == nullptr) {
1372 // Ignore stale lock
1373 continue;
1374 }
1375
1376 // Add the acquired-before edge to the graph.
1377 if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) {
1378 ScopedDeadlockReportBuffers scoped_buffers;
1379 DeadlockReportBuffers* b = scoped_buffers.b;
1380 static int number_of_reported_deadlocks = 0;
1381 number_of_reported_deadlocks++;
1382 // Symbolize only the first 2 deadlock reports to avoid huge slowdowns.
1383 bool symbolize = number_of_reported_deadlocks <= 2;
1384 ABSL_RAW_LOG(ERROR, "Potential Mutex deadlock: %s",
1385 CurrentStackString(b->buf, sizeof(b->buf), symbolize));
1386 size_t len = 0;
1387 for (int j = 0; j != all_locks->n; j++) {
1388 void* pr = deadlock_graph->Ptr(all_locks->locks[j].id);
1389 if (pr != nullptr) {
1390 snprintf(b->buf + len, sizeof(b->buf) - len, " %p", pr);
1391 len += strlen(&b->buf[len]);
1392 }
1393 }
1394 ABSL_RAW_LOG(ERROR,
1395 "Acquiring absl::Mutex %p while holding %s; a cycle in the "
1396 "historical lock ordering graph has been observed",
1397 static_cast<void*>(mu), b->buf);
1398 ABSL_RAW_LOG(ERROR, "Cycle: ");
1399 int path_len = deadlock_graph->FindPath(mu_id, other_node_id,
1400 ABSL_ARRAYSIZE(b->path), b->path);
1401 for (int j = 0; j != path_len && j != ABSL_ARRAYSIZE(b->path); j++) {
1402 GraphId id = b->path[j];
1403 Mutex* path_mu = static_cast<Mutex*>(deadlock_graph->Ptr(id));
1404 if (path_mu == nullptr) continue;
1405 void** stack;
1406 int depth = deadlock_graph->GetStackTrace(id, &stack);
1407 snprintf(b->buf, sizeof(b->buf),
1408 "mutex@%p stack: ", static_cast<void*>(path_mu));
1409 StackString(stack, depth, b->buf + strlen(b->buf),
1410 static_cast<int>(sizeof(b->buf) - strlen(b->buf)),
1411 symbolize);
1412 ABSL_RAW_LOG(ERROR, "%s", b->buf);
1413 }
1414 if (path_len > static_cast<int>(ABSL_ARRAYSIZE(b->path))) {
1415 ABSL_RAW_LOG(ERROR, "(long cycle; list truncated)");
1416 }
1417 if (synch_deadlock_detection.load(std::memory_order_acquire) ==
1418 OnDeadlockCycle::kAbort) {
1419 deadlock_graph_mu.Unlock(); // avoid deadlock in fatal sighandler
1420 ABSL_RAW_LOG(FATAL, "dying due to potential deadlock");
1421 return mu_id;
1422 }
1423 break; // report at most one potential deadlock per acquisition
1424 }
1425 }
1426
1427 return mu_id;
1428 }
1429
1430 // Invoke DeadlockCheck() iff we're in debug mode and
1431 // deadlock checking has been enabled.
1432 static inline GraphId DebugOnlyDeadlockCheck(Mutex* mu) {
1433 if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
1434 OnDeadlockCycle::kIgnore) {
1435 return DeadlockCheck(mu);
1436 } else {
1437 return InvalidGraphId();
1438 }
1439 }
1440
1441 void Mutex::ForgetDeadlockInfo() {
1442 if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
1443 OnDeadlockCycle::kIgnore) {
1444 deadlock_graph_mu.Lock();
1445 if (deadlock_graph != nullptr) {
1446 deadlock_graph->RemoveNode(this);
1447 }
1448 deadlock_graph_mu.Unlock();
1449 }
1450 }
1451
1452 void Mutex::AssertNotHeld() const {
1453 // We have the data to allow this check only if in debug mode and deadlock
1454 // detection is enabled.
1455 if (kDebugMode &&
1456 (mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 &&
1457 synch_deadlock_detection.load(std::memory_order_acquire) !=
1458 OnDeadlockCycle::kIgnore) {
1459 GraphId id = GetGraphId(const_cast<Mutex*>(this));
1460 SynchLocksHeld* locks = Synch_GetAllLocks();
1461 for (int i = 0; i != locks->n; i++) {
1462 if (locks->locks[i].id == id) {
1463 SynchEvent* mu_events = GetSynchEvent(this);
1464 ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s",
1465 static_cast<const void*>(this),
1466 (mu_events == nullptr ? "" : mu_events->name));
1467 }
1468 }
1469 }
1470 }
1471
1472 // Attempt to acquire *mu, and return whether successful. The implementation
1473 // may spin for a short while if the lock cannot be acquired immediately.
1474 static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
1475 int c = GetMutexGlobals().spinloop_iterations;
1476 do { // do/while somewhat faster on AMD
1477 intptr_t v = mu->load(std::memory_order_relaxed);
1478 if ((v & (kMuReader | kMuEvent)) != 0) {
1479 return false; // a reader or tracing -> give up
1480 } else if (((v & kMuWriter) == 0) && // no holder -> try to acquire
1481 mu->compare_exchange_strong(v, kMuWriter | v,
1482 std::memory_order_acquire,
1483 std::memory_order_relaxed)) {
1484 return true;
1485 }
1486 } while (--c > 0);
1487 return false;
1488 }
1489
1490 void Mutex::Lock() {
1491 ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
1492 GraphId id = DebugOnlyDeadlockCheck(this);
1493 intptr_t v = mu_.load(std::memory_order_relaxed);
1494 // try fast acquire, then spin loop
1495 if ((v & (kMuWriter | kMuReader | kMuEvent)) != 0 ||
1496 !mu_.compare_exchange_strong(v, kMuWriter | v, std::memory_order_acquire,
1497 std::memory_order_relaxed)) {
1498 // try spin acquire, then slow loop
1499 if (!TryAcquireWithSpinning(&this->mu_)) {
1500 this->LockSlow(kExclusive, nullptr, 0);
1501 }
1502 }
1503 DebugOnlyLockEnter(this, id);
1504 ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
1505 }
1506
1507 void Mutex::ReaderLock() {
1508 ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
1509 GraphId id = DebugOnlyDeadlockCheck(this);
1510 intptr_t v = mu_.load(std::memory_order_relaxed);
1511 // try fast acquire, then slow loop
1512 if ((v & (kMuWriter | kMuWait | kMuEvent)) != 0 ||
1513 !mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
1514 std::memory_order_acquire,
1515 std::memory_order_relaxed)) {
1516 this->LockSlow(kShared, nullptr, 0);
1517 }
1518 DebugOnlyLockEnter(this, id);
1519 ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
1520 }
1521
1522 void Mutex::LockWhen(const Condition& cond) {
1523 ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
1524 GraphId id = DebugOnlyDeadlockCheck(this);
1525 this->LockSlow(kExclusive, &cond, 0);
1526 DebugOnlyLockEnter(this, id);
1527 ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
1528 }
1529
1530 bool Mutex::LockWhenWithTimeout(const Condition& cond, absl::Duration timeout) {
1531 ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
1532 GraphId id = DebugOnlyDeadlockCheck(this);
1533 bool res = LockSlowWithDeadline(kExclusive, &cond, KernelTimeout(timeout), 0);
1534 DebugOnlyLockEnter(this, id);
1535 ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
1536 return res;
1537 }
1538
1539 bool Mutex::LockWhenWithDeadline(const Condition& cond, absl::Time deadline) {
1540 ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
1541 GraphId id = DebugOnlyDeadlockCheck(this);
1542 bool res =
1543 LockSlowWithDeadline(kExclusive, &cond, KernelTimeout(deadline), 0);
1544 DebugOnlyLockEnter(this, id);
1545 ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
1546 return res;
1547 }
1548
1549 void Mutex::ReaderLockWhen(const Condition& cond) {
1550 ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
1551 GraphId id = DebugOnlyDeadlockCheck(this);
1552 this->LockSlow(kShared, &cond, 0);
1553 DebugOnlyLockEnter(this, id);
1554 ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
1555 }
1556
1557 bool Mutex::ReaderLockWhenWithTimeout(const Condition& cond,
1558 absl::Duration timeout) {
1559 ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
1560 GraphId id = DebugOnlyDeadlockCheck(this);
1561 bool res = LockSlowWithDeadline(kShared, &cond, KernelTimeout(timeout), 0);
1562 DebugOnlyLockEnter(this, id);
1563 ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
1564 return res;
1565 }
1566
1567 bool Mutex::ReaderLockWhenWithDeadline(const Condition& cond,
1568 absl::Time deadline) {
1569 ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
1570 GraphId id = DebugOnlyDeadlockCheck(this);
1571 bool res = LockSlowWithDeadline(kShared, &cond, KernelTimeout(deadline), 0);
1572 DebugOnlyLockEnter(this, id);
1573 ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
1574 return res;
1575 }
1576
1577 void Mutex::Await(const Condition& cond) {
1578 if (cond.Eval()) { // condition already true; nothing to do
1579 if (kDebugMode) {
1580 this->AssertReaderHeld();
1581 }
1582 } else { // normal case
1583 ABSL_RAW_CHECK(this->AwaitCommon(cond, KernelTimeout::Never()),
1584 "condition untrue on return from Await");
1585 }
1586 }
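// Illustrative usage sketch for Await() (for exposition only; `mu` and `ready`
// are hypothetical names):
//
//   absl::Mutex mu;
//   bool ready = false;  // guarded by mu
//
//   // Waiter: blocks (releasing mu while waiting) until ready is true.
//   mu.Lock();
//   mu.Await(absl::Condition(&ready));
//   // ... use protected state ...
//   mu.Unlock();
//
//   // Setter: conditions are re-evaluated when the Mutex is released.
//   mu.Lock();
//   ready = true;
//   mu.Unlock();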
1587
1588 bool Mutex::AwaitWithTimeout(const Condition& cond, absl::Duration timeout) {
1589 if (cond.Eval()) { // condition already true; nothing to do
1590 if (kDebugMode) {
1591 this->AssertReaderHeld();
1592 }
1593 return true;
1594 }
1595
1596 KernelTimeout t{timeout};
1597 bool res = this->AwaitCommon(cond, t);
1598 ABSL_RAW_CHECK(res || t.has_timeout(),
1599 "condition untrue on return from Await");
1600 return res;
1601 }
1602
1603 bool Mutex::AwaitWithDeadline(const Condition& cond, absl::Time deadline) {
1604 if (cond.Eval()) { // condition already true; nothing to do
1605 if (kDebugMode) {
1606 this->AssertReaderHeld();
1607 }
1608 return true;
1609 }
1610
1611 KernelTimeout t{deadline};
1612 bool res = this->AwaitCommon(cond, t);
1613 ABSL_RAW_CHECK(res || t.has_timeout(),
1614 "condition untrue on return from Await");
1615 return res;
1616 }
1617
1618 bool Mutex::AwaitCommon(const Condition& cond, KernelTimeout t) {
1619 this->AssertReaderHeld();
1620 MuHow how =
1621 (mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared;
1622 ABSL_TSAN_MUTEX_PRE_UNLOCK(this, TsanFlags(how));
1623 SynchWaitParams waitp(how, &cond, t, nullptr /*no cvmu*/,
1624 Synch_GetPerThreadAnnotated(this),
1625 nullptr /*no cv_word*/);
1626 int flags = kMuHasBlocked;
1627 if (!Condition::GuaranteedEqual(&cond, nullptr)) {
1628 flags |= kMuIsCond;
1629 }
1630 this->UnlockSlow(&waitp);
1631 this->Block(waitp.thread);
1632 ABSL_TSAN_MUTEX_POST_UNLOCK(this, TsanFlags(how));
1633 ABSL_TSAN_MUTEX_PRE_LOCK(this, TsanFlags(how));
1634 this->LockSlowLoop(&waitp, flags);
1635 bool res = waitp.cond != nullptr || // => cond known true from LockSlowLoop
1636 EvalConditionAnnotated(&cond, this, true, false, how == kShared);
1637 ABSL_TSAN_MUTEX_POST_LOCK(this, TsanFlags(how), 0);
1638 return res;
1639 }
1640
1641 bool Mutex::TryLock() {
1642 ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
1643 intptr_t v = mu_.load(std::memory_order_relaxed);
1644 if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 && // try fast acquire
1645 mu_.compare_exchange_strong(v, kMuWriter | v, std::memory_order_acquire,
1646 std::memory_order_relaxed)) {
1647 DebugOnlyLockEnter(this);
1648 ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
1649 return true;
1650 }
1651 if ((v & kMuEvent) != 0) { // we're recording events
1652 if ((v & kExclusive->slow_need_zero) == 0 && // try fast acquire
1653 mu_.compare_exchange_strong(
1654 v, (kExclusive->fast_or | v) + kExclusive->fast_add,
1655 std::memory_order_acquire, std::memory_order_relaxed)) {
1656 DebugOnlyLockEnter(this);
1657 PostSynchEvent(this, SYNCH_EV_TRYLOCK_SUCCESS);
1658 ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
1659 return true;
1660 } else {
1661 PostSynchEvent(this, SYNCH_EV_TRYLOCK_FAILED);
1662 }
1663 }
1664 ABSL_TSAN_MUTEX_POST_LOCK(
1665 this, __tsan_mutex_try_lock | __tsan_mutex_try_lock_failed, 0);
1666 return false;
1667 }
1668
1669 bool Mutex::ReaderTryLock() {
1670 ABSL_TSAN_MUTEX_PRE_LOCK(this,
1671 __tsan_mutex_read_lock | __tsan_mutex_try_lock);
1672 intptr_t v = mu_.load(std::memory_order_relaxed);
1673 // The while-loops (here and below) iterate only if the mutex word keeps
1674 // changing (typically because the reader count changes) under the CAS. We
1675 // limit the number of attempts to avoid having to think about livelock.
1676 int loop_limit = 5;
1677 while ((v & (kMuWriter | kMuWait | kMuEvent)) == 0 && loop_limit != 0) {
1678 if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
1679 std::memory_order_acquire,
1680 std::memory_order_relaxed)) {
1681 DebugOnlyLockEnter(this);
1682 ABSL_TSAN_MUTEX_POST_LOCK(
1683 this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
1684 return true;
1685 }
1686 loop_limit--;
1687 v = mu_.load(std::memory_order_relaxed);
1688 }
1689 if ((v & kMuEvent) != 0) { // we're recording events
1690 loop_limit = 5;
1691 while ((v & kShared->slow_need_zero) == 0 && loop_limit != 0) {
1692 if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
1693 std::memory_order_acquire,
1694 std::memory_order_relaxed)) {
1695 DebugOnlyLockEnter(this);
1696 PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_SUCCESS);
1697 ABSL_TSAN_MUTEX_POST_LOCK(
1698 this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
1699 return true;
1700 }
1701 loop_limit--;
1702 v = mu_.load(std::memory_order_relaxed);
1703 }
1704 if ((v & kMuEvent) != 0) {
1705 PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_FAILED);
1706 }
1707 }
1708 ABSL_TSAN_MUTEX_POST_LOCK(this,
1709 __tsan_mutex_read_lock | __tsan_mutex_try_lock |
1710 __tsan_mutex_try_lock_failed,
1711 0);
1712 return false;
1713 }
1714
1715 void Mutex::Unlock() {
1716 ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
1717 DebugOnlyLockLeave(this);
1718 intptr_t v = mu_.load(std::memory_order_relaxed);
1719
1720 if (kDebugMode && ((v & (kMuWriter | kMuReader)) != kMuWriter)) {
1721 ABSL_RAW_LOG(FATAL, "Mutex unlocked when destroyed or not locked: v=0x%x",
1722 static_cast<unsigned>(v));
1723 }
1724
1725 // should_try_cas is whether we'll try a compare-and-swap immediately.
1726 // NOTE: optimized out when kDebugMode is false.
1727 bool should_try_cas = ((v & (kMuEvent | kMuWriter)) == kMuWriter &&
1728 (v & (kMuWait | kMuDesig)) != kMuWait);
1729 // But we can use an alternate computation that compilers currently
1730 // don't find on their own. When that changes, this function can be
1731 // simplified.
1732 intptr_t x = (v ^ (kMuWriter | kMuWait)) & (kMuWriter | kMuEvent);
1733 intptr_t y = (v ^ (kMuWriter | kMuWait)) & (kMuWait | kMuDesig);
1734 // Claim: "x == 0 && y > 0" is equivalent to should_try_cas.
1735 // Also, because kMuWriter and kMuEvent exceed kMuDesig and kMuWait,
1736 // all possible non-zero values for x exceed all possible values for y.
1737 // Therefore, (x == 0 && y > 0) == (x < y).
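// For example, with v == kMuWriter (a writer holding the lock, no waiters and
// no events): x == 0 and y == kMuWait, so x < y, and should_try_cas above is
// likewise true, so the fast release path is taken.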
1738 if (kDebugMode && should_try_cas != (x < y)) {
1739 // We would usually use PRIdPTR here, but it is not correctly implemented
1740 // in the Android toolchain.
1741 ABSL_RAW_LOG(FATAL, "internal logic error %llx %llx %llx\n",
1742 static_cast<long long>(v), static_cast<long long>(x),
1743 static_cast<long long>(y));
1744 }
1745 if (x < y && mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
1746 std::memory_order_release,
1747 std::memory_order_relaxed)) {
1748 // fast writer release (writer with no waiters or with designated waker)
1749 } else {
1750 this->UnlockSlow(nullptr /*no waitp*/); // take slow path
1751 }
1752 ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
1753 }
1754
1755 // Requires v to represent a reader-locked state.
1756 static bool ExactlyOneReader(intptr_t v) {
1757 assert((v & (kMuWriter | kMuReader)) == kMuReader);
1758 assert((v & kMuHigh) != 0);
1759 // The more straightforward "(v & kMuHigh) == kMuOne" also works, but
1760 // on some architectures the following generates slightly smaller code.
1761 // It may be faster too.
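// Here the reader count is held in the kMuHigh bits of v in units of kMuOne,
// so "exactly one reader" means the only high bit set is kMuOne; masking out
// kMuOne from the high bits must therefore yield zero.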
1762 constexpr intptr_t kMuMultipleWaitersMask = kMuHigh ^ kMuOne;
1763 return (v & kMuMultipleWaitersMask) == 0;
1764 }
1765
1766 void Mutex::ReaderUnlock() {
1767 ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
1768 DebugOnlyLockLeave(this);
1769 intptr_t v = mu_.load(std::memory_order_relaxed);
1770 assert((v & (kMuWriter | kMuReader)) == kMuReader);
1771 if ((v & (kMuReader | kMuWait | kMuEvent)) == kMuReader) {
1772 // fast reader release (reader with no waiters)
1773 intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
1774 if (mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,
1775 std::memory_order_relaxed)) {
1776 ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
1777 return;
1778 }
1779 }
1780 this->UnlockSlow(nullptr /*no waitp*/); // take slow path
1781 ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
1782 }
1783
1784 // Returns a mask used to clear the designated-waker flag in the mutex word
1785 // when this thread has blocked, and therefore may be the designated waker.
1786 static intptr_t ClearDesignatedWakerMask(int flag) {
1787 assert(flag >= 0);
1788 assert(flag <= 1);
1789 switch (flag) {
1790 case 0: // not blocked
1791 return ~static_cast<intptr_t>(0);
1792 case 1: // blocked; turn off the designated waker bit
1793 return ~static_cast<intptr_t>(kMuDesig);
1794 }
1795 ABSL_UNREACHABLE();
1796 }
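// Callers apply this as
//   v & ClearDesignatedWakerMask(flags & kMuHasBlocked)
// so kMuDesig is dropped from the mutex word only when the acquiring thread
// has already blocked at least once (see LockSlowWithDeadline/LockSlowLoop).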
1797
1798 // Returns a mask that conditionally ignores the existence of waiting writers
1799 // when a reader that has already blocked once wakes up and retries.
1800 static intptr_t IgnoreWaitingWritersMask(int flag) {
1801 assert(flag >= 0);
1802 assert(flag <= 1);
1803 switch (flag) {
1804 case 0: // not blocked
1805 return ~static_cast<intptr_t>(0);
1806 case 1: // blocked; pretend there are no waiting writers
1807 return ~static_cast<intptr_t>(kMuWrWait);
1808 }
1809 ABSL_UNREACHABLE();
1810 }
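// Used in LockSlowLoop as
//   v & waitp->how->slow_inc_need_zero & IgnoreWaitingWritersMask(...)
// so that kMuWrWait does not keep a reader that has already blocked from
// re-acquiring the lock.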
1811
1812 // Internal version of LockWhen(). See LockSlowWithDeadline()
1813 ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition* cond,
1814 int flags) {
1815 ABSL_RAW_CHECK(
1816 this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
1817 "condition untrue on return from LockSlow");
1818 }
1819
1820 // Compute cond->Eval() and tell race detectors that we do it under mutex mu.
1821 static inline bool EvalConditionAnnotated(const Condition* cond, Mutex* mu,
1822 bool locking, bool trylock,
1823 bool read_lock) {
1824 // Delicate annotation dance.
1825 // We are currently inside of read/write lock/unlock operation.
1826 // All memory accesses are ignored inside of mutex operations; additionally,
1827 // for an unlock operation tsan considers that we've already released the mutex.
1828 bool res = false;
1829 #ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
1830 const uint32_t flags = read_lock ? __tsan_mutex_read_lock : 0;
1831 const uint32_t tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);
1832 #endif
1833 if (locking) {
1834 // For lock we pretend that we have finished the operation,
1835 // evaluate the predicate, then unlock the mutex and start locking it again
1836 // to match the annotation at the end of outer lock operation.
1837 // Note: we can't simply do POST_LOCK, Eval, PRE_LOCK, because then tsan
1838 // will think the lock acquisition is recursive which will trigger
1839 // deadlock detector.
1840 ABSL_TSAN_MUTEX_POST_LOCK(mu, tryflags, 0);
1841 res = cond->Eval();
1842 // There is no "try" version of Unlock, so use flags instead of tryflags.
1843 ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
1844 ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
1845 ABSL_TSAN_MUTEX_PRE_LOCK(mu, tryflags);
1846 } else {
1847 // Similarly, for unlock we pretend that we have unlocked the mutex,
1848 // lock the mutex, evaluate the predicate, and start unlocking it again
1849 // to match the annotation at the end of outer unlock operation.
1850 ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
1851 ABSL_TSAN_MUTEX_PRE_LOCK(mu, flags);
1852 ABSL_TSAN_MUTEX_POST_LOCK(mu, flags, 0);
1853 res = cond->Eval();
1854 ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
1855 }
1856 // Prevent unused param warnings in non-TSAN builds.
1857 static_cast<void>(mu);
1858 static_cast<void>(trylock);
1859 static_cast<void>(read_lock);
1860 return res;
1861 }
1862
1863 // Compute cond->Eval() hiding it from race detectors.
1864 // We are hiding it because inside of UnlockSlow we can evaluate a predicate
1865 // that was just added by a concurrent Lock operation; Lock adds the predicate
1866 // to the internal Mutex list without actually acquiring the Mutex
1867 // (it only acquires the internal spinlock, which is rightfully invisible for
1868 // tsan). As a result there is no tsan-visible synchronization between the
1869 // addition and this thread. So if we enabled race detection here,
1870 // it would race with the predicate initialization.
1871 static inline bool EvalConditionIgnored(Mutex* mu, const Condition* cond) {
1872 // Memory accesses are already ignored inside of lock/unlock operations,
1873 // but synchronization operations are also ignored. When we evaluate the
1874 // predicate we must ignore only memory accesses but not synchronization,
1875 // because missed synchronization can lead to false reports later.
1876 // So we "divert" (which un-ignores both memory accesses and synchronization)
1877 // and then separately turn on ignores of memory accesses.
1878 ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
1879 ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
1880 bool res = cond->Eval();
1881 ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
1882 ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
1883 static_cast<void>(mu); // Prevent unused param warning in non-TSAN builds.
1884 return res;
1885 }
1886
1887 // Internal equivalent of *LockWhenWithDeadline(), where
1888 // "t" represents the absolute timeout; !t.has_timeout() means "forever".
1889 // "how" is "kShared" (for ReaderLockWhen) or "kExclusive" (for LockWhen)
1890 // In flags, bits are ORed together:
1891 // - kMuHasBlocked indicates that the client has already blocked on the call so
1892 // the designated waker bit must be cleared and waiting writers should not
1893 // obstruct this call
1894 // - kMuIsCond indicates that this is a conditional acquire (condition variable,
1895 // Await, LockWhen) so contention profiling should be suppressed.
1896 bool Mutex::LockSlowWithDeadline(MuHow how, const Condition* cond,
1897 KernelTimeout t, int flags) {
1898 intptr_t v = mu_.load(std::memory_order_relaxed);
1899 bool unlock = false;
1900 if ((v & how->fast_need_zero) == 0 && // try fast acquire
1901 mu_.compare_exchange_strong(
1902 v,
1903 (how->fast_or |
1904 (v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) +
1905 how->fast_add,
1906 std::memory_order_acquire, std::memory_order_relaxed)) {
1907 if (cond == nullptr ||
1908 EvalConditionAnnotated(cond, this, true, false, how == kShared)) {
1909 return true;
1910 }
1911 unlock = true;
1912 }
1913 SynchWaitParams waitp(how, cond, t, nullptr /*no cvmu*/,
1914 Synch_GetPerThreadAnnotated(this),
1915 nullptr /*no cv_word*/);
1916 if (!Condition::GuaranteedEqual(cond, nullptr)) {
1917 flags |= kMuIsCond;
1918 }
1919 if (unlock) {
1920 this->UnlockSlow(&waitp);
1921 this->Block(waitp.thread);
1922 flags |= kMuHasBlocked;
1923 }
1924 this->LockSlowLoop(&waitp, flags);
1925 return waitp.cond != nullptr || // => cond known true from LockSlowLoop
1926 cond == nullptr ||
1927 EvalConditionAnnotated(cond, this, true, false, how == kShared);
1928 }
1929
1930 // RAW_CHECK_FMT() takes a condition, a printf-style format string, and
1931 // the printf-style argument list. The format string must be a literal.
1932 // Arguments after the first are not evaluated unless the condition is true.
1933 #define RAW_CHECK_FMT(cond, ...) \
1934 do { \
1935 if (ABSL_PREDICT_FALSE(!(cond))) { \
1936 ABSL_RAW_LOG(FATAL, "Check " #cond " failed: " __VA_ARGS__); \
1937 } \
1938 } while (0)
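// Illustrative call (hypothetical `ptr`; the real uses are in
// CheckForMutexCorruption below):
//   RAW_CHECK_FMT(ptr != nullptr, "unexpected null: %p", ptr);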
1939
1940 static void CheckForMutexCorruption(intptr_t v, const char* label) {
1941 // Test for either of two situations that should not occur in v:
1942 // kMuWriter and kMuReader
1943 // kMuWrWait and !kMuWait
1944 const uintptr_t w = static_cast<uintptr_t>(v ^ kMuWait);
1945 // By flipping that bit, we can now test for:
1946 // kMuWriter and kMuReader in w
1947 // kMuWrWait and kMuWait in w
1948 // We've chosen these two pairs of values so that they will overlap,
1949 // respectively, when the word is left-shifted by three. This allows us to
1950 // save a branch in the common (correct) case of them not being coincident.
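// After masking with (kMuWriter | kMuWrWait), the shifted-and check below is
// nonzero exactly when v has both the reader and writer bits set, or has
// kMuWrWait set without kMuWait -- precisely the two corrupt states tested.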
1951 static_assert(kMuReader << 3 == kMuWriter, "must match");
1952 static_assert(kMuWait << 3 == kMuWrWait, "must match");
1953 if (ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return;
1954 RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader),
1955 "%s: Mutex corrupt: both reader and writer lock held: %p",
1956 label, reinterpret_cast<void*>(v));
1957 RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait,
1958 "%s: Mutex corrupt: waiting writer with no waiters: %p", label,
1959 reinterpret_cast<void*>(v));
1960 assert(false);
1961 }
1962
1963 void Mutex::LockSlowLoop(SynchWaitParams* waitp, int flags) {
1964 SchedulingGuard::ScopedDisable disable_rescheduling;
1965 int c = 0;
1966 intptr_t v = mu_.load(std::memory_order_relaxed);
1967 if ((v & kMuEvent) != 0) {
1968 PostSynchEvent(
1969 this, waitp->how == kExclusive ? SYNCH_EV_LOCK : SYNCH_EV_READERLOCK);
1970 }
1971 ABSL_RAW_CHECK(
1972 waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
1973 "detected illegal recursion into Mutex code");
1974 for (;;) {
1975 v = mu_.load(std::memory_order_relaxed);
1976 CheckForMutexCorruption(v, "Lock");
1977 if ((v & waitp->how->slow_need_zero) == 0) {
1978 if (mu_.compare_exchange_strong(
1979 v,
1980 (waitp->how->fast_or |
1981 (v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) +
1982 waitp->how->fast_add,
1983 std::memory_order_acquire, std::memory_order_relaxed)) {
1984 if (waitp->cond == nullptr ||
1985 EvalConditionAnnotated(waitp->cond, this, true, false,
1986 waitp->how == kShared)) {
1987 break; // we timed out, or condition true, so return
1988 }
1989 this->UnlockSlow(waitp); // got lock but condition false
1990 this->Block(waitp->thread);
1991 flags |= kMuHasBlocked;
1992 c = 0;
1993 }
1994 } else { // need to access waiter list
1995 bool dowait = false;
1996 if ((v & (kMuSpin | kMuWait)) == 0) { // no waiters
1997 // This thread tries to become the one and only waiter.
1998 PerThreadSynch* new_h = Enqueue(nullptr, waitp, v, flags);
1999 intptr_t nv =
2000 (v & ClearDesignatedWakerMask(flags & kMuHasBlocked) & kMuLow) |
2001 kMuWait;
2002 ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to empty list failed");
2003 if (waitp->how == kExclusive && (v & kMuReader) != 0) {
2004 nv |= kMuWrWait;
2005 }
2006 if (mu_.compare_exchange_strong(
2007 v, reinterpret_cast<intptr_t>(new_h) | nv,
2008 std::memory_order_release, std::memory_order_relaxed)) {
2009 dowait = true;
2010 } else { // attempted Enqueue() failed
2011 // zero out the waitp field set by Enqueue()
2012 waitp->thread->waitp = nullptr;
2013 }
2014 } else if ((v & waitp->how->slow_inc_need_zero &
2015 IgnoreWaitingWritersMask(flags & kMuHasBlocked)) == 0) {
2016 // This is a reader that needs to increment the reader count,
2017 // but the count is currently held in the last waiter.
2018 if (mu_.compare_exchange_strong(
2019 v,
2020 (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
2021 kMuSpin | kMuReader,
2022 std::memory_order_acquire, std::memory_order_relaxed)) {
2023 PerThreadSynch* h = GetPerThreadSynch(v);
2024 h->readers += kMuOne; // inc reader count in waiter
2025 do { // release spinlock
2026 v = mu_.load(std::memory_order_relaxed);
2027 } while (!mu_.compare_exchange_weak(v, (v & ~kMuSpin) | kMuReader,
2028 std::memory_order_release,
2029 std::memory_order_relaxed));
2030 if (waitp->cond == nullptr ||
2031 EvalConditionAnnotated(waitp->cond, this, true, false,
2032 waitp->how == kShared)) {
2033 break; // we timed out, or condition true, so return
2034 }
2035 this->UnlockSlow(waitp); // got lock but condition false
2036 this->Block(waitp->thread);
2037 flags |= kMuHasBlocked;
2038 c = 0;
2039 }
2040 } else if ((v & kMuSpin) == 0 && // attempt to queue ourselves
2041 mu_.compare_exchange_strong(
2042 v,
2043 (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
2044 kMuSpin | kMuWait,
2045 std::memory_order_acquire, std::memory_order_relaxed)) {
2046 PerThreadSynch* h = GetPerThreadSynch(v);
2047 PerThreadSynch* new_h = Enqueue(h, waitp, v, flags);
2048 intptr_t wr_wait = 0;
2049 ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed");
2050 if (waitp->how == kExclusive && (v & kMuReader) != 0) {
2051 wr_wait = kMuWrWait; // give priority to a waiting writer
2052 }
2053 do { // release spinlock
2054 v = mu_.load(std::memory_order_relaxed);
2055 } while (!mu_.compare_exchange_weak(
2056 v,
2057 (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
2058 reinterpret_cast<intptr_t>(new_h),
2059 std::memory_order_release, std::memory_order_relaxed));
2060 dowait = true;
2061 }
2062 if (dowait) {
2063 this->Block(waitp->thread); // wait until removed from list or timeout
2064 flags |= kMuHasBlocked;
2065 c = 0;
2066 }
2067 }
2068 ABSL_RAW_CHECK(
2069 waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
2070 "detected illegal recursion into Mutex code");
2071 // delay, then try again
2072 c = synchronization_internal::MutexDelay(c, GENTLE);
2073 }
2074 ABSL_RAW_CHECK(
2075 waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
2076 "detected illegal recursion into Mutex code");
2077 if ((v & kMuEvent) != 0) {
2078 PostSynchEvent(this, waitp->how == kExclusive
2079 ? SYNCH_EV_LOCK_RETURNING
2080 : SYNCH_EV_READERLOCK_RETURNING);
2081 }
2082 }
2083
2084 // Unlock this mutex, which is held by the current thread.
2085 // If waitp is non-zero, it must be the wait parameters for the current thread
2086 // which holds the lock but is not runnable because its condition is false
2087 // or it is in the process of blocking on a condition variable; it must requeue
2088 // itself on the mutex/condvar to wait for its condition to become true.
2089 ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams* waitp) {
2090 SchedulingGuard::ScopedDisable disable_rescheduling;
2091 intptr_t v = mu_.load(std::memory_order_relaxed);
2092 this->AssertReaderHeld();
2093 CheckForMutexCorruption(v, "Unlock");
2094 if ((v & kMuEvent) != 0) {
2095 PostSynchEvent(
2096 this, (v & kMuWriter) != 0 ? SYNCH_EV_UNLOCK : SYNCH_EV_READERUNLOCK);
2097 }
2098 int c = 0;
2099 // the waiter under consideration to wake, or zero
2100 PerThreadSynch* w = nullptr;
2101 // the predecessor to w or zero
2102 PerThreadSynch* pw = nullptr;
2103 // head of the list searched previously, or zero
2104 PerThreadSynch* old_h = nullptr;
2105 // a condition that's known to be false.
2106 const Condition* known_false = nullptr;
2107 PerThreadSynch* wake_list = kPerThreadSynchNull; // list of threads to wake
2108 intptr_t wr_wait = 0; // set to kMuWrWait if we wake a reader and a
2109 // later writer could have acquired the lock
2110 // (starvation avoidance)
2111 ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr ||
2112 waitp->thread->suppress_fatal_errors,
2113 "detected illegal recursion into Mutex code");
2114 // This loop finds the threads to wake (collecting them in wake_list), if any,
2115 // and removes them from the list of waiters. In addition, it places
2116 // waitp->thread on the queue of waiters if waitp is non-zero.
2117 for (;;) {
2118 v = mu_.load(std::memory_order_relaxed);
2119 if ((v & kMuWriter) != 0 && (v & (kMuWait | kMuDesig)) != kMuWait &&
2120 waitp == nullptr) {
2121 // fast writer release (writer with no waiters or with designated waker)
2122 if (mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
2123 std::memory_order_release,
2124 std::memory_order_relaxed)) {
2125 return;
2126 }
2127 } else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) {
2128 // fast reader release (reader with no waiters)
2129 intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
2130 if (mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,
2131 std::memory_order_relaxed)) {
2132 return;
2133 }
2134 } else if ((v & kMuSpin) == 0 && // attempt to get spinlock
2135 mu_.compare_exchange_strong(v, v | kMuSpin,
2136 std::memory_order_acquire,
2137 std::memory_order_relaxed)) {
2138 if ((v & kMuWait) == 0) { // no one to wake
2139 intptr_t nv;
2140 bool do_enqueue = true; // always Enqueue() the first time
2141 ABSL_RAW_CHECK(waitp != nullptr,
2142 "UnlockSlow is confused"); // about to sleep
2143 do { // must loop to release spinlock as reader count may change
2144 v = mu_.load(std::memory_order_relaxed);
2145 // decrement reader count if there are readers
2146 intptr_t new_readers = (v >= kMuOne) ? v - kMuOne : v;
2147 PerThreadSynch* new_h = nullptr;
2148 if (do_enqueue) {
2149 // If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then
2150 // we must not retry here. The initial attempt will always have
2151 // succeeded; further attempts would enqueue us against *this due to
2152 // Fer() handling.
2153 do_enqueue = (waitp->cv_word == nullptr);
2154 new_h = Enqueue(nullptr, waitp, new_readers, kMuIsCond);
2155 }
2156 intptr_t clear = kMuWrWait | kMuWriter; // by default clear write bit
2157 if ((v & kMuWriter) == 0 && ExactlyOneReader(v)) { // last reader
2158 clear = kMuWrWait | kMuReader; // clear read bit
2159 }
2160 nv = (v & kMuLow & ~clear & ~kMuSpin);
2161 if (new_h != nullptr) {
2162 nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
2163 } else { // new_h could be nullptr if we queued ourselves on a
2164 // CondVar
2165 // In that case, we must place the reader count back in the mutex
2166 // word, as Enqueue() did not store it in the new waiter.
2167 nv |= new_readers & kMuHigh;
2168 }
2169 // release spinlock & our lock; retry if reader-count changed
2170 // (writer count cannot change since we hold lock)
2171 } while (!mu_.compare_exchange_weak(v, nv, std::memory_order_release,
2172 std::memory_order_relaxed));
2173 break;
2174 }
2175
2176 // There are waiters.
2177 // Set h to the head of the circular waiter list.
2178 PerThreadSynch* h = GetPerThreadSynch(v);
2179 if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) {
2180 // a reader but not the last
2181 h->readers -= kMuOne; // release our lock
2182 intptr_t nv = v; // normally just release spinlock
2183 if (waitp != nullptr) { // but waitp!=nullptr => must queue ourselves
2184 PerThreadSynch* new_h = Enqueue(h, waitp, v, kMuIsCond);
2185 ABSL_RAW_CHECK(new_h != nullptr,
2186 "waiters disappeared during Enqueue()!");
2187 nv &= kMuLow;
2188 nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
2189 }
2190 mu_.store(nv, std::memory_order_release); // release spinlock
2191 // can release with a store because there were waiters
2192 break;
2193 }
2194
2195 // Either we didn't search before, or we marked the queue
2196 // as "maybe_unlocking" and no one else should have changed it.
2197 ABSL_RAW_CHECK(old_h == nullptr || h->maybe_unlocking,
2198 "Mutex queue changed beneath us");
2199
2200 // The lock is becoming free, and there's a waiter
2201 if (old_h != nullptr &&
2202 !old_h->may_skip) { // we used old_h as a terminator
2203 old_h->may_skip = true; // allow old_h to skip once more
2204 ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
2205 if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) {
2206 old_h->skip = old_h->next; // old_h not head & can skip to successor
2207 }
2208 }
2209 if (h->next->waitp->how == kExclusive &&
2210 Condition::GuaranteedEqual(h->next->waitp->cond, nullptr)) {
2211 // easy case: writer with no condition; no need to search
2212 pw = h; // wake w, the successor of h (=pw)
2213 w = h->next;
2214 w->wake = true;
2215 // We are waking up a writer. This writer may be racing against
2216 // an already awake reader for the lock. We want the
2217 // writer to usually win this race,
2218 // because if it doesn't, we can potentially keep taking a reader
2219 // perpetually and writers will starve. Worse than
2220 // that, this can also starve other readers if kMuWrWait gets set
2221 // later.
2222 wr_wait = kMuWrWait;
2223 } else if (w != nullptr && (w->waitp->how == kExclusive || h == old_h)) {
2224 // we found a waiter w to wake on a previous iteration and either it's
2225 // a writer, or we've searched the entire list so we have all the
2226 // readers.
2227 if (pw == nullptr) { // if w's predecessor is unknown, it must be h
2228 pw = h;
2229 }
2230 } else {
2231 // At this point we don't know all the waiters to wake, and the first
2232 // waiter has a condition or is a reader. We avoid searching over
2233 // waiters we've searched on previous iterations by starting at
2234 // old_h if it's set. If old_h==h, there's no one to wakeup at all.
2235 if (old_h == h) { // we've searched before, and nothing's new
2236 // so there's no one to wake.
2237 intptr_t nv = (v & ~(kMuReader | kMuWriter | kMuWrWait));
2238 h->readers = 0;
2239 h->maybe_unlocking = false; // finished unlocking
2240 if (waitp != nullptr) { // we must queue ourselves and sleep
2241 PerThreadSynch* new_h = Enqueue(h, waitp, v, kMuIsCond);
2242 nv &= kMuLow;
2243 if (new_h != nullptr) {
2244 nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
2245 } // else new_h could be nullptr if we queued ourselves on a
2246 // CondVar
2247 }
2248 // release spinlock & lock
2249 // can release with a store because there were waiters
2250 mu_.store(nv, std::memory_order_release);
2251 break;
2252 }
2253
2254 // set up to walk the list
2255 PerThreadSynch* w_walk; // current waiter during list walk
2256 PerThreadSynch* pw_walk; // previous waiter during list walk
2257 if (old_h != nullptr) { // we've searched up to old_h before
2258 pw_walk = old_h;
2259 w_walk = old_h->next;
2260 } else { // no prior search, start at beginning
2261 pw_walk =
2262 nullptr; // h->next's predecessor may change; don't record it
2263 w_walk = h->next;
2264 }
2265
2266 h->may_skip = false; // ensure we never skip past h in future searches
2267 // even if other waiters are queued after it.
2268 ABSL_RAW_CHECK(h->skip == nullptr, "illegal skip from head");
2269
2270 h->maybe_unlocking = true; // we're about to scan the waiter list
2271 // without the spinlock held.
2272 // Enqueue must be conservative about
2273 // priority queuing.
2274
2275 // We must release the spinlock to evaluate the conditions.
2276 mu_.store(v, std::memory_order_release); // release just spinlock
2277 // can release with a store because there were waiters
2278
2279 // h is the last waiter queued, and w_walk the first unsearched waiter.
2280 // Without the spinlock, the locations mu_ and h->next may now change
2281 // underneath us, but since we hold the lock itself, the only legal
2282 // change is to add waiters between h and w_walk. Therefore, it's safe
2283 // to walk the path from w_walk to h inclusive. (TryRemove() can remove
2284 // a waiter anywhere, but it acquires both the spinlock and the Mutex)
2285
2286 old_h = h; // remember we searched to here
2287
2288 // Walk the path up to and including h, looking for waiters we can wake.
2289 while (pw_walk != h) {
2290 w_walk->wake = false;
2291 if (w_walk->waitp->cond ==
2292 nullptr || // no condition => vacuously true OR
2293 (w_walk->waitp->cond != known_false &&
2294 // this thread's condition is not known false, AND
2295 // is in fact true
2296 EvalConditionIgnored(this, w_walk->waitp->cond))) {
2297 if (w == nullptr) {
2298 w_walk->wake = true; // can wake this waiter
2299 w = w_walk;
2300 pw = pw_walk;
2301 if (w_walk->waitp->how == kExclusive) {
2302 wr_wait = kMuWrWait;
2303 break; // bail if waking this writer
2304 }
2305 } else if (w_walk->waitp->how == kShared) { // wake if a reader
2306 w_walk->wake = true;
2307 } else { // writer with true condition
2308 wr_wait = kMuWrWait;
2309 }
2310 } else { // can't wake; condition false
2311 known_false = w_walk->waitp->cond; // remember last false condition
2312 }
2313 if (w_walk->wake) { // we're waking reader w_walk
2314 pw_walk = w_walk; // don't skip similar waiters
2315 } else { // not waking; skip as much as possible
2316 pw_walk = Skip(w_walk);
2317 }
2318 // If pw_walk == h, then the load of pw_walk->next can race with a
2319 // concurrent write in Enqueue(). However, in that case
2320 // we do not need to do the load, because we will bail out
2321 // of the loop anyway.
2322 if (pw_walk != h) {
2323 w_walk = pw_walk->next;
2324 }
2325 }
2326
2327 continue; // restart for(;;)-loop to wakeup w or to find more waiters
2328 }
2329 ABSL_RAW_CHECK(pw->next == w, "pw not w's predecessor");
2330 // The first (and perhaps only) waiter we've chosen to wake is w, whose
2331 // predecessor is pw. If w is a reader, we must wake all the other
2332 // waiters with wake==true as well. We may also need to queue
2333 // ourselves if waitp != null. The spinlock and the lock are still
2334 // held.
2335
2336 // This traverses the list in [ pw->next, h ], where h is the head,
2337 // removing all elements with wake==true and placing them in the
2338 // singly-linked list wake_list. Returns the new head.
2339 h = DequeueAllWakeable(h, pw, &wake_list);
2340
2341 intptr_t nv = (v & kMuEvent) | kMuDesig;
2342 // assume no waiters left,
2343 // set kMuDesig for INV1a
2344
2345 if (waitp != nullptr) { // we must queue ourselves and sleep
2346 h = Enqueue(h, waitp, v, kMuIsCond);
2347 // h is new last waiter; could be null if we queued ourselves on a
2348 // CondVar
2349 }
2350
2351 ABSL_RAW_CHECK(wake_list != kPerThreadSynchNull,
2352 "unexpected empty wake list");
2353
2354 if (h != nullptr) { // there are waiters left
2355 h->readers = 0;
2356 h->maybe_unlocking = false; // finished unlocking
2357 nv |= wr_wait | kMuWait | reinterpret_cast<intptr_t>(h);
2358 }
2359
2360 // release both spinlock & lock
2361 // can release with a store because there were waiters
2362 mu_.store(nv, std::memory_order_release);
2363 break; // out of for(;;)-loop
2364 }
2365 // aggressive here; no one can proceed till we do
2366 c = synchronization_internal::MutexDelay(c, AGGRESSIVE);
2367 } // end of for(;;)-loop
2368
2369 if (wake_list != kPerThreadSynchNull) {
2370 int64_t total_wait_cycles = 0;
2371 int64_t max_wait_cycles = 0;
2372 int64_t now = CycleClock::Now();
2373 do {
2374 // Profile lock contention events only if the waiter was trying to acquire
2375 // the lock, not waiting on a condition variable or Condition.
2376 if (!wake_list->cond_waiter) {
2377 int64_t cycles_waited =
2378 (now - wake_list->waitp->contention_start_cycles);
2379 total_wait_cycles += cycles_waited;
2380 if (max_wait_cycles == 0) max_wait_cycles = cycles_waited;
2381 wake_list->waitp->contention_start_cycles = now;
2382 wake_list->waitp->should_submit_contention_data = true;
2383 }
2384 wake_list = Wakeup(wake_list); // wake waiters
2385 } while (wake_list != kPerThreadSynchNull);
2386 if (total_wait_cycles > 0) {
2387 mutex_tracer("slow release", this, total_wait_cycles);
2388 ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
2389 submit_profile_data(total_wait_cycles);
2390 ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
2391 }
2392 }
2393 }
2394
2395 // Used by CondVar implementation to reacquire mutex after waking from
2396 // condition variable. This routine is used instead of Lock() because the
2397 // waiting thread may have been moved from the condition variable queue to the
2398 // mutex queue without a wakeup, by Fer(). In that case, when the thread is
2399 // finally woken, the woken thread will believe it has been woken from the
2400 // condition variable (i.e. its PC will be in the CondVar code), when
2401 // in fact it has just been woken from the mutex. Thus, it must enter the slow
2402 // path of the mutex in the same state as if it had just woken from the mutex.
2403 // That is, it must ensure that kMuDesig is cleared (INV1b).
2404 void Mutex::Trans(MuHow how) {
2405 this->LockSlow(how, nullptr, kMuHasBlocked | kMuIsCond);
2406 }
2407
2408 // Used by CondVar implementation to effectively wake thread w from the
2409 // condition variable. If this mutex is free, we simply wake the thread.
2410 // It will later acquire the mutex with high probability. Otherwise, we
2411 // enqueue thread w on this mutex.
2412 void Mutex::Fer(PerThreadSynch* w) {
2413 SchedulingGuard::ScopedDisable disable_rescheduling;
2414 int c = 0;
2415 ABSL_RAW_CHECK(w->waitp->cond == nullptr,
2416 "Mutex::Fer while waiting on Condition");
2417 ABSL_RAW_CHECK(!w->waitp->timeout.has_timeout(),
2418 "Mutex::Fer while in timed wait");
2419 ABSL_RAW_CHECK(w->waitp->cv_word == nullptr,
2420 "Mutex::Fer with pending CondVar queueing");
2421 for (;;) {
2422 intptr_t v = mu_.load(std::memory_order_relaxed);
2423 // Note: must not queue if the mutex is unlocked (nobody will wake it).
2424 // For example, we can have only kMuWait (conditional) or maybe
2425 // kMuWait|kMuWrWait.
2426 // conflicting != 0 implies that the waking thread cannot currently take
2427 // the mutex, which in turn implies that someone else has it and can wake
2428 // us if we queue.
2429 const intptr_t conflicting =
2430 kMuWriter | (w->waitp->how == kShared ? 0 : kMuReader);
2431 if ((v & conflicting) == 0) {
2432 w->next = nullptr;
2433 w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
2434 IncrementSynchSem(this, w);
2435 return;
2436 } else {
2437 if ((v & (kMuSpin | kMuWait)) == 0) { // no waiters
2438 // This thread tries to become the one and only waiter.
2439 PerThreadSynch* new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
2440 ABSL_RAW_CHECK(new_h != nullptr,
2441 "Enqueue failed"); // we must queue ourselves
2442 if (mu_.compare_exchange_strong(
2443 v, reinterpret_cast<intptr_t>(new_h) | (v & kMuLow) | kMuWait,
2444 std::memory_order_release, std::memory_order_relaxed)) {
2445 return;
2446 }
2447 } else if ((v & kMuSpin) == 0 &&
2448 mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) {
2449 PerThreadSynch* h = GetPerThreadSynch(v);
2450 PerThreadSynch* new_h = Enqueue(h, w->waitp, v, kMuIsCond);
2451 ABSL_RAW_CHECK(new_h != nullptr,
2452 "Enqueue failed"); // we must queue ourselves
2453 do {
2454 v = mu_.load(std::memory_order_relaxed);
2455 } while (!mu_.compare_exchange_weak(
2456 v,
2457 (v & kMuLow & ~kMuSpin) | kMuWait |
2458 reinterpret_cast<intptr_t>(new_h),
2459 std::memory_order_release, std::memory_order_relaxed));
2460 return;
2461 }
2462 }
2463 c = synchronization_internal::MutexDelay(c, GENTLE);
2464 }
2465 }
2466
2467 void Mutex::AssertHeld() const {
2468 if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {
2469 SynchEvent* e = GetSynchEvent(this);
2470 ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s",
2471 static_cast<const void*>(this), (e == nullptr ? "" : e->name));
2472 }
2473 }
2474
2475 void Mutex::AssertReaderHeld() const {
2476 if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) {
2477 SynchEvent* e = GetSynchEvent(this);
2478 ABSL_RAW_LOG(FATAL,
2479 "thread should hold at least a read lock on Mutex %p %s",
2480 static_cast<const void*>(this), (e == nullptr ? "" : e->name));
2481 }
2482 }
2483
2484 // -------------------------------- condition variables
2485 static const intptr_t kCvSpin = 0x0001L; // spinlock protects waiter list
2486 static const intptr_t kCvEvent = 0x0002L; // record events
2487
2488 static const intptr_t kCvLow = 0x0003L; // low order bits of CV
2489
2490 // Hack to make constant values available to gdb pretty printer
2491 enum {
2492 kGdbCvSpin = kCvSpin,
2493 kGdbCvEvent = kCvEvent,
2494 kGdbCvLow = kCvLow,
2495 };
2496
2497 static_assert(PerThreadSynch::kAlignment > kCvLow,
2498 "PerThreadSynch::kAlignment must be greater than kCvLow");
2499
2500 void CondVar::EnableDebugLog(const char* name) {
2501 SynchEvent* e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
2502 e->log = true;
2503 UnrefSynchEvent(e);
2504 }
2505
2506 CondVar::~CondVar() {
2507 if ((cv_.load(std::memory_order_relaxed) & kCvEvent) != 0) {
2508 ForgetSynchEvent(&this->cv_, kCvEvent, kCvSpin);
2509 }
2510 }
2511
2512 // Remove thread s from the list of waiters on this condition variable.
2513 void CondVar::Remove(PerThreadSynch* s) {
2514 SchedulingGuard::ScopedDisable disable_rescheduling;
2515 intptr_t v;
2516 int c = 0;
2517 for (v = cv_.load(std::memory_order_relaxed);;
2518 v = cv_.load(std::memory_order_relaxed)) {
2519 if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
2520 cv_.compare_exchange_strong(v, v | kCvSpin, std::memory_order_acquire,
2521 std::memory_order_relaxed)) {
2522 PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
2523 if (h != nullptr) {
2524 PerThreadSynch* w = h;
2525 while (w->next != s && w->next != h) { // search for thread
2526 w = w->next;
2527 }
2528 if (w->next == s) { // found thread; remove it
2529 w->next = s->next;
2530 if (h == s) {
2531 h = (w == s) ? nullptr : w;
2532 }
2533 s->next = nullptr;
2534 s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
2535 }
2536 }
2537 // release spinlock
2538 cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
2539 std::memory_order_release);
2540 return;
2541 } else {
2542 // try again after a delay
2543 c = synchronization_internal::MutexDelay(c, GENTLE);
2544 }
2545 }
2546 }
2547
2548 // Queue thread waitp->thread on condition variable word cv_word using
2549 // wait parameters waitp.
2550 // We split this into a separate routine, rather than simply doing it as part
2551 // of WaitCommon(). If we were to queue ourselves on the condition variable
2552 // before calling Mutex::UnlockSlow(), the Mutex code might be re-entered (via
2553 // the logging code, or via a Condition function) and might potentially attempt
2554 // to block this thread. That would be a problem if the thread were already on
2555 // a condition variable waiter queue. Thus, we use the waitp->cv_word to tell
2556 // the unlock code to call CondVarEnqueue() to queue the thread on the condition
2557 // variable queue just before the mutex is to be unlocked, and (most
2558 // importantly) after any call to an external routine that might re-enter the
2559 // mutex code.
2560 static void CondVarEnqueue(SynchWaitParams* waitp) {
2561 // This thread might be transferred to the Mutex queue by Fer() when
2562 // we are woken. To make sure that is what happens, Enqueue() doesn't
2563 // call CondVarEnqueue() again but instead uses its normal code. We
2564 // must do this before we queue ourselves so that cv_word will be null
2565 // when seen by the dequeuer, who may wish immediately to requeue
2566 // this thread on another queue.
2567 std::atomic<intptr_t>* cv_word = waitp->cv_word;
2568 waitp->cv_word = nullptr;
2569
2570 intptr_t v = cv_word->load(std::memory_order_relaxed);
2571 int c = 0;
2572 while ((v & kCvSpin) != 0 || // acquire spinlock
2573 !cv_word->compare_exchange_weak(v, v | kCvSpin,
2574 std::memory_order_acquire,
2575 std::memory_order_relaxed)) {
2576 c = synchronization_internal::MutexDelay(c, GENTLE);
2577 v = cv_word->load(std::memory_order_relaxed);
2578 }
2579 ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
2580 waitp->thread->waitp = waitp; // prepare ourselves for waiting
2581 PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
2582 if (h == nullptr) { // add this thread to waiter list
2583 waitp->thread->next = waitp->thread;
2584 } else {
2585 waitp->thread->next = h->next;
2586 h->next = waitp->thread;
2587 }
2588 waitp->thread->state.store(PerThreadSynch::kQueued,
2589 std::memory_order_relaxed);
2590 cv_word->store((v & kCvEvent) | reinterpret_cast<intptr_t>(waitp->thread),
2591 std::memory_order_release);
2592 }
2593
2594 bool CondVar::WaitCommon(Mutex* mutex, KernelTimeout t) {
2595 bool rc = false; // return value; true iff we timed out
2596
2597 intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed);
2598 Mutex::MuHow mutex_how = ((mutex_v & kMuWriter) != 0) ? kExclusive : kShared;
2599 ABSL_TSAN_MUTEX_PRE_UNLOCK(mutex, TsanFlags(mutex_how));
2600
2601 // maybe trace this call
2602 intptr_t v = cv_.load(std::memory_order_relaxed);
2603 cond_var_tracer("Wait", this);
2604 if ((v & kCvEvent) != 0) {
2605 PostSynchEvent(this, SYNCH_EV_WAIT);
2606 }
2607
2608 // Release mu and wait on condition variable.
2609 SynchWaitParams waitp(mutex_how, nullptr, t, mutex,
2610 Synch_GetPerThreadAnnotated(mutex), &cv_);
2611 // UnlockSlow() will call CondVarEnqueue() just before releasing the
2612 // Mutex, thus queuing this thread on the condition variable. See
2613 // CondVarEnqueue() for the reasons.
2614 mutex->UnlockSlow(&waitp);
2615
2616 // wait for signal
2617 while (waitp.thread->state.load(std::memory_order_acquire) ==
2618 PerThreadSynch::kQueued) {
2619 if (!Mutex::DecrementSynchSem(mutex, waitp.thread, t)) {
2620 // DecrementSynchSem returned due to timeout.
2621 // Now we will either (1) remove ourselves from the wait list in Remove
2622 // below, in which case Remove will set thread.state = kAvailable and
2623 // we will not call DecrementSynchSem again; or (2) Signal/SignalAll
2624 // has removed us concurrently and is calling Wakeup, which will set
2625 // thread.state = kAvailable and post to the semaphore.
2626 // It's important to reset the timeout for case (2) because otherwise
2627 // we can live-lock in this loop since DecrementSynchSem will always
2628 // return immediately due to timeout, but Signal/SignalAll may not have
2629 // set thread.state = kAvailable yet (and may not be scheduled
2630 // due to thread priorities or other scheduler artifacts).
2631 // Note this could also be resolved if Signal/SignalAll would set
2632 // thread.state = kAvailable while holding the wait list spin lock.
2633 // But this can't be easily done for SignalAll since it grabs the whole
2634 // wait list with a single compare-exchange and does not really grab
2635 // the spin lock.
2636 t = KernelTimeout::Never();
2637 this->Remove(waitp.thread);
2638 rc = true;
2639 }
2640 }
2641
2642 ABSL_RAW_CHECK(waitp.thread->waitp != nullptr, "not waiting when should be");
2643 waitp.thread->waitp = nullptr; // cleanup
2644
2645 // maybe trace this call
2646 cond_var_tracer("Unwait", this);
2647 if ((v & kCvEvent) != 0) {
2648 PostSynchEvent(this, SYNCH_EV_WAIT_RETURNING);
2649 }
2650
2651 // From synchronization point of view Wait is unlock of the mutex followed
2652 // by lock of the mutex. We've annotated start of unlock in the beginning
2653 // of the function. Now, finish unlock and annotate lock of the mutex.
2654 // (Trans is effectively lock).
2655 ABSL_TSAN_MUTEX_POST_UNLOCK(mutex, TsanFlags(mutex_how));
2656 ABSL_TSAN_MUTEX_PRE_LOCK(mutex, TsanFlags(mutex_how));
2657 mutex->Trans(mutex_how); // Reacquire mutex
2658 ABSL_TSAN_MUTEX_POST_LOCK(mutex, TsanFlags(mutex_how), 0);
2659 return rc;
2660 }
2661
2662 bool CondVar::WaitWithTimeout(Mutex* mu, absl::Duration timeout) {
2663 return WaitCommon(mu, KernelTimeout(timeout));
2664 }
2665
2666 bool CondVar::WaitWithDeadline(Mutex* mu, absl::Time deadline) {
2667 return WaitCommon(mu, KernelTimeout(deadline));
2668 }
2669
2670 void CondVar::Wait(Mutex* mu) { WaitCommon(mu, KernelTimeout::Never()); }
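// Illustrative CondVar usage sketch (for exposition only; `mu`, `cv`, and
// `ready` are hypothetical names):
//
//   absl::Mutex mu;
//   absl::CondVar cv;
//   bool ready = false;  // guarded by mu
//
//   // Waiter:
//   mu.Lock();
//   while (!ready) cv.Wait(&mu);  // atomically releases mu while waiting
//   mu.Unlock();
//
//   // Signaller:
//   mu.Lock();
//   ready = true;
//   cv.Signal();
//   mu.Unlock();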
2671
2672 // Wake thread w.
2673 // If w was doing a timed wait, or has no associated Mutex (w->waitp->cvmu is
2674 // null), wake it directly via its per-thread semaphore.
2675 // Otherwise, w is transferred to its Mutex via Mutex::Fer().
2676 void CondVar::Wakeup(PerThreadSynch* w) {
2677 if (w->waitp->timeout.has_timeout() || w->waitp->cvmu == nullptr) {
2678 // The waiting thread only needs to observe "w->state == kAvailable" to be
2679 // released, so we must cache "cvmu" before clearing "next".
2680 Mutex* mu = w->waitp->cvmu;
2681 w->next = nullptr;
2682 w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
2683 Mutex::IncrementSynchSem(mu, w);
2684 } else {
2685 w->waitp->cvmu->Fer(w);
2686 }
2687 }

void CondVar::Signal() {
  SchedulingGuard::ScopedDisable disable_rescheduling;
  ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
  intptr_t v;
  int c = 0;
  for (v = cv_.load(std::memory_order_relaxed); v != 0;
       v = cv_.load(std::memory_order_relaxed)) {
    if ((v & kCvSpin) == 0 &&  // attempt to acquire spinlock
        cv_.compare_exchange_strong(v, v | kCvSpin, std::memory_order_acquire,
                                    std::memory_order_relaxed)) {
      PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
      PerThreadSynch* w = nullptr;
      if (h != nullptr) {  // remove first waiter
        w = h->next;
        if (w == h) {
          h = nullptr;
        } else {
          h->next = w->next;
        }
      }
      // release spinlock
      cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
                std::memory_order_release);
      if (w != nullptr) {
        CondVar::Wakeup(w);  // wake waiter, if there was one
        cond_var_tracer("Signal wakeup", this);
      }
      if ((v & kCvEvent) != 0) {
        PostSynchEvent(this, SYNCH_EV_SIGNAL);
      }
      ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
      return;
    } else {
      c = synchronization_internal::MutexDelay(c, GENTLE);
    }
  }
  ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
}

void CondVar::SignalAll() {
  ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
  intptr_t v;
  int c = 0;
  for (v = cv_.load(std::memory_order_relaxed); v != 0;
       v = cv_.load(std::memory_order_relaxed)) {
    // Empty the waiter list if the spinlock is free. We do this with a single
    // compare-and-swap that sets the list to empty; we then have the entire
    // list in our hands, and it cannot change, since we grabbed it while no
    // one held the lock.
    if ((v & kCvSpin) == 0 &&
        cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire,
                                    std::memory_order_relaxed)) {
      PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
      if (h != nullptr) {
        PerThreadSynch* w;
        PerThreadSynch* n = h->next;
        do {  // for every thread, wake it up
          w = n;
          n = n->next;
          CondVar::Wakeup(w);
        } while (w != h);
        cond_var_tracer("SignalAll wakeup", this);
      }
      if ((v & kCvEvent) != 0) {
        PostSynchEvent(this, SYNCH_EV_SIGNALALL);
      }
      ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
      return;
    } else {
      // try again after a delay
      c = synchronization_internal::MutexDelay(c, GENTLE);
    }
  }
  ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
}

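// Signal() wakes at most one waiter; SignalAll() detaches the entire waiter
// list and wakes every thread on it. A minimal sketch of the usual pattern
// (the names `mu`, `cv`, `done`, `MarkDone`, and `WaitForDone` are
// illustrative assumptions, not part of this file):
//
//   absl::Mutex mu;
//   absl::CondVar cv;
//   bool done = false;  // guarded by mu
//
//   void MarkDone() {
//     absl::MutexLock lock(&mu);
//     done = true;
//     cv.SignalAll();  // release every thread blocked in cv.Wait(&mu)
//   }
//
//   void WaitForDone() {
//     absl::MutexLock lock(&mu);
//     while (!done) {
//       cv.Wait(&mu);  // atomically unlocks mu, sleeps, and relocks mu
//     }
//   }
//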
void ReleasableMutexLock::Release() {
  ABSL_RAW_CHECK(this->mu_ != nullptr,
                 "ReleasableMutexLock::Release may only be called once");
  this->mu_->Unlock();
  this->mu_ = nullptr;
}

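// A minimal usage sketch for ReleasableMutexLock (the function and variable
// names are illustrative assumptions): the lock may be dropped early with
// Release(), and the destructor unlocks only if Release() was never called.
//
//   absl::Mutex mu;
//   int counter = 0;  // guarded by mu
//
//   int IncrementAndNotify() {
//     absl::ReleasableMutexLock lock(&mu);
//     const int value = ++counter;
//     lock.Release();          // unlock before doing slow, lock-free work
//     NotifyObservers(value);  // hypothetical call made without holding mu
//     return value;
//   }
//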
#ifdef ABSL_HAVE_THREAD_SANITIZER
extern "C" void __tsan_read1(void* addr);
#else
#define __tsan_read1(addr)  // do nothing if TSan not enabled
#endif

// A function that just returns its argument, dereferenced
static bool Dereference(void* arg) {
  // ThreadSanitizer does not instrument this file for memory accesses.
  // This function dereferences a user variable that can participate
  // in a data race, so we need to manually tell TSan about this memory access.
  __tsan_read1(arg);
  return *(static_cast<bool*>(arg));
}

ABSL_CONST_INIT const Condition Condition::kTrue;

Condition::Condition(bool (*func)(void*), void* arg)
    : eval_(&CallVoidPtrFunction), arg_(arg) {
  static_assert(sizeof(func) <= sizeof(callback_),
                "An overlarge function pointer was passed to Condition.");
  StoreCallback(func);
}

bool Condition::CallVoidPtrFunction(const Condition* c) {
  using FunctionPointer = bool (*)(void*);
  FunctionPointer function_pointer;
  std::memcpy(&function_pointer, c->callback_, sizeof(function_pointer));
  return (*function_pointer)(c->arg_);
}

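// A minimal sketch of constructing a Condition from a bool(void*) function
// pointer (the names `WorkQueue`, `HasWork`, and `WaitForWork` are
// illustrative assumptions). CallVoidPtrFunction() above is what ends up
// being invoked each time such a Condition is evaluated.
//
//   struct WorkQueue {
//     int pending = 0;  // guarded by mu
//   };
//
//   bool HasWork(void* arg) {
//     return static_cast<WorkQueue*>(arg)->pending > 0;
//   }
//
//   void WaitForWork(absl::Mutex* mu, WorkQueue* q) {
//     mu->Lock();
//     mu->Await(absl::Condition(HasWork, q));  // sleeps until work arrives
//     mu->Unlock();
//   }
//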
Condition::Condition(const bool* cond)
    : eval_(&CallVoidPtrFunction),
      // const_cast is safe since Dereference does not modify arg
      arg_(const_cast<bool*>(cond)) {
  using FunctionPointer = bool (*)(void*);
  const FunctionPointer dereference = Dereference;
  StoreCallback(dereference);
}

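// A minimal sketch of the bool* form (the names `mu`, `stopped`, and
// `BlockUntilStopped` are illustrative assumptions): the Condition simply
// dereferences the flag via Dereference() above each time it is evaluated.
//
//   absl::Mutex mu;
//   bool stopped = false;  // guarded by mu
//
//   void BlockUntilStopped() {
//     absl::MutexLock lock(&mu);
//     mu.Await(absl::Condition(&stopped));  // returns once stopped == true
//   }
//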
bool Condition::Eval() const {
  // eval_ == null for kTrue
  return (this->eval_ == nullptr) || (*this->eval_)(this);
}

bool Condition::GuaranteedEqual(const Condition* a, const Condition* b) {
  // kTrue logic.
  if (a == nullptr || a->eval_ == nullptr) {
    return b == nullptr || b->eval_ == nullptr;
  } else if (b == nullptr || b->eval_ == nullptr) {
    return false;
  }
  // Check equality of the representative fields.
  return a->eval_ == b->eval_ && a->arg_ == b->arg_ &&
         !memcmp(a->callback_, b->callback_, sizeof(a->callback_));
}

ABSL_NAMESPACE_END
}  // namespace absl