1 // Copyright 2017 The Abseil Authors.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //      https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 #include "absl/synchronization/mutex.h"
16 
17 #ifdef _WIN32
18 #include <windows.h>
19 #ifdef ERROR
20 #undef ERROR
21 #endif
22 #else
23 #include <fcntl.h>
24 #include <pthread.h>
25 #include <sched.h>
26 #include <sys/time.h>
27 #endif
28 
29 #include <assert.h>
30 #include <errno.h>
31 #include <stdio.h>
32 #include <stdlib.h>
33 #include <string.h>
34 #include <time.h>
35 
36 #include <algorithm>
37 #include <atomic>
38 #include <cinttypes>
39 #include <thread>  // NOLINT(build/c++11)
40 
41 #include "absl/base/attributes.h"
42 #include "absl/base/call_once.h"
43 #include "absl/base/config.h"
44 #include "absl/base/dynamic_annotations.h"
45 #include "absl/base/internal/atomic_hook.h"
46 #include "absl/base/internal/cycleclock.h"
47 #include "absl/base/internal/hide_ptr.h"
48 #include "absl/base/internal/low_level_alloc.h"
49 #include "absl/base/internal/raw_logging.h"
50 #include "absl/base/internal/spinlock.h"
51 #include "absl/base/internal/sysinfo.h"
52 #include "absl/base/internal/thread_identity.h"
53 #include "absl/base/internal/tsan_mutex_interface.h"
54 #include "absl/base/port.h"
55 #include "absl/debugging/stacktrace.h"
56 #include "absl/debugging/symbolize.h"
57 #include "absl/synchronization/internal/graphcycles.h"
58 #include "absl/synchronization/internal/per_thread_sem.h"
59 #include "absl/time/time.h"
60 
61 using absl::base_internal::CurrentThreadIdentityIfPresent;
62 using absl::base_internal::PerThreadSynch;
63 using absl::base_internal::SchedulingGuard;
64 using absl::base_internal::ThreadIdentity;
65 using absl::synchronization_internal::GetOrCreateCurrentThreadIdentity;
66 using absl::synchronization_internal::GraphCycles;
67 using absl::synchronization_internal::GraphId;
68 using absl::synchronization_internal::InvalidGraphId;
69 using absl::synchronization_internal::KernelTimeout;
70 using absl::synchronization_internal::PerThreadSem;
71 
72 extern "C" {
73 ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)() {
74   std::this_thread::yield();
75 }
76 }  // extern "C"
77 
78 namespace absl {
79 ABSL_NAMESPACE_BEGIN
80 
81 namespace {
82 
83 #if defined(ABSL_HAVE_THREAD_SANITIZER)
84 constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kIgnore;
85 #else
86 constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kAbort;
87 #endif
88 
89 ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
90     kDeadlockDetectionDefault);
91 ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
92 
93 ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
94 absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
95     submit_profile_data;
96 ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)(
97     const char *msg, const void *obj, int64_t wait_cycles)>
98     mutex_tracer;
99 ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
100     absl::base_internal::AtomicHook<void (*)(const char *msg, const void *cv)>
101         cond_var_tracer;
102 ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<
103     bool (*)(const void *pc, char *out, int out_size)>
104     symbolizer(absl::Symbolize);
105 
106 }  // namespace
107 
108 static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
109                                           bool locking, bool trylock,
110                                           bool read_lock);
111 
112 void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)) {
113   submit_profile_data.Store(fn);
114 }
115 
116 void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
117                                     int64_t wait_cycles)) {
118   mutex_tracer.Store(fn);
119 }
120 
121 void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv)) {
122   cond_var_tracer.Store(fn);
123 }
124 
125 void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
126   symbolizer.Store(fn);
127 }
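// Editorial usage sketch (not part of the original file): a process that
// wants to observe Mutex contention can install hooks through the Register*
// entry points above, typically early in main().  The callback names below
// are hypothetical; only the Register* functions are real API.
//
//   void MyProfiler(int64_t wait_cycles) { /* record wait time */ }
//   void MyTracer(const char *msg, const void *obj, int64_t wait_cycles) {
//     /* log the traced event */
//   }
//
//   absl::RegisterMutexProfiler(MyProfiler);
//   absl::RegisterMutexTracer(MyTracer);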
128 
129 namespace {
130 // Represents the strategy for spin and yield.
131 // See the comment in GetMutexGlobals() for more information.
132 enum DelayMode { AGGRESSIVE, GENTLE };
133 
134 struct ABSL_CACHELINE_ALIGNED MutexGlobals {
135   absl::once_flag once;
136   int spinloop_iterations = 0;
137   int32_t mutex_sleep_limit[2] = {};
138 };
139 
140 const MutexGlobals &GetMutexGlobals() {
141   ABSL_CONST_INIT static MutexGlobals data;
142   absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
143     const int num_cpus = absl::base_internal::NumCPUs();
144     data.spinloop_iterations = num_cpus > 1 ? 1500 : 0;
145     // If this is a uniprocessor, only yield/sleep.  Otherwise, if the mode is
146     // aggressive then spin many times before yielding.  If the mode is
147     // gentle then spin only a few times before yielding.  Aggressive spinning
148     // is used to ensure that an Unlock() call, which must get the spin lock
149     // for any thread to make progress, gets it without undue delay.
150     if (num_cpus > 1) {
151       data.mutex_sleep_limit[AGGRESSIVE] = 5000;
152       data.mutex_sleep_limit[GENTLE] = 250;
153     } else {
154       data.mutex_sleep_limit[AGGRESSIVE] = 0;
155       data.mutex_sleep_limit[GENTLE] = 0;
156     }
157   });
158   return data;
159 }
160 }  // namespace
161 
162 namespace synchronization_internal {
163 // Returns the Mutex delay on iteration `c` depending on the given `mode`.
164 // The returned value should be used as `c` for the next call to `MutexDelay`.
165 int MutexDelay(int32_t c, int mode) {
166   const int32_t limit = GetMutexGlobals().mutex_sleep_limit[mode];
167   if (c < limit) {
168     // Spin.
169     c++;
170   } else {
171     SchedulingGuard::ScopedEnable enable_rescheduling;
172     ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
173     if (c == limit) {
174       // Yield once.
175       ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
176       c++;
177     } else {
178       // Then wait.
179       absl::SleepFor(absl::Microseconds(10));
180       c = 0;
181     }
182     ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
183   }
184   return c;
185 }
186 }  // namespace synchronization_internal
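// Editorial note (not part of the original file): within this file,
// MutexDelay() is threaded through retry loops, with `c` carrying the
// spin/yield/sleep state between iterations.  A minimal sketch of the caller
// pattern (TryStep() is hypothetical):
//
//   int c = 0;
//   while (!TryStep()) {
//     c = synchronization_internal::MutexDelay(c, GENTLE);
//   }
//
// Below the per-mode limit the call simply spins (increments c); at the
// limit it yields once; beyond it, it sleeps ~10us and resets c to zero.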
187 
188 // --------------------------Generic atomic ops
189 // Ensure that "(*pv & bits) == bits" by doing an atomic update of "*pv" to
190 // "*pv | bits" if necessary.  Wait until (*pv & wait_until_clear)==0
191 // before making any change.
192 // This is used to set flags in mutex and condition variable words.
193 static void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
194                           intptr_t wait_until_clear) {
195   intptr_t v;
196   do {
197     v = pv->load(std::memory_order_relaxed);
198   } while ((v & bits) != bits &&
199            ((v & wait_until_clear) != 0 ||
200             !pv->compare_exchange_weak(v, v | bits,
201                                        std::memory_order_release,
202                                        std::memory_order_relaxed)));
203 }
204 
205 // Ensure that "(*pv & bits) == 0" by doing an atomic update of "*pv" to
206 // "*pv & ~bits" if necessary.  Wait until (*pv & wait_until_clear)==0
207 // before making any change.
208 // This is used to unset flags in mutex and condition variable words.
209 static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
210                             intptr_t wait_until_clear) {
211   intptr_t v;
212   do {
213     v = pv->load(std::memory_order_relaxed);
214   } while ((v & bits) != 0 &&
215            ((v & wait_until_clear) != 0 ||
216             !pv->compare_exchange_weak(v, v & ~bits,
217                                        std::memory_order_release,
218                                        std::memory_order_relaxed)));
219 }
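// Editorial example (sketch, not part of the original file): later in this
// file these helpers are used with kMuEvent as "bits" and kMuSpin as
// "lockbit" when attaching or detaching a SynchEvent from a mutex word, e.g.
//
//   AtomicSetBits(&mu_, kMuEvent, kMuSpin);    // set kMuEvent once kMuSpin is clear
//   AtomicClearBits(&mu_, kMuEvent, kMuSpin);  // clear kMuEvent once kMuSpin is clear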
220 
221 //------------------------------------------------------------------
222 
223 // Data for doing deadlock detection.
224 ABSL_CONST_INIT static absl::base_internal::SpinLock deadlock_graph_mu(
225     absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
226 
227 // Graph used to detect deadlocks.
228 ABSL_CONST_INIT static GraphCycles *deadlock_graph
229     ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);
230 
231 //------------------------------------------------------------------
232 // An event mechanism for debugging mutex use.
233 // It also allows mutexes to be given names for those who can't handle
234 // addresses, and instead like to give their data structures names like
235 // "Henry", "Fido", or "Rupert IV, King of Yondavia".
236 
237 namespace {  // to prevent name pollution
238 enum {       // Mutex and CondVar events passed as "ev" to PostSynchEvent
239              // Mutex events
240   SYNCH_EV_TRYLOCK_SUCCESS,
241   SYNCH_EV_TRYLOCK_FAILED,
242   SYNCH_EV_READERTRYLOCK_SUCCESS,
243   SYNCH_EV_READERTRYLOCK_FAILED,
244   SYNCH_EV_LOCK,
245   SYNCH_EV_LOCK_RETURNING,
246   SYNCH_EV_READERLOCK,
247   SYNCH_EV_READERLOCK_RETURNING,
248   SYNCH_EV_UNLOCK,
249   SYNCH_EV_READERUNLOCK,
250 
251   // CondVar events
252   SYNCH_EV_WAIT,
253   SYNCH_EV_WAIT_RETURNING,
254   SYNCH_EV_SIGNAL,
255   SYNCH_EV_SIGNALALL,
256 };
257 
258 enum {                    // Event flags
259   SYNCH_F_R = 0x01,       // reader event
260   SYNCH_F_LCK = 0x02,     // PostSynchEvent called with mutex held
261   SYNCH_F_TRY = 0x04,     // TryLock or ReaderTryLock
262   SYNCH_F_UNLOCK = 0x08,  // Unlock or ReaderUnlock
263 
264   SYNCH_F_LCK_W = SYNCH_F_LCK,
265   SYNCH_F_LCK_R = SYNCH_F_LCK | SYNCH_F_R,
266 };
267 }  // anonymous namespace
268 
269 // Properties of the events.
270 static const struct {
271   int flags;
272   const char *msg;
273 } event_properties[] = {
274     {SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
275     {0, "TryLock failed "},
276     {SYNCH_F_LCK_R | SYNCH_F_TRY, "ReaderTryLock succeeded "},
277     {0, "ReaderTryLock failed "},
278     {0, "Lock blocking "},
279     {SYNCH_F_LCK_W, "Lock returning "},
280     {0, "ReaderLock blocking "},
281     {SYNCH_F_LCK_R, "ReaderLock returning "},
282     {SYNCH_F_LCK_W | SYNCH_F_UNLOCK, "Unlock "},
283     {SYNCH_F_LCK_R | SYNCH_F_UNLOCK, "ReaderUnlock "},
284     {0, "Wait on "},
285     {0, "Wait unblocked "},
286     {0, "Signal on "},
287     {0, "SignalAll on "},
288 };
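// Editorial note (not part of the original file): event_properties is
// indexed by the SYNCH_EV_* enum above, so, for instance,
//
//   event_properties[SYNCH_EV_TRYLOCK_SUCCESS].flags
//       == (SYNCH_F_LCK_W | SYNCH_F_TRY)   // and .msg is "TryLock succeeded "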
289 
290 ABSL_CONST_INIT static absl::base_internal::SpinLock synch_event_mu(
291     absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
292 
293 // Hash table size; should be prime > 2.
294 // Can't be too small, as it's used for deadlock detection information.
295 static constexpr uint32_t kNSynchEvent = 1031;
296 
297 static struct SynchEvent {     // this is a trivial hash table for the events
298   // struct is freed when refcount reaches 0
299   int refcount ABSL_GUARDED_BY(synch_event_mu);
300 
301   // buckets have linear, 0-terminated  chains
302   SynchEvent *next ABSL_GUARDED_BY(synch_event_mu);
303 
304   // Constant after initialization
305   uintptr_t masked_addr;  // object at this address is called "name"
306 
307   // No explicit synchronization used.  Instead we assume that the
308   // client who enables/disables invariants/logging on a Mutex does so
309   // while the Mutex is not being concurrently accessed by others.
310   void (*invariant)(void *arg);  // called on each event
311   void *arg;            // first arg to (*invariant)()
312   bool log;             // logging turned on
313 
314   // Constant after initialization
315   char name[1];         // actually longer---NUL-terminated string
316 } * synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
317 
318 // Ensure that the object at "addr" has a SynchEvent struct associated with it,
319 // set "bits" in the word there (waiting until lockbit is clear before doing
320 // so), and return a refcounted reference that will remain valid until
321 // UnrefSynchEvent() is called.  If a new SynchEvent is allocated,
322 // the string name is copied into it.
323 // When used with a mutex, the caller should also ensure that kMuEvent
324 // is set in the mutex word, and similarly for condition variables and kCVEvent.
325 static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
326                                     const char *name, intptr_t bits,
327                                     intptr_t lockbit) {
328   uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
329   SynchEvent *e;
330   // first look for an existing SynchEvent struct.
331   synch_event_mu.Lock();
332   for (e = synch_event[h];
333        e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
334        e = e->next) {
335   }
336   if (e == nullptr) {  // no SynchEvent struct found; make one.
337     if (name == nullptr) {
338       name = "";
339     }
340     size_t l = strlen(name);
341     e = reinterpret_cast<SynchEvent *>(
342         base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));
343     e->refcount = 2;    // one for return value, one for linked list
344     e->masked_addr = base_internal::HidePtr(addr);
345     e->invariant = nullptr;
346     e->arg = nullptr;
347     e->log = false;
348     strcpy(e->name, name);  // NOLINT(runtime/printf)
349     e->next = synch_event[h];
350     AtomicSetBits(addr, bits, lockbit);
351     synch_event[h] = e;
352   } else {
353     e->refcount++;      // for return value
354   }
355   synch_event_mu.Unlock();
356   return e;
357 }
358 
359 // Deallocate the SynchEvent *e, whose refcount has fallen to zero.
360 static void DeleteSynchEvent(SynchEvent *e) {
361   base_internal::LowLevelAlloc::Free(e);
362 }
363 
364 // Decrement the reference count of *e, or do nothing if e==null.
365 static void UnrefSynchEvent(SynchEvent *e) {
366   if (e != nullptr) {
367     synch_event_mu.Lock();
368     bool del = (--(e->refcount) == 0);
369     synch_event_mu.Unlock();
370     if (del) {
371       DeleteSynchEvent(e);
372     }
373   }
374 }
375 
376 // Forget the mapping from the object (Mutex or CondVar) at address addr
377 // to SynchEvent object, and clear "bits" in its word (waiting until lockbit
378 // is clear before doing so).
379 static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
380                              intptr_t lockbit) {
381   uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
382   SynchEvent **pe;
383   SynchEvent *e;
384   synch_event_mu.Lock();
385   for (pe = &synch_event[h];
386        (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
387        pe = &e->next) {
388   }
389   bool del = false;
390   if (e != nullptr) {
391     *pe = e->next;
392     del = (--(e->refcount) == 0);
393   }
394   AtomicClearBits(addr, bits, lockbit);
395   synch_event_mu.Unlock();
396   if (del) {
397     DeleteSynchEvent(e);
398   }
399 }
400 
401 // Return a refcounted reference to the SynchEvent of the object at address
402 // "addr", if any.  The pointer returned is valid until the UnrefSynchEvent() is
403 // called.
404 static SynchEvent *GetSynchEvent(const void *addr) {
405   uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
406   SynchEvent *e;
407   synch_event_mu.Lock();
408   for (e = synch_event[h];
409        e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
410        e = e->next) {
411   }
412   if (e != nullptr) {
413     e->refcount++;
414   }
415   synch_event_mu.Unlock();
416   return e;
417 }
418 
419 // Called when an event "ev" occurs on a Mutex or CondVar "obj"
420 // if event recording is on
421 static void PostSynchEvent(void *obj, int ev) {
422   SynchEvent *e = GetSynchEvent(obj);
423   // logging is on if event recording is on and either there's no event struct,
424   // or it explicitly says to log
425   if (e == nullptr || e->log) {
426     void *pcs[40];
427     int n = absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1);
428     // A buffer with enough space for the ASCII for all the PCs, even on a
429     // 64-bit machine.
430     char buffer[ABSL_ARRAYSIZE(pcs) * 24];
431     int pos = snprintf(buffer, sizeof (buffer), " @");
432     for (int i = 0; i != n; i++) {
433       pos += snprintf(&buffer[pos], sizeof (buffer) - pos, " %p", pcs[i]);
434     }
435     ABSL_RAW_LOG(INFO, "%s%p %s %s", event_properties[ev].msg, obj,
436                  (e == nullptr ? "" : e->name), buffer);
437   }
438   const int flags = event_properties[ev].flags;
439   if ((flags & SYNCH_F_LCK) != 0 && e != nullptr && e->invariant != nullptr) {
440     // Calling the invariant as is causes problems under ThreadSanitizer.
441     // We are currently inside of Mutex Lock/Unlock and are ignoring all
442     // memory accesses and synchronization. If the invariant transitively
443     // synchronizes something else and we ignore the synchronization, we will
444     // get false positive race reports later.
445     // Reuse EvalConditionAnnotated to properly call into user code.
446     struct local {
447       static bool pred(SynchEvent *ev) {
448         (*ev->invariant)(ev->arg);
449         return false;
450       }
451     };
452     Condition cond(&local::pred, e);
453     Mutex *mu = static_cast<Mutex *>(obj);
454     const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
455     const bool trylock = (flags & SYNCH_F_TRY) != 0;
456     const bool read_lock = (flags & SYNCH_F_R) != 0;
457     EvalConditionAnnotated(&cond, mu, locking, trylock, read_lock);
458   }
459   UnrefSynchEvent(e);
460 }
461 
462 //------------------------------------------------------------------
463 
464 // The SynchWaitParams struct encapsulates the way in which a thread is waiting:
465 // whether it has a timeout, the condition, exclusive/shared, and whether a
466 // condition variable wait has an associated Mutex (as opposed to another
467 // type of lock).  It also points to the PerThreadSynch struct of its thread.
468 // cv_word tells Enqueue() to enqueue on a CondVar using CondVarEnqueue().
469 //
470 // This structure is held on the stack rather than directly in
471 // PerThreadSynch because a thread can be waiting on multiple Mutexes if,
472 // while waiting on one Mutex, the implementation calls a client callback
473 // (such as a Condition function) that acquires another Mutex. We don't
474 // strictly need to allow this, but programmers become confused if we do not
475 // allow them to use functions such as LOG() within Condition functions.  The
476 // PerThreadSynch struct points at the most recent SynchWaitParams struct when
477 // the thread is on a Mutex's waiter queue.
478 struct SynchWaitParams {
479   SynchWaitParams(Mutex::MuHow how_arg, const Condition *cond_arg,
480                   KernelTimeout timeout_arg, Mutex *cvmu_arg,
481                   PerThreadSynch *thread_arg,
482                   std::atomic<intptr_t> *cv_word_arg)
483       : how(how_arg),
484         cond(cond_arg),
485         timeout(timeout_arg),
486         cvmu(cvmu_arg),
487         thread(thread_arg),
488         cv_word(cv_word_arg),
489         contention_start_cycles(base_internal::CycleClock::Now()) {}
490 
491   const Mutex::MuHow how;  // How this thread needs to wait.
492   const Condition *cond;  // The condition that this thread is waiting for.
493                           // In Mutex, this field is set to zero if a timeout
494                           // expires.
495   KernelTimeout timeout;  // timeout expiry---absolute time
496                           // In Mutex, this field is set to zero if a timeout
497                           // expires.
498   Mutex *const cvmu;      // used for transfer from cond var to mutex
499   PerThreadSynch *const thread;  // thread that is waiting
500 
501   // If not null, thread should be enqueued on the CondVar whose state
502   // word is cv_word instead of queueing normally on the Mutex.
503   std::atomic<intptr_t> *cv_word;
504 
505   int64_t contention_start_cycles;  // Time (in cycles) when this thread started
506                                     // to contend for the mutex.
507 };
508 
509 struct SynchLocksHeld {
510   int n;              // number of valid entries in locks[]
511   bool overflow;      // true iff we overflowed the array at some point
512   struct {
513     Mutex *mu;        // lock acquired
514     int32_t count;      // times acquired
515     GraphId id;       // deadlock_graph id of acquired lock
516   } locks[40];
517   // If a thread overfills the array during deadlock detection, we
518   // continue, discarding information as needed.  If no overflow has
519   // taken place, we can provide more error checking, such as
520   // detecting when a thread releases a lock it does not hold.
521 };
522 
523 // A sentinel value in lists that is not 0.
524 // A 0 value is used to mean "not on a list".
525 static PerThreadSynch *const kPerThreadSynchNull =
526   reinterpret_cast<PerThreadSynch *>(1);
527 
528 static SynchLocksHeld *LocksHeldAlloc() {
529   SynchLocksHeld *ret = reinterpret_cast<SynchLocksHeld *>(
530       base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld)));
531   ret->n = 0;
532   ret->overflow = false;
533   return ret;
534 }
535 
536 // Return the PerThreadSynch-struct for this thread.
537 static PerThreadSynch *Synch_GetPerThread() {
538   ThreadIdentity *identity = GetOrCreateCurrentThreadIdentity();
539   return &identity->per_thread_synch;
540 }
541 
542 static PerThreadSynch *Synch_GetPerThreadAnnotated(Mutex *mu) {
543   if (mu) {
544     ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
545   }
546   PerThreadSynch *w = Synch_GetPerThread();
547   if (mu) {
548     ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
549   }
550   return w;
551 }
552 
553 static SynchLocksHeld *Synch_GetAllLocks() {
554   PerThreadSynch *s = Synch_GetPerThread();
555   if (s->all_locks == nullptr) {
556     s->all_locks = LocksHeldAlloc();  // Freed by ReclaimThreadIdentity.
557   }
558   return s->all_locks;
559 }
560 
561 // Post on "w"'s associated PerThreadSem.
562 void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
563   if (mu) {
564     ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
565   }
566   PerThreadSem::Post(w->thread_identity());
567   if (mu) {
568     ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
569   }
570 }
571 
572 // Wait on "w"'s associated PerThreadSem; returns false if timeout expired.
573 bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) {
574   if (mu) {
575     ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
576   }
577   assert(w == Synch_GetPerThread());
578   static_cast<void>(w);
579   bool res = PerThreadSem::Wait(t);
580   if (mu) {
581     ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
582   }
583   return res;
584 }
585 
586 // We're in a fatal signal handler that hopes to use Mutex and to get
587 // lucky by not deadlocking.  We try to improve its chances of success
588 // by effectively disabling some of the consistency checks.  This will
589 // prevent certain ABSL_RAW_CHECK() statements from being triggered when
590 // re-entry is detected.  The ABSL_RAW_CHECK() statements are those in the
591 // Mutex code checking that the "waitp" field has not been reused.
592 void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
593   // Fix the per-thread state only if it exists.
594   ThreadIdentity *identity = CurrentThreadIdentityIfPresent();
595   if (identity != nullptr) {
596     identity->per_thread_synch.suppress_fatal_errors = true;
597   }
598   // Don't do deadlock detection when we are already failing.
599   synch_deadlock_detection.store(OnDeadlockCycle::kIgnore,
600                                  std::memory_order_release);
601 }
602 
603 // --------------------------time support
604 
605 // Return the current time plus the timeout.  Use the same clock as
606 // PerThreadSem::Wait() for consistency.  Unfortunately, we don't have
607 // such a choice when a deadline is given directly.
608 static absl::Time DeadlineFromTimeout(absl::Duration timeout) {
609 #ifndef _WIN32
610   struct timeval tv;
611   gettimeofday(&tv, nullptr);
612   return absl::TimeFromTimeval(tv) + timeout;
613 #else
614   return absl::Now() + timeout;
615 #endif
616 }
617 
618 // --------------------------Mutexes
619 
620 // In the layout below, the msb of the bottom byte is currently unused.  Also,
621 // the following constraints were considered in choosing the layout:
622 //  o Both the debug allocator's "uninitialized" and "freed" patterns (0xab and
623 //    0xcd) are illegal: reader and writer lock both held.
624 //  o kMuWriter and kMuEvent should exceed kMuDesig and kMuWait, to enable the
625 //    bit-twiddling trick in Mutex::Unlock().
626 //  o kMuWriter / kMuReader == kMuWrWait / kMuWait,
627 //    to enable the bit-twiddling trick in CheckForMutexCorruption().
628 static const intptr_t kMuReader      = 0x0001L;  // a reader holds the lock
629 static const intptr_t kMuDesig       = 0x0002L;  // there's a designated waker
630 static const intptr_t kMuWait        = 0x0004L;  // threads are waiting
631 static const intptr_t kMuWriter      = 0x0008L;  // a writer holds the lock
632 static const intptr_t kMuEvent       = 0x0010L;  // record this mutex's events
633 // INVARIANT1:  there's a thread that was blocked on the mutex, is
634 // no longer, yet has not yet acquired the mutex.  If there's a
635 // designated waker, all threads can avoid taking the slow path in
636 // unlock because the designated waker will subsequently acquire
637 // the lock and wake someone.  To maintain INVARIANT1 the bit is
638 // set when a thread is unblocked(INV1a), and threads that were
639 // unblocked reset the bit when they either acquire or re-block
640 // (INV1b).
641 static const intptr_t kMuWrWait      = 0x0020L;  // runnable writer is waiting
642                                                  // for a reader
643 static const intptr_t kMuSpin        = 0x0040L;  // spinlock protects wait list
644 static const intptr_t kMuLow         = 0x00ffL;  // mask all mutex bits
645 static const intptr_t kMuHigh        = ~kMuLow;  // mask pointer/reader count
646 
647 // Hack to make constant values available to gdb pretty printer
648 enum {
649   kGdbMuSpin = kMuSpin,
650   kGdbMuEvent = kMuEvent,
651   kGdbMuWait = kMuWait,
652   kGdbMuWriter = kMuWriter,
653   kGdbMuDesig = kMuDesig,
654   kGdbMuWrWait = kMuWrWait,
655   kGdbMuReader = kMuReader,
656   kGdbMuLow = kMuLow,
657 };
658 
659 // kMuWrWait implies kMuWait.
660 // kMuReader and kMuWriter are mutually exclusive.
661 // If kMuReader is zero, there are no readers.
662 // Otherwise, if kMuWait is zero, the high order bits contain a count of the
663 // number of readers.  Otherwise, the reader count is held in
664 // PerThreadSynch::readers of the most recently queued waiter, again in the
665 // bits above kMuLow.
666 static const intptr_t kMuOne = 0x0100;  // a count of one reader
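// Editorial example (sketch, not part of the original file): while kMuWait
// is clear, the bits above kMuLow are a reader count, e.g.
//
//   intptr_t v = kMuReader | 2 * kMuOne;        // read-locked by two readers
//   intptr_t readers = (v & kMuHigh) / kMuOne;  // == 2
//
// Once kMuWait is set, those bits hold the waiter-queue pointer instead, and
// the count moves into PerThreadSynch::readers of the most recent waiter.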
667 
668 // flags passed to Enqueue and LockSlow{,WithTimeout,Loop}
669 static const int kMuHasBlocked = 0x01;  // already blocked (MUST == 1)
670 static const int kMuIsCond = 0x02;      // conditional waiter (CV or Condition)
671 
672 static_assert(PerThreadSynch::kAlignment > kMuLow,
673               "PerThreadSynch::kAlignment must be greater than kMuLow");
674 
675 // This struct contains various bitmasks to be used in
676 // acquiring and releasing a mutex in a particular mode.
677 struct MuHowS {
678   // if all the bits in fast_need_zero are zero, the lock can be acquired by
679   // adding fast_add and oring fast_or.  The bit kMuDesig should be reset iff
680   // this is the designated waker.
681   intptr_t fast_need_zero;
682   intptr_t fast_or;
683   intptr_t fast_add;
684 
685   intptr_t slow_need_zero;  // fast_need_zero with events (e.g. logging)
686 
687   intptr_t slow_inc_need_zero;  // if all the bits in slow_inc_need_zero are
688                                 // zero a reader can acquire a read share by
689                                 // setting the reader bit and incrementing
690                                 // the reader count (in last waiter since
691                                 // we're now slow-path).  kMuWrWait may
692                                 // be ignored if we already waited once.
693 };
694 
695 static const MuHowS kSharedS = {
696     // shared or read lock
697     kMuWriter | kMuWait | kMuEvent,   // fast_need_zero
698     kMuReader,                        // fast_or
699     kMuOne,                           // fast_add
700     kMuWriter | kMuWait,              // slow_need_zero
701     kMuSpin | kMuWriter | kMuWrWait,  // slow_inc_need_zero
702 };
703 static const MuHowS kExclusiveS = {
704     // exclusive or write lock
705     kMuWriter | kMuReader | kMuEvent,  // fast_need_zero
706     kMuWriter,                         // fast_or
707     0,                                 // fast_add
708     kMuWriter | kMuReader,             // slow_need_zero
709     ~static_cast<intptr_t>(0),         // slow_inc_need_zero
710 };
711 static const Mutex::MuHow kShared = &kSharedS;        // shared lock
712 static const Mutex::MuHow kExclusive = &kExclusiveS;  // exclusive lock
713 
714 #ifdef NDEBUG
715 static constexpr bool kDebugMode = false;
716 #else
717 static constexpr bool kDebugMode = true;
718 #endif
719 
720 #ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
721 static unsigned TsanFlags(Mutex::MuHow how) {
722   return how == kShared ? __tsan_mutex_read_lock : 0;
723 }
724 #endif
725 
726 static bool DebugOnlyIsExiting() {
727   return false;
728 }
729 
730 Mutex::~Mutex() {
731   intptr_t v = mu_.load(std::memory_order_relaxed);
732   if ((v & kMuEvent) != 0 && !DebugOnlyIsExiting()) {
733     ForgetSynchEvent(&this->mu_, kMuEvent, kMuSpin);
734   }
735   if (kDebugMode) {
736     this->ForgetDeadlockInfo();
737   }
738   ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
739 }
740 
741 void Mutex::EnableDebugLog(const char *name) {
742   SynchEvent *e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
743   e->log = true;
744   UnrefSynchEvent(e);
745 }
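// Editorial usage sketch (not part of the original file):
//
//   absl::Mutex mu;
//   mu.EnableDebugLog("my_mutex");
//
// After this call, kMuEvent is set in the mutex word and subsequent lock and
// unlock events on mu are reported through PostSynchEvent() above via
// ABSL_RAW_LOG(INFO, ...).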
746 
747 void EnableMutexInvariantDebugging(bool enabled) {
748   synch_check_invariants.store(enabled, std::memory_order_release);
749 }
750 
751 void Mutex::EnableInvariantDebugging(void (*invariant)(void *),
752                                      void *arg) {
753   if (synch_check_invariants.load(std::memory_order_acquire) &&
754       invariant != nullptr) {
755     SynchEvent *e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
756     e->invariant = invariant;
757     e->arg = arg;
758     UnrefSynchEvent(e);
759   }
760 }
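// Editorial usage sketch (not part of the original file): invariant checking
// must be enabled globally before registration takes effect, since the
// registration above is a no-op when synch_check_invariants is false.  The
// callback, mu and my_state below are hypothetical.
//
//   static void CheckMyInvariant(void *arg) {
//     // assert properties of *arg that must hold whenever mu is held
//   }
//
//   absl::EnableMutexInvariantDebugging(true);
//   mu.EnableInvariantDebugging(CheckMyInvariant, &my_state);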
761 
762 void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
763   synch_deadlock_detection.store(mode, std::memory_order_release);
764 }
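// Editorial usage sketch (not part of the original file): deadlock detection
// can be disabled at runtime, e.g.
//
//   absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kIgnore);
//
// The default is kAbort, or kIgnore when built with ThreadSanitizer (see
// kDeadlockDetectionDefault near the top of this file).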
765 
766 // Return true iff threads x and y are part of the same equivalence
767 // class of waiters. An equivalence class is defined as the set of
768 // waiters with the same condition, type of lock, and thread priority.
769 //
770 // Requires that x and y be waiting on the same Mutex queue.
771 static bool MuEquivalentWaiter(PerThreadSynch *x, PerThreadSynch *y) {
772   return x->waitp->how == y->waitp->how && x->priority == y->priority &&
773          Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
774 }
775 
776 // Given the contents of a mutex word containing a PerThreadSynch pointer,
777 // return the pointer.
778 static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
779   return reinterpret_cast<PerThreadSynch *>(v & kMuHigh);
780 }
781 
782 // The next several routines maintain the per-thread next and skip fields
783 // used in the Mutex waiter queue.
784 // The queue is a circular singly-linked list, of which the "head" is the
785 // last element, and head->next is the first element.
786 // The skip field has the invariant:
787 //   For thread x, x->skip is one of:
788 //     - invalid (iff x is not in a Mutex wait queue),
789 //     - null, or
790 //     - a pointer to a distinct thread waiting later in the same Mutex queue
791 //       such that all threads in [x, x->skip] have the same condition, priority
792 //       and lock type (MuEquivalentWaiter() is true for all pairs in [x,
793 //       x->skip]).
794 // In addition, if x->skip is  valid, (x->may_skip || x->skip == null)
795 //
796 // By the spec of MuEquivalentWaiter(), it is not necessary when removing the
797 // first runnable thread y from the front of a Mutex queue to adjust the skip
798 // field of another thread x because if x->skip==y, x->skip must (have) become
799 // invalid before y is removed.  The function TryRemove can remove a specified
800 // thread from an arbitrary position in the queue whether runnable or not, so
801 // it fixes up skip fields that would otherwise be left dangling.
802 // The statement
803 //     if (x->may_skip && MuEquivalentWaiter(x, x->next)) { x->skip = x->next; }
804 // maintains the invariant provided x is not the last waiter in a Mutex queue
805 // The statement
806 //          if (x->skip != null) { x->skip = x->skip->skip; }
807 // maintains the invariant.
808 
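// Editorial illustration (not part of the original file): if a, b and c are
// successive waiters that are pairwise MuEquivalentWaiter(), the invariant
// permits a->skip == c, so a search such as Skip(a) can jump directly from a
// to c without visiting b.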
809 // Returns the last thread y in a mutex waiter queue such that all threads in
810 // [x, y] inclusive share the same condition.  Sets skip fields of some threads
811 // in that range to optimize future evaluation of Skip() on x values in
812 // the range.  Requires thread x is in a mutex waiter queue.
813 // The locking is unusual.  Skip() is called under these conditions:
814 //   - spinlock is held in call from Enqueue(), with maybe_unlocking == false
815 //   - Mutex is held in call from UnlockSlow() by last unlocker, with
816 //     maybe_unlocking == true
817 //   - both Mutex and spinlock are held in call from DequeueAllWakeable() (from
818 //     UnlockSlow()) and TryRemove()
819 // These cases are mutually exclusive, so Skip() never runs concurrently
820 // with itself on the same Mutex.   The skip chain is used in these other places
821 // that cannot occur concurrently:
822 //   - FixSkip() (from TryRemove()) (spinlock and Mutex are held)
823 //   - Dequeue() (with spinlock and Mutex held)
824 //   - UnlockSlow() (with spinlock and Mutex held)
825 // A more complex case is Enqueue()
826 //   - Enqueue() (with spinlock held and maybe_unlocking == false)
827 //               This is the first case in which Skip is called, above.
828 //   - Enqueue() (without spinlock held; but queue is empty and being freshly
829 //                formed)
830 //   - Enqueue() (with spinlock held and maybe_unlocking == true)
831 // The first case has mutual exclusion, and the second isolation through
832 // working on an otherwise unreachable data structure.
833 // In the last case, Enqueue() is required to change no skip/next pointers
834 // except those in the added node and the former "head" node.  This implies
835 // that the new node is added after head, and so must be the new head or the
836 // new front of the queue.
837 static PerThreadSynch *Skip(PerThreadSynch *x) {
838   PerThreadSynch *x0 = nullptr;
839   PerThreadSynch *x1 = x;
840   PerThreadSynch *x2 = x->skip;
841   if (x2 != nullptr) {
842     // Each iteration attempts to advance sequence (x0,x1,x2) to next sequence
843     // such that   x1 == x0->skip && x2 == x1->skip
844     while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
845       x0->skip = x2;      // short-circuit skip from x0 to x2
846     }
847     x->skip = x1;         // short-circuit skip from x to result
848   }
849   return x1;
850 }
851 
852 // "ancestor" appears before "to_be_removed" in the same Mutex waiter queue.
853 // The latter is going to be removed out of order, because of a timeout.
854 // Check whether "ancestor" has a skip field pointing to "to_be_removed",
855 // and fix it if it does.
856 static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) {
857   if (ancestor->skip == to_be_removed) {  // ancestor->skip left dangling
858     if (to_be_removed->skip != nullptr) {
859       ancestor->skip = to_be_removed->skip;  // can skip past to_be_removed
860     } else if (ancestor->next != to_be_removed) {  // they are not adjacent
861       ancestor->skip = ancestor->next;             // can skip one past ancestor
862     } else {
863       ancestor->skip = nullptr;  // can't skip at all
864     }
865   }
866 }
867 
868 static void CondVarEnqueue(SynchWaitParams *waitp);
869 
870 // Enqueue thread "waitp->thread" on a waiter queue.
871 // Called with mutex spinlock held if head != nullptr
872 // If head==nullptr and waitp->cv_word==nullptr, then Enqueue() is
873 // idempotent; it alters no state associated with the existing (empty)
874 // queue.
875 //
876 // If waitp->cv_word == nullptr, queue the thread at either the front or
877 // the end (according to its priority) of the circular mutex waiter queue whose
878 // head is "head", and return the new head.  mu is the previous mutex state,
879 // which contains the reader count (perhaps adjusted for the operation in
880 // progress) if the list was empty and a read lock held, and the holder hint if
881 // the list was empty and a write lock held.  (flags & kMuIsCond) indicates
882 // whether this thread was transferred from a CondVar or is waiting for a
883 // non-trivial condition.  In this case, Enqueue() never returns nullptr
884 //
885 // If waitp->cv_word != nullptr, CondVarEnqueue() is called, and "head" is
886 // returned. This mechanism is used by CondVar to queue a thread on the
887 // condition variable queue instead of the mutex queue in implementing Wait().
888 // In this case, Enqueue() can return nullptr (if head==nullptr).
889 static PerThreadSynch *Enqueue(PerThreadSynch *head,
890                                SynchWaitParams *waitp, intptr_t mu, int flags) {
891   // If we have been given a cv_word, call CondVarEnqueue() and return
892   // the previous head of the Mutex waiter queue.
893   if (waitp->cv_word != nullptr) {
894     CondVarEnqueue(waitp);
895     return head;
896   }
897 
898   PerThreadSynch *s = waitp->thread;
899   ABSL_RAW_CHECK(
900       s->waitp == nullptr ||    // normal case
901           s->waitp == waitp ||  // Fer()---transfer from condition variable
902           s->suppress_fatal_errors,
903       "detected illegal recursion into Mutex code");
904   s->waitp = waitp;
905   s->skip = nullptr;             // maintain skip invariant (see above)
906   s->may_skip = true;            // always true on entering queue
907   s->wake = false;               // not being woken
908   s->cond_waiter = ((flags & kMuIsCond) != 0);
909   if (head == nullptr) {         // s is the only waiter
910     s->next = s;                 // it's the only entry in the cycle
911     s->readers = mu;             // reader count is from mu word
912     s->maybe_unlocking = false;  // no one is searching an empty list
913     head = s;                    // s is new head
914   } else {
915     PerThreadSynch *enqueue_after = nullptr;  // we'll put s after this element
916 #ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
917     int64_t now_cycles = base_internal::CycleClock::Now();
918     if (s->next_priority_read_cycles < now_cycles) {
919       // Every so often, update our idea of the thread's priority.
920       // pthread_getschedparam() is 5% of the block/wakeup time;
921       // base_internal::CycleClock::Now() is 0.5%.
922       int policy;
923       struct sched_param param;
924       const int err = pthread_getschedparam(pthread_self(), &policy, &param);
925       if (err != 0) {
926         ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
927       } else {
928         s->priority = param.sched_priority;
929         s->next_priority_read_cycles =
930             now_cycles +
931             static_cast<int64_t>(base_internal::CycleClock::Frequency());
932       }
933     }
934     if (s->priority > head->priority) {  // s's priority is above head's
935       // try to put s in priority-fifo order, or failing that at the front.
936       if (!head->maybe_unlocking) {
937         // No unlocker can be scanning the queue, so we can insert into the
938         // middle of the queue.
939         //
940         // Within a skip chain, all waiters have the same priority, so we can
941         // skip forward through the chains until we find one with a lower
942         // priority than the waiter to be enqueued.
943         PerThreadSynch *advance_to = head;    // next value of enqueue_after
944         do {
945           enqueue_after = advance_to;
946           // (side-effect: optimizes skip chain)
947           advance_to = Skip(enqueue_after->next);
948         } while (s->priority <= advance_to->priority);
949               // termination guaranteed because s->priority > head->priority
950               // and head is the end of a skip chain
951       } else if (waitp->how == kExclusive &&
952                  Condition::GuaranteedEqual(waitp->cond, nullptr)) {
953         // An unlocker could be scanning the queue, but we know it will recheck
954         // the queue front for writers that have no condition, which is what s
955         // is, so an insert at front is safe.
956         enqueue_after = head;       // add after head, at front
957       }
958     }
959 #endif
960     if (enqueue_after != nullptr) {
961       s->next = enqueue_after->next;
962       enqueue_after->next = s;
963 
964       // enqueue_after can be: head, Skip(...), or cur.
965       // The first two imply enqueue_after->skip == nullptr, and
966       // the last is used only if MuEquivalentWaiter(s, cur).
967       // We require this because clearing enqueue_after->skip
968       // is impossible; enqueue_after's predecessors might also
969       // incorrectly skip over s if we were to allow other
970       // insertion points.
971       ABSL_RAW_CHECK(enqueue_after->skip == nullptr ||
972                          MuEquivalentWaiter(enqueue_after, s),
973                      "Mutex Enqueue failure");
974 
975       if (enqueue_after != head && enqueue_after->may_skip &&
976           MuEquivalentWaiter(enqueue_after, enqueue_after->next)) {
977         // enqueue_after can skip to its new successor, s
978         enqueue_after->skip = enqueue_after->next;
979       }
980       if (MuEquivalentWaiter(s, s->next)) {  // s->may_skip is known to be true
981         s->skip = s->next;                // s may skip to its successor
982       }
983     } else {   // enqueue not done any other way, so
984                // we're inserting s at the back
985       // s will become new head; copy data from head into it
986       s->next = head->next;        // add s after head
987       head->next = s;
988       s->readers = head->readers;  // reader count is from previous head
989       s->maybe_unlocking = head->maybe_unlocking;  // same for unlock hint
990       if (head->may_skip && MuEquivalentWaiter(head, s)) {
991         // head now has successor; may skip
992         head->skip = s;
993       }
994       head = s;  // s is new head
995     }
996   }
997   s->state.store(PerThreadSynch::kQueued, std::memory_order_relaxed);
998   return head;
999 }
1000 
1001 // Dequeue the successor pw->next of thread pw from the Mutex waiter queue
1002 // whose last element is head.  The new head element is returned, or null
1003 // if the list is made empty.
1004 // Dequeue is called with both spinlock and Mutex held.
1005 static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
1006   PerThreadSynch *w = pw->next;
1007   pw->next = w->next;         // snip w out of list
1008   if (head == w) {            // we removed the head
1009     head = (pw == w) ? nullptr : pw;  // either emptied list, or pw is new head
1010   } else if (pw != head && MuEquivalentWaiter(pw, pw->next)) {
1011     // pw can skip to its new successor
1012     if (pw->next->skip !=
1013         nullptr) {  // either skip to its successors skip target
1014       pw->skip = pw->next->skip;
1015     } else {                   // or to pw's successor
1016       pw->skip = pw->next;
1017     }
1018   }
1019   return head;
1020 }
1021 
1022 // Traverse the elements [ pw->next, h] of the circular list whose last element
1023 // is head.
1024 // Remove all elements with wake==true and place them in the
1025 // singly-linked list wake_list in the order found.   Assumes that
1026 // there is only one such element if the element has how == kExclusive.
1027 // Return the new head.
1028 static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
1029                                           PerThreadSynch *pw,
1030                                           PerThreadSynch **wake_tail) {
1031   PerThreadSynch *orig_h = head;
1032   PerThreadSynch *w = pw->next;
1033   bool skipped = false;
1034   do {
1035     if (w->wake) {                    // remove this element
1036       ABSL_RAW_CHECK(pw->skip == nullptr, "bad skip in DequeueAllWakeable");
1037       // we're removing pw's successor so either pw->skip is zero or we should
1038       // already have removed pw since if pw->skip!=null, pw has the same
1039       // condition as w.
1040       head = Dequeue(head, pw);
1041       w->next = *wake_tail;           // keep list terminated
1042       *wake_tail = w;                 // add w to wake_list;
1043       wake_tail = &w->next;           // next addition to end
1044       if (w->waitp->how == kExclusive) {  // wake at most 1 writer
1045         break;
1046       }
1047     } else {                // not waking this one; skip
1048       pw = Skip(w);       // skip as much as possible
1049       skipped = true;
1050     }
1051     w = pw->next;
1052     // We want to stop processing after we've considered the original head,
1053     // orig_h.  We can't test for w==orig_h in the loop because w may skip over
1054     // it; we are guaranteed only that w's predecessor will not skip over
1055     // orig_h.  When we've considered orig_h, either we've processed it and
1056     // removed it (so orig_h != head), or we considered it and skipped it (so
1057     // skipped==true && pw == head because skipping from head always skips by
1058     // just one, leaving pw pointing at head).  So we want to
1059     // continue the loop with the negation of that expression.
1060   } while (orig_h == head && (pw != head || !skipped));
1061   return head;
1062 }
1063 
1064 // Try to remove thread s from the list of waiters on this mutex.
1065 // Does nothing if s is not on the waiter list.
1066 void Mutex::TryRemove(PerThreadSynch *s) {
1067   SchedulingGuard::ScopedDisable disable_rescheduling;
1068   intptr_t v = mu_.load(std::memory_order_relaxed);
1069   // acquire spinlock & lock
1070   if ((v & (kMuWait | kMuSpin | kMuWriter | kMuReader)) == kMuWait &&
1071       mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter,
1072                                   std::memory_order_acquire,
1073                                   std::memory_order_relaxed)) {
1074     PerThreadSynch *h = GetPerThreadSynch(v);
1075     if (h != nullptr) {
1076       PerThreadSynch *pw = h;   // pw is w's predecessor
1077       PerThreadSynch *w;
1078       if ((w = pw->next) != s) {  // search for thread,
1079         do {                      // processing at least one element
1080           // If the current element isn't equivalent to the waiter to be
1081           // removed, we can skip the entire chain.
1082           if (!MuEquivalentWaiter(s, w)) {
1083             pw = Skip(w);                // so skip all that won't match
1084             // we don't have to worry about dangling skip fields
1085             // in the threads we skipped; none can point to s
1086             // because they are in a different equivalence class.
1087           } else {          // seeking same condition
1088             FixSkip(w, s);  // fix up any skip pointer from w to s
1089             pw = w;
1090           }
1091           // don't search further if we found the thread, or we're about to
1092           // process the first thread again.
1093         } while ((w = pw->next) != s && pw != h);
1094       }
1095       if (w == s) {                 // found thread; remove it
1096         // pw->skip may be non-zero here; the loop above ensured that
1097         // no ancestor of s can skip to s, so removal is safe anyway.
1098         h = Dequeue(h, pw);
1099         s->next = nullptr;
1100         s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
1101       }
1102     }
1103     intptr_t nv;
1104     do {                        // release spinlock and lock
1105       v = mu_.load(std::memory_order_relaxed);
1106       nv = v & (kMuDesig | kMuEvent);
1107       if (h != nullptr) {
1108         nv |= kMuWait | reinterpret_cast<intptr_t>(h);
1109         h->readers = 0;            // we hold writer lock
1110         h->maybe_unlocking = false;  // finished unlocking
1111       }
1112     } while (!mu_.compare_exchange_weak(v, nv,
1113                                         std::memory_order_release,
1114                                         std::memory_order_relaxed));
1115   }
1116 }
1117 
1118 // Wait until thread "s", which must be the current thread, is removed from
1119 // this mutex's waiter queue.  If "s->waitp->timeout" has a timeout, wake up
1120 // if the wait extends past the absolute time specified, even if "s" is still
1121 // on the mutex queue.  In this case, remove "s" from the queue and return
1122 // true, otherwise return false.
1123 ABSL_XRAY_LOG_ARGS(1) void Mutex::Block(PerThreadSynch *s) {
1124   while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
1125     if (!DecrementSynchSem(this, s, s->waitp->timeout)) {
1126       // After a timeout, we go into a spin loop until we remove ourselves
1127       // from the queue, or someone else removes us.  We can't be sure to be
1128       // able to remove ourselves in a single lock acquisition because this
1129       // mutex may be held, and the holder has the right to read the centre
1130       // of the waiter queue without holding the spinlock.
1131       this->TryRemove(s);
1132       int c = 0;
1133       while (s->next != nullptr) {
1134         c = synchronization_internal::MutexDelay(c, GENTLE);
1135         this->TryRemove(s);
1136       }
1137       if (kDebugMode) {
1138         // This ensures that we test the case that TryRemove() is called when s
1139         // is not on the queue.
1140         this->TryRemove(s);
1141       }
1142       s->waitp->timeout = KernelTimeout::Never();      // timeout is satisfied
1143       s->waitp->cond = nullptr;  // condition no longer relevant for wakeups
1144     }
1145   }
1146   ABSL_RAW_CHECK(s->waitp != nullptr || s->suppress_fatal_errors,
1147                  "detected illegal recursion in Mutex code");
1148   s->waitp = nullptr;
1149 }
1150 
1151 // Wake thread w, and return the next thread in the list.
1152 PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) {
1153   PerThreadSynch *next = w->next;
1154   w->next = nullptr;
1155   w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
1156   IncrementSynchSem(this, w);
1157 
1158   return next;
1159 }
1160 
1161 static GraphId GetGraphIdLocked(Mutex *mu)
1162     ABSL_EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
1163   if (!deadlock_graph) {  // (re)create the deadlock graph.
1164     deadlock_graph =
1165         new (base_internal::LowLevelAlloc::Alloc(sizeof(*deadlock_graph)))
1166             GraphCycles;
1167   }
1168   return deadlock_graph->GetId(mu);
1169 }
1170 
1171 static GraphId GetGraphId(Mutex *mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
1172   deadlock_graph_mu.Lock();
1173   GraphId id = GetGraphIdLocked(mu);
1174   deadlock_graph_mu.Unlock();
1175   return id;
1176 }
1177 
1178 // Record a lock acquisition.  This is used in debug mode for deadlock
1179 // detection.  The held_locks pointer points to the relevant data
1180 // structure for each case.
1181 static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
1182   int n = held_locks->n;
1183   int i = 0;
1184   while (i != n && held_locks->locks[i].id != id) {
1185     i++;
1186   }
1187   if (i == n) {
1188     if (n == ABSL_ARRAYSIZE(held_locks->locks)) {
1189       held_locks->overflow = true;  // lost some data
1190     } else {                        // we have room for lock
1191       held_locks->locks[i].mu = mu;
1192       held_locks->locks[i].count = 1;
1193       held_locks->locks[i].id = id;
1194       held_locks->n = n + 1;
1195     }
1196   } else {
1197     held_locks->locks[i].count++;
1198   }
1199 }
1200 
1201 // Record a lock release.  Each call to LockEnter(mu, id, x) should be
1202 // eventually followed by a call to LockLeave(mu, id, x) by the same thread.
1203 // It does not process the event if it is not needed, i.e., when deadlock
1204 // detection is disabled.
1205 static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
1206   int n = held_locks->n;
1207   int i = 0;
1208   while (i != n && held_locks->locks[i].id != id) {
1209     i++;
1210   }
1211   if (i == n) {
1212     if (!held_locks->overflow) {
1213       // The deadlock id may have been reassigned after ForgetDeadlockInfo,
1214       // but in that case mu should still be present.
1215       i = 0;
1216       while (i != n && held_locks->locks[i].mu != mu) {
1217         i++;
1218       }
1219       if (i == n) {  // mu missing means releasing unheld lock
1220         SynchEvent *mu_events = GetSynchEvent(mu);
1221         ABSL_RAW_LOG(FATAL,
1222                      "thread releasing lock it does not hold: %p %s; "
1223                      ,
1224                      static_cast<void *>(mu),
1225                      mu_events == nullptr ? "" : mu_events->name);
1226       }
1227     }
1228   } else if (held_locks->locks[i].count == 1) {
1229     held_locks->n = n - 1;
1230     held_locks->locks[i] = held_locks->locks[n - 1];
1231     held_locks->locks[n - 1].id = InvalidGraphId();
1232     held_locks->locks[n - 1].mu =
1233         nullptr;  // clear mu to please the leak detector.
1234   } else {
1235     assert(held_locks->locks[i].count > 0);
1236     held_locks->locks[i].count--;
1237   }
1238 }
1239 
1240 // Call LockEnter() if in debug mode and deadlock detection is enabled.
1241 static inline void DebugOnlyLockEnter(Mutex *mu) {
1242   if (kDebugMode) {
1243     if (synch_deadlock_detection.load(std::memory_order_acquire) !=
1244         OnDeadlockCycle::kIgnore) {
1245       LockEnter(mu, GetGraphId(mu), Synch_GetAllLocks());
1246     }
1247   }
1248 }
1249 
1250 // Call LockEnter() if in debug mode and deadlock detection is enabled.
1251 static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) {
1252   if (kDebugMode) {
1253     if (synch_deadlock_detection.load(std::memory_order_acquire) !=
1254         OnDeadlockCycle::kIgnore) {
1255       LockEnter(mu, id, Synch_GetAllLocks());
1256     }
1257   }
1258 }
1259 
1260 // Call LockLeave() if in debug mode and deadlock detection is enabled.
1261 static inline void DebugOnlyLockLeave(Mutex *mu) {
1262   if (kDebugMode) {
1263     if (synch_deadlock_detection.load(std::memory_order_acquire) !=
1264         OnDeadlockCycle::kIgnore) {
1265       LockLeave(mu, GetGraphId(mu), Synch_GetAllLocks());
1266     }
1267   }
1268 }
1269 
1270 static char *StackString(void **pcs, int n, char *buf, int maxlen,
1271                          bool symbolize) {
1272   static const int kSymLen = 200;
1273   char sym[kSymLen];
1274   int len = 0;
1275   for (int i = 0; i != n; i++) {
1276     if (symbolize) {
1277       if (!symbolizer(pcs[i], sym, kSymLen)) {
1278         sym[0] = '\0';
1279       }
1280       snprintf(buf + len, maxlen - len, "%s\t@ %p %s\n",
1281                (i == 0 ? "\n" : ""),
1282                pcs[i], sym);
1283     } else {
1284       snprintf(buf + len, maxlen - len, " %p", pcs[i]);
1285     }
1286     len += strlen(&buf[len]);
1287   }
1288   return buf;
1289 }
1290 
1291 static char *CurrentStackString(char *buf, int maxlen, bool symbolize) {
1292   void *pcs[40];
1293   return StackString(pcs, absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 2), buf,
1294                      maxlen, symbolize);
1295 }
1296 
1297 namespace {
1298 enum { kMaxDeadlockPathLen = 10 };  // maximum length of a deadlock cycle;
1299                                     // a path this long would be remarkable
1300 // Buffers required to report a deadlock.
1301 // We do not allocate them on the stack to avoid a large stack frame.
1302 struct DeadlockReportBuffers {
1303   char buf[6100];
1304   GraphId path[kMaxDeadlockPathLen];
1305 };
1306 
1307 struct ScopedDeadlockReportBuffers {
1308   ScopedDeadlockReportBuffers() {
1309     b = reinterpret_cast<DeadlockReportBuffers *>(
1310         base_internal::LowLevelAlloc::Alloc(sizeof(*b)));
1311   }
1312   ~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); }
1313   DeadlockReportBuffers *b;
1314 };
1315 
1316 // Helper to pass to GraphCycles::UpdateStackTrace.
1317 int GetStack(void** stack, int max_depth) {
1318   return absl::GetStackTrace(stack, max_depth, 3);
1319 }
1320 }  // anonymous namespace
1321 
1322 // Called in debug mode when a thread is about to acquire a lock in a way that
1323 // may block.
1324 static GraphId DeadlockCheck(Mutex *mu) {
1325   if (synch_deadlock_detection.load(std::memory_order_acquire) ==
1326       OnDeadlockCycle::kIgnore) {
1327     return InvalidGraphId();
1328   }
1329 
1330   SynchLocksHeld *all_locks = Synch_GetAllLocks();
1331 
1332   absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu);
1333   const GraphId mu_id = GetGraphIdLocked(mu);
1334 
1335   if (all_locks->n == 0) {
1336     // There are no other locks held. Return now so that we don't need to
1337     // call GetSynchEvent(). This way we do not record the stack trace
1338     // for this Mutex. That is OK: if this Mutex is involved in a deadlock
1339     // cycle, it cannot be the first lock acquired by every thread in the cycle.
1340     return mu_id;
1341   }
1342 
1343   // We prefer to keep stack traces that show a thread holding and acquiring
1344   // as many locks as possible.  This increases the chances that a given edge
1345   // in the acquires-before graph will be represented in the stack traces
1346   // recorded for the locks.
1347   deadlock_graph->UpdateStackTrace(mu_id, all_locks->n + 1, GetStack);
1348 
1349   // For each other mutex already held by this thread:
1350   for (int i = 0; i != all_locks->n; i++) {
1351     const GraphId other_node_id = all_locks->locks[i].id;
1352     const Mutex *other =
1353         static_cast<const Mutex *>(deadlock_graph->Ptr(other_node_id));
1354     if (other == nullptr) {
1355       // Ignore stale lock
1356       continue;
1357     }
1358 
1359     // Add the acquired-before edge to the graph.
1360     if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) {
1361       ScopedDeadlockReportBuffers scoped_buffers;
1362       DeadlockReportBuffers *b = scoped_buffers.b;
1363       static int number_of_reported_deadlocks = 0;
1364       number_of_reported_deadlocks++;
1365       // Symbolize only the first 2 deadlock reports to avoid huge slowdowns.
1366       bool symbolize = number_of_reported_deadlocks <= 2;
1367       ABSL_RAW_LOG(ERROR, "Potential Mutex deadlock: %s",
1368                    CurrentStackString(b->buf, sizeof (b->buf), symbolize));
1369       int len = 0;
1370       for (int j = 0; j != all_locks->n; j++) {
1371         void* pr = deadlock_graph->Ptr(all_locks->locks[j].id);
1372         if (pr != nullptr) {
1373           snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr);
1374           len += static_cast<int>(strlen(&b->buf[len]));
1375         }
1376       }
1377       ABSL_RAW_LOG(ERROR,
1378                    "Acquiring absl::Mutex %p while holding %s; a cycle in the "
1379                    "historical lock ordering graph has been observed",
1380                    static_cast<void *>(mu), b->buf);
1381       ABSL_RAW_LOG(ERROR, "Cycle: ");
1382       int path_len = deadlock_graph->FindPath(
1383           mu_id, other_node_id, ABSL_ARRAYSIZE(b->path), b->path);
1384       for (int j = 0; j != path_len; j++) {
1385         GraphId id = b->path[j];
1386         Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id));
1387         if (path_mu == nullptr) continue;
1388         void** stack;
1389         int depth = deadlock_graph->GetStackTrace(id, &stack);
1390         snprintf(b->buf, sizeof(b->buf),
1391                  "mutex@%p stack: ", static_cast<void *>(path_mu));
1392         StackString(stack, depth, b->buf + strlen(b->buf),
1393                     static_cast<int>(sizeof(b->buf) - strlen(b->buf)),
1394                     symbolize);
1395         ABSL_RAW_LOG(ERROR, "%s", b->buf);
1396       }
1397       if (synch_deadlock_detection.load(std::memory_order_acquire) ==
1398           OnDeadlockCycle::kAbort) {
1399         deadlock_graph_mu.Unlock();  // avoid deadlock in fatal sighandler
1400         ABSL_RAW_LOG(FATAL, "dying due to potential deadlock");
1401         return mu_id;
1402       }
1403       break;   // report at most one potential deadlock per acquisition
1404     }
1405   }
1406 
1407   return mu_id;
1408 }
1409 
1410 // Invoke DeadlockCheck() iff we're in debug mode and
1411 // deadlock checking has been enabled.
1412 static inline GraphId DebugOnlyDeadlockCheck(Mutex *mu) {
1413   if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
1414                         OnDeadlockCycle::kIgnore) {
1415     return DeadlockCheck(mu);
1416   } else {
1417     return InvalidGraphId();
1418   }
1419 }
1420 
1421 void Mutex::ForgetDeadlockInfo() {
1422   if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
1423                         OnDeadlockCycle::kIgnore) {
1424     deadlock_graph_mu.Lock();
1425     if (deadlock_graph != nullptr) {
1426       deadlock_graph->RemoveNode(this);
1427     }
1428     deadlock_graph_mu.Unlock();
1429   }
1430 }
1431 
1432 void Mutex::AssertNotHeld() const {
1433   // We have the data to allow this check only if in debug mode and deadlock
1434   // detection is enabled.
1435   if (kDebugMode &&
1436       (mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 &&
1437       synch_deadlock_detection.load(std::memory_order_acquire) !=
1438           OnDeadlockCycle::kIgnore) {
1439     GraphId id = GetGraphId(const_cast<Mutex *>(this));
1440     SynchLocksHeld *locks = Synch_GetAllLocks();
1441     for (int i = 0; i != locks->n; i++) {
1442       if (locks->locks[i].id == id) {
1443         SynchEvent *mu_events = GetSynchEvent(this);
1444         ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s",
1445                      static_cast<const void *>(this),
1446                      (mu_events == nullptr ? "" : mu_events->name));
1447       }
1448     }
1449   }
1450 }
1451 
1452 // Attempt to acquire *mu, and return whether successful.  The implementation
1453 // may spin for a short while if the lock cannot be acquired immediately.
1454 static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
1455   int c = GetMutexGlobals().spinloop_iterations;
1456   do {  // do/while somewhat faster on AMD
1457     intptr_t v = mu->load(std::memory_order_relaxed);
1458     if ((v & (kMuReader|kMuEvent)) != 0) {
1459       return false;  // a reader or tracing -> give up
1460     } else if (((v & kMuWriter) == 0) &&  // no holder -> try to acquire
1461                mu->compare_exchange_strong(v, kMuWriter | v,
1462                                            std::memory_order_acquire,
1463                                            std::memory_order_relaxed)) {
1464       return true;
1465     }
1466   } while (--c > 0);
1467   return false;
1468 }
1469 
1470 ABSL_XRAY_LOG_ARGS(1) void Mutex::Lock() {
1471   ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
1472   GraphId id = DebugOnlyDeadlockCheck(this);
1473   intptr_t v = mu_.load(std::memory_order_relaxed);
1474   // try fast acquire, then spin loop
1475   if ((v & (kMuWriter | kMuReader | kMuEvent)) != 0 ||
1476       !mu_.compare_exchange_strong(v, kMuWriter | v,
1477                                    std::memory_order_acquire,
1478                                    std::memory_order_relaxed)) {
1479     // try spin acquire, then slow loop
1480     if (!TryAcquireWithSpinning(&this->mu_)) {
1481       this->LockSlow(kExclusive, nullptr, 0);
1482     }
1483   }
1484   DebugOnlyLockEnter(this, id);
1485   ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
1486 }
1487 
1488 ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderLock() {
1489   ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
1490   GraphId id = DebugOnlyDeadlockCheck(this);
1491   intptr_t v = mu_.load(std::memory_order_relaxed);
1492   // try fast acquire, then slow loop
1493   if ((v & (kMuWriter | kMuWait | kMuEvent)) != 0 ||
1494       !mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
1495                                    std::memory_order_acquire,
1496                                    std::memory_order_relaxed)) {
1497     this->LockSlow(kShared, nullptr, 0);
1498   }
1499   DebugOnlyLockEnter(this, id);
1500   ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
1501 }
1502 
1503 void Mutex::LockWhen(const Condition &cond) {
1504   ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
1505   GraphId id = DebugOnlyDeadlockCheck(this);
1506   this->LockSlow(kExclusive, &cond, 0);
1507   DebugOnlyLockEnter(this, id);
1508   ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
1509 }
1510 
1511 bool Mutex::LockWhenWithTimeout(const Condition &cond, absl::Duration timeout) {
1512   return LockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
1513 }
1514 
1515 bool Mutex::LockWhenWithDeadline(const Condition &cond, absl::Time deadline) {
1516   ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
1517   GraphId id = DebugOnlyDeadlockCheck(this);
1518   bool res = LockSlowWithDeadline(kExclusive, &cond,
1519                                   KernelTimeout(deadline), 0);
1520   DebugOnlyLockEnter(this, id);
1521   ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
1522   return res;
1523 }
1524 
1525 void Mutex::ReaderLockWhen(const Condition &cond) {
1526   ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
1527   GraphId id = DebugOnlyDeadlockCheck(this);
1528   this->LockSlow(kShared, &cond, 0);
1529   DebugOnlyLockEnter(this, id);
1530   ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
1531 }
1532 
1533 bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond,
1534                                       absl::Duration timeout) {
1535   return ReaderLockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
1536 }
1537 
1538 bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
1539                                        absl::Time deadline) {
1540   ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
1541   GraphId id = DebugOnlyDeadlockCheck(this);
1542   bool res = LockSlowWithDeadline(kShared, &cond, KernelTimeout(deadline), 0);
1543   DebugOnlyLockEnter(this, id);
1544   ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
1545   return res;
1546 }
1547 
1548 void Mutex::Await(const Condition &cond) {
1549   if (cond.Eval()) {    // condition already true; nothing to do
1550     if (kDebugMode) {
1551       this->AssertReaderHeld();
1552     }
1553   } else {              // normal case
1554     ABSL_RAW_CHECK(this->AwaitCommon(cond, KernelTimeout::Never()),
1555                    "condition untrue on return from Await");
1556   }
1557 }
1558 
1559 bool Mutex::AwaitWithTimeout(const Condition &cond, absl::Duration timeout) {
1560   return AwaitWithDeadline(cond, DeadlineFromTimeout(timeout));
1561 }
1562 
1563 bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) {
1564   if (cond.Eval()) {      // condition already true; nothing to do
1565     if (kDebugMode) {
1566       this->AssertReaderHeld();
1567     }
1568     return true;
1569   }
1570 
1571   KernelTimeout t{deadline};
1572   bool res = this->AwaitCommon(cond, t);
1573   ABSL_RAW_CHECK(res || t.has_timeout(),
1574                  "condition untrue on return from Await");
1575   return res;
1576 }
1577 
1578 bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) {
1579   this->AssertReaderHeld();
1580   MuHow how =
1581       (mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared;
1582   ABSL_TSAN_MUTEX_PRE_UNLOCK(this, TsanFlags(how));
1583   SynchWaitParams waitp(
1584       how, &cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
1585       nullptr /*no cv_word*/);
1586   int flags = kMuHasBlocked;
1587   if (!Condition::GuaranteedEqual(&cond, nullptr)) {
1588     flags |= kMuIsCond;
1589   }
1590   this->UnlockSlow(&waitp);
1591   this->Block(waitp.thread);
1592   ABSL_TSAN_MUTEX_POST_UNLOCK(this, TsanFlags(how));
1593   ABSL_TSAN_MUTEX_PRE_LOCK(this, TsanFlags(how));
1594   this->LockSlowLoop(&waitp, flags);
1595   bool res = waitp.cond != nullptr ||  // => cond known true from LockSlowLoop
1596              EvalConditionAnnotated(&cond, this, true, false, how == kShared);
1597   ABSL_TSAN_MUTEX_POST_LOCK(this, TsanFlags(how), 0);
1598   return res;
1599 }
1600 
1601 ABSL_XRAY_LOG_ARGS(1) bool Mutex::TryLock() {
1602   ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
1603   intptr_t v = mu_.load(std::memory_order_relaxed);
1604   if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 &&  // try fast acquire
1605       mu_.compare_exchange_strong(v, kMuWriter | v,
1606                                   std::memory_order_acquire,
1607                                   std::memory_order_relaxed)) {
1608     DebugOnlyLockEnter(this);
1609     ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
1610     return true;
1611   }
1612   if ((v & kMuEvent) != 0) {              // we're recording events
1613     if ((v & kExclusive->slow_need_zero) == 0 &&  // try fast acquire
1614         mu_.compare_exchange_strong(
1615             v, (kExclusive->fast_or | v) + kExclusive->fast_add,
1616             std::memory_order_acquire, std::memory_order_relaxed)) {
1617       DebugOnlyLockEnter(this);
1618       PostSynchEvent(this, SYNCH_EV_TRYLOCK_SUCCESS);
1619       ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
1620       return true;
1621     } else {
1622       PostSynchEvent(this, SYNCH_EV_TRYLOCK_FAILED);
1623     }
1624   }
1625   ABSL_TSAN_MUTEX_POST_LOCK(
1626       this, __tsan_mutex_try_lock | __tsan_mutex_try_lock_failed, 0);
1627   return false;
1628 }
1629 
1630 ABSL_XRAY_LOG_ARGS(1) bool Mutex::ReaderTryLock() {
1631   ABSL_TSAN_MUTEX_PRE_LOCK(this,
1632                            __tsan_mutex_read_lock | __tsan_mutex_try_lock);
1633   intptr_t v = mu_.load(std::memory_order_relaxed);
1634   // The while-loops (here and below) iterate only if the mutex word keeps
1635   // changing (typically because the reader count changes) under the CAS.  We
1636   // limit the number of attempts to avoid having to think about livelock.
1637   int loop_limit = 5;
1638   while ((v & (kMuWriter|kMuWait|kMuEvent)) == 0 && loop_limit != 0) {
1639     if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
1640                                     std::memory_order_acquire,
1641                                     std::memory_order_relaxed)) {
1642       DebugOnlyLockEnter(this);
1643       ABSL_TSAN_MUTEX_POST_LOCK(
1644           this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
1645       return true;
1646     }
1647     loop_limit--;
1648     v = mu_.load(std::memory_order_relaxed);
1649   }
1650   if ((v & kMuEvent) != 0) {   // we're recording events
1651     loop_limit = 5;
1652     while ((v & kShared->slow_need_zero) == 0 && loop_limit != 0) {
1653       if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
1654                                       std::memory_order_acquire,
1655                                       std::memory_order_relaxed)) {
1656         DebugOnlyLockEnter(this);
1657         PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_SUCCESS);
1658         ABSL_TSAN_MUTEX_POST_LOCK(
1659             this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
1660         return true;
1661       }
1662       loop_limit--;
1663       v = mu_.load(std::memory_order_relaxed);
1664     }
1665     if ((v & kMuEvent) != 0) {
1666       PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_FAILED);
1667     }
1668   }
1669   ABSL_TSAN_MUTEX_POST_LOCK(this,
1670                             __tsan_mutex_read_lock | __tsan_mutex_try_lock |
1671                                 __tsan_mutex_try_lock_failed,
1672                             0);
1673   return false;
1674 }
1675 
1676 ABSL_XRAY_LOG_ARGS(1) void Mutex::Unlock() {
1677   ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
1678   DebugOnlyLockLeave(this);
1679   intptr_t v = mu_.load(std::memory_order_relaxed);
1680 
1681   if (kDebugMode && ((v & (kMuWriter | kMuReader)) != kMuWriter)) {
1682     ABSL_RAW_LOG(FATAL, "Mutex unlocked when destroyed or not locked: v=0x%x",
1683                  static_cast<unsigned>(v));
1684   }
1685 
1686   // should_try_cas is whether we'll try a compare-and-swap immediately.
1687   // NOTE: optimized out when kDebugMode is false.
1688   bool should_try_cas = ((v & (kMuEvent | kMuWriter)) == kMuWriter &&
1689                           (v & (kMuWait | kMuDesig)) != kMuWait);
1690   // However, we can use an alternate computation that compilers currently
1691   // don't find on their own.  When that changes, this function
1692   // can be simplified.
1693   intptr_t x = (v ^ (kMuWriter | kMuWait)) & (kMuWriter | kMuEvent);
1694   intptr_t y = (v ^ (kMuWriter | kMuWait)) & (kMuWait | kMuDesig);
1695   // Claim: "x == 0 && y > 0" is equivalent to should_try_cas.
1696   // Also, because kMuWriter and kMuEvent exceed kMuDesig and kMuWait,
1697   // all possible non-zero values for x exceed all possible values for y.
1698   // Therefore, (x == 0 && y > 0) == (x < y).
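  // For example, if v has kMuWriter set and none of kMuEvent, kMuWait or
  // kMuDesig, then x == (kMuWait & (kMuWriter | kMuEvent)) == 0 while
  // y == (kMuWait & (kMuWait | kMuDesig)) == kMuWait > 0, so x < y and the
  // fast CAS release below is attempted, matching should_try_cas above.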
1699   if (kDebugMode && should_try_cas != (x < y)) {
1700     // We would usually use PRIdPTR here, but it is not correctly implemented
1701     // within the Android toolchain.
1702     ABSL_RAW_LOG(FATAL, "internal logic error %llx %llx %llx\n",
1703                  static_cast<long long>(v), static_cast<long long>(x),
1704                  static_cast<long long>(y));
1705   }
1706   if (x < y &&
1707       mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
1708                                   std::memory_order_release,
1709                                   std::memory_order_relaxed)) {
1710     // fast writer release (writer with no waiters or with designated waker)
1711   } else {
1712     this->UnlockSlow(nullptr /*no waitp*/);  // take slow path
1713   }
1714   ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
1715 }
1716 
1717 // Requires v to represent a reader-locked state.
1718 static bool ExactlyOneReader(intptr_t v) {
1719   assert((v & (kMuWriter|kMuReader)) == kMuReader);
1720   assert((v & kMuHigh) != 0);
1721   // The more straightforward "(v & kMuHigh) == kMuOne" also works, but
1722   // on some architectures the following generates slightly smaller code.
1723   // It may be faster too.
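  // For example, a single reader stores a count of exactly kMuOne in the high
  // bits, so v & (kMuHigh ^ kMuOne) is zero; any additional reader pushes the
  // count past kMuOne and sets a higher bit, making the expression non-zero.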
1724   constexpr intptr_t kMuMultipleWaitersMask = kMuHigh ^ kMuOne;
1725   return (v & kMuMultipleWaitersMask) == 0;
1726 }
1727 
1728 ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderUnlock() {
1729   ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
1730   DebugOnlyLockLeave(this);
1731   intptr_t v = mu_.load(std::memory_order_relaxed);
1732   assert((v & (kMuWriter|kMuReader)) == kMuReader);
1733   if ((v & (kMuReader|kMuWait|kMuEvent)) == kMuReader) {
1734     // fast reader release (reader with no waiters)
1735     intptr_t clear = ExactlyOneReader(v) ? kMuReader|kMuOne : kMuOne;
1736     if (mu_.compare_exchange_strong(v, v - clear,
1737                                     std::memory_order_release,
1738                                     std::memory_order_relaxed)) {
1739       ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
1740       return;
1741     }
1742   }
1743   this->UnlockSlow(nullptr /*no waitp*/);  // take slow path
1744   ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
1745 }
1746 
1747 // Clears the designated waker flag in the mutex if this thread has blocked, and
1748 // therefore may be the designated waker.
1749 static intptr_t ClearDesignatedWakerMask(int flag) {
1750   assert(flag >= 0);
1751   assert(flag <= 1);
1752   switch (flag) {
1753     case 0:  // not blocked
1754       return ~static_cast<intptr_t>(0);
1755     case 1:  // blocked; turn off the designated waker bit
1756       return ~static_cast<intptr_t>(kMuDesig);
1757   }
1758   ABSL_INTERNAL_UNREACHABLE;
1759 }
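// Callers AND the returned mask into the mutex word, e.g.
//   v & ClearDesignatedWakerMask(flags & kMuHasBlocked)
// which leaves v unchanged on a first acquisition attempt (flag == 0) and
// clears kMuDesig once this thread has already blocked (flag == 1).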
1760 
1761 // Conditionally ignores the existence of waiting writers if a reader that has
1762 // already blocked once wakes up.
1763 static intptr_t IgnoreWaitingWritersMask(int flag) {
1764   assert(flag >= 0);
1765   assert(flag <= 1);
1766   switch (flag) {
1767     case 0:  // not blocked
1768       return ~static_cast<intptr_t>(0);
1769     case 1:  // blocked; pretend there are no waiting writers
1770       return ~static_cast<intptr_t>(kMuWrWait);
1771   }
1772   ABSL_INTERNAL_UNREACHABLE;
1773 }
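// Used analogously in LockSlowLoop, e.g.
//   v & waitp->how->slow_inc_need_zero &
//       IgnoreWaitingWritersMask(flags & kMuHasBlocked)
// so a reader that has already blocked once is not held back by kMuWrWait.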
1774 
1775 // Internal version of LockWhen().  See LockSlowWithDeadline()
1776 ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
1777                                              int flags) {
1778   ABSL_RAW_CHECK(
1779       this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
1780       "condition untrue on return from LockSlow");
1781 }
1782 
1783 // Compute cond->Eval() and tell race detectors that we do it under mutex mu.
1784 static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
1785                                           bool locking, bool trylock,
1786                                           bool read_lock) {
1787   // Delicate annotation dance.
1788   // We are currently inside of a read/write lock/unlock operation.
1789   // All memory accesses are ignored inside of mutex operations; in addition,
1790   // for an unlock operation tsan considers that we've already released the mutex.
1791   bool res = false;
1792 #ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
1793   const int flags = read_lock ? __tsan_mutex_read_lock : 0;
1794   const int tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);
1795 #endif
1796   if (locking) {
1797     // For lock we pretend that we have finished the operation,
1798     // evaluate the predicate, then unlock the mutex and start locking it again
1799     // to match the annotation at the end of outer lock operation.
1800     // Note: we can't simply do POST_LOCK, Eval, PRE_LOCK, because then tsan
1801     // will think the lock acquisition is recursive which will trigger
1802     // deadlock detector.
1803     ABSL_TSAN_MUTEX_POST_LOCK(mu, tryflags, 0);
1804     res = cond->Eval();
1805     // There is no "try" version of Unlock, so use flags instead of tryflags.
1806     ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
1807     ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
1808     ABSL_TSAN_MUTEX_PRE_LOCK(mu, tryflags);
1809   } else {
1810     // Similarly, for unlock we pretend that we have unlocked the mutex,
1811     // lock the mutex, evaluate the predicate, and start unlocking it again
1812     // to match the annotation at the end of outer unlock operation.
1813     ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
1814     ABSL_TSAN_MUTEX_PRE_LOCK(mu, flags);
1815     ABSL_TSAN_MUTEX_POST_LOCK(mu, flags, 0);
1816     res = cond->Eval();
1817     ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
1818   }
1819   // Prevent unused param warnings in non-TSAN builds.
1820   static_cast<void>(mu);
1821   static_cast<void>(trylock);
1822   static_cast<void>(read_lock);
1823   return res;
1824 }
1825 
1826 // Compute cond->Eval() hiding it from race detectors.
1827 // We are hiding it because inside of UnlockSlow we can evaluate a predicate
1828 // that was just added by a concurrent Lock operation; Lock adds the predicate
1829 // to the internal Mutex list without actually acquiring the Mutex
1830 // (it only acquires the internal spinlock, which is rightfully invisible for
1831 // tsan). As a result there is no tsan-visible synchronization between the
1832 // addition and this thread. So if we enabled race detection here,
1833 // it would race with the predicate initialization.
1834 static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
1835   // Memory accesses are already ignored inside of lock/unlock operations,
1836   // but synchronization operations are also ignored. When we evaluate the
1837   // predicate we must ignore only memory accesses but not synchronization,
1838   // because missed synchronization can lead to false reports later.
1839   // So we "divert" (which un-ignores both memory accesses and synchronization)
1840   // and then separately turn on ignores of memory accesses.
1841   ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
1842   ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
1843   bool res = cond->Eval();
1844   ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
1845   ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
1846   static_cast<void>(mu);  // Prevent unused param warning in non-TSAN builds.
1847   return res;
1848 }
1849 
1850 // Internal equivalent of *LockWhenWithDeadline(), where
1851 //   "t" represents the absolute timeout; !t.has_timeout() means "forever".
1852 //   "how" is "kShared" (for ReaderLockWhen) or "kExclusive" (for LockWhen)
1853 // In flags, bits are ored together:
1854 // - kMuHasBlocked indicates that the client has already blocked on the call so
1855 //   the designated waker bit must be cleared and waiting writers should not
1856 //   obstruct this call
1857 // - kMuIsCond indicates that this is a conditional acquire (condition variable,
1858 //   Await,  LockWhen) so contention profiling should be suppressed.
1859 bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
1860                                  KernelTimeout t, int flags) {
1861   intptr_t v = mu_.load(std::memory_order_relaxed);
1862   bool unlock = false;
1863   if ((v & how->fast_need_zero) == 0 &&  // try fast acquire
1864       mu_.compare_exchange_strong(
1865           v,
1866           (how->fast_or |
1867            (v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) +
1868               how->fast_add,
1869           std::memory_order_acquire, std::memory_order_relaxed)) {
1870     if (cond == nullptr ||
1871         EvalConditionAnnotated(cond, this, true, false, how == kShared)) {
1872       return true;
1873     }
1874     unlock = true;
1875   }
1876   SynchWaitParams waitp(
1877       how, cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
1878       nullptr /*no cv_word*/);
1879   if (!Condition::GuaranteedEqual(cond, nullptr)) {
1880     flags |= kMuIsCond;
1881   }
1882   if (unlock) {
1883     this->UnlockSlow(&waitp);
1884     this->Block(waitp.thread);
1885     flags |= kMuHasBlocked;
1886   }
1887   this->LockSlowLoop(&waitp, flags);
1888   return waitp.cond != nullptr ||  // => cond known true from LockSlowLoop
1889          cond == nullptr ||
1890          EvalConditionAnnotated(cond, this, true, false, how == kShared);
1891 }
1892 
1893 // RAW_CHECK_FMT() takes a condition, a printf-style format string, and
1894 // the printf-style argument list.   The format string must be a literal.
1895 // Arguments after the first are not evaluated unless the condition is true.
1896 #define RAW_CHECK_FMT(cond, ...)                                   \
1897   do {                                                             \
1898     if (ABSL_PREDICT_FALSE(!(cond))) {                             \
1899       ABSL_RAW_LOG(FATAL, "Check " #cond " failed: " __VA_ARGS__); \
1900     }                                                              \
1901   } while (0)
1902 
1903 static void CheckForMutexCorruption(intptr_t v, const char* label) {
1904   // Test for either of two situations that should not occur in v:
1905   //   kMuWriter and kMuReader
1906   //   kMuWrWait and !kMuWait
1907   const uintptr_t w = v ^ kMuWait;
1908   // By flipping that bit, we can now test for:
1909   //   kMuWriter and kMuReader in w
1910   //   kMuWrWait and kMuWait in w
1911   // We've chosen these two pairs of values so that they will overlap,
1912   // respectively, when the word is left shifted by three.  This allows us to
1913   // save a branch in the common (correct) case of them not being coincident.
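  // For example, if v erroneously had both kMuReader and kMuWriter set, then
  // w would too, so w & (w << 3) would have kMuWriter set and the early return
  // below would be skipped, falling through to the detailed checks.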
1914   static_assert(kMuReader << 3 == kMuWriter, "must match");
1915   static_assert(kMuWait << 3 == kMuWrWait, "must match");
1916   if (ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return;
1917   RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader),
1918                 "%s: Mutex corrupt: both reader and writer lock held: %p",
1919                 label, reinterpret_cast<void *>(v));
1920   RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait,
1921                 "%s: Mutex corrupt: waiting writer with no waiters: %p",
1922                 label, reinterpret_cast<void *>(v));
1923   assert(false);
1924 }
1925 
1926 void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
1927   SchedulingGuard::ScopedDisable disable_rescheduling;
1928   int c = 0;
1929   intptr_t v = mu_.load(std::memory_order_relaxed);
1930   if ((v & kMuEvent) != 0) {
1931     PostSynchEvent(this,
1932          waitp->how == kExclusive?  SYNCH_EV_LOCK: SYNCH_EV_READERLOCK);
1933   }
1934   ABSL_RAW_CHECK(
1935       waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
1936       "detected illegal recursion into Mutex code");
1937   for (;;) {
1938     v = mu_.load(std::memory_order_relaxed);
1939     CheckForMutexCorruption(v, "Lock");
1940     if ((v & waitp->how->slow_need_zero) == 0) {
1941       if (mu_.compare_exchange_strong(
1942               v,
1943               (waitp->how->fast_or |
1944                (v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) +
1945                   waitp->how->fast_add,
1946               std::memory_order_acquire, std::memory_order_relaxed)) {
1947         if (waitp->cond == nullptr ||
1948             EvalConditionAnnotated(waitp->cond, this, true, false,
1949                                    waitp->how == kShared)) {
1950           break;  // we timed out, or condition true, so return
1951         }
1952         this->UnlockSlow(waitp);  // got lock but condition false
1953         this->Block(waitp->thread);
1954         flags |= kMuHasBlocked;
1955         c = 0;
1956       }
1957     } else {                      // need to access waiter list
1958       bool dowait = false;
1959       if ((v & (kMuSpin|kMuWait)) == 0) {   // no waiters
1960         // This thread tries to become the one and only waiter.
1961         PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags);
1962         intptr_t nv =
1963             (v & ClearDesignatedWakerMask(flags & kMuHasBlocked) & kMuLow) |
1964             kMuWait;
1965         ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to empty list failed");
1966         if (waitp->how == kExclusive && (v & kMuReader) != 0) {
1967           nv |= kMuWrWait;
1968         }
1969         if (mu_.compare_exchange_strong(
1970                 v, reinterpret_cast<intptr_t>(new_h) | nv,
1971                 std::memory_order_release, std::memory_order_relaxed)) {
1972           dowait = true;
1973         } else {            // attempted Enqueue() failed
1974           // zero out the waitp field set by Enqueue()
1975           waitp->thread->waitp = nullptr;
1976         }
1977       } else if ((v & waitp->how->slow_inc_need_zero &
1978                   IgnoreWaitingWritersMask(flags & kMuHasBlocked)) == 0) {
1979         // This is a reader that needs to increment the reader count,
1980         // but the count is currently held in the last waiter.
1981         if (mu_.compare_exchange_strong(
1982                 v,
1983                 (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
1984                     kMuSpin | kMuReader,
1985                 std::memory_order_acquire, std::memory_order_relaxed)) {
1986           PerThreadSynch *h = GetPerThreadSynch(v);
1987           h->readers += kMuOne;       // inc reader count in waiter
1988           do {                        // release spinlock
1989             v = mu_.load(std::memory_order_relaxed);
1990           } while (!mu_.compare_exchange_weak(v, (v & ~kMuSpin) | kMuReader,
1991                                               std::memory_order_release,
1992                                               std::memory_order_relaxed));
1993           if (waitp->cond == nullptr ||
1994               EvalConditionAnnotated(waitp->cond, this, true, false,
1995                                      waitp->how == kShared)) {
1996             break;  // we timed out, or condition true, so return
1997           }
1998           this->UnlockSlow(waitp);           // got lock but condition false
1999           this->Block(waitp->thread);
2000           flags |= kMuHasBlocked;
2001           c = 0;
2002         }
2003       } else if ((v & kMuSpin) == 0 &&  // attempt to queue ourselves
2004                  mu_.compare_exchange_strong(
2005                      v,
2006                      (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
2007                          kMuSpin | kMuWait,
2008                      std::memory_order_acquire, std::memory_order_relaxed)) {
2009         PerThreadSynch *h = GetPerThreadSynch(v);
2010         PerThreadSynch *new_h = Enqueue(h, waitp, v, flags);
2011         intptr_t wr_wait = 0;
2012         ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed");
2013         if (waitp->how == kExclusive && (v & kMuReader) != 0) {
2014           wr_wait = kMuWrWait;      // give priority to a waiting writer
2015         }
2016         do {                        // release spinlock
2017           v = mu_.load(std::memory_order_relaxed);
2018         } while (!mu_.compare_exchange_weak(
2019             v, (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
2020             reinterpret_cast<intptr_t>(new_h),
2021             std::memory_order_release, std::memory_order_relaxed));
2022         dowait = true;
2023       }
2024       if (dowait) {
2025         this->Block(waitp->thread);  // wait until removed from list or timeout
2026         flags |= kMuHasBlocked;
2027         c = 0;
2028       }
2029     }
2030     ABSL_RAW_CHECK(
2031         waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
2032         "detected illegal recursion into Mutex code");
2033     // delay, then try again
2034     c = synchronization_internal::MutexDelay(c, GENTLE);
2035   }
2036   ABSL_RAW_CHECK(
2037       waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
2038       "detected illegal recursion into Mutex code");
2039   if ((v & kMuEvent) != 0) {
2040     PostSynchEvent(this,
2041                    waitp->how == kExclusive? SYNCH_EV_LOCK_RETURNING :
2042                                       SYNCH_EV_READERLOCK_RETURNING);
2043   }
2044 }
2045 
2046 // Unlock this mutex, which is held by the current thread.
2047 // If waitp is non-zero, it must be the wait parameters for the current thread
2048 // which holds the lock but is not runnable because its condition is false
2049 // or it is in the process of blocking on a condition variable; it must requeue
2050 // itself on the mutex/condvar to wait for its condition to become true.
2051 ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
2052   SchedulingGuard::ScopedDisable disable_rescheduling;
2053   intptr_t v = mu_.load(std::memory_order_relaxed);
2054   this->AssertReaderHeld();
2055   CheckForMutexCorruption(v, "Unlock");
2056   if ((v & kMuEvent) != 0) {
2057     PostSynchEvent(this,
2058                 (v & kMuWriter) != 0? SYNCH_EV_UNLOCK: SYNCH_EV_READERUNLOCK);
2059   }
2060   int c = 0;
2061   // the waiter under consideration to wake, or zero
2062   PerThreadSynch *w = nullptr;
2063   // the predecessor to w or zero
2064   PerThreadSynch *pw = nullptr;
2065   // head of the list searched previously, or zero
2066   PerThreadSynch *old_h = nullptr;
2067   // a condition that's known to be false.
2068   const Condition *known_false = nullptr;
2069   PerThreadSynch *wake_list = kPerThreadSynchNull;   // list of threads to wake
2070   intptr_t wr_wait = 0;        // set to kMuWrWait if we wake a reader and a
2071                                // later writer could have acquired the lock
2072                                // (starvation avoidance)
2073   ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr ||
2074                      waitp->thread->suppress_fatal_errors,
2075                  "detected illegal recursion into Mutex code");
2076   // This loop finds the threads to wake (placing them in wake_list), if any,
2077   // and removes them from the list of waiters.  In addition, it places
2078   // waitp.thread on the queue of waiters if waitp is non-null.
2079   for (;;) {
2080     v = mu_.load(std::memory_order_relaxed);
2081     if ((v & kMuWriter) != 0 && (v & (kMuWait | kMuDesig)) != kMuWait &&
2082         waitp == nullptr) {
2083       // fast writer release (writer with no waiters or with designated waker)
2084       if (mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
2085                                       std::memory_order_release,
2086                                       std::memory_order_relaxed)) {
2087         return;
2088       }
2089     } else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) {
2090       // fast reader release (reader with no waiters)
2091       intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
2092       if (mu_.compare_exchange_strong(v, v - clear,
2093                                       std::memory_order_release,
2094                                       std::memory_order_relaxed)) {
2095         return;
2096       }
2097     } else if ((v & kMuSpin) == 0 &&  // attempt to get spinlock
2098                mu_.compare_exchange_strong(v, v | kMuSpin,
2099                                            std::memory_order_acquire,
2100                                            std::memory_order_relaxed)) {
2101       if ((v & kMuWait) == 0) {       // no one to wake
2102         intptr_t nv;
2103         bool do_enqueue = true;  // always Enqueue() the first time
2104         ABSL_RAW_CHECK(waitp != nullptr,
2105                        "UnlockSlow is confused");  // about to sleep
2106         do {    // must loop to release spinlock as reader count may change
2107           v = mu_.load(std::memory_order_relaxed);
2108           // decrement reader count if there are readers
2109           intptr_t new_readers = (v >= kMuOne)?  v - kMuOne : v;
2110           PerThreadSynch *new_h = nullptr;
2111           if (do_enqueue) {
2112             // If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then
2113             // we must not retry here.  The initial attempt will always have
2114             // succeeded; further attempts would enqueue us against *this due to
2115             // Fer() handling.
2116             do_enqueue = (waitp->cv_word == nullptr);
2117             new_h = Enqueue(nullptr, waitp, new_readers, kMuIsCond);
2118           }
2119           intptr_t clear = kMuWrWait | kMuWriter;  // by default clear write bit
2120           if ((v & kMuWriter) == 0 && ExactlyOneReader(v)) {  // last reader
2121             clear = kMuWrWait | kMuReader;                    // clear read bit
2122           }
2123           nv = (v & kMuLow & ~clear & ~kMuSpin);
2124           if (new_h != nullptr) {
2125             nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
2126           } else {  // new_h could be nullptr if we queued ourselves on a
2127                     // CondVar
2128             // In that case, we must place the reader count back in the mutex
2129             // word, as Enqueue() did not store it in the new waiter.
2130             nv |= new_readers & kMuHigh;
2131           }
2132           // release spinlock & our lock; retry if reader-count changed
2133           // (writer count cannot change since we hold lock)
2134         } while (!mu_.compare_exchange_weak(v, nv,
2135                                             std::memory_order_release,
2136                                             std::memory_order_relaxed));
2137         break;
2138       }
2139 
2140       // There are waiters.
2141       // Set h to the head of the circular waiter list.
2142       PerThreadSynch *h = GetPerThreadSynch(v);
2143       if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) {
2144         // a reader but not the last
2145         h->readers -= kMuOne;  // release our lock
2146         intptr_t nv = v;       // normally just release spinlock
2147         if (waitp != nullptr) {  // but waitp!=nullptr => must queue ourselves
2148           PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
2149           ABSL_RAW_CHECK(new_h != nullptr,
2150                          "waiters disappeared during Enqueue()!");
2151           nv &= kMuLow;
2152           nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
2153         }
2154         mu_.store(nv, std::memory_order_release);  // release spinlock
2155         // can release with a store because there were waiters
2156         break;
2157       }
2158 
2159       // Either we didn't search before, or we marked the queue
2160       // as "maybe_unlocking" and no one else should have changed it.
2161       ABSL_RAW_CHECK(old_h == nullptr || h->maybe_unlocking,
2162                      "Mutex queue changed beneath us");
2163 
2164       // The lock is becoming free, and there's a waiter
2165       if (old_h != nullptr &&
2166           !old_h->may_skip) {                  // we used old_h as a terminator
2167         old_h->may_skip = true;                // allow old_h to skip once more
2168         ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
2169         if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) {
2170           old_h->skip = old_h->next;  // old_h not head & can skip to successor
2171         }
2172       }
2173       if (h->next->waitp->how == kExclusive &&
2174           Condition::GuaranteedEqual(h->next->waitp->cond, nullptr)) {
2175         // easy case: writer with no condition; no need to search
2176         pw = h;                       // wake w, the successor of h (=pw)
2177         w = h->next;
2178         w->wake = true;
2179         // We are waking up a writer.  This writer may be racing against
2180         // an already awake reader for the lock.  We want the
2181         // writer to usually win this race,
2182         // because if it doesn't, we can potentially keep taking a reader
2183         // perpetually and writers will starve.  Worse than
2184         // that, this can also starve other readers if kMuWrWait gets set
2185         // later.
2186         wr_wait = kMuWrWait;
2187       } else if (w != nullptr && (w->waitp->how == kExclusive || h == old_h)) {
2188         // we found a waiter w to wake on a previous iteration and either it's
2189         // a writer, or we've searched the entire list so we have all the
2190         // readers.
2191         if (pw == nullptr) {  // if w's predecessor is unknown, it must be h
2192           pw = h;
2193         }
2194       } else {
2195         // At this point we don't know all the waiters to wake, and the first
2196         // waiter has a condition or is a reader.  We avoid searching over
2197         // waiters we've searched on previous iterations by starting at
2198         // old_h if it's set.  If old_h==h, there's no one to wakeup at all.
2199         if (old_h == h) {      // we've searched before, and nothing's new
2200                                // so there's no one to wake.
2201           intptr_t nv = (v & ~(kMuReader|kMuWriter|kMuWrWait));
2202           h->readers = 0;
2203           h->maybe_unlocking = false;   // finished unlocking
2204           if (waitp != nullptr) {       // we must queue ourselves and sleep
2205             PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
2206             nv &= kMuLow;
2207             if (new_h != nullptr) {
2208               nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
2209             }  // else new_h could be nullptr if we queued ourselves on a
2210                // CondVar
2211           }
2212           // release spinlock & lock
2213           // can release with a store because there were waiters
2214           mu_.store(nv, std::memory_order_release);
2215           break;
2216         }
2217 
2218         // set up to walk the list
2219         PerThreadSynch *w_walk;   // current waiter during list walk
2220         PerThreadSynch *pw_walk;  // previous waiter during list walk
2221         if (old_h != nullptr) {  // we've searched up to old_h before
2222           pw_walk = old_h;
2223           w_walk = old_h->next;
2224         } else {            // no prior search, start at beginning
2225           pw_walk =
2226               nullptr;  // h->next's predecessor may change; don't record it
2227           w_walk = h->next;
2228         }
2229 
2230         h->may_skip = false;  // ensure we never skip past h in future searches
2231                               // even if other waiters are queued after it.
2232         ABSL_RAW_CHECK(h->skip == nullptr, "illegal skip from head");
2233 
2234         h->maybe_unlocking = true;  // we're about to scan the waiter list
2235                                     // without the spinlock held.
2236                                     // Enqueue must be conservative about
2237                                     // priority queuing.
2238 
2239         // We must release the spinlock to evaluate the conditions.
2240         mu_.store(v, std::memory_order_release);  // release just spinlock
2241         // can release with a store because there were waiters
2242 
2243         // h is the last waiter queued, and w_walk the first unsearched waiter.
2244         // Without the spinlock, the locations mu_ and h->next may now change
2245         // underneath us, but since we hold the lock itself, the only legal
2246         // change is to add waiters between h and w_walk.  Therefore, it's safe
2247         // to walk the path from w_walk to h inclusive. (TryRemove() can remove
2248         // a waiter anywhere, but it acquires both the spinlock and the Mutex)
2249 
2250         old_h = h;        // remember we searched to here
2251 
2252         // Walk the path up to and including h, looking for waiters we can wake.
2253         while (pw_walk != h) {
2254           w_walk->wake = false;
2255           if (w_walk->waitp->cond ==
2256                   nullptr ||  // no condition => vacuously true OR
2257               (w_walk->waitp->cond != known_false &&
2258                // this thread's condition is not known false, AND
2259                //  is in fact true
2260                EvalConditionIgnored(this, w_walk->waitp->cond))) {
2261             if (w == nullptr) {
2262               w_walk->wake = true;    // can wake this waiter
2263               w = w_walk;
2264               pw = pw_walk;
2265               if (w_walk->waitp->how == kExclusive) {
2266                 wr_wait = kMuWrWait;
2267                 break;                // bail if waking this writer
2268               }
2269             } else if (w_walk->waitp->how == kShared) {  // wake if a reader
2270               w_walk->wake = true;
2271             } else {   // writer with true condition
2272               wr_wait = kMuWrWait;
2273             }
2274           } else {                  // can't wake; condition false
2275             known_false = w_walk->waitp->cond;  // remember last false condition
2276           }
2277           if (w_walk->wake) {   // we're waking reader w_walk
2278             pw_walk = w_walk;   // don't skip similar waiters
2279           } else {              // not waking; skip as much as possible
2280             pw_walk = Skip(w_walk);
2281           }
2282           // If pw_walk == h, then load of pw_walk->next can race with
2283           // concurrent write in Enqueue(). However, at the same time
2284           // we do not need to do the load, because we will bail out
2285           // from the loop anyway.
2286           if (pw_walk != h) {
2287             w_walk = pw_walk->next;
2288           }
2289         }
2290 
2291         continue;  // restart for(;;)-loop to wakeup w or to find more waiters
2292       }
2293       ABSL_RAW_CHECK(pw->next == w, "pw not w's predecessor");
2294       // The first (and perhaps only) waiter we've chosen to wake is w, whose
2295       // predecessor is pw.  If w is a reader, we must wake all the other
2296       // waiters with wake==true as well.  We may also need to queue
2297       // ourselves if waitp != null.  The spinlock and the lock are still
2298       // held.
2299 
2300       // This traverses the list in [ pw->next, h ], where h is the head,
2301       // removing all elements with wake==true and placing them in the
2302       // singly-linked list wake_list.  Returns the new head.
2303       h = DequeueAllWakeable(h, pw, &wake_list);
2304 
2305       intptr_t nv = (v & kMuEvent) | kMuDesig;
2306                                              // assume no waiters left,
2307                                              // set kMuDesig for INV1a
2308 
2309       if (waitp != nullptr) {  // we must queue ourselves and sleep
2310         h = Enqueue(h, waitp, v, kMuIsCond);
2311         // h is new last waiter; could be null if we queued ourselves on a
2312         // CondVar
2313       }
2314 
2315       ABSL_RAW_CHECK(wake_list != kPerThreadSynchNull,
2316                      "unexpected empty wake list");
2317 
2318       if (h != nullptr) {  // there are waiters left
2319         h->readers = 0;
2320         h->maybe_unlocking = false;     // finished unlocking
2321         nv |= wr_wait | kMuWait | reinterpret_cast<intptr_t>(h);
2322       }
2323 
2324       // release both spinlock & lock
2325       // can release with a store because there were waiters
2326       mu_.store(nv, std::memory_order_release);
2327       break;  // out of for(;;)-loop
2328     }
2329     // aggressive here; no one can proceed till we do
2330     c = synchronization_internal::MutexDelay(c, AGGRESSIVE);
2331   }                            // end of for(;;)-loop
2332 
2333   if (wake_list != kPerThreadSynchNull) {
2334     int64_t wait_cycles = 0;
2335     int64_t now = base_internal::CycleClock::Now();
2336     do {
2337       // Sample lock contention events only if the waiter was trying to acquire
2338       // the lock, not waiting on a condition variable or Condition.
2339       if (!wake_list->cond_waiter) {
2340         wait_cycles += (now - wake_list->waitp->contention_start_cycles);
2341         wake_list->waitp->contention_start_cycles = now;
2342       }
2343       wake_list = Wakeup(wake_list);              // wake waiters
2344     } while (wake_list != kPerThreadSynchNull);
2345     if (wait_cycles > 0) {
2346       mutex_tracer("slow release", this, wait_cycles);
2347       ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
2348       submit_profile_data(wait_cycles);
2349       ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
2350     }
2351   }
2352 }
2353 
2354 // Used by CondVar implementation to reacquire mutex after waking from
2355 // condition variable.  This routine is used instead of Lock() because the
2356 // waiting thread may have been moved from the condition variable queue to the
2357 // mutex queue without a wakeup, by Trans().  In that case, when the thread is
2358 // finally woken, the woken thread will believe it has been woken from the
2359 // condition variable (i.e. its PC will be in the CondVar code), when
2360 // in fact it has just been woken from the mutex.  Thus, it must enter the slow
2361 // path of the mutex in the same state as if it had just woken from the mutex.
2362 // That is, it must ensure that kMuDesig is cleared (INV1b).
2363 void Mutex::Trans(MuHow how) {
2364   this->LockSlow(how, nullptr, kMuHasBlocked | kMuIsCond);
2365 }
2366 
2367 // Used by CondVar implementation to effectively wake thread w from the
2368 // condition variable.  If this mutex is free, we simply wake the thread.
2369 // It will later acquire the mutex with high probability.  Otherwise, we
2370 // enqueue thread w on this mutex.
2371 void Mutex::Fer(PerThreadSynch *w) {
2372   SchedulingGuard::ScopedDisable disable_rescheduling;
2373   int c = 0;
2374   ABSL_RAW_CHECK(w->waitp->cond == nullptr,
2375                  "Mutex::Fer while waiting on Condition");
2376   ABSL_RAW_CHECK(!w->waitp->timeout.has_timeout(),
2377                  "Mutex::Fer while in timed wait");
2378   ABSL_RAW_CHECK(w->waitp->cv_word == nullptr,
2379                  "Mutex::Fer with pending CondVar queueing");
2380   for (;;) {
2381     intptr_t v = mu_.load(std::memory_order_relaxed);
2382     // Note: must not queue if the mutex is unlocked (nobody will wake it).
2383     // For example, we can have only kMuWait (conditional) or maybe
2384     // kMuWait|kMuWrWait.
2385     // conflicting != 0 implies that the thread being woken (w) cannot
2386     // currently take the mutex, which in turn implies that someone else
2387     // holds it and can wake w if we queue it.
2388     const intptr_t conflicting =
2389         kMuWriter | (w->waitp->how == kShared ? 0 : kMuReader);
2390     if ((v & conflicting) == 0) {
2391       w->next = nullptr;
2392       w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
2393       IncrementSynchSem(this, w);
2394       return;
2395     } else {
2396       if ((v & (kMuSpin|kMuWait)) == 0) {       // no waiters
2397         // This thread tries to become the one and only waiter.
2398         PerThreadSynch *new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
2399         ABSL_RAW_CHECK(new_h != nullptr,
2400                        "Enqueue failed");  // we must queue ourselves
2401         if (mu_.compare_exchange_strong(
2402                 v, reinterpret_cast<intptr_t>(new_h) | (v & kMuLow) | kMuWait,
2403                 std::memory_order_release, std::memory_order_relaxed)) {
2404           return;
2405         }
2406       } else if ((v & kMuSpin) == 0 &&
2407                  mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) {
2408         PerThreadSynch *h = GetPerThreadSynch(v);
2409         PerThreadSynch *new_h = Enqueue(h, w->waitp, v, kMuIsCond);
2410         ABSL_RAW_CHECK(new_h != nullptr,
2411                        "Enqueue failed");  // we must queue ourselves
2412         do {
2413           v = mu_.load(std::memory_order_relaxed);
2414         } while (!mu_.compare_exchange_weak(
2415             v,
2416             (v & kMuLow & ~kMuSpin) | kMuWait |
2417                 reinterpret_cast<intptr_t>(new_h),
2418             std::memory_order_release, std::memory_order_relaxed));
2419         return;
2420       }
2421     }
2422     c = synchronization_internal::MutexDelay(c, GENTLE);
2423   }
2424 }
2425 
2426 void Mutex::AssertHeld() const {
2427   if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {
2428     SynchEvent *e = GetSynchEvent(this);
2429     ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s",
2430                  static_cast<const void *>(this),
2431                  (e == nullptr ? "" : e->name));
2432   }
2433 }
2434 
2435 void Mutex::AssertReaderHeld() const {
2436   if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) {
2437     SynchEvent *e = GetSynchEvent(this);
2438     ABSL_RAW_LOG(
2439         FATAL, "thread should hold at least a read lock on Mutex %p %s",
2440         static_cast<const void *>(this), (e == nullptr ? "" : e->name));
2441   }
2442 }
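
// Hedged usage sketch for the assertions above (illustrative only, not part
// of the original source; "GetLocked", "mu_", and "value_" are assumed
// example names):
//
//   // Caller must hold mu_ in at least read mode.
//   int GetLocked() const {
//     mu_.AssertReaderHeld();   // fatal-logs if no reader or writer holds mu_
//     return value_;
//   }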
2443 
2444 // -------------------------------- condition variables
2445 static const intptr_t kCvSpin = 0x0001L;   // spinlock protects waiter list
2446 static const intptr_t kCvEvent = 0x0002L;  // record events
2447 
2448 static const intptr_t kCvLow = 0x0003L;  // low order bits of CV
2449 
2450 // Hack to make constant values available to gdb pretty printer
2451 enum { kGdbCvSpin = kCvSpin, kGdbCvEvent = kCvEvent, kGdbCvLow = kCvLow, };
2452 
2453 static_assert(PerThreadSynch::kAlignment > kCvLow,
2454               "PerThreadSynch::kAlignment must be greater than kCvLow");
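
// A hedged illustration (not part of the original source) of the word layout
// implied by the constants and static_assert above: cv_ packs a PerThreadSynch
// pointer together with the two low-order flag bits.
//
//   intptr_t v = cv_.load(std::memory_order_relaxed);
//   PerThreadSynch *waiters = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
//   bool spin_held  = (v & kCvSpin) != 0;    // waiter-list spinlock bit
//   bool log_events = (v & kCvEvent) != 0;   // event-recording bit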
2455 
2456 void CondVar::EnableDebugLog(const char *name) {
2457   SynchEvent *e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
2458   e->log = true;
2459   UnrefSynchEvent(e);
2460 }
2461 
2462 CondVar::~CondVar() {
2463   if ((cv_.load(std::memory_order_relaxed) & kCvEvent) != 0) {
2464     ForgetSynchEvent(&this->cv_, kCvEvent, kCvSpin);
2465   }
2466 }
2467 
2468 
2469 // Remove thread s from the list of waiters on this condition variable.
2470 void CondVar::Remove(PerThreadSynch *s) {
2471   SchedulingGuard::ScopedDisable disable_rescheduling;
2472   intptr_t v;
2473   int c = 0;
2474   for (v = cv_.load(std::memory_order_relaxed);;
2475        v = cv_.load(std::memory_order_relaxed)) {
2476     if ((v & kCvSpin) == 0 &&  // attempt to acquire spinlock
2477         cv_.compare_exchange_strong(v, v | kCvSpin,
2478                                     std::memory_order_acquire,
2479                                     std::memory_order_relaxed)) {
2480       PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
2481       if (h != nullptr) {
2482         PerThreadSynch *w = h;
2483         while (w->next != s && w->next != h) {  // search for thread
2484           w = w->next;
2485         }
2486         if (w->next == s) {           // found thread; remove it
2487           w->next = s->next;
2488           if (h == s) {
2489             h = (w == s) ? nullptr : w;
2490           }
2491           s->next = nullptr;
2492           s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
2493         }
2494       }
2495                                       // release spinlock
2496       cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
2497                 std::memory_order_release);
2498       return;
2499     } else {
2500       // try again after a delay
2501       c = synchronization_internal::MutexDelay(c, GENTLE);
2502     }
2503   }
2504 }
2505 
2506 // Queue thread waitp->thread on condition variable word cv_word using
2507 // wait parameters waitp.
2508 // We split this into a separate routine, rather than simply doing it as part
2509 // of WaitCommon().  If we were to queue ourselves on the condition variable
2510 // before calling Mutex::UnlockSlow(), the Mutex code might be re-entered (via
2511 // the logging code, or via a Condition function) and might potentially attempt
2512 // to block this thread.  That would be a problem if the thread were already on
2513 // a condition variable waiter queue.  Thus, we use the waitp->cv_word to tell
2514 // the unlock code to call CondVarEnqueue() to queue the thread on the condition
2515 // variable queue just before the mutex is to be unlocked, and (most
2516 // importantly) after any call to an external routine that might re-enter the
2517 // mutex code.
2518 static void CondVarEnqueue(SynchWaitParams *waitp) {
2519   // This thread might be transferred to the Mutex queue by Fer() when
2520   // we are woken.  To make sure that is what happens, Enqueue() doesn't
2521   // call CondVarEnqueue() again but instead uses its normal code.  We
2522   // must do this before we queue ourselves so that cv_word will be null
2523   // when seen by the dequeuer, who may wish immediately to requeue
2524   // this thread on another queue.
2525   std::atomic<intptr_t> *cv_word = waitp->cv_word;
2526   waitp->cv_word = nullptr;
2527 
2528   intptr_t v = cv_word->load(std::memory_order_relaxed);
2529   int c = 0;
2530   while ((v & kCvSpin) != 0 ||  // acquire spinlock
2531          !cv_word->compare_exchange_weak(v, v | kCvSpin,
2532                                          std::memory_order_acquire,
2533                                          std::memory_order_relaxed)) {
2534     c = synchronization_internal::MutexDelay(c, GENTLE);
2535     v = cv_word->load(std::memory_order_relaxed);
2536   }
2537   ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
2538   waitp->thread->waitp = waitp;      // prepare ourselves for waiting
2539   PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
2540   if (h == nullptr) {  // add this thread to waiter list
2541     waitp->thread->next = waitp->thread;
2542   } else {
2543     waitp->thread->next = h->next;
2544     h->next = waitp->thread;
2545   }
2546   waitp->thread->state.store(PerThreadSynch::kQueued,
2547                              std::memory_order_relaxed);
2548   cv_word->store((v & kCvEvent) | reinterpret_cast<intptr_t>(waitp->thread),
2549                  std::memory_order_release);
2550 }
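
// A hedged summary of the enqueue protocol described above (assumed ordering,
// for orientation only):
//   1. CondVar::WaitCommon() below builds a SynchWaitParams whose cv_word
//      points at this->cv_.
//   2. Mutex::UnlockSlow() sees the non-null cv_word and calls
//      CondVarEnqueue() just before the mutex word is released.
//   3. CondVarEnqueue() clears waitp->cv_word, links the thread into the
//      circular waiter list, and publishes the updated list in cv_.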
2551 
2552 bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
2553   bool rc = false;          // return value; true iff we timed out
2554 
2555   intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed);
2556   Mutex::MuHow mutex_how = ((mutex_v & kMuWriter) != 0) ? kExclusive : kShared;
2557   ABSL_TSAN_MUTEX_PRE_UNLOCK(mutex, TsanFlags(mutex_how));
2558 
2559   // maybe trace this call
2560   intptr_t v = cv_.load(std::memory_order_relaxed);
2561   cond_var_tracer("Wait", this);
2562   if ((v & kCvEvent) != 0) {
2563     PostSynchEvent(this, SYNCH_EV_WAIT);
2564   }
2565 
2566   // Release mu and wait on condition variable.
2567   SynchWaitParams waitp(mutex_how, nullptr, t, mutex,
2568                         Synch_GetPerThreadAnnotated(mutex), &cv_);
2569   // UnlockSlow() will call CondVarEnqueue() just before releasing the
2570   // Mutex, thus queuing this thread on the condition variable.  See
2571   // CondVarEnqueue() for the reasons.
2572   mutex->UnlockSlow(&waitp);
2573 
2574   // wait for signal
2575   while (waitp.thread->state.load(std::memory_order_acquire) ==
2576          PerThreadSynch::kQueued) {
2577     if (!Mutex::DecrementSynchSem(mutex, waitp.thread, t)) {
2578       // DecrementSynchSem returned due to timeout.
2579       // Now we will either (1) remove ourselves from the wait list in Remove
2580       // below, in which case Remove will set thread.state = kAvailable and
2581       // we will not call DecrementSynchSem again; or (2) Signal/SignalAll
2582       // has removed us concurrently and is calling Wakeup, which will set
2583       // thread.state = kAvailable and post to the semaphore.
2584       // It's important to reset the timeout for case (2) because otherwise
2585       // we can live-lock in this loop, since DecrementSynchSem will always
2586       // return immediately due to the timeout, while Signal/SignalAll has not
2587       // necessarily set thread.state = kAvailable yet (and may not be scheduled
2588       // due to thread priorities or other scheduler artifacts).
2589       // Note this could also be resolved if Signal/SignalAll would set
2590       // thread.state = kAvailable while holding the wait list spin lock.
2591       // But this can't be easily done for SignalAll since it grabs the whole
2592       // wait list with a single compare-exchange and does not really grab
2593       // the spin lock.
2594       t = KernelTimeout::Never();
2595       this->Remove(waitp.thread);
2596       rc = true;
2597     }
2598   }
2599 
2600   ABSL_RAW_CHECK(waitp.thread->waitp != nullptr, "not waiting when should be");
2601   waitp.thread->waitp = nullptr;  // cleanup
2602 
2603   // maybe trace this call
2604   cond_var_tracer("Unwait", this);
2605   if ((v & kCvEvent) != 0) {
2606     PostSynchEvent(this, SYNCH_EV_WAIT_RETURNING);
2607   }
2608 
2609   // From synchronization point of view Wait is unlock of the mutex followed
2610   // by lock of the mutex. We've annotated start of unlock in the beginning
2611   // of the function. Now, finish unlock and annotate lock of the mutex.
2612   // (Trans is effectively lock).
2613   ABSL_TSAN_MUTEX_POST_UNLOCK(mutex, TsanFlags(mutex_how));
2614   ABSL_TSAN_MUTEX_PRE_LOCK(mutex, TsanFlags(mutex_how));
2615   mutex->Trans(mutex_how);  // Reacquire mutex
2616   ABSL_TSAN_MUTEX_POST_LOCK(mutex, TsanFlags(mutex_how), 0);
2617   return rc;
2618 }
2619 
2620 bool CondVar::WaitWithTimeout(Mutex *mu, absl::Duration timeout) {
2621   return WaitWithDeadline(mu, DeadlineFromTimeout(timeout));
2622 }
2623 
2624 bool CondVar::WaitWithDeadline(Mutex *mu, absl::Time deadline) {
2625   return WaitCommon(mu, KernelTimeout(deadline));
2626 }
2627 
2628 void CondVar::Wait(Mutex *mu) {
2629   WaitCommon(mu, KernelTimeout::Never());
2630 }
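
// Hedged usage sketch for the wait-side API above (illustrative only; "mu",
// "cv", and "ready" are assumed example variables, not part of this file):
//
//   absl::Mutex mu;
//   absl::CondVar cv;
//   bool ready = false;        // guarded by mu
//
//   mu.Lock();
//   while (!ready) {
//     cv.Wait(&mu);            // atomically releases mu, then reacquires it
//   }
//   mu.Unlock();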
2631 
2632 // Wake thread w.
2633 // If w was in a timed wait, or has no Mutex to be transferred to
2634 // (w->waitp->cvmu == nullptr), it is woken directly via its per-thread
2635 // semaphore.  Otherwise, w is transferred to the Mutex cvmu via Mutex::Fer().
2636 void CondVar::Wakeup(PerThreadSynch *w) {
2637   if (w->waitp->timeout.has_timeout() || w->waitp->cvmu == nullptr) {
2638     // The waiting thread only needs to observe "w->state == kAvailable" to be
2639     // released, so we must cache "cvmu" before clearing "next".
2640     Mutex *mu = w->waitp->cvmu;
2641     w->next = nullptr;
2642     w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
2643     Mutex::IncrementSynchSem(mu, w);
2644   } else {
2645     w->waitp->cvmu->Fer(w);
2646   }
2647 }
2648 
2649 void CondVar::Signal() {
2650   SchedulingGuard::ScopedDisable disable_rescheduling;
2651   ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
2652   intptr_t v;
2653   int c = 0;
2654   for (v = cv_.load(std::memory_order_relaxed); v != 0;
2655        v = cv_.load(std::memory_order_relaxed)) {
2656     if ((v & kCvSpin) == 0 &&  // attempt to acquire spinlock
2657         cv_.compare_exchange_strong(v, v | kCvSpin,
2658                                     std::memory_order_acquire,
2659                                     std::memory_order_relaxed)) {
2660       PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
2661       PerThreadSynch *w = nullptr;
2662       if (h != nullptr) {  // remove first waiter
2663         w = h->next;
2664         if (w == h) {
2665           h = nullptr;
2666         } else {
2667           h->next = w->next;
2668         }
2669       }
2670                                       // release spinlock
2671       cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
2672                 std::memory_order_release);
2673       if (w != nullptr) {
2674         CondVar::Wakeup(w);                // wake waiter, if there was one
2675         cond_var_tracer("Signal wakeup", this);
2676       }
2677       if ((v & kCvEvent) != 0) {
2678         PostSynchEvent(this, SYNCH_EV_SIGNAL);
2679       }
2680       ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
2681       return;
2682     } else {
2683       c = synchronization_internal::MutexDelay(c, GENTLE);
2684     }
2685   }
2686   ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
2687 }
2688 
2689 void CondVar::SignalAll() {
2690   ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
2691   intptr_t v;
2692   int c = 0;
2693   for (v = cv_.load(std::memory_order_relaxed); v != 0;
2694        v = cv_.load(std::memory_order_relaxed)) {
2695     // Empty the list if the spinlock is free.
2696     // We do this by simply setting the list to empty using
2697     // compare-and-swap.  We then have the entire list in our hands,
2698     // which cannot be changing since we grabbed it while no one
2699     // held the lock.
2700     if ((v & kCvSpin) == 0 &&
2701         cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire,
2702                                     std::memory_order_relaxed)) {
2703       PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
2704       if (h != nullptr) {
2705         PerThreadSynch *w;
2706         PerThreadSynch *n = h->next;
2707         do {                          // for every thread, wake it up
2708           w = n;
2709           n = n->next;
2710           CondVar::Wakeup(w);
2711         } while (w != h);
2712         cond_var_tracer("SignalAll wakeup", this);
2713       }
2714       if ((v & kCvEvent) != 0) {
2715         PostSynchEvent(this, SYNCH_EV_SIGNALALL);
2716       }
2717       ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
2718       return;
2719     } else {
2720       // try again after a delay
2721       c = synchronization_internal::MutexDelay(c, GENTLE);
2722     }
2723   }
2724   ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
2725 }
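
// Hedged usage sketch for the signal-side API above (illustrative only;
// continues the assumed "mu"/"cv"/"ready" example from the Wait() sketch):
//
//   mu.Lock();
//   ready = true;
//   cv.Signal();      // wakes at most one waiter; SignalAll() wakes them all
//   mu.Unlock();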
2726 
2727 void ReleasableMutexLock::Release() {
2728   ABSL_RAW_CHECK(this->mu_ != nullptr,
2729                  "ReleasableMutexLock::Release may only be called once");
2730   this->mu_->Unlock();
2731   this->mu_ = nullptr;
2732 }
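
// Hedged usage sketch (illustrative only; "mu" and DoUnlockedWork() are
// assumed example names):
//
//   {
//     absl::ReleasableMutexLock l(&mu);
//     // ... work that needs mu ...
//     l.Release();         // may be called at most once, per the check above
//     DoUnlockedWork();    // runs without holding mu
//   }                      // destructor is a no-op after Release()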
2733 
2734 #ifdef ABSL_HAVE_THREAD_SANITIZER
2735 extern "C" void __tsan_read1(void *addr);
2736 #else
2737 #define __tsan_read1(addr)  // do nothing if TSan not enabled
2738 #endif
2739 
2740 // A function that just returns its argument, dereferenced
2741 static bool Dereference(void *arg) {
2742   // ThreadSanitizer does not instrument this file for memory accesses.
2743   // This function dereferences a user variable that can participate
2744   // in a data race, so we need to manually tell TSan about this memory access.
2745   __tsan_read1(arg);
2746   return *(static_cast<bool *>(arg));
2747 }
2748 
2749 Condition::Condition() {}   // null constructor, used for kTrue only
2750 const Condition Condition::kTrue;
2751 
2752 Condition::Condition(bool (*func)(void *), void *arg)
2753     : eval_(&CallVoidPtrFunction),
2754       function_(func),
2755       method_(nullptr),
2756       arg_(arg) {}
2757 
2758 bool Condition::CallVoidPtrFunction(const Condition *c) {
2759   return (*c->function_)(c->arg_);
2760 }
2761 
2762 Condition::Condition(const bool *cond)
2763     : eval_(CallVoidPtrFunction),
2764       function_(Dereference),
2765       method_(nullptr),
2766       // const_cast is safe since Dereference does not modify arg
2767       arg_(const_cast<bool *>(cond)) {}
2768 
2769 bool Condition::Eval() const {
2770   // eval_ == null for kTrue
2771   return (this->eval_ == nullptr) || (*this->eval_)(this);
2772 }
2773 
2774 bool Condition::GuaranteedEqual(const Condition *a, const Condition *b) {
2775   if (a == nullptr) {
2776     return b == nullptr || b->eval_ == nullptr;
2777   }
2778   if (b == nullptr || b->eval_ == nullptr) {
2779     return a->eval_ == nullptr;
2780   }
2781   return a->eval_ == b->eval_ && a->function_ == b->function_ &&
2782          a->arg_ == b->arg_ && a->method_ == b->method_;
2783 }
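
// Hedged usage sketch for the Condition constructors above (illustrative only;
// "mu" and "done" are assumed example variables):
//
//   absl::Mutex mu;
//   bool done = false;                    // guarded by mu
//   const absl::Condition done_cond(&done);
//
//   mu.LockWhen(done_cond);               // blocks until done == true
//   mu.Unlock();
//
//   // Equivalent form using the function-pointer constructor:
//   //   bool IsDone(void *arg) { return *static_cast<bool *>(arg); }
//   //   absl::Condition c(&IsDone, &done);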
2784 
2785 ABSL_NAMESPACE_END
2786 }  // namespace absl
2787