7 // https://www.apache.org/licenses/LICENSE-2.0
177 // --------------------------Generic atomic ops
186 v = pv->load(std::memory_order_relaxed); in AtomicSetBits()
189 !pv->compare_exchange_weak(v, v | bits, in AtomicSetBits()
202 v = pv->load(std::memory_order_relaxed); in AtomicClearBits()
205 !pv->compare_exchange_weak(v, v & ~bits, in AtomicClearBits()
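The two fragments above are the retry loops of AtomicSetBits() and AtomicClearBits(): load the word, then compare_exchange_weak until the requested bits are set (v | bits) or cleared (v & ~bits). A minimal self-contained sketch of the same pattern follows; the function name, the memory orders, and the absence of the extra wait-mask parameter are simplifications, not the exact Abseil helpers.

    #include <atomic>
    #include <cstdint>

    // Atomically OR "bits" into *pv, retrying until they are visible.
    void SetBits(std::atomic<intptr_t>* pv, intptr_t bits) {
      intptr_t v = pv->load(std::memory_order_relaxed);
      // On CAS failure (spurious or because *pv changed) v is reloaded,
      // so each retry works with a fresh value of the word.
      while ((v & bits) != bits &&
             !pv->compare_exchange_weak(v, v | bits,
                                        std::memory_order_release,
                                        std::memory_order_relaxed)) {
      }
    }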
210 //------------------------------------------------------------------
220 //------------------------------------------------------------------
290 // buckets have linear, 0-terminated chains
304 char name[1]; // actually longer---NUL-terminated string
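SynchEvent ends with char name[1] precisely so that one allocation can hold both the struct and its NUL-terminated name; the buckets above then chain such events through 0-terminated next pointers. A hedged sketch of that over-allocation idiom, using plain malloc instead of Abseil's low-level allocator and an illustrative Event type:

    #include <cstddef>
    #include <cstdlib>
    #include <cstring>

    struct Event {
      Event* next;    // 0-terminated bucket chain
      int refcount;
      char name[1];   // actually longer: the string is stored inline
    };

    Event* NewEvent(const char* name) {
      std::size_t len = std::strlen(name);
      // sizeof(Event) already includes name[0], which holds the NUL.
      Event* e = static_cast<Event*>(std::malloc(sizeof(Event) + len));
      e->next = nullptr;
      e->refcount = 1;
      std::memcpy(e->name, name, len + 1);
      return e;
    }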
322 e != nullptr && e->masked_addr != base_internal::HidePtr(addr); in EnsureSynchEvent()
323 e = e->next) { in EnsureSynchEvent()
332 e->refcount = 2; // one for return value, one for linked list in EnsureSynchEvent()
333 e->masked_addr = base_internal::HidePtr(addr); in EnsureSynchEvent()
334 e->invariant = nullptr; in EnsureSynchEvent()
335 e->arg = nullptr; in EnsureSynchEvent()
336 e->log = false; in EnsureSynchEvent()
337 strcpy(e->name, name); // NOLINT(runtime/printf) in EnsureSynchEvent()
338 e->next = synch_event[h]; in EnsureSynchEvent()
342 e->refcount++; // for return value in EnsureSynchEvent()
357 bool del = (--(e->refcount) == 0); in UnrefSynchEvent()
375 (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr); in ForgetSynchEvent()
376 pe = &e->next) { in ForgetSynchEvent()
380 *pe = e->next; in ForgetSynchEvent()
381 del = (--(e->refcount) == 0); in ForgetSynchEvent()
398 e != nullptr && e->masked_addr != base_internal::HidePtr(addr); in GetSynchEvent()
399 e = e->next) { in GetSynchEvent()
402 e->refcount++; in GetSynchEvent()
414 if (e == nullptr || e->log) { in PostSynchEvent()
418 // 64-bit machine. in PostSynchEvent()
422 pos += snprintf(&buffer[pos], sizeof (buffer) - pos, " %p", pcs[i]); in PostSynchEvent()
425 (e == nullptr ? "" : e->name), buffer); in PostSynchEvent()
428 if ((flags & SYNCH_F_LCK) != 0 && e != nullptr && e->invariant != nullptr) { in PostSynchEvent()
437 (*ev->invariant)(ev->arg); in PostSynchEvent()
451 //------------------------------------------------------------------
484 KernelTimeout timeout; // timeout expiry---absolute time
520 ret->n = 0; in LocksHeldAlloc()
521 ret->overflow = false; in LocksHeldAlloc()
525 // Return the PerThreadSynch-struct for this thread.
528 return &identity->per_thread_synch; in Synch_GetPerThread()
544 if (s->all_locks == nullptr) { in Synch_GetAllLocks()
545 s->all_locks = LocksHeldAlloc(); // Freed by ReclaimThreadIdentity. in Synch_GetAllLocks()
547 return s->all_locks; in Synch_GetAllLocks()
555 PerThreadSem::Post(w->thread_identity()); in IncrementSynchSem()
579 // re-entry is detected. The ABSL_RAW_CHECK() statements are those in the
582 // Fix the per-thread state only if it exists. in InternalAttemptToUseMutexInFatalSignalHandler()
585 identity->per_thread_synch.suppress_fatal_errors = true; in InternalAttemptToUseMutexInFatalSignalHandler()
592 // --------------------------time support
607 // --------------------------Mutexes
614 // bit-twiddling trick in Mutex::Unlock().
616 // to enable the bit-twiddling trick in CheckForMutexCorruption().
628 // unblocked reset the bit when they either acquire or re-block
680 // we're now slow-path). kMuWrWait may be
722 ForgetSynchEvent(&this->mu_, kMuEvent, kMuSpin); in ~Mutex()
725 this->ForgetDeadlockInfo(); in ~Mutex()
731 SynchEvent *e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin); in EnableDebugLog()
732 e->log = true; in EnableDebugLog()
744 SynchEvent *e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin); in EnableInvariantDebugging()
745 e->invariant = invariant; in EnableInvariantDebugging()
746 e->arg = arg; in EnableInvariantDebugging()
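EnableDebugLog() and EnableInvariantDebugging() are the public hooks behind these SynchEvent fields. A hedged usage sketch (the invariant callback is only invoked once invariant checking is switched on, e.g. with absl::EnableMutexInvariantDebugging; the Account type is illustrative):

    #include <cassert>
    #include <cstdint>
    #include "absl/synchronization/mutex.h"

    struct Account {
      absl::Mutex mu;
      int64_t balance = 0;
    };

    // Checked around lock/unlock once invariant debugging is enabled.
    void CheckBalanceNonNegative(void* arg) {
      assert(static_cast<Account*>(arg)->balance >= 0);
    }

    void SetUpDebugging(Account* acct) {
      absl::EnableMutexInvariantDebugging(true);      // global switch
      acct->mu.EnableDebugLog("account_mu");          // log this mutex's events
      acct->mu.EnableInvariantDebugging(CheckBalanceNonNegative, acct);
    }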
759 return x->waitp->how == y->waitp->how && in MuSameCondition()
760 Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond); in MuSameCondition()
769 // The next several routines maintain the per-thread next and skip fields
771 // The queue is a circular singly-linked list, of which the "head" is the
772 // last element, and head->next is the first element.
774 // For thread x, x->skip is one of:
775 // - invalid (iff x is not in a Mutex wait queue),
776 // - null, or
777 // - a pointer to a distinct thread waiting later in the same Mutex queue
778 // such that all threads in [x, x->skip] have the same condition and
779 // lock type (MuSameCondition() is true for all pairs in [x, x->skip]).
780 // In addition, if x->skip is valid, (x->may_skip || x->skip == null)
784 // field of another thread x because if x->skip==y, x->skip must (have) become
789 // if (x->may_skip && MuSameCondition(x, x->next)) { x->skip = x->next; }
792 // if (x->skip != null) { x->skip = x->skip->skip; }
800 // - spinlock is held in call from Enqueue(), with maybe_unlocking == false
801 // - Mutex is held in call from UnlockSlow() by last unlocker, with
803 // - both Mutex and spinlock are held in call from DequeueAllWakeable() (from
808 // - FixSkip() (from TryRemove()) (spinlock and Mutex are held)
809 // - Dequeue() (with spinlock and Mutex held)
810 // - UnlockSlow() (with spinlock and Mutex held)
812 // - Enqueue() (with spinlock held and maybe_unlocking == false)
814 // - Enqueue() (without spinlock held; but queue is empty and being freshly
816 // - Enqueue() (with spinlock held and maybe_unlocking == true)
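The skip pointers behave like path compression in a union-find structure: following a chain flattens it, so later queue searches jump over whole runs of waiters that share a condition and lock type. A standalone sketch of that compression step under the invariant described above (Waiter is an illustrative stand-in for PerThreadSynch):

    struct Waiter {
      Waiter* next;  // circular wait-queue link
      Waiter* skip;  // if non-null, every waiter in (this, skip] is equivalent
    };

    // Return the last waiter equivalent to x, compressing the chain so that
    // future traversals reach it in (nearly) one hop.
    Waiter* SkipChainEnd(Waiter* x) {
      Waiter* x0 = nullptr;
      Waiter* x1 = x;
      Waiter* x2 = x->skip;
      if (x2 != nullptr) {
        while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
          x0->skip = x2;  // short-circuit x0 straight to x2
        }
        x->skip = x1;     // x now skips to the end of its chain
      }
      return x1;
    }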
826 PerThreadSynch *x2 = x->skip; in Skip()
829 // such that x1 == x0->skip && x2 == x1->skip in Skip()
830 while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) { in Skip()
831 x0->skip = x2; // short-circuit skip from x0 to x2 in Skip()
833 x->skip = x1; // short-circuit skip from x to result in Skip()
843 if (ancestor->skip == to_be_removed) { // ancestor->skip left dangling in FixSkip()
844 if (to_be_removed->skip != nullptr) { in FixSkip()
845 ancestor->skip = to_be_removed->skip; // can skip past to_be_removed in FixSkip()
846 } else if (ancestor->next != to_be_removed) { // they are not adjacent in FixSkip()
847 ancestor->skip = ancestor->next; // can skip one past ancestor in FixSkip()
849 ancestor->skip = nullptr; // can't skip at all in FixSkip()
856 // Enqueue thread "waitp->thread" on a waiter queue.
858 // If head==nullptr and waitp->cv_word==nullptr, then Enqueue() is
862 // If waitp->cv_word == nullptr, queue the thread at either the front or
869 // non-trivial condition. In this case, Enqueue() never returns nullptr
871 // If waitp->cv_word != nullptr, CondVarEnqueue() is called, and "head" is
879 if (waitp->cv_word != nullptr) { in Enqueue()
884 PerThreadSynch *s = waitp->thread; in Enqueue()
886 s->waitp == nullptr || // normal case in Enqueue()
887 s->waitp == waitp || // Fer()---transfer from condition variable in Enqueue()
888 s->suppress_fatal_errors, in Enqueue()
890 s->waitp = waitp; in Enqueue()
891 s->skip = nullptr; // maintain skip invariant (see above) in Enqueue()
892 s->may_skip = true; // always true on entering queue in Enqueue()
893 s->wake = false; // not being woken in Enqueue()
894 s->cond_waiter = ((flags & kMuIsCond) != 0); in Enqueue()
896 s->next = s; // it's the only entry in the cycle in Enqueue()
897 s->readers = mu; // reader count is from mu word in Enqueue()
898 s->maybe_unlocking = false; // no one is searching an empty list in Enqueue()
904 if (s->next_priority_read_cycles < now_cycles) { in Enqueue()
914 s->priority = param.sched_priority; in Enqueue()
915 s->next_priority_read_cycles = in Enqueue()
920 if (s->priority > head->priority) { // s's priority is above head's in Enqueue()
921 // try to put s in priority-fifo order, or failing that at the front. in Enqueue()
922 if (!head->maybe_unlocking) { in Enqueue()
924 // skip-chains, and within a skip-chain if it has the same condition as in Enqueue()
925 // s. We insert in priority-fifo order, examining the end of every in Enqueue()
926 // skip-chain, plus every element with the same condition as s. in Enqueue()
931 cur = enqueue_after->next; // this advance ensures progress in Enqueue()
933 // (side-effect: optimizes skip chain) in Enqueue()
934 if (advance_to != cur && s->priority > advance_to->priority && in Enqueue()
938 // so we can insert within the skip-chain in Enqueue()
941 } while (s->priority <= advance_to->priority); in Enqueue()
942 // termination guaranteed because s->priority > head->priority in Enqueue()
944 } else if (waitp->how == kExclusive && in Enqueue()
945 Condition::GuaranteedEqual(waitp->cond, nullptr)) { in Enqueue()
954 s->next = enqueue_after->next; in Enqueue()
955 enqueue_after->next = s; in Enqueue()
958 // The first two imply enqueue_after->skip == nullptr, and in Enqueue()
960 // We require this because clearing enqueue_after->skip in Enqueue()
965 enqueue_after->skip == nullptr || MuSameCondition(enqueue_after, s), in Enqueue()
968 if (enqueue_after != head && enqueue_after->may_skip && in Enqueue()
969 MuSameCondition(enqueue_after, enqueue_after->next)) { in Enqueue()
971 enqueue_after->skip = enqueue_after->next; in Enqueue()
973 if (MuSameCondition(s, s->next)) { // s->may_skip is known to be true in Enqueue()
974 s->skip = s->next; // s may skip to its successor in Enqueue()
979 s->next = head->next; // add s after head in Enqueue()
980 head->next = s; in Enqueue()
981 s->readers = head->readers; // reader count is from previous head in Enqueue()
982 s->maybe_unlocking = head->maybe_unlocking; // same for unlock hint in Enqueue()
983 if (head->may_skip && MuSameCondition(head, s)) { in Enqueue()
985 head->skip = s; in Enqueue()
990 s->state.store(PerThreadSynch::kQueued, std::memory_order_relaxed); in Enqueue()
994 // Dequeue the successor pw->next of thread pw from the Mutex waiter queue
999 PerThreadSynch *w = pw->next; in Dequeue()
1000 pw->next = w->next; // snip w out of list in Dequeue()
1003 } else if (pw != head && MuSameCondition(pw, pw->next)) { in Dequeue()
1005 if (pw->next->skip != in Dequeue()
1007 pw->skip = pw->next->skip; in Dequeue()
1009 pw->skip = pw->next; in Dequeue()
1015 // Traverse the elements [ pw->next, h] of the circular list whose last element
1018 // singly-linked list wake_list in the order found. Assumes that
1025 PerThreadSynch *w = pw->next; in DequeueAllWakeable()
1028 if (w->wake) { // remove this element in DequeueAllWakeable()
1029 ABSL_RAW_CHECK(pw->skip == nullptr, "bad skip in DequeueAllWakeable"); in DequeueAllWakeable()
1030 // we're removing pw's successor so either pw->skip is zero or we should in DequeueAllWakeable()
1031 // already have removed pw since if pw->skip!=null, pw has the same in DequeueAllWakeable()
1034 w->next = *wake_tail; // keep list terminated in DequeueAllWakeable()
1036 wake_tail = &w->next; // next addition to end in DequeueAllWakeable()
1037 if (w->waitp->how == kExclusive) { // wake at most 1 writer in DequeueAllWakeable()
1044 w = pw->next; in DequeueAllWakeable()
1071 if ((w = pw->next) != s) { // search for thread, in TryRemove()
1084 } while ((w = pw->next) != s && pw != h); in TryRemove()
1087 // pw->skip may be non-zero here; the loop above ensured that in TryRemove()
1090 s->next = nullptr; in TryRemove()
1091 s->state.store(PerThreadSynch::kAvailable, std::memory_order_release); in TryRemove()
1100 h->readers = 0; // we hold writer lock in TryRemove()
1101 h->maybe_unlocking = false; // finished unlocking in TryRemove()
1110 // this mutex's waiter queue. If "s->waitp->timeout" has a timeout, wake up
1115 while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) { in Block()
1116 if (!DecrementSynchSem(this, s, s->waitp->timeout)) { in Block()
1122 this->TryRemove(s); in Block()
1124 while (s->next != nullptr) { in Block()
1126 this->TryRemove(s); in Block()
1131 this->TryRemove(s); in Block()
1133 s->waitp->timeout = KernelTimeout::Never(); // timeout is satisfied in Block()
1134 s->waitp->cond = nullptr; // condition no longer relevant for wakeups in Block()
1137 ABSL_RAW_CHECK(s->waitp != nullptr || s->suppress_fatal_errors, in Block()
1139 s->waitp = nullptr; in Block()
1144 PerThreadSynch *next = w->next; in Wakeup()
1145 w->next = nullptr; in Wakeup()
1146 w->state.store(PerThreadSynch::kAvailable, std::memory_order_release); in Wakeup()
1159 return deadlock_graph->GetId(mu); in GetGraphIdLocked()
1173 int n = held_locks->n; in LockEnter()
1175 while (i != n && held_locks->locks[i].id != id) { in LockEnter()
1179 if (n == ABSL_ARRAYSIZE(held_locks->locks)) { in LockEnter()
1180 held_locks->overflow = true; // lost some data in LockEnter()
1182 held_locks->locks[i].mu = mu; in LockEnter()
1183 held_locks->locks[i].count = 1; in LockEnter()
1184 held_locks->locks[i].id = id; in LockEnter()
1185 held_locks->n = n + 1; in LockEnter()
1188 held_locks->locks[i].count++; in LockEnter()
1197 int n = held_locks->n; in LockLeave()
1199 while (i != n && held_locks->locks[i].id != id) { in LockLeave()
1203 if (!held_locks->overflow) { in LockLeave()
1207 while (i != n && held_locks->locks[i].mu != mu) { in LockLeave()
1216 mu_events == nullptr ? "" : mu_events->name); in LockLeave()
1219 } else if (held_locks->locks[i].count == 1) { in LockLeave()
1220 held_locks->n = n - 1; in LockLeave()
1221 held_locks->locks[i] = held_locks->locks[n - 1]; in LockLeave()
1222 held_locks->locks[n - 1].id = InvalidGraphId(); in LockLeave()
1223 held_locks->locks[n - 1].mu = in LockLeave()
1226 assert(held_locks->locks[i].count > 0); in LockLeave()
1227 held_locks->locks[i].count--; in LockLeave()
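LockEnter() and LockLeave() maintain a small fixed-size per-thread array of (lock id, acquisition count) entries; when it fills up, overflow is recorded and later "lock not found" cases are forgiven. A hedged sketch of that bookkeeping, with illustrative types in place of SynchLocksHeld:

    #include <cstdint>

    struct HeldLocks {
      static constexpr int kMaxLocks = 20;   // illustrative capacity
      int n = 0;
      bool overflow = false;
      struct Entry { std::uintptr_t id; int count; } locks[kMaxLocks];
    };

    void Enter(HeldLocks* held, std::uintptr_t id) {
      for (int i = 0; i < held->n; i++) {
        if (held->locks[i].id == id) { held->locks[i].count++; return; }
      }
      if (held->n == HeldLocks::kMaxLocks) { held->overflow = true; return; }
      held->locks[held->n++] = {id, 1};
    }

    void Leave(HeldLocks* held, std::uintptr_t id) {
      for (int i = 0; i < held->n; i++) {
        if (held->locks[i].id != id) continue;
        if (--held->locks[i].count == 0) {
          held->locks[i] = held->locks[--held->n];  // swap-remove the entry
        }
        return;
      }
      // A missing entry is only an error if the array never overflowed.
    }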
1271 snprintf(buf + len, maxlen - len, "%s\t@ %p %s\n", in StackString()
1275 snprintf(buf + len, maxlen - len, " %p", pcs[i]); in StackString()
1326 if (all_locks->n == 0) { in DeadlockCheck()
1336 // in the acquires-before graph will be represented in the stack traces in DeadlockCheck()
1338 deadlock_graph->UpdateStackTrace(mu_id, all_locks->n + 1, GetStack); in DeadlockCheck()
1341 for (int i = 0; i != all_locks->n; i++) { in DeadlockCheck()
1342 const GraphId other_node_id = all_locks->locks[i].id; in DeadlockCheck()
1344 static_cast<const Mutex *>(deadlock_graph->Ptr(other_node_id)); in DeadlockCheck()
1346 // Ignore stale lock in DeadlockCheck()
1350 // Add the acquired-before edge to the graph. in DeadlockCheck()
1351 if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) { in DeadlockCheck()
1359 CurrentStackString(b->buf, sizeof (b->buf), symbolize)); in DeadlockCheck()
1361 for (int j = 0; j != all_locks->n; j++) { in DeadlockCheck()
1362 void* pr = deadlock_graph->Ptr(all_locks->locks[j].id); in DeadlockCheck() local
1363 if (pr != nullptr) { in DeadlockCheck()
1364 snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr); in DeadlockCheck()
1365 len += static_cast<int>(strlen(&b->buf[len])); in DeadlockCheck()
1369 static_cast<void *>(mu), b->buf); in DeadlockCheck()
1371 int path_len = deadlock_graph->FindPath( in DeadlockCheck()
1372 mu_id, other_node_id, ABSL_ARRAYSIZE(b->path), b->path); in DeadlockCheck()
1374 GraphId id = b->path[j]; in DeadlockCheck()
1375 Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id)); in DeadlockCheck()
1378 int depth = deadlock_graph->GetStackTrace(id, &stack); in DeadlockCheck()
1379 snprintf(b->buf, sizeof(b->buf), in DeadlockCheck()
1381 StackString(stack, depth, b->buf + strlen(b->buf), in DeadlockCheck()
1382 static_cast<int>(sizeof(b->buf) - strlen(b->buf)), in DeadlockCheck()
1384 ABSL_RAW_LOG(ERROR, "%s", b->buf); in DeadlockCheck()
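DeadlockCheck() records an acquires-before edge for every lock already held and reports, with stack traces, when a new edge would close a cycle. What happens after the report is controlled by a public knob; a hedged usage sketch:

    #include "absl/synchronization/mutex.h"

    void ConfigureDeadlockDetection() {
      // kReport logs cycles like the DeadlockCheck() output above and continues;
      // kAbort crashes on the first cycle; kIgnore disables the check entirely.
      absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
    }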
1415 deadlock_graph->RemoveNode(this); in ForgetDeadlockInfo()
1430 for (int i = 0; i != locks->n; i++) { in AssertNotHeld()
1431 if (locks->locks[i].id == id) { in AssertNotHeld()
1435 (mu_events == nullptr ? "" : mu_events->name)); in AssertNotHeld()
1446 intptr_t v = mu->load(std::memory_order_relaxed); in TryAcquireWithSpinning()
1448 return false; // a reader or tracing -> give up in TryAcquireWithSpinning()
1449 } else if (((v & kMuWriter) == 0) && // no holder -> try to acquire in TryAcquireWithSpinning()
1450 mu->compare_exchange_strong(v, kMuWriter | v, in TryAcquireWithSpinning()
1455 } while (--c > 0); in TryAcquireWithSpinning()
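TryAcquireWithSpinning() gives up immediately if the word shows a state spinning cannot help with (a reader holds the lock or events are being traced) and otherwise retries a writer-bit CAS a bounded number of times before falling back to the slow path. A self-contained sketch of that shape; the bit constants and spin count are illustrative:

    #include <atomic>
    #include <cstdint>

    constexpr intptr_t kWriterBit = 0x1;
    constexpr intptr_t kGiveUpMask = 0x6;  // e.g. reader-held / event-traced bits

    bool TryAcquireSpinning(std::atomic<intptr_t>* mu, int spins = 100) {
      do {
        intptr_t v = mu->load(std::memory_order_relaxed);
        if ((v & kGiveUpMask) != 0) {
          return false;                    // spinning will not make progress
        }
        if ((v & kWriterBit) == 0 &&       // unlocked: try to take it
            mu->compare_exchange_strong(v, v | kWriterBit,
                                        std::memory_order_acquire,
                                        std::memory_order_relaxed)) {
          return true;
        }
      } while (--spins > 0);
      return false;                        // caller falls back to the slow path
    }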
1469 if (!TryAcquireWithSpinning(&this->mu_)) { in Lock()
1470 this->LockSlow(kExclusive, nullptr, 0); in Lock()
1486 this->LockSlow(kShared, nullptr, 0); in ReaderLock()
1495 this->LockSlow(kExclusive, &cond, 0); in LockWhen()
1517 this->LockSlow(kShared, &cond, 0); in ReaderLockWhen()
1540 this->AssertReaderHeld(); in Await()
1543 ABSL_RAW_CHECK(this->AwaitCommon(cond, KernelTimeout::Never()), in Await()
1555 this->AssertReaderHeld(); in AwaitWithDeadline()
1561 bool res = this->AwaitCommon(cond, t); in AwaitWithDeadline()
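Await() and AwaitWithDeadline() release the mutex, block until the condition is true (or the deadline passes), and return with the mutex reacquired in the same mode; the boolean result of AwaitWithDeadline() reports whether the condition held. A hedged usage sketch with an illustrative queue type:

    #include <deque>
    #include "absl/synchronization/mutex.h"
    #include "absl/time/time.h"

    struct IntQueue {
      absl::Mutex mu;
      std::deque<int> items;
    };

    // Pop one element, waiting until "deadline"; false means we timed out.
    bool PopWithDeadline(IntQueue* q, absl::Time deadline, int* out) {
      absl::MutexLock lock(&q->mu);
      auto not_empty = [](std::deque<int>* d) { return !d->empty(); };
      if (!q->mu.AwaitWithDeadline(absl::Condition(+not_empty, &q->items),
                                   deadline)) {
        return false;  // deadline expired with the queue still empty
      }
      *out = q->items.front();
      q->items.pop_front();
      return true;
    }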
1568 this->AssertReaderHeld(); in AwaitCommon()
1579 this->UnlockSlow(&waitp); in AwaitCommon()
1580 this->Block(waitp.thread); in AwaitCommon()
1583 this->LockSlowLoop(&waitp, flags); in AwaitCommon()
1602 if ((v & kExclusive->slow_need_zero) == 0 && // try fast acquire in TryLock()
1604 v, (kExclusive->fast_or | v) + kExclusive->fast_add, in TryLock()
1623 // The while-loops (here and below) iterate only if the mutex word keeps in ReaderTryLock()
1636 loop_limit--; in ReaderTryLock()
1641 while ((v & kShared->slow_need_zero) == 0 && loop_limit != 0) { in ReaderTryLock()
1651 loop_limit--; in ReaderTryLock()
1675 // should_try_cas is whether we'll try a compare-and-swap immediately. in Unlock()
1686 // all possible non-zero values for x exceed all possible values for y. in Unlock()
1701 this->UnlockSlow(nullptr /*no waitp*/); // take slow path in Unlock()
1706 // Requires v to represent a reader-locked state.
1725 if (mu_.compare_exchange_strong(v, v - clear, in ReaderUnlock()
1732 this->UnlockSlow(nullptr /*no waitp*/); // take slow path in ReaderUnlock()
1758 this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags), in LockSlow()
1762 // Compute cond->Eval() and tell race detectors that we do it under mutex mu.
1783 res = cond->Eval(); in EvalConditionAnnotated()
1795 res = cond->Eval(); in EvalConditionAnnotated()
1798 // Prevent unused param warnings in non-TSAN builds. in EvalConditionAnnotated()
1805 // Compute cond->Eval() hiding it from race detectors.
1810 // tsan). As a result there is no tsan-visible synchronization between the
1818 // So we "divert" (which un-ignores both memory accesses and synchronization) in EvalConditionIgnored()
1822 bool res = cond->Eval(); in EvalConditionIgnored()
1825 static_cast<void>(mu); // Prevent unused param warning in non-TSAN builds. in EvalConditionIgnored()
1833 // - kMuHasBlocked indicates that the client has already blocked on the call so
1836 // - kMuIsCond indicates that this is a conditional acquire (condition variable,
1842 if ((v & how->fast_need_zero) == 0 && // try fast acquire in LockSlowWithDeadline()
1844 v, (how->fast_or | (v & zap_desig_waker[flags & kMuHasBlocked])) + in LockSlowWithDeadline()
1845 how->fast_add, in LockSlowWithDeadline()
1860 this->UnlockSlow(&waitp); in LockSlowWithDeadline()
1861 this->Block(waitp.thread); in LockSlowWithDeadline()
1864 this->LockSlowLoop(&waitp, flags); in LockSlowWithDeadline()
1870 // RAW_CHECK_FMT() takes a condition, a printf-style format string, and
1871 // the printf-style argument list. The format string must be a literal.
1880 static void CheckForMutexCorruption(intptr_t v, const char* label) { in CheckForMutexCorruption() argument
1896 label, reinterpret_cast<void *>(v)); in CheckForMutexCorruption()
1899 label, reinterpret_cast<void *>(v)); in CheckForMutexCorruption()
1909 waitp->how == kExclusive? SYNCH_EV_LOCK: SYNCH_EV_READERLOCK); in LockSlowLoop()
1912 waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors, in LockSlowLoop()
1917 if ((v & waitp->how->slow_need_zero) == 0) { in LockSlowLoop()
1919 v, (waitp->how->fast_or | in LockSlowLoop()
1921 waitp->how->fast_add, in LockSlowLoop()
1923 if (waitp->cond == nullptr || in LockSlowLoop()
1924 EvalConditionAnnotated(waitp->cond, this, true, false, in LockSlowLoop()
1925 waitp->how == kShared)) { in LockSlowLoop()
1928 this->UnlockSlow(waitp); // got lock but condition false in LockSlowLoop()
1929 this->Block(waitp->thread); in LockSlowLoop()
1941 if (waitp->how == kExclusive && (v & kMuReader) != 0) { in LockSlowLoop()
1950 waitp->thread->waitp = nullptr; in LockSlowLoop()
1952 } else if ((v & waitp->how->slow_inc_need_zero & in LockSlowLoop()
1961 h->readers += kMuOne; // inc reader count in waiter in LockSlowLoop()
1967 if (waitp->cond == nullptr || in LockSlowLoop()
1968 EvalConditionAnnotated(waitp->cond, this, true, false, in LockSlowLoop()
1969 waitp->how == kShared)) { in LockSlowLoop()
1972 this->UnlockSlow(waitp); // got lock but condition false in LockSlowLoop()
1973 this->Block(waitp->thread); in LockSlowLoop()
1986 if (waitp->how == kExclusive && (v & kMuReader) != 0) { in LockSlowLoop()
1998 this->Block(waitp->thread); // wait until removed from list or timeout in LockSlowLoop()
2004 waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors, in LockSlowLoop()
2010 waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors, in LockSlowLoop()
2014 waitp->how == kExclusive? SYNCH_EV_LOCK_RETURNING : in LockSlowLoop()
2020 // If waitp is non-zero, it must be the wait parameters for the current thread
2027 this->AssertReaderHeld(); in UnlockSlow()
2046 ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr || in UnlockSlow()
2047 waitp->thread->suppress_fatal_errors, in UnlockSlow()
2051 // waiters if waitp is non-zero. in UnlockSlow()
2065 if (mu_.compare_exchange_strong(v, v - clear, in UnlockSlow()
2082 intptr_t new_readers = (v >= kMuOne)? v - kMuOne : v; in UnlockSlow()
2085 // If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then in UnlockSlow()
2089 do_enqueue = (waitp->cv_word == nullptr); in UnlockSlow()
2105 // release spinlock & our lock; retry if reader-count changed in UnlockSlow()
2116 if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) { in UnlockSlow()
2118 h->readers -= kMuOne; // release our lock in UnlockSlow()
2134 ABSL_RAW_CHECK(old_h == nullptr || h->maybe_unlocking, in UnlockSlow()
2139 !old_h->may_skip) { // we used old_h as a terminator in UnlockSlow()
2140 old_h->may_skip = true; // allow old_h to skip once more in UnlockSlow()
2141 ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head"); in UnlockSlow()
2142 if (h != old_h && MuSameCondition(old_h, old_h->next)) { in UnlockSlow()
2143 old_h->skip = old_h->next; // old_h not head & can skip to successor in UnlockSlow()
2146 if (h->next->waitp->how == kExclusive && in UnlockSlow()
2147 Condition::GuaranteedEqual(h->next->waitp->cond, nullptr)) { in UnlockSlow()
2150 w = h->next; in UnlockSlow()
2151 w->wake = true; in UnlockSlow()
2160 } else if (w != nullptr && (w->waitp->how == kExclusive || h == old_h)) { in UnlockSlow()
2175 h->readers = 0; in UnlockSlow()
2176 h->maybe_unlocking = false; // finished unlocking in UnlockSlow()
2196 w_walk = old_h->next; in UnlockSlow()
2199 nullptr; // h->next's predecessor may change; don't record it in UnlockSlow()
2200 w_walk = h->next; in UnlockSlow()
2203 h->may_skip = false; // ensure we never skip past h in future searches in UnlockSlow()
2205 ABSL_RAW_CHECK(h->skip == nullptr, "illegal skip from head"); in UnlockSlow()
2207 h->maybe_unlocking = true; // we're about to scan the waiter list in UnlockSlow()
2217 // Without the spinlock, the locations mu_ and h->next may now change in UnlockSlow()
2227 w_walk->wake = false; in UnlockSlow()
2228 if (w_walk->waitp->cond == in UnlockSlow()
2230 (w_walk->waitp->cond != known_false && in UnlockSlow()
2233 EvalConditionIgnored(this, w_walk->waitp->cond))) { in UnlockSlow()
2235 w_walk->wake = true; // can wake this waiter in UnlockSlow()
2238 if (w_walk->waitp->how == kExclusive) { in UnlockSlow()
2242 } else if (w_walk->waitp->how == kShared) { // wake if a reader in UnlockSlow()
2243 w_walk->wake = true; in UnlockSlow()
2248 known_false = w_walk->waitp->cond; // remember last false condition in UnlockSlow()
2250 if (w_walk->wake) { // we're waking reader w_walk in UnlockSlow()
2255 // If pw_walk == h, then load of pw_walk->next can race with in UnlockSlow()
2260 w_walk = pw_walk->next; in UnlockSlow()
2264 continue; // restart for(;;)-loop to wakeup w or to find more waiters in UnlockSlow()
2266 ABSL_RAW_CHECK(pw->next == w, "pw not w's predecessor"); in UnlockSlow()
2273 // This traverses the list in [ pw->next, h ], where h is the head, in UnlockSlow()
2275 // singly-linked list wake_list. Returns the new head. in UnlockSlow()
2292 h->readers = 0; in UnlockSlow()
2293 h->maybe_unlocking = false; // finished unlocking in UnlockSlow()
2300 break; // out of for(;;)-loop in UnlockSlow()
2304 } // end of for(;;)-loop in UnlockSlow()
2307 int64_t enqueue_timestamp = wake_list->waitp->contention_start_cycles; in UnlockSlow()
2308 bool cond_waiter = wake_list->cond_waiter; in UnlockSlow()
2315 int64_t wait_cycles = base_internal::CycleClock::Now() - enqueue_timestamp; in UnlockSlow()
2334 this->LockSlow(how, nullptr, kMuHasBlocked | kMuIsCond); in Trans()
2344 ABSL_RAW_CHECK(w->waitp->cond == nullptr, in Fer()
2346 ABSL_RAW_CHECK(!w->waitp->timeout.has_timeout(), in Fer()
2348 ABSL_RAW_CHECK(w->waitp->cv_word == nullptr, in Fer()
2359 kMuWriter | (w->waitp->how == kShared ? 0 : kMuReader); in Fer()
2361 w->next = nullptr; in Fer()
2362 w->state.store(PerThreadSynch::kAvailable, std::memory_order_release); in Fer()
2368 PerThreadSynch *new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond); in Fer()
2379 PerThreadSynch *new_h = Enqueue(h, w->waitp, v, kMuIsCond); in Fer()
2401 (e == nullptr ? "" : e->name)); in AssertHeld()
2410 static_cast<const void *>(this), (e == nullptr ? "" : e->name)); in AssertReaderHeld()
2414 // -------------------------------- condition variables
2427 SynchEvent *e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin); in EnableDebugLog()
2428 e->log = true; in EnableDebugLog()
2434 ForgetSynchEvent(&this->cv_, kCvEvent, kCvSpin); in ~CondVar()
2453 while (w->next != s && w->next != h) { // search for thread in Remove()
2454 w = w->next; in Remove()
2456 if (w->next == s) { // found thread; remove it in Remove()
2457 w->next = s->next; in Remove()
2461 s->next = nullptr; in Remove()
2462 s->state.store(PerThreadSynch::kAvailable, std::memory_order_release); in Remove()
2476 // Queue thread waitp->thread on condition variable word cv_word using
2480 // before calling Mutex::UnlockSlow(), the Mutex code might be re-entered (via
2483 // a condition variable waiter queue. Thus, we use the waitp->cv_word
2486 // importantly) after any call to an external routine that might re-enter the
2495 std::atomic<intptr_t> *cv_word = waitp->cv_word; in CondVarEnqueue()
2496 waitp->cv_word = nullptr; in CondVarEnqueue()
2498 intptr_t v = cv_word->load(std::memory_order_relaxed); in CondVarEnqueue()
2501 !cv_word->compare_exchange_weak(v, v | kCvSpin, in CondVarEnqueue()
2505 v = cv_word->load(std::memory_order_relaxed); in CondVarEnqueue()
2507 ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be"); in CondVarEnqueue()
2508 waitp->thread->waitp = waitp; // prepare ourselves for waiting in CondVarEnqueue()
2511 waitp->thread->next = waitp->thread; in CondVarEnqueue()
2513 waitp->thread->next = h->next; in CondVarEnqueue()
2514 h->next = waitp->thread; in CondVarEnqueue()
2516 waitp->thread->state.store(PerThreadSynch::kQueued, in CondVarEnqueue()
2518 cv_word->store((v & kCvEvent) | reinterpret_cast<intptr_t>(waitp->thread), in CondVarEnqueue()
2523 bool rc = false; // return value; true iff we timed-out in WaitCommon()
2525 intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed); in WaitCommon()
2542 mutex->UnlockSlow(&waitp); in WaitCommon()
2545 while (waitp.thread->state.load(std::memory_order_acquire) == in WaitCommon()
2548 this->Remove(waitp.thread); in WaitCommon()
2553 ABSL_RAW_CHECK(waitp.thread->waitp != nullptr, "not waiting when should be"); in WaitCommon()
2554 waitp.thread->waitp = nullptr; // cleanup in WaitCommon()
2568 mutex->Trans(mutex_how); // Reacquire mutex in WaitCommon()
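WaitCommon() is the engine behind CondVar::Wait() and WaitWithTimeout(): it queues the thread via CondVarEnqueue()/UnlockSlow(), blocks, and then reacquires the mutex with Trans(). A hedged usage sketch of the classic wait loop over the public API (Mailbox is illustrative):

    #include "absl/synchronization/mutex.h"

    struct Mailbox {
      absl::Mutex mu;
      absl::CondVar cv;
      bool has_message = false;
      int message = 0;
    };

    int Receive(Mailbox* m) {
      absl::MutexLock lock(&m->mu);
      while (!m->has_message) {    // re-check: wakeups can race with other consumers
        m->cv.Wait(&m->mu);        // releases mu while blocked, reacquires on return
      }
      m->has_message = false;
      return m->message;
    }

    void Send(Mailbox* m, int v) {
      absl::MutexLock lock(&m->mu);
      m->message = v;
      m->has_message = true;
      m->cv.Signal();              // wake one waiter; SignalAll() would wake all
    }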
2586 // If it was a timed wait, w will be waiting on w->cv
2587 // Otherwise, if it was not a Mutex mutex, w will be waiting on w->sem
2590 if (w->waitp->timeout.has_timeout() || w->waitp->cvmu == nullptr) { in Wakeup()
2591 // The waiting thread only needs to observe "w->state == kAvailable" to be in Wakeup()
2593 Mutex *mu = w->waitp->cvmu; in Wakeup()
2594 w->next = nullptr; in Wakeup()
2595 w->state.store(PerThreadSynch::kAvailable, std::memory_order_release); in Wakeup()
2598 w->waitp->cvmu->Fer(w); in Wakeup()
2616 w = h->next; in Signal()
2620 h->next = w->next; in Signal()
2659 PerThreadSynch *n = h->next; in SignalAll()
2662 n = n->next; in SignalAll()
2681 ABSL_RAW_CHECK(this->mu_ != nullptr, in Release()
2683 this->mu_->Unlock(); in Release()
2684 this->mu_ = nullptr; in Release()
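Release() unlocks immediately and clears mu_, so the destructor becomes a no-op; this is useful when a scope only sometimes needs the lock for its full extent. A hedged usage sketch:

    #include "absl/synchronization/mutex.h"

    void Process(absl::Mutex* mu, bool fast_path) {
      absl::ReleasableMutexLock lock(mu);
      if (fast_path) {
        lock.Release();   // drop the lock early; the destructor will not unlock again
        // ... work that does not need the lock ...
        return;
      }
      // ... work under the lock; released by the destructor ...
    }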
2712 return (*c->function_)(c->arg_); in CallVoidPtrFunction()
2724 return (this->eval_ == nullptr) || (*this->eval_)(this); in Eval()
2729 return b == nullptr || b->eval_ == nullptr; in GuaranteedEqual()
2731 if (b == nullptr || b->eval_ == nullptr) { in GuaranteedEqual()
2732 return a->eval_ == nullptr; in GuaranteedEqual()
2734 return a->eval_ == b->eval_ && a->function_ == b->function_ && in GuaranteedEqual()
2735 a->arg_ == b->arg_ && a->method_ == b->method_; in GuaranteedEqual()
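GuaranteedEqual() is what lets MuSameCondition() (earlier in the file) group waiters into skip-chains: two Condition objects are treated as the same condition when they wrap the same function, argument, and method, or when both are trivially true. A hedged usage sketch of the common constructors (State and NoPending are illustrative):

    #include "absl/synchronization/mutex.h"

    struct State {
      absl::Mutex mu;
      bool ready = false;
      int pending = 0;
    };

    bool NoPending(State* s) { return s->pending == 0; }

    void WaitExamples(State* s) {
      // Condition wrapping a bool flag.
      s->mu.LockWhen(absl::Condition(&s->ready));
      s->mu.Unlock();

      // Condition wrapping a function and argument; every waiter built from
      // (NoPending, s) compares GuaranteedEqual and can share a skip-chain.
      s->mu.LockWhen(absl::Condition(NoPending, s));
      s->mu.Unlock();
    }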