
Lines Matching +full:stale +full:- +full:pr +full:- +full:label

7 //      https://www.apache.org/licenses/LICENSE-2.0
188 // --------------------------Generic atomic ops
197 v = pv->load(std::memory_order_relaxed); in AtomicSetBits()
200 !pv->compare_exchange_weak(v, v | bits, in AtomicSetBits()
213 v = pv->load(std::memory_order_relaxed); in AtomicClearBits()
216 !pv->compare_exchange_weak(v, v & ~bits, in AtomicClearBits()
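The AtomicSetBits()/AtomicClearBits() lines above use a relaxed load followed by a compare_exchange_weak retry. A minimal sketch of that set-bits pattern (SetBitsSketch and its signature are illustrative, not the Abseil function, and the real routines check additional conditions):

#include <atomic>
#include <cstdint>

// Sketch only: keep retrying until the requested bits are observed set.
// On failure, compare_exchange_weak refreshes v with the current word,
// so the loop simply retries with the latest value.
static void SetBitsSketch(std::atomic<intptr_t>* pv, intptr_t bits) {
  intptr_t v = pv->load(std::memory_order_relaxed);
  while ((v & bits) != bits &&
         !pv->compare_exchange_weak(v, v | bits,
                                    std::memory_order_release,
                                    std::memory_order_relaxed)) {
    // retry
  }
}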
221 //------------------------------------------------------------------
231 //------------------------------------------------------------------
301 // buckets have linear, 0-terminated chains
315 char name[1]; // actually longer---NUL-terminated string
333 e != nullptr && e->masked_addr != base_internal::HidePtr(addr); in EnsureSynchEvent()
334 e = e->next) { in EnsureSynchEvent()
343 e->refcount = 2; // one for return value, one for linked list in EnsureSynchEvent()
344 e->masked_addr = base_internal::HidePtr(addr); in EnsureSynchEvent()
345 e->invariant = nullptr; in EnsureSynchEvent()
346 e->arg = nullptr; in EnsureSynchEvent()
347 e->log = false; in EnsureSynchEvent()
348 strcpy(e->name, name); // NOLINT(runtime/printf) in EnsureSynchEvent()
349 e->next = synch_event[h]; in EnsureSynchEvent()
353 e->refcount++; // for return value in EnsureSynchEvent()
368 bool del = (--(e->refcount) == 0); in UnrefSynchEvent()
386 (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr); in ForgetSynchEvent()
387 pe = &e->next) { in ForgetSynchEvent()
391 *pe = e->next; in ForgetSynchEvent()
392 del = (--(e->refcount) == 0); in ForgetSynchEvent()
409 e != nullptr && e->masked_addr != base_internal::HidePtr(addr); in GetSynchEvent()
410 e = e->next) { in GetSynchEvent()
413 e->refcount++; in GetSynchEvent()
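The SynchEvent lines above walk a 0-terminated hash-bucket chain keyed by a masked address and bump a refcount on a hit. An illustrative sketch of that lookup, with a hypothetical Event type standing in for SynchEvent:

#include <cstdint>

// Sketch only: a simplified stand-in for SynchEvent.
struct Event {
  Event* next;
  uintptr_t masked_addr;
  int refcount;
};

// Walk the null-terminated bucket chain; on a match, take an extra
// reference for the returned pointer (as the matched fragments do).
static Event* LookupSketch(Event* bucket_head, uintptr_t masked_addr) {
  Event* e = bucket_head;
  while (e != nullptr && e->masked_addr != masked_addr) {
    e = e->next;
  }
  if (e != nullptr) {
    e->refcount++;  // one reference for the caller
  }
  return e;
}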
425 if (e == nullptr || e->log) { in PostSynchEvent()
429 // 64-bit machine. in PostSynchEvent()
433 pos += snprintf(&buffer[pos], sizeof (buffer) - pos, " %p", pcs[i]); in PostSynchEvent()
436 (e == nullptr ? "" : e->name), buffer); in PostSynchEvent()
439 if ((flags & SYNCH_F_LCK) != 0 && e != nullptr && e->invariant != nullptr) { in PostSynchEvent()
448 (*ev->invariant)(ev->arg); in PostSynchEvent()
462 //------------------------------------------------------------------
495 KernelTimeout timeout; // timeout expiry---absolute time
531 ret->n = 0; in LocksHeldAlloc()
532 ret->overflow = false; in LocksHeldAlloc()
536 // Return the PerThreadSynch-struct for this thread.
539 return &identity->per_thread_synch; in Synch_GetPerThread()
555 if (s->all_locks == nullptr) { in Synch_GetAllLocks()
556 s->all_locks = LocksHeldAlloc(); // Freed by ReclaimThreadIdentity. in Synch_GetAllLocks()
558 return s->all_locks; in Synch_GetAllLocks()
566 PerThreadSem::Post(w->thread_identity()); in IncrementSynchSem()
590 // re-entry is detected. The ABSL_RAW_CHECK() statements are those in the
593 // Fix the per-thread state only if it exists. in InternalAttemptToUseMutexInFatalSignalHandler()
596 identity->per_thread_synch.suppress_fatal_errors = true; in InternalAttemptToUseMutexInFatalSignalHandler()
603 // --------------------------time support
618 // --------------------------Mutexes
625 // bit-twiddling trick in Mutex::Unlock().
627 // to enable the bit-twiddling trick in CheckForMutexCorruption().
639 // unblocked reset the bit when they either acquire or re-block
691 // we're now slow-path). kMuWrWait may be
733 ForgetSynchEvent(&this->mu_, kMuEvent, kMuSpin); in ~Mutex()
736 this->ForgetDeadlockInfo(); in ~Mutex()
742 SynchEvent *e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin); in EnableDebugLog()
743 e->log = true; in EnableDebugLog()
755 SynchEvent *e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin); in EnableInvariantDebugging()
756 e->invariant = invariant; in EnableInvariantDebugging()
757 e->arg = arg; in EnableInvariantDebugging()
772 return x->waitp->how == y->waitp->how && x->priority == y->priority && in MuEquivalentWaiter()
773 Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond); in MuEquivalentWaiter()
782 // The next several routines maintain the per-thread next and skip fields
784 // The queue is a circular singly-linked list, of which the "head" is the
785 // last element, and head->next is the first element.
787 // For thread x, x->skip is one of:
788 // - invalid (iff x is not in a Mutex wait queue),
789 // - null, or
790 // - a pointer to a distinct thread waiting later in the same Mutex queue
791 // such that all threads in [x, x->skip] have the same condition, priority
793 // x->skip]).
794 // In addition, if x->skip is valid, (x->may_skip || x->skip == null)
798 // field of another thread x because if x->skip==y, x->skip must (have) become
803 // if (x->may_skip && MuEquivalentWaiter(x, x->next)) { x->skip = x->next; }
806 // if (x->skip != null) { x->skip = x->skip->skip; }
814 // - spinlock is held in call from Enqueue(), with maybe_unlocking == false
815 // - Mutex is held in call from UnlockSlow() by last unlocker, with
817 // - both Mutex and spinlock are held in call from DequeueAllWakeable() (from
822 // - FixSkip() (from TryRemove()) - spinlock and Mutex are held
823 // - Dequeue() (with spinlock and Mutex held)
824 // - UnlockSlow() (with spinlock and Mutex held)
826 // - Enqueue() (with spinlock held and maybe_unlocking == false)
828 // - Enqueue() (without spinlock held; but queue is empty and being freshly
830 // - Enqueue() (with spinlock held and maybe_unlocking == true)
840 PerThreadSynch *x2 = x->skip; in Skip()
843 // such that x1 == x0->skip && x2 == x1->skip in Skip()
844 while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) { in Skip()
845 x0->skip = x2; // short-circuit skip from x0 to x2 in Skip()
847 x->skip = x1; // short-circuit skip from x to result in Skip()
857 if (ancestor->skip == to_be_removed) { // ancestor->skip left dangling in FixSkip()
858 if (to_be_removed->skip != nullptr) { in FixSkip()
859 ancestor->skip = to_be_removed->skip; // can skip past to_be_removed in FixSkip()
860 } else if (ancestor->next != to_be_removed) { // they are not adjacent in FixSkip()
861 ancestor->skip = ancestor->next; // can skip one past ancestor in FixSkip()
863 ancestor->skip = nullptr; // can't skip at all in FixSkip()
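The comment block and the Skip()/FixSkip() lines above describe how the per-thread skip fields are maintained in the waiter queue. A sketch of the path-compression step, using a hypothetical Node type in place of PerThreadSynch:

// Sketch only: Node stands in for PerThreadSynch.
struct Node {
  Node* next = nullptr;
  Node* skip = nullptr;  // a later, equivalent waiter in the queue, or null
};

// Return the last element of x's skip chain, short-circuiting intermediate
// skip pointers on the way down (same shape as the matched Skip() lines).
static Node* SkipSketch(Node* x) {
  Node* x0 = nullptr;
  Node* x1 = x;
  Node* x2 = x->skip;
  if (x2 != nullptr) {
    // Slide x0, x1, x2 down the chain until x2 runs off the end,
    // rewriting x0->skip to jump straight to x2.
    while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
      x0->skip = x2;
    }
    x->skip = x1;  // x now skips directly to the chain's last element
  }
  return x1;
}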
870 // Enqueue thread "waitp->thread" on a waiter queue.
872 // If head==nullptr and waitp->cv_word==nullptr, then Enqueue() is
876 // If waitp->cv_word == nullptr, queue the thread at either the front or
883 // non-trivial condition. In this case, Enqueue() never returns nullptr
885 // If waitp->cv_word != nullptr, CondVarEnqueue() is called, and "head" is
893 if (waitp->cv_word != nullptr) { in Enqueue()
898 PerThreadSynch *s = waitp->thread; in Enqueue()
900 s->waitp == nullptr || // normal case in Enqueue()
901 s->waitp == waitp || // Fer()---transfer from condition variable in Enqueue()
902 s->suppress_fatal_errors, in Enqueue()
904 s->waitp = waitp; in Enqueue()
905 s->skip = nullptr; // maintain skip invariant (see above) in Enqueue()
906 s->may_skip = true; // always true on entering queue in Enqueue()
907 s->wake = false; // not being woken in Enqueue()
908 s->cond_waiter = ((flags & kMuIsCond) != 0); in Enqueue()
910 s->next = s; // it's the only entry in the cycle in Enqueue()
911 s->readers = mu; // reader count is from mu word in Enqueue()
912 s->maybe_unlocking = false; // no one is searching an empty list in Enqueue()
918 if (s->next_priority_read_cycles < now_cycles) { in Enqueue()
928 s->priority = param.sched_priority; in Enqueue()
929 s->next_priority_read_cycles = in Enqueue()
934 if (s->priority > head->priority) { // s's priority is above head's in Enqueue()
935 // try to put s in priority-fifo order, or failing that at the front. in Enqueue()
936 if (!head->maybe_unlocking) { in Enqueue()
946 // (side-effect: optimizes skip chain) in Enqueue()
947 advance_to = Skip(enqueue_after->next); in Enqueue()
948 } while (s->priority <= advance_to->priority); in Enqueue()
949 // termination guaranteed because s->priority > head->priority in Enqueue()
951 } else if (waitp->how == kExclusive && in Enqueue()
952 Condition::GuaranteedEqual(waitp->cond, nullptr)) { in Enqueue()
961 s->next = enqueue_after->next; in Enqueue()
962 enqueue_after->next = s; in Enqueue()
965 // The first two imply enqueue_after->skip == nullptr, and in Enqueue()
967 // We require this because clearing enqueue_after->skip in Enqueue()
971 ABSL_RAW_CHECK(enqueue_after->skip == nullptr || in Enqueue()
975 if (enqueue_after != head && enqueue_after->may_skip && in Enqueue()
976 MuEquivalentWaiter(enqueue_after, enqueue_after->next)) { in Enqueue()
978 enqueue_after->skip = enqueue_after->next; in Enqueue()
980 if (MuEquivalentWaiter(s, s->next)) { // s->may_skip is known to be true in Enqueue()
981 s->skip = s->next; // s may skip to its successor in Enqueue()
986 s->next = head->next; // add s after head in Enqueue()
987 head->next = s; in Enqueue()
988 s->readers = head->readers; // reader count is from previous head in Enqueue()
989 s->maybe_unlocking = head->maybe_unlocking; // same for unlock hint in Enqueue()
990 if (head->may_skip && MuEquivalentWaiter(head, s)) { in Enqueue()
992 head->skip = s; in Enqueue()
997 s->state.store(PerThreadSynch::kQueued, std::memory_order_relaxed); in Enqueue()
1001 // Dequeue the successor pw->next of thread pw from the Mutex waiter queue
1006 PerThreadSynch *w = pw->next; in Dequeue()
1007 pw->next = w->next; // snip w out of list in Dequeue()
1010 } else if (pw != head && MuEquivalentWaiter(pw, pw->next)) { in Dequeue()
1012 if (pw->next->skip != in Dequeue()
1014 pw->skip = pw->next->skip; in Dequeue()
1016 pw->skip = pw->next; in Dequeue()
1022 // Traverse the elements [ pw->next, h] of the circular list whose last element
1025 // singly-linked list wake_list in the order found. Assumes that
1032 PerThreadSynch *w = pw->next; in DequeueAllWakeable()
1035 if (w->wake) { // remove this element in DequeueAllWakeable()
1036 ABSL_RAW_CHECK(pw->skip == nullptr, "bad skip in DequeueAllWakeable"); in DequeueAllWakeable()
1037 // we're removing pw's successor so either pw->skip is zero or we should in DequeueAllWakeable()
1038 // already have removed pw since if pw->skip!=null, pw has the same in DequeueAllWakeable()
1041 w->next = *wake_tail; // keep list terminated in DequeueAllWakeable()
1043 wake_tail = &w->next; // next addition to end in DequeueAllWakeable()
1044 if (w->waitp->how == kExclusive) { // wake at most 1 writer in DequeueAllWakeable()
1051 w = pw->next; in DequeueAllWakeable()
1078 if ((w = pw->next) != s) { // search for thread, in TryRemove()
1093 } while ((w = pw->next) != s && pw != h); in TryRemove()
1096 // pw->skip may be non-zero here; the loop above ensured that in TryRemove()
1099 s->next = nullptr; in TryRemove()
1100 s->state.store(PerThreadSynch::kAvailable, std::memory_order_release); in TryRemove()
1109 h->readers = 0; // we hold writer lock in TryRemove()
1110 h->maybe_unlocking = false; // finished unlocking in TryRemove()
1119 // this mutex's waiter queue. If "s->waitp->timeout" has a timeout, wake up
1124 while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) { in Block()
1125 if (!DecrementSynchSem(this, s, s->waitp->timeout)) { in Block()
1131 this->TryRemove(s); in Block()
1133 while (s->next != nullptr) { in Block()
1135 this->TryRemove(s); in Block()
1140 this->TryRemove(s); in Block()
1142 s->waitp->timeout = KernelTimeout::Never(); // timeout is satisfied in Block()
1143 s->waitp->cond = nullptr; // condition no longer relevant for wakeups in Block()
1146 ABSL_RAW_CHECK(s->waitp != nullptr || s->suppress_fatal_errors, in Block()
1148 s->waitp = nullptr; in Block()
1153 PerThreadSynch *next = w->next; in Wakeup()
1154 w->next = nullptr; in Wakeup()
1155 w->state.store(PerThreadSynch::kAvailable, std::memory_order_release); in Wakeup()
1168 return deadlock_graph->GetId(mu); in GetGraphIdLocked()
1182 int n = held_locks->n; in LockEnter()
1184 while (i != n && held_locks->locks[i].id != id) { in LockEnter()
1188 if (n == ABSL_ARRAYSIZE(held_locks->locks)) { in LockEnter()
1189 held_locks->overflow = true; // lost some data in LockEnter()
1191 held_locks->locks[i].mu = mu; in LockEnter()
1192 held_locks->locks[i].count = 1; in LockEnter()
1193 held_locks->locks[i].id = id; in LockEnter()
1194 held_locks->n = n + 1; in LockEnter()
1197 held_locks->locks[i].count++; in LockEnter()
1206 int n = held_locks->n; in LockLeave()
1208 while (i != n && held_locks->locks[i].id != id) { in LockLeave()
1212 if (!held_locks->overflow) { in LockLeave()
1216 while (i != n && held_locks->locks[i].mu != mu) { in LockLeave()
1225 mu_events == nullptr ? "" : mu_events->name); in LockLeave()
1228 } else if (held_locks->locks[i].count == 1) { in LockLeave()
1229 held_locks->n = n - 1; in LockLeave()
1230 held_locks->locks[i] = held_locks->locks[n - 1]; in LockLeave()
1231 held_locks->locks[n - 1].id = InvalidGraphId(); in LockLeave()
1232 held_locks->locks[n - 1].mu = in LockLeave()
1235 assert(held_locks->locks[i].count > 0); in LockLeave()
1236 held_locks->locks[i].count--; in LockLeave()
1280 snprintf(buf + len, maxlen - len, "%s\t@ %p %s\n", in StackString()
1284 snprintf(buf + len, maxlen - len, " %p", pcs[i]); in StackString()
1335 if (all_locks->n == 0) { in DeadlockCheck()
1345 // in the acquires-before graph will be represented in the stack traces in DeadlockCheck()
1347 deadlock_graph->UpdateStackTrace(mu_id, all_locks->n + 1, GetStack); in DeadlockCheck()
1350 for (int i = 0; i != all_locks->n; i++) { in DeadlockCheck()
1351 const GraphId other_node_id = all_locks->locks[i].id; in DeadlockCheck()
1353 static_cast<const Mutex *>(deadlock_graph->Ptr(other_node_id)); in DeadlockCheck()
1355 // Ignore stale lock in DeadlockCheck()
1359 // Add the acquired-before edge to the graph. in DeadlockCheck()
1360 if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) { in DeadlockCheck()
1368 CurrentStackString(b->buf, sizeof (b->buf), symbolize)); in DeadlockCheck()
1370 for (int j = 0; j != all_locks->n; j++) { in DeadlockCheck()
1371 void* pr = deadlock_graph->Ptr(all_locks->locks[j].id); in DeadlockCheck() local
1372 if (pr != nullptr) { in DeadlockCheck()
1373 snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr); in DeadlockCheck()
1374 len += static_cast<int>(strlen(&b->buf[len])); in DeadlockCheck()
1380 static_cast<void *>(mu), b->buf); in DeadlockCheck()
1382 int path_len = deadlock_graph->FindPath( in DeadlockCheck()
1383 mu_id, other_node_id, ABSL_ARRAYSIZE(b->path), b->path); in DeadlockCheck()
1385 GraphId id = b->path[j]; in DeadlockCheck()
1386 Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id)); in DeadlockCheck()
1389 int depth = deadlock_graph->GetStackTrace(id, &stack); in DeadlockCheck()
1390 snprintf(b->buf, sizeof(b->buf), in DeadlockCheck()
1392 StackString(stack, depth, b->buf + strlen(b->buf), in DeadlockCheck()
1393 static_cast<int>(sizeof(b->buf) - strlen(b->buf)), in DeadlockCheck()
1395 ABSL_RAW_LOG(ERROR, "%s", b->buf); in DeadlockCheck()
1426 deadlock_graph->RemoveNode(this); in ForgetDeadlockInfo()
1441 for (int i = 0; i != locks->n; i++) { in AssertNotHeld()
1442 if (locks->locks[i].id == id) { in AssertNotHeld()
1446 (mu_events == nullptr ? "" : mu_events->name)); in AssertNotHeld()
1457 intptr_t v = mu->load(std::memory_order_relaxed); in TryAcquireWithSpinning()
1459 return false; // a reader or tracing -> give up in TryAcquireWithSpinning()
1460 } else if (((v & kMuWriter) == 0) && // no holder -> try to acquire in TryAcquireWithSpinning()
1461 mu->compare_exchange_strong(v, kMuWriter | v, in TryAcquireWithSpinning()
1466 } while (--c > 0); in TryAcquireWithSpinning()
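The TryAcquireWithSpinning() lines above spin a bounded number of times, giving up when reader/tracing bits are set and otherwise trying to CAS the writer bit in. A minimal sketch under assumed bit masks (kWriterBit and kSlowBits are hypothetical; the real mutex word layout is richer):

#include <atomic>
#include <cstdint>

constexpr intptr_t kWriterBit = 0x1;
constexpr intptr_t kSlowBits = 0x2;  // e.g. readers present or event tracing

// Sketch only: bounded spin try-acquire of the writer bit.
static bool TrySpinAcquire(std::atomic<intptr_t>* mu, int c) {
  do {
    intptr_t v = mu->load(std::memory_order_relaxed);
    if ((v & kSlowBits) != 0) {
      return false;                          // slow state: give up spinning
    }
    if ((v & kWriterBit) == 0 &&
        mu->compare_exchange_strong(v, v | kWriterBit,
                                    std::memory_order_acquire,
                                    std::memory_order_relaxed)) {
      return true;                           // acquired the lock
    }
  } while (--c > 0);
  return false;
}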
1480 if (!TryAcquireWithSpinning(&this->mu_)) { in Lock()
1481 this->LockSlow(kExclusive, nullptr, 0); in Lock()
1497 this->LockSlow(kShared, nullptr, 0); in ReaderLock()
1506 this->LockSlow(kExclusive, &cond, 0); in LockWhen()
1528 this->LockSlow(kShared, &cond, 0); in ReaderLockWhen()
1551 this->AssertReaderHeld(); in Await()
1554 ABSL_RAW_CHECK(this->AwaitCommon(cond, KernelTimeout::Never()), in Await()
1566 this->AssertReaderHeld(); in AwaitWithDeadline()
1572 bool res = this->AwaitCommon(cond, t); in AwaitWithDeadline()
1579 this->AssertReaderHeld(); in AwaitCommon()
1590 this->UnlockSlow(&waitp); in AwaitCommon()
1591 this->Block(waitp.thread); in AwaitCommon()
1594 this->LockSlowLoop(&waitp, flags); in AwaitCommon()
1613 if ((v & kExclusive->slow_need_zero) == 0 && // try fast acquire in TryLock()
1615 v, (kExclusive->fast_or | v) + kExclusive->fast_add, in TryLock()
1634 // The while-loops (here and below) iterate only if the mutex word keeps in ReaderTryLock()
1647 loop_limit--; in ReaderTryLock()
1652 while ((v & kShared->slow_need_zero) == 0 && loop_limit != 0) { in ReaderTryLock()
1662 loop_limit--; in ReaderTryLock()
1686 // should_try_cas is whether we'll try a compare-and-swap immediately. in Unlock()
1697 // all possible non-zero values for x exceed all possible values for y. in Unlock()
1712 this->UnlockSlow(nullptr /*no waitp*/); // take slow path in Unlock()
1717 // Requires v to represent a reader-locked state.
1736 if (mu_.compare_exchange_strong(v, v - clear, in ReaderUnlock()
1743 this->UnlockSlow(nullptr /*no waitp*/); // take slow path in ReaderUnlock()
1779 this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags), in LockSlow()
1783 // Compute cond->Eval() and tell race detectors that we do it under mutex mu.
1804 res = cond->Eval(); in EvalConditionAnnotated()
1816 res = cond->Eval(); in EvalConditionAnnotated()
1819 // Prevent unused param warnings in non-TSAN builds. in EvalConditionAnnotated()
1826 // Compute cond->Eval() hiding it from race detectors.
1831 // tsan). As the result there is no tsan-visible synchronization between the
1839 // So we "divert" (which un-ignores both memory accesses and synchronization) in EvalConditionIgnored()
1843 bool res = cond->Eval(); in EvalConditionIgnored()
1846 static_cast<void>(mu); // Prevent unused param warning in non-TSAN builds. in EvalConditionIgnored()
1854 // - kMuHasBlocked indicates that the client has already blocked on the call so
1857 // - kMuIsCond indicates that this is a conditional acquire (condition variable,
1863 if ((v & how->fast_need_zero) == 0 && // try fast acquire in LockSlowWithDeadline()
1866 (how->fast_or | in LockSlowWithDeadline()
1868 how->fast_add, in LockSlowWithDeadline()
1883 this->UnlockSlow(&waitp); in LockSlowWithDeadline()
1884 this->Block(waitp.thread); in LockSlowWithDeadline()
1887 this->LockSlowLoop(&waitp, flags); in LockSlowWithDeadline()
1893 // RAW_CHECK_FMT() takes a condition, a printf-style format string, and
1894 // the printf-style argument list. The format string must be a literal.
1903 static void CheckForMutexCorruption(intptr_t v, const char* label) { in CheckForMutexCorruption() argument
1919 label, reinterpret_cast<void *>(v)); in CheckForMutexCorruption()
1922 label, reinterpret_cast<void *>(v)); in CheckForMutexCorruption()
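The RAW_CHECK_FMT() description above (a condition, a printf-style literal format string, and its arguments) suggests a macro of roughly the following shape. This is a hedged sketch, not necessarily the exact Abseil definition, built on the ABSL_RAW_LOG call that appears in other fragments:

#include "absl/base/internal/raw_logging.h"  // for ABSL_RAW_LOG (internal header)

// Sketch only: log fatally with the caller's printf-style message when the
// condition is false.
#define RAW_CHECK_FMT_SKETCH(cond, ...)                            \
  do {                                                             \
    if (!(cond)) {                                                 \
      ABSL_RAW_LOG(FATAL, "Check " #cond " failed: " __VA_ARGS__); \
    }                                                              \
  } while (0)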
1932 waitp->how == kExclusive? SYNCH_EV_LOCK: SYNCH_EV_READERLOCK); in LockSlowLoop()
1935 waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors, in LockSlowLoop()
1940 if ((v & waitp->how->slow_need_zero) == 0) { in LockSlowLoop()
1943 (waitp->how->fast_or | in LockSlowLoop()
1945 waitp->how->fast_add, in LockSlowLoop()
1947 if (waitp->cond == nullptr || in LockSlowLoop()
1948 EvalConditionAnnotated(waitp->cond, this, true, false, in LockSlowLoop()
1949 waitp->how == kShared)) { in LockSlowLoop()
1952 this->UnlockSlow(waitp); // got lock but condition false in LockSlowLoop()
1953 this->Block(waitp->thread); in LockSlowLoop()
1966 if (waitp->how == kExclusive && (v & kMuReader) != 0) { in LockSlowLoop()
1975 waitp->thread->waitp = nullptr; in LockSlowLoop()
1977 } else if ((v & waitp->how->slow_inc_need_zero & in LockSlowLoop()
1987 h->readers += kMuOne; // inc reader count in waiter in LockSlowLoop()
1993 if (waitp->cond == nullptr || in LockSlowLoop()
1994 EvalConditionAnnotated(waitp->cond, this, true, false, in LockSlowLoop()
1995 waitp->how == kShared)) { in LockSlowLoop()
1998 this->UnlockSlow(waitp); // got lock but condition false in LockSlowLoop()
1999 this->Block(waitp->thread); in LockSlowLoop()
2013 if (waitp->how == kExclusive && (v & kMuReader) != 0) { in LockSlowLoop()
2025 this->Block(waitp->thread); // wait until removed from list or timeout in LockSlowLoop()
2031 waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors, in LockSlowLoop()
2037 waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors, in LockSlowLoop()
2041 waitp->how == kExclusive? SYNCH_EV_LOCK_RETURNING : in LockSlowLoop()
2047 // If waitp is non-zero, it must be the wait parameters for the current thread
2054 this->AssertReaderHeld(); in UnlockSlow()
2073 ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr || in UnlockSlow()
2074 waitp->thread->suppress_fatal_errors, in UnlockSlow()
2078 // waiters if waitp is non-zero. in UnlockSlow()
2092 if (mu_.compare_exchange_strong(v, v - clear, in UnlockSlow()
2109 intptr_t new_readers = (v >= kMuOne)? v - kMuOne : v; in UnlockSlow()
2112 // If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then in UnlockSlow()
2116 do_enqueue = (waitp->cv_word == nullptr); in UnlockSlow()
2132 // release spinlock & our lock; retry if reader-count changed in UnlockSlow()
2143 if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) { in UnlockSlow()
2145 h->readers -= kMuOne; // release our lock in UnlockSlow()
2161 ABSL_RAW_CHECK(old_h == nullptr || h->maybe_unlocking, in UnlockSlow()
2166 !old_h->may_skip) { // we used old_h as a terminator in UnlockSlow()
2167 old_h->may_skip = true; // allow old_h to skip once more in UnlockSlow()
2168 ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head"); in UnlockSlow()
2169 if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) { in UnlockSlow()
2170 old_h->skip = old_h->next; // old_h not head & can skip to successor in UnlockSlow()
2173 if (h->next->waitp->how == kExclusive && in UnlockSlow()
2174 Condition::GuaranteedEqual(h->next->waitp->cond, nullptr)) { in UnlockSlow()
2177 w = h->next; in UnlockSlow()
2178 w->wake = true; in UnlockSlow()
2187 } else if (w != nullptr && (w->waitp->how == kExclusive || h == old_h)) { in UnlockSlow()
2202 h->readers = 0; in UnlockSlow()
2203 h->maybe_unlocking = false; // finished unlocking in UnlockSlow()
2223 w_walk = old_h->next; in UnlockSlow()
2226 nullptr; // h->next's predecessor may change; don't record it in UnlockSlow()
2227 w_walk = h->next; in UnlockSlow()
2230 h->may_skip = false; // ensure we never skip past h in future searches in UnlockSlow()
2232 ABSL_RAW_CHECK(h->skip == nullptr, "illegal skip from head"); in UnlockSlow()
2234 h->maybe_unlocking = true; // we're about to scan the waiter list in UnlockSlow()
2244 // Without the spinlock, the locations mu_ and h->next may now change in UnlockSlow()
2254 w_walk->wake = false; in UnlockSlow()
2255 if (w_walk->waitp->cond == in UnlockSlow()
2257 (w_walk->waitp->cond != known_false && in UnlockSlow()
2260 EvalConditionIgnored(this, w_walk->waitp->cond))) { in UnlockSlow()
2262 w_walk->wake = true; // can wake this waiter in UnlockSlow()
2265 if (w_walk->waitp->how == kExclusive) { in UnlockSlow()
2269 } else if (w_walk->waitp->how == kShared) { // wake if a reader in UnlockSlow()
2270 w_walk->wake = true; in UnlockSlow()
2275 known_false = w_walk->waitp->cond; // remember last false condition in UnlockSlow()
2277 if (w_walk->wake) { // we're waking reader w_walk in UnlockSlow()
2282 // If pw_walk == h, then load of pw_walk->next can race with in UnlockSlow()
2287 w_walk = pw_walk->next; in UnlockSlow()
2291 continue; // restart for(;;)-loop to wakeup w or to find more waiters in UnlockSlow()
2293 ABSL_RAW_CHECK(pw->next == w, "pw not w's predecessor"); in UnlockSlow()
2300 // This traverses the list in [ pw->next, h ], where h is the head, in UnlockSlow()
2302 // singly-linked list wake_list. Returns the new head. in UnlockSlow()
2319 h->readers = 0; in UnlockSlow()
2320 h->maybe_unlocking = false; // finished unlocking in UnlockSlow()
2327 break; // out of for(;;)-loop in UnlockSlow()
2331 } // end of for(;;)-loop in UnlockSlow()
2339 if (!wake_list->cond_waiter) { in UnlockSlow()
2340 wait_cycles += (now - wake_list->waitp->contention_start_cycles); in UnlockSlow()
2341 wake_list->waitp->contention_start_cycles = now; in UnlockSlow()
2364 this->LockSlow(how, nullptr, kMuHasBlocked | kMuIsCond); in Trans()
2374 ABSL_RAW_CHECK(w->waitp->cond == nullptr, in Fer()
2376 ABSL_RAW_CHECK(!w->waitp->timeout.has_timeout(), in Fer()
2378 ABSL_RAW_CHECK(w->waitp->cv_word == nullptr, in Fer()
2389 kMuWriter | (w->waitp->how == kShared ? 0 : kMuReader); in Fer()
2391 w->next = nullptr; in Fer()
2392 w->state.store(PerThreadSynch::kAvailable, std::memory_order_release); in Fer()
2398 PerThreadSynch *new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond); in Fer()
2409 PerThreadSynch *new_h = Enqueue(h, w->waitp, v, kMuIsCond); in Fer()
2431 (e == nullptr ? "" : e->name)); in AssertHeld()
2440 static_cast<const void *>(this), (e == nullptr ? "" : e->name)); in AssertReaderHeld()
2444 // -------------------------------- condition variables
2457 SynchEvent *e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin); in EnableDebugLog()
2458 e->log = true; in EnableDebugLog()
2464 ForgetSynchEvent(&this->cv_, kCvEvent, kCvSpin); in ~CondVar()
2483 while (w->next != s && w->next != h) { // search for thread in Remove()
2484 w = w->next; in Remove()
2486 if (w->next == s) { // found thread; remove it in Remove()
2487 w->next = s->next; in Remove()
2491 s->next = nullptr; in Remove()
2492 s->state.store(PerThreadSynch::kAvailable, std::memory_order_release); in Remove()
2506 // Queue thread waitp->thread on condition variable word cv_word using
2510 // before calling Mutex::UnlockSlow(), the Mutex code might be re-entered (via
2513 // a condition variable waiter queue. Thus, we use the waitp->cv_word to tell
2516 // importantly) after any call to an external routine that might re-enter the
2525 std::atomic<intptr_t> *cv_word = waitp->cv_word; in CondVarEnqueue()
2526 waitp->cv_word = nullptr; in CondVarEnqueue()
2528 intptr_t v = cv_word->load(std::memory_order_relaxed); in CondVarEnqueue()
2531 !cv_word->compare_exchange_weak(v, v | kCvSpin, in CondVarEnqueue()
2535 v = cv_word->load(std::memory_order_relaxed); in CondVarEnqueue()
2537 ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be"); in CondVarEnqueue()
2538 waitp->thread->waitp = waitp; // prepare ourselves for waiting in CondVarEnqueue()
2541 waitp->thread->next = waitp->thread; in CondVarEnqueue()
2543 waitp->thread->next = h->next; in CondVarEnqueue()
2544 h->next = waitp->thread; in CondVarEnqueue()
2546 waitp->thread->state.store(PerThreadSynch::kQueued, in CondVarEnqueue()
2548 cv_word->store((v & kCvEvent) | reinterpret_cast<intptr_t>(waitp->thread), in CondVarEnqueue()
2553 bool rc = false; // return value; true iff we timed-out in WaitCommon()
2555 intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed); in WaitCommon()
2572 mutex->UnlockSlow(&waitp); in WaitCommon()
2575 while (waitp.thread->state.load(std::memory_order_acquire) == in WaitCommon()
2585 // we can live-lock in this loop since DecrementSynchSem will always in WaitCommon()
2592 // wait list with a single compare-exchange and does not really grab in WaitCommon()
2595 this->Remove(waitp.thread); in WaitCommon()
2600 ABSL_RAW_CHECK(waitp.thread->waitp != nullptr, "not waiting when should be"); in WaitCommon()
2601 waitp.thread->waitp = nullptr; // cleanup in WaitCommon()
2615 mutex->Trans(mutex_how); // Reacquire mutex in WaitCommon()
2633 // If it was a timed wait, w will be waiting on w->cv
2634 // Otherwise, if it was not a Mutex mutex, w will be waiting on w->sem
2637 if (w->waitp->timeout.has_timeout() || w->waitp->cvmu == nullptr) { in Wakeup()
2638 // The waiting thread only needs to observe "w->state == kAvailable" to be in Wakeup()
2640 Mutex *mu = w->waitp->cvmu; in Wakeup()
2641 w->next = nullptr; in Wakeup()
2642 w->state.store(PerThreadSynch::kAvailable, std::memory_order_release); in Wakeup()
2645 w->waitp->cvmu->Fer(w); in Wakeup()
2663 w = h->next; in Signal()
2667 h->next = w->next; in Signal()
2706 PerThreadSynch *n = h->next; in SignalAll()
2709 n = n->next; in SignalAll()
2728 ABSL_RAW_CHECK(this->mu_ != nullptr, in Release()
2730 this->mu_->Unlock(); in Release()
2731 this->mu_ = nullptr; in Release()
2759 return (*c->function_)(c->arg_); in CallVoidPtrFunction()
2771 return (this->eval_ == nullptr) || (*this->eval_)(this); in Eval()
2776 return b == nullptr || b->eval_ == nullptr; in GuaranteedEqual()
2778 if (b == nullptr || b->eval_ == nullptr) { in GuaranteedEqual()
2779 return a->eval_ == nullptr; in GuaranteedEqual()
2781 return a->eval_ == b->eval_ && a->function_ == b->function_ && in GuaranteedEqual()
2782 a->arg_ == b->arg_ && a->method_ == b->method_; in GuaranteedEqual()