1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "mutex.h"
18 
19 #include <errno.h>
20 #include <sys/time.h>
21 
22 #include <sstream>
23 
24 #include "android-base/stringprintf.h"
25 
26 #include "base/atomic.h"
27 #include "base/logging.h"
28 #include "base/systrace.h"
29 #include "base/time_utils.h"
30 #include "base/value_object.h"
31 #include "monitor.h"
32 #include "mutex-inl.h"
33 #include "scoped_thread_state_change-inl.h"
34 #include "thread-inl.h"
35 #include "thread.h"
36 #include "thread_list.h"
37 
38 namespace art {
39 
40 using android::base::StringPrintf;
41 
42 static constexpr uint64_t kIntervalMillis = 50;
43 static constexpr int kMonitorTimeoutTryMax = 5;
44 
45 static const char* kLastDumpStackTime = "LastDumpStackTime";
46 
47 struct AllMutexData {
48   // A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
49   Atomic<const BaseMutex*> all_mutexes_guard;
50   // All created mutexes guarded by all_mutexes_guard_.
51   std::set<BaseMutex*>* all_mutexes;
52   AllMutexData() : all_mutexes(nullptr) {}
53 };
54 static struct AllMutexData gAllMutexData[kAllMutexDataSize];
55 
56 struct DumpStackLastTimeTLSData : public art::TLSData {
57   explicit DumpStackLastTimeTLSData(uint64_t last_dump_time_ms)
58       : last_dump_time_ms_(last_dump_time_ms) {}
59   std::atomic<uint64_t> last_dump_time_ms_;
60 };
61 
62 #if ART_USE_FUTEXES
63 // Compute a relative timespec as *result_ts = lhs - rhs.
64 // Return false (and produce an invalid *result_ts) if lhs < rhs.
65 static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
66   const int32_t one_sec = 1000 * 1000 * 1000;  // one second in nanoseconds.
67   static_assert(std::is_signed<decltype(result_ts->tv_sec)>::value);  // Signed on Linux.
68   result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec;
69   result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec;
70   if (result_ts->tv_nsec < 0) {
71     result_ts->tv_sec--;
72     result_ts->tv_nsec += one_sec;
73   }
74   DCHECK(result_ts->tv_nsec >= 0 && result_ts->tv_nsec < one_sec);
75   return result_ts->tv_sec >= 0;
76 }
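// Worked example (illustrative values): for lhs = {5 s, 100 ns} and rhs = {3 s, 500 ns} the raw
// difference is {2 s, -400 ns}; after borrowing one second the result is {1 s, 999999600 ns} and
// the function returns true. If rhs were later than lhs, tv_sec would end up negative and the
// function would return false, i.e. the deadline has already passed.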
77 #endif
78 
79 #if ART_USE_FUTEXES
80 // If we wake up from a futex wake, and the runtime disappeared while we were asleep,
81 // it's important to stop in our tracks before we touch deallocated memory.
82 static inline void SleepIfRuntimeDeleted(Thread* self) {
83   if (self != nullptr) {
84     JNIEnvExt* const env = self->GetJniEnv();
85     if (UNLIKELY(env != nullptr && env->IsRuntimeDeleted())) {
86       DCHECK(self->IsDaemon());
87       // If the runtime has been deleted, then we cannot proceed. Just sleep forever. This may
88       // occur for user daemon threads that get a spurious wakeup. This occurs for test 132 with
89       // --host and --gdb.
90       // After we wake up, the runtime may have been shut down, which means that this condition may
91       // have been deleted. It is not safe to retry the wait.
92       SleepForever();
93     }
94   }
95 }
96 #else
97 // We should be doing this for pthreads too, but it seems to be impossible for something
98 // like a condition variable wait. Thus we don't bother trying.
99 #endif
100 
101 // Wait for an amount of time that roughly increases in the argument i.
102 // Spin for small arguments and yield/sleep for longer ones.
103 static void BackOff(uint32_t i) {
104   static constexpr uint32_t kSpinMax = 10;
105   static constexpr uint32_t kYieldMax = 20;
106   if (i <= kSpinMax) {
107     // TODO: Esp. in very latency-sensitive cases, consider replacing this with an explicit
108     // test-and-test-and-set loop in the caller.  Possibly skip entirely on a uniprocessor.
109     volatile uint32_t x = 0;
110     const uint32_t spin_count = 10 * i;
111     for (uint32_t spin = 0; spin < spin_count; ++spin) {
112       ++x;  // Volatile; hence should not be optimized away.
113     }
114     // TODO: Consider adding x86 PAUSE and/or ARM YIELD here.
115   } else if (i <= kYieldMax) {
116     sched_yield();
117   } else {
118     NanoSleep(1000ull * (i - kYieldMax));
119   }
120 }
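// Illustrative use (a sketch, not code from this file): a caller spinning on a CAS typically
// passes an increasing attempt counter, so early retries stay cheap and later ones yield or sleep:
//
//   for (uint32_t attempt = 0; !flag.CompareAndSetWeakAcquire(0, 1); ++attempt) {
//     BackOff(attempt);  // Spin while attempt <= kSpinMax, sched_yield() up to kYieldMax, then NanoSleep().
//   }
//
// ScopedAllMutexesLock below acquires its guard with exactly this pattern.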
121 
122 // Wait until pred(testLoc->load(std::memory_order_relaxed)) holds, or until a
123 // short time interval, on the order of kernel context-switch time, passes.
124 // Return true if the predicate test succeeded, false if we timed out.
125 template<typename Pred>
126 static inline bool WaitBrieflyFor(AtomicInteger* testLoc, Thread* self, Pred pred) {
127   // TODO: Tune these parameters correctly. BackOff(3) should take on the order of 100 cycles. So
128   // this should result in retrying <= 10 times, usually waiting around 100 cycles each. The
129   // maximum delay should be significantly less than the expected futex() context switch time, so
130   // there should be little danger of this worsening things appreciably. If the lock was only
131   // held briefly by a running thread, this should help immensely.
132   static constexpr uint32_t kMaxBackOff = 3;  // Should probably be <= kSpinMax above.
133   static constexpr uint32_t kMaxIters = 50;
134   JNIEnvExt* const env = self == nullptr ? nullptr : self->GetJniEnv();
135   for (uint32_t i = 1; i <= kMaxIters; ++i) {
136     BackOff(std::min(i, kMaxBackOff));
137     if (pred(testLoc->load(std::memory_order_relaxed))) {
138       return true;
139     }
140     if (UNLIKELY(env != nullptr && env->IsRuntimeDeleted())) {
141       // This returns true once we've started shutting down. We then try to reach a quiescent
142       // state as soon as possible to avoid touching data that may be deallocated by the shutdown
143       // process. It currently relies on a timeout.
144       return false;
145     }
146   }
147   return false;
148 }
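// Illustrative use (sketch): callers pass the atomic word they are about to block on plus a
// predicate for "no need to sleep". For example, Mutex::ExclusiveLock below avoids a futex wait
// whenever the held bit clears quickly:
//
//   WaitBrieflyFor(&state_and_contenders_, self, [](int32_t v) { return (v & kHeldMask) == 0; });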
149 
150 class ScopedAllMutexesLock final {
151  public:
152   explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
153     for (uint32_t i = 0;
154          !gAllMutexData->all_mutexes_guard.CompareAndSetWeakAcquire(nullptr, mutex);
155          ++i) {
156       BackOff(i);
157     }
158   }
159 
160   ~ScopedAllMutexesLock() {
161     DCHECK_EQ(gAllMutexData->all_mutexes_guard.load(std::memory_order_relaxed), mutex_);
162     gAllMutexData->all_mutexes_guard.store(nullptr, std::memory_order_release);
163   }
164 
165  private:
166   const BaseMutex* const mutex_;
167 };
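// Note on the guard above: all_mutexes_guard is a tiny spin lock keyed by the acquiring mutex's
// address, so BaseMutex construction and destruction can serialize access to the global set
// without taking a real Mutex (which would recurse into this same bookkeeping).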
168 
169 // Scoped class that generates events at the beginning and end of lock contention.
170 class ScopedContentionRecorder final : public ValueObject {
171  public:
172   ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
173       : mutex_(kLogLockContentions ? mutex : nullptr),
174         blocked_tid_(kLogLockContentions ? blocked_tid : 0),
175         owner_tid_(kLogLockContentions ? owner_tid : 0),
176         start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
177     if (ATraceEnabled()) {
178       std::string msg = StringPrintf("Lock contention on %s (owner tid: %" PRIu64 ")",
179                                      mutex->GetName(), owner_tid);
180       ATraceBegin(msg.c_str());
181     }
182   }
183 
184   ~ScopedContentionRecorder() {
185     ATraceEnd();
186     if (kLogLockContentions) {
187       uint64_t end_nano_time = NanoTime();
188       mutex_->RecordContention(blocked_tid_, owner_tid_, end_nano_time - start_nano_time_);
189     }
190   }
191 
192  private:
193   BaseMutex* const mutex_;
194   const uint64_t blocked_tid_;
195   const uint64_t owner_tid_;
196   const uint64_t start_nano_time_;
197 };
198 
199 BaseMutex::BaseMutex(const char* name, LockLevel level)
200     : name_(name),
201       level_(level),
202       should_respond_to_empty_checkpoint_request_(false) {
203   if (kLogLockContentions) {
204     ScopedAllMutexesLock mu(this);
205     std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
206     if (*all_mutexes_ptr == nullptr) {
207       // We leak the global set of all mutexes to avoid ordering issues in global variable
208       // construction/destruction.
209       *all_mutexes_ptr = new std::set<BaseMutex*>();
210     }
211     (*all_mutexes_ptr)->insert(this);
212   }
213 }
214 
215 BaseMutex::~BaseMutex() {
216   if (kLogLockContentions) {
217     ScopedAllMutexesLock mu(this);
218     gAllMutexData->all_mutexes->erase(this);
219   }
220 }
221 
222 void BaseMutex::DumpAll(std::ostream& os) {
223   if (kLogLockContentions) {
224     os << "Mutex logging:\n";
225     ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
226     std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
227     if (all_mutexes == nullptr) {
228       // No mutexes have been created yet at startup.
229       return;
230     }
231     os << "(Contended)\n";
232     for (const BaseMutex* mutex : *all_mutexes) {
233       if (mutex->HasEverContended()) {
234         mutex->Dump(os);
235         os << "\n";
236       }
237     }
238     os << "(Never contended)\n";
239     for (const BaseMutex* mutex : *all_mutexes) {
240       if (!mutex->HasEverContended()) {
241         mutex->Dump(os);
242         os << "\n";
243       }
244     }
245   }
246 }
247 
248 void BaseMutex::CheckSafeToWait(Thread* self) {
249   if (self == nullptr) {
250     CheckUnattachedThread(level_);
251     return;
252   }
253   if (kDebugLocking) {
254     CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
255         << "Waiting on unacquired mutex: " << name_;
256     bool bad_mutexes_held = false;
257     std::string error_msg;
258     for (int i = kLockLevelCount - 1; i >= 0; --i) {
259       if (i != level_) {
260         BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
261         // We allow the thread to wait even if the user_code_suspension_lock_ is held. This
262         // just means that gc or some other internal process is suspending the thread while it is
263         // trying to suspend some other thread. So long as the current thread is not being suspended
264         // by a SuspendReason::kForUserCode (which needs the user_code_suspension_lock_ to clear)
265         // this is fine. This is needed due to user_code_suspension_lock_ being the way untrusted
266         // code interacts with suspension. One holds the lock to prevent user-code-suspension from
267         // occurring. Since this is only initiated from user-supplied native-code this is safe.
268         if (held_mutex == Locks::user_code_suspension_lock_) {
269           // No thread safety analysis is fine since we have both the user_code_suspension_lock_
270           // from the line above and the ThreadSuspendCountLock since it is our level_. We use this
271           // lambda to avoid having to annotate the whole function as NO_THREAD_SAFETY_ANALYSIS.
272           auto is_suspending_for_user_code = [self]() NO_THREAD_SAFETY_ANALYSIS {
273             return self->GetUserCodeSuspendCount() != 0;
274           };
275           if (is_suspending_for_user_code()) {
276             std::ostringstream oss;
277             oss << "Holding \"" << held_mutex->name_ << "\" "
278                 << "(level " << LockLevel(i) << ") while performing wait on "
279                 << "\"" << name_ << "\" (level " << level_ << ") "
280                 << "with SuspendReason::kForUserCode pending suspensions";
281             error_msg = oss.str();
282             LOG(ERROR) << error_msg;
283             bad_mutexes_held = true;
284           }
285         } else if (held_mutex != nullptr) {
286           std::ostringstream oss;
287           oss << "Holding \"" << held_mutex->name_ << "\" "
288               << "(level " << LockLevel(i) << ") while performing wait on "
289               << "\"" << name_ << "\" (level " << level_ << ")";
290           error_msg = oss.str();
291           LOG(ERROR) << error_msg;
292           bad_mutexes_held = true;
293         }
294       }
295     }
296     if (gAborting == 0) {  // Avoid recursive aborts.
297       CHECK(!bad_mutexes_held) << error_msg;
298     }
299   }
300 }
301 
302 void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
303   if (kLogLockContentions) {
304     // Atomically add value to wait_time.
305     wait_time.fetch_add(value, std::memory_order_seq_cst);
306   }
307 }
308 
309 void BaseMutex::RecordContention(uint64_t blocked_tid,
310                                  uint64_t owner_tid,
311                                  uint64_t nano_time_blocked) {
312   if (kLogLockContentions) {
313     ContentionLogData* data = contention_log_data_;
314     ++(data->contention_count);
315     data->AddToWaitTime(nano_time_blocked);
316     ContentionLogEntry* log = data->contention_log;
317     // This code is intentionally racy as it is only used for diagnostics.
318     int32_t slot = data->cur_content_log_entry.load(std::memory_order_relaxed);
319     if (log[slot].blocked_tid == blocked_tid &&
320         log[slot].owner_tid == owner_tid) {
321       ++log[slot].count;
322     } else {
323       uint32_t new_slot;
324       do {
325         slot = data->cur_content_log_entry.load(std::memory_order_relaxed);
326         new_slot = (slot + 1) % kContentionLogSize;
327       } while (!data->cur_content_log_entry.CompareAndSetWeakRelaxed(slot, new_slot));
328       log[new_slot].blocked_tid = blocked_tid;
329       log[new_slot].owner_tid = owner_tid;
330       log[new_slot].count.store(1, std::memory_order_relaxed);
331     }
332   }
333 }
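// The contention log above is a fixed-size ring buffer of kContentionLogSize entries: repeated
// contention by the same blocked/owner pair just bumps the current slot's count, while a new pair
// claims the next slot via a relaxed CAS. Races here only smear the diagnostics, never correctness.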
334 
335 void BaseMutex::DumpContention(std::ostream& os) const {
336   if (kLogLockContentions) {
337     const ContentionLogData* data = contention_log_data_;
338     const ContentionLogEntry* log = data->contention_log;
339     uint64_t wait_time = data->wait_time.load(std::memory_order_relaxed);
340     uint32_t contention_count = data->contention_count.load(std::memory_order_relaxed);
341     if (contention_count == 0) {
342       os << "never contended";
343     } else {
344       os << "contended " << contention_count
345          << " total wait of contender " << PrettyDuration(wait_time)
346          << " average " << PrettyDuration(wait_time / contention_count);
347       SafeMap<uint64_t, size_t> most_common_blocker;
348       SafeMap<uint64_t, size_t> most_common_blocked;
349       for (size_t i = 0; i < kContentionLogSize; ++i) {
350         uint64_t blocked_tid = log[i].blocked_tid;
351         uint64_t owner_tid = log[i].owner_tid;
352         uint32_t count = log[i].count.load(std::memory_order_relaxed);
353         if (count > 0) {
354           auto it = most_common_blocked.find(blocked_tid);
355           if (it != most_common_blocked.end()) {
356             most_common_blocked.Overwrite(blocked_tid, it->second + count);
357           } else {
358             most_common_blocked.Put(blocked_tid, count);
359           }
360           it = most_common_blocker.find(owner_tid);
361           if (it != most_common_blocker.end()) {
362             most_common_blocker.Overwrite(owner_tid, it->second + count);
363           } else {
364             most_common_blocker.Put(owner_tid, count);
365           }
366         }
367       }
368       uint64_t max_tid = 0;
369       size_t max_tid_count = 0;
370       for (const auto& pair : most_common_blocked) {
371         if (pair.second > max_tid_count) {
372           max_tid = pair.first;
373           max_tid_count = pair.second;
374         }
375       }
376       if (max_tid != 0) {
377         os << " sample shows most blocked tid=" << max_tid;
378       }
379       max_tid = 0;
380       max_tid_count = 0;
381       for (const auto& pair : most_common_blocker) {
382         if (pair.second > max_tid_count) {
383           max_tid = pair.first;
384           max_tid_count = pair.second;
385         }
386       }
387       if (max_tid != 0) {
388         os << " sample shows tid=" << max_tid << " owning during this time";
389       }
390     }
391   }
392 }
393 
394 
395 Mutex::Mutex(const char* name, LockLevel level, bool recursive)
396     : BaseMutex(name, level), exclusive_owner_(0), recursion_count_(0), recursive_(recursive) {
397 #if ART_USE_FUTEXES
398   DCHECK_EQ(0, state_and_contenders_.load(std::memory_order_relaxed));
399 #else
400   CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, nullptr));
401 #endif
402 }
403 
404 // Helper to allow checking shutdown while locking for thread safety.
405 static bool IsSafeToCallAbortSafe() {
406   MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
407   return Locks::IsSafeToCallAbortRacy();
408 }
409 
410 Mutex::~Mutex() {
411   bool safe_to_call_abort = Locks::IsSafeToCallAbortRacy();
412 #if ART_USE_FUTEXES
413   if (state_and_contenders_.load(std::memory_order_relaxed) != 0) {
414     LOG(safe_to_call_abort ? FATAL : WARNING)
415         << "destroying mutex with owner or contenders. Owner:" << GetExclusiveOwnerTid();
416   } else {
417     if (GetExclusiveOwnerTid() != 0) {
418       LOG(safe_to_call_abort ? FATAL : WARNING)
419           << "unexpectedly found an owner on unlocked mutex " << name_;
420     }
421   }
422 #else
423   // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
424   // may still be using locks.
425   int rc = pthread_mutex_destroy(&mutex_);
426   if (rc != 0) {
427     errno = rc;
428     PLOG(safe_to_call_abort ? FATAL : WARNING)
429         << "pthread_mutex_destroy failed for " << name_;
430   }
431 #endif
432 }
433 
434 void Mutex::ExclusiveLock(Thread* self) {
435   DCHECK(self == nullptr || self == Thread::Current());
436   if (kDebugLocking && !recursive_) {
437     AssertNotHeld(self);
438   }
439   if (!recursive_ || !IsExclusiveHeld(self)) {
440 #if ART_USE_FUTEXES
441     bool done = false;
442     do {
443       int32_t cur_state = state_and_contenders_.load(std::memory_order_relaxed);
444       if (LIKELY((cur_state & kHeldMask) == 0) /* lock not held */) {
445         done = state_and_contenders_.CompareAndSetWeakAcquire(cur_state, cur_state | kHeldMask);
446       } else {
447         // Failed to acquire, hang up.
448         ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
449         // Empirically, it appears important to spin again each time through the loop; if we
450         // bother to go to sleep and wake up, we should be fairly persistent in trying for the
451         // lock.
452         if (!WaitBrieflyFor(&state_and_contenders_, self,
453                             [](int32_t v) { return (v & kHeldMask) == 0; })) {
454           // Increment contender count. We can't create enough threads for this to overflow.
455           increment_contenders();
456           // Make cur_state again reflect the expected value of state_and_contenders.
457           cur_state += kContenderIncrement;
458           if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
459             self->CheckEmptyCheckpointFromMutex();
460           }
461 
462           uint64_t wait_start_ms = enable_monitor_timeout_ ? MilliTime() : 0;
463           uint64_t try_times = 0;
464           do {
465             timespec timeout_ts;
466             timeout_ts.tv_sec = 0;
467             // NB: Some tests use the mutex without the runtime.
468             timeout_ts.tv_nsec = Runtime::Current() != nullptr
469                 ? Runtime::Current()->GetMonitorTimeoutNs()
470                 : Monitor::kDefaultMonitorTimeoutMs;
471             if (futex(state_and_contenders_.Address(), FUTEX_WAIT_PRIVATE, cur_state,
472                       enable_monitor_timeout_ ? &timeout_ts : nullptr , nullptr, 0) != 0) {
473               // We only went to sleep after incrementing contenders and checking that the
474               // lock is still held by someone else.  EAGAIN and EINTR both indicate a spurious
475               // failure, try again from the beginning.  We don't use TEMP_FAILURE_RETRY so we can
476               // intentionally retry to acquire the lock.
477               if ((errno != EAGAIN) && (errno != EINTR)) {
478                 if (errno == ETIMEDOUT) {
479                   try_times++;
480                   if (try_times <= kMonitorTimeoutTryMax) {
481                     DumpStack(self, wait_start_ms, try_times);
482                   }
483                 } else {
484                   PLOG(FATAL) << "futex wait failed for " << name_;
485                 }
486               }
487             }
488             SleepIfRuntimeDeleted(self);
489             // Retry until not held. In heavy contention situations we otherwise get redundant
490             // futex wakeups as a result of repeatedly decrementing and incrementing contenders.
491             cur_state = state_and_contenders_.load(std::memory_order_relaxed);
492           } while ((cur_state & kHeldMask) != 0);
493           decrement_contenders();
494         }
495       }
496     } while (!done);
497     // Confirm that lock is now held.
498     DCHECK_NE(state_and_contenders_.load(std::memory_order_relaxed) & kHeldMask, 0);
499 #else
500     CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
501 #endif
502     DCHECK_EQ(GetExclusiveOwnerTid(), 0) << " my tid = " << SafeGetTid(self)
503                                          << " recursive_ = " << recursive_;
504     exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
505     RegisterAsLocked(self);
506   }
507   recursion_count_++;
508   if (kDebugLocking) {
509     CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
510         << name_ << " " << recursion_count_;
511     AssertHeld(self);
512   }
513 }
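// State encoding recap (sketch; mutex.h holds the authoritative definitions): the low bit of
// state_and_contenders_ is kHeldMask and the remaining bits count contenders in units of
// kContenderIncrement. For example, a value of (2 * kContenderIncrement) | kHeldMask means the
// lock is held while two threads are parked in the futex() wait above.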
514 
515 void Mutex::DumpStack(Thread* self, uint64_t wait_start_ms, uint64_t try_times) {
516   ScopedObjectAccess soa(self);
517   Locks::thread_list_lock_->ExclusiveLock(self);
518   std::string owner_stack_dump;
519   pid_t owner_tid = GetExclusiveOwnerTid();
520   CHECK(Runtime::Current() != nullptr);
521   Thread *owner = Runtime::Current()->GetThreadList()->FindThreadByTid(owner_tid);
522   if (owner != nullptr) {
523     if (IsDumpFrequent(owner, try_times)) {
524       Locks::thread_list_lock_->ExclusiveUnlock(self);
525       LOG(WARNING) << "Contention with tid " << owner_tid << ", monitor id " << monitor_id_;
526       return;
527     }
528     struct CollectStackTrace : public Closure {
529       void Run(art::Thread* thread) override
530         REQUIRES_SHARED(art::Locks::mutator_lock_) {
531         if (IsDumpFrequent(thread)) {
532           return;
533         }
534         DumpStackLastTimeTLSData* tls_data =
535             reinterpret_cast<DumpStackLastTimeTLSData*>(thread->GetCustomTLS(kLastDumpStackTime));
536         if (tls_data == nullptr) {
537           thread->SetCustomTLS(kLastDumpStackTime, new DumpStackLastTimeTLSData(MilliTime()));
538         } else {
539           tls_data->last_dump_time_ms_.store(MilliTime());
540         }
541         thread->DumpJavaStack(oss);
542       }
543       std::ostringstream oss;
544     };
545     CollectStackTrace owner_trace;
546     owner->RequestSynchronousCheckpoint(&owner_trace);
547     owner_stack_dump = owner_trace.oss.str();
548     uint64_t wait_ms = MilliTime() - wait_start_ms;
549     LOG(WARNING) << "Monitor contention with tid " << owner_tid << ", wait time: " << wait_ms
550                  << "ms, monitor id: " << monitor_id_
551                  << "\nPerfMonitor owner thread(" << owner_tid << ") stack is:\n"
552                  << owner_stack_dump;
553   } else {
554     Locks::thread_list_lock_->ExclusiveUnlock(self);
555   }
556 }
557 
558 bool Mutex::IsDumpFrequent(Thread* thread, uint64_t try_times) {
559   uint64_t last_dump_time_ms = 0;
560   DumpStackLastTimeTLSData* tls_data =
561       reinterpret_cast<DumpStackLastTimeTLSData*>(thread->GetCustomTLS(kLastDumpStackTime));
562   if (tls_data != nullptr) {
563      last_dump_time_ms = tls_data->last_dump_time_ms_.load();
564   }
565   uint64_t interval = MilliTime() - last_dump_time_ms;
566   if (interval < kIntervalMillis * try_times) {
567     return true;
568   } else {
569     return false;
570   }
571 }
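// Example of the rate limit above (illustrative numbers): with kIntervalMillis = 50 and
// try_times = 3, the owner's stack dump is suppressed if its previous dump happened less than
// 150 ms ago, so repeated ETIMEDOUT wakeups back off their logging progressively.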
572 
573 bool Mutex::ExclusiveTryLock(Thread* self) {
574   DCHECK(self == nullptr || self == Thread::Current());
575   if (kDebugLocking && !recursive_) {
576     AssertNotHeld(self);
577   }
578   if (!recursive_ || !IsExclusiveHeld(self)) {
579 #if ART_USE_FUTEXES
580     bool done = false;
581     do {
582       int32_t cur_state = state_and_contenders_.load(std::memory_order_relaxed);
583       if ((cur_state & kHeldMask) == 0) {
584         // Change state to held and impose load/store ordering appropriate for lock acquisition.
585         done = state_and_contenders_.CompareAndSetWeakAcquire(cur_state, cur_state | kHeldMask);
586       } else {
587         return false;
588       }
589     } while (!done);
590     DCHECK_NE(state_and_contenders_.load(std::memory_order_relaxed) & kHeldMask, 0);
591 #else
592     int result = pthread_mutex_trylock(&mutex_);
593     if (result == EBUSY) {
594       return false;
595     }
596     if (result != 0) {
597       errno = result;
598       PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
599     }
600 #endif
601     DCHECK_EQ(GetExclusiveOwnerTid(), 0);
602     exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
603     RegisterAsLocked(self);
604   }
605   recursion_count_++;
606   if (kDebugLocking) {
607     CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
608         << name_ << " " << recursion_count_;
609     AssertHeld(self);
610   }
611   return true;
612 }
613 
614 bool Mutex::ExclusiveTryLockWithSpinning(Thread* self) {
615   // Spin a small number of times, since this affects our ability to respond to suspension
616   // requests. We spin repeatedly only if the mutex repeatedly becomes available and unavailable
617   // in rapid succession, and then we will typically not spin for the maximal period.
618   const int kMaxSpins = 5;
619   for (int i = 0; i < kMaxSpins; ++i) {
620     if (ExclusiveTryLock(self)) {
621       return true;
622     }
623 #if ART_USE_FUTEXES
624     if (!WaitBrieflyFor(&state_and_contenders_, self,
625             [](int32_t v) { return (v & kHeldMask) == 0; })) {
626       return false;
627     }
628 #endif
629   }
630   return ExclusiveTryLock(self);
631 }
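// Usage note (sketch): this is for callers that can tolerate failure, since it gives up after a
// few brief waits instead of parking on the futex; a false return is normal and callers typically
// fall back to ExclusiveLock() or defer the work.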
632 
633 #if ART_USE_FUTEXES
634 void Mutex::ExclusiveLockUncontendedFor(Thread* new_owner) {
635   DCHECK_EQ(level_, kMonitorLock);
636   DCHECK(!recursive_);
637   state_and_contenders_.store(kHeldMask, std::memory_order_relaxed);
638   recursion_count_ = 1;
639   exclusive_owner_.store(SafeGetTid(new_owner), std::memory_order_relaxed);
640   // Don't call RegisterAsLocked(). It wouldn't register anything anyway.  And
641   // this happens as we're inflating a monitor, which doesn't logically affect
642   // held "locks"; it effectively just converts a thin lock to a mutex.  By doing
643   // this while the lock is already held, we're delaying the acquisition of a
644   // logically held mutex, which can introduce bogus lock order violations.
645 }
646 
647 void Mutex::ExclusiveUnlockUncontended() {
648   DCHECK_EQ(level_, kMonitorLock);
649   state_and_contenders_.store(0, std::memory_order_relaxed);
650   recursion_count_ = 0;
651   exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
652   // Skip RegisterAsUnlocked(), which wouldn't do anything anyway.
653 }
654 #endif  // ART_USE_FUTEXES
655 
656 void Mutex::ExclusiveUnlock(Thread* self) {
657   if (kIsDebugBuild && self != nullptr && self != Thread::Current()) {
658     std::string name1 = "<null>";
659     std::string name2 = "<null>";
660     if (self != nullptr) {
661       self->GetThreadName(name1);
662     }
663     if (Thread::Current() != nullptr) {
664       Thread::Current()->GetThreadName(name2);
665     }
666     LOG(FATAL) << GetName() << " level=" << level_ << " self=" << name1
667                << " Thread::Current()=" << name2;
668   }
669   AssertHeld(self);
670   DCHECK_NE(GetExclusiveOwnerTid(), 0);
671   recursion_count_--;
672   if (!recursive_ || recursion_count_ == 0) {
673     if (kDebugLocking) {
674       CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: "
675           << name_ << " " << recursion_count_;
676     }
677     RegisterAsUnlocked(self);
678 #if ART_USE_FUTEXES
679     bool done = false;
680     do {
681       int32_t cur_state = state_and_contenders_.load(std::memory_order_relaxed);
682       if (LIKELY((cur_state & kHeldMask) != 0)) {
683         // We're no longer the owner.
684         exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
685         // Change state to not held and impose load/store ordering appropriate for lock release.
686         uint32_t new_state = cur_state & ~kHeldMask;  // Same number of contenders.
687         done = state_and_contenders_.CompareAndSetWeakRelease(cur_state, new_state);
688         if (LIKELY(done)) {  // Spurious fail or waiters changed ?
689           if (UNLIKELY(new_state != 0) /* have contenders */) {
690             futex(state_and_contenders_.Address(), FUTEX_WAKE_PRIVATE, kWakeOne,
691                   nullptr, nullptr, 0);
692           }
693           // We only do a futex wait after incrementing contenders and verifying the lock was
694           // still held. If we didn't see waiters, then there couldn't have been any futexes
695           // waiting on this lock when we did the CAS. New arrivals after that cannot wait for us,
696           // since the futex wait call would see the lock available and immediately return.
697         }
698       } else {
699         // Logging acquires the logging lock, avoid infinite recursion in that case.
700         if (this != Locks::logging_lock_) {
701           LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
702         } else {
703           LogHelper::LogLineLowStack(__FILE__,
704                                      __LINE__,
705                                      ::android::base::FATAL_WITHOUT_ABORT,
706                                      StringPrintf("Unexpected state_ %d in unlock for %s",
707                                                   cur_state, name_).c_str());
708           _exit(1);
709         }
710       }
711     } while (!done);
712 #else
713     exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
714     CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
715 #endif
716   }
717 }
718 
719 void Mutex::Dump(std::ostream& os) const {
720   os << (recursive_ ? "recursive " : "non-recursive ")
721       << name_
722       << " level=" << static_cast<int>(level_)
723       << " rec=" << recursion_count_
724       << " owner=" << GetExclusiveOwnerTid() << " ";
725   DumpContention(os);
726 }
727 
728 std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
729   mu.Dump(os);
730   return os;
731 }
732 
733 void Mutex::WakeupToRespondToEmptyCheckpoint() {
734 #if ART_USE_FUTEXES
735   // Wake up all the waiters so they will respond to the empty checkpoint.
736   DCHECK(should_respond_to_empty_checkpoint_request_);
737   if (UNLIKELY(get_contenders() != 0)) {
738     futex(state_and_contenders_.Address(), FUTEX_WAKE_PRIVATE, kWakeAll, nullptr, nullptr, 0);
739   }
740 #else
741   LOG(FATAL) << "Non futex case isn't supported.";
742 #endif
743 }
744 
745 ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
746     : BaseMutex(name, level)
747 #if ART_USE_FUTEXES
748     , state_(0), exclusive_owner_(0), num_contenders_(0)
749 #endif
750 {
751 #if !ART_USE_FUTEXES
752   CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr));
753 #endif
754 }
755 
756 ReaderWriterMutex::~ReaderWriterMutex() {
757 #if ART_USE_FUTEXES
758   CHECK_EQ(state_.load(std::memory_order_relaxed), 0);
759   CHECK_EQ(GetExclusiveOwnerTid(), 0);
760   CHECK_EQ(num_contenders_.load(std::memory_order_relaxed), 0);
761 #else
762   // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
763   // may still be using locks.
764   int rc = pthread_rwlock_destroy(&rwlock_);
765   if (rc != 0) {
766     errno = rc;
767     bool is_safe_to_call_abort = IsSafeToCallAbortSafe();
768     PLOG(is_safe_to_call_abort ? FATAL : WARNING) << "pthread_rwlock_destroy failed for " << name_;
769   }
770 #endif
771 }
772 
773 void ReaderWriterMutex::ExclusiveLock(Thread* self) {
774   DCHECK(self == nullptr || self == Thread::Current());
775   AssertNotExclusiveHeld(self);
776 #if ART_USE_FUTEXES
777   bool done = false;
778   do {
779     int32_t cur_state = state_.load(std::memory_order_relaxed);
780     if (LIKELY(cur_state == 0)) {
781       // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
782       done = state_.CompareAndSetWeakAcquire(0 /* cur_state*/, -1 /* new state */);
783     } else {
784       // Failed to acquire, hang up.
785       ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
786       if (!WaitBrieflyFor(&state_, self, [](int32_t v) { return v == 0; })) {
787         num_contenders_.fetch_add(1);
788         if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
789           self->CheckEmptyCheckpointFromMutex();
790         }
791         if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
792           // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
793           // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
794           if ((errno != EAGAIN) && (errno != EINTR)) {
795             PLOG(FATAL) << "futex wait failed for " << name_;
796           }
797         }
798         SleepIfRuntimeDeleted(self);
799         num_contenders_.fetch_sub(1);
800       }
801     }
802   } while (!done);
803   DCHECK_EQ(state_.load(std::memory_order_relaxed), -1);
804 #else
805   CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_));
806 #endif
807   DCHECK_EQ(GetExclusiveOwnerTid(), 0);
808   exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
809   RegisterAsLocked(self);
810   AssertExclusiveHeld(self);
811 }
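// State encoding recap (sketch): state_ is 0 when free, -1 when held exclusively, and otherwise
// the positive count of shared (reader) holders; num_contenders_ separately counts threads parked
// in futex(). SharedTryLock below only increments a non-negative state_, while this method only
// performs the 0 -> -1 transition.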
812 
813 void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
814   DCHECK(self == nullptr || self == Thread::Current());
815   AssertExclusiveHeld(self);
816   RegisterAsUnlocked(self);
817   DCHECK_NE(GetExclusiveOwnerTid(), 0);
818 #if ART_USE_FUTEXES
819   bool done = false;
820   do {
821     int32_t cur_state = state_.load(std::memory_order_relaxed);
822     if (LIKELY(cur_state == -1)) {
823       // We're no longer the owner.
824       exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
825       // Change state from -1 to 0 and impose load/store ordering appropriate for lock release.
826       // Note: the num_contenders_ load below mustn't be reordered before the CompareAndSet.
827       done = state_.CompareAndSetWeakSequentiallyConsistent(-1 /* cur_state*/, 0 /* new state */);
828       if (LIKELY(done)) {  // Weak CAS may fail spuriously.
829         // Wake any waiters.
830         if (UNLIKELY(num_contenders_.load(std::memory_order_seq_cst) > 0)) {
831           futex(state_.Address(), FUTEX_WAKE_PRIVATE, kWakeAll, nullptr, nullptr, 0);
832         }
833       }
834     } else {
835       LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_;
836     }
837   } while (!done);
838 #else
839   exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
840   CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
841 #endif
842 }
843 
844 #if HAVE_TIMED_RWLOCK
845 bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
846   DCHECK(self == nullptr || self == Thread::Current());
847 #if ART_USE_FUTEXES
848   bool done = false;
849   timespec end_abs_ts;
850   InitTimeSpec(true, CLOCK_MONOTONIC, ms, ns, &end_abs_ts);
851   do {
852     int32_t cur_state = state_.load(std::memory_order_relaxed);
853     if (cur_state == 0) {
854       // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
855       done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, -1 /* new state */);
856     } else {
857       // Failed to acquire, hang up.
858       timespec now_abs_ts;
859       InitTimeSpec(true, CLOCK_MONOTONIC, 0, 0, &now_abs_ts);
860       timespec rel_ts;
861       if (!ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) {
862         return false;  // Timed out.
863       }
864       ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
865       if (!WaitBrieflyFor(&state_, self, [](int32_t v) { return v == 0; })) {
866         num_contenders_.fetch_add(1);
867         if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
868           self->CheckEmptyCheckpointFromMutex();
869         }
870         if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, &rel_ts, nullptr, 0) != 0) {
871           if (errno == ETIMEDOUT) {
872             num_contenders_.fetch_sub(1);
873             return false;  // Timed out.
874           } else if ((errno != EAGAIN) && (errno != EINTR)) {
875             // EAGAIN and EINTR both indicate a spurious failure,
876             // recompute the relative time out from now and try again.
877             // We don't use TEMP_FAILURE_RETRY so we can recompute rel_ts;
878             num_contenders_.fetch_sub(1);  // Unlikely to matter.
879             PLOG(FATAL) << "timed futex wait failed for " << name_;
880           }
881         }
882         SleepIfRuntimeDeleted(self);
883         num_contenders_.fetch_sub(1);
884       }
885     }
886   } while (!done);
887 #else
888   timespec ts;
889   InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts);
890   int result = pthread_rwlock_timedwrlock(&rwlock_, &ts);
891   if (result == ETIMEDOUT) {
892     return false;
893   }
894   if (result != 0) {
895     errno = result;
896     PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_;
897   }
898 #endif
899   exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
900   RegisterAsLocked(self);
901   AssertSharedHeld(self);
902   return true;
903 }
904 #endif
905 
906 #if ART_USE_FUTEXES
907 void ReaderWriterMutex::HandleSharedLockContention(Thread* self, int32_t cur_state) {
908   // Owner holds it exclusively, hang up.
909   ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
910   if (!WaitBrieflyFor(&state_, self, [](int32_t v) { return v >= 0; })) {
911     num_contenders_.fetch_add(1);
912     if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
913       self->CheckEmptyCheckpointFromMutex();
914     }
915     if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
916       if (errno != EAGAIN && errno != EINTR) {
917         PLOG(FATAL) << "futex wait failed for " << name_;
918       }
919     }
920     SleepIfRuntimeDeleted(self);
921     num_contenders_.fetch_sub(1);
922   }
923 }
924 #endif
925 
926 bool ReaderWriterMutex::SharedTryLock(Thread* self) {
927   DCHECK(self == nullptr || self == Thread::Current());
928 #if ART_USE_FUTEXES
929   bool done = false;
930   do {
931     int32_t cur_state = state_.load(std::memory_order_relaxed);
932     if (cur_state >= 0) {
933       // Add as an extra reader and impose load/store ordering appropriate for lock acquisition.
934       done = state_.CompareAndSetWeakAcquire(cur_state, cur_state + 1);
935     } else {
936       // Owner holds it exclusively.
937       return false;
938     }
939   } while (!done);
940 #else
941   int result = pthread_rwlock_tryrdlock(&rwlock_);
942   if (result == EBUSY) {
943     return false;
944   }
945   if (result != 0) {
946     errno = result;
947     PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
948   }
949 #endif
950   RegisterAsLocked(self);
951   AssertSharedHeld(self);
952   return true;
953 }
954 
955 bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
956   DCHECK(self == nullptr || self == Thread::Current());
957   bool result;
958   if (UNLIKELY(self == nullptr)) {  // Handle unattached threads.
959     result = IsExclusiveHeld(self);  // TODO: a better best effort here.
960   } else {
961     result = (self->GetHeldMutex(level_) == this);
962   }
963   return result;
964 }
965 
966 void ReaderWriterMutex::Dump(std::ostream& os) const {
967   os << name_
968       << " level=" << static_cast<int>(level_)
969       << " owner=" << GetExclusiveOwnerTid()
970 #if ART_USE_FUTEXES
971       << " state=" << state_.load(std::memory_order_seq_cst)
972       << " num_contenders=" << num_contenders_.load(std::memory_order_seq_cst)
973 #endif
974       << " ";
975   DumpContention(os);
976 }
977 
978 std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) {
979   mu.Dump(os);
980   return os;
981 }
982 
983 std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu) {
984   mu.Dump(os);
985   return os;
986 }
987 
988 void ReaderWriterMutex::WakeupToRespondToEmptyCheckpoint() {
989 #if ART_USE_FUTEXES
990   // Wake up all the waiters so they will respond to the empty checkpoint.
991   DCHECK(should_respond_to_empty_checkpoint_request_);
992   if (UNLIKELY(num_contenders_.load(std::memory_order_relaxed) > 0)) {
993     futex(state_.Address(), FUTEX_WAKE_PRIVATE, kWakeAll, nullptr, nullptr, 0);
994   }
995 #else
996   LOG(FATAL) << "Non futex case isn't supported.";
997 #endif
998 }
999 
1000 ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
1001     : name_(name), guard_(guard) {
1002 #if ART_USE_FUTEXES
1003   DCHECK_EQ(0, sequence_.load(std::memory_order_relaxed));
1004   num_waiters_ = 0;
1005 #else
1006   pthread_condattr_t cond_attrs;
1007   CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs));
1008 #if !defined(__APPLE__)
1009   // Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock.
1010   CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC));
1011 #endif
1012   CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs));
1013 #endif
1014 }
1015 
1016 ConditionVariable::~ConditionVariable() {
1017 #if ART_USE_FUTEXES
1018   if (num_waiters_ != 0) {
1019     bool is_safe_to_call_abort = IsSafeToCallAbortSafe();
1020     LOG(is_safe_to_call_abort ? FATAL : WARNING)
1021         << "ConditionVariable::~ConditionVariable for " << name_
1022         << " called with " << num_waiters_ << " waiters.";
1023   }
1024 #else
1025   // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
1026   // may still be using condition variables.
1027   int rc = pthread_cond_destroy(&cond_);
1028   if (rc != 0) {
1029     errno = rc;
1030     bool is_safe_to_call_abort = IsSafeToCallAbortSafe();
1031     PLOG(is_safe_to_call_abort ? FATAL : WARNING) << "pthread_cond_destroy failed for " << name_;
1032   }
1033 #endif
1034 }
1035 
1036 void ConditionVariable::Broadcast(Thread* self) {
1037   DCHECK(self == nullptr || self == Thread::Current());
1038   // TODO: enable below, there's a race in thread creation that causes false failures currently.
1039   // guard_.AssertExclusiveHeld(self);
1040   DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
1041 #if ART_USE_FUTEXES
1042   RequeueWaiters(std::numeric_limits<int32_t>::max());
1043 #else
1044   CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
1045 #endif
1046 }
1047 
1048 #if ART_USE_FUTEXES
1049 void ConditionVariable::RequeueWaiters(int32_t count) {
1050   if (num_waiters_ > 0) {
1051     sequence_++;  // Indicate a signal occurred.
1052     // Move waiters from the condition variable's futex to the guard's futex,
1053     // so that they will be woken up when the mutex is released.
1054     bool done = futex(sequence_.Address(),
1055                       FUTEX_REQUEUE_PRIVATE,
1056                       /* Threads to wake */ 0,
1057                       /* Threads to requeue*/ reinterpret_cast<const timespec*>(count),
1058                       guard_.state_and_contenders_.Address(),
1059                       0) != -1;
1060     if (!done && errno != EAGAIN && errno != EINTR) {
1061       PLOG(FATAL) << "futex requeue failed for " << name_;
1062     }
1063   }
1064 }
1065 #endif
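// Requeue rationale (sketch): FUTEX_REQUEUE moves sleepers from sequence_ onto the guard mutex's
// futex word instead of waking them all, so Broadcast() avoids a thundering herd; each requeued
// waiter is later woken one at a time by Mutex::ExclusiveUnlock() (kWakeOne) as the guard frees up.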
1066 
1067 
1068 void ConditionVariable::Signal(Thread* self) {
1069   DCHECK(self == nullptr || self == Thread::Current());
1070   guard_.AssertExclusiveHeld(self);
1071 #if ART_USE_FUTEXES
1072   RequeueWaiters(1);
1073 #else
1074   CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
1075 #endif
1076 }
1077 
1078 void ConditionVariable::Wait(Thread* self) {
1079   guard_.CheckSafeToWait(self);
1080   WaitHoldingLocks(self);
1081 }
1082 
1083 void ConditionVariable::WaitHoldingLocks(Thread* self) {
1084   DCHECK(self == nullptr || self == Thread::Current());
1085   guard_.AssertExclusiveHeld(self);
1086   unsigned int old_recursion_count = guard_.recursion_count_;
1087 #if ART_USE_FUTEXES
1088   num_waiters_++;
1089   // Ensure the Mutex is contended so that requeued threads are awoken.
1090   guard_.increment_contenders();
1091   guard_.recursion_count_ = 1;
1092   int32_t cur_sequence = sequence_.load(std::memory_order_relaxed);
1093   guard_.ExclusiveUnlock(self);
1094   if (futex(sequence_.Address(), FUTEX_WAIT_PRIVATE, cur_sequence, nullptr, nullptr, 0) != 0) {
1095     // Futex failed, check it is an expected error.
1096     // EAGAIN == EWOULDBLOCK, so we let the caller try again.
1097     // EINTR implies a signal was sent to this thread.
1098     if ((errno != EINTR) && (errno != EAGAIN)) {
1099       PLOG(FATAL) << "futex wait failed for " << name_;
1100     }
1101   }
1102   SleepIfRuntimeDeleted(self);
1103   guard_.ExclusiveLock(self);
1104   CHECK_GT(num_waiters_, 0);
1105   num_waiters_--;
1106   // We awoke and so no longer require wakeups from the guard_'s unlock.
1107   CHECK_GT(guard_.get_contenders(), 0);
1108   guard_.decrement_contenders();
1109 #else
1110   pid_t old_owner = guard_.GetExclusiveOwnerTid();
1111   guard_.exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
1112   guard_.recursion_count_ = 0;
1113   CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
1114   guard_.exclusive_owner_.store(old_owner, std::memory_order_relaxed);
1115 #endif
1116   guard_.recursion_count_ = old_recursion_count;
1117 }
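// Protocol recap (sketch): the waiter samples sequence_, registers as a guard contender, releases
// the guard, then futex-waits on sequence_ against the sampled value. Any intervening Signal() or
// Broadcast() bumps sequence_, so the FUTEX_WAIT either returns immediately or the waiter is woken
// or requeued later; on return it re-acquires the guard and drops its contender count.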
1118 
1119 bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
1120   DCHECK(self == nullptr || self == Thread::Current());
1121   bool timed_out = false;
1122   guard_.AssertExclusiveHeld(self);
1123   guard_.CheckSafeToWait(self);
1124   unsigned int old_recursion_count = guard_.recursion_count_;
1125 #if ART_USE_FUTEXES
1126   timespec rel_ts;
1127   InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts);
1128   num_waiters_++;
1129   // Ensure the Mutex is contended so that requeued threads are awoken.
1130   guard_.increment_contenders();
1131   guard_.recursion_count_ = 1;
1132   int32_t cur_sequence = sequence_.load(std::memory_order_relaxed);
1133   guard_.ExclusiveUnlock(self);
1134   if (futex(sequence_.Address(), FUTEX_WAIT_PRIVATE, cur_sequence, &rel_ts, nullptr, 0) != 0) {
1135     if (errno == ETIMEDOUT) {
1136       // Timed out, we're done.
1137       timed_out = true;
1138     } else if ((errno == EAGAIN) || (errno == EINTR)) {
1139       // A signal or ConditionVariable::Signal/Broadcast has come in.
1140     } else {
1141       PLOG(FATAL) << "timed futex wait failed for " << name_;
1142     }
1143   }
1144   SleepIfRuntimeDeleted(self);
1145   guard_.ExclusiveLock(self);
1146   CHECK_GT(num_waiters_, 0);
1147   num_waiters_--;
1148   // We awoke and so no longer require wakeups from the guard_'s unlock.
1149   CHECK_GT(guard_.get_contenders(), 0);
1150   guard_.decrement_contenders();
1151 #else
1152 #if !defined(__APPLE__)
1153   int clock = CLOCK_MONOTONIC;
1154 #else
1155   int clock = CLOCK_REALTIME;
1156 #endif
1157   pid_t old_owner = guard_.GetExclusiveOwnerTid();
1158   guard_.exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
1159   guard_.recursion_count_ = 0;
1160   timespec ts;
1161   InitTimeSpec(true, clock, ms, ns, &ts);
1162   int rc;
1163   while ((rc = pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts)) == EINTR) {
1164     continue;
1165   }
1166 
1167   if (rc == ETIMEDOUT) {
1168     timed_out = true;
1169   } else if (rc != 0) {
1170     errno = rc;
1171     PLOG(FATAL) << "TimedWait failed for " << name_;
1172   }
1173   guard_.exclusive_owner_.store(old_owner, std::memory_order_relaxed);
1174 #endif
1175   guard_.recursion_count_ = old_recursion_count;
1176   return timed_out;
1177 }
1178 
1179 }  // namespace art
1180