/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mutex.h"

#include <errno.h>
#include <sys/time.h>

#include <sstream>

#include "android-base/stringprintf.h"

#include "base/atomic.h"
#include "base/logging.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/value_object.h"
#include "mutex-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
#include "thread.h"
#include "thread_list.h"

namespace art {

using android::base::StringPrintf;

static constexpr uint64_t kIntervalMillis = 50;
static constexpr int kMonitorTimeoutTryMax = 5;

static const char* kLastDumpStackTime = "LastDumpStackTime";

struct AllMutexData {
  // A guard for all_mutexes that's not a mutex (Mutexes must CAS to acquire and busy wait).
  Atomic<const BaseMutex*> all_mutexes_guard;
  // All created mutexes, guarded by all_mutexes_guard.
  std::set<BaseMutex*>* all_mutexes;
  AllMutexData() : all_mutexes(nullptr) {}
};
static struct AllMutexData gAllMutexData[kAllMutexDataSize];

struct DumpStackLastTimeTLSData : public art::TLSData {
  explicit DumpStackLastTimeTLSData(uint64_t last_dump_time_ms) {
    last_dump_time_ms_ = last_dump_time_ms;
  }
  uint64_t last_dump_time_ms_;
};

#if ART_USE_FUTEXES
static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
  const int32_t one_sec = 1000 * 1000 * 1000;  // one second in nanoseconds.
  result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec;
  result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec;
  if (result_ts->tv_nsec < 0) {
    result_ts->tv_sec--;
    result_ts->tv_nsec += one_sec;
  } else if (result_ts->tv_nsec > one_sec) {
    result_ts->tv_sec++;
    result_ts->tv_nsec -= one_sec;
  }
  return result_ts->tv_sec < 0;
}
#endif
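
// Worked example (illustrative only): for lhs = {2 s, 100 ns} and rhs = {1 s, 200 ns},
// the raw difference is {1 s, -100 ns}, which the borrow step above normalizes to
// {0 s, 999999900 ns}; the function then returns false since the result is
// non-negative. A negative tv_sec, and hence a true return, means lhs is already
// in the past relative to rhs, i.e. the deadline has expired.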

#if ART_USE_FUTEXES
// If we wake up from a futex wake, and the runtime disappeared while we were asleep,
// it's important to stop in our tracks before we touch deallocated memory.
static inline void SleepIfRuntimeDeleted(Thread* self) {
  if (self != nullptr) {
    JNIEnvExt* const env = self->GetJniEnv();
    if (UNLIKELY(env != nullptr && env->IsRuntimeDeleted())) {
      DCHECK(self->IsDaemon());
      // If the runtime has been deleted, then we cannot proceed. Just sleep forever. This may
      // occur for user daemon threads that get a spurious wakeup. This occurs for test 132 with
      // --host and --gdb.
      // After we wake up, the runtime may have been shut down, which means that this condition
      // may have been deleted. It is not safe to retry the wait.
      SleepForever();
    }
  }
}
#else
// We should be doing this for pthreads too, but it seems to be impossible for something
// like a condition variable wait. Thus we don't bother trying.
#endif

// Wait for an amount of time that roughly increases in the argument i.
// Spin for small arguments and yield/sleep for longer ones.
static void BackOff(uint32_t i) {
  static constexpr uint32_t kSpinMax = 10;
  static constexpr uint32_t kYieldMax = 20;
  if (i <= kSpinMax) {
    // TODO: Esp. in very latency-sensitive cases, consider replacing this with an explicit
    // test-and-test-and-set loop in the caller. Possibly skip entirely on a uniprocessor.
    volatile uint32_t x = 0;
    const uint32_t spin_count = 10 * i;
    for (uint32_t spin = 0; spin < spin_count; ++spin) {
      ++x;  // Volatile; hence should not be optimized away.
    }
    // TODO: Consider adding x86 PAUSE and/or ARM YIELD here.
  } else if (i <= kYieldMax) {
    sched_yield();
  } else {
    NanoSleep(1000ull * (i - kYieldMax));
  }
}
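
// The resulting schedule, given the constants above: i <= 10 spins roughly 10 * i
// iterations, 10 < i <= 20 calls sched_yield(), and larger i sleeps for
// (i - 20) microseconds. A typical caller shape (sketch only; TryAcquire is a
// hypothetical name, not an API in this file):
//
//   for (uint32_t i = 0; !TryAcquire(); ++i) {
//     BackOff(i);
//   }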

// Wait until pred(testLoc->load(std::memory_order_relaxed)) holds, or until a
// short time interval, on the order of kernel context-switch time, passes.
// Return true if the predicate test succeeded, false if we timed out.
template<typename Pred>
static inline bool WaitBrieflyFor(AtomicInteger* testLoc, Thread* self, Pred pred) {
  // TODO: Tune these parameters correctly. BackOff(3) should take on the order of 100 cycles. So
  // this should result in retrying <= 10 times, usually waiting around 100 cycles each. The
  // maximum delay should be significantly less than the expected futex() context switch time, so
  // there should be little danger of this worsening things appreciably. If the lock was only
  // held briefly by a running thread, this should help immensely.
  static constexpr uint32_t kMaxBackOff = 3;  // Should probably be <= kSpinMax above.
  static constexpr uint32_t kMaxIters = 50;
  JNIEnvExt* const env = self == nullptr ? nullptr : self->GetJniEnv();
  for (uint32_t i = 1; i <= kMaxIters; ++i) {
    BackOff(std::min(i, kMaxBackOff));
    if (pred(testLoc->load(std::memory_order_relaxed))) {
      return true;
    }
    if (UNLIKELY(env != nullptr && env->IsRuntimeDeleted())) {
      // This returns true once we've started shutting down. We then try to reach a quiescent
      // state as soon as possible to avoid touching data that may be deallocated by the shutdown
      // process. It currently relies on a timeout.
      return false;
    }
  }
  return false;
}
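
// Callers pass a predicate over the raw lock word. For example, Mutex::ExclusiveLock
// below waits with [](int32_t v) { return (v & kHeldMask) == 0; }, i.e. "held bit
// clear", before falling back to a full futex wait.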

class ScopedAllMutexesLock final {
 public:
  explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
    for (uint32_t i = 0;
         !gAllMutexData->all_mutexes_guard.CompareAndSetWeakAcquire(nullptr, mutex);
         ++i) {
      BackOff(i);
    }
  }

  ~ScopedAllMutexesLock() {
    DCHECK_EQ(gAllMutexData->all_mutexes_guard.load(std::memory_order_relaxed), mutex_);
    gAllMutexData->all_mutexes_guard.store(nullptr, std::memory_order_release);
  }

 private:
  const BaseMutex* const mutex_;
};
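
// This is effectively a tiny test-and-set spin lock, with the guard word holding
// the owning mutex pointer (or -1 in DumpAll below) rather than a plain flag,
// which lets the destructor DCHECK that acquire and release are correctly paired.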

// Scoped class that generates events at the beginning and end of lock contention.
class ScopedContentionRecorder final : public ValueObject {
 public:
  ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
      : mutex_(kLogLockContentions ? mutex : nullptr),
        blocked_tid_(kLogLockContentions ? blocked_tid : 0),
        owner_tid_(kLogLockContentions ? owner_tid : 0),
        start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
    if (ATraceEnabled()) {
      std::string msg = StringPrintf("Lock contention on %s (owner tid: %" PRIu64 ")",
                                     mutex->GetName(), owner_tid);
      ATraceBegin(msg.c_str());
    }
  }

  ~ScopedContentionRecorder() {
    ATraceEnd();
    if (kLogLockContentions) {
      uint64_t end_nano_time = NanoTime();
      mutex_->RecordContention(blocked_tid_, owner_tid_, end_nano_time - start_nano_time_);
    }
  }

 private:
  BaseMutex* const mutex_;
  const uint64_t blocked_tid_;
  const uint64_t owner_tid_;
  const uint64_t start_nano_time_;
};

BaseMutex::BaseMutex(const char* name, LockLevel level)
    : name_(name),
      level_(level),
      should_respond_to_empty_checkpoint_request_(false) {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
    if (*all_mutexes_ptr == nullptr) {
      // We leak the global set of all mutexes to avoid ordering issues in global variable
      // construction/destruction.
      *all_mutexes_ptr = new std::set<BaseMutex*>();
    }
    (*all_mutexes_ptr)->insert(this);
  }
}

BaseMutex::~BaseMutex() {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    gAllMutexData->all_mutexes->erase(this);
  }
}

void BaseMutex::DumpAll(std::ostream& os) {
  if (kLogLockContentions) {
    os << "Mutex logging:\n";
    ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
    std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
    if (all_mutexes == nullptr) {
      // No mutexes have been created yet at startup.
      return;
    }
    os << "(Contended)\n";
    for (const BaseMutex* mutex : *all_mutexes) {
      if (mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
    os << "(Never contended)\n";
    for (const BaseMutex* mutex : *all_mutexes) {
      if (!mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
  }
}

void BaseMutex::CheckSafeToWait(Thread* self) {
  if (self == nullptr) {
    CheckUnattachedThread(level_);
    return;
  }
  if (kDebugLocking) {
    CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
        << "Waiting on unacquired mutex: " << name_;
    bool bad_mutexes_held = false;
    std::string error_msg;
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      if (i != level_) {
        BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
        // We allow the thread to wait even if the user_code_suspension_lock_ is held. That just
        // means that gc or some other internal process is suspending the thread while it is
        // trying to suspend some other thread. So long as the current thread is not being
        // suspended by a SuspendReason::kForUserCode (which needs the user_code_suspension_lock_
        // to clear) this is fine. This is needed due to user_code_suspension_lock_ being the way
        // untrusted code interacts with suspension. One holds the lock to prevent
        // user-code-suspension from occurring. Since this is only initiated from user-supplied
        // native-code this is safe.
        if (held_mutex == Locks::user_code_suspension_lock_) {
          // No thread safety analysis is fine since we have both the user_code_suspension_lock_
          // from the line above and the ThreadSuspendCountLock since it is our level_. We use this
          // lambda to avoid having to annotate the whole function as NO_THREAD_SAFETY_ANALYSIS.
          auto is_suspending_for_user_code = [self]() NO_THREAD_SAFETY_ANALYSIS {
            return self->GetUserCodeSuspendCount() != 0;
          };
          if (is_suspending_for_user_code()) {
            std::ostringstream oss;
            oss << "Holding \"" << held_mutex->name_ << "\" "
                << "(level " << LockLevel(i) << ") while performing wait on "
                << "\"" << name_ << "\" (level " << level_ << ") "
                << "with SuspendReason::kForUserCode pending suspensions";
            error_msg = oss.str();
            LOG(ERROR) << error_msg;
            bad_mutexes_held = true;
          }
        } else if (held_mutex != nullptr) {
          std::ostringstream oss;
          oss << "Holding \"" << held_mutex->name_ << "\" "
              << "(level " << LockLevel(i) << ") while performing wait on "
              << "\"" << name_ << "\" (level " << level_ << ")";
          error_msg = oss.str();
          LOG(ERROR) << error_msg;
          bad_mutexes_held = true;
        }
      }
    }
    if (gAborting == 0) {  // Avoid recursive aborts.
      CHECK(!bad_mutexes_held) << error_msg;
    }
  }
}

void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
  if (kLogLockContentions) {
    // Atomically add value to wait_time.
    wait_time.fetch_add(value, std::memory_order_seq_cst);
  }
}

void BaseMutex::RecordContention(uint64_t blocked_tid,
                                 uint64_t owner_tid,
                                 uint64_t nano_time_blocked) {
  if (kLogLockContentions) {
    ContentionLogData* data = contention_log_data_;
    ++(data->contention_count);
    data->AddToWaitTime(nano_time_blocked);
    ContentionLogEntry* log = data->contention_log;
    // This code is intentionally racy as it is only used for diagnostics.
    int32_t slot = data->cur_content_log_entry.load(std::memory_order_relaxed);
    if (log[slot].blocked_tid == blocked_tid &&
        log[slot].owner_tid == owner_tid) {
      ++log[slot].count;
    } else {
      uint32_t new_slot;
      do {
        slot = data->cur_content_log_entry.load(std::memory_order_relaxed);
        new_slot = (slot + 1) % kContentionLogSize;
      } while (!data->cur_content_log_entry.CompareAndSetWeakRelaxed(slot, new_slot));
      log[new_slot].blocked_tid = blocked_tid;
      log[new_slot].owner_tid = owner_tid;
      log[new_slot].count.store(1, std::memory_order_relaxed);
    }
  }
}
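
// Illustrative slot arithmetic (assuming, say, kContentionLogSize == 4): with
// cur_content_log_entry == 3, a new (blocked_tid, owner_tid) pair claims slot
// (3 + 1) % 4 == 0, overwriting the oldest entry. Races here can lose or
// double-count entries, which is acceptable for a diagnostics-only log.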

void BaseMutex::DumpContention(std::ostream& os) const {
  if (kLogLockContentions) {
    const ContentionLogData* data = contention_log_data_;
    const ContentionLogEntry* log = data->contention_log;
    uint64_t wait_time = data->wait_time.load(std::memory_order_relaxed);
    uint32_t contention_count = data->contention_count.load(std::memory_order_relaxed);
    if (contention_count == 0) {
      os << "never contended";
    } else {
      os << "contended " << contention_count
         << " total wait of contender " << PrettyDuration(wait_time)
         << " average " << PrettyDuration(wait_time / contention_count);
      SafeMap<uint64_t, size_t> most_common_blocker;
      SafeMap<uint64_t, size_t> most_common_blocked;
      for (size_t i = 0; i < kContentionLogSize; ++i) {
        uint64_t blocked_tid = log[i].blocked_tid;
        uint64_t owner_tid = log[i].owner_tid;
        uint32_t count = log[i].count.load(std::memory_order_relaxed);
        if (count > 0) {
          auto it = most_common_blocked.find(blocked_tid);
          if (it != most_common_blocked.end()) {
            most_common_blocked.Overwrite(blocked_tid, it->second + count);
          } else {
            most_common_blocked.Put(blocked_tid, count);
          }
          it = most_common_blocker.find(owner_tid);
          if (it != most_common_blocker.end()) {
            most_common_blocker.Overwrite(owner_tid, it->second + count);
          } else {
            most_common_blocker.Put(owner_tid, count);
          }
        }
      }
      uint64_t max_tid = 0;
      size_t max_tid_count = 0;
      for (const auto& pair : most_common_blocked) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows most blocked tid=" << max_tid;
      }
      max_tid = 0;
      max_tid_count = 0;
      for (const auto& pair : most_common_blocker) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows tid=" << max_tid << " owning during this time";
      }
    }
  }
}


Mutex::Mutex(const char* name, LockLevel level, bool recursive)
    : BaseMutex(name, level), exclusive_owner_(0), recursion_count_(0), recursive_(recursive) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, state_and_contenders_.load(std::memory_order_relaxed));
#else
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, nullptr));
#endif
}

// Helper to allow checking shutdown while locking for thread safety.
static bool IsSafeToCallAbortSafe() {
  MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
  return Locks::IsSafeToCallAbortRacy();
}

Mutex::~Mutex() {
  bool safe_to_call_abort = Locks::IsSafeToCallAbortRacy();
#if ART_USE_FUTEXES
  if (state_and_contenders_.load(std::memory_order_relaxed) != 0) {
    LOG(safe_to_call_abort ? FATAL : WARNING)
        << "destroying mutex with owner or contenders. Owner: " << GetExclusiveOwnerTid();
  } else {
    if (GetExclusiveOwnerTid() != 0) {
      LOG(safe_to_call_abort ? FATAL : WARNING)
          << "unexpectedly found an owner on unlocked mutex " << name_;
    }
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_mutex_destroy(&mutex_);
  if (rc != 0) {
    errno = rc;
    PLOG(safe_to_call_abort ? FATAL : WARNING)
        << "pthread_mutex_destroy failed for " << name_;
  }
#endif
}

void Mutex::ExclusiveLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_and_contenders_.load(std::memory_order_relaxed);
      if (LIKELY((cur_state & kHeldMask) == 0) /* lock not held */) {
        done = state_and_contenders_.CompareAndSetWeakAcquire(cur_state, cur_state | kHeldMask);
      } else {
        // Failed to acquire, hang up.
        ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
        // Empirically, it appears important to spin again each time through the loop; if we
        // bother to go to sleep and wake up, we should be fairly persistent in trying for the
        // lock.
        if (!WaitBrieflyFor(&state_and_contenders_, self,
                            [](int32_t v) { return (v & kHeldMask) == 0; })) {
          // Increment contender count. We can't create enough threads for this to overflow.
          increment_contenders();
          // Make cur_state again reflect the expected value of state_and_contenders.
          cur_state += kContenderIncrement;
          if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
            self->CheckEmptyCheckpointFromMutex();
          }

          uint64_t wait_start_ms = enable_monitor_timeout_ ? MilliTime() : 0;
          uint64_t try_times = 0;
          do {
            timespec timeout_ts;
            timeout_ts.tv_sec = 0;
            timeout_ts.tv_nsec = Runtime::Current()->GetMonitorTimeoutNs();
            if (futex(state_and_contenders_.Address(), FUTEX_WAIT_PRIVATE, cur_state,
                      enable_monitor_timeout_ ? &timeout_ts : nullptr, nullptr, 0) != 0) {
              // We only went to sleep after incrementing contenders and checking that the
              // lock is still held by someone else. EAGAIN and EINTR both indicate a spurious
              // failure, try again from the beginning. We don't use TEMP_FAILURE_RETRY so we can
              // intentionally retry to acquire the lock.
              if ((errno != EAGAIN) && (errno != EINTR)) {
                if (errno == ETIMEDOUT) {
                  try_times++;
                  if (try_times <= kMonitorTimeoutTryMax) {
                    DumpStack(self, wait_start_ms, try_times);
                  }
                } else {
                  PLOG(FATAL) << "futex wait failed for " << name_;
                }
              }
            }
            SleepIfRuntimeDeleted(self);
            // Retry until not held. In heavy contention situations we otherwise get redundant
            // futex wakeups as a result of repeatedly decrementing and incrementing contenders.
            cur_state = state_and_contenders_.load(std::memory_order_relaxed);
          } while ((cur_state & kHeldMask) != 0);
          decrement_contenders();
        }
      }
    } while (!done);
    // Confirm that lock is now held.
    DCHECK_NE(state_and_contenders_.load(std::memory_order_relaxed) & kHeldMask, 0);
#else
    CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
#endif
    DCHECK_EQ(GetExclusiveOwnerTid(), 0) << " my tid = " << SafeGetTid(self)
                                         << " recursive_ = " << recursive_;
    exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
                                               << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
}
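
// Lock word recap for the futex build (a sketch of the encoding implied above):
// bit kHeldMask is the held flag, and the bits above it count sleeping
// contenders in units of kContenderIncrement. So 0 is "unlocked, no waiters",
// kHeldMask is "locked, no waiters", and kHeldMask + 2 * kContenderIncrement is
// "locked with two contenders parked on the futex".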

void Mutex::DumpStack(Thread* self, uint64_t wait_start_ms, uint64_t try_times) {
  ScopedObjectAccess soa(self);
  Locks::thread_list_lock_->ExclusiveLock(self);
  std::string owner_stack_dump;
  pid_t owner_tid = GetExclusiveOwnerTid();
  Thread* owner = Runtime::Current()->GetThreadList()->FindThreadByTid(owner_tid);
  if (owner != nullptr) {
    if (IsDumpFrequent(owner, try_times)) {
      Locks::thread_list_lock_->ExclusiveUnlock(self);
      LOG(WARNING) << "Contention with tid " << owner_tid << ", monitor id " << monitor_id_;
      return;
    }
    struct CollectStackTrace : public Closure {
      void Run(art::Thread* thread) override
          REQUIRES_SHARED(art::Locks::mutator_lock_) {
        if (IsDumpFrequent(thread)) {
          return;
        }
        thread->SetCustomTLS(kLastDumpStackTime, new DumpStackLastTimeTLSData(MilliTime()));
        thread->DumpJavaStack(oss);
      }
      std::ostringstream oss;
    };
    CollectStackTrace owner_trace;
    owner->RequestSynchronousCheckpoint(&owner_trace);
    owner_stack_dump = owner_trace.oss.str();
    uint64_t wait_ms = MilliTime() - wait_start_ms;
    LOG(WARNING) << "Monitor contention with tid " << owner_tid << ", wait time: " << wait_ms
                 << "ms, monitor id: " << monitor_id_
                 << "\nPerfMonitor owner thread(" << owner_tid << ") stack is:\n"
                 << owner_stack_dump;
  } else {
    Locks::thread_list_lock_->ExclusiveUnlock(self);
  }
}

bool Mutex::IsDumpFrequent(Thread* thread, uint64_t try_times) {
  uint64_t last_dump_time_ms = 0;
  DumpStackLastTimeTLSData* tls_data =
      reinterpret_cast<DumpStackLastTimeTLSData*>(thread->GetCustomTLS(kLastDumpStackTime));
  if (tls_data != nullptr) {
    last_dump_time_ms = tls_data->last_dump_time_ms_;
  }
  uint64_t interval = MilliTime() - last_dump_time_ms;
  return interval < kIntervalMillis * try_times;
}
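
// Example: with kIntervalMillis == 50, a second timeout (try_times == 2) only
// dumps again if at least 100 ms have passed since the owner's last recorded
// dump, throttling repeated stack dumps against the same owner thread.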

bool Mutex::ExclusiveTryLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_and_contenders_.load(std::memory_order_relaxed);
      if ((cur_state & kHeldMask) == 0) {
        // Change state to held and impose load/store ordering appropriate for lock acquisition.
        done = state_and_contenders_.CompareAndSetWeakAcquire(cur_state, cur_state | kHeldMask);
      } else {
        return false;
      }
    } while (!done);
    DCHECK_NE(state_and_contenders_.load(std::memory_order_relaxed) & kHeldMask, 0);
#else
    int result = pthread_mutex_trylock(&mutex_);
    if (result == EBUSY) {
      return false;
    }
    if (result != 0) {
      errno = result;
      PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
    }
#endif
    DCHECK_EQ(GetExclusiveOwnerTid(), 0);
    exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
                                               << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
  return true;
}

bool Mutex::ExclusiveTryLockWithSpinning(Thread* self) {
  // Spin a small number of times, since this affects our ability to respond to suspension
  // requests. We spin repeatedly only if the mutex repeatedly becomes available and unavailable
  // in rapid succession, and then we will typically not spin for the maximal period.
  const int kMaxSpins = 5;
  for (int i = 0; i < kMaxSpins; ++i) {
    if (ExclusiveTryLock(self)) {
      return true;
    }
#if ART_USE_FUTEXES
    if (!WaitBrieflyFor(&state_and_contenders_, self,
                        [](int32_t v) { return (v & kHeldMask) == 0; })) {
      return false;
    }
#endif
  }
  return ExclusiveTryLock(self);
}

#if ART_USE_FUTEXES
void Mutex::ExclusiveLockUncontendedFor(Thread* new_owner) {
  DCHECK_EQ(level_, kMonitorLock);
  DCHECK(!recursive_);
  state_and_contenders_.store(kHeldMask, std::memory_order_relaxed);
  recursion_count_ = 1;
  exclusive_owner_.store(SafeGetTid(new_owner), std::memory_order_relaxed);
  // Don't call RegisterAsLocked(). It wouldn't register anything anyway. And
  // this happens as we're inflating a monitor, which doesn't logically affect
  // held "locks"; it effectively just converts a thin lock to a mutex. By doing
  // this while the lock is already held, we're delaying the acquisition of a
  // logically held mutex, which can introduce bogus lock order violations.
}

void Mutex::ExclusiveUnlockUncontended() {
  DCHECK_EQ(level_, kMonitorLock);
  state_and_contenders_.store(0, std::memory_order_relaxed);
  recursion_count_ = 0;
  exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
  // Skip RegisterAsUnlocked(), which wouldn't do anything anyway.
}
#endif  // ART_USE_FUTEXES

void Mutex::ExclusiveUnlock(Thread* self) {
  if (kIsDebugBuild && self != nullptr && self != Thread::Current()) {
    std::string name1 = "<null>";
    std::string name2 = "<null>";
    if (self != nullptr) {
      self->GetThreadName(name1);
    }
    if (Thread::Current() != nullptr) {
      Thread::Current()->GetThreadName(name2);
    }
    LOG(FATAL) << GetName() << " level=" << level_ << " self=" << name1
               << " Thread::Current()=" << name2;
  }
  AssertHeld(self);
  DCHECK_NE(GetExclusiveOwnerTid(), 0);
  recursion_count_--;
  if (!recursive_ || recursion_count_ == 0) {
    if (kDebugLocking) {
      CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: "
                                                 << name_ << " " << recursion_count_;
    }
    RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_and_contenders_.load(std::memory_order_relaxed);
      if (LIKELY((cur_state & kHeldMask) != 0)) {
        // We're no longer the owner.
        exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
        // Change state to not held and impose load/store ordering appropriate for lock release.
        uint32_t new_state = cur_state & ~kHeldMask;  // Same number of contenders.
        done = state_and_contenders_.CompareAndSetWeakRelease(cur_state, new_state);
        if (LIKELY(done)) {  // Weak CAS may fail spuriously.
          if (UNLIKELY(new_state != 0) /* have contenders */) {
            futex(state_and_contenders_.Address(), FUTEX_WAKE_PRIVATE, kWakeOne,
                  nullptr, nullptr, 0);
          }
          // Contenders only sleep after incrementing the contender count and verifying the lock
          // is still held. If we saw no contenders here, then no thread could have been waiting
          // on this lock's futex when we did the CAS. New arrivals after that cannot wait for us,
          // since the futex wait call would see the lock available and immediately return.
        }
      } else {
        // Logging acquires the logging lock, avoid infinite recursion in that case.
        if (this != Locks::logging_lock_) {
          LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
        } else {
          LogHelper::LogLineLowStack(__FILE__,
                                     __LINE__,
                                     ::android::base::FATAL_WITHOUT_ABORT,
                                     StringPrintf("Unexpected state_ %d in unlock for %s",
                                                  cur_state, name_).c_str());
          _exit(1);
        }
      }
    } while (!done);
#else
    exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
    CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
#endif
  }
}

void Mutex::Dump(std::ostream& os) const {
  os << (recursive_ ? "recursive " : "non-recursive ")
     << name_
     << " level=" << static_cast<int>(level_)
     << " rec=" << recursion_count_
     << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
  mu.Dump(os);
  return os;
}

void Mutex::WakeupToRespondToEmptyCheckpoint() {
#if ART_USE_FUTEXES
  // Wake up all the waiters so they will respond to the empty checkpoint.
  DCHECK(should_respond_to_empty_checkpoint_request_);
  if (UNLIKELY(get_contenders() != 0)) {
    futex(state_and_contenders_.Address(), FUTEX_WAKE_PRIVATE, kWakeAll, nullptr, nullptr, 0);
  }
#else
  LOG(FATAL) << "Non futex case isn't supported.";
#endif
}

ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
    : BaseMutex(name, level)
#if ART_USE_FUTEXES
    , state_(0), exclusive_owner_(0), num_contenders_(0)
#endif
{
#if !ART_USE_FUTEXES
  CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr));
#endif
}

ReaderWriterMutex::~ReaderWriterMutex() {
#if ART_USE_FUTEXES
  CHECK_EQ(state_.load(std::memory_order_relaxed), 0);
  CHECK_EQ(GetExclusiveOwnerTid(), 0);
  CHECK_EQ(num_contenders_.load(std::memory_order_relaxed), 0);
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_rwlock_destroy(&rwlock_);
  if (rc != 0) {
    errno = rc;
    bool is_safe_to_call_abort = IsSafeToCallAbortSafe();
    PLOG(is_safe_to_call_abort ? FATAL : WARNING) << "pthread_rwlock_destroy failed for " << name_;
  }
#endif
}

void ReaderWriterMutex::ExclusiveLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  AssertNotExclusiveHeld(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    if (LIKELY(cur_state == 0)) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      if (!WaitBrieflyFor(&state_, self, [](int32_t v) { return v == 0; })) {
        num_contenders_.fetch_add(1);
        if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
          self->CheckEmptyCheckpointFromMutex();
        }
        if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
          // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
          // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
          if ((errno != EAGAIN) && (errno != EINTR)) {
            PLOG(FATAL) << "futex wait failed for " << name_;
          }
        }
        SleepIfRuntimeDeleted(self);
        num_contenders_.fetch_sub(1);
      }
    }
  } while (!done);
  DCHECK_EQ(state_.load(std::memory_order_relaxed), -1);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_));
#endif
  DCHECK_EQ(GetExclusiveOwnerTid(), 0);
  exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
}

void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  AssertExclusiveHeld(self);
  RegisterAsUnlocked(self);
  DCHECK_NE(GetExclusiveOwnerTid(), 0);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    if (LIKELY(cur_state == -1)) {
      // We're no longer the owner.
      exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
      // Change state from -1 to 0 and impose load/store ordering appropriate for lock release.
      // Note, the num_contenders_ load below mustn't reorder before the CompareAndSet.
      done = state_.CompareAndSetWeakSequentiallyConsistent(-1 /* cur_state */, 0 /* new state */);
      if (LIKELY(done)) {  // Weak CAS may fail spuriously.
        // Wake any waiters.
        if (UNLIKELY(num_contenders_.load(std::memory_order_seq_cst) > 0)) {
          futex(state_.Address(), FUTEX_WAKE_PRIVATE, kWakeAll, nullptr, nullptr, 0);
        }
      }
    } else {
      LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_;
    }
  } while (!done);
#else
  exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}
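
// State encoding recap for the futex build: state_ == 0 means free, -1 means
// write-locked, and a positive value counts shared (reader) holders. Writers
// CAS 0 -> -1 here and wake everyone on release, since readers and writers
// queue on the same futex word.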

#if HAVE_TIMED_RWLOCK
bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  timespec end_abs_ts;
  InitTimeSpec(true, CLOCK_MONOTONIC, ms, ns, &end_abs_ts);
  do {
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    if (cur_state == 0) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      timespec now_abs_ts;
      InitTimeSpec(true, CLOCK_MONOTONIC, 0, 0, &now_abs_ts);
      timespec rel_ts;
      if (ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) {
        return false;  // Timed out.
      }
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      if (!WaitBrieflyFor(&state_, self, [](int32_t v) { return v == 0; })) {
        num_contenders_.fetch_add(1);
        if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
          self->CheckEmptyCheckpointFromMutex();
        }
        if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, &rel_ts, nullptr, 0) != 0) {
          if (errno == ETIMEDOUT) {
            num_contenders_.fetch_sub(1);
            return false;  // Timed out.
          } else if ((errno != EAGAIN) && (errno != EINTR)) {
            // EAGAIN and EINTR both indicate a spurious failure,
            // recompute the relative time out from now and try again.
            // We don't use TEMP_FAILURE_RETRY so we can recompute rel_ts.
            PLOG(FATAL) << "timed futex wait failed for " << name_;
          }
        }
        SleepIfRuntimeDeleted(self);
        num_contenders_.fetch_sub(1);
      }
    }
  } while (!done);
#else
  timespec ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts);
  int result = pthread_rwlock_timedwrlock(&rwlock_, &ts);
  if (result == ETIMEDOUT) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_;
  }
#endif
  exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
  RegisterAsLocked(self);
  AssertSharedHeld(self);
  return true;
}
#endif

#if ART_USE_FUTEXES
void ReaderWriterMutex::HandleSharedLockContention(Thread* self, int32_t cur_state) {
  // Owner holds it exclusively, hang up.
  ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
  if (!WaitBrieflyFor(&state_, self, [](int32_t v) { return v >= 0; })) {
    num_contenders_.fetch_add(1);
    if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
      self->CheckEmptyCheckpointFromMutex();
    }
    if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
      if (errno != EAGAIN && errno != EINTR) {
        PLOG(FATAL) << "futex wait failed for " << name_;
      }
    }
    SleepIfRuntimeDeleted(self);
    num_contenders_.fetch_sub(1);
  }
}
#endif

bool ReaderWriterMutex::SharedTryLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    if (cur_state >= 0) {
      // Add as an extra reader and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareAndSetWeakAcquire(cur_state, cur_state + 1);
    } else {
      // Owner holds it exclusively.
      return false;
    }
  } while (!done);
#else
  int result = pthread_rwlock_tryrdlock(&rwlock_);
  if (result == EBUSY) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_tryrdlock failed for " << name_;
  }
#endif
  RegisterAsLocked(self);
  AssertSharedHeld(self);
  return true;
}

bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
  DCHECK(self == nullptr || self == Thread::Current());
  bool result;
  if (UNLIKELY(self == nullptr)) {  // Handle unattached threads.
    result = IsExclusiveHeld(self);  // TODO: a better best effort here.
  } else {
    result = (self->GetHeldMutex(level_) == this);
  }
  return result;
}

void ReaderWriterMutex::Dump(std::ostream& os) const {
  os << name_
     << " level=" << static_cast<int>(level_)
     << " owner=" << GetExclusiveOwnerTid()
#if ART_USE_FUTEXES
     << " state=" << state_.load(std::memory_order_seq_cst)
     << " num_contenders=" << num_contenders_.load(std::memory_order_seq_cst)
#endif
     << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) {
  mu.Dump(os);
  return os;
}

std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu) {
  mu.Dump(os);
  return os;
}

void ReaderWriterMutex::WakeupToRespondToEmptyCheckpoint() {
#if ART_USE_FUTEXES
  // Wake up all the waiters so they will respond to the empty checkpoint.
  DCHECK(should_respond_to_empty_checkpoint_request_);
  if (UNLIKELY(num_contenders_.load(std::memory_order_relaxed) > 0)) {
    futex(state_.Address(), FUTEX_WAKE_PRIVATE, kWakeAll, nullptr, nullptr, 0);
  }
#else
  LOG(FATAL) << "Non futex case isn't supported.";
#endif
}

ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
    : name_(name), guard_(guard) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, sequence_.load(std::memory_order_relaxed));
  num_waiters_ = 0;
#else
  pthread_condattr_t cond_attrs;
  CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs));
#if !defined(__APPLE__)
  // Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock.
  CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC));
#endif
  CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs));
#endif
}

ConditionVariable::~ConditionVariable() {
#if ART_USE_FUTEXES
  if (num_waiters_ != 0) {
    bool is_safe_to_call_abort = IsSafeToCallAbortSafe();
    LOG(is_safe_to_call_abort ? FATAL : WARNING)
        << "ConditionVariable::~ConditionVariable for " << name_
        << " called with " << num_waiters_ << " waiters.";
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using condition variables.
  int rc = pthread_cond_destroy(&cond_);
  if (rc != 0) {
    errno = rc;
    bool is_safe_to_call_abort = IsSafeToCallAbortSafe();
    PLOG(is_safe_to_call_abort ? FATAL : WARNING) << "pthread_cond_destroy failed for " << name_;
  }
#endif
}

void ConditionVariable::Broadcast(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  // TODO: enable below, there's a race in thread creation that causes false failures currently.
  // guard_.AssertExclusiveHeld(self);
  DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
#if ART_USE_FUTEXES
  RequeueWaiters(std::numeric_limits<int32_t>::max());
#else
  CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
#endif
}

#if ART_USE_FUTEXES
void ConditionVariable::RequeueWaiters(int32_t count) {
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate a signal occurred.
    // Move waiters from the condition variable's futex to the guard's futex,
    // so that they will be woken up when the mutex is released.
    bool done = futex(sequence_.Address(),
                      FUTEX_REQUEUE_PRIVATE,
                      /* Threads to wake */ 0,
                      /* Threads to requeue */ reinterpret_cast<const timespec*>(count),
                      guard_.state_and_contenders_.Address(),
                      0) != -1;
    if (!done && errno != EAGAIN && errno != EINTR) {
      PLOG(FATAL) << "futex requeue failed for " << name_;
    }
  }
}
#endif
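
// FUTEX_REQUEUE reads the "number of threads to requeue" from the slot that
// normally carries the timeout pointer, hence the reinterpret_cast of count
// above: wake zero waiters and move up to count of them onto the guard mutex's
// futex word. Broadcast() is then just RequeueWaiters(INT32_MAX) and Signal()
// is RequeueWaiters(1), so woken threads contend for the guard rather than
// stampeding back to the condition variable.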


void ConditionVariable::Signal(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
#if ART_USE_FUTEXES
  RequeueWaiters(1);
#else
  CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
#endif
}

void ConditionVariable::Wait(Thread* self) {
  guard_.CheckSafeToWait(self);
  WaitHoldingLocks(self);
}

void ConditionVariable::WaitHoldingLocks(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.increment_contenders();
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.load(std::memory_order_relaxed);
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT_PRIVATE, cur_sequence, nullptr, nullptr, 0) != 0) {
    // Futex failed; check that it is an expected error.
    // EAGAIN == EWOULDBLOCK, so we let the caller try again.
    // EINTR implies a signal was sent to this thread.
    if ((errno != EINTR) && (errno != EAGAIN)) {
      PLOG(FATAL) << "futex wait failed for " << name_;
    }
  }
  SleepIfRuntimeDeleted(self);
  guard_.ExclusiveLock(self);
  CHECK_GT(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GT(guard_.get_contenders(), 0);
  guard_.decrement_contenders();
#else
  pid_t old_owner = guard_.GetExclusiveOwnerTid();
  guard_.exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
  guard_.recursion_count_ = 0;
  CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
  guard_.exclusive_owner_.store(old_owner, std::memory_order_relaxed);
#endif
  guard_.recursion_count_ = old_recursion_count;
}
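
// The futex path above is the classic sequence-based condition-variable
// pattern: snapshot sequence_ while still holding the guard, release the
// guard, then FUTEX_WAIT on the snapshot. If a signaller bumps sequence_ in
// the window between the unlock and the sleep, the values no longer match and
// the futex wait returns EAGAIN immediately, so no wakeup can be lost.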

bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == nullptr || self == Thread::Current());
  bool timed_out = false;
  guard_.AssertExclusiveHeld(self);
  guard_.CheckSafeToWait(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  timespec rel_ts;
  InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts);
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.increment_contenders();
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.load(std::memory_order_relaxed);
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT_PRIVATE, cur_sequence, &rel_ts, nullptr, 0) != 0) {
    if (errno == ETIMEDOUT) {
      // Timed out, we're done.
      timed_out = true;
    } else if ((errno == EAGAIN) || (errno == EINTR)) {
      // A signal or ConditionVariable::Signal/Broadcast has come in.
    } else {
      PLOG(FATAL) << "timed futex wait failed for " << name_;
    }
  }
  SleepIfRuntimeDeleted(self);
  guard_.ExclusiveLock(self);
  CHECK_GT(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GT(guard_.get_contenders(), 0);
  guard_.decrement_contenders();
#else
#if !defined(__APPLE__)
  int clock = CLOCK_MONOTONIC;
#else
  int clock = CLOCK_REALTIME;
#endif
  pid_t old_owner = guard_.GetExclusiveOwnerTid();
  guard_.exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
  guard_.recursion_count_ = 0;
  timespec ts;
  InitTimeSpec(true, clock, ms, ns, &ts);
  int rc;
  while ((rc = pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts)) == EINTR) {
    continue;
  }
  if (rc == ETIMEDOUT) {
    timed_out = true;
  } else if (rc != 0) {
    errno = rc;
    PLOG(FATAL) << "TimedWait failed for " << name_;
  }
  guard_.exclusive_owner_.store(old_owner, std::memory_order_relaxed);
#endif
  guard_.recursion_count_ = old_recursion_count;
  return timed_out;
}

}  // namespace art