/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mutex.h"

#include <errno.h>
#include <sys/time.h>

#include <sstream>

#include "android-base/stringprintf.h"

#include "base/atomic.h"
#include "base/logging.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/value_object.h"
#include "mutex-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
#include "thread.h"
#include "thread_list.h"

namespace art {

using android::base::StringPrintf;

static constexpr uint64_t kIntervalMillis = 50;
static constexpr int kMonitorTimeoutTryMax = 5;

static const char* kLastDumpStackTime = "LastDumpStackTime";

struct AllMutexData {
  // A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
  Atomic<const BaseMutex*> all_mutexes_guard;
  // All created mutexes guarded by all_mutexes_guard_.
  std::set<BaseMutex*>* all_mutexes;
  AllMutexData() : all_mutexes(nullptr) {}
};
static struct AllMutexData gAllMutexData[kAllMutexDataSize];

struct DumpStackLastTimeTLSData : public art::TLSData {
  explicit DumpStackLastTimeTLSData(uint64_t last_dump_time_ms)
      : last_dump_time_ms_(last_dump_time_ms) {}
  std::atomic<uint64_t> last_dump_time_ms_;
};

#if ART_USE_FUTEXES
static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
  const int32_t one_sec = 1000 * 1000 * 1000;  // one second in nanoseconds.
  result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec;
  result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec;
  if (result_ts->tv_nsec < 0) {
    result_ts->tv_sec--;
    result_ts->tv_nsec += one_sec;
  } else if (result_ts->tv_nsec > one_sec) {
    result_ts->tv_sec++;
    result_ts->tv_nsec -= one_sec;
  }
  return result_ts->tv_sec < 0;
}
#endif
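
// Worked example for the helper above: lhs = {2, 100000000} and rhs = {0, 900000000} borrow a
// second and yield {1, 200000000}, returning false; when lhs precedes rhs, tv_sec goes negative
// and the caller (ExclusiveLockWithTimeout below) treats that as "deadline already passed".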

#if ART_USE_FUTEXES
// If we wake up from a futex wake, and the runtime disappeared while we were asleep,
// it's important to stop in our tracks before we touch deallocated memory.
static inline void SleepIfRuntimeDeleted(Thread* self) {
  if (self != nullptr) {
    JNIEnvExt* const env = self->GetJniEnv();
    if (UNLIKELY(env != nullptr && env->IsRuntimeDeleted())) {
      DCHECK(self->IsDaemon());
      // If the runtime has been deleted, then we cannot proceed. Just sleep forever. This may
      // occur for user daemon threads that get a spurious wakeup. This occurs for test 132 with
      // --host and --gdb.
      // After we wake up, the runtime may have been shut down, which means that this condition
      // may have been deleted. It is not safe to retry the wait.
      SleepForever();
    }
  }
}
#else
// We should be doing this for pthreads too, but it seems to be impossible for something
// like a condition variable wait. Thus we don't bother trying.
#endif

// Wait for an amount of time that roughly increases in the argument i.
// Spin for small arguments and yield/sleep for longer ones.
static void BackOff(uint32_t i) {
  static constexpr uint32_t kSpinMax = 10;
  static constexpr uint32_t kYieldMax = 20;
  if (i <= kSpinMax) {
    // TODO: Esp. in very latency-sensitive cases, consider replacing this with an explicit
    // test-and-test-and-set loop in the caller. Possibly skip entirely on a uniprocessor.
    volatile uint32_t x = 0;
    const uint32_t spin_count = 10 * i;
    for (uint32_t spin = 0; spin < spin_count; ++spin) {
      ++x;  // Volatile; hence should not be optimized away.
    }
    // TODO: Consider adding x86 PAUSE and/or ARM YIELD here.
  } else if (i <= kYieldMax) {
    sched_yield();
  } else {
    NanoSleep(1000ull * (i - kYieldMax));
  }
}
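
// For reference: BackOff(1) spins ~10 loop iterations and BackOff(10) ~100; BackOff(11..20)
// calls sched_yield(); beyond that the sleep grows linearly, e.g. BackOff(21) sleeps 1us and
// BackOff(30) sleeps 10us.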

// Wait until pred(testLoc->load(std::memory_order_relaxed)) holds, or until a
// short time interval, on the order of kernel context-switch time, passes.
// Return true if the predicate test succeeded, false if we timed out.
template<typename Pred>
static inline bool WaitBrieflyFor(AtomicInteger* testLoc, Thread* self, Pred pred) {
  // TODO: Tune these parameters correctly. BackOff(3) should take on the order of 100 cycles. So
  // this should result in retrying <= 10 times, usually waiting around 100 cycles each. The
  // maximum delay should be significantly less than the expected futex() context switch time, so
  // there should be little danger of this worsening things appreciably. If the lock was only
  // held briefly by a running thread, this should help immensely.
  static constexpr uint32_t kMaxBackOff = 3;  // Should probably be <= kSpinMax above.
  static constexpr uint32_t kMaxIters = 50;
  JNIEnvExt* const env = self == nullptr ? nullptr : self->GetJniEnv();
  for (uint32_t i = 1; i <= kMaxIters; ++i) {
    BackOff(std::min(i, kMaxBackOff));
    if (pred(testLoc->load(std::memory_order_relaxed))) {
      return true;
    }
    if (UNLIKELY(env != nullptr && env->IsRuntimeDeleted())) {
      // This returns true once we've started shutting down. We then try to reach a quiescent
      // state as soon as possible to avoid touching data that may be deallocated by the shutdown
      // process. It currently relies on a timeout.
      return false;
    }
  }
  return false;
}
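
// Typical use, as in the lock slow paths below:
//   WaitBrieflyFor(&state_, self, [](int32_t v) { return v == 0; })
// i.e. spin briefly in the hope that the holder releases the lock before we pay for a futex
// sleep and the associated context switch.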

class ScopedAllMutexesLock final {
 public:
  explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
    for (uint32_t i = 0;
         !gAllMutexData->all_mutexes_guard.CompareAndSetWeakAcquire(nullptr, mutex);
         ++i) {
      BackOff(i);
    }
  }

  ~ScopedAllMutexesLock() {
    DCHECK_EQ(gAllMutexData->all_mutexes_guard.load(std::memory_order_relaxed), mutex_);
    gAllMutexData->all_mutexes_guard.store(nullptr, std::memory_order_release);
  }

 private:
  const BaseMutex* const mutex_;
};
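
// ScopedAllMutexesLock is effectively a CAS-based spin lock over all_mutexes_guard, keyed by
// the mutex being registered; BaseMutex::DumpAll below passes a dummy (-1) key because it only
// reads the set rather than registering a mutex.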

// Scoped class that generates events at the beginning and end of lock contention.
class ScopedContentionRecorder final : public ValueObject {
 public:
  ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
      : mutex_(kLogLockContentions ? mutex : nullptr),
        blocked_tid_(kLogLockContentions ? blocked_tid : 0),
        owner_tid_(kLogLockContentions ? owner_tid : 0),
        start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
    if (ATraceEnabled()) {
      std::string msg = StringPrintf("Lock contention on %s (owner tid: %" PRIu64 ")",
                                     mutex->GetName(), owner_tid);
      ATraceBegin(msg.c_str());
    }
  }

  ~ScopedContentionRecorder() {
    ATraceEnd();
    if (kLogLockContentions) {
      uint64_t end_nano_time = NanoTime();
      mutex_->RecordContention(blocked_tid_, owner_tid_, end_nano_time - start_nano_time_);
    }
  }

 private:
  BaseMutex* const mutex_;
  const uint64_t blocked_tid_;
  const uint64_t owner_tid_;
  const uint64_t start_nano_time_;
};

BaseMutex::BaseMutex(const char* name, LockLevel level)
    : name_(name),
      level_(level),
      should_respond_to_empty_checkpoint_request_(false) {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
    if (*all_mutexes_ptr == nullptr) {
      // We leak the global set of all mutexes to avoid ordering issues in global variable
      // construction/destruction.
      *all_mutexes_ptr = new std::set<BaseMutex*>();
    }
    (*all_mutexes_ptr)->insert(this);
  }
}

BaseMutex::~BaseMutex() {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    gAllMutexData->all_mutexes->erase(this);
  }
}

void BaseMutex::DumpAll(std::ostream& os) {
  if (kLogLockContentions) {
    os << "Mutex logging:\n";
    ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
    std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
    if (all_mutexes == nullptr) {
      // No mutexes have been created yet at startup.
      return;
    }
    os << "(Contended)\n";
    for (const BaseMutex* mutex : *all_mutexes) {
      if (mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
    os << "(Never contended)\n";
    for (const BaseMutex* mutex : *all_mutexes) {
      if (!mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
  }
}

void BaseMutex::CheckSafeToWait(Thread* self) {
  if (self == nullptr) {
    CheckUnattachedThread(level_);
    return;
  }
  if (kDebugLocking) {
    CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
        << "Waiting on unacquired mutex: " << name_;
    bool bad_mutexes_held = false;
    std::string error_msg;
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      if (i != level_) {
        BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
        // We allow the thread to wait even if the user_code_suspension_lock_ is held. That just
        // means that the GC or some other internal process is suspending the thread while it is
        // trying to suspend some other thread. So long as the current thread is not being
        // suspended by a SuspendReason::kForUserCode (which needs the user_code_suspension_lock_
        // to clear) this is fine. This is needed due to user_code_suspension_lock_ being the way
        // untrusted code interacts with suspension. One holds the lock to prevent
        // user-code-suspension from occurring. Since this is only initiated from user-supplied
        // native code this is safe.
        if (held_mutex == Locks::user_code_suspension_lock_) {
          // No thread safety analysis is fine since we have both the user_code_suspension_lock_
          // from the line above and the ThreadSuspendCountLock since it is our level_. We use
          // this lambda to avoid having to annotate the whole function as
          // NO_THREAD_SAFETY_ANALYSIS.
          auto is_suspending_for_user_code = [self]() NO_THREAD_SAFETY_ANALYSIS {
            return self->GetUserCodeSuspendCount() != 0;
          };
          if (is_suspending_for_user_code()) {
            std::ostringstream oss;
            oss << "Holding \"" << held_mutex->name_ << "\" "
                << "(level " << LockLevel(i) << ") while performing wait on "
                << "\"" << name_ << "\" (level " << level_ << ") "
                << "with SuspendReason::kForUserCode pending suspensions";
            error_msg = oss.str();
            LOG(ERROR) << error_msg;
            bad_mutexes_held = true;
          }
        } else if (held_mutex != nullptr) {
          std::ostringstream oss;
          oss << "Holding \"" << held_mutex->name_ << "\" "
              << "(level " << LockLevel(i) << ") while performing wait on "
              << "\"" << name_ << "\" (level " << level_ << ")";
          error_msg = oss.str();
          LOG(ERROR) << error_msg;
          bad_mutexes_held = true;
        }
      }
    }
    if (gAborting == 0) {  // Avoid recursive aborts.
      CHECK(!bad_mutexes_held) << error_msg;
    }
  }
}

void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
  if (kLogLockContentions) {
    // Atomically add value to wait_time.
    wait_time.fetch_add(value, std::memory_order_seq_cst);
  }
}

void BaseMutex::RecordContention(uint64_t blocked_tid,
                                 uint64_t owner_tid,
                                 uint64_t nano_time_blocked) {
  if (kLogLockContentions) {
    ContentionLogData* data = contention_log_data_;
    ++(data->contention_count);
    data->AddToWaitTime(nano_time_blocked);
    ContentionLogEntry* log = data->contention_log;
    // This code is intentionally racy as it is only used for diagnostics.
    int32_t slot = data->cur_content_log_entry.load(std::memory_order_relaxed);
    if (log[slot].blocked_tid == blocked_tid &&
        log[slot].owner_tid == owner_tid) {
      ++log[slot].count;
    } else {
      uint32_t new_slot;
      do {
        slot = data->cur_content_log_entry.load(std::memory_order_relaxed);
        new_slot = (slot + 1) % kContentionLogSize;
      } while (!data->cur_content_log_entry.CompareAndSetWeakRelaxed(slot, new_slot));
      log[new_slot].blocked_tid = blocked_tid;
      log[new_slot].owner_tid = owner_tid;
      log[new_slot].count.store(1, std::memory_order_relaxed);
    }
  }
}

void BaseMutex::DumpContention(std::ostream& os) const {
  if (kLogLockContentions) {
    const ContentionLogData* data = contention_log_data_;
    const ContentionLogEntry* log = data->contention_log;
    uint64_t wait_time = data->wait_time.load(std::memory_order_relaxed);
    uint32_t contention_count = data->contention_count.load(std::memory_order_relaxed);
    if (contention_count == 0) {
      os << "never contended";
    } else {
      os << "contended " << contention_count
         << " total wait of contender " << PrettyDuration(wait_time)
         << " average " << PrettyDuration(wait_time / contention_count);
      SafeMap<uint64_t, size_t> most_common_blocker;
      SafeMap<uint64_t, size_t> most_common_blocked;
      for (size_t i = 0; i < kContentionLogSize; ++i) {
        uint64_t blocked_tid = log[i].blocked_tid;
        uint64_t owner_tid = log[i].owner_tid;
        uint32_t count = log[i].count.load(std::memory_order_relaxed);
        if (count > 0) {
          auto it = most_common_blocked.find(blocked_tid);
          if (it != most_common_blocked.end()) {
            most_common_blocked.Overwrite(blocked_tid, it->second + count);
          } else {
            most_common_blocked.Put(blocked_tid, count);
          }
          it = most_common_blocker.find(owner_tid);
          if (it != most_common_blocker.end()) {
            most_common_blocker.Overwrite(owner_tid, it->second + count);
          } else {
            most_common_blocker.Put(owner_tid, count);
          }
        }
      }
      uint64_t max_tid = 0;
      size_t max_tid_count = 0;
      for (const auto& pair : most_common_blocked) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows most blocked tid=" << max_tid;
      }
      max_tid = 0;
      max_tid_count = 0;
      for (const auto& pair : most_common_blocker) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows tid=" << max_tid << " owning during this time";
      }
    }
  }
}

Mutex::Mutex(const char* name, LockLevel level, bool recursive)
    : BaseMutex(name, level), exclusive_owner_(0), recursion_count_(0), recursive_(recursive) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, state_and_contenders_.load(std::memory_order_relaxed));
#else
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, nullptr));
#endif
}
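
// In the futex build, state_and_contenders_ packs the whole lock word (fields declared in
// mutex.h): one bit (kHeldMask) is the held flag, and the remaining bits count waiting threads
// in steps of kContenderIncrement, as the increment_contenders()/decrement_contenders() calls
// below assume.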

// Helper to allow checking shutdown while locking for thread safety.
static bool IsSafeToCallAbortSafe() {
  MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
  return Locks::IsSafeToCallAbortRacy();
}

Mutex::~Mutex() {
  bool safe_to_call_abort = Locks::IsSafeToCallAbortRacy();
#if ART_USE_FUTEXES
  if (state_and_contenders_.load(std::memory_order_relaxed) != 0) {
    LOG(safe_to_call_abort ? FATAL : WARNING)
        << "destroying mutex with owner or contenders. Owner:" << GetExclusiveOwnerTid();
  } else {
    if (GetExclusiveOwnerTid() != 0) {
      LOG(safe_to_call_abort ? FATAL : WARNING)
          << "unexpectedly found an owner on unlocked mutex " << name_;
    }
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_mutex_destroy(&mutex_);
  if (rc != 0) {
    errno = rc;
    PLOG(safe_to_call_abort ? FATAL : WARNING)
        << "pthread_mutex_destroy failed for " << name_;
  }
#endif
}

void Mutex::ExclusiveLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_and_contenders_.load(std::memory_order_relaxed);
      if (LIKELY((cur_state & kHeldMask) == 0) /* lock not held */) {
        done = state_and_contenders_.CompareAndSetWeakAcquire(cur_state, cur_state | kHeldMask);
      } else {
        // Failed to acquire, hang up.
        ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
        // Empirically, it appears important to spin again each time through the loop; if we
        // bother to go to sleep and wake up, we should be fairly persistent in trying for the
        // lock.
        if (!WaitBrieflyFor(&state_and_contenders_, self,
                            [](int32_t v) { return (v & kHeldMask) == 0; })) {
          // Increment contender count. We can't create enough threads for this to overflow.
          increment_contenders();
          // Make cur_state again reflect the expected value of state_and_contenders.
          cur_state += kContenderIncrement;
          if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
            self->CheckEmptyCheckpointFromMutex();
          }

          uint64_t wait_start_ms = enable_monitor_timeout_ ? MilliTime() : 0;
          uint64_t try_times = 0;
          do {
            timespec timeout_ts;
            timeout_ts.tv_sec = 0;
            timeout_ts.tv_nsec = Runtime::Current()->GetMonitorTimeoutNs();
            if (futex(state_and_contenders_.Address(), FUTEX_WAIT_PRIVATE, cur_state,
                      enable_monitor_timeout_ ? &timeout_ts : nullptr, nullptr, 0) != 0) {
              // We only went to sleep after incrementing contenders and checking that the
              // lock is still held by someone else. EAGAIN and EINTR both indicate a spurious
              // failure, try again from the beginning. We don't use TEMP_FAILURE_RETRY so we can
              // intentionally retry to acquire the lock.
              if ((errno != EAGAIN) && (errno != EINTR)) {
                if (errno == ETIMEDOUT) {
                  try_times++;
                  if (try_times <= kMonitorTimeoutTryMax) {
                    DumpStack(self, wait_start_ms, try_times);
                  }
                } else {
                  PLOG(FATAL) << "futex wait failed for " << name_;
                }
              }
            }
            SleepIfRuntimeDeleted(self);
            // Retry until not held. In heavy contention situations we otherwise get redundant
            // futex wakeups as a result of repeatedly decrementing and incrementing contenders.
            cur_state = state_and_contenders_.load(std::memory_order_relaxed);
          } while ((cur_state & kHeldMask) != 0);
          decrement_contenders();
        }
      }
    } while (!done);
    // Confirm that lock is now held.
    DCHECK_NE(state_and_contenders_.load(std::memory_order_relaxed) & kHeldMask, 0);
#else
    CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
#endif
    DCHECK_EQ(GetExclusiveOwnerTid(), 0) << " my tid = " << SafeGetTid(self)
                                         << " recursive_ = " << recursive_;
    exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
                                               << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
}
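
// Illustrative usage sketch (not part of this file): in practice callers go through the scoped
// helpers from mutex.h rather than calling ExclusiveLock()/ExclusiveUnlock() directly, e.g.
//   Mutex lock("example lock", kDefaultMutexLevel);
//   {
//     MutexLock mu(Thread::Current(), lock);  // Locks in the constructor ...
//     /* critical section */
//   }                                         // ... unlocks in the destructor.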

void Mutex::DumpStack(Thread* self, uint64_t wait_start_ms, uint64_t try_times) {
  ScopedObjectAccess soa(self);
  Locks::thread_list_lock_->ExclusiveLock(self);
  std::string owner_stack_dump;
  pid_t owner_tid = GetExclusiveOwnerTid();
  Thread* owner = Runtime::Current()->GetThreadList()->FindThreadByTid(owner_tid);
  if (owner != nullptr) {
    if (IsDumpFrequent(owner, try_times)) {
      Locks::thread_list_lock_->ExclusiveUnlock(self);
      LOG(WARNING) << "Contention with tid " << owner_tid << ", monitor id " << monitor_id_;
      return;
    }
    struct CollectStackTrace : public Closure {
      void Run(art::Thread* thread) override
          REQUIRES_SHARED(art::Locks::mutator_lock_) {
        if (IsDumpFrequent(thread)) {
          return;
        }
        DumpStackLastTimeTLSData* tls_data =
            reinterpret_cast<DumpStackLastTimeTLSData*>(thread->GetCustomTLS(kLastDumpStackTime));
        if (tls_data == nullptr) {
          thread->SetCustomTLS(kLastDumpStackTime, new DumpStackLastTimeTLSData(MilliTime()));
        } else {
          tls_data->last_dump_time_ms_.store(MilliTime());
        }
        thread->DumpJavaStack(oss);
      }
      std::ostringstream oss;
    };
    CollectStackTrace owner_trace;
    owner->RequestSynchronousCheckpoint(&owner_trace);
    owner_stack_dump = owner_trace.oss.str();
    uint64_t wait_ms = MilliTime() - wait_start_ms;
    LOG(WARNING) << "Monitor contention with tid " << owner_tid << ", wait time: " << wait_ms
                 << "ms, monitor id: " << monitor_id_
                 << "\nPerfMonitor owner thread(" << owner_tid << ") stack is:\n"
                 << owner_stack_dump;
  } else {
    Locks::thread_list_lock_->ExclusiveUnlock(self);
  }
}

bool Mutex::IsDumpFrequent(Thread* thread, uint64_t try_times) {
  uint64_t last_dump_time_ms = 0;
  DumpStackLastTimeTLSData* tls_data =
      reinterpret_cast<DumpStackLastTimeTLSData*>(thread->GetCustomTLS(kLastDumpStackTime));
  if (tls_data != nullptr) {
    last_dump_time_ms = tls_data->last_dump_time_ms_.load();
  }
  uint64_t interval = MilliTime() - last_dump_time_ms;
  return interval < kIntervalMillis * try_times;
}
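
// With kIntervalMillis = 50 and try_times ranging up to kMonitorTimeoutTryMax = 5, a given
// thread's stack is dumped at most once per 50..250 ms window, so repeated monitor timeouts do
// not flood the log.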

bool Mutex::ExclusiveTryLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_and_contenders_.load(std::memory_order_relaxed);
      if ((cur_state & kHeldMask) == 0) {
        // Change state to held and impose load/store ordering appropriate for lock acquisition.
        done = state_and_contenders_.CompareAndSetWeakAcquire(cur_state, cur_state | kHeldMask);
      } else {
        return false;
      }
    } while (!done);
    DCHECK_NE(state_and_contenders_.load(std::memory_order_relaxed) & kHeldMask, 0);
#else
    int result = pthread_mutex_trylock(&mutex_);
    if (result == EBUSY) {
      return false;
    }
    if (result != 0) {
      errno = result;
      PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
    }
#endif
    DCHECK_EQ(GetExclusiveOwnerTid(), 0);
    exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
                                               << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
  return true;
}

bool Mutex::ExclusiveTryLockWithSpinning(Thread* self) {
  // Spin a small number of times, since this affects our ability to respond to suspension
  // requests. We spin repeatedly only if the mutex repeatedly becomes available and unavailable
  // in rapid succession, and then we will typically not spin for the maximal period.
  const int kMaxSpins = 5;
  for (int i = 0; i < kMaxSpins; ++i) {
    if (ExclusiveTryLock(self)) {
      return true;
    }
#if ART_USE_FUTEXES
    if (!WaitBrieflyFor(&state_and_contenders_, self,
                        [](int32_t v) { return (v & kHeldMask) == 0; })) {
      return false;
    }
#endif
  }
  return ExclusiveTryLock(self);
}

#if ART_USE_FUTEXES
void Mutex::ExclusiveLockUncontendedFor(Thread* new_owner) {
  DCHECK_EQ(level_, kMonitorLock);
  DCHECK(!recursive_);
  state_and_contenders_.store(kHeldMask, std::memory_order_relaxed);
  recursion_count_ = 1;
  exclusive_owner_.store(SafeGetTid(new_owner), std::memory_order_relaxed);
  // Don't call RegisterAsLocked(). It wouldn't register anything anyway. And
  // this happens as we're inflating a monitor, which doesn't logically affect
  // held "locks"; it effectively just converts a thin lock to a mutex. By doing
  // this while the lock is already held, we're delaying the acquisition of a
  // logically held mutex, which can introduce bogus lock order violations.
}

void Mutex::ExclusiveUnlockUncontended() {
  DCHECK_EQ(level_, kMonitorLock);
  state_and_contenders_.store(0, std::memory_order_relaxed);
  recursion_count_ = 0;
  exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
  // Skip RegisterAsUnlocked(), which wouldn't do anything anyway.
}
#endif  // ART_USE_FUTEXES

void Mutex::ExclusiveUnlock(Thread* self) {
  if (kIsDebugBuild && self != nullptr && self != Thread::Current()) {
    std::string name1 = "<null>";
    std::string name2 = "<null>";
    if (self != nullptr) {
      self->GetThreadName(name1);
    }
    if (Thread::Current() != nullptr) {
      Thread::Current()->GetThreadName(name2);
    }
    LOG(FATAL) << GetName() << " level=" << level_ << " self=" << name1
               << " Thread::Current()=" << name2;
  }
  AssertHeld(self);
  DCHECK_NE(GetExclusiveOwnerTid(), 0);
  recursion_count_--;
  if (!recursive_ || recursion_count_ == 0) {
    if (kDebugLocking) {
      CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: "
                                                 << name_ << " " << recursion_count_;
    }
    RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_and_contenders_.load(std::memory_order_relaxed);
      if (LIKELY((cur_state & kHeldMask) != 0)) {
        // We're no longer the owner.
        exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
        // Change state to not held and impose load/store ordering appropriate for lock release.
        uint32_t new_state = cur_state & ~kHeldMask;  // Same number of contenders.
        done = state_and_contenders_.CompareAndSetWeakRelease(cur_state, new_state);
        if (LIKELY(done)) {  // Spurious fail or waiters changed?
          if (UNLIKELY(new_state != 0) /* have contenders */) {
            futex(state_and_contenders_.Address(), FUTEX_WAKE_PRIVATE, kWakeOne,
                  nullptr, nullptr, 0);
          }
          // We only do a futex wait after incrementing contenders and verifying the lock was
          // still held. If we didn't see waiters, then there couldn't have been any futexes
          // waiting on this lock when we did the CAS. New arrivals after that cannot wait for us,
          // since the futex wait call would see the lock available and immediately return.
        }
      } else {
        // Logging acquires the logging lock, avoid infinite recursion in that case.
        if (this != Locks::logging_lock_) {
          LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
        } else {
          LogHelper::LogLineLowStack(__FILE__,
                                     __LINE__,
                                     ::android::base::FATAL_WITHOUT_ABORT,
                                     StringPrintf("Unexpected state_ %d in unlock for %s",
                                                  cur_state, name_).c_str());
          _exit(1);
        }
      }
    } while (!done);
#else
    exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
    CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
#endif
  }
}

void Mutex::Dump(std::ostream& os) const {
  os << (recursive_ ? "recursive " : "non-recursive ")
     << name_
     << " level=" << static_cast<int>(level_)
     << " rec=" << recursion_count_
     << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
  mu.Dump(os);
  return os;
}

void Mutex::WakeupToRespondToEmptyCheckpoint() {
#if ART_USE_FUTEXES
  // Wake up all the waiters so they will respond to the empty checkpoint.
  DCHECK(should_respond_to_empty_checkpoint_request_);
  if (UNLIKELY(get_contenders() != 0)) {
    futex(state_and_contenders_.Address(), FUTEX_WAKE_PRIVATE, kWakeAll, nullptr, nullptr, 0);
  }
#else
  LOG(FATAL) << "Non futex case isn't supported.";
#endif
}

ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
    : BaseMutex(name, level)
#if ART_USE_FUTEXES
    , state_(0), exclusive_owner_(0), num_contenders_(0)
#endif
{
#if !ART_USE_FUTEXES
  CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr));
#endif
}
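
// In the futex build, state_ encodes the reader/writer state: 0 means free, -1 means
// exclusively (writer) held, and a positive value counts the current shared (reader) holders,
// as the CAS transitions below (0 <-> -1 for writers, n <-> n + 1 for readers) rely on.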

ReaderWriterMutex::~ReaderWriterMutex() {
#if ART_USE_FUTEXES
  CHECK_EQ(state_.load(std::memory_order_relaxed), 0);
  CHECK_EQ(GetExclusiveOwnerTid(), 0);
  CHECK_EQ(num_contenders_.load(std::memory_order_relaxed), 0);
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_rwlock_destroy(&rwlock_);
  if (rc != 0) {
    errno = rc;
    bool is_safe_to_call_abort = IsSafeToCallAbortSafe();
    PLOG(is_safe_to_call_abort ? FATAL : WARNING) << "pthread_rwlock_destroy failed for " << name_;
  }
#endif
}

void ReaderWriterMutex::ExclusiveLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  AssertNotExclusiveHeld(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    if (LIKELY(cur_state == 0)) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      if (!WaitBrieflyFor(&state_, self, [](int32_t v) { return v == 0; })) {
        num_contenders_.fetch_add(1);
        if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
          self->CheckEmptyCheckpointFromMutex();
        }
        if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
          // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
          // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
          if ((errno != EAGAIN) && (errno != EINTR)) {
            PLOG(FATAL) << "futex wait failed for " << name_;
          }
        }
        SleepIfRuntimeDeleted(self);
        num_contenders_.fetch_sub(1);
      }
    }
  } while (!done);
  DCHECK_EQ(state_.load(std::memory_order_relaxed), -1);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_));
#endif
  DCHECK_EQ(GetExclusiveOwnerTid(), 0);
  exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
}

void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  AssertExclusiveHeld(self);
  RegisterAsUnlocked(self);
  DCHECK_NE(GetExclusiveOwnerTid(), 0);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    if (LIKELY(cur_state == -1)) {
      // We're no longer the owner.
      exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
      // Change state from -1 to 0 and impose load/store ordering appropriate for lock release.
      // Note, the num_contenders_ load below mustn't reorder before the CompareAndSet.
      done = state_.CompareAndSetWeakSequentiallyConsistent(-1 /* cur_state */, 0 /* new state */);
      if (LIKELY(done)) {  // Weak CAS may fail spuriously.
        // Wake any waiters.
        if (UNLIKELY(num_contenders_.load(std::memory_order_seq_cst) > 0)) {
          futex(state_.Address(), FUTEX_WAKE_PRIVATE, kWakeAll, nullptr, nullptr, 0);
        }
      }
    } else {
      LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_;
    }
  } while (!done);
#else
  exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}

#if HAVE_TIMED_RWLOCK
bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  timespec end_abs_ts;
  InitTimeSpec(true, CLOCK_MONOTONIC, ms, ns, &end_abs_ts);
  do {
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    if (cur_state == 0) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      timespec now_abs_ts;
      InitTimeSpec(true, CLOCK_MONOTONIC, 0, 0, &now_abs_ts);
      timespec rel_ts;
      if (ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) {
        return false;  // Timed out.
      }
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      if (!WaitBrieflyFor(&state_, self, [](int32_t v) { return v == 0; })) {
        num_contenders_.fetch_add(1);
        if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
          self->CheckEmptyCheckpointFromMutex();
        }
        if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, &rel_ts, nullptr, 0) != 0) {
          if (errno == ETIMEDOUT) {
            num_contenders_.fetch_sub(1);
            return false;  // Timed out.
          } else if ((errno != EAGAIN) && (errno != EINTR)) {
            // EAGAIN and EINTR both indicate a spurious failure,
            // recompute the relative time out from now and try again.
            // We don't use TEMP_FAILURE_RETRY so we can recompute rel_ts.
            PLOG(FATAL) << "timed futex wait failed for " << name_;
          }
        }
        SleepIfRuntimeDeleted(self);
        num_contenders_.fetch_sub(1);
      }
    }
  } while (!done);
#else
  timespec ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts);
  int result = pthread_rwlock_timedwrlock(&rwlock_, &ts);
  if (result == ETIMEDOUT) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_;
  }
#endif
  exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
  return true;
}
#endif

#if ART_USE_FUTEXES
void ReaderWriterMutex::HandleSharedLockContention(Thread* self, int32_t cur_state) {
  // Owner holds it exclusively, hang up.
  ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
  if (!WaitBrieflyFor(&state_, self, [](int32_t v) { return v >= 0; })) {
    num_contenders_.fetch_add(1);
    if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
      self->CheckEmptyCheckpointFromMutex();
    }
    if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
      if (errno != EAGAIN && errno != EINTR) {
        PLOG(FATAL) << "futex wait failed for " << name_;
      }
    }
    SleepIfRuntimeDeleted(self);
    num_contenders_.fetch_sub(1);
  }
}
#endif

bool ReaderWriterMutex::SharedTryLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    if (cur_state >= 0) {
      // Add as an extra reader and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareAndSetWeakAcquire(cur_state, cur_state + 1);
    } else {
      // Owner holds it exclusively.
      return false;
    }
  } while (!done);
#else
  int result = pthread_rwlock_tryrdlock(&rwlock_);
  if (result == EBUSY) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_tryrdlock failed for " << name_;
  }
#endif
  RegisterAsLocked(self);
  AssertSharedHeld(self);
  return true;
}
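
// The uncontended SharedLock()/SharedUnlock() fast paths are inlined in mutex-inl.h (included
// above); only the contended writer-held slow path, HandleSharedLockContention() above, lives
// in this file.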

bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
  DCHECK(self == nullptr || self == Thread::Current());
  bool result;
  if (UNLIKELY(self == nullptr)) {  // Handle unattached threads.
    result = IsExclusiveHeld(self);  // TODO: a better best effort here.
  } else {
    result = (self->GetHeldMutex(level_) == this);
  }
  return result;
}

void ReaderWriterMutex::Dump(std::ostream& os) const {
  os << name_
     << " level=" << static_cast<int>(level_)
     << " owner=" << GetExclusiveOwnerTid()
#if ART_USE_FUTEXES
     << " state=" << state_.load(std::memory_order_seq_cst)
     << " num_contenders=" << num_contenders_.load(std::memory_order_seq_cst)
#endif
     << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) {
  mu.Dump(os);
  return os;
}

std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu) {
  mu.Dump(os);
  return os;
}

void ReaderWriterMutex::WakeupToRespondToEmptyCheckpoint() {
#if ART_USE_FUTEXES
  // Wake up all the waiters so they will respond to the empty checkpoint.
  DCHECK(should_respond_to_empty_checkpoint_request_);
  if (UNLIKELY(num_contenders_.load(std::memory_order_relaxed) > 0)) {
    futex(state_.Address(), FUTEX_WAKE_PRIVATE, kWakeAll, nullptr, nullptr, 0);
  }
#else
  LOG(FATAL) << "Non futex case isn't supported.";
#endif
}

ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
    : name_(name), guard_(guard) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, sequence_.load(std::memory_order_relaxed));
  num_waiters_ = 0;
#else
  pthread_condattr_t cond_attrs;
  CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs));
#if !defined(__APPLE__)
  // Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock.
  CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC));
#endif
  CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs));
#endif
}

ConditionVariable::~ConditionVariable() {
#if ART_USE_FUTEXES
  if (num_waiters_ != 0) {
    bool is_safe_to_call_abort = IsSafeToCallAbortSafe();
    LOG(is_safe_to_call_abort ? FATAL : WARNING)
        << "ConditionVariable::~ConditionVariable for " << name_
        << " called with " << num_waiters_ << " waiters.";
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using condition variables.
  int rc = pthread_cond_destroy(&cond_);
  if (rc != 0) {
    errno = rc;
    bool is_safe_to_call_abort = IsSafeToCallAbortSafe();
    PLOG(is_safe_to_call_abort ? FATAL : WARNING) << "pthread_cond_destroy failed for " << name_;
  }
#endif
}

void ConditionVariable::Broadcast(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  // TODO: enable below, there's a race in thread creation that causes false failures currently.
  // guard_.AssertExclusiveHeld(self);
  DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
#if ART_USE_FUTEXES
  RequeueWaiters(std::numeric_limits<int32_t>::max());
#else
  CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
#endif
}

#if ART_USE_FUTEXES
void ConditionVariable::RequeueWaiters(int32_t count) {
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate a signal occurred.
    // Move waiters from the condition variable's futex to the guard's futex,
    // so that they will be woken up when the mutex is released.
    bool done = futex(sequence_.Address(),
                      FUTEX_REQUEUE_PRIVATE,
                      /* Threads to wake */ 0,
                      /* Threads to requeue */ reinterpret_cast<const timespec*>(count),
                      guard_.state_and_contenders_.Address(),
                      0) != -1;
    if (!done && errno != EAGAIN && errno != EINTR) {
      PLOG(FATAL) << "futex requeue failed for " << name_;
    }
  }
}
#endif
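
// Note the FUTEX_REQUEUE calling convention above: the kernel ignores the timeout argument for
// this operation and reinterprets that argument slot as val2, the maximum number of waiters to
// requeue, which is why |count| is smuggled through a timespec pointer. Requeueing onto the
// guard's futex rather than waking everyone avoids a thundering herd on Broadcast(): each
// waiter is woken one at a time as Mutex::ExclusiveUnlock() signals the guard.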

void ConditionVariable::Signal(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
#if ART_USE_FUTEXES
  RequeueWaiters(1);
#else
  CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
#endif
}

void ConditionVariable::Wait(Thread* self) {
  guard_.CheckSafeToWait(self);
  WaitHoldingLocks(self);
}

void ConditionVariable::WaitHoldingLocks(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.increment_contenders();
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.load(std::memory_order_relaxed);
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT_PRIVATE, cur_sequence, nullptr, nullptr, 0) != 0) {
    // Futex failed, check it is an expected error.
    // EAGAIN == EWOULDBLOCK, so we let the caller try again.
    // EINTR implies a signal was sent to this thread.
    if ((errno != EINTR) && (errno != EAGAIN)) {
      PLOG(FATAL) << "futex wait failed for " << name_;
    }
  }
  SleepIfRuntimeDeleted(self);
  guard_.ExclusiveLock(self);
  CHECK_GT(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GT(guard_.get_contenders(), 0);
  guard_.decrement_contenders();
#else
  pid_t old_owner = guard_.GetExclusiveOwnerTid();
  guard_.exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
  guard_.recursion_count_ = 0;
  CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
  guard_.exclusive_owner_.store(old_owner, std::memory_order_relaxed);
#endif
  guard_.recursion_count_ = old_recursion_count;
}

bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == nullptr || self == Thread::Current());
  bool timed_out = false;
  guard_.AssertExclusiveHeld(self);
  guard_.CheckSafeToWait(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  timespec rel_ts;
  InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts);
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.increment_contenders();
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.load(std::memory_order_relaxed);
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT_PRIVATE, cur_sequence, &rel_ts, nullptr, 0) != 0) {
    if (errno == ETIMEDOUT) {
      // Timed out, we're done.
      timed_out = true;
    } else if ((errno == EAGAIN) || (errno == EINTR)) {
      // A signal or ConditionVariable::Signal/Broadcast has come in.
    } else {
      PLOG(FATAL) << "timed futex wait failed for " << name_;
    }
  }
  SleepIfRuntimeDeleted(self);
  guard_.ExclusiveLock(self);
  CHECK_GT(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GT(guard_.get_contenders(), 0);
  guard_.decrement_contenders();
#else
#if !defined(__APPLE__)
  int clock = CLOCK_MONOTONIC;
#else
  int clock = CLOCK_REALTIME;
#endif
  pid_t old_owner = guard_.GetExclusiveOwnerTid();
  guard_.exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
  guard_.recursion_count_ = 0;
  timespec ts;
  InitTimeSpec(true, clock, ms, ns, &ts);
  int rc;
  while ((rc = pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts)) == EINTR) {
    continue;
  }
  if (rc == ETIMEDOUT) {
    timed_out = true;
  } else if (rc != 0) {
    errno = rc;
    PLOG(FATAL) << "TimedWait failed for " << name_;
  }
  guard_.exclusive_owner_.store(old_owner, std::memory_order_relaxed);
#endif
  guard_.recursion_count_ = old_recursion_count;
  return timed_out;
}

}  // namespace art