1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "mutex.h"
18 
19 #include <errno.h>
20 #include <sys/time.h>
21 
22 #include "atomic.h"
23 #include "base/logging.h"
24 #include "base/time_utils.h"
25 #include "base/systrace.h"
26 #include "base/value_object.h"
27 #include "mutex-inl.h"
28 #include "runtime.h"
29 #include "scoped_thread_state_change.h"
30 #include "thread-inl.h"
31 
32 namespace art {
33 
34 Mutex* Locks::abort_lock_ = nullptr;
35 Mutex* Locks::alloc_tracker_lock_ = nullptr;
36 Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
37 Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
38 ReaderWriterMutex* Locks::breakpoint_lock_ = nullptr;
39 ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
40 Mutex* Locks::deoptimization_lock_ = nullptr;
41 ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
42 Mutex* Locks::instrument_entrypoints_lock_ = nullptr;
43 Mutex* Locks::intern_table_lock_ = nullptr;
44 Mutex* Locks::interpreter_string_init_map_lock_ = nullptr;
45 Mutex* Locks::jni_libraries_lock_ = nullptr;
46 Mutex* Locks::logging_lock_ = nullptr;
47 Mutex* Locks::mem_maps_lock_ = nullptr;
48 Mutex* Locks::modify_ldt_lock_ = nullptr;
49 MutatorMutex* Locks::mutator_lock_ = nullptr;
50 Mutex* Locks::profiler_lock_ = nullptr;
51 ReaderWriterMutex* Locks::oat_file_manager_lock_ = nullptr;
52 Mutex* Locks::host_dlopen_handles_lock_ = nullptr;
53 Mutex* Locks::reference_processor_lock_ = nullptr;
54 Mutex* Locks::reference_queue_cleared_references_lock_ = nullptr;
55 Mutex* Locks::reference_queue_finalizer_references_lock_ = nullptr;
56 Mutex* Locks::reference_queue_phantom_references_lock_ = nullptr;
57 Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
58 Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
59 Mutex* Locks::runtime_shutdown_lock_ = nullptr;
60 Mutex* Locks::thread_list_lock_ = nullptr;
61 ConditionVariable* Locks::thread_exit_cond_ = nullptr;
62 Mutex* Locks::thread_suspend_count_lock_ = nullptr;
63 Mutex* Locks::trace_lock_ = nullptr;
64 Mutex* Locks::unexpected_signal_lock_ = nullptr;
65 Mutex* Locks::lambda_table_lock_ = nullptr;
66 Uninterruptible Roles::uninterruptible_;
67 
68 struct AllMutexData {
69   // A guard for all_mutexes that's not a Mutex (Mutexes must CAS to acquire and busy wait).
70   Atomic<const BaseMutex*> all_mutexes_guard;
71   // All created mutexes, guarded by all_mutexes_guard.
72   std::set<BaseMutex*>* all_mutexes;
73   AllMutexData() : all_mutexes(nullptr) {}
74 };
75 static struct AllMutexData gAllMutexData[kAllMutexDataSize];
76 
77 #if ART_USE_FUTEXES
78 static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
79   const int32_t one_sec = 1000 * 1000 * 1000;  // one second in nanoseconds.
80   result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec;
81   result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec;
82   if (result_ts->tv_nsec < 0) {
83     result_ts->tv_sec--;
84     result_ts->tv_nsec += one_sec;
85   } else if (result_ts->tv_nsec > one_sec) {
86     result_ts->tv_sec++;
87     result_ts->tv_nsec -= one_sec;
88   }
89   return result_ts->tv_sec < 0;
90 }
91 #endif
92 
93 class ScopedAllMutexesLock FINAL {
94  public:
95   explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
96     while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakAcquire(0, mutex)) {
97       NanoSleep(100);
98     }
99   }
100 
101   ~ScopedAllMutexesLock() {
102 #if !defined(__clang__)
103     // TODO: remove this workaround for the target GCC/libc++/bionic bug "invalid failure memory model".
104     while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakSequentiallyConsistent(mutex_, 0)) {
105 #else
106     while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakRelease(mutex_, 0)) {
107 #endif
108       NanoSleep(100);
109     }
110   }
111 
112  private:
113   const BaseMutex* const mutex_;
114 };
115 
116 // Scoped class that generates events at the beginning and end of lock contention.
117 class ScopedContentionRecorder FINAL : public ValueObject {
118  public:
119   ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
120       : mutex_(kLogLockContentions ? mutex : nullptr),
121         blocked_tid_(kLogLockContentions ? blocked_tid : 0),
122         owner_tid_(kLogLockContentions ? owner_tid : 0),
123         start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
124     if (ATRACE_ENABLED()) {
125       std::string msg = StringPrintf("Lock contention on %s (owner tid: %" PRIu64 ")",
126                                      mutex->GetName(), owner_tid);
127       ATRACE_BEGIN(msg.c_str());
128     }
129   }
130 
131   ~ScopedContentionRecorder() {
132     ATRACE_END();
133     if (kLogLockContentions) {
134       uint64_t end_nano_time = NanoTime();
135       mutex_->RecordContention(blocked_tid_, owner_tid_, end_nano_time - start_nano_time_);
136     }
137   }
138 
139  private:
140   BaseMutex* const mutex_;
141   const uint64_t blocked_tid_;
142   const uint64_t owner_tid_;
143   const uint64_t start_nano_time_;
144 };
145 
146 BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(name) {
147   if (kLogLockContentions) {
148     ScopedAllMutexesLock mu(this);
149     std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
150     if (*all_mutexes_ptr == nullptr) {
151       // We leak the global set of all mutexes to avoid ordering issues in global variable
152       // construction/destruction.
153       *all_mutexes_ptr = new std::set<BaseMutex*>();
154     }
155     (*all_mutexes_ptr)->insert(this);
156   }
157 }
158 
159 BaseMutex::~BaseMutex() {
160   if (kLogLockContentions) {
161     ScopedAllMutexesLock mu(this);
162     gAllMutexData->all_mutexes->erase(this);
163   }
164 }
165 
166 void BaseMutex::DumpAll(std::ostream& os) {
167   if (kLogLockContentions) {
168     os << "Mutex logging:\n";
169     ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
170     std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
171     if (all_mutexes == nullptr) {
172       // No mutexes have been created yet at startup.
173       return;
174     }
175     typedef std::set<BaseMutex*>::const_iterator It;
176     os << "(Contended)\n";
177     for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
178       BaseMutex* mutex = *it;
179       if (mutex->HasEverContended()) {
180         mutex->Dump(os);
181         os << "\n";
182       }
183     }
184     os << "(Never contented)\n";
185     for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
186       BaseMutex* mutex = *it;
187       if (!mutex->HasEverContended()) {
188         mutex->Dump(os);
189         os << "\n";
190       }
191     }
192   }
193 }
194 
195 void BaseMutex::CheckSafeToWait(Thread* self) {
196   if (self == nullptr) {
197     CheckUnattachedThread(level_);
198     return;
199   }
200   if (kDebugLocking) {
201     CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
202         << "Waiting on unacquired mutex: " << name_;
203     bool bad_mutexes_held = false;
204     for (int i = kLockLevelCount - 1; i >= 0; --i) {
205       if (i != level_) {
206         BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
207         // We expect waits to happen while holding the thread list suspend thread lock.
208         if (held_mutex != nullptr) {
209           LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
210                      << "(level " << LockLevel(i) << ") while performing wait on "
211                      << "\"" << name_ << "\" (level " << level_ << ")";
212           bad_mutexes_held = true;
213         }
214       }
215     }
216     if (gAborting == 0) {  // Avoid recursive aborts.
217       CHECK(!bad_mutexes_held);
218     }
219   }
220 }
221 
222 void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
223   if (kLogLockContentions) {
224     // Atomically add value to wait_time.
225     wait_time.FetchAndAddSequentiallyConsistent(value);
226   }
227 }
228 
229 void BaseMutex::RecordContention(uint64_t blocked_tid,
230                                  uint64_t owner_tid,
231                                  uint64_t nano_time_blocked) {
232   if (kLogLockContentions) {
233     ContentionLogData* data = contention_log_data_;
234     ++(data->contention_count);
235     data->AddToWaitTime(nano_time_blocked);
236     ContentionLogEntry* log = data->contention_log;
237     // This code is intentionally racy as it is only used for diagnostics.
238     uint32_t slot = data->cur_content_log_entry.LoadRelaxed();
239     if (log[slot].blocked_tid == blocked_tid &&
240         log[slot].owner_tid == owner_tid) {
241       ++log[slot].count;
242     } else {
243       uint32_t new_slot;
244       do {
245         slot = data->cur_content_log_entry.LoadRelaxed();
246         new_slot = (slot + 1) % kContentionLogSize;
247       } while (!data->cur_content_log_entry.CompareExchangeWeakRelaxed(slot, new_slot));
248       log[new_slot].blocked_tid = blocked_tid;
249       log[new_slot].owner_tid = owner_tid;
250       log[new_slot].count.StoreRelaxed(1);
251     }
252   }
253 }
254 
255 void BaseMutex::DumpContention(std::ostream& os) const {
256   if (kLogLockContentions) {
257     const ContentionLogData* data = contention_log_data_;
258     const ContentionLogEntry* log = data->contention_log;
259     uint64_t wait_time = data->wait_time.LoadRelaxed();
260     uint32_t contention_count = data->contention_count.LoadRelaxed();
261     if (contention_count == 0) {
262       os << "never contended";
263     } else {
264       os << "contended " << contention_count
265          << " total wait of contender " << PrettyDuration(wait_time)
266          << " average " << PrettyDuration(wait_time / contention_count);
267       SafeMap<uint64_t, size_t> most_common_blocker;
268       SafeMap<uint64_t, size_t> most_common_blocked;
269       for (size_t i = 0; i < kContentionLogSize; ++i) {
270         uint64_t blocked_tid = log[i].blocked_tid;
271         uint64_t owner_tid = log[i].owner_tid;
272         uint32_t count = log[i].count.LoadRelaxed();
273         if (count > 0) {
274           auto it = most_common_blocked.find(blocked_tid);
275           if (it != most_common_blocked.end()) {
276             most_common_blocked.Overwrite(blocked_tid, it->second + count);
277           } else {
278             most_common_blocked.Put(blocked_tid, count);
279           }
280           it = most_common_blocker.find(owner_tid);
281           if (it != most_common_blocker.end()) {
282             most_common_blocker.Overwrite(owner_tid, it->second + count);
283           } else {
284             most_common_blocker.Put(owner_tid, count);
285           }
286         }
287       }
288       uint64_t max_tid = 0;
289       size_t max_tid_count = 0;
290       for (const auto& pair : most_common_blocked) {
291         if (pair.second > max_tid_count) {
292           max_tid = pair.first;
293           max_tid_count = pair.second;
294         }
295       }
296       if (max_tid != 0) {
297         os << " sample shows most blocked tid=" << max_tid;
298       }
299       max_tid = 0;
300       max_tid_count = 0;
301       for (const auto& pair : most_common_blocker) {
302         if (pair.second > max_tid_count) {
303           max_tid = pair.first;
304           max_tid_count = pair.second;
305         }
306       }
307       if (max_tid != 0) {
308         os << " sample shows tid=" << max_tid << " owning during this time";
309       }
310     }
311   }
312 }
313 
314 
315 Mutex::Mutex(const char* name, LockLevel level, bool recursive)
316     : BaseMutex(name, level), recursive_(recursive), recursion_count_(0) {
317 #if ART_USE_FUTEXES
318   DCHECK_EQ(0, state_.LoadRelaxed());
319   DCHECK_EQ(0, num_contenders_.LoadRelaxed());
320 #else
321   CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, nullptr));
322 #endif
323   exclusive_owner_ = 0;
324 }
325 
326 // Helper to ignore the lock requirement.
327 static bool IsShuttingDown() NO_THREAD_SAFETY_ANALYSIS {
328   Runtime* runtime = Runtime::Current();
329   return runtime == nullptr || runtime->IsShuttingDownLocked();
330 }
331 
332 Mutex::~Mutex() {
333   bool shutting_down = IsShuttingDown();
334 #if ART_USE_FUTEXES
335   if (state_.LoadRelaxed() != 0) {
336     LOG(shutting_down ? WARNING : FATAL) << "destroying mutex with owner: " << exclusive_owner_;
337   } else {
338     if (exclusive_owner_ != 0) {
339       LOG(shutting_down ? WARNING : FATAL) << "unexpectedly found an owner on unlocked mutex "
340                                            << name_;
341     }
342     if (num_contenders_.LoadSequentiallyConsistent() != 0) {
343       LOG(shutting_down ? WARNING : FATAL) << "unexpectedly found a contender on mutex " << name_;
344     }
345   }
346 #else
347   // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
348   // may still be using locks.
349   int rc = pthread_mutex_destroy(&mutex_);
350   if (rc != 0) {
351     errno = rc;
352     // TODO: should we just not log at all if shutting down? this could be the logging mutex!
353     MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
354     PLOG(shutting_down ? WARNING : FATAL) << "pthread_mutex_destroy failed for " << name_;
355   }
356 #endif
357 }
358 
359 void Mutex::ExclusiveLock(Thread* self) {
360   DCHECK(self == nullptr || self == Thread::Current());
361   if (kDebugLocking && !recursive_) {
362     AssertNotHeld(self);
363   }
364   if (!recursive_ || !IsExclusiveHeld(self)) {
365 #if ART_USE_FUTEXES
366     bool done = false;
367     do {
368       int32_t cur_state = state_.LoadRelaxed();
369       if (LIKELY(cur_state == 0)) {
370         // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
371         done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
372       } else {
373         // Failed to acquire, hang up.
374         ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
375         num_contenders_++;
376         if (futex(state_.Address(), FUTEX_WAIT, 1, nullptr, nullptr, 0) != 0) {
377           // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
378           // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
379           if ((errno != EAGAIN) && (errno != EINTR)) {
380             PLOG(FATAL) << "futex wait failed for " << name_;
381           }
382         }
383         num_contenders_--;
384       }
385     } while (!done);
386     DCHECK_EQ(state_.LoadRelaxed(), 1);
387 #else
388     CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
389 #endif
390     DCHECK_EQ(exclusive_owner_, 0U);
391     exclusive_owner_ = SafeGetTid(self);
392     RegisterAsLocked(self);
393   }
394   recursion_count_++;
395   if (kDebugLocking) {
396     CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
397         << name_ << " " << recursion_count_;
398     AssertHeld(self);
399   }
400 }
401 
402 bool Mutex::ExclusiveTryLock(Thread* self) {
403   DCHECK(self == nullptr || self == Thread::Current());
404   if (kDebugLocking && !recursive_) {
405     AssertNotHeld(self);
406   }
407   if (!recursive_ || !IsExclusiveHeld(self)) {
408 #if ART_USE_FUTEXES
409     bool done = false;
410     do {
411       int32_t cur_state = state_.LoadRelaxed();
412       if (cur_state == 0) {
413         // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
414         done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
415       } else {
416         return false;
417       }
418     } while (!done);
419     DCHECK_EQ(state_.LoadRelaxed(), 1);
420 #else
421     int result = pthread_mutex_trylock(&mutex_);
422     if (result == EBUSY) {
423       return false;
424     }
425     if (result != 0) {
426       errno = result;
427       PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
428     }
429 #endif
430     DCHECK_EQ(exclusive_owner_, 0U);
431     exclusive_owner_ = SafeGetTid(self);
432     RegisterAsLocked(self);
433   }
434   recursion_count_++;
435   if (kDebugLocking) {
436     CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
437         << name_ << " " << recursion_count_;
438     AssertHeld(self);
439   }
440   return true;
441 }
442 
443 void Mutex::ExclusiveUnlock(Thread* self) {
444   if (kIsDebugBuild && self != nullptr && self != Thread::Current()) {
445     std::string name1 = "<null>";
446     std::string name2 = "<null>";
447     if (self != nullptr) {
448       self->GetThreadName(name1);
449     }
450     if (Thread::Current() != nullptr) {
451       Thread::Current()->GetThreadName(name2);
452     }
453     LOG(FATAL) << GetName() << " level=" << level_ << " self=" << name1
454                << " Thread::Current()=" << name2;
455   }
456   AssertHeld(self);
457   DCHECK_NE(exclusive_owner_, 0U);
458   recursion_count_--;
459   if (!recursive_ || recursion_count_ == 0) {
460     if (kDebugLocking) {
461       CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: "
462           << name_ << " " << recursion_count_;
463     }
464     RegisterAsUnlocked(self);
465 #if ART_USE_FUTEXES
466     bool done = false;
467     do {
468       int32_t cur_state = state_.LoadRelaxed();
469       if (LIKELY(cur_state == 1)) {
470         // We're no longer the owner.
471         exclusive_owner_ = 0;
472         // Change state to 0 and impose load/store ordering appropriate for lock release.
473         // Note, the relaxed loads below mustn't reorder before the CompareExchange.
474         // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
475         // a status bit into the state on contention.
476         done =  state_.CompareExchangeWeakSequentiallyConsistent(cur_state, 0 /* new state */);
477         if (LIKELY(done)) {  // Spurious fail?
478           // Wake a contender.
479           if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
480             futex(state_.Address(), FUTEX_WAKE, 1, nullptr, nullptr, 0);
481           }
482         }
483       } else {
484         // Logging acquires the logging lock, avoid infinite recursion in that case.
485         if (this != Locks::logging_lock_) {
486           LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
487         } else {
488           LogMessage::LogLine(__FILE__, __LINE__, INTERNAL_FATAL,
489                               StringPrintf("Unexpected state_ %d in unlock for %s",
490                                            cur_state, name_).c_str());
491           _exit(1);
492         }
493       }
494     } while (!done);
495 #else
496     exclusive_owner_ = 0;
497     CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
498 #endif
499   }
500 }
501 
502 void Mutex::Dump(std::ostream& os) const {
503   os << (recursive_ ? "recursive " : "non-recursive ")
504       << name_
505       << " level=" << static_cast<int>(level_)
506       << " rec=" << recursion_count_
507       << " owner=" << GetExclusiveOwnerTid() << " ";
508   DumpContention(os);
509 }
510 
511 std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
512   mu.Dump(os);
513   return os;
514 }
515 
516 ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
517     : BaseMutex(name, level)
518 #if ART_USE_FUTEXES
519     , state_(0), num_pending_readers_(0), num_pending_writers_(0)
520 #endif
521 {  // NOLINT(whitespace/braces)
522 #if !ART_USE_FUTEXES
523   CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr));
524 #endif
525   exclusive_owner_ = 0;
526 }
527 
528 ReaderWriterMutex::~ReaderWriterMutex() {
529 #if ART_USE_FUTEXES
530   CHECK_EQ(state_.LoadRelaxed(), 0);
531   CHECK_EQ(exclusive_owner_, 0U);
532   CHECK_EQ(num_pending_readers_.LoadRelaxed(), 0);
533   CHECK_EQ(num_pending_writers_.LoadRelaxed(), 0);
534 #else
535   // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
536   // may still be using locks.
537   int rc = pthread_rwlock_destroy(&rwlock_);
538   if (rc != 0) {
539     errno = rc;
540     // TODO: should we just not log at all if shutting down? this could be the logging mutex!
541     MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
542     Runtime* runtime = Runtime::Current();
543     bool shutting_down = runtime == nullptr || runtime->IsShuttingDownLocked();
544     PLOG(shutting_down ? WARNING : FATAL) << "pthread_rwlock_destroy failed for " << name_;
545   }
546 #endif
547 }
548 
549 void ReaderWriterMutex::ExclusiveLock(Thread* self) {
550   DCHECK(self == nullptr || self == Thread::Current());
551   AssertNotExclusiveHeld(self);
552 #if ART_USE_FUTEXES
553   bool done = false;
554   do {
555     int32_t cur_state = state_.LoadRelaxed();
556     if (LIKELY(cur_state == 0)) {
557       // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
558       done =  state_.CompareExchangeWeakAcquire(0 /* cur_state*/, -1 /* new state */);
559     } else {
560       // Failed to acquire, hang up.
561       ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
562       ++num_pending_writers_;
563       if (futex(state_.Address(), FUTEX_WAIT, cur_state, nullptr, nullptr, 0) != 0) {
564         // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
565         // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
566         if ((errno != EAGAIN) && (errno != EINTR)) {
567           PLOG(FATAL) << "futex wait failed for " << name_;
568         }
569       }
570       --num_pending_writers_;
571     }
572   } while (!done);
573   DCHECK_EQ(state_.LoadRelaxed(), -1);
574 #else
575   CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_));
576 #endif
577   DCHECK_EQ(exclusive_owner_, 0U);
578   exclusive_owner_ = SafeGetTid(self);
579   RegisterAsLocked(self);
580   AssertExclusiveHeld(self);
581 }
582 
583 void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
584   DCHECK(self == nullptr || self == Thread::Current());
585   AssertExclusiveHeld(self);
586   RegisterAsUnlocked(self);
587   DCHECK_NE(exclusive_owner_, 0U);
588 #if ART_USE_FUTEXES
589   bool done = false;
590   do {
591     int32_t cur_state = state_.LoadRelaxed();
592     if (LIKELY(cur_state == -1)) {
593       // We're no longer the owner.
594       exclusive_owner_ = 0;
595       // Change state from -1 to 0 and impose load/store ordering appropriate for lock release.
596       // Note, the relaxed loads below mustn't reorder before the CompareExchange.
597       // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
598       // a status bit into the state on contention.
599       done =  state_.CompareExchangeWeakSequentiallyConsistent(-1 /* cur_state*/, 0 /* new state */);
600       if (LIKELY(done)) {  // Weak CAS may fail spuriously.
601         // Wake any waiters.
602         if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 ||
603                      num_pending_writers_.LoadRelaxed() > 0)) {
604           futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
605         }
606       }
607     } else {
608       LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_;
609     }
610   } while (!done);
611 #else
612   exclusive_owner_ = 0;
613   CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
614 #endif
615 }
616 
617 #if HAVE_TIMED_RWLOCK
618 bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
619   DCHECK(self == nullptr || self == Thread::Current());
620 #if ART_USE_FUTEXES
621   bool done = false;
622   timespec end_abs_ts;
623   InitTimeSpec(true, CLOCK_MONOTONIC, ms, ns, &end_abs_ts);
624   do {
625     int32_t cur_state = state_.LoadRelaxed();
626     if (cur_state == 0) {
627       // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
628       done =  state_.CompareExchangeWeakAcquire(0 /* cur_state */, -1 /* new state */);
629     } else {
630       // Failed to acquire, hang up.
631       timespec now_abs_ts;
632       InitTimeSpec(true, CLOCK_MONOTONIC, 0, 0, &now_abs_ts);
633       timespec rel_ts;
634       if (ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) {
635         return false;  // Timed out.
636       }
637       ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
638       ++num_pending_writers_;
639       if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, nullptr, 0) != 0) {
640         if (errno == ETIMEDOUT) {
641           --num_pending_writers_;
642           return false;  // Timed out.
643         } else if ((errno != EAGAIN) && (errno != EINTR)) {
644           // EAGAIN and EINTR both indicate a spurious failure,
645           // recompute the relative timeout from now and try again.
646           // We don't use TEMP_FAILURE_RETRY so we can recompute rel_ts.
647           PLOG(FATAL) << "timed futex wait failed for " << name_;
648         }
649       }
650       --num_pending_writers_;
651     }
652   } while (!done);
653 #else
654   timespec ts;
655   InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts);
656   int result = pthread_rwlock_timedwrlock(&rwlock_, &ts);
657   if (result == ETIMEDOUT) {
658     return false;
659   }
660   if (result != 0) {
661     errno = result;
662     PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_;
663   }
664 #endif
665   exclusive_owner_ = SafeGetTid(self);
666   RegisterAsLocked(self);
667   AssertSharedHeld(self);
668   return true;
669 }
670 #endif
671 
672 #if ART_USE_FUTEXES
673 void ReaderWriterMutex::HandleSharedLockContention(Thread* self, int32_t cur_state) {
674   // Owner holds it exclusively, hang up.
675   ScopedContentionRecorder scr(this, GetExclusiveOwnerTid(), SafeGetTid(self));
676   ++num_pending_readers_;
677   if (futex(state_.Address(), FUTEX_WAIT, cur_state, nullptr, nullptr, 0) != 0) {
678     if (errno != EAGAIN) {
679       PLOG(FATAL) << "futex wait failed for " << name_;
680     }
681   }
682   --num_pending_readers_;
683 }
684 #endif
685 
686 bool ReaderWriterMutex::SharedTryLock(Thread* self) {
687   DCHECK(self == nullptr || self == Thread::Current());
688 #if ART_USE_FUTEXES
689   bool done = false;
690   do {
691     int32_t cur_state = state_.LoadRelaxed();
692     if (cur_state >= 0) {
693       // Add as an extra reader and impose load/store ordering appropriate for lock acquisition.
694       done =  state_.CompareExchangeWeakAcquire(cur_state, cur_state + 1);
695     } else {
696       // Owner holds it exclusively.
697       return false;
698     }
699   } while (!done);
700 #else
701   int result = pthread_rwlock_tryrdlock(&rwlock_);
702   if (result == EBUSY) {
703     return false;
704   }
705   if (result != 0) {
706     errno = result;
707     PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
708   }
709 #endif
710   RegisterAsLocked(self);
711   AssertSharedHeld(self);
712   return true;
713 }
714 
715 bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
716   DCHECK(self == nullptr || self == Thread::Current());
717   bool result;
718   if (UNLIKELY(self == nullptr)) {  // Handle unattached threads.
719     result = IsExclusiveHeld(self);  // TODO: a better best effort here.
720   } else {
721     result = (self->GetHeldMutex(level_) == this);
722   }
723   return result;
724 }
725 
726 void ReaderWriterMutex::Dump(std::ostream& os) const {
727   os << name_
728       << " level=" << static_cast<int>(level_)
729       << " owner=" << GetExclusiveOwnerTid()
730 #if ART_USE_FUTEXES
731       << " state=" << state_.LoadSequentiallyConsistent()
732       << " num_pending_writers=" << num_pending_writers_.LoadSequentiallyConsistent()
733       << " num_pending_readers=" << num_pending_readers_.LoadSequentiallyConsistent()
734 #endif
735       << " ";
736   DumpContention(os);
737 }
738 
739 std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) {
740   mu.Dump(os);
741   return os;
742 }
743 
744 std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu) {
745   mu.Dump(os);
746   return os;
747 }
748 
749 ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
750     : name_(name), guard_(guard) {
751 #if ART_USE_FUTEXES
752   DCHECK_EQ(0, sequence_.LoadRelaxed());
753   num_waiters_ = 0;
754 #else
755   pthread_condattr_t cond_attrs;
756   CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs));
757 #if !defined(__APPLE__)
758   // Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock.
759   CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC));
760 #endif
761   CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs));
762 #endif
763 }
764 
765 ConditionVariable::~ConditionVariable() {
766 #if ART_USE_FUTEXES
767   if (num_waiters_ != 0) {
768     Runtime* runtime = Runtime::Current();
769     bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
770     LOG(shutting_down ? WARNING : FATAL) << "ConditionVariable::~ConditionVariable for " << name_
771         << " called with " << num_waiters_ << " waiters.";
772   }
773 #else
774   // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
775   // may still be using condition variables.
776   int rc = pthread_cond_destroy(&cond_);
777   if (rc != 0) {
778     errno = rc;
779     MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
780     Runtime* runtime = Runtime::Current();
781     bool shutting_down = (runtime == nullptr) || runtime->IsShuttingDownLocked();
782     PLOG(shutting_down ? WARNING : FATAL) << "pthread_cond_destroy failed for " << name_;
783   }
784 #endif
785 }
786 
787 void ConditionVariable::Broadcast(Thread* self) {
788   DCHECK(self == nullptr || self == Thread::Current());
789   // TODO: enable below, there's a race in thread creation that causes false failures currently.
790   // guard_.AssertExclusiveHeld(self);
791   DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
792 #if ART_USE_FUTEXES
793   if (num_waiters_ > 0) {
794     sequence_++;  // Indicate the broadcast occurred.
795     bool done = false;
796     do {
797       int32_t cur_sequence = sequence_.LoadRelaxed();
798       // Requeue waiters onto the mutex. The waiter holds the contender count on the mutex high,
799       // ensuring that mutex unlocks will awaken the requeued waiter thread.
800       done = futex(sequence_.Address(), FUTEX_CMP_REQUEUE, 0,
801                    reinterpret_cast<const timespec*>(std::numeric_limits<int32_t>::max()),
802                    guard_.state_.Address(), cur_sequence) != -1;
803       if (!done) {
804         if (errno != EAGAIN) {
805           PLOG(FATAL) << "futex cmp requeue failed for " << name_;
806         }
807       }
808     } while (!done);
809   }
810 #else
811   CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
812 #endif
813 }
814 
815 void ConditionVariable::Signal(Thread* self) {
816   DCHECK(self == nullptr || self == Thread::Current());
817   guard_.AssertExclusiveHeld(self);
818 #if ART_USE_FUTEXES
819   if (num_waiters_ > 0) {
820     sequence_++;  // Indicate a signal occurred.
821     // Futex wake one waiter, who will then come in and contend on the mutex. It'd be nice to
822     // requeue them to avoid this; however, requeueing can only move all waiters.
823     int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, nullptr, nullptr, 0);
824     // Check that something was woken, or else we changed sequence_ before they had a chance to wait.
825     CHECK((num_woken == 0) || (num_woken == 1));
826   }
827 #else
828   CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
829 #endif
830 }
831 
832 void ConditionVariable::Wait(Thread* self) {
833   guard_.CheckSafeToWait(self);
834   WaitHoldingLocks(self);
835 }
836 
837 void ConditionVariable::WaitHoldingLocks(Thread* self) {
838   DCHECK(self == nullptr || self == Thread::Current());
839   guard_.AssertExclusiveHeld(self);
840   unsigned int old_recursion_count = guard_.recursion_count_;
841 #if ART_USE_FUTEXES
842   num_waiters_++;
843   // Ensure the Mutex is contended so that requeued threads are awoken.
844   guard_.num_contenders_++;
845   guard_.recursion_count_ = 1;
846   int32_t cur_sequence = sequence_.LoadRelaxed();
847   guard_.ExclusiveUnlock(self);
848   if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, nullptr, nullptr, 0) != 0) {
849     // Futex failed, check it is an expected error.
850     // EAGAIN == EWOULDBLOCK, so we let the caller try again.
851     // EINTR implies a signal was sent to this thread.
852     if ((errno != EINTR) && (errno != EAGAIN)) {
853       PLOG(FATAL) << "futex wait failed for " << name_;
854     }
855   }
856   if (self != nullptr) {
857     JNIEnvExt* const env = self->GetJniEnv();
858     if (UNLIKELY(env != nullptr && env->runtime_deleted)) {
859       CHECK(self->IsDaemon());
860       // If the runtime has been deleted, then we cannot proceed. Just sleep forever. This may
861       // occur for user daemon threads that get a spurious wakeup. This occurs for test 132 with
862       // --host and --gdb.
863       // After we wake up, the runtime may have been shutdown, which means that this condition may
864       // have been deleted. It is not safe to retry the wait.
865       SleepForever();
866     }
867   }
868   guard_.ExclusiveLock(self);
869   CHECK_GE(num_waiters_, 0);
870   num_waiters_--;
871   // We awoke and so no longer require wakeups from the guard_'s unlock.
872   CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
873   guard_.num_contenders_--;
874 #else
875   uint64_t old_owner = guard_.exclusive_owner_;
876   guard_.exclusive_owner_ = 0;
877   guard_.recursion_count_ = 0;
878   CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
879   guard_.exclusive_owner_ = old_owner;
880 #endif
881   guard_.recursion_count_ = old_recursion_count;
882 }
883 
884 bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
885   DCHECK(self == nullptr || self == Thread::Current());
886   bool timed_out = false;
887   guard_.AssertExclusiveHeld(self);
888   guard_.CheckSafeToWait(self);
889   unsigned int old_recursion_count = guard_.recursion_count_;
890 #if ART_USE_FUTEXES
891   timespec rel_ts;
892   InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts);
893   num_waiters_++;
894   // Ensure the Mutex is contended so that requeued threads are awoken.
895   guard_.num_contenders_++;
896   guard_.recursion_count_ = 1;
897   int32_t cur_sequence = sequence_.LoadRelaxed();
898   guard_.ExclusiveUnlock(self);
899   if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, nullptr, 0) != 0) {
900     if (errno == ETIMEDOUT) {
901       // Timed out, we're done.
902       timed_out = true;
903     } else if ((errno == EAGAIN) || (errno == EINTR)) {
904       // A signal or ConditionVariable::Signal/Broadcast has come in.
905     } else {
906       PLOG(FATAL) << "timed futex wait failed for " << name_;
907     }
908   }
909   guard_.ExclusiveLock(self);
910   CHECK_GE(num_waiters_, 0);
911   num_waiters_--;
912   // We awoke and so no longer require wakeups from the guard_'s unlock.
913   CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
914   guard_.num_contenders_--;
915 #else
916 #if !defined(__APPLE__)
917   int clock = CLOCK_MONOTONIC;
918 #else
919   int clock = CLOCK_REALTIME;
920 #endif
921   uint64_t old_owner = guard_.exclusive_owner_;
922   guard_.exclusive_owner_ = 0;
923   guard_.recursion_count_ = 0;
924   timespec ts;
925   InitTimeSpec(true, clock, ms, ns, &ts);
926   int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts));
927   if (rc == ETIMEDOUT) {
928     timed_out = true;
929   } else if (rc != 0) {
930     errno = rc;
931     PLOG(FATAL) << "TimedWait failed for " << name_;
932   }
933   guard_.exclusive_owner_ = old_owner;
934 #endif
935   guard_.recursion_count_ = old_recursion_count;
936   return timed_out;
937 }
938 
939 void Locks::Init() {
940   if (logging_lock_ != nullptr) {
941     // Already initialized.
942     if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
943       DCHECK(modify_ldt_lock_ != nullptr);
944     } else {
945       DCHECK(modify_ldt_lock_ == nullptr);
946     }
947     DCHECK(abort_lock_ != nullptr);
948     DCHECK(alloc_tracker_lock_ != nullptr);
949     DCHECK(allocated_monitor_ids_lock_ != nullptr);
950     DCHECK(allocated_thread_ids_lock_ != nullptr);
951     DCHECK(breakpoint_lock_ != nullptr);
952     DCHECK(classlinker_classes_lock_ != nullptr);
953     DCHECK(deoptimization_lock_ != nullptr);
954     DCHECK(heap_bitmap_lock_ != nullptr);
955     DCHECK(oat_file_manager_lock_ != nullptr);
956     DCHECK(host_dlopen_handles_lock_ != nullptr);
957     DCHECK(intern_table_lock_ != nullptr);
958     DCHECK(jni_libraries_lock_ != nullptr);
959     DCHECK(logging_lock_ != nullptr);
960     DCHECK(mutator_lock_ != nullptr);
961     DCHECK(profiler_lock_ != nullptr);
962     DCHECK(thread_list_lock_ != nullptr);
963     DCHECK(thread_suspend_count_lock_ != nullptr);
964     DCHECK(trace_lock_ != nullptr);
965     DCHECK(unexpected_signal_lock_ != nullptr);
966     DCHECK(lambda_table_lock_ != nullptr);
967   } else {
968     // Create global locks in level order from highest lock level to lowest.
969     LockLevel current_lock_level = kInstrumentEntrypointsLock;
970     DCHECK(instrument_entrypoints_lock_ == nullptr);
971     instrument_entrypoints_lock_ = new Mutex("instrument entrypoint lock", current_lock_level);
972 
973     #define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
974       if (new_level >= current_lock_level) { \
975         /* Do not use CHECKs or FATAL here, abort_lock_ is not set up yet. */ \
976         fprintf(stderr, "New lock level %d is not less than current level %d\n", \
977                 new_level, current_lock_level); \
978         exit(1); \
979       } \
980       current_lock_level = new_level;
981 
982     UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
983     DCHECK(mutator_lock_ == nullptr);
984     mutator_lock_ = new MutatorMutex("mutator lock", current_lock_level);
985 
986     UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
987     DCHECK(heap_bitmap_lock_ == nullptr);
988     heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);
989 
990     UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
991     DCHECK(trace_lock_ == nullptr);
992     trace_lock_ = new Mutex("trace lock", current_lock_level);
993 
994     UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
995     DCHECK(runtime_shutdown_lock_ == nullptr);
996     runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);
997 
998     UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
999     DCHECK(profiler_lock_ == nullptr);
1000     profiler_lock_ = new Mutex("profiler lock", current_lock_level);
1001 
1002     UPDATE_CURRENT_LOCK_LEVEL(kDeoptimizationLock);
1003     DCHECK(deoptimization_lock_ == nullptr);
1004     deoptimization_lock_ = new Mutex("Deoptimization lock", current_lock_level);
1005 
1006     UPDATE_CURRENT_LOCK_LEVEL(kAllocTrackerLock);
1007     DCHECK(alloc_tracker_lock_ == nullptr);
1008     alloc_tracker_lock_ = new Mutex("AllocTracker lock", current_lock_level);
1009 
1010     UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
1011     DCHECK(thread_list_lock_ == nullptr);
1012     thread_list_lock_ = new Mutex("thread list lock", current_lock_level);
1013 
1014     UPDATE_CURRENT_LOCK_LEVEL(kJniLoadLibraryLock);
1015     DCHECK(jni_libraries_lock_ == nullptr);
1016     jni_libraries_lock_ = new Mutex("JNI shared libraries map lock", current_lock_level);
1017 
1018     UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
1019     DCHECK(breakpoint_lock_ == nullptr);
1020     breakpoint_lock_ = new ReaderWriterMutex("breakpoint lock", current_lock_level);
1021 
1022     UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
1023     DCHECK(classlinker_classes_lock_ == nullptr);
1024     classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
1025                                                       current_lock_level);
1026 
1027     UPDATE_CURRENT_LOCK_LEVEL(kMonitorPoolLock);
1028     DCHECK(allocated_monitor_ids_lock_ == nullptr);
1029     allocated_monitor_ids_lock_ =  new Mutex("allocated monitor ids lock", current_lock_level);
1030 
1031     UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
1032     DCHECK(allocated_thread_ids_lock_ == nullptr);
1033     allocated_thread_ids_lock_ =  new Mutex("allocated thread ids lock", current_lock_level);
1034 
1035     if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
1036       UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
1037       DCHECK(modify_ldt_lock_ == nullptr);
1038       modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
1039     }
1040 
1041     UPDATE_CURRENT_LOCK_LEVEL(kOatFileManagerLock);
1042     DCHECK(oat_file_manager_lock_ == nullptr);
1043     oat_file_manager_lock_ = new ReaderWriterMutex("OatFile manager lock", current_lock_level);
1044 
1045     UPDATE_CURRENT_LOCK_LEVEL(kHostDlOpenHandlesLock);
1046     DCHECK(host_dlopen_handles_lock_ == nullptr);
1047     host_dlopen_handles_lock_ = new Mutex("host dlopen handles lock", current_lock_level);
1048 
1049     UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
1050     DCHECK(intern_table_lock_ == nullptr);
1051     intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);
1052 
1053     UPDATE_CURRENT_LOCK_LEVEL(kReferenceProcessorLock);
1054     DCHECK(reference_processor_lock_ == nullptr);
1055     reference_processor_lock_ = new Mutex("ReferenceProcessor lock", current_lock_level);
1056 
1057     UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueClearedReferencesLock);
1058     DCHECK(reference_queue_cleared_references_lock_ == nullptr);
1059     reference_queue_cleared_references_lock_ = new Mutex("ReferenceQueue cleared references lock", current_lock_level);
1060 
1061     UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueWeakReferencesLock);
1062     DCHECK(reference_queue_weak_references_lock_ == nullptr);
1063     reference_queue_weak_references_lock_ = new Mutex("ReferenceQueue weak references lock", current_lock_level);
1064 
1065     UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueFinalizerReferencesLock);
1066     DCHECK(reference_queue_finalizer_references_lock_ == nullptr);
1067     reference_queue_finalizer_references_lock_ = new Mutex("ReferenceQueue finalizer references lock", current_lock_level);
1068 
1069     UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueuePhantomReferencesLock);
1070     DCHECK(reference_queue_phantom_references_lock_ == nullptr);
1071     reference_queue_phantom_references_lock_ = new Mutex("ReferenceQueue phantom references lock", current_lock_level);
1072 
1073     UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueSoftReferencesLock);
1074     DCHECK(reference_queue_soft_references_lock_ == nullptr);
1075     reference_queue_soft_references_lock_ = new Mutex("ReferenceQueue soft references lock", current_lock_level);
1076 
1077     UPDATE_CURRENT_LOCK_LEVEL(kLambdaTableLock);
1078     DCHECK(lambda_table_lock_ == nullptr);
1079     lambda_table_lock_ = new Mutex("lambda table lock", current_lock_level);
1080 
1081     UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
1082     DCHECK(abort_lock_ == nullptr);
1083     abort_lock_ = new Mutex("abort lock", current_lock_level, true);
1084 
1085     UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock);
1086     DCHECK(thread_suspend_count_lock_ == nullptr);
1087     thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level);
1088 
1089     UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock);
1090     DCHECK(unexpected_signal_lock_ == nullptr);
1091     unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);
1092 
1093     UPDATE_CURRENT_LOCK_LEVEL(kMemMapsLock);
1094     DCHECK(mem_maps_lock_ == nullptr);
1095     mem_maps_lock_ = new Mutex("mem maps lock", current_lock_level);
1096 
1097     UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
1098     DCHECK(logging_lock_ == nullptr);
1099     logging_lock_ = new Mutex("logging lock", current_lock_level, true);
1100 
1101     #undef UPDATE_CURRENT_LOCK_LEVEL
1102 
1103     InitConditions();
1104   }
1105 }
1106 
1107 void Locks::InitConditions() {
1108   thread_exit_cond_ = new ConditionVariable("thread exit condition variable", *thread_list_lock_);
1109 }
1110 
1111 }  // namespace art
1112