• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 
17 #ifndef ART_RUNTIME_BASE_LOCKS_H_
18 #define ART_RUNTIME_BASE_LOCKS_H_
19 
20 #include <stdint.h>
21 
22 #include <iosfwd>
23 #include <vector>
24 
25 #include "base/atomic.h"
26 #include "base/macros.h"
27 
28 namespace art HIDDEN {
29 
30 class BaseMutex;
31 class ConditionVariable;
32 class SHARED_LOCKABLE ReaderWriterMutex;
33 class SHARED_LOCKABLE MutatorMutex;
34 class LOCKABLE Mutex;
35 class Thread;
36 
// LockLevel is used to impose a lock hierarchy [1] where acquisition of a Mutex at a higher or
// equal level to a lock a thread holds is invalid. The lock hierarchy achieves a cycle-free
// partial ordering and thereby causes deadlock situations to fail checks.
//
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel : uint8_t {
  kLoggingLock = 0,
  kSwapMutexesLock,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kJniIdLock,
  kNativeDebugInterfaceLock,
  kSignalHandlingLock,
  // A generic lock level for mutexes that should not allow any additional mutexes to be gained
  // after acquiring it.
  kGenericBottomLock,
  // Tracks the second acquisition at the same lock level for kThreadWaitLock. This is an exception
  // to the normal lock ordering, used to implement Monitor::Wait - while holding one kThreadWait
  // level lock, it is permitted to acquire a second one - with internal safeguards to ensure that
  // the second lock acquisition does not result in deadlock. This is implemented in the lock
  // order by treating the second acquisition of a kThreadWaitLock as a kThreadWaitWakeLock
  // acquisition. Thus, acquiring kThreadWaitWakeLock requires holding kThreadWaitLock. This entry
  // is here near the bottom of the hierarchy because other locks should not be
  // acquired while it is held. kThreadWaitLock cannot be moved here because GC
  // activity acquires locks while holding the wait lock.
  kThreadWaitWakeLock,
  kJdwpAdbStateLock,
  kJdwpSocketLock,
  kRegionSpaceRegionLock,
  kMarkSweepMarkStackLock,
  // Can be held while GC related work is done, and thus must be above kMarkSweepMarkStackLock
  kThreadWaitLock,
  kCHALock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kAllocSpaceLock,
  kTaggingLockLevel,
  kJitCodeCacheLock,
  kTransactionLogLock,
  kCustomTlsLock,
  kJniFunctionTableLock,
  kJniWeakGlobalsLock,
  kJniGlobalsLock,
  kReferenceQueueSoftReferencesLock,
  kReferenceQueuePhantomReferencesLock,
  kReferenceQueueFinalizerReferencesLock,
  kReferenceQueueWeakReferencesLock,
  kReferenceQueueClearedReferencesLock,
  kReferenceProcessorLock,
  kJitDebugInterfaceLock,
  kBumpPointerSpaceBlockLock,
  kArenaPoolLock,
  kInternTableLock,
  kOatFileSecondaryLookupLock,
  kHostDlOpenHandlesLock,
  kVerifierDepsLock,
  kOatFileManagerLock,
  kTracingUniqueMethodsLock,
  kTracingStreamingLock,
  kJniLoadLibraryLock,
  kClassLoaderClassesLock,
  kDefaultMutexLevel,
  kDexCacheLock,
  kDexLock,
  kMarkSweepLargeObjectLock,
  kJdwpObjectRegistryLock,
  kModifyLdtLock,
  kAllocatedThreadIdsLock,
  kMonitorPoolLock,
  kClassLinkerClassesLock,  // TODO rename.
  kSubtypeCheckLock,
  kBreakpointLock,
  kMonitorListLock,
  kThreadListLock,
  kAllocTrackerLock,
  kDeoptimizationLock,
  kProfilerLock,
  kJdwpShutdownLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeThreadPoolLock,
  kRuntimeShutdownLock,
  kTraceLock,
  kHeapBitmapLock,
  // This is a generic lock level for a lock meant to be gained after having a
  // monitor lock.
  kPostMonitorLock,
  kMonitorLock,
  // This is a generic lock level for a top-level lock meant to be gained after having the
  // mutator_lock_.
  kPostMutatorTopLockLevel,

  kMutatorLock,
  kInstrumentEntrypointsLock,
  // This is a generic lock level for a top-level lock meant to be gained after having the
  // UserCodeSuspensionLock.
  kPostUserCodeSuspensionTopLevelLock,
  kUserCodeSuspensionLock,
  kZygoteCreationLock,

  // The highest valid lock level. Use this for locks that should only be acquired with no
  // other locks held. Since this is the highest lock level we also allow it to be held even if the
  // runtime or current thread is not fully set-up yet (for example during thread attach). Note that
  // this lock also has special behavior around the mutator_lock_. Since the mutator_lock_ is not
  // really a 'real' lock we allow this to be locked when the mutator_lock_ is held exclusive.
  // Furthermore, the mutator_lock_ may not be acquired in any form when a lock of this level is
  // held. Since the mutator_lock_ being held strong means that all other threads are suspended this
  // will prevent deadlocks while still allowing this lock level to function as a "highest" level.
  kTopLockLevel,

  kLockLevelCount  // Must come last.
};
152 EXPORT std::ostream& operator<<(std::ostream& os, LockLevel rhs);
153 
154 // For StartNoThreadSuspension and EndNoThreadSuspension.
155 class CAPABILITY("role") Role {
156  public:
Acquire()157   void Acquire() ACQUIRE() {}
Release()158   void Release() RELEASE() {}
159   const Role& operator!() const { return *this; }
160 };
161 
162 class Uninterruptible : public Role {
163 };
164 
165 // Global mutexes corresponding to the levels above.
166 class EXPORT Locks {
167  public:
168   static void Init();
169   static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.
170 
171   // Destroying various lock types can emit errors that vary depending upon
172   // whether the client (art::Runtime) is currently active.  Allow the client
173   // to set a callback that is used to check when it is acceptable to call
174   // Abort.  The default behavior is that the client *is not* able to call
175   // Abort if no callback is established.
176   using ClientCallback = bool();
177   static void SetClientCallback(ClientCallback* is_safe_to_call_abort_cb) NO_THREAD_SAFETY_ANALYSIS;
178   // Checks for whether it is safe to call Abort() without using locks.
179   static bool IsSafeToCallAbortRacy() NO_THREAD_SAFETY_ANALYSIS;
180 
181   // Add a mutex to expected_mutexes_on_weak_ref_access_.
182   static void AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
183   // Remove a mutex from expected_mutexes_on_weak_ref_access_.
184   static void RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
185   // Check if the given mutex is in expected_mutexes_on_weak_ref_access_.
186   static bool IsExpectedOnWeakRefAccess(BaseMutex* mutex);
187 
188   // Guards code that deals with user-code suspension. This mutex must be held when suspending or
189   // resuming threads with SuspendReason::kForUserCode. It may be held by a suspended thread, but
190   // only if the suspension is not due to SuspendReason::kForUserCode.
191   static Mutex* user_code_suspension_lock_;
192 
193   // Guards allocation entrypoint instrumenting.
194   static Mutex* instrument_entrypoints_lock_ ACQUIRED_AFTER(user_code_suspension_lock_);
195 
196   // A barrier is used to synchronize the GC/Debugger thread with mutator threads. When GC/Debugger
197   // thread wants to suspend all mutator threads, it needs to wait for all mutator threads to pass
198   // a barrier. Threads that are already suspended will get their barrier passed by the GC/Debugger
199   // thread; threads in the runnable state will pass the barrier when they transit to the suspended
200   // state. GC/Debugger thread will be woken up when all mutator threads are suspended.
201   //
202   // Thread suspension:
203   // mutator thread                                | GC/Debugger
204   //   .. running ..                               |   .. running ..
205   //   .. running ..                               | Request thread suspension by:
206   //   .. running ..                               |   - acquiring thread_suspend_count_lock_
207   //   .. running ..                               |   - incrementing Thread::suspend_count_ on
208   //   .. running ..                               |     all mutator threads
209   //   .. running ..                               |   - releasing thread_suspend_count_lock_
210   //   .. running ..                               | Block wait for all threads to pass a barrier
211   // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
212   // suspend code.                                 |   .. blocked ..
213   // Change state to kSuspended (pass the barrier) | Wake up when all threads pass the barrier
214   // x: Acquire thread_suspend_count_lock_         |   .. running ..
215   // while Thread::suspend_count_ > 0              |   .. running ..
216   //   - wait on Thread::resume_cond_              |   .. running ..
217   //     (releases thread_suspend_count_lock_)     |   .. running ..
218   //   .. waiting ..                               | Request thread resumption by:
219   //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
220   //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
221   //   .. waiting ..                               |     all mutator threads
222   //   .. waiting ..                               |   - notifying on Thread::resume_cond_
223   //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
224   // Release thread_suspend_count_lock_            |  .. running ..
225   // Change to kRunnable                           |  .. running ..
226   //  - this uses a CAS operation to ensure the    |  .. running ..
227   //    suspend request flag isn't raised as the   |  .. running ..
228   //    state is changed                           |  .. running ..
229   //  - if the CAS operation fails then goto x     |  .. running ..
230   //  .. running ..                                |  .. running ..
231   static MutatorMutex* mutator_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);
232 
233   // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
234   static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);
235 
236   // Guards shutdown of the runtime.
237   static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);
238 
239   // Runtime thread pool lock.
240   static Mutex* runtime_thread_pool_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);
241 
242   // Guards background profiler global state.
243   static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_thread_pool_lock_);
244 
245   // Guards trace (ie traceview) requests.
246   static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);
247 
248   // Guards debugger recent allocation records.
249   static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);
250 
251   // Guards updates to instrumentation to ensure mutual exclusion of
252   // events like deoptimization requests.
253   // TODO: improve name, perhaps instrumentation_update_lock_.
254   static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);
255 
256   // Guard the update of the SubtypeCheck data stores in each Class::status_ field.
257   // This lock is used in SubtypeCheck methods which are the interface for
258   // any SubtypeCheck-mutating methods.
259   // In Class::IsSubClass, the lock is not required since it does not update the SubtypeCheck data.
260   static Mutex* subtype_check_lock_ ACQUIRED_AFTER(deoptimization_lock_);
261 
262   // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
263   // attaching and detaching.
264   static Mutex* thread_list_lock_ ACQUIRED_AFTER(subtype_check_lock_);
265 
266   // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
267   static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);
268 
269   // Guards maintaining loading library data structures.
270   static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);
271 
272   // Guards breakpoints.
273   static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);
274 
275   // Guards lists of classes within the class linker.
276   static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);
277 
278   // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
279   // doesn't try to hold a higher level Mutex.
280   #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::classlinker_classes_lock_)
281 
282   static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
283 
284   // Guard the allocation/deallocation of thread ids.
285   static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);
286 
287   // Guards modification of the LDT on x86.
288   static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);
289 
290   static ReaderWriterMutex* dex_lock_ ACQUIRED_AFTER(modify_ldt_lock_);
291 
292   static Mutex* dex_cache_lock_ ACQUIRED_AFTER(dex_lock_);
293 
294   // Guards opened oat files in OatFileManager.
295   static ReaderWriterMutex* oat_file_manager_lock_ ACQUIRED_AFTER(dex_lock_);
296 
297   // Guards extra string entries for VerifierDeps.
298   static ReaderWriterMutex* verifier_deps_lock_ ACQUIRED_AFTER(oat_file_manager_lock_);
299 
300   // Guards dlopen_handles_ in DlOpenOatFile.
301   static Mutex* host_dlopen_handles_lock_ ACQUIRED_AFTER(verifier_deps_lock_);
302 
303   // Guards intern table.
304   static Mutex* intern_table_lock_ ACQUIRED_AFTER(host_dlopen_handles_lock_);
305 
306   // Guards reference processor.
307   static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);
308 
309   // Guards cleared references queue.
310   static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);
311 
312   // Guards weak references queue.
313   static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);
314 
315   // Guards finalizer references queue.
316   static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);
317 
318   // Guards phantom references queue.
319   static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);
320 
321   // Guards soft references queue.
322   static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);
323 
324   // Guard accesses to the JNI Global Reference table.
325   static ReaderWriterMutex* jni_globals_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);
326 
327   // Guard accesses to the JNI Weak Global Reference table.
328   static Mutex* jni_weak_globals_lock_ ACQUIRED_AFTER(jni_globals_lock_);
329 
330   // Guard accesses to the JNI function table override.
331   static Mutex* jni_function_table_lock_ ACQUIRED_AFTER(jni_weak_globals_lock_);
332 
333   // Guard accesses to the Thread::custom_tls_. We use this to allow the TLS of other threads to be
334   // read (the reader must hold the ThreadListLock or have some other way of ensuring the thread
335   // will not die in that case though). This is useful for (eg) the implementation of
336   // GetThreadLocalStorage.
337   static Mutex* custom_tls_lock_ ACQUIRED_AFTER(jni_function_table_lock_);
338 
339   // Guard access to any JIT data structure.
340   static Mutex* jit_lock_ ACQUIRED_AFTER(custom_tls_lock_);
341 
342   // Guards Class Hierarchy Analysis (CHA).
343   static Mutex* cha_lock_ ACQUIRED_AFTER(jit_lock_);
344 
345   // When declaring any Mutex add BOTTOM_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
346   // doesn't try to acquire a higher level Mutex. NB Due to the way the annotalysis works this
347   // actually only encodes the mutex being below jni_function_table_lock_ although having
348   // kGenericBottomLock level is lower than this.
349   #define BOTTOM_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::cha_lock_)
350 
351   // Have an exclusive aborting thread.
352   static Mutex* abort_lock_ ACQUIRED_AFTER(custom_tls_lock_);
353 
354   // Allow mutual exclusion when manipulating Thread::suspend_count_.
355   // TODO: Does the trade-off of a per-thread lock make sense?
356   static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);
357 
358   // One unexpected signal at a time lock.
359   static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);
360 
361   // Guards the magic global variables used by native tools (e.g. libunwind).
362   static Mutex* native_debug_interface_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
363 
364   // Guards the data structures responsible for keeping track of the JNI
365   // jmethodID/jfieldID <-> ArtMethod/ArtField mapping when using index-ids.
366   static ReaderWriterMutex* jni_id_lock_ ACQUIRED_AFTER(native_debug_interface_lock_);
367 
368   // Have an exclusive logging thread.
369   static Mutex* logging_lock_ ACQUIRED_AFTER(jni_id_lock_);
370 
371   // List of mutexes that we expect a thread may hold when accessing weak refs. This is used to
372   // avoid a deadlock in the empty checkpoint while weak ref access is disabled (b/34964016). If we
373   // encounter an unexpected mutex on accessing weak refs,
374   // Thread::CheckEmptyCheckpointFromWeakRefAccess will detect it.
375   static std::vector<BaseMutex*> expected_mutexes_on_weak_ref_access_;
376   static Atomic<const BaseMutex*> expected_mutexes_on_weak_ref_access_guard_;
377   class ScopedExpectedMutexesOnWeakRefAccessLock;
378 };
379 
380 class Roles {
381  public:
382   // Uninterruptible means that the thread may not become suspended.
383   EXPORT static Uninterruptible uninterruptible_;
384 };
385 
386 }  // namespace art
387 
388 #endif  // ART_RUNTIME_BASE_LOCKS_H_
389