/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_LOCKS_H_
#define ART_RUNTIME_BASE_LOCKS_H_

#include <stdint.h>

#include <iosfwd>
#include <vector>

#include "base/atomic.h"
#include "base/macros.h"

namespace art {

class BaseMutex;
class ConditionVariable;
class SHARED_LOCKABLE ReaderWriterMutex;
class SHARED_LOCKABLE MutatorMutex;
class LOCKABLE Mutex;
class Thread;

// LockLevel is used to impose a lock hierarchy [1] in which it is invalid to acquire a Mutex at a
// level higher than or equal to that of any lock the thread already holds. The lock hierarchy
// establishes a cycle-free partial ordering, causing potential deadlocks to fail the ordering
// checks. (An illustrative usage sketch follows the enum below.)
//
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel : uint8_t {
  kLoggingLock = 0,
  kSwapMutexesLock,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kJniIdLock,
  kNativeDebugInterfaceLock,
  kSignalHandlingLock,
  // A generic lock level for mutexes that should not allow any additional mutexes to be acquired
  // after they are held.
  kGenericBottomLock,
  // Tracks the second acquisition at the same lock level for kThreadWaitLock. This is an exception
  // to the normal lock ordering, used to implement Monitor::Wait - while holding one kThreadWait
  // level lock, it is permitted to acquire a second one - with internal safeguards to ensure that
  // the second lock acquisition does not result in deadlock. This is implemented in the lock
  // order by treating the second acquisition of a kThreadWaitLock as a kThreadWaitWakeLock
  // acquisition. Thus, acquiring kThreadWaitWakeLock requires holding kThreadWaitLock. This entry
  // is here near the bottom of the hierarchy because other locks should not be
  // acquired while it is held. kThreadWaitLock cannot be moved here because GC
  // activity acquires locks while holding the wait lock.
  kThreadWaitWakeLock,
  kJdwpAdbStateLock,
  kJdwpSocketLock,
  kRegionSpaceRegionLock,
  kMarkSweepMarkStackLock,
  // Can be held while GC-related work is done, and thus must be above kMarkSweepMarkStackLock.
  kThreadWaitLock,
  kCHALock,
  kJitCodeCacheLock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kAllocSpaceLock,
  kTaggingLockLevel,
  kTransactionLogLock,
  kCustomTlsLock,
  kJniFunctionTableLock,
  kJniWeakGlobalsLock,
  kJniGlobalsLock,
  kReferenceQueueSoftReferencesLock,
  kReferenceQueuePhantomReferencesLock,
  kReferenceQueueFinalizerReferencesLock,
  kReferenceQueueWeakReferencesLock,
  kReferenceQueueClearedReferencesLock,
  kReferenceProcessorLock,
  kJitDebugInterfaceLock,
  kBumpPointerSpaceBlockLock,
  kArenaPoolLock,
  kInternTableLock,
  kOatFileSecondaryLookupLock,
  kHostDlOpenHandlesLock,
  kVerifierDepsLock,
  kOatFileManagerLock,
  kTracingUniqueMethodsLock,
  kTracingStreamingLock,
  kClassLoaderClassesLock,
  kDefaultMutexLevel,
  kDexLock,
  kMarkSweepLargeObjectLock,
  kJdwpObjectRegistryLock,
  kModifyLdtLock,
  kAllocatedThreadIdsLock,
  kMonitorPoolLock,
  kClassLinkerClassesLock,  // TODO: rename.
  kDexToDexCompilerLock,
  kSubtypeCheckLock,
  kBreakpointLock,
  // This is a generic lock level for a lock meant to be acquired after holding a
  // monitor lock.
  kPostMonitorLock,
  kMonitorLock,
  kMonitorListLock,
  kJniLoadLibraryLock,
  kThreadListLock,
  kAllocTrackerLock,
  kDeoptimizationLock,
  kProfilerLock,
  kJdwpShutdownLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeThreadPoolLock,
  kRuntimeShutdownLock,
  kTraceLock,
  kHeapBitmapLock,

  // This is a generic lock level for a top-level lock meant to be acquired after the
  // mutator_lock_ is held.
  kPostMutatorTopLockLevel,

  kMutatorLock,
  kInstrumentEntrypointsLock,
  // This is a generic lock level for a top-level lock meant to be acquired after the
  // UserCodeSuspensionLock is held.
  kPostUserCodeSuspensionTopLevelLock,
  kUserCodeSuspensionLock,
  kZygoteCreationLock,

  // The highest valid lock level. Use this if there is code that should only be called with no
  // other locks held. Since this is the highest lock level we also allow it to be held even if the
  // runtime or current thread is not fully set up yet (for example during thread attach). Note that
  // this lock also has special behavior around the mutator_lock_. Since the mutator_lock_ is not
  // really a 'real' lock we allow this to be locked even when the mutator_lock_ is held exclusively.
  // Furthermore, the mutator_lock_ may not be acquired in any form while a lock of this level is
  // held. Since holding the mutator_lock_ exclusively means that all other threads are suspended,
  // this prevents deadlocks while still allowing this lock level to function as a "highest" level.
  kTopLockLevel,

  kLockLevelCount  // Must come last.
};
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);

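// An illustrative sketch of the hierarchy rule above (the mutex names are hypothetical; Mutex's
// constructor takes a name and a LockLevel, and level checks are enforced in debug builds):
//
//   Mutex higher("intern table lock", kInternTableLock);
//   Mutex lower("logging lock", kLoggingLock);
//   higher.ExclusiveLock(self);  // OK: no locks held yet.
//   lower.ExclusiveLock(self);   // OK: kLoggingLock is below kInternTableLock.
//   // Acquiring another mutex at kInternTableLock or above here would fail the
//   // hierarchy check, since it could introduce a cycle and hence a deadlock.
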
// For StartNoThreadSuspension and EndNoThreadSuspension.
class CAPABILITY("role") Role {
 public:
  void Acquire() ACQUIRE() {}
  void Release() RELEASE() {}
  const Role& operator!() const { return *this; }
};

class Uninterruptible : public Role {
};

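// A minimal sketch of how a Role pairs with annotalysis (illustrative only; ScopedUninterruptible
// below is a hypothetical RAII helper, not declared in this header):
//
//   class ScopedUninterruptible {
//    public:
//     ScopedUninterruptible() ACQUIRE(Roles::uninterruptible_) {}
//     ~ScopedUninterruptible() RELEASE(Roles::uninterruptible_) {}
//   };
//
// Functions that must not suspend can then be annotated REQUIRES(Roles::uninterruptible_),
// and clang's thread-safety analysis checks the pairing statically.
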
// Global mutexes corresponding to the levels above.
class Locks {
 public:
  static void Init();
  static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.

  // Destroying various lock types can emit errors that vary depending upon
  // whether the client (art::Runtime) is currently active.  Allow the client
  // to set a callback that is used to check when it is acceptable to call
  // Abort.  The default behavior is that the client *is not* able to call
  // Abort if no callback is established.
  using ClientCallback = bool();
  static void SetClientCallback(ClientCallback* is_safe_to_call_abort_cb) NO_THREAD_SAFETY_ANALYSIS;
  // Checks whether it is safe to call Abort() without using locks.
  static bool IsSafeToCallAbortRacy() NO_THREAD_SAFETY_ANALYSIS;
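
  // A hedged usage sketch (IsRuntimeActive below is hypothetical, not part of this header):
  //
  //   static bool IsRuntimeActive() { /* consult runtime state */ return true; }
  //   ...
  //   Locks::SetClientCallback(&IsRuntimeActive);  // Abort() is now permitted whenever the
  //                                                // callback returns true.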

  // Add a mutex to expected_mutexes_on_weak_ref_access_.
  static void AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
  // Remove a mutex from expected_mutexes_on_weak_ref_access_.
  static void RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
  // Check if the given mutex is in expected_mutexes_on_weak_ref_access_.
  static bool IsExpectedOnWeakRefAccess(BaseMutex* mutex);

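  // An illustrative registration sketch (the mutex below is hypothetical): a subsystem whose
  // mutex may legitimately be held while accessing weak references registers it up front, so the
  // empty-checkpoint check (see expected_mutexes_on_weak_ref_access_ below) accepts it:
  //
  //   Mutex* string_access_lock = new Mutex("string access", kDefaultMutexLevel);
  //   Locks::AddToExpectedMutexesOnWeakRefAccess(string_access_lock);
  //   ...
  //   Locks::RemoveFromExpectedMutexesOnWeakRefAccess(string_access_lock);
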
  // Guards code that deals with user-code suspension. This mutex must be held when suspending or
  // resuming threads with SuspendReason::kForUserCode. It may be held by a suspended thread, but
  // only if the suspension is not due to SuspendReason::kForUserCode.
  static Mutex* user_code_suspension_lock_;

  // Guards allocation entrypoint instrumenting.
  static Mutex* instrument_entrypoints_lock_ ACQUIRED_AFTER(user_code_suspension_lock_);

  // A barrier is used to synchronize the GC/Debugger thread with mutator threads. When the
  // GC/Debugger thread wants to suspend all mutator threads, it needs to wait for all mutator
  // threads to pass a barrier. Threads that are already suspended will get their barrier passed by
  // the GC/Debugger thread; threads in the runnable state will pass the barrier when they
  // transition to the suspended state. The GC/Debugger thread will be woken up when all mutator
  // threads are suspended.
  //
  // Thread suspension:
  // mutator thread                                | GC/Debugger
  //   .. running ..                               |   .. running ..
  //   .. running ..                               | Request thread suspension by:
  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
  //   .. running ..                               |     all mutator threads
  //   .. running ..                               |   - releasing thread_suspend_count_lock_
  //   .. running ..                               | Block wait for all threads to pass a barrier
  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
  // suspend code.                                 |   .. blocked ..
  // Change state to kSuspended (pass the barrier) | Wake up when all threads pass the barrier
  // x: Acquire thread_suspend_count_lock_         |   .. running ..
  // while Thread::suspend_count_ > 0              |   .. running ..
  //   - wait on Thread::resume_cond_              |   .. running ..
  //     (releases thread_suspend_count_lock_)     |   .. running ..
  //   .. waiting ..                               | Request thread resumption by:
  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
  //   .. waiting ..                               |     all mutator threads
  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_            |  .. running ..
  // Change to kRunnable                           |  .. running ..
  //  - this uses a CAS operation to ensure the    |  .. running ..
  //    suspend request flag isn't raised as the   |  .. running ..
  //    state is changed                           |  .. running ..
  //  - if the CAS operation fails then goto x     |  .. running ..
  //  .. running ..                                |  .. running ..
  static MutatorMutex* mutator_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);
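
  // A condensed sketch of the mutator-side transition described above (illustrative pseudocode;
  // the real logic lives in Thread and ThreadList, and the helper names below are hypothetical):
  //
  //   while (true) {
  //     if (suspend_count_ > 0) {
  //       PassSuspendBarrierAndWaitOnResumeCond();  // under thread_suspend_count_lock_
  //     }
  //     if (CasStateFlagsToRunnable()) {  // fails if a new suspend request raced in
  //       break;                          // now kRunnable
  //     }
  //   }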

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // Runtime thread pool lock.
  static Mutex* runtime_thread_pool_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards background profiler global state.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_thread_pool_lock_);

  // Guards trace (i.e. traceview) requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);

  // Guards debugger recent allocation records.
  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards updates to instrumentation to ensure mutual exclusion of
  // events like deoptimization requests.
  // TODO: improve name, perhaps instrumentation_update_lock_.
  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);

  // Guard the update of the SubtypeCheck data stores in each Class::status_ field.
  // This lock is used in SubtypeCheck methods which are the interface for
  // any SubtypeCheck-mutating methods.
  // In Class::IsSubClass, the lock is not required since it does not update the SubtypeCheck data.
  static Mutex* subtype_check_lock_ ACQUIRED_AFTER(deoptimization_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(subtype_check_lock_);

  // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
  static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);

  // Guards maintaining loading library data structures.
  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);

  // Guards breakpoints.
  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check that the
  // code doesn't try to hold a higher level Mutex (usage sketch below).
  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::classlinker_classes_lock_)

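  // A minimal usage sketch (my_subsystem_lock_ is hypothetical, not declared in this class):
  //
  //   static Mutex* my_subsystem_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  //
  // Annotalysis will then flag any code path that, while holding my_subsystem_lock_, attempts to
  // acquire classlinker_classes_lock_ or any mutex above it.
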
  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Guard the allocation/deallocation of thread ids.
  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);

  // Guards modification of the LDT on x86.
  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);

  static ReaderWriterMutex* dex_lock_ ACQUIRED_AFTER(modify_ldt_lock_);

  // Guards opened oat files in OatFileManager.
  static ReaderWriterMutex* oat_file_manager_lock_ ACQUIRED_AFTER(dex_lock_);

  // Guards extra string entries for VerifierDeps.
  static ReaderWriterMutex* verifier_deps_lock_ ACQUIRED_AFTER(oat_file_manager_lock_);

  // Guards dlopen_handles_ in DlOpenOatFile.
  static Mutex* host_dlopen_handles_lock_ ACQUIRED_AFTER(verifier_deps_lock_);

  // Guards intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(host_dlopen_handles_lock_);

  // Guards reference processor.
  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);

  // Guards cleared references queue.
  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);

  // Guards weak references queue.
  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);

  // Guards finalizer references queue.
  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);

  // Guards phantom references queue.
  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);

  // Guards soft references queue.
  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);

  // Guard accesses to the JNI Global Reference table.
  static ReaderWriterMutex* jni_globals_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);

  // Guard accesses to the JNI Weak Global Reference table.
  static Mutex* jni_weak_globals_lock_ ACQUIRED_AFTER(jni_globals_lock_);

  // Guard accesses to the JNI function table override.
  static Mutex* jni_function_table_lock_ ACQUIRED_AFTER(jni_weak_globals_lock_);

  // Guard accesses to the Thread::custom_tls_. We use this to allow the TLS of other threads to be
  // read (though the reader must hold the ThreadListLock or have some other way of ensuring the
  // thread will not die). This is useful for (e.g.) the implementation of GetThreadLocalStorage.
  static Mutex* custom_tls_lock_ ACQUIRED_AFTER(jni_function_table_lock_);

  // Guard access to any JIT data structure.
  static Mutex* jit_lock_ ACQUIRED_AFTER(custom_tls_lock_);

  // Guards Class Hierarchy Analysis (CHA).
  static Mutex* cha_lock_ ACQUIRED_AFTER(jit_lock_);

  // When declaring any Mutex add BOTTOM_MUTEX_ACQUIRED_AFTER to use annotalysis to check that the
  // code doesn't try to acquire a higher level Mutex. NB Due to the way annotalysis works this
  // actually only encodes the mutex being below cha_lock_, even though the kGenericBottomLock
  // level is lower than that.
  #define BOTTOM_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::cha_lock_)

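  // Usage mirrors DEFAULT_MUTEX_ACQUIRED_AFTER above (hypothetical declaration):
  //
  //   static Mutex* my_bottom_lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;
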
  // Have an exclusive aborting thread.
  static Mutex* abort_lock_ ACQUIRED_AFTER(custom_tls_lock_);

  // Allow mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // One unexpected signal at a time lock.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Guards the magic global variables used by native tools (e.g. libunwind).
  static Mutex* native_debug_interface_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Guards the data structures responsible for keeping track of the JNI
  // jmethodID/jfieldID <-> ArtMethod/ArtField mapping when using index-ids.
  static ReaderWriterMutex* jni_id_lock_ ACQUIRED_AFTER(native_debug_interface_lock_);

  // Have an exclusive logging thread.
  static Mutex* logging_lock_ ACQUIRED_AFTER(jni_id_lock_);

  // List of mutexes that we expect a thread may hold when accessing weak refs. This is used to
  // avoid a deadlock in the empty checkpoint while weak ref access is disabled (b/34964016). If we
  // encounter an unexpected mutex on accessing weak refs,
  // Thread::CheckEmptyCheckpointFromWeakRefAccess will detect it.
  static std::vector<BaseMutex*> expected_mutexes_on_weak_ref_access_;
  static Atomic<const BaseMutex*> expected_mutexes_on_weak_ref_access_guard_;
  class ScopedExpectedMutexesOnWeakRefAccessLock;
};

class Roles {
 public:
  // Uninterruptible means that the thread may not become suspended.
  static Uninterruptible uninterruptible_;
};

}  // namespace art

#endif  // ART_RUNTIME_BASE_LOCKS_H_