/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_LOCKS_H_
#define ART_RUNTIME_BASE_LOCKS_H_

#include <stdint.h>

#include <iosfwd>
#include <vector>

#include "base/atomic.h"
#include "base/macros.h"

namespace art {

class BaseMutex;
class ConditionVariable;
class SHARED_LOCKABLE ReaderWriterMutex;
class SHARED_LOCKABLE MutatorMutex;
class LOCKABLE Mutex;
class Thread;

// LockLevel is used to impose a lock hierarchy [1]: acquiring a Mutex at a level higher than or
// equal to one the thread already holds is invalid. The lock hierarchy establishes a cycle-free
// partial ordering and thereby causes would-be deadlocks to fail the lock-level checks instead.
//
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel : uint8_t {
  kLoggingLock = 0,
  kSwapMutexesLock,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kNativeDebugInterfaceLock,
  kSignalHandlingLock,
  // A generic lock level for mutexes that should not allow any additional mutexes to be gained
  // after acquiring it.
  kGenericBottomLock,
  // Tracks the second acquisition at the same lock level for kThreadWaitLock. This is an exception
  // to the normal lock ordering, used to implement Monitor::Wait - while holding one kThreadWait
  // level lock, it is permitted to acquire a second one - with internal safeguards to ensure that
  // the second lock acquisition does not result in deadlock. This is implemented in the lock
  // order by treating the second acquisition of a kThreadWaitLock as a kThreadWaitWakeLock
  // acquisition. Thus, acquiring kThreadWaitWakeLock requires holding kThreadWaitLock. This entry
  // is here near the bottom of the hierarchy because other locks should not be
  // acquired while it is held. kThreadWaitLock cannot be moved here because GC
  // activity acquires locks while holding the wait lock.
  kThreadWaitWakeLock,
  kJdwpAdbStateLock,
  kJdwpSocketLock,
  kRegionSpaceRegionLock,
  kMarkSweepMarkStackLock,
  // Can be held while GC-related work is done, and thus must be above kMarkSweepMarkStackLock.
  kThreadWaitLock,
  kCHALock,
  kJitCodeCacheLock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kAllocSpaceLock,
  kTaggingLockLevel,
  kTransactionLogLock,
  kCustomTlsLock,
  kJniFunctionTableLock,
  kJniWeakGlobalsLock,
  kJniGlobalsLock,
  kReferenceQueueSoftReferencesLock,
  kReferenceQueuePhantomReferencesLock,
  kReferenceQueueFinalizerReferencesLock,
  kReferenceQueueWeakReferencesLock,
  kReferenceQueueClearedReferencesLock,
  kReferenceProcessorLock,
  kJitDebugInterfaceLock,
  kBumpPointerSpaceBlockLock,
  kArenaPoolLock,
  kInternTableLock,
  kOatFileSecondaryLookupLock,
  kHostDlOpenHandlesLock,
  kVerifierDepsLock,
  kOatFileManagerLock,
  kTracingUniqueMethodsLock,
  kTracingStreamingLock,
  kClassLoaderClassesLock,
  kDefaultMutexLevel,
  kDexLock,
  kMarkSweepLargeObjectLock,
  kJdwpObjectRegistryLock,
  kModifyLdtLock,
  kAllocatedThreadIdsLock,
  kMonitorPoolLock,
  kClassLinkerClassesLock,  // TODO rename.
  kDexToDexCompilerLock,
  kSubtypeCheckLock,
  kBreakpointLock,
  kMonitorLock,
  kMonitorListLock,
  kJniLoadLibraryLock,
  kThreadListLock,
  kAllocTrackerLock,
  kDeoptimizationLock,
  kProfilerLock,
  kJdwpShutdownLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeThreadPoolLock,
  kRuntimeShutdownLock,
  kTraceLock,
  kHeapBitmapLock,

  // This is a generic lock level for a top-level lock meant to be acquired while the
  // mutator_lock_ is held.
  kPostMutatorTopLockLevel,

  kMutatorLock,
  kInstrumentEntrypointsLock,
  kUserCodeSuspensionLock,
  kZygoteCreationLock,

  // The highest valid lock level. Use this if there is code that should only be called with no
  // other locks held. Since this is the highest lock level we also allow it to be held even if the
  // runtime or current thread is not fully set-up yet (for example during thread attach). Note that
  // this lock also has special behavior around the mutator_lock_. Since the mutator_lock_ is not
  // really a 'real' lock we allow this to be locked when the mutator_lock_ is held exclusively.
  // Furthermore, the mutator_lock_ may not be acquired in any form when a lock of this level is
  // held. Since holding the mutator_lock_ exclusively means that all other threads are suspended,
  // this prevents deadlocks while still allowing this lock level to function as a "highest" level.
  kTopLockLevel,

  kLockLevelCount  // Must come last.
};
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
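
// A hedged illustration of how the hierarchy is enforced (a hypothetical snippet, not part of
// this header): each Mutex is constructed with a LockLevel, and the lock-order checks reject
// acquiring a mutex whose level is greater than or equal to one the thread already holds.
//
//   Mutex thread_list_lock("thread list lock", kThreadListLock);
//   Mutex alloc_space_lock("alloc space lock", kAllocSpaceLock);
//   thread_list_lock.Lock(self);
//   alloc_space_lock.Lock(self);  // OK: kAllocSpaceLock is below kThreadListLock.
//   // Acquiring the two locks in the opposite order would fail the hierarchy check.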

// For StartNoThreadSuspension and EndNoThreadSuspension.
class CAPABILITY("role") Role {
 public:
  void Acquire() ACQUIRE() {}
  void Release() RELEASE() {}
  const Role& operator!() const { return *this; }
};

class Uninterruptible : public Role {
};
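
// As a sketch of how a Role participates in thread safety analysis (hypothetical signatures,
// modeled on Thread's no-suspension assertions): functions annotated ACQUIRE/RELEASE on the role
// bracket a region, inside which annotalysis then requires the role as a capability.
//
//   const char* StartAssertNoThreadSuspension(const char* cause)
//       ACQUIRE(Roles::uninterruptible_);
//   void EndAssertNoThreadSuspension(const char* old_cause)
//       RELEASE(Roles::uninterruptible_);
//   void OnlyWhileUninterruptible() REQUIRES(Roles::uninterruptible_);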

// Global mutexes corresponding to the levels above.
class Locks {
 public:
  static void Init();
  static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.

  // Destroying various lock types can emit errors that vary depending upon
  // whether the client (art::Runtime) is currently active.  Allow the client
  // to set a callback that is used to check when it is acceptable to call
  // Abort.  The default behavior is that the client *is not* able to call
  // Abort if no callback is established.
  using ClientCallback = bool();
  static void SetClientCallback(ClientCallback* is_safe_to_call_abort_cb) NO_THREAD_SAFETY_ANALYSIS;
  // Checks, without taking any locks, whether it is safe to call Abort().
  static bool IsSafeToCallAbortRacy() NO_THREAD_SAFETY_ANALYSIS;
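
  // Hypothetical usage sketch (names invented for illustration): the client installs its callback
  // once at startup, and error paths consult the racy check before aborting.
  //
  //   static bool IsRuntimeActive() { return true; }  // Client-side policy.
  //   Locks::SetClientCallback(&IsRuntimeActive);     // During startup.
  //   ...
  //   if (Locks::IsSafeToCallAbortRacy()) {
  //     // Abort() may be called here.
  //   }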

  // Add a mutex to expected_mutexes_on_weak_ref_access_.
  static void AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
  // Remove a mutex from expected_mutexes_on_weak_ref_access_.
  static void RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
  // Check if the given mutex is in expected_mutexes_on_weak_ref_access_.
  static bool IsExpectedOnWeakRefAccess(BaseMutex* mutex);

  // Guards code that deals with user-code suspension. This mutex must be held when suspending or
  // resuming threads with SuspendReason::kForUserCode. It may be held by a suspended thread, but
  // only if the suspension is not due to SuspendReason::kForUserCode.
  static Mutex* user_code_suspension_lock_;

  // Guards allocation entrypoint instrumenting.
  static Mutex* instrument_entrypoints_lock_ ACQUIRED_AFTER(user_code_suspension_lock_);

  // A barrier is used to synchronize the GC/Debugger thread with mutator threads. When the
  // GC/Debugger thread wants to suspend all mutator threads, it needs to wait for all mutator
  // threads to pass a barrier. Threads that are already suspended will get their barrier passed by
  // the GC/Debugger thread; threads in the runnable state will pass the barrier when they
  // transition to the suspended state. The GC/Debugger thread will be woken up when all mutator
  // threads are suspended.
  //
  // Thread suspension:
  // mutator thread                                | GC/Debugger
  //   .. running ..                               |   .. running ..
  //   .. running ..                               | Request thread suspension by:
  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
  //   .. running ..                               |     all mutator threads
  //   .. running ..                               |   - releasing thread_suspend_count_lock_
  //   .. running ..                               | Block wait for all threads to pass a barrier
  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
  // suspend code.                                 |   .. blocked ..
  // Change state to kSuspended (pass the barrier) | Wake up when all threads pass the barrier
  // x: Acquire thread_suspend_count_lock_         |   .. running ..
  // while Thread::suspend_count_ > 0              |   .. running ..
  //   - wait on Thread::resume_cond_              |   .. running ..
  //     (releases thread_suspend_count_lock_)     |   .. running ..
  //   .. waiting ..                               | Request thread resumption by:
  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
  //   .. waiting ..                               |     all mutator threads
  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_            |  .. running ..
  // Change to kRunnable                           |  .. running ..
  //  - this uses a CAS operation to ensure the    |  .. running ..
  //    suspend request flag isn't raised as the   |  .. running ..
  //    state is changed                           |  .. running ..
  //  - if the CAS operation fails then goto x     |  .. running ..
  //  .. running ..                                |  .. running ..
  static MutatorMutex* mutator_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);
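
  // A minimal sketch of the kRunnable CAS transition described above (hypothetical names; the
  // real code packs the thread state and the suspend-request flag into a single atomic word):
  //
  //   while (true) {
  //     StateAndFlags old_state = ReadStateAndFlags();
  //     if (old_state.suspend_request_pending) goto x;  // Step x above: wait for resumption.
  //     if (state_and_flags.CompareAndSet(old_state, WithState(old_state, kRunnable))) break;
  //   }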

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // Runtime thread pool lock.
  static Mutex* runtime_thread_pool_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards background profiler global state.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_thread_pool_lock_);

  // Guards trace (i.e. traceview) requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);

  // Guards debugger recent allocation records.
  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards updates to instrumentation to ensure mutual exclusion of
  // events like deoptimization requests.
  // TODO: improve name, perhaps instrumentation_update_lock_.
  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);

  // Guard the update of the SubtypeCheck data stores in each Class::status_ field.
  // This lock is used in SubtypeCheck methods which are the interface for
  // any SubtypeCheck-mutating methods.
  // In Class::IsSubClass, the lock is not required since it does not update the SubtypeCheck data.
  static Mutex* subtype_check_lock_ ACQUIRED_AFTER(deoptimization_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(subtype_check_lock_);

  // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
  static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);

  // Guards maintaining loading library data structures.
  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);

  // Guards breakpoints.
  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
  // doesn't try to hold a higher level Mutex.
  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::classlinker_classes_lock_)
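
  // For example, a hypothetical mutex declared with the macro:
  //
  //   Mutex my_subsystem_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  //
  // Annotalysis will then warn if classlinker_classes_lock_ (or anything ordered above it) is
  // acquired while my_subsystem_lock_ is held.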

  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Guard the allocation/deallocation of thread ids.
  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);

  // Guards modification of the LDT on x86.
  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);

  static ReaderWriterMutex* dex_lock_ ACQUIRED_AFTER(modify_ldt_lock_);

  // Guards opened oat files in OatFileManager.
  static ReaderWriterMutex* oat_file_manager_lock_ ACQUIRED_AFTER(dex_lock_);

  // Guards extra string entries for VerifierDeps.
  static ReaderWriterMutex* verifier_deps_lock_ ACQUIRED_AFTER(oat_file_manager_lock_);

  // Guards dlopen_handles_ in DlOpenOatFile.
  static Mutex* host_dlopen_handles_lock_ ACQUIRED_AFTER(verifier_deps_lock_);

  // Guards intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(host_dlopen_handles_lock_);

  // Guards reference processor.
  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);

  // Guards cleared references queue.
  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);

  // Guards weak references queue.
  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);

  // Guards finalizer references queue.
  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);

  // Guards phantom references queue.
  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);

  // Guards soft references queue.
  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);

  // Guard accesses to the JNI Global Reference table.
  static ReaderWriterMutex* jni_globals_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);

  // Guard accesses to the JNI Weak Global Reference table.
  static Mutex* jni_weak_globals_lock_ ACQUIRED_AFTER(jni_globals_lock_);

  // Guard accesses to the JNI function table override.
  static Mutex* jni_function_table_lock_ ACQUIRED_AFTER(jni_weak_globals_lock_);

  // Guard accesses to Thread::custom_tls_. We use this to allow the TLS of other threads to be
  // read (though the reader must hold the ThreadListLock, or otherwise ensure the thread will not
  // die while its TLS is read). This is useful for (e.g.) the implementation of
  // GetThreadLocalStorage.
  static Mutex* custom_tls_lock_ ACQUIRED_AFTER(jni_function_table_lock_);

  // Guards Class Hierarchy Analysis (CHA).
  static Mutex* cha_lock_ ACQUIRED_AFTER(custom_tls_lock_);

  // When declaring any Mutex add BOTTOM_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
  // doesn't try to acquire a higher level Mutex. NB: due to the way annotalysis works, this
  // actually only encodes the mutex being below cha_lock_, even though the kGenericBottomLock
  // level is lower than that.
  #define BOTTOM_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::cha_lock_)
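
  // Analogous hypothetical usage for a leaf-level mutex that must not be held while acquiring
  // anything else:
  //
  //   Mutex my_leaf_lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;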

  // Have an exclusive aborting thread.
  static Mutex* abort_lock_ ACQUIRED_AFTER(custom_tls_lock_);

  // Allow mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // One unexpected signal at a time lock.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Guards the magic global variables used by native tools (e.g. libunwind).
  static Mutex* native_debug_interface_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Have an exclusive logging thread.
  static Mutex* logging_lock_ ACQUIRED_AFTER(native_debug_interface_lock_);

  // List of mutexes that we expect a thread may hold when accessing weak refs. This is used to
  // avoid a deadlock in the empty checkpoint while weak ref access is disabled (b/34964016). If we
  // encounter an unexpected mutex on accessing weak refs,
  // Thread::CheckEmptyCheckpointFromWeakRefAccess will detect it.
  static std::vector<BaseMutex*> expected_mutexes_on_weak_ref_access_;
  static Atomic<const BaseMutex*> expected_mutexes_on_weak_ref_access_guard_;
  class ScopedExpectedMutexesOnWeakRefAccessLock;
};

class Roles {
 public:
  // Uninterruptible means that the thread may not become suspended.
  static Uninterruptible uninterruptible_;
};

}  // namespace art

#endif  // ART_RUNTIME_BASE_LOCKS_H_