/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MONITOR_H_
#define ART_RUNTIME_MONITOR_H_

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

#include <atomic>
#include <iosfwd>
#include <list>
#include <vector>

#include "base/allocator.h"
#include "base/atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "lock_word.h"
#include "obj_ptr.h"
#include "read_barrier_option.h"
#include "runtime_callbacks.h"
#include "thread_state.h"

namespace art HIDDEN {

class ArtMethod;
class IsMarkedVisitor;
class LockWord;
template<class T> class Handle;
class StackVisitor;
class Thread;
using MonitorId = uint32_t;

namespace mirror {
class Object;
}  // namespace mirror

enum class LockReason {
  kForWait,
  kForLock,
};

class Monitor {
 public:
  // The default number of spins that are done before thread suspension is used to forcibly inflate
  // a lock word. See Runtime::max_spins_before_thin_lock_inflation_.
  static constexpr size_t kDefaultMaxSpinsBeforeThinLockInflation = 50;

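  // Illustrative sketch only (the real logic lives elsewhere in the runtime): a contended
  // thin lock is typically spun on briefly before we resort to suspending the owner and
  // inflating, roughly:
  //
  //   for (size_t spins = 0; spins < kDefaultMaxSpinsBeforeThinLockInflation; ++spins) {
  //     if (/* thin-lock CAS on the lock word succeeds */) return;  // cheap path
  //   }
  //   // Spinning failed: suspend the owner and inflate to a fat Monitor.
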
  static constexpr int kDefaultMonitorTimeoutMs = 500;

  static constexpr int kMonitorTimeoutMinMs = 200;

  static constexpr int kMonitorTimeoutMaxMs = 1000;  // 1 second

  ~Monitor();

  static void Init(uint32_t lock_profiling_threshold, uint32_t stack_dump_lock_profiling_threshold);

  // Return the thread id of the lock owner or 0 when there is no owner.
  EXPORT static uint32_t GetLockOwnerThreadId(ObjPtr<mirror::Object> obj)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // NO_THREAD_SAFETY_ANALYSIS for mon->Lock.
  EXPORT static ObjPtr<mirror::Object> MonitorEnter(Thread* thread,
                                                    ObjPtr<mirror::Object> obj,
                                                    bool trylock)
      EXCLUSIVE_LOCK_FUNCTION(obj.Ptr())
      NO_THREAD_SAFETY_ANALYSIS
      REQUIRES(!Roles::uninterruptible_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // NO_THREAD_SAFETY_ANALYSIS for mon->Unlock.
  EXPORT static bool MonitorExit(Thread* thread, ObjPtr<mirror::Object> obj)
      NO_THREAD_SAFETY_ANALYSIS
      REQUIRES(!Roles::uninterruptible_)
      REQUIRES_SHARED(Locks::mutator_lock_)
      UNLOCK_FUNCTION(obj.Ptr());

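  // A minimal usage sketch of the pair above (assumed, not actual runtime code).
  // MonitorEnter returns the object again, which may differ from obj if the GC
  // moved it, so the returned pointer is the one to unlock:
  //
  //   ObjPtr<mirror::Object> locked =
  //       Monitor::MonitorEnter(self, obj, /*trylock=*/false);
  //   // ... synchronized region ...
  //   bool success = Monitor::MonitorExit(self, locked);
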
  static void Notify(Thread* self, ObjPtr<mirror::Object> obj)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    DoNotify(self, obj, false);
  }
  static void NotifyAll(Thread* self, ObjPtr<mirror::Object> obj)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    DoNotify(self, obj, true);
  }

  // Object.wait().  Also called for class init.
  // NO_THREAD_SAFETY_ANALYSIS for mon->Wait.
  EXPORT static void Wait(Thread* self,
                          ObjPtr<mirror::Object> obj,
                          int64_t ms,
                          int32_t ns,
                          bool interruptShouldThrow,
                          ThreadState why)
      REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;

  static ThreadState FetchState(const Thread* thread,
                                /* out */ ObjPtr<mirror::Object>* monitor_object,
                                /* out */ uint32_t* lock_owner_tid)
      REQUIRES(!Locks::thread_suspend_count_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Used to implement JDWP's ThreadReference.CurrentContendedMonitor.
  EXPORT static ObjPtr<mirror::Object> GetContendedMonitor(Thread* thread)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Calls 'callback' once for each lock held in the single stack frame represented by
  // the current state of 'stack_visitor'.
  // The abort_on_failure flag allows us to avoid aborting when the runtime state is
  // inconsistent. This is necessary when we have already aborted but still want to dump
  // as much of the stack as we can.
  EXPORT static void VisitLocks(StackVisitor* stack_visitor,
                                void (*callback)(ObjPtr<mirror::Object>, void*),
                                void* callback_context,
                                bool abort_on_failure = true) REQUIRES_SHARED(Locks::mutator_lock_);

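  // Illustrative callback usage (a sketch; DumpLockedObject is a hypothetical helper,
  // not part of this API):
  //
  //   static void DumpLockedObject(ObjPtr<mirror::Object> o, void* context) {
  //     std::ostream& os = *reinterpret_cast<std::ostream*>(context);
  //     os << "  - locked object at " << o.Ptr() << "\n";
  //   }
  //   ...
  //   Monitor::VisitLocks(&stack_visitor, DumpLockedObject, &os);
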
  static bool IsValidLockWord(LockWord lock_word);

  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ObjPtr<mirror::Object> GetObject() REQUIRES_SHARED(Locks::mutator_lock_);

  void SetObject(ObjPtr<mirror::Object> object) REQUIRES_SHARED(Locks::mutator_lock_);

  // Provides no memory ordering guarantees.
  Thread* GetOwner() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return owner_.load(std::memory_order_relaxed);
  }

  int32_t GetHashCode();

  // Is the monitor currently locked? Debug only, provides no memory ordering guarantees.
  bool IsLocked() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!monitor_lock_);

  bool HasHashCode() const {
    return hash_code_.load(std::memory_order_relaxed) != 0;
  }

  MonitorId GetMonitorId() const {
    return monitor_id_;
  }

  // Inflate the lock on obj. May fail to inflate for spurious reasons, always re-check.
  // attempt_of_4 is in 1..4 inclusive or 0. A non-zero value indicates that we are retrying
  // up to 4 times, and should only abort on 4. Zero means we are only trying once, with the
  // full suspend timeout instead of a quarter.
  static void InflateThinLocked(Thread* self,
                                Handle<mirror::Object> obj,
                                LockWord lock_word,
                                uint32_t hash_code,
                                int attempt_of_4 = 0) REQUIRES_SHARED(Locks::mutator_lock_);

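  // A sketch of the retry pattern that the attempt_of_4 parameter implies (assumed,
  // not actual runtime code):
  //
  //   for (int attempt = 1; attempt <= 4; ++attempt) {
  //     LockWord lw = h_obj->GetLockWord(/*as_volatile=*/true);
  //     if (lw.GetState() != LockWord::kThinLocked) break;  // someone else inflated it
  //     Monitor::InflateThinLocked(self, h_obj, lw, /*hash_code=*/0, attempt);
  //   }
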
  // Try to deflate the monitor associated with obj. Only called when we logically hold
  // mutator_lock_ exclusively. ImageWriter calls this without actually invoking SuspendAll, but
  // it is already entirely single-threaded.
  EXPORT static bool Deflate(Thread* self, ObjPtr<mirror::Object> obj)
      REQUIRES(Locks::mutator_lock_);

#ifndef __LP64__
  void* operator new(size_t size) {
    // Align Monitor* as per the monitor ID field size in the lock word.
    void* result;
    int error = posix_memalign(&result, LockWord::kMonitorIdAlignment, size);
    CHECK_EQ(error, 0) << strerror(error);
    return result;
  }

  void operator delete(void* ptr) {
    free(ptr);
  }
#endif

 private:
  Monitor(Thread* self, Thread* owner, ObjPtr<mirror::Object> obj, int32_t hash_code)
      REQUIRES_SHARED(Locks::mutator_lock_);
  Monitor(Thread* self, Thread* owner, ObjPtr<mirror::Object> obj, int32_t hash_code, MonitorId id)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Install the monitor into its object, may fail if another thread installs a different monitor
  // first. Monitor remains in the same logical state as before, i.e. held the same # of times.
  bool Install(Thread* self)
      REQUIRES(!monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Links a thread into a monitor's wait set.  The monitor lock must be held by the caller of this
  // routine.
  void AppendToWaitSet(Thread* thread) REQUIRES(monitor_lock_);

  // Unlinks a thread from a monitor's wait set.  The monitor lock must be held by the caller of
  // this routine.
  void RemoveFromWaitSet(Thread* thread) REQUIRES(monitor_lock_);

  // Release the monitor lock and signal a waiting thread that has been notified and now needs the
  // lock. Assumes the monitor lock is held exactly once, and the owner_ field has been reset to
  // null. Caller may be suspended (Wait) or runnable (MonitorExit).
  void SignalWaiterAndReleaseMonitorLock(Thread* self) RELEASE(monitor_lock_);

  // Changes the shape of a monitor from thin to fat, preserving the internal lock state. The
  // calling thread must own the lock or the owner must be suspended. There's a race with other
  // threads inflating the lock, installing hash codes and spurious failures. The caller should
  // re-read the lock word following the call.
  static void Inflate(Thread* self, Thread* owner, ObjPtr<mirror::Object> obj, int32_t hash_code)
      REQUIRES_SHARED(Locks::mutator_lock_)
      NO_THREAD_SAFETY_ANALYSIS;  // For m->Install(self)

  void LogContentionEvent(Thread* self,
                          uint32_t wait_ms,
                          uint32_t sample_percent,
                          ArtMethod* owner_method,
                          uint32_t owner_dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static void FailedUnlock(ObjPtr<mirror::Object> obj,
                           uint32_t expected_owner_thread_id,
                           uint32_t found_owner_thread_id,
                           Monitor* mon)
      REQUIRES(!Locks::thread_list_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Try to lock without blocking, returns true if we acquired the lock.
  // If spin is true, then we spin for a short period before failing.
  bool TryLock(Thread* self, bool spin = false)
      TRY_ACQUIRE(true, monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template<LockReason reason = LockReason::kForLock>
  void Lock(Thread* self)
      ACQUIRE(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool Unlock(Thread* thread)
      RELEASE(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static void DoNotify(Thread* self, ObjPtr<mirror::Object> obj, bool notify_all)
      REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;  // For mon->Notify.

  void Notify(Thread* self)
      REQUIRES(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void NotifyAll(Thread* self)
      REQUIRES(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static std::string PrettyContentionInfo(const std::string& owner_name,
                                          pid_t owner_tid,
                                          ArtMethod* owners_method,
                                          uint32_t owners_dex_pc,
                                          size_t num_waiters)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Wait on a monitor until timeout, interrupt, or notification.  Used for Object.wait() and
  // (somewhat indirectly) Thread.sleep() and Thread.join().
  //
  // If another thread calls Thread.interrupt(), we throw InterruptedException and return
  // immediately if one of the following is true:
  //  - blocked in wait(), wait(long), or wait(long, int) methods of Object
  //  - blocked in join(), join(long), or join(long, int) methods of Thread
  //  - blocked in sleep(long), or sleep(long, int) methods of Thread
  // Otherwise, we set the "interrupted" flag.
  //
  // Checks to make sure that "ns" is in the range 0-999999 (i.e. fractions of a millisecond) and
  // throws the appropriate exception if it isn't.
  //
  // The spec allows "spurious wakeups", and recommends that all code using Object.wait() do so in
  // a loop.  This appears to derive from concerns about pthread_cond_wait() on multiprocessor
  // systems.  Some commentary on the web casts doubt on whether these can/should occur.
  //
  // Since we're allowed to wake up "early", we clamp extremely long durations to return at the end
  // of the 32-bit time epoch.
  void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow, ThreadState why)
      REQUIRES(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

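  // Caller-side sketch implied by the spurious-wakeup note above (assumed, using the
  // public static Wait; the monitor must already be held):
  //
  //   while (!condition) {
  //     Monitor::Wait(self, obj, /*ms=*/0, /*ns=*/0,
  //                   /*interruptShouldThrow=*/true, ThreadState::kWaiting);
  //     // Woken by notify, timeout, interrupt, or spuriously: re-check the condition.
  //   }
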
  // Translates the provided method and pc into its declaring class' source file and line number.
  static void TranslateLocation(ArtMethod* method, uint32_t pc,
                                const char** source_file,
                                int32_t* line_number)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Provides no memory ordering guarantees.
  uint32_t GetOwnerThreadId() REQUIRES(!Locks::thread_list_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Set locking_method_ and locking_dex_pc_ corresponding to owner's current stack.
  // owner is either self or suspended.
  void SetLockingMethod(Thread* owner) REQUIRES(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // The same, but without checking for a proxy method. Currently requires owner == self.
  void SetLockingMethodNoProxy(Thread* owner) REQUIRES(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Support for systrace output of monitor operations.
  ALWAYS_INLINE static void AtraceMonitorLock(Thread* self,
                                              ObjPtr<mirror::Object> obj,
                                              bool is_wait)
      REQUIRES_SHARED(Locks::mutator_lock_);
  static void AtraceMonitorLockImpl(Thread* self,
                                    ObjPtr<mirror::Object> obj,
                                    bool is_wait)
      REQUIRES_SHARED(Locks::mutator_lock_);
  ALWAYS_INLINE static void AtraceMonitorUnlock();

  static uint32_t lock_profiling_threshold_;
  static uint32_t stack_dump_lock_profiling_threshold_;
  static bool capture_method_eagerly_;

  // Holding the monitor N times is represented by holding monitor_lock_ N times.
  Mutex monitor_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Pretend to unlock monitor lock.
  void FakeUnlockMonitorLock() RELEASE(monitor_lock_) NO_THREAD_SAFETY_ANALYSIS {}

  // Number of threads either waiting on the condition or waiting on a contended
  // monitor acquisition. Prevents deflation.
  std::atomic<size_t> num_waiters_;

  // Which thread currently owns the lock? monitor_lock_ only keeps the tid.
  // Only set while holding monitor_lock_. Non-locking readers only use it to
  // compare to self or for debugging.
  std::atomic<Thread*> owner_;

  // Owner's recursive lock depth is given by monitor_lock_.GetDepth().

  // What object are we part of. This is a weak root. Do not access
  // this directly, use GetObject() to read it so it will be guarded
  // by a read barrier.
  GcRoot<mirror::Object> obj_;

  // Threads currently waiting on this monitor.
  Thread* wait_set_ GUARDED_BY(monitor_lock_);

  // Threads that were waiting on this monitor, but are now contending on it.
  Thread* wake_set_ GUARDED_BY(monitor_lock_);

  // Stored object hash code, generated lazily by GetHashCode.
  AtomicInteger hash_code_;

  // Data structure used to remember the method and dex pc of a recent holder of the
  // lock. Used for tracing and contention reporting. Setting these is expensive, since it
  // involves a partial stack walk. We set them only as follows, to minimize the cost:
  // - If tracing is enabled, they are needed immediately when we first notice contention, so we
  //   set them unconditionally when a monitor is acquired.
  // - If contention reporting is enabled, we use the lock_owner_request_ field to have the
  //   contending thread request them. The current owner then sets them when releasing the monitor,
  //   making them available when the contending thread acquires the monitor.
  // - If tracing and contention reporting are enabled, we do both. This usually prevents us from
  //   switching between reporting the end and beginning of critical sections for contention logging
  //   when tracing is enabled.  We expect that tracing overhead is normally much higher than for
  //   contention logging, so the added cost should be small. It also minimizes glitches when
  //   enabling and disabling traces.
  // We're tolerant of missing information. E.g. when tracing is initially turned on, we may
  // not have the lock holder information if the holder acquired the lock with tracing off.
  //
  // We make this data unconditionally atomic; for contention logging all accesses are in fact
  // protected by the monitor, but for tracing, reads are not. Writes are always
  // protected by the monitor.
  //
  // The fields are always accessed without memory ordering. We store a checksum, and reread if
  // the checksum doesn't correspond to the values.  This results in values that are correct with
  // very high probability, but not certainty.
  //
  // If we need lock_owner information for a certain thread for contention logging, we store its
  // tid in lock_owner_request_. To satisfy the request, we store lock_owner_tid_,
  // lock_owner_method_, and lock_owner_dex_pc_ and the corresponding checksum while holding the
  // monitor.
  //
  // At all times, either lock_owner_ is zero, the checksum is valid, or a thread is actively
  // in the process of establishing one of those states. Only one thread at a time can be actively
  // establishing such a state, since writes are protected by the monitor.
  std::atomic<Thread*> lock_owner_;  // *lock_owner_ may no longer exist!
  std::atomic<ArtMethod*> lock_owner_method_;
  std::atomic<uint32_t> lock_owner_dex_pc_;
  std::atomic<uintptr_t> lock_owner_sum_;

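  // Optimistic read protocol for the four fields above, as described in the preceding
  // comment (a sketch, not the exact implementation):
  //
  //   ArtMethod* m = lock_owner_method_.load(std::memory_order_relaxed);
  //   uint32_t dex_pc = lock_owner_dex_pc_.load(std::memory_order_relaxed);
  //   Thread* t = lock_owner_.load(std::memory_order_relaxed);
  //   if (lock_owner_sum_.load(std::memory_order_relaxed)
  //           != LockOwnerInfoChecksum(m, dex_pc, t)) {
  //     // Torn read: retry, or report that no information is available.
  //   }
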
  // Request lock owner save method and dex_pc. Written asynchronously.
  std::atomic<Thread*> lock_owner_request_;

  // Compute method, dex pc, and tid "checksum".
  uintptr_t LockOwnerInfoChecksum(ArtMethod* m, uint32_t dex_pc, Thread* t);

  // Set owning method, dex pc, and tid. owner_ field is set and points to current thread.
  void SetLockOwnerInfo(ArtMethod* method, uint32_t dex_pc, Thread* t)
      REQUIRES(monitor_lock_);

  // Get owning method and dex pc for the given thread, if available.
  void GetLockOwnerInfo(/*out*/ArtMethod** method, /*out*/uint32_t* dex_pc, Thread* t);

  // Do the same, while holding the monitor. There are no concurrent updates.
  void GetLockOwnerInfoLocked(/*out*/ArtMethod** method, /*out*/uint32_t* dex_pc,
                              uint32_t thread_id)
      REQUIRES(monitor_lock_);

  // We never clear the lock_owner method and dex pc. Since they often reflect
  // ownership when we last detected contention, they may be inconsistent with owner_
  // and are not 100% reliable. For lock contention monitoring, in the absence of tracing,
  // there is a small risk that the current owner may finish before noticing the request,
  // or that the information will be overwritten by another intervening request and monitor
  // release, so it's also not 100% reliable. But if we report information at all, it
  // should generally (modulo accidental checksum matches) pertain to an acquisition of the
  // right monitor by the right thread, so it's extremely unlikely to be seriously misleading.
  // Since we track threads by a pointer to the Thread structure, there is a small chance we may
  // confuse threads allocated at the same exact address, if a contending thread dies before
  // we inquire about it.

  // Check for and act on a pending lock_owner_request_.
  void CheckLockOwnerRequest(Thread* self)
      REQUIRES(monitor_lock_) REQUIRES_SHARED(Locks::mutator_lock_);

  void MaybeEnableTimeout() REQUIRES(Locks::mutator_lock_);

  // The denser encoded version of this monitor as stored in the lock word.
  MonitorId monitor_id_;

#ifdef __LP64__
  // Free list for monitor pool.
  Monitor* next_free_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
#endif

  friend class MonitorInfo;
  friend class MonitorList;
  friend class MonitorPool;
  friend class mirror::Object;
  DISALLOW_COPY_AND_ASSIGN(Monitor);
};

class MonitorList {
 public:
  MonitorList();
  ~MonitorList();

  void Add(Monitor* m) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!monitor_list_lock_);

  void SweepMonitorList(IsMarkedVisitor* visitor)
      REQUIRES(!monitor_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
  void DisallowNewMonitors() REQUIRES(!monitor_list_lock_);
  void AllowNewMonitors() REQUIRES(!monitor_list_lock_);
  void BroadcastForNewMonitors() REQUIRES(!monitor_list_lock_);
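  // Illustrative collector-side sequence (a sketch of intended use, not actual GC code):
  // registration is blocked while sweeping so that a monitor created over freshly freed
  // memory cannot be swept in error (see the comment on allow_new_monitors_ below):
  //
  //   monitor_list->DisallowNewMonitors();
  //   monitor_list->SweepMonitorList(visitor);  // unlinks monitors of dead objects
  //   monitor_list->AllowNewMonitors();         // wakes threads blocked in Add()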
  // Returns how many monitors were deflated.
  size_t DeflateMonitors() REQUIRES(!monitor_list_lock_) REQUIRES(Locks::mutator_lock_);
  EXPORT size_t Size() REQUIRES(!monitor_list_lock_);

  using Monitors = std::list<Monitor*, TrackingAllocator<Monitor*, kAllocatorTagMonitorList>>;

 private:
  // During sweeping we may free an object and on a separate thread have an object created using
  // the newly freed memory. That object may then have its lock-word inflated and a monitor created.
  // If we allow new monitor registration during sweeping this monitor may be incorrectly freed as
  // the object wasn't marked when sweeping began.
  bool allow_new_monitors_ GUARDED_BY(monitor_list_lock_);
  Mutex monitor_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  ConditionVariable monitor_add_condition_ GUARDED_BY(monitor_list_lock_);
  Monitors list_ GUARDED_BY(monitor_list_lock_);

  friend class Monitor;
  DISALLOW_COPY_AND_ASSIGN(MonitorList);
};

// Collects information about the current state of an object's monitor.
// This is very unsafe, and must only be called when all threads are suspended.
// For use only by the JDWP implementation.
class MonitorInfo {
 public:
  MonitorInfo() : owner_(nullptr), entry_count_(0) {}
  MonitorInfo(const MonitorInfo&) = default;
  MonitorInfo& operator=(const MonitorInfo&) = default;
  EXPORT explicit MonitorInfo(ObjPtr<mirror::Object> o) REQUIRES(Locks::mutator_lock_);

  Thread* owner_;
  size_t entry_count_;
  std::vector<Thread*> waiters_;
};

}  // namespace art

#endif  // ART_RUNTIME_MONITOR_H_