/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "monitor.h"

#include <vector>

#include "base/mutex.h"
#include "base/stl_util.h"
#include "class_linker.h"
#include "dex_file-inl.h"
#include "dex_instruction.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"
#include "well_known_classes.h"

namespace art {

/*
 * Every Object has a monitor associated with it, but not every Object is
 * actually locked.  Even the ones that are locked do not need a
 * full-fledged monitor until a) there is actual contention or b) wait()
 * is called on the Object.
 *
 * For Android, we have implemented a scheme similar to the one described
 * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
 * (ACM 1998).  Things are even easier for us, though, because we have
 * a full 32 bits to work with.
 *
 * The two states of an Object's lock are referred to as "thin" and
 * "fat".  A lock may transition from the "thin" state to the "fat"
 * state and this transition is referred to as inflation.  Once a lock
 * has been inflated it remains in the "fat" state indefinitely.
 *
 * The lock value itself is stored in Object.lock.  The LSB of the
 * lock encodes its state.  When cleared, the lock is in the "thin"
 * state and its bits are formatted as follows:
 *
 *    [31 ---- 19] [18 ---- 3] [2 ---- 1] [0]
 *     lock count   thread id  hash state  0
 *
 * When set, the lock is in the "fat" state and its bits are formatted
 * as follows:
 *
 *    [31 ---- 3] [2 ---- 1] [0]
 *      pointer   hash state  1
 *
 * For an in-depth description of the mechanics of thin-vs-fat locking,
 * read the paper referred to above.
 *
 * Monitors provide:
 *  - mutually exclusive access to resources
 *  - a way for multiple threads to wait for notification
 *
 * In effect, they fill the role of both mutexes and condition variables.
 *
 * Only one thread can own the monitor at any time.  There may be several
 * threads waiting on it (the wait call unlocks it).  One or more waiting
 * threads may be getting interrupted or notified at any given time.
 *
 * TODO: the various members of monitor are not SMP-safe.
 */

// The shape is the bottom bit; either LW_SHAPE_THIN or LW_SHAPE_FAT.
#define LW_SHAPE_MASK 0x1
#define LW_SHAPE(x) static_cast<int>((x) & LW_SHAPE_MASK)

/*
 * Monitor accessor.  Extracts a monitor structure pointer from a fat
 * lock.  Performs no error checking.
 */
#define LW_MONITOR(x) \
  (reinterpret_cast<Monitor*>((x) & ~((LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT) | LW_SHAPE_MASK)))

/*
 * Lock recursion count field.  Contains a count of the number of times
 * a lock has been recursively acquired.
 */
#define LW_LOCK_COUNT_MASK 0x1fff
#define LW_LOCK_COUNT_SHIFT 19
#define LW_LOCK_COUNT(x) (((x) >> LW_LOCK_COUNT_SHIFT) & LW_LOCK_COUNT_MASK)

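// Illustrative sketch (not part of the runtime): decoding a lock word with the
// macros above. LW_LOCK_OWNER, LW_LOCK_OWNER_SHIFT, and the hash-state
// constants are assumed to come from monitor.h, as LW_MONITOR's use of
// LW_HASH_STATE_MASK implies.
//
//   uint32_t word = *obj->GetRawLockWordAddress();
//   if (LW_SHAPE(word) == LW_SHAPE_THIN) {
//     uint32_t owner = LW_LOCK_OWNER(word);  // Thin lock id of the owner; 0 if unowned.
//     uint32_t count = LW_LOCK_COUNT(word);  // Acquisitions beyond the first.
//   } else {
//     Monitor* mon = LW_MONITOR(word);       // Fat lock: pointer to the Monitor.
//   }
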
bool (*Monitor::is_sensitive_thread_hook_)() = NULL;
uint32_t Monitor::lock_profiling_threshold_ = 0;

bool Monitor::IsSensitiveThread() {
  if (is_sensitive_thread_hook_ != NULL) {
    return (*is_sensitive_thread_hook_)();
  }
  return false;
}

void Monitor::Init(uint32_t lock_profiling_threshold, bool (*is_sensitive_thread_hook)()) {
  lock_profiling_threshold_ = lock_profiling_threshold;
  is_sensitive_thread_hook_ = is_sensitive_thread_hook;
}

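// Illustrative usage sketch: how an embedder might enable lock contention
// sampling. The 500ms threshold and the IsUiThread() predicate are
// hypothetical, not values used by the runtime.
//
//   static bool IsUiThread() {
//     return Thread::Current()->GetThinLockId() == 1;  // Hypothetical predicate.
//   }
//   Monitor::Init(500, &IsUiThread);  // Contention of >= 500ms is sampled at 100%.
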
Monitor::Monitor(Thread* owner, mirror::Object* obj)
    : monitor_lock_("a monitor lock", kMonitorLock),
      owner_(owner),
      lock_count_(0),
      obj_(obj),
      wait_set_(NULL),
      locking_method_(NULL),
      locking_dex_pc_(0) {
  monitor_lock_.Lock(owner);
  // Propagate the lock state.
  uint32_t thin = *obj->GetRawLockWordAddress();
  lock_count_ = LW_LOCK_COUNT(thin);
  thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
  thin |= reinterpret_cast<uint32_t>(this) | LW_SHAPE_FAT;
  // Publish the updated lock word.
  android_atomic_release_store(thin, obj->GetRawLockWordAddress());
  // Lock profiling.
  if (lock_profiling_threshold_ != 0) {
    locking_method_ = owner->GetCurrentMethod(&locking_dex_pc_);
  }
}

Monitor::~Monitor() {
  DCHECK(obj_ != NULL);
  DCHECK_EQ(LW_SHAPE(*obj_->GetRawLockWordAddress()), LW_SHAPE_FAT);
}

/*
 * Links a thread into a monitor's wait set.  The monitor lock must be
 * held by the caller of this routine.
 */
void Monitor::AppendToWaitSet(Thread* thread) {
  DCHECK(owner_ == Thread::Current());
  DCHECK(thread != NULL);
  DCHECK(thread->wait_next_ == NULL) << thread->wait_next_;
  if (wait_set_ == NULL) {
    wait_set_ = thread;
    return;
  }

  // push_back.
  Thread* t = wait_set_;
  while (t->wait_next_ != NULL) {
    t = t->wait_next_;
  }
  t->wait_next_ = thread;
}

/*
 * Unlinks a thread from a monitor's wait set.  The monitor lock must
 * be held by the caller of this routine.
 */
void Monitor::RemoveFromWaitSet(Thread *thread) {
  DCHECK(owner_ == Thread::Current());
  DCHECK(thread != NULL);
  if (wait_set_ == NULL) {
    return;
  }
  if (wait_set_ == thread) {
    wait_set_ = thread->wait_next_;
    thread->wait_next_ = NULL;
    return;
  }

  Thread* t = wait_set_;
  while (t->wait_next_ != NULL) {
    if (t->wait_next_ == thread) {
      t->wait_next_ = thread->wait_next_;
      thread->wait_next_ = NULL;
      return;
    }
    t = t->wait_next_;
  }
}

mirror::Object* Monitor::GetObject() {
  return obj_;
}

void Monitor::Lock(Thread* self) {
  if (owner_ == self) {
    lock_count_++;
    return;
  }

  if (!monitor_lock_.TryLock(self)) {
    uint64_t waitStart = 0;
    uint64_t waitEnd = 0;
    uint32_t wait_threshold = lock_profiling_threshold_;
    const mirror::ArtMethod* current_locking_method = NULL;
    uint32_t current_locking_dex_pc = 0;
    {
      ScopedThreadStateChange tsc(self, kBlocked);
      if (wait_threshold != 0) {
        waitStart = NanoTime() / 1000;
      }
      current_locking_method = locking_method_;
      current_locking_dex_pc = locking_dex_pc_;

      monitor_lock_.Lock(self);
      if (wait_threshold != 0) {
        waitEnd = NanoTime() / 1000;
      }
    }

    if (wait_threshold != 0) {
      uint64_t wait_ms = (waitEnd - waitStart) / 1000;
      uint32_t sample_percent;
      if (wait_ms >= wait_threshold) {
        sample_percent = 100;
      } else {
        sample_percent = 100 * wait_ms / wait_threshold;
      }
      if (sample_percent != 0 && (static_cast<uint32_t>(rand() % 100) < sample_percent)) {
        const char* current_locking_filename;
        uint32_t current_locking_line_number;
        TranslateLocation(current_locking_method, current_locking_dex_pc,
                          current_locking_filename, current_locking_line_number);
        LogContentionEvent(self, wait_ms, sample_percent, current_locking_filename, current_locking_line_number);
      }
    }
  }
  owner_ = self;
  DCHECK_EQ(lock_count_, 0);

  // When debugging, save the current monitor holder for future
  // acquisition failures to use in sampled logging.
  if (lock_profiling_threshold_ != 0) {
    locking_method_ = self->GetCurrentMethod(&locking_dex_pc_);
  }
}

static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
                                              __attribute__((format(printf, 1, 2)));

static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  va_list args;
  va_start(args, fmt);
  Thread* self = Thread::Current();
  ThrowLocation throw_location = self->GetCurrentLocationForThrow();
  self->ThrowNewExceptionV(throw_location, "Ljava/lang/IllegalMonitorStateException;", fmt, args);
  if (!Runtime::Current()->IsStarted()) {
    std::ostringstream ss;
    self->Dump(ss);
    std::string str(ss.str());
    LOG(ERROR) << "IllegalMonitorStateException: " << str;
  }
  va_end(args);
}

static std::string ThreadToString(Thread* thread) {
  if (thread == NULL) {
    return "NULL";
  }
  std::ostringstream oss;
  // TODO: alternatively, we could just return the thread's name.
  oss << *thread;
  return oss.str();
}

void Monitor::FailedUnlock(mirror::Object* o, Thread* expected_owner, Thread* found_owner,
                           Monitor* monitor) {
  Thread* current_owner = NULL;
  std::string current_owner_string;
  std::string expected_owner_string;
  std::string found_owner_string;
  {
    // TODO: isn't this too late to prevent threads from disappearing?
    // Acquire thread list lock so threads won't disappear from under us.
    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
    // Re-read owner now that we hold lock.
    current_owner = (monitor != NULL) ? monitor->owner_ : NULL;
    // Get short descriptions of the threads involved.
    current_owner_string = ThreadToString(current_owner);
    expected_owner_string = ThreadToString(expected_owner);
    found_owner_string = ThreadToString(found_owner);
  }
  if (current_owner == NULL) {
    if (found_owner == NULL) {
      ThrowIllegalMonitorStateExceptionF("unlock of unowned monitor on object of type '%s'"
                                         " on thread '%s'",
                                         PrettyTypeOf(o).c_str(),
                                         expected_owner_string.c_str());
    } else {
      // Race: the original read found an owner but now there is none
      ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
                                         " (where now the monitor appears unowned) on thread '%s'",
                                         found_owner_string.c_str(),
                                         PrettyTypeOf(o).c_str(),
                                         expected_owner_string.c_str());
    }
  } else {
    if (found_owner == NULL) {
      // Race: originally there was no owner, but there is now.
      ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
                                         " (originally believed to be unowned) on thread '%s'",
                                         current_owner_string.c_str(),
                                         PrettyTypeOf(o).c_str(),
                                         expected_owner_string.c_str());
    } else {
      if (found_owner != current_owner) {
        // Race: the owner found at the original read and the current owner differ.
        ThrowIllegalMonitorStateExceptionF("unlock of monitor originally owned by '%s' (now"
                                           " owned by '%s') on object of type '%s' on thread '%s'",
                                           found_owner_string.c_str(),
                                           current_owner_string.c_str(),
                                           PrettyTypeOf(o).c_str(),
                                           expected_owner_string.c_str());
      } else {
        ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
                                           " on thread '%s'",
                                           current_owner_string.c_str(),
                                           PrettyTypeOf(o).c_str(),
                                           expected_owner_string.c_str());
      }
    }
  }
}

bool Monitor::Unlock(Thread* self, bool for_wait) {
  DCHECK(self != NULL);
  Thread* owner = owner_;
  if (owner == self) {
    // We own the monitor, so nobody else can be in here.
    if (lock_count_ == 0) {
      owner_ = NULL;
      locking_method_ = NULL;
      locking_dex_pc_ = 0;
      monitor_lock_.Unlock(self);
    } else {
      --lock_count_;
    }
  } else if (for_wait) {
    // Wait should have already cleared the fields.
    DCHECK_EQ(lock_count_, 0);
    DCHECK(owner == NULL);
    DCHECK(locking_method_ == NULL);
    DCHECK_EQ(locking_dex_pc_, 0u);
    monitor_lock_.Unlock(self);
  } else {
    // We don't own this, so we're not allowed to unlock it.
    // The JNI spec says that we should throw IllegalMonitorStateException
    // in this case.
    FailedUnlock(obj_, self, owner, this);
    return false;
  }
  return true;
}

/*
 * Wait on a monitor until timeout, interrupt, or notification.  Used for
 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
 *
 * If another thread calls Thread.interrupt(), we throw InterruptedException
 * and return immediately if one of the following is true:
 *  - blocked in wait(), wait(long), or wait(long, int) methods of Object
 *  - blocked in join(), join(long), or join(long, int) methods of Thread
 *  - blocked in sleep(long), or sleep(long, int) methods of Thread
 * Otherwise, we set the "interrupted" flag.
 *
 * Checks to make sure that "ns" is in the range 0-999999
 * (i.e. fractions of a millisecond) and throws the appropriate
 * exception if it isn't.
 *
 * The spec allows "spurious wakeups", and recommends that all code using
 * Object.wait() do so in a loop.  This appears to derive from concerns
 * about pthread_cond_wait() on multiprocessor systems.  Some commentary
 * on the web casts doubt on whether these can/should occur.
 *
 * Since we're allowed to wake up "early", we clamp extremely long durations
 * to return at the end of the 32-bit time epoch.
 */
void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
                   bool interruptShouldThrow, ThreadState why) {
  DCHECK(self != NULL);
  DCHECK(why == kTimedWaiting || why == kWaiting || why == kSleeping);

  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
    return;
  }
  monitor_lock_.AssertHeld(self);

  // We need to turn a zero-length timed wait into a regular wait because
  // Object.wait(0, 0) is defined as Object.wait(0), which is defined as Object.wait().
  if (why == kTimedWaiting && (ms == 0 && ns == 0)) {
    why = kWaiting;
  }

  WaitWithLock(self, ms, ns, interruptShouldThrow, why);
}

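// Illustrative sketch: the looping idiom the comment above recommends, written
// against this file's static entry points (Java callers reach the same path
// via Object.wait()). 'condition_holds' is a hypothetical predicate protected
// by obj's monitor.
//
//   Monitor::MonitorEnter(self, obj);
//   while (!condition_holds()) {
//     // May return early on a spurious wakeup, so the condition is re-checked.
//     Monitor::Wait(self, obj, 0, 0, true, kWaiting);
//   }
//   Monitor::MonitorExit(self, obj);
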
void Monitor::WaitWithLock(Thread* self, int64_t ms, int32_t ns,
                           bool interruptShouldThrow, ThreadState why) {
  // Enforce the timeout range.
  if (ms < 0 || ns < 0 || ns > 999999) {
    ThrowLocation throw_location = self->GetCurrentLocationForThrow();
    self->ThrowNewExceptionF(throw_location, "Ljava/lang/IllegalArgumentException;",
                             "timeout arguments out of range: ms=%lld ns=%d", ms, ns);
    return;
  }

  /*
   * Add ourselves to the set of threads waiting on this monitor, and
   * release our hold.  We need to let it go even if we're a few levels
   * deep in a recursive lock, and we need to restore that later.
   *
   * We append to the wait set ahead of clearing the count and owner
   * fields so the subroutine can check that the calling thread owns
   * the monitor.  Aside from that, the order of member updates is
   * not order sensitive as we hold the pthread mutex.
   */
  AppendToWaitSet(self);
  int prev_lock_count = lock_count_;
  lock_count_ = 0;
  owner_ = NULL;
  const mirror::ArtMethod* saved_method = locking_method_;
  locking_method_ = NULL;
  uintptr_t saved_dex_pc = locking_dex_pc_;
  locking_dex_pc_ = 0;

  /*
   * Update thread state. If the GC wakes up, it'll ignore us, knowing
   * that we won't touch any references in this state, and we'll check
   * our suspend mode before we transition out.
   */
  self->TransitionFromRunnableToSuspended(why);

  bool was_interrupted = false;
  {
    // Pseudo-atomically wait on self's wait_cond_ and release the monitor lock.
    MutexLock mu(self, *self->wait_mutex_);

    // Set wait_monitor_ to the monitor object we will be waiting on. When wait_monitor_ is
    // non-NULL a notifying or interrupting thread must signal the thread's wait_cond_ to wake it
    // up.
    DCHECK(self->wait_monitor_ == NULL);
    self->wait_monitor_ = this;

    // Release the monitor lock.
    Unlock(self, true);

    // Handle the case where the thread was interrupted before we called wait().
    if (self->interrupted_) {
      was_interrupted = true;
    } else {
      // Wait for a notification or a timeout to occur.
      if (why == kWaiting) {
        self->wait_cond_->Wait(self);
      } else {
        DCHECK(why == kTimedWaiting || why == kSleeping) << why;
        self->wait_cond_->TimedWait(self, ms, ns);
      }
      if (self->interrupted_) {
        was_interrupted = true;
      }
      self->interrupted_ = false;
    }
  }

  // Set self->status back to kRunnable, and self-suspend if needed.
  self->TransitionFromSuspendedToRunnable();

  {
    // We reset the thread's wait_monitor_ field after transitioning back to runnable so
    // that a thread in a waiting/sleeping state has a non-null wait_monitor_ for debugging
    // and diagnostic purposes. (If you reset this earlier, stack dumps will claim that threads
    // are waiting on "null".)
    MutexLock mu(self, *self->wait_mutex_);
    DCHECK(self->wait_monitor_ != NULL);
    self->wait_monitor_ = NULL;
  }

  // Re-acquire the monitor lock.
  Lock(self);

  self->wait_mutex_->AssertNotHeld(self);

  /*
   * We remove our thread from wait set after restoring the count
   * and owner fields so the subroutine can check that the calling
   * thread owns the monitor. Aside from that, the order of member
   * updates is not order sensitive as we hold the pthread mutex.
   */
  owner_ = self;
  lock_count_ = prev_lock_count;
  locking_method_ = saved_method;
  locking_dex_pc_ = saved_dex_pc;
  RemoveFromWaitSet(self);

  if (was_interrupted) {
    /*
     * We were interrupted while waiting, or somebody interrupted an
     * un-interruptible thread earlier and we're bailing out immediately.
     *
     * The doc sayeth: "The interrupted status of the current thread is
     * cleared when this exception is thrown."
     */
    {
      MutexLock mu(self, *self->wait_mutex_);
      self->interrupted_ = false;
    }
    if (interruptShouldThrow) {
      ThrowLocation throw_location = self->GetCurrentLocationForThrow();
      self->ThrowNewException(throw_location, "Ljava/lang/InterruptedException;", NULL);
    }
  }
}

void Monitor::Notify(Thread* self) {
  DCHECK(self != NULL);
  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
    return;
  }
  monitor_lock_.AssertHeld(self);
  NotifyWithLock(self);
}

void Monitor::NotifyWithLock(Thread* self) {
  // Signal the first waiting thread in the wait set.
  while (wait_set_ != NULL) {
    Thread* thread = wait_set_;
    wait_set_ = thread->wait_next_;
    thread->wait_next_ = NULL;

    // Check to see if the thread is still waiting.
    MutexLock mu(self, *thread->wait_mutex_);
    if (thread->wait_monitor_ != NULL) {
      thread->wait_cond_->Signal(self);
      return;
    }
  }
}

void Monitor::NotifyAll(Thread* self) {
  DCHECK(self != NULL);
  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateExceptionF("object not locked by thread before notifyAll()");
    return;
  }
  monitor_lock_.AssertHeld(self);
  NotifyAllWithLock();
}

void Monitor::NotifyAllWithLock() {
  // Signal all threads in the wait set.
  while (wait_set_ != NULL) {
    Thread* thread = wait_set_;
    wait_set_ = thread->wait_next_;
    thread->wait_next_ = NULL;
    thread->Notify();
  }
}

/*
 * Changes the shape of a monitor from thin to fat, preserving the
 * internal lock state. The calling thread must own the lock.
 */
void Monitor::Inflate(Thread* self, mirror::Object* obj) {
  DCHECK(self != NULL);
  DCHECK(obj != NULL);
  DCHECK_EQ(LW_SHAPE(*obj->GetRawLockWordAddress()), LW_SHAPE_THIN);
  DCHECK_EQ(LW_LOCK_OWNER(*obj->GetRawLockWordAddress()), static_cast<int32_t>(self->GetThinLockId()));

  // Allocate and acquire a new monitor.
  Monitor* m = new Monitor(self, obj);
  VLOG(monitor) << "monitor: thread " << self->GetThinLockId()
                << " created monitor " << m << " for object " << obj;
  Runtime::Current()->GetMonitorList()->Add(m);
}

void Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();
  uint32_t sleepDelayNs;
  uint32_t minSleepDelayNs = 1000000;  /* 1 millisecond */
  uint32_t maxSleepDelayNs = 1000000000;  /* 1 second */
  uint32_t thin, newThin;

  DCHECK(self != NULL);
  DCHECK(obj != NULL);
  uint32_t threadId = self->GetThinLockId();
 retry:
  thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    /*
     * The lock is a thin lock.  The owner field is used to
     * determine the acquire method, ordered by cost.
     */
    if (LW_LOCK_OWNER(thin) == threadId) {
      /*
       * The calling thread owns the lock.  Increment the
       * value of the recursion count field.
       */
      *thinp += 1 << LW_LOCK_COUNT_SHIFT;
      if (LW_LOCK_COUNT(*thinp) == LW_LOCK_COUNT_MASK) {
        /*
         * The reacquisition limit has been reached.  Inflate
         * the lock so the next acquire will not overflow the
         * recursion count field.
         */
        Inflate(self, obj);
      }
    } else if (LW_LOCK_OWNER(thin) == 0) {
      // The lock is unowned. Install the thread id of the calling thread into the owner field.
      // This is the common case: compiled code will have tried this before calling back into
      // the runtime.
      newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
      if (android_atomic_acquire_cas(thin, newThin, thinp) != 0) {
        // The acquire failed. Try again.
        goto retry;
      }
    } else {
      VLOG(monitor) << StringPrintf("monitor: thread %d spin on lock %p (a %s) owned by %d",
                                    threadId, thinp, PrettyTypeOf(obj).c_str(), LW_LOCK_OWNER(thin));
      // The lock is owned by another thread. Notify the runtime that we are about to wait.
      self->monitor_enter_object_ = obj;
      self->TransitionFromRunnableToSuspended(kBlocked);
      // Spin until the thin lock is released or inflated.
      sleepDelayNs = 0;
      for (;;) {
        thin = *thinp;
        // Check the shape of the lock word. Another thread
        // may have inflated the lock while we were waiting.
        if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
          if (LW_LOCK_OWNER(thin) == 0) {
            // The lock has been released. Install the thread id of the
            // calling thread into the owner field.
            newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
            if (android_atomic_acquire_cas(thin, newThin, thinp) == 0) {
              // The acquire succeeded. Break out of the loop and proceed to inflate the lock.
              break;
            }
          } else {
            // The lock has not been released. Yield so the owning thread can run.
            if (sleepDelayNs == 0) {
              sched_yield();
              sleepDelayNs = minSleepDelayNs;
            } else {
              NanoSleep(sleepDelayNs);
              // Prepare the next delay value. Wrap around to avoid polling once a second for eternity.
              if (sleepDelayNs < maxSleepDelayNs / 2) {
                sleepDelayNs *= 2;
              } else {
                sleepDelayNs = minSleepDelayNs;
              }
            }
          }
        } else {
          // The thin lock was inflated by another thread. Let the runtime know we are no longer
          // waiting and try again.
          VLOG(monitor) << StringPrintf("monitor: thread %d found lock %p surprise-fattened by another thread", threadId, thinp);
          self->monitor_enter_object_ = NULL;
          self->TransitionFromSuspendedToRunnable();
          goto retry;
        }
      }
      VLOG(monitor) << StringPrintf("monitor: thread %d spin on lock %p done", threadId, thinp);
      // We have acquired the thin lock. Let the runtime know that we are no longer waiting.
      self->monitor_enter_object_ = NULL;
      self->TransitionFromSuspendedToRunnable();
      // Fatten the lock.
      Inflate(self, obj);
      VLOG(monitor) << StringPrintf("monitor: thread %d fattened lock %p", threadId, thinp);
    }
  } else {
    // The lock is a fat lock.
    VLOG(monitor) << StringPrintf("monitor: thread %d locking fat lock %p (%p) %p on a %s",
                                  threadId, thinp, LW_MONITOR(*thinp),
                                  reinterpret_cast<void*>(*thinp), PrettyTypeOf(obj).c_str());
    DCHECK(LW_MONITOR(*thinp) != NULL);
    LW_MONITOR(*thinp)->Lock(self);
  }
}

bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();

  DCHECK(self != NULL);
  // DCHECK_EQ(self->GetState(), kRunnable);
  DCHECK(obj != NULL);

  /*
   * Cache the lock word as its value can change while we are
   * examining its state.
   */
  uint32_t thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    /*
     * The lock is thin.  We must ensure that the lock is owned
     * by the given thread before unlocking it.
     */
    if (LW_LOCK_OWNER(thin) == self->GetThinLockId()) {
      /*
       * We are the lock owner.  It is safe to update the lock
       * without CAS as lock ownership guards the lock itself.
       */
      if (LW_LOCK_COUNT(thin) == 0) {
        /*
         * The lock was not recursively acquired, the common
         * case.  Unlock by clearing all bits except for the
         * hash state.
         */
        thin &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
        android_atomic_release_store(thin, thinp);
      } else {
        /*
         * The object was recursively acquired.  Decrement the
         * lock recursion count field.
         */
        *thinp -= 1 << LW_LOCK_COUNT_SHIFT;
      }
    } else {
      /*
       * We do not own the lock.  The JVM spec requires that we
       * throw an exception in this case.
       */
      FailedUnlock(obj, self, NULL, NULL);
      return false;
    }
  } else {
    /*
     * The lock is fat.  We must check to see if Unlock has
     * raised any exceptions before continuing.
     */
    DCHECK(LW_MONITOR(*thinp) != NULL);
    if (!LW_MONITOR(*thinp)->Unlock(self, false)) {
      // An exception has been raised.  Do not fall through.
      return false;
    }
  }
  return true;
}

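// Illustrative sketch: MonitorEnter and MonitorExit must be balanced, exactly
// like the bytecodes backing a Java synchronized block. MonitorExit returns
// false (after raising IllegalMonitorStateException) if 'self' does not own
// obj's lock.
//
//   Monitor::MonitorEnter(self, obj);  // May spin, and may inflate the lock.
//   // ... critical section ...
//   bool released = Monitor::MonitorExit(self, obj);
//   DCHECK(released);  // Fails only on an unbalanced unlock.
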
/*
 * Object.wait().  Also called for class init.
 */
void Monitor::Wait(Thread* self, mirror::Object *obj, int64_t ms, int32_t ns,
                   bool interruptShouldThrow, ThreadState why) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();

  // If the lock is still thin, we need to fatten it.
  uint32_t thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    // Make sure that 'self' holds the lock.
    if (LW_LOCK_OWNER(thin) != self->GetThinLockId()) {
      ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
      return;
    }

    /* This thread holds the lock.  We need to fatten the lock
     * so 'self' can block on it.  Don't update the object lock
     * field yet, because 'self' needs to acquire the lock before
     * any other thread gets a chance.
     */
    Inflate(self, obj);
    VLOG(monitor) << StringPrintf("monitor: thread %d fattened lock %p by wait()", self->GetThinLockId(), thinp);
  }
  LW_MONITOR(*thinp)->Wait(self, ms, ns, interruptShouldThrow, why);
}

void Monitor::Notify(Thread* self, mirror::Object *obj) {
  uint32_t thin = *obj->GetRawLockWordAddress();

  // If the lock is still thin, there aren't any waiters;
  // waiting on an object forces lock fattening.
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    // Make sure that 'self' holds the lock.
    if (LW_LOCK_OWNER(thin) != self->GetThinLockId()) {
      ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
      return;
    }
    // no-op; there are no waiters to notify.
    // We inflate here in case the Notify is in a tight loop. Without inflation here the waiter
    // will struggle to get in. Bug 6961405.
    Inflate(self, obj);
  } else {
    // It's a fat lock.
    LW_MONITOR(thin)->Notify(self);
  }
}

void Monitor::NotifyAll(Thread* self, mirror::Object *obj) {
  uint32_t thin = *obj->GetRawLockWordAddress();

  // If the lock is still thin, there aren't any waiters;
  // waiting on an object forces lock fattening.
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    // Make sure that 'self' holds the lock.
    if (LW_LOCK_OWNER(thin) != self->GetThinLockId()) {
      ThrowIllegalMonitorStateExceptionF("object not locked by thread before notifyAll()");
      return;
    }
    // no-op; there are no waiters to notify.
    // We inflate here in case the NotifyAll is in a tight loop. Without inflation here the waiter
    // will struggle to get in. Bug 6961405.
    Inflate(self, obj);
  } else {
    // It's a fat lock.
    LW_MONITOR(thin)->NotifyAll(self);
  }
}

uint32_t Monitor::GetThinLockId(uint32_t raw_lock_word) {
  if (LW_SHAPE(raw_lock_word) == LW_SHAPE_THIN) {
    return LW_LOCK_OWNER(raw_lock_word);
  } else {
    Thread* owner = LW_MONITOR(raw_lock_word)->owner_;
    return owner ? owner->GetThinLockId() : 0;
  }
}

void Monitor::DescribeWait(std::ostream& os, const Thread* thread) {
  ThreadState state = thread->GetState();

  mirror::Object* object = NULL;
  uint32_t lock_owner = ThreadList::kInvalidId;
  if (state == kWaiting || state == kTimedWaiting || state == kSleeping) {
    if (state == kSleeping) {
      os << "  - sleeping on ";
    } else {
      os << "  - waiting on ";
    }
    {
      Thread* self = Thread::Current();
      MutexLock mu(self, *thread->wait_mutex_);
      Monitor* monitor = thread->wait_monitor_;
      if (monitor != NULL) {
        object = monitor->obj_;
      }
    }
  } else if (state == kBlocked) {
    os << "  - waiting to lock ";
    object = thread->monitor_enter_object_;
    if (object != NULL) {
      lock_owner = object->GetThinLockId();
    }
  } else {
    // We're not waiting on anything.
    return;
  }

  // - waiting on <0x6008c468> (a java.lang.Class<java.lang.ref.ReferenceQueue>)
  os << "<" << object << "> (a " << PrettyTypeOf(object) << ")";

  // - waiting to lock <0x613f83d8> (a java.lang.Object) held by thread 5
  if (lock_owner != ThreadList::kInvalidId) {
    os << " held by thread " << lock_owner;
  }

  os << "\n";
}

mirror::Object* Monitor::GetContendedMonitor(Thread* thread) {
  // This is used to implement JDWP's ThreadReference.CurrentContendedMonitor, and has a bizarre
  // definition of contended that includes a monitor a thread is trying to enter...
  mirror::Object* result = thread->monitor_enter_object_;
  if (result != NULL) {
    return result;
  }
  // ...but also a monitor that the thread is waiting on.
  {
    MutexLock mu(Thread::Current(), *thread->wait_mutex_);
    Monitor* monitor = thread->wait_monitor_;
    if (monitor != NULL) {
      return monitor->obj_;
    }
  }
  return NULL;
}

void Monitor::VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*),
                         void* callback_context) {
  mirror::ArtMethod* m = stack_visitor->GetMethod();
  CHECK(m != NULL);

  // Native methods are an easy special case.
  // TODO: use the JNI implementation's table of explicit MonitorEnter calls and dump those too.
  if (m->IsNative()) {
    if (m->IsSynchronized()) {
      mirror::Object* jni_this = stack_visitor->GetCurrentSirt()->GetReference(0);
      callback(jni_this, callback_context);
    }
    return;
  }

  // Proxy methods should not be synchronized.
  if (m->IsProxyMethod()) {
    CHECK(!m->IsSynchronized());
    return;
  }

  // <clinit> is another special case. The runtime holds the class lock while calling <clinit>.
  MethodHelper mh(m);
  if (mh.IsClassInitializer()) {
    callback(m->GetDeclaringClass(), callback_context);
    // Fall through because there might be synchronization in the user code too.
  }

  // Is there any reason to believe there's any synchronization in this method?
  const DexFile::CodeItem* code_item = mh.GetCodeItem();
  CHECK(code_item != NULL) << PrettyMethod(m);
  if (code_item->tries_size_ == 0) {
    return;  // No "tries" implies no synchronization, so no held locks to report.
  }

  // Ask the verifier for the dex pcs of all the monitor-enter instructions corresponding to
  // the locks held in this stack frame.
  std::vector<uint32_t> monitor_enter_dex_pcs;
  verifier::MethodVerifier::FindLocksAtDexPc(m, stack_visitor->GetDexPc(), monitor_enter_dex_pcs);
  if (monitor_enter_dex_pcs.empty()) {
    return;
  }

  for (size_t i = 0; i < monitor_enter_dex_pcs.size(); ++i) {
    // The verifier works in terms of the dex pcs of the monitor-enter instructions.
    // We want the registers used by those instructions (so we can read the values out of them).
    uint32_t dex_pc = monitor_enter_dex_pcs[i];
    uint16_t monitor_enter_instruction = code_item->insns_[dex_pc];

    // Quick sanity check.
    if ((monitor_enter_instruction & 0xff) != Instruction::MONITOR_ENTER) {
      LOG(FATAL) << "expected monitor-enter @" << dex_pc << "; was "
                 << reinterpret_cast<void*>(monitor_enter_instruction);
    }

    uint16_t monitor_register = ((monitor_enter_instruction >> 8) & 0xff);
    mirror::Object* o = reinterpret_cast<mirror::Object*>(stack_visitor->GetVReg(m, monitor_register,
                                                                                 kReferenceVReg));
    callback(o, callback_context);
  }
}

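// Illustrative sketch: a VisitLocks callback in the style of a thread-dump
// implementation. The callback signature is fixed by VisitLocks above;
// DumpLockedObject and the std::ostream context are hypothetical.
//
//   static void DumpLockedObject(mirror::Object* o, void* context) {
//     std::ostream& os = *reinterpret_cast<std::ostream*>(context);
//     os << "  - locked <" << o << "> (a " << PrettyTypeOf(o) << ")\n";
//   }
//   ...
//   Monitor::VisitLocks(stack_visitor, DumpLockedObject, &os);
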
bool Monitor::IsValidLockWord(int32_t lock_word) {
  if (lock_word == 0) {
    return true;
  } else if (LW_SHAPE(lock_word) == LW_SHAPE_FAT) {
    Monitor* mon = LW_MONITOR(lock_word);
    MonitorList* list = Runtime::Current()->GetMonitorList();
    MutexLock mu(Thread::Current(), list->monitor_list_lock_);
    bool found = false;
    for (Monitor* list_mon : list->list_) {
      if (mon == list_mon) {
        found = true;
        break;
      }
    }
    return found;
  } else {
    // TODO: thin lock validity checking.
    return LW_SHAPE(lock_word) == LW_SHAPE_THIN;
  }
}

void Monitor::TranslateLocation(const mirror::ArtMethod* method, uint32_t dex_pc,
                                const char*& source_file, uint32_t& line_number) const {
  // If method is null, location is unknown
  if (method == NULL) {
    source_file = "";
    line_number = 0;
    return;
  }
  MethodHelper mh(method);
  source_file = mh.GetDeclaringClassSourceFile();
  if (source_file == NULL) {
    source_file = "";
  }
  line_number = mh.GetLineNumFromDexPC(dex_pc);
}

MonitorList::MonitorList()
    : allow_new_monitors_(true), monitor_list_lock_("MonitorList lock"),
      monitor_add_condition_("MonitorList disallow condition", monitor_list_lock_) {
}

MonitorList::~MonitorList() {
  MutexLock mu(Thread::Current(), monitor_list_lock_);
  STLDeleteElements(&list_);
}

void MonitorList::DisallowNewMonitors() {
  MutexLock mu(Thread::Current(), monitor_list_lock_);
  allow_new_monitors_ = false;
}

void MonitorList::AllowNewMonitors() {
  Thread* self = Thread::Current();
  MutexLock mu(self, monitor_list_lock_);
  allow_new_monitors_ = true;
  monitor_add_condition_.Broadcast(self);
}

void MonitorList::Add(Monitor* m) {
  Thread* self = Thread::Current();
  MutexLock mu(self, monitor_list_lock_);
  while (UNLIKELY(!allow_new_monitors_)) {
    monitor_add_condition_.WaitHoldingLocks(self);
  }
  list_.push_front(m);
}

void MonitorList::SweepMonitorList(IsMarkedTester is_marked, void* arg) {
  MutexLock mu(Thread::Current(), monitor_list_lock_);
  for (auto it = list_.begin(); it != list_.end(); ) {
    Monitor* m = *it;
    if (!is_marked(m->GetObject(), arg)) {
      VLOG(monitor) << "freeing monitor " << m << " belonging to unmarked object " << m->GetObject();
      delete m;
      it = list_.erase(it);
    } else {
      ++it;
    }
  }
}

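// Illustrative sketch: the GC passes an IsMarkedTester so that monitors whose
// objects died in the last collection are freed. 'HeapIsMarked', the 'heap'
// argument, and the exact IsMarkedTester signature are assumptions based on
// the call above, not code from this runtime.
//
//   static bool HeapIsMarked(const mirror::Object* obj, void* arg) {
//     return reinterpret_cast<gc::Heap*>(arg)->IsMarked(obj);  // Hypothetical test.
//   }
//   Runtime::Current()->GetMonitorList()->SweepMonitorList(HeapIsMarked, heap);
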
MonitorInfo::MonitorInfo(mirror::Object* o) : owner(NULL), entry_count(0) {
  uint32_t lock_word = *o->GetRawLockWordAddress();
  if (LW_SHAPE(lock_word) == LW_SHAPE_THIN) {
    uint32_t owner_thin_lock_id = LW_LOCK_OWNER(lock_word);
    if (owner_thin_lock_id != 0) {
      owner = Runtime::Current()->GetThreadList()->FindThreadByThinLockId(owner_thin_lock_id);
      entry_count = 1 + LW_LOCK_COUNT(lock_word);
    }
    // Thin locks have no waiters.
  } else {
    CHECK_EQ(LW_SHAPE(lock_word), LW_SHAPE_FAT);
    Monitor* monitor = LW_MONITOR(lock_word);
    owner = monitor->owner_;
    entry_count = 1 + monitor->lock_count_;
    for (Thread* waiter = monitor->wait_set_; waiter != NULL; waiter = waiter->wait_next_) {
      waiters.push_back(waiter);
    }
  }
}

}  // namespace art