• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "Dalvik.h"
18 
19 #include <fcntl.h>
20 #include <stdlib.h>
21 #include <unistd.h>
22 #include <pthread.h>
23 #include <time.h>
24 #include <errno.h>
25 
26 /*
27  * Every Object has a monitor associated with it, but not every Object is
28  * actually locked.  Even the ones that are locked do not need a
29  * full-fledged monitor until a) there is actual contention or b) wait()
30  * is called on the Object.
31  *
32  * For Dalvik, we have implemented a scheme similar to the one described
33  * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
34  * (ACM 1998).  Things are even easier for us, though, because we have
35  * a full 32 bits to work with.
36  *
37  * The two states of an Object's lock are referred to as "thin" and
38  * "fat".  A lock may transition from the "thin" state to the "fat"
39  * state and this transition is referred to as inflation.  Once a lock
40  * has been inflated it remains in the "fat" state indefinitely.
41  *
42  * The lock value itself is stored in Object.lock.  The LSB of the
43  * lock encodes its state.  When cleared, the lock is in the "thin"
44  * state and its bits are formatted as follows:
45  *
46  *    [31 ---- 19] [18 ---- 3] [2 ---- 1] [0]
47  *     lock count   thread id  hash state  0
48  *
49  * When set, the lock is in the "fat" state and its bits are formatted
50  * as follows:
51  *
52  *    [31 ---- 3] [2 ---- 1] [0]
53  *      pointer   hash state  1
54  *
55  * For an in-depth description of the mechanics of thin-vs-fat locking,
56  * read the paper referred to above.
57  */
58 
59 /*
60  * Monitors provide:
61  *  - mutually exclusive access to resources
62  *  - a way for multiple threads to wait for notification
63  *
64  * In effect, they fill the role of both mutexes and condition variables.
65  *
66  * Only one thread can own the monitor at any time.  There may be several
67  * threads waiting on it (the wait call unlocks it).  One or more waiting
68  * threads may be getting interrupted or notified at any given time.
69  *
70  * TODO: the various members of monitor are not SMP-safe.
71  */
struct Monitor {
    Thread*     owner;          /* which thread currently owns the lock? */
    int         lockCount;      /* owner's recursive lock depth */
    Object*     obj;            /* what object are we part of [debug only] */

    Thread*     waitSet;	/* threads currently waiting on this monitor */

    pthread_mutex_t lock;       /* underlying mutex; held while the monitor is owned */

    Monitor*    next;           /* link in gDvm.monitorList (for sweep/shutdown) */

    /*
     * Who last acquired this monitor, when lock sampling is enabled.
     * Even when enabled, ownerMethod may be NULL.
     */
    const Method* ownerMethod;  /* method active when the owner took the lock */
    u4 ownerPc;                 /* bytecode offset within ownerMethod */
};
90 
91 
/*
 * Create and initialize a monitor, and push it onto the global monitor
 * list.  Monitors are only reclaimed by dvmSweepMonitorList() or at VM
 * shutdown by dvmFreeMonitorList().
 *
 * Aborts the VM on allocation failure.
 */
Monitor* dvmCreateMonitor(Object* obj)
{
    Monitor* mon;

    mon = (Monitor*) calloc(1, sizeof(Monitor));
    if (mon == NULL) {
        ALOGE("Unable to allocate monitor");
        dvmAbort();
    }
    mon->obj = obj;
    dvmInitMutex(&mon->lock);

    /* replace the head of the list with the new monitor */
    /*
     * Lock-free push: retry the CAS until no other thread has swapped
     * the list head underneath us.  The int32_t casts assume pointers
     * fit in 32 bits (consistent with the 32-bit lock word above).
     */
    do {
        mon->next = gDvm.monitorList;
    } while (android_atomic_release_cas((int32_t)mon->next, (int32_t)mon,
            (int32_t*)(void*)&gDvm.monitorList) != 0);

    return mon;
}
115 
116 /*
117  * Free the monitor list.  Only used when shutting the VM down.
118  */
dvmFreeMonitorList()119 void dvmFreeMonitorList()
120 {
121     Monitor* mon;
122     Monitor* nextMon;
123 
124     mon = gDvm.monitorList;
125     while (mon != NULL) {
126         nextMon = mon->next;
127         free(mon);
128         mon = nextMon;
129     }
130 }
131 
132 /*
133  * Get the object that a monitor is part of.
134  */
dvmGetMonitorObject(Monitor * mon)135 Object* dvmGetMonitorObject(Monitor* mon)
136 {
137     if (mon == NULL)
138         return NULL;
139     else
140         return mon->obj;
141 }
142 
143 /*
144  * Returns the thread id of the thread owning the given lock.
145  */
lockOwner(Object * obj)146 static u4 lockOwner(Object* obj)
147 {
148     Thread *owner;
149     u4 lock;
150 
151     assert(obj != NULL);
152     /*
153      * Since we're reading the lock value multiple times, latch it so
154      * that it doesn't change out from under us if we get preempted.
155      */
156     lock = obj->lock;
157     if (LW_SHAPE(lock) == LW_SHAPE_THIN) {
158         return LW_LOCK_OWNER(lock);
159     } else {
160         owner = LW_MONITOR(lock)->owner;
161         return owner ? owner->threadId : 0;
162     }
163 }
164 
165 /*
166  * Get the thread that holds the lock on the specified object.  The
167  * object may be unlocked, thin-locked, or fat-locked.
168  *
169  * The caller must lock the thread list before calling here.
170  */
dvmGetObjectLockHolder(Object * obj)171 Thread* dvmGetObjectLockHolder(Object* obj)
172 {
173     u4 threadId = lockOwner(obj);
174 
175     if (threadId == 0)
176         return NULL;
177     return dvmGetThreadByThreadId(threadId);
178 }
179 
180 /*
181  * Checks whether the given thread holds the given
182  * objects's lock.
183  */
dvmHoldsLock(Thread * thread,Object * obj)184 bool dvmHoldsLock(Thread* thread, Object* obj)
185 {
186     if (thread == NULL || obj == NULL) {
187         return false;
188     } else {
189         return thread->threadId == lockOwner(obj);
190     }
191 }
192 
193 /*
194  * Free the monitor associated with an object and make the object's lock
195  * thin again.  This is called during garbage collection.
196  */
freeMonitor(Monitor * mon)197 static void freeMonitor(Monitor *mon)
198 {
199     assert(mon != NULL);
200     assert(mon->obj != NULL);
201     assert(LW_SHAPE(mon->obj->lock) == LW_SHAPE_FAT);
202 
203     /* This lock is associated with an object
204      * that's being swept.  The only possible way
205      * anyone could be holding this lock would be
206      * if some JNI code locked but didn't unlock
207      * the object, in which case we've got some bad
208      * native code somewhere.
209      */
210     assert(pthread_mutex_trylock(&mon->lock) == 0);
211     assert(pthread_mutex_unlock(&mon->lock) == 0);
212     dvmDestroyMutex(&mon->lock);
213     free(mon);
214 }
215 
216 /*
217  * Frees monitor objects belonging to unmarked objects.
218  */
dvmSweepMonitorList(Monitor ** mon,int (* isUnmarkedObject)(void *))219 void dvmSweepMonitorList(Monitor** mon, int (*isUnmarkedObject)(void*))
220 {
221     Monitor handle;
222     Monitor *prev, *curr;
223     Object *obj;
224 
225     assert(mon != NULL);
226     assert(isUnmarkedObject != NULL);
227     prev = &handle;
228     prev->next = curr = *mon;
229     while (curr != NULL) {
230         obj = curr->obj;
231         if (obj != NULL && (*isUnmarkedObject)(obj) != 0) {
232             prev->next = curr->next;
233             freeMonitor(curr);
234             curr = prev->next;
235         } else {
236             prev = curr;
237             curr = curr->next;
238         }
239     }
240     *mon = handle.next;
241 }
242 
logWriteInt(char * dst,int value)243 static char *logWriteInt(char *dst, int value)
244 {
245     *dst++ = EVENT_TYPE_INT;
246     set4LE((u1 *)dst, value);
247     return dst + 4;
248 }
249 
logWriteString(char * dst,const char * value,size_t len)250 static char *logWriteString(char *dst, const char *value, size_t len)
251 {
252     *dst++ = EVENT_TYPE_STRING;
253     len = len < 32 ? len : 32;
254     set4LE((u1 *)dst, len);
255     dst += 4;
256     memcpy(dst, value, len);
257     return dst + len;
258 }
259 
260 #define EVENT_LOG_TAG_dvm_lock_sample 20003
261 
logContentionEvent(Thread * self,u4 waitMs,u4 samplePercent,const char * ownerFileName,u4 ownerLineNumber)262 static void logContentionEvent(Thread *self, u4 waitMs, u4 samplePercent,
263                                const char *ownerFileName, u4 ownerLineNumber)
264 {
265     const StackSaveArea *saveArea;
266     const Method *meth;
267     u4 relativePc;
268     char eventBuffer[174];
269     const char *fileName;
270     char procName[33];
271     char *cp;
272     size_t len;
273     int fd;
274 
275     /* When a thread is being destroyed it is normal that the frame depth is zero */
276     if (self->interpSave.curFrame == NULL) {
277         return;
278     }
279 
280     saveArea = SAVEAREA_FROM_FP(self->interpSave.curFrame);
281     meth = saveArea->method;
282     cp = eventBuffer;
283 
284     /* Emit the event list length, 1 byte. */
285     *cp++ = 9;
286 
287     /* Emit the process name, <= 37 bytes. */
288     fd = open("/proc/self/cmdline", O_RDONLY);
289     memset(procName, 0, sizeof(procName));
290     read(fd, procName, sizeof(procName) - 1);
291     close(fd);
292     len = strlen(procName);
293     cp = logWriteString(cp, procName, len);
294 
295     /* Emit the sensitive thread ("main thread") status, 5 bytes. */
296     bool isSensitive = false;
297     if (gDvm.isSensitiveThreadHook != NULL) {
298         isSensitive = gDvm.isSensitiveThreadHook();
299     }
300     cp = logWriteInt(cp, isSensitive);
301 
302     /* Emit self thread name string, <= 37 bytes. */
303     std::string selfName = dvmGetThreadName(self);
304     cp = logWriteString(cp, selfName.c_str(), selfName.size());
305 
306     /* Emit the wait time, 5 bytes. */
307     cp = logWriteInt(cp, waitMs);
308 
309     /* Emit the source code file name, <= 37 bytes. */
310     fileName = dvmGetMethodSourceFile(meth);
311     if (fileName == NULL) fileName = "";
312     cp = logWriteString(cp, fileName, strlen(fileName));
313 
314     /* Emit the source code line number, 5 bytes. */
315     relativePc = saveArea->xtra.currentPc - saveArea->method->insns;
316     cp = logWriteInt(cp, dvmLineNumFromPC(meth, relativePc));
317 
318     /* Emit the lock owner source code file name, <= 37 bytes. */
319     if (ownerFileName == NULL) {
320         ownerFileName = "";
321     } else if (strcmp(fileName, ownerFileName) == 0) {
322         /* Common case, so save on log space. */
323         ownerFileName = "-";
324     }
325     cp = logWriteString(cp, ownerFileName, strlen(ownerFileName));
326 
327     /* Emit the source code line number, 5 bytes. */
328     cp = logWriteInt(cp, ownerLineNumber);
329 
330     /* Emit the sample percentage, 5 bytes. */
331     cp = logWriteInt(cp, samplePercent);
332 
333     assert((size_t)(cp - eventBuffer) <= sizeof(eventBuffer));
334     android_btWriteLog(EVENT_LOG_TAG_dvm_lock_sample,
335                        EVENT_TYPE_LIST,
336                        eventBuffer,
337                        (size_t)(cp - eventBuffer));
338 }
339 
/*
 * Lock a monitor.
 *
 * On return the calling thread owns the monitor.  If the underlying
 * mutex cannot be acquired immediately, the thread blocks in state
 * THREAD_MONITOR and, when lock profiling is enabled, a contention
 * event may be logged for a sample of slow acquisitions.
 */
static void lockMonitor(Thread* self, Monitor* mon)
{
    ThreadStatus oldStatus;
    u4 waitThreshold, samplePercent;
    u8 waitStart, waitEnd, waitMs;

    /* Recursive acquisition by the current owner: just bump the count. */
    if (mon->owner == self) {
        mon->lockCount++;
        return;
    }
    if (dvmTryLockMutex(&mon->lock) != 0) {
        /* Contended: tell the VM we're about to block. */
        oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
        waitThreshold = gDvm.lockProfThreshold;
        if (waitThreshold) {
            waitStart = dvmGetRelativeTimeUsec();
        }

        /*
         * Snapshot the current holder's acquisition site before we
         * block; by the time we acquire the mutex these fields will
         * have been cleared or overwritten.
         */
        const Method* currentOwnerMethod = mon->ownerMethod;
        u4 currentOwnerPc = mon->ownerPc;

        dvmLockMutex(&mon->lock);
        if (waitThreshold) {
            waitEnd = dvmGetRelativeTimeUsec();
        }
        dvmChangeStatus(self, oldStatus);
        if (waitThreshold) {
            /* Sample with probability proportional to the wait time. */
            waitMs = (waitEnd - waitStart) / 1000;
            if (waitMs >= waitThreshold) {
                samplePercent = 100;
            } else {
                samplePercent = 100 * waitMs / waitThreshold;
            }
            if (samplePercent != 0 && ((u4)rand() % 100 < samplePercent)) {
                const char* currentOwnerFileName = "no_method";
                u4 currentOwnerLineNumber = 0;
                if (currentOwnerMethod != NULL) {
                    currentOwnerFileName = dvmGetMethodSourceFile(currentOwnerMethod);
                    if (currentOwnerFileName == NULL) {
                        currentOwnerFileName = "no_method_file";
                    }
                    currentOwnerLineNumber = dvmLineNumFromPC(currentOwnerMethod, currentOwnerPc);
                }
                logContentionEvent(self, waitMs, samplePercent,
                                   currentOwnerFileName, currentOwnerLineNumber);
            }
        }
    }
    mon->owner = self;
    assert(mon->lockCount == 0);

    // When debugging, save the current monitor holder for future
    // acquisition failures to use in sampled logging.
    if (gDvm.lockProfThreshold > 0) {
        mon->ownerMethod = NULL;
        mon->ownerPc = 0;
        /* A NULL frame is normal for a thread that is starting/exiting. */
        if (self->interpSave.curFrame == NULL) {
            return;
        }
        const StackSaveArea* saveArea = SAVEAREA_FROM_FP(self->interpSave.curFrame);
        if (saveArea == NULL) {
            return;
        }
        mon->ownerMethod = saveArea->method;
        mon->ownerPc = (saveArea->xtra.currentPc - saveArea->method->insns);
    }
}
409 
410 /*
411  * Try to lock a monitor.
412  *
413  * Returns "true" on success.
414  */
415 #ifdef WITH_COPYING_GC
tryLockMonitor(Thread * self,Monitor * mon)416 static bool tryLockMonitor(Thread* self, Monitor* mon)
417 {
418     if (mon->owner == self) {
419         mon->lockCount++;
420         return true;
421     } else {
422         if (dvmTryLockMutex(&mon->lock) == 0) {
423             mon->owner = self;
424             assert(mon->lockCount == 0);
425             return true;
426         } else {
427             return false;
428         }
429     }
430 }
431 #endif
432 
433 /*
434  * Unlock a monitor.
435  *
436  * Returns true if the unlock succeeded.
437  * If the unlock failed, an exception will be pending.
438  */
unlockMonitor(Thread * self,Monitor * mon)439 static bool unlockMonitor(Thread* self, Monitor* mon)
440 {
441     assert(self != NULL);
442     assert(mon != NULL);
443     if (mon->owner == self) {
444         /*
445          * We own the monitor, so nobody else can be in here.
446          */
447         if (mon->lockCount == 0) {
448             mon->owner = NULL;
449             mon->ownerMethod = NULL;
450             mon->ownerPc = 0;
451             dvmUnlockMutex(&mon->lock);
452         } else {
453             mon->lockCount--;
454         }
455     } else {
456         /*
457          * We don't own this, so we're not allowed to unlock it.
458          * The JNI spec says that we should throw IllegalMonitorStateException
459          * in this case.
460          */
461         dvmThrowIllegalMonitorStateException("unlock of unowned monitor");
462         return false;
463     }
464     return true;
465 }
466 
467 /*
468  * Checks the wait set for circular structure.  Returns 0 if the list
469  * is not circular.  Otherwise, returns 1.  Used only by asserts.
470  */
471 #ifndef NDEBUG
static int waitSetCheck(Monitor *mon)
{
    Thread *fast, *slow;
    size_t n;

    assert(mon != NULL);
    /*
     * Floyd cycle detection: "fast" advances two links per iteration
     * and "slow" one; they can only coincide again (after the start)
     * if the list is circular.
     */
    fast = slow = mon->waitSet;
    n = 0;
    for (;;) {
        if (fast == NULL) return 0;
        if (fast->waitNext == NULL) return 0;
        /* n > 0 excludes the trivial match at the starting node. */
        if (fast == slow && n > 0) return 1;
        n += 2;
        fast = fast->waitNext->waitNext;
        slow = slow->waitNext;
    }
}
489 #endif
490 
491 /*
492  * Links a thread into a monitor's wait set.  The monitor lock must be
493  * held by the caller of this routine.
494  */
waitSetAppend(Monitor * mon,Thread * thread)495 static void waitSetAppend(Monitor *mon, Thread *thread)
496 {
497     Thread *elt;
498 
499     assert(mon != NULL);
500     assert(mon->owner == dvmThreadSelf());
501     assert(thread != NULL);
502     assert(thread->waitNext == NULL);
503     assert(waitSetCheck(mon) == 0);
504     if (mon->waitSet == NULL) {
505         mon->waitSet = thread;
506         return;
507     }
508     elt = mon->waitSet;
509     while (elt->waitNext != NULL) {
510         elt = elt->waitNext;
511     }
512     elt->waitNext = thread;
513 }
514 
515 /*
516  * Unlinks a thread from a monitor's wait set.  The monitor lock must
517  * be held by the caller of this routine.
518  */
waitSetRemove(Monitor * mon,Thread * thread)519 static void waitSetRemove(Monitor *mon, Thread *thread)
520 {
521     Thread *elt;
522 
523     assert(mon != NULL);
524     assert(mon->owner == dvmThreadSelf());
525     assert(thread != NULL);
526     assert(waitSetCheck(mon) == 0);
527     if (mon->waitSet == NULL) {
528         return;
529     }
530     if (mon->waitSet == thread) {
531         mon->waitSet = thread->waitNext;
532         thread->waitNext = NULL;
533         return;
534     }
535     elt = mon->waitSet;
536     while (elt->waitNext != NULL) {
537         if (elt->waitNext == thread) {
538             elt->waitNext = thread->waitNext;
539             thread->waitNext = NULL;
540             return;
541         }
542         elt = elt->waitNext;
543     }
544 }
545 
/*
 * Converts the given relative waiting time into an absolute deadline:
 * *ts receives "now + msec ms + nsec ns".
 *
 * The clock sampled matches what the timed-wait calls expect:
 * CLOCK_MONOTONIC when HAVE_TIMEDWAIT_MONOTONIC is defined, otherwise
 * wall-clock time.  Deadlines past the 32-bit time epoch are clamped
 * (waking "early" is permitted by the wait spec).
 */
static void absoluteTime(s8 msec, s4 nsec, struct timespec *ts)
{
    s8 endSec;

#ifdef HAVE_TIMEDWAIT_MONOTONIC
    clock_gettime(CLOCK_MONOTONIC, ts);
#else
    {
        struct timeval tv;
        gettimeofday(&tv, NULL);
        ts->tv_sec = tv.tv_sec;
        ts->tv_nsec = tv.tv_usec * 1000;
    }
#endif
    endSec = ts->tv_sec + msec / 1000;
    if (endSec >= 0x7fffffff) {
        ALOGV("NOTE: end time exceeds epoch");
        endSec = 0x7ffffffe;
    }
    ts->tv_sec = endSec;
    ts->tv_nsec = (ts->tv_nsec + (msec % 1000) * 1000000) + nsec;

    /* catch rollover */
    /*
     * A single correction suffices: assuming nsec <= 999999 (enforced
     * by waitMonitor; other callers presumably honor the same range)
     * the sum stays below two full seconds' worth of nanoseconds.
     */
    if (ts->tv_nsec >= 1000000000L) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000L;
    }
}
577 
dvmRelativeCondWait(pthread_cond_t * cond,pthread_mutex_t * mutex,s8 msec,s4 nsec)578 int dvmRelativeCondWait(pthread_cond_t* cond, pthread_mutex_t* mutex,
579                         s8 msec, s4 nsec)
580 {
581     int ret;
582     struct timespec ts;
583     absoluteTime(msec, nsec, &ts);
584 #if defined(HAVE_TIMEDWAIT_MONOTONIC)
585     ret = pthread_cond_timedwait_monotonic(cond, mutex, &ts);
586 #else
587     ret = pthread_cond_timedwait(cond, mutex, &ts);
588 #endif
589     assert(ret == 0 || ret == ETIMEDOUT);
590     return ret;
591 }
592 
/*
 * Wait on a monitor until timeout, interrupt, or notification.  Used for
 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
 *
 * If another thread calls Thread.interrupt(), we throw InterruptedException
 * and return immediately if one of the following are true:
 *  - blocked in wait(), wait(long), or wait(long, int) methods of Object
 *  - blocked in join(), join(long), or join(long, int) methods of Thread
 *  - blocked in sleep(long), or sleep(long, int) methods of Thread
 * Otherwise, we set the "interrupted" flag.
 *
 * Checks to make sure that "nsec" is in the range 0-999999
 * (i.e. fractions of a millisecond) and throws the appropriate
 * exception if it isn't.
 *
 * The spec allows "spurious wakeups", and recommends that all code using
 * Object.wait() do so in a loop.  This appears to derive from concerns
 * about pthread_cond_wait() on multiprocessor systems.  Some commentary
 * on the web casts doubt on whether these can/should occur.
 *
 * Since we're allowed to wake up "early", we clamp extremely long durations
 * to return at the end of the 32-bit time epoch.
 */
static void waitMonitor(Thread* self, Monitor* mon, s8 msec, s4 nsec,
    bool interruptShouldThrow)
{
    struct timespec ts;
    bool wasInterrupted = false;
    bool timed;
    int ret;

    assert(self != NULL);
    assert(mon != NULL);

    /* Make sure that we hold the lock. */
    if (mon->owner != self) {
        dvmThrowIllegalMonitorStateException(
            "object not locked by thread before wait()");
        return;
    }

    /*
     * Enforce the timeout range.
     */
    if (msec < 0 || nsec < 0 || nsec > 999999) {
        dvmThrowIllegalArgumentException("timeout arguments out of range");
        return;
    }

    /*
     * Compute absolute wakeup time, if necessary.  msec == 0 && nsec == 0
     * means "wait forever".
     */
    if (msec == 0 && nsec == 0) {
        timed = false;
    } else {
        absoluteTime(msec, nsec, &ts);
        timed = true;
    }

    /*
     * Add ourselves to the set of threads waiting on this monitor, and
     * release our hold.  We need to let it go even if we're a few levels
     * deep in a recursive lock, and we need to restore that later.
     *
     * We append to the wait set ahead of clearing the count and owner
     * fields so the subroutine can check that the calling thread owns
     * the monitor.  Aside from that, the order of member updates is
     * not order sensitive as we hold the pthread mutex.
     */
    waitSetAppend(mon, self);
    int prevLockCount = mon->lockCount;
    mon->lockCount = 0;
    mon->owner = NULL;

    /* Save the lock-profiling fields too; restored at "done" below. */
    const Method* savedMethod = mon->ownerMethod;
    u4 savedPc = mon->ownerPc;
    mon->ownerMethod = NULL;
    mon->ownerPc = 0;

    /*
     * Update thread status.  If the GC wakes up, it'll ignore us, knowing
     * that we won't touch any references in this state, and we'll check
     * our suspend mode before we transition out.
     */
    if (timed)
        dvmChangeStatus(self, THREAD_TIMED_WAIT);
    else
        dvmChangeStatus(self, THREAD_WAIT);

    dvmLockMutex(&self->waitMutex);

    /*
     * Set waitMonitor to the monitor object we will be waiting on.
     * When waitMonitor is non-NULL a notifying or interrupting thread
     * must signal the thread's waitCond to wake it up.
     */
    assert(self->waitMonitor == NULL);
    self->waitMonitor = mon;

    /*
     * Handle the case where the thread was interrupted before we called
     * wait().  Skips the condition wait entirely (goto done).
     */
    if (self->interrupted) {
        wasInterrupted = true;
        self->waitMonitor = NULL;
        dvmUnlockMutex(&self->waitMutex);
        goto done;
    }

    /*
     * Release the monitor lock and wait for a notification or
     * a timeout to occur.
     */
    dvmUnlockMutex(&mon->lock);

    if (!timed) {
        ret = pthread_cond_wait(&self->waitCond, &self->waitMutex);
        assert(ret == 0);
    } else {
#ifdef HAVE_TIMEDWAIT_MONOTONIC
        ret = pthread_cond_timedwait_monotonic(&self->waitCond, &self->waitMutex, &ts);
#else
        ret = pthread_cond_timedwait(&self->waitCond, &self->waitMutex, &ts);
#endif
        /* ETIMEDOUT is a normal outcome here, not an error. */
        assert(ret == 0 || ret == ETIMEDOUT);
    }
    if (self->interrupted) {
        wasInterrupted = true;
    }

    /* The wait is over; clear the per-wait state under waitMutex. */
    self->interrupted = false;
    self->waitMonitor = NULL;

    dvmUnlockMutex(&self->waitMutex);

    /* Reacquire the monitor lock. */
    lockMonitor(self, mon);

done:
    /*
     * We remove our thread from wait set after restoring the count
     * and owner fields so the subroutine can check that the calling
     * thread owns the monitor. Aside from that, the order of member
     * updates is not order sensitive as we hold the pthread mutex.
     */
    mon->owner = self;
    mon->lockCount = prevLockCount;
    mon->ownerMethod = savedMethod;
    mon->ownerPc = savedPc;
    waitSetRemove(mon, self);

    /* set self->status back to THREAD_RUNNING, and self-suspend if needed */
    dvmChangeStatus(self, THREAD_RUNNING);

    if (wasInterrupted) {
        /*
         * We were interrupted while waiting, or somebody interrupted an
         * un-interruptible thread earlier and we're bailing out immediately.
         *
         * The doc sayeth: "The interrupted status of the current thread is
         * cleared when this exception is thrown."
         */
        self->interrupted = false;
        if (interruptShouldThrow) {
            dvmThrowInterruptedException(NULL);
        }
    }
}
762 
763 /*
764  * Notify one thread waiting on this monitor.
765  */
notifyMonitor(Thread * self,Monitor * mon)766 static void notifyMonitor(Thread* self, Monitor* mon)
767 {
768     Thread* thread;
769 
770     assert(self != NULL);
771     assert(mon != NULL);
772 
773     /* Make sure that we hold the lock. */
774     if (mon->owner != self) {
775         dvmThrowIllegalMonitorStateException(
776             "object not locked by thread before notify()");
777         return;
778     }
779     /* Signal the first waiting thread in the wait set. */
780     while (mon->waitSet != NULL) {
781         thread = mon->waitSet;
782         mon->waitSet = thread->waitNext;
783         thread->waitNext = NULL;
784         dvmLockMutex(&thread->waitMutex);
785         /* Check to see if the thread is still waiting. */
786         if (thread->waitMonitor != NULL) {
787             pthread_cond_signal(&thread->waitCond);
788             dvmUnlockMutex(&thread->waitMutex);
789             return;
790         }
791         dvmUnlockMutex(&thread->waitMutex);
792     }
793 }
794 
795 /*
796  * Notify all threads waiting on this monitor.
797  */
notifyAllMonitor(Thread * self,Monitor * mon)798 static void notifyAllMonitor(Thread* self, Monitor* mon)
799 {
800     Thread* thread;
801 
802     assert(self != NULL);
803     assert(mon != NULL);
804 
805     /* Make sure that we hold the lock. */
806     if (mon->owner != self) {
807         dvmThrowIllegalMonitorStateException(
808             "object not locked by thread before notifyAll()");
809         return;
810     }
811     /* Signal all threads in the wait set. */
812     while (mon->waitSet != NULL) {
813         thread = mon->waitSet;
814         mon->waitSet = thread->waitNext;
815         thread->waitNext = NULL;
816         dvmLockMutex(&thread->waitMutex);
817         /* Check to see if the thread is still waiting. */
818         if (thread->waitMonitor != NULL) {
819             pthread_cond_signal(&thread->waitCond);
820         }
821         dvmUnlockMutex(&thread->waitMutex);
822     }
823 }
824 
/*
 * Changes the shape of a monitor from thin to fat, preserving the
 * internal lock state.  The calling thread must own the lock.
 */
static void inflateMonitor(Thread *self, Object *obj)
{
    Monitor *mon;
    u4 thin;

    assert(self != NULL);
    assert(obj != NULL);
    assert(LW_SHAPE(obj->lock) == LW_SHAPE_THIN);
    assert(LW_LOCK_OWNER(obj->lock) == self->threadId);
    /* Allocate and acquire a new monitor. */
    mon = dvmCreateMonitor(obj);
    lockMonitor(self, mon);
    /* Propagate the lock state. */
    thin = obj->lock;
    /* Carry the thin lock's recursion depth over to the monitor. */
    mon->lockCount = LW_LOCK_COUNT(thin);
    /* Keep only the hash-state bits of the old lock word... */
    thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
    /* ...and fold in the monitor pointer plus the "fat" shape bit. */
    thin |= (u4)mon | LW_SHAPE_FAT;
    /* Publish the updated lock word. */
    /* The release store makes the initialized monitor visible before
     * any thread can observe the fat lock word. */
    android_atomic_release_store(thin, (int32_t *)&obj->lock);
}
849 
850 /*
851  * Implements monitorenter for "synchronized" stuff.
852  *
853  * This does not fail or throw an exception (unless deadlock prediction
854  * is enabled and set to "err" mode).
855  */
dvmLockObject(Thread * self,Object * obj)856 void dvmLockObject(Thread* self, Object *obj)
857 {
858     volatile u4 *thinp;
859     ThreadStatus oldStatus;
860     struct timespec tm;
861     long sleepDelayNs;
862     long minSleepDelayNs = 1000000;  /* 1 millisecond */
863     long maxSleepDelayNs = 1000000000;  /* 1 second */
864     u4 thin, newThin, threadId;
865 
866     assert(self != NULL);
867     assert(obj != NULL);
868     threadId = self->threadId;
869     thinp = &obj->lock;
870 retry:
871     thin = *thinp;
872     if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
873         /*
874          * The lock is a thin lock.  The owner field is used to
875          * determine the acquire method, ordered by cost.
876          */
877         if (LW_LOCK_OWNER(thin) == threadId) {
878             /*
879              * The calling thread owns the lock.  Increment the
880              * value of the recursion count field.
881              */
882             obj->lock += 1 << LW_LOCK_COUNT_SHIFT;
883             if (LW_LOCK_COUNT(obj->lock) == LW_LOCK_COUNT_MASK) {
884                 /*
885                  * The reacquisition limit has been reached.  Inflate
886                  * the lock so the next acquire will not overflow the
887                  * recursion count field.
888                  */
889                 inflateMonitor(self, obj);
890             }
891         } else if (LW_LOCK_OWNER(thin) == 0) {
892             /*
893              * The lock is unowned.  Install the thread id of the
894              * calling thread into the owner field.  This is the
895              * common case.  In performance critical code the JIT
896              * will have tried this before calling out to the VM.
897              */
898             newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
899             if (android_atomic_acquire_cas(thin, newThin,
900                     (int32_t*)thinp) != 0) {
901                 /*
902                  * The acquire failed.  Try again.
903                  */
904                 goto retry;
905             }
906         } else {
907             ALOGV("(%d) spin on lock %p: %#x (%#x) %#x",
908                  threadId, &obj->lock, 0, *thinp, thin);
909             /*
910              * The lock is owned by another thread.  Notify the VM
911              * that we are about to wait.
912              */
913             oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
914             /*
915              * Spin until the thin lock is released or inflated.
916              */
917             sleepDelayNs = 0;
918             for (;;) {
919                 thin = *thinp;
920                 /*
921                  * Check the shape of the lock word.  Another thread
922                  * may have inflated the lock while we were waiting.
923                  */
924                 if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
925                     if (LW_LOCK_OWNER(thin) == 0) {
926                         /*
927                          * The lock has been released.  Install the
928                          * thread id of the calling thread into the
929                          * owner field.
930                          */
931                         newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
932                         if (android_atomic_acquire_cas(thin, newThin,
933                                 (int32_t *)thinp) == 0) {
934                             /*
935                              * The acquire succeed.  Break out of the
936                              * loop and proceed to inflate the lock.
937                              */
938                             break;
939                         }
940                     } else {
941                         /*
942                          * The lock has not been released.  Yield so
943                          * the owning thread can run.
944                          */
945                         if (sleepDelayNs == 0) {
946                             sched_yield();
947                             sleepDelayNs = minSleepDelayNs;
948                         } else {
949                             tm.tv_sec = 0;
950                             tm.tv_nsec = sleepDelayNs;
951                             nanosleep(&tm, NULL);
952                             /*
953                              * Prepare the next delay value.  Wrap to
954                              * avoid once a second polls for eternity.
955                              */
956                             if (sleepDelayNs < maxSleepDelayNs / 2) {
957                                 sleepDelayNs *= 2;
958                             } else {
959                                 sleepDelayNs = minSleepDelayNs;
960                             }
961                         }
962                     }
963                 } else {
964                     /*
965                      * The thin lock was inflated by another thread.
966                      * Let the VM know we are no longer waiting and
967                      * try again.
968                      */
969                     ALOGV("(%d) lock %p surprise-fattened",
970                              threadId, &obj->lock);
971                     dvmChangeStatus(self, oldStatus);
972                     goto retry;
973                 }
974             }
975             ALOGV("(%d) spin on lock done %p: %#x (%#x) %#x",
976                  threadId, &obj->lock, 0, *thinp, thin);
977             /*
978              * We have acquired the thin lock.  Let the VM know that
979              * we are no longer waiting.
980              */
981             dvmChangeStatus(self, oldStatus);
982             /*
983              * Fatten the lock.
984              */
985             inflateMonitor(self, obj);
986             ALOGV("(%d) lock %p fattened", threadId, &obj->lock);
987         }
988     } else {
989         /*
990          * The lock is a fat lock.
991          */
992         assert(LW_MONITOR(obj->lock) != NULL);
993         lockMonitor(self, LW_MONITOR(obj->lock));
994     }
995 }
996 
997 /*
998  * Implements monitorexit for "synchronized" stuff.
999  *
1000  * On failure, throws an exception and returns "false".
1001  */
dvmUnlockObject(Thread * self,Object * obj)1002 bool dvmUnlockObject(Thread* self, Object *obj)
1003 {
1004     u4 thin;
1005 
1006     assert(self != NULL);
1007     assert(self->status == THREAD_RUNNING);
1008     assert(obj != NULL);
1009     /*
1010      * Cache the lock word as its value can change while we are
1011      * examining its state.
1012      */
1013     thin = *(volatile u4 *)&obj->lock;
1014     if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
1015         /*
1016          * The lock is thin.  We must ensure that the lock is owned
1017          * by the given thread before unlocking it.
1018          */
1019         if (LW_LOCK_OWNER(thin) == self->threadId) {
1020             /*
1021              * We are the lock owner.  It is safe to update the lock
1022              * without CAS as lock ownership guards the lock itself.
1023              */
1024             if (LW_LOCK_COUNT(thin) == 0) {
1025                 /*
1026                  * The lock was not recursively acquired, the common
1027                  * case.  Unlock by clearing all bits except for the
1028                  * hash state.
1029                  */
1030                 thin &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
1031                 android_atomic_release_store(thin, (int32_t*)&obj->lock);
1032             } else {
1033                 /*
1034                  * The object was recursively acquired.  Decrement the
1035                  * lock recursion count field.
1036                  */
1037                 obj->lock -= 1 << LW_LOCK_COUNT_SHIFT;
1038             }
1039         } else {
1040             /*
1041              * We do not own the lock.  The JVM spec requires that we
1042              * throw an exception in this case.
1043              */
1044             dvmThrowIllegalMonitorStateException("unlock of unowned monitor");
1045             return false;
1046         }
1047     } else {
1048         /*
1049          * The lock is fat.  We must check to see if unlockMonitor has
1050          * raised any exceptions before continuing.
1051          */
1052         assert(LW_MONITOR(obj->lock) != NULL);
1053         if (!unlockMonitor(self, LW_MONITOR(obj->lock))) {
1054             /*
1055              * An exception has been raised.  Do not fall through.
1056              */
1057             return false;
1058         }
1059     }
1060     return true;
1061 }
1062 
1063 /*
1064  * Object.wait().  Also called for class init.
1065  */
void dvmObjectWait(Thread* self, Object *obj, s8 msec, s4 nsec,
    bool interruptShouldThrow)
{
    u4 lockWord = *(volatile u4 *)&obj->lock;

    /*
     * Threads can only block on a fat lock, so a thin lock must be
     * inflated before waiting.
     */
    if (LW_SHAPE(lockWord) == LW_SHAPE_THIN) {
        /* The caller must hold the lock before it can wait on it. */
        if (LW_LOCK_OWNER(lockWord) != self->threadId) {
            dvmThrowIllegalMonitorStateException(
                "object not locked by thread before wait()");
            return;
        }

        /*
         * Inflate without publishing an unlocked lock word so that no
         * other thread can grab the monitor before 'self' blocks on it.
         */
        inflateMonitor(self, obj);
        ALOGV("(%d) lock %p fattened by wait()", self->threadId, &obj->lock);
    }

    waitMonitor(self, LW_MONITOR(obj->lock), msec, nsec,
                interruptShouldThrow);
}
1094 
1095 /*
1096  * Object.notify().
1097  */
void dvmObjectNotify(Thread* self, Object *obj)
{
    u4 lockWord = *(volatile u4 *)&obj->lock;

    if (LW_SHAPE(lockWord) != LW_SHAPE_THIN) {
        /* Fat lock: wake one waiter on the monitor. */
        notifyMonitor(self, LW_MONITOR(lockWord));
        return;
    }

    /*
     * Thin lock: wait() always inflates the lock, so a thin lock can
     * have no waiters and the notify is a no-op — but ownership must
     * still be checked, as the spec requires.
     */
    if (LW_LOCK_OWNER(lockWord) != self->threadId) {
        dvmThrowIllegalMonitorStateException(
            "object not locked by thread before notify()");
    }
}
1122 
1123 /*
1124  * Object.notifyAll().
1125  */
void dvmObjectNotifyAll(Thread* self, Object *obj)
{
    u4 lockWord = *(volatile u4 *)&obj->lock;

    if (LW_SHAPE(lockWord) != LW_SHAPE_THIN) {
        /* Fat lock: wake every waiter on the monitor. */
        notifyAllMonitor(self, LW_MONITOR(lockWord));
        return;
    }

    /*
     * Thin lock: wait() always inflates the lock, so a thin lock can
     * have no waiters and the notifyAll is a no-op — but ownership
     * must still be checked, as the spec requires.
     */
    if (LW_LOCK_OWNER(lockWord) != self->threadId) {
        dvmThrowIllegalMonitorStateException(
            "object not locked by thread before notifyAll()");
    }
}
1150 
1151 /*
1152  * This implements java.lang.Thread.sleep(long msec, int nsec).
1153  *
1154  * The sleep is interruptible by other threads, which means we can't just
1155  * plop into an OS sleep call.  (We probably could if we wanted to send
1156  * signals around and rely on EINTR, but that's inefficient and relies
1157  * on native code respecting our signal mask.)
1158  *
1159  * We have to do all of this stuff for Object.wait() as well, so it's
1160  * easiest to just sleep on a private Monitor.
1161  *
1162  * It appears that we want sleep(0,0) to go through the motions of sleeping
1163  * for a very short duration, rather than just returning.
1164  */
void dvmThreadSleep(u8 msec, u4 nsec)
{
    Thread* self = dvmThreadSelf();
    Monitor* sleepMon = gDvm.threadSleepMon;

    /*
     * wait(0,0) means "wait forever", but sleep(0,0) is expected to
     * return after a minimal pause; nudge the duration to 1 ns.
     */
    if (msec == 0 && nsec == 0) {
        nsec = 1;
    }

    /*
     * Sleep by waiting on a VM-private monitor; this keeps the sleep
     * interruptible through the normal wait/interrupt machinery.
     */
    lockMonitor(self, sleepMon);
    waitMonitor(self, sleepMon, msec, nsec, true);
    unlockMonitor(self, sleepMon);
}
1178 
1179 /*
1180  * Implement java.lang.Thread.interrupt().
1181  */
void dvmThreadInterrupt(Thread* thread)
{
    assert(thread != NULL);

    dvmLockMutex(&thread->waitMutex);

    /*
     * Interrupting a thread whose interrupted flag is already raised
     * requires no further action.
     */
    if (!thread->interrupted) {
        /*
         * Raise the flag.  If the thread is not currently waiting,
         * this makes its next wait() attempt bail out early.
         */
        thread->interrupted = true;

        /*
         * If the thread is blocked in a wait, wake it.  waitMonitor is
         * only set while a thread actually waits on a monitor, which
         * implies the monitor is already fat — so lock shape is
         * irrelevant here.
         */
        if (thread->waitMonitor != NULL) {
            pthread_cond_signal(&thread->waitCond);
        }
    }

    dvmUnlockMutex(&thread->waitMutex);
}
1217 
1218 #ifndef WITH_COPYING_GC
dvmIdentityHashCode(Object * obj)1219 u4 dvmIdentityHashCode(Object *obj)
1220 {
1221     return (u4)obj;
1222 }
1223 #else
1224 /*
1225  * Returns the identity hash code of the given object.
1226  */
u4 dvmIdentityHashCode(Object *obj)
{
    Thread *self, *thread;
    volatile u4 *lw;
    size_t size;
    u4 lock, owner, hashState;

    if (obj == NULL) {
        /*
         * Null is defined to have an identity hash code of 0.
         */
        return 0;
    }
    lw = &obj->lock;
retry:
    hashState = LW_HASH_STATE(*lw);
    if (hashState == LW_HASH_STATE_HASHED) {
        /*
         * The object has been hashed but has not had its hash code
         * relocated by the garbage collector.  Use the raw object
         * address.
         */
        /* The low bits are discarded; presumably objects are 8-byte
         * aligned so they carry no information — TODO confirm. */
        return (u4)obj >> 3;
    } else if (hashState == LW_HASH_STATE_HASHED_AND_MOVED) {
        /*
         * The object has been hashed and its hash code has been
         * relocated by the collector.  Use the value of the naturally
         * aligned word following the instance data.
         */
        assert(!dvmIsClassObject(obj));
        if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) {
            size = dvmArrayObjectSize((ArrayObject *)obj);
            /* NOTE(review): this only clears bit 1; a conventional
             * 4-byte round-up would be (size + 3) & ~3.  Confirm the
             * intended alignment of the trailing hash word. */
            size = (size + 2) & ~2;
        } else {
            size = obj->clazz->objectSize;
        }
        return *(u4 *)(((char *)obj) + size);
    } else if (hashState == LW_HASH_STATE_UNHASHED) {
        /*
         * The object has never been hashed.  Change the hash state to
         * hashed and use the raw object address.
         */
        self = dvmThreadSelf();
        if (self->threadId == lockOwner(obj)) {
            /*
             * We already own the lock so we can update the hash state
             * directly.
             */
            *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
            return (u4)obj >> 3;
        }
        /*
         * We do not own the lock.  Try acquiring the lock.  Should
         * this fail, we must suspend the owning thread.
         */
        if (LW_SHAPE(*lw) == LW_SHAPE_THIN) {
            /*
             * If the lock is thin assume it is unowned.  We simulate
             * an acquire, update, and release with a single CAS.
             */
            /* The CAS only succeeds if the lock word is exactly 0,
             * i.e. thin, unowned, and unhashed. */
            lock = (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
            if (android_atomic_acquire_cas(
                                0,
                                (int32_t)lock,
                                (int32_t *)lw) == 0) {
                /*
                 * A new lockword has been installed with a hash state
                 * of hashed.  Use the raw object address.
                 */
                return (u4)obj >> 3;
            }
        } else {
            if (tryLockMonitor(self, LW_MONITOR(*lw))) {
                /*
                 * The monitor lock has been acquired.  Change the
                 * hash state to hashed and use the raw object
                 * address.
                 */
                *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
                unlockMonitor(self, LW_MONITOR(*lw));
                return (u4)obj >> 3;
            }
        }
        /*
         * At this point we have failed to acquire the lock.  We must
         * identify the owning thread and suspend it.
         */
        dvmLockThreadList(self);
        /*
         * Cache the lock word as its value can change between
         * determining its shape and retrieving its owner.
         */
        lock = *lw;
        if (LW_SHAPE(lock) == LW_SHAPE_THIN) {
            /*
             * Find the thread with the corresponding thread id.
             */
            owner = LW_LOCK_OWNER(lock);
            assert(owner != self->threadId);
            /*
             * If the lock has no owner do not bother scanning the
             * thread list and fall through to the failure handler.
             */
            thread = owner ? gDvm.threadList : NULL;
            while (thread != NULL) {
                if (thread->threadId == owner) {
                    break;
                }
                thread = thread->next;
            }
        } else {
            thread = LW_MONITOR(lock)->owner;
        }
        /*
         * If thread is NULL the object has been released since the
         * thread list lock was acquired.  Try again.
         */
        if (thread == NULL) {
            dvmUnlockThreadList();
            goto retry;
        }
        /*
         * Wait for the owning thread to suspend.
         */
        dvmSuspendThread(thread);
        /* Re-check ownership: the lock may have changed hands between
         * reading the owner and the suspension taking effect. */
        if (dvmHoldsLock(thread, obj)) {
            /*
             * The owning thread has been suspended.  We can safely
             * change the hash state to hashed.
             */
            *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
            dvmResumeThread(thread);
            dvmUnlockThreadList();
            return (u4)obj >> 3;
        }
        /*
         * The wrong thread has been suspended.  Try again.
         */
        dvmResumeThread(thread);
        dvmUnlockThreadList();
        goto retry;
    }
    ALOGE("object %p has an unknown hash state %#x", obj, hashState);
    dvmDumpThread(dvmThreadSelf(), false);
    dvmAbort();
    return 0;  /* Quiet the compiler. */
}
1374 #endif  /* WITH_COPYING_GC */
1375