/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17 #include "Dalvik.h"
18
19 #include <fcntl.h>
20 #include <stdlib.h>
21 #include <unistd.h>
22 #include <pthread.h>
23 #include <time.h>
24 #include <errno.h>
25
/*
 * Every Object has a monitor associated with it, but not every Object is
 * actually locked.  Even the ones that are locked do not need a
 * full-fledged monitor until a) there is actual contention or b) wait()
 * is called on the Object.
 *
 * For Dalvik, we have implemented a scheme similar to the one described
 * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
 * (ACM 1998).  Things are even easier for us, though, because we have
 * a full 32 bits to work with.
 *
 * The two states of an Object's lock are referred to as "thin" and
 * "fat".  A lock may transition from the "thin" state to the "fat"
 * state and this transition is referred to as inflation.  Once a lock
 * has been inflated it remains in the "fat" state indefinitely.
 *
 * The lock value itself is stored in Object.lock.  The LSB of the
 * lock encodes its state.  When cleared, the lock is in the "thin"
 * state and its bits are formatted as follows:
 *
 *    [31 ---- 19] [18 ---- 3] [2 ---- 1] [0]
 *     lock count   thread id  hash state  0
 *
 * When set, the lock is in the "fat" state and its bits are formatted
 * as follows:
 *
 *    [31 ---- 3] [2 ---- 1] [0]
 *      pointer   hash state  1
 *
 * For an in-depth description of the mechanics of thin-vs-fat locking,
 * read the paper referred to above.
 */
58
/*
 * Monitors provide:
 *  - mutually exclusive access to resources
 *  - a way for multiple threads to wait for notification
 *
 * In effect, they fill the role of both mutexes and condition variables.
 *
 * Only one thread can own the monitor at any time.  There may be several
 * threads waiting on it (the wait call unlocks it).  One or more waiting
 * threads may be getting interrupted or notified at any given time.
 *
 * TODO: the various members of monitor are not SMP-safe.
 */
struct Monitor {
    Thread*     owner;          /* which thread currently owns the lock? */
    int         lockCount;      /* owner's recursive lock depth */
    Object*     obj;            /* what object are we part of [debug only] */

    Thread*     waitSet;        /* threads currently waiting on this monitor */

    pthread_mutex_t lock;       /* the mutex that actually provides exclusion */

    Monitor*    next;           /* link in gDvm.monitorList (for sweeping) */

    /*
     * Who last acquired this monitor, when lock sampling is enabled.
     * Even when enabled, ownerFileName may be NULL.
     */
    const char* ownerFileName;
    u4          ownerLineNumber;
};
90
91
92 /*
93 * Create and initialize a monitor.
94 */
dvmCreateMonitor(Object * obj)95 Monitor* dvmCreateMonitor(Object* obj)
96 {
97 Monitor* mon;
98
99 mon = (Monitor*) calloc(1, sizeof(Monitor));
100 if (mon == NULL) {
101 LOGE("Unable to allocate monitor");
102 dvmAbort();
103 }
104 if (((u4)mon & 7) != 0) {
105 LOGE("Misaligned monitor: %p", mon);
106 dvmAbort();
107 }
108 mon->obj = obj;
109 dvmInitMutex(&mon->lock);
110
111 /* replace the head of the list with the new monitor */
112 do {
113 mon->next = gDvm.monitorList;
114 } while (android_atomic_release_cas((int32_t)mon->next, (int32_t)mon,
115 (int32_t*)(void*)&gDvm.monitorList) != 0);
116
117 return mon;
118 }
119
120 /*
121 * Free the monitor list. Only used when shutting the VM down.
122 */
dvmFreeMonitorList()123 void dvmFreeMonitorList()
124 {
125 Monitor* mon;
126 Monitor* nextMon;
127
128 mon = gDvm.monitorList;
129 while (mon != NULL) {
130 nextMon = mon->next;
131 free(mon);
132 mon = nextMon;
133 }
134 }
135
136 /*
137 * Get the object that a monitor is part of.
138 */
dvmGetMonitorObject(Monitor * mon)139 Object* dvmGetMonitorObject(Monitor* mon)
140 {
141 if (mon == NULL)
142 return NULL;
143 else
144 return mon->obj;
145 }
146
147 /*
148 * Returns the thread id of the thread owning the given lock.
149 */
lockOwner(Object * obj)150 static u4 lockOwner(Object* obj)
151 {
152 Thread *owner;
153 u4 lock;
154
155 assert(obj != NULL);
156 /*
157 * Since we're reading the lock value multiple times, latch it so
158 * that it doesn't change out from under us if we get preempted.
159 */
160 lock = obj->lock;
161 if (LW_SHAPE(lock) == LW_SHAPE_THIN) {
162 return LW_LOCK_OWNER(lock);
163 } else {
164 owner = LW_MONITOR(lock)->owner;
165 return owner ? owner->threadId : 0;
166 }
167 }
168
169 /*
170 * Get the thread that holds the lock on the specified object. The
171 * object may be unlocked, thin-locked, or fat-locked.
172 *
173 * The caller must lock the thread list before calling here.
174 */
dvmGetObjectLockHolder(Object * obj)175 Thread* dvmGetObjectLockHolder(Object* obj)
176 {
177 u4 threadId = lockOwner(obj);
178
179 if (threadId == 0)
180 return NULL;
181 return dvmGetThreadByThreadId(threadId);
182 }
183
184 /*
185 * Checks whether the given thread holds the given
186 * objects's lock.
187 */
dvmHoldsLock(Thread * thread,Object * obj)188 bool dvmHoldsLock(Thread* thread, Object* obj)
189 {
190 if (thread == NULL || obj == NULL) {
191 return false;
192 } else {
193 return thread->threadId == lockOwner(obj);
194 }
195 }
196
197 /*
198 * Free the monitor associated with an object and make the object's lock
199 * thin again. This is called during garbage collection.
200 */
freeMonitor(Monitor * mon)201 static void freeMonitor(Monitor *mon)
202 {
203 assert(mon != NULL);
204 assert(mon->obj != NULL);
205 assert(LW_SHAPE(mon->obj->lock) == LW_SHAPE_FAT);
206
207 /* This lock is associated with an object
208 * that's being swept. The only possible way
209 * anyone could be holding this lock would be
210 * if some JNI code locked but didn't unlock
211 * the object, in which case we've got some bad
212 * native code somewhere.
213 */
214 assert(pthread_mutex_trylock(&mon->lock) == 0);
215 assert(pthread_mutex_unlock(&mon->lock) == 0);
216 dvmDestroyMutex(&mon->lock);
217 free(mon);
218 }
219
220 /*
221 * Frees monitor objects belonging to unmarked objects.
222 */
dvmSweepMonitorList(Monitor ** mon,int (* isUnmarkedObject)(void *))223 void dvmSweepMonitorList(Monitor** mon, int (*isUnmarkedObject)(void*))
224 {
225 Monitor handle;
226 Monitor *prev, *curr;
227 Object *obj;
228
229 assert(mon != NULL);
230 assert(isUnmarkedObject != NULL);
231 prev = &handle;
232 prev->next = curr = *mon;
233 while (curr != NULL) {
234 obj = curr->obj;
235 if (obj != NULL && (*isUnmarkedObject)(obj) != 0) {
236 prev->next = curr->next;
237 freeMonitor(curr);
238 curr = prev->next;
239 } else {
240 prev = curr;
241 curr = curr->next;
242 }
243 }
244 *mon = handle.next;
245 }
246
logWriteInt(char * dst,int value)247 static char *logWriteInt(char *dst, int value)
248 {
249 *dst++ = EVENT_TYPE_INT;
250 set4LE((u1 *)dst, value);
251 return dst + 4;
252 }
253
logWriteString(char * dst,const char * value,size_t len)254 static char *logWriteString(char *dst, const char *value, size_t len)
255 {
256 *dst++ = EVENT_TYPE_STRING;
257 len = len < 32 ? len : 32;
258 set4LE((u1 *)dst, len);
259 dst += 4;
260 memcpy(dst, value, len);
261 return dst + len;
262 }
263
264 #define EVENT_LOG_TAG_dvm_lock_sample 20003
265
logContentionEvent(Thread * self,u4 waitMs,u4 samplePercent,const char * ownerFileName,u4 ownerLineNumber)266 static void logContentionEvent(Thread *self, u4 waitMs, u4 samplePercent,
267 const char *ownerFileName, u4 ownerLineNumber)
268 {
269 const StackSaveArea *saveArea;
270 const Method *meth;
271 u4 relativePc;
272 char eventBuffer[174];
273 const char *fileName;
274 char procName[33];
275 char *cp;
276 size_t len;
277 int fd;
278
279 saveArea = SAVEAREA_FROM_FP(self->interpSave.curFrame);
280 meth = saveArea->method;
281 cp = eventBuffer;
282
283 /* Emit the event list length, 1 byte. */
284 *cp++ = 9;
285
286 /* Emit the process name, <= 37 bytes. */
287 fd = open("/proc/self/cmdline", O_RDONLY);
288 memset(procName, 0, sizeof(procName));
289 read(fd, procName, sizeof(procName) - 1);
290 close(fd);
291 len = strlen(procName);
292 cp = logWriteString(cp, procName, len);
293
294 /* Emit the sensitive thread ("main thread") status, 5 bytes. */
295 bool isSensitive = false;
296 if (gDvm.isSensitiveThreadHook != NULL) {
297 isSensitive = gDvm.isSensitiveThreadHook();
298 }
299 cp = logWriteInt(cp, isSensitive);
300
301 /* Emit self thread name string, <= 37 bytes. */
302 std::string selfName = dvmGetThreadName(self);
303 cp = logWriteString(cp, selfName.c_str(), selfName.size());
304
305 /* Emit the wait time, 5 bytes. */
306 cp = logWriteInt(cp, waitMs);
307
308 /* Emit the source code file name, <= 37 bytes. */
309 fileName = dvmGetMethodSourceFile(meth);
310 if (fileName == NULL) fileName = "";
311 cp = logWriteString(cp, fileName, strlen(fileName));
312
313 /* Emit the source code line number, 5 bytes. */
314 relativePc = saveArea->xtra.currentPc - saveArea->method->insns;
315 cp = logWriteInt(cp, dvmLineNumFromPC(meth, relativePc));
316
317 /* Emit the lock owner source code file name, <= 37 bytes. */
318 if (ownerFileName == NULL) {
319 ownerFileName = "";
320 } else if (strcmp(fileName, ownerFileName) == 0) {
321 /* Common case, so save on log space. */
322 ownerFileName = "-";
323 }
324 cp = logWriteString(cp, ownerFileName, strlen(ownerFileName));
325
326 /* Emit the source code line number, 5 bytes. */
327 cp = logWriteInt(cp, ownerLineNumber);
328
329 /* Emit the sample percentage, 5 bytes. */
330 cp = logWriteInt(cp, samplePercent);
331
332 assert((size_t)(cp - eventBuffer) <= sizeof(eventBuffer));
333 android_btWriteLog(EVENT_LOG_TAG_dvm_lock_sample,
334 EVENT_TYPE_LIST,
335 eventBuffer,
336 (size_t)(cp - eventBuffer));
337 }
338
/*
 * Lock a monitor, blocking if it is held by another thread.
 *
 * Recursive acquisition by the current owner just bumps the lock count.
 * On contention we transition to THREAD_MONITOR while blocked and, when
 * lock profiling is enabled (gDvm.lockProfThreshold != 0), we sample and
 * log how long we waited and who held the lock.
 */
static void lockMonitor(Thread* self, Monitor* mon)
{
    ThreadStatus oldStatus;
    u4 waitThreshold, samplePercent;
    u8 waitStart, waitEnd, waitMs;

    if (mon->owner == self) {
        /* Recursive lock: no mutex work needed. */
        mon->lockCount++;
        return;
    }
    if (dvmTryLockMutex(&mon->lock) != 0) {
        /* Contended: tell the VM we're about to block. */
        oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
        waitThreshold = gDvm.lockProfThreshold;
        if (waitThreshold) {
            waitStart = dvmGetRelativeTimeUsec();
        }
        /* Snapshot the current holder's location before blocking; the
         * fields may change once the lock is released. */
        const char* currentOwnerFileName = mon->ownerFileName;
        u4 currentOwnerLineNumber = mon->ownerLineNumber;

        dvmLockMutex(&mon->lock);
        if (waitThreshold) {
            waitEnd = dvmGetRelativeTimeUsec();
        }
        dvmChangeStatus(self, oldStatus);
        if (waitThreshold) {
            /* Sample with probability proportional to the wait time,
             * reaching 100% at the configured threshold. */
            waitMs = (waitEnd - waitStart) / 1000;
            if (waitMs >= waitThreshold) {
                samplePercent = 100;
            } else {
                samplePercent = 100 * waitMs / waitThreshold;
            }
            if (samplePercent != 0 && ((u4)rand() % 100 < samplePercent)) {
                logContentionEvent(self, waitMs, samplePercent,
                                   currentOwnerFileName, currentOwnerLineNumber);
            }
        }
    }
    mon->owner = self;
    assert(mon->lockCount == 0);

    // When debugging, save the current monitor holder for future
    // acquisition failures to use in sampled logging.
    if (gDvm.lockProfThreshold > 0) {
        const StackSaveArea *saveArea;
        const Method *meth;
        mon->ownerLineNumber = 0;
        if (self->interpSave.curFrame == NULL) {
            mon->ownerFileName = "no_frame";
        } else if ((saveArea =
                   SAVEAREA_FROM_FP(self->interpSave.curFrame)) == NULL) {
            mon->ownerFileName = "no_save_area";
        } else if ((meth = saveArea->method) == NULL) {
            mon->ownerFileName = "no_method";
        } else {
            u4 relativePc = saveArea->xtra.currentPc - saveArea->method->insns;
            mon->ownerFileName = (char*) dvmGetMethodSourceFile(meth);
            if (mon->ownerFileName == NULL) {
                mon->ownerFileName = "no_method_file";
            } else {
                mon->ownerLineNumber = dvmLineNumFromPC(meth, relativePc);
            }
        }
    }
}
406
/*
 * Try to lock a monitor without blocking.
 *
 * Returns "true" on success.
 */
#ifdef WITH_COPYING_GC
static bool tryLockMonitor(Thread* self, Monitor* mon)
{
    /* Recursive acquisition by the current owner always succeeds. */
    if (mon->owner == self) {
        mon->lockCount++;
        return true;
    }
    if (dvmTryLockMutex(&mon->lock) != 0) {
        return false;
    }
    mon->owner = self;
    assert(mon->lockCount == 0);
    return true;
}
#endif
429
430 /*
431 * Unlock a monitor.
432 *
433 * Returns true if the unlock succeeded.
434 * If the unlock failed, an exception will be pending.
435 */
unlockMonitor(Thread * self,Monitor * mon)436 static bool unlockMonitor(Thread* self, Monitor* mon)
437 {
438 assert(self != NULL);
439 assert(mon != NULL);
440 if (mon->owner == self) {
441 /*
442 * We own the monitor, so nobody else can be in here.
443 */
444 if (mon->lockCount == 0) {
445 mon->owner = NULL;
446 mon->ownerFileName = "unlocked";
447 mon->ownerLineNumber = 0;
448 dvmUnlockMutex(&mon->lock);
449 } else {
450 mon->lockCount--;
451 }
452 } else {
453 /*
454 * We don't own this, so we're not allowed to unlock it.
455 * The JNI spec says that we should throw IllegalMonitorStateException
456 * in this case.
457 */
458 dvmThrowIllegalMonitorStateException("unlock of unowned monitor");
459 return false;
460 }
461 return true;
462 }
463
464 /*
465 * Checks the wait set for circular structure. Returns 0 if the list
466 * is not circular. Otherwise, returns 1. Used only by asserts.
467 */
468 #ifndef NDEBUG
waitSetCheck(Monitor * mon)469 static int waitSetCheck(Monitor *mon)
470 {
471 Thread *fast, *slow;
472 size_t n;
473
474 assert(mon != NULL);
475 fast = slow = mon->waitSet;
476 n = 0;
477 for (;;) {
478 if (fast == NULL) return 0;
479 if (fast->waitNext == NULL) return 0;
480 if (fast == slow && n > 0) return 1;
481 n += 2;
482 fast = fast->waitNext->waitNext;
483 slow = slow->waitNext;
484 }
485 }
486 #endif
487
488 /*
489 * Links a thread into a monitor's wait set. The monitor lock must be
490 * held by the caller of this routine.
491 */
waitSetAppend(Monitor * mon,Thread * thread)492 static void waitSetAppend(Monitor *mon, Thread *thread)
493 {
494 Thread *elt;
495
496 assert(mon != NULL);
497 assert(mon->owner == dvmThreadSelf());
498 assert(thread != NULL);
499 assert(thread->waitNext == NULL);
500 assert(waitSetCheck(mon) == 0);
501 if (mon->waitSet == NULL) {
502 mon->waitSet = thread;
503 return;
504 }
505 elt = mon->waitSet;
506 while (elt->waitNext != NULL) {
507 elt = elt->waitNext;
508 }
509 elt->waitNext = thread;
510 }
511
512 /*
513 * Unlinks a thread from a monitor's wait set. The monitor lock must
514 * be held by the caller of this routine.
515 */
waitSetRemove(Monitor * mon,Thread * thread)516 static void waitSetRemove(Monitor *mon, Thread *thread)
517 {
518 Thread *elt;
519
520 assert(mon != NULL);
521 assert(mon->owner == dvmThreadSelf());
522 assert(thread != NULL);
523 assert(waitSetCheck(mon) == 0);
524 if (mon->waitSet == NULL) {
525 return;
526 }
527 if (mon->waitSet == thread) {
528 mon->waitSet = thread->waitNext;
529 thread->waitNext = NULL;
530 return;
531 }
532 elt = mon->waitSet;
533 while (elt->waitNext != NULL) {
534 if (elt->waitNext == thread) {
535 elt->waitNext = thread->waitNext;
536 thread->waitNext = NULL;
537 return;
538 }
539 elt = elt->waitNext;
540 }
541 }
542
/*
 * Converts the given relative waiting time into an absolute time on the
 * clock used for condition waits (CLOCK_MONOTONIC when available,
 * otherwise wall-clock time).
 *
 * "msec" is milliseconds, "nsec" is additional nanoseconds (0-999999);
 * the result is clamped so tv_sec stays within the 32-bit epoch.
 */
static void absoluteTime(s8 msec, s4 nsec, struct timespec *ts)
{
    s8 endSec;

#ifdef HAVE_TIMEDWAIT_MONOTONIC
    clock_gettime(CLOCK_MONOTONIC, ts);
#else
    {
        struct timeval tv;
        gettimeofday(&tv, NULL);
        ts->tv_sec = tv.tv_sec;
        ts->tv_nsec = tv.tv_usec * 1000;
    }
#endif
    /* Add the whole seconds, clamping near the 32-bit epoch limit;
     * waking up "early" at the clamp is explicitly allowed. */
    endSec = ts->tv_sec + msec / 1000;
    if (endSec >= 0x7fffffff) {
        LOGV("NOTE: end time exceeds epoch");
        endSec = 0x7ffffffe;
    }
    ts->tv_sec = endSec;
    /* Fold in the sub-second portion of msec plus the extra nsec. */
    ts->tv_nsec = (ts->tv_nsec + (msec % 1000) * 1000000) + nsec;

    /* catch rollover: at most one carry is possible here */
    if (ts->tv_nsec >= 1000000000L) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000L;
    }
}
574
/*
 * Wait on "cond" for a relative duration of "msec" milliseconds plus
 * "nsec" nanoseconds.  "mutex" must be held on entry and is re-held on
 * return.  Returns 0 on signal or ETIMEDOUT on timeout.
 */
int dvmRelativeCondWait(pthread_cond_t* cond, pthread_mutex_t* mutex,
    s8 msec, s4 nsec)
{
    int ret;
    struct timespec ts;
    absoluteTime(msec, nsec, &ts);
#if defined(HAVE_TIMEDWAIT_MONOTONIC)
    /* Must match the clock absoluteTime() sampled. */
    ret = pthread_cond_timedwait_monotonic(cond, mutex, &ts);
#else
    ret = pthread_cond_timedwait(cond, mutex, &ts);
#endif
    assert(ret == 0 || ret == ETIMEDOUT);
    return ret;
}
589
/*
 * Wait on a monitor until timeout, interrupt, or notification.  Used for
 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
 *
 * If another thread calls Thread.interrupt(), we throw InterruptedException
 * and return immediately if one of the following are true:
 *  - blocked in wait(), wait(long), or wait(long, int) methods of Object
 *  - blocked in join(), join(long), or join(long, int) methods of Thread
 *  - blocked in sleep(long), or sleep(long, int) methods of Thread
 * Otherwise, we set the "interrupted" flag.
 *
 * Checks to make sure that "nsec" is in the range 0-999999
 * (i.e. fractions of a millisecond) and throws the appropriate
 * exception if it isn't.
 *
 * The spec allows "spurious wakeups", and recommends that all code using
 * Object.wait() do so in a loop.  This appears to derive from concerns
 * about pthread_cond_wait() on multiprocessor systems.  Some commentary
 * on the web casts doubt on whether these can/should occur.
 *
 * Since we're allowed to wake up "early", we clamp extremely long durations
 * to return at the end of the 32-bit time epoch.
 */
static void waitMonitor(Thread* self, Monitor* mon, s8 msec, s4 nsec,
    bool interruptShouldThrow)
{
    struct timespec ts;
    bool wasInterrupted = false;
    bool timed;
    int ret;
    const char *savedFileName;
    u4 savedLineNumber;

    assert(self != NULL);
    assert(mon != NULL);

    /* Make sure that we hold the lock. */
    if (mon->owner != self) {
        dvmThrowIllegalMonitorStateException(
            "object not locked by thread before wait()");
        return;
    }

    /*
     * Enforce the timeout range.
     */
    if (msec < 0 || nsec < 0 || nsec > 999999) {
        dvmThrowIllegalArgumentException("timeout arguments out of range");
        return;
    }

    /*
     * Compute absolute wakeup time, if necessary.  "ts" is only valid
     * when "timed" is true.
     */
    if (msec == 0 && nsec == 0) {
        timed = false;
    } else {
        absoluteTime(msec, nsec, &ts);
        timed = true;
    }

    /*
     * Add ourselves to the set of threads waiting on this monitor, and
     * release our hold.  We need to let it go even if we're a few levels
     * deep in a recursive lock, and we need to restore that later.
     *
     * We append to the wait set ahead of clearing the count and owner
     * fields so the subroutine can check that the calling thread owns
     * the monitor.  Aside from that, the order of member updates is
     * not order sensitive as we hold the pthread mutex.
     */
    waitSetAppend(mon, self);
    int prevLockCount = mon->lockCount;
    mon->lockCount = 0;
    mon->owner = NULL;
    savedFileName = mon->ownerFileName;
    mon->ownerFileName = NULL;
    savedLineNumber = mon->ownerLineNumber;
    mon->ownerLineNumber = 0;

    /*
     * Update thread status.  If the GC wakes up, it'll ignore us, knowing
     * that we won't touch any references in this state, and we'll check
     * our suspend mode before we transition out.
     */
    if (timed)
        dvmChangeStatus(self, THREAD_TIMED_WAIT);
    else
        dvmChangeStatus(self, THREAD_WAIT);

    dvmLockMutex(&self->waitMutex);

    /*
     * Set waitMonitor to the monitor object we will be waiting on.
     * When waitMonitor is non-NULL a notifying or interrupting thread
     * must signal the thread's waitCond to wake it up.
     */
    assert(self->waitMonitor == NULL);
    self->waitMonitor = mon;

    /*
     * Handle the case where the thread was interrupted before we called
     * wait().  Note we still hold the monitor's mutex on this path, so
     * we must not re-acquire it at "done".
     */
    if (self->interrupted) {
        wasInterrupted = true;
        self->waitMonitor = NULL;
        dvmUnlockMutex(&self->waitMutex);
        goto done;
    }

    /*
     * Release the monitor lock and wait for a notification or
     * a timeout to occur.
     */
    dvmUnlockMutex(&mon->lock);

    if (!timed) {
        ret = pthread_cond_wait(&self->waitCond, &self->waitMutex);
        assert(ret == 0);
    } else {
#ifdef HAVE_TIMEDWAIT_MONOTONIC
        ret = pthread_cond_timedwait_monotonic(&self->waitCond, &self->waitMutex, &ts);
#else
        ret = pthread_cond_timedwait(&self->waitCond, &self->waitMutex, &ts);
#endif
        assert(ret == 0 || ret == ETIMEDOUT);
    }
    if (self->interrupted) {
        wasInterrupted = true;
    }

    self->interrupted = false;
    self->waitMonitor = NULL;

    dvmUnlockMutex(&self->waitMutex);

    /* Reacquire the monitor lock. */
    lockMonitor(self, mon);

done:
    /*
     * We remove our thread from wait set after restoring the count
     * and owner fields so the subroutine can check that the calling
     * thread owns the monitor.  Aside from that, the order of member
     * updates is not order sensitive as we hold the pthread mutex.
     */
    mon->owner = self;
    mon->lockCount = prevLockCount;
    mon->ownerFileName = savedFileName;
    mon->ownerLineNumber = savedLineNumber;
    waitSetRemove(mon, self);

    /* set self->status back to THREAD_RUNNING, and self-suspend if needed */
    dvmChangeStatus(self, THREAD_RUNNING);

    if (wasInterrupted) {
        /*
         * We were interrupted while waiting, or somebody interrupted an
         * un-interruptible thread earlier and we're bailing out immediately.
         *
         * The doc sayeth: "The interrupted status of the current thread is
         * cleared when this exception is thrown."
         */
        self->interrupted = false;
        if (interruptShouldThrow) {
            dvmThrowInterruptedException(NULL);
        }
    }
}
760
761 /*
762 * Notify one thread waiting on this monitor.
763 */
notifyMonitor(Thread * self,Monitor * mon)764 static void notifyMonitor(Thread* self, Monitor* mon)
765 {
766 Thread* thread;
767
768 assert(self != NULL);
769 assert(mon != NULL);
770
771 /* Make sure that we hold the lock. */
772 if (mon->owner != self) {
773 dvmThrowIllegalMonitorStateException(
774 "object not locked by thread before notify()");
775 return;
776 }
777 /* Signal the first waiting thread in the wait set. */
778 while (mon->waitSet != NULL) {
779 thread = mon->waitSet;
780 mon->waitSet = thread->waitNext;
781 thread->waitNext = NULL;
782 dvmLockMutex(&thread->waitMutex);
783 /* Check to see if the thread is still waiting. */
784 if (thread->waitMonitor != NULL) {
785 pthread_cond_signal(&thread->waitCond);
786 dvmUnlockMutex(&thread->waitMutex);
787 return;
788 }
789 dvmUnlockMutex(&thread->waitMutex);
790 }
791 }
792
793 /*
794 * Notify all threads waiting on this monitor.
795 */
notifyAllMonitor(Thread * self,Monitor * mon)796 static void notifyAllMonitor(Thread* self, Monitor* mon)
797 {
798 Thread* thread;
799
800 assert(self != NULL);
801 assert(mon != NULL);
802
803 /* Make sure that we hold the lock. */
804 if (mon->owner != self) {
805 dvmThrowIllegalMonitorStateException(
806 "object not locked by thread before notifyAll()");
807 return;
808 }
809 /* Signal all threads in the wait set. */
810 while (mon->waitSet != NULL) {
811 thread = mon->waitSet;
812 mon->waitSet = thread->waitNext;
813 thread->waitNext = NULL;
814 dvmLockMutex(&thread->waitMutex);
815 /* Check to see if the thread is still waiting. */
816 if (thread->waitMonitor != NULL) {
817 pthread_cond_signal(&thread->waitCond);
818 }
819 dvmUnlockMutex(&thread->waitMutex);
820 }
821 }
822
/*
 * Changes the shape of a monitor from thin to fat, preserving the
 * internal lock state.  The calling thread must own the lock.
 */
static void inflateMonitor(Thread *self, Object *obj)
{
    Monitor *mon;
    u4 thin;

    assert(self != NULL);
    assert(obj != NULL);
    assert(LW_SHAPE(obj->lock) == LW_SHAPE_THIN);
    assert(LW_LOCK_OWNER(obj->lock) == self->threadId);
    /* Allocate and acquire a new monitor. */
    mon = dvmCreateMonitor(obj);
    lockMonitor(self, mon);
    /* Propagate the lock state. */
    thin = obj->lock;
    mon->lockCount = LW_LOCK_COUNT(thin);
    /* Keep only the hash state bits of the old word ... */
    thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
    /* ... and combine them with the monitor pointer plus the fat flag
     * (relies on the monitor's 8-byte alignment). */
    thin |= (u4)mon | LW_SHAPE_FAT;
    /* Publish the updated lock word.  The release store makes the
     * monitor's initialized fields visible before the pointer is. */
    android_atomic_release_store(thin, (int32_t *)&obj->lock);
}
847
/*
 * Implements monitorenter for "synchronized" stuff.
 *
 * This does not fail or throw an exception (unless deadlock prediction
 * is enabled and set to "err" mode).
 *
 * Fast path: CAS our thread id into an unowned thin lock.  Recursive
 * acquisition bumps the thin lock's count (inflating on overflow).
 * Contended thin locks are spun on with exponential backoff until they
 * are released or inflated; fat locks delegate to lockMonitor().
 */
void dvmLockObject(Thread* self, Object *obj)
{
    volatile u4 *thinp;
    ThreadStatus oldStatus;
    struct timespec tm;
    long sleepDelayNs;
    long minSleepDelayNs = 1000000;  /* 1 millisecond */
    long maxSleepDelayNs = 1000000000;  /* 1 second */
    u4 thin, newThin, threadId;

    assert(self != NULL);
    assert(obj != NULL);
    threadId = self->threadId;
    thinp = &obj->lock;
retry:
    thin = *thinp;
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /*
         * The lock is a thin lock.  The owner field is used to
         * determine the acquire method, ordered by cost.
         */
        if (LW_LOCK_OWNER(thin) == threadId) {
            /*
             * The calling thread owns the lock.  Increment the
             * value of the recursion count field.  No CAS needed:
             * only the owner can touch an owned thin lock.
             */
            obj->lock += 1 << LW_LOCK_COUNT_SHIFT;
            if (LW_LOCK_COUNT(obj->lock) == LW_LOCK_COUNT_MASK) {
                /*
                 * The reacquisition limit has been reached.  Inflate
                 * the lock so the next acquire will not overflow the
                 * recursion count field.
                 */
                inflateMonitor(self, obj);
            }
        } else if (LW_LOCK_OWNER(thin) == 0) {
            /*
             * The lock is unowned.  Install the thread id of the
             * calling thread into the owner field.  This is the
             * common case.  In performance critical code the JIT
             * will have tried this before calling out to the VM.
             */
            newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
            if (android_atomic_acquire_cas(thin, newThin,
                    (int32_t*)thinp) != 0) {
                /*
                 * The acquire failed.  Try again.
                 */
                goto retry;
            }
        } else {
            LOGV("(%d) spin on lock %p: %#x (%#x) %#x",
                 threadId, &obj->lock, 0, *thinp, thin);
            /*
             * The lock is owned by another thread.  Notify the VM
             * that we are about to wait.
             */
            oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
            /*
             * Spin until the thin lock is released or inflated.
             */
            sleepDelayNs = 0;
            for (;;) {
                thin = *thinp;
                /*
                 * Check the shape of the lock word.  Another thread
                 * may have inflated the lock while we were waiting.
                 */
                if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
                    if (LW_LOCK_OWNER(thin) == 0) {
                        /*
                         * The lock has been released.  Install the
                         * thread id of the calling thread into the
                         * owner field.
                         */
                        newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
                        if (android_atomic_acquire_cas(thin, newThin,
                                (int32_t *)thinp) == 0) {
                            /*
                             * The acquire succeed.  Break out of the
                             * loop and proceed to inflate the lock.
                             */
                            break;
                        }
                    } else {
                        /*
                         * The lock has not been released.  Yield so
                         * the owning thread can run.  First contention
                         * yields once; thereafter we sleep with an
                         * exponentially growing delay.
                         */
                        if (sleepDelayNs == 0) {
                            sched_yield();
                            sleepDelayNs = minSleepDelayNs;
                        } else {
                            tm.tv_sec = 0;
                            tm.tv_nsec = sleepDelayNs;
                            nanosleep(&tm, NULL);
                            /*
                             * Prepare the next delay value.  Wrap to
                             * avoid once a second polls for eternity.
                             */
                            if (sleepDelayNs < maxSleepDelayNs / 2) {
                                sleepDelayNs *= 2;
                            } else {
                                sleepDelayNs = minSleepDelayNs;
                            }
                        }
                    }
                } else {
                    /*
                     * The thin lock was inflated by another thread.
                     * Let the VM know we are no longer waiting and
                     * try again.
                     */
                    LOGV("(%d) lock %p surprise-fattened",
                         threadId, &obj->lock);
                    dvmChangeStatus(self, oldStatus);
                    goto retry;
                }
            }
            LOGV("(%d) spin on lock done %p: %#x (%#x) %#x",
                 threadId, &obj->lock, 0, *thinp, thin);
            /*
             * We have acquired the thin lock.  Let the VM know that
             * we are no longer waiting.
             */
            dvmChangeStatus(self, oldStatus);
            /*
             * Fatten the lock.  Contention implies waiting is likely,
             * so a monitor is worth having from now on.
             */
            inflateMonitor(self, obj);
            LOGV("(%d) lock %p fattened", threadId, &obj->lock);
        }
    } else {
        /*
         * The lock is a fat lock.
         */
        assert(LW_MONITOR(obj->lock) != NULL);
        lockMonitor(self, LW_MONITOR(obj->lock));
    }
}
994
/*
 * Implements monitorexit for "synchronized" stuff.
 *
 * On failure, throws an exception and returns "false".
 */
bool dvmUnlockObject(Thread* self, Object *obj)
{
    u4 thin;

    assert(self != NULL);
    assert(self->status == THREAD_RUNNING);
    assert(obj != NULL);
    /*
     * Cache the lock word as its value can change while we are
     * examining its state.
     */
    thin = *(volatile u4 *)&obj->lock;
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /*
         * The lock is thin.  We must ensure that the lock is owned
         * by the given thread before unlocking it.
         */
        if (LW_LOCK_OWNER(thin) == self->threadId) {
            /*
             * We are the lock owner.  It is safe to update the lock
             * without CAS as lock ownership guards the lock itself.
             */
            if (LW_LOCK_COUNT(thin) == 0) {
                /*
                 * The lock was not recursively acquired, the common
                 * case.  Unlock by clearing all bits except for the
                 * hash state.  Release semantics order our critical
                 * section before the store.
                 */
                thin &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
                android_atomic_release_store(thin, (int32_t*)&obj->lock);
            } else {
                /*
                 * The object was recursively acquired.  Decrement the
                 * lock recursion count field.
                 */
                obj->lock -= 1 << LW_LOCK_COUNT_SHIFT;
            }
        } else {
            /*
             * We do not own the lock.  The JVM spec requires that we
             * throw an exception in this case.
             */
            dvmThrowIllegalMonitorStateException("unlock of unowned monitor");
            return false;
        }
    } else {
        /*
         * The lock is fat.  We must check to see if unlockMonitor has
         * raised any exceptions before continuing.
         */
        assert(LW_MONITOR(obj->lock) != NULL);
        if (!unlockMonitor(self, LW_MONITOR(obj->lock))) {
            /*
             * An exception has been raised.  Do not fall through.
             */
            return false;
        }
    }
    return true;
}
1060
/*
 * Object.wait().  Also called for class init.
 *
 * Validates ownership, inflates a thin lock (waiting requires a monitor
 * to park on), then delegates to waitMonitor().
 */
void dvmObjectWait(Thread* self, Object *obj, s8 msec, s4 nsec,
    bool interruptShouldThrow)
{
    Monitor* mon;
    u4 thin = *(volatile u4 *)&obj->lock;

    /* If the lock is still thin, we need to fatten it.
     */
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /* Make sure that 'self' holds the lock.
         */
        if (LW_LOCK_OWNER(thin) != self->threadId) {
            dvmThrowIllegalMonitorStateException(
                "object not locked by thread before wait()");
            return;
        }

        /* This thread holds the lock.  We need to fatten the lock
         * so 'self' can block on it.  Don't update the object lock
         * field yet, because 'self' needs to acquire the lock before
         * any other thread gets a chance.
         */
        inflateMonitor(self, obj);
        LOGV("(%d) lock %p fattened by wait()", self->threadId, &obj->lock);
    }
    /* Re-read the lock word: inflation above replaced it. */
    mon = LW_MONITOR(obj->lock);
    waitMonitor(self, mon, msec, nsec, interruptShouldThrow);
}
1092
1093 /*
1094 * Object.notify().
1095 */
void dvmObjectNotify(Thread* self, Object *obj)
{
    u4 lockWord = *(volatile u4 *)&obj->lock;

    if (LW_SHAPE(lockWord) != LW_SHAPE_THIN) {
        /*
         * Fat lock: the monitor tracks the wait set, so hand off to it.
         */
        notifyMonitor(self, LW_MONITOR(lockWord));
        return;
    }

    /*
     * Thin lock: waiting on an object always fattens its lock, so a
     * thin lock can have no waiters.  Validate ownership, then there
     * is nothing further to do.
     */
    if (LW_LOCK_OWNER(lockWord) != self->threadId) {
        dvmThrowIllegalMonitorStateException(
            "object not locked by thread before notify()");
    }
}
1120
1121 /*
1122 * Object.notifyAll().
1123 */
void dvmObjectNotifyAll(Thread* self, Object *obj)
{
    u4 lockWord = *(volatile u4 *)&obj->lock;

    if (LW_SHAPE(lockWord) != LW_SHAPE_THIN) {
        /*
         * Fat lock: the monitor tracks the wait set, so hand off to it.
         */
        notifyAllMonitor(self, LW_MONITOR(lockWord));
        return;
    }

    /*
     * Thin lock: waiting on an object always fattens its lock, so a
     * thin lock can have no waiters.  Validate ownership, then there
     * is nothing further to do.
     */
    if (LW_LOCK_OWNER(lockWord) != self->threadId) {
        dvmThrowIllegalMonitorStateException(
            "object not locked by thread before notifyAll()");
    }
}
1148
1149 /*
1150 * This implements java.lang.Thread.sleep(long msec, int nsec).
1151 *
1152 * The sleep is interruptible by other threads, which means we can't just
1153 * plop into an OS sleep call. (We probably could if we wanted to send
1154 * signals around and rely on EINTR, but that's inefficient and relies
1155 * on native code respecting our signal mask.)
1156 *
1157 * We have to do all of this stuff for Object.wait() as well, so it's
1158 * easiest to just sleep on a private Monitor.
1159 *
1160 * It appears that we want sleep(0,0) to go through the motions of sleeping
1161 * for a very short duration, rather than just returning.
1162 */
void dvmThreadSleep(u8 msec, u4 nsec)
{
    Thread* self = dvmThreadSelf();
    Monitor* sleepMon = gDvm.threadSleepMon;

    /*
     * sleep(0,0) should pause briefly, but wait(0,0) blocks forever;
     * bump the wait to the smallest nonzero duration.
     */
    if (msec == 0 && nsec == 0) {
        nsec = 1;
    }

    /* Sleep by waiting (interruptibly) on the VM's private monitor. */
    lockMonitor(self, sleepMon);
    waitMonitor(self, sleepMon, msec, nsec, true);
    unlockMonitor(self, sleepMon);
}
1176
1177 /*
1178 * Implement java.lang.Thread.interrupt().
1179 */
void dvmThreadInterrupt(Thread* thread)
{
    assert(thread != NULL);

    dvmLockMutex(&thread->waitMutex);

    /*
     * A second interrupt while the flag is already raised requires no
     * further work.
     */
    if (!thread->interrupted) {
        /*
         * Raise the "interrupted" flag; the target will notice it and
         * bail early out of its next wait() attempt if it is not
         * currently waiting on something.
         */
        thread->interrupted = true;

        /*
         * If the target is parked in a monitor wait, wake it now.
         * waitMonitor() sets waitMonitor only while actually waiting
         * (which implies the lock has already been fattened), so thin
         * vs. fat does not matter here.
         */
        if (thread->waitMonitor != NULL) {
            pthread_cond_signal(&thread->waitCond);
        }
    }

    dvmUnlockMutex(&thread->waitMutex);
}
1215
1216 #ifndef WITH_COPYING_GC
dvmIdentityHashCode(Object * obj)1217 u4 dvmIdentityHashCode(Object *obj)
1218 {
1219 return (u4)obj;
1220 }
1221 #else
1222 /*
1223 * Returns the identity hash code of the given object.
1224 */
/*
 * Returns the identity hash code of the given object (copying-GC build).
 *
 * With a moving collector the object's address is not stable, so the
 * first hash request must atomically record, in the lock-word hash
 * state, that the object has been hashed; the collector can then
 * preserve the hash across relocation.  Updating the hash state
 * requires owning the object's lock (or proving no one does), which is
 * what most of this function is about.
 */
u4 dvmIdentityHashCode(Object *obj)
{
    Thread *self, *thread;
    volatile u4 *lw;
    size_t size;
    u4 lock, owner, hashState;

    if (obj == NULL) {
        /*
         * Null is defined to have an identity hash code of 0.
         */
        return 0;
    }
    lw = &obj->lock;
retry:
    hashState = LW_HASH_STATE(*lw);
    if (hashState == LW_HASH_STATE_HASHED) {
        /*
         * The object has been hashed but has not had its hash code
         * relocated by the garbage collector.  Use the raw object
         * address (low bits shifted off; presumably alignment bits --
         * matches the other (u4)obj >> 3 returns below).
         */
        return (u4)obj >> 3;
    } else if (hashState == LW_HASH_STATE_HASHED_AND_MOVED) {
        /*
         * The object has been hashed and its hash code has been
         * relocated by the collector.  Use the value of the naturally
         * aligned word following the instance data.
         */
        assert(!dvmIsClassObject(obj));
        if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) {
            size = dvmArrayObjectSize((ArrayObject *)obj);
            /*
             * NOTE(review): (size + 2) & ~2 clears only bit 1 of the
             * rounded size; if word alignment of the trailing hash slot
             * was intended, (size + 3) & ~3 would be the usual form --
             * confirm against the collector's relocation code.
             */
            size = (size + 2) & ~2;
        } else {
            size = obj->clazz->objectSize;
        }
        /* Read the saved hash from the word just past the instance data. */
        return *(u4 *)(((char *)obj) + size);
    } else if (hashState == LW_HASH_STATE_UNHASHED) {
        /*
         * The object has never been hashed.  Change the hash state to
         * hashed and use the raw object address.
         */
        self = dvmThreadSelf();
        if (self->threadId == lockOwner(obj)) {
            /*
             * We already own the lock so we can update the hash state
             * directly.
             */
            *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
            return (u4)obj >> 3;
        }
        /*
         * We do not own the lock.  Try acquiring the lock.  Should
         * this fail, we must suspend the owning thread.
         */
        if (LW_SHAPE(*lw) == LW_SHAPE_THIN) {
            /*
             * If the lock is thin assume it is unowned.  We simulate
             * an acquire, update, and release with a single CAS:
             * the CAS succeeds only if the lock word is entirely zero
             * (thin, unowned, unhashed), and installs the hashed state.
             */
            lock = (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
            if (android_atomic_acquire_cas(
                                0,
                                (int32_t)lock,
                                (int32_t *)lw) == 0) {
                /*
                 * A new lockword has been installed with a hash state
                 * of hashed.  Use the raw object address.
                 */
                return (u4)obj >> 3;
            }
        } else {
            if (tryLockMonitor(self, LW_MONITOR(*lw))) {
                /*
                 * The monitor lock has been acquired.  Change the
                 * hash state to hashed and use the raw object
                 * address.
                 */
                *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
                unlockMonitor(self, LW_MONITOR(*lw));
                return (u4)obj >> 3;
            }
        }
        /*
         * At this point we have failed to acquire the lock.  We must
         * identify the owning thread and suspend it.
         */
        dvmLockThreadList(self);
        /*
         * Cache the lock word as its value can change between
         * determining its shape and retrieving its owner.
         */
        lock = *lw;
        if (LW_SHAPE(lock) == LW_SHAPE_THIN) {
            /*
             * Find the thread with the corresponding thread id.
             */
            owner = LW_LOCK_OWNER(lock);
            assert(owner != self->threadId);
            /*
             * If the lock has no owner do not bother scanning the
             * thread list and fall through to the failure handler.
             */
            thread = owner ? gDvm.threadList : NULL;
            while (thread != NULL) {
                if (thread->threadId == owner) {
                    break;
                }
                thread = thread->next;
            }
        } else {
            thread = LW_MONITOR(lock)->owner;
        }
        /*
         * If thread is NULL the object has been released since the
         * thread list lock was acquired.  Try again.
         */
        if (thread == NULL) {
            dvmUnlockThreadList();
            goto retry;
        }
        /*
         * Wait for the owning thread to suspend.
         */
        dvmSuspendThread(thread);
        if (dvmHoldsLock(thread, obj)) {
            /*
             * The owning thread has been suspended.  We can safely
             * change the hash state to hashed.
             */
            *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
            dvmResumeThread(thread);
            dvmUnlockThreadList();
            return (u4)obj >> 3;
        }
        /*
         * The wrong thread has been suspended (ownership changed
         * between reading the lock word and the suspension taking
         * effect).  Try again.
         */
        dvmResumeThread(thread);
        dvmUnlockThreadList();
        goto retry;
    }
    /* Unknown hash state: lock word is corrupt; dump state and abort. */
    LOGE("object %p has an unknown hash state %#x", obj, hashState);
    dvmDumpThread(dvmThreadSelf(), false);
    dvmAbort();
    return 0;  /* Quiet the compiler. */
}
1372 #endif /* WITH_COPYING_GC */
1373