/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Dalvik.h"

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <pthread.h>
#include <time.h>
#include <errno.h>

/*
 * Every Object has a monitor associated with it, but not every Object is
 * actually locked. Even the ones that are locked do not need a
 * full-fledged monitor until a) there is actual contention or b) wait()
 * is called on the Object.
 *
 * For Dalvik, we have implemented a scheme similar to the one described
 * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
 * (ACM 1998). Things are even easier for us, though, because we have
 * a full 32 bits to work with.
 *
 * The two states of an Object's lock are referred to as "thin" and
 * "fat". A lock may transition from the "thin" state to the "fat"
 * state and this transition is referred to as inflation. Once a lock
 * has been inflated it remains in the "fat" state indefinitely.
 *
 * The lock value itself is stored in Object.lock. The LSB of the
 * lock encodes its state. When cleared, the lock is in the "thin"
 * state and its bits are formatted as follows:
 *
 *    [31 ---- 19] [18 ---- 3] [2 ---- 1] [0]
 *     lock count   thread id  hash state  0
 *
 * When set, the lock is in the "fat" state and its bits are formatted
 * as follows:
 *
 *    [31 ---- 3] [2 ---- 1] [0]
 *      pointer   hash state  1
 *
 * For an in-depth description of the mechanics of thin-vs-fat locking,
 * read the paper referred to above.
 */
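
/*
 * For reference, the lock word accessor macros used below live in Sync.h.
 * A minimal sketch of how the fields above decode, assuming exactly the
 * layout shown (the real definitions may differ in detail):
 *
 *   #define LW_SHAPE(x)      ((x) & 0x1)               // 0 = thin, 1 = fat
 *   #define LW_HASH_STATE(x) (((x) >> 1) & 0x3)        // bits [2:1]
 *   #define LW_LOCK_OWNER(x) (((x) >> 3) & 0xffff)     // bits [18:3]
 *   #define LW_LOCK_COUNT(x) (((x) >> 19) & 0x1fff)    // bits [31:19]
 *   #define LW_MONITOR(x)    ((Monitor*)((x) & ~0x7))  // bits [31:3]
 */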

/*
 * Monitors provide:
 *  - mutually exclusive access to resources
 *  - a way for multiple threads to wait for notification
 *
 * In effect, they fill the role of both mutexes and condition variables.
 *
 * Only one thread can own the monitor at any time. There may be several
 * threads waiting on it (the wait call unlocks it). One or more waiting
 * threads may be getting interrupted or notified at any given time.
 *
 * TODO: the various members of monitor are not SMP-safe.
 */
struct Monitor {
    Thread*     owner;      /* which thread currently owns the lock? */
    int         lockCount;  /* owner's recursive lock depth */
    Object*     obj;        /* what object are we part of [debug only] */

    Thread*     waitSet;    /* threads currently waiting on this monitor */

    pthread_mutex_t lock;

    Monitor*    next;

    /*
     * Who last acquired this monitor, when lock sampling is enabled.
     * Even when enabled, ownerMethod may be NULL.
     */
    const Method* ownerMethod;
    u4 ownerPc;
};
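
/*
 * For orientation, the Java-level operations map onto the routines in
 * this file roughly as follows (a conceptual sketch, not a call graph):
 *
 *   synchronized (obj) { ... }      -> dvmLockObject() / dvmUnlockObject()
 *   obj.wait(), obj.wait(ms)        -> dvmObjectWait()
 *   obj.notify() / obj.notifyAll()  -> dvmObjectNotify() / dvmObjectNotifyAll()
 *   Thread.sleep(ms)                -> dvmThreadSleep()
 *   Thread.interrupt()              -> dvmThreadInterrupt()
 */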


/*
 * Create and initialize a monitor.
 */
Monitor* dvmCreateMonitor(Object* obj)
{
    Monitor* mon;

    mon = (Monitor*) calloc(1, sizeof(Monitor));
    if (mon == NULL) {
        ALOGE("Unable to allocate monitor");
        dvmAbort();
    }
    if (((u4)mon & 7) != 0) {
        ALOGE("Misaligned monitor: %p", mon);
        dvmAbort();
    }
    mon->obj = obj;
    dvmInitMutex(&mon->lock);

    /* replace the head of the list with the new monitor */
    do {
        mon->next = gDvm.monitorList;
    } while (android_atomic_release_cas((int32_t)mon->next, (int32_t)mon,
            (int32_t*)(void*)&gDvm.monitorList) != 0);

    return mon;
}
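
/*
 * The loop above is a standard lock-free list push (a Treiber-stack-style
 * prepend): snapshot the current head into mon->next, then compare-and-swap
 * the head from that snapshot to the new node, retrying if another thread
 * pushed a monitor in between.
 */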

/*
 * Free the monitor list. Only used when shutting the VM down.
 */
void dvmFreeMonitorList()
{
    Monitor* mon;
    Monitor* nextMon;

    mon = gDvm.monitorList;
    while (mon != NULL) {
        nextMon = mon->next;
        free(mon);
        mon = nextMon;
    }
}

/*
 * Get the object that a monitor is part of.
 */
Object* dvmGetMonitorObject(Monitor* mon)
{
    if (mon == NULL)
        return NULL;
    else
        return mon->obj;
}

/*
 * Returns the thread id of the thread owning the given lock.
 */
static u4 lockOwner(Object* obj)
{
    Thread *owner;
    u4 lock;

    assert(obj != NULL);
    /*
     * Since we're reading the lock value multiple times, latch it so
     * that it doesn't change out from under us if we get preempted.
     */
    lock = obj->lock;
    if (LW_SHAPE(lock) == LW_SHAPE_THIN) {
        return LW_LOCK_OWNER(lock);
    } else {
        owner = LW_MONITOR(lock)->owner;
        return owner ? owner->threadId : 0;
    }
}

/*
 * Get the thread that holds the lock on the specified object. The
 * object may be unlocked, thin-locked, or fat-locked.
 *
 * The caller must lock the thread list before calling here.
 */
Thread* dvmGetObjectLockHolder(Object* obj)
{
    u4 threadId = lockOwner(obj);

    if (threadId == 0)
        return NULL;
    return dvmGetThreadByThreadId(threadId);
}

/*
 * Checks whether the given thread holds the given object's lock.
 */
bool dvmHoldsLock(Thread* thread, Object* obj)
{
    if (thread == NULL || obj == NULL) {
        return false;
    } else {
        return thread->threadId == lockOwner(obj);
    }
}

/*
 * Free the monitor associated with an object and make the object's lock
 * thin again. This is called during garbage collection.
 */
static void freeMonitor(Monitor *mon)
{
    assert(mon != NULL);
    assert(mon->obj != NULL);
    assert(LW_SHAPE(mon->obj->lock) == LW_SHAPE_FAT);

    /* This lock is associated with an object
     * that's being swept. The only possible way
     * anyone could be holding this lock would be
     * if some JNI code locked but didn't unlock
     * the object, in which case we've got some bad
     * native code somewhere.
     */
    assert(pthread_mutex_trylock(&mon->lock) == 0);
    assert(pthread_mutex_unlock(&mon->lock) == 0);
    dvmDestroyMutex(&mon->lock);
    free(mon);
}

/*
 * Frees monitor objects belonging to unmarked objects.
 */
void dvmSweepMonitorList(Monitor** mon, int (*isUnmarkedObject)(void*))
{
    Monitor handle;
    Monitor *prev, *curr;
    Object *obj;

    assert(mon != NULL);
    assert(isUnmarkedObject != NULL);
    prev = &handle;
    prev->next = curr = *mon;
    while (curr != NULL) {
        obj = curr->obj;
        if (obj != NULL && (*isUnmarkedObject)(obj) != 0) {
            prev->next = curr->next;
            freeMonitor(curr);
            curr = prev->next;
        } else {
            prev = curr;
            curr = curr->next;
        }
    }
    *mon = handle.next;
}

static char *logWriteInt(char *dst, int value)
{
    *dst++ = EVENT_TYPE_INT;
    set4LE((u1 *)dst, value);
    return dst + 4;
}

static char *logWriteString(char *dst, const char *value, size_t len)
{
    *dst++ = EVENT_TYPE_STRING;
    len = len < 32 ? len : 32;
    set4LE((u1 *)dst, len);
    dst += 4;
    memcpy(dst, value, len);
    return dst + len;
}

#define EVENT_LOG_TAG_dvm_lock_sample 20003
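
/*
 * A note on sizing: each logged string costs 1 tag byte + 4 length bytes +
 * up to 32 payload bytes (37 total), and each int costs 1 tag byte + 4
 * value bytes (5 total). The event below emits 1 list-length byte, four
 * strings, and five ints, so the worst case is 1 + 4*37 + 5*5 = 174 bytes,
 * which is exactly the size of eventBuffer in logContentionEvent().
 */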

static void logContentionEvent(Thread *self, u4 waitMs, u4 samplePercent,
                               const char *ownerFileName, u4 ownerLineNumber)
{
    const StackSaveArea *saveArea;
    const Method *meth;
    u4 relativePc;
    char eventBuffer[174];
    const char *fileName;
    char procName[33];
    char *cp;
    size_t len;
    int fd;

    /* When a thread is being destroyed it is normal that the frame depth is zero */
    if (self->interpSave.curFrame == NULL) {
        return;
    }

    saveArea = SAVEAREA_FROM_FP(self->interpSave.curFrame);
    meth = saveArea->method;
    cp = eventBuffer;

    /* Emit the event list length, 1 byte. */
    *cp++ = 9;

    /* Emit the process name, <= 37 bytes. If the open fails the name is
     * left empty. */
    fd = open("/proc/self/cmdline", O_RDONLY);
    memset(procName, 0, sizeof(procName));
    if (fd != -1) {
        read(fd, procName, sizeof(procName) - 1);
        close(fd);
    }
    len = strlen(procName);
    cp = logWriteString(cp, procName, len);

    /* Emit the sensitive thread ("main thread") status, 5 bytes. */
    bool isSensitive = false;
    if (gDvm.isSensitiveThreadHook != NULL) {
        isSensitive = gDvm.isSensitiveThreadHook();
    }
    cp = logWriteInt(cp, isSensitive);

    /* Emit self thread name string, <= 37 bytes. */
    std::string selfName = dvmGetThreadName(self);
    cp = logWriteString(cp, selfName.c_str(), selfName.size());

    /* Emit the wait time, 5 bytes. */
    cp = logWriteInt(cp, waitMs);

    /* Emit the source code file name, <= 37 bytes. */
    fileName = dvmGetMethodSourceFile(meth);
    if (fileName == NULL) fileName = "";
    cp = logWriteString(cp, fileName, strlen(fileName));

    /* Emit the source code line number, 5 bytes. */
    relativePc = saveArea->xtra.currentPc - saveArea->method->insns;
    cp = logWriteInt(cp, dvmLineNumFromPC(meth, relativePc));

    /* Emit the lock owner source code file name, <= 37 bytes. */
    if (ownerFileName == NULL) {
        ownerFileName = "";
    } else if (strcmp(fileName, ownerFileName) == 0) {
        /* Common case, so save on log space. */
        ownerFileName = "-";
    }
    cp = logWriteString(cp, ownerFileName, strlen(ownerFileName));

    /* Emit the lock owner source code line number, 5 bytes. */
    cp = logWriteInt(cp, ownerLineNumber);

    /* Emit the sample percentage, 5 bytes. */
    cp = logWriteInt(cp, samplePercent);

    assert((size_t)(cp - eventBuffer) <= sizeof(eventBuffer));
    android_btWriteLog(EVENT_LOG_TAG_dvm_lock_sample,
                       EVENT_TYPE_LIST,
                       eventBuffer,
                       (size_t)(cp - eventBuffer));
}

/*
 * Lock a monitor.
 */
static void lockMonitor(Thread* self, Monitor* mon)
{
    ThreadStatus oldStatus;
    u4 waitThreshold, samplePercent;
    u8 waitStart, waitEnd, waitMs;

    if (mon->owner == self) {
        mon->lockCount++;
        return;
    }
    if (dvmTryLockMutex(&mon->lock) != 0) {
        oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
        waitThreshold = gDvm.lockProfThreshold;
        if (waitThreshold) {
            waitStart = dvmGetRelativeTimeUsec();
        }

        const Method* currentOwnerMethod = mon->ownerMethod;
        u4 currentOwnerPc = mon->ownerPc;

        dvmLockMutex(&mon->lock);
        if (waitThreshold) {
            waitEnd = dvmGetRelativeTimeUsec();
        }
        dvmChangeStatus(self, oldStatus);
        if (waitThreshold) {
            waitMs = (waitEnd - waitStart) / 1000;
            if (waitMs >= waitThreshold) {
                samplePercent = 100;
            } else {
                samplePercent = 100 * waitMs / waitThreshold;
            }
            if (samplePercent != 0 && ((u4)rand() % 100 < samplePercent)) {
                const char* currentOwnerFileName = "no_method";
                u4 currentOwnerLineNumber = 0;
                if (currentOwnerMethod != NULL) {
                    currentOwnerFileName = dvmGetMethodSourceFile(currentOwnerMethod);
                    if (currentOwnerFileName == NULL) {
                        currentOwnerFileName = "no_method_file";
                    }
                    currentOwnerLineNumber = dvmLineNumFromPC(currentOwnerMethod, currentOwnerPc);
                }
                logContentionEvent(self, waitMs, samplePercent,
                                   currentOwnerFileName, currentOwnerLineNumber);
            }
        }
    }
    mon->owner = self;
    assert(mon->lockCount == 0);

    // When debugging, save the current monitor holder for future
    // acquisition failures to use in sampled logging.
    if (gDvm.lockProfThreshold > 0) {
        mon->ownerMethod = NULL;
        mon->ownerPc = 0;
        if (self->interpSave.curFrame == NULL) {
            return;
        }
        const StackSaveArea* saveArea = SAVEAREA_FROM_FP(self->interpSave.curFrame);
        if (saveArea == NULL) {
            return;
        }
        mon->ownerMethod = saveArea->method;
        mon->ownerPc = (saveArea->xtra.currentPc - saveArea->method->insns);
    }
}
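
/*
 * Worked example of the sampling math above, assuming a hypothetical
 * lockProfThreshold of 500 ms: a thread that blocked for 125 ms gets
 * samplePercent = 100 * 125 / 500 = 25, so its contention event is logged
 * with probability 25%; any wait of 500 ms or more is always logged.
 */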

/*
 * Try to lock a monitor.
 *
 * Returns "true" on success.
 */
#ifdef WITH_COPYING_GC
static bool tryLockMonitor(Thread* self, Monitor* mon)
{
    if (mon->owner == self) {
        mon->lockCount++;
        return true;
    } else {
        if (dvmTryLockMutex(&mon->lock) == 0) {
            mon->owner = self;
            assert(mon->lockCount == 0);
            return true;
        } else {
            return false;
        }
    }
}
#endif

/*
 * Unlock a monitor.
 *
 * Returns true if the unlock succeeded.
 * If the unlock failed, an exception will be pending.
 */
static bool unlockMonitor(Thread* self, Monitor* mon)
{
    assert(self != NULL);
    assert(mon != NULL);
    if (mon->owner == self) {
        /*
         * We own the monitor, so nobody else can be in here.
         */
        if (mon->lockCount == 0) {
            mon->owner = NULL;
            mon->ownerMethod = NULL;
            mon->ownerPc = 0;
            dvmUnlockMutex(&mon->lock);
        } else {
            mon->lockCount--;
        }
    } else {
        /*
         * We don't own this, so we're not allowed to unlock it.
         * The JNI spec says that we should throw IllegalMonitorStateException
         * in this case.
         */
        dvmThrowIllegalMonitorStateException("unlock of unowned monitor");
        return false;
    }
    return true;
}

/*
 * Checks the wait set for circular structure using Floyd's two-pointer
 * cycle detection ("tortoise and hare"): the fast pointer advances two
 * nodes per step and the slow pointer one, so they can only meet again
 * if the list is circular. Returns 0 if the list is not circular,
 * 1 otherwise. Used only by asserts.
 */
#ifndef NDEBUG
static int waitSetCheck(Monitor *mon)
{
    Thread *fast, *slow;
    size_t n;

    assert(mon != NULL);
    fast = slow = mon->waitSet;
    n = 0;
    for (;;) {
        if (fast == NULL) return 0;
        if (fast->waitNext == NULL) return 0;
        if (fast == slow && n > 0) return 1;
        n += 2;
        fast = fast->waitNext->waitNext;
        slow = slow->waitNext;
    }
}
#endif

/*
 * Links a thread into a monitor's wait set. The monitor lock must be
 * held by the caller of this routine.
 */
static void waitSetAppend(Monitor *mon, Thread *thread)
{
    Thread *elt;

    assert(mon != NULL);
    assert(mon->owner == dvmThreadSelf());
    assert(thread != NULL);
    assert(thread->waitNext == NULL);
    assert(waitSetCheck(mon) == 0);
    if (mon->waitSet == NULL) {
        mon->waitSet = thread;
        return;
    }
    elt = mon->waitSet;
    while (elt->waitNext != NULL) {
        elt = elt->waitNext;
    }
    elt->waitNext = thread;
}

/*
 * Unlinks a thread from a monitor's wait set. The monitor lock must
 * be held by the caller of this routine.
 */
static void waitSetRemove(Monitor *mon, Thread *thread)
{
    Thread *elt;

    assert(mon != NULL);
    assert(mon->owner == dvmThreadSelf());
    assert(thread != NULL);
    assert(waitSetCheck(mon) == 0);
    if (mon->waitSet == NULL) {
        return;
    }
    if (mon->waitSet == thread) {
        mon->waitSet = thread->waitNext;
        thread->waitNext = NULL;
        return;
    }
    elt = mon->waitSet;
    while (elt->waitNext != NULL) {
        if (elt->waitNext == thread) {
            elt->waitNext = thread->waitNext;
            thread->waitNext = NULL;
            return;
        }
        elt = elt->waitNext;
    }
}

/*
 * Converts the given relative waiting time into an absolute time.
 */
static void absoluteTime(s8 msec, s4 nsec, struct timespec *ts)
{
    s8 endSec;

#ifdef HAVE_TIMEDWAIT_MONOTONIC
    clock_gettime(CLOCK_MONOTONIC, ts);
#else
    {
        struct timeval tv;
        gettimeofday(&tv, NULL);
        ts->tv_sec = tv.tv_sec;
        ts->tv_nsec = tv.tv_usec * 1000;
    }
#endif
    endSec = ts->tv_sec + msec / 1000;
    if (endSec >= 0x7fffffff) {
        ALOGV("NOTE: end time exceeds epoch");
        endSec = 0x7ffffffe;
    }
    ts->tv_sec = endSec;
    ts->tv_nsec = (ts->tv_nsec + (msec % 1000) * 1000000) + nsec;

    /* catch rollover */
    if (ts->tv_nsec >= 1000000000L) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000L;
    }
}
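
/*
 * Worked example: a relative timeout of msec = 1500, nsec = 250000 starting
 * from ts = {100, 900000000} becomes tv_sec = 101 and tv_nsec = 900000000 +
 * 500 * 1000000 + 250000 = 1400250000; the rollover check then yields the
 * final absolute time {102, 400250000}.
 */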

int dvmRelativeCondWait(pthread_cond_t* cond, pthread_mutex_t* mutex,
    s8 msec, s4 nsec)
{
    int ret;
    struct timespec ts;
    absoluteTime(msec, nsec, &ts);
#if defined(HAVE_TIMEDWAIT_MONOTONIC)
    ret = pthread_cond_timedwait_monotonic(cond, mutex, &ts);
#else
    ret = pthread_cond_timedwait(cond, mutex, &ts);
#endif
    assert(ret == 0 || ret == ETIMEDOUT);
    return ret;
}

/*
 * Wait on a monitor until timeout, interrupt, or notification. Used for
 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
 *
 * If another thread calls Thread.interrupt(), we throw InterruptedException
 * and return immediately if one of the following is true:
 *  - blocked in wait(), wait(long), or wait(long, int) methods of Object
 *  - blocked in join(), join(long), or join(long, int) methods of Thread
 *  - blocked in sleep(long), or sleep(long, int) methods of Thread
 * Otherwise, we set the "interrupted" flag.
 *
 * Checks to make sure that "nsec" is in the range 0-999999
 * (i.e. fractions of a millisecond) and throws the appropriate
 * exception if it isn't.
 *
 * The spec allows "spurious wakeups", and recommends that all code using
 * Object.wait() do so in a loop. This appears to derive from concerns
 * about pthread_cond_wait() on multiprocessor systems. Some commentary
 * on the web casts doubt on whether these can/should occur.
 *
 * Since we're allowed to wake up "early", we clamp extremely long durations
 * to return at the end of the 32-bit time epoch.
 */
static void waitMonitor(Thread* self, Monitor* mon, s8 msec, s4 nsec,
    bool interruptShouldThrow)
{
    struct timespec ts;
    bool wasInterrupted = false;
    bool timed;
    int ret;

    assert(self != NULL);
    assert(mon != NULL);

    /* Make sure that we hold the lock. */
    if (mon->owner != self) {
        dvmThrowIllegalMonitorStateException(
            "object not locked by thread before wait()");
        return;
    }

    /*
     * Enforce the timeout range.
     */
    if (msec < 0 || nsec < 0 || nsec > 999999) {
        dvmThrowIllegalArgumentException("timeout arguments out of range");
        return;
    }

    /*
     * Compute absolute wakeup time, if necessary.
     */
    if (msec == 0 && nsec == 0) {
        timed = false;
    } else {
        absoluteTime(msec, nsec, &ts);
        timed = true;
    }

    /*
     * Add ourselves to the set of threads waiting on this monitor, and
     * release our hold. We need to let it go even if we're a few levels
     * deep in a recursive lock, and we need to restore that later.
     *
     * We append to the wait set ahead of clearing the count and owner
     * fields so the subroutine can check that the calling thread owns
     * the monitor. Aside from that, the order of member updates is
     * not order sensitive as we hold the pthread mutex.
     */
    waitSetAppend(mon, self);
    int prevLockCount = mon->lockCount;
    mon->lockCount = 0;
    mon->owner = NULL;

    const Method* savedMethod = mon->ownerMethod;
    u4 savedPc = mon->ownerPc;
    mon->ownerMethod = NULL;
    mon->ownerPc = 0;

    /*
     * Update thread status. If the GC wakes up, it'll ignore us, knowing
     * that we won't touch any references in this state, and we'll check
     * our suspend mode before we transition out.
     */
    if (timed)
        dvmChangeStatus(self, THREAD_TIMED_WAIT);
    else
        dvmChangeStatus(self, THREAD_WAIT);

    dvmLockMutex(&self->waitMutex);

    /*
     * Set waitMonitor to the monitor object we will be waiting on.
     * When waitMonitor is non-NULL a notifying or interrupting thread
     * must signal the thread's waitCond to wake it up.
     */
    assert(self->waitMonitor == NULL);
    self->waitMonitor = mon;

    /*
     * Handle the case where the thread was interrupted before we called
     * wait().
     */
    if (self->interrupted) {
        wasInterrupted = true;
        self->waitMonitor = NULL;
        dvmUnlockMutex(&self->waitMutex);
        goto done;
    }

    /*
     * Release the monitor lock and wait for a notification or
     * a timeout to occur.
     */
    dvmUnlockMutex(&mon->lock);

    if (!timed) {
        ret = pthread_cond_wait(&self->waitCond, &self->waitMutex);
        assert(ret == 0);
    } else {
#ifdef HAVE_TIMEDWAIT_MONOTONIC
        ret = pthread_cond_timedwait_monotonic(&self->waitCond, &self->waitMutex, &ts);
#else
        ret = pthread_cond_timedwait(&self->waitCond, &self->waitMutex, &ts);
#endif
        assert(ret == 0 || ret == ETIMEDOUT);
    }
    if (self->interrupted) {
        wasInterrupted = true;
    }

    self->interrupted = false;
    self->waitMonitor = NULL;

    dvmUnlockMutex(&self->waitMutex);

    /* Reacquire the monitor lock. */
    lockMonitor(self, mon);

done:
    /*
     * We remove our thread from wait set after restoring the count
     * and owner fields so the subroutine can check that the calling
     * thread owns the monitor. Aside from that, the order of member
     * updates is not order sensitive as we hold the pthread mutex.
     */
    mon->owner = self;
    mon->lockCount = prevLockCount;
    mon->ownerMethod = savedMethod;
    mon->ownerPc = savedPc;
    waitSetRemove(mon, self);

    /* set self->status back to THREAD_RUNNING, and self-suspend if needed */
    dvmChangeStatus(self, THREAD_RUNNING);

    if (wasInterrupted) {
        /*
         * We were interrupted while waiting, or somebody interrupted an
         * un-interruptible thread earlier and we're bailing out immediately.
         *
         * The doc sayeth: "The interrupted status of the current thread is
         * cleared when this exception is thrown."
         */
        self->interrupted = false;
        if (interruptShouldThrow) {
            dvmThrowInterruptedException(NULL);
        }
    }
}
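
/*
 * Because of the spurious wakeups permitted above, callers on the Java
 * side are expected to wait inside a condition loop. A minimal sketch of
 * the canonical pattern (illustrative only; "ready" is a hypothetical
 * condition):
 *
 *   synchronized (obj) {
 *       while (!ready) {
 *           obj.wait();
 *       }
 *       // ... proceed now that the condition holds ...
 *   }
 */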

/*
 * Notify one thread waiting on this monitor.
 */
static void notifyMonitor(Thread* self, Monitor* mon)
{
    Thread* thread;

    assert(self != NULL);
    assert(mon != NULL);

    /* Make sure that we hold the lock. */
    if (mon->owner != self) {
        dvmThrowIllegalMonitorStateException(
            "object not locked by thread before notify()");
        return;
    }
    /* Signal the first waiting thread in the wait set. */
    while (mon->waitSet != NULL) {
        thread = mon->waitSet;
        mon->waitSet = thread->waitNext;
        thread->waitNext = NULL;
        dvmLockMutex(&thread->waitMutex);
        /* Check to see if the thread is still waiting. */
        if (thread->waitMonitor != NULL) {
            pthread_cond_signal(&thread->waitCond);
            dvmUnlockMutex(&thread->waitMutex);
            return;
        }
        dvmUnlockMutex(&thread->waitMutex);
    }
}

/*
 * Notify all threads waiting on this monitor.
 */
static void notifyAllMonitor(Thread* self, Monitor* mon)
{
    Thread* thread;

    assert(self != NULL);
    assert(mon != NULL);

    /* Make sure that we hold the lock. */
    if (mon->owner != self) {
        dvmThrowIllegalMonitorStateException(
            "object not locked by thread before notifyAll()");
        return;
    }
    /* Signal all threads in the wait set. */
    while (mon->waitSet != NULL) {
        thread = mon->waitSet;
        mon->waitSet = thread->waitNext;
        thread->waitNext = NULL;
        dvmLockMutex(&thread->waitMutex);
        /* Check to see if the thread is still waiting. */
        if (thread->waitMonitor != NULL) {
            pthread_cond_signal(&thread->waitCond);
        }
        dvmUnlockMutex(&thread->waitMutex);
    }
}

/*
 * Changes the shape of a monitor from thin to fat, preserving the
 * internal lock state. The calling thread must own the lock.
 */
static void inflateMonitor(Thread *self, Object *obj)
{
    Monitor *mon;
    u4 thin;

    assert(self != NULL);
    assert(obj != NULL);
    assert(LW_SHAPE(obj->lock) == LW_SHAPE_THIN);
    assert(LW_LOCK_OWNER(obj->lock) == self->threadId);
    /* Allocate and acquire a new monitor. */
    mon = dvmCreateMonitor(obj);
    lockMonitor(self, mon);
    /* Propagate the lock state. */
    thin = obj->lock;
    mon->lockCount = LW_LOCK_COUNT(thin);
    thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
    thin |= (u4)mon | LW_SHAPE_FAT;
    /* Publish the updated lock word. */
    android_atomic_release_store(thin, (int32_t *)&obj->lock);
}
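
/*
 * Worked example of the inflation above: a thin lock word with owner
 * thread id 5, recursion count 2, and hash state 1 is
 * (2 << 19) | (5 << 3) | (1 << 1) | 0 = 0x10002a. Given a (hypothetical)
 * 8-byte-aligned Monitor at 0x4c2a08, the count moves into mon->lockCount
 * and the published fat word is 0x4c2a08 | (1 << 1) | 1 = 0x4c2a0b.
 */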

/*
 * Implements monitorenter for "synchronized" stuff.
 *
 * This does not fail or throw an exception (unless deadlock prediction
 * is enabled and set to "err" mode).
 */
void dvmLockObject(Thread* self, Object *obj)
{
    volatile u4 *thinp;
    ThreadStatus oldStatus;
    struct timespec tm;
    long sleepDelayNs;
    long minSleepDelayNs = 1000000;    /* 1 millisecond */
    long maxSleepDelayNs = 1000000000; /* 1 second */
    u4 thin, newThin, threadId;

    assert(self != NULL);
    assert(obj != NULL);
    threadId = self->threadId;
    thinp = &obj->lock;
retry:
    thin = *thinp;
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /*
         * The lock is a thin lock. The owner field is used to
         * determine the acquire method, ordered by cost.
         */
        if (LW_LOCK_OWNER(thin) == threadId) {
            /*
             * The calling thread owns the lock. Increment the
             * value of the recursion count field.
             */
            obj->lock += 1 << LW_LOCK_COUNT_SHIFT;
            if (LW_LOCK_COUNT(obj->lock) == LW_LOCK_COUNT_MASK) {
                /*
                 * The reacquisition limit has been reached. Inflate
                 * the lock so the next acquire will not overflow the
                 * recursion count field.
                 */
                inflateMonitor(self, obj);
            }
        } else if (LW_LOCK_OWNER(thin) == 0) {
            /*
             * The lock is unowned. Install the thread id of the
             * calling thread into the owner field. This is the
             * common case. In performance critical code the JIT
             * will have tried this before calling out to the VM.
             */
            newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
            if (android_atomic_acquire_cas(thin, newThin,
                    (int32_t*)thinp) != 0) {
                /*
                 * The acquire failed. Try again.
                 */
                goto retry;
            }
        } else {
            ALOGV("(%d) spin on lock %p: %#x (%#x) %#x",
                threadId, &obj->lock, 0, *thinp, thin);
            /*
             * The lock is owned by another thread. Notify the VM
             * that we are about to wait.
             */
            oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
            /*
             * Spin until the thin lock is released or inflated.
             */
            sleepDelayNs = 0;
            for (;;) {
                thin = *thinp;
                /*
                 * Check the shape of the lock word. Another thread
                 * may have inflated the lock while we were waiting.
                 */
                if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
                    if (LW_LOCK_OWNER(thin) == 0) {
                        /*
                         * The lock has been released. Install the
                         * thread id of the calling thread into the
                         * owner field.
                         */
                        newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
                        if (android_atomic_acquire_cas(thin, newThin,
                                (int32_t *)thinp) == 0) {
                            /*
                             * The acquire succeeded. Break out of the
                             * loop and proceed to inflate the lock.
                             */
                            break;
                        }
                    } else {
                        /*
                         * The lock has not been released. Yield so
                         * the owning thread can run.
                         */
                        if (sleepDelayNs == 0) {
                            sched_yield();
                            sleepDelayNs = minSleepDelayNs;
                        } else {
                            tm.tv_sec = 0;
                            tm.tv_nsec = sleepDelayNs;
                            nanosleep(&tm, NULL);
                            /*
                             * Prepare the next delay value. Wrap the
                             * delay around to avoid settling into
                             * once-a-second polls for eternity.
                             */
                            if (sleepDelayNs < maxSleepDelayNs / 2) {
                                sleepDelayNs *= 2;
                            } else {
                                sleepDelayNs = minSleepDelayNs;
                            }
                        }
                    }
                } else {
                    /*
                     * The thin lock was inflated by another thread.
                     * Let the VM know we are no longer waiting and
                     * try again.
                     */
                    ALOGV("(%d) lock %p surprise-fattened",
                        threadId, &obj->lock);
                    dvmChangeStatus(self, oldStatus);
                    goto retry;
                }
            }
            ALOGV("(%d) spin on lock done %p: %#x (%#x) %#x",
                threadId, &obj->lock, 0, *thinp, thin);
            /*
             * We have acquired the thin lock. Let the VM know that
             * we are no longer waiting.
             */
            dvmChangeStatus(self, oldStatus);
            /*
             * Fatten the lock.
             */
            inflateMonitor(self, obj);
            ALOGV("(%d) lock %p fattened", threadId, &obj->lock);
        }
    } else {
        /*
         * The lock is a fat lock.
         */
        assert(LW_MONITOR(obj->lock) != NULL);
        lockMonitor(self, LW_MONITOR(obj->lock));
    }
}
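
/*
 * The spin loop above backs off exponentially. A worked schedule for the
 * constants as written: first a bare sched_yield(), then sleeps of 1 ms,
 * 2 ms, 4 ms, ... up to a single 512 ms sleep; once the delay reaches
 * maxSleepDelayNs / 2 (500 ms) or more, it wraps back to 1 ms rather than
 * settling into one-second polls for the rest of the wait.
 */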

/*
 * Implements monitorexit for "synchronized" stuff.
 *
 * On failure, throws an exception and returns "false".
 */
bool dvmUnlockObject(Thread* self, Object *obj)
{
    u4 thin;

    assert(self != NULL);
    assert(self->status == THREAD_RUNNING);
    assert(obj != NULL);
    /*
     * Cache the lock word as its value can change while we are
     * examining its state.
     */
    thin = *(volatile u4 *)&obj->lock;
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /*
         * The lock is thin. We must ensure that the lock is owned
         * by the given thread before unlocking it.
         */
        if (LW_LOCK_OWNER(thin) == self->threadId) {
            /*
             * We are the lock owner. It is safe to update the lock
             * without CAS as lock ownership guards the lock itself.
             */
            if (LW_LOCK_COUNT(thin) == 0) {
                /*
                 * The lock was not recursively acquired, the common
                 * case. Unlock by clearing all bits except for the
                 * hash state.
                 */
                thin &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
                android_atomic_release_store(thin, (int32_t*)&obj->lock);
            } else {
                /*
                 * The object was recursively acquired. Decrement the
                 * lock recursion count field.
                 */
                obj->lock -= 1 << LW_LOCK_COUNT_SHIFT;
            }
        } else {
            /*
             * We do not own the lock. The JVM spec requires that we
             * throw an exception in this case.
             */
            dvmThrowIllegalMonitorStateException("unlock of unowned monitor");
            return false;
        }
    } else {
        /*
         * The lock is fat. We must check to see if unlockMonitor has
         * raised any exceptions before continuing.
         */
        assert(LW_MONITOR(obj->lock) != NULL);
        if (!unlockMonitor(self, LW_MONITOR(obj->lock))) {
            /*
             * An exception has been raised. Do not fall through.
             */
            return false;
        }
    }
    return true;
}

/*
 * Object.wait(). Also called for class init.
 */
void dvmObjectWait(Thread* self, Object *obj, s8 msec, s4 nsec,
    bool interruptShouldThrow)
{
    Monitor* mon;
    u4 thin = *(volatile u4 *)&obj->lock;

    /* If the lock is still thin, we need to fatten it.
     */
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /* Make sure that 'self' holds the lock.
         */
        if (LW_LOCK_OWNER(thin) != self->threadId) {
            dvmThrowIllegalMonitorStateException(
                "object not locked by thread before wait()");
            return;
        }

        /* This thread holds the lock. We need to fatten the lock
         * so 'self' can block on it. Don't update the object lock
         * field yet, because 'self' needs to acquire the lock before
         * any other thread gets a chance.
         */
        inflateMonitor(self, obj);
        ALOGV("(%d) lock %p fattened by wait()", self->threadId, &obj->lock);
    }
    mon = LW_MONITOR(obj->lock);
    waitMonitor(self, mon, msec, nsec, interruptShouldThrow);
}

/*
 * Object.notify().
 */
void dvmObjectNotify(Thread* self, Object *obj)
{
    u4 thin = *(volatile u4 *)&obj->lock;

    /* If the lock is still thin, there aren't any waiters;
     * waiting on an object forces lock fattening.
     */
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /* Make sure that 'self' holds the lock.
         */
        if (LW_LOCK_OWNER(thin) != self->threadId) {
            dvmThrowIllegalMonitorStateException(
                "object not locked by thread before notify()");
            return;
        }

        /* no-op; there are no waiters to notify.
         */
    } else {
        /* It's a fat lock.
         */
        notifyMonitor(self, LW_MONITOR(thin));
    }
}

/*
 * Object.notifyAll().
 */
void dvmObjectNotifyAll(Thread* self, Object *obj)
{
    u4 thin = *(volatile u4 *)&obj->lock;

    /* If the lock is still thin, there aren't any waiters;
     * waiting on an object forces lock fattening.
     */
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /* Make sure that 'self' holds the lock.
         */
        if (LW_LOCK_OWNER(thin) != self->threadId) {
            dvmThrowIllegalMonitorStateException(
                "object not locked by thread before notifyAll()");
            return;
        }

        /* no-op; there are no waiters to notify.
         */
    } else {
        /* It's a fat lock.
         */
        notifyAllMonitor(self, LW_MONITOR(thin));
    }
}

/*
 * This implements java.lang.Thread.sleep(long msec, int nsec).
 *
 * The sleep is interruptible by other threads, which means we can't just
 * plop into an OS sleep call. (We probably could if we wanted to send
 * signals around and rely on EINTR, but that's inefficient and relies
 * on native code respecting our signal mask.)
 *
 * We have to do all of this stuff for Object.wait() as well, so it's
 * easiest to just sleep on a private Monitor.
 *
 * It appears that we want sleep(0,0) to go through the motions of sleeping
 * for a very short duration, rather than just returning.
 */
void dvmThreadSleep(u8 msec, u4 nsec)
{
    Thread* self = dvmThreadSelf();
    Monitor* mon = gDvm.threadSleepMon;

    /* sleep(0,0) wakes up immediately, wait(0,0) means wait forever; adjust */
    if (msec == 0 && nsec == 0)
        nsec++;

    lockMonitor(self, mon);
    waitMonitor(self, mon, msec, nsec, true);
    unlockMonitor(self, mon);
}

/*
 * Implement java.lang.Thread.interrupt().
 */
void dvmThreadInterrupt(Thread* thread)
{
    assert(thread != NULL);

    dvmLockMutex(&thread->waitMutex);

    /*
     * If the interrupted flag is already set no additional action is
     * required.
     */
    if (thread->interrupted == true) {
        dvmUnlockMutex(&thread->waitMutex);
        return;
    }

    /*
     * Raise the "interrupted" flag. This will cause the thread to bail
     * out early from its next wait() attempt, if it's not currently
     * waiting on something.
     */
    thread->interrupted = true;

    /*
     * Is the thread waiting?
     *
     * Note that fat vs. thin doesn't matter here; waitMonitor
     * is only set when a thread actually waits on a monitor,
     * which implies that the monitor has already been fattened.
     */
    if (thread->waitMonitor != NULL) {
        pthread_cond_signal(&thread->waitCond);
    }

    dvmUnlockMutex(&thread->waitMutex);
}

#ifndef WITH_COPYING_GC
u4 dvmIdentityHashCode(Object *obj)
{
    return (u4)obj;
}
#else
/*
 * Returns the identity hash code of the given object.
 */
u4 dvmIdentityHashCode(Object *obj)
{
    Thread *self, *thread;
    volatile u4 *lw;
    size_t size;
    u4 lock, owner, hashState;

    if (obj == NULL) {
        /*
         * Null is defined to have an identity hash code of 0.
         */
        return 0;
    }
    lw = &obj->lock;
retry:
    hashState = LW_HASH_STATE(*lw);
    if (hashState == LW_HASH_STATE_HASHED) {
        /*
         * The object has been hashed but has not had its hash code
         * relocated by the garbage collector. Use the raw object
         * address.
         */
        return (u4)obj >> 3;
    } else if (hashState == LW_HASH_STATE_HASHED_AND_MOVED) {
        /*
         * The object has been hashed and its hash code has been
         * relocated by the collector. Use the value of the naturally
         * aligned word following the instance data.
         */
        assert(!dvmIsClassObject(obj));
        if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) {
            size = dvmArrayObjectSize((ArrayObject *)obj);
            size = (size + 2) & ~2;
        } else {
            size = obj->clazz->objectSize;
        }
        return *(u4 *)(((char *)obj) + size);
    } else if (hashState == LW_HASH_STATE_UNHASHED) {
        /*
         * The object has never been hashed. Change the hash state to
         * hashed and use the raw object address.
         */
        self = dvmThreadSelf();
        if (self->threadId == lockOwner(obj)) {
            /*
             * We already own the lock so we can update the hash state
             * directly.
             */
            *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
            return (u4)obj >> 3;
        }
        /*
         * We do not own the lock. Try acquiring the lock. Should
         * this fail, we must suspend the owning thread.
         */
        if (LW_SHAPE(*lw) == LW_SHAPE_THIN) {
            /*
             * If the lock is thin assume it is unowned. We simulate
             * an acquire, update, and release with a single CAS.
             */
            lock = (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
            if (android_atomic_acquire_cas(
                        0,
                        (int32_t)lock,
                        (int32_t *)lw) == 0) {
                /*
                 * A new lockword has been installed with a hash state
                 * of hashed. Use the raw object address.
                 */
                return (u4)obj >> 3;
            }
        } else {
            if (tryLockMonitor(self, LW_MONITOR(*lw))) {
                /*
                 * The monitor lock has been acquired. Change the
                 * hash state to hashed and use the raw object
                 * address.
                 */
                *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
                unlockMonitor(self, LW_MONITOR(*lw));
                return (u4)obj >> 3;
            }
        }
        /*
         * At this point we have failed to acquire the lock. We must
         * identify the owning thread and suspend it.
         */
        dvmLockThreadList(self);
        /*
         * Cache the lock word as its value can change between
         * determining its shape and retrieving its owner.
         */
        lock = *lw;
        if (LW_SHAPE(lock) == LW_SHAPE_THIN) {
            /*
             * Find the thread with the corresponding thread id.
             */
            owner = LW_LOCK_OWNER(lock);
            assert(owner != self->threadId);
            /*
             * If the lock has no owner do not bother scanning the
             * thread list and fall through to the failure handler.
             */
            thread = owner ? gDvm.threadList : NULL;
            while (thread != NULL) {
                if (thread->threadId == owner) {
                    break;
                }
                thread = thread->next;
            }
        } else {
            thread = LW_MONITOR(lock)->owner;
        }
        /*
         * If thread is NULL the object has been released since the
         * thread list lock was acquired. Try again.
         */
        if (thread == NULL) {
            dvmUnlockThreadList();
            goto retry;
        }
        /*
         * Wait for the owning thread to suspend.
         */
        dvmSuspendThread(thread);
        if (dvmHoldsLock(thread, obj)) {
            /*
             * The owning thread has been suspended. We can safely
             * change the hash state to hashed.
             */
            *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
            dvmResumeThread(thread);
            dvmUnlockThreadList();
            return (u4)obj >> 3;
        }
        /*
         * The wrong thread has been suspended. Try again.
         */
        dvmResumeThread(thread);
        dvmUnlockThreadList();
        goto retry;
    }
    ALOGE("object %p has an unknown hash state %#x", obj, hashState);
    dvmDumpThread(dvmThreadSelf(), false);
    dvmAbort();
    return 0;  /* Quiet the compiler. */
}
#endif /* WITH_COPYING_GC */