/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Fundamental synchronization mechanisms.
 *
 * The top part of the file has operations on "monitor" structs; the
 * next part has the native calls on objects.
 *
 * The current implementation uses "thin locking" to avoid allocating
 * an Object's full Monitor struct until absolutely necessary (i.e.,
 * during contention or a call to wait()).
 *
 * TODO: make improvements to thin locking
 * We may be able to improve performance and reduce memory requirements by:
 *  - reverting to a thin lock once the Monitor is no longer necessary
 *  - using a pool of monitor objects, with some sort of recycling scheme
 *
 * TODO: recycle native-level monitors when objects are garbage collected.
 */
#include "Dalvik.h"

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <pthread.h>
#include <time.h>
#include <sys/time.h>
#include <errno.h>

#define LOG_THIN    LOGV

#ifdef WITH_DEADLOCK_PREDICTION     /* fwd */
static const char* kStartBanner =
    "<-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#";
static const char* kEndBanner =
    "#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#->";

/*
 * Unsorted, expanding list of objects.
 *
 * This is very similar to PointerSet (which came into existence after this),
 * but these are unsorted, uniqueness is not enforced by the "add" function,
 * and the base object isn't allocated on the heap.
 */
typedef struct ExpandingObjectList {
    u2          alloc;
    u2          count;
    Object**    list;
} ExpandingObjectList;

/* fwd */
static void updateDeadlockPrediction(Thread* self, Object* obj);
static void removeCollectedObject(Object* obj);
static void expandObjClear(ExpandingObjectList* pList);
#endif

/*
 * Every Object has a monitor associated with it, but not every Object is
 * actually locked. Even the ones that are locked do not need a
 * full-fledged monitor until a) there is actual contention or b) wait()
 * is called on the Object.
 *
 * For Dalvik, we have implemented a scheme similar to the one described
 * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
 * (ACM 1998). Things are even easier for us, though, because we have
 * a full 32 bits to work with.
 *
 * The two states of an Object's lock are referred to as "thin" and
 * "fat". A lock may transition from the "thin" state to the "fat"
 * state and this transition is referred to as inflation. Once a lock
 * has been inflated it remains in the "fat" state indefinitely.
 *
 * The lock value itself is stored in Object.lock. The LSB of the
 * lock encodes its state. When cleared, the lock is in the "thin"
 * state and its bits are formatted as follows:
 *
 *    [31 ---- 19] [18 ---- 3] [2 ---- 1] [0]
 *     lock count   thread id  hash state  0
 *
 * When set, the lock is in the "fat" state and its bits are formatted
 * as follows:
 *
 *    [31 ---- 3] [2 ---- 1] [0]
 *      pointer   hash state  1
 *
 * For an in-depth description of the mechanics of thin-vs-fat locking,
 * read the paper referred to above.
 */
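
/*
 * For reference, the lock word accessors used throughout this file are
 * assumed to look roughly like the sketch below. These are NOT the
 * authoritative definitions (those live in the VM headers pulled in by
 * Dalvik.h); the sketch only illustrates how the fields described above
 * would be extracted, with widths taken from the diagram (13-bit count,
 * 16-bit thread id, 2-bit hash state, 1-bit shape):
 *
 *   #define LW_SHAPE_THIN        0
 *   #define LW_SHAPE_FAT         1
 *   #define LW_SHAPE(x)          ((x) & 0x1)
 *   #define LW_HASH_STATE_SHIFT  1
 *   #define LW_HASH_STATE_MASK   0x3
 *   #define LW_HASH_STATE(x)     (((x) >> LW_HASH_STATE_SHIFT) & LW_HASH_STATE_MASK)
 *   #define LW_LOCK_OWNER_SHIFT  3
 *   #define LW_LOCK_OWNER_MASK   0xffff
 *   #define LW_LOCK_OWNER(x)     (((x) >> LW_LOCK_OWNER_SHIFT) & LW_LOCK_OWNER_MASK)
 *   #define LW_LOCK_COUNT_SHIFT  19
 *   #define LW_LOCK_COUNT_MASK   0x1fff
 *   #define LW_LOCK_COUNT(x)     (((x) >> LW_LOCK_COUNT_SHIFT) & LW_LOCK_COUNT_MASK)
 *   #define LW_MONITOR(x)        ((Monitor*)((x) & ~0x7))
 *
 * LW_MONITOR relies on Monitor structs being 8-byte aligned, which
 * dvmCreateMonitor() below asserts.
 */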

/*
 * Monitors provide:
 *  - mutually exclusive access to resources
 *  - a way for multiple threads to wait for notification
 *
 * In effect, they fill the role of both mutexes and condition variables.
 *
 * Only one thread can own the monitor at any time. There may be several
 * threads waiting on it (the wait call unlocks it). One or more waiting
 * threads may be getting interrupted or notified at any given time.
 *
 * TODO: the various members of monitor are not SMP-safe.
 */
struct Monitor {
    Thread*     owner;          /* which thread currently owns the lock? */
    int         lockCount;      /* owner's recursive lock depth */
    Object*     obj;            /* what object are we part of [debug only] */

    Thread*     waitSet;        /* threads currently waiting on this monitor */

    pthread_mutex_t lock;

    Monitor*    next;

    /*
     * Who last acquired this monitor, when lock sampling is enabled.
     * Even when enabled, ownerFileName may be NULL.
     */
    char*       ownerFileName;
    u4          ownerLineNumber;

#ifdef WITH_DEADLOCK_PREDICTION
    /*
     * Objects that have been locked immediately after this one in the
     * past. We use an expanding flat array, allocated on first use, to
     * minimize allocations. Deletions from the list, expected to be
     * infrequent, are crunched down.
     */
    ExpandingObjectList historyChildren;

    /*
     * We also track parents. This isn't strictly necessary, but it makes
     * the cleanup at GC time significantly faster.
     */
    ExpandingObjectList historyParents;

    /* used during cycle detection */
    bool        historyMark;

    /* stack trace, established the first time we locked the object */
    int         historyStackDepth;
    int*        historyRawStackTrace;
#endif
};


/*
 * Create and initialize a monitor.
 */
Monitor* dvmCreateMonitor(Object* obj)
{
    Monitor* mon;

    mon = (Monitor*) calloc(1, sizeof(Monitor));
    if (mon == NULL) {
        LOGE("Unable to allocate monitor\n");
        dvmAbort();
    }
    if (((u4)mon & 7) != 0) {
        LOGE("Misaligned monitor: %p\n", mon);
        dvmAbort();
    }
    mon->obj = obj;
    dvmInitMutex(&mon->lock);

    /* replace the head of the list with the new monitor */
    do {
        mon->next = gDvm.monitorList;
    } while (android_atomic_release_cas((int32_t)mon->next, (int32_t)mon,
             (int32_t*)(void*)&gDvm.monitorList) != 0);

    return mon;
}

/*
 * Free the monitor list. Only used when shutting the VM down.
 */
void dvmFreeMonitorList(void)
{
    Monitor* mon;
    Monitor* nextMon;

    mon = gDvm.monitorList;
    while (mon != NULL) {
        nextMon = mon->next;

#ifdef WITH_DEADLOCK_PREDICTION
        expandObjClear(&mon->historyChildren);
        expandObjClear(&mon->historyParents);
        free(mon->historyRawStackTrace);
#endif
        free(mon);
        mon = nextMon;
    }
}

/*
 * Log some info about our monitors.
 */
void dvmDumpMonitorInfo(const char* msg)
{
#if QUIET_ZYGOTE_MONITOR
    if (gDvm.zygote) {
        return;
    }
#endif

    int totalCount;
    int liveCount;

    totalCount = liveCount = 0;
    Monitor* mon = gDvm.monitorList;
    while (mon != NULL) {
        totalCount++;
        if (mon->obj != NULL)
            liveCount++;
        mon = mon->next;
    }

    LOGD("%s: monitor list has %d entries (%d live)\n",
        msg, totalCount, liveCount);
}

/*
 * Get the object that a monitor is part of.
 */
Object* dvmGetMonitorObject(Monitor* mon)
{
    if (mon == NULL)
        return NULL;
    else
        return mon->obj;
}

/*
 * Returns the thread id of the thread owning the given lock.
 */
static u4 lockOwner(Object* obj)
{
    Thread *owner;
    u4 lock;

    assert(obj != NULL);
    /*
     * Since we're reading the lock value multiple times, latch it so
     * that it doesn't change out from under us if we get preempted.
     */
    lock = obj->lock;
    if (LW_SHAPE(lock) == LW_SHAPE_THIN) {
        return LW_LOCK_OWNER(lock);
    } else {
        owner = LW_MONITOR(lock)->owner;
        return owner ? owner->threadId : 0;
    }
}

/*
 * Get the thread that holds the lock on the specified object. The
 * object may be unlocked, thin-locked, or fat-locked.
 *
 * The caller must lock the thread list before calling here.
 */
Thread* dvmGetObjectLockHolder(Object* obj)
{
    u4 threadId = lockOwner(obj);

    if (threadId == 0)
        return NULL;
    return dvmGetThreadByThreadId(threadId);
}

/*
 * Checks whether the given thread holds the given
 * object's lock.
 */
bool dvmHoldsLock(Thread* thread, Object* obj)
{
    if (thread == NULL || obj == NULL) {
        return false;
    } else {
        return thread->threadId == lockOwner(obj);
    }
}

/*
 * Free the monitor associated with an object and make the object's lock
 * thin again. This is called during garbage collection.
 */
static void freeObjectMonitor(Object* obj)
{
    Monitor *mon;

    assert(LW_SHAPE(obj->lock) == LW_SHAPE_FAT);

#ifdef WITH_DEADLOCK_PREDICTION
    if (gDvm.deadlockPredictMode != kDPOff)
        removeCollectedObject(obj);
#endif

    mon = LW_MONITOR(obj->lock);
    obj->lock = DVM_LOCK_INITIAL_THIN_VALUE;

    /* This lock is associated with an object
     * that's being swept. The only possible way
     * anyone could be holding this lock would be
     * if some JNI code locked but didn't unlock
     * the object, in which case we've got some bad
     * native code somewhere.
     */
    assert(pthread_mutex_trylock(&mon->lock) == 0);
    assert(pthread_mutex_unlock(&mon->lock) == 0);
    dvmDestroyMutex(&mon->lock);
#ifdef WITH_DEADLOCK_PREDICTION
    expandObjClear(&mon->historyChildren);
    expandObjClear(&mon->historyParents);
    free(mon->historyRawStackTrace);
#endif
    free(mon);
}

/*
 * Frees monitor objects belonging to unmarked objects.
 */
void dvmSweepMonitorList(Monitor** mon, int (*isUnmarkedObject)(void*))
{
    Monitor handle;
    Monitor *prev, *curr;
    Object *obj;

    assert(mon != NULL);
    assert(isUnmarkedObject != NULL);
#ifdef WITH_DEADLOCK_PREDICTION
    dvmDumpMonitorInfo("before monitor sweep");
#endif
    prev = &handle;
    prev->next = curr = *mon;
    while (curr != NULL) {
        obj = curr->obj;
        if (obj != NULL && (*isUnmarkedObject)(obj) != 0) {
            prev->next = curr = curr->next;
            freeObjectMonitor(obj);
        } else {
            prev = curr;
            curr = curr->next;
        }
    }
    *mon = handle.next;
#ifdef WITH_DEADLOCK_PREDICTION
    dvmDumpMonitorInfo("after monitor sweep");
#endif
}
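
/*
 * Note on the technique above: "handle" is a dummy head node kept on the
 * stack so that unlinking the first real element needs no special case.
 * A minimal sketch of the same unlink idiom, with names invented for
 * illustration only:
 *
 *   Node handle;                            // dummy head, on the stack
 *   Node *prev = &handle, *curr;
 *   prev->next = curr = list;
 *   while (curr != NULL) {
 *       if (shouldRemove(curr)) {
 *           prev->next = curr = curr->next; // unlink; prev stays put
 *       } else {
 *           prev = curr;
 *           curr = curr->next;
 *       }
 *   }
 *   list = handle.next;                     // possibly-new head
 */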

static char *logWriteInt(char *dst, int value)
{
    *dst++ = EVENT_TYPE_INT;
    set4LE((u1 *)dst, value);
    return dst + 4;
}

static char *logWriteString(char *dst, const char *value, size_t len)
{
    *dst++ = EVENT_TYPE_STRING;
    len = len < 32 ? len : 32;
    set4LE((u1 *)dst, len);
    dst += 4;
    memcpy(dst, value, len);
    return dst + len;
}
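
/*
 * The two helpers above serialize EventLog list elements. Assuming the
 * usual Android EventLog encoding, an int element is one type byte
 * (EVENT_TYPE_INT) followed by a 4-byte little-endian value, and a string
 * element is one type byte (EVENT_TYPE_STRING), a 4-byte little-endian
 * length, then the raw bytes (clamped here to 32). A string element
 * therefore occupies at most 1 + 4 + 32 = 37 bytes and an int element
 * exactly 5, which is where the size estimates in the comments below
 * come from.
 */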

#define EVENT_LOG_TAG_dvm_lock_sample 20003

static void logContentionEvent(Thread *self, u4 waitMs, u4 samplePercent,
                               const char *ownerFileName, u4 ownerLineNumber)
{
    const StackSaveArea *saveArea;
    const Method *meth;
    u4 relativePc;
    char eventBuffer[174];
    const char *fileName;
    char procName[33], *selfName;
    char *cp;
    size_t len;
    int fd;

    saveArea = SAVEAREA_FROM_FP(self->curFrame);
    meth = saveArea->method;
    cp = eventBuffer;

    /* Emit the event list length, 1 byte. */
    *cp++ = 9;

    /* Emit the process name, <= 37 bytes. */
    fd = open("/proc/self/cmdline", O_RDONLY);
    memset(procName, 0, sizeof(procName));
    read(fd, procName, sizeof(procName) - 1);
    close(fd);
    len = strlen(procName);
    cp = logWriteString(cp, procName, len);

    /* Emit the main thread status, 5 bytes. */
    bool isMainThread = (self->systemTid == getpid());
    cp = logWriteInt(cp, isMainThread);

    /* Emit self thread name string, <= 37 bytes. */
    selfName = dvmGetThreadName(self);
    cp = logWriteString(cp, selfName, strlen(selfName));
    free(selfName);

    /* Emit the wait time, 5 bytes. */
    cp = logWriteInt(cp, waitMs);

    /* Emit the source code file name, <= 37 bytes. */
    fileName = dvmGetMethodSourceFile(meth);
    if (fileName == NULL) fileName = "";
    cp = logWriteString(cp, fileName, strlen(fileName));

    /* Emit the source code line number, 5 bytes. */
    relativePc = saveArea->xtra.currentPc - saveArea->method->insns;
    cp = logWriteInt(cp, dvmLineNumFromPC(meth, relativePc));

    /* Emit the lock owner source code file name, <= 37 bytes. */
    if (ownerFileName == NULL) {
        ownerFileName = "";
    } else if (strcmp(fileName, ownerFileName) == 0) {
        /* Common case, so save on log space. */
        ownerFileName = "-";
    }
    cp = logWriteString(cp, ownerFileName, strlen(ownerFileName));

    /* Emit the lock owner source code line number, 5 bytes. */
    cp = logWriteInt(cp, ownerLineNumber);

    /* Emit the sample percentage, 5 bytes. */
    cp = logWriteInt(cp, samplePercent);

    assert((size_t)(cp - eventBuffer) <= sizeof(eventBuffer));
    android_btWriteLog(EVENT_LOG_TAG_dvm_lock_sample,
                       EVENT_TYPE_LIST,
                       eventBuffer,
                       (size_t)(cp - eventBuffer));
}
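
/*
 * Putting the pieces together, the dvm_lock_sample record emitted above
 * is a 9-element EventLog list laid out as:
 *
 *   list length (9)                  1 byte
 *   process name                     string, <= 37 bytes
 *   main-thread flag                 int, 5 bytes
 *   thread name                      string, <= 37 bytes
 *   wait time in msec                int, 5 bytes
 *   waiter's source file             string, <= 37 bytes
 *   waiter's line number             int, 5 bytes
 *   owner's source file              string, <= 37 bytes ("-" if same)
 *   owner's line number              int, 5 bytes
 *   sample percentage                int, 5 bytes
 *
 * Worst case is 1 + 4*37 + 5*5 = 174 bytes, which is exactly
 * sizeof(eventBuffer).
 */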

/*
 * Lock a monitor.
 */
static void lockMonitor(Thread* self, Monitor* mon)
{
    ThreadStatus oldStatus;
    u4 waitThreshold, samplePercent;
    u8 waitStart, waitEnd, waitMs;

    if (mon->owner == self) {
        mon->lockCount++;
        return;
    }
    if (dvmTryLockMutex(&mon->lock) != 0) {
        oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
        waitThreshold = gDvm.lockProfThreshold;
        if (waitThreshold) {
            waitStart = dvmGetRelativeTimeUsec();
        }
        const char* currentOwnerFileName = mon->ownerFileName;
        u4 currentOwnerLineNumber = mon->ownerLineNumber;

        dvmLockMutex(&mon->lock);
        if (waitThreshold) {
            waitEnd = dvmGetRelativeTimeUsec();
        }
        dvmChangeStatus(self, oldStatus);
        if (waitThreshold) {
            waitMs = (waitEnd - waitStart) / 1000;
            if (waitMs >= waitThreshold) {
                samplePercent = 100;
            } else {
                samplePercent = 100 * waitMs / waitThreshold;
            }
            if (samplePercent != 0 && ((u4)rand() % 100 < samplePercent)) {
                logContentionEvent(self, waitMs, samplePercent,
                                   currentOwnerFileName,
                                   currentOwnerLineNumber);
            }
        }
    }
    mon->owner = self;
    assert(mon->lockCount == 0);

    // When debugging, save the current monitor holder for future
    // acquisition failures to use in sampled logging.
    if (gDvm.lockProfThreshold > 0) {
        const StackSaveArea *saveArea;
        const Method *meth;
        mon->ownerLineNumber = 0;
        if (self->curFrame == NULL) {
            mon->ownerFileName = "no_frame";
        } else if ((saveArea = SAVEAREA_FROM_FP(self->curFrame)) == NULL) {
            mon->ownerFileName = "no_save_area";
        } else if ((meth = saveArea->method) == NULL) {
            mon->ownerFileName = "no_method";
        } else {
            u4 relativePc = saveArea->xtra.currentPc - saveArea->method->insns;
            mon->ownerFileName = (char*) dvmGetMethodSourceFile(meth);
            if (mon->ownerFileName == NULL) {
                mon->ownerFileName = "no_method_file";
            } else {
                mon->ownerLineNumber = dvmLineNumFromPC(meth, relativePc);
            }
        }
    }
}
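
/*
 * A worked example of the sampling policy above, assuming
 * gDvm.lockProfThreshold is 500 (msec): a thread that blocked for 125 msec
 * gets samplePercent = 100 * 125 / 500 = 25, so its contention event is
 * logged with probability 0.25; any wait of 500 msec or longer is always
 * logged. Short waits are thus logged in rough proportion to how close
 * they come to the threshold, keeping log volume down.
 */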

/*
 * Try to lock a monitor.
 *
 * Returns "true" on success.
 */
#ifdef WITH_COPYING_GC
static bool tryLockMonitor(Thread* self, Monitor* mon)
{
    if (mon->owner == self) {
        mon->lockCount++;
        return true;
    } else {
        if (dvmTryLockMutex(&mon->lock) == 0) {
            mon->owner = self;
            assert(mon->lockCount == 0);
            return true;
        } else {
            return false;
        }
    }
}
#endif

/*
 * Unlock a monitor.
 *
 * Returns true if the unlock succeeded.
 * If the unlock failed, an exception will be pending.
 */
static bool unlockMonitor(Thread* self, Monitor* mon)
{
    assert(self != NULL);
    assert(mon != NULL);
    if (mon->owner == self) {
        /*
         * We own the monitor, so nobody else can be in here.
         */
        if (mon->lockCount == 0) {
            mon->owner = NULL;
            mon->ownerFileName = "unlocked";
            mon->ownerLineNumber = 0;
            dvmUnlockMutex(&mon->lock);
        } else {
            mon->lockCount--;
        }
    } else {
        /*
         * We don't own this, so we're not allowed to unlock it.
         * The JNI spec says that we should throw IllegalMonitorStateException
         * in this case.
         */
        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
                          "unlock of unowned monitor");
        return false;
    }
    return true;
}

/*
 * Checks the wait set for circular structure, using Floyd's two-pointer
 * ("tortoise and hare") cycle detection. Returns 0 if the list is not
 * circular. Otherwise, returns 1. Used only by asserts.
 */
#ifndef NDEBUG
static int waitSetCheck(Monitor *mon)
{
    Thread *fast, *slow;
    size_t n;

    assert(mon != NULL);
    fast = slow = mon->waitSet;
    n = 0;
    for (;;) {
        if (fast == NULL) return 0;
        if (fast->waitNext == NULL) return 0;
        if (fast == slow && n > 0) return 1;
        n += 2;
        fast = fast->waitNext->waitNext;
        slow = slow->waitNext;
    }
}
#endif

/*
 * Links a thread into a monitor's wait set. The monitor lock must be
 * held by the caller of this routine.
 */
static void waitSetAppend(Monitor *mon, Thread *thread)
{
    Thread *elt;

    assert(mon != NULL);
    assert(mon->owner == dvmThreadSelf());
    assert(thread != NULL);
    assert(thread->waitNext == NULL);
    assert(waitSetCheck(mon) == 0);
    if (mon->waitSet == NULL) {
        mon->waitSet = thread;
        return;
    }
    elt = mon->waitSet;
    while (elt->waitNext != NULL) {
        elt = elt->waitNext;
    }
    elt->waitNext = thread;
}

/*
 * Unlinks a thread from a monitor's wait set. The monitor lock must
 * be held by the caller of this routine.
 */
static void waitSetRemove(Monitor *mon, Thread *thread)
{
    Thread *elt;

    assert(mon != NULL);
    assert(mon->owner == dvmThreadSelf());
    assert(thread != NULL);
    assert(waitSetCheck(mon) == 0);
    if (mon->waitSet == NULL) {
        return;
    }
    if (mon->waitSet == thread) {
        mon->waitSet = thread->waitNext;
        thread->waitNext = NULL;
        return;
    }
    elt = mon->waitSet;
    while (elt->waitNext != NULL) {
        if (elt->waitNext == thread) {
            elt->waitNext = thread->waitNext;
            thread->waitNext = NULL;
            return;
        }
        elt = elt->waitNext;
    }
}

/*
 * Converts the given relative waiting time into an absolute time.
 */
void absoluteTime(s8 msec, s4 nsec, struct timespec *ts)
{
    s8 endSec;

#ifdef HAVE_TIMEDWAIT_MONOTONIC
    clock_gettime(CLOCK_MONOTONIC, ts);
#else
    {
        struct timeval tv;
        gettimeofday(&tv, NULL);
        ts->tv_sec = tv.tv_sec;
        ts->tv_nsec = tv.tv_usec * 1000;
    }
#endif
    endSec = ts->tv_sec + msec / 1000;
    if (endSec >= 0x7fffffff) {
        LOGV("NOTE: end time exceeds epoch\n");
        endSec = 0x7ffffffe;
    }
    ts->tv_sec = endSec;
    ts->tv_nsec = (ts->tv_nsec + (msec % 1000) * 1000000) + nsec;

    /* catch rollover */
    if (ts->tv_nsec >= 1000000000L) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000L;
    }
}
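
/*
 * Worked example for absoluteTime(): with msec = 2500 and nsec = 300,
 * the deadline becomes now + 2 seconds plus (500 * 1000000) + 300
 * nanoseconds; if that nanosecond sum crosses 1e9, the rollover branch
 * moves one second over. Deadlines past the 32-bit epoch are clamped
 * to 0x7ffffffe seconds, which is acceptable because waiters are
 * allowed to wake early.
 */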

int dvmRelativeCondWait(pthread_cond_t* cond, pthread_mutex_t* mutex,
    s8 msec, s4 nsec)
{
    int ret;
    struct timespec ts;
    absoluteTime(msec, nsec, &ts);
#if defined(HAVE_TIMEDWAIT_MONOTONIC)
    ret = pthread_cond_timedwait_monotonic(cond, mutex, &ts);
#else
    ret = pthread_cond_timedwait(cond, mutex, &ts);
#endif
    assert(ret == 0 || ret == ETIMEDOUT);
    return ret;
}
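
/*
 * Typical use: hold "mutex", then sleep up to the relative timeout unless
 * signaled first. A sketch (illustrative only; "cond" and "mutex" are
 * placeholders):
 *
 *   dvmLockMutex(&mutex);
 *   int ret = dvmRelativeCondWait(&cond, &mutex, 250, 0);
 *   if (ret == ETIMEDOUT) {
 *       // 250 msec elapsed without a signal
 *   }
 *   dvmUnlockMutex(&mutex);
 */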

/*
 * Wait on a monitor until timeout, interrupt, or notification. Used for
 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
 *
 * If another thread calls Thread.interrupt(), we throw InterruptedException
 * and return immediately if one of the following is true:
 *  - blocked in wait(), wait(long), or wait(long, int) methods of Object
 *  - blocked in join(), join(long), or join(long, int) methods of Thread
 *  - blocked in sleep(long) or sleep(long, int) methods of Thread
 * Otherwise, we set the "interrupted" flag.
 *
 * Checks to make sure that "nsec" is in the range 0-999999
 * (i.e. fractions of a millisecond) and throws the appropriate
 * exception if it isn't.
 *
 * The spec allows "spurious wakeups", and recommends that all code using
 * Object.wait() do so in a loop. This appears to derive from concerns
 * about pthread_cond_wait() on multiprocessor systems. Some commentary
 * on the web casts doubt on whether these can/should occur.
 *
 * Since we're allowed to wake up "early", we clamp extremely long durations
 * to return at the end of the 32-bit time epoch.
 */
static void waitMonitor(Thread* self, Monitor* mon, s8 msec, s4 nsec,
    bool interruptShouldThrow)
{
    struct timespec ts;
    bool wasInterrupted = false;
    bool timed;
    int ret;
    char *savedFileName;
    u4 savedLineNumber;

    assert(self != NULL);
    assert(mon != NULL);

    /* Make sure that we hold the lock. */
    if (mon->owner != self) {
        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
            "object not locked by thread before wait()");
        return;
    }

    /*
     * Enforce the timeout range.
     */
    if (msec < 0 || nsec < 0 || nsec > 999999) {
        dvmThrowException("Ljava/lang/IllegalArgumentException;",
            "timeout arguments out of range");
        return;
    }

    /*
     * Compute absolute wakeup time, if necessary.
     */
    if (msec == 0 && nsec == 0) {
        timed = false;
    } else {
        absoluteTime(msec, nsec, &ts);
        timed = true;
    }

    /*
     * Add ourselves to the set of threads waiting on this monitor, and
     * release our hold. We need to let it go even if we're a few levels
     * deep in a recursive lock, and we need to restore that later.
     *
     * We append to the wait set ahead of clearing the count and owner
     * fields so the subroutine can check that the calling thread owns
     * the monitor. Aside from that, the order of member updates is
     * not order sensitive as we hold the pthread mutex.
     */
    waitSetAppend(mon, self);
    int prevLockCount = mon->lockCount;
    mon->lockCount = 0;
    mon->owner = NULL;
    savedFileName = mon->ownerFileName;
    mon->ownerFileName = NULL;
    savedLineNumber = mon->ownerLineNumber;
    mon->ownerLineNumber = 0;

    /*
     * Update thread status. If the GC wakes up, it'll ignore us, knowing
     * that we won't touch any references in this state, and we'll check
     * our suspend mode before we transition out.
     */
    if (timed)
        dvmChangeStatus(self, THREAD_TIMED_WAIT);
    else
        dvmChangeStatus(self, THREAD_WAIT);

    dvmLockMutex(&self->waitMutex);

    /*
     * Set waitMonitor to the monitor object we will be waiting on.
     * When waitMonitor is non-NULL a notifying or interrupting thread
     * must signal the thread's waitCond to wake it up.
     */
    assert(self->waitMonitor == NULL);
    self->waitMonitor = mon;

    /*
     * Handle the case where the thread was interrupted before we called
     * wait().
     */
    if (self->interrupted) {
        wasInterrupted = true;
        self->waitMonitor = NULL;
        dvmUnlockMutex(&self->waitMutex);
        goto done;
    }

    /*
     * Release the monitor lock and wait for a notification or
     * a timeout to occur.
     */
    dvmUnlockMutex(&mon->lock);

    if (!timed) {
        ret = pthread_cond_wait(&self->waitCond, &self->waitMutex);
        assert(ret == 0);
    } else {
#ifdef HAVE_TIMEDWAIT_MONOTONIC
        ret = pthread_cond_timedwait_monotonic(&self->waitCond,
                &self->waitMutex, &ts);
#else
        ret = pthread_cond_timedwait(&self->waitCond, &self->waitMutex, &ts);
#endif
        assert(ret == 0 || ret == ETIMEDOUT);
    }
    if (self->interrupted) {
        wasInterrupted = true;
    }

    self->interrupted = false;
    self->waitMonitor = NULL;

    dvmUnlockMutex(&self->waitMutex);

    /* Reacquire the monitor lock. */
    lockMonitor(self, mon);

done:
    /*
     * We remove our thread from wait set after restoring the count
     * and owner fields so the subroutine can check that the calling
     * thread owns the monitor. Aside from that, the order of member
     * updates is not order sensitive as we hold the pthread mutex.
     */
    mon->owner = self;
    mon->lockCount = prevLockCount;
    mon->ownerFileName = savedFileName;
    mon->ownerLineNumber = savedLineNumber;
    waitSetRemove(mon, self);

    /* set self->status back to THREAD_RUNNING, and self-suspend if needed */
    dvmChangeStatus(self, THREAD_RUNNING);

    if (wasInterrupted) {
        /*
         * We were interrupted while waiting, or somebody interrupted an
         * un-interruptible thread earlier and we're bailing out immediately.
         *
         * The doc sayeth: "The interrupted status of the current thread is
         * cleared when this exception is thrown."
         */
        self->interrupted = false;
        if (interruptShouldThrow)
            dvmThrowException("Ljava/lang/InterruptedException;", NULL);
    }
}

/*
 * Notify one thread waiting on this monitor.
 */
static void notifyMonitor(Thread* self, Monitor* mon)
{
    Thread* thread;

    assert(self != NULL);
    assert(mon != NULL);

    /* Make sure that we hold the lock. */
    if (mon->owner != self) {
        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
            "object not locked by thread before notify()");
        return;
    }
    /* Signal the first waiting thread in the wait set. */
    while (mon->waitSet != NULL) {
        thread = mon->waitSet;
        mon->waitSet = thread->waitNext;
        thread->waitNext = NULL;
        dvmLockMutex(&thread->waitMutex);
        /* Check to see if the thread is still waiting. */
        if (thread->waitMonitor != NULL) {
            pthread_cond_signal(&thread->waitCond);
            dvmUnlockMutex(&thread->waitMutex);
            return;
        }
        dvmUnlockMutex(&thread->waitMutex);
    }
}

/*
 * Notify all threads waiting on this monitor.
 */
static void notifyAllMonitor(Thread* self, Monitor* mon)
{
    Thread* thread;

    assert(self != NULL);
    assert(mon != NULL);

    /* Make sure that we hold the lock. */
    if (mon->owner != self) {
        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
            "object not locked by thread before notifyAll()");
        return;
    }
    /* Signal all threads in the wait set. */
    while (mon->waitSet != NULL) {
        thread = mon->waitSet;
        mon->waitSet = thread->waitNext;
        thread->waitNext = NULL;
        dvmLockMutex(&thread->waitMutex);
        /* Check to see if the thread is still waiting. */
        if (thread->waitMonitor != NULL) {
            pthread_cond_signal(&thread->waitCond);
        }
        dvmUnlockMutex(&thread->waitMutex);
    }
}

/*
 * Changes the shape of a monitor from thin to fat, preserving the
 * internal lock state. The calling thread must own the lock.
 */
static void inflateMonitor(Thread *self, Object *obj)
{
    Monitor *mon;
    u4 thin;

    assert(self != NULL);
    assert(obj != NULL);
    assert(LW_SHAPE(obj->lock) == LW_SHAPE_THIN);
    assert(LW_LOCK_OWNER(obj->lock) == self->threadId);
    /* Allocate and acquire a new monitor. */
    mon = dvmCreateMonitor(obj);
    lockMonitor(self, mon);
    /* Propagate the lock state. */
    thin = obj->lock;
    mon->lockCount = LW_LOCK_COUNT(thin);
    thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
    thin |= (u4)mon | LW_SHAPE_FAT;
    /* Publish the updated lock word. */
    android_atomic_release_store(thin, (int32_t *)&obj->lock);
}
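
/*
 * Worked example for inflateMonitor(): suppose thread 5 holds the thin
 * lock with a recursion count of 2 and hash state bits (2..1) of 01.
 * The thin word is then (2 << 19) | (5 << 3) | (1 << 1) | 0. After
 * inflation the count lives in mon->lockCount, and the published word
 * is (u4)mon | (1 << 1) | 1 -- the 8-byte-aligned monitor pointer with
 * the hash state preserved and the shape bit set to "fat".
 */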

/*
 * Implements monitorenter for "synchronized" stuff.
 *
 * This does not fail or throw an exception (unless deadlock prediction
 * is enabled and set to "err" mode).
 */
void dvmLockObject(Thread* self, Object *obj)
{
    volatile u4 *thinp;
    ThreadStatus oldStatus;
    useconds_t sleepDelay;
    const useconds_t maxSleepDelay = 1 << 20;
    u4 thin, newThin, threadId;

    assert(self != NULL);
    assert(obj != NULL);
    threadId = self->threadId;
    thinp = &obj->lock;
retry:
    thin = *thinp;
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /*
         * The lock is a thin lock. The owner field is used to
         * determine the acquire method, ordered by cost.
         */
        if (LW_LOCK_OWNER(thin) == threadId) {
            /*
             * The calling thread owns the lock. Increment the
             * value of the recursion count field.
             */
            obj->lock += 1 << LW_LOCK_COUNT_SHIFT;
            if (LW_LOCK_COUNT(obj->lock) == LW_LOCK_COUNT_MASK) {
                /*
                 * The reacquisition limit has been reached. Inflate
                 * the lock so the next acquire will not overflow the
                 * recursion count field.
                 */
                inflateMonitor(self, obj);
            }
        } else if (LW_LOCK_OWNER(thin) == 0) {
            /*
             * The lock is unowned. Install the thread id of the
             * calling thread into the owner field. This is the
             * common case. In performance critical code the JIT
             * will have tried this before calling out to the VM.
             */
            newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
            if (android_atomic_acquire_cas(thin, newThin,
                    (int32_t*)thinp) != 0) {
                /*
                 * The acquire failed. Try again.
                 */
                goto retry;
            }
        } else {
            LOG_THIN("(%d) spin on lock %p: %#x (%#x) %#x",
                     threadId, &obj->lock, 0, *thinp, thin);
            /*
             * The lock is owned by another thread. Notify the VM
             * that we are about to wait.
             */
            oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
            /*
             * Spin until the thin lock is released or inflated.
             */
            sleepDelay = 0;
            for (;;) {
                thin = *thinp;
                /*
                 * Check the shape of the lock word. Another thread
                 * may have inflated the lock while we were waiting.
                 */
                if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
                    if (LW_LOCK_OWNER(thin) == 0) {
                        /*
                         * The lock has been released. Install the
                         * thread id of the calling thread into the
                         * owner field.
                         */
                        newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
                        if (android_atomic_acquire_cas(thin, newThin,
                                (int32_t *)thinp) == 0) {
                            /*
                             * The acquire succeeded. Break out of the
                             * loop and proceed to inflate the lock.
                             */
                            break;
                        }
                    } else {
                        /*
                         * The lock has not been released. Yield so
                         * the owning thread can run.
                         */
                        if (sleepDelay == 0) {
                            sched_yield();
                            sleepDelay = 1000;
                        } else {
                            usleep(sleepDelay);
                            if (sleepDelay < maxSleepDelay / 2) {
                                sleepDelay *= 2;
                            }
                        }
                    }
                } else {
                    /*
                     * The thin lock was inflated by another thread.
                     * Let the VM know we are no longer waiting and
                     * try again.
                     */
                    LOG_THIN("(%d) lock %p surprise-fattened",
                             threadId, &obj->lock);
                    dvmChangeStatus(self, oldStatus);
                    goto retry;
                }
            }
            LOG_THIN("(%d) spin on lock done %p: %#x (%#x) %#x",
                     threadId, &obj->lock, 0, *thinp, thin);
            /*
             * We have acquired the thin lock. Let the VM know that
             * we are no longer waiting.
             */
            dvmChangeStatus(self, oldStatus);
            /*
             * Fatten the lock.
             */
            inflateMonitor(self, obj);
            LOG_THIN("(%d) lock %p fattened", threadId, &obj->lock);
        }
    } else {
        /*
         * The lock is a fat lock.
         */
        assert(LW_MONITOR(obj->lock) != NULL);
        lockMonitor(self, LW_MONITOR(obj->lock));
    }
#ifdef WITH_DEADLOCK_PREDICTION
    /*
     * See if we were allowed to grab the lock at this time. We do it
     * *after* acquiring the lock, rather than before, so that we can
     * freely update the Monitor struct. This seems counter-intuitive,
     * but our goal is deadlock *prediction* not deadlock *prevention*.
     * (If we actually deadlock, the situation is easy to diagnose from
     * a thread dump, so there's no point making a special effort to do
     * the checks before the lock is held.)
     *
     * This needs to happen before we add the object to the thread's
     * monitor list, so we can tell the difference between first-lock and
     * re-lock.
     *
     * It's also important that we do this while in THREAD_RUNNING, so
     * that we don't interfere with cleanup operations in the GC.
     */
    if (gDvm.deadlockPredictMode != kDPOff) {
        if (self->status != THREAD_RUNNING) {
            LOGE("Bad thread status (%d) in DP\n", self->status);
            dvmDumpThread(self, false);
            dvmAbort();
        }
        assert(!dvmCheckException(self));
        updateDeadlockPrediction(self, obj);
        if (dvmCheckException(self)) {
            /*
             * If we're throwing an exception here, we need to free the
             * lock. We add the object to the thread's monitor list so the
             * "unlock" code can remove it.
             */
            dvmAddToMonitorList(self, obj, false);
            dvmUnlockObject(self, obj);
            LOGV("--- unlocked, pending is '%s'\n",
                dvmGetException(self)->clazz->descriptor);
        }
    }

    /*
     * Add the locked object, and the current stack trace, to the list
     * held by the Thread object. If deadlock prediction isn't on,
     * don't capture the stack trace.
     */
    dvmAddToMonitorList(self, obj, gDvm.deadlockPredictMode != kDPOff);
#elif defined(WITH_MONITOR_TRACKING)
    /*
     * Add the locked object to the list held by the Thread object.
     */
    dvmAddToMonitorList(self, obj, false);
#endif
}
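
/*
 * The contended-lock path above backs off geometrically: the first retry
 * just calls sched_yield(), then sleeps of 1000, 2000, 4000, ..., 512000
 * usec follow, doubling until the delay passes maxSleepDelay / 2; after
 * that the delay settles at 1024000 usec (just under the ~1 sec
 * maxSleepDelay) for subsequent retries. This keeps the common
 * short-contention case cheap while bounding the polling rate on long
 * waits.
 */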

/*
 * Implements monitorexit for "synchronized" stuff.
 *
 * On failure, throws an exception and returns "false".
 */
bool dvmUnlockObject(Thread* self, Object *obj)
{
    u4 thin;

    assert(self != NULL);
    assert(self->status == THREAD_RUNNING);
    assert(obj != NULL);
    /*
     * Cache the lock word as its value can change while we are
     * examining its state.
     */
    thin = obj->lock;
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /*
         * The lock is thin. We must ensure that the lock is owned
         * by the given thread before unlocking it.
         */
        if (LW_LOCK_OWNER(thin) == self->threadId) {
            /*
             * We are the lock owner. It is safe to update the lock
             * without CAS as lock ownership guards the lock itself.
             */
            if (LW_LOCK_COUNT(thin) == 0) {
                /*
                 * The lock was not recursively acquired, the common
                 * case. Unlock by clearing all bits except for the
                 * hash state.
                 */
                obj->lock &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
            } else {
                /*
                 * The object was recursively acquired. Decrement the
                 * lock recursion count field.
                 */
                obj->lock -= 1 << LW_LOCK_COUNT_SHIFT;
            }
        } else {
            /*
             * We do not own the lock. The JVM spec requires that we
             * throw an exception in this case.
             */
            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
                              "unlock of unowned monitor");
            return false;
        }
    } else {
        /*
         * The lock is fat. We must check to see if unlockMonitor has
         * raised any exceptions before continuing.
         */
        assert(LW_MONITOR(obj->lock) != NULL);
        if (!unlockMonitor(self, LW_MONITOR(obj->lock))) {
            /*
             * An exception has been raised. Do not fall through.
             */
            return false;
        }
    }

#ifdef WITH_MONITOR_TRACKING
    /*
     * Remove the object from the Thread's list.
     */
    dvmRemoveFromMonitorList(self, obj);
#endif

    return true;
}

/*
 * Object.wait(). Also called for class init.
 */
void dvmObjectWait(Thread* self, Object *obj, s8 msec, s4 nsec,
    bool interruptShouldThrow)
{
    Monitor* mon;
    u4 thin = obj->lock;

    /* If the lock is still thin, we need to fatten it.
     */
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /* Make sure that 'self' holds the lock.
         */
        if (LW_LOCK_OWNER(thin) != self->threadId) {
            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
                "object not locked by thread before wait()");
            return;
        }

        /* This thread holds the lock. We need to fatten the lock
         * so 'self' can block on it. Don't update the object lock
         * field yet, because 'self' needs to acquire the lock before
         * any other thread gets a chance.
         */
        inflateMonitor(self, obj);
        mon = LW_MONITOR(obj->lock);
        LOG_THIN("(%d) lock %p fattened by wait() to count %d",
                 self->threadId, &obj->lock, mon->lockCount);
    } else {
        mon = LW_MONITOR(obj->lock);
    }
    waitMonitor(self, mon, msec, nsec, interruptShouldThrow);
}

/*
 * Object.notify().
 */
void dvmObjectNotify(Thread* self, Object *obj)
{
    u4 thin = obj->lock;

    /* If the lock is still thin, there aren't any waiters;
     * waiting on an object forces lock fattening.
     */
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /* Make sure that 'self' holds the lock.
         */
        if (LW_LOCK_OWNER(thin) != self->threadId) {
            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
                "object not locked by thread before notify()");
            return;
        }

        /* no-op; there are no waiters to notify.
         */
    } else {
        /* It's a fat lock.
         */
        notifyMonitor(self, LW_MONITOR(thin));
    }
}

/*
 * Object.notifyAll().
 */
void dvmObjectNotifyAll(Thread* self, Object *obj)
{
    u4 thin = obj->lock;

    /* If the lock is still thin, there aren't any waiters;
     * waiting on an object forces lock fattening.
     */
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /* Make sure that 'self' holds the lock.
         */
        if (LW_LOCK_OWNER(thin) != self->threadId) {
            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
                "object not locked by thread before notifyAll()");
            return;
        }

        /* no-op; there are no waiters to notify.
         */
    } else {
        /* It's a fat lock.
         */
        notifyAllMonitor(self, LW_MONITOR(thin));
    }
}

/*
 * This implements java.lang.Thread.sleep(long msec, int nsec).
 *
 * The sleep is interruptible by other threads, which means we can't just
 * plop into an OS sleep call. (We probably could if we wanted to send
 * signals around and rely on EINTR, but that's inefficient and relies
 * on native code respecting our signal mask.)
 *
 * We have to do all of this stuff for Object.wait() as well, so it's
 * easiest to just sleep on a private Monitor.
 *
 * It appears that we want sleep(0,0) to go through the motions of sleeping
 * for a very short duration, rather than just returning.
 */
void dvmThreadSleep(u8 msec, u4 nsec)
{
    Thread* self = dvmThreadSelf();
    Monitor* mon = gDvm.threadSleepMon;

    /* sleep(0,0) wakes up immediately, wait(0,0) means wait forever; adjust */
    if (msec == 0 && nsec == 0)
        nsec++;

    lockMonitor(self, mon);
    waitMonitor(self, mon, msec, nsec, true);
    unlockMonitor(self, mon);
}

/*
 * Implement java.lang.Thread.interrupt().
 */
void dvmThreadInterrupt(Thread* thread)
{
    assert(thread != NULL);

    dvmLockMutex(&thread->waitMutex);

    /*
     * If the interrupted flag is already set no additional action is
     * required.
     */
    if (thread->interrupted == true) {
        dvmUnlockMutex(&thread->waitMutex);
        return;
    }

    /*
     * Raise the "interrupted" flag. This will cause the thread to bail
     * out early from its next wait() attempt, if it's not currently
     * waiting on something.
     */
    thread->interrupted = true;

    /*
     * Is the thread waiting?
     *
     * Note that fat vs. thin doesn't matter here; waitMonitor
     * is only set when a thread actually waits on a monitor,
     * which implies that the monitor has already been fattened.
     */
    if (thread->waitMonitor != NULL) {
        pthread_cond_signal(&thread->waitCond);
    }

    dvmUnlockMutex(&thread->waitMutex);
}

#ifndef WITH_COPYING_GC
u4 dvmIdentityHashCode(Object *obj)
{
    return (u4)obj;
}
#else
/*
 * Returns the identity hash code of the given object.
 */
u4 dvmIdentityHashCode(Object *obj)
{
    Thread *self, *thread;
    volatile u4 *lw;
    size_t size;
    u4 lock, owner, hashState;

    if (obj == NULL) {
        /*
         * Null is defined to have an identity hash code of 0.
         */
        return 0;
    }
    lw = &obj->lock;
retry:
    hashState = LW_HASH_STATE(*lw);
    if (hashState == LW_HASH_STATE_HASHED) {
        /*
         * The object has been hashed but has not had its hash code
         * relocated by the garbage collector. Use the raw object
         * address.
         */
        return (u4)obj >> 3;
    } else if (hashState == LW_HASH_STATE_HASHED_AND_MOVED) {
        /*
         * The object has been hashed and its hash code has been
         * relocated by the collector. Use the value of the naturally
         * aligned word following the instance data.
         */
        assert(obj->clazz != gDvm.classJavaLangClass);
        if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) {
            size = dvmArrayObjectSize((ArrayObject *)obj);
            size = (size + 2) & ~2;
        } else {
            size = obj->clazz->objectSize;
        }
        return *(u4 *)(((char *)obj) + size);
    } else if (hashState == LW_HASH_STATE_UNHASHED) {
        /*
         * The object has never been hashed. Change the hash state to
         * hashed and use the raw object address.
         */
        self = dvmThreadSelf();
        if (self->threadId == lockOwner(obj)) {
            /*
             * We already own the lock so we can update the hash state
             * directly.
             */
            *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
            return (u4)obj >> 3;
        }
        /*
         * We do not own the lock. Try acquiring the lock. Should
         * this fail, we must suspend the owning thread.
         */
        if (LW_SHAPE(*lw) == LW_SHAPE_THIN) {
            /*
             * If the lock is thin assume it is unowned. We simulate
             * an acquire, update, and release with a single CAS.
             */
            lock = DVM_LOCK_INITIAL_THIN_VALUE;
            lock |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
            if (android_atomic_acquire_cas(
                    (int32_t)DVM_LOCK_INITIAL_THIN_VALUE,
                    (int32_t)lock,
                    (int32_t *)lw) == 0) {
                /*
                 * A new lockword has been installed with a hash state
                 * of hashed. Use the raw object address.
                 */
                return (u4)obj >> 3;
            }
        } else {
            if (tryLockMonitor(self, LW_MONITOR(*lw))) {
                /*
                 * The monitor lock has been acquired. Change the
                 * hash state to hashed and use the raw object
                 * address.
                 */
                *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
                unlockMonitor(self, LW_MONITOR(*lw));
                return (u4)obj >> 3;
            }
        }
        /*
         * At this point we have failed to acquire the lock. We must
         * identify the owning thread and suspend it.
         */
        dvmLockThreadList(self);
        /*
         * Cache the lock word as its value can change between
         * determining its shape and retrieving its owner.
         */
        lock = *lw;
        if (LW_SHAPE(lock) == LW_SHAPE_THIN) {
            /*
             * Find the thread with the corresponding thread id.
             */
            owner = LW_LOCK_OWNER(lock);
            assert(owner != self->threadId);
            /*
             * If the lock has no owner do not bother scanning the
             * thread list and fall through to the failure handler.
             */
            thread = owner ? gDvm.threadList : NULL;
            while (thread != NULL) {
                if (thread->threadId == owner) {
                    break;
                }
                thread = thread->next;
            }
        } else {
            thread = LW_MONITOR(lock)->owner;
        }
        /*
         * If thread is NULL the object has been released since the
         * thread list lock was acquired. Try again.
         */
        if (thread == NULL) {
            dvmUnlockThreadList();
            goto retry;
        }
        /*
         * Wait for the owning thread to suspend.
         */
        dvmSuspendThread(thread);
        if (dvmHoldsLock(thread, obj)) {
            /*
             * The owning thread has been suspended. We can safely
             * change the hash state to hashed.
             */
            *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
            dvmResumeThread(thread);
            dvmUnlockThreadList();
            return (u4)obj >> 3;
        }
        /*
         * The wrong thread has been suspended. Try again.
         */
        dvmResumeThread(thread);
        dvmUnlockThreadList();
        goto retry;
    }
    LOGE("object %p has an unknown hash state %#x", obj, hashState);
    dvmDumpThread(dvmThreadSelf(), false);
    dvmAbort();
    return 0;  /* Quiet the compiler. */
}
#endif  /* WITH_COPYING_GC */

#ifdef WITH_DEADLOCK_PREDICTION
/*
 * ===========================================================================
 *      Deadlock prediction
 * ===========================================================================
 */
/*
The idea is to predict the possibility of deadlock by recording the order
in which monitors are acquired. If we see an attempt to acquire a lock
out of order, we can identify the locks and offending code.

To make this work, we need to keep track of the locks held by each thread,
and create history trees for each lock. When a thread tries to acquire
a new lock, we walk through the "history children" of the lock, looking
for a match with locks the thread already holds. If we find a match,
it means the thread has made a request that could result in a deadlock.

To support recursive locks, we always allow re-locking a currently-held
lock, and maintain a recursion depth count.

An ASCII-art example, where letters represent Objects:

       A
      /|\
     / | \
    B  |  D
     \ |
      \|
       C

The above is the tree we'd have after handling Object synchronization
sequences "ABC", "AC", "AD". A has three children, {B, C, D}. C is also
a child of B. (The lines represent pointers between parent and child.
Every node can have multiple parents and multiple children.)

If we hold AC, and want to lock B, we recursively search through B's
children to see if A or C appears. It does, so we reject the attempt.
(A straightforward way to implement it: add a link from C to B, then
determine whether the graph starting at B contains a cycle.)

If we hold AC and want to lock D, we would succeed, creating a new link
from C to D.

The lock history and a stack trace are attached to the Object's Monitor
struct, which means we need to fatten every Object we lock (thin locking
is effectively disabled). If we don't need the stack trace we can
avoid fattening the leaf nodes, only fattening objects that need to hold
history trees.

Updates to Monitor structs are only allowed for the thread that holds
the Monitor, so we actually do most of our deadlock prediction work after
the lock has been acquired.

When an object with a monitor is GCed, we need to remove it from the
history trees. There are two basic approaches:
 (1) Go through the entire set of known monitors, searching all child
     lists for the object in question. This is rather slow, resulting
     in GC passes that take upwards of 10 seconds to complete.
 (2) Maintain "parent" pointers in each node. Remove the entries as
     required. This requires additional storage and maintenance for
     every operation, but is significantly faster at GC time.
For each GCed object, we merge all of the object's children into each of
the object's parents.
*/
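
/*
 * A concrete sketch of the failure this catches: thread 1 runs
 * synchronized(A) { synchronized(B) { ... } }, establishing the edge
 * A -> B in the history tree. If thread 2 later attempts
 * synchronized(B) { synchronized(A) { ... } }, the check on acquiring A
 * searches A's (transitive) children, finds B while B is already held
 * by thread 2, and flags the classic AB/BA deadlock candidate -- even
 * if the two threads never actually collide at runtime.
 */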

#if !defined(WITH_MONITOR_TRACKING)
# error "WITH_DEADLOCK_PREDICTION requires WITH_MONITOR_TRACKING"
#endif

/*
 * Clear out the contents of an ExpandingObjectList, freeing any
 * dynamic allocations.
 */
static void expandObjClear(ExpandingObjectList* pList)
{
    if (pList->list != NULL) {
        free(pList->list);
        pList->list = NULL;
    }
    pList->alloc = pList->count = 0;
}

/*
 * Get the number of objects currently stored in the list.
 */
static inline int expandBufGetCount(const ExpandingObjectList* pList)
{
    return pList->count;
}

/*
 * Get the Nth entry from the list.
 */
static inline Object* expandBufGetEntry(const ExpandingObjectList* pList,
    int i)
{
    return pList->list[i];
}

/*
 * Add a new entry to the list.
 *
 * We don't check for or try to enforce uniqueness. It's expected that
 * the higher-level code does this for us.
 */
static void expandObjAddEntry(ExpandingObjectList* pList, Object* obj)
{
    if (pList->count == pList->alloc) {
        /* time to expand */
        Object** newList;

        if (pList->alloc == 0)
            pList->alloc = 4;
        else
            pList->alloc *= 2;
        LOGVV("expanding %p to %d\n", pList, pList->alloc);
        newList = realloc(pList->list, pList->alloc * sizeof(Object*));
        if (newList == NULL) {
            LOGE("Failed expanding DP object list (alloc=%d)\n", pList->alloc);
            dvmAbort();
        }
        pList->list = newList;
    }

    pList->list[pList->count++] = obj;
}

/*
 * Returns "true" if the element was successfully removed.
 */
static bool expandObjRemoveEntry(ExpandingObjectList* pList, Object* obj)
{
    int i;

    for (i = pList->count-1; i >= 0; i--) {
        if (pList->list[i] == obj)
            break;
    }
    if (i < 0)
        return false;

    if (i != pList->count-1) {
        /*
         * The order of elements is not important, so we just copy the
         * last entry into the new slot.
         */
        //memmove(&pList->list[i], &pList->list[i+1],
        //    (pList->count-1 - i) * sizeof(pList->list[0]));
        pList->list[i] = pList->list[pList->count-1];
    }

    pList->count--;
    pList->list[pList->count] = (Object*) 0xdecadead;
    return true;
}

/*
 * Returns "true" if "obj" appears in the list.
 */
static bool expandObjHas(const ExpandingObjectList* pList, Object* obj)
{
    int i;

    for (i = 0; i < pList->count; i++) {
        if (pList->list[i] == obj)
            return true;
    }
    return false;
}

/*
 * Print the list contents to stdout. For debugging.
 */
static void expandObjDump(const ExpandingObjectList* pList)
{
    int i;
    for (i = 0; i < pList->count; i++)
        printf(" %p", pList->list[i]);
}

/*
 * Check for duplicate entries. Returns the index of the first instance
 * of the duplicated value, or -1 if no duplicates were found.
 */
static int expandObjCheckForDuplicates(const ExpandingObjectList* pList)
{
    int i, j;
    for (i = 0; i < pList->count-1; i++) {
        for (j = i + 1; j < pList->count; j++) {
            if (pList->list[i] == pList->list[j]) {
                return i;
            }
        }
    }

    return -1;
}


/*
 * Determine whether "child" appears in the list of objects associated
 * with the Monitor in "parent". If "parent" is a thin lock, we return
 * false immediately.
 */
static bool objectInChildList(const Object* parent, Object* child)
{
    u4 lock = parent->lock;
    if (!IS_LOCK_FAT(&lock)) {
        //LOGI("on thin\n");
        return false;
    }

    return expandObjHas(&LW_MONITOR(lock)->historyChildren, child);
}

/*
 * Print the child list.
 */
static void dumpKids(Object* parent)
{
    Monitor* mon = LW_MONITOR(parent->lock);

    printf("Children of %p:", parent);
    expandObjDump(&mon->historyChildren);
    printf("\n");
}

/*
 * Add "child" to the list of children in "parent", and add "parent" to
 * the list of parents in "child".
 */
static void linkParentToChild(Object* parent, Object* child)
{
    //assert(LW_MONITOR(parent->lock)->owner == dvmThreadSelf()); // !owned for merge
    assert(IS_LOCK_FAT(&parent->lock));
    assert(IS_LOCK_FAT(&child->lock));
    assert(parent != child);
    Monitor* mon;

    mon = LW_MONITOR(parent->lock);
    assert(!expandObjHas(&mon->historyChildren, child));
    expandObjAddEntry(&mon->historyChildren, child);

    mon = LW_MONITOR(child->lock);
    assert(!expandObjHas(&mon->historyParents, parent));
    expandObjAddEntry(&mon->historyParents, parent);
}


/*
 * Remove "child" from the list of children in "parent".
 */
static void unlinkParentFromChild(Object* parent, Object* child)
{
    //assert(LW_MONITOR(parent->lock)->owner == dvmThreadSelf()); // !owned for GC
    assert(IS_LOCK_FAT(&parent->lock));
    assert(IS_LOCK_FAT(&child->lock));
    assert(parent != child);
    Monitor* mon;

    mon = LW_MONITOR(parent->lock);
    if (!expandObjRemoveEntry(&mon->historyChildren, child)) {
        LOGW("WARNING: child %p not found in parent %p\n", child, parent);
    }
    assert(!expandObjHas(&mon->historyChildren, child));
    assert(expandObjCheckForDuplicates(&mon->historyChildren) < 0);

    mon = LW_MONITOR(child->lock);
    if (!expandObjRemoveEntry(&mon->historyParents, parent)) {
        LOGW("WARNING: parent %p not found in child %p\n", parent, child);
    }
    assert(!expandObjHas(&mon->historyParents, parent));
    assert(expandObjCheckForDuplicates(&mon->historyParents) < 0);
}


/*
 * Log the monitors held by the current thread. This is done as part of
 * flagging an error.
 */
static void logHeldMonitors(Thread* self)
{
    char* name = NULL;

    name = dvmGetThreadName(self);
    LOGW("Monitors currently held by thread (threadid=%d '%s')\n",
        self->threadId, name);
    LOGW("(most-recently-acquired on top):\n");
    free(name);

    LockedObjectData* lod = self->pLockedObjects;
    while (lod != NULL) {
        LOGW("--- object %p[%d] (%s)\n",
            lod->obj, lod->recursionCount, lod->obj->clazz->descriptor);
        dvmLogRawStackTrace(lod->rawStackTrace, lod->stackDepth);

        lod = lod->next;
    }
}

/*
 * Recursively traverse the object hierarchy starting at "obj".  We mark
 * ourselves on entry and clear the mark on exit.  If we ever encounter
 * a marked object, we have a cycle.
 *
 * Returns "true" if all is well, "false" if we found a cycle.
 */
static bool traverseTree(Thread* self, const Object* obj)
{
    assert(IS_LOCK_FAT(&obj->lock));
    Monitor* mon = LW_MONITOR(obj->lock);

    /*
     * Have we been here before?
     */
    if (mon->historyMark) {
        int* rawStackTrace;
        int stackDepth;

        LOGW("%s\n", kStartBanner);
        LOGW("Illegal lock attempt:\n");
        LOGW("--- object %p (%s)\n", obj, obj->clazz->descriptor);

        rawStackTrace = dvmFillInStackTraceRaw(self, &stackDepth);
        dvmLogRawStackTrace(rawStackTrace, stackDepth);
        free(rawStackTrace);

        LOGW(" ");
        logHeldMonitors(self);

        LOGW(" ");
        LOGW("Earlier, the following lock order (from last to first) was\n");
        LOGW("established (stack trace is from first successful lock):\n");
        return false;
    }
    mon->historyMark = true;

    /*
     * Examine the children.  We do NOT hold these locks, so they might
     * very well transition from thin to fat or change ownership while
     * we work.
     *
     * NOTE: we rely on the fact that they cannot revert from fat to thin
     * while we work.  This is currently a safe assumption.
     *
     * We can safely ignore thin-locked children, because by definition
     * they have no history and are leaf nodes.  In the current
     * implementation we always fatten the locks to provide a place to
     * hang the stack trace.
     */
    ExpandingObjectList* pList = &mon->historyChildren;
    int i;
    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
        const Object* child = expandBufGetEntry(pList, i);
        u4 lock = child->lock;
        if (!IS_LOCK_FAT(&lock))
            continue;
        if (!traverseTree(self, child)) {
            LOGW("--- object %p (%s)\n", obj, obj->clazz->descriptor);
            dvmLogRawStackTrace(mon->historyRawStackTrace,
                mon->historyStackDepth);
            mon->historyMark = false;
            return false;
        }
    }

    mon->historyMark = false;

    return true;
}
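
/*
 * Worked example of the marking scheme (a sketch with hypothetical
 * objects A and B): suppose thread 1 has previously locked A then B,
 * so A's history links to B.  If thread 2 now locks B and then tries
 * to lock A, updateDeadlockPrediction() (below) adds a trial link
 * B -> A and calls traverseTree() on A.  The traversal marks A,
 * descends to child B, marks B, descends to B's child A, and finds A
 * already marked -- a cycle, i.e. a lock ordering that could deadlock.
 */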

/*
 * Update the deadlock prediction tree, based on the current thread
 * acquiring "acqObj".  This must be called before the object is added to
 * the thread's list of held monitors.
 *
 * If the thread already holds the lock (recursion), or this is a known
 * lock configuration, we return without doing anything.  Otherwise, we add
 * a link from the most-recently-acquired lock in this thread to "acqObj"
 * after ensuring that the parent lock is "fat".
 *
 * This MUST NOT be called while a GC is in progress in another thread,
 * because we assume exclusive access to history trees in owned monitors.
 */
static void updateDeadlockPrediction(Thread* self, Object* acqObj)
{
    LockedObjectData* lod;
    LockedObjectData* mrl;

    /*
     * Quick check for recursive access.
     */
    lod = dvmFindInMonitorList(self, acqObj);
    if (lod != NULL) {
        LOGV("+++ DP: recursive %p\n", acqObj);
        return;
    }

    /*
     * Make the newly-acquired object's monitor "fat".  In some ways this
     * isn't strictly necessary, but we need the GC to tell us when
     * "interesting" objects go away, and right now the only way to make
     * an object look interesting is to give it a monitor.
     *
     * This also gives us a place to hang a stack trace.
     *
     * Our thread holds the lock, so we're allowed to rewrite the lock
     * without worrying that something will change out from under us.
     */
    if (!IS_LOCK_FAT(&acqObj->lock)) {
        LOGVV("fattening lockee %p (recur=%d)\n",
            acqObj, LW_LOCK_COUNT(acqObj->lock));
        inflateMonitor(self, acqObj);
    }

    /* if we don't have a stack trace for this monitor, establish one */
    if (LW_MONITOR(acqObj->lock)->historyRawStackTrace == NULL) {
        Monitor* mon = LW_MONITOR(acqObj->lock);
        mon->historyRawStackTrace = dvmFillInStackTraceRaw(self,
            &mon->historyStackDepth);
    }

    /*
     * We need to examine and perhaps modify the most-recently-locked
     * monitor.  We own that, so there's no risk of another thread
     * stepping on us.
     *
     * Retrieve the most-recently-locked entry from our thread.
     */
    mrl = self->pLockedObjects;
    if (mrl == NULL)
        return;         /* no other locks held */

    /*
     * Do a quick check to see if "acqObj" is a direct descendant.  We can do
     * this without holding the global lock because of our assertion that
     * a GC is not running in parallel -- nobody except the GC can
     * modify a history list in a Monitor they don't own, and we own "mrl".
     * (There might be concurrent *reads*, but no concurrent *writes*.)
     *
     * If we find it, this is a known good configuration, and we're done.
     */
    if (objectInChildList(mrl->obj, acqObj))
        return;

    /*
     * "mrl" is going to need to have a history tree.  If it's currently
     * a thin lock, we make it fat now.  The thin lock might have a
     * nonzero recursive lock count, which we need to carry over.
     *
     * Our thread holds the lock, so we're allowed to rewrite the lock
     * without worrying that something will change out from under us.
     */
    if (!IS_LOCK_FAT(&mrl->obj->lock)) {
        LOGVV("fattening parent %p f/b/o child %p (recur=%d)\n",
            mrl->obj, acqObj, LW_LOCK_COUNT(mrl->obj->lock));
        inflateMonitor(self, mrl->obj);
    }

    /*
     * We haven't seen this configuration before.  We need to scan down
     * acqObj's tree to see if any of the monitors in self->pLockedObjects
     * appear.  We grab a global lock before traversing or updating the
     * history list.
     *
     * If we find a match for any of our held locks, we know that the lock
     * has previously been acquired *after* acqObj, and we throw an error.
     *
     * The easiest way to do this is to create a link from "mrl" to "acqObj"
     * and do a recursive traversal, marking nodes as we cross them.  If
     * we cross one a second time, we have a cycle and can throw an error.
     * (We do the flag-clearing traversal before adding the new link, so
     * that we're guaranteed to terminate.)
     *
     * If "acqObj" is a thin lock, it has no history, and we can create a
     * link to it without additional checks.  [ We now guarantee that it's
     * always fat. ]
     */
    bool failed = false;
    dvmLockMutex(&gDvm.deadlockHistoryLock);
    linkParentToChild(mrl->obj, acqObj);
    if (!traverseTree(self, acqObj)) {
        LOGW("%s\n", kEndBanner);
        failed = true;

        /* remove the entry so we're still okay when in "warning" mode */
        unlinkParentFromChild(mrl->obj, acqObj);
    }
    dvmUnlockMutex(&gDvm.deadlockHistoryLock);

    if (failed) {
        switch (gDvm.deadlockPredictMode) {
        case kDPErr:
            dvmThrowException("Ldalvik/system/PotentialDeadlockError;", NULL);
            break;
        case kDPAbort:
            LOGE("Aborting due to potential deadlock\n");
            dvmAbort();
            break;
        default:
            /* warn only */
            break;
        }
    }
}
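
/*
 * Sketch of the expected call site (assumed shape, per the contract in
 * the comment above; the real hook lives in the object-locking code
 * earlier in this file): prediction runs after the monitor has been
 * acquired but before the object joins the thread's held-monitor list.
 */
#if 0
    /* in the lock-acquire path, once "obj" is successfully locked: */
    updateDeadlockPrediction(self, obj);
    dvmAddToMonitorList(self, obj, true);   /* true: record a stack trace */
#endif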

/*
 * We're removing "child" from existence.  We want to pull all of
 * child's children into "parent", filtering out duplicates.  This is
 * called during the GC.
 *
 * This does not modify "child", which might have multiple parents.
 */
static void mergeChildren(Object* parent, const Object* child)
{
    Monitor* mon;
    int i;

    assert(IS_LOCK_FAT(&child->lock));
    mon = LW_MONITOR(child->lock);
    ExpandingObjectList* pList = &mon->historyChildren;

    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
        Object* grandChild = expandBufGetEntry(pList, i);

        if (!objectInChildList(parent, grandChild)) {
            LOGVV("+++ migrating %p link to %p\n", grandChild, parent);
            linkParentToChild(parent, grandChild);
        } else {
            LOGVV("+++ parent %p already links to %p\n", parent, grandChild);
        }
    }
}
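
/*
 * Worked example (hypothetical objects): if parent P currently links to
 * {A} and the dying child C links to {A, B}, merging leaves P linked to
 * {A, B}.  The duplicate edge P -> A is skipped by the
 * objectInChildList() check, so historyChildren never accumulates
 * duplicates.
 */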

/*
 * An object with a fat lock is being collected during a GC pass.  We
 * want to remove it from any lock history trees that it is a part of.
 *
 * This may require updating the history trees in several monitors.  The
 * monitor semantics guarantee that no other thread will be accessing
 * the history trees at the same time.
 */
static void removeCollectedObject(Object* obj)
{
    Monitor* mon;

    LOGVV("+++ collecting %p\n", obj);

    /*
     * For every parent of this object:
     *  - merge all of our children into the parent's child list (creates
     *    a two-way link between parent and child)
     *  - remove ourselves from the parent's child list
     */
    ExpandingObjectList* pList;
    int i;

    assert(IS_LOCK_FAT(&obj->lock));
    mon = LW_MONITOR(obj->lock);
    pList = &mon->historyParents;
    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
        Object* parent = expandBufGetEntry(pList, i);
        Monitor* parentMon = LW_MONITOR(parent->lock);

        if (!expandObjRemoveEntry(&parentMon->historyChildren, obj)) {
            LOGW("WARNING: child %p not found in parent %p\n", obj, parent);
        }
        assert(!expandObjHas(&parentMon->historyChildren, obj));

        mergeChildren(parent, obj);
    }

    /*
     * For every child of this object:
     *  - remove ourselves from the child's parent list
     */
    pList = &mon->historyChildren;
    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
        Object* child = expandBufGetEntry(pList, i);
        Monitor* childMon = LW_MONITOR(child->lock);

        if (!expandObjRemoveEntry(&childMon->historyParents, obj)) {
            LOGW("WARNING: parent %p not found in child %p\n", obj, child);
        }
        assert(!expandObjHas(&childMon->historyParents, obj));
    }
}
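
/*
 * Worked example (hypothetical objects): given history edges
 * P -> C -> G, collecting C removes the P -> C edge, adds P -> G via
 * mergeChildren() (which links both directions), and finally drops C
 * from G's parent list.  Any lock-order knowledge that flowed through
 * C is thus preserved directly in P.
 */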

#endif /*WITH_DEADLOCK_PREDICTION*/
