1 /*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 /*
18 * Thread support.
19 */
20 #include "Dalvik.h"
21
22 #include "utils/threads.h" // need Android thread priorities
23
24 #include <stdlib.h>
25 #include <unistd.h>
26 #include <sys/time.h>
27 #include <sys/types.h>
28 #include <sys/resource.h>
29 #include <sys/mman.h>
30 #include <signal.h>
31 #include <errno.h>
32 #include <fcntl.h>
33
34 #if defined(HAVE_PRCTL)
35 #include <sys/prctl.h>
36 #endif
37
38 #if defined(WITH_SELF_VERIFICATION)
39 #include "interp/Jit.h" // need for self verification
40 #endif
41
42
43 /* desktop Linux needs a little help with gettid() */
44 #if defined(HAVE_GETTID) && !defined(HAVE_ANDROID_OS)
45 #define __KERNEL__
46 # include <linux/unistd.h>
47 #ifdef _syscall0
48 _syscall0(pid_t,gettid)
49 #else
50 pid_t gettid() { return syscall(__NR_gettid);}
51 #endif
52 #undef __KERNEL__
53 #endif
54
55 // Change this to enable logging on cgroup errors
56 #define ENABLE_CGROUP_ERR_LOGGING 0
57
58 // change this to LOGV/LOGD to debug thread activity
59 #define LOG_THREAD LOGVV
60
61 /*
62 Notes on Threading
63
64 All threads are native pthreads. All threads, except the JDWP debugger
65 thread, are visible to code running in the VM and to the debugger. (We
66 don't want the debugger to try to manipulate the thread that listens for
67 instructions from the debugger.) Internal VM threads are in the "system"
68 ThreadGroup, all others are in the "main" ThreadGroup, per convention.
69
70 The GC only runs when all threads have been suspended. Threads are
71 expected to suspend themselves, using a "safe point" mechanism. We check
72 for a suspend request at certain points in the main interpreter loop,
73 and on requests coming in from native code (e.g. all JNI functions).
74 Certain debugger events may inspire threads to self-suspend.
75
76 Native methods must use JNI calls to modify object references to avoid
77 clashes with the GC. JNI doesn't provide a way for native code to access
78 arrays of objects as such -- code must always get/set individual entries --
79 so it should be possible to fully control access through JNI.
80
81 Internal native VM threads, such as the finalizer thread, must explicitly
82 check for suspension periodically. In most cases they will be sound
83 asleep on a condition variable, and won't notice the suspension anyway.
84
85 Threads may be suspended by the GC, debugger, or the SIGQUIT listener
86 thread. The debugger may suspend or resume individual threads, while the
87 GC always suspends all threads. Each thread has a "suspend count" that
88 is incremented on suspend requests and decremented on resume requests.
89 When the count is zero, the thread is runnable. This allows us to fulfill
90 a debugger requirement: if the debugger suspends a thread, the thread is
91 not allowed to run again until the debugger resumes it (or disconnects,
92 in which case we must resume all debugger-suspended threads).
93
94 Paused threads sleep on a condition variable, and are awoken en masse.
95 Certain "slow" VM operations, such as starting up a new thread, will be
96 done in a separate "VMWAIT" state, so that the rest of the VM doesn't
97 freeze up waiting for the operation to finish. Threads must check for
98 pending suspension when leaving VMWAIT.
99
100 Because threads suspend themselves while interpreting code or when native
101 code makes JNI calls, there is no risk of suspending while holding internal
102 VM locks. All threads can enter a suspended (or native-code-only) state.
103 Also, we don't have to worry about object references existing solely
104 in hardware registers.
105
106 We do, however, have to worry about objects that were allocated internally
107 and aren't yet visible to anything else in the VM. If we allocate an
108 object, and then go to sleep on a mutex after changing to a non-RUNNING
109 state (e.g. while trying to allocate a second object), the first object
110 could be garbage-collected out from under us while we sleep. To manage
111 this, we automatically add all allocated objects to an internal object
112 tracking list, and only remove them when we know we won't be suspended
113 before the object appears in the GC root set.
114
115 The debugger may choose to suspend or resume a single thread, which can
116 lead to application-level deadlocks; this is expected behavior. The VM
117 will only check for suspension of single threads when the debugger is
118 active (the java.lang.Thread calls for this are deprecated and hence are
119 not supported). Resumption of a single thread is handled by decrementing
120 the thread's suspend count and sending a broadcast signal to the condition
121 variable. (This will cause all threads to wake up and immediately go back
122 to sleep, which isn't tremendously efficient, but neither is having the
123 debugger attached.)
124
125 The debugger is not allowed to resume threads suspended by the GC. This
126 is trivially enforced by ignoring debugger requests while the GC is running
127 (the JDWP thread is suspended during GC).
128
129 The VM maintains a Thread struct for every pthread known to the VM. There
130 is a java/lang/Thread object associated with every Thread. At present,
131 there is no safe way to go from a Thread object to a Thread struct except by
132 locking and scanning the list; this is necessary because the lifetimes of
133 the two are not closely coupled. We may want to change this behavior,
134 though at present the only performance impact is on the debugger (see
135 threadObjToThread()). See also notes about dvmDetachCurrentThread().
136 */
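/*
 * A rough sketch of the "safe point" self-suspend described above. This is
 * illustrative only -- the real checks live in the main interpreter loop and
 * in the thread status-change code -- but the shape is roughly:
 *
 *   if (self->suspendCount != 0) {
 *       lockThreadSuspendCount();
 *       while (self->suspendCount != 0) {
 *           // suspended; wait for the resume-all broadcast
 *           pthread_cond_wait(&gDvm.threadSuspendCountCond,
 *               &gDvm.threadSuspendCountLock);
 *       }
 *       unlockThreadSuspendCount();
 *   }
 */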
137 /*
138 Alternate implementation (signal-based):
139
140 Threads run without safe points -- zero overhead. The VM uses a signal
141 (e.g. pthread_kill(SIGUSR1)) to notify threads of suspension or resumption.
142
143 The trouble with using signals to suspend threads is that it means a thread
144 can be in the middle of an operation when garbage collection starts.
145 To prevent some sticky situations, we have to introduce critical sections
146 to the VM code.
147
148 Critical sections temporarily block suspension for a given thread.
149 The thread must move to a non-blocked state (and self-suspend) after
150 finishing its current task. If the thread blocks on a resource held
151 by a suspended thread, we're hosed.
152
153 One approach is to require that no blocking operations, notably
154 acquisition of mutexes, can be performed within a critical section.
155 This is too limiting. For example, if thread A gets suspended while
156 holding the thread list lock, it will prevent the GC or debugger from
157 being able to safely access the thread list. We need to wrap the critical
158 section around the entire operation (enter critical, get lock, do stuff,
159 release lock, exit critical).
160
161 A better approach is to declare that certain resources can only be held
162 within critical sections. A thread that enters a critical section and
163 then gets blocked on the thread list lock knows that the thread it is
164 waiting for is also in a critical section, and will release the lock
165 before suspending itself. Eventually all threads will complete their
166 operations and self-suspend. For this to work, the VM must:
167
168 (1) Determine the set of resources that may be accessed from the GC or
169 debugger threads. The mutexes guarding those go into the "critical
170 resource set" (CRS).
171 (2) Ensure that no resource in the CRS can be acquired outside of a
172 critical section. This can be verified with an assert().
173 (3) Ensure that only resources in the CRS can be held while in a critical
174 section. This is harder to enforce.
175
176 If any of these conditions are not met, deadlock can ensue when grabbing
177 resources in the GC or debugger (#1) or waiting for threads to suspend
178 (#2,#3). (You won't actually deadlock in the GC, because if the semantics
179 above are followed you don't need to lock anything in the GC. The risk is
180 rather that the GC will access data structures in an intermediate state.)
181
182 This approach requires more care and awareness in the VM than
183 safe-pointing. Because the GC and debugger are fairly intrusive, there
184 really aren't any internal VM resources that aren't shared. Thus, the
185 enter/exit critical calls can be added to internal mutex wrappers, which
186 makes it easy to get #1 and #2 right.
187
188 An ordering should be established for all locks to avoid deadlocks.
189
190 Monitor locks, which are also implemented with pthread calls, should not
191 cause any problems here. Threads fighting over such locks will not be in
192 critical sections and can be suspended freely.
193
194 This can get tricky if we ever need exclusive access to VM and non-VM
195 resources at the same time. It's not clear if this is a real concern.
196
197 There are (at least) two ways to handle the incoming signals:
198
199 (a) Always accept signals. If we're in a critical section, the signal
200 handler just returns without doing anything (the "suspend level"
201 should have been incremented before the signal was sent). Otherwise,
202 if the "suspend level" is nonzero, we go to sleep.
203 (b) Block signals in critical sections. This ensures that we can't be
204 interrupted in a critical section, but requires pthread_sigmask()
205 calls on entry and exit.
206
207 This is a choice between blocking the message and blocking the messenger.
208 Because UNIX signals are unreliable (you can only know that you have been
209 signaled, not whether you were signaled once or 10 times), the choice is
210 not significant for correctness. The choice depends on the efficiency
211 of pthread_sigmask() and the desire to actually block signals. Either way,
212 it is best to ensure that there is only one indication of "blocked";
213 having two (i.e. block signals and set a flag, then only send a signal
214 if the flag isn't set) can lead to race conditions.
215
216 The signal handler must take care to copy registers onto the stack (via
217 setjmp), so that stack scans find all references. Because we have to scan
218 native stacks, "exact" GC is not possible with this approach.
219
220 Some other concerns with flinging signals around:
221 - Odd interactions with some debuggers (e.g. gdb on the Mac)
222 - Restrictions on some standard library calls during GC (e.g. don't
223 use printf on stdout to print GC debug messages)
224 */
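/*
 * For reference, option (b) above amounts to something like the following
 * around each critical section (a sketch only; SIGUSR1 is just the example
 * signal mentioned earlier, not something this file actually installs):
 *
 *   sigset_t block, old;
 *   sigemptyset(&block);
 *   sigaddset(&block, SIGUSR1);
 *   pthread_sigmask(SIG_BLOCK, &block, &old);    // enter critical section
 *   ...touch critical resources...
 *   pthread_sigmask(SIG_SETMASK, &old, NULL);    // exit critical section
 */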
225
226 #define kMaxThreadId ((1 << 16) - 1)
227 #define kMainThreadId 1
228
229
230 static Thread* allocThread(int interpStackSize);
231 static bool prepareThread(Thread* thread);
232 static void setThreadSelf(Thread* thread);
233 static void unlinkThread(Thread* thread);
234 static void freeThread(Thread* thread);
235 static void assignThreadId(Thread* thread);
236 static bool createFakeEntryFrame(Thread* thread);
237 static bool createFakeRunFrame(Thread* thread);
238 static void* interpThreadStart(void* arg);
239 static void* internalThreadStart(void* arg);
240 static void threadExitUncaughtException(Thread* thread, Object* group);
241 static void threadExitCheck(void* arg);
242 static void waitForThreadSuspend(Thread* self, Thread* thread);
243 static int getThreadPriorityFromSystem(void);
244
245 /*
246 * The JIT needs to know if any thread is suspended. We do this by
247 * maintaining a global sum of all threads' suspend counts. All suspendCount
248 * updates should go through this after acquiring threadSuspendCountLock.
249 */
250 static inline void dvmAddToThreadSuspendCount(int *pSuspendCount, int delta)
251 {
252 *pSuspendCount += delta;
253 gDvm.sumThreadSuspendCount += delta;
254 }
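/*
 * Typical use (see e.g. dvmSlayDaemons below): the caller wraps the update
 * in the suspend-count lock so the global sum stays consistent with the
 * per-thread counts:
 *
 *   lockThreadSuspendCount();
 *   dvmAddToThreadSuspendCount(&thread->suspendCount, 1);
 *   unlockThreadSuspendCount();
 */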
255
256 /*
257 * Initialize thread list and main thread's environment. We need to set
258 * up some basic stuff so that dvmThreadSelf() will work when we start
259 * loading classes (e.g. to check for exceptions).
260 */
261 bool dvmThreadStartup(void)
262 {
263 Thread* thread;
264
265 /* allocate a TLS slot */
266 if (pthread_key_create(&gDvm.pthreadKeySelf, threadExitCheck) != 0) {
267 LOGE("ERROR: pthread_key_create failed\n");
268 return false;
269 }
270
271 /* test our pthread lib */
272 if (pthread_getspecific(gDvm.pthreadKeySelf) != NULL)
273 LOGW("WARNING: newly-created pthread TLS slot is not NULL\n");
274
275 /* prep thread-related locks and conditions */
276 dvmInitMutex(&gDvm.threadListLock);
277 pthread_cond_init(&gDvm.threadStartCond, NULL);
278 //dvmInitMutex(&gDvm.vmExitLock);
279 pthread_cond_init(&gDvm.vmExitCond, NULL);
280 dvmInitMutex(&gDvm._threadSuspendLock);
281 dvmInitMutex(&gDvm.threadSuspendCountLock);
282 pthread_cond_init(&gDvm.threadSuspendCountCond, NULL);
283 #ifdef WITH_DEADLOCK_PREDICTION
284 dvmInitMutex(&gDvm.deadlockHistoryLock);
285 #endif
286
287 /*
288 * Dedicated monitor for Thread.sleep().
289 * TODO: change this to an Object* so we don't have to expose this
290 * call, and we interact better with JDWP monitor calls. Requires
291 * deferring the object creation to much later (e.g. final "main"
292 * thread prep) or until first use.
293 */
294 gDvm.threadSleepMon = dvmCreateMonitor(NULL);
295
296 gDvm.threadIdMap = dvmAllocBitVector(kMaxThreadId, false);
297
298 thread = allocThread(gDvm.stackSize);
299 if (thread == NULL)
300 return false;
301
302 /* switch mode for when we run initializers */
303 thread->status = THREAD_RUNNING;
304
305 /*
306 * We need to assign the threadId early so we can lock/notify
307 * object monitors. We'll set the "threadObj" field later.
308 */
309 prepareThread(thread);
310 gDvm.threadList = thread;
311
312 #ifdef COUNT_PRECISE_METHODS
313 gDvm.preciseMethods = dvmPointerSetAlloc(200);
314 #endif
315
316 return true;
317 }
318
319 /*
320 * We're a little farther up now, and can load some basic classes.
321 *
322 * We're far enough along that we can poke at java.lang.Thread and friends,
323 * but should not assume that static initializers have run (or cause them
324 * to do so). That means no object allocations yet.
325 */
326 bool dvmThreadObjStartup(void)
327 {
328 /*
329 * Cache the locations of these classes. It's likely that we're the
330 * first to reference them, so they're being loaded now.
331 */
332 gDvm.classJavaLangThread =
333 dvmFindSystemClassNoInit("Ljava/lang/Thread;");
334 gDvm.classJavaLangVMThread =
335 dvmFindSystemClassNoInit("Ljava/lang/VMThread;");
336 gDvm.classJavaLangThreadGroup =
337 dvmFindSystemClassNoInit("Ljava/lang/ThreadGroup;");
338 if (gDvm.classJavaLangThread == NULL ||
339 gDvm.classJavaLangVMThread == NULL ||
340 gDvm.classJavaLangThreadGroup == NULL)
341 {
342 LOGE("Could not find one or more essential thread classes\n");
343 return false;
344 }
345
346 /*
347 * Cache field offsets. This makes things a little faster, at the
348 * expense of hard-coding non-public field names into the VM.
349 */
350 gDvm.offJavaLangThread_vmThread =
351 dvmFindFieldOffset(gDvm.classJavaLangThread,
352 "vmThread", "Ljava/lang/VMThread;");
353 gDvm.offJavaLangThread_group =
354 dvmFindFieldOffset(gDvm.classJavaLangThread,
355 "group", "Ljava/lang/ThreadGroup;");
356 gDvm.offJavaLangThread_daemon =
357 dvmFindFieldOffset(gDvm.classJavaLangThread, "daemon", "Z");
358 gDvm.offJavaLangThread_name =
359 dvmFindFieldOffset(gDvm.classJavaLangThread,
360 "name", "Ljava/lang/String;");
361 gDvm.offJavaLangThread_priority =
362 dvmFindFieldOffset(gDvm.classJavaLangThread, "priority", "I");
363
364 if (gDvm.offJavaLangThread_vmThread < 0 ||
365 gDvm.offJavaLangThread_group < 0 ||
366 gDvm.offJavaLangThread_daemon < 0 ||
367 gDvm.offJavaLangThread_name < 0 ||
368 gDvm.offJavaLangThread_priority < 0)
369 {
370 LOGE("Unable to find all fields in java.lang.Thread\n");
371 return false;
372 }
373
374 gDvm.offJavaLangVMThread_thread =
375 dvmFindFieldOffset(gDvm.classJavaLangVMThread,
376 "thread", "Ljava/lang/Thread;");
377 gDvm.offJavaLangVMThread_vmData =
378 dvmFindFieldOffset(gDvm.classJavaLangVMThread, "vmData", "I");
379 if (gDvm.offJavaLangVMThread_thread < 0 ||
380 gDvm.offJavaLangVMThread_vmData < 0)
381 {
382 LOGE("Unable to find all fields in java.lang.VMThread\n");
383 return false;
384 }
385
386 /*
387 * Cache the vtable offset for "run()".
388 *
389 * We don't want to keep the Method* because then we won't see
390 * methods defined in subclasses.
391 */
392 Method* meth;
393 meth = dvmFindVirtualMethodByDescriptor(gDvm.classJavaLangThread, "run", "()V");
394 if (meth == NULL) {
395 LOGE("Unable to find run() in java.lang.Thread\n");
396 return false;
397 }
398 gDvm.voffJavaLangThread_run = meth->methodIndex;
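    /*
     * (For illustration only: a cached vtable offset gets used for dispatch
     * roughly like this, e.g. when a new thread invokes its run() method --
     * the variable names here are hypothetical:
     *
     *   Method* run = threadObj->clazz->vtable[gDvm.voffJavaLangThread_run];
     *   dvmCallMethod(self, run, threadObj, &unused);
     *
     * which finds an overriding run() in subclasses, unlike a cached Method*.)
     */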
399
400 /*
401 * Cache vtable offsets for ThreadGroup methods.
402 */
403 meth = dvmFindVirtualMethodByDescriptor(gDvm.classJavaLangThreadGroup,
404 "removeThread", "(Ljava/lang/Thread;)V");
405 if (meth == NULL) {
406 LOGE("Unable to find removeThread(Thread) in java.lang.ThreadGroup\n");
407 return false;
408 }
409 gDvm.voffJavaLangThreadGroup_removeThread = meth->methodIndex;
410
411 return true;
412 }
413
414 /*
415 * All threads should be stopped by now. Clean up some thread globals.
416 */
417 void dvmThreadShutdown(void)
418 {
419 if (gDvm.threadList != NULL) {
420 /*
421 * If we walk through the thread list and try to free the
422 * lingering thread structures (which should only be for daemon
423 * threads), the daemon threads may crash if they execute before
424 * the process dies. Let them leak.
425 */
426 freeThread(gDvm.threadList);
427 gDvm.threadList = NULL;
428 }
429
430 dvmFreeBitVector(gDvm.threadIdMap);
431
432 dvmFreeMonitorList();
433
434 pthread_key_delete(gDvm.pthreadKeySelf);
435 }
436
437
438 /*
439 * Grab the suspend count global lock.
440 */
441 static inline void lockThreadSuspendCount(void)
442 {
443 /*
444 * Don't try to change to VMWAIT here. When we change back to RUNNING
445 * we have to check for a pending suspend, which results in grabbing
446 * this lock recursively. Doesn't work with "fast" pthread mutexes.
447 *
448 * This lock is always held for very brief periods, so as long as
449 * mutex ordering is respected we shouldn't stall.
450 */
451 dvmLockMutex(&gDvm.threadSuspendCountLock);
452 }
453
454 /*
455 * Release the suspend count global lock.
456 */
457 static inline void unlockThreadSuspendCount(void)
458 {
459 dvmUnlockMutex(&gDvm.threadSuspendCountLock);
460 }
461
462 /*
463 * Grab the thread list global lock.
464 *
465 * This is held while "suspend all" is trying to make everybody stop. If
466 * a suspend-all is in progress, and somebody tries to grab the lock, they'll
467 * have to wait for the GC to finish. Therefore it's important that the
468 * thread not be in RUNNING mode.
469 *
470 * We don't have to check to see if we should be suspended once we have
471 * the lock. Nobody can suspend all threads without holding the thread list
472 * lock while they do it, so by definition there isn't a GC in progress.
473 *
474 * This function deliberately avoids the use of dvmChangeStatus(),
475 * which could grab threadSuspendCountLock. To avoid deadlock, threads
476 * are required to grab the thread list lock before the thread suspend
477 * count lock. (See comment in DvmGlobals.)
478 *
479 * TODO: consider checking for suspend after acquiring the lock, and
480 * backing off if set. As stated above, it can't happen during normal
481 * execution, but it *can* happen during shutdown when daemon threads
482 * are being suspended.
483 */
484 void dvmLockThreadList(Thread* self)
485 {
486 ThreadStatus oldStatus;
487
488 if (self == NULL) /* try to get it from TLS */
489 self = dvmThreadSelf();
490
491 if (self != NULL) {
492 oldStatus = self->status;
493 self->status = THREAD_VMWAIT;
494 } else {
495 /* happens during VM shutdown */
496 //LOGW("NULL self in dvmLockThreadList\n");
497 oldStatus = -1; // shut up gcc
498 }
499
500 dvmLockMutex(&gDvm.threadListLock);
501
502 if (self != NULL)
503 self->status = oldStatus;
504 }
505
506 /*
507 * Release the thread list global lock.
508 */
509 void dvmUnlockThreadList(void)
510 {
511 dvmUnlockMutex(&gDvm.threadListLock);
512 }
513
514 /*
515 * Convert SuspendCause to a string.
516 */
517 static const char* getSuspendCauseStr(SuspendCause why)
518 {
519 switch (why) {
520 case SUSPEND_NOT: return "NOT?";
521 case SUSPEND_FOR_GC: return "gc";
522 case SUSPEND_FOR_DEBUG: return "debug";
523 case SUSPEND_FOR_DEBUG_EVENT: return "debug-event";
524 case SUSPEND_FOR_STACK_DUMP: return "stack-dump";
525 case SUSPEND_FOR_VERIFY: return "verify";
526 #if defined(WITH_JIT)
527 case SUSPEND_FOR_TBL_RESIZE: return "table-resize";
528 case SUSPEND_FOR_IC_PATCH: return "inline-cache-patch";
529 case SUSPEND_FOR_CC_RESET: return "reset-code-cache";
530 case SUSPEND_FOR_REFRESH: return "refresh jit status";
531 #endif
532 default: return "UNKNOWN";
533 }
534 }
535
536 /*
537 * Grab the "thread suspend" lock. This is required to prevent the
538 * GC and the debugger from simultaneously suspending all threads.
539 *
540 * If we fail to get the lock, somebody else is trying to suspend all
541 * threads -- including us. If we go to sleep on the lock we'll deadlock
542 * the VM. Loop until we get it or somebody puts us to sleep.
543 */
544 static void lockThreadSuspend(const char* who, SuspendCause why)
545 {
546 const int kSpinSleepTime = 3*1000*1000; /* 3s */
547 u8 startWhen = 0; // init req'd to placate gcc
548 int sleepIter = 0;
549 int cc;
550
551 do {
552 cc = dvmTryLockMutex(&gDvm._threadSuspendLock);
553 if (cc != 0) {
554 Thread* self = dvmThreadSelf();
555
556 if (!dvmCheckSuspendPending(self)) {
557 /*
558 * Could be that a resume-all is in progress, and something
559 * grabbed the CPU when the wakeup was broadcast. The thread
560 * performing the resume hasn't had a chance to release the
561 * thread suspend lock. (We release before the broadcast,
562 * so this should be a narrow window.)
563 *
564 * Could be we hit the window as a suspend was started,
565 * and the lock has been grabbed but the suspend counts
566 * haven't been incremented yet.
567 *
568 * Could be an unusual JNI thread-attach thing.
569 *
570 * Could be the debugger telling us to resume at roughly
571 * the same time we're posting an event.
572 *
573 * Could be two app threads both want to patch predicted
574 * chaining cells around the same time.
575 */
576 LOGI("threadid=%d ODD: want thread-suspend lock (%s:%s),"
577 " it's held, no suspend pending\n",
578 self->threadId, who, getSuspendCauseStr(why));
579 } else {
580 /* we suspended; reset timeout */
581 sleepIter = 0;
582 }
583
584 /* give the lock-holder a chance to do some work */
585 if (sleepIter == 0)
586 startWhen = dvmGetRelativeTimeUsec();
587 if (!dvmIterativeSleep(sleepIter++, kSpinSleepTime, startWhen)) {
588 LOGE("threadid=%d: couldn't get thread-suspend lock (%s:%s),"
589 " bailing\n",
590 self->threadId, who, getSuspendCauseStr(why));
591 /* threads are not suspended, thread dump could crash */
592 dvmDumpAllThreads(false);
593 dvmAbort();
594 }
595 }
596 } while (cc != 0);
597 assert(cc == 0);
598 }
599
600 /*
601 * Release the "thread suspend" lock.
602 */
603 static inline void unlockThreadSuspend(void)
604 {
605 dvmUnlockMutex(&gDvm._threadSuspendLock);
606 }
607
608
609 /*
610 * Kill any daemon threads that still exist. All of ours should be
611 * stopped, so these should be Thread objects or JNI-attached threads
612 * started by the application. Actively-running threads are likely
613 * to crash the process if they continue to execute while the VM
614 * shuts down, so we really need to kill or suspend them. (If we want
615 * the VM to restart within this process, we need to kill them, but that
616 * leaves open the possibility of orphaned resources.)
617 *
618 * Waiting for the thread to suspend may be unwise at this point, but
619 * if one of these is wedged in a critical section then we probably
620 * would've locked up on the last GC attempt.
621 *
622 * It's possible for this function to get called after a failed
623 * initialization, so be careful with assumptions about the environment.
624 *
625 * This will be called from whatever thread calls DestroyJavaVM, usually
626 * but not necessarily the main thread. It's likely, but not guaranteed,
627 * that the current thread has already been cleaned up.
628 */
629 void dvmSlayDaemons(void)
630 {
631 Thread* self = dvmThreadSelf(); // may be null
632 Thread* target;
633 int threadId = 0;
634 bool doWait = false;
635
636 dvmLockThreadList(self);
637
638 if (self != NULL)
639 threadId = self->threadId;
640
641 target = gDvm.threadList;
642 while (target != NULL) {
643 if (target == self) {
644 target = target->next;
645 continue;
646 }
647
648 if (!dvmGetFieldBoolean(target->threadObj,
649 gDvm.offJavaLangThread_daemon))
650 {
651 /* should never happen; suspend it with the rest */
652 LOGW("threadid=%d: non-daemon id=%d still running at shutdown?!\n",
653 threadId, target->threadId);
654 }
655
656 char* threadName = dvmGetThreadName(target);
657 LOGD("threadid=%d: suspending daemon id=%d name='%s'\n",
658 threadId, target->threadId, threadName);
659 free(threadName);
660
661 /* mark as suspended */
662 lockThreadSuspendCount();
663 dvmAddToThreadSuspendCount(&target->suspendCount, 1);
664 unlockThreadSuspendCount();
665 doWait = true;
666
667 target = target->next;
668 }
669
670 //dvmDumpAllThreads(false);
671
672 /*
673 * Unlock the thread list, relocking it later if necessary. It's
674 * possible a thread is in VMWAIT after calling dvmLockThreadList,
675 * and that function *doesn't* check for pending suspend after
676 * acquiring the lock. We want to let them finish their business
677 * and see the pending suspend before we continue here.
678 *
679 * There's no guarantee of mutex fairness, so this might not work.
680 * (The alternative is to have dvmLockThreadList check for suspend
681 * after acquiring the lock and back off, something we should consider.)
682 */
683 dvmUnlockThreadList();
684
685 if (doWait) {
686 bool complained = false;
687
688 usleep(200 * 1000);
689
690 dvmLockThreadList(self);
691
692 /*
693 * Sleep for a bit until the threads have suspended. We're trying
694 * to exit, so don't wait for too long.
695 */
696 int i;
697 for (i = 0; i < 10; i++) {
698 bool allSuspended = true;
699
700 target = gDvm.threadList;
701 while (target != NULL) {
702 if (target == self) {
703 target = target->next;
704 continue;
705 }
706
707 if (target->status == THREAD_RUNNING) {
708 if (!complained)
709 LOGD("threadid=%d not ready yet\n", target->threadId);
710 allSuspended = false;
711 /* keep going so we log each running daemon once */
712 }
713
714 target = target->next;
715 }
716
717 if (allSuspended) {
718 LOGD("threadid=%d: all daemons have suspended\n", threadId);
719 break;
720 } else {
721 if (!complained) {
722 complained = true;
723 LOGD("threadid=%d: waiting briefly for daemon suspension\n",
724 threadId);
725 }
726 }
727
728 usleep(200 * 1000);
729 }
730 dvmUnlockThreadList();
731 }
732
733 #if 0 /* bad things happen if they come out of JNI or "spuriously" wake up */
734 /*
735 * Abandon the threads and recover their resources.
736 */
737 target = gDvm.threadList;
738 while (target != NULL) {
739 Thread* nextTarget = target->next;
740 unlinkThread(target);
741 freeThread(target);
742 target = nextTarget;
743 }
744 #endif
745
746 //dvmDumpAllThreads(true);
747 }
748
749
750 /*
751 * Finish preparing the parts of the Thread struct required to support
752 * JNI registration.
753 */
754 bool dvmPrepMainForJni(JNIEnv* pEnv)
755 {
756 Thread* self;
757
758 /* main thread is always first in list at this point */
759 self = gDvm.threadList;
760 assert(self->threadId == kMainThreadId);
761
762 /* create a "fake" JNI frame at the top of the main thread interp stack */
763 if (!createFakeEntryFrame(self))
764 return false;
765
766 /* fill these in, since they weren't ready at dvmCreateJNIEnv time */
767 dvmSetJniEnvThreadId(pEnv, self);
768 dvmSetThreadJNIEnv(self, (JNIEnv*) pEnv);
769
770 return true;
771 }
772
773
774 /*
775 * Finish preparing the main thread, allocating some objects to represent
776 * it. As part of doing so, we finish initializing Thread and ThreadGroup.
777 * This will execute some interpreted code (e.g. class initializers).
778 */
779 bool dvmPrepMainThread(void)
780 {
781 Thread* thread;
782 Object* groupObj;
783 Object* threadObj;
784 Object* vmThreadObj;
785 StringObject* threadNameStr;
786 Method* init;
787 JValue unused;
788
789 LOGV("+++ finishing prep on main VM thread\n");
790
791 /* main thread is always first in list at this point */
792 thread = gDvm.threadList;
793 assert(thread->threadId == kMainThreadId);
794
795 /*
796 * Make sure the classes are initialized. We have to do this before
797 * we create an instance of them.
798 */
799 if (!dvmInitClass(gDvm.classJavaLangClass)) {
800 LOGE("'Class' class failed to initialize\n");
801 return false;
802 }
803 if (!dvmInitClass(gDvm.classJavaLangThreadGroup) ||
804 !dvmInitClass(gDvm.classJavaLangThread) ||
805 !dvmInitClass(gDvm.classJavaLangVMThread))
806 {
807 LOGE("thread classes failed to initialize\n");
808 return false;
809 }
810
811 groupObj = dvmGetMainThreadGroup();
812 if (groupObj == NULL)
813 return false;
814
815 /*
816 * Allocate and construct a Thread with the internal-creation
817 * constructor.
818 */
819 threadObj = dvmAllocObject(gDvm.classJavaLangThread, ALLOC_DEFAULT);
820 if (threadObj == NULL) {
821 LOGE("unable to allocate main thread object\n");
822 return false;
823 }
824 dvmReleaseTrackedAlloc(threadObj, NULL);
825
826 threadNameStr = dvmCreateStringFromCstr("main");
827 if (threadNameStr == NULL)
828 return false;
829 dvmReleaseTrackedAlloc((Object*)threadNameStr, NULL);
830
831 init = dvmFindDirectMethodByDescriptor(gDvm.classJavaLangThread, "<init>",
832 "(Ljava/lang/ThreadGroup;Ljava/lang/String;IZ)V");
833 assert(init != NULL);
834 dvmCallMethod(thread, init, threadObj, &unused, groupObj, threadNameStr,
835 THREAD_NORM_PRIORITY, false);
836 if (dvmCheckException(thread)) {
837 LOGE("exception thrown while constructing main thread object\n");
838 return false;
839 }
840
841 /*
842 * Allocate and construct a VMThread.
843 */
844 vmThreadObj = dvmAllocObject(gDvm.classJavaLangVMThread, ALLOC_DEFAULT);
845 if (vmThreadObj == NULL) {
846 LOGE("unable to allocate main vmthread object\n");
847 return false;
848 }
849 dvmReleaseTrackedAlloc(vmThreadObj, NULL);
850
851 init = dvmFindDirectMethodByDescriptor(gDvm.classJavaLangVMThread, "<init>",
852 "(Ljava/lang/Thread;)V");
853 dvmCallMethod(thread, init, vmThreadObj, &unused, threadObj);
854 if (dvmCheckException(thread)) {
855 LOGE("exception thrown while constructing main vmthread object\n");
856 return false;
857 }
858
859 /* set the VMThread.vmData field to our Thread struct */
860 assert(gDvm.offJavaLangVMThread_vmData != 0);
861 dvmSetFieldInt(vmThreadObj, gDvm.offJavaLangVMThread_vmData, (u4)thread);
862
863 /*
864 * Stuff the VMThread back into the Thread. From this point on, other
865 * Threads will see that this Thread is running (at least, they would,
866 * if there were any).
867 */
868 dvmSetFieldObject(threadObj, gDvm.offJavaLangThread_vmThread,
869 vmThreadObj);
870
871 thread->threadObj = threadObj;
872
873 /*
874 * Set the context class loader. This invokes a ClassLoader method,
875 * which could conceivably call Thread.currentThread(), so we want the
876 * Thread to be fully configured before we do this.
877 */
878 Object* systemLoader = dvmGetSystemClassLoader();
879 if (systemLoader == NULL) {
880 LOGW("WARNING: system class loader is NULL (setting main ctxt)\n");
881 /* keep going */
882 }
883 int ctxtClassLoaderOffset = dvmFindFieldOffset(gDvm.classJavaLangThread,
884 "contextClassLoader", "Ljava/lang/ClassLoader;");
885 if (ctxtClassLoaderOffset < 0) {
886 LOGE("Unable to find contextClassLoader field in Thread\n");
887 return false;
888 }
889 dvmSetFieldObject(threadObj, ctxtClassLoaderOffset, systemLoader);
890
891 /*
892 * Finish our thread prep.
893 */
894
895 /* include self in non-daemon threads (mainly for AttachCurrentThread) */
896 gDvm.nonDaemonThreadCount++;
897
898 return true;
899 }
900
901
902 /*
903 * Alloc and initialize a Thread struct.
904 *
905 * Does not create any objects, just stuff on the system (malloc) heap.
906 */
907 static Thread* allocThread(int interpStackSize)
908 {
909 Thread* thread;
910 u1* stackBottom;
911
912 thread = (Thread*) calloc(1, sizeof(Thread));
913 if (thread == NULL)
914 return NULL;
915
916 #if defined(WITH_SELF_VERIFICATION)
917 if (dvmSelfVerificationShadowSpaceAlloc(thread) == NULL)
918 return NULL;
919 #endif
920
921 assert(interpStackSize >= kMinStackSize && interpStackSize <=kMaxStackSize);
922
923 thread->status = THREAD_INITIALIZING;
924 thread->suspendCount = 0;
925
926 #ifdef WITH_ALLOC_LIMITS
927 thread->allocLimit = -1;
928 #endif
929
930 /*
931 * Allocate and initialize the interpreted code stack. We essentially
932 * "lose" the alloc pointer, which points at the bottom of the stack,
933 * but we can get it back later because we know how big the stack is.
934 *
935 * The stack must be aligned on a 4-byte boundary.
936 */
937 #ifdef MALLOC_INTERP_STACK
938 stackBottom = (u1*) malloc(interpStackSize);
939 if (stackBottom == NULL) {
940 #if defined(WITH_SELF_VERIFICATION)
941 dvmSelfVerificationShadowSpaceFree(thread);
942 #endif
943 free(thread);
944 return NULL;
945 }
946 memset(stackBottom, 0xc5, interpStackSize); // stop valgrind complaints
947 #else
948 stackBottom = mmap(NULL, interpStackSize, PROT_READ | PROT_WRITE,
949 MAP_PRIVATE | MAP_ANON, -1, 0);
950 if (stackBottom == MAP_FAILED) {
951 #if defined(WITH_SELF_VERIFICATION)
952 dvmSelfVerificationShadowSpaceFree(thread);
953 #endif
954 free(thread);
955 return NULL;
956 }
957 #endif
958
959 assert(((u4)stackBottom & 0x03) == 0); // looks like our malloc ensures this
960 thread->interpStackSize = interpStackSize;
961 thread->interpStackStart = stackBottom + interpStackSize;
962 thread->interpStackEnd = stackBottom + STACK_OVERFLOW_RESERVE;
963
964 /* give the thread code a chance to set things up */
965 dvmInitInterpStack(thread, interpStackSize);
966
967 return thread;
968 }
969
970 /*
971 * Get a meaningful thread ID. At present this only has meaning under Linux,
972 * where getpid() and gettid() sometimes agree and sometimes don't depending
973 * on your thread model (try "export LD_ASSUME_KERNEL=2.4.19").
974 */
975 pid_t dvmGetSysThreadId(void)
976 {
977 #ifdef HAVE_GETTID
978 return gettid();
979 #else
980 return getpid();
981 #endif
982 }
983
984 /*
985 * Finish initialization of a Thread struct.
986 *
987 * This must be called while executing in the new thread, but before the
988 * thread is added to the thread list.
989 *
990 * NOTE: The threadListLock must be held by the caller (needed for
991 * assignThreadId()).
992 */
993 static bool prepareThread(Thread* thread)
994 {
995 assignThreadId(thread);
996 thread->handle = pthread_self();
997 thread->systemTid = dvmGetSysThreadId();
998
999 //LOGI("SYSTEM TID IS %d (pid is %d)\n", (int) thread->systemTid,
1000 // (int) getpid());
1001 /*
1002 * If we were called by dvmAttachCurrentThread, the self value is
1003 * already correctly established as "thread".
1004 */
1005 setThreadSelf(thread);
1006
1007 LOGV("threadid=%d: interp stack at %p\n",
1008 thread->threadId, thread->interpStackStart - thread->interpStackSize);
1009
1010 /*
1011 * Initialize invokeReq.
1012 */
1013 dvmInitMutex(&thread->invokeReq.lock);
1014 pthread_cond_init(&thread->invokeReq.cv, NULL);
1015
1016 /*
1017 * Initialize our reference tracking tables.
1018 *
1019 * Most threads won't use jniMonitorRefTable, so we clear out the
1020 * structure but don't call the init function (which allocs storage).
1021 */
1022 #ifdef USE_INDIRECT_REF
1023 if (!dvmInitIndirectRefTable(&thread->jniLocalRefTable,
1024 kJniLocalRefMin, kJniLocalRefMax, kIndirectKindLocal))
1025 return false;
1026 #else
1027 /*
1028 * The JNI local ref table *must* be fixed-size because we keep pointers
1029 * into the table in our stack frames.
1030 */
1031 if (!dvmInitReferenceTable(&thread->jniLocalRefTable,
1032 kJniLocalRefMax, kJniLocalRefMax))
1033 return false;
1034 #endif
1035 if (!dvmInitReferenceTable(&thread->internalLocalRefTable,
1036 kInternalRefDefault, kInternalRefMax))
1037 return false;
1038
1039 memset(&thread->jniMonitorRefTable, 0, sizeof(thread->jniMonitorRefTable));
1040
1041 pthread_cond_init(&thread->waitCond, NULL);
1042 dvmInitMutex(&thread->waitMutex);
1043
1044 return true;
1045 }
1046
1047 /*
1048 * Remove a thread from the internal list.
1049 * Clear out the links to make it obvious that the thread is
1050 * no longer on the list. Caller must hold gDvm.threadListLock.
1051 */
1052 static void unlinkThread(Thread* thread)
1053 {
1054 LOG_THREAD("threadid=%d: removing from list\n", thread->threadId);
1055 if (thread == gDvm.threadList) {
1056 assert(thread->prev == NULL);
1057 gDvm.threadList = thread->next;
1058 } else {
1059 assert(thread->prev != NULL);
1060 thread->prev->next = thread->next;
1061 }
1062 if (thread->next != NULL)
1063 thread->next->prev = thread->prev;
1064 thread->prev = thread->next = NULL;
1065 }
1066
1067 /*
1068 * Free a Thread struct, and all the stuff allocated within.
1069 */
1070 static void freeThread(Thread* thread)
1071 {
1072 if (thread == NULL)
1073 return;
1074
1075 /* thread->threadId is zero at this point */
1076 LOGVV("threadid=%d: freeing\n", thread->threadId);
1077
1078 if (thread->interpStackStart != NULL) {
1079 u1* interpStackBottom;
1080
1081 interpStackBottom = thread->interpStackStart;
1082 interpStackBottom -= thread->interpStackSize;
1083 #ifdef MALLOC_INTERP_STACK
1084 free(interpStackBottom);
1085 #else
1086 if (munmap(interpStackBottom, thread->interpStackSize) != 0)
1087 LOGW("munmap(thread stack) failed\n");
1088 #endif
1089 }
1090
1091 #ifdef USE_INDIRECT_REF
1092 dvmClearIndirectRefTable(&thread->jniLocalRefTable);
1093 #else
1094 dvmClearReferenceTable(&thread->jniLocalRefTable);
1095 #endif
1096 dvmClearReferenceTable(&thread->internalLocalRefTable);
1097 if (thread->jniMonitorRefTable.table != NULL)
1098 dvmClearReferenceTable(&thread->jniMonitorRefTable);
1099
1100 #if defined(WITH_SELF_VERIFICATION)
1101 dvmSelfVerificationShadowSpaceFree(thread);
1102 #endif
1103 free(thread);
1104 }
1105
1106 /*
1107 * Like pthread_self(), but on a Thread*.
1108 */
1109 Thread* dvmThreadSelf(void)
1110 {
1111 return (Thread*) pthread_getspecific(gDvm.pthreadKeySelf);
1112 }
1113
1114 /*
1115 * Explore our sense of self. Stuffs the thread pointer into TLS.
1116 */
1117 static void setThreadSelf(Thread* thread)
1118 {
1119 int cc;
1120
1121 cc = pthread_setspecific(gDvm.pthreadKeySelf, thread);
1122 if (cc != 0) {
1123 /*
1124 * Sometimes this fails under Bionic with EINVAL during shutdown.
1125 * This can happen if the timing is just right, e.g. a thread
1126 * fails to attach during shutdown, but the "fail" path calls
1127 * here to ensure we clean up after ourselves.
1128 */
1129 if (thread != NULL) {
1130 LOGE("pthread_setspecific(%p) failed, err=%d\n", thread, cc);
1131 dvmAbort(); /* the world is fundamentally hosed */
1132 }
1133 }
1134 }
1135
1136 /*
1137 * This is associated with the pthreadKeySelf key. It's called by the
1138 * pthread library when a thread is exiting and the "self" pointer in TLS
1139 * is non-NULL, meaning the VM hasn't had a chance to clean up. In normal
1140 * operation this will not be called.
1141 *
1142 * This is mainly of use to ensure that we don't leak resources if, for
1143 * example, a thread attaches itself to us with AttachCurrentThread and
1144 * then exits without notifying the VM.
1145 *
1146 * We could do the detach here instead of aborting, but this will lead to
1147 * portability problems. Other implementations do not do this check and
1148 * will simply be unaware that the thread has exited, leading to resource
1149 * leaks (and, if this is a non-daemon thread, an infinite hang when the
1150 * VM tries to shut down).
1151 *
1152 * Because some implementations may want to use the pthread destructor
1153 * to initiate the detach, and the ordering of destructors is not defined,
1154 * we want to iterate a couple of times to give those a chance to run.
1155 */
1156 static void threadExitCheck(void* arg)
1157 {
1158 const int kMaxCount = 2;
1159
1160 Thread* self = (Thread*) arg;
1161 assert(self != NULL);
1162
1163 LOGV("threadid=%d: threadExitCheck(%p) count=%d\n",
1164 self->threadId, arg, self->threadExitCheckCount);
1165
1166 if (self->status == THREAD_ZOMBIE) {
1167 LOGW("threadid=%d: Weird -- shouldn't be in threadExitCheck\n",
1168 self->threadId);
1169 return;
1170 }
1171
1172 if (self->threadExitCheckCount < kMaxCount) {
1173 /*
1174 * Spin a couple of times to let other destructors fire.
1175 */
1176 LOGD("threadid=%d: thread exiting, not yet detached (count=%d)\n",
1177 self->threadId, self->threadExitCheckCount);
1178 self->threadExitCheckCount++;
1179 int cc = pthread_setspecific(gDvm.pthreadKeySelf, self);
1180 if (cc != 0) {
1181 LOGE("threadid=%d: unable to re-add thread to TLS\n",
1182 self->threadId);
1183 dvmAbort();
1184 }
1185 } else {
1186 LOGE("threadid=%d: native thread exited without detaching\n",
1187 self->threadId);
1188 dvmAbort();
1189 }
1190 }
1191
1192
1193 /*
1194 * Assign the threadId. This needs to be a small integer so that our
1195 * "thin" locks fit in a small number of bits.
1196 *
1197 * We reserve zero for use as an invalid ID.
1198 *
1199 * This must be called with threadListLock held.
1200 */
1201 static void assignThreadId(Thread* thread)
1202 {
1203 /*
1204 * Find a small unique integer. threadIdMap is a vector of
1205 * kMaxThreadId bits; dvmAllocBit() returns the index of a
1206 * bit, meaning that it will always be < kMaxThreadId.
1207 */
1208 int num = dvmAllocBit(gDvm.threadIdMap);
1209 if (num < 0) {
1210 LOGE("Ran out of thread IDs\n");
1211 dvmAbort(); // TODO: make this a non-fatal error result
1212 }
1213
1214 thread->threadId = num + 1;
1215
1216 assert(thread->threadId != 0);
1217 assert(thread->threadId != DVM_LOCK_INITIAL_THIN_VALUE);
1218 }
1219
1220 /*
1221 * Give back the thread ID.
1222 */
1223 static void releaseThreadId(Thread* thread)
1224 {
1225 assert(thread->threadId > 0);
1226 dvmClearBit(gDvm.threadIdMap, thread->threadId - 1);
1227 thread->threadId = 0;
1228 }
1229
1230
1231 /*
1232 * Add a stack frame that makes it look like the native code in the main
1233 * thread was originally invoked from interpreted code. This gives us a
1234 * place to hang JNI local references. The VM spec says (v2 5.2) that the
1235 * VM begins by executing "main" in a class, so in a way this brings us
1236 * closer to the spec.
1237 */
1238 static bool createFakeEntryFrame(Thread* thread)
1239 {
1240 assert(thread->threadId == kMainThreadId); // main thread only
1241
1242 /* find the method on first use */
1243 if (gDvm.methFakeNativeEntry == NULL) {
1244 ClassObject* nativeStart;
1245 Method* mainMeth;
1246
1247 nativeStart = dvmFindSystemClassNoInit(
1248 "Ldalvik/system/NativeStart;");
1249 if (nativeStart == NULL) {
1250 LOGE("Unable to find dalvik.system.NativeStart class\n");
1251 return false;
1252 }
1253
1254 /*
1255 * Because we are creating a frame that represents application code, we
1256 * want to stuff the application class loader into the method's class
1257 * loader field, even though we're using the system class loader to
1258 * load it. This makes life easier over in JNI FindClass (though it
1259 * could bite us in other ways).
1260 *
1261 * Unfortunately this is occurring too early in the initialization,
1262 * of necessity coming before JNI is initialized, and we're not quite
1263 * ready to set up the application class loader.
1264 *
1265 * So we save a pointer to the method in gDvm.methFakeNativeEntry
1266 * and check it in FindClass. The method is private so nobody else
1267 * can call it.
1268 */
1269 //nativeStart->classLoader = dvmGetSystemClassLoader();
1270
1271 mainMeth = dvmFindDirectMethodByDescriptor(nativeStart,
1272 "main", "([Ljava/lang/String;)V");
1273 if (mainMeth == NULL) {
1274 LOGE("Unable to find 'main' in dalvik.system.NativeStart\n");
1275 return false;
1276 }
1277
1278 gDvm.methFakeNativeEntry = mainMeth;
1279 }
1280
1281 if (!dvmPushJNIFrame(thread, gDvm.methFakeNativeEntry))
1282 return false;
1283
1284 /*
1285 * Null out the "String[] args" argument.
1286 */
1287 assert(gDvm.methFakeNativeEntry->registersSize == 1);
1288 u4* framePtr = (u4*) thread->curFrame;
1289 framePtr[0] = 0;
1290
1291 return true;
1292 }
1293
1294
1295 /*
1296 * Add a stack frame that makes it look like the native thread has been
1297 * executing interpreted code. This gives us a place to hang JNI local
1298 * references.
1299 */
1300 static bool createFakeRunFrame(Thread* thread)
1301 {
1302 ClassObject* nativeStart;
1303 Method* runMeth;
1304
1305 /*
1306 * TODO: cache this result so we don't have to dig for it every time
1307 * somebody attaches a thread to the VM. Also consider changing this
1308 * to a static method so we don't have a null "this" pointer in the
1309 * "ins" on the stack. (Does it really need to look like a Runnable?)
1310 */
1311 nativeStart = dvmFindSystemClassNoInit("Ldalvik/system/NativeStart;");
1312 if (nativeStart == NULL) {
1313 LOGE("Unable to find dalvik.system.NativeStart class\n");
1314 return false;
1315 }
1316
1317 runMeth = dvmFindVirtualMethodByDescriptor(nativeStart, "run", "()V");
1318 if (runMeth == NULL) {
1319 LOGE("Unable to find 'run' in dalvik.system.NativeStart\n");
1320 return false;
1321 }
1322
1323 if (!dvmPushJNIFrame(thread, runMeth))
1324 return false;
1325
1326 /*
1327 * Provide a NULL 'this' argument. The method we've put at the top of
1328 * the stack looks like a virtual call to run() in a Runnable class.
1329 * (If we declared the method static, it wouldn't take any arguments
1330 * and we wouldn't have to do this.)
1331 */
1332 assert(runMeth->registersSize == 1);
1333 u4* framePtr = (u4*) thread->curFrame;
1334 framePtr[0] = 0;
1335
1336 return true;
1337 }
1338
1339 /*
1340 * Helper function to set the name of the current thread
1341 */
1342 static void setThreadName(const char *threadName)
1343 {
1344 int hasAt = 0;
1345 int hasDot = 0;
1346 const char *s = threadName;
1347 while (*s) {
1348 if (*s == '.') hasDot = 1;
1349 else if (*s == '@') hasAt = 1;
1350 s++;
1351 }
1352 int len = s - threadName;
1353 if (len < 15 || hasAt || !hasDot) {
1354 s = threadName;
1355 } else {
1356 s = threadName + len - 15;
1357 }
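    /*
     * For example (illustrative name): "android.server.ServerThread" is 27
     * chars, contains a '.', and has no '@', so we keep only the last 15
     * chars, "er.ServerThread", which fits in the 16-byte buffer used below
     * (15 chars plus the terminating NUL).
     */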
1358 #if defined(HAVE_ANDROID_PTHREAD_SETNAME_NP)
1359 /* pthread_setname_np fails rather than truncating long strings */
1360 char buf[16]; // MAX_TASK_COMM_LEN=16 is hard-coded into bionic
1361 strncpy(buf, s, sizeof(buf)-1);
1362 buf[sizeof(buf)-1] = '\0';
1363 int err = pthread_setname_np(pthread_self(), buf);
1364 if (err != 0) {
1365 LOGW("Unable to set the name of current thread to '%s': %s\n",
1366 buf, strerror(err));
1367 }
1368 #elif defined(HAVE_PRCTL)
1369 prctl(PR_SET_NAME, (unsigned long) s, 0, 0, 0);
1370 #else
1371 LOGD("No way to set current thread's name (%s)\n", s);
1372 #endif
1373 }
1374
1375 /*
1376 * Create a thread as a result of java.lang.Thread.start().
1377 *
1378 * We do have to worry about some concurrency problems, e.g. programs
1379 * that try to call Thread.start() on the same object from multiple threads.
1380 * (This will fail for all but one, but we have to make sure that it succeeds
1381 * for exactly one.)
1382 *
1383 * Some of the complexity here arises from our desire to mimic the
1384 * Thread vs. VMThread class decomposition we inherited. We've been given
1385 * a Thread, and now we need to create a VMThread and then populate both
1386 * objects. We also need to create one of our internal Thread objects.
1387 *
1388 * Pass in a stack size of 0 to get the default.
1389 *
1390 * The "threadObj" reference must be pinned by the caller to prevent the GC
1391 * from moving it around (e.g. added to the tracked allocation list).
1392 */
1393 bool dvmCreateInterpThread(Object* threadObj, int reqStackSize)
1394 {
1395 pthread_attr_t threadAttr;
1396 pthread_t threadHandle;
1397 Thread* self;
1398 Thread* newThread = NULL;
1399 Object* vmThreadObj = NULL;
1400 int stackSize;
1401
1402 assert(threadObj != NULL);
1403
1404 if(gDvm.zygote) {
1405 // Allow the sampling profiler thread. We shut it down before forking.
1406 StringObject* nameStr = (StringObject*) dvmGetFieldObject(threadObj,
1407 gDvm.offJavaLangThread_name);
1408 char* threadName = dvmCreateCstrFromString(nameStr);
1409 bool profilerThread = strcmp(threadName, "SamplingProfiler") == 0;
1410 free(threadName);
1411 if (!profilerThread) {
1412 dvmThrowException("Ljava/lang/IllegalStateException;",
1413 "No new threads in -Xzygote mode");
1414
1415 goto fail;
1416 }
1417 }
1418
1419 self = dvmThreadSelf();
1420 if (reqStackSize == 0)
1421 stackSize = gDvm.stackSize;
1422 else if (reqStackSize < kMinStackSize)
1423 stackSize = kMinStackSize;
1424 else if (reqStackSize > kMaxStackSize)
1425 stackSize = kMaxStackSize;
1426 else
1427 stackSize = reqStackSize;
1428
1429 pthread_attr_init(&threadAttr);
1430 pthread_attr_setdetachstate(&threadAttr, PTHREAD_CREATE_DETACHED);
1431
1432 /*
1433 * To minimize the time spent in the critical section, we allocate the
1434 * vmThread object here.
1435 */
1436 vmThreadObj = dvmAllocObject(gDvm.classJavaLangVMThread, ALLOC_DEFAULT);
1437 if (vmThreadObj == NULL)
1438 goto fail;
1439
1440 newThread = allocThread(stackSize);
1441 if (newThread == NULL)
1442 goto fail;
1443 newThread->threadObj = threadObj;
1444
1445 assert(newThread->status == THREAD_INITIALIZING);
1446
1447 /*
1448 * We need to lock out other threads while we test and set the
1449 * "vmThread" field in java.lang.Thread, because we use that to determine
1450 * if this thread has been started before. We use the thread list lock
1451 * because it's handy and we're going to need to grab it again soon
1452 * anyway.
1453 */
1454 dvmLockThreadList(self);
1455
1456 if (dvmGetFieldObject(threadObj, gDvm.offJavaLangThread_vmThread) != NULL) {
1457 dvmUnlockThreadList();
1458 dvmThrowException("Ljava/lang/IllegalThreadStateException;",
1459 "thread has already been started");
1460 goto fail;
1461 }
1462
1463 /*
1464 * There are actually three data structures: Thread (object), VMThread
1465 * (object), and Thread (C struct). All of them point to at least one
1466 * other.
1467 *
1468 * As soon as "VMThread.vmData" is assigned, other threads can start
1469 * making calls into us (e.g. setPriority).
1470 */
1471 dvmSetFieldInt(vmThreadObj, gDvm.offJavaLangVMThread_vmData, (u4)newThread);
1472 dvmSetFieldObject(threadObj, gDvm.offJavaLangThread_vmThread, vmThreadObj);
1473
1474 /*
1475 * Thread creation might take a while, so release the lock.
1476 */
1477 dvmUnlockThreadList();
1478
1479 ThreadStatus oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
1480 int cc = pthread_create(&threadHandle, &threadAttr, interpThreadStart,
1481 newThread);
1482 oldStatus = dvmChangeStatus(self, oldStatus);
1483
1484 if (cc != 0) {
1485 /*
1486 * Failure generally indicates that we have exceeded system
1487 * resource limits. VirtualMachineError is probably too severe,
1488 * so use OutOfMemoryError.
1489 */
1490 LOGE("Thread creation failed (err=%s)\n", strerror(errno));
1491
1492 dvmSetFieldObject(threadObj, gDvm.offJavaLangThread_vmThread, NULL);
1493
1494 dvmThrowException("Ljava/lang/OutOfMemoryError;",
1495 "thread creation failed");
1496 goto fail;
1497 }
1498
1499 /*
1500 * We need to wait for the thread to start. Otherwise, depending on
1501 * the whims of the OS scheduler, we could return and the code in our
1502 * thread could try to do operations on the new thread before it had
1503 * finished starting.
1504 *
1505 * The new thread will lock the thread list, change its state to
1506 * THREAD_STARTING, broadcast to gDvm.threadStartCond, and then sleep
1507 * on gDvm.threadStartCond (which uses the thread list lock). This
1508 * thread (the parent) will either see that the thread is already ready
1509 * after we grab the thread list lock, or will be awakened from the
1510 * condition variable on the broadcast.
1511 *
1512 * We don't want to stall the rest of the VM while the new thread
1513 * starts, which can happen if the GC wakes up at the wrong moment.
1514 * So, we change our own status to VMWAIT, and self-suspend if
1515 * necessary after we finish adding the new thread.
1516 *
1517 *
1518 * We have to deal with an odd race with the GC/debugger suspension
1519 * mechanism when creating a new thread. The information about whether
1520 * or not a thread should be suspended is contained entirely within
1521 * the Thread struct; this is usually cleaner to deal with than having
1522 * one or more globally-visible suspension flags. The trouble is that
1523 * we could create the thread while the VM is trying to suspend all
1524 * threads. The suspend-count won't be nonzero for the new thread,
1525 * so dvmChangeStatus(THREAD_RUNNING) won't cause a suspension.
1526 *
1527 * The easiest way to deal with this is to prevent the new thread from
1528 * running until the parent says it's okay. This results in the
1529 * following (correct) sequence of events for a "badly timed" GC
1530 * (where '-' is us, 'o' is the child, and '+' is some other thread):
1531 *
1532 * - call pthread_create()
1533 * - lock thread list
1534 * - put self into THREAD_VMWAIT so GC doesn't wait for us
1535 * - sleep on condition var (mutex = thread list lock) until child starts
1536 * + GC triggered by another thread
1537 * + thread list locked; suspend counts updated; thread list unlocked
1538 * + loop waiting for all runnable threads to suspend
1539 * + success, start GC
1540 * o child thread wakes, signals condition var to wake parent
1541 * o child waits for parent ack on condition variable
1542 * - we wake up, locking thread list
1543 * - add child to thread list
1544 * - unlock thread list
1545 * - change our state back to THREAD_RUNNING; GC causes us to suspend
1546 * + GC finishes; all threads in thread list are resumed
1547 * - lock thread list
1548 * - set child to THREAD_VMWAIT, and signal it to start
1549 * - unlock thread list
1550 * o child resumes
1551 * o child changes state to THREAD_RUNNING
1552 *
1553 * The above shows the GC starting up during thread creation, but if
1554 * it starts anywhere after VMThread.create() is called it will
1555 * produce the same series of events.
1556 *
1557 * Once the child is in the thread list, it will be suspended and
1558 * resumed like any other thread. In the above scenario the resume-all
1559 * code will try to resume the new thread, which was never actually
1560 * suspended, and try to decrement the child's thread suspend count to -1.
1561 * We can catch this in the resume-all code.
1562 *
1563 * Bouncing back and forth between threads like this adds a small amount
1564 * of scheduler overhead to thread startup.
1565 *
1566 * One alternative to having the child wait for the parent would be
1567 * to have the child inherit the parent's suspension count. This
1568 * would work for a GC, since we can safely assume that the parent
1569 * thread didn't cause it, but we must only do so if the parent suspension
1570 * was caused by a suspend-all. If the parent was being asked to
1571 * suspend singly by the debugger, the child should not inherit the value.
1572 *
1573 * We could also have a global "new thread suspend count" that gets
1574 * picked up by new threads before changing state to THREAD_RUNNING.
1575 * This would be protected by the thread list lock and set by a
1576 * suspend-all.
1577 */
1578 dvmLockThreadList(self);
1579 assert(self->status == THREAD_RUNNING);
1580 self->status = THREAD_VMWAIT;
1581 while (newThread->status != THREAD_STARTING)
1582 pthread_cond_wait(&gDvm.threadStartCond, &gDvm.threadListLock);
1583
1584 LOG_THREAD("threadid=%d: adding to list\n", newThread->threadId);
1585 newThread->next = gDvm.threadList->next;
1586 if (newThread->next != NULL)
1587 newThread->next->prev = newThread;
1588 newThread->prev = gDvm.threadList;
1589 gDvm.threadList->next = newThread;
1590
1591 if (!dvmGetFieldBoolean(threadObj, gDvm.offJavaLangThread_daemon))
1592 gDvm.nonDaemonThreadCount++; // guarded by thread list lock
1593
1594 dvmUnlockThreadList();
1595
1596 /* change status back to RUNNING, self-suspending if necessary */
1597 dvmChangeStatus(self, THREAD_RUNNING);
1598
1599 /*
1600 * Tell the new thread to start.
1601 *
1602 * We must hold the thread list lock before messing with another thread.
1603 * In the general case we would also need to verify that newThread was
1604 * still in the thread list, but in our case the thread has not started
1605 * executing user code and therefore has not had a chance to exit.
1606 *
1607 * We move it to VMWAIT, and it then shifts itself to RUNNING, which
1608 * comes with a suspend-pending check.
1609 */
1610 dvmLockThreadList(self);
1611
1612 assert(newThread->status == THREAD_STARTING);
1613 newThread->status = THREAD_VMWAIT;
1614 pthread_cond_broadcast(&gDvm.threadStartCond);
1615
1616 dvmUnlockThreadList();
1617
1618 dvmReleaseTrackedAlloc(vmThreadObj, NULL);
1619 return true;
1620
1621 fail:
1622 freeThread(newThread);
1623 dvmReleaseTrackedAlloc(vmThreadObj, NULL);
1624 return false;
1625 }
1626
1627 /*
1628 * pthread entry function for threads started from interpreted code.
1629 */
1630 static void* interpThreadStart(void* arg)
1631 {
1632 Thread* self = (Thread*) arg;
1633
1634 char *threadName = dvmGetThreadName(self);
1635 setThreadName(threadName);
1636 free(threadName);
1637
1638 /*
1639 * Finish initializing the Thread struct.
1640 */
1641 dvmLockThreadList(self);
1642 prepareThread(self);
1643
1644 LOG_THREAD("threadid=%d: created from interp\n", self->threadId);
1645
1646 /*
1647 * Change our status and wake our parent, who will add us to the
1648 * thread list and advance our state to VMWAIT.
1649 */
1650 self->status = THREAD_STARTING;
1651 pthread_cond_broadcast(&gDvm.threadStartCond);
1652
1653 /*
1654 * Wait until the parent says we can go. Assuming there wasn't a
1655 * suspend pending, this will happen immediately. When it completes,
1656 * we're full-fledged citizens of the VM.
1657 *
1658 * We have to use THREAD_VMWAIT here rather than THREAD_RUNNING
1659 * because the pthread_cond_wait below needs to reacquire a lock that
1660 * suspend-all is also interested in. If we get unlucky, the parent could
1661 * change us to THREAD_RUNNING, then a GC could start before we get
1662 * signaled, and suspend-all will grab the thread list lock and then
1663 * wait for us to suspend. We'll be in the tail end of pthread_cond_wait
1664 * trying to get the lock.
1665 */
1666 while (self->status != THREAD_VMWAIT)
1667 pthread_cond_wait(&gDvm.threadStartCond, &gDvm.threadListLock);
1668
1669 dvmUnlockThreadList();
1670
1671 /*
1672 * Add a JNI context.
1673 */
1674 self->jniEnv = dvmCreateJNIEnv(self);
1675
1676 /*
1677 * Change our state so the GC will wait for us from now on. If a GC is
1678 * in progress this call will suspend us.
1679 */
1680 dvmChangeStatus(self, THREAD_RUNNING);
1681
1682 /*
1683 * Notify the debugger & DDM. The debugger notification may cause
1684 * us to suspend ourselves (and others).
1685 */
1686 if (gDvm.debuggerConnected)
1687 dvmDbgPostThreadStart(self);
1688
1689 /*
1690 * Set the system thread priority according to the Thread object's
1691 * priority level. We don't usually need to do this, because both the
1692 * Thread object and system thread priorities inherit from parents. The
1693 * tricky case is when somebody creates a Thread object, calls
1694 * setPriority(), and then starts the thread. We could manage this with
1695 * a "needs priority update" flag to avoid the redundant call.
1696 */
1697 int priority = dvmGetFieldInt(self->threadObj,
1698 gDvm.offJavaLangThread_priority);
1699 dvmChangeThreadPriority(self, priority);
1700
1701 /*
1702 * Execute the "run" method.
1703 *
1704 * At this point our stack is empty, so somebody who comes looking for
1705 * stack traces right now won't have much to look at. This is normal.
1706 */
1707 Method* run = self->threadObj->clazz->vtable[gDvm.voffJavaLangThread_run];
1708 JValue unused;
1709
1710 LOGV("threadid=%d: calling run()\n", self->threadId);
1711 assert(strcmp(run->name, "run") == 0);
1712 dvmCallMethod(self, run, self->threadObj, &unused);
1713 LOGV("threadid=%d: exiting\n", self->threadId);
1714
1715 /*
1716 * Remove the thread from various lists, report its death, and free
1717 * its resources.
1718 */
1719 dvmDetachCurrentThread();
1720
1721 return NULL;
1722 }
1723
1724 /*
1725 * The current thread is exiting with an uncaught exception. The
1726 * Java programming language allows the application to provide a
1727 * thread-exit-uncaught-exception handler for the VM, for a specific
1728 * Thread, and for all threads in a ThreadGroup.
1729 *
1730 * Version 1.5 added the per-thread handler. We need to call
1731 * "uncaughtException" in the handler object, which is either the
1732 * ThreadGroup object or the Thread-specific handler.
1733 */
1734 static void threadExitUncaughtException(Thread* self, Object* group)
1735 {
1736 Object* exception;
1737 Object* handlerObj;
1738 Method* uncaughtHandler = NULL;
1739 InstField* threadHandler;
1740
1741 LOGW("threadid=%d: thread exiting with uncaught exception (group=%p)\n",
1742 self->threadId, group);
1743 assert(group != NULL);
1744
1745 /*
1746 * Get a pointer to the exception, then clear out the one in the
1747 * thread. We don't want to have it set when executing interpreted code.
1748 */
1749 exception = dvmGetException(self);
1750 dvmAddTrackedAlloc(exception, self);
1751 dvmClearException(self);
1752
1753 /*
1754 * Get the Thread's "uncaughtHandler" object. Use it if non-NULL;
1755 * else use "group" (which is an instance of UncaughtExceptionHandler).
1756 */
1757 threadHandler = dvmFindInstanceField(gDvm.classJavaLangThread,
1758 "uncaughtHandler", "Ljava/lang/Thread$UncaughtExceptionHandler;");
1759 if (threadHandler == NULL) {
1760 LOGW("WARNING: no 'uncaughtHandler' field in java/lang/Thread\n");
1761 goto bail;
1762 }
1763 handlerObj = dvmGetFieldObject(self->threadObj, threadHandler->byteOffset);
1764 if (handlerObj == NULL)
1765 handlerObj = group;
1766
1767 /*
1768 * Find the "uncaughtException" method in this object.
1769 */
1770 uncaughtHandler = dvmFindVirtualMethodHierByDescriptor(handlerObj->clazz,
1771 "uncaughtException", "(Ljava/lang/Thread;Ljava/lang/Throwable;)V");
1772
1773 if (uncaughtHandler != NULL) {
1774 //LOGI("+++ calling %s.uncaughtException\n",
1775 // handlerObj->clazz->descriptor);
1776 JValue unused;
1777 dvmCallMethod(self, uncaughtHandler, handlerObj, &unused,
1778 self->threadObj, exception);
1779 } else {
1780 /* restore it and dump a stack trace */
1781 LOGW("WARNING: no 'uncaughtException' method in class %s\n",
1782 handlerObj->clazz->descriptor);
1783 dvmSetException(self, exception);
1784 dvmLogExceptionStackTrace();
1785 }
1786
1787 bail:
1788 #if defined(WITH_JIT)
1789 /* Remove this thread's suspendCount from global suspendCount sum */
1790 lockThreadSuspendCount();
1791 dvmAddToThreadSuspendCount(&self->suspendCount, -self->suspendCount);
1792 unlockThreadSuspendCount();
1793 #endif
1794 dvmReleaseTrackedAlloc(exception, self);
1795 }
1796
1797
1798 /*
1799 * Create an internal VM thread, for things like JDWP and finalizers.
1800 *
1801 * The easiest way to do this is to create a new thread and then use the
1802 * JNI AttachCurrentThread implementation.
1803 *
1804 * This does not return until after the new thread has begun executing.
1805 */
1806 bool dvmCreateInternalThread(pthread_t* pHandle, const char* name,
1807 InternalThreadStart func, void* funcArg)
1808 {
1809 InternalStartArgs* pArgs;
1810 Object* systemGroup;
1811 pthread_attr_t threadAttr;
1812 volatile Thread* newThread = NULL;
1813 volatile int createStatus = 0;
1814
1815 systemGroup = dvmGetSystemThreadGroup();
1816 if (systemGroup == NULL)
1817 return false;
1818
1819 pArgs = (InternalStartArgs*) malloc(sizeof(*pArgs));
1820 pArgs->func = func;
1821 pArgs->funcArg = funcArg;
1822 pArgs->name = strdup(name); // storage will be owned by new thread
1823 pArgs->group = systemGroup;
1824 pArgs->isDaemon = true;
1825 pArgs->pThread = &newThread;
1826 pArgs->pCreateStatus = &createStatus;
1827
1828 pthread_attr_init(&threadAttr);
1829 //pthread_attr_setdetachstate(&threadAttr, PTHREAD_CREATE_DETACHED);
1830
1831 if (pthread_create(pHandle, &threadAttr, internalThreadStart,
1832 pArgs) != 0)
1833 {
1834 LOGE("internal thread creation failed\n");
1835 free(pArgs->name);
1836 free(pArgs);
1837 return false;
1838 }
1839
1840 /*
1841 * Wait for the child to start. This gives us an opportunity to make
1842 * sure that the thread started correctly, and allows our caller to
1843 * assume that the thread has started running.
1844 *
1845 * Because we aren't holding a lock across the thread creation, it's
1846 * possible that the child will already have completed its
1847 * initialization. Because the child only adjusts "createStatus" while
1848 * holding the thread list lock, the initial condition on the "while"
1849 * loop will correctly avoid the wait if this occurs.
1850 *
1851 * It's also possible that we'll have to wait for the thread to finish
1852 * being created, and as part of allocating a Thread object it might
1853 * need to initiate a GC. We switch to VMWAIT while we pause.
1854 */
1855 Thread* self = dvmThreadSelf();
1856 ThreadStatus oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
1857 dvmLockThreadList(self);
1858 while (createStatus == 0)
1859 pthread_cond_wait(&gDvm.threadStartCond, &gDvm.threadListLock);
1860
1861 if (newThread == NULL) {
1862 LOGW("internal thread create failed (createStatus=%d)\n", createStatus);
1863 assert(createStatus < 0);
1864 /* don't free pArgs -- if pthread_create succeeded, child owns it */
1865 dvmUnlockThreadList();
1866 dvmChangeStatus(self, oldStatus);
1867 return false;
1868 }
1869
1870 /* thread could be in any state now (except early init states) */
1871 //assert(newThread->status == THREAD_RUNNING);
1872
1873 dvmUnlockThreadList();
1874 dvmChangeStatus(self, oldStatus);
1875
1876 return true;
1877 }
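
/*
 * Illustrative sketch (editorial, not part of the original VM source):
 * a minimal caller of dvmCreateInternalThread(). It assumes the
 * InternalThreadStart typedef matches "void* (*)(void*)", as suggested by
 * the call through pArgs->func in internalThreadStart() below; the names
 * "exampleWorker" and "exampleSpawnWorker" are hypothetical.
 */
#if 0   /* example only, not compiled */
static void* exampleWorker(void* arg)
{
    /* runs attached to the VM, in the "system" group, as a daemon thread */
    LOGV("example worker running, arg=%p\n", arg);
    return NULL;
}

static bool exampleSpawnWorker(void)
{
    pthread_t handle;

    /* does not return until the new thread has attached and started */
    return dvmCreateInternalThread(&handle, "ExampleWorker",
            exampleWorker, NULL);
}
#endif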
1878
1879 /*
1880 * pthread entry function for internally-created threads.
1881 *
1882 * We are expected to free "arg" and its contents. If we're a daemon
1883 * thread, and we get cancelled abruptly when the VM shuts down, the
1884 * storage won't be freed. If this becomes a concern we can make a copy
1885 * on the stack.
1886 */
1887 static void* internalThreadStart(void* arg)
1888 {
1889 InternalStartArgs* pArgs = (InternalStartArgs*) arg;
1890 JavaVMAttachArgs jniArgs;
1891
1892 jniArgs.version = JNI_VERSION_1_2;
1893 jniArgs.name = pArgs->name;
1894 jniArgs.group = pArgs->group;
1895
1896 setThreadName(pArgs->name);
1897
1898 /* use local jniArgs as stack top */
1899 if (dvmAttachCurrentThread(&jniArgs, pArgs->isDaemon)) {
1900 /*
1901 * Tell the parent of our success.
1902 *
1903 * threadListLock is the mutex for threadStartCond.
1904 */
1905 dvmLockThreadList(dvmThreadSelf());
1906 *pArgs->pCreateStatus = 1;
1907 *pArgs->pThread = dvmThreadSelf();
1908 pthread_cond_broadcast(&gDvm.threadStartCond);
1909 dvmUnlockThreadList();
1910
1911 LOG_THREAD("threadid=%d: internal '%s'\n",
1912 dvmThreadSelf()->threadId, pArgs->name);
1913
1914 /* execute */
1915 (*pArgs->func)(pArgs->funcArg);
1916
1917 /* detach ourselves */
1918 dvmDetachCurrentThread();
1919 } else {
1920 /*
1921 * Tell the parent of our failure. We don't have a Thread struct,
1922 * so we can't be suspended, so we don't need to enter a critical
1923 * section.
1924 */
1925 dvmLockThreadList(dvmThreadSelf());
1926 *pArgs->pCreateStatus = -1;
1927 assert(*pArgs->pThread == NULL);
1928 pthread_cond_broadcast(&gDvm.threadStartCond);
1929 dvmUnlockThreadList();
1930
1931 assert(*pArgs->pThread == NULL);
1932 }
1933
1934 free(pArgs->name);
1935 free(pArgs);
1936 return NULL;
1937 }
1938
1939 /*
1940 * Attach the current thread to the VM.
1941 *
1942 * Used for internally-created threads and JNI's AttachCurrentThread.
1943 */
1944 bool dvmAttachCurrentThread(const JavaVMAttachArgs* pArgs, bool isDaemon)
1945 {
1946 Thread* self = NULL;
1947 Object* threadObj = NULL;
1948 Object* vmThreadObj = NULL;
1949 StringObject* threadNameStr = NULL;
1950 Method* init;
1951 bool ok, ret;
1952
1953 /* allocate thread struct, and establish a basic sense of self */
1954 self = allocThread(gDvm.stackSize);
1955 if (self == NULL)
1956 goto fail;
1957 setThreadSelf(self);
1958
1959 /*
1960 * Finish our thread prep. We need to do this before adding ourselves
1961 * to the thread list or invoking any interpreted code. prepareThread()
1962 * requires that we hold the thread list lock.
1963 */
1964 dvmLockThreadList(self);
1965 ok = prepareThread(self);
1966 dvmUnlockThreadList();
1967 if (!ok)
1968 goto fail;
1969
1970 self->jniEnv = dvmCreateJNIEnv(self);
1971 if (self->jniEnv == NULL)
1972 goto fail;
1973
1974 /*
1975 * Create a "fake" JNI frame at the top of the main thread interp stack.
1976 * It isn't really necessary for the internal threads, but it gives
1977 * the debugger something to show. It is essential for the JNI-attached
1978 * threads.
1979 */
1980 if (!createFakeRunFrame(self))
1981 goto fail;
1982
1983 /*
1984 * The native side of the thread is ready; add it to the list. Once
1985 * it's on the list the thread is visible to the JDWP code and the GC.
1986 */
1987 LOG_THREAD("threadid=%d: adding to list (attached)\n", self->threadId);
1988
1989 dvmLockThreadList(self);
1990
1991 self->next = gDvm.threadList->next;
1992 if (self->next != NULL)
1993 self->next->prev = self;
1994 self->prev = gDvm.threadList;
1995 gDvm.threadList->next = self;
1996 if (!isDaemon)
1997 gDvm.nonDaemonThreadCount++;
1998
1999 dvmUnlockThreadList();
2000
2001 /*
2002 * Switch state from initializing to running.
2003 *
2004 * It's possible that a GC began right before we added ourselves
2005 * to the thread list, and is still going. That means our thread
2006 * suspend count won't reflect the fact that we should be suspended.
2007 * To deal with this, we transition to VMWAIT, pulse the heap lock,
2008 * and then advance to RUNNING. That will ensure that we stall until
2009 * the GC completes.
2010 *
2011 * Once we're in RUNNING, we're like any other thread in the VM (except
2012 * for the lack of an initialized threadObj). We're then free to
2013 * allocate and initialize objects.
2014 */
2015 assert(self->status == THREAD_INITIALIZING);
2016 dvmChangeStatus(self, THREAD_VMWAIT);
2017 dvmLockMutex(&gDvm.gcHeapLock);
2018 dvmUnlockMutex(&gDvm.gcHeapLock);
2019 dvmChangeStatus(self, THREAD_RUNNING);
2020
2021 /*
2022 * Create Thread and VMThread objects.
2023 */
2024 threadObj = dvmAllocObject(gDvm.classJavaLangThread, ALLOC_DEFAULT);
2025 vmThreadObj = dvmAllocObject(gDvm.classJavaLangVMThread, ALLOC_DEFAULT);
2026 if (threadObj == NULL || vmThreadObj == NULL)
2027 goto fail_unlink;
2028
2029 /*
2030 * This makes threadObj visible to the GC. We still have it in the
2031 * tracked allocation table, so it can't move around on us.
2032 */
2033 self->threadObj = threadObj;
2034 dvmSetFieldInt(vmThreadObj, gDvm.offJavaLangVMThread_vmData, (u4)self);
2035
2036 /*
2037 * Create a string for the thread name.
2038 */
2039 if (pArgs->name != NULL) {
2040 threadNameStr = dvmCreateStringFromCstr(pArgs->name);
2041 if (threadNameStr == NULL) {
2042 assert(dvmCheckException(dvmThreadSelf()));
2043 goto fail_unlink;
2044 }
2045 }
2046
2047 init = dvmFindDirectMethodByDescriptor(gDvm.classJavaLangThread, "<init>",
2048 "(Ljava/lang/ThreadGroup;Ljava/lang/String;IZ)V");
2049 if (init == NULL) {
2050 assert(dvmCheckException(self));
2051 goto fail_unlink;
2052 }
2053
2054 /*
2055 * Now we're ready to run some interpreted code.
2056 *
2057 * We need to construct the Thread object and set the VMThread field.
2058 * Setting VMThread tells interpreted code that we're alive.
2059 *
2060 * Call the (group, name, priority, daemon) constructor on the Thread.
2061 * This sets the thread's name and adds it to the specified group, and
2062 * provides values for priority and daemon (which are normally inherited
2063 * from the current thread).
2064 */
2065 JValue unused;
2066 dvmCallMethod(self, init, threadObj, &unused, (Object*)pArgs->group,
2067 threadNameStr, getThreadPriorityFromSystem(), isDaemon);
2068 if (dvmCheckException(self)) {
2069 LOGE("exception thrown while constructing attached thread object\n");
2070 goto fail_unlink;
2071 }
2072
2073 /*
2074 * Set the VMThread field, which tells interpreted code that we're alive.
2075 *
2076 * The risk of a thread start collision here is very low; somebody
2077 * would have to be deliberately polling the ThreadGroup list and
2078 * trying to start threads against anything it sees, which would
2079 * generally cause problems for all thread creation. However, for
2080 * correctness we test "vmThread" before setting it.
2081 *
2082 * TODO: this still has a race, it's just smaller. Not sure this is
2083 * worth putting effort into fixing. Need to hold a lock while
2084 * fiddling with the field, or maybe initialize the Thread object in a
2085 * way that ensures another thread can't call start() on it.
2086 */
2087 if (dvmGetFieldObject(threadObj, gDvm.offJavaLangThread_vmThread) != NULL) {
2088 LOGW("WOW: thread start hijack\n");
2089 dvmThrowException("Ljava/lang/IllegalThreadStateException;",
2090 "thread has already been started");
2091 /* We don't want to free anything associated with the thread
2092 * because someone is obviously interested in it. Just let
2093 * it go and hope it will clean itself up when it's finished.
2094 * This case should never happen anyway.
2095 *
2096 * Since we're letting it live, we need to finish setting it up.
2097 * We just have to let the caller know that the intended operation
2098 * has failed.
2099 *
2100 * [ This seems strange -- stepping on the vmThread object that's
2101 * already present seems like a bad idea. TODO: figure this out. ]
2102 */
2103 ret = false;
2104 } else {
2105 ret = true;
2106 }
2107 dvmSetFieldObject(threadObj, gDvm.offJavaLangThread_vmThread, vmThreadObj);
2108
2109 /* we can now safely un-pin these */
2110 dvmReleaseTrackedAlloc(threadObj, self);
2111 dvmReleaseTrackedAlloc(vmThreadObj, self);
2112 dvmReleaseTrackedAlloc((Object*)threadNameStr, self);
2113
2114 LOG_THREAD("threadid=%d: attached from native, name=%s\n",
2115 self->threadId, pArgs->name);
2116
2117 /* tell the debugger & DDM */
2118 if (gDvm.debuggerConnected)
2119 dvmDbgPostThreadStart(self);
2120
2121 return ret;
2122
2123 fail_unlink:
2124 dvmLockThreadList(self);
2125 unlinkThread(self);
2126 if (!isDaemon)
2127 gDvm.nonDaemonThreadCount--;
2128 dvmUnlockThreadList();
2129 /* fall through to "fail" */
2130 fail:
2131 dvmReleaseTrackedAlloc(threadObj, self);
2132 dvmReleaseTrackedAlloc(vmThreadObj, self);
2133 dvmReleaseTrackedAlloc((Object*)threadNameStr, self);
2134 if (self != NULL) {
2135 if (self->jniEnv != NULL) {
2136 dvmDestroyJNIEnv(self->jniEnv);
2137 self->jniEnv = NULL;
2138 }
2139 freeThread(self);
2140 }
2141 setThreadSelf(NULL);
2142 return false;
2143 }
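
/*
 * Illustrative sketch (editorial): how a plain native thread reaches
 * dvmAttachCurrentThread() through the public JNI interface. This assumes
 * the C bindings from Android's jni.h; "gExampleVm" is a hypothetical
 * JavaVM* saved earlier (e.g. in JNI_OnLoad).
 */
#if 0   /* example only, not compiled */
static JavaVM* gExampleVm;

static void exampleAttachAndDetach(void)
{
    JNIEnv* env = NULL;
    JavaVMAttachArgs args;

    args.version = JNI_VERSION_1_2;
    args.name = "example-native";   /* becomes the Thread's name */
    args.group = NULL;              /* ThreadGroup global ref, or NULL */

    /* lands in dvmAttachCurrentThread(..., false) */
    if ((*gExampleVm)->AttachCurrentThread(gExampleVm, &env, &args) != JNI_OK)
        return;

    /* ... call into managed code through "env" ... */

    /* lands in dvmDetachCurrentThread() */
    (*gExampleVm)->DetachCurrentThread(gExampleVm);
}
#endif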
2144
2145 /*
2146 * Detach the thread from the various data structures, notify other threads
2147 * that are waiting to "join" it, and free up all heap-allocated storage.
2148 *
2149 * Used for all threads.
2150 *
2151 * When we get here the interpreted stack should be empty. The JNI 1.6 spec
2152 * requires us to enforce this for the DetachCurrentThread call, probably
2153 * because it also says that DetachCurrentThread causes all monitors
2154 * associated with the thread to be released. (Because the stack is empty,
2155 * we only have to worry about explicit JNI calls to MonitorEnter.)
2156 *
2157 * THOUGHT:
2158 * We might want to avoid freeing our internal Thread structure until the
2159 * associated Thread/VMThread objects get GCed. Our Thread is impossible to
2160 * get to once the thread shuts down, but there is a small possibility of
2161 * an operation starting in another thread before this thread halts, and
2162 * finishing much later (perhaps the thread got stalled by a weird OS bug).
2163 * We don't want something like Thread.isInterrupted() crawling through
2164 * freed storage. We could do this with a Thread finalizer, or by creating a
2165 * dedicated ThreadObject class for java/lang/Thread and moving all of our
2166 * state into that.
2167 */
2168 void dvmDetachCurrentThread(void)
2169 {
2170 Thread* self = dvmThreadSelf();
2171 Object* vmThread;
2172 Object* group;
2173
2174 /*
2175 * Make sure we're not detaching a thread that's still running. (This
2176 * could happen with an explicit JNI detach call.)
2177 *
2178 * A thread created by interpreted code will finish with a depth of
2179 * zero, while a JNI-attached thread will have the synthetic "stack
2180 * starter" native method at the top.
2181 */
2182 int curDepth = dvmComputeExactFrameDepth(self->curFrame);
2183 if (curDepth != 0) {
2184 bool topIsNative = false;
2185
2186 if (curDepth == 1) {
2187 /* not expecting a lingering break frame; just look at curFrame */
2188 assert(!dvmIsBreakFrame(self->curFrame));
2189 StackSaveArea* ssa = SAVEAREA_FROM_FP(self->curFrame);
2190 if (dvmIsNativeMethod(ssa->method))
2191 topIsNative = true;
2192 }
2193
2194 if (!topIsNative) {
2195 LOGE("ERROR: detaching thread with interp frames (count=%d)\n",
2196 curDepth);
2197 dvmDumpThread(self, false);
2198 dvmAbort();
2199 }
2200 }
2201
2202 group = dvmGetFieldObject(self->threadObj, gDvm.offJavaLangThread_group);
2203 LOG_THREAD("threadid=%d: detach (group=%p)\n", self->threadId, group);
2204
2205 /*
2206 * Release any held monitors. Since there are no interpreted stack
2207 * frames, the only thing left are the monitors held by JNI MonitorEnter
2208 * calls.
2209 */
2210 dvmReleaseJniMonitors(self);
2211
2212 /*
2213 * Do some thread-exit uncaught exception processing if necessary.
2214 */
2215 if (dvmCheckException(self))
2216 threadExitUncaughtException(self, group);
2217
2218 /*
2219 * Remove the thread from the thread group.
2220 */
2221 if (group != NULL) {
2222 Method* removeThread =
2223 group->clazz->vtable[gDvm.voffJavaLangThreadGroup_removeThread];
2224 JValue unused;
2225 dvmCallMethod(self, removeThread, group, &unused, self->threadObj);
2226 }
2227
2228 /*
2229 * Clear the vmThread reference in the Thread object. Interpreted code
2230 * will now see that this Thread is not running. As this may be the
2231 * only reference to the VMThread object that the VM knows about, we
2232 * have to create an internal reference to it first.
2233 */
2234 vmThread = dvmGetFieldObject(self->threadObj,
2235 gDvm.offJavaLangThread_vmThread);
2236 dvmAddTrackedAlloc(vmThread, self);
2237 dvmSetFieldObject(self->threadObj, gDvm.offJavaLangThread_vmThread, NULL);
2238
2239 /* clear out our struct Thread pointer, since it's going away */
2240 dvmSetFieldObject(vmThread, gDvm.offJavaLangVMThread_vmData, NULL);
2241
2242 /*
2243 * Tell the debugger & DDM. This may cause the current thread or all
2244 * threads to suspend.
2245 *
2246 * The JDWP spec is somewhat vague about when this happens, other than
2247 * that it's issued by the dying thread, which may still appear in
2248 * an "all threads" listing.
2249 */
2250 if (gDvm.debuggerConnected)
2251 dvmDbgPostThreadDeath(self);
2252
2253 /*
2254 * Thread.join() is implemented as an Object.wait() on the VMThread
2255 * object. Signal anyone who is waiting.
2256 */
2257 dvmLockObject(self, vmThread);
2258 dvmObjectNotifyAll(self, vmThread);
2259 dvmUnlockObject(self, vmThread);
2260
2261 dvmReleaseTrackedAlloc(vmThread, self);
2262 vmThread = NULL;
2263
2264 /*
2265 * We're done manipulating objects, so it's okay if the GC runs in
2266 * parallel with us from here out. It's important to do this if
2267 * profiling is enabled, since we can wait indefinitely.
2268 */
2269 android_atomic_release_store(THREAD_VMWAIT, &self->status);
2270
2271 /*
2272 * If we're doing method trace profiling, we don't want threads to exit,
2273 * because if they do we'll end up reusing thread IDs. This complicates
2274 * analysis and makes it impossible to have reasonable output in the
2275 * "threads" section of the "key" file.
2276 *
2277 * We need to do this after Thread.join() completes, or other threads
2278 * could get wedged. Since self->threadObj is still valid, the Thread
2279 * object will not get GCed even though we're no longer in the ThreadGroup
2280 * list (which is important since the profiling thread needs to get
2281 * the thread's name).
2282 */
2283 MethodTraceState* traceState = &gDvm.methodTrace;
2284
2285 dvmLockMutex(&traceState->startStopLock);
2286 if (traceState->traceEnabled) {
2287 LOGI("threadid=%d: waiting for method trace to finish\n",
2288 self->threadId);
2289 while (traceState->traceEnabled) {
2290 dvmWaitCond(&traceState->threadExitCond,
2291 &traceState->startStopLock);
2292 }
2293 }
2294 dvmUnlockMutex(&traceState->startStopLock);
2295
2296 dvmLockThreadList(self);
2297
2298 /*
2299 * Lose the JNI context.
2300 */
2301 dvmDestroyJNIEnv(self->jniEnv);
2302 self->jniEnv = NULL;
2303
2304 self->status = THREAD_ZOMBIE;
2305
2306 /*
2307 * Remove ourselves from the internal thread list.
2308 */
2309 unlinkThread(self);
2310
2311 /*
2312 * If we're the last one standing, signal anybody waiting in
2313 * DestroyJavaVM that it's okay to exit.
2314 */
2315 if (!dvmGetFieldBoolean(self->threadObj, gDvm.offJavaLangThread_daemon)) {
2316 gDvm.nonDaemonThreadCount--; // guarded by thread list lock
2317
2318 if (gDvm.nonDaemonThreadCount == 0) {
2319 int cc;
2320
2321 LOGV("threadid=%d: last non-daemon thread\n", self->threadId);
2322 //dvmDumpAllThreads(false);
2323 // cond var guarded by threadListLock, which we already hold
2324 cc = pthread_cond_signal(&gDvm.vmExitCond);
2325 assert(cc == 0);
2326 }
2327 }
2328
2329 LOGV("threadid=%d: bye!\n", self->threadId);
2330 releaseThreadId(self);
2331 dvmUnlockThreadList();
2332
2333 setThreadSelf(NULL);
2334
2335 freeThread(self);
2336 }
2337
2338
2339 /*
2340 * Suspend a single thread. Do not use to suspend yourself.
2341 *
2342 * This is used primarily for debugger/DDMS activity. Does not return
2343 * until the thread has suspended or is in a "safe" state (e.g. executing
2344 * native code outside the VM).
2345 *
2346 * The thread list lock should be held before calling here -- it's not
2347 * entirely safe to hang on to a Thread* from another thread otherwise.
2348 * (We'd need to grab it here anyway to avoid clashing with a suspend-all.)
2349 */
2350 void dvmSuspendThread(Thread* thread)
2351 {
2352 assert(thread != NULL);
2353 assert(thread != dvmThreadSelf());
2354 //assert(thread->handle != dvmJdwpGetDebugThread(gDvm.jdwpState));
2355
2356 lockThreadSuspendCount();
2357 dvmAddToThreadSuspendCount(&thread->suspendCount, 1);
2358 thread->dbgSuspendCount++;
2359
2360 LOG_THREAD("threadid=%d: suspend++, now=%d\n",
2361 thread->threadId, thread->suspendCount);
2362 unlockThreadSuspendCount();
2363
2364 waitForThreadSuspend(dvmThreadSelf(), thread);
2365 }
2366
2367 /*
2368 * Reduce the suspend count of a thread. If it hits zero, tell it to
2369 * resume.
2370 *
2371 * Used primarily for debugger/DDMS activity. The thread in question
2372 * might have been suspended singly or as part of a suspend-all operation.
2373 *
2374 * The thread list lock should be held before calling here -- it's not
2375 * entirely safe to hang on to a Thread* from another thread otherwise.
2376 * (We'd need to grab it here anyway to avoid clashing with a suspend-all.)
2377 */
2378 void dvmResumeThread(Thread* thread)
2379 {
2380 assert(thread != NULL);
2381 assert(thread != dvmThreadSelf());
2382 //assert(thread->handle != dvmJdwpGetDebugThread(gDvm.jdwpState));
2383
2384 lockThreadSuspendCount();
2385 if (thread->suspendCount > 0) {
2386 dvmAddToThreadSuspendCount(&thread->suspendCount, -1);
2387 thread->dbgSuspendCount--;
2388 } else {
2389 LOG_THREAD("threadid=%d: suspendCount already zero\n",
2390 thread->threadId);
2391 }
2392
2393 LOG_THREAD("threadid=%d: suspend--, now=%d\n",
2394 thread->threadId, thread->suspendCount);
2395
2396 if (thread->suspendCount == 0) {
2397 dvmBroadcastCond(&gDvm.threadSuspendCountCond);
2398 }
2399
2400 unlockThreadSuspendCount();
2401 }
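
/*
 * Illustrative sketch (editorial): the suspend/resume pairing described
 * above, honoring the rule that the thread list lock is held while we
 * hang on to another thread's Thread*. "exampleInspectThread" is a
 * hypothetical debugger-style helper.
 */
#if 0   /* example only, not compiled */
static void exampleInspectThread(Thread* target)
{
    Thread* self = dvmThreadSelf();

    dvmLockThreadList(self);

    dvmSuspendThread(target);       /* returns once target stops running */

    /* ... examine the target's stack or status ... */

    dvmResumeThread(target);        /* count hits zero, target is woken */

    dvmUnlockThreadList();
}
#endif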
2402
2403 /*
2404 * Suspend yourself, as a result of debugger activity.
2405 */
2406 void dvmSuspendSelf(bool jdwpActivity)
2407 {
2408 Thread* self = dvmThreadSelf();
2409
2410 /* debugger thread must not suspend itself due to debugger activity! */
2411 assert(gDvm.jdwpState != NULL);
2412 if (self->handle == dvmJdwpGetDebugThread(gDvm.jdwpState)) {
2413 assert(false);
2414 return;
2415 }
2416
2417 /*
2418 * Collisions with other suspends aren't really interesting. We want
2419 * to ensure that we're the only one fiddling with the suspend count
2420 * though.
2421 */
2422 lockThreadSuspendCount();
2423 dvmAddToThreadSuspendCount(&self->suspendCount, 1);
2424 self->dbgSuspendCount++;
2425
2426 /*
2427 * Suspend ourselves.
2428 */
2429 assert(self->suspendCount > 0);
2430 self->status = THREAD_SUSPENDED;
2431 LOG_THREAD("threadid=%d: self-suspending (dbg)\n", self->threadId);
2432
2433 /*
2434 * Tell JDWP that we've completed suspension. The JDWP thread can't
2435 * tell us to resume before we're fully asleep because we hold the
2436 * suspend count lock.
2437 *
2438 * If we got here via waitForDebugger(), don't do this part.
2439 */
2440 if (jdwpActivity) {
2441 //LOGI("threadid=%d: clearing wait-for-event (my handle=%08x)\n",
2442 // self->threadId, (int) self->handle);
2443 dvmJdwpClearWaitForEventThread(gDvm.jdwpState);
2444 }
2445
2446 while (self->suspendCount != 0) {
2447 dvmWaitCond(&gDvm.threadSuspendCountCond,
2448 &gDvm.threadSuspendCountLock);
2449 if (self->suspendCount != 0) {
2450 /*
2451 * The condition was signaled but we're still suspended. This
2452 * can happen if the debugger lets go while a SIGQUIT thread
2453 * dump event is pending (assuming SignalCatcher was resumed for
2454 * just long enough to try to grab the thread-suspend lock).
2455 */
2456 LOGD("threadid=%d: still suspended after undo (sc=%d dc=%d)\n",
2457 self->threadId, self->suspendCount, self->dbgSuspendCount);
2458 }
2459 }
2460 assert(self->suspendCount == 0 && self->dbgSuspendCount == 0);
2461 self->status = THREAD_RUNNING;
2462 LOG_THREAD("threadid=%d: self-reviving (dbg), status=%d\n",
2463 self->threadId, self->status);
2464
2465 unlockThreadSuspendCount();
2466 }
2467
2468
2469 #ifdef HAVE_GLIBC
2470 # define NUM_FRAMES 20
2471 # include <execinfo.h>
2472 /*
2473 * glibc-only stack dump function. Requires link with "--export-dynamic".
2474 *
2475 * TODO: move this into libs/cutils and make it work for all platforms.
2476 */
2477 static void printBackTrace(void)
2478 {
2479 void* array[NUM_FRAMES];
2480 size_t size;
2481 char** strings;
2482 size_t i;
2483
2484 size = backtrace(array, NUM_FRAMES);
2485 strings = backtrace_symbols(array, size);
2486
2487 LOGW("Obtained %zd stack frames.\n", size);
2488
2489 for (i = 0; i < size; i++)
2490 LOGW("%s\n", strings[i]);
2491
2492 free(strings);
2493 }
2494 #else
2495 static void printBackTrace(void) {}
2496 #endif
2497
2498 /*
2499 * Dump the state of the current thread and that of another thread that
2500 * we think is wedged.
2501 */
2502 static void dumpWedgedThread(Thread* thread)
2503 {
2504 dvmDumpThread(dvmThreadSelf(), false);
2505 printBackTrace();
2506
2507 // dumping a running thread is risky, but could be useful
2508 dvmDumpThread(thread, true);
2509
2510 // stop now and get a core dump
2511 //abort();
2512 }
2513
2514 /*
2515 * If the thread is running at below-normal priority, temporarily elevate
2516 * it to "normal".
2517 *
2518 * Returns zero if no changes were made. Otherwise, returns bit flags
2519 * indicating what was changed, storing the previous values in the
2520 * provided locations.
2521 */
2522 int dvmRaiseThreadPriorityIfNeeded(Thread* thread, int* pSavedThreadPrio,
2523 SchedPolicy* pSavedThreadPolicy)
2524 {
2525 errno = 0;
2526 *pSavedThreadPrio = getpriority(PRIO_PROCESS, thread->systemTid);
2527 if (errno != 0) {
2528 LOGW("Unable to get priority for threadid=%d sysTid=%d\n",
2529 thread->threadId, thread->systemTid);
2530 return 0;
2531 }
2532 if (get_sched_policy(thread->systemTid, pSavedThreadPolicy) != 0) {
2533 LOGW("Unable to get policy for threadid=%d sysTid=%d\n",
2534 thread->threadId, thread->systemTid);
2535 return 0;
2536 }
2537
2538 int changeFlags = 0;
2539
2540 /*
2541 * Change the priority if we're in the background group.
2542 */
2543 if (*pSavedThreadPolicy == SP_BACKGROUND) {
2544 if (set_sched_policy(thread->systemTid, SP_FOREGROUND) != 0) {
2545 LOGW("Couldn't set fg policy on tid %d\n", thread->systemTid);
2546 } else {
2547 changeFlags |= kChangedPolicy;
2548 LOGD("Temporarily moving tid %d to fg (was %d)\n",
2549 thread->systemTid, *pSavedThreadPolicy);
2550 }
2551 }
2552
2553 /*
2554 * getpriority() returns the "nice" value, so larger numbers indicate
2555 * lower priority, with 0 being normal.
2556 */
2557 if (*pSavedThreadPrio > 0) {
2558 const int kHigher = 0;
2559 if (setpriority(PRIO_PROCESS, thread->systemTid, kHigher) != 0) {
2560 LOGW("Couldn't raise priority on tid %d to %d\n",
2561 thread->systemTid, kHigher);
2562 } else {
2563 changeFlags |= kChangedPriority;
2564 LOGD("Temporarily raised priority on tid %d (%d -> %d)\n",
2565 thread->systemTid, *pSavedThreadPrio, kHigher);
2566 }
2567 }
2568
2569 return changeFlags;
2570 }
2571
2572 /*
2573 * Reset the priority values for the thread in question.
2574 */
2575 void dvmResetThreadPriority(Thread* thread, int changeFlags,
2576 int savedThreadPrio, SchedPolicy savedThreadPolicy)
2577 {
2578 if ((changeFlags & kChangedPolicy) != 0) {
2579 if (set_sched_policy(thread->systemTid, savedThreadPolicy) != 0) {
2580 LOGW("NOTE: couldn't reset tid %d to (%d)\n",
2581 thread->systemTid, savedThreadPolicy);
2582 } else {
2583 LOGD("Restored policy of %d to %d\n",
2584 thread->systemTid, savedThreadPolicy);
2585 }
2586 }
2587
2588 if ((changeFlags & kChangedPriority) != 0) {
2589 if (setpriority(PRIO_PROCESS, thread->systemTid, savedThreadPrio) != 0)
2590 {
2591 LOGW("NOTE: couldn't reset priority on thread %d to %d\n",
2592 thread->systemTid, savedThreadPrio);
2593 } else {
2594 LOGD("Restored priority on %d to %d\n",
2595 thread->systemTid, savedThreadPrio);
2596 }
2597 }
2598 }
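
/*
 * Illustrative sketch (editorial): how the two functions above are meant
 * to be paired, mirroring their use in waitForThreadSuspend() below --
 * save what was changed, do the time-critical waiting, then restore.
 * "exampleGiveThreadCpu" is a hypothetical helper.
 */
#if 0   /* example only, not compiled */
static void exampleGiveThreadCpu(Thread* thread)
{
    int savedPrio;
    SchedPolicy savedPolicy;

    int changeFlags =
        dvmRaiseThreadPriorityIfNeeded(thread, &savedPrio, &savedPolicy);

    /* ... wait for the thread to reach an interesting point ... */

    if (changeFlags != 0)
        dvmResetThreadPriority(thread, changeFlags, savedPrio, savedPolicy);
}
#endif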
2599
2600 /*
2601 * Wait for another thread to see the pending suspension and stop running.
2602 * It can either suspend itself or go into a non-running state such as
2603 * VMWAIT or NATIVE in which it cannot interact with the GC.
2604 *
2605 * If we're running at a higher priority, sched_yield() may not do anything,
2606 * so we need to sleep for "long enough" to guarantee that the other
2607 * thread has a chance to finish what it's doing. Sleeping for too short
2608 * a period (e.g. less than the resolution of the sleep clock) might cause
2609 * the scheduler to return immediately, so we want to start with a
2610 * "reasonable" value and expand.
2611 *
2612 * This does not return until the other thread has stopped running.
2613 * Eventually we time out and the VM aborts.
2614 *
2615 * This does not try to detect the situation where two threads are
2616 * waiting for each other to suspend. In normal use this is part of a
2617 * suspend-all, which implies that the suspend-all lock is held, or as
2618 * part of a debugger action in which the JDWP thread is always the one
2619 * doing the suspending. (We may need to re-evaluate this now that
2620 * getThreadStackTrace is implemented as suspend-snapshot-resume.)
2621 *
2622 * TODO: track basic stats about time required to suspend VM.
2623 */
2624 #define FIRST_SLEEP (250*1000) /* 0.25s */
2625 #define MORE_SLEEP (750*1000) /* 0.75s */
2626 static void waitForThreadSuspend(Thread* self, Thread* thread)
2627 {
2628 const int kMaxRetries = 10;
2629 int spinSleepTime = FIRST_SLEEP;
2630 bool complained = false;
2631 int priChangeFlags = 0;
2632 int savedThreadPrio = -500;
2633 SchedPolicy savedThreadPolicy = SP_FOREGROUND;
2634
2635 int sleepIter = 0;
2636 int retryCount = 0;
2637 u8 startWhen = 0; // init req'd to placate gcc
2638 u8 firstStartWhen = 0;
2639
2640 while (thread->status == THREAD_RUNNING) {
2641 if (sleepIter == 0) { // get current time on first iteration
2642 startWhen = dvmGetRelativeTimeUsec();
2643 if (firstStartWhen == 0) // first iteration of first attempt
2644 firstStartWhen = startWhen;
2645
2646 /*
2647 * After waiting for a bit, check to see if the target thread is
2648 * running at a reduced priority. If so, bump it up temporarily
2649 * to give it more CPU time.
2650 */
2651 if (retryCount == 2) {
2652 assert(thread->systemTid != 0);
2653 priChangeFlags = dvmRaiseThreadPriorityIfNeeded(thread,
2654 &savedThreadPrio, &savedThreadPolicy);
2655 }
2656 }
2657
2658 #if defined (WITH_JIT)
2659 /*
2660 * If we're still waiting after the first timeout, unchain all
2661 * translations iff:
2662 * 1) There are new chains formed since the last unchain
2663 * 2) The top VM frame of the running thread is running JIT'ed code
2664 */
2665 if (gDvmJit.pJitEntryTable && retryCount > 0 &&
2666 gDvmJit.hasNewChain && thread->inJitCodeCache) {
2667 LOGD("JIT unchain all for threadid=%d", thread->threadId);
2668 dvmJitUnchainAll();
2669 }
2670 #endif
2671
2672 /*
2673 * Sleep briefly. The iterative sleep call returns false if we've
2674 * exceeded the total time limit for this round of sleeping.
2675 */
2676 if (!dvmIterativeSleep(sleepIter++, spinSleepTime, startWhen)) {
2677 if (spinSleepTime != FIRST_SLEEP) {
2678 LOGW("threadid=%d: spin on suspend #%d threadid=%d (pcf=%d)\n",
2679 self->threadId, retryCount,
2680 thread->threadId, priChangeFlags);
2681 if (retryCount > 1) {
2682 /* stack trace logging is slow; skip on first iter */
2683 dumpWedgedThread(thread);
2684 }
2685 complained = true;
2686 }
2687
2688 // keep going; could be slow due to valgrind
2689 sleepIter = 0;
2690 spinSleepTime = MORE_SLEEP;
2691
2692 if (retryCount++ == kMaxRetries) {
2693 LOGE("Fatal spin-on-suspend, dumping threads\n");
2694 dvmDumpAllThreads(false);
2695
2696 /* log this after -- long traces will scroll off log */
2697 LOGE("threadid=%d: stuck on threadid=%d, giving up\n",
2698 self->threadId, thread->threadId);
2699
2700 /* try to get a debuggerd dump from the spinning thread */
2701 dvmNukeThread(thread);
2702 /* abort the VM */
2703 dvmAbort();
2704 }
2705 }
2706 }
2707
2708 if (complained) {
2709 LOGW("threadid=%d: spin on suspend resolved in %lld msec\n",
2710 self->threadId,
2711 (dvmGetRelativeTimeUsec() - firstStartWhen) / 1000);
2712 //dvmDumpThread(thread, false); /* suspended, so dump is safe */
2713 }
2714 if (priChangeFlags != 0) {
2715 dvmResetThreadPriority(thread, priChangeFlags, savedThreadPrio,
2716 savedThreadPolicy);
2717 }
2718 }
2719
2720 /*
2721 * Suspend all threads except the current one. This is used by the GC,
2722 * the debugger, and by any thread that hits a "suspend all threads"
2723 * debugger event (e.g. breakpoint or exception).
2724 *
2725 * If thread N hits a "suspend all threads" breakpoint, we don't want it
2726 * to suspend the JDWP thread. For the GC, we do, because the debugger can
2727 * create objects and even execute arbitrary code. The "why" argument
2728 * allows the caller to say why the suspension is taking place.
2729 *
2730 * This can be called when a global suspend has already happened, due to
2731 * various debugger gymnastics, so keeping an "everybody is suspended" flag
2732 * doesn't work.
2733 *
2734 * DO NOT grab any locks before calling here. We grab & release the thread
2735 * lock and suspend lock here (and we're not using recursive locks), and
2736 * we might have to self-suspend if somebody else beats us here.
2737 *
2738 * We know the current thread is in the thread list, because we attach the
2739 * thread before doing anything that could cause VM suspension (like object
2740 * allocation).
2741 */
2742 void dvmSuspendAllThreads(SuspendCause why)
2743 {
2744 Thread* self = dvmThreadSelf();
2745 Thread* thread;
2746
2747 assert(why != 0);
2748
2749 /*
2750 * Start by grabbing the thread suspend lock. If we can't get it, most
2751 * likely somebody else is in the process of performing a suspend or
2752 * resume, so lockThreadSuspend() will cause us to self-suspend.
2753 *
2754 * We keep the lock until all other threads are suspended.
2755 */
2756 lockThreadSuspend("susp-all", why);
2757
2758 LOG_THREAD("threadid=%d: SuspendAll starting\n", self->threadId);
2759
2760 /*
2761 * This is possible if the current thread was in VMWAIT mode when a
2762 * suspend-all happened, and then decided to do its own suspend-all.
2763 * This can happen when a couple of threads have simultaneous events
2764 * of interest to the debugger.
2765 */
2766 //assert(self->suspendCount == 0);
2767
2768 /*
2769 * Increment everybody's suspend count (except our own).
2770 */
2771 dvmLockThreadList(self);
2772
2773 lockThreadSuspendCount();
2774 for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
2775 if (thread == self)
2776 continue;
2777
2778 /* debugger events don't suspend JDWP thread */
2779 if ((why == SUSPEND_FOR_DEBUG || why == SUSPEND_FOR_DEBUG_EVENT) &&
2780 thread->handle == dvmJdwpGetDebugThread(gDvm.jdwpState))
2781 continue;
2782
2783 dvmAddToThreadSuspendCount(&thread->suspendCount, 1);
2784 if (why == SUSPEND_FOR_DEBUG || why == SUSPEND_FOR_DEBUG_EVENT)
2785 thread->dbgSuspendCount++;
2786 }
2787 unlockThreadSuspendCount();
2788
2789 /*
2790 * Wait for everybody in THREAD_RUNNING state to stop. Other states
2791 * indicate the code is either running natively or sleeping quietly.
2792 * Any attempt to transition back to THREAD_RUNNING will cause a check
2793 * for suspension, so it should be impossible for anything to execute
2794 * interpreted code or modify objects (assuming native code plays nicely).
2795 *
2796 * It's also okay if the thread transitions to a non-RUNNING state.
2797 *
2798 * Note we released the threadSuspendCountLock before getting here,
2799 * so if another thread is fiddling with its suspend count (perhaps
2800 * self-suspending for the debugger) it won't block while we're waiting
2801 * in here.
2802 */
2803 for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
2804 if (thread == self)
2805 continue;
2806
2807 /* debugger events don't suspend JDWP thread */
2808 if ((why == SUSPEND_FOR_DEBUG || why == SUSPEND_FOR_DEBUG_EVENT) &&
2809 thread->handle == dvmJdwpGetDebugThread(gDvm.jdwpState))
2810 continue;
2811
2812 /* wait for the other thread to see the pending suspend */
2813 waitForThreadSuspend(self, thread);
2814
2815 LOG_THREAD("threadid=%d: threadid=%d status=%d sc=%d dc=%d\n",
2816 self->threadId,
2817 thread->threadId, thread->status, thread->suspendCount,
2818 thread->dbgSuspendCount);
2819 }
2820
2821 dvmUnlockThreadList();
2822 unlockThreadSuspend();
2823
2824 LOG_THREAD("threadid=%d: SuspendAll complete\n", self->threadId);
2825 }
2826
2827 /*
2828 * Resume all threads that are currently suspended.
2829 *
2830 * The "why" must match with the previous suspend.
2831 */
2832 void dvmResumeAllThreads(SuspendCause why)
2833 {
2834 Thread* self = dvmThreadSelf();
2835 Thread* thread;
2836 int cc;
2837
2838 lockThreadSuspend("res-all", why); /* one suspend/resume at a time */
2839 LOG_THREAD("threadid=%d: ResumeAll starting\n", self->threadId);
2840
2841 /*
2842 * Decrement the suspend counts for all threads. No need for atomic
2843 * writes, since nobody should be moving until we decrement the count.
2844 * We do need to hold the thread list lock because of JNI attaches.
2845 */
2846 dvmLockThreadList(self);
2847 lockThreadSuspendCount();
2848 for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
2849 if (thread == self)
2850 continue;
2851
2852 /* debugger events don't suspend JDWP thread */
2853 if ((why == SUSPEND_FOR_DEBUG || why == SUSPEND_FOR_DEBUG_EVENT) &&
2854 thread->handle == dvmJdwpGetDebugThread(gDvm.jdwpState))
2855 {
2856 continue;
2857 }
2858
2859 if (thread->suspendCount > 0) {
2860 dvmAddToThreadSuspendCount(&thread->suspendCount, -1);
2861 if (why == SUSPEND_FOR_DEBUG || why == SUSPEND_FOR_DEBUG_EVENT)
2862 thread->dbgSuspendCount--;
2863 } else {
2864 LOG_THREAD("threadid=%d: suspendCount already zero\n",
2865 thread->threadId);
2866 }
2867 }
2868 unlockThreadSuspendCount();
2869 dvmUnlockThreadList();
2870
2871 /*
2872 * In some ways it makes sense to continue to hold the thread-suspend
2873 * lock while we issue the wakeup broadcast. It allows us to complete
2874 * one operation before moving on to the next, which simplifies the
2875 * thread activity debug traces.
2876 *
2877 * This approach caused us some difficulty under Linux, because the
2878 * condition variable broadcast not only made the threads runnable,
2879 * but actually caused them to execute, and it was a while before
2880 * the thread performing the wakeup had an opportunity to release the
2881 * thread-suspend lock.
2882 *
2883 * This is a problem because, when a thread tries to acquire that
2884 * lock, it times out after 3 seconds. If at some point the thread
2885 * is told to suspend, the clock resets; but since the VM is still
2886 * theoretically mid-resume, there's no suspend pending. If, for
2887 * example, the GC was waking threads up while the SIGQUIT handler
2888 * was trying to acquire the lock, we would occasionally time out on
2889 * a busy system and SignalCatcher would abort.
2890 *
2891 * We now perform the unlock before the wakeup broadcast. The next
2892 * suspend can't actually start until the broadcast completes and
2893 * returns, because we're holding the thread-suspend-count lock, but the
2894 * suspending thread is now able to make progress and we avoid the abort.
2895 *
2896 * (Technically there is a narrow window between when we release
2897 * the thread-suspend lock and grab the thread-suspend-count lock.
2898 * This could cause us to send a broadcast to threads with nonzero
2899 * suspend counts, but this is expected and they'll all just fall
2900 * right back to sleep. It's probably safe to grab the suspend-count
2901 * lock before releasing thread-suspend, since we're still following
2902 * the correct order of acquisition, but it feels weird.)
2903 */
2904
2905 LOG_THREAD("threadid=%d: ResumeAll waking others\n", self->threadId);
2906 unlockThreadSuspend();
2907
2908 /*
2909 * Broadcast a notification to all suspended threads, some or all of
2910 * which may choose to wake up. No need to wait for them.
2911 */
2912 lockThreadSuspendCount();
2913 cc = pthread_cond_broadcast(&gDvm.threadSuspendCountCond);
2914 assert(cc == 0);
2915 unlockThreadSuspendCount();
2916
2917 LOG_THREAD("threadid=%d: ResumeAll complete\n", self->threadId);
2918 }
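
/*
 * Illustrative sketch (editorial): the suspend-all/resume-all pairing as
 * a stop-the-world caller would use it. SUSPEND_FOR_GC is assumed to be
 * one of the SuspendCause values; whatever "why" is used for the suspend
 * must also be used for the matching resume.
 */
#if 0   /* example only, not compiled */
static void exampleStopTheWorld(void)
{
    dvmSuspendAllThreads(SUSPEND_FOR_GC);

    /*
     * Every other thread is now suspended or in a non-RUNNING state, so
     * the heap can be examined and modified without interference.
     */

    dvmResumeAllThreads(SUSPEND_FOR_GC);
}
#endif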
2919
2920 /*
2921 * Undo any debugger suspensions. This is called when the debugger
2922 * disconnects.
2923 */
2924 void dvmUndoDebuggerSuspensions(void)
2925 {
2926 Thread* self = dvmThreadSelf();
2927 Thread* thread;
2928 int cc;
2929
2930 lockThreadSuspend("undo", SUSPEND_FOR_DEBUG);
2931 LOG_THREAD("threadid=%d: UndoDebuggerSusp starting\n", self->threadId);
2932
2933 /*
2934 * Decrement the suspend counts for all threads. No need for atomic
2935 * writes, since nobody should be moving until we decrement the count.
2936 * We do need to hold the thread list lock because of JNI attaches.
2937 */
2938 dvmLockThreadList(self);
2939 lockThreadSuspendCount();
2940 for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
2941 if (thread == self)
2942 continue;
2943
2944 /* debugger events don't suspend JDWP thread */
2945 if (thread->handle == dvmJdwpGetDebugThread(gDvm.jdwpState)) {
2946 assert(thread->dbgSuspendCount == 0);
2947 continue;
2948 }
2949
2950 assert(thread->suspendCount >= thread->dbgSuspendCount);
2951 dvmAddToThreadSuspendCount(&thread->suspendCount,
2952 -thread->dbgSuspendCount);
2953 thread->dbgSuspendCount = 0;
2954 }
2955 unlockThreadSuspendCount();
2956 dvmUnlockThreadList();
2957
2958 /*
2959 * Broadcast a notification to all suspended threads, some or all of
2960 * which may choose to wake up. No need to wait for them.
2961 */
2962 lockThreadSuspendCount();
2963 cc = pthread_cond_broadcast(&gDvm.threadSuspendCountCond);
2964 assert(cc == 0);
2965 unlockThreadSuspendCount();
2966
2967 unlockThreadSuspend();
2968
2969 LOG_THREAD("threadid=%d: UndoDebuggerSusp complete\n", self->threadId);
2970 }
2971
2972 /*
2973 * Determine if a thread is suspended.
2974 *
2975 * As with all operations on foreign threads, the caller should hold
2976 * the thread list lock before calling.
2977 *
2978 * If the thread is suspending or waking, these fields could be changing
2979 * out from under us (or the thread could change state right after we
2980 * examine it), making this generally unreliable. This is chiefly
2981 * intended for use by the debugger.
2982 */
2983 bool dvmIsSuspended(const Thread* thread)
2984 {
2985 /*
2986 * The thread could be:
2987 * (1) Running happily. status is RUNNING, suspendCount is zero.
2988 * Return "false".
2989 * (2) Pending suspend. status is RUNNING, suspendCount is nonzero.
2990 * Return "false".
2991 * (3) Suspended. suspendCount is nonzero, and status is !RUNNING.
2992 * Return "true".
2993 * (4) Waking up. suspendCount is zero, status is SUSPENDED
2994 * Return "false" (since it could change out from under us, unless
2995 * we hold suspendCountLock).
2996 */
2997
2998 return (thread->suspendCount != 0 && thread->status != THREAD_RUNNING);
2999 }
3000
3001 /*
3002 * Wait until another thread self-suspends. This is specifically for
3003 * synchronization between the JDWP thread and a thread that has decided
3004 * to suspend itself after sending an event to the debugger.
3005 *
3006 * Threads that encounter "suspend all" events work as well -- the thread
3007 * in question suspends everybody else and then itself.
3008 *
3009 * We can't hold a thread lock here or in the caller, because we could
3010 * get here just before the to-be-waited-for-thread issues a "suspend all".
3011 * There's an opportunity for badness if the thread we're waiting for exits
3012 * and gets cleaned up, but since the thread in question is processing a
3013 * debugger event, that's not really a possibility. (To avoid deadlock,
3014 * it's important that we not be in THREAD_RUNNING while we wait.)
3015 */
3016 void dvmWaitForSuspend(Thread* thread)
3017 {
3018 Thread* self = dvmThreadSelf();
3019
3020 LOG_THREAD("threadid=%d: waiting for threadid=%d to sleep\n",
3021 self->threadId, thread->threadId);
3022
3023 assert(thread->handle != dvmJdwpGetDebugThread(gDvm.jdwpState));
3024 assert(thread != self);
3025 assert(self->status != THREAD_RUNNING);
3026
3027 waitForThreadSuspend(self, thread);
3028
3029 LOG_THREAD("threadid=%d: threadid=%d is now asleep\n",
3030 self->threadId, thread->threadId);
3031 }
3032
3033 /*
3034 * Check to see if we need to suspend ourselves. If so, go to sleep on
3035 * a condition variable.
3036 *
3037 * Returns "true" if we suspended ourselves.
3038 */
3039 static bool fullSuspendCheck(Thread* self)
3040 {
3041 assert(self != NULL);
3042 assert(self->suspendCount >= 0);
3043
3044 /*
3045 * Grab gDvm.threadSuspendCountLock. This gives us exclusive write
3046 * access to self->suspendCount.
3047 */
3048 lockThreadSuspendCount(); /* grab gDvm.threadSuspendCountLock */
3049
3050 bool needSuspend = (self->suspendCount != 0);
3051 if (needSuspend) {
3052 LOG_THREAD("threadid=%d: self-suspending\n", self->threadId);
3053 ThreadStatus oldStatus = self->status; /* should be RUNNING */
3054 self->status = THREAD_SUSPENDED;
3055
3056 while (self->suspendCount != 0) {
3057 /*
3058 * Wait for wakeup signal, releasing lock. The act of releasing
3059 * and re-acquiring the lock provides the memory barriers we
3060 * need for correct behavior on SMP.
3061 */
3062 dvmWaitCond(&gDvm.threadSuspendCountCond,
3063 &gDvm.threadSuspendCountLock);
3064 }
3065 assert(self->suspendCount == 0 && self->dbgSuspendCount == 0);
3066 self->status = oldStatus;
3067 LOG_THREAD("threadid=%d: self-reviving, status=%d\n",
3068 self->threadId, self->status);
3069 }
3070
3071 unlockThreadSuspendCount();
3072
3073 return needSuspend;
3074 }
3075
3076 /*
3077 * Check to see if a suspend is pending. If so, suspend the current
3078 * thread, and return "true" after we have been resumed.
3079 */
3080 bool dvmCheckSuspendPending(Thread* self)
3081 {
3082 assert(self != NULL);
3083 if (self->suspendCount == 0) {
3084 return false;
3085 } else {
3086 return fullSuspendCheck(self);
3087 }
3088 }
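
/*
 * Illustrative sketch (editorial): a periodic safe-point check in a long
 * stretch of THREAD_RUNNING work, the kind of use dvmCheckSuspendPending()
 * is intended for. The loop body is left as a placeholder comment.
 */
#if 0   /* example only, not compiled */
static void exampleLongRunningLoop(Thread* self)
{
    int i;

    for (i = 0; i < 1000000; i++) {
        /* ... do a bounded chunk of work while in THREAD_RUNNING ... */

        /* safe point: park here if a suspend-all is in progress */
        dvmCheckSuspendPending(self);
    }
}
#endif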
3089
3090 /*
3091 * Update our status.
3092 *
3093 * The "self" argument, which may be NULL, is accepted as an optimization.
3094 *
3095 * Returns the old status.
3096 */
3097 ThreadStatus dvmChangeStatus(Thread* self, ThreadStatus newStatus)
3098 {
3099 ThreadStatus oldStatus;
3100
3101 if (self == NULL)
3102 self = dvmThreadSelf();
3103
3104 LOGVV("threadid=%d: (status %d -> %d)\n",
3105 self->threadId, self->status, newStatus);
3106
3107 oldStatus = self->status;
3108
3109 if (newStatus == THREAD_RUNNING) {
3110 /*
3111 * Change our status to THREAD_RUNNING. The transition requires
3112 * that we check for pending suspension, because the VM considers
3113 * us to be "asleep" in all other states, and another thread could
3114 * be performing a GC now.
3115 *
3116 * The order of operations is very significant here. One way to
3117 * do this wrong is:
3118 *
3119 * GCing thread Our thread (in NATIVE)
3120 * ------------ ----------------------
3121 * check suspend count (== 0)
3122 * dvmSuspendAllThreads()
3123 * grab suspend-count lock
3124 * increment all suspend counts
3125 * release suspend-count lock
3126 * check thread state (== NATIVE)
3127 * all are suspended, begin GC
3128 * set state to RUNNING
3129 * (continue executing)
3130 *
3131 * We can correct this by grabbing the suspend-count lock and
3132 * performing both of our operations (check suspend count, set
3133 * state) while holding it, now we need to grab a mutex on every
3134 * transition to RUNNING.
3135 *
3136 * What we do instead is change the order of operations so that
3137 * the transition to RUNNING happens first. If we then detect
3138 * that the suspend count is nonzero, we switch to SUSPENDED.
3139 *
3140 * Appropriate compiler and memory barriers are required to ensure
3141 * that the operations are observed in the expected order.
3142 *
3143 * This does create a small window of opportunity where a GC in
3144 * progress could observe what appears to be a running thread (if
3145 * it happens to look between when we set to RUNNING and when we
3146 * switch to SUSPENDED). At worst this only affects assertions
3147 * and thread logging. (We could work around it with some sort
3148 * of intermediate "pre-running" state that is generally treated
3149 * as equivalent to running, but that doesn't seem worthwhile.)
3150 *
3151 * We can also solve this by combining the "status" and "suspend
3152 * count" fields into a single 32-bit value. This trades the
3153 * store/load barrier on transition to RUNNING for an atomic RMW
3154 * op on all transitions and all suspend count updates (also, all
3155          * accesses to the status or the suspend count require bit-fiddling).
3156 * It also eliminates the brief transition through RUNNING when
3157 * the thread is supposed to be suspended. This is possibly faster
3158 * on SMP and slightly more correct, but less convenient.
3159 */
3160 assert(oldStatus != THREAD_RUNNING);
3161 android_atomic_acquire_store(newStatus, &self->status);
3162 if (self->suspendCount != 0) {
3163 fullSuspendCheck(self);
3164 }
3165 } else {
3166 /*
3167 * Not changing to THREAD_RUNNING. No additional work required.
3168 *
3169 * We use a releasing store to ensure that, if we were RUNNING,
3170 * any updates we previously made to objects on the managed heap
3171 * will be observed before the state change.
3172 */
3173 assert(newStatus != THREAD_SUSPENDED);
3174 android_atomic_release_store(newStatus, &self->status);
3175 }
3176
3177 return oldStatus;
3178 }
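/*
 * Illustrative sketch only (not part of the original sources): the typical
 * way a caller brackets a blocking operation with dvmChangeStatus(), so the
 * GC can treat the thread as "asleep" while it waits.  "doSlowBlockingWork"
 * is a hypothetical placeholder.
 *
 *   ThreadStatus oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
 *   doSlowBlockingWork();               // GC may run while we block
 *   dvmChangeStatus(self, oldStatus);   // back to RUNNING; this re-checks
 *                                       // for a pending suspend request
 */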
3179
3180 /*
3181 * Get a statically defined thread group from a field in the ThreadGroup
3182 * Class object. Expected arguments are "mMain" and "mSystem".
3183 */
3184 static Object* getStaticThreadGroup(const char* fieldName)
3185 {
3186 StaticField* groupField;
3187 Object* groupObj;
3188
3189 groupField = dvmFindStaticField(gDvm.classJavaLangThreadGroup,
3190 fieldName, "Ljava/lang/ThreadGroup;");
3191 if (groupField == NULL) {
3192 LOGE("java.lang.ThreadGroup does not have an '%s' field\n", fieldName);
3193 dvmThrowException("Ljava/lang/IncompatibleClassChangeError;", NULL);
3194 return NULL;
3195 }
3196 groupObj = dvmGetStaticFieldObject(groupField);
3197 if (groupObj == NULL) {
3198 LOGE("java.lang.ThreadGroup.%s not initialized\n", fieldName);
3199 dvmThrowException("Ljava/lang/InternalError;", NULL);
3200 return NULL;
3201 }
3202
3203 return groupObj;
3204 }
3205 Object* dvmGetSystemThreadGroup(void)
3206 {
3207 return getStaticThreadGroup("mSystem");
3208 }
3209 Object* dvmGetMainThreadGroup(void)
3210 {
3211 return getStaticThreadGroup("mMain");
3212 }
3213
3214 /*
3215 * Given a VMThread object, return the associated Thread*.
3216 *
3217 * NOTE: if the thread detaches, the struct Thread will disappear, and
3218 * we will be touching invalid data. For safety, lock the thread list
3219 * before calling this.
3220 */
3221 Thread* dvmGetThreadFromThreadObject(Object* vmThreadObj)
3222 {
3223 int vmData;
3224
3225 vmData = dvmGetFieldInt(vmThreadObj, gDvm.offJavaLangVMThread_vmData);
3226
3227     if (false) {    /* disabled sanity check: verify vmData is actually on the thread list */
3228 Thread* thread = gDvm.threadList;
3229 while (thread != NULL) {
3230 if ((Thread*)vmData == thread)
3231 break;
3232
3233 thread = thread->next;
3234 }
3235
3236 if (thread == NULL) {
3237 LOGW("WARNING: vmThreadObj=%p has thread=%p, not in thread list\n",
3238 vmThreadObj, (Thread*)vmData);
3239 vmData = 0;
3240 }
3241 }
3242
3243 return (Thread*) vmData;
3244 }
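/*
 * Illustrative sketch only: the call pattern implied by the note above,
 * which keeps the Thread* from being freed by a detaching thread while
 * we use it.
 *
 *   dvmLockThreadList(dvmThreadSelf());
 *   Thread* thread = dvmGetThreadFromThreadObject(vmThreadObj);
 *   if (thread != NULL) {
 *       ...inspect or suspend "thread"...
 *   }
 *   dvmUnlockThreadList();
 */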
3245
3246 /*
3247 * Given a pthread handle, return the associated Thread*.
3248 * Caller must hold the thread list lock.
3249 *
3250 * Returns NULL if the thread was not found.
3251 */
3252 Thread* dvmGetThreadByHandle(pthread_t handle)
3253 {
3254 Thread* thread;
3255 for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
3256 if (thread->handle == handle)
3257 break;
3258 }
3259 return thread;
3260 }
3261
3262 /*
3263 * Given a threadId, return the associated Thread*.
3264 * Caller must hold the thread list lock.
3265 *
3266 * Returns NULL if the thread was not found.
3267 */
3268 Thread* dvmGetThreadByThreadId(u4 threadId)
3269 {
3270 Thread* thread;
3271 for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
3272 if (thread->threadId == threadId)
3273 break;
3274 }
3275 return thread;
3276 }
3277
3278
3279 /*
3280 * Conversion map for "nice" values.
3281 *
3282 * We use Android thread priority constants to be consistent with the rest
3283 * of the system. In some cases adjacent entries may overlap.
3284 */
3285 static const int kNiceValues[10] = {
3286 ANDROID_PRIORITY_LOWEST, /* 1 (MIN_PRIORITY) */
3287 ANDROID_PRIORITY_BACKGROUND + 6,
3288 ANDROID_PRIORITY_BACKGROUND + 3,
3289 ANDROID_PRIORITY_BACKGROUND,
3290 ANDROID_PRIORITY_NORMAL, /* 5 (NORM_PRIORITY) */
3291 ANDROID_PRIORITY_NORMAL - 2,
3292 ANDROID_PRIORITY_NORMAL - 4,
3293 ANDROID_PRIORITY_URGENT_DISPLAY + 3,
3294 ANDROID_PRIORITY_URGENT_DISPLAY + 2,
3295 ANDROID_PRIORITY_URGENT_DISPLAY /* 10 (MAX_PRIORITY) */
3296 };
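/*
 * Worked example of the mapping above, assuming the usual constants from
 * utils/threads.h (ANDROID_PRIORITY_LOWEST=19, ANDROID_PRIORITY_BACKGROUND=10,
 * ANDROID_PRIORITY_NORMAL=0, ANDROID_PRIORITY_URGENT_DISPLAY=-8); the exact
 * values are platform-defined and may differ:
 *
 *   Java priority  1 (MIN_PRIORITY)  -> nice 19
 *   Java priority  5 (NORM_PRIORITY) -> nice  0
 *   Java priority 10 (MAX_PRIORITY)  -> nice -8
 */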
3297
3298 /*
3299 * Change the priority of a system thread to match that of the Thread object.
3300 *
3301 * We map a priority value from 1-10 to Linux "nice" values, where lower
3302 * numbers indicate higher priority.
3303 */
3304 void dvmChangeThreadPriority(Thread* thread, int newPriority)
3305 {
3306 pid_t pid = thread->systemTid;
3307 int newNice;
3308
3309 if (newPriority < 1 || newPriority > 10) {
3310 LOGW("bad priority %d\n", newPriority);
3311 newPriority = 5;
3312 }
3313 newNice = kNiceValues[newPriority-1];
3314
3315 if (newNice >= ANDROID_PRIORITY_BACKGROUND) {
3316 set_sched_policy(dvmGetSysThreadId(), SP_BACKGROUND);
3317 } else if (getpriority(PRIO_PROCESS, pid) >= ANDROID_PRIORITY_BACKGROUND) {
3318 set_sched_policy(dvmGetSysThreadId(), SP_FOREGROUND);
3319 }
3320
3321 if (setpriority(PRIO_PROCESS, pid, newNice) != 0) {
3322 char* str = dvmGetThreadName(thread);
3323 LOGI("setPriority(%d) '%s' to prio=%d(n=%d) failed: %s\n",
3324 pid, str, newPriority, newNice, strerror(errno));
3325 free(str);
3326 } else {
3327 LOGV("setPriority(%d) to prio=%d(n=%d)\n",
3328 pid, newPriority, newNice);
3329 }
3330 }
3331
3332 /*
3333 * Get the thread priority for the current thread by querying the system.
3334 * This is useful when attaching a thread through JNI.
3335 *
3336 * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
3337 */
3338 static int getThreadPriorityFromSystem(void)
3339 {
3340 int i, sysprio, jprio;
3341
3342 errno = 0;
3343 sysprio = getpriority(PRIO_PROCESS, 0);
3344 if (sysprio == -1 && errno != 0) {
3345 LOGW("getpriority() failed: %s\n", strerror(errno));
3346 return THREAD_NORM_PRIORITY;
3347 }
3348
3349 jprio = THREAD_MIN_PRIORITY;
3350 for (i = 0; i < NELEM(kNiceValues); i++) {
3351 if (sysprio >= kNiceValues[i])
3352 break;
3353 jprio++;
3354 }
3355 if (jprio > THREAD_MAX_PRIORITY)
3356 jprio = THREAD_MAX_PRIORITY;
3357
3358 return jprio;
3359 }
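/*
 * Worked example of the reverse mapping above, under the same assumed
 * constants as the kNiceValues example: a thread running at nice 0 stops
 * at table index 4 and reports Java priority 5, while nice -8 (or anything
 * more negative) reports priority 10.
 */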
3360
3361
3362 /*
3363 * Return true if the thread is on gDvm.threadList.
3364 * Caller should not hold gDvm.threadListLock.
3365 */
3366 bool dvmIsOnThreadList(const Thread* thread)
3367 {
3368 bool ret = false;
3369
3370 dvmLockThreadList(NULL);
3371 if (thread == gDvm.threadList) {
3372 ret = true;
3373 } else {
3374 ret = thread->prev != NULL || thread->next != NULL;
3375 }
3376 dvmUnlockThreadList();
3377
3378 return ret;
3379 }
3380
3381 /*
3382 * Dump a thread to the log file -- just calls dvmDumpThreadEx() with an
3383 * output target.
3384 */
3385 void dvmDumpThread(Thread* thread, bool isRunning)
3386 {
3387 DebugOutputTarget target;
3388
3389 dvmCreateLogOutputTarget(&target, ANDROID_LOG_INFO, LOG_TAG);
3390 dvmDumpThreadEx(&target, thread, isRunning);
3391 }
3392
3393 /*
3394 * Try to get the scheduler group.
3395 *
3396 * The data from /proc/<pid>/cgroup looks (something) like:
3397 * 2:cpu:/bg_non_interactive
3398 * 1:cpuacct:/
3399 *
3400 * We return the part on the "cpu" line after the '/', which will be an
3401 * empty string for the default cgroup. If the string is longer than
3402 * "bufLen", the string will be truncated.
3403 *
3404 * On error, -1 is returned, and an error description will be stored in
3405 * the buffer.
3406 */
3407 static int getSchedulerGroup(int tid, char* buf, size_t bufLen)
3408 {
3409 #ifdef HAVE_ANDROID_OS
3410 char pathBuf[32];
3411 char lineBuf[256];
3412 FILE *fp;
3413
3414 snprintf(pathBuf, sizeof(pathBuf), "/proc/%d/cgroup", tid);
3415 if ((fp = fopen(pathBuf, "r")) == NULL) {
3416 snprintf(buf, bufLen, "[fopen-error:%d]", errno);
3417 return -1;
3418 }
3419
3420 while (fgets(lineBuf, sizeof(lineBuf) -1, fp) != NULL) {
3421 char* subsys;
3422 char* grp;
3423 size_t len;
3424
3425 /* Junk the first field */
3426 subsys = strchr(lineBuf, ':');
3427 if (subsys == NULL) {
3428 goto out_bad_data;
3429 }
3430
3431 if (strncmp(subsys, ":cpu:", 5) != 0) {
3432 /* Not the subsys we're looking for */
3433 continue;
3434 }
3435
3436 grp = strchr(subsys, '/');
3437 if (grp == NULL) {
3438 goto out_bad_data;
3439 }
3440 grp++; /* Drop the leading '/' */
3441
3442 len = strlen(grp);
3443 grp[len-1] = '\0'; /* Drop the trailing '\n' */
3444
3445 if (bufLen <= len) {
3446 len = bufLen - 1;
3447 }
3448 strncpy(buf, grp, len);
3449 buf[len] = '\0';
3450 fclose(fp);
3451 return 0;
3452 }
3453
3454 snprintf(buf, bufLen, "[no-cpu-subsys]");
3455 fclose(fp);
3456 return -1;
3457
3458 out_bad_data:
3459 LOGE("Bad cgroup data {%s}", lineBuf);
3460 snprintf(buf, bufLen, "[data-parse-failed]");
3461 fclose(fp);
3462 return -1;
3463
3464 #else
3465 snprintf(buf, bufLen, "[n/a]");
3466 return -1;
3467 #endif
3468 }
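/*
 * For example, given the sample /proc contents shown above, the line
 * "2:cpu:/bg_non_interactive" produces buf="bg_non_interactive" and a
 * return value of 0, while "2:cpu:/" produces the empty string (which
 * dvmDumpThreadEx then reports as "default").
 */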
3469
3470 /*
3471 * Convert ThreadStatus to a string.
3472 */
3473 const char* dvmGetThreadStatusStr(ThreadStatus status)
3474 {
3475 switch (status) {
3476 case THREAD_ZOMBIE: return "ZOMBIE";
3477 case THREAD_RUNNING: return "RUNNABLE";
3478 case THREAD_TIMED_WAIT: return "TIMED_WAIT";
3479 case THREAD_MONITOR: return "MONITOR";
3480 case THREAD_WAIT: return "WAIT";
3481 case THREAD_INITIALIZING: return "INITIALIZING";
3482 case THREAD_STARTING: return "STARTING";
3483 case THREAD_NATIVE: return "NATIVE";
3484 case THREAD_VMWAIT: return "VMWAIT";
3485 case THREAD_SUSPENDED: return "SUSPENDED";
3486 default: return "UNKNOWN";
3487 }
3488 }
3489
3490 /*
3491 * Print information about the specified thread.
3492 *
3493 * Works best when the thread in question is "self" or has been suspended.
3494 * When dumping a separate thread that's still running, set "isRunning" to
3495 * use a more cautious thread dump function.
3496 */
3497 void dvmDumpThreadEx(const DebugOutputTarget* target, Thread* thread,
3498 bool isRunning)
3499 {
3500 Object* threadObj;
3501 Object* groupObj;
3502 StringObject* nameStr;
3503 char* threadName = NULL;
3504 char* groupName = NULL;
3505 char schedulerGroupBuf[32];
3506 bool isDaemon;
3507 int priority; // java.lang.Thread priority
3508 int policy; // pthread policy
3509 struct sched_param sp; // pthread scheduling parameters
3510 char schedstatBuf[64]; // contents of /proc/[pid]/task/[tid]/schedstat
3511 int schedstatFd;
3512
3513 /*
3514 * Get the java.lang.Thread object. This function gets called from
3515 * some weird debug contexts, so it's possible that there's a GC in
3516 * progress on some other thread. To decrease the chances of the
3517 * thread object being moved out from under us, we add the reference
3518 * to the tracked allocation list, which pins it in place.
3519 *
3520 * If threadObj is NULL, the thread is still in the process of being
3521 * attached to the VM, and there's really nothing interesting to
3522 * say about it yet.
3523 */
3524 threadObj = thread->threadObj;
3525 if (threadObj == NULL) {
3526 LOGI("Can't dump thread %d: threadObj not set\n", thread->threadId);
3527 return;
3528 }
3529 dvmAddTrackedAlloc(threadObj, NULL);
3530
3531 nameStr = (StringObject*) dvmGetFieldObject(threadObj,
3532 gDvm.offJavaLangThread_name);
3533 threadName = dvmCreateCstrFromString(nameStr);
3534
3535 priority = dvmGetFieldInt(threadObj, gDvm.offJavaLangThread_priority);
3536 isDaemon = dvmGetFieldBoolean(threadObj, gDvm.offJavaLangThread_daemon);
3537
3538 if (pthread_getschedparam(pthread_self(), &policy, &sp) != 0) {
3539 LOGW("Warning: pthread_getschedparam failed\n");
3540 policy = -1;
3541 sp.sched_priority = -1;
3542 }
3543 if (getSchedulerGroup(thread->systemTid, schedulerGroupBuf,
3544 sizeof(schedulerGroupBuf)) == 0 &&
3545 schedulerGroupBuf[0] == '\0') {
3546 strcpy(schedulerGroupBuf, "default");
3547 }
3548
3549 /* a null value for group is not expected, but deal with it anyway */
3550 groupObj = (Object*) dvmGetFieldObject(threadObj,
3551 gDvm.offJavaLangThread_group);
3552 if (groupObj != NULL) {
3553 int offset = dvmFindFieldOffset(gDvm.classJavaLangThreadGroup,
3554 "name", "Ljava/lang/String;");
3555 if (offset < 0) {
3556 LOGW("Unable to find 'name' field in ThreadGroup\n");
3557 } else {
3558 nameStr = (StringObject*) dvmGetFieldObject(groupObj, offset);
3559 groupName = dvmCreateCstrFromString(nameStr);
3560 }
3561 }
3562 if (groupName == NULL)
3563 groupName = strdup("(null; initializing?)");
3564
3565 dvmPrintDebugMessage(target,
3566 "\"%s\"%s prio=%d tid=%d %s%s\n",
3567 threadName, isDaemon ? " daemon" : "",
3568 priority, thread->threadId, dvmGetThreadStatusStr(thread->status),
3569 #if defined(WITH_JIT)
3570 thread->inJitCodeCache ? " JIT" : ""
3571 #else
3572 ""
3573 #endif
3574 );
3575 dvmPrintDebugMessage(target,
3576 " | group=\"%s\" sCount=%d dsCount=%d obj=%p self=%p\n",
3577 groupName, thread->suspendCount, thread->dbgSuspendCount,
3578 thread->threadObj, thread);
3579 dvmPrintDebugMessage(target,
3580 " | sysTid=%d nice=%d sched=%d/%d cgrp=%s handle=%d\n",
3581 thread->systemTid, getpriority(PRIO_PROCESS, thread->systemTid),
3582 policy, sp.sched_priority, schedulerGroupBuf, (int)thread->handle);
3583
3584 snprintf(schedstatBuf, sizeof(schedstatBuf), "/proc/%d/task/%d/schedstat",
3585 getpid(), thread->systemTid);
3586 schedstatFd = open(schedstatBuf, O_RDONLY);
3587 if (schedstatFd >= 0) {
3588 int bytes;
3589 bytes = read(schedstatFd, schedstatBuf, sizeof(schedstatBuf) - 1);
3590 close(schedstatFd);
3591 if (bytes > 1) {
3592 schedstatBuf[bytes-1] = 0; // trailing newline
3593 dvmPrintDebugMessage(target, " | schedstat=( %s )\n", schedstatBuf);
3594 }
3595 }
3596
3597 #ifdef WITH_MONITOR_TRACKING
3598 if (!isRunning) {
3599 LockedObjectData* lod = thread->pLockedObjects;
3600 if (lod != NULL)
3601 dvmPrintDebugMessage(target, " | monitors held:\n");
3602 else
3603 dvmPrintDebugMessage(target, " | monitors held: <none>\n");
3604 while (lod != NULL) {
3605 Object* obj = lod->obj;
3606 if (obj->clazz == gDvm.classJavaLangClass) {
3607 ClassObject* clazz = (ClassObject*) obj;
3608 dvmPrintDebugMessage(target, " > %p[%d] (%s object for class %s)\n",
3609 obj, lod->recursionCount, obj->clazz->descriptor,
3610 clazz->descriptor);
3611 } else {
3612 dvmPrintDebugMessage(target, " > %p[%d] (%s)\n",
3613 obj, lod->recursionCount, obj->clazz->descriptor);
3614 }
3615 lod = lod->next;
3616 }
3617 }
3618 #endif
3619
3620 if (isRunning)
3621 dvmDumpRunningThreadStack(target, thread);
3622 else
3623 dvmDumpThreadStack(target, thread);
3624
3625 dvmReleaseTrackedAlloc(threadObj, NULL);
3626 free(threadName);
3627 free(groupName);
3628 }
3629
3630 /*
3631 * Get the name of a thread.
3632 *
3633 * For correctness, the caller should hold the thread list lock to ensure
3634 * that the thread doesn't go away mid-call.
3635 *
3636 * Returns a newly-allocated string, or NULL if the Thread doesn't have a name.
3637 */
3638 char* dvmGetThreadName(Thread* thread)
3639 {
3640 StringObject* nameObj;
3641
3642 if (thread->threadObj == NULL) {
3643 LOGW("threadObj is NULL, name not available\n");
3644 return strdup("-unknown-");
3645 }
3646
3647 nameObj = (StringObject*)
3648 dvmGetFieldObject(thread->threadObj, gDvm.offJavaLangThread_name);
3649 return dvmCreateCstrFromString(nameObj);
3650 }
3651
3652 /*
3653 * Dump all threads to the log file -- just calls dvmDumpAllThreadsEx() with
3654 * an output target.
3655 */
3656 void dvmDumpAllThreads(bool grabLock)
3657 {
3658 DebugOutputTarget target;
3659
3660 dvmCreateLogOutputTarget(&target, ANDROID_LOG_INFO, LOG_TAG);
3661 dvmDumpAllThreadsEx(&target, grabLock);
3662 }
3663
3664 /*
3665 * Print information about all known threads. Assumes they have been
3666 * suspended (or are in a non-interpreting state, e.g. WAIT or NATIVE).
3667 *
3668  * If "grabLock" is true, we grab the thread list lock. This is important
3669 * to do unless the caller already holds the lock.
3670 */
3671 void dvmDumpAllThreadsEx(const DebugOutputTarget* target, bool grabLock)
3672 {
3673 Thread* thread;
3674
3675 dvmPrintDebugMessage(target, "DALVIK THREADS:\n");
3676
3677 #ifdef HAVE_ANDROID_OS
3678 dvmPrintDebugMessage(target,
3679 "(mutexes: tll=%x tsl=%x tscl=%x ghl=%x hwl=%x hwll=%x)\n",
3680 gDvm.threadListLock.value,
3681 gDvm._threadSuspendLock.value,
3682 gDvm.threadSuspendCountLock.value,
3683 gDvm.gcHeapLock.value,
3684 gDvm.heapWorkerLock.value,
3685 gDvm.heapWorkerListLock.value);
3686 #endif
3687
3688 if (grabLock)
3689 dvmLockThreadList(dvmThreadSelf());
3690
3691 thread = gDvm.threadList;
3692 while (thread != NULL) {
3693 dvmDumpThreadEx(target, thread, false);
3694
3695 /* verify link */
3696 assert(thread->next == NULL || thread->next->prev == thread);
3697
3698 thread = thread->next;
3699 }
3700
3701 if (grabLock)
3702 dvmUnlockThreadList();
3703 }
3704
3705 /*
3706 * Nuke the target thread from orbit.
3707 *
3708 * The idea is to send a "crash" signal to the target thread so that
3709 * debuggerd will take notice and dump an appropriate stack trace.
3710 * Because of the way debuggerd works, we have to throw the same signal
3711 * at it twice.
3712 *
3713 * This does not necessarily cause the entire process to stop, but once a
3714 * thread has been nuked the rest of the system is likely to be unstable.
3715 * This returns so that some limited set of additional operations may be
3716 * performed, but it's advisable (and expected) to call dvmAbort soon.
3717 * (This is NOT a way to simply cancel a thread.)
3718 */
3719 void dvmNukeThread(Thread* thread)
3720 {
3721 int killResult;
3722
3723 /* suppress the heapworker watchdog to assist anyone using a debugger */
3724 gDvm.nativeDebuggerActive = true;
3725
3726 /*
3727 * Send the signals, separated by a brief interval to allow debuggerd
3728 * to work its magic. An uncommon signal like SIGFPE or SIGSTKFLT
3729 * can be used instead of SIGSEGV to avoid making it look like the
3730 * code actually crashed at the current point of execution.
3731 *
3732 * (Observed behavior: with SIGFPE, debuggerd will dump the target
3733 * thread and then the thread that calls dvmAbort. With SIGSEGV,
3734 * you don't get the second stack trace; possibly something in the
3735 * kernel decides that a signal has already been sent and it's time
3736 * to just kill the process. The position in the current thread is
3737 * generally known, so the second dump is not useful.)
3738 *
3739 * The target thread can continue to execute between the two signals.
3740 * (The first just causes debuggerd to attach to it.)
3741 */
3742 LOGD("threadid=%d: sending two SIGSTKFLTs to threadid=%d (tid=%d) to"
3743 " cause debuggerd dump\n",
3744 dvmThreadSelf()->threadId, thread->threadId, thread->systemTid);
3745 killResult = pthread_kill(thread->handle, SIGSTKFLT);
3746 if (killResult != 0) {
3747 LOGD("NOTE: pthread_kill #1 failed: %s\n", strerror(killResult));
3748 }
3749 usleep(2 * 1000 * 1000); // TODO: timed-wait until debuggerd attaches
3750 killResult = pthread_kill(thread->handle, SIGSTKFLT);
3751 if (killResult != 0) {
3752 LOGD("NOTE: pthread_kill #2 failed: %s\n", strerror(killResult));
3753 }
3754 LOGD("Sent, pausing to let debuggerd run\n");
3755 usleep(8 * 1000 * 1000); // TODO: timed-wait until debuggerd finishes
3756
3757     /* ignore SIGSEGV so the eventual dvmAbort() doesn't notify debuggerd */
3758 signal(SIGSEGV, SIG_IGN);
3759 LOGD("Continuing\n");
3760 }
3761
3762 #ifdef WITH_MONITOR_TRACKING
3763 /*
3764 * Count up the #of locked objects in the current thread.
3765 */
3766 static int getThreadObjectCount(const Thread* self)
3767 {
3768 LockedObjectData* lod;
3769 int count = 0;
3770
3771 lod = self->pLockedObjects;
3772 while (lod != NULL) {
3773 count++;
3774 lod = lod->next;
3775 }
3776 return count;
3777 }
3778
3779 /*
3780 * Add the object to the thread's locked object list if it doesn't already
3781 * exist. The most recently added object is the most likely to be released
3782 * next, so we insert at the head of the list.
3783 *
3784 * If it already exists, we increase the recursive lock count.
3785 *
3786 * The object's lock may be thin or fat.
3787 */
3788 void dvmAddToMonitorList(Thread* self, Object* obj, bool withTrace)
3789 {
3790 LockedObjectData* newLod;
3791 LockedObjectData* lod;
3792 int* trace;
3793 int depth;
3794
3795 lod = self->pLockedObjects;
3796 while (lod != NULL) {
3797 if (lod->obj == obj) {
3798 lod->recursionCount++;
3799 LOGV("+++ +recursive lock %p -> %d\n", obj, lod->recursionCount);
3800 return;
3801 }
3802 lod = lod->next;
3803 }
3804
3805 newLod = (LockedObjectData*) calloc(1, sizeof(LockedObjectData));
3806 if (newLod == NULL) {
3807         LOGE("calloc failed on %d bytes\n", (int) sizeof(LockedObjectData));
3808 return;
3809 }
3810 newLod->obj = obj;
3811 newLod->recursionCount = 0;
3812
3813 if (withTrace) {
3814 trace = dvmFillInStackTraceRaw(self, &depth);
3815 newLod->rawStackTrace = trace;
3816 newLod->stackDepth = depth;
3817 }
3818
3819 newLod->next = self->pLockedObjects;
3820 self->pLockedObjects = newLod;
3821
3822 LOGV("+++ threadid=%d: added %p, now %d\n",
3823 self->threadId, newLod, getThreadObjectCount(self));
3824 }
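/*
 * Illustrative sketch only (hypothetical call sites): the monitor-enter and
 * monitor-exit paths are expected to keep this list balanced.
 *
 *   dvmAddToMonitorList(self, obj, true);    // after acquiring obj's monitor
 *   ...critical section...
 *   dvmRemoveFromMonitorList(self, obj);     // before releasing the monitor
 */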
3825
3826 /*
3827 * Remove the object from the thread's locked object list. If the entry
3828 * has a nonzero recursion count, we just decrement the count instead.
3829 */
3830 void dvmRemoveFromMonitorList(Thread* self, Object* obj)
3831 {
3832 LockedObjectData* lod;
3833 LockedObjectData* prevLod;
3834
3835 lod = self->pLockedObjects;
3836 prevLod = NULL;
3837 while (lod != NULL) {
3838 if (lod->obj == obj) {
3839 if (lod->recursionCount > 0) {
3840 lod->recursionCount--;
3841 LOGV("+++ -recursive lock %p -> %d\n",
3842 obj, lod->recursionCount);
3843 return;
3844 } else {
3845 break;
3846 }
3847 }
3848 prevLod = lod;
3849 lod = lod->next;
3850 }
3851
3852 if (lod == NULL) {
3853 LOGW("BUG: object %p not found in thread's lock list\n", obj);
3854 return;
3855 }
3856 if (prevLod == NULL) {
3857 /* first item in list */
3858 assert(self->pLockedObjects == lod);
3859 self->pLockedObjects = lod->next;
3860 } else {
3861 /* middle/end of list */
3862 prevLod->next = lod->next;
3863 }
3864
3865 LOGV("+++ threadid=%d: removed %p, now %d\n",
3866 self->threadId, lod, getThreadObjectCount(self));
3867 free(lod->rawStackTrace);
3868 free(lod);
3869 }
3870
3871 /*
3872 * If the specified object is already in the thread's locked object list,
3873 * return the LockedObjectData struct. Otherwise return NULL.
3874 */
3875 LockedObjectData* dvmFindInMonitorList(const Thread* self, const Object* obj)
3876 {
3877 LockedObjectData* lod;
3878
3879 lod = self->pLockedObjects;
3880 while (lod != NULL) {
3881 if (lod->obj == obj)
3882 return lod;
3883 lod = lod->next;
3884 }
3885 return NULL;
3886 }
3887 #endif /*WITH_MONITOR_TRACKING*/
3888
3889
3890 /*
3891 * GC helper functions
3892 */
3893
3894 /*
3895 * Add the contents of the registers from the interpreted call stack.
3896 */
3897 static void gcScanInterpStackReferences(Thread *thread)
3898 {
3899 const u4 *framePtr;
3900 #if WITH_EXTRA_GC_CHECKS > 1
3901 bool first = true;
3902 #endif
3903
3904 framePtr = (const u4 *)thread->curFrame;
3905 while (framePtr != NULL) {
3906 const StackSaveArea *saveArea;
3907 const Method *method;
3908
3909 saveArea = SAVEAREA_FROM_FP(framePtr);
3910 method = saveArea->method;
3911 if (method != NULL) {
3912 #ifdef COUNT_PRECISE_METHODS
3913 /* the GC is running, so no lock required */
3914 if (dvmPointerSetAddEntry(gDvm.preciseMethods, method))
3915 LOGI("PGC: added %s.%s %p\n",
3916 method->clazz->descriptor, method->name, method);
3917 #endif
3918 #if WITH_EXTRA_GC_CHECKS > 1
3919 /*
3920 * May also want to enable the memset() in the "invokeMethod"
3921 * goto target in the portable interpreter. That sets the stack
3922 * to a pattern that makes referring to uninitialized data
3923 * very obvious.
3924 */
3925
3926 if (first) {
3927 /*
3928                  * This is the first frame and it isn't native; check the
3929                  * "alternate" saved PC as a sanity check.
3930 *
3931 * It seems like we could check the second frame if the first
3932 * is native, since the PCs should be the same. It turns out
3933 * this doesn't always work. The problem is that we could
3934 * have calls in the sequence:
3935 * interp method #2
3936 * native method
3937 * interp method #1
3938 *
3939 * and then GC while in the native method after returning
3940 * from interp method #2. The currentPc on the stack is
3941 * for interp method #1, but thread->currentPc2 is still
3942 * set for the last thing interp method #2 did.
3943 *
3944 * This can also happen in normal execution:
3945 * - sget-object on not-yet-loaded class
3946 * - class init updates currentPc2
3947 * - static field init is handled by parsing annotations;
3948 * static String init requires creation of a String object,
3949 * which can cause a GC
3950 *
3951 * Essentially, any pattern that involves executing
3952 * interpreted code and then causes an allocation without
3953 * executing instructions in the original method will hit
3954 * this. These are rare enough that the test still has
3955 * some value.
3956 */
3957 if (saveArea->xtra.currentPc != thread->currentPc2) {
3958 LOGW("PGC: savedPC(%p) != current PC(%p), %s.%s ins=%p\n",
3959 saveArea->xtra.currentPc, thread->currentPc2,
3960 method->clazz->descriptor, method->name, method->insns);
3961 if (saveArea->xtra.currentPc != NULL)
3962 LOGE(" pc inst = 0x%04x\n", *saveArea->xtra.currentPc);
3963 if (thread->currentPc2 != NULL)
3964 LOGE(" pc2 inst = 0x%04x\n", *thread->currentPc2);
3965 dvmDumpThread(thread, false);
3966 }
3967 } else {
3968 /*
3969 * It's unusual, but not impossible, for a non-first frame
3970 * to be at something other than a method invocation. For
3971 * example, if we do a new-instance on a nonexistent class,
3972 * we'll have a lot of class loader activity on the stack
3973 * above the frame with the "new" operation. Could also
3974 * happen while we initialize a Throwable when an instruction
3975 * fails.
3976 *
3977 * So there's not much we can do here to verify the PC,
3978 * except to verify that it's a GC point.
3979 */
3980 }
3981 assert(saveArea->xtra.currentPc != NULL);
3982 #endif
3983
3984 const RegisterMap* pMap;
3985 const u1* regVector;
3986 int i;
3987
3988 Method* nonConstMethod = (Method*) method; // quiet gcc
3989 pMap = dvmGetExpandedRegisterMap(nonConstMethod);
3990 if (pMap != NULL) {
3991 /* found map, get registers for this address */
3992 int addr = saveArea->xtra.currentPc - method->insns;
3993 regVector = dvmRegisterMapGetLine(pMap, addr);
3994 if (regVector == NULL) {
3995 LOGW("PGC: map but no entry for %s.%s addr=0x%04x\n",
3996 method->clazz->descriptor, method->name, addr);
3997 } else {
3998 LOGV("PGC: found map for %s.%s 0x%04x (t=%d)\n",
3999 method->clazz->descriptor, method->name, addr,
4000 thread->threadId);
4001 }
4002 } else {
4003 /*
4004 * No map found. If precise GC is disabled this is
4005 * expected -- we don't create pointers to the map data even
4006 * if it's present -- but if it's enabled it means we're
4007 * unexpectedly falling back on a conservative scan, so it's
4008 * worth yelling a little.
4009 */
4010 if (gDvm.preciseGc) {
4011 LOGVV("PGC: no map for %s.%s\n",
4012 method->clazz->descriptor, method->name);
4013 }
4014 regVector = NULL;
4015 }
4016
4017 if (regVector == NULL) {
4018 /* conservative scan */
4019 for (i = method->registersSize - 1; i >= 0; i--) {
4020 u4 rval = *framePtr++;
4021 if (rval != 0 && (rval & 0x3) == 0) {
4022 dvmMarkIfObject((Object *)rval);
4023 }
4024 }
4025 } else {
4026 /*
4027 * Precise scan. v0 is at the lowest address on the
4028 * interpreted stack, and is the first bit in the register
4029 * vector, so we can walk through the register map and
4030 * memory in the same direction.
4031 *
4032 * A '1' bit indicates a live reference.
4033 */
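                /*
                 * Worked example of the decoding below (derived from this
                 * code, not from the register-map spec): bytes from
                 * "regVector" are consumed low bit first, one bit per
                 * register starting at v0.  If the first byte is 0x05
                 * (binary 00000101), then v0 and v2 hold live references
                 * and v1 does not.  The 0x0100 sentinel marks when the
                 * current byte is exhausted so the next one is loaded.
                 */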
4034 u2 bits = 1 << 1;
4035 for (i = method->registersSize - 1; i >= 0; i--) {
4036 u4 rval = *framePtr++;
4037
4038 bits >>= 1;
4039 if (bits == 1) {
4040 /* set bit 9 so we can tell when we're empty */
4041 bits = *regVector++ | 0x0100;
4042 LOGVV("loaded bits: 0x%02x\n", bits & 0xff);
4043 }
4044
4045 if (rval != 0 && (bits & 0x01) != 0) {
4046 /*
4047 * Non-null, register marked as live reference. This
4048 * should always be a valid object.
4049 */
4050 #if WITH_EXTRA_GC_CHECKS > 0
4051 if ((rval & 0x3) != 0 ||
4052 !dvmIsValidObject((Object*) rval))
4053 {
4054 /* this is very bad */
4055 LOGE("PGC: invalid ref in reg %d: 0x%08x\n",
4056 method->registersSize-1 - i, rval);
4057 LOGE("PGC: %s.%s addr 0x%04x\n",
4058 method->clazz->descriptor, method->name,
4059 saveArea->xtra.currentPc - method->insns);
4060 } else
4061 #endif
4062 {
4063 dvmMarkObjectNonNull((Object *)rval);
4064 }
4065 } else {
4066 /*
4067 * Null or non-reference, do nothing at all.
4068 */
4069 #if WITH_EXTRA_GC_CHECKS > 1
4070 if (dvmIsValidObject((Object*) rval)) {
4071 /* this is normal, but we feel chatty */
4072 LOGD("PGC: ignoring valid ref in reg %d: 0x%08x\n",
4073 method->registersSize-1 - i, rval);
4074 }
4075 #endif
4076 }
4077 }
4078 dvmReleaseRegisterMapLine(pMap, regVector);
4079 }
4080 }
4081
4082 #if WITH_EXTRA_GC_CHECKS > 1
4083 first = false;
4084 #endif
4085
4086 /* Don't fall into an infinite loop if things get corrupted.
4087 */
4088 assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
4089 saveArea->prevFrame == NULL);
4090 framePtr = saveArea->prevFrame;
4091 }
4092 }
4093
4094 static void gcScanReferenceTable(ReferenceTable *refTable)
4095 {
4096 Object **op;
4097
4098     //TODO: these asserts are overkill; turn them off when things stabilize.
4099 assert(refTable != NULL);
4100 assert(refTable->table != NULL);
4101 assert(refTable->nextEntry != NULL);
4102 assert((uintptr_t)refTable->nextEntry >= (uintptr_t)refTable->table);
4103 assert(refTable->nextEntry - refTable->table <= refTable->maxEntries);
4104
4105 op = refTable->table;
4106 while ((uintptr_t)op < (uintptr_t)refTable->nextEntry) {
4107 dvmMarkObjectNonNull(*(op++));
4108 }
4109 }
4110
4111 #ifdef USE_INDIRECT_REF
4112 static void gcScanIndirectRefTable(IndirectRefTable* pRefTable)
4113 {
4114 Object** op = pRefTable->table;
4115 int numEntries = dvmIndirectRefTableEntries(pRefTable);
4116 int i;
4117
4118 for (i = 0; i < numEntries; i++) {
4119 Object* obj = *op;
4120 if (obj != NULL)
4121 dvmMarkObjectNonNull(obj);
4122 op++;
4123 }
4124 }
4125 #endif
4126
4127 /*
4128 * Scan a Thread and mark any objects it references.
4129 */
4130 static void gcScanThread(Thread *thread)
4131 {
4132 assert(thread != NULL);
4133
4134 /*
4135 * The target thread must be suspended or in a state where it can't do
4136 * any harm (e.g. in Object.wait()). The only exception is the current
4137 * thread, which will still be active and in the "running" state.
4138 *
4139 * It's possible to encounter a false-positive here because a thread
4140 * transitioning to running from (say) vmwait or native will briefly
4141 * set their status to running before switching to suspended. This
4142 * is highly unlikely, but does mean that we don't want to abort if
4143 * the situation arises.
4144 */
4145 if (thread->status == THREAD_RUNNING && thread != dvmThreadSelf()) {
4146 Thread* self = dvmThreadSelf();
4147 LOGW("threadid=%d: Warning: GC scanning a running thread (%d)\n",
4148 self->threadId, thread->threadId);
4149 dvmDumpThread(thread, true);
4150 LOGW("Found by:\n");
4151 dvmDumpThread(self, false);
4152
4153 /* continue anyway */
4154 }
4155
4156 HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_THREAD_OBJECT, thread->threadId);
4157
4158 dvmMarkObject(thread->threadObj); // could be NULL, when constructing
4159
4160 HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_NATIVE_STACK, thread->threadId);
4161
4162 dvmMarkObject(thread->exception); // usually NULL
4163 gcScanReferenceTable(&thread->internalLocalRefTable);
4164
4165 HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_JNI_LOCAL, thread->threadId);
4166
4167 #ifdef USE_INDIRECT_REF
4168 gcScanIndirectRefTable(&thread->jniLocalRefTable);
4169 #else
4170 gcScanReferenceTable(&thread->jniLocalRefTable);
4171 #endif
4172
4173 if (thread->jniMonitorRefTable.table != NULL) {
4174 HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_JNI_MONITOR, thread->threadId);
4175
4176 gcScanReferenceTable(&thread->jniMonitorRefTable);
4177 }
4178
4179 HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_JAVA_FRAME, thread->threadId);
4180
4181 gcScanInterpStackReferences(thread);
4182
4183 HPROF_CLEAR_GC_SCAN_STATE();
4184 }
4185
4186 static void gcScanAllThreads()
4187 {
4188 Thread *thread;
4189
4190 /* Lock the thread list so we can safely use the
4191 * next/prev pointers.
4192 */
4193 dvmLockThreadList(dvmThreadSelf());
4194
4195 for (thread = gDvm.threadList; thread != NULL;
4196 thread = thread->next)
4197 {
4198 /* We need to scan our own stack, so don't special-case
4199 * the current thread.
4200 */
4201 gcScanThread(thread);
4202 }
4203
4204 dvmUnlockThreadList();
4205 }
4206
4207 void dvmGcScanRootThreadGroups()
4208 {
4209 /* We scan the VM's list of threads instead of going
4210 * through the actual ThreadGroups, but it should be
4211 * equivalent.
4212 *
4213 * This assumes that the ThreadGroup class object is in
4214 * the root set, which should always be true; it's
4215 * loaded by the built-in class loader, which is part
4216 * of the root set.
4217 */
4218 gcScanAllThreads();
4219 }
4220