/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * VM thread support.
 */
#ifndef _DALVIK_THREAD
#define _DALVIK_THREAD

#include "jni.h"

#include <errno.h>
#include <cutils/sched_policy.h>


#if defined(CHECK_MUTEX) && !defined(__USE_UNIX98)
/* glibc lacks this unless you #define __USE_UNIX98 */
int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type);
enum { PTHREAD_MUTEX_ERRORCHECK = PTHREAD_MUTEX_ERRORCHECK_NP };
#endif

#ifdef WITH_MONITOR_TRACKING
struct LockedObjectData;
#endif

/*
 * Current status; these map to JDWP constants, so don't rearrange them.
 * (If you do alter this, update the strings in dvmDumpThread and the
 * conversion table in VMThread.java.)
 *
 * Note that "suspended" is orthogonal to these values (so says JDWP).
 */
typedef enum ThreadStatus {
    THREAD_UNDEFINED    = -1,       /* makes enum compatible with int32_t */

    /* these match up with JDWP values */
    THREAD_ZOMBIE       = 0,        /* TERMINATED */
    THREAD_RUNNING      = 1,        /* RUNNABLE or running now */
    THREAD_TIMED_WAIT   = 2,        /* TIMED_WAITING in Object.wait() */
    THREAD_MONITOR      = 3,        /* BLOCKED on a monitor */
    THREAD_WAIT         = 4,        /* WAITING in Object.wait() */
    /* non-JDWP states */
    THREAD_INITIALIZING = 5,        /* allocated, not yet running */
    THREAD_STARTING     = 6,        /* started, not yet on thread list */
    THREAD_NATIVE       = 7,        /* off in a JNI native method */
    THREAD_VMWAIT       = 8,        /* waiting on a VM resource */
    THREAD_SUSPENDED    = 9,        /* suspended, usually by GC or debugger */
} ThreadStatus;

/* thread priorities, from java.lang.Thread */
enum {
    THREAD_MIN_PRIORITY     = 1,
    THREAD_NORM_PRIORITY    = 5,
    THREAD_MAX_PRIORITY     = 10,
};


/* initialization */
bool dvmThreadStartup(void);
bool dvmThreadObjStartup(void);
void dvmThreadShutdown(void);
void dvmSlayDaemons(void);


#define kJniLocalRefMin         32
#define kJniLocalRefMax         512     /* arbitrary; should be plenty */
#define kInternalRefDefault     32      /* equally arbitrary */
#define kInternalRefMax         4096    /* mainly a sanity check */

#define kMinStackSize       (512 + STACK_OVERFLOW_RESERVE)
#define kDefaultStackSize   (12*1024)   /* three 4K pages */
#define kMaxStackSize       (256*1024 + STACK_OVERFLOW_RESERVE)

/*
 * Our per-thread data.
 *
 * These are allocated on the system heap.
 */
typedef struct Thread {
    /* small unique integer; useful for "thin" locks and debug messages */
    u4 threadId;

    /*
     * Thread's current status. Can only be changed by the thread itself
     * (i.e. don't mess with this from other threads).
     */
    volatile ThreadStatus status;

    /*
     * This is the number of times the thread has been suspended. When the
     * count drops to zero, the thread resumes.
     *
     * "dbgSuspendCount" is the portion of the suspend count that the
     * debugger is responsible for. This has to be tracked separately so
     * that we can recover correctly if the debugger abruptly disconnects
     * (suspendCount -= dbgSuspendCount). The debugger should not be able
     * to resume GC-suspended threads, because we ignore the debugger while
     * a GC is in progress.
     *
     * Both of these are guarded by gDvm.threadSuspendCountLock.
     *
     * (We could store both of these in the same 32-bit word, using 16-bit
     * halves, to make atomic ops possible. In practice, you only need
     * to read suspendCount, and we need to hold a mutex when making
     * changes, so there's no need to merge them. Note that the non-debug
     * component will rarely be anything other than 1 or 0 -- it's not clear
     * it can even exceed 1 with the way mutexes are currently used.)
     */
    int suspendCount;
    int dbgSuspendCount;
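
    /*
     * Illustrative example (not from the original header): if the GC and
     * the debugger have each suspended this thread once, suspendCount is 2
     * and dbgSuspendCount is 1. If the debugger then disconnects abruptly,
     * the recovery step (suspendCount -= dbgSuspendCount) leaves
     * suspendCount at 1, so the GC suspension remains in effect.
     */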

    /* thread handle, as reported by pthread_self() */
    pthread_t handle;

    /* thread ID, only useful under Linux */
    pid_t systemTid;

    /* start (high addr) of interp stack (subtract size to get malloc addr) */
    u1* interpStackStart;

    /* current limit of stack; flexes for StackOverflowError */
    const u1* interpStackEnd;

    /* interpreter stack size; our stacks are fixed-length */
    int interpStackSize;
    bool stackOverflowed;

    /* FP of bottom-most (currently executing) stack frame on interp stack */
    void* curFrame;

    /* current exception, or NULL if nothing pending */
    Object* exception;

    /* the java/lang/Thread that we are associated with */
    Object* threadObj;

    /* the JNIEnv pointer associated with this thread */
    JNIEnv* jniEnv;

    /* internal reference tracking */
    ReferenceTable internalLocalRefTable;

#if defined(WITH_JIT)
    /*
     * Whether the current top VM frame is in the interpreter or JIT cache:
     *   NULL    : in the interpreter
     *   non-NULL: entry address of the JIT'ed code (the actual value doesn't
     *             matter)
     */
    void* inJitCodeCache;
#if defined(WITH_SELF_VERIFICATION)
    /* Buffer for register state during self verification */
    struct ShadowSpace* shadowSpace;
#endif
#endif

    /* JNI local reference tracking */
#ifdef USE_INDIRECT_REF
    IndirectRefTable jniLocalRefTable;
#else
    ReferenceTable jniLocalRefTable;
#endif

    /* JNI native monitor reference tracking (initialized on first use) */
    ReferenceTable jniMonitorRefTable;

    /* hack to make JNI_OnLoad work right */
    Object* classLoaderOverride;

    /* mutex to guard the interrupted and the waitMonitor members */
    pthread_mutex_t waitMutex;

    /* pointer to the monitor lock we're currently waiting on */
    /* guarded by waitMutex */
    /* TODO: consider changing this to Object* for better JDWP interaction */
    Monitor* waitMonitor;

    /* thread "interrupted" status; stays raised until queried or thrown */
    /* guarded by waitMutex */
    bool interrupted;

    /* links to the next thread in the wait set this thread is part of */
    struct Thread* waitNext;

    /* object to sleep on while we are waiting for a monitor */
    pthread_cond_t waitCond;

    /*
     * Set to true when the thread is in the process of throwing an
     * OutOfMemoryError.
     */
    bool throwingOOME;

    /* links to rest of thread list; grab global lock before traversing */
    struct Thread* prev;
    struct Thread* next;

    /* used by threadExitCheck when a thread exits without detaching */
    int threadExitCheckCount;

    /* JDWP invoke-during-breakpoint support */
    DebugInvokeReq invokeReq;

#ifdef WITH_MONITOR_TRACKING
    /* objects locked by this thread; most recent is at head of list */
    struct LockedObjectData* pLockedObjects;
#endif

#ifdef WITH_ALLOC_LIMITS
    /* allocation limit, for Debug.setAllocationLimit() regression testing */
    int allocLimit;
#endif

    /* base time for per-thread CPU timing (used by method profiling) */
    bool cpuClockBaseSet;
    u8 cpuClockBase;

    /* memory allocation profiling state */
    AllocProfState allocProf;

#ifdef WITH_JNI_STACK_CHECK
    u4 stackCrc;
#endif

#if WITH_EXTRA_GC_CHECKS > 1
    /* PC, saved on every instruction; redundant with StackSaveArea */
    const u2* currentPc2;
#endif
} Thread;

/* start point for an internal thread; mimics pthread args */
typedef void* (*InternalThreadStart)(void* arg);

/* args for internal thread creation */
typedef struct InternalStartArgs {
    /* inputs */
    InternalThreadStart func;
    void* funcArg;
    char* name;
    Object* group;
    bool isDaemon;
    /* result */
    volatile Thread** pThread;
    volatile int* pCreateStatus;
} InternalStartArgs;

/* finish init */
bool dvmPrepMainForJni(JNIEnv* pEnv);
bool dvmPrepMainThread(void);

/* utility function to get the tid */
pid_t dvmGetSysThreadId(void);

/*
 * Get our Thread* from TLS.
 *
 * Returns NULL if this isn't a thread that the VM is aware of.
 */
Thread* dvmThreadSelf(void);

/* grab the thread list global lock */
void dvmLockThreadList(Thread* self);
/* release the thread list global lock */
void dvmUnlockThreadList(void);

/*
 * Thread suspend/resume, used by the GC and debugger.
 */
typedef enum SuspendCause {
    SUSPEND_NOT = 0,
    SUSPEND_FOR_GC,
    SUSPEND_FOR_DEBUG,
    SUSPEND_FOR_DEBUG_EVENT,
    SUSPEND_FOR_STACK_DUMP,
    SUSPEND_FOR_DEX_OPT,
    SUSPEND_FOR_VERIFY,
#if defined(WITH_JIT)
    SUSPEND_FOR_TBL_RESIZE,    // jit-table resize
    SUSPEND_FOR_IC_PATCH,      // polymorphic callsite inline-cache patch
    SUSPEND_FOR_CC_RESET,      // code-cache reset
    SUSPEND_FOR_REFRESH,       // Reload data cached in interpState
#endif
} SuspendCause;
void dvmSuspendThread(Thread* thread);
void dvmSuspendSelf(bool jdwpActivity);
void dvmResumeThread(Thread* thread);
void dvmSuspendAllThreads(SuspendCause why);
void dvmResumeAllThreads(SuspendCause why);
void dvmUndoDebuggerSuspensions(void);
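
/*
 * Illustrative usage sketch (not part of the original header): a
 * stop-the-world pass, e.g. in the GC, brackets its work with a matching
 * suspend/resume pair using the same SuspendCause:
 *
 *     dvmSuspendAllThreads(SUSPEND_FOR_GC);
 *     // ... all other threads are suspended; scan stacks, etc. ...
 *     dvmResumeAllThreads(SUSPEND_FOR_GC);
 */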

/*
 * Check suspend state. Grab threadListLock before calling.
 */
bool dvmIsSuspended(const Thread* thread);

/*
 * Wait until a thread has suspended. (Used by debugger support.)
 */
void dvmWaitForSuspend(Thread* thread);

/*
 * Check to see if we should be suspended now. If so, suspend ourselves
 * by sleeping on a condition variable.
 */
bool dvmCheckSuspendPending(Thread* self);

/*
 * Fast test for use in the interpreter. Returns "true" if our suspend
 * count is nonzero.
 */
INLINE bool dvmCheckSuspendQuick(Thread* self) {
    return (self->suspendCount != 0);
}

/*
 * Used when changing thread state. Threads may only change their own.
 * The "self" argument, which may be NULL, is accepted as an optimization.
 *
 * If you're calling this before waiting on a resource (e.g. THREAD_WAIT
 * or THREAD_MONITOR), do so in the same function as the wait -- this records
 * the current stack depth for the GC.
 *
 * If you're changing to THREAD_RUNNING, this will check for suspension.
 *
 * Returns the old status.
 */
ThreadStatus dvmChangeStatus(Thread* self, ThreadStatus newStatus);
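
/*
 * Illustrative usage sketch (not part of the original header): code that is
 * about to block on a VM resource typically brackets the wait with a status
 * change and restores the previous status afterward:
 *
 *     ThreadStatus oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
 *     // ... block on the resource ...
 *     dvmChangeStatus(self, oldStatus);   // checks for suspension if RUNNING
 */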

/*
 * Initialize a mutex.
 */
INLINE void dvmInitMutex(pthread_mutex_t* pMutex)
{
#ifdef CHECK_MUTEX
    pthread_mutexattr_t attr;
    int cc;

    pthread_mutexattr_init(&attr);
    cc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK_NP);
    assert(cc == 0);
    pthread_mutex_init(pMutex, &attr);
    pthread_mutexattr_destroy(&attr);
#else
    pthread_mutex_init(pMutex, NULL);       // default=PTHREAD_MUTEX_FAST_NP
#endif
}

/*
 * Grab a plain mutex.
 */
INLINE void dvmLockMutex(pthread_mutex_t* pMutex)
{
    int cc __attribute__ ((__unused__)) = pthread_mutex_lock(pMutex);
    assert(cc == 0);
}

/*
 * Try grabbing a plain mutex. Returns 0 if successful.
 */
INLINE int dvmTryLockMutex(pthread_mutex_t* pMutex)
{
    int cc = pthread_mutex_trylock(pMutex);
    assert(cc == 0 || cc == EBUSY);
    return cc;
}

/*
 * Unlock pthread mutex.
 */
INLINE void dvmUnlockMutex(pthread_mutex_t* pMutex)
{
    int cc __attribute__ ((__unused__)) = pthread_mutex_unlock(pMutex);
    assert(cc == 0);
}
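
/*
 * Illustrative usage sketch (not part of the original header): guarding a
 * critical section, with a non-blocking attempt first. "someMutex" is a
 * hypothetical mutex initialized with dvmInitMutex().
 *
 *     if (dvmTryLockMutex(&someMutex) != 0) {
 *         // contended (EBUSY); fall back to a blocking acquire
 *         dvmLockMutex(&someMutex);
 *     }
 *     // ... critical section ...
 *     dvmUnlockMutex(&someMutex);
 */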

/*
 * Destroy a mutex.
 */
INLINE void dvmDestroyMutex(pthread_mutex_t* pMutex)
{
    int cc __attribute__ ((__unused__)) = pthread_mutex_destroy(pMutex);
    assert(cc == 0);
}

INLINE void dvmBroadcastCond(pthread_cond_t* pCond)
{
    int cc __attribute__ ((__unused__)) = pthread_cond_broadcast(pCond);
    assert(cc == 0);
}

INLINE void dvmSignalCond(pthread_cond_t* pCond)
{
    int cc __attribute__ ((__unused__)) = pthread_cond_signal(pCond);
    assert(cc == 0);
}

INLINE void dvmWaitCond(pthread_cond_t* pCond, pthread_mutex_t* pMutex)
{
    int cc __attribute__ ((__unused__)) = pthread_cond_wait(pCond, pMutex);
    assert(cc == 0);
}
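
/*
 * Illustrative usage sketch (not part of the original header): the standard
 * pthread pattern applies to these wrappers -- re-test the predicate in a
 * loop, since condition waits can wake up spuriously. "stateMutex",
 * "stateCond", and "conditionHolds" are hypothetical.
 *
 *     dvmLockMutex(&stateMutex);
 *     while (!conditionHolds)
 *         dvmWaitCond(&stateCond, &stateMutex);
 *     // ... condition holds and the mutex is held ...
 *     dvmUnlockMutex(&stateMutex);
 */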

/*
 * Create a thread as a result of java.lang.Thread.start().
 */
bool dvmCreateInterpThread(Object* threadObj, int reqStackSize);

/*
 * Create a thread internal to the VM. It's visible to interpreted code,
 * but found in the "system" thread group rather than "main".
 */
bool dvmCreateInternalThread(pthread_t* pHandle, const char* name,
    InternalThreadStart func, void* funcArg);

/*
 * Attach or detach the current thread from the VM.
 */
bool dvmAttachCurrentThread(const JavaVMAttachArgs* pArgs, bool isDaemon);
void dvmDetachCurrentThread(void);
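
/*
 * Illustrative usage sketch (not part of the original header): a thread
 * created outside the VM attaches before calling in and detaches before it
 * exits. The attach-args values shown are hypothetical.
 *
 *     JavaVMAttachArgs args = { JNI_VERSION_1_2, "worker", NULL };
 *     if (dvmAttachCurrentThread(&args, false)) {
 *         // ... interact with the VM, e.g. via dvmThreadSelf()->jniEnv ...
 *         dvmDetachCurrentThread();
 *     }
 */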

/*
 * Get the "main" or "system" thread group.
 */
Object* dvmGetMainThreadGroup(void);
Object* dvmGetSystemThreadGroup(void);

/*
 * Given a java/lang/VMThread object, return our Thread.
 */
Thread* dvmGetThreadFromThreadObject(Object* vmThreadObj);

/*
 * Given a pthread handle, return the associated Thread*.
 * Caller must hold the thread list lock.
 *
 * Returns NULL if the thread was not found.
 */
Thread* dvmGetThreadByHandle(pthread_t handle);

/*
 * Given a thread ID, return the associated Thread*.
 * Caller must hold the thread list lock.
 *
 * Returns NULL if the thread was not found.
 */
Thread* dvmGetThreadByThreadId(u4 threadId);
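
/*
 * Illustrative usage sketch (not part of the original header): both lookup
 * functions above require the thread list lock, so a typical caller wraps
 * the lookup -- and any use of the returned Thread* -- like this:
 *
 *     dvmLockThreadList(dvmThreadSelf());
 *     Thread* thread = dvmGetThreadByThreadId(threadId);
 *     if (thread != NULL) {
 *         // ... inspect "thread" while the lock is held ...
 *     }
 *     dvmUnlockThreadList();
 */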

/*
 * Sleep in a thread. Returns when the sleep timer returns or the thread
 * is interrupted.
 */
void dvmThreadSleep(u8 msec, u4 nsec);

/*
 * Get the name of a thread. (For safety, hold the thread list lock.)
 */
char* dvmGetThreadName(Thread* thread);

/*
 * Convert ThreadStatus to a string.
 */
const char* dvmGetThreadStatusStr(ThreadStatus status);

/*
 * Return true if a thread is on the internal list. If it is, the
 * thread is part of the GC's root set.
 */
bool dvmIsOnThreadList(const Thread* thread);

/*
 * Get/set the JNIEnv field.
 */
INLINE JNIEnv* dvmGetThreadJNIEnv(Thread* self) { return self->jniEnv; }
INLINE void dvmSetThreadJNIEnv(Thread* self, JNIEnv* env) { self->jniEnv = env; }

/*
 * Update the priority value of the underlying pthread.
 */
void dvmChangeThreadPriority(Thread* thread, int newPriority);

/* "change flags" values for raise/reset thread priority calls */
#define kChangedPriority    0x01
#define kChangedPolicy      0x02

/*
 * If necessary, raise the thread's priority to nice=0 cgroup=fg.
 *
 * Returns bit flags indicating changes made (zero if nothing was done).
 */
int dvmRaiseThreadPriorityIfNeeded(Thread* thread, int* pSavedThreadPrio,
    SchedPolicy* pSavedThreadPolicy);

/*
 * Drop the thread priority to what it was before an earlier call to
 * dvmRaiseThreadPriorityIfNeeded().
 */
void dvmResetThreadPriority(Thread* thread, int changeFlags,
    int savedThreadPrio, SchedPolicy savedThreadPolicy);
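
/*
 * Illustrative usage sketch (not part of the original header): temporarily
 * boost a thread for time-critical work, then restore exactly what was
 * changed using the returned flags:
 *
 *     int savedPrio;
 *     SchedPolicy savedPolicy;
 *     int changeFlags = dvmRaiseThreadPriorityIfNeeded(thread, &savedPrio,
 *                                                      &savedPolicy);
 *     // ... time-critical work ...
 *     if (changeFlags != 0)
 *         dvmResetThreadPriority(thread, changeFlags, savedPrio, savedPolicy);
 */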

/*
 * Debug: dump information about a single thread.
 */
void dvmDumpThread(Thread* thread, bool isRunning);
void dvmDumpThreadEx(const DebugOutputTarget* target, Thread* thread,
    bool isRunning);

/*
 * Debug: dump information about all threads.
 */
void dvmDumpAllThreads(bool grabLock);
void dvmDumpAllThreadsEx(const DebugOutputTarget* target, bool grabLock);

/*
 * Debug: kill a thread to get a debuggerd stack trace. Leaves the VM
 * in an uncertain state.
 */
void dvmNukeThread(Thread* thread);

#ifdef WITH_MONITOR_TRACKING
/*
 * Track locks held by the current thread, along with the stack trace at
 * the point the lock was acquired.
 *
 * At any given time the number of locks held across the VM should be
 * fairly small, so there's no reason not to generate and store the entire
 * stack trace.
 */
typedef struct LockedObjectData {
    /* the locked object */
    struct Object* obj;

    /* number of times it has been locked recursively (zero-based ref count) */
    int recursionCount;

    /* stack trace at point of initial acquire */
    u4 stackDepth;
    int* rawStackTrace;

    struct LockedObjectData* next;
} LockedObjectData;

/*
 * Add/remove/find objects from the thread's monitor list.
 */
void dvmAddToMonitorList(Thread* self, Object* obj, bool withTrace);
void dvmRemoveFromMonitorList(Thread* self, Object* obj);
LockedObjectData* dvmFindInMonitorList(const Thread* self, const Object* obj);
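
/*
 * Illustrative usage sketch (not part of the original header): record a lock
 * acquire and release around the period the object is held, with
 * dvmFindInMonitorList available for lock-state queries in between:
 *
 *     dvmAddToMonitorList(self, obj, true);     // record, with stack trace
 *     // ... object is locked by this thread ...
 *     dvmRemoveFromMonitorList(self, obj);
 */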
#endif

#endif /*_DALVIK_THREAD*/