/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * An async worker thread to handle certain heap operations that need
 * to be done in a separate thread to avoid synchronization problems.
 * Finalization and reference enqueuing are handled by this thread;
 * the VM does all clearing.
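 *
 * Interface summary: dvmInitializeHeapWorkerState() and
 * dvmHeapWorkerStartup()/dvmHeapWorkerShutdown() manage the thread's
 * lifecycle, dvmSignalHeapWorker() wakes it when work is queued,
 * dvmWaitForHeapWorkerIdle() blocks until pending work drains, and
 * dvmScheduleHeapSourceTrim() asks it to trim the heap source later.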
 */
#include "Dalvik.h"
#include "HeapInternal.h"

#include <sys/time.h>
#include <stdlib.h>
#include <pthread.h>
#include <signal.h>
#include <errno.h>  // for ETIMEDOUT, etc.

static void* heapWorkerThreadStart(void* arg);

/*
 * Initialize any HeapWorker state that Heap.c
 * cares about. This lets the GC start before the
 * HeapWorker thread is initialized.
 */
void dvmInitializeHeapWorkerState()
{
    assert(!gDvm.heapWorkerInitialized);

    dvmInitMutex(&gDvm.heapWorkerLock);
    pthread_cond_init(&gDvm.heapWorkerCond, NULL);
    pthread_cond_init(&gDvm.heapWorkerIdleCond, NULL);

    gDvm.heapWorkerInitialized = true;
}

/*
 * Crank up the heap worker thread.
 *
 * Does not return until the thread is ready for business.
 */
bool dvmHeapWorkerStartup(void)
{
    assert(!gDvm.haltHeapWorker);
    assert(!gDvm.heapWorkerReady);
    assert(gDvm.heapWorkerHandle == 0);
    assert(gDvm.heapWorkerInitialized);

    /* use heapWorkerLock/heapWorkerCond to communicate readiness */
    dvmLockMutex(&gDvm.heapWorkerLock);

    //BUG: If a GC happens in here or in the new thread while we hold the lock,
    //     the GC will deadlock when trying to acquire heapWorkerLock.
    if (!dvmCreateInternalThread(&gDvm.heapWorkerHandle,
                "HeapWorker", heapWorkerThreadStart, NULL))
    {
        dvmUnlockMutex(&gDvm.heapWorkerLock);
        return false;
    }

    /*
     * Wait for the heap worker to come up. We know the thread was created,
     * so this should not get stuck.
     */
    while (!gDvm.heapWorkerReady) {
        dvmWaitCond(&gDvm.heapWorkerCond, &gDvm.heapWorkerLock);
    }

    dvmUnlockMutex(&gDvm.heapWorkerLock);
    return true;
}

/*
 * Shut down the heap worker thread if it was started.
 */
void dvmHeapWorkerShutdown(void)
{
    void* threadReturn;

    /* note: assuming that (pthread_t)0 is not a valid thread handle */
    if (gDvm.heapWorkerHandle != 0) {
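        /* Tell the thread to break out of its main loop, then wake it up. */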
        gDvm.haltHeapWorker = true;
        dvmSignalHeapWorker(true);

        /*
         * We don't strictly need to wait for the heap workers to complete,
         * but it's a good idea to do so in case they're holding some sort
         * of OS resource that doesn't get reclaimed when the process exits
         * (e.g. an open temp file).
         */
        if (pthread_join(gDvm.heapWorkerHandle, &threadReturn) != 0)
            LOGW("HeapWorker thread join failed\n");
        else if (gDvm.verboseShutdown)
            LOGD("HeapWorker thread has shut down\n");

        gDvm.heapWorkerReady = false;
    }
}

/* Make sure that the HeapWorker thread hasn't spent an inordinate
 * amount of time inside a finalizer.
 *
 * Aborts the VM if the thread appears to be wedged.
 *
 * The caller must hold the heapWorkerLock to guarantee an atomic
 * read of the watchdog values.
 */
void dvmAssertHeapWorkerThreadRunning()
{
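    /* heapWorkerCurrentObject is non-NULL only while callMethod() is
     * running a finalizer or reference-enqueue method, so there is
     * nothing to check when it's NULL.
     */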
    if (gDvm.gcHeap->heapWorkerCurrentObject != NULL) {
        static const u8 HEAP_WORKER_WATCHDOG_TIMEOUT = 10*1000*1000LL;  // 10 seconds, in usec

        u8 heapWorkerInterpStartTime = gDvm.gcHeap->heapWorkerInterpStartTime;
        u8 now = dvmGetRelativeTimeUsec();
        u8 delta = now - heapWorkerInterpStartTime;

        if (delta > HEAP_WORKER_WATCHDOG_TIMEOUT &&
            (gDvm.debuggerActive || gDvm.nativeDebuggerActive))
        {
            /*
             * Debugger suspension can block the thread indefinitely. For
             * best results we should reset this explicitly whenever the
             * HeapWorker thread is resumed. Unfortunately this is also
             * affected by native debuggers, and we have no visibility
             * into how they're manipulating us. So, we ignore the
             * watchdog and just reset the timer.
             */
            LOGI("Debugger is attached -- suppressing HeapWorker watchdog\n");
            gDvm.gcHeap->heapWorkerInterpStartTime = now;   /* reset timer */
        } else if (delta > HEAP_WORKER_WATCHDOG_TIMEOUT) {
            /*
             * Before we give up entirely, see if maybe we're just not
             * getting any CPU time because we're stuck in a background
             * process group. If we successfully move the thread into the
             * foreground we'll just leave it there (it doesn't do anything
             * if the process isn't GCing).
             */
            dvmLockThreadList(NULL);
            Thread* thread = dvmGetThreadByHandle(gDvm.heapWorkerHandle);
            dvmUnlockThreadList();

            if (thread != NULL) {
                int priChangeFlags, threadPrio;
                SchedPolicy threadPolicy;
                priChangeFlags = dvmRaiseThreadPriorityIfNeeded(thread,
                        &threadPrio, &threadPolicy);
                if (priChangeFlags != 0) {
                    LOGI("HeapWorker watchdog expired, raising priority"
                         " and retrying\n");
                    gDvm.gcHeap->heapWorkerInterpStartTime = now;
                    return;
                }
            }

            char* desc = dexProtoCopyMethodDescriptor(
                    &gDvm.gcHeap->heapWorkerCurrentMethod->prototype);
            LOGE("HeapWorker is wedged: %lldms spent inside %s.%s%s\n",
                delta / 1000,
                gDvm.gcHeap->heapWorkerCurrentObject->clazz->descriptor,
                gDvm.gcHeap->heapWorkerCurrentMethod->name, desc);
            free(desc);
            dvmDumpAllThreads(true);

            /* try to get a debuggerd dump from the target thread */
            dvmNukeThread(thread);

            /* abort the VM */
            dvmAbort();
        } else if (delta > HEAP_WORKER_WATCHDOG_TIMEOUT / 2) {
            char* desc = dexProtoCopyMethodDescriptor(
                    &gDvm.gcHeap->heapWorkerCurrentMethod->prototype);
            LOGW("HeapWorker may be wedged: %lldms spent inside %s.%s%s\n",
                delta / 1000,
                gDvm.gcHeap->heapWorkerCurrentObject->clazz->descriptor,
                gDvm.gcHeap->heapWorkerCurrentMethod->name, desc);
            free(desc);
        }
    }
}

/*
 * Acquires a mutex, transitioning to the VMWAIT state if the mutex is
 * already held by another thread. This allows the current thread to be
 * suspended while it waits for the holder to release the mutex.
 */
static void lockMutex(pthread_mutex_t *mu)
{
    Thread *self;
    ThreadStatus oldStatus;

    assert(mu != NULL);
    if (dvmTryLockMutex(mu) != 0) {
        self = dvmThreadSelf();
        assert(self != NULL);
        oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmLockMutex(mu);
        dvmChangeStatus(self, oldStatus);
    }
}

static void callMethod(Thread *self, Object *obj, Method *method)
{
    JValue unused;

    /* Keep track of the method we're about to call and
     * the current time so that other threads can detect
     * when this thread wedges and provide useful information.
     */
    gDvm.gcHeap->heapWorkerInterpStartTime = dvmGetRelativeTimeUsec();
    gDvm.gcHeap->heapWorkerInterpCpuStartTime = dvmGetThreadCpuTimeUsec();
    gDvm.gcHeap->heapWorkerCurrentMethod = method;
    gDvm.gcHeap->heapWorkerCurrentObject = obj;

    /* Call the method.
     *
     * Don't hold the lock when executing interpreted
     * code. It may suspend, and the GC needs to grab
     * heapWorkerLock.
     */
    dvmUnlockMutex(&gDvm.heapWorkerLock);
    if (false) {
        /* Log entry/exit; this will likely flood the log enough to
         * cause "logcat" to drop entries.
         */
        char tmpTag[16];
        sprintf(tmpTag, "HW%d", self->systemTid);
        LOG(LOG_DEBUG, tmpTag, "Call %s\n", method->clazz->descriptor);
        dvmCallMethod(self, method, obj, &unused);
        LOG(LOG_DEBUG, tmpTag, " done\n");
    } else {
        dvmCallMethod(self, method, obj, &unused);
    }
    /*
     * Reacquire the heap worker lock in a suspend-friendly way.
     */
    lockMutex(&gDvm.heapWorkerLock);

    gDvm.gcHeap->heapWorkerCurrentObject = NULL;
    gDvm.gcHeap->heapWorkerCurrentMethod = NULL;
    gDvm.gcHeap->heapWorkerInterpStartTime = 0LL;

    /* Exceptions thrown during these calls interrupt
     * the method, but are otherwise ignored.
     */
    if (dvmCheckException(self)) {
#if DVM_SHOW_EXCEPTION >= 1
        LOGI("Uncaught exception thrown by finalizer (will be discarded):\n");
        dvmLogExceptionStackTrace();
#endif
        dvmClearException(self);
    }
}

/* Process all enqueued heap work, including finalizers and reference
 * enqueueing. Clearing has already been done by the VM.
 *
 * Caller must hold gDvm.heapWorkerLock.
 */
static void doHeapWork(Thread *self)
{
    Object *obj;
    HeapWorkerOperation op;
    int numFinalizersCalled, numReferencesEnqueued;

    assert(gDvm.voffJavaLangObject_finalize >= 0);
    assert(gDvm.methJavaLangRefReference_enqueueInternal != NULL);

    numFinalizersCalled = 0;
    numReferencesEnqueued = 0;
    while ((obj = dvmGetNextHeapWorkerObject(&op)) != NULL) {
        Method *method = NULL;

        /* Make sure the object hasn't been collected since
         * being scheduled.
         */
        assert(dvmIsValidObject(obj));

        /* Call the appropriate method(s).
         */
        if (op == WORKER_FINALIZE) {
            numFinalizersCalled++;
            method = obj->clazz->vtable[gDvm.voffJavaLangObject_finalize];
            assert(dvmCompareNameDescriptorAndMethod("finalize", "()V",
                            method) == 0);
            assert(method->clazz != gDvm.classJavaLangObject);
            callMethod(self, obj, method);
        } else {
            assert(op == WORKER_ENQUEUE);
            assert(dvmGetFieldObject(
                obj, gDvm.offJavaLangRefReference_queue) != NULL);
            assert(dvmGetFieldObject(
                obj, gDvm.offJavaLangRefReference_queueNext) == NULL);
            numReferencesEnqueued++;
            callMethod(self, obj,
                    gDvm.methJavaLangRefReference_enqueueInternal);
        }

        /* Let the GC collect the object.
         */
        dvmReleaseTrackedAlloc(obj, self);
    }
    LOGV("Called %d finalizers\n", numFinalizersCalled);
    LOGV("Enqueued %d references\n", numReferencesEnqueued);
}

/*
 * The heap worker thread sits quietly until the GC tells it there's work
 * to do.
 */
static void* heapWorkerThreadStart(void* arg)
{
    Thread *self = dvmThreadSelf();

    UNUSED_PARAMETER(arg);

    LOGV("HeapWorker thread started (threadid=%d)\n", self->threadId);

    /* tell the main thread that we're ready */
    lockMutex(&gDvm.heapWorkerLock);
    gDvm.heapWorkerReady = true;
    dvmSignalCond(&gDvm.heapWorkerCond);
    dvmUnlockMutex(&gDvm.heapWorkerLock);

    lockMutex(&gDvm.heapWorkerLock);
    while (!gDvm.haltHeapWorker) {
        struct timespec trimtime;
        bool timedwait = false;

        /* We're done running interpreted code for now. */
        dvmChangeStatus(NULL, THREAD_VMWAIT);

        /* Signal anyone who wants to know when we're done. */
        dvmBroadcastCond(&gDvm.heapWorkerIdleCond);

        /* Trim the heap if we were asked to. */
        trimtime = gDvm.gcHeap->heapWorkerNextTrim;
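        /* A zeroed heapWorkerNextTrim means no trim is currently
         * scheduled; see dvmScheduleHeapSourceTrim().
         */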
        if (trimtime.tv_sec != 0 && trimtime.tv_nsec != 0) {
            struct timespec now;

#ifdef HAVE_TIMEDWAIT_MONOTONIC
            clock_gettime(CLOCK_MONOTONIC, &now);       // relative time
#else
            struct timeval tvnow;
            gettimeofday(&tvnow, NULL);                 // absolute time
            now.tv_sec = tvnow.tv_sec;
            now.tv_nsec = tvnow.tv_usec * 1000;
#endif

            if (trimtime.tv_sec < now.tv_sec ||
                (trimtime.tv_sec == now.tv_sec &&
                 trimtime.tv_nsec <= now.tv_nsec))
            {
                size_t madvisedSizes[HEAP_SOURCE_MAX_HEAP_COUNT];

                /*
                 * Acquire the gcHeapLock. This requires releasing the
                 * heapWorkerLock before the gcHeapLock is acquired.
                 * It is possible that the gcHeapLock may be acquired
                 * during a concurrent GC in which case heapWorkerLock
                 * is held by the GC and we are unable to make forward
                 * progress. We avoid deadlock by releasing the
                 * gcHeapLock and then waiting to be signaled when the
                 * GC completes. There is no guarantee that the next
                 * time we are run will coincide with GC inactivity so
                 * the check and wait must be performed within a loop.
                 */
                dvmUnlockMutex(&gDvm.heapWorkerLock);
                dvmLockHeap();
                while (gDvm.gcHeap->gcRunning) {
                    dvmWaitForConcurrentGcToComplete();
                }
                dvmLockMutex(&gDvm.heapWorkerLock);

                memset(madvisedSizes, 0, sizeof(madvisedSizes));
                dvmHeapSourceTrim(madvisedSizes, HEAP_SOURCE_MAX_HEAP_COUNT);
                dvmLogMadviseStats(madvisedSizes, HEAP_SOURCE_MAX_HEAP_COUNT);

                dvmUnlockHeap();

                trimtime.tv_sec = 0;
                trimtime.tv_nsec = 0;
                gDvm.gcHeap->heapWorkerNextTrim = trimtime;
            } else {
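                /* The scheduled trim time hasn't arrived yet; use a timed
                 * wait below so we wake up for it even if nobody signals us.
                 */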
                timedwait = true;
            }
        }

        /* sleep until signaled */
        if (timedwait) {
            int cc __attribute__ ((__unused__));
#ifdef HAVE_TIMEDWAIT_MONOTONIC
            cc = pthread_cond_timedwait_monotonic(&gDvm.heapWorkerCond,
                    &gDvm.heapWorkerLock, &trimtime);
#else
            cc = pthread_cond_timedwait(&gDvm.heapWorkerCond,
                    &gDvm.heapWorkerLock, &trimtime);
#endif
            assert(cc == 0 || cc == ETIMEDOUT);
        } else {
            dvmWaitCond(&gDvm.heapWorkerCond, &gDvm.heapWorkerLock);
        }

        /*
         * Return to the running state before doing heap work. This
         * will block if the GC has initiated a suspend. We release
         * the heapWorkerLock beforehand for the GC to make progress
         * and wait to be signaled after the GC completes. There is
         * no guarantee that the next time we are run will coincide
         * with GC inactivity so the check and wait must be performed
         * within a loop.
         */
        dvmUnlockMutex(&gDvm.heapWorkerLock);
        dvmChangeStatus(NULL, THREAD_RUNNING);
        dvmLockHeap();
        while (gDvm.gcHeap->gcRunning) {
            dvmWaitForConcurrentGcToComplete();
        }
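        /* Note: heapWorkerLock is reacquired before the heap lock is
         * released, which keeps a new GC from starting in between.
         */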
        dvmLockMutex(&gDvm.heapWorkerLock);
        dvmUnlockHeap();
        LOGV("HeapWorker is awake\n");

        /* Process any events in the queue.
         */
        doHeapWork(self);
    }
    dvmUnlockMutex(&gDvm.heapWorkerLock);

    if (gDvm.verboseShutdown)
        LOGD("HeapWorker thread shutting down\n");
    return NULL;
}

/*
 * Wake up the heap worker to let it know that there's work to be done.
 */
void dvmSignalHeapWorker(bool shouldLock)
{
    if (shouldLock) {
        dvmLockMutex(&gDvm.heapWorkerLock);
    }

    dvmSignalCond(&gDvm.heapWorkerCond);

    if (shouldLock) {
        dvmUnlockMutex(&gDvm.heapWorkerLock);
    }
}

/*
 * Block until all pending heap worker work has finished.
 */
void dvmWaitForHeapWorkerIdle()
{
    assert(gDvm.heapWorkerReady);

    dvmChangeStatus(NULL, THREAD_VMWAIT);

    dvmLockMutex(&gDvm.heapWorkerLock);

    /* Wake up the heap worker and wait for it to finish. */
    //TODO(http://b/issue?id=699704): This will deadlock if
    //     called from finalize(), enqueue(), or clear(). We
    //     need to detect when this is called from the HeapWorker
    //     context and just give up.
    dvmSignalHeapWorker(false);
    dvmWaitCond(&gDvm.heapWorkerIdleCond, &gDvm.heapWorkerLock);

    dvmUnlockMutex(&gDvm.heapWorkerLock);

    dvmChangeStatus(NULL, THREAD_RUNNING);
}

/*
 * Do not return until any pending heap work has finished. This may
 * or may not happen in the context of the calling thread.
 * No exceptions will escape.
 */
void dvmRunFinalizationSync()
{
    if (gDvm.zygote) {
        assert(!gDvm.heapWorkerReady);

        /* When in zygote mode, there is no heap worker.
         * Do the work in the current thread.
         */
        dvmLockMutex(&gDvm.heapWorkerLock);
        doHeapWork(dvmThreadSelf());
        dvmUnlockMutex(&gDvm.heapWorkerLock);
    } else {
        /* Outside of zygote mode, we can just ask the
         * heap worker thread to do the work.
         */
        dvmWaitForHeapWorkerIdle();
    }
}

/*
 * Requests that dvmHeapSourceTrim() be called no sooner
 * than timeoutSec seconds from now. If timeoutSec
 * is zero, any pending trim is cancelled.
 *
 * Caller must hold heapWorkerLock.
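 *
 * For example (a sketch): a caller holding heapWorkerLock might request
 * a trim in roughly five seconds with dvmScheduleHeapSourceTrim(5), and
 * later cancel it with dvmScheduleHeapSourceTrim(0).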
 */
void dvmScheduleHeapSourceTrim(size_t timeoutSec)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    struct timespec timeout;

    if (timeoutSec == 0) {
        timeout.tv_sec = 0;
        timeout.tv_nsec = 0;
        /* Don't wake up the thread just to tell it to cancel.
         * If it wakes up naturally, we can avoid the extra
         * context switch.
         */
    } else {
#ifdef HAVE_TIMEDWAIT_MONOTONIC
        clock_gettime(CLOCK_MONOTONIC, &timeout);
        timeout.tv_sec += timeoutSec;
#else
        struct timeval now;
        gettimeofday(&now, NULL);
        timeout.tv_sec = now.tv_sec + timeoutSec;
        timeout.tv_nsec = now.tv_usec * 1000;
#endif
        dvmSignalHeapWorker(false);
    }
    gcHeap->heapWorkerNextTrim = timeout;
}
