/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Garbage-collecting memory allocator.
 */
#include "Dalvik.h"
#include "alloc/HeapBitmap.h"
#include "alloc/Verify.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/DdmHeap.h"
#include "alloc/HeapSource.h"
#include "alloc/MarkSweep.h"
#include "os/os.h"

#include <sys/time.h>
#include <sys/resource.h>
#include <limits.h>
#include <errno.h>

static const GcSpec kGcForMallocSpec = {
    true,  /* isPartial */
    false, /* isConcurrent */
    true,  /* doPreserve */
    "GC_FOR_ALLOC"
};

const GcSpec *GC_FOR_MALLOC = &kGcForMallocSpec;

static const GcSpec kGcConcurrentSpec = {
    true,  /* isPartial */
    true,  /* isConcurrent */
    true,  /* doPreserve */
    "GC_CONCURRENT"
};

const GcSpec *GC_CONCURRENT = &kGcConcurrentSpec;

static const GcSpec kGcExplicitSpec = {
    false, /* isPartial */
    true,  /* isConcurrent */
    true,  /* doPreserve */
    "GC_EXPLICIT"
};

const GcSpec *GC_EXPLICIT = &kGcExplicitSpec;

static const GcSpec kGcBeforeOomSpec = {
    false, /* isPartial */
    false, /* isConcurrent */
    false, /* doPreserve */
    "GC_BEFORE_OOM"
};

const GcSpec *GC_BEFORE_OOM = &kGcBeforeOomSpec;
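
/* The specs above trade pause time against completeness: a partial
 * collection does not collect the zygote heap, a concurrent collection
 * traces the heap while mutator threads run, and only GC_BEFORE_OOM
 * clears soft references (doPreserve == false).
 */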

/*
 * Initialize the GC heap.
 *
 * Returns true if successful, false otherwise.
 */
bool dvmHeapStartup()
{
    GcHeap *gcHeap;

    if (gDvm.heapGrowthLimit == 0) {
        gDvm.heapGrowthLimit = gDvm.heapMaximumSize;
    }

    gcHeap = dvmHeapSourceStartup(gDvm.heapStartingSize,
                                  gDvm.heapMaximumSize,
                                  gDvm.heapGrowthLimit);
    if (gcHeap == NULL) {
        return false;
    }
    gcHeap->ddmHpifWhen = 0;
    gcHeap->ddmHpsgWhen = 0;
    gcHeap->ddmHpsgWhat = 0;
    gcHeap->ddmNhsgWhen = 0;
    gcHeap->ddmNhsgWhat = 0;
    gDvm.gcHeap = gcHeap;

    /* Set up the lists we'll use for cleared reference objects.
     */
    gcHeap->clearedReferences = NULL;

    if (!dvmCardTableStartup(gDvm.heapMaximumSize)) {
        LOGE_HEAP("card table startup failed.");
        return false;
    }

    return true;
}

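/*
 * Perform the heap-source setup that must wait until after the zygote
 * has forked, such as splitting off a new active heap so the zygote
 * heap's pages can stay shared.
 */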
bool dvmHeapStartupAfterZygote()
{
    return dvmHeapSourceStartupAfterZygote();
}

void dvmHeapShutdown()
{
    //TODO: make sure we're locked
    if (gDvm.gcHeap != NULL) {
        dvmCardTableShutdown();
        /* Destroy the heap. Any outstanding pointers will point to
         * unmapped memory (unless/until someone else maps it). This
         * frees gDvm.gcHeap as a side-effect.
         */
        dvmHeapSourceShutdown(&gDvm.gcHeap);
    }
}

/*
 * Shutdown any threads internal to the heap.
 */
void dvmHeapThreadShutdown()
{
    dvmHeapSourceThreadShutdown();
}

/*
 * Grab the lock, but put ourselves into THREAD_VMWAIT if it looks like
 * we're going to have to wait on the mutex.
 */
bool dvmLockHeap()
{
    if (dvmTryLockMutex(&gDvm.gcHeapLock) != 0) {
        Thread *self;
        ThreadStatus oldStatus;

        self = dvmThreadSelf();
        oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmLockMutex(&gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }

    return true;
}

void dvmUnlockHeap()
{
    dvmUnlockMutex(&gDvm.gcHeapLock);
}

/* Do a full garbage collection, which may grow the
 * heap as a side-effect if the live set is large.
 */
static void gcForMalloc(bool clearSoftReferences)
{
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.gcCount++;
        if (self != NULL) {
            self->allocProf.gcCount++;
        }
    }
    /* This may adjust the soft limit as a side-effect.
     */
    const GcSpec *spec = clearSoftReferences ? GC_BEFORE_OOM : GC_FOR_MALLOC;
    dvmCollectGarbageInternal(spec);
}

/* Try as hard as possible to allocate some memory.
 */
static void *tryMalloc(size_t size)
{
    void *ptr;

    /* Don't try too hard if there's no way the allocation is
     * going to succeed. We have to collect SoftReferences before
     * throwing an OOME, though.
     */
    if (size >= gDvm.heapGrowthLimit) {
        LOGW("%zu byte allocation exceeds the %zu byte maximum heap size",
             size, gDvm.heapGrowthLimit);
        ptr = NULL;
        goto collect_soft_refs;
    }

    //TODO: figure out better heuristics
    //    There will be a lot of churn if someone allocates a bunch of
    //    big objects in a row, and we hit the frag case each time.
    //    A full GC for each.
    //    Maybe we grow the heap in bigger leaps
    //    Maybe we skip the GC if the size is large and we did one recently
    //    (number of allocations ago) (watch for thread effects)
    //    DeflateTest allocs a bunch of ~128k buffers w/in 0-5 allocs of each other
    //    (or, at least, there are only 0-5 objects swept each time)

    ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /*
     * The allocation failed. If the GC is running, block until it
     * completes and retry.
     */
    if (gDvm.gcHeap->gcRunning) {
        /*
         * The GC is concurrently tracing the heap. Release the heap
         * lock, wait for the GC to complete, and retry the allocation.
         */
        dvmWaitForConcurrentGcToComplete();
        ptr = dvmHeapSourceAlloc(size);
        if (ptr != NULL) {
            return ptr;
        }
    }
    /*
     * Another failure. Our thread was starved or there may be too
     * many live objects. Try a foreground GC. This will have no
     * effect if the concurrent GC is already running.
     */
    gcForMalloc(false);
    ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /* Even that didn't work; this is an exceptional state.
     * Try harder, growing the heap if necessary.
     */
    ptr = dvmHeapSourceAllocAndGrow(size);
    if (ptr != NULL) {
        size_t newHeapSize;

        newHeapSize = dvmHeapSourceGetIdealFootprint();
        //TODO: may want to grow a little bit more so that the amount of free
        //      space is equal to the old free space + the utilization slop for
        //      the new allocation.
        LOGI_HEAP("Grow heap (frag case) to "
                  "%zu.%03zuMB for %zu-byte allocation",
                  FRACTIONAL_MB(newHeapSize), size);
        return ptr;
    }

    /* Most allocations should have succeeded by now, so the heap
     * is really full, really fragmented, or the requested size is
     * really big. Do another GC, collecting SoftReferences this
     * time. The VM spec requires that all SoftReferences have
     * been collected and cleared before throwing an OOME.
     */
    //TODO: wait for the finalizers from the previous GC to finish
collect_soft_refs:
    LOGI_HEAP("Forcing collection of SoftReferences for %zu-byte allocation",
              size);
    gcForMalloc(true);
    ptr = dvmHeapSourceAllocAndGrow(size);
    if (ptr != NULL) {
        return ptr;
    }
    //TODO: maybe wait for finalizers and try one last time

    LOGE_HEAP("Out of memory on a %zu-byte allocation.", size);
    //TODO: tell the HeapSource to dump its state
    dvmDumpThread(dvmThreadSelf(), false);

    return NULL;
}

/* Throw an OutOfMemoryError if there's a thread to attach it to.
 * Avoid recursing.
 *
 * The caller must not be holding the heap lock, or else the allocations
 * in dvmThrowException() will deadlock.
 */
static void throwOOME()
{
    Thread *self;

    if ((self = dvmThreadSelf()) != NULL) {
        /* If the current (failing) dvmMalloc() happened as part of thread
         * creation/attachment before the thread became part of the root set,
         * we can't rely on the thread-local trackedAlloc table, so
         * we can't keep track of a real allocated OOME object. But, since
         * the thread is in the process of being created, it won't have
         * a useful stack anyway, so we may as well make things easier
         * by throwing the (stackless) pre-built OOME.
         */
        if (dvmIsOnThreadList(self) && !self->throwingOOME) {
            /* Let ourselves know that we tried to throw an OOM
             * error in the normal way in case we run out of
             * memory trying to allocate it inside dvmThrowException().
             */
            self->throwingOOME = true;

            /* Don't include a description string;
             * one fewer allocation.
             */
            dvmThrowOutOfMemoryError(NULL);
        } else {
            /*
             * This thread has already tried to throw an OutOfMemoryError,
             * which probably means that we're running out of memory
             * while recursively trying to throw.
             *
             * To avoid any more allocation attempts, "throw" a pre-built
             * OutOfMemoryError object (which won't have a useful stack trace).
             *
             * Note that since this call can't possibly allocate anything,
             * we don't care about the state of self->throwingOOME
             * (which will usually already be set).
             */
            dvmSetException(self, gDvm.outOfMemoryObj);
        }
        /* We're done with the possible recursion.
         */
        self->throwingOOME = false;
    }
}

/*
 * Allocate storage on the GC heap. We guarantee 8-byte alignment.
 *
 * The new storage is zeroed out.
 *
 * Note that, in rare cases, this could get called while a GC is in
 * progress. If a non-VM thread tries to attach itself through JNI,
 * it will need to allocate some objects. If this becomes annoying to
 * deal with, we can block it at the source, but holding the allocation
 * mutex should be enough.
 *
 * In rare circumstances (JNI AttachCurrentThread) we can be called
 * from a non-VM thread.
 *
 * Use ALLOC_DONT_TRACK when we either don't want to track an allocation
 * (because it's being done for the interpreter "new" operation and will
 * be part of the root set immediately) or we can't (because this allocation
 * is for a brand new thread).
 *
 * Returns NULL and throws an exception on failure.
 *
 * TODO: don't do a GC if the debugger thinks all threads are suspended
 */
void* dvmMalloc(size_t size, int flags)
{
    void *ptr;

    dvmLockHeap();

    /* Try as hard as possible to allocate some memory.
     */
    ptr = tryMalloc(size);
    if (ptr != NULL) {
        /* We've got the memory.
         */
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.allocCount++;
            gDvm.allocProf.allocSize += size;
            if (self != NULL) {
                self->allocProf.allocCount++;
                self->allocProf.allocSize += size;
            }
        }
    } else {
        /* The allocation failed.
         */

        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.failedAllocCount++;
            gDvm.allocProf.failedAllocSize += size;
            if (self != NULL) {
                self->allocProf.failedAllocCount++;
                self->allocProf.failedAllocSize += size;
            }
        }
    }

    dvmUnlockHeap();

    if (ptr != NULL) {
        /*
         * If caller hasn't asked us not to track it, add it to the
         * internal tracking list.
         */
        if ((flags & ALLOC_DONT_TRACK) == 0) {
            dvmAddTrackedAlloc((Object*)ptr, NULL);
        }
    } else {
        /*
         * The allocation failed; throw an OutOfMemoryError.
         */
        throwOOME();
    }

    return ptr;
}

/*
 * Returns true iff <obj> points to a valid allocated object.
 */
bool dvmIsValidObject(const Object* obj)
{
    /* Don't bother if it's NULL or not 8-byte aligned.
     */
    if (obj != NULL && ((uintptr_t)obj & (8-1)) == 0) {
        /* Even if the heap isn't locked, this shouldn't return
         * any false negatives. The only mutation that could
         * be happening is allocation, which means that another
         * thread could be in the middle of a read-modify-write
         * to add a new bit for a new object. However, that
         * RMW will have completed by the time any other thread
         * could possibly see the new pointer, so there is no
         * danger of dvmIsValidObject() being called on a valid
         * pointer whose bit isn't set.
         *
         * Freeing will only happen during the sweep phase, which
         * only happens while the heap is locked.
         */
        return dvmHeapSourceContains(obj);
    }
    return false;
}

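/*
 * Returns the size, in bytes, of the heap source chunk backing <obj>,
 * which may exceed the object's own size due to allocator rounding.
 */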
size_t dvmObjectSizeInHeap(const Object *obj)
{
    return dvmHeapSourceChunkSize(obj);
}

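/*
 * Sanity-check the root set and the live bitmap. Called before and/or
 * after a collection when pre-/post-GC verification is enabled.
 */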
static void verifyRootsAndHeap()
{
    dvmVerifyRoots();
    dvmVerifyBitmap(dvmHeapSourceGetLiveBits());
}

/*
 * Initiate garbage collection.
 *
 * NOTES:
 * - If we don't hold gDvm.threadListLock, it's possible for a thread to
 *   be added to the thread list while we work. The thread should NOT
 *   start executing, so this is only interesting when we start chasing
 *   thread stacks. (Before we do so, grab the lock.)
 *
 * We are not allowed to GC when the debugger has suspended the VM, which
 * is awkward because debugger requests can cause allocations. The easiest
 * way to enforce this is to refuse to GC on an allocation made by the
 * JDWP thread -- we have to expand the heap or fail.
 */
void dvmCollectGarbageInternal(const GcSpec* spec)
{
    GcHeap *gcHeap = gDvm.gcHeap;
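    /* For a concurrent collection, rootStart/rootEnd bracket the
     * initial root-marking pause and dirtyStart/dirtyEnd bracket the
     * re-marking pause; a blocking collection is a single pause timed
     * from rootStart to dirtyEnd.
     */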
    u4 rootStart = 0, rootEnd = 0;
    u4 dirtyStart = 0, dirtyEnd = 0;
    size_t numObjectsFreed, numBytesFreed;
    size_t currAllocated, currFootprint;
    size_t percentFree;
    int oldThreadPriority = INT_MAX;

    /* The heap lock must be held.
     */

    if (gcHeap->gcRunning) {
        LOGW_HEAP("Attempted recursive GC");
        return;
    }

    gcHeap->gcRunning = true;

    dvmSuspendAllThreads(SUSPEND_FOR_GC);
    rootStart = dvmGetRelativeTimeMsec();

    /*
     * If we are not marking concurrently, raise the priority of the
     * thread performing the garbage collection.
     */
    if (!spec->isConcurrent) {
        oldThreadPriority = os_raiseThreadPriority();
    }
    if (gDvm.preVerify) {
        LOGV_HEAP("Verifying roots and heap before GC");
        verifyRootsAndHeap();
    }

    dvmMethodTraceGCBegin();

    /* Set up the marking context.
     */
    if (!dvmHeapBeginMarkStep(spec->isPartial)) {
        LOGE_HEAP("dvmHeapBeginMarkStep failed; aborting");
        dvmAbort();
    }

    /* Mark the set of objects that are strongly reachable from the roots.
     */
    LOGD_HEAP("Marking...");
    dvmHeapMarkRootSet();

    /* dvmHeapScanMarkedObjects() will build the lists of known
     * instances of the Reference classes.
     */
    assert(gcHeap->softReferences == NULL);
    assert(gcHeap->weakReferences == NULL);
    assert(gcHeap->finalizerReferences == NULL);
    assert(gcHeap->phantomReferences == NULL);
    assert(gcHeap->clearedReferences == NULL);

    if (spec->isConcurrent) {
        /*
         * Resume threads while tracing from the roots. We unlock the
         * heap to allow mutator threads to allocate from free space.
         */
        rootEnd = dvmGetRelativeTimeMsec();
        dvmClearCardTable();
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
    }

    /* Recursively mark any objects that marked objects point to strongly.
     * If we're not collecting soft references, soft-reachable
     * objects will also be marked.
     */
    LOGD_HEAP("Recursing...");
    dvmHeapScanMarkedObjects();

    if (spec->isConcurrent) {
        /*
         * Re-acquire the heap lock and perform the final thread
         * suspension.
         */
        dvmLockHeap();
        dvmSuspendAllThreads(SUSPEND_FOR_GC);
        dirtyStart = dvmGetRelativeTimeMsec();
        /*
         * As no barrier intercepts root updates, we conservatively
         * assume all roots may be gray and re-mark them.
         */
        dvmHeapReMarkRootSet();
        /*
         * With the exception of reference objects and weak interned
         * strings, all gray objects should now be on dirty cards.
         */
        if (gDvm.verifyCardTable) {
            dvmVerifyCardTable();
        }
        /*
         * Recursively mark gray objects pointed to by the roots or by
         * heap objects dirtied during the concurrent mark.
         */
        dvmHeapReScanMarkedObjects();
    }

    /*
     * All strongly-reachable objects have now been marked. Process
     * weakly-reachable objects discovered while tracing.
     */
    dvmHeapProcessReferences(&gcHeap->softReferences,
                             spec->doPreserve == false,
                             &gcHeap->weakReferences,
                             &gcHeap->finalizerReferences,
                             &gcHeap->phantomReferences);

#if defined(WITH_JIT)
    /*
     * Patching a chaining cell is very cheap as it only updates 4 words. It's
     * the overhead of stopping all threads and synchronizing the I/D cache
     * that makes it expensive.
     *
     * Therefore we batch those work orders in a queue and go through them
     * when threads are suspended for GC.
     */
    dvmCompilerPerformSafePointChecks();
#endif

    LOGD_HEAP("Sweeping...");

    dvmHeapSweepSystemWeaks();

    /*
     * Live objects have a bit set in the mark bitmap, so swap the mark
     * and live bitmaps. The sweep can then proceed concurrently, viewing
     * the new live bitmap as the old mark bitmap, and vice versa.
     */
    dvmHeapSourceSwapBitmaps();

    if (gDvm.postVerify) {
        LOGV_HEAP("Verifying roots and heap after GC");
        verifyRootsAndHeap();
    }

    if (spec->isConcurrent) {
        dirtyEnd = dvmGetRelativeTimeMsec();
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
    }
    dvmHeapSweepUnmarkedObjects(spec->isPartial, spec->isConcurrent,
                                &numObjectsFreed, &numBytesFreed);
    LOGD_HEAP("Cleaning up...");
    dvmHeapFinishMarkStep();
    if (spec->isConcurrent) {
        dvmLockHeap();
    }

    LOGD_HEAP("Done.");

    /* Now's a good time to adjust the heap size, since
     * we know what our utilization is.
     *
     * This doesn't actually resize any memory;
     * it just lets the heap grow more when necessary.
     */
    dvmHeapSourceGrowForUtilization();

    currAllocated = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
    currFootprint = dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0);

    dvmMethodTraceGCEnd();
    LOGV_HEAP("GC finished");

    gcHeap->gcRunning = false;

    LOGV_HEAP("Resuming threads");

    if (spec->isConcurrent) {
        /*
         * Wake up any threads that blocked after a failed allocation
         * request.
         */
        dvmBroadcastCond(&gDvm.gcHeapCond);
    }

    if (!spec->isConcurrent) {
        dirtyEnd = dvmGetRelativeTimeMsec();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        /*
         * Restore the original thread scheduling priority if it was
         * changed at the start of the current garbage collection.
         */
        if (oldThreadPriority != INT_MAX) {
            os_lowerThreadPriority(oldThreadPriority);
        }
    }

    /*
     * Move queue of pending references back into Java.
     */
    dvmEnqueueClearedReferences(&gDvm.gcHeap->clearedReferences);

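    /* Log a one-line summary of the collection: bytes freed, percent
     * free, current utilization, and pause time(s). Concurrent
     * collections report the root and re-mark pauses separately.
     */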
    percentFree = 100 - (size_t)(100.0f * (float)currAllocated / currFootprint);
    if (!spec->isConcurrent) {
        u4 markSweepTime = dirtyEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        LOGD("%s freed %s%zuK, %zu%% free %zuK/%zuK, paused %ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             markSweepTime);
    } else {
        u4 rootTime = rootEnd - rootStart;
        u4 dirtyTime = dirtyEnd - dirtyStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        LOGD("%s freed %s%zuK, %zu%% free %zuK/%zuK, paused %ums+%ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             rootTime, dirtyTime);
    }
    if (gcHeap->ddmHpifWhen != 0) {
        LOGD_HEAP("Sending VM heap info to DDM");
        dvmDdmSendHeapInfo(gcHeap->ddmHpifWhen, false);
    }
    if (gcHeap->ddmHpsgWhen != 0) {
        LOGD_HEAP("Dumping VM heap to DDM");
        dvmDdmSendHeapSegments(false, false);
    }
    if (gcHeap->ddmNhsgWhen != 0) {
        LOGD_HEAP("Dumping native heap to DDM");
        dvmDdmSendHeapSegments(false, true);
    }
}

/*
 * If the concurrent GC is running, wait for it to finish. The caller
 * must hold the heap lock.
 *
 * Note: the second dvmChangeStatus() could stall if we were in RUNNING
 * on entry, and some other thread has asked us to suspend. In that
 * case we will be suspended with the heap lock held, which can lead to
 * deadlock if the other thread tries to do something with the managed heap.
 * For example, the debugger might suspend us and then execute a method that
 * allocates memory. We can avoid this situation by releasing the lock
 * before self-suspending. (The developer can work around this specific
 * situation by single-stepping the VM. Alternatively, we could disable
 * concurrent GC when the debugger is attached, but that might change
 * behavior more than is desirable.)
 *
 * This should not be a problem in production, because any GC-related
 * activity will grab the lock before issuing a suspend-all. (We may briefly
 * suspend when the GC thread calls dvmUnlockHeap before dvmResumeAllThreads,
 * but there's no risk of deadlock.)
 */
void dvmWaitForConcurrentGcToComplete()
{
    Thread *self = dvmThreadSelf();
    assert(self != NULL);
    while (gDvm.gcHeap->gcRunning) {
        ThreadStatus oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmWaitCond(&gDvm.gcHeapCond, &gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }
}