/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Garbage-collecting memory allocator.
 */
#include "Dalvik.h"
#include "alloc/HeapTable.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/DdmHeap.h"
#include "alloc/HeapSource.h"
#include "alloc/MarkSweep.h"

#include "utils/threads.h"      // need Android thread priorities
#define kInvalidPriority        10000

#include <cutils/sched_policy.h>

#include <sys/time.h>
#include <sys/resource.h>
#include <limits.h>
#include <errno.h>

#define kNonCollectableRefDefault   16
#define kFinalizableRefDefault      128

/*
 * Initialize the GC heap.
 *
 * Returns true if successful, false otherwise.
 */
bool dvmHeapStartup()
{
    GcHeap *gcHeap;

#if defined(WITH_ALLOC_LIMITS)
    gDvm.checkAllocLimits = false;
    gDvm.allocationLimit = -1;
#endif

    gcHeap = dvmHeapSourceStartup(gDvm.heapSizeStart, gDvm.heapSizeMax);
    if (gcHeap == NULL) {
        return false;
    }
    gcHeap->heapWorkerCurrentObject = NULL;
    gcHeap->heapWorkerCurrentMethod = NULL;
    gcHeap->heapWorkerInterpStartTime = 0LL;
    gcHeap->softReferenceCollectionState = SR_COLLECT_NONE;
    gcHeap->softReferenceHeapSizeThreshold = gDvm.heapSizeStart;
    gcHeap->ddmHpifWhen = 0;
    gcHeap->ddmHpsgWhen = 0;
    gcHeap->ddmHpsgWhat = 0;
    gcHeap->ddmNhsgWhen = 0;
    gcHeap->ddmNhsgWhat = 0;
#if WITH_HPROF
    gcHeap->hprofDumpOnGc = false;
    gcHeap->hprofContext = NULL;
#endif

    /* This needs to be set before we call dvmHeapInitHeapRefTable().
     */
    gDvm.gcHeap = gcHeap;

    /* Set up the table we'll use for ALLOC_NO_GC.
     */
    if (!dvmHeapInitHeapRefTable(&gcHeap->nonCollectableRefs,
                           kNonCollectableRefDefault))
    {
        LOGE_HEAP("Can't allocate ALLOC_NO_GC table\n");
        goto fail;
    }

    /* Set up the lists and lock we'll use for finalizable
     * and reference objects.
     */
    dvmInitMutex(&gDvm.heapWorkerListLock);
    gcHeap->finalizableRefs = NULL;
    gcHeap->pendingFinalizationRefs = NULL;
    gcHeap->referenceOperations = NULL;

    /* Initialize the HeapWorker locks and other state
     * that the GC uses.
     */
    dvmInitializeHeapWorkerState();

    return true;

fail:
    gDvm.gcHeap = NULL;
    dvmHeapSourceShutdown(gcHeap);
    return false;
}

bool dvmHeapStartupAfterZygote()
{
    /* Update our idea of the last GC start time so that we
     * don't use the last time that Zygote happened to GC.
     */
    gDvm.gcHeap->gcStartTime = dvmGetRelativeTimeUsec();

    return dvmHeapSourceStartupAfterZygote();
}

void dvmHeapShutdown()
{
//TODO: make sure we're locked
    if (gDvm.gcHeap != NULL) {
        GcHeap *gcHeap;

        gcHeap = gDvm.gcHeap;
        gDvm.gcHeap = NULL;

        /* Tables are allocated on the native heap;
         * they need to be cleaned up explicitly.
         * The process may stick around, so we don't
         * want to leak any native memory.
         */
        dvmHeapFreeHeapRefTable(&gcHeap->nonCollectableRefs);

        dvmHeapFreeLargeTable(gcHeap->finalizableRefs);
        gcHeap->finalizableRefs = NULL;

        dvmHeapFreeLargeTable(gcHeap->pendingFinalizationRefs);
        gcHeap->pendingFinalizationRefs = NULL;

        dvmHeapFreeLargeTable(gcHeap->referenceOperations);
        gcHeap->referenceOperations = NULL;

        /* Destroy the heap.  Any outstanding pointers
         * will point to unmapped memory (unless/until
         * someone else maps it).  This frees gcHeap
         * as a side-effect.
         */
        dvmHeapSourceShutdown(gcHeap);
    }
}

/*
 * We've been asked to allocate something we can't, e.g. an array so
 * large that (length * elementWidth) is larger than 2^31.
 *
 * _The Java Programming Language_, 4th edition, says, "you can be sure
 * that all SoftReferences to softly reachable objects will be cleared
 * before an OutOfMemoryError is thrown."
 *
 * It's unclear whether that holds for all situations where an OOM can
 * be thrown, or just in the context of an allocation that fails due
 * to lack of heap space.  For simplicity we just throw the exception.
 *
 * (OOM due to actually running out of space is handled elsewhere.)
 */
void dvmThrowBadAllocException(const char* msg)
{
    dvmThrowException("Ljava/lang/OutOfMemoryError;", msg);
}

/*
 * Grab the lock, but put ourselves into THREAD_VMWAIT if it looks like
 * we're going to have to wait on the mutex.
 */
bool dvmLockHeap()
{
    if (pthread_mutex_trylock(&gDvm.gcHeapLock) != 0) {
        Thread *self;
        ThreadStatus oldStatus;
        int cc;

        self = dvmThreadSelf();
        if (self != NULL) {
            oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        } else {
            LOGI("ODD: waiting on heap lock, no self\n");
            oldStatus = -1; // shut up gcc
        }

        cc = pthread_mutex_lock(&gDvm.gcHeapLock);
        assert(cc == 0);

        if (self != NULL) {
            dvmChangeStatus(self, oldStatus);
        }
    }

    return true;
}

void dvmUnlockHeap()
{
    dvmUnlockMutex(&gDvm.gcHeapLock);
}
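
/* Example (illustrative sketch, not part of the VM): a caller that needs
 * the heap lock brackets its critical section with dvmLockHeap() and
 * dvmUnlockHeap().  The helper name below is hypothetical.
 */
#if 0
static void exampleWithHeapLocked(void)
{
    dvmLockHeap();      /* may block; thread waits in THREAD_VMWAIT */
    /* ... examine or modify GC heap state ... */
    dvmUnlockHeap();
}
#endif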

/* Pop an object from the list of pending finalizations and
 * reference clears/enqueues, and return the object.
 * The caller must call dvmReleaseTrackedAlloc()
 * on the object when finished.
 *
 * Typically only called by the heap worker thread.
 */
Object *dvmGetNextHeapWorkerObject(HeapWorkerOperation *op)
{
    Object *obj;
    LargeHeapRefTable *table;
    GcHeap *gcHeap = gDvm.gcHeap;

    assert(op != NULL);

    obj = NULL;

    dvmLockMutex(&gDvm.heapWorkerListLock);

    /* We must handle reference operations before finalizations.
     * If:
     *     a) Someone subclasses WeakReference and overrides clear()
     *     b) A reference of this type is the last reference to
     *        a finalizable object
     * then we need to guarantee that the overridden clear() is called
     * on the reference before finalize() is called on the referent.
     * Both of these operations will always be scheduled at the same
     * time, so handling reference operations first will guarantee
     * the required order.
     */
    obj = dvmHeapGetNextObjectFromLargeTable(&gcHeap->referenceOperations);
    if (obj != NULL) {
        uintptr_t workBits;

        workBits = (uintptr_t)obj & (WORKER_CLEAR | WORKER_ENQUEUE);
        assert(workBits != 0);
        obj = (Object *)((uintptr_t)obj & ~(WORKER_CLEAR | WORKER_ENQUEUE));

        *op = workBits;
    } else {
        obj = dvmHeapGetNextObjectFromLargeTable(
                &gcHeap->pendingFinalizationRefs);
        if (obj != NULL) {
            *op = WORKER_FINALIZE;
        }
    }

    if (obj != NULL) {
        /* Don't let the GC collect the object until the
         * worker thread is done with it.
         *
         * This call is safe;  it uses thread-local storage
         * and doesn't acquire any locks.
         */
        dvmAddTrackedAlloc(obj, NULL);
    }

    dvmUnlockMutex(&gDvm.heapWorkerListLock);

    return obj;
}
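
/* Example (hypothetical sketch of the consuming side): the heap worker
 * thread drains this queue roughly as follows.  The dispatch helper named
 * here is a placeholder, not the real HeapWorker internals.
 */
#if 0
static void exampleHeapWorkerLoop(void)
{
    HeapWorkerOperation op;
    Object *obj;

    while ((obj = dvmGetNextHeapWorkerObject(&op)) != NULL) {
        /* op is WORKER_FINALIZE, or some combination of
         * WORKER_CLEAR and WORKER_ENQUEUE.
         */
        performHeapWorkerOperation(obj, op);    /* placeholder */

        /* Required by the contract above. */
        dvmReleaseTrackedAlloc(obj, NULL);
    }
}
#endif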

/* Used for a heap size change hysteresis to avoid collecting
 * SoftReferences when the heap only grows by a small amount.
 */
#define SOFT_REFERENCE_GROWTH_SLACK (128 * 1024)

/* Whenever the effective heap size may have changed,
 * this function must be called.
 */
void dvmHeapSizeChanged()
{
    GcHeap *gcHeap = gDvm.gcHeap;
    size_t currentHeapSize;

    currentHeapSize = dvmHeapSourceGetIdealFootprint();

    /* See if the heap size has changed enough that we should care
     * about it.
     */
    if (currentHeapSize <= gcHeap->softReferenceHeapSizeThreshold -
            4 * SOFT_REFERENCE_GROWTH_SLACK)
    {
        /* The heap has shrunk enough that we'll use this as a new
         * threshold.  Since we're doing better on space, there's
         * no need to collect any SoftReferences.
         *
         * This is 4x the growth hysteresis because we don't want
         * to snap down so easily after a shrink.  If we just cleared
         * up a bunch of SoftReferences, we don't want to disallow
         * any new ones from being created.
         * TODO: determine if the 4x is important, needed, or even good
         */
        gcHeap->softReferenceHeapSizeThreshold = currentHeapSize;
        gcHeap->softReferenceCollectionState = SR_COLLECT_NONE;
    } else if (currentHeapSize >= gcHeap->softReferenceHeapSizeThreshold +
            SOFT_REFERENCE_GROWTH_SLACK)
    {
        /* The heap has grown enough to warrant collecting SoftReferences.
         */
        gcHeap->softReferenceHeapSizeThreshold = currentHeapSize;
        gcHeap->softReferenceCollectionState = SR_COLLECT_SOME;
    }
}
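
/* Worked example of the hysteresis above (numbers are illustrative):
 * with a threshold of 4096KB and SOFT_REFERENCE_GROWTH_SLACK = 128KB,
 * growing to >= 4096 + 128 = 4224KB switches to SR_COLLECT_SOME and
 * raises the threshold, while only a shrink to <= 4096 - 4*128 = 3584KB
 * lowers the threshold and resets the state to SR_COLLECT_NONE.
 * Anything in between leaves both the threshold and the collection
 * state alone.
 */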


/* Do a full garbage collection, which may grow the
 * heap as a side-effect if the live set is large.
 */
static void gcForMalloc(bool collectSoftReferences)
{
#ifdef WITH_PROFILER
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.gcCount++;
        if (self != NULL) {
            self->allocProf.gcCount++;
        }
    }
#endif
    /* This may adjust the soft limit as a side-effect.
     */
    LOGD_HEAP("dvmMalloc initiating GC%s\n",
            collectSoftReferences ? "(collect SoftReferences)" : "");
    dvmCollectGarbageInternal(collectSoftReferences);
}

/* Try as hard as possible to allocate some memory.
 */
static DvmHeapChunk *tryMalloc(size_t size)
{
    DvmHeapChunk *hc;

    /* Don't try too hard if there's no way the allocation is
     * going to succeed.  We have to collect SoftReferences before
     * throwing an OOME, though.
     */
    if (size >= gDvm.heapSizeMax) {
        LOGW_HEAP("dvmMalloc(%zu/0x%08zx): "
                "someone's allocating a huge buffer\n", size, size);
        hc = NULL;
        goto collect_soft_refs;
    }

//TODO: figure out better heuristics
//    There will be a lot of churn if someone allocates a bunch of
//    big objects in a row, and we hit the frag case each time.
//    A full GC for each.
//    Maybe we grow the heap in bigger leaps
//    Maybe we skip the GC if the size is large and we did one recently
//      (number of allocations ago) (watch for thread effects)
//    DeflateTest allocs a bunch of ~128k buffers w/in 0-5 allocs of each other
//      (or, at least, there are only 0-5 objects swept each time)

    hc = dvmHeapSourceAlloc(size + sizeof(DvmHeapChunk));
    if (hc != NULL) {
        return hc;
    }

    /* The allocation failed.  Free up some space by doing
     * a full garbage collection.  This may grow the heap
     * if the live set is sufficiently large.
     */
    gcForMalloc(false);
    hc = dvmHeapSourceAlloc(size + sizeof(DvmHeapChunk));
    if (hc != NULL) {
        return hc;
    }

    /* Even that didn't work;  this is an exceptional state.
     * Try harder, growing the heap if necessary.
     */
    hc = dvmHeapSourceAllocAndGrow(size + sizeof(DvmHeapChunk));
    dvmHeapSizeChanged();
    if (hc != NULL) {
        size_t newHeapSize;

        newHeapSize = dvmHeapSourceGetIdealFootprint();
//TODO: may want to grow a little bit more so that the amount of free
//      space is equal to the old free space + the utilization slop for
//      the new allocation.
        LOGI_HEAP("Grow heap (frag case) to "
                "%zu.%03zuMB for %zu-byte allocation\n",
                FRACTIONAL_MB(newHeapSize), size);
        return hc;
    }

    /* Most allocations should have succeeded by now, so the heap
     * is really full, really fragmented, or the requested size is
     * really big.  Do another GC, collecting SoftReferences this
     * time.  The VM spec requires that all SoftReferences have
     * been collected and cleared before throwing an OOME.
     */
//TODO: wait for the finalizers from the previous GC to finish
collect_soft_refs:
    LOGI_HEAP("Forcing collection of SoftReferences for %zu-byte allocation\n",
            size);
    gcForMalloc(true);
    hc = dvmHeapSourceAllocAndGrow(size + sizeof(DvmHeapChunk));
    dvmHeapSizeChanged();
    if (hc != NULL) {
        return hc;
    }
//TODO: maybe wait for finalizers and try one last time

    LOGE_HEAP("Out of memory on a %zd-byte allocation.\n", size);
//TODO: tell the HeapSource to dump its state
    dvmDumpThread(dvmThreadSelf(), false);

    return NULL;
}

/* Throw an OutOfMemoryError if there's a thread to attach it to.
 * Avoid recursing.
 *
 * The caller must not be holding the heap lock, or else the allocations
 * in dvmThrowException() will deadlock.
 */
static void throwOOME()
{
    Thread *self;

    if ((self = dvmThreadSelf()) != NULL) {
        /* If the current (failing) dvmMalloc() happened as part of thread
         * creation/attachment before the thread became part of the root set,
         * we can't rely on the thread-local trackedAlloc table, so
         * we can't keep track of a real allocated OOME object.  But, since
         * the thread is in the process of being created, it won't have
         * a useful stack anyway, so we may as well make things easier
         * by throwing the (stackless) pre-built OOME.
         */
        if (dvmIsOnThreadList(self) && !self->throwingOOME) {
            /* Let ourselves know that we tried to throw an OOM
             * error in the normal way in case we run out of
             * memory trying to allocate it inside dvmThrowException().
             */
            self->throwingOOME = true;

            /* Don't include a description string;
             * one fewer allocation.
             */
            dvmThrowException("Ljava/lang/OutOfMemoryError;", NULL);
        } else {
            /*
             * This thread has already tried to throw an OutOfMemoryError,
             * which probably means that we're running out of memory
             * while recursively trying to throw.
             *
             * To avoid any more allocation attempts, "throw" a pre-built
             * OutOfMemoryError object (which won't have a useful stack trace).
             *
             * Note that since this call can't possibly allocate anything,
             * we don't care about the state of self->throwingOOME
             * (which will usually already be set).
             */
            dvmSetException(self, gDvm.outOfMemoryObj);
        }
        /* We're done with the possible recursion.
         */
        self->throwingOOME = false;
    }
}

/*
 * Allocate storage on the GC heap.  We guarantee 8-byte alignment.
 *
 * The new storage is zeroed out.
 *
 * Note that, in rare cases, this could get called while a GC is in
 * progress.  If a non-VM thread tries to attach itself through JNI,
 * it will need to allocate some objects.  If this becomes annoying to
 * deal with, we can block it at the source, but holding the allocation
 * mutex should be enough.
 *
 * In rare circumstances (JNI AttachCurrentThread) we can be called
 * from a non-VM thread.
 *
 * We implement ALLOC_NO_GC by maintaining an internal list of objects
 * that should not be collected.  This requires no actual flag storage in
 * the object itself, which is good, but makes flag queries expensive.
 *
 * Use ALLOC_DONT_TRACK when we either don't want to track an allocation
 * (because it's being done for the interpreter "new" operation and will
 * be part of the root set immediately) or we can't (because this allocation
 * is for a brand new thread).
 *
 * Returns NULL and throws an exception on failure.
 *
 * TODO: don't do a GC if the debugger thinks all threads are suspended
 */
void* dvmMalloc(size_t size, int flags)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    DvmHeapChunk *hc;
    void *ptr;
    bool triedGc, triedGrowing;

#if 0
    /* handy for spotting large allocations */
    if (size >= 100000) {
        LOGI("dvmMalloc(%d):\n", size);
        dvmDumpThread(dvmThreadSelf(), false);
    }
#endif

#if defined(WITH_ALLOC_LIMITS)
    /*
     * See if they've exceeded the allocation limit for this thread.
     *
     * A limit value of -1 means "no limit".
     *
     * This is enabled at compile time because it requires us to do a
     * TLS lookup for the Thread pointer.  This has enough of a performance
     * impact that we don't want to do it if we don't have to.  (Now that
     * we're using gDvm.checkAllocLimits we may want to reconsider this,
     * but it's probably still best to just compile the check out of
     * production code -- one less thing to hit on every allocation.)
     */
    if (gDvm.checkAllocLimits) {
        Thread* self = dvmThreadSelf();
        if (self != NULL) {
            int count = self->allocLimit;
            if (count > 0) {
                self->allocLimit--;
            } else if (count == 0) {
                /* fail! */
                assert(!gDvm.initializing);
                self->allocLimit = -1;
                dvmThrowException("Ldalvik/system/AllocationLimitError;",
                    "thread allocation limit exceeded");
                return NULL;
            }
        }
    }

    if (gDvm.allocationLimit >= 0) {
        assert(!gDvm.initializing);
        gDvm.allocationLimit = -1;
        dvmThrowException("Ldalvik/system/AllocationLimitError;",
            "global allocation limit exceeded");
        return NULL;
    }
#endif

    dvmLockHeap();

    /* Try as hard as possible to allocate some memory.
     */
    hc = tryMalloc(size);
    if (hc != NULL) {
alloc_succeeded:
        /* We've got the memory.
         */
        if ((flags & ALLOC_FINALIZABLE) != 0) {
            /* This object is an instance of a class that
             * overrides finalize().  Add it to the finalizable list.
             *
             * Note that until DVM_OBJECT_INIT() is called on this
             * object, its clazz will be NULL.  Since the object is
             * in this table, it will be scanned as part of the root
             * set.  scanObject() explicitly deals with the NULL clazz.
             */
            if (!dvmHeapAddRefToLargeTable(&gcHeap->finalizableRefs,
                                    (Object *)hc->data))
            {
                LOGE_HEAP("dvmMalloc(): no room for any more "
                        "finalizable objects\n");
                dvmAbort();
            }
        }

#if WITH_OBJECT_HEADERS
        hc->header = OBJECT_HEADER;
        hc->birthGeneration = gGeneration;
#endif
        ptr = hc->data;

        /* The caller may not want us to collect this object.
         * If not, throw it in the nonCollectableRefs table, which
         * will be added to the root set when we GC.
         *
         * Note that until DVM_OBJECT_INIT() is called on this
         * object, its clazz will be NULL.  Since the object is
         * in this table, it will be scanned as part of the root
         * set.  scanObject() explicitly deals with the NULL clazz.
         */
        if ((flags & ALLOC_NO_GC) != 0) {
            if (!dvmHeapAddToHeapRefTable(&gcHeap->nonCollectableRefs, ptr)) {
                LOGE_HEAP("dvmMalloc(): no room for any more "
                        "ALLOC_NO_GC objects: %zd\n",
                        dvmHeapNumHeapRefTableEntries(
                                &gcHeap->nonCollectableRefs));
                dvmAbort();
            }
        }

#ifdef WITH_PROFILER
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.allocCount++;
            gDvm.allocProf.allocSize += size;
            if (self != NULL) {
                self->allocProf.allocCount++;
                self->allocProf.allocSize += size;
            }
        }
#endif
    } else {
        /* The allocation failed.
         */
        ptr = NULL;

#ifdef WITH_PROFILER
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.failedAllocCount++;
            gDvm.allocProf.failedAllocSize += size;
            if (self != NULL) {
                self->allocProf.failedAllocCount++;
                self->allocProf.failedAllocSize += size;
            }
        }
#endif
    }

    dvmUnlockHeap();

    if (ptr != NULL) {
        /*
         * If this block is immediately GCable, and they haven't asked us not
         * to track it, add it to the internal tracking list.
         *
         * If there's no "self" yet, we can't track it.  Calls made before
         * the Thread exists should use ALLOC_NO_GC.
         */
        if ((flags & (ALLOC_DONT_TRACK | ALLOC_NO_GC)) == 0) {
            dvmAddTrackedAlloc(ptr, NULL);
        }
    } else {
        /*
         * The allocation failed; throw an OutOfMemoryError.
         */
        throwOOME();
    }

    return ptr;
}
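
/* Example (illustrative sketch of a typical internal caller, along the
 * lines described above; the objectSize field is from the VM's
 * ClassObject, but the wrapper function itself is hypothetical).
 * Flags of 0 mean a tracked, collectable allocation.
 */
#if 0
static Object *exampleAllocObject(ClassObject *clazz)
{
    Object *newObj;

    /* Zeroed, 8-byte-aligned storage; tracked so the GC won't reclaim
     * it before the caller can store a reference to it somewhere.
     */
    newObj = (Object *)dvmMalloc(clazz->objectSize, 0);
    if (newObj != NULL) {
        DVM_OBJECT_INIT(newObj, clazz);     /* sets newObj->clazz */
        /* ... publish newObj somewhere reachable, then ... */
        dvmReleaseTrackedAlloc(newObj, NULL);
    }
    return newObj;      /* NULL means an exception has been thrown */
}
#endif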

/*
 * Returns true iff <obj> points to a valid allocated object.
 */
bool dvmIsValidObject(const Object* obj)
{
    const DvmHeapChunk *hc;

    /* Don't bother if it's NULL or not 8-byte aligned.
     */
    hc = ptr2chunk(obj);
    if (obj != NULL && ((uintptr_t)hc & (8-1)) == 0) {
        /* Even if the heap isn't locked, this shouldn't return
         * any false negatives.  The only mutation that could
         * be happening is allocation, which means that another
         * thread could be in the middle of a read-modify-write
         * to add a new bit for a new object.  However, that
         * RMW will have completed by the time any other thread
         * could possibly see the new pointer, so there is no
         * danger of dvmIsValidObject() being called on a valid
         * pointer whose bit isn't set.
         *
         * Freeing will only happen during the sweep phase, which
         * only happens while the heap is locked.
         */
        return dvmHeapSourceContains(hc);
    }
    return false;
}
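
/* Example (illustrative): this predicate is cheap enough for debug
 * assertions that sanity-check pointers crossing the JNI or interpreter
 * boundary, e.g.
 *
 *     assert(obj == NULL || dvmIsValidObject(obj));
 */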

/*
 * Clear flags that were passed into dvmMalloc() et al.
 * e.g., ALLOC_NO_GC, ALLOC_DONT_TRACK.
 */
void dvmClearAllocFlags(Object *obj, int mask)
{
    if ((mask & ALLOC_NO_GC) != 0) {
        dvmLockHeap();
        if (dvmIsValidObject(obj)) {
            if (!dvmHeapRemoveFromHeapRefTable(&gDvm.gcHeap->nonCollectableRefs,
                                               obj))
            {
                LOGE_HEAP("dvmClearAllocFlags(): failed to remove "
                        "ALLOC_NO_GC bit from object 0x%08x\n",
                        (uintptr_t)obj);
                dvmAbort();
            }
//TODO: shrink if the table is very empty
        }
        dvmUnlockHeap();
    }

    if ((mask & ALLOC_DONT_TRACK) != 0) {
        dvmReleaseTrackedAlloc(obj, NULL);
    }
}
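
/* Example (hypothetical sketch of the ALLOC_NO_GC lifecycle): an object
 * allocated before it can be reached from the root set is pinned at
 * allocation time and released once a rooted reference exists.  The
 * wrapper function name is illustrative.
 */
#if 0
static Object *examplePinnedAlloc(ClassObject *clazz)
{
    Object *obj = (Object *)dvmMalloc(clazz->objectSize, ALLOC_NO_GC);
    if (obj != NULL) {
        DVM_OBJECT_INIT(obj, clazz);
        /* ... store obj somewhere the GC can see it ... */
        dvmClearAllocFlags(obj, ALLOC_NO_GC);   /* now collectable */
    }
    return obj;
}
#endif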

size_t dvmObjectSizeInHeap(const Object *obj)
{
    return dvmHeapSourceChunkSize(ptr2chunk(obj)) - sizeof(DvmHeapChunk);
}

/*
 * Initiate garbage collection.
 *
 * NOTES:
 * - If we don't hold gDvm.threadListLock, it's possible for a thread to
 *   be added to the thread list while we work.  The thread should NOT
 *   start executing, so this is only interesting when we start chasing
 *   thread stacks.  (Before we do so, grab the lock.)
 *
 * We are not allowed to GC when the debugger has suspended the VM, which
 * is awkward because debugger requests can cause allocations.  The easiest
 * way to enforce this is to refuse to GC on an allocation made by the
 * JDWP thread -- we have to expand the heap or fail.
 */
void dvmCollectGarbageInternal(bool collectSoftReferences)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    Object *softReferences;
    Object *weakReferences;
    Object *phantomReferences;

    u8 now;
    s8 timeSinceLastGc;
    s8 gcElapsedTime;
    int numFreed;
    size_t sizeFreed;

#if DVM_TRACK_HEAP_MARKING
    /* Since weak and soft references are always cleared,
     * they don't require any marking.
     * (Soft are lumped into strong when they aren't cleared.)
     */
    size_t strongMarkCount = 0;
    size_t strongMarkSize = 0;
    size_t finalizeMarkCount = 0;
    size_t finalizeMarkSize = 0;
    size_t phantomMarkCount = 0;
    size_t phantomMarkSize = 0;
#endif

    /* The heap lock must be held.
     */

    if (gcHeap->gcRunning) {
        LOGW_HEAP("Attempted recursive GC\n");
        return;
    }
    gcHeap->gcRunning = true;
    now = dvmGetRelativeTimeUsec();
    if (gcHeap->gcStartTime != 0) {
        timeSinceLastGc = (now - gcHeap->gcStartTime) / 1000;
    } else {
        timeSinceLastGc = 0;
    }
    gcHeap->gcStartTime = now;

    LOGV_HEAP("GC starting -- suspending threads\n");

    dvmSuspendAllThreads(SUSPEND_FOR_GC);

    /* Get the priority (the "nice" value) of the current thread.  The
     * getpriority() call can legitimately return -1, so we have to
     * explicitly test errno.
     */
    errno = 0;
    int oldThreadPriority = kInvalidPriority;
    int priorityResult = getpriority(PRIO_PROCESS, 0);
    if (errno != 0) {
        LOGI_HEAP("getpriority(self) failed: %s\n", strerror(errno));
    } else if (priorityResult > ANDROID_PRIORITY_NORMAL) {
        /* Current value is numerically greater than "normal", which
         * in backward UNIX terms means lower priority.
         */

        if (priorityResult >= ANDROID_PRIORITY_BACKGROUND) {
            set_sched_policy(dvmGetSysThreadId(), SP_FOREGROUND);
        }

        if (setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_NORMAL) != 0) {
            LOGI_HEAP("Unable to elevate priority from %d to %d\n",
                priorityResult, ANDROID_PRIORITY_NORMAL);
        } else {
            /* priority elevated; save value so we can restore it later */
            LOGD_HEAP("Elevating priority from %d to %d\n",
                priorityResult, ANDROID_PRIORITY_NORMAL);
            oldThreadPriority = priorityResult;
        }
    }

    /* Wait for the HeapWorker thread to block.
     * (It may also already be suspended in interp code,
     * in which case it's not holding heapWorkerLock.)
     */
    dvmLockMutex(&gDvm.heapWorkerLock);

    /* Make sure that the HeapWorker thread hasn't become
     * wedged inside interp code.  If it has, this call will
     * print a message and abort the VM.
     */
    dvmAssertHeapWorkerThreadRunning();

    /* Lock the pendingFinalizationRefs list.
     *
     * Acquire the lock after suspending so the finalizer
     * thread can't block in the RUNNING state while
     * we try to suspend.
     */
    dvmLockMutex(&gDvm.heapWorkerListLock);

#ifdef WITH_PROFILER
    dvmMethodTraceGCBegin();
#endif

#if WITH_HPROF

/* Set DUMP_HEAP_ON_DDMS_UPDATE to 1 to enable heap dumps
 * whenever DDMS requests a heap update (HPIF chunk).
 * The output files will appear in /data/misc, which must
 * already exist.
 * You must define "WITH_HPROF := true" in your buildspec.mk
 * and recompile libdvm for this to work.
 *
 * To enable stack traces for each allocation, define
 * "WITH_HPROF_STACK := true" in buildspec.mk.  This option slows down
 * allocations and also requires 8 additional bytes per object on the
 * GC heap.
 */
#define DUMP_HEAP_ON_DDMS_UPDATE 0
#if DUMP_HEAP_ON_DDMS_UPDATE
    gcHeap->hprofDumpOnGc |= (gcHeap->ddmHpifWhen != 0);
#endif

    if (gcHeap->hprofDumpOnGc) {
        char nameBuf[128];

        gcHeap->hprofResult = -1;

        if (gcHeap->hprofFileName == NULL) {
            /* no filename was provided; invent one */
            sprintf(nameBuf, "/data/misc/heap-dump-tm%d-pid%d.hprof",
                (int) time(NULL), (int) getpid());
            gcHeap->hprofFileName = nameBuf;
        }
        gcHeap->hprofContext = hprofStartup(gcHeap->hprofFileName);
        if (gcHeap->hprofContext != NULL) {
            hprofStartHeapDump(gcHeap->hprofContext);
        }
        gcHeap->hprofDumpOnGc = false;
        gcHeap->hprofFileName = NULL;
    }
#endif

    if (timeSinceLastGc < 10000) {
        LOGD_HEAP("GC! (%dms since last GC)\n",
                (int)timeSinceLastGc);
    } else {
        LOGD_HEAP("GC! (%d sec since last GC)\n",
                (int)(timeSinceLastGc / 1000));
    }
#if DVM_TRACK_HEAP_MARKING
    gcHeap->markCount = 0;
    gcHeap->markSize = 0;
#endif

    /* Set up the marking context.
     */
    if (!dvmHeapBeginMarkStep()) {
        LOGE_HEAP("dvmHeapBeginMarkStep failed; aborting\n");
        dvmAbort();
    }

    /* Mark the set of objects that are strongly reachable from the roots.
     */
    LOGD_HEAP("Marking...");
    dvmHeapMarkRootSet();

    /* dvmHeapScanMarkedObjects() will build the lists of known
     * instances of the Reference classes.
     */
    gcHeap->softReferences = NULL;
    gcHeap->weakReferences = NULL;
    gcHeap->phantomReferences = NULL;

    /* Make sure that we don't hard-mark the referents of Reference
     * objects by default.
     */
    gcHeap->markAllReferents = false;

    /* Don't mark SoftReferences if our caller wants us to collect them.
     * This has to be set before calling dvmHeapScanMarkedObjects().
     */
    if (collectSoftReferences) {
        gcHeap->softReferenceCollectionState = SR_COLLECT_ALL;
    }

    /* Recursively mark any objects that marked objects point to strongly.
     * If we're not collecting soft references, soft-reachable
     * objects will also be marked.
     */
    LOGD_HEAP("Recursing...");
    dvmHeapScanMarkedObjects();
#if DVM_TRACK_HEAP_MARKING
    strongMarkCount = gcHeap->markCount;
    strongMarkSize = gcHeap->markSize;
    gcHeap->markCount = 0;
    gcHeap->markSize = 0;
#endif

    /* Latch these so that the other calls to dvmHeapScanMarkedObjects() don't
     * mess with them.
     */
    softReferences = gcHeap->softReferences;
    weakReferences = gcHeap->weakReferences;
    phantomReferences = gcHeap->phantomReferences;

    /* All strongly-reachable objects have now been marked.
     */
    if (gcHeap->softReferenceCollectionState != SR_COLLECT_NONE) {
        LOGD_HEAP("Handling soft references...");
        dvmHeapHandleReferences(softReferences, REF_SOFT);
        // markCount always zero

        /* Now that we've tried collecting SoftReferences,
         * fall back to not collecting them.  If the heap
         * grows, we will start collecting again.
         */
        gcHeap->softReferenceCollectionState = SR_COLLECT_NONE;
    } // else dvmHeapScanMarkedObjects() already marked the soft-reachable set
    LOGD_HEAP("Handling weak references...");
    dvmHeapHandleReferences(weakReferences, REF_WEAK);
    // markCount always zero

    /* Once all weak-reachable objects have been taken
     * care of, any remaining unmarked objects can be finalized.
     */
    LOGD_HEAP("Finding finalizations...");
    dvmHeapScheduleFinalizations();
#if DVM_TRACK_HEAP_MARKING
    finalizeMarkCount = gcHeap->markCount;
    finalizeMarkSize = gcHeap->markSize;
    gcHeap->markCount = 0;
    gcHeap->markSize = 0;
#endif

    /* Any remaining objects that are not pending finalization
     * could be phantom-reachable.  This will mark any phantom-reachable
     * objects, as well as enqueue their references.
     */
    LOGD_HEAP("Handling phantom references...");
    dvmHeapHandleReferences(phantomReferences, REF_PHANTOM);
#if DVM_TRACK_HEAP_MARKING
    phantomMarkCount = gcHeap->markCount;
    phantomMarkSize = gcHeap->markSize;
    gcHeap->markCount = 0;
    gcHeap->markSize = 0;
#endif

//TODO: take care of JNI weak global references

#if DVM_TRACK_HEAP_MARKING
    LOGI_HEAP("Marked objects: %dB strong, %dB final, %dB phantom\n",
            strongMarkSize, finalizeMarkSize, phantomMarkSize);
#endif

#ifdef WITH_DEADLOCK_PREDICTION
    dvmDumpMonitorInfo("before sweep");
#endif
    LOGD_HEAP("Sweeping...");
    dvmHeapSweepUnmarkedObjects(&numFreed, &sizeFreed);
#ifdef WITH_DEADLOCK_PREDICTION
    dvmDumpMonitorInfo("after sweep");
#endif

    LOGD_HEAP("Cleaning up...");
    dvmHeapFinishMarkStep();

    LOGD_HEAP("Done.");

    /* Now's a good time to adjust the heap size, since
     * we know what our utilization is.
     *
     * This doesn't actually resize any memory;
     * it just lets the heap grow more when necessary.
     */
    dvmHeapSourceGrowForUtilization();
    dvmHeapSizeChanged();

#if WITH_HPROF
    if (gcHeap->hprofContext != NULL) {
        hprofFinishHeapDump(gcHeap->hprofContext);
//TODO: write a HEAP_SUMMARY record
        if (hprofShutdown(gcHeap->hprofContext))
            gcHeap->hprofResult = 0;    /* indicate success */
        gcHeap->hprofContext = NULL;
    }
#endif

    /* Now that we've freed up the GC heap, return any large
     * free chunks back to the system.  They'll get paged back
     * in the next time they're used.  Don't do it immediately,
     * though;  if the process is still allocating a bunch of
     * memory, we'll be taking a ton of page faults that we don't
     * necessarily need to.
     *
     * Cancel any old scheduled trims, and schedule a new one.
     */
    dvmScheduleHeapSourceTrim(5);  // in seconds

#ifdef WITH_PROFILER
    dvmMethodTraceGCEnd();
#endif
    LOGV_HEAP("GC finished -- resuming threads\n");

    gcHeap->gcRunning = false;

    dvmUnlockMutex(&gDvm.heapWorkerListLock);
    dvmUnlockMutex(&gDvm.heapWorkerLock);

    dvmResumeAllThreads(SUSPEND_FOR_GC);
    if (oldThreadPriority != kInvalidPriority) {
        if (setpriority(PRIO_PROCESS, 0, oldThreadPriority) != 0) {
            LOGW_HEAP("Unable to reset priority to %d: %s\n",
                oldThreadPriority, strerror(errno));
        } else {
            LOGD_HEAP("Reset priority to %d\n", oldThreadPriority);
        }

        if (oldThreadPriority >= ANDROID_PRIORITY_BACKGROUND) {
            set_sched_policy(dvmGetSysThreadId(), SP_BACKGROUND);
        }
    }
    gcElapsedTime = (dvmGetRelativeTimeUsec() - gcHeap->gcStartTime) / 1000;
    if (gcElapsedTime < 10000) {
        LOGD("GC freed %d objects / %zd bytes in %dms\n",
                numFreed, sizeFreed, (int)gcElapsedTime);
    } else {
        LOGD("GC freed %d objects / %zd bytes in %d sec\n",
                numFreed, sizeFreed, (int)(gcElapsedTime / 1000));
    }
    dvmLogGcStats(numFreed, sizeFreed, gcElapsedTime);

    if (gcHeap->ddmHpifWhen != 0) {
        LOGD_HEAP("Sending VM heap info to DDM\n");
        dvmDdmSendHeapInfo(gcHeap->ddmHpifWhen, false);
    }
    if (gcHeap->ddmHpsgWhen != 0) {
        LOGD_HEAP("Dumping VM heap to DDM\n");
        dvmDdmSendHeapSegments(false, false);
    }
    if (gcHeap->ddmNhsgWhen != 0) {
        LOGD_HEAP("Dumping native heap to DDM\n");
        dvmDdmSendHeapSegments(false, true);
    }
}
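
/* Example (illustrative): a caller triggers a collection by taking the
 * heap lock around this entry point, as gcForMalloc() and hprofDumpHeap()
 * do:
 *
 *     dvmLockHeap();
 *     dvmCollectGarbageInternal(false);    // keep SoftReferences
 *     dvmUnlockHeap();
 */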

#if WITH_HPROF
/*
 * Perform garbage collection, writing heap information to the specified file.
 *
 * If "fileName" is NULL, a suitable name will be generated automatically.
 *
 * Returns 0 on success, or an error code on failure.
 */
int hprofDumpHeap(const char* fileName)
{
    int result;

    dvmLockMutex(&gDvm.gcHeapLock);

    gDvm.gcHeap->hprofDumpOnGc = true;
    gDvm.gcHeap->hprofFileName = fileName;
    dvmCollectGarbageInternal(false);
    result = gDvm.gcHeap->hprofResult;

    dvmUnlockMutex(&gDvm.gcHeapLock);

    return result;
}
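
/* Example (illustrative): dumping the heap to an explicit path, or letting
 * the VM invent a /data/misc/heap-dump-*.hprof name:
 *
 *     if (hprofDumpHeap("/data/misc/example.hprof") != 0)
 *         LOGW("hprof dump failed\n");
 *
 * The "example.hprof" path is hypothetical; passing NULL generates a
 * timestamped name as described in dvmCollectGarbageInternal().
 */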

void dvmHeapSetHprofGcScanState(hprof_heap_tag_t state, u4 threadSerialNumber)
{
    if (gDvm.gcHeap->hprofContext != NULL) {
        hprofSetGcScanState(gDvm.gcHeap->hprofContext, state,
                threadSerialNumber);
    }
}
#endif