1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include <cutils/mspace.h>
18 #include <stdint.h>
19 #include <sys/mman.h>
20 #include <errno.h>
21 
22 #define SIZE_MAX UINT_MAX  // TODO: get SIZE_MAX from stdint.h
23 
24 #include "Dalvik.h"
25 #include "alloc/Heap.h"
26 #include "alloc/HeapInternal.h"
27 #include "alloc/HeapSource.h"
28 #include "alloc/HeapBitmap.h"
29 #include "alloc/HeapBitmapInlines.h"
30 
31 // TODO: find a real header file for these.
32 extern "C" int dlmalloc_trim(size_t);
33 extern "C" void dlmalloc_walk_free_pages(void(*)(void*, void*, void*), void*);
34 
35 static void snapIdealFootprint();
36 static void setIdealFootprint(size_t max);
37 static size_t getMaximumSize(const HeapSource *hs);
38 static void trimHeaps();
39 
40 #define HEAP_UTILIZATION_MAX        1024
41 #define DEFAULT_HEAP_UTILIZATION    512     // Range 1..HEAP_UTILIZATION_MAX
42 #define HEAP_IDEAL_FREE             (2 * 1024 * 1024)
43 #define HEAP_MIN_FREE               (HEAP_IDEAL_FREE / 4)
44 
45 /* Number of seconds to wait after a GC before performing a heap trim
46  * operation to reclaim unused pages.
47  */
48 #define HEAP_TRIM_IDLE_TIME_SECONDS 5
49 
50 /* Start a concurrent collection when free memory falls under this
51  * many bytes.
52  */
53 #define CONCURRENT_START (128 << 10)
54 
55 /* The next GC will not be concurrent when free memory after a GC is
56  * under this many bytes.
57  */
58 #define CONCURRENT_MIN_FREE (CONCURRENT_START + (128 << 10))
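/* For reference (not in the original source), the constants above expand to
 * the following concrete values:
 *   HEAP_IDEAL_FREE     = 2 * 1024 * 1024              = 2 MiB
 *   HEAP_MIN_FREE       = HEAP_IDEAL_FREE / 4          = 512 KiB
 *   CONCURRENT_START    = 128 << 10                    = 128 KiB
 *   CONCURRENT_MIN_FREE = CONCURRENT_START + 128 KiB   = 256 KiB
 */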
59 
60 #define HS_BOILERPLATE() \
61     do { \
62         assert(gDvm.gcHeap != NULL); \
63         assert(gDvm.gcHeap->heapSource != NULL); \
64         assert(gHs == gDvm.gcHeap->heapSource); \
65     } while (0)
66 
67 struct Heap {
68     /* The mspace to allocate from.
69      */
70     mspace msp;
71 
72     /* The largest size that this heap is allowed to grow to.
73      */
74     size_t maximumSize;
75 
76     /* Number of bytes allocated from this mspace for objects,
77      * including any overhead.  This value is NOT exact, and
78      * should only be used as an input for certain heuristics.
79      */
80     size_t bytesAllocated;
81 
82     /* Number of bytes allocated from this mspace at which a
83      * concurrent garbage collection will be started.
84      */
85     size_t concurrentStartBytes;
86 
87     /* Number of objects currently allocated from this mspace.
88      */
89     size_t objectsAllocated;
90 
91     /*
92      * The lowest address of this heap, inclusive.
93      */
94     char *base;
95 
96     /*
97      * The highest address of this heap, exclusive.
98      */
99     char *limit;
100 };
101 
102 struct HeapSource {
103     /* Target ideal heap utilization ratio; range 1..HEAP_UTILIZATION_MAX
104      */
105     size_t targetUtilization;
106 
107     /* The starting heap size.
108      */
109     size_t startSize;
110 
111     /* The largest that the heap source as a whole is allowed to grow.
112      */
113     size_t maximumSize;
114 
115     /*
116      * The largest size we permit the heap to grow.  This value allows
117      * the user to limit the heap growth below the maximum size.  This
118      * is a workaround until we can dynamically set the maximum size.
119      * This value can range between the starting size and the maximum
120      * size but should never be set below the current footprint of the
121      * heap.
122      */
123     size_t growthLimit;
124 
125     /* The desired max size of the heap source as a whole.
126      */
127     size_t idealSize;
128 
129     /* The maximum number of bytes allowed to be allocated from the
130      * active heap before a GC is forced.  This is used to "shrink" the
131      * heap in lieu of actual compaction.
132      */
133     size_t softLimit;
134 
135     /* The heaps; heaps[0] is always the active heap,
136      * which new objects should be allocated from.
137      */
138     Heap heaps[HEAP_SOURCE_MAX_HEAP_COUNT];
139 
140     /* The current number of heaps.
141      */
142     size_t numHeaps;
143 
144     /* True if zygote mode was active when the HeapSource was created.
145      */
146     bool sawZygote;
147 
148     /*
149      * The base address of the virtual memory reservation.
150      */
151     char *heapBase;
152 
153     /*
154      * The length in bytes of the virtual memory reservation.
155      */
156     size_t heapLength;
157 
158     /*
159      * The live object bitmap.
160      */
161     HeapBitmap liveBits;
162 
163     /*
164      * The mark bitmap.
165      */
166     HeapBitmap markBits;
167 
168     /*
169      * State for the GC daemon.
170      */
171     bool hasGcThread;
172     pthread_t gcThread;
173     bool gcThreadShutdown;
174     pthread_mutex_t gcThreadMutex;
175     pthread_cond_t gcThreadCond;
176     bool gcThreadTrimNeeded;
177 };
178 
179 #define hs2heap(hs_) (&((hs_)->heaps[0]))
180 
181 /*
182  * Returns true iff a soft limit is in effect for the active heap.
183  */
184 static bool isSoftLimited(const HeapSource *hs)
185 {
186     /* softLimit will be either SIZE_MAX or the limit for the
187      * active mspace.  idealSize can be greater than softLimit
188      * if there is more than one heap.  If there is only one
189      * heap, a non-SIZE_MAX softLimit should always be the same
190      * as idealSize.
191      */
192     return hs->softLimit <= hs->idealSize;
193 }
194 
195 /*
196  * Returns approximately the maximum number of bytes allowed to be
197  * allocated from the active heap before a GC is forced.
198  */
199 static size_t getAllocLimit(const HeapSource *hs)
200 {
201     if (isSoftLimited(hs)) {
202         return hs->softLimit;
203     } else {
204         return mspace_max_allowed_footprint(hs2heap(hs)->msp);
205     }
206 }
207 
208 /*
209  * Returns the current footprint of all heaps.  If includeActive
210  * is false, don't count the heap at index 0.
211  */
212 static size_t oldHeapOverhead(const HeapSource *hs, bool includeActive)
213 {
214     size_t footprint = 0;
215     size_t i;
216 
217     if (includeActive) {
218         i = 0;
219     } else {
220         i = 1;
221     }
222     for (/* i = i */; i < hs->numHeaps; i++) {
223 //TODO: include size of bitmaps?  If so, don't use bitsLen, listen to .max
224         footprint += mspace_footprint(hs->heaps[i].msp);
225     }
226     return footprint;
227 }
228 
229 /*
230  * Returns the heap that <ptr> could have come from, or NULL
231  * if it could not have come from any heap.
232  */
233 static Heap *ptr2heap(const HeapSource *hs, const void *ptr)
234 {
235     const size_t numHeaps = hs->numHeaps;
236 
237     if (ptr != NULL) {
238         for (size_t i = 0; i < numHeaps; i++) {
239             const Heap *const heap = &hs->heaps[i];
240 
241             if ((const char *)ptr >= heap->base && (const char *)ptr < heap->limit) {
242                 return (Heap *)heap;
243             }
244         }
245     }
246     return NULL;
247 }
248 
249 /*
250  * Functions to update heapSource->bytesAllocated when an object
251  * is allocated or freed.  mspace_usable_size() will give
252  * us a much more accurate picture of heap utilization than
253  * the requested byte sizes would.
254  *
255  * These aren't exact, and should not be treated as such.
256  */
257 static void countAllocation(Heap *heap, const void *ptr)
258 {
259     assert(heap->bytesAllocated < mspace_footprint(heap->msp));
260 
261     heap->bytesAllocated += mspace_usable_size(heap->msp, ptr) +
262             HEAP_SOURCE_CHUNK_OVERHEAD;
263     heap->objectsAllocated++;
264     HeapSource* hs = gDvm.gcHeap->heapSource;
265     dvmHeapBitmapSetObjectBit(&hs->liveBits, ptr);
266 
267     assert(heap->bytesAllocated < mspace_footprint(heap->msp));
268 }
269 
270 static void countFree(Heap *heap, const void *ptr, size_t *numBytes)
271 {
272     size_t delta = mspace_usable_size(heap->msp, ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
273     assert(delta > 0);
274     if (delta < heap->bytesAllocated) {
275         heap->bytesAllocated -= delta;
276     } else {
277         heap->bytesAllocated = 0;
278     }
279     HeapSource* hs = gDvm.gcHeap->heapSource;
280     dvmHeapBitmapClearObjectBit(&hs->liveBits, ptr);
281     if (heap->objectsAllocated > 0) {
282         heap->objectsAllocated--;
283     }
284     *numBytes += delta;
285 }
286 
287 static HeapSource *gHs = NULL;
288 
289 static mspace createMspace(void *base, size_t startSize, size_t maximumSize)
290 {
291     /* Create an unlocked dlmalloc mspace to use as
292      * a heap source.
293      *
294      * We start off reserving startSize / 2 bytes but
295      * letting the heap grow to startSize.  This saves
296      * memory in the case where a process uses even less
297      * than the starting size.
298      */
299     LOGV_HEAP("Creating VM heap of size %zu", startSize);
300     errno = 0;
301 
302     mspace msp = create_contiguous_mspace_with_base(startSize/2,
303             maximumSize, /*locked=*/false, base);
304     if (msp != NULL) {
305         /* Don't let the heap grow past the starting size without
306          * our intervention.
307          */
308         mspace_set_max_allowed_footprint(msp, startSize);
309     } else {
310         /* There's no guarantee that errno has meaning when the call
311          * fails, but it often does.
312          */
313         LOGE_HEAP("Can't create VM heap of size (%zu,%zu): %s",
314             startSize/2, maximumSize, strerror(errno));
315     }
316 
317     return msp;
318 }
319 
320 /*
321  * Add the initial heap.  Returns false if the initial heap was
322  * already added to the heap source.
323  */
324 static bool addInitialHeap(HeapSource *hs, mspace msp, size_t maximumSize)
325 {
326     assert(hs != NULL);
327     assert(msp != NULL);
328     if (hs->numHeaps != 0) {
329         return false;
330     }
331     hs->heaps[0].msp = msp;
332     hs->heaps[0].maximumSize = maximumSize;
333     hs->heaps[0].concurrentStartBytes = SIZE_MAX;
334     hs->heaps[0].base = hs->heapBase;
335     hs->heaps[0].limit = hs->heapBase + hs->heaps[0].maximumSize;
336     hs->numHeaps = 1;
337     return true;
338 }
339 
340 /*
341  * Adds an additional heap to the heap source.  Returns false if there
342  * are too many heaps or insufficient free space to add another heap.
343  */
344 static bool addNewHeap(HeapSource *hs)
345 {
346     Heap heap;
347 
348     assert(hs != NULL);
349     if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) {
350         LOGE("Attempt to create too many heaps (%zd >= %zd)",
351                 hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT);
352         dvmAbort();
353         return false;
354     }
355 
356     memset(&heap, 0, sizeof(heap));
357 
358     /*
359      * Heap storage comes from a common virtual memory reservation.
360      * The new heap will start on the page after the old heap.
361      */
362     void *sbrk0 = contiguous_mspace_sbrk0(hs->heaps[0].msp);
363     char *base = (char *)ALIGN_UP_TO_PAGE_SIZE(sbrk0);
364     size_t overhead = base - hs->heaps[0].base;
365     assert(((size_t)hs->heaps[0].base & (SYSTEM_PAGE_SIZE - 1)) == 0);
366 
367     if (overhead + HEAP_MIN_FREE >= hs->maximumSize) {
368         LOGE_HEAP("No room to create any more heaps "
369                   "(%zd overhead, %zd max)",
370                   overhead, hs->maximumSize);
371         return false;
372     }
373 
374     heap.maximumSize = hs->growthLimit - overhead;
375     heap.concurrentStartBytes = HEAP_MIN_FREE - CONCURRENT_START;
376     heap.base = base;
377     heap.limit = heap.base + heap.maximumSize;
378     heap.msp = createMspace(base, HEAP_MIN_FREE, hs->maximumSize - overhead);
379     if (heap.msp == NULL) {
380         return false;
381     }
382 
383     /* Don't let the soon-to-be-old heap grow any further.
384      */
385     hs->heaps[0].maximumSize = overhead;
386     hs->heaps[0].limit = base;
387     mspace msp = hs->heaps[0].msp;
388     mspace_set_max_allowed_footprint(msp, mspace_footprint(msp));
389 
390     /* Put the new heap in the list, at heaps[0].
391      * Shift existing heaps down.
392      */
393     memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0]));
394     hs->heaps[0] = heap;
395     hs->numHeaps++;
396 
397     return true;
398 }
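/* Illustrative sketch (assuming a single pre-existing zygote heap): after a
 * successful addNewHeap(), the shared virtual memory reservation is split
 * roughly as
 *
 *   hs->heapBase                 base (page after old sbrk0)        hs->heapBase + hs->heapLength
 *   |-- heaps[1]: old zygote heap --|-- heaps[0]: new active heap --|-- remaining reservation --|
 *       (limit = base, frozen)          (limit = base + maximumSize)
 */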
399 
400 /*
401  * The garbage collection daemon.  Initiates a concurrent collection
402  * when signaled.  Also periodically trims the heaps when a few seconds
403  * have elapsed since the last concurrent GC.
404  */
405 static void *gcDaemonThread(void* arg)
406 {
407     dvmChangeStatus(NULL, THREAD_VMWAIT);
408     dvmLockMutex(&gHs->gcThreadMutex);
409     while (gHs->gcThreadShutdown != true) {
410         bool trim = false;
411         if (gHs->gcThreadTrimNeeded) {
412             int result = dvmRelativeCondWait(&gHs->gcThreadCond, &gHs->gcThreadMutex,
413                     HEAP_TRIM_IDLE_TIME_SECONDS, 0);
414             if (result == ETIMEDOUT) {
415                 /* Timed out waiting for a GC request, schedule a heap trim. */
416                 trim = true;
417             }
418         } else {
419             dvmWaitCond(&gHs->gcThreadCond, &gHs->gcThreadMutex);
420         }
421 
422         dvmLockHeap();
423         /*
424          * Another thread may have started a concurrent garbage
425          * collection before we were scheduled.  Check for this
426          * condition before proceeding.
427          */
428         if (!gDvm.gcHeap->gcRunning) {
429             dvmChangeStatus(NULL, THREAD_RUNNING);
430             if (trim) {
431                 trimHeaps();
432                 gHs->gcThreadTrimNeeded = false;
433             } else {
434                 dvmCollectGarbageInternal(GC_CONCURRENT);
435                 gHs->gcThreadTrimNeeded = true;
436             }
437             dvmChangeStatus(NULL, THREAD_VMWAIT);
438         }
439         dvmUnlockHeap();
440     }
441     dvmChangeStatus(NULL, THREAD_RUNNING);
442     return NULL;
443 }
444 
445 static bool gcDaemonStartup()
446 {
447     dvmInitMutex(&gHs->gcThreadMutex);
448     pthread_cond_init(&gHs->gcThreadCond, NULL);
449     gHs->gcThreadShutdown = false;
450     gHs->hasGcThread = dvmCreateInternalThread(&gHs->gcThread, "GC",
451                                                gcDaemonThread, NULL);
452     return gHs->hasGcThread;
453 }
454 
455 static void gcDaemonShutdown()
456 {
457     if (gHs->hasGcThread) {
458         dvmLockMutex(&gHs->gcThreadMutex);
459         gHs->gcThreadShutdown = true;
460         dvmSignalCond(&gHs->gcThreadCond);
461         dvmUnlockMutex(&gHs->gcThreadMutex);
462         pthread_join(gHs->gcThread, NULL);
463     }
464 }
465 
466 /*
467  * Create a stack big enough for the worst possible case, where the
468  * heap is perfectly full of the smallest object.
469  * TODO: be better about memory usage; use a smaller stack with
470  *       overflow detection and recovery.
471  */
472 static bool allocMarkStack(GcMarkStack *stack, size_t maximumSize)
473 {
474     const char *name = "dalvik-mark-stack";
475     void *addr;
476 
477     assert(stack != NULL);
478     stack->length = maximumSize * sizeof(Object*) /
479         (sizeof(Object) + HEAP_SOURCE_CHUNK_OVERHEAD);
480     addr = dvmAllocRegion(stack->length, PROT_READ | PROT_WRITE, name);
481     if (addr == NULL) {
482         return false;
483     }
484     stack->base = (const Object **)addr;
485     stack->limit = (const Object **)((char *)addr + stack->length);
486     stack->top = NULL;
487     madvise(stack->base, stack->length, MADV_DONTNEED);
488     return true;
489 }
490 
491 static void freeMarkStack(GcMarkStack *stack)
492 {
493     assert(stack != NULL);
494     munmap(stack->base, stack->length);
495     memset(stack, 0, sizeof(*stack));
496 }
497 
498 /*
499  * Initializes the heap source; must be called before any other
500  * dvmHeapSource*() functions.  Returns a GcHeap structure
501  * allocated from the heap source.
502  */
503 GcHeap* dvmHeapSourceStartup(size_t startSize, size_t maximumSize,
504                              size_t growthLimit)
505 {
506     GcHeap *gcHeap;
507     HeapSource *hs;
508     mspace msp;
509     size_t length;
510     void *base;
511 
512     assert(gHs == NULL);
513 
514     if (!(startSize <= growthLimit && growthLimit <= maximumSize)) {
515         LOGE("Bad heap size parameters (start=%zd, max=%zd, limit=%zd)",
516              startSize, maximumSize, growthLimit);
517         return NULL;
518     }
519 
520     /*
521      * Allocate a contiguous region of virtual memory to be subdivided
522      * among the heaps managed by the garbage collector.
523      */
524     length = ALIGN_UP_TO_PAGE_SIZE(maximumSize);
525     base = dvmAllocRegion(length, PROT_NONE, "dalvik-heap");
526     if (base == NULL) {
527         return NULL;
528     }
529 
530     /* Create an unlocked dlmalloc mspace to use as
531      * a heap source.
532      */
533     msp = createMspace(base, startSize, maximumSize);
534     if (msp == NULL) {
535         goto fail;
536     }
537 
538     gcHeap = (GcHeap *)malloc(sizeof(*gcHeap));
539     if (gcHeap == NULL) {
540         LOGE_HEAP("Can't allocate heap descriptor");
541         goto fail;
542     }
543     memset(gcHeap, 0, sizeof(*gcHeap));
544 
545     hs = (HeapSource *)malloc(sizeof(*hs));
546     if (hs == NULL) {
547         LOGE_HEAP("Can't allocate heap source");
548         free(gcHeap);
549         goto fail;
550     }
551     memset(hs, 0, sizeof(*hs));
552 
553     hs->targetUtilization = DEFAULT_HEAP_UTILIZATION;
554     hs->startSize = startSize;
555     hs->maximumSize = maximumSize;
556     hs->growthLimit = growthLimit;
557     hs->idealSize = startSize;
558     hs->softLimit = SIZE_MAX;    // no soft limit at first
559     hs->numHeaps = 0;
560     hs->sawZygote = gDvm.zygote;
561     hs->hasGcThread = false;
562     hs->heapBase = (char *)base;
563     hs->heapLength = length;
564     if (!addInitialHeap(hs, msp, growthLimit)) {
565         LOGE_HEAP("Can't add initial heap");
566         goto fail;
567     }
568     if (!dvmHeapBitmapInit(&hs->liveBits, base, length, "dalvik-bitmap-1")) {
569         LOGE_HEAP("Can't create liveBits");
570         goto fail;
571     }
572     if (!dvmHeapBitmapInit(&hs->markBits, base, length, "dalvik-bitmap-2")) {
573         LOGE_HEAP("Can't create markBits");
574         dvmHeapBitmapDelete(&hs->liveBits);
575         goto fail;
576     }
577     if (!allocMarkStack(&gcHeap->markContext.stack, hs->maximumSize)) {
578         LOGE("Can't create markStack");
579         dvmHeapBitmapDelete(&hs->markBits);
580         dvmHeapBitmapDelete(&hs->liveBits);
581         goto fail;
582     }
583     gcHeap->markContext.bitmap = &hs->markBits;
584     gcHeap->heapSource = hs;
585 
586     gHs = hs;
587     return gcHeap;
588 
589 fail:
590     munmap(base, length);
591     return NULL;
592 }
593 
594 bool dvmHeapSourceStartupAfterZygote()
595 {
596     return gDvm.concurrentMarkSweep ? gcDaemonStartup() : true;
597 }
598 
599 /*
600  * This is called while in zygote mode, right before we fork() for the
601  * first time.  We create a heap for all future zygote process allocations,
602  * in an attempt to avoid touching pages in the zygote heap.  (This would
603  * probably be unnecessary if we had a compacting GC -- the source of our
604  * troubles is small allocations filling in the gaps from larger ones.)
605  */
606 bool dvmHeapSourceStartupBeforeFork()
607 {
608     HeapSource *hs = gHs; // use a local to avoid the implicit "volatile"
609 
610     HS_BOILERPLATE();
611 
612     assert(gDvm.zygote);
613 
614     if (!gDvm.newZygoteHeapAllocated) {
615         /* Create a new heap for post-fork zygote allocations.  We only
616          * try once, even if it fails.
617          */
618         LOGV("Splitting out new zygote heap");
619         gDvm.newZygoteHeapAllocated = true;
620         return addNewHeap(hs);
621     }
622     return true;
623 }
624 
625 void dvmHeapSourceThreadShutdown()
626 {
627     if (gDvm.gcHeap != NULL && gDvm.concurrentMarkSweep) {
628         gcDaemonShutdown();
629     }
630 }
631 
632 /*
633  * Tears down the entire GcHeap structure and all of the substructures
634  * attached to it.  This call has the side effect of setting the given
635  * gcHeap pointer and gHs to NULL.
636  */
637 void dvmHeapSourceShutdown(GcHeap **gcHeap)
638 {
639     assert(gcHeap != NULL);
640     if (*gcHeap != NULL && (*gcHeap)->heapSource != NULL) {
641         HeapSource *hs = (*gcHeap)->heapSource;
642         dvmHeapBitmapDelete(&hs->liveBits);
643         dvmHeapBitmapDelete(&hs->markBits);
644         freeMarkStack(&(*gcHeap)->markContext.stack);
645         munmap(hs->heapBase, hs->heapLength);
646         free(hs);
647         gHs = NULL;
648         free(*gcHeap);
649         *gcHeap = NULL;
650     }
651 }
652 
653 /*
654  * Gets the beginning of the allocation for the HeapSource.
655  */
656 void *dvmHeapSourceGetBase()
657 {
658     return gHs->heapBase;
659 }
660 
661 /*
662  * Returns the requested value. If the per-heap stats are requested, fill
663  * them as well.
664  *
665  * Caller must hold the heap lock.
666  */
667 size_t dvmHeapSourceGetValue(HeapSourceValueSpec spec, size_t perHeapStats[],
668                              size_t arrayLen)
669 {
670     HeapSource *hs = gHs;
671     size_t value = 0;
672     size_t total = 0;
673 
674     HS_BOILERPLATE();
675 
676     assert(arrayLen >= hs->numHeaps || perHeapStats == NULL);
677     for (size_t i = 0; i < hs->numHeaps; i++) {
678         Heap *const heap = &hs->heaps[i];
679 
680         switch (spec) {
681         case HS_FOOTPRINT:
682             value = mspace_footprint(heap->msp);
683             break;
684         case HS_ALLOWED_FOOTPRINT:
685             value = mspace_max_allowed_footprint(heap->msp);
686             break;
687         case HS_BYTES_ALLOCATED:
688             value = heap->bytesAllocated;
689             break;
690         case HS_OBJECTS_ALLOCATED:
691             value = heap->objectsAllocated;
692             break;
693         default:
694             // quiet gcc
695             break;
696         }
697         if (perHeapStats) {
698             perHeapStats[i] = value;
699         }
700         total += value;
701     }
702     return total;
703 }
704 
705 void dvmHeapSourceGetRegions(uintptr_t *base, uintptr_t *max, size_t numHeaps)
706 {
707     HeapSource *hs = gHs;
708 
709     HS_BOILERPLATE();
710 
711     assert(numHeaps <= hs->numHeaps);
712     for (size_t i = 0; i < numHeaps; ++i) {
713         base[i] = (uintptr_t)hs->heaps[i].base;
714         max[i] = MIN((uintptr_t)hs->heaps[i].limit - 1, hs->markBits.max);
715     }
716 }
717 
718 /*
719  * Get the bitmap representing all live objects.
720  */
721 HeapBitmap *dvmHeapSourceGetLiveBits()
722 {
723     HS_BOILERPLATE();
724 
725     return &gHs->liveBits;
726 }
727 
728 /*
729  * Get the bitmap representing all marked objects.
730  */
731 HeapBitmap *dvmHeapSourceGetMarkBits()
732 {
733     HS_BOILERPLATE();
734 
735     return &gHs->markBits;
736 }
737 
738 void dvmHeapSourceSwapBitmaps()
739 {
740     HeapBitmap tmp = gHs->liveBits;
741     gHs->liveBits = gHs->markBits;
742     gHs->markBits = tmp;
743 }
744 
745 void dvmHeapSourceZeroMarkBitmap()
746 {
747     HS_BOILERPLATE();
748 
749     dvmHeapBitmapZero(&gHs->markBits);
750 }
751 
752 void dvmMarkImmuneObjects(const char *immuneLimit)
753 {
754     /*
755      * Copy the contents of the live bit vector for immune object
756      * range into the mark bit vector.
757      */
758     /* The only values generated by dvmHeapSourceGetImmuneLimit() */
759     assert(immuneLimit == gHs->heaps[0].base ||
760            immuneLimit == NULL);
761     assert(gHs->liveBits.base == gHs->markBits.base);
762     assert(gHs->liveBits.bitsLen == gHs->markBits.bitsLen);
763     /* heap[0] is never immune */
764     assert(gHs->heaps[0].base >= immuneLimit);
765     assert(gHs->heaps[0].limit > immuneLimit);
766 
767     for (size_t i = 1; i < gHs->numHeaps; ++i) {
768         if (gHs->heaps[i].base < immuneLimit) {
769             assert(gHs->heaps[i].limit <= immuneLimit);
770             /* Compute the number of words to copy in the bitmap. */
771             size_t index = HB_OFFSET_TO_INDEX(
772                 (uintptr_t)gHs->heaps[i].base - gHs->liveBits.base);
773             /* Compute the starting offset in the live and mark bits. */
774             char *src = (char *)(gHs->liveBits.bits + index);
775             char *dst = (char *)(gHs->markBits.bits + index);
776             /* Compute the number of bytes of the live bitmap to copy. */
777             size_t length = HB_OFFSET_TO_BYTE_INDEX(
778                 gHs->heaps[i].limit - gHs->heaps[i].base);
779             /* Do the copy. */
780             memcpy(dst, src, length);
781             /* Make sure max points to the address of the highest set bit. */
782             if (gHs->markBits.max < (uintptr_t)gHs->heaps[i].limit) {
783                 gHs->markBits.max = (uintptr_t)gHs->heaps[i].limit;
784             }
785         }
786     }
787 }
788 
789 /*
790  * Allocates <n> bytes of zeroed data.
791  */
792 void* dvmHeapSourceAlloc(size_t n)
793 {
794     HS_BOILERPLATE();
795 
796     HeapSource *hs = gHs;
797     Heap* heap = hs2heap(hs);
798     if (heap->bytesAllocated + n > hs->softLimit) {
799         /*
800          * This allocation would push us over the soft limit; act as
801          * if the heap is full.
802          */
803         LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation",
804                   FRACTIONAL_MB(hs->softLimit), n);
805         return NULL;
806     }
807     void* ptr = mspace_calloc(heap->msp, 1, n);
808     if (ptr == NULL) {
809         return NULL;
810     }
811     countAllocation(heap, ptr);
812     /*
813      * Check to see if a concurrent GC should be initiated.
814      */
815     if (gDvm.gcHeap->gcRunning || !hs->hasGcThread) {
816         /*
817          * The garbage collector thread is already running or has yet
818          * to be started.  Do nothing.
819          */
820         return ptr;
821     }
822     if (heap->bytesAllocated > heap->concurrentStartBytes) {
823         /*
824          * We have exceeded the allocation threshold.  Wake up the
825          * garbage collector.
826          */
827         dvmSignalCond(&gHs->gcThreadCond);
828     }
829     return ptr;
830 }
831 
832 /* Remove any hard limits, try to allocate, and shrink back down.
833  * Last resort when trying to allocate an object.
834  */
835 static void* heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n)
836 {
837     /* Grow as much as possible, but don't let the real footprint
838      * go over the absolute max.
839      */
840     size_t max = heap->maximumSize;
841 
842     mspace_set_max_allowed_footprint(heap->msp, max);
843     void* ptr = dvmHeapSourceAlloc(n);
844 
845     /* Shrink back down as small as possible.  Our caller may
846      * readjust max_allowed to a more appropriate value.
847      */
848     mspace_set_max_allowed_footprint(heap->msp,
849                                      mspace_footprint(heap->msp));
850     return ptr;
851 }
852 
853 /*
854  * Allocates <n> bytes of zeroed data, growing as much as possible
855  * if necessary.
856  */
857 void* dvmHeapSourceAllocAndGrow(size_t n)
858 {
859     HS_BOILERPLATE();
860 
861     HeapSource *hs = gHs;
862     Heap* heap = hs2heap(hs);
863     void* ptr = dvmHeapSourceAlloc(n);
864     if (ptr != NULL) {
865         return ptr;
866     }
867 
868     size_t oldIdealSize = hs->idealSize;
869     if (isSoftLimited(hs)) {
870         /* We're soft-limited.  Try removing the soft limit to
871          * see if we can allocate without actually growing.
872          */
873         hs->softLimit = SIZE_MAX;
874         ptr = dvmHeapSourceAlloc(n);
875         if (ptr != NULL) {
876             /* Removing the soft limit worked;  fix things up to
877              * reflect the new effective ideal size.
878              */
879             snapIdealFootprint();
880             return ptr;
881         }
882         // softLimit intentionally left at SIZE_MAX.
883     }
884 
885     /* We're not soft-limited.  Grow the heap to satisfy the request.
886      * If this call fails, no footprints will have changed.
887      */
888     ptr = heapAllocAndGrow(hs, heap, n);
889     if (ptr != NULL) {
890         /* The allocation succeeded.  Fix up the ideal size to
891          * reflect any footprint modifications that had to happen.
892          */
893         snapIdealFootprint();
894     } else {
895         /* We just couldn't do it.  Restore the original ideal size,
896          * fixing up softLimit if necessary.
897          */
898         setIdealFootprint(oldIdealSize);
899     }
900     return ptr;
901 }
902 
903 /*
904  * Frees the first numPtrs objects in the ptrs list and returns the
905  * amount of reclaimed storage. The list must contain addresses all in
906  * the same mspace, and must be in increasing order. This implies that
907  * there are no duplicates, and no entries are NULL.
908  */
909 size_t dvmHeapSourceFreeList(size_t numPtrs, void **ptrs)
910 {
911     HS_BOILERPLATE();
912 
913     if (numPtrs == 0) {
914         return 0;
915     }
916 
917     assert(ptrs != NULL);
918     assert(*ptrs != NULL);
919     Heap* heap = ptr2heap(gHs, *ptrs);
920     size_t numBytes = 0;
921     if (heap != NULL) {
922         mspace msp = heap->msp;
923         // Calling mspace_free on shared heaps disrupts sharing too
924         // much. For heap[0] -- the 'active heap' -- we call
925         // mspace_free, but on the other heaps we only do some
926         // accounting.
927         if (heap == gHs->heaps) {
928             // mspace_merge_objects takes two allocated objects, and
929             // if the second immediately follows the first, will merge
930             // them, returning a larger object occupying the same
931             // memory. This is a local operation, and doesn't require
932             // dlmalloc to manipulate any freelists. It's pretty
933             // inexpensive compared to free().
934 
935             // ptrs is an array of objects all in memory order, and if
936             // client code has been allocating lots of short-lived
937             // objects, this is likely to contain runs of objects all
938             // now garbage, and thus highly amenable to this optimization.
939 
940             // Unroll the 0th iteration around the loop below,
941             // countFree ptrs[0] and initializing merged.
942             assert(ptrs[0] != NULL);
943             assert(ptr2heap(gHs, ptrs[0]) == heap);
944             countFree(heap, ptrs[0], &numBytes);
945             void *merged = ptrs[0];
946             for (size_t i = 1; i < numPtrs; i++) {
947                 assert(merged != NULL);
948                 assert(ptrs[i] != NULL);
949                 assert((intptr_t)merged < (intptr_t)ptrs[i]);
950                 assert(ptr2heap(gHs, ptrs[i]) == heap);
951                 countFree(heap, ptrs[i], &numBytes);
952                 // Try to merge. If it works, merged now includes the
953                 // memory of ptrs[i]. If it doesn't, free merged, and
954                 // see if ptrs[i] starts a new run of adjacent
955                 // objects to merge.
956                 if (mspace_merge_objects(msp, merged, ptrs[i]) == NULL) {
957                     mspace_free(msp, merged);
958                     merged = ptrs[i];
959                 }
960             }
961             assert(merged != NULL);
962             mspace_free(msp, merged);
963         } else {
964             // This is not an 'active heap'. Only do the accounting.
965             for (size_t i = 0; i < numPtrs; i++) {
966                 assert(ptrs[i] != NULL);
967                 assert(ptr2heap(gHs, ptrs[i]) == heap);
968                 countFree(heap, ptrs[i], &numBytes);
969             }
970         }
971     }
972     return numBytes;
973 }
974 
975 /*
976  * Returns true iff <ptr> is in the heap source.
977  */
978 bool dvmHeapSourceContainsAddress(const void *ptr)
979 {
980     HS_BOILERPLATE();
981 
982     return (dvmHeapBitmapCoversAddress(&gHs->liveBits, ptr));
983 }
984 
985 /*
986  * Returns true iff <ptr> was allocated from the heap source.
987  */
988 bool dvmHeapSourceContains(const void *ptr)
989 {
990     HS_BOILERPLATE();
991 
992     if (dvmHeapSourceContainsAddress(ptr)) {
993         return dvmHeapBitmapIsObjectBitSet(&gHs->liveBits, ptr) != 0;
994     }
995     return false;
996 }
997 
998 bool dvmIsZygoteObject(const Object* obj)
999 {
1000     HeapSource *hs = gHs;
1001 
1002     HS_BOILERPLATE();
1003 
1004     if (dvmHeapSourceContains(obj) && hs->sawZygote) {
1005         Heap *heap = ptr2heap(hs, obj);
1006         if (heap != NULL) {
1007             /* If the object is not in the active heap, we assume that
1008              * it was allocated as part of zygote.
1009              */
1010             return heap != hs->heaps;
1011         }
1012     }
1013     /* The pointer is outside of any known heap, or we are not
1014      * running in zygote mode.
1015      */
1016     return false;
1017 }
1018 
1019 /*
1020  * Returns the number of usable bytes in an allocated chunk; the size
1021  * may be larger than the size passed to dvmHeapSourceAlloc().
1022  */
1023 size_t dvmHeapSourceChunkSize(const void *ptr)
1024 {
1025     HS_BOILERPLATE();
1026 
1027     Heap* heap = ptr2heap(gHs, ptr);
1028     if (heap != NULL) {
1029         return mspace_usable_size(heap->msp, ptr);
1030     }
1031     return 0;
1032 }
1033 
1034 /*
1035  * Returns the number of bytes that the heap source has allocated
1036  * from the system using sbrk/mmap, etc.
1037  *
1038  * Caller must hold the heap lock.
1039  */
1040 size_t dvmHeapSourceFootprint()
1041 {
1042     HS_BOILERPLATE();
1043 
1044 //TODO: include size of bitmaps?
1045     return oldHeapOverhead(gHs, true);
1046 }
1047 
1048 static size_t getMaximumSize(const HeapSource *hs)
1049 {
1050     return hs->growthLimit;
1051 }
1052 
1053 /*
1054  * Returns the current maximum size of the heap source respecting any
1055  * growth limits.
1056  */
1057 size_t dvmHeapSourceGetMaximumSize()
1058 {
1059     HS_BOILERPLATE();
1060     return getMaximumSize(gHs);
1061 }
1062 
1063 /*
1064  * Removes any growth limits.  Allows the user to allocate up to the
1065  * maximum heap size.
1066  */
1067 void dvmClearGrowthLimit()
1068 {
1069     HS_BOILERPLATE();
1070     dvmLockHeap();
1071     dvmWaitForConcurrentGcToComplete();
1072     gHs->growthLimit = gHs->maximumSize;
1073     size_t overhead = oldHeapOverhead(gHs, false);
1074     gHs->heaps[0].maximumSize = gHs->maximumSize - overhead;
1075     gHs->heaps[0].limit = gHs->heaps[0].base + gHs->heaps[0].maximumSize;
1076     dvmUnlockHeap();
1077 }
1078 
1079 /*
1080  * Return the real bytes used by old heaps plus the soft usage of the
1081  * current heap.  When a soft limit is in effect, this is effectively
1082  * what it's compared against (though, in practice, it only looks at
1083  * the current heap).
1084  */
1085 static size_t getSoftFootprint(bool includeActive)
1086 {
1087     HS_BOILERPLATE();
1088 
1089     HeapSource *hs = gHs;
1090     size_t ret = oldHeapOverhead(hs, false);
1091     if (includeActive) {
1092         ret += hs->heaps[0].bytesAllocated;
1093     }
1094 
1095     return ret;
1096 }
1097 
1098 /*
1099  * Gets the maximum number of bytes that the heap source is allowed
1100  * to allocate from the system.
1101  */
1102 size_t dvmHeapSourceGetIdealFootprint()
1103 {
1104     HeapSource *hs = gHs;
1105 
1106     HS_BOILERPLATE();
1107 
1108     return hs->idealSize;
1109 }
1110 
1111 /*
1112  * Sets the soft limit, handling any necessary changes to the allowed
1113  * footprint of the active heap.
1114  */
1115 static void setSoftLimit(HeapSource *hs, size_t softLimit)
1116 {
1117     /* Compare against the actual footprint, rather than the
1118      * max_allowed, because the heap may not have grown all the
1119      * way to the allowed size yet.
1120      */
1121     mspace msp = hs->heaps[0].msp;
1122     size_t currentHeapSize = mspace_footprint(msp);
1123     if (softLimit < currentHeapSize) {
1124         /* Don't let the heap grow any more, and impose a soft limit.
1125          */
1126         mspace_set_max_allowed_footprint(msp, currentHeapSize);
1127         hs->softLimit = softLimit;
1128     } else {
1129         /* Let the heap grow to the requested max, and remove any
1130          * soft limit, if set.
1131          */
1132         mspace_set_max_allowed_footprint(msp, softLimit);
1133         hs->softLimit = SIZE_MAX;
1134     }
1135 }
1136 
1137 /*
1138  * Sets the maximum number of bytes that the heap source is allowed
1139  * to allocate from the system.  Clamps to the appropriate maximum
1140  * value.
1141  */
1142 static void setIdealFootprint(size_t max)
1143 {
1144     HS_BOILERPLATE();
1145 
1146     HeapSource *hs = gHs;
1147     size_t maximumSize = getMaximumSize(hs);
1148     if (max > maximumSize) {
1149         LOGI_HEAP("Clamp target GC heap from %zd.%03zdMB to %u.%03uMB",
1150                 FRACTIONAL_MB(max),
1151                 FRACTIONAL_MB(maximumSize));
1152         max = maximumSize;
1153     }
1154 
1155     /* Convert max into a size that applies to the active heap.
1156      * Old heaps will count against the ideal size.
1157      */
1158     size_t overhead = getSoftFootprint(false);
1159     size_t activeMax;
1160     if (overhead < max) {
1161         activeMax = max - overhead;
1162     } else {
1163         activeMax = 0;
1164     }
1165 
1166     setSoftLimit(hs, activeMax);
1167     hs->idealSize = max;
1168 }
1169 
1170 /*
1171  * Make the ideal footprint equal to the current footprint.
1172  */
1173 static void snapIdealFootprint()
1174 {
1175     HS_BOILERPLATE();
1176 
1177     setIdealFootprint(getSoftFootprint(true));
1178 }
1179 
1180 /*
1181  * Gets the current ideal heap utilization, represented as a number
1182  * between zero and one.
1183  */
1184 float dvmGetTargetHeapUtilization()
1185 {
1186     HeapSource *hs = gHs;
1187 
1188     HS_BOILERPLATE();
1189 
1190     return (float)hs->targetUtilization / (float)HEAP_UTILIZATION_MAX;
1191 }
1192 
1193 /*
1194  * Sets the new ideal heap utilization, represented as a number
1195  * between zero and one.
1196  */
1197 void dvmSetTargetHeapUtilization(float newTarget)
1198 {
1199     HeapSource *hs = gHs;
1200 
1201     HS_BOILERPLATE();
1202 
1203     /* Clamp it to a reasonable range.
1204      */
1205     // TODO: This may need some tuning.
1206     if (newTarget < 0.2) {
1207         newTarget = 0.2;
1208     } else if (newTarget > 0.8) {
1209         newTarget = 0.8;
1210     }
1211 
1212     hs->targetUtilization =
1213             (size_t)(newTarget * (float)HEAP_UTILIZATION_MAX);
1214     LOGV("Set heap target utilization to %zd/%d (%f)",
1215             hs->targetUtilization, HEAP_UTILIZATION_MAX, newTarget);
1216 }
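/* Worked example (illustrative values, not in the original source):
 * dvmSetTargetHeapUtilization(0.5f) stores (size_t)(0.5 * 1024) = 512,
 * i.e. DEFAULT_HEAP_UTILIZATION; a request of 0.1f is first clamped to
 * 0.2f and stored as 204.
 */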
1217 
1218 /*
1219  * Given the size of a live set, returns the ideal heap size given
1220  * the current target utilization and MIN/MAX values.
1221  *
1222  * targetUtilization is in the range 1..HEAP_UTILIZATION_MAX.
1223  */
1224 static size_t getUtilizationTarget(size_t liveSize, size_t targetUtilization)
1225 {
1226     /* Use the current target utilization ratio to determine the
1227      * ideal heap size based on the size of the live set.
1228      */
1229     size_t targetSize = (liveSize / targetUtilization) * HEAP_UTILIZATION_MAX;
1230 
1231     /* Cap the amount of free space, though, so we don't end up
1232      * with, e.g., 8MB of free space when the live set size hits 8MB.
1233      */
1234     if (targetSize > liveSize + HEAP_IDEAL_FREE) {
1235         targetSize = liveSize + HEAP_IDEAL_FREE;
1236     } else if (targetSize < liveSize + HEAP_MIN_FREE) {
1237         targetSize = liveSize + HEAP_MIN_FREE;
1238     }
1239     return targetSize;
1240 }
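/* Worked example (illustrative values, not in the original source): with
 * liveSize = 8 MiB and the default targetUtilization of 512, the raw target
 * is (8 MiB / 512) * 1024 = 16 MiB.  That would leave 8 MiB free, which
 * exceeds HEAP_IDEAL_FREE (2 MiB), so the result is capped at
 * liveSize + HEAP_IDEAL_FREE = 10 MiB.
 */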
1241 
1242 /*
1243  * Given the current contents of the active heap, increase the allowed
1244  * heap footprint to match the target utilization ratio.  This
1245  * should only be called immediately after a full mark/sweep.
1246  */
1247 void dvmHeapSourceGrowForUtilization()
1248 {
1249     HS_BOILERPLATE();
1250 
1251     HeapSource *hs = gHs;
1252     Heap* heap = hs2heap(hs);
1253 
1254     /* Use the current target utilization ratio to determine the
1255      * ideal heap size based on the size of the live set.
1256      * Note that only the active heap plays any part in this.
1257      *
1258      * Avoid letting the old heaps influence the target free size,
1259      * because they may be full of objects that aren't actually
1260      * in the working set.  Just look at the allocated size of
1261      * the current heap.
1262      */
1263     size_t currentHeapUsed = heap->bytesAllocated;
1264     size_t targetHeapSize =
1265             getUtilizationTarget(currentHeapUsed, hs->targetUtilization);
1266 
1267     /* The ideal size includes the old heaps; add overhead so that
1268      * it can be immediately subtracted again in setIdealFootprint().
1269      * If the target heap size would exceed the max, setIdealFootprint()
1270      * will clamp it to a legal value.
1271      */
1272     size_t overhead = getSoftFootprint(false);
1273     setIdealFootprint(targetHeapSize + overhead);
1274 
1275     size_t freeBytes = getAllocLimit(hs);
1276     if (freeBytes < CONCURRENT_MIN_FREE) {
1277         /* Not enough free memory to allow a concurrent GC. */
1278         heap->concurrentStartBytes = SIZE_MAX;
1279     } else {
1280         heap->concurrentStartBytes = freeBytes - CONCURRENT_START;
1281     }
1282 }
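/* Worked example (illustrative values, not in the original source): if
 * getAllocLimit() reports 12 MiB after the footprint adjustment above,
 * concurrentStartBytes becomes 12 MiB - CONCURRENT_START, so a concurrent GC
 * is signaled once free space in the active heap drops below roughly 128 KiB.
 * If the limit were below CONCURRENT_MIN_FREE (256 KiB), concurrentStartBytes
 * would be set to SIZE_MAX and no concurrent GC would be triggered.
 */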
1283 
1284 /*
1285  * Return free pages to the system.
1286  * TODO: move this somewhere else, especially the native heap part.
1287  */
1288 static void releasePagesInRange(void *start, void *end, void *nbytes)
1289 {
1290     /* Linux requires that the madvise() start address is page-aligned.
1291     * We also align the end address.
1292     */
1293     start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
1294     end = (void *)((size_t)end & ~(SYSTEM_PAGE_SIZE - 1));
1295     if (start < end) {
1296         size_t length = (char *)end - (char *)start;
1297         madvise(start, length, MADV_DONTNEED);
1298         *(size_t *)nbytes += length;
1299     }
1300 }
1301 
1302 /*
1303  * Return unused memory to the system if possible.
1304  */
1305 static void trimHeaps()
1306 {
1307     HS_BOILERPLATE();
1308 
1309     HeapSource *hs = gHs;
1310     size_t heapBytes = 0;
1311     for (size_t i = 0; i < hs->numHeaps; i++) {
1312         Heap *heap = &hs->heaps[i];
1313 
1314         /* Return the wilderness chunk to the system.
1315          */
1316         mspace_trim(heap->msp, 0);
1317 
1318         /* Return any whole free pages to the system.
1319          */
1320         mspace_walk_free_pages(heap->msp, releasePagesInRange, &heapBytes);
1321     }
1322 
1323     /* Same for the native heap.
1324      */
1325     dlmalloc_trim(0);
1326     size_t nativeBytes = 0;
1327     dlmalloc_walk_free_pages(releasePagesInRange, &nativeBytes);
1328 
1329     LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes",
1330             heapBytes, nativeBytes, heapBytes + nativeBytes);
1331 }
1332 
1333 /*
1334  * Walks over the heap source and passes every allocated and
1335  * free chunk to the callback.
1336  */
1337 void dvmHeapSourceWalk(void(*callback)(const void *chunkptr, size_t chunklen,
1338                                        const void *userptr, size_t userlen,
1339                                        void *arg),
1340                        void *arg)
1341 {
1342     HS_BOILERPLATE();
1343 
1344     /* Walk the heaps from oldest to newest.
1345      */
1346 //TODO: do this in address order
1347     HeapSource *hs = gHs;
1348     for (size_t i = hs->numHeaps; i > 0; --i) {
1349         mspace_walk_heap(hs->heaps[i-1].msp, callback, arg);
1350     }
1351 }
1352 
1353 /*
1354  * Gets the number of heaps available in the heap source.
1355  *
1356  * Caller must hold the heap lock, because gHs caches a field
1357  * in gDvm.gcHeap.
1358  */
1359 size_t dvmHeapSourceGetNumHeaps()
1360 {
1361     HS_BOILERPLATE();
1362 
1363     return gHs->numHeaps;
1364 }
1365 
1366 void *dvmHeapSourceGetImmuneLimit(bool isPartial)
1367 {
1368     if (isPartial) {
1369         return hs2heap(gHs)->base;
1370     } else {
1371         return NULL;
1372     }
1373 }
1374