/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdint.h>
#include <sys/mman.h>
#include <errno.h>

#define SIZE_MAX UINT_MAX  // TODO: get SIZE_MAX from stdint.h

#include "Dalvik.h"
#include "alloc/DlMalloc.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/HeapSource.h"
#include "alloc/HeapBitmap.h"
#include "alloc/HeapBitmapInlines.h"

static void snapIdealFootprint();
static void setIdealFootprint(size_t max);
static size_t getMaximumSize(const HeapSource *hs);
static void trimHeaps();

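/* Utilization ratios below are stored as fixed-point fractions of
 * HEAP_UTILIZATION_MAX; for example, a targetUtilization of 512 corresponds
 * to a target ratio of 512/1024 = 0.5 (see dvmSetTargetHeapUtilization()).
 */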
#define HEAP_UTILIZATION_MAX        1024

/* How long to wait after a GC before performing a heap trim
 * operation to reclaim unused pages.
 */
#define HEAP_TRIM_IDLE_TIME_MS (5 * 1000)

/* Start a concurrent collection when free memory falls under this
 * many bytes.
 */
#define CONCURRENT_START (128 << 10)

/* The next GC will not be concurrent when free memory after a GC is
 * under this many bytes.
 */
#define CONCURRENT_MIN_FREE (CONCURRENT_START + (128 << 10))
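/* With the values above, a concurrent collection is requested once less
 * than 128 KiB of the allowed footprint remains free, and the next
 * collection is not run concurrently if a completed GC leaves less than
 * 256 KiB free.
 */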

#define HS_BOILERPLATE() \
    do { \
        assert(gDvm.gcHeap != NULL); \
        assert(gDvm.gcHeap->heapSource != NULL); \
        assert(gHs == gDvm.gcHeap->heapSource); \
    } while (0)

struct Heap {
    /* The mspace to allocate from.
     */
    mspace msp;

    /* The largest size that this heap is allowed to grow to.
     */
    size_t maximumSize;

    /* Number of bytes allocated from this mspace for objects,
     * including any overhead.  This value is NOT exact, and
     * should only be used as an input for certain heuristics.
     */
    size_t bytesAllocated;

    /* Number of bytes allocated from this mspace at which a
     * concurrent garbage collection will be started.
     */
    size_t concurrentStartBytes;

    /* Number of objects currently allocated from this mspace.
     */
    size_t objectsAllocated;

    /*
     * The lowest address of this heap, inclusive.
     */
    char *base;

    /*
     * The highest address of this heap, exclusive.
     */
    char *limit;

    /*
     * If the heap has an mspace, the current high water mark in
     * allocations requested via dvmHeapSourceMorecore.
     */
    char *brk;
};

struct HeapSource {
    /* Target ideal heap utilization ratio; range 1..HEAP_UTILIZATION_MAX
     */
    size_t targetUtilization;

    /* The starting heap size.
     */
    size_t startSize;

    /* The largest that the heap source as a whole is allowed to grow.
     */
    size_t maximumSize;

    /*
     * The largest size we permit the heap to grow.  This value allows
     * the user to limit the heap growth below the maximum size.  This
     * is a workaround until we can dynamically set the maximum size.
     * This value can range between the starting size and the maximum
     * size but should never be set below the current footprint of the
     * heap.
     */
    size_t growthLimit;

    /* The desired max size of the heap source as a whole.
     */
    size_t idealSize;

    /* The maximum number of bytes allowed to be allocated from the
     * active heap before a GC is forced.  This is used to "shrink" the
     * heap in lieu of actual compaction.
     */
    size_t softLimit;

    /* Minimum number of free bytes. Used with the target utilization when
     * setting the softLimit. Never allows fewer bytes than this to be free
     * when the heap size is below the maximum size or growth limit.
     */
    size_t minFree;

    /* Maximum number of free bytes. Used with the target utilization when
     * setting the softLimit. Never allows more bytes than this to be free
     * when the heap size is below the maximum size or growth limit.
     */
    size_t maxFree;

    /* The heaps; heaps[0] is always the active heap,
     * which new objects should be allocated from.
     */
    Heap heaps[HEAP_SOURCE_MAX_HEAP_COUNT];

    /* The current number of heaps.
     */
    size_t numHeaps;

    /* True if zygote mode was active when the HeapSource was created.
     */
    bool sawZygote;

    /*
     * The base address of the virtual memory reservation.
     */
    char *heapBase;

    /*
     * The length in bytes of the virtual memory reservation.
     */
    size_t heapLength;

    /*
     * The live object bitmap.
     */
    HeapBitmap liveBits;

    /*
     * The mark bitmap.
     */
    HeapBitmap markBits;

    /*
     * State for the GC daemon.
     */
    bool hasGcThread;
    pthread_t gcThread;
    bool gcThreadShutdown;
    pthread_mutex_t gcThreadMutex;
    pthread_cond_t gcThreadCond;
    bool gcThreadTrimNeeded;
};

#define hs2heap(hs_) (&((hs_)->heaps[0]))
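/* hs2heap() yields the active heap (heaps[0]), the only heap that new
 * objects are allocated from.
 */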

/*
 * Returns true iff a soft limit is in effect for the active heap.
 */
static bool isSoftLimited(const HeapSource *hs)
{
    /* softLimit will be either SIZE_MAX or the limit for the
     * active mspace.  idealSize can be greater than softLimit
     * if there is more than one heap.  If there is only one
     * heap, a non-SIZE_MAX softLimit should always be the same
     * as idealSize.
     */
    return hs->softLimit <= hs->idealSize;
}

/*
 * Returns approximately the maximum number of bytes allowed to be
 * allocated from the active heap before a GC is forced.
 */
static size_t getAllocLimit(const HeapSource *hs)
{
    if (isSoftLimited(hs)) {
        return hs->softLimit;
    } else {
        return mspace_footprint_limit(hs2heap(hs)->msp);
    }
}

/*
 * Returns the current footprint of all heaps.  If includeActive
 * is false, don't count the heap at index 0.
 */
static size_t oldHeapOverhead(const HeapSource *hs, bool includeActive)
{
    size_t footprint = 0;
    size_t i;

    if (includeActive) {
        i = 0;
    } else {
        i = 1;
    }
    for (/* i = i */; i < hs->numHeaps; i++) {
//TODO: include size of bitmaps?  If so, don't use bitsLen, listen to .max
        footprint += mspace_footprint(hs->heaps[i].msp);
    }
    return footprint;
}

/*
 * Returns the heap that <ptr> could have come from, or NULL
 * if it could not have come from any heap.
 */
static Heap *ptr2heap(const HeapSource *hs, const void *ptr)
{
    const size_t numHeaps = hs->numHeaps;

    if (ptr != NULL) {
        for (size_t i = 0; i < numHeaps; i++) {
            const Heap *const heap = &hs->heaps[i];

            if ((const char *)ptr >= heap->base && (const char *)ptr < heap->limit) {
                return (Heap *)heap;
            }
        }
    }
    return NULL;
}

/*
 * Functions to update heapSource->bytesAllocated when an object
 * is allocated or freed.  mspace_usable_size() will give
 * us a much more accurate picture of heap utilization than
 * the requested byte sizes would.
 *
 * These aren't exact, and should not be treated as such.
 */
static void countAllocation(Heap *heap, const void *ptr)
{
    assert(heap->bytesAllocated < mspace_footprint(heap->msp));

    heap->bytesAllocated += mspace_usable_size(ptr) +
            HEAP_SOURCE_CHUNK_OVERHEAD;
    heap->objectsAllocated++;
    HeapSource* hs = gDvm.gcHeap->heapSource;
    dvmHeapBitmapSetObjectBit(&hs->liveBits, ptr);

    assert(heap->bytesAllocated < mspace_footprint(heap->msp));
}

static void countFree(Heap *heap, const void *ptr, size_t *numBytes)
{
    size_t delta = mspace_usable_size(ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
    assert(delta > 0);
    if (delta < heap->bytesAllocated) {
        heap->bytesAllocated -= delta;
    } else {
        heap->bytesAllocated = 0;
    }
    HeapSource* hs = gDvm.gcHeap->heapSource;
    dvmHeapBitmapClearObjectBit(&hs->liveBits, ptr);
    if (heap->objectsAllocated > 0) {
        heap->objectsAllocated--;
    }
    *numBytes += delta;
}

static HeapSource *gHs = NULL;

static mspace createMspace(void* begin, size_t morecoreStart, size_t startingSize)
{
    // Clear errno to allow strerror on error.
    errno = 0;
    // Allow access to the initial pages that will hold the mspace.
    mprotect(begin, morecoreStart, PROT_READ | PROT_WRITE);
    // Create mspace using our backing storage starting at begin and with a footprint of
    // morecoreStart. Don't use an internal dlmalloc lock. When morecoreStart bytes of memory are
    // exhausted morecore will be called.
    mspace msp = create_mspace_with_base(begin, morecoreStart, false /*locked*/);
    if (msp != NULL) {
        // Do not allow morecore requests to succeed beyond the starting size of the heap.
        mspace_set_footprint_limit(msp, startingSize);
    } else {
        ALOGE("create_mspace_with_base failed %s", strerror(errno));
    }
    return msp;
}

/*
 * Service request from DlMalloc to increase heap size.
 */
void* dvmHeapSourceMorecore(void* mspace, intptr_t increment)
{
    Heap* heap = NULL;
    for (size_t i = 0; i < gHs->numHeaps; i++) {
        if (gHs->heaps[i].msp == mspace) {
            heap = &gHs->heaps[i];
            break;
        }
    }
    if (heap == NULL) {
        ALOGE("Failed to find heap for mspace %p", mspace);
        dvmAbort();
    }
    char* original_brk = heap->brk;
    if (increment != 0) {
        char* new_brk = original_brk + increment;
        if (increment > 0) {
            // Should never be asked to increase the allocation beyond the capacity of the space.
            // Enforced by mspace_set_footprint_limit.
            assert(new_brk <= heap->limit);
            mprotect(original_brk, increment, PROT_READ | PROT_WRITE);
        } else {
            // Should never be asked for a negative footprint (i.e., before base).
            assert(original_brk + increment > heap->base);
            // Advise we don't need the pages and protect them.
            size_t size = -increment;
            madvise(new_brk, size, MADV_DONTNEED);
            mprotect(new_brk, size, PROT_NONE);
        }
        // Update brk.
        heap->brk = new_brk;
    }
    return original_brk;
}

const size_t kInitialMorecoreStart = SYSTEM_PAGE_SIZE;
/*
 * Add the initial heap.  Returns false if the initial heap was
 * already added to the heap source.
 */
static bool addInitialHeap(HeapSource *hs, mspace msp, size_t maximumSize)
{
    assert(hs != NULL);
    assert(msp != NULL);
    if (hs->numHeaps != 0) {
        return false;
    }
    hs->heaps[0].msp = msp;
    hs->heaps[0].maximumSize = maximumSize;
    hs->heaps[0].concurrentStartBytes = SIZE_MAX;
    hs->heaps[0].base = hs->heapBase;
    hs->heaps[0].limit = hs->heapBase + maximumSize;
    hs->heaps[0].brk = hs->heapBase + kInitialMorecoreStart;
    hs->numHeaps = 1;
    return true;
}

/*
 * Adds an additional heap to the heap source.  Returns false if there
 * are too many heaps or insufficient free space to add another heap.
 */
static bool addNewHeap(HeapSource *hs)
{
    Heap heap;

    assert(hs != NULL);
    if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) {
        ALOGE("Attempt to create too many heaps (%zd >= %zd)",
                hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT);
        dvmAbort();
        return false;
    }

    memset(&heap, 0, sizeof(heap));

    /*
     * Heap storage comes from a common virtual memory reservation.
     * The new heap will start on the page after the old heap.
     */
    char *base = hs->heaps[0].brk;
    size_t overhead = base - hs->heaps[0].base;
    assert(((size_t)hs->heaps[0].base & (SYSTEM_PAGE_SIZE - 1)) == 0);

    if (overhead + hs->minFree >= hs->maximumSize) {
        LOGE_HEAP("No room to create any more heaps "
                  "(%zd overhead, %zd max)",
                  overhead, hs->maximumSize);
        return false;
    }
    size_t morecoreStart = SYSTEM_PAGE_SIZE;
    heap.maximumSize = hs->growthLimit - overhead;
    heap.concurrentStartBytes = hs->minFree - CONCURRENT_START;
    heap.base = base;
    heap.limit = heap.base + heap.maximumSize;
    heap.brk = heap.base + morecoreStart;
    heap.msp = createMspace(base, morecoreStart, hs->minFree);
    if (heap.msp == NULL) {
        return false;
    }

    /* Don't let the soon-to-be-old heap grow any further.
     */
    hs->heaps[0].maximumSize = overhead;
    hs->heaps[0].limit = base;
    mspace_set_footprint_limit(hs->heaps[0].msp, overhead);

    /* Put the new heap in the list, at heaps[0].
     * Shift existing heaps down.
     */
    memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0]));
    hs->heaps[0] = heap;
    hs->numHeaps++;

    return true;
}

/*
 * The garbage collection daemon.  Initiates a concurrent collection
 * when signaled.  Also periodically trims the heaps when a few seconds
 * have elapsed since the last concurrent GC.
 */
static void *gcDaemonThread(void* arg)
{
    dvmChangeStatus(NULL, THREAD_VMWAIT);
    dvmLockMutex(&gHs->gcThreadMutex);
    while (gHs->gcThreadShutdown != true) {
        bool trim = false;
        if (gHs->gcThreadTrimNeeded) {
            int result = dvmRelativeCondWait(&gHs->gcThreadCond, &gHs->gcThreadMutex,
                    HEAP_TRIM_IDLE_TIME_MS, 0);
            if (result == ETIMEDOUT) {
                /* Timed out waiting for a GC request, schedule a heap trim. */
                trim = true;
            }
        } else {
            dvmWaitCond(&gHs->gcThreadCond, &gHs->gcThreadMutex);
        }

        dvmLockHeap();
        /*
         * Another thread may have started a concurrent garbage
         * collection before we were scheduled.  Check for this
         * condition before proceeding.
         */
        if (!gDvm.gcHeap->gcRunning) {
            dvmChangeStatus(NULL, THREAD_RUNNING);
            if (trim) {
                trimHeaps();
                gHs->gcThreadTrimNeeded = false;
            } else {
                dvmCollectGarbageInternal(GC_CONCURRENT);
                gHs->gcThreadTrimNeeded = true;
            }
            dvmChangeStatus(NULL, THREAD_VMWAIT);
        }
        dvmUnlockHeap();
    }
    dvmChangeStatus(NULL, THREAD_RUNNING);
    return NULL;
}

static bool gcDaemonStartup()
{
    dvmInitMutex(&gHs->gcThreadMutex);
    pthread_cond_init(&gHs->gcThreadCond, NULL);
    gHs->gcThreadShutdown = false;
    gHs->hasGcThread = dvmCreateInternalThread(&gHs->gcThread, "GC",
                                               gcDaemonThread, NULL);
    return gHs->hasGcThread;
}

static void gcDaemonShutdown()
{
    if (gHs->hasGcThread) {
        dvmLockMutex(&gHs->gcThreadMutex);
        gHs->gcThreadShutdown = true;
        dvmSignalCond(&gHs->gcThreadCond);
        dvmUnlockMutex(&gHs->gcThreadMutex);
        pthread_join(gHs->gcThread, NULL);
    }
}

/*
 * Create a stack big enough for the worst possible case, where the
 * heap is perfectly full of the smallest object.
 * TODO: be better about memory usage; use a smaller stack with
 *       overflow detection and recovery.
 */
static bool allocMarkStack(GcMarkStack *stack, size_t maximumSize)
{
    const char *name = "dalvik-mark-stack";
    void *addr;

    assert(stack != NULL);
    stack->length = maximumSize * sizeof(Object*) /
        (sizeof(Object) + HEAP_SOURCE_CHUNK_OVERHEAD);
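    /* In other words, reserve one Object* mark-stack slot for each
     * smallest-possible object (sizeof(Object) plus per-chunk overhead)
     * that could fit in a heap of maximumSize bytes.
     */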
    addr = dvmAllocRegion(stack->length, PROT_READ | PROT_WRITE, name);
    if (addr == NULL) {
        return false;
    }
    stack->base = (const Object **)addr;
    stack->limit = (const Object **)((char *)addr + stack->length);
    stack->top = NULL;
    madvise(stack->base, stack->length, MADV_DONTNEED);
    return true;
}

static void freeMarkStack(GcMarkStack *stack)
{
    assert(stack != NULL);
    munmap(stack->base, stack->length);
    memset(stack, 0, sizeof(*stack));
}

/*
 * Initializes the heap source; must be called before any other
 * dvmHeapSource*() functions.  Returns a GcHeap structure
 * allocated from the heap source.
 */
GcHeap* dvmHeapSourceStartup(size_t startSize, size_t maximumSize,
                             size_t growthLimit)
{
    GcHeap *gcHeap;
    HeapSource *hs;
    mspace msp;
    size_t length;
    void *base;

    assert(gHs == NULL);

    if (!(startSize <= growthLimit && growthLimit <= maximumSize)) {
        ALOGE("Bad heap size parameters (start=%zd, max=%zd, limit=%zd)",
             startSize, maximumSize, growthLimit);
        return NULL;
    }

    /*
     * Allocate a contiguous region of virtual memory to be subdivided
     * among the heaps managed by the garbage collector.
     */
    length = ALIGN_UP_TO_PAGE_SIZE(maximumSize);
    base = dvmAllocRegion(length, PROT_NONE, "dalvik-heap");
    if (base == NULL) {
        return NULL;
    }

    /* Create an unlocked dlmalloc mspace to use as
     * a heap source.
     */
    msp = createMspace(base, kInitialMorecoreStart, startSize);
    if (msp == NULL) {
        goto fail;
    }

    gcHeap = (GcHeap *)calloc(1, sizeof(*gcHeap));
    if (gcHeap == NULL) {
        LOGE_HEAP("Can't allocate heap descriptor");
        goto fail;
    }

    hs = (HeapSource *)calloc(1, sizeof(*hs));
    if (hs == NULL) {
        LOGE_HEAP("Can't allocate heap source");
        free(gcHeap);
        goto fail;
    }

    hs->targetUtilization = gDvm.heapTargetUtilization * HEAP_UTILIZATION_MAX;
    hs->minFree = gDvm.heapMinFree;
    hs->maxFree = gDvm.heapMaxFree;
    hs->startSize = startSize;
    hs->maximumSize = maximumSize;
    hs->growthLimit = growthLimit;
    hs->idealSize = startSize;
    hs->softLimit = SIZE_MAX;    // no soft limit at first
    hs->numHeaps = 0;
    hs->sawZygote = gDvm.zygote;
    hs->hasGcThread = false;
    hs->heapBase = (char *)base;
    hs->heapLength = length;

    if (hs->maxFree > hs->maximumSize) {
      hs->maxFree = hs->maximumSize;
    }
    if (hs->minFree < CONCURRENT_START) {
      hs->minFree = CONCURRENT_START;
    } else if (hs->minFree > hs->maxFree) {
      hs->minFree = hs->maxFree;
    }

    if (!addInitialHeap(hs, msp, growthLimit)) {
        LOGE_HEAP("Can't add initial heap");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->liveBits, base, length, "dalvik-bitmap-1")) {
        LOGE_HEAP("Can't create liveBits");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->markBits, base, length, "dalvik-bitmap-2")) {
        LOGE_HEAP("Can't create markBits");
        dvmHeapBitmapDelete(&hs->liveBits);
        goto fail;
    }
    if (!allocMarkStack(&gcHeap->markContext.stack, hs->maximumSize)) {
        ALOGE("Can't create markStack");
        dvmHeapBitmapDelete(&hs->markBits);
        dvmHeapBitmapDelete(&hs->liveBits);
        goto fail;
    }
    gcHeap->markContext.bitmap = &hs->markBits;
    gcHeap->heapSource = hs;

    gHs = hs;
    return gcHeap;

fail:
    munmap(base, length);
    return NULL;
}

bool dvmHeapSourceStartupAfterZygote()
{
    return gDvm.concurrentMarkSweep ? gcDaemonStartup() : true;
}

/*
 * This is called while in zygote mode, right before we fork() for the
 * first time.  We create a heap for all future zygote process allocations,
 * in an attempt to avoid touching pages in the zygote heap.  (This would
 * probably be unnecessary if we had a compacting GC -- the source of our
 * troubles is small allocations filling in the gaps from larger ones.)
 */
bool dvmHeapSourceStartupBeforeFork()
{
    HeapSource *hs = gHs; // use a local to avoid the implicit "volatile"

    HS_BOILERPLATE();

    assert(gDvm.zygote);

    if (!gDvm.newZygoteHeapAllocated) {
       /* Ensure heaps are trimmed to minimize footprint pre-fork.
        */
        trimHeaps();
        /* Create a new heap for post-fork zygote allocations.  We only
         * try once, even if it fails.
         */
        ALOGV("Splitting out new zygote heap");
        gDvm.newZygoteHeapAllocated = true;
        return addNewHeap(hs);
    }
    return true;
}

void dvmHeapSourceThreadShutdown()
{
    if (gDvm.gcHeap != NULL && gDvm.concurrentMarkSweep) {
        gcDaemonShutdown();
    }
}

/*
 * Tears down the entire GcHeap structure and all of the substructures
 * attached to it.  This call has the side effect of setting the given
 * gcHeap pointer and gHs to NULL.
 */
void dvmHeapSourceShutdown(GcHeap **gcHeap)
{
    assert(gcHeap != NULL);
    if (*gcHeap != NULL && (*gcHeap)->heapSource != NULL) {
        HeapSource *hs = (*gcHeap)->heapSource;
        dvmHeapBitmapDelete(&hs->liveBits);
        dvmHeapBitmapDelete(&hs->markBits);
        freeMarkStack(&(*gcHeap)->markContext.stack);
        munmap(hs->heapBase, hs->heapLength);
        free(hs);
        gHs = NULL;
        free(*gcHeap);
        *gcHeap = NULL;
    }
}

/*
 * Gets the beginning of the allocation for the HeapSource.
 */
void *dvmHeapSourceGetBase()
{
    return gHs->heapBase;
}

/*
 * Returns a high water mark; all objects must have been allocated
 * between the base and this limit.
 */
void *dvmHeapSourceGetLimit()
{
    HeapSource *hs = gHs;
    void *max_brk = hs->heaps[0].brk;

#ifndef NDEBUG
    for (size_t i = 1; i < hs->numHeaps; i++) {
        Heap *const heap = &hs->heaps[i];
        void *heap_brk = heap->brk;
        assert (max_brk > heap_brk);
    }
#endif
    return max_brk;
}

/*
 * Returns the requested value. If the per-heap stats are requested, fill
 * them as well.
 *
 * Caller must hold the heap lock.
 */
size_t dvmHeapSourceGetValue(HeapSourceValueSpec spec, size_t perHeapStats[],
                             size_t arrayLen)
{
    HeapSource *hs = gHs;
    size_t value = 0;
    size_t total = 0;

    HS_BOILERPLATE();

    assert(arrayLen >= hs->numHeaps || perHeapStats == NULL);
    for (size_t i = 0; i < hs->numHeaps; i++) {
        Heap *const heap = &hs->heaps[i];

        switch (spec) {
        case HS_FOOTPRINT:
            value = heap->brk - heap->base;
            assert(value == mspace_footprint(heap->msp));
            break;
        case HS_ALLOWED_FOOTPRINT:
            value = mspace_footprint_limit(heap->msp);
            break;
        case HS_BYTES_ALLOCATED:
            value = heap->bytesAllocated;
            break;
        case HS_OBJECTS_ALLOCATED:
            value = heap->objectsAllocated;
            break;
        default:
            // quiet gcc
            break;
        }
        if (perHeapStats) {
            perHeapStats[i] = value;
        }
        total += value;
    }
    return total;
}

void dvmHeapSourceGetRegions(uintptr_t *base, uintptr_t *max, size_t numHeaps)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    assert(numHeaps <= hs->numHeaps);
    for (size_t i = 0; i < numHeaps; ++i) {
        base[i] = (uintptr_t)hs->heaps[i].base;
        max[i] = MIN((uintptr_t)hs->heaps[i].limit - 1, hs->markBits.max);
    }
}

/*
 * Get the bitmap representing all live objects.
 */
HeapBitmap *dvmHeapSourceGetLiveBits()
{
    HS_BOILERPLATE();

    return &gHs->liveBits;
}

/*
 * Get the bitmap representing all marked objects.
 */
HeapBitmap *dvmHeapSourceGetMarkBits()
{
    HS_BOILERPLATE();

    return &gHs->markBits;
}

/*
 * Swap the live and mark bitmaps.
 */
void dvmHeapSourceSwapBitmaps()
{
    HeapBitmap tmp = gHs->liveBits;
    gHs->liveBits = gHs->markBits;
    gHs->markBits = tmp;
}

void dvmHeapSourceZeroMarkBitmap()
{
    HS_BOILERPLATE();

    dvmHeapBitmapZero(&gHs->markBits);
}

void dvmMarkImmuneObjects(const char *immuneLimit)
{
    /*
     * Copy the contents of the live bit vector for immune object
     * range into the mark bit vector.
     */
    /* The only values generated by dvmHeapSourceGetImmuneLimit() */
    assert(immuneLimit == gHs->heaps[0].base ||
           immuneLimit == NULL);
    assert(gHs->liveBits.base == gHs->markBits.base);
    assert(gHs->liveBits.bitsLen == gHs->markBits.bitsLen);
    /* heap[0] is never immune */
    assert(gHs->heaps[0].base >= immuneLimit);
    assert(gHs->heaps[0].limit > immuneLimit);

    for (size_t i = 1; i < gHs->numHeaps; ++i) {
        if (gHs->heaps[i].base < immuneLimit) {
            assert(gHs->heaps[i].limit <= immuneLimit);
            /* Compute the number of words to copy in the bitmap. */
            size_t index = HB_OFFSET_TO_INDEX(
                (uintptr_t)gHs->heaps[i].base - gHs->liveBits.base);
            /* Compute the starting offset in the live and mark bits. */
            char *src = (char *)(gHs->liveBits.bits + index);
            char *dst = (char *)(gHs->markBits.bits + index);
            /* Compute the number of bytes of the live bitmap to copy. */
            size_t length = HB_OFFSET_TO_BYTE_INDEX(
                gHs->heaps[i].limit - gHs->heaps[i].base);
            /* Do the copy. */
            memcpy(dst, src, length);
            /* Make sure max points to the address of the highest set bit. */
            if (gHs->markBits.max < (uintptr_t)gHs->heaps[i].limit) {
                gHs->markBits.max = (uintptr_t)gHs->heaps[i].limit;
            }
        }
    }
}

/*
 * Allocates <n> bytes of zeroed data.
 */
void* dvmHeapSourceAlloc(size_t n)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);
    if (heap->bytesAllocated + n > hs->softLimit) {
        /*
         * This allocation would push us over the soft limit; act as
         * if the heap is full.
         */
        LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation",
                  FRACTIONAL_MB(hs->softLimit), n);
        return NULL;
    }
    void* ptr = mspace_calloc(heap->msp, 1, n);
    if (ptr == NULL) {
        return NULL;
    }
    countAllocation(heap, ptr);
    /*
     * Check to see if a concurrent GC should be initiated.
     */
    if (gDvm.gcHeap->gcRunning || !hs->hasGcThread) {
        /*
         * The garbage collector thread is already running or has yet
         * to be started.  Do nothing.
         */
        return ptr;
    }
    if (heap->bytesAllocated > heap->concurrentStartBytes) {
        /*
         * We have exceeded the allocation threshold.  Wake up the
         * garbage collector.
         */
        dvmSignalCond(&gHs->gcThreadCond);
    }
    return ptr;
}

/* Remove any hard limits, try to allocate, and shrink back down.
 * Last resort when trying to allocate an object.
 */
static void* heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n)
{
    /* Grow as much as possible, but don't let the real footprint
     * go over the absolute max.
     */
    size_t max = heap->maximumSize;

    mspace_set_footprint_limit(heap->msp, max);
    void* ptr = dvmHeapSourceAlloc(n);

    /* Shrink back down as small as possible.  Our caller may
     * readjust max_allowed to a more appropriate value.
     */
    mspace_set_footprint_limit(heap->msp,
                               mspace_footprint(heap->msp));
    return ptr;
}

/*
 * Allocates <n> bytes of zeroed data, growing as much as possible
 * if necessary.
 */
void* dvmHeapSourceAllocAndGrow(size_t n)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);
    void* ptr = dvmHeapSourceAlloc(n);
    if (ptr != NULL) {
        return ptr;
    }

    size_t oldIdealSize = hs->idealSize;
    if (isSoftLimited(hs)) {
        /* We're soft-limited.  Try removing the soft limit to
         * see if we can allocate without actually growing.
         */
        hs->softLimit = SIZE_MAX;
        ptr = dvmHeapSourceAlloc(n);
        if (ptr != NULL) {
            /* Removing the soft limit worked;  fix things up to
             * reflect the new effective ideal size.
             */
            snapIdealFootprint();
            return ptr;
        }
        // softLimit intentionally left at SIZE_MAX.
    }

    /* We're not soft-limited.  Grow the heap to satisfy the request.
     * If this call fails, no footprints will have changed.
     */
    ptr = heapAllocAndGrow(hs, heap, n);
    if (ptr != NULL) {
        /* The allocation succeeded.  Fix up the ideal size to
         * reflect any footprint modifications that had to happen.
         */
        snapIdealFootprint();
    } else {
        /* We just couldn't do it.  Restore the original ideal size,
         * fixing up softLimit if necessary.
         */
        setIdealFootprint(oldIdealSize);
    }
    return ptr;
}

/*
 * Frees the first numPtrs objects in the ptrs list and returns the
 * amount of reclaimed storage. The list must contain addresses all in
 * the same mspace, and must be in increasing order. This implies that
 * there are no duplicates, and no entries are NULL.
 */
size_t dvmHeapSourceFreeList(size_t numPtrs, void **ptrs)
{
    HS_BOILERPLATE();

    if (numPtrs == 0) {
        return 0;
    }

    assert(ptrs != NULL);
    assert(*ptrs != NULL);
    Heap* heap = ptr2heap(gHs, *ptrs);
    size_t numBytes = 0;
    if (heap != NULL) {
        mspace msp = heap->msp;
        // Calling mspace_free on shared heaps disrupts sharing too
        // much. For heap[0] -- the 'active heap' -- we call
        // mspace_free, but on the other heaps we only do some
        // accounting.
        if (heap == gHs->heaps) {
            // Count freed objects.
            for (size_t i = 0; i < numPtrs; i++) {
                assert(ptrs[i] != NULL);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], &numBytes);
            }
            // Bulk free ptrs.
            mspace_bulk_free(msp, ptrs, numPtrs);
        } else {
            // This is not an 'active heap'. Only do the accounting.
            for (size_t i = 0; i < numPtrs; i++) {
                assert(ptrs[i] != NULL);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], &numBytes);
            }
        }
    }
    return numBytes;
}

/*
 * Returns true iff <ptr> is in the heap source.
 */
bool dvmHeapSourceContainsAddress(const void *ptr)
{
    HS_BOILERPLATE();

    return (dvmHeapSourceGetBase() <= ptr) && (ptr <= dvmHeapSourceGetLimit());
}

/*
 * Returns true iff <ptr> was allocated from the heap source.
 */
bool dvmHeapSourceContains(const void *ptr)
{
    HS_BOILERPLATE();

    if (dvmHeapSourceContainsAddress(ptr)) {
        return dvmHeapBitmapIsObjectBitSet(&gHs->liveBits, ptr) != 0;
    }
    return false;
}

bool dvmIsZygoteObject(const Object* obj)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    if (dvmHeapSourceContains(obj) && hs->sawZygote) {
        Heap *heap = ptr2heap(hs, obj);
        if (heap != NULL) {
            /* If the object is not in the active heap, we assume that
             * it was allocated as part of zygote.
             */
            return heap != hs->heaps;
        }
    }
    /* The pointer is outside of any known heap, or we are not
     * running in zygote mode.
     */
    return false;
}

/*
 * Returns the number of usable bytes in an allocated chunk; the size
 * may be larger than the size passed to dvmHeapSourceAlloc().
 */
size_t dvmHeapSourceChunkSize(const void *ptr)
{
    HS_BOILERPLATE();

    Heap* heap = ptr2heap(gHs, ptr);
    if (heap != NULL) {
        return mspace_usable_size(ptr);
    }
    return 0;
}

/*
 * Returns the number of bytes that the heap source has allocated
 * from the system using sbrk/mmap, etc.
 *
 * Caller must hold the heap lock.
 */
size_t dvmHeapSourceFootprint()
{
    HS_BOILERPLATE();

//TODO: include size of bitmaps?
    return oldHeapOverhead(gHs, true);
}

static size_t getMaximumSize(const HeapSource *hs)
{
    return hs->growthLimit;
}

/*
 * Returns the current maximum size of the heap source respecting any
 * growth limits.
 */
size_t dvmHeapSourceGetMaximumSize()
{
    HS_BOILERPLATE();
    return getMaximumSize(gHs);
}

/*
 * Removes any growth limits.  Allows the user to allocate up to the
 * maximum heap size.
 */
void dvmClearGrowthLimit()
{
    HS_BOILERPLATE();
    dvmLockHeap();
    dvmWaitForConcurrentGcToComplete();
    gDvm.gcHeap->cardTableLength = gDvm.gcHeap->cardTableMaxLength;
    gHs->growthLimit = gHs->maximumSize;
    size_t overhead = oldHeapOverhead(gHs, false);
    gHs->heaps[0].maximumSize = gHs->maximumSize - overhead;
    gHs->heaps[0].limit = gHs->heaps[0].base + gHs->heaps[0].maximumSize;
    dvmUnlockHeap();
}

/*
 * Return the real bytes used by old heaps plus the soft usage of the
 * current heap.  When a soft limit is in effect, this is effectively
 * what it's compared against (though, in practice, it only looks at
 * the current heap).
 */
static size_t getSoftFootprint(bool includeActive)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    size_t ret = oldHeapOverhead(hs, false);
    if (includeActive) {
        ret += hs->heaps[0].bytesAllocated;
    }

    return ret;
}

/*
 * Gets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.
 */
size_t dvmHeapSourceGetIdealFootprint()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return hs->idealSize;
}

/*
 * Sets the soft limit, handling any necessary changes to the allowed
 * footprint of the active heap.
 */
static void setSoftLimit(HeapSource *hs, size_t softLimit)
{
    /* Compare against the actual footprint, rather than the
     * max_allowed, because the heap may not have grown all the
     * way to the allowed size yet.
     */
    mspace msp = hs->heaps[0].msp;
    size_t currentHeapSize = mspace_footprint(msp);
    if (softLimit < currentHeapSize) {
        /* Don't let the heap grow any more, and impose a soft limit.
         */
        mspace_set_footprint_limit(msp, currentHeapSize);
        hs->softLimit = softLimit;
    } else {
        /* Let the heap grow to the requested max, and remove any
         * soft limit, if set.
         */
        mspace_set_footprint_limit(msp, softLimit);
        hs->softLimit = SIZE_MAX;
    }
}

/*
 * Sets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.  Clamps to the appropriate maximum
 * value.
 */
static void setIdealFootprint(size_t max)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    size_t maximumSize = getMaximumSize(hs);
    if (max > maximumSize) {
        LOGI_HEAP("Clamp target GC heap from %zd.%03zdMB to %u.%03uMB",
                FRACTIONAL_MB(max),
                FRACTIONAL_MB(maximumSize));
        max = maximumSize;
    }

    /* Convert max into a size that applies to the active heap.
     * Old heaps will count against the ideal size.
     */
    size_t overhead = getSoftFootprint(false);
    size_t activeMax;
    if (overhead < max) {
        activeMax = max - overhead;
    } else {
        activeMax = 0;
    }

    setSoftLimit(hs, activeMax);
    hs->idealSize = max;
}

/*
 * Make the ideal footprint equal to the current footprint.
 */
static void snapIdealFootprint()
{
    HS_BOILERPLATE();

    setIdealFootprint(getSoftFootprint(true));
}

/*
 * Gets the current ideal heap utilization, represented as a number
 * between zero and one.
 */
float dvmGetTargetHeapUtilization()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return (float)hs->targetUtilization / (float)HEAP_UTILIZATION_MAX;
}

/*
 * Sets the new ideal heap utilization, represented as a number
 * between zero and one.
 */
void dvmSetTargetHeapUtilization(float newTarget)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    /* Clamp it to a reasonable range.
     */
    // TODO: This may need some tuning.
    if (newTarget < 0.2) {
        newTarget = 0.2;
    } else if (newTarget > 0.8) {
        newTarget = 0.8;
    }

    hs->targetUtilization =
            (size_t)(newTarget * (float)HEAP_UTILIZATION_MAX);
    ALOGV("Set heap target utilization to %zd/%d (%f)",
            hs->targetUtilization, HEAP_UTILIZATION_MAX, newTarget);
}

/*
 * Given the size of a live set, returns the ideal heap size given
 * the current target utilization and MIN/MAX values.
 */
static size_t getUtilizationTarget(const HeapSource* hs, size_t liveSize)
{
    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     */
    size_t targetSize = (liveSize / hs->targetUtilization) * HEAP_UTILIZATION_MAX;
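    /* For example, with targetUtilization = 512 (a 0.5 ratio) and an 8 MiB
     * live set, targetSize starts out at roughly 16 MiB before the
     * minFree/maxFree clamping below.
     */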

    /* Cap the amount of free space, though, so we don't end up
     * with, e.g., 8MB of free space when the live set size hits 8MB.
     */
    if (targetSize > liveSize + hs->maxFree) {
        targetSize = liveSize + hs->maxFree;
    } else if (targetSize < liveSize + hs->minFree) {
        targetSize = liveSize + hs->minFree;
    }
    return targetSize;
}

/*
 * Given the current contents of the active heap, increase the allowed
 * heap footprint to match the target utilization ratio.  This
 * should only be called immediately after a full mark/sweep.
 */
void dvmHeapSourceGrowForUtilization()
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     * Note that only the active heap plays any part in this.
     *
     * Avoid letting the old heaps influence the target free size,
     * because they may be full of objects that aren't actually
     * in the working set.  Just look at the allocated size of
     * the current heap.
     */
    size_t currentHeapUsed = heap->bytesAllocated;
    size_t targetHeapSize = getUtilizationTarget(hs, currentHeapUsed);

    /* The ideal size includes the old heaps; add overhead so that
     * it can be immediately subtracted again in setIdealFootprint().
     * If the target heap size would exceed the max, setIdealFootprint()
     * will clamp it to a legal value.
     */
    size_t overhead = getSoftFootprint(false);
    setIdealFootprint(targetHeapSize + overhead);

    size_t freeBytes = getAllocLimit(hs);
    if (freeBytes < CONCURRENT_MIN_FREE) {
        /* Not enough free memory to allow a concurrent GC. */
        heap->concurrentStartBytes = SIZE_MAX;
    } else {
        heap->concurrentStartBytes = freeBytes - CONCURRENT_START;
    }
}

/*
 * Return free pages to the system.
 * TODO: move this somewhere else, especially the native heap part.
 */
static void releasePagesInRange(void* start, void* end, size_t used_bytes,
                                void* releasedBytes)
{
    if (used_bytes == 0) {
        /*
         * We have a range of memory we can try to madvise()
         * back. Linux requires that the madvise() start address is
         * page-aligned.  We also align the end address.
         */
        start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
        end = (void *)((size_t)end & ~(SYSTEM_PAGE_SIZE - 1));
        if (end > start) {
            size_t length = (char *)end - (char *)start;
            madvise(start, length, MADV_DONTNEED);
            *(size_t *)releasedBytes += length;
        }
    }
}

/*
 * Return unused memory to the system if possible.
 */
static void trimHeaps()
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    size_t heapBytes = 0;
    for (size_t i = 0; i < hs->numHeaps; i++) {
        Heap *heap = &hs->heaps[i];

        /* Return the wilderness chunk to the system. */
        mspace_trim(heap->msp, 0);

        /* Return any whole free pages to the system. */
        mspace_inspect_all(heap->msp, releasePagesInRange, &heapBytes);
    }

    /* Same for the native heap. */
    dlmalloc_trim(0);
    size_t nativeBytes = 0;
    dlmalloc_inspect_all(releasePagesInRange, &nativeBytes);

    LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes",
            heapBytes, nativeBytes, heapBytes + nativeBytes);
}

/*
 * Walks over the heap source and passes every allocated and
 * free chunk to the callback.
 */
void dvmHeapSourceWalk(void(*callback)(void* start, void* end,
                                       size_t used_bytes, void* arg),
                       void *arg)
{
    HS_BOILERPLATE();

    /* Walk the heaps from oldest to newest.
     */
//TODO: do this in address order
    HeapSource *hs = gHs;
    for (size_t i = hs->numHeaps; i > 0; --i) {
        mspace_inspect_all(hs->heaps[i-1].msp, callback, arg);
        callback(NULL, NULL, 0, arg);  // Indicate end of a heap.
    }
}

/*
 * Gets the number of heaps available in the heap source.
 *
 * Caller must hold the heap lock, because gHs caches a field
 * in gDvm.gcHeap.
 */
size_t dvmHeapSourceGetNumHeaps()
{
    HS_BOILERPLATE();

    return gHs->numHeaps;
}

void *dvmHeapSourceGetImmuneLimit(bool isPartial)
{
    if (isPartial) {
        return hs2heap(gHs)->base;
    } else {
        return NULL;
    }
}