1 /*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <stdint.h>
18 #include <sys/mman.h>
19 #include <errno.h>
20
21 #define SIZE_MAX UINT_MAX // TODO: get SIZE_MAX from stdint.h
22
23 #include "Dalvik.h"
24 #include "alloc/DlMalloc.h"
25 #include "alloc/Heap.h"
26 #include "alloc/HeapInternal.h"
27 #include "alloc/HeapSource.h"
28 #include "alloc/HeapBitmap.h"
29 #include "alloc/HeapBitmapInlines.h"
30
31 static void snapIdealFootprint();
32 static void setIdealFootprint(size_t max);
33 static size_t getMaximumSize(const HeapSource *hs);
34 static void trimHeaps();
35
36 #define HEAP_UTILIZATION_MAX 1024
37
38 /* How long to wait after a GC before performing a heap trim
39 * operation to reclaim unused pages.
40 */
41 #define HEAP_TRIM_IDLE_TIME_MS (5 * 1000)
42
43 /* Start a concurrent collection when free memory falls under this
44 * many bytes.
45 */
46 #define CONCURRENT_START (128 << 10)
47
48 /* The next GC will not be concurrent when free memory after a GC is
49 * under this many bytes.
50 */
51 #define CONCURRENT_MIN_FREE (CONCURRENT_START + (128 << 10))
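
/* With the values above: a concurrent collection is requested once the
 * headroom left before the allocation limit drops below 128 KiB, and
 * dvmHeapSourceGrowForUtilization() (below) disables the concurrent
 * trigger for the next cycle when fewer than 256 KiB are free after a GC.
 */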
52
53 #define HS_BOILERPLATE() \
54 do { \
55 assert(gDvm.gcHeap != NULL); \
56 assert(gDvm.gcHeap->heapSource != NULL); \
57 assert(gHs == gDvm.gcHeap->heapSource); \
58 } while (0)
59
60 struct Heap {
61 /* The mspace to allocate from.
62 */
63 mspace msp;
64
65 /* The largest size that this heap is allowed to grow to.
66 */
67 size_t maximumSize;
68
69 /* Number of bytes allocated from this mspace for objects,
70 * including any overhead. This value is NOT exact, and
71 * should only be used as an input for certain heuristics.
72 */
73 size_t bytesAllocated;
74
75 /* Number of bytes allocated from this mspace at which a
76 * concurrent garbage collection will be started.
77 */
78 size_t concurrentStartBytes;
79
80 /* Number of objects currently allocated from this mspace.
81 */
82 size_t objectsAllocated;
83
84 /*
85 * The lowest address of this heap, inclusive.
86 */
87 char *base;
88
89 /*
90 * The highest address of this heap, exclusive.
91 */
92 char *limit;
93
94 /*
95 * If the heap has an mspace, the current high water mark in
96 * allocations requested via dvmHeapSourceMorecore.
97 */
98 char *brk;
99 };
100
101 struct HeapSource {
102 /* Target ideal heap utilization ratio; range 1..HEAP_UTILIZATION_MAX
103 */
104 size_t targetUtilization;
105
106 /* The starting heap size.
107 */
108 size_t startSize;
109
110 /* The largest size that the heap source as a whole is allowed to grow to.
111 */
112 size_t maximumSize;
113
114 /*
115 * The largest size we permit the heap to grow to. This value allows
116 * the user to limit the heap growth below the maximum size. This
117 * is a workaround until we can dynamically set the maximum size.
118 * This value can range between the starting size and the maximum
119 * size but should never be set below the current footprint of the
120 * heap.
121 */
122 size_t growthLimit;
123
124 /* The desired max size of the heap source as a whole.
125 */
126 size_t idealSize;
127
128 /* The maximum number of bytes allowed to be allocated from the
129 * active heap before a GC is forced. This is used to "shrink" the
130 * heap in lieu of actual compaction.
131 */
132 size_t softLimit;
133
134 /* Minimum number of free bytes. Used with the target utilization when
135 * setting the softLimit. Never allows fewer bytes than this to be free
136 * when the heap size is below the maximum size or growth limit.
137 */
138 size_t minFree;
139
140 /* Maximum number of free bytes. Used with the target utilization when
141 * setting the softLimit. Never allows more bytes than this to be free
142 * when the heap size is below the maximum size or growth limit.
143 */
144 size_t maxFree;
145
146 /* The heaps; heaps[0] is always the active heap,
147 * which new objects should be allocated from.
148 */
149 Heap heaps[HEAP_SOURCE_MAX_HEAP_COUNT];
150
151 /* The current number of heaps.
152 */
153 size_t numHeaps;
154
155 /* True if zygote mode was active when the HeapSource was created.
156 */
157 bool sawZygote;
158
159 /*
160 * The base address of the virtual memory reservation.
161 */
162 char *heapBase;
163
164 /*
165 * The length in bytes of the virtual memory reservation.
166 */
167 size_t heapLength;
168
169 /*
170 * The live object bitmap.
171 */
172 HeapBitmap liveBits;
173
174 /*
175 * The mark bitmap.
176 */
177 HeapBitmap markBits;
178
179 /*
180 * State for the GC daemon.
181 */
182 bool hasGcThread;
183 pthread_t gcThread;
184 bool gcThreadShutdown;
185 pthread_mutex_t gcThreadMutex;
186 pthread_cond_t gcThreadCond;
187 bool gcThreadTrimNeeded;
188 };
189
190 #define hs2heap(hs_) (&((hs_)->heaps[0]))
191
192 /*
193 * Returns true iff a soft limit is in effect for the active heap.
194 */
195 static bool isSoftLimited(const HeapSource *hs)
196 {
197 /* softLimit will be either SIZE_MAX or the limit for the
198 * active mspace. idealSize can be greater than softLimit
199 * if there is more than one heap. If there is only one
200 * heap, a non-SIZE_MAX softLimit should always be the same
201 * as idealSize.
202 */
203 return hs->softLimit <= hs->idealSize;
204 }
205
206 /*
207 * Returns approximately the maximum number of bytes allowed to be
208 * allocated from the active heap before a GC is forced.
209 */
210 static size_t getAllocLimit(const HeapSource *hs)
211 {
212 if (isSoftLimited(hs)) {
213 return hs->softLimit;
214 } else {
215 return mspace_footprint_limit(hs2heap(hs)->msp);
216 }
217 }
218
219 /*
220 * Returns the current footprint of all heaps. If includeActive
221 * is false, don't count the heap at index 0.
222 */
223 static size_t oldHeapOverhead(const HeapSource *hs, bool includeActive)
224 {
225 size_t footprint = 0;
226 size_t i;
227
228 if (includeActive) {
229 i = 0;
230 } else {
231 i = 1;
232 }
233 for (/* i = i */; i < hs->numHeaps; i++) {
234 //TODO: include size of bitmaps? If so, don't use bitsLen, listen to .max
235 footprint += mspace_footprint(hs->heaps[i].msp);
236 }
237 return footprint;
238 }
239
240 /*
241 * Returns the heap that <ptr> could have come from, or NULL
242 * if it could not have come from any heap.
243 */
244 static Heap *ptr2heap(const HeapSource *hs, const void *ptr)
245 {
246 const size_t numHeaps = hs->numHeaps;
247
248 if (ptr != NULL) {
249 for (size_t i = 0; i < numHeaps; i++) {
250 const Heap *const heap = &hs->heaps[i];
251
252 if ((const char *)ptr >= heap->base && (const char *)ptr < heap->limit) {
253 return (Heap *)heap;
254 }
255 }
256 }
257 return NULL;
258 }
259
260 /*
261 * Functions to update heap->bytesAllocated when an object
262 * is allocated or freed. mspace_usable_size() will give
263 * us a much more accurate picture of heap utilization than
264 * the requested byte sizes would.
265 *
266 * These aren't exact, and should not be treated as such.
267 */
268 static void countAllocation(Heap *heap, const void *ptr)
269 {
270 assert(heap->bytesAllocated < mspace_footprint(heap->msp));
271
272 heap->bytesAllocated += mspace_usable_size(ptr) +
273 HEAP_SOURCE_CHUNK_OVERHEAD;
274 heap->objectsAllocated++;
275 HeapSource* hs = gDvm.gcHeap->heapSource;
276 dvmHeapBitmapSetObjectBit(&hs->liveBits, ptr);
277
278 assert(heap->bytesAllocated < mspace_footprint(heap->msp));
279 }
280
281 static void countFree(Heap *heap, const void *ptr, size_t *numBytes)
282 {
283 size_t delta = mspace_usable_size(ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
284 assert(delta > 0);
285 if (delta < heap->bytesAllocated) {
286 heap->bytesAllocated -= delta;
287 } else {
288 heap->bytesAllocated = 0;
289 }
290 HeapSource* hs = gDvm.gcHeap->heapSource;
291 dvmHeapBitmapClearObjectBit(&hs->liveBits, ptr);
292 if (heap->objectsAllocated > 0) {
293 heap->objectsAllocated--;
294 }
295 *numBytes += delta;
296 }
297
298 static HeapSource *gHs = NULL;
299
300 static mspace createMspace(void* begin, size_t morecoreStart, size_t startingSize)
301 {
302 // Clear errno to allow strerror on error.
303 errno = 0;
304 // Allow access to the initial pages that will hold the mspace.
305 mprotect(begin, morecoreStart, PROT_READ | PROT_WRITE);
306 // Create mspace using our backing storage starting at begin and with a footprint of
307 // morecoreStart. Don't use an internal dlmalloc lock. When morecoreStart bytes of memory are
308 // exhausted morecore will be called.
309 mspace msp = create_mspace_with_base(begin, morecoreStart, false /*locked*/);
310 if (msp != NULL) {
311 // Do not allow morecore requests to succeed beyond the starting size of the heap.
312 mspace_set_footprint_limit(msp, startingSize);
313 } else {
314 ALOGE("create_mspace_with_base failed %s", strerror(errno));
315 }
316 return msp;
317 }
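
/* For reference, the call sites in this file: dvmHeapSourceStartup()
 * creates the initial mspace with
 *
 *     msp = createMspace(base, kInitialMorecoreStart, startSize);
 *
 * and addNewHeap() creates the post-zygote mspace with hs->minFree as its
 * starting footprint limit.
 */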
318
319 /*
320 * Service request from DlMalloc to increase heap size.
321 */
322 void* dvmHeapSourceMorecore(void* mspace, intptr_t increment)
323 {
324 Heap* heap = NULL;
325 for (size_t i = 0; i < gHs->numHeaps; i++) {
326 if (gHs->heaps[i].msp == mspace) {
327 heap = &gHs->heaps[i];
328 break;
329 }
330 }
331 if (heap == NULL) {
332 ALOGE("Failed to find heap for mspace %p", mspace);
333 dvmAbort();
334 }
335 char* original_brk = heap->brk;
336 if (increment != 0) {
337 char* new_brk = original_brk + increment;
338 if (increment > 0) {
339 // Should never be asked to increase the allocation beyond the capacity of the space.
340 // Enforced by mspace_set_footprint_limit.
341 assert(new_brk <= heap->limit);
342 mprotect(original_brk, increment, PROT_READ | PROT_WRITE);
343 } else {
344 // Should never be asked for negative footprint (i.e. before base).
345 assert(original_brk + increment > heap->base);
346 // Advise we don't need the pages and protect them.
347 size_t size = -increment;
348 madvise(new_brk, size, MADV_DONTNEED);
349 mprotect(new_brk, size, PROT_NONE);
350 }
351 // Update brk.
352 heap->brk = new_brk;
353 }
354 return original_brk;
355 }
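
/* The contract here mirrors sbrk(): the pre-adjustment break is returned,
 * and the accessible portion of the heap grows or shrinks by |increment|
 * bytes. For example, a positive one-page increment mprotect()s one more
 * reserved page to PROT_READ|PROT_WRITE and returns the address where
 * that page begins.
 */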
356
357 const size_t kInitialMorecoreStart = SYSTEM_PAGE_SIZE;
358 /*
359 * Add the initial heap. Returns false if the initial heap was
360 * already added to the heap source.
361 */
362 static bool addInitialHeap(HeapSource *hs, mspace msp, size_t maximumSize)
363 {
364 assert(hs != NULL);
365 assert(msp != NULL);
366 if (hs->numHeaps != 0) {
367 return false;
368 }
369 hs->heaps[0].msp = msp;
370 hs->heaps[0].maximumSize = maximumSize;
371 hs->heaps[0].concurrentStartBytes = SIZE_MAX;
372 hs->heaps[0].base = hs->heapBase;
373 hs->heaps[0].limit = hs->heapBase + maximumSize;
374 hs->heaps[0].brk = hs->heapBase + kInitialMorecoreStart;
375 hs->numHeaps = 1;
376 return true;
377 }
378
379 /*
380 * Adds an additional heap to the heap source. Returns false if there
381 * are too many heaps or insufficient free space to add another heap.
382 */
383 static bool addNewHeap(HeapSource *hs)
384 {
385 Heap heap;
386
387 assert(hs != NULL);
388 if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) {
389 ALOGE("Attempt to create too many heaps (%zd >= %zd)",
390 hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT);
391 dvmAbort();
392 return false;
393 }
394
395 memset(&heap, 0, sizeof(heap));
396
397 /*
398 * Heap storage comes from a common virtual memory reservation.
399 * The new heap will start on the page after the old heap.
400 */
401 char *base = hs->heaps[0].brk;
402 size_t overhead = base - hs->heaps[0].base;
403 assert(((size_t)hs->heaps[0].base & (SYSTEM_PAGE_SIZE - 1)) == 0);
404
405 if (overhead + hs->minFree >= hs->maximumSize) {
406 LOGE_HEAP("No room to create any more heaps "
407 "(%zd overhead, %zd max)",
408 overhead, hs->maximumSize);
409 return false;
410 }
411 size_t morecoreStart = SYSTEM_PAGE_SIZE;
412 heap.maximumSize = hs->growthLimit - overhead;
413 heap.concurrentStartBytes = hs->minFree - CONCURRENT_START;
414 heap.base = base;
415 heap.limit = heap.base + heap.maximumSize;
416 heap.brk = heap.base + morecoreStart;
417 heap.msp = createMspace(base, morecoreStart, hs->minFree);
418 if (heap.msp == NULL) {
419 return false;
420 }
421
422 /* Don't let the soon-to-be-old heap grow any further.
423 */
424 hs->heaps[0].maximumSize = overhead;
425 hs->heaps[0].limit = base;
426 mspace_set_footprint_limit(hs->heaps[0].msp, overhead);
427
428 /* Put the new heap in the list, at heaps[0].
429 * Shift existing heaps down.
430 */
431 memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0]));
432 hs->heaps[0] = heap;
433 hs->numHeaps++;
434
435 return true;
436 }
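
/* On success the shared reservation is split as
 *
 *     [ zygote heap (now heaps[1]) | new active heap (heaps[0]) | reserve ]
 *
 * The old heap is frozen at its current break (its limit and footprint
 * limit are pinned to "overhead" above), and all future allocations come
 * from the new active heap.
 */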
437
438 /*
439 * The garbage collection daemon. Initiates a concurrent collection
440 * when signaled. Also periodically trims the heaps when a few seconds
441 * have elapsed since the last concurrent GC.
442 */
443 static void *gcDaemonThread(void* arg)
444 {
445 dvmChangeStatus(NULL, THREAD_VMWAIT);
446 dvmLockMutex(&gHs->gcThreadMutex);
447 while (gHs->gcThreadShutdown != true) {
448 bool trim = false;
449 if (gHs->gcThreadTrimNeeded) {
450 int result = dvmRelativeCondWait(&gHs->gcThreadCond, &gHs->gcThreadMutex,
451 HEAP_TRIM_IDLE_TIME_MS, 0);
452 if (result == ETIMEDOUT) {
453 /* Timed out waiting for a GC request, schedule a heap trim. */
454 trim = true;
455 }
456 } else {
457 dvmWaitCond(&gHs->gcThreadCond, &gHs->gcThreadMutex);
458 }
459
460 // Many JDWP requests cause allocation. We can't take the heap lock and wait to
461 // transition to runnable so we can start a GC if a debugger is connected, because
462 // we don't know that the JDWP thread isn't about to allocate and require the
463 // heap lock itself, leading to deadlock. http://b/8191824.
464 if (gDvm.debuggerConnected) {
465 continue;
466 }
467
468 dvmLockHeap();
469 /*
470 * Another thread may have started a concurrent garbage
471 * collection before we were scheduled. Check for this
472 * condition before proceeding.
473 */
474 if (!gDvm.gcHeap->gcRunning) {
475 dvmChangeStatus(NULL, THREAD_RUNNING);
476 if (trim) {
477 trimHeaps();
478 gHs->gcThreadTrimNeeded = false;
479 } else {
480 dvmCollectGarbageInternal(GC_CONCURRENT);
481 gHs->gcThreadTrimNeeded = true;
482 }
483 dvmChangeStatus(NULL, THREAD_VMWAIT);
484 }
485 dvmUnlockHeap();
486 }
487 dvmChangeStatus(NULL, THREAD_RUNNING);
488 return NULL;
489 }
490
491 static bool gcDaemonStartup()
492 {
493 dvmInitMutex(&gHs->gcThreadMutex);
494 pthread_cond_init(&gHs->gcThreadCond, NULL);
495 gHs->gcThreadShutdown = false;
496 gHs->hasGcThread = dvmCreateInternalThread(&gHs->gcThread, "GC",
497 gcDaemonThread, NULL);
498 return gHs->hasGcThread;
499 }
500
501 static void gcDaemonShutdown()
502 {
503 if (gHs->hasGcThread) {
504 dvmLockMutex(&gHs->gcThreadMutex);
505 gHs->gcThreadShutdown = true;
506 dvmSignalCond(&gHs->gcThreadCond);
507 dvmUnlockMutex(&gHs->gcThreadMutex);
508 pthread_join(gHs->gcThread, NULL);
509 }
510 }
511
512 /*
513 * Create a stack big enough for the worst possible case, where the
514 * heap is perfectly full of the smallest object.
515 * TODO: be better about memory usage; use a smaller stack with
516 * overflow detection and recovery.
517 */
518 static bool allocMarkStack(GcMarkStack *stack, size_t maximumSize)
519 {
520 const char *name = "dalvik-mark-stack";
521 void *addr;
522
523 assert(stack != NULL);
524 stack->length = maximumSize * sizeof(Object*) /
525 (sizeof(Object) + HEAP_SOURCE_CHUNK_OVERHEAD);
526 addr = dvmAllocRegion(stack->length, PROT_READ | PROT_WRITE, name);
527 if (addr == NULL) {
528 return false;
529 }
530 stack->base = (const Object **)addr;
531 stack->limit = (const Object **)((char *)addr + stack->length);
532 stack->top = NULL;
533 madvise(stack->base, stack->length, MADV_DONTNEED);
534 return true;
535 }
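
/* Sizing note: the worst case assumed above is a maximally grown heap
 * packed with the smallest possible chunks (sizeof(Object) plus the
 * per-chunk overhead), each needing one Object* slot on the mark stack.
 * The trailing madvise(MADV_DONTNEED) keeps those pages unbacked until
 * the marker actually pushes entries.
 */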
536
537 static void freeMarkStack(GcMarkStack *stack)
538 {
539 assert(stack != NULL);
540 munmap(stack->base, stack->length);
541 memset(stack, 0, sizeof(*stack));
542 }
543
544 /*
545 * Initializes the heap source; must be called before any other
546 * dvmHeapSource*() functions. Returns a GcHeap structure
547 * allocated from the heap source.
548 */
549 GcHeap* dvmHeapSourceStartup(size_t startSize, size_t maximumSize,
550 size_t growthLimit)
551 {
552 GcHeap *gcHeap;
553 HeapSource *hs;
554 mspace msp;
555 size_t length;
556 void *base;
557
558 assert(gHs == NULL);
559
560 if (!(startSize <= growthLimit && growthLimit <= maximumSize)) {
561 ALOGE("Bad heap size parameters (start=%zd, max=%zd, limit=%zd)",
562 startSize, maximumSize, growthLimit);
563 return NULL;
564 }
565
566 /*
567 * Allocate a contiguous region of virtual memory to be subdivided
568 * among the heaps managed by the garbage collector.
569 */
570 length = ALIGN_UP_TO_PAGE_SIZE(maximumSize);
571 base = dvmAllocRegion(length, PROT_NONE, "dalvik-heap");
572 if (base == NULL) {
573 return NULL;
574 }
575
576 /* Create an unlocked dlmalloc mspace to use as
577 * a heap source.
578 */
579 msp = createMspace(base, kInitialMorecoreStart, startSize);
580 if (msp == NULL) {
581 goto fail;
582 }
583
584 gcHeap = (GcHeap *)calloc(1, sizeof(*gcHeap));
585 if (gcHeap == NULL) {
586 LOGE_HEAP("Can't allocate heap descriptor");
587 goto fail;
588 }
589
590 hs = (HeapSource *)calloc(1, sizeof(*hs));
591 if (hs == NULL) {
592 LOGE_HEAP("Can't allocate heap source");
593 free(gcHeap);
594 goto fail;
595 }
596
597 hs->targetUtilization = gDvm.heapTargetUtilization * HEAP_UTILIZATION_MAX;
598 hs->minFree = gDvm.heapMinFree;
599 hs->maxFree = gDvm.heapMaxFree;
600 hs->startSize = startSize;
601 hs->maximumSize = maximumSize;
602 hs->growthLimit = growthLimit;
603 hs->idealSize = startSize;
604 hs->softLimit = SIZE_MAX; // no soft limit at first
605 hs->numHeaps = 0;
606 hs->sawZygote = gDvm.zygote;
607 hs->hasGcThread = false;
608 hs->heapBase = (char *)base;
609 hs->heapLength = length;
610
611 if (hs->maxFree > hs->maximumSize) {
612 hs->maxFree = hs->maximumSize;
613 }
614 if (hs->minFree < CONCURRENT_START) {
615 hs->minFree = CONCURRENT_START;
616 } else if (hs->minFree > hs->maxFree) {
617 hs->minFree = hs->maxFree;
618 }
619
620 if (!addInitialHeap(hs, msp, growthLimit)) {
621 LOGE_HEAP("Can't add initial heap");
622 goto fail;
623 }
624 if (!dvmHeapBitmapInit(&hs->liveBits, base, length, "dalvik-bitmap-1")) {
625 LOGE_HEAP("Can't create liveBits");
626 goto fail;
627 }
628 if (!dvmHeapBitmapInit(&hs->markBits, base, length, "dalvik-bitmap-2")) {
629 LOGE_HEAP("Can't create markBits");
630 dvmHeapBitmapDelete(&hs->liveBits);
631 goto fail;
632 }
633 if (!allocMarkStack(&gcHeap->markContext.stack, hs->maximumSize)) {
634 ALOGE("Can't create markStack");
635 dvmHeapBitmapDelete(&hs->markBits);
636 dvmHeapBitmapDelete(&hs->liveBits);
637 goto fail;
638 }
639 gcHeap->markContext.bitmap = &hs->markBits;
640 gcHeap->heapSource = hs;
641
642 gHs = hs;
643 return gcHeap;
644
645 fail:
646 munmap(base, length);
647 return NULL;
648 }
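
/* Rough usage sketch (the caller is expected to be dvmHeapStartup(); the
 * gDvm field names below are illustrative, not verified here):
 *
 *     GcHeap* gcHeap = dvmHeapSourceStartup(gDvm.heapStartingSize,
 *                                           gDvm.heapMaximumSize,
 *                                           gDvm.heapGrowthLimit);
 *     if (gcHeap == NULL) {
 *         return false;
 *     }
 *
 * startSize <= growthLimit <= maximumSize must hold, or NULL is returned.
 */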
649
650 bool dvmHeapSourceStartupAfterZygote()
651 {
652 return gDvm.concurrentMarkSweep ? gcDaemonStartup() : true;
653 }
654
655 /*
656 * This is called while in zygote mode, right before we fork() for the
657 * first time. We create a heap for all future zygote process allocations,
658 * in an attempt to avoid touching pages in the zygote heap. (This would
659 * probably be unnecessary if we had a compacting GC -- the source of our
660 * troubles is small allocations filling in the gaps from larger ones.)
661 */
662 bool dvmHeapSourceStartupBeforeFork()
663 {
664 HeapSource *hs = gHs; // use a local to avoid the implicit "volatile"
665
666 HS_BOILERPLATE();
667
668 assert(gDvm.zygote);
669
670 if (!gDvm.newZygoteHeapAllocated) {
671 /* Ensure heaps are trimmed to minimize footprint pre-fork.
672 */
673 trimHeaps();
674 /* Create a new heap for post-fork zygote allocations. We only
675 * try once, even if it fails.
676 */
677 ALOGV("Splitting out new zygote heap");
678 gDvm.newZygoteHeapAllocated = true;
679 return addNewHeap(hs);
680 }
681 return true;
682 }
683
684 void dvmHeapSourceThreadShutdown()
685 {
686 if (gDvm.gcHeap != NULL && gDvm.concurrentMarkSweep) {
687 gcDaemonShutdown();
688 }
689 }
690
691 /*
692 * Tears down the entire GcHeap structure and all of the substructures
693 * attached to it. This call has the side effect of setting the given
694 * gcHeap pointer and gHs to NULL.
695 */
696 void dvmHeapSourceShutdown(GcHeap **gcHeap)
697 {
698 assert(gcHeap != NULL);
699 if (*gcHeap != NULL && (*gcHeap)->heapSource != NULL) {
700 HeapSource *hs = (*gcHeap)->heapSource;
701 dvmHeapBitmapDelete(&hs->liveBits);
702 dvmHeapBitmapDelete(&hs->markBits);
703 freeMarkStack(&(*gcHeap)->markContext.stack);
704 munmap(hs->heapBase, hs->heapLength);
705 free(hs);
706 gHs = NULL;
707 free(*gcHeap);
708 *gcHeap = NULL;
709 }
710 }
711
712 /*
713 * Gets the beginning of the allocation for the HeapSource.
714 */
715 void *dvmHeapSourceGetBase()
716 {
717 return gHs->heapBase;
718 }
719
720 /*
721 * Returns a high water mark; all objects must have been allocated
722 * between the base and the returned limit.
723 */
724 void *dvmHeapSourceGetLimit()
725 {
726 HeapSource *hs = gHs;
727 void *max_brk = hs->heaps[0].brk;
728
729 #ifndef NDEBUG
730 for (size_t i = 1; i < hs->numHeaps; i++) {
731 Heap *const heap = &hs->heaps[i];
732 void *heap_brk = heap->brk;
733 assert (max_brk > heap_brk);
734 }
735 #endif
736 return max_brk;
737 }
738
739 /*
740 * Returns the requested value. If the per-heap stats are requested, fill
741 * them as well.
742 *
743 * Caller must hold the heap lock.
744 */
745 size_t dvmHeapSourceGetValue(HeapSourceValueSpec spec, size_t perHeapStats[],
746 size_t arrayLen)
747 {
748 HeapSource *hs = gHs;
749 size_t value = 0;
750 size_t total = 0;
751
752 HS_BOILERPLATE();
753
754 assert(arrayLen >= hs->numHeaps || perHeapStats == NULL);
755 for (size_t i = 0; i < hs->numHeaps; i++) {
756 Heap *const heap = &hs->heaps[i];
757
758 switch (spec) {
759 case HS_FOOTPRINT:
760 value = heap->brk - heap->base;
761 assert(value == mspace_footprint(heap->msp));
762 break;
763 case HS_ALLOWED_FOOTPRINT:
764 value = mspace_footprint_limit(heap->msp);
765 break;
766 case HS_BYTES_ALLOCATED:
767 value = heap->bytesAllocated;
768 break;
769 case HS_OBJECTS_ALLOCATED:
770 value = heap->objectsAllocated;
771 break;
772 default:
773 // quiet gcc
774 break;
775 }
776 if (perHeapStats) {
777 perHeapStats[i] = value;
778 }
779 total += value;
780 }
781 return total;
782 }
783
784 void dvmHeapSourceGetRegions(uintptr_t *base, uintptr_t *max, size_t numHeaps)
785 {
786 HeapSource *hs = gHs;
787
788 HS_BOILERPLATE();
789
790 assert(numHeaps <= hs->numHeaps);
791 for (size_t i = 0; i < numHeaps; ++i) {
792 base[i] = (uintptr_t)hs->heaps[i].base;
793 max[i] = MIN((uintptr_t)hs->heaps[i].limit - 1, hs->markBits.max);
794 }
795 }
796
797 /*
798 * Get the bitmap representing all live objects.
799 */
800 HeapBitmap *dvmHeapSourceGetLiveBits()
801 {
802 HS_BOILERPLATE();
803
804 return &gHs->liveBits;
805 }
806
807 /*
808 * Get the bitmap representing all marked objects.
809 */
810 HeapBitmap *dvmHeapSourceGetMarkBits()
811 {
812 HS_BOILERPLATE();
813
814 return &gHs->markBits;
815 }
816
817 void dvmHeapSourceSwapBitmaps()
818 {
819 HeapBitmap tmp = gHs->liveBits;
820 gHs->liveBits = gHs->markBits;
821 gHs->markBits = tmp;
822 }
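
/* A standard mark-sweep trick: after a completed mark pass the mark
 * bitmap describes exactly the live set, so swapping lets it serve as the
 * new live bitmap while the old live bitmap is recycled as the next
 * cycle's mark bitmap (see dvmHeapSourceZeroMarkBitmap()).
 */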
823
824 void dvmHeapSourceZeroMarkBitmap()
825 {
826 HS_BOILERPLATE();
827
828 dvmHeapBitmapZero(&gHs->markBits);
829 }
830
831 void dvmMarkImmuneObjects(const char *immuneLimit)
832 {
833 /*
834 * Copy the contents of the live bit vector for immune object
835 * range into the mark bit vector.
836 */
837 /* The only values generated by dvmHeapSourceGetImmuneLimit() */
838 assert(immuneLimit == gHs->heaps[0].base ||
839 immuneLimit == NULL);
840 assert(gHs->liveBits.base == gHs->markBits.base);
841 assert(gHs->liveBits.bitsLen == gHs->markBits.bitsLen);
842 /* heap[0] is never immune */
843 assert(gHs->heaps[0].base >= immuneLimit);
844 assert(gHs->heaps[0].limit > immuneLimit);
845
846 for (size_t i = 1; i < gHs->numHeaps; ++i) {
847 if (gHs->heaps[i].base < immuneLimit) {
848 assert(gHs->heaps[i].limit <= immuneLimit);
849 /* Compute the number of words to copy in the bitmap. */
850 size_t index = HB_OFFSET_TO_INDEX(
851 (uintptr_t)gHs->heaps[i].base - gHs->liveBits.base);
852 /* Compute the starting offset in the live and mark bits. */
853 char *src = (char *)(gHs->liveBits.bits + index);
854 char *dst = (char *)(gHs->markBits.bits + index);
855 /* Compute the number of bytes of the live bitmap to copy. */
856 size_t length = HB_OFFSET_TO_BYTE_INDEX(
857 gHs->heaps[i].limit - gHs->heaps[i].base);
858 /* Do the copy. */
859 memcpy(dst, src, length);
860 /* Make sure max points to the address of the highest set bit. */
861 if (gHs->markBits.max < (uintptr_t)gHs->heaps[i].limit) {
862 gHs->markBits.max = (uintptr_t)gHs->heaps[i].limit;
863 }
864 }
865 }
866 }
867
868 /*
869 * Allocates <n> bytes of zeroed data.
870 */
871 void* dvmHeapSourceAlloc(size_t n)
872 {
873 HS_BOILERPLATE();
874
875 HeapSource *hs = gHs;
876 Heap* heap = hs2heap(hs);
877 if (heap->bytesAllocated + n > hs->softLimit) {
878 /*
879 * This allocation would push us over the soft limit; act as
880 * if the heap is full.
881 */
882 LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation",
883 FRACTIONAL_MB(hs->softLimit), n);
884 return NULL;
885 }
886 void* ptr = mspace_calloc(heap->msp, 1, n);
887 if (ptr == NULL) {
888 return NULL;
889 }
890 countAllocation(heap, ptr);
891 /*
892 * Check to see if a concurrent GC should be initiated.
893 */
894 if (gDvm.gcHeap->gcRunning || !hs->hasGcThread) {
895 /*
896 * The garbage collector thread is already running or has yet
897 * to be started. Do nothing.
898 */
899 return ptr;
900 }
901 if (heap->bytesAllocated > heap->concurrentStartBytes) {
902 /*
903 * We have exceeded the allocation threshold. Wake up the
904 * garbage collector.
905 */
906 dvmSignalCond(&gHs->gcThreadCond);
907 }
908 return ptr;
909 }
910
911 /* Remove any hard limits, try to allocate, and shrink back down.
912 * Last resort when trying to allocate an object.
913 */
914 static void* heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n)
915 {
916 /* Grow as much as possible, but don't let the real footprint
917 * go over the absolute max.
918 */
919 size_t max = heap->maximumSize;
920
921 mspace_set_footprint_limit(heap->msp, max);
922 void* ptr = dvmHeapSourceAlloc(n);
923
924 /* Shrink back down as small as possible. Our caller may
925 * readjust the footprint limit to a more appropriate value.
926 */
927 mspace_set_footprint_limit(heap->msp,
928 mspace_footprint(heap->msp));
929 return ptr;
930 }
931
932 /*
933 * Allocates <n> bytes of zeroed data, growing as much as possible
934 * if necessary.
935 */
936 void* dvmHeapSourceAllocAndGrow(size_t n)
937 {
938 HS_BOILERPLATE();
939
940 HeapSource *hs = gHs;
941 Heap* heap = hs2heap(hs);
942 void* ptr = dvmHeapSourceAlloc(n);
943 if (ptr != NULL) {
944 return ptr;
945 }
946
947 size_t oldIdealSize = hs->idealSize;
948 if (isSoftLimited(hs)) {
949 /* We're soft-limited. Try removing the soft limit to
950 * see if we can allocate without actually growing.
951 */
952 hs->softLimit = SIZE_MAX;
953 ptr = dvmHeapSourceAlloc(n);
954 if (ptr != NULL) {
955 /* Removing the soft limit worked; fix things up to
956 * reflect the new effective ideal size.
957 */
958 snapIdealFootprint();
959 return ptr;
960 }
961 // softLimit intentionally left at SIZE_MAX.
962 }
963
964 /* We're not soft-limited. Grow the heap to satisfy the request.
965 * If this call fails, no footprints will have changed.
966 */
967 ptr = heapAllocAndGrow(hs, heap, n);
968 if (ptr != NULL) {
969 /* The allocation succeeded. Fix up the ideal size to
970 * reflect any footprint modifications that had to happen.
971 */
972 snapIdealFootprint();
973 } else {
974 /* We just couldn't do it. Restore the original ideal size,
975 * fixing up softLimit if necessary.
976 */
977 setIdealFootprint(oldIdealSize);
978 }
979 return ptr;
980 }
981
982 /*
983 * Frees the first numPtrs objects in the ptrs list and returns the
984 * amount of reclaimed storage. The list must contain addresses all in
985 * the same mspace, and must be in increasing order. This implies that
986 * there are no duplicates, and no entries are NULL.
987 */
988 size_t dvmHeapSourceFreeList(size_t numPtrs, void **ptrs)
989 {
990 HS_BOILERPLATE();
991
992 if (numPtrs == 0) {
993 return 0;
994 }
995
996 assert(ptrs != NULL);
997 assert(*ptrs != NULL);
998 Heap* heap = ptr2heap(gHs, *ptrs);
999 size_t numBytes = 0;
1000 if (heap != NULL) {
1001 mspace msp = heap->msp;
1002 // Calling mspace_free on shared heaps disrupts sharing too
1003 // much. For heap[0] -- the 'active heap' -- we call
1004 // mspace_free, but on the other heaps we only do some
1005 // accounting.
1006 if (heap == gHs->heaps) {
1007 // Count freed objects.
1008 for (size_t i = 0; i < numPtrs; i++) {
1009 assert(ptrs[i] != NULL);
1010 assert(ptr2heap(gHs, ptrs[i]) == heap);
1011 countFree(heap, ptrs[i], &numBytes);
1012 }
1013 // Bulk free ptrs.
1014 mspace_bulk_free(msp, ptrs, numPtrs);
1015 } else {
1016 // This is not an 'active heap'. Only do the accounting.
1017 for (size_t i = 0; i < numPtrs; i++) {
1018 assert(ptrs[i] != NULL);
1019 assert(ptr2heap(gHs, ptrs[i]) == heap);
1020 countFree(heap, ptrs[i], &numBytes);
1021 }
1022 }
1023 }
1024 return numBytes;
1025 }
1026
1027 /*
1028 * Returns true iff <ptr> is in the heap source.
1029 */
1030 bool dvmHeapSourceContainsAddress(const void *ptr)
1031 {
1032 HS_BOILERPLATE();
1033
1034 return (dvmHeapSourceGetBase() <= ptr) && (ptr <= dvmHeapSourceGetLimit());
1035 }
1036
1037 /*
1038 * Returns true iff <ptr> was allocated from the heap source.
1039 */
1040 bool dvmHeapSourceContains(const void *ptr)
1041 {
1042 HS_BOILERPLATE();
1043
1044 if (dvmHeapSourceContainsAddress(ptr)) {
1045 return dvmHeapBitmapIsObjectBitSet(&gHs->liveBits, ptr) != 0;
1046 }
1047 return false;
1048 }
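
/* Note the difference from dvmHeapSourceContainsAddress() above: that is
 * only a range check against the heap source reservation, while this one
 * additionally requires the live-bitmap bit, i.e. <ptr> must be the start
 * of an object that is currently allocated.
 */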
1049
1050 bool dvmIsZygoteObject(const Object* obj)
1051 {
1052 HeapSource *hs = gHs;
1053
1054 HS_BOILERPLATE();
1055
1056 if (dvmHeapSourceContains(obj) && hs->sawZygote) {
1057 Heap *heap = ptr2heap(hs, obj);
1058 if (heap != NULL) {
1059 /* If the object is not in the active heap, we assume that
1060 * it was allocated as part of zygote.
1061 */
1062 return heap != hs->heaps;
1063 }
1064 }
1065 /* The pointer is outside of any known heap, or we are not
1066 * running in zygote mode.
1067 */
1068 return false;
1069 }
1070
1071 /*
1072 * Returns the number of usable bytes in an allocated chunk; the size
1073 * may be larger than the size passed to dvmHeapSourceAlloc().
1074 */
1075 size_t dvmHeapSourceChunkSize(const void *ptr)
1076 {
1077 HS_BOILERPLATE();
1078
1079 Heap* heap = ptr2heap(gHs, ptr);
1080 if (heap != NULL) {
1081 return mspace_usable_size(ptr);
1082 }
1083 return 0;
1084 }
1085
1086 /*
1087 * Returns the number of bytes that the heap source has allocated
1088 * from the system using sbrk/mmap, etc.
1089 *
1090 * Caller must hold the heap lock.
1091 */
1092 size_t dvmHeapSourceFootprint()
1093 {
1094 HS_BOILERPLATE();
1095
1096 //TODO: include size of bitmaps?
1097 return oldHeapOverhead(gHs, true);
1098 }
1099
1100 static size_t getMaximumSize(const HeapSource *hs)
1101 {
1102 return hs->growthLimit;
1103 }
1104
1105 /*
1106 * Returns the current maximum size of the heap source respecting any
1107 * growth limits.
1108 */
1109 size_t dvmHeapSourceGetMaximumSize()
1110 {
1111 HS_BOILERPLATE();
1112 return getMaximumSize(gHs);
1113 }
1114
1115 /*
1116 * Removes any growth limits. Allows the user to allocate up to the
1117 * maximum heap size.
1118 */
1119 void dvmClearGrowthLimit()
1120 {
1121 HS_BOILERPLATE();
1122 dvmLockHeap();
1123 dvmWaitForConcurrentGcToComplete();
1124 gDvm.gcHeap->cardTableLength = gDvm.gcHeap->cardTableMaxLength;
1125 gHs->growthLimit = gHs->maximumSize;
1126 size_t overhead = oldHeapOverhead(gHs, false);
1127 gHs->heaps[0].maximumSize = gHs->maximumSize - overhead;
1128 gHs->heaps[0].limit = gHs->heaps[0].base + gHs->heaps[0].maximumSize;
1129 dvmUnlockHeap();
1130 }
1131
1132 /*
1133 * Return the real bytes used by old heaps plus the soft usage of the
1134 * current heap. When a soft limit is in effect, this is effectively
1135 * what it's compared against (though, in practice, it only looks at
1136 * the current heap).
1137 */
1138 static size_t getSoftFootprint(bool includeActive)
1139 {
1140 HS_BOILERPLATE();
1141
1142 HeapSource *hs = gHs;
1143 size_t ret = oldHeapOverhead(hs, false);
1144 if (includeActive) {
1145 ret += hs->heaps[0].bytesAllocated;
1146 }
1147
1148 return ret;
1149 }
1150
1151 /*
1152 * Gets the maximum number of bytes that the heap source is allowed
1153 * to allocate from the system.
1154 */
1155 size_t dvmHeapSourceGetIdealFootprint()
1156 {
1157 HeapSource *hs = gHs;
1158
1159 HS_BOILERPLATE();
1160
1161 return hs->idealSize;
1162 }
1163
1164 /*
1165 * Sets the soft limit, handling any necessary changes to the allowed
1166 * footprint of the active heap.
1167 */
1168 static void setSoftLimit(HeapSource *hs, size_t softLimit)
1169 {
1170 /* Compare against the actual footprint, rather than the
1171 * max_allowed, because the heap may not have grown all the
1172 * way to the allowed size yet.
1173 */
1174 mspace msp = hs->heaps[0].msp;
1175 size_t currentHeapSize = mspace_footprint(msp);
1176 if (softLimit < currentHeapSize) {
1177 /* Don't let the heap grow any more, and impose a soft limit.
1178 */
1179 mspace_set_footprint_limit(msp, currentHeapSize);
1180 hs->softLimit = softLimit;
1181 } else {
1182 /* Let the heap grow to the requested max, and remove any
1183 * soft limit, if set.
1184 */
1185 mspace_set_footprint_limit(msp, softLimit);
1186 hs->softLimit = SIZE_MAX;
1187 }
1188 }
1189
1190 /*
1191 * Sets the maximum number of bytes that the heap source is allowed
1192 * to allocate from the system. Clamps to the appropriate maximum
1193 * value.
1194 */
1195 static void setIdealFootprint(size_t max)
1196 {
1197 HS_BOILERPLATE();
1198
1199 HeapSource *hs = gHs;
1200 size_t maximumSize = getMaximumSize(hs);
1201 if (max > maximumSize) {
1202 LOGI_HEAP("Clamp target GC heap from %zd.%03zdMB to %u.%03uMB",
1203 FRACTIONAL_MB(max),
1204 FRACTIONAL_MB(maximumSize));
1205 max = maximumSize;
1206 }
1207
1208 /* Convert max into a size that applies to the active heap.
1209 * Old heaps will count against the ideal size.
1210 */
1211 size_t overhead = getSoftFootprint(false);
1212 size_t activeMax;
1213 if (overhead < max) {
1214 activeMax = max - overhead;
1215 } else {
1216 activeMax = 0;
1217 }
1218
1219 setSoftLimit(hs, activeMax);
1220 hs->idealSize = max;
1221 }
1222
1223 /*
1224 * Make the ideal footprint equal to the current footprint.
1225 */
1226 static void snapIdealFootprint()
1227 {
1228 HS_BOILERPLATE();
1229
1230 setIdealFootprint(getSoftFootprint(true));
1231 }
1232
1233 /*
1234 * Gets the current ideal heap utilization, represented as a number
1235 * between zero and one.
1236 */
1237 float dvmGetTargetHeapUtilization()
1238 {
1239 HeapSource *hs = gHs;
1240
1241 HS_BOILERPLATE();
1242
1243 return (float)hs->targetUtilization / (float)HEAP_UTILIZATION_MAX;
1244 }
1245
1246 /*
1247 * Sets the new ideal heap utilization, represented as a number
1248 * between zero and one.
1249 */
1250 void dvmSetTargetHeapUtilization(float newTarget)
1251 {
1252 HeapSource *hs = gHs;
1253
1254 HS_BOILERPLATE();
1255
1256 /* Clamp it to a reasonable range.
1257 */
1258 // TODO: This may need some tuning.
1259 if (newTarget < 0.2) {
1260 newTarget = 0.2;
1261 } else if (newTarget > 0.8) {
1262 newTarget = 0.8;
1263 }
1264
1265 hs->targetUtilization =
1266 (size_t)(newTarget * (float)HEAP_UTILIZATION_MAX);
1267 ALOGV("Set heap target utilization to %zd/%d (%f)",
1268 hs->targetUtilization, HEAP_UTILIZATION_MAX, newTarget);
1269 }
1270
1271 /*
1272 * Given the size of a live set, returns the ideal heap size given
1273 * the current target utilization and MIN/MAX values.
1274 */
1275 static size_t getUtilizationTarget(const HeapSource* hs, size_t liveSize)
1276 {
1277 /* Use the current target utilization ratio to determine the
1278 * ideal heap size based on the size of the live set.
1279 */
1280 size_t targetSize = (liveSize / hs->targetUtilization) * HEAP_UTILIZATION_MAX;
1281
1282 /* Cap the amount of free space, though, so we don't end up
1283 * with, e.g., 8MB of free space when the live set size hits 8MB.
1284 */
1285 if (targetSize > liveSize + hs->maxFree) {
1286 targetSize = liveSize + hs->maxFree;
1287 } else if (targetSize < liveSize + hs->minFree) {
1288 targetSize = liveSize + hs->minFree;
1289 }
1290 return targetSize;
1291 }
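
/* Worked example: dvmSetTargetHeapUtilization(0.5) stores a
 * targetUtilization of 512/1024. With an 8 MiB live set this gives
 *
 *     targetSize = (8 MiB / 512) * 1024 = 16 MiB
 *
 * i.e. roughly half the heap free, before the minFree/maxFree clamps are
 * applied.
 */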
1292
1293 /*
1294 * Given the current contents of the active heap, increase the allowed
1295 * heap footprint to match the target utilization ratio. This
1296 * should only be called immediately after a full mark/sweep.
1297 */
1298 void dvmHeapSourceGrowForUtilization()
1299 {
1300 HS_BOILERPLATE();
1301
1302 HeapSource *hs = gHs;
1303 Heap* heap = hs2heap(hs);
1304
1305 /* Use the current target utilization ratio to determine the
1306 * ideal heap size based on the size of the live set.
1307 * Note that only the active heap plays any part in this.
1308 *
1309 * Avoid letting the old heaps influence the target free size,
1310 * because they may be full of objects that aren't actually
1311 * in the working set. Just look at the allocated size of
1312 * the current heap.
1313 */
1314 size_t currentHeapUsed = heap->bytesAllocated;
1315 size_t targetHeapSize = getUtilizationTarget(hs, currentHeapUsed);
1316
1317 /* The ideal size includes the old heaps; add overhead so that
1318 * it can be immediately subtracted again in setIdealFootprint().
1319 * If the target heap size would exceed the max, setIdealFootprint()
1320 * will clamp it to a legal value.
1321 */
1322 size_t overhead = getSoftFootprint(false);
1323 setIdealFootprint(targetHeapSize + overhead);
1324
1325 size_t freeBytes = getAllocLimit(hs);
1326 if (freeBytes < CONCURRENT_MIN_FREE) {
1327 /* Not enough free memory to allow a concurrent GC. */
1328 heap->concurrentStartBytes = SIZE_MAX;
1329 } else {
1330 heap->concurrentStartBytes = freeBytes - CONCURRENT_START;
1331 }
1332 }
1333
1334 /*
1335 * Return free pages to the system.
1336 * TODO: move this somewhere else, especially the native heap part.
1337 */
1338 static void releasePagesInRange(void* start, void* end, size_t used_bytes,
1339 void* releasedBytes)
1340 {
1341 if (used_bytes == 0) {
1342 /*
1343 * We have a range of memory we can try to madvise()
1344 * back. Linux requires that the madvise() start address is
1345 * page-aligned. We also align the end address.
1346 */
1347 start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
1348 end = (void *)((size_t)end & ~(SYSTEM_PAGE_SIZE - 1));
1349 if (end > start) {
1350 size_t length = (char *)end - (char *)start;
1351 madvise(start, length, MADV_DONTNEED);
1352 *(size_t *)releasedBytes += length;
1353 }
1354 }
1355 }
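
/* This matches the handler signature used by mspace_inspect_all() and
 * dlmalloc_inspect_all(): it is invoked per chunk region, with used_bytes
 * equal to zero for free chunks, which are the only ranges madvise()d
 * back to the kernel here.
 */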
1356
1357 /*
1358 * Return unused memory to the system if possible.
1359 */
1360 static void trimHeaps()
1361 {
1362 HS_BOILERPLATE();
1363
1364 HeapSource *hs = gHs;
1365 size_t heapBytes = 0;
1366 for (size_t i = 0; i < hs->numHeaps; i++) {
1367 Heap *heap = &hs->heaps[i];
1368
1369 /* Return the wilderness chunk to the system. */
1370 mspace_trim(heap->msp, 0);
1371
1372 /* Return any whole free pages to the system. */
1373 mspace_inspect_all(heap->msp, releasePagesInRange, &heapBytes);
1374 }
1375
1376 /* Same for the native heap. */
1377 dlmalloc_trim(0);
1378 size_t nativeBytes = 0;
1379 dlmalloc_inspect_all(releasePagesInRange, &nativeBytes);
1380
1381 LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes",
1382 heapBytes, nativeBytes, heapBytes + nativeBytes);
1383 }
1384
1385 /*
1386 * Walks over the heap source and passes every allocated and
1387 * free chunk to the callback.
1388 */
1389 void dvmHeapSourceWalk(void(*callback)(void* start, void* end,
1390 size_t used_bytes, void* arg),
1391 void *arg)
1392 {
1393 HS_BOILERPLATE();
1394
1395 /* Walk the heaps from oldest to newest.
1396 */
1397 //TODO: do this in address order
1398 HeapSource *hs = gHs;
1399 for (size_t i = hs->numHeaps; i > 0; --i) {
1400 mspace_inspect_all(hs->heaps[i-1].msp, callback, arg);
1401 callback(NULL, NULL, 0, arg); // Indicate end of a heap.
1402 }
1403 }
1404
1405 /*
1406 * Gets the number of heaps available in the heap source.
1407 *
1408 * Caller must hold the heap lock, because gHs caches a field
1409 * in gDvm.gcHeap.
1410 */
1411 size_t dvmHeapSourceGetNumHeaps()
1412 {
1413 HS_BOILERPLATE();
1414
1415 return gHs->numHeaps;
1416 }
1417
1418 void *dvmHeapSourceGetImmuneLimit(bool isPartial)
1419 {
1420 if (isPartial) {
1421 return hs2heap(gHs)->base;
1422 } else {
1423 return NULL;
1424 }
1425 }
1426