1 /*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <stdint.h>
18 #include <sys/mman.h>
19 #include <errno.h>
20 #include <cutils/ashmem.h>
21
22 #define SIZE_MAX UINT_MAX // TODO: get SIZE_MAX from stdint.h
23
24 #include "Dalvik.h"
25 #include "alloc/DlMalloc.h"
26 #include "alloc/Heap.h"
27 #include "alloc/HeapInternal.h"
28 #include "alloc/HeapSource.h"
29 #include "alloc/HeapBitmap.h"
30 #include "alloc/HeapBitmapInlines.h"
31
32 static void dvmHeapSourceUpdateMaxNativeFootprint();
33 static void snapIdealFootprint();
34 static void setIdealFootprint(size_t max);
35 static size_t getMaximumSize(const HeapSource *hs);
36 static void trimHeaps();
37
38 #define HEAP_UTILIZATION_MAX 1024
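/* Utilization ratios are stored as fixed-point fractions of
 * HEAP_UTILIZATION_MAX; e.g., a stored targetUtilization of 512 corresponds
 * to a 50% target (see dvmSetTargetHeapUtilization()).
 */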
39
40 /* How long to wait after a GC before performing a heap trim
41 * operation to reclaim unused pages.
42 */
43 #define HEAP_TRIM_IDLE_TIME_MS (5 * 1000)
44
45 /* Start a concurrent collection when free memory falls under this
46 * many bytes.
47 */
48 #define CONCURRENT_START (128 << 10)
49
50 /* The next GC will not be concurrent when free memory after a GC is
51 * under this many bytes.
52 */
53 #define CONCURRENT_MIN_FREE (CONCURRENT_START + (128 << 10))
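/* With the values above, a concurrent collection is requested once free
 * memory drops below 128 KiB, and a GC that finishes with less than 256 KiB
 * free will schedule the next collection as non-concurrent.
 */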
54
55 #define HS_BOILERPLATE() \
56 do { \
57 assert(gDvm.gcHeap != NULL); \
58 assert(gDvm.gcHeap->heapSource != NULL); \
59 assert(gHs == gDvm.gcHeap->heapSource); \
60 } while (0)
61
62 struct Heap {
63 /* The mspace to allocate from.
64 */
65 mspace msp;
66
67 /* The largest size that this heap is allowed to grow to.
68 */
69 size_t maximumSize;
70
71 /* Number of bytes allocated from this mspace for objects,
72 * including any overhead. This value is NOT exact, and
73 * should only be used as an input for certain heuristics.
74 */
75 size_t bytesAllocated;
76
77 /* Number of bytes allocated from this mspace at which a
78 * concurrent garbage collection will be started.
79 */
80 size_t concurrentStartBytes;
81
82 /* Number of objects currently allocated from this mspace.
83 */
84 size_t objectsAllocated;
85
86 /*
87 * The lowest address of this heap, inclusive.
88 */
89 char *base;
90
91 /*
92 * The highest address of this heap, exclusive.
93 */
94 char *limit;
95
96 /*
97 * If the heap has an mspace, the current high water mark in
98 * allocations requested via dvmHeapSourceMorecore.
99 */
100 char *brk;
101 };
102
103 struct HeapSource {
104 /* Target ideal heap utilization ratio; range 1..HEAP_UTILIZATION_MAX
105 */
106 size_t targetUtilization;
107
108 /* The starting heap size.
109 */
110 size_t startSize;
111
112 /* The largest that the heap source as a whole is allowed to grow.
113 */
114 size_t maximumSize;
115
116 /*
117 * The largest size we permit the heap to grow to. This value allows
118 * the user to limit the heap growth below the maximum size. This
119 * is a workaround until we can dynamically set the maximum size.
120 * This value can range between the starting size and the maximum
121 * size but should never be set below the current footprint of the
122 * heap.
123 */
124 size_t growthLimit;
125
126 /* The desired max size of the heap source as a whole.
127 */
128 size_t idealSize;
129
130 /* The maximum number of bytes allowed to be allocated from the
131 * active heap before a GC is forced. This is used to "shrink" the
132 * heap in lieu of actual compaction.
133 */
134 size_t softLimit;
135
136 /* Minimum number of free bytes. Used with the target utilization when
137 * setting the softLimit. Never allows fewer bytes than this to be free
138 * when the heap size is below the maximum size or growth limit.
139 */
140 size_t minFree;
141
142 /* Maximum number of free bytes. Used with the target utilization when
143 * setting the softLimit. Never allows more bytes than this to be free
144 * when the heap size is below the maximum size or growth limit.
145 */
146 size_t maxFree;
147
148 /* The heaps; heaps[0] is always the active heap,
149 * which new objects should be allocated from.
150 */
151 Heap heaps[HEAP_SOURCE_MAX_HEAP_COUNT];
152
153 /* The current number of heaps.
154 */
155 size_t numHeaps;
156
157 /* True if zygote mode was active when the HeapSource was created.
158 */
159 bool sawZygote;
160
161 /*
162 * The base address of the virtual memory reservation.
163 */
164 char *heapBase;
165
166 /*
167 * The length in bytes of the virtual memory reservation.
168 */
169 size_t heapLength;
170
171 /*
172 * The live object bitmap.
173 */
174 HeapBitmap liveBits;
175
176 /*
177 * The mark bitmap.
178 */
179 HeapBitmap markBits;
180
181 /*
182 * Native allocations.
183 */
184 int32_t nativeBytesAllocated;
185 size_t nativeFootprintGCWatermark;
186 size_t nativeFootprintLimit;
187 bool nativeNeedToRunFinalization;
188
189 /*
190 * State for the GC daemon.
191 */
192 bool hasGcThread;
193 pthread_t gcThread;
194 bool gcThreadShutdown;
195 pthread_mutex_t gcThreadMutex;
196 pthread_cond_t gcThreadCond;
197 bool gcThreadTrimNeeded;
198 };
199
200 #define hs2heap(hs_) (&((hs_)->heaps[0]))
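/* The active heap -- the one new objects are allocated from -- is always
 * heaps[0]; older heaps (e.g., the zygote heap) are shifted to higher
 * indices by addNewHeap().
 */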
201
202 /*
203 * Returns true iff a soft limit is in effect for the active heap.
204 */
205 static bool isSoftLimited(const HeapSource *hs)
206 {
207 /* softLimit will be either SIZE_MAX or the limit for the
208 * active mspace. idealSize can be greater than softLimit
209 * if there is more than one heap. If there is only one
210 * heap, a non-SIZE_MAX softLimit should always be the same
211 * as idealSize.
212 */
213 return hs->softLimit <= hs->idealSize;
214 }
215
216 /*
217 * Returns approximately the maximum number of bytes allowed to be
218 * allocated from the active heap before a GC is forced.
219 */
220 static size_t getAllocLimit(const HeapSource *hs)
221 {
222 if (isSoftLimited(hs)) {
223 return hs->softLimit;
224 } else {
225 return mspace_footprint_limit(hs2heap(hs)->msp);
226 }
227 }
228
229 /*
230 * Returns the current footprint of all heaps. If includeActive
231 * is false, don't count the heap at index 0.
232 */
233 static size_t oldHeapOverhead(const HeapSource *hs, bool includeActive)
234 {
235 size_t footprint = 0;
236 size_t i;
237
238 if (includeActive) {
239 i = 0;
240 } else {
241 i = 1;
242 }
243 for (/* i = i */; i < hs->numHeaps; i++) {
244 //TODO: include size of bitmaps? If so, don't use bitsLen, listen to .max
245 footprint += mspace_footprint(hs->heaps[i].msp);
246 }
247 return footprint;
248 }
249
250 /*
251 * Returns the heap that <ptr> could have come from, or NULL
252 * if it could not have come from any heap.
253 */
254 static Heap *ptr2heap(const HeapSource *hs, const void *ptr)
255 {
256 const size_t numHeaps = hs->numHeaps;
257
258 if (ptr != NULL) {
259 for (size_t i = 0; i < numHeaps; i++) {
260 const Heap *const heap = &hs->heaps[i];
261
262 if ((const char *)ptr >= heap->base && (const char *)ptr < heap->limit) {
263 return (Heap *)heap;
264 }
265 }
266 }
267 return NULL;
268 }
269
270 /*
271 * Functions to update heapSource->bytesAllocated when an object
272 * is allocated or freed. mspace_usable_size() will give
273 * us a much more accurate picture of heap utilization than
274 * the requested byte sizes would.
275 *
276 * These aren't exact, and should not be treated as such.
277 */
278 static void countAllocation(Heap *heap, const void *ptr)
279 {
280 assert(heap->bytesAllocated < mspace_footprint(heap->msp));
281
282 heap->bytesAllocated += mspace_usable_size(ptr) +
283 HEAP_SOURCE_CHUNK_OVERHEAD;
284 heap->objectsAllocated++;
285 HeapSource* hs = gDvm.gcHeap->heapSource;
286 dvmHeapBitmapSetObjectBit(&hs->liveBits, ptr);
287
288 assert(heap->bytesAllocated < mspace_footprint(heap->msp));
289 }
290
291 static void countFree(Heap *heap, const void *ptr, size_t *numBytes)
292 {
293 size_t delta = mspace_usable_size(ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
294 assert(delta > 0);
295 if (delta < heap->bytesAllocated) {
296 heap->bytesAllocated -= delta;
297 } else {
298 heap->bytesAllocated = 0;
299 }
300 HeapSource* hs = gDvm.gcHeap->heapSource;
301 dvmHeapBitmapClearObjectBit(&hs->liveBits, ptr);
302 if (heap->objectsAllocated > 0) {
303 heap->objectsAllocated--;
304 }
305 *numBytes += delta;
306 }
307
308 static HeapSource *gHs = NULL;
309
310 static mspace createMspace(void* begin, size_t morecoreStart, size_t startingSize)
311 {
312 // Clear errno to allow strerror on error.
313 errno = 0;
314 // Allow access to initial pages that will hold mspace.
315 mprotect(begin, morecoreStart, PROT_READ | PROT_WRITE);
316 // Create mspace using our backing storage starting at begin and with a footprint of
317 // morecoreStart. Don't use an internal dlmalloc lock. When morecoreStart bytes of memory are
318 // exhausted morecore will be called.
319 mspace msp = create_mspace_with_base(begin, morecoreStart, false /*locked*/);
320 if (msp != NULL) {
321 // Do not allow morecore requests to succeed beyond the starting size of the heap.
322 mspace_set_footprint_limit(msp, startingSize);
323 } else {
324 ALOGE("create_mspace_with_base failed %s", strerror(errno));
325 }
326 return msp;
327 }
328
329 /*
330 * Service request from DlMalloc to increase heap size.
331 */
332 void* dvmHeapSourceMorecore(void* mspace, intptr_t increment)
333 {
334 Heap* heap = NULL;
335 for (size_t i = 0; i < gHs->numHeaps; i++) {
336 if (gHs->heaps[i].msp == mspace) {
337 heap = &gHs->heaps[i];
338 break;
339 }
340 }
341 if (heap == NULL) {
342 ALOGE("Failed to find heap for mspace %p", mspace);
343 dvmAbort();
344 }
345 char* original_brk = heap->brk;
346 if (increment != 0) {
347 char* new_brk = original_brk + increment;
348 if (increment > 0) {
349 // Should never be asked to increase the allocation beyond the capacity of the space.
350 // Enforced by mspace_set_footprint_limit.
351 assert(new_brk <= heap->limit);
352 mprotect(original_brk, increment, PROT_READ | PROT_WRITE);
353 } else {
354 // Should never be asked for negative footprint (ie before base).
355 assert(original_brk + increment > heap->base);
356 // Advise we don't need the pages and protect them.
357 size_t size = -increment;
358 madvise(new_brk, size, MADV_DONTNEED);
359 mprotect(new_brk, size, PROT_NONE);
360 }
361 // Update brk.
362 heap->brk = new_brk;
363 }
364 return original_brk;
365 }
366
367 const size_t kInitialMorecoreStart = SYSTEM_PAGE_SIZE;
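/* The first page of the initial heap is made accessible up front so it can
 * hold the dlmalloc mspace bookkeeping; see createMspace().
 */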
368 /*
369 * Add the initial heap. Returns false if the initial heap was
370 * already added to the heap source.
371 */
372 static bool addInitialHeap(HeapSource *hs, mspace msp, size_t maximumSize)
373 {
374 assert(hs != NULL);
375 assert(msp != NULL);
376 if (hs->numHeaps != 0) {
377 return false;
378 }
379 hs->heaps[0].msp = msp;
380 hs->heaps[0].maximumSize = maximumSize;
381 hs->heaps[0].concurrentStartBytes = SIZE_MAX;
382 hs->heaps[0].base = hs->heapBase;
383 hs->heaps[0].limit = hs->heapBase + maximumSize;
384 hs->heaps[0].brk = hs->heapBase + kInitialMorecoreStart;
385 hs->numHeaps = 1;
386 return true;
387 }
388
389 /*
390 * A helper for addNewHeap(). Remap the new heap so that it will have
391 * a separate ashmem region with possibly a different name, etc. In
392 * practice, this is used to give the app heap a separate ashmem
393 * region from the zygote heap's.
394 */
395 static bool remapNewHeap(HeapSource* hs, Heap* newHeap)
396 {
397 char* newHeapBase = newHeap->base;
398 size_t rem_size = hs->heapBase + hs->heapLength - newHeapBase;
399 munmap(newHeapBase, rem_size);
400 int fd = ashmem_create_region("dalvik-heap", rem_size);
401 if (fd == -1) {
402 ALOGE("Unable to create an ashmem region for the new heap");
403 return false;
404 }
405 void* addr = mmap(newHeapBase, rem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
406 int ret = close(fd);
407 if (addr == MAP_FAILED) {
408 ALOGE("Unable to map an ashmem region for the new heap");
409 return false;
410 }
411 if (ret == -1) {
412 ALOGE("Unable to close fd for the ashmem region for the new heap");
413 munmap(newHeapBase, rem_size);
414 return false;
415 }
416 return true;
417 }
418
419 /*
420 * Adds an additional heap to the heap source. Returns false if there
421 * are too many heaps or insufficient free space to add another heap.
422 */
423 static bool addNewHeap(HeapSource *hs)
424 {
425 Heap heap;
426
427 assert(hs != NULL);
428 if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) {
429 ALOGE("Attempt to create too many heaps (%zd >= %zd)",
430 hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT);
431 dvmAbort();
432 return false;
433 }
434
435 memset(&heap, 0, sizeof(heap));
436
437 /*
438 * Heap storage comes from a common virtual memory reservation.
439 * The new heap will start on the page after the old heap.
440 */
441 char *base = hs->heaps[0].brk;
442 size_t overhead = base - hs->heaps[0].base;
443 assert(((size_t)hs->heaps[0].base & (SYSTEM_PAGE_SIZE - 1)) == 0);
444
445 if (overhead + hs->minFree >= hs->maximumSize) {
446 LOGE_HEAP("No room to create any more heaps "
447 "(%zd overhead, %zd max)",
448 overhead, hs->maximumSize);
449 return false;
450 }
451 size_t morecoreStart = SYSTEM_PAGE_SIZE;
452 heap.maximumSize = hs->growthLimit - overhead;
453 heap.concurrentStartBytes = hs->minFree - CONCURRENT_START;
454 heap.base = base;
455 heap.limit = heap.base + heap.maximumSize;
456 heap.brk = heap.base + morecoreStart;
457 if (!remapNewHeap(hs, &heap)) {
458 return false;
459 }
460 heap.msp = createMspace(base, morecoreStart, hs->minFree);
461 if (heap.msp == NULL) {
462 return false;
463 }
464
465 /* Don't let the soon-to-be-old heap grow any further.
466 */
467 hs->heaps[0].maximumSize = overhead;
468 hs->heaps[0].limit = base;
469 mspace_set_footprint_limit(hs->heaps[0].msp, overhead);
470
471 /* Put the new heap in the list, at heaps[0].
472 * Shift existing heaps down.
473 */
474 memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0]));
475 hs->heaps[0] = heap;
476 hs->numHeaps++;
477
478 return true;
479 }
480
481 /*
482 * The garbage collection daemon. Initiates a concurrent collection
483 * when signaled. Also periodically trims the heaps when a few seconds
484 * have elapsed since the last concurrent GC.
485 */
486 static void *gcDaemonThread(void* arg)
487 {
488 dvmChangeStatus(NULL, THREAD_VMWAIT);
489 dvmLockMutex(&gHs->gcThreadMutex);
490 while (gHs->gcThreadShutdown != true) {
491 bool trim = false;
492 if (gHs->gcThreadTrimNeeded) {
493 int result = dvmRelativeCondWait(&gHs->gcThreadCond, &gHs->gcThreadMutex,
494 HEAP_TRIM_IDLE_TIME_MS, 0);
495 if (result == ETIMEDOUT) {
496 /* Timed out waiting for a GC request, schedule a heap trim. */
497 trim = true;
498 }
499 } else {
500 dvmWaitCond(&gHs->gcThreadCond, &gHs->gcThreadMutex);
501 }
502
503 // Many JDWP requests cause allocation. We can't take the heap lock and wait to
504 // transition to runnable so we can start a GC if a debugger is connected, because
505 // we don't know that the JDWP thread isn't about to allocate and require the
506 // heap lock itself, leading to deadlock. http://b/8191824.
507 if (gDvm.debuggerConnected) {
508 continue;
509 }
510
511 dvmLockHeap();
512 /*
513 * Another thread may have started a concurrent garbage
514 * collection before we were scheduled. Check for this
515 * condition before proceeding.
516 */
517 if (!gDvm.gcHeap->gcRunning) {
518 dvmChangeStatus(NULL, THREAD_RUNNING);
519 if (trim) {
520 trimHeaps();
521 gHs->gcThreadTrimNeeded = false;
522 } else {
523 dvmCollectGarbageInternal(GC_CONCURRENT);
524 gHs->gcThreadTrimNeeded = true;
525 }
526 dvmChangeStatus(NULL, THREAD_VMWAIT);
527 }
528 dvmUnlockHeap();
529 }
530 dvmChangeStatus(NULL, THREAD_RUNNING);
531 return NULL;
532 }
533
534 static bool gcDaemonStartup()
535 {
536 dvmInitMutex(&gHs->gcThreadMutex);
537 pthread_cond_init(&gHs->gcThreadCond, NULL);
538 gHs->gcThreadShutdown = false;
539 gHs->hasGcThread = dvmCreateInternalThread(&gHs->gcThread, "GC",
540 gcDaemonThread, NULL);
541 return gHs->hasGcThread;
542 }
543
544 static void gcDaemonShutdown()
545 {
546 if (gHs->hasGcThread) {
547 dvmLockMutex(&gHs->gcThreadMutex);
548 gHs->gcThreadShutdown = true;
549 dvmSignalCond(&gHs->gcThreadCond);
550 dvmUnlockMutex(&gHs->gcThreadMutex);
551 pthread_join(gHs->gcThread, NULL);
552 }
553 }
554
555 /*
556 * Create a stack big enough for the worst possible case, where the
557 * heap is perfectly full of the smallest object.
558 * TODO: be better about memory usage; use a smaller stack with
559 * overflow detection and recovery.
560 */
561 static bool allocMarkStack(GcMarkStack *stack, size_t maximumSize)
562 {
563 const char *name = "dalvik-mark-stack";
564 void *addr;
565
566 assert(stack != NULL);
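    /* Worst case: the heap is packed with minimum-size objects, one every
     * (sizeof(Object) + HEAP_SOURCE_CHUNK_OVERHEAD) bytes, and each may need
     * an Object* slot on the mark stack.
     */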
567 stack->length = maximumSize * sizeof(Object*) /
568 (sizeof(Object) + HEAP_SOURCE_CHUNK_OVERHEAD);
569 addr = dvmAllocRegion(stack->length, PROT_READ | PROT_WRITE, name);
570 if (addr == NULL) {
571 return false;
572 }
573 stack->base = (const Object **)addr;
574 stack->limit = (const Object **)((char *)addr + stack->length);
575 stack->top = NULL;
576 madvise(stack->base, stack->length, MADV_DONTNEED);
577 return true;
578 }
579
580 static void freeMarkStack(GcMarkStack *stack)
581 {
582 assert(stack != NULL);
583 munmap(stack->base, stack->length);
584 memset(stack, 0, sizeof(*stack));
585 }
586
587 /*
588 * Initializes the heap source; must be called before any other
589 * dvmHeapSource*() functions. Returns a GcHeap structure
590 * allocated from the heap source.
591 */
592 GcHeap* dvmHeapSourceStartup(size_t startSize, size_t maximumSize,
593 size_t growthLimit)
594 {
595 GcHeap *gcHeap;
596 HeapSource *hs;
597 mspace msp;
598 size_t length;
599 void *base;
600
601 assert(gHs == NULL);
602
603 if (!(startSize <= growthLimit && growthLimit <= maximumSize)) {
604 ALOGE("Bad heap size parameters (start=%zd, max=%zd, limit=%zd)",
605 startSize, maximumSize, growthLimit);
606 return NULL;
607 }
608
609 /*
610 * Allocate a contiguous region of virtual memory to be subdivided
611 * among the heaps managed by the garbage collector.
612 */
613 length = ALIGN_UP_TO_PAGE_SIZE(maximumSize);
614 base = dvmAllocRegion(length, PROT_NONE, gDvm.zygote ? "dalvik-zygote" : "dalvik-heap");
615 if (base == NULL) {
616 return NULL;
617 }
618
619 /* Create an unlocked dlmalloc mspace to use as
620 * a heap source.
621 */
622 msp = createMspace(base, kInitialMorecoreStart, startSize);
623 if (msp == NULL) {
624 goto fail;
625 }
626
627 gcHeap = (GcHeap *)calloc(1, sizeof(*gcHeap));
628 if (gcHeap == NULL) {
629 LOGE_HEAP("Can't allocate heap descriptor");
630 goto fail;
631 }
632
633 hs = (HeapSource *)calloc(1, sizeof(*hs));
634 if (hs == NULL) {
635 LOGE_HEAP("Can't allocate heap source");
636 free(gcHeap);
637 goto fail;
638 }
639
640 hs->targetUtilization = gDvm.heapTargetUtilization * HEAP_UTILIZATION_MAX;
641 hs->minFree = gDvm.heapMinFree;
642 hs->maxFree = gDvm.heapMaxFree;
643 hs->startSize = startSize;
644 hs->maximumSize = maximumSize;
645 hs->growthLimit = growthLimit;
646 hs->idealSize = startSize;
647 hs->softLimit = SIZE_MAX; // no soft limit at first
648 hs->numHeaps = 0;
649 hs->sawZygote = gDvm.zygote;
650 hs->nativeBytesAllocated = 0;
651 hs->nativeFootprintGCWatermark = startSize;
652 hs->nativeFootprintLimit = startSize * 2;
653 hs->nativeNeedToRunFinalization = false;
654 hs->hasGcThread = false;
655 hs->heapBase = (char *)base;
656 hs->heapLength = length;
657
658 if (hs->maxFree > hs->maximumSize) {
659 hs->maxFree = hs->maximumSize;
660 }
661 if (hs->minFree < CONCURRENT_START) {
662 hs->minFree = CONCURRENT_START;
663 } else if (hs->minFree > hs->maxFree) {
664 hs->minFree = hs->maxFree;
665 }
666
667 if (!addInitialHeap(hs, msp, growthLimit)) {
668 LOGE_HEAP("Can't add initial heap");
669 goto fail;
670 }
671 if (!dvmHeapBitmapInit(&hs->liveBits, base, length, "dalvik-bitmap-1")) {
672 LOGE_HEAP("Can't create liveBits");
673 goto fail;
674 }
675 if (!dvmHeapBitmapInit(&hs->markBits, base, length, "dalvik-bitmap-2")) {
676 LOGE_HEAP("Can't create markBits");
677 dvmHeapBitmapDelete(&hs->liveBits);
678 goto fail;
679 }
680 if (!allocMarkStack(&gcHeap->markContext.stack, hs->maximumSize)) {
681 ALOGE("Can't create markStack");
682 dvmHeapBitmapDelete(&hs->markBits);
683 dvmHeapBitmapDelete(&hs->liveBits);
684 goto fail;
685 }
686 gcHeap->markContext.bitmap = &hs->markBits;
687 gcHeap->heapSource = hs;
688
689 gHs = hs;
690 return gcHeap;
691
692 fail:
693 munmap(base, length);
694 return NULL;
695 }
696
697 bool dvmHeapSourceStartupAfterZygote()
698 {
699 return gDvm.concurrentMarkSweep ? gcDaemonStartup() : true;
700 }
701
702 /*
703 * This is called while in zygote mode, right before we fork() for the
704 * first time. We create a heap for all future zygote process allocations,
705 * in an attempt to avoid touching pages in the zygote heap. (This would
706 * probably be unnecessary if we had a compacting GC -- the source of our
707 * troubles is small allocations filling in the gaps from larger ones.)
708 */
709 bool dvmHeapSourceStartupBeforeFork()
710 {
711 HeapSource *hs = gHs; // use a local to avoid the implicit "volatile"
712
713 HS_BOILERPLATE();
714
715 assert(gDvm.zygote);
716
717 if (!gDvm.newZygoteHeapAllocated) {
718 /* Ensure heaps are trimmed to minimize footprint pre-fork.
719 */
720 trimHeaps();
721 /* Create a new heap for post-fork zygote allocations. We only
722 * try once, even if it fails.
723 */
724 ALOGV("Splitting out new zygote heap");
725 gDvm.newZygoteHeapAllocated = true;
726 return addNewHeap(hs);
727 }
728 return true;
729 }
730
731 void dvmHeapSourceThreadShutdown()
732 {
733 if (gDvm.gcHeap != NULL && gDvm.concurrentMarkSweep) {
734 gcDaemonShutdown();
735 }
736 }
737
738 /*
739 * Tears down the entire GcHeap structure and all of the substructures
740 * attached to it. This call has the side effect of setting the given
741 * gcHeap pointer and gHs to NULL.
742 */
743 void dvmHeapSourceShutdown(GcHeap **gcHeap)
744 {
745 assert(gcHeap != NULL);
746 if (*gcHeap != NULL && (*gcHeap)->heapSource != NULL) {
747 HeapSource *hs = (*gcHeap)->heapSource;
748 dvmHeapBitmapDelete(&hs->liveBits);
749 dvmHeapBitmapDelete(&hs->markBits);
750 freeMarkStack(&(*gcHeap)->markContext.stack);
751 munmap(hs->heapBase, hs->heapLength);
752 free(hs);
753 gHs = NULL;
754 free(*gcHeap);
755 *gcHeap = NULL;
756 }
757 }
758
759 /*
760 * Gets the beginning of the allocation for the HeapSource.
761 */
762 void *dvmHeapSourceGetBase()
763 {
764 return gHs->heapBase;
765 }
766
767 /*
768 * Returns a high-water mark: all objects must have been allocated between
769 * the heap base and this limit.
770 */
771 void *dvmHeapSourceGetLimit()
772 {
773 HeapSource *hs = gHs;
774 void *max_brk = hs->heaps[0].brk;
775
776 #ifndef NDEBUG
777 for (size_t i = 1; i < hs->numHeaps; i++) {
778 Heap *const heap = &hs->heaps[i];
779 void *heap_brk = heap->brk;
780 assert (max_brk > heap_brk);
781 }
782 #endif
783 return max_brk;
784 }
785
786 /*
787 * Returns the requested value. If the per-heap stats are requested, fill
788 * them as well.
789 *
790 * Caller must hold the heap lock.
791 */
792 size_t dvmHeapSourceGetValue(HeapSourceValueSpec spec, size_t perHeapStats[],
793 size_t arrayLen)
794 {
795 HeapSource *hs = gHs;
796 size_t value = 0;
797 size_t total = 0;
798
799 HS_BOILERPLATE();
800
801 assert(arrayLen >= hs->numHeaps || perHeapStats == NULL);
802 for (size_t i = 0; i < hs->numHeaps; i++) {
803 Heap *const heap = &hs->heaps[i];
804
805 switch (spec) {
806 case HS_FOOTPRINT:
807 value = heap->brk - heap->base;
808 assert(value == mspace_footprint(heap->msp));
809 break;
810 case HS_ALLOWED_FOOTPRINT:
811 value = mspace_footprint_limit(heap->msp);
812 break;
813 case HS_BYTES_ALLOCATED:
814 value = heap->bytesAllocated;
815 break;
816 case HS_OBJECTS_ALLOCATED:
817 value = heap->objectsAllocated;
818 break;
819 default:
820 // quiet gcc
821 break;
822 }
823 if (perHeapStats) {
824 perHeapStats[i] = value;
825 }
826 total += value;
827 }
828 return total;
829 }
830
831 void dvmHeapSourceGetRegions(uintptr_t *base, uintptr_t *max, size_t numHeaps)
832 {
833 HeapSource *hs = gHs;
834
835 HS_BOILERPLATE();
836
837 assert(numHeaps <= hs->numHeaps);
838 for (size_t i = 0; i < numHeaps; ++i) {
839 base[i] = (uintptr_t)hs->heaps[i].base;
840 max[i] = MIN((uintptr_t)hs->heaps[i].limit - 1, hs->markBits.max);
841 }
842 }
843
844 /*
845 * Get the bitmap representing all live objects.
846 */
847 HeapBitmap *dvmHeapSourceGetLiveBits()
848 {
849 HS_BOILERPLATE();
850
851 return &gHs->liveBits;
852 }
853
854 /*
855 * Get the bitmap representing all marked objects.
856 */
857 HeapBitmap *dvmHeapSourceGetMarkBits()
858 {
859 HS_BOILERPLATE();
860
861 return &gHs->markBits;
862 }
863
864 void dvmHeapSourceSwapBitmaps()
865 {
866 HeapBitmap tmp = gHs->liveBits;
867 gHs->liveBits = gHs->markBits;
868 gHs->markBits = tmp;
869 }
870
871 void dvmHeapSourceZeroMarkBitmap()
872 {
873 HS_BOILERPLATE();
874
875 dvmHeapBitmapZero(&gHs->markBits);
876 }
877
878 void dvmMarkImmuneObjects(const char *immuneLimit)
879 {
880 /*
881 * Copy the contents of the live bit vector for immune object
882 * range into the mark bit vector.
883 */
884 /* The only values generated by dvmHeapSourceGetImmuneLimit() */
885 assert(immuneLimit == gHs->heaps[0].base ||
886 immuneLimit == NULL);
887 assert(gHs->liveBits.base == gHs->markBits.base);
888 assert(gHs->liveBits.bitsLen == gHs->markBits.bitsLen);
889 /* heap[0] is never immune */
890 assert(gHs->heaps[0].base >= immuneLimit);
891 assert(gHs->heaps[0].limit > immuneLimit);
892
893 for (size_t i = 1; i < gHs->numHeaps; ++i) {
894 if (gHs->heaps[i].base < immuneLimit) {
895 assert(gHs->heaps[i].limit <= immuneLimit);
896 /* Compute the number of words to copy in the bitmap. */
897 size_t index = HB_OFFSET_TO_INDEX(
898 (uintptr_t)gHs->heaps[i].base - gHs->liveBits.base);
899 /* Compute the starting offset in the live and mark bits. */
900 char *src = (char *)(gHs->liveBits.bits + index);
901 char *dst = (char *)(gHs->markBits.bits + index);
902 /* Compute the number of bytes of the live bitmap to copy. */
903 size_t length = HB_OFFSET_TO_BYTE_INDEX(
904 gHs->heaps[i].limit - gHs->heaps[i].base);
905 /* Do the copy. */
906 memcpy(dst, src, length);
907 /* Make sure max points to the address of the highest set bit. */
908 if (gHs->markBits.max < (uintptr_t)gHs->heaps[i].limit) {
909 gHs->markBits.max = (uintptr_t)gHs->heaps[i].limit;
910 }
911 }
912 }
913 }
914
915 /*
916 * Allocates <n> bytes of zeroed data.
917 */
918 void* dvmHeapSourceAlloc(size_t n)
919 {
920 HS_BOILERPLATE();
921
922 HeapSource *hs = gHs;
923 Heap* heap = hs2heap(hs);
924 if (heap->bytesAllocated + n > hs->softLimit) {
925 /*
926 * This allocation would push us over the soft limit; act as
927 * if the heap is full.
928 */
929 LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation",
930 FRACTIONAL_MB(hs->softLimit), n);
931 return NULL;
932 }
933 void* ptr;
934 if (gDvm.lowMemoryMode) {
935 /* This is only necessary because mspace_calloc always memsets the
936 * allocated memory to 0. This is bad for memory usage since it leads
937 * to dirty zero pages. If low memory mode is enabled, we use
938 * mspace_malloc which doesn't memset the allocated memory and madvise
939 * the page aligned region back to the kernel.
940 */
941 ptr = mspace_malloc(heap->msp, n);
942 if (ptr == NULL) {
943 return NULL;
944 }
945 uintptr_t zero_begin = (uintptr_t)ptr;
946 uintptr_t zero_end = (uintptr_t)ptr + n;
947 /* Calculate the page aligned region.
948 */
949 uintptr_t begin = ALIGN_UP_TO_PAGE_SIZE(zero_begin);
950 uintptr_t end = zero_end & ~(uintptr_t)(SYSTEM_PAGE_SIZE - 1);
951 /* If our allocation spans more than one page, we attempt to madvise.
952 */
953 if (begin < end) {
954 /* madvise the page aligned region to kernel.
955 */
956 madvise((void*)begin, end - begin, MADV_DONTNEED);
957 /* Zero the region after the page aligned region.
958 */
959 memset((void*)end, 0, zero_end - end);
960 /* Zero out the region before the page aligned region.
961 */
962 zero_end = begin;
963 }
964 memset((void*)zero_begin, 0, zero_end - zero_begin);
965 } else {
966 ptr = mspace_calloc(heap->msp, 1, n);
967 if (ptr == NULL) {
968 return NULL;
969 }
970 }
971
972 countAllocation(heap, ptr);
973 /*
974 * Check to see if a concurrent GC should be initiated.
975 */
976 if (gDvm.gcHeap->gcRunning || !hs->hasGcThread) {
977 /*
978 * The garbage collector thread is already running or has yet
979 * to be started. Do nothing.
980 */
981 return ptr;
982 }
983 if (heap->bytesAllocated > heap->concurrentStartBytes) {
984 /*
985 * We have exceeded the allocation threshold. Wake up the
986 * garbage collector.
987 */
988 dvmSignalCond(&gHs->gcThreadCond);
989 }
990 return ptr;
991 }
992
993 /* Remove any hard limits, try to allocate, and shrink back down.
994 * Last resort when trying to allocate an object.
995 */
996 static void* heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n)
997 {
998 /* Grow as much as possible, but don't let the real footprint
999 * go over the absolute max.
1000 */
1001 size_t max = heap->maximumSize;
1002
1003 mspace_set_footprint_limit(heap->msp, max);
1004 void* ptr = dvmHeapSourceAlloc(n);
1005
1006 /* Shrink back down as small as possible. Our caller may
1007 * readjust max_allowed to a more appropriate value.
1008 */
1009 mspace_set_footprint_limit(heap->msp,
1010 mspace_footprint(heap->msp));
1011 return ptr;
1012 }
1013
1014 /*
1015 * Allocates <n> bytes of zeroed data, growing as much as possible
1016 * if necessary.
1017 */
1018 void* dvmHeapSourceAllocAndGrow(size_t n)
1019 {
1020 HS_BOILERPLATE();
1021
1022 HeapSource *hs = gHs;
1023 Heap* heap = hs2heap(hs);
1024 void* ptr = dvmHeapSourceAlloc(n);
1025 if (ptr != NULL) {
1026 return ptr;
1027 }
1028
1029 size_t oldIdealSize = hs->idealSize;
1030 if (isSoftLimited(hs)) {
1031 /* We're soft-limited. Try removing the soft limit to
1032 * see if we can allocate without actually growing.
1033 */
1034 hs->softLimit = SIZE_MAX;
1035 ptr = dvmHeapSourceAlloc(n);
1036 if (ptr != NULL) {
1037 /* Removing the soft limit worked; fix things up to
1038 * reflect the new effective ideal size.
1039 */
1040 snapIdealFootprint();
1041 return ptr;
1042 }
1043 // softLimit intentionally left at SIZE_MAX.
1044 }
1045
1046 /* We're not soft-limited. Grow the heap to satisfy the request.
1047 * If this call fails, no footprints will have changed.
1048 */
1049 ptr = heapAllocAndGrow(hs, heap, n);
1050 if (ptr != NULL) {
1051 /* The allocation succeeded. Fix up the ideal size to
1052 * reflect any footprint modifications that had to happen.
1053 */
1054 snapIdealFootprint();
1055 } else {
1056 /* We just couldn't do it. Restore the original ideal size,
1057 * fixing up softLimit if necessary.
1058 */
1059 setIdealFootprint(oldIdealSize);
1060 }
1061 return ptr;
1062 }
1063
1064 /*
1065 * Frees the first numPtrs objects in the ptrs list and returns the
1066 * amount of reclaimed storage. The list must contain addresses all in
1067 * the same mspace, and must be in increasing order. This implies that
1068 * there are no duplicates, and no entries are NULL.
1069 */
1070 size_t dvmHeapSourceFreeList(size_t numPtrs, void **ptrs)
1071 {
1072 HS_BOILERPLATE();
1073
1074 if (numPtrs == 0) {
1075 return 0;
1076 }
1077
1078 assert(ptrs != NULL);
1079 assert(*ptrs != NULL);
1080 Heap* heap = ptr2heap(gHs, *ptrs);
1081 size_t numBytes = 0;
1082 if (heap != NULL) {
1083 mspace msp = heap->msp;
1084 // Calling mspace_free on shared heaps disrupts sharing too
1085 // much. For heap[0] -- the 'active heap' -- we call
1086 // mspace_free, but on the other heaps we only do some
1087 // accounting.
1088 if (heap == gHs->heaps) {
1089 // Count freed objects.
1090 for (size_t i = 0; i < numPtrs; i++) {
1091 assert(ptrs[i] != NULL);
1092 assert(ptr2heap(gHs, ptrs[i]) == heap);
1093 countFree(heap, ptrs[i], &numBytes);
1094 }
1095 // Bulk free ptrs.
1096 mspace_bulk_free(msp, ptrs, numPtrs);
1097 } else {
1098 // This is not an 'active heap'. Only do the accounting.
1099 for (size_t i = 0; i < numPtrs; i++) {
1100 assert(ptrs[i] != NULL);
1101 assert(ptr2heap(gHs, ptrs[i]) == heap);
1102 countFree(heap, ptrs[i], &numBytes);
1103 }
1104 }
1105 }
1106 return numBytes;
1107 }
1108
1109 /*
1110 * Returns true iff <ptr> is in the heap source.
1111 */
1112 bool dvmHeapSourceContainsAddress(const void *ptr)
1113 {
1114 HS_BOILERPLATE();
1115
1116 return (dvmHeapSourceGetBase() <= ptr) && (ptr <= dvmHeapSourceGetLimit());
1117 }
1118
1119 /*
1120 * Returns true iff <ptr> was allocated from the heap source.
1121 */
1122 bool dvmHeapSourceContains(const void *ptr)
1123 {
1124 HS_BOILERPLATE();
1125
1126 if (dvmHeapSourceContainsAddress(ptr)) {
1127 return dvmHeapBitmapIsObjectBitSet(&gHs->liveBits, ptr) != 0;
1128 }
1129 return false;
1130 }
1131
1132 bool dvmIsZygoteObject(const Object* obj)
1133 {
1134 HeapSource *hs = gHs;
1135
1136 HS_BOILERPLATE();
1137
1138 if (dvmHeapSourceContains(obj) && hs->sawZygote) {
1139 Heap *heap = ptr2heap(hs, obj);
1140 if (heap != NULL) {
1141 /* If the object is not in the active heap, we assume that
1142 * it was allocated as part of zygote.
1143 */
1144 return heap != hs->heaps;
1145 }
1146 }
1147 /* The pointer is outside of any known heap, or we are not
1148 * running in zygote mode.
1149 */
1150 return false;
1151 }
1152
1153 /*
1154 * Returns the number of usable bytes in an allocated chunk; the size
1155 * may be larger than the size passed to dvmHeapSourceAlloc().
1156 */
1157 size_t dvmHeapSourceChunkSize(const void *ptr)
1158 {
1159 HS_BOILERPLATE();
1160
1161 Heap* heap = ptr2heap(gHs, ptr);
1162 if (heap != NULL) {
1163 return mspace_usable_size(ptr);
1164 }
1165 return 0;
1166 }
1167
1168 /*
1169 * Returns the number of bytes that the heap source has allocated
1170 * from the system using sbrk/mmap, etc.
1171 *
1172 * Caller must hold the heap lock.
1173 */
1174 size_t dvmHeapSourceFootprint()
1175 {
1176 HS_BOILERPLATE();
1177
1178 //TODO: include size of bitmaps?
1179 return oldHeapOverhead(gHs, true);
1180 }
1181
1182 static size_t getMaximumSize(const HeapSource *hs)
1183 {
1184 return hs->growthLimit;
1185 }
1186
1187 /*
1188 * Returns the current maximum size of the heap source respecting any
1189 * growth limits.
1190 */
1191 size_t dvmHeapSourceGetMaximumSize()
1192 {
1193 HS_BOILERPLATE();
1194 return getMaximumSize(gHs);
1195 }
1196
1197 /*
1198 * Removes any growth limits. Allows the user to allocate up to the
1199 * maximum heap size.
1200 */
1201 void dvmClearGrowthLimit()
1202 {
1203 HS_BOILERPLATE();
1204 dvmLockHeap();
1205 dvmWaitForConcurrentGcToComplete();
1206 gDvm.gcHeap->cardTableLength = gDvm.gcHeap->cardTableMaxLength;
1207 gHs->growthLimit = gHs->maximumSize;
1208 size_t overhead = oldHeapOverhead(gHs, false);
1209 gHs->heaps[0].maximumSize = gHs->maximumSize - overhead;
1210 gHs->heaps[0].limit = gHs->heaps[0].base + gHs->heaps[0].maximumSize;
1211 dvmUnlockHeap();
1212 }
1213
1214 /*
1215 * Return the real bytes used by old heaps plus the soft usage of the
1216 * current heap. When a soft limit is in effect, this is effectively
1217 * what it's compared against (though, in practice, it only looks at
1218 * the current heap).
1219 */
1220 static size_t getSoftFootprint(bool includeActive)
1221 {
1222 HS_BOILERPLATE();
1223
1224 HeapSource *hs = gHs;
1225 size_t ret = oldHeapOverhead(hs, false);
1226 if (includeActive) {
1227 ret += hs->heaps[0].bytesAllocated;
1228 }
1229
1230 return ret;
1231 }
1232
1233 /*
1234 * Gets the maximum number of bytes that the heap source is allowed
1235 * to allocate from the system.
1236 */
1237 size_t dvmHeapSourceGetIdealFootprint()
1238 {
1239 HeapSource *hs = gHs;
1240
1241 HS_BOILERPLATE();
1242
1243 return hs->idealSize;
1244 }
1245
1246 /*
1247 * Sets the soft limit, handling any necessary changes to the allowed
1248 * footprint of the active heap.
1249 */
1250 static void setSoftLimit(HeapSource *hs, size_t softLimit)
1251 {
1252 /* Compare against the actual footprint, rather than the
1253 * max_allowed, because the heap may not have grown all the
1254 * way to the allowed size yet.
1255 */
1256 mspace msp = hs->heaps[0].msp;
1257 size_t currentHeapSize = mspace_footprint(msp);
1258 if (softLimit < currentHeapSize) {
1259 /* Don't let the heap grow any more, and impose a soft limit.
1260 */
1261 mspace_set_footprint_limit(msp, currentHeapSize);
1262 hs->softLimit = softLimit;
1263 } else {
1264 /* Let the heap grow to the requested max, and remove any
1265 * soft limit, if set.
1266 */
1267 mspace_set_footprint_limit(msp, softLimit);
1268 hs->softLimit = SIZE_MAX;
1269 }
1270 }
1271
1272 /*
1273 * Sets the maximum number of bytes that the heap source is allowed
1274 * to allocate from the system. Clamps to the appropriate maximum
1275 * value.
1276 */
1277 static void setIdealFootprint(size_t max)
1278 {
1279 HS_BOILERPLATE();
1280
1281 HeapSource *hs = gHs;
1282 size_t maximumSize = getMaximumSize(hs);
1283 if (max > maximumSize) {
1284 LOGI_HEAP("Clamp target GC heap from %zd.%03zdMB to %u.%03uMB",
1285 FRACTIONAL_MB(max),
1286 FRACTIONAL_MB(maximumSize));
1287 max = maximumSize;
1288 }
1289
1290 /* Convert max into a size that applies to the active heap.
1291 * Old heaps will count against the ideal size.
1292 */
1293 size_t overhead = getSoftFootprint(false);
1294 size_t activeMax;
1295 if (overhead < max) {
1296 activeMax = max - overhead;
1297 } else {
1298 activeMax = 0;
1299 }
1300
1301 setSoftLimit(hs, activeMax);
1302 hs->idealSize = max;
1303 }
1304
1305 /*
1306 * Make the ideal footprint equal to the current footprint.
1307 */
1308 static void snapIdealFootprint()
1309 {
1310 HS_BOILERPLATE();
1311
1312 setIdealFootprint(getSoftFootprint(true));
1313 }
1314
1315 /*
1316 * Gets the current ideal heap utilization, represented as a number
1317 * between zero and one.
1318 */
1319 float dvmGetTargetHeapUtilization()
1320 {
1321 HeapSource *hs = gHs;
1322
1323 HS_BOILERPLATE();
1324
1325 return (float)hs->targetUtilization / (float)HEAP_UTILIZATION_MAX;
1326 }
1327
1328 /*
1329 * Sets the new ideal heap utilization, represented as a number
1330 * between zero and one.
1331 */
1332 void dvmSetTargetHeapUtilization(float newTarget)
1333 {
1334 HeapSource *hs = gHs;
1335
1336 HS_BOILERPLATE();
1337
1338 /* Clamp it to a reasonable range.
1339 */
1340 // TODO: This may need some tuning.
1341 if (newTarget < 0.2) {
1342 newTarget = 0.2;
1343 } else if (newTarget > 0.8) {
1344 newTarget = 0.8;
1345 }
1346
1347 hs->targetUtilization =
1348 (size_t)(newTarget * (float)HEAP_UTILIZATION_MAX);
1349 ALOGV("Set heap target utilization to %zd/%d (%f)",
1350 hs->targetUtilization, HEAP_UTILIZATION_MAX, newTarget);
1351 }
1352
1353 /*
1354 * Given the size of a live set, returns the ideal heap size given
1355 * the current target utilization and MIN/MAX values.
1356 */
1357 static size_t getUtilizationTarget(const HeapSource* hs, size_t liveSize)
1358 {
1359 /* Use the current target utilization ratio to determine the
1360 * ideal heap size based on the size of the live set.
1361 */
1362 size_t targetSize = (liveSize / hs->targetUtilization) * HEAP_UTILIZATION_MAX;
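    /* Dividing by targetUtilization before multiplying by HEAP_UTILIZATION_MAX
     * keeps the intermediate value from overflowing a 32-bit size_t for large
     * live sets, at the cost of rounding down. For example, a targetUtilization
     * of 512 (a 50% target) and a 4 MiB live set give an 8 MiB target before
     * the minFree/maxFree clamping below.
     */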
1363
1364 /* Cap the amount of free space, though, so we don't end up
1365 * with, e.g., 8MB of free space when the live set size hits 8MB.
1366 */
1367 if (targetSize > liveSize + hs->maxFree) {
1368 targetSize = liveSize + hs->maxFree;
1369 } else if (targetSize < liveSize + hs->minFree) {
1370 targetSize = liveSize + hs->minFree;
1371 }
1372 return targetSize;
1373 }
1374
1375 /*
1376 * Given the current contents of the active heap, increase the allowed
1377 * heap footprint to match the target utilization ratio. This
1378 * should only be called immediately after a full mark/sweep.
1379 */
1380 void dvmHeapSourceGrowForUtilization()
1381 {
1382 HS_BOILERPLATE();
1383
1384 HeapSource *hs = gHs;
1385 Heap* heap = hs2heap(hs);
1386
1387 /* Use the current target utilization ratio to determine the
1388 * ideal heap size based on the size of the live set.
1389 * Note that only the active heap plays any part in this.
1390 *
1391 * Avoid letting the old heaps influence the target free size,
1392 * because they may be full of objects that aren't actually
1393 * in the working set. Just look at the allocated size of
1394 * the current heap.
1395 */
1396 size_t currentHeapUsed = heap->bytesAllocated;
1397 size_t targetHeapSize = getUtilizationTarget(hs, currentHeapUsed);
1398
1399 /* The ideal size includes the old heaps; add overhead so that
1400 * it can be immediately subtracted again in setIdealFootprint().
1401 * If the target heap size would exceed the max, setIdealFootprint()
1402 * will clamp it to a legal value.
1403 */
1404 size_t overhead = getSoftFootprint(false);
1405 setIdealFootprint(targetHeapSize + overhead);
1406
1407 size_t freeBytes = getAllocLimit(hs);
1408 if (freeBytes < CONCURRENT_MIN_FREE) {
1409 /* Not enough free memory to allow a concurrent GC. */
1410 heap->concurrentStartBytes = SIZE_MAX;
1411 } else {
1412 heap->concurrentStartBytes = freeBytes - CONCURRENT_START;
1413 }
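    /* In other words, dvmHeapSourceAlloc() will signal the GC thread once
     * allocations leave less than CONCURRENT_START bytes of headroom below
     * the allocation limit.
     */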
1414
1415 /* Mark that we need to run finalizers and update the native watermarks
1416 * next time we attempt to register a native allocation.
1417 */
1418 gHs->nativeNeedToRunFinalization = true;
1419 }
1420
1421 /*
1422 * Return free pages to the system.
1423 * TODO: move this somewhere else, especially the native heap part.
1424 */
1425 static void releasePagesInRange(void* start, void* end, size_t used_bytes,
1426 void* releasedBytes)
1427 {
1428 if (used_bytes == 0) {
1429 /*
1430 * We have a range of memory we can try to madvise()
1431 * back. Linux requires that the madvise() start address is
1432 * page-aligned. We also align the end address.
1433 */
1434 start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
1435 end = (void *)((size_t)end & ~(SYSTEM_PAGE_SIZE - 1));
1436 if (end > start) {
1437 size_t length = (char *)end - (char *)start;
1438 madvise(start, length, MADV_DONTNEED);
1439 *(size_t *)releasedBytes += length;
1440 }
1441 }
1442 }
1443
1444 /*
1445 * Return unused memory to the system if possible.
1446 */
1447 static void trimHeaps()
1448 {
1449 HS_BOILERPLATE();
1450
1451 HeapSource *hs = gHs;
1452 size_t heapBytes = 0;
1453 for (size_t i = 0; i < hs->numHeaps; i++) {
1454 Heap *heap = &hs->heaps[i];
1455
1456 /* Return the wilderness chunk to the system. */
1457 mspace_trim(heap->msp, 0);
1458
1459 /* Return any whole free pages to the system. */
1460 mspace_inspect_all(heap->msp, releasePagesInRange, &heapBytes);
1461 }
1462
1463 /* Same for the native heap. */
1464 dlmalloc_trim(0);
1465 size_t nativeBytes = 0;
1466 dlmalloc_inspect_all(releasePagesInRange, &nativeBytes);
1467
1468 LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes",
1469 heapBytes, nativeBytes, heapBytes + nativeBytes);
1470 }
1471
1472 /*
1473 * Walks over the heap source and passes every allocated and
1474 * free chunk to the callback.
1475 */
1476 void dvmHeapSourceWalk(void(*callback)(void* start, void* end,
1477 size_t used_bytes, void* arg),
1478 void *arg)
1479 {
1480 HS_BOILERPLATE();
1481
1482 /* Walk the heaps from oldest to newest.
1483 */
1484 //TODO: do this in address order
1485 HeapSource *hs = gHs;
1486 for (size_t i = hs->numHeaps; i > 0; --i) {
1487 mspace_inspect_all(hs->heaps[i-1].msp, callback, arg);
1488 callback(NULL, NULL, 0, arg); // Indicate end of a heap.
1489 }
1490 }
1491
1492 /*
1493 * Gets the number of heaps available in the heap source.
1494 *
1495 * Caller must hold the heap lock, because gHs caches a field
1496 * in gDvm.gcHeap.
1497 */
1498 size_t dvmHeapSourceGetNumHeaps()
1499 {
1500 HS_BOILERPLATE();
1501
1502 return gHs->numHeaps;
1503 }
1504
1505 void *dvmHeapSourceGetImmuneLimit(bool isPartial)
1506 {
1507 if (isPartial) {
1508 return hs2heap(gHs)->base;
1509 } else {
1510 return NULL;
1511 }
1512 }
1513
1514 static void dvmHeapSourceUpdateMaxNativeFootprint()
1515 {
1516 /* Use the current target utilization ratio to determine the new native GC
1517 * watermarks.
1518 */
1519 size_t nativeSize = gHs->nativeBytesAllocated;
1520 size_t targetSize =
1521 (nativeSize / gHs->targetUtilization) * HEAP_UTILIZATION_MAX;
1522
1523 if (targetSize > nativeSize + gHs->maxFree) {
1524 targetSize = nativeSize + gHs->maxFree;
1525 } else if (targetSize < nativeSize + gHs->minFree) {
1526 targetSize = nativeSize + gHs->minFree;
1527 }
1528 gHs->nativeFootprintGCWatermark = targetSize;
1529 gHs->nativeFootprintLimit = 2 * targetSize - nativeSize;
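    /* This places the hard limit the same distance above the GC watermark as
     * the watermark sits above current native usage:
     * nativeFootprintLimit - targetSize == targetSize - nativeSize.
     */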
1530 }
1531
1532 void dvmHeapSourceRegisterNativeAllocation(int bytes)
1533 {
1534 /* If we have just done a GC, ensure that the finalizers are done and update
1535 * the native watermarks.
1536 */
1537 if (gHs->nativeNeedToRunFinalization) {
1538 dvmRunFinalization();
1539 dvmHeapSourceUpdateMaxNativeFootprint();
1540 gHs->nativeNeedToRunFinalization = false;
1541 }
1542
1543 android_atomic_add(bytes, &gHs->nativeBytesAllocated);
1544
1545 if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintGCWatermark) {
1546 /* The second watermark is higher than the gc watermark. If you hit
1547 * this it means you are allocating native objects faster than the GC
1548 * can keep up with. If this occurs, we do a GC for alloc.
1549 */
1550 if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintLimit) {
1551 Thread* self = dvmThreadSelf();
1552 dvmRunFinalization();
1553 if (dvmCheckException(self)) {
1554 return;
1555 }
1556 dvmLockHeap();
1557 bool waited = dvmWaitForConcurrentGcToComplete();
1558 dvmUnlockHeap();
1559 if (waited) {
1560 // Just finished a GC, attempt to run finalizers.
1561 dvmRunFinalization();
1562 if (dvmCheckException(self)) {
1563 return;
1564 }
1565 }
1566
1567 // If we still are over the watermark, attempt a GC for alloc and run finalizers.
1568 if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintLimit) {
1569 dvmLockHeap();
1570 dvmWaitForConcurrentGcToComplete();
1571 dvmCollectGarbageInternal(GC_FOR_MALLOC);
1572 dvmUnlockHeap();
1573 dvmRunFinalization();
1574 gHs->nativeNeedToRunFinalization = false;
1575 if (dvmCheckException(self)) {
1576 return;
1577 }
1578 }
1579 /* We have just run finalizers, update the native watermark since
1580 * it is very likely that finalizers released native managed
1581 * allocations.
1582 */
1583 dvmHeapSourceUpdateMaxNativeFootprint();
1584 } else {
1585 dvmSignalCond(&gHs->gcThreadCond);
1586 }
1587 }
1588 }
1589
1590 /*
1591 * Called from VMRuntime.registerNativeFree.
1592 */
1593 void dvmHeapSourceRegisterNativeFree(int bytes)
1594 {
1595 int expected_size, new_size;
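    /* Subtract with a compare-and-swap loop: android_atomic_cas() returns
     * non-zero when the swap fails, so the do/while retries until the update
     * lands. Frees that would drive the running count negative are ignored.
     */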
1596 do {
1597 expected_size = gHs->nativeBytesAllocated;
1598 new_size = expected_size - bytes;
1599 if (new_size < 0) {
1600 break;
1601 }
1602 } while (android_atomic_cas(expected_size, new_size,
1603 &gHs->nativeBytesAllocated));
1604 }
1605