/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cutils/mspace.h>
#include <limits.h>     // for INT_MAX
#include <sys/mman.h>
#include <errno.h>

#include "Dalvik.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/HeapSource.h"
#include "alloc/HeapBitmap.h"

// TODO: find a real header file for these.
extern int dlmalloc_trim(size_t);
extern void dlmalloc_walk_free_pages(void(*)(void*, void*, void*), void*);

static void snapIdealFootprint(void);
static void setIdealFootprint(size_t max);

#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif
#define ALIGN_UP_TO_PAGE_SIZE(p) \
    (((size_t)(p) + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1))
#define ALIGN_DOWN_TO_PAGE_SIZE(p) \
    ((size_t)(p) & ~(PAGE_SIZE - 1))
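
/* A quick worked example of the alignment math above (assuming the
 * usual 4096-byte pages): ALIGN_UP_TO_PAGE_SIZE(4097) == 8192,
 * ALIGN_UP_TO_PAGE_SIZE(4096) == 4096, and
 * ALIGN_DOWN_TO_PAGE_SIZE(4097) == 4096.
 */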

#define HEAP_UTILIZATION_MAX        1024
#define DEFAULT_HEAP_UTILIZATION    512     // Range 1..HEAP_UTILIZATION_MAX
#define HEAP_IDEAL_FREE             (2 * 1024 * 1024)
#define HEAP_MIN_FREE               (HEAP_IDEAL_FREE / 4)
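
/* In other words, the default target is 512/1024 == 50% utilization
 * (about half of the post-GC heap live), with the free headroom
 * clamped to [HEAP_MIN_FREE, HEAP_IDEAL_FREE] == [512KB, 2MB].
 */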

#define HS_BOILERPLATE() \
    do { \
        assert(gDvm.gcHeap != NULL); \
        assert(gDvm.gcHeap->heapSource != NULL); \
        assert(gHs == gDvm.gcHeap->heapSource); \
    } while (0)

#define DEBUG_HEAP_SOURCE 0
#if DEBUG_HEAP_SOURCE
#define HSTRACE(...)  LOG(LOG_INFO, LOG_TAG "-hs", __VA_ARGS__)
#else
#define HSTRACE(...)  /**/
#endif

/*
=======================================================
=======================================================
=======================================================

How will this be used?
allocating/freeing: Heap.c just wants to say "alloc(n)" and get a ptr
    - if allocating in large doesn't work, try allocating from small
Heap.c will use HeapSource.h; HeapSource.c will do the right thing
    between small and large
    - some operations should be abstracted; put in a structure

How do we manage the size trade-offs?
- keep mspace max footprint clamped to actual footprint
- if small-alloc returns null, adjust large vs. small ratio
    - give small all available slack and retry
    - success or fail, snap back to actual footprint and give rest to large

managed as "small actual" + "large actual" + "delta to allowed total footprint"
- when allocating from one source or the other, give the delta to the
    active source, but snap back afterwards
- that may not work so great for a gc heap, because small will always consume.
    - but we need to use the memory, and the current max is the amount we
      need to fill before a GC.

Find a way to permanently steal pages from the middle of the heap
    - segment tricks?

Allocate String and char[] in a separate heap?

Maybe avoid growing small heap, even if there's slack?  Look at
live ratio of small heap after a gc; scale it based on that.

=======================================================
=======================================================
=======================================================
*/

typedef struct {
    /* The mspace to allocate from.
     */
    mspace *msp;

    /* The bitmap that keeps track of where objects are in the heap.
     */
    HeapBitmap objectBitmap;

    /* The largest size that this heap is allowed to grow to.
     */
    size_t absoluteMaxSize;

    /* Number of bytes allocated from this mspace for objects,
     * including any overhead.  This value is NOT exact, and
     * should only be used as an input for certain heuristics.
     */
    size_t bytesAllocated;

    /* Number of objects currently allocated from this mspace.
     */
    size_t objectsAllocated;
} Heap;

struct HeapSource {
    /* Target ideal heap utilization ratio; range 1..HEAP_UTILIZATION_MAX
     */
    size_t targetUtilization;

    /* Requested minimum heap size, or zero if there is no minimum.
     */
    size_t minimumSize;

    /* The starting heap size.
     */
    size_t startSize;

    /* The largest that the heap source as a whole is allowed to grow.
     */
    size_t absoluteMaxSize;

    /* The desired max size of the heap source as a whole.
     */
    size_t idealSize;

    /* The maximum number of bytes allowed to be allocated from the
     * active heap before a GC is forced.  This is used to "shrink" the
     * heap in lieu of actual compaction.
     */
    size_t softLimit;

    /* The heaps; heaps[0] is always the active heap,
     * which new objects should be allocated from.
     */
    Heap heaps[HEAP_SOURCE_MAX_HEAP_COUNT];

    /* The current number of heaps.
     */
    size_t numHeaps;

    /* External allocation count.
     */
    size_t externalBytesAllocated;

    /* The maximum number of external bytes that may be allocated.
     */
    size_t externalLimit;

    /* True if zygote mode was active when the HeapSource was created.
     */
    bool sawZygote;
};

#define hs2heap(hs_) (&((hs_)->heaps[0]))

/*
 * Returns true iff a soft limit is in effect for the active heap.
 */
static inline bool
softLimited(const HeapSource *hs)
{
    /* softLimit will be either INT_MAX or the limit for the
     * active mspace.  idealSize can be greater than softLimit
     * if there is more than one heap.  If there is only one
     * heap, a non-INT_MAX softLimit should always be the same
     * as idealSize.
     */
    return hs->softLimit <= hs->idealSize;
}

/*
 * Returns the current footprint of all heaps.  If includeActive
 * is false, don't count the heap at index 0.
 */
static inline size_t
oldHeapOverhead(const HeapSource *hs, bool includeActive)
{
    size_t footprint = 0;
    size_t i;

    if (includeActive) {
        i = 0;
    } else {
        i = 1;
    }
    for (/* i = i */; i < hs->numHeaps; i++) {
//TODO: include size of bitmaps?  If so, don't use bitsLen, listen to .max
        footprint += mspace_footprint(hs->heaps[i].msp);
    }
    return footprint;
}

/*
 * Returns the heap that <ptr> could have come from, or NULL
 * if it could not have come from any heap.
 */
static inline Heap *
ptr2heap(const HeapSource *hs, const void *ptr)
{
    const size_t numHeaps = hs->numHeaps;
    size_t i;

//TODO: unroll this to HEAP_SOURCE_MAX_HEAP_COUNT
    if (ptr != NULL) {
        for (i = 0; i < numHeaps; i++) {
            const Heap *const heap = &hs->heaps[i];

            if (dvmHeapBitmapMayContainObject(&heap->objectBitmap, ptr)) {
                return (Heap *)heap;
            }
        }
    }
    return NULL;
}
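
/* Note that ptr2heap() only answers "which heap's bitmap covers this
 * address?"; it does not prove <ptr> is a live object.  Callers such
 * as dvmHeapSourceContains() go on to test the actual object bit.
 */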

/*
 * Functions to update heapSource->bytesAllocated when an object
 * is allocated or freed.  mspace_usable_size() will give
 * us a much more accurate picture of heap utilization than
 * the requested byte sizes would.
 *
 * These aren't exact, and should not be treated as such.
 */
static inline void
countAllocation(Heap *heap, const void *ptr, bool isObj)
{
    assert(heap->bytesAllocated < mspace_footprint(heap->msp));

    heap->bytesAllocated += mspace_usable_size(heap->msp, ptr) +
            HEAP_SOURCE_CHUNK_OVERHEAD;
    if (isObj) {
        heap->objectsAllocated++;
        dvmHeapBitmapSetObjectBit(&heap->objectBitmap, ptr);
    }

    assert(heap->bytesAllocated < mspace_footprint(heap->msp));
}

static inline void
countFree(Heap *heap, const void *ptr, bool isObj)
{
    size_t delta;

    delta = mspace_usable_size(heap->msp, ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
    assert(delta > 0);
    if (delta < heap->bytesAllocated) {
        heap->bytesAllocated -= delta;
    } else {
        heap->bytesAllocated = 0;
    }
    if (isObj) {
        dvmHeapBitmapClearObjectBit(&heap->objectBitmap, ptr);
        if (heap->objectsAllocated > 0) {
            heap->objectsAllocated--;
        }
    }
}

static HeapSource *gHs = NULL;

static mspace *
createMspace(size_t startSize, size_t absoluteMaxSize, size_t id)
{
    mspace *msp;
    char name[PATH_MAX];

    /* If two ashmem regions have the same name, only one gets
     * the name when looking at the maps.
     */
    snprintf(name, sizeof(name)-1, "dalvik-heap%s/%zd",
        gDvm.zygote ? "/zygote" : "", id);
    name[sizeof(name)-1] = '\0';

    /* Create an unlocked dlmalloc mspace to use as
     * a small-object heap source.
     *
     * We start off reserving startSize/2 bytes but
     * letting the heap grow to startSize.  This saves
     * memory in the case where a process uses even less
     * than the starting size.
     */
    LOGV_HEAP("Creating VM heap of size %zu\n", startSize);
    errno = 0;
    msp = create_contiguous_mspace_with_name(startSize/2,
            absoluteMaxSize, /*locked=*/false, name);
    if (msp != NULL) {
        /* Don't let the heap grow past the starting size without
         * our intervention.
         */
        mspace_set_max_allowed_footprint(msp, startSize);
    } else {
        /* There's no guarantee that errno has meaning when the call
         * fails, but it often does.
         */
        LOGE_HEAP("Can't create VM heap of size (%zu,%zu) (errno=%d)\n",
            startSize/2, absoluteMaxSize, errno);
    }

    return msp;
}
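
/* For example, with the naming scheme above, a second heap created
 * while still in zygote mode would show up in /proc/<pid>/maps as
 * "dalvik-heap/zygote/1".
 */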

static bool
addNewHeap(HeapSource *hs, mspace *msp, size_t mspAbsoluteMaxSize)
{
    Heap heap;

    if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) {
        LOGE("Attempt to create too many heaps (%zd >= %zd)\n",
                hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT);
        dvmAbort();
        return false;
    }

    memset(&heap, 0, sizeof(heap));

    if (msp != NULL) {
        heap.msp = msp;
        heap.absoluteMaxSize = mspAbsoluteMaxSize;
    } else {
        size_t overhead;

        overhead = oldHeapOverhead(hs, true);
        if (overhead + HEAP_MIN_FREE >= hs->absoluteMaxSize) {
            LOGE_HEAP("No room to create any more heaps "
                    "(%zd overhead, %zd max)\n",
                    overhead, hs->absoluteMaxSize);
            return false;
        }
        heap.absoluteMaxSize = hs->absoluteMaxSize - overhead;
        heap.msp = createMspace(HEAP_MIN_FREE, heap.absoluteMaxSize,
                hs->numHeaps);
        if (heap.msp == NULL) {
            return false;
        }
    }
    if (!dvmHeapBitmapInit(&heap.objectBitmap,
                           (void *)ALIGN_DOWN_TO_PAGE_SIZE(heap.msp),
                           heap.absoluteMaxSize,
                           "objects"))
    {
        LOGE_HEAP("Can't create objectBitmap\n");
        goto fail;
    }

    /* Don't let the soon-to-be-old heap grow any further.
     */
    if (hs->numHeaps > 0) {
        mspace *msp = hs->heaps[0].msp;
        mspace_set_max_allowed_footprint(msp, mspace_footprint(msp));
    }

    /* Put the new heap in the list, at heaps[0].
     * Shift existing heaps down.
     */
    memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0]));
    hs->heaps[0] = heap;
    hs->numHeaps++;

    return true;

fail:
    if (msp == NULL) {
        destroy_contiguous_mspace(heap.msp);
    }
    return false;
}

/*
 * Initializes the heap source; must be called before any other
 * dvmHeapSource*() functions.  Returns a GcHeap structure
 * allocated from the heap source.
 */
GcHeap *
dvmHeapSourceStartup(size_t startSize, size_t absoluteMaxSize)
{
    GcHeap *gcHeap;
    HeapSource *hs;
    mspace msp;

    assert(gHs == NULL);
    if (startSize > absoluteMaxSize) {
        LOGE("Bad heap parameters (start=%zd, max=%zd)\n",
           startSize, absoluteMaxSize);
        return NULL;
    }

    /* Create an unlocked dlmalloc mspace to use as
     * the small object heap source.
     */
    msp = createMspace(startSize, absoluteMaxSize, 0);
    if (msp == NULL) {
        return NULL;
    }

    /* Allocate a descriptor from the heap we just created.
     */
    gcHeap = mspace_malloc(msp, sizeof(*gcHeap));
    if (gcHeap == NULL) {
        LOGE_HEAP("Can't allocate heap descriptor\n");
        goto fail;
    }
    memset(gcHeap, 0, sizeof(*gcHeap));

    hs = mspace_malloc(msp, sizeof(*hs));
    if (hs == NULL) {
        LOGE_HEAP("Can't allocate heap source\n");
        goto fail;
    }
    memset(hs, 0, sizeof(*hs));

    hs->targetUtilization = DEFAULT_HEAP_UTILIZATION;
    hs->minimumSize = 0;
    hs->startSize = startSize;
    hs->absoluteMaxSize = absoluteMaxSize;
    hs->idealSize = startSize;
    hs->softLimit = INT_MAX;    // no soft limit at first
    hs->numHeaps = 0;
    hs->sawZygote = gDvm.zygote;
    if (!addNewHeap(hs, msp, absoluteMaxSize)) {
        LOGE_HEAP("Can't add initial heap\n");
        goto fail;
    }

    gcHeap->heapSource = hs;

    countAllocation(hs2heap(hs), gcHeap, false);
    countAllocation(hs2heap(hs), hs, false);

    gHs = hs;
    return gcHeap;

fail:
    destroy_contiguous_mspace(msp);
    return NULL;
}

/*
 * If the HeapSource was created while in zygote mode, this
 * will create a new heap for post-zygote allocations.
 * Having a separate heap should maximize the number of pages
 * that a given app_process shares with the zygote process.
 */
bool
dvmHeapSourceStartupAfterZygote()
{
    HeapSource *hs = gHs; // use a local to avoid the implicit "volatile"

    HS_BOILERPLATE();

    assert(!gDvm.zygote);

    if (hs->sawZygote) {
        /* Create a new heap for post-zygote allocations.
         */
        return addNewHeap(hs, NULL, 0);
    }
    return true;
}

/*
 * This is called while in zygote mode, right before we fork() for the
 * first time.  We create a heap for all future zygote process allocations,
 * in an attempt to avoid touching pages in the zygote heap.  (This would
 * probably be unnecessary if we had a compacting GC -- the source of our
 * troubles is small allocations filling in the gaps from larger ones.)
 */
bool
dvmHeapSourceStartupBeforeFork()
{
    HeapSource *hs = gHs; // use a local to avoid the implicit "volatile"

    HS_BOILERPLATE();

    assert(gDvm.zygote);

    if (!gDvm.newZygoteHeapAllocated) {
        /* Create a new heap for post-fork zygote allocations.  We only
         * try once, even if it fails.
         */
        LOGI("Splitting out new zygote heap\n");
        gDvm.newZygoteHeapAllocated = true;
        return addNewHeap(hs, NULL, 0);
    }
    return true;
}

/*
 * Tears down the heap source and frees any resources associated with it.
 */
void
dvmHeapSourceShutdown(GcHeap *gcHeap)
{
    if (gcHeap != NULL && gcHeap->heapSource != NULL) {
        HeapSource *hs;
        size_t numHeaps;
        size_t i;

        hs = gcHeap->heapSource;
        gHs = NULL;

        /* Cache numHeaps because hs will be invalid after the last
         * heap is freed.
         */
        numHeaps = hs->numHeaps;

        for (i = 0; i < numHeaps; i++) {
            Heap *heap = &hs->heaps[i];

            dvmHeapBitmapDelete(&heap->objectBitmap);
            destroy_contiguous_mspace(heap->msp);
        }
        /* The last heap is the original one, which contains the
         * HeapSource object itself.
         */
    }
}

/*
 * Returns the requested value. If the per-heap stats are requested, fill
 * them as well.
 *
 * Caller must hold the heap lock.
 */
size_t
dvmHeapSourceGetValue(enum HeapSourceValueSpec spec, size_t perHeapStats[],
                      size_t arrayLen)
{
    HeapSource *hs = gHs;
    size_t value = 0;
    size_t total = 0;
    size_t i;

    HS_BOILERPLATE();

    switch (spec) {
    case HS_EXTERNAL_BYTES_ALLOCATED:
        return hs->externalBytesAllocated;
    case HS_EXTERNAL_LIMIT:
        return hs->externalLimit;
    default:
        // look at all heaps.
        ;
    }

    assert(arrayLen >= hs->numHeaps || perHeapStats == NULL);
    for (i = 0; i < hs->numHeaps; i++) {
        Heap *const heap = &hs->heaps[i];

        switch (spec) {
        case HS_FOOTPRINT:
            value = mspace_footprint(heap->msp);
            break;
        case HS_ALLOWED_FOOTPRINT:
            value = mspace_max_allowed_footprint(heap->msp);
            break;
        case HS_BYTES_ALLOCATED:
            value = heap->bytesAllocated;
            break;
        case HS_OBJECTS_ALLOCATED:
            value = heap->objectsAllocated;
            break;
        default:
            // quiet gcc
            break;
        }
        if (perHeapStats) {
            perHeapStats[i] = value;
        }
        total += value;
    }
    return total;
}
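
/* A typical call (a sketch; the caller must hold the heap lock):
 *
 *     size_t perHeap[HEAP_SOURCE_MAX_HEAP_COUNT];
 *     size_t total = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED,
 *             perHeap, HEAP_SOURCE_MAX_HEAP_COUNT);
 *
 * Passing perHeapStats == NULL skips the per-heap breakdown and just
 * returns the sum across heaps.
 */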

/*
 * Writes shallow copies of the currently-used bitmaps into outBitmaps,
 * returning the number of bitmaps written.  Returns <0 if the array
 * was not long enough.
 */
ssize_t
dvmHeapSourceGetObjectBitmaps(HeapBitmap outBitmaps[], size_t maxBitmaps)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    if (maxBitmaps >= hs->numHeaps) {
        size_t i;

        for (i = 0; i < hs->numHeaps; i++) {
            outBitmaps[i] = hs->heaps[i].objectBitmap;
        }
        return i;
    }
    return -1;
}

/*
 * Replaces the object location HeapBitmaps with the elements of
 * <objectBitmaps>.  The elements of <objectBitmaps> are overwritten
 * with shallow copies of the old bitmaps.
 *
 * Returns false if the number of bitmaps doesn't match the number
 * of heaps.
 */
bool
dvmHeapSourceReplaceObjectBitmaps(HeapBitmap objectBitmaps[], size_t nBitmaps)
{
    HeapSource *hs = gHs;
    size_t i;

    HS_BOILERPLATE();

    if (nBitmaps != hs->numHeaps) {
        return false;
    }

    for (i = 0; i < hs->numHeaps; i++) {
        Heap *heap = &hs->heaps[i];
        HeapBitmap swap;

        swap = heap->objectBitmap;
        heap->objectBitmap = objectBitmaps[i];
        objectBitmaps[i] = swap;
    }
    return true;
}

/*
 * Allocates <n> bytes of zeroed data.
 */
void *
dvmHeapSourceAlloc(size_t n)
{
    HeapSource *hs = gHs;
    Heap *heap;
    void *ptr;

    HS_BOILERPLATE();
    heap = hs2heap(hs);

    if (heap->bytesAllocated + n <= hs->softLimit) {
// TODO: allocate large blocks (>64k?) as separate mmap regions so that
//       they don't increase the high-water mark when they're freed.
// TODO: zero out large objects using madvise
        ptr = mspace_calloc(heap->msp, 1, n);
        if (ptr != NULL) {
            countAllocation(heap, ptr, true);
        }
    } else {
        /* This allocation would push us over the soft limit;
         * act as if the heap is full.
         */
        LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation\n",
                FRACTIONAL_MB(hs->softLimit), n);
        ptr = NULL;
    }
    return ptr;
}

/* Remove any hard limits, try to allocate, and shrink back down.
 * Last resort when trying to allocate an object.
 */
static void *
heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n)
{
    void *ptr;
    size_t max;

    /* Grow as much as possible, but don't let the real footprint
     * plus external allocations go over the absolute max.
     */
    max = heap->absoluteMaxSize;
    if (max > hs->externalBytesAllocated) {
        max -= hs->externalBytesAllocated;

        mspace_set_max_allowed_footprint(heap->msp, max);
        ptr = dvmHeapSourceAlloc(n);

        /* Shrink back down as small as possible.  Our caller may
         * readjust max_allowed to a more appropriate value.
         */
        mspace_set_max_allowed_footprint(heap->msp,
                mspace_footprint(heap->msp));
    } else {
        ptr = NULL;
    }

    return ptr;
}

/*
 * Allocates <n> bytes of zeroed data, growing as much as possible
 * if necessary.
 */
void *
dvmHeapSourceAllocAndGrow(size_t n)
{
    HeapSource *hs = gHs;
    Heap *heap;
    void *ptr;
    size_t oldIdealSize;

    HS_BOILERPLATE();
    heap = hs2heap(hs);

    ptr = dvmHeapSourceAlloc(n);
    if (ptr != NULL) {
        return ptr;
    }

    oldIdealSize = hs->idealSize;
    if (softLimited(hs)) {
        /* We're soft-limited.  Try removing the soft limit to
         * see if we can allocate without actually growing.
         */
        hs->softLimit = INT_MAX;
        ptr = dvmHeapSourceAlloc(n);
        if (ptr != NULL) {
            /* Removing the soft limit worked;  fix things up to
             * reflect the new effective ideal size.
             */
            snapIdealFootprint();
            return ptr;
        }
        // softLimit intentionally left at INT_MAX.
    }

    /* We're not soft-limited.  Grow the heap to satisfy the request.
     * If this call fails, no footprints will have changed.
     */
    ptr = heapAllocAndGrow(hs, heap, n);
    if (ptr != NULL) {
        /* The allocation succeeded.  Fix up the ideal size to
         * reflect any footprint modifications that had to happen.
         */
        snapIdealFootprint();
    } else {
        /* We just couldn't do it.  Restore the original ideal size,
         * fixing up softLimit if necessary.
         */
        setIdealFootprint(oldIdealSize);
    }
    return ptr;
}

/*
 * Frees the memory pointed to by <ptr>, which may be NULL.
 */
void
dvmHeapSourceFree(void *ptr)
{
    Heap *heap;

    HS_BOILERPLATE();

    heap = ptr2heap(gHs, ptr);
    if (heap != NULL) {
        countFree(heap, ptr, true);
        /* Only free objects that are in the active heap.
         * Touching old heaps would pull pages into this process.
         */
        if (heap == gHs->heaps) {
            mspace_free(heap->msp, ptr);
        }
    }
}

/*
 * Frees the first numPtrs objects in the ptrs list. The list must
 * contain addresses all in the same mspace, and must be in increasing
 * order. This implies that there are no duplicates, and no entries
 * are NULL.
 */
void
dvmHeapSourceFreeList(size_t numPtrs, void **ptrs)
{
    Heap *heap;

    HS_BOILERPLATE();

    if (numPtrs == 0) {
        return;
    }

    assert(ptrs != NULL);
    assert(*ptrs != NULL);
    heap = ptr2heap(gHs, *ptrs);
    if (heap != NULL) {
        mspace *msp = heap->msp;
        // Calling mspace_free on shared heaps disrupts sharing too
        // much. For heap[0] -- the 'active heap' -- we call
        // mspace_free, but on the other heaps we only do some
        // accounting.
        if (heap == gHs->heaps) {
            // mspace_merge_objects takes two allocated objects, and
            // if the second immediately follows the first, will merge
            // them, returning a larger object occupying the same
            // memory. This is a local operation, and doesn't require
            // dlmalloc to manipulate any freelists. It's pretty
            // inexpensive compared to free().

            // ptrs is an array of objects all in memory order, and if
            // client code has been allocating lots of short-lived
            // objects, this is likely to contain runs of objects all
            // now garbage, and thus highly amenable to this optimization.

            // Unroll the 0th iteration of the loop below:
            // countFree() ptrs[0] and initialize merged.
            assert(ptrs[0] != NULL);
            assert(ptr2heap(gHs, ptrs[0]) == heap);
            countFree(heap, ptrs[0], true);
            void *merged = ptrs[0];

            size_t i;
            for (i = 1; i < numPtrs; i++) {
                assert(merged != NULL);
                assert(ptrs[i] != NULL);
                assert((intptr_t)merged < (intptr_t)ptrs[i]);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], true);
                // Try to merge. If it works, merged now includes the
                // memory of ptrs[i]. If it doesn't, free merged, and
                // see if ptrs[i] starts a new run of adjacent
                // objects to merge.
                if (mspace_merge_objects(msp, merged, ptrs[i]) == NULL) {
                    mspace_free(msp, merged);
                    merged = ptrs[i];
                }
            }
            assert(merged != NULL);
            mspace_free(msp, merged);
        } else {
            // This is not an 'active heap'. Only do the accounting.
            size_t i;
            for (i = 0; i < numPtrs; i++) {
                assert(ptrs[i] != NULL);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], true);
            }
        }
    }
}
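
/* For example, if ptrs[] holds three garbage objects that sit
 * back-to-back in memory, the loop above merges them into one chunk
 * and pays for a single mspace_free() instead of three.
 */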

/*
 * Returns true iff <ptr> was allocated from the heap source.
 */
bool
dvmHeapSourceContains(const void *ptr)
{
    Heap *heap;

    HS_BOILERPLATE();

    heap = ptr2heap(gHs, ptr);
    if (heap != NULL) {
        return dvmHeapBitmapIsObjectBitSet(&heap->objectBitmap, ptr) != 0;
    }
    return false;
}

/*
 * Returns the value of the requested flag.
 */
bool
dvmHeapSourceGetPtrFlag(const void *ptr, enum HeapSourcePtrFlag flag)
{
    if (ptr == NULL) {
        return false;
    }

    if (flag == HS_CONTAINS) {
        return dvmHeapSourceContains(ptr);
    } else if (flag == HS_ALLOCATED_IN_ZYGOTE) {
        HeapSource *hs = gHs;

        HS_BOILERPLATE();

        if (hs->sawZygote) {
            Heap *heap;

            heap = ptr2heap(hs, ptr);
            if (heap != NULL) {
                /* If the object is not in the active heap, we assume that
                 * it was allocated as part of zygote.
                 */
                return heap != hs->heaps;
            }
        }
        /* The pointer is outside of any known heap, or we are not
         * running in zygote mode.
         */
        return false;
    }

    return false;
}

/*
 * Returns the number of usable bytes in an allocated chunk; the size
 * may be larger than the size passed to dvmHeapSourceAlloc().
 */
size_t
dvmHeapSourceChunkSize(const void *ptr)
{
    Heap *heap;

    HS_BOILERPLATE();

    heap = ptr2heap(gHs, ptr);
    if (heap != NULL) {
        return mspace_usable_size(heap->msp, ptr);
    }
    return 0;
}

/*
 * Returns the number of bytes that the heap source has allocated
 * from the system using sbrk/mmap, etc.
 *
 * Caller must hold the heap lock.
 */
size_t
dvmHeapSourceFootprint()
{
    HS_BOILERPLATE();

//TODO: include size of bitmaps?
    return oldHeapOverhead(gHs, true);
}

/*
 * Return the real bytes used by old heaps and external memory
 * plus the soft usage of the current heap.  When a soft limit
 * is in effect, this is effectively what it's compared against
 * (though, in practice, it only looks at the current heap).
 */
static size_t
getSoftFootprint(bool includeActive)
{
    HeapSource *hs = gHs;
    size_t ret;

    HS_BOILERPLATE();

    ret = oldHeapOverhead(hs, false) + hs->externalBytesAllocated;
    if (includeActive) {
        ret += hs->heaps[0].bytesAllocated;
    }

    return ret;
}

/*
 * Gets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.
 */
size_t
dvmHeapSourceGetIdealFootprint()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return hs->idealSize;
}

/*
 * Sets the soft limit, handling any necessary changes to the allowed
 * footprint of the active heap.
 */
static void
setSoftLimit(HeapSource *hs, size_t softLimit)
{
    /* Compare against the actual footprint, rather than the
     * max_allowed, because the heap may not have grown all the
     * way to the allowed size yet.
     */
    mspace *msp = hs->heaps[0].msp;
    size_t currentHeapSize = mspace_footprint(msp);
    if (softLimit < currentHeapSize) {
        /* Don't let the heap grow any more, and impose a soft limit.
         */
        mspace_set_max_allowed_footprint(msp, currentHeapSize);
        hs->softLimit = softLimit;
    } else {
        /* Let the heap grow to the requested max, and remove any
         * soft limit, if set.
         */
        mspace_set_max_allowed_footprint(msp, softLimit);
        hs->softLimit = INT_MAX;
    }
}
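
/* Example: with a 6MB current footprint, setSoftLimit(hs, 4MB) pins
 * max_allowed at 6MB and records softLimit = 4MB, so the size check in
 * dvmHeapSourceAlloc() trips first; a later setSoftLimit(hs, 8MB)
 * raises max_allowed to 8MB and resets softLimit to INT_MAX.
 */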

/*
 * Sets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.  Clamps to the appropriate maximum
 * value.
 */
static void
setIdealFootprint(size_t max)
{
    HeapSource *hs = gHs;
#if DEBUG_HEAP_SOURCE
    HeapSource oldHs = *hs;
    mspace *msp = hs->heaps[0].msp;
    size_t oldAllowedFootprint =
            mspace_max_allowed_footprint(msp);
#endif

    HS_BOILERPLATE();

    if (max > hs->absoluteMaxSize) {
        LOGI_HEAP("Clamp target GC heap from %zd.%03zdMB to %u.%03uMB\n",
                FRACTIONAL_MB(max),
                FRACTIONAL_MB(hs->absoluteMaxSize));
        max = hs->absoluteMaxSize;
    } else if (max < hs->minimumSize) {
        max = hs->minimumSize;
    }

    /* Convert max into a size that applies to the active heap.
     * Old heaps and external allocations will count against the ideal size.
     */
    size_t overhead = getSoftFootprint(false);
    size_t activeMax;
    if (overhead < max) {
        activeMax = max - overhead;
    } else {
        activeMax = 0;
    }

    setSoftLimit(hs, activeMax);
    hs->idealSize = max;

    HSTRACE("IDEAL %zd->%zd (%d), soft %zd->%zd (%d), allowed %zd->%zd (%d), "
            "ext %zd\n",
            oldHs.idealSize, hs->idealSize, hs->idealSize - oldHs.idealSize,
            oldHs.softLimit, hs->softLimit, hs->softLimit - oldHs.softLimit,
            oldAllowedFootprint, mspace_max_allowed_footprint(msp),
            mspace_max_allowed_footprint(msp) - oldAllowedFootprint,
            hs->externalBytesAllocated);
}

/*
 * Make the ideal footprint equal to the current footprint.
 */
static void
snapIdealFootprint()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    setIdealFootprint(getSoftFootprint(true));
}

/*
 * Gets the current ideal heap utilization, represented as a number
 * between zero and one.
 */
float dvmGetTargetHeapUtilization()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return (float)hs->targetUtilization / (float)HEAP_UTILIZATION_MAX;
}

/*
 * Sets the new ideal heap utilization, represented as a number
 * between zero and one.
 */
void dvmSetTargetHeapUtilization(float newTarget)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    /* Clamp it to a reasonable range.
     */
    // TODO: This may need some tuning.
    if (newTarget < 0.2) {
        newTarget = 0.2;
    } else if (newTarget > 0.8) {
        newTarget = 0.8;
    }

    hs->targetUtilization =
            (size_t)(newTarget * (float)HEAP_UTILIZATION_MAX);
    LOGV("Set heap target utilization to %zd/%d (%f)\n",
            hs->targetUtilization, HEAP_UTILIZATION_MAX, newTarget);
}
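
/* Example: dvmSetTargetHeapUtilization(0.5f) stores 0.5 * 1024 == 512,
 * i.e. the collector aims for a heap that is about half live after a
 * GC; requests outside [0.2, 0.8] are clamped before conversion.
 */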

/*
 * If set is true, sets the new minimum heap size to size; always
 * returns the current (or previous) size.  Passing a size of zero
 * removes the current minimum constraint (if present).
 */
size_t
dvmMinimumHeapSize(size_t size, bool set)
{
    HeapSource *hs = gHs;
    size_t oldMinimumSize;

    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
     * heap lock if we're going to look at it.  We also need the
     * lock for the call to setIdealFootprint().
     */
    dvmLockHeap();

    HS_BOILERPLATE();

    oldMinimumSize = hs->minimumSize;

    if (set) {
        /* Don't worry about external allocations right now.
         * setIdealFootprint() will take them into account when
         * minimumSize is used, and it's better to hold onto the
         * intended minimumSize than to clamp it arbitrarily based
         * on the current allocations.
         */
        if (size > hs->absoluteMaxSize) {
            size = hs->absoluteMaxSize;
        }
        hs->minimumSize = size;
        if (size > hs->idealSize) {
            /* Force a snap to the minimum value, which we just set
             * and which setIdealFootprint() will take into consideration.
             */
            setIdealFootprint(hs->idealSize);
        }
        /* Otherwise we'll just keep it in mind the next time
         * setIdealFootprint() is called.
         */
    }

    dvmUnlockHeap();

    return oldMinimumSize;
}

/*
 * Given the size of a live set, returns the ideal heap size given
 * the current target utilization and MIN/MAX values.
 *
 * targetUtilization is in the range 1..HEAP_UTILIZATION_MAX.
 */
static size_t
getUtilizationTarget(const HeapSource *hs,
        size_t liveSize, size_t targetUtilization)
{
    size_t targetSize;

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     */
    targetSize = (liveSize / targetUtilization) * HEAP_UTILIZATION_MAX;

    /* Cap the amount of free space, though, so we don't end up
     * with, e.g., 8MB of free space when the live set size hits 8MB.
     */
    if (targetSize > liveSize + HEAP_IDEAL_FREE) {
        targetSize = liveSize + HEAP_IDEAL_FREE;
    } else if (targetSize < liveSize + HEAP_MIN_FREE) {
        targetSize = liveSize + HEAP_MIN_FREE;
    }
    return targetSize;
}
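
/* Worked example with the defaults (target utilization 512/1024):
 * liveSize = 4MB gives (4MB / 512) * 1024 == 8MB, but that would leave
 * 4MB free, more than HEAP_IDEAL_FREE, so the result is clamped to
 * 4MB + 2MB == 6MB.  A tiny 100KB live set would instead hit the
 * HEAP_MIN_FREE floor and yield 100KB + 512KB.
 */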

/*
 * Given the current contents of the active heap, increase the allowed
 * heap footprint to match the target utilization ratio.  This
 * should only be called immediately after a full mark/sweep.
 */
void dvmHeapSourceGrowForUtilization()
{
    HeapSource *hs = gHs;
    Heap *heap;
    size_t targetHeapSize;
    size_t currentHeapUsed;
    size_t oldIdealSize;
    size_t newHeapMax;
    size_t overhead;

    HS_BOILERPLATE();
    heap = hs2heap(hs);

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     * Note that only the active heap plays any part in this.
     *
     * Avoid letting the old heaps influence the target free size,
     * because they may be full of objects that aren't actually
     * in the working set.  Just look at the allocated size of
     * the current heap.
     */
    currentHeapUsed = heap->bytesAllocated;
#define LET_EXTERNAL_INFLUENCE_UTILIZATION 1
#if LET_EXTERNAL_INFLUENCE_UTILIZATION
    /* This is a hack to deal with the side-effects of moving
     * bitmap data out of the Dalvik heap.  Since the amount
     * of free space after a GC scales with the size of the
     * live set, many apps expected the large free space that
     * appeared along with megabytes' worth of bitmaps.  When
     * the bitmaps were removed, the free size shrank significantly,
     * and apps started GCing constantly.  This makes it so the
     * post-GC free space is the same size it would have been
     * if the bitmaps were still in the Dalvik heap.
     */
    currentHeapUsed += hs->externalBytesAllocated;
#endif
    targetHeapSize =
            getUtilizationTarget(hs, currentHeapUsed, hs->targetUtilization);
#if LET_EXTERNAL_INFLUENCE_UTILIZATION
    currentHeapUsed -= hs->externalBytesAllocated;
    targetHeapSize -= hs->externalBytesAllocated;
#endif

    /* The ideal size includes the old heaps; add overhead so that
     * it can be immediately subtracted again in setIdealFootprint().
     * If the target heap size would exceed the max, setIdealFootprint()
     * will clamp it to a legal value.
     */
    overhead = getSoftFootprint(false);
    oldIdealSize = hs->idealSize;
    setIdealFootprint(targetHeapSize + overhead);

    newHeapMax = mspace_max_allowed_footprint(heap->msp);
    if (softLimited(hs)) {
        LOGD_HEAP("GC old usage %zd.%zd%%; now "
                "%zd.%03zdMB used / %zd.%03zdMB soft max "
                "(%zd.%03zdMB over, "
                "%zd.%03zdMB ext, "
                "%zd.%03zdMB real max)\n",
                FRACTIONAL_PCT(currentHeapUsed, oldIdealSize),
                FRACTIONAL_MB(currentHeapUsed),
                FRACTIONAL_MB(hs->softLimit),
                FRACTIONAL_MB(overhead),
                FRACTIONAL_MB(hs->externalBytesAllocated),
                FRACTIONAL_MB(newHeapMax));
    } else {
        LOGD_HEAP("GC old usage %zd.%zd%%; now "
                "%zd.%03zdMB used / %zd.%03zdMB real max "
                "(%zd.%03zdMB over, "
                "%zd.%03zdMB ext)\n",
                FRACTIONAL_PCT(currentHeapUsed, oldIdealSize),
                FRACTIONAL_MB(currentHeapUsed),
                FRACTIONAL_MB(newHeapMax),
                FRACTIONAL_MB(overhead),
                FRACTIONAL_MB(hs->externalBytesAllocated));
    }
}

/*
 * Return free pages to the system.
 * TODO: move this somewhere else, especially the native heap part.
 */

static void releasePagesInRange(void *start, void *end, void *nbytes)
{
    /* Linux requires that the madvise() start address is page-aligned.
     * We also align the end address.
     */
    start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
    end = (void *)((size_t)end & ~(PAGE_SIZE - 1));
    if (start < end) {
        size_t length = (char *)end - (char *)start;
        madvise(start, length, MADV_DONTNEED);
        *(size_t *)nbytes += length;
    }
}
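
/* Example (4096-byte pages): a free chunk spanning [0x10010, 0x13ff0)
 * covers the whole pages [0x11000, 0x13000), so those two pages are
 * madvised away and *nbytes grows by 8192.
 */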

/*
 * Return unused memory to the system if possible.
 */
void
dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen)
{
    HeapSource *hs = gHs;
    size_t nativeBytes, heapBytes;
    size_t i;

    HS_BOILERPLATE();

    assert(arrayLen >= hs->numHeaps);

    heapBytes = 0;
    for (i = 0; i < hs->numHeaps; i++) {
        Heap *heap = &hs->heaps[i];

        /* Return the wilderness chunk to the system.
         */
        mspace_trim(heap->msp, 0);

        /* Return any whole free pages to the system.
         */
        bytesTrimmed[i] = 0;
        mspace_walk_free_pages(heap->msp, releasePagesInRange,
                               &bytesTrimmed[i]);
        heapBytes += bytesTrimmed[i];
    }

    /* Same for the native heap.
     */
    dlmalloc_trim(0);
    nativeBytes = 0;
    dlmalloc_walk_free_pages(releasePagesInRange, &nativeBytes);

    LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes\n",
            heapBytes, nativeBytes, heapBytes + nativeBytes);
}

/*
 * Walks over the heap source and passes every allocated and
 * free chunk to the callback.
 */
void
dvmHeapSourceWalk(void(*callback)(const void *chunkptr, size_t chunklen,
                                  const void *userptr, size_t userlen,
                                  void *arg),
                  void *arg)
{
    HeapSource *hs = gHs;
    size_t i;

    HS_BOILERPLATE();

    /* Walk the heaps from oldest to newest.
     */
//TODO: do this in address order
    for (i = hs->numHeaps; i > 0; --i) {
        mspace_walk_heap(hs->heaps[i-1].msp, callback, arg);
    }
}

/*
 * Gets the number of heaps available in the heap source.
 *
 * Caller must hold the heap lock, because gHs caches a field
 * in gDvm.gcHeap.
 */
size_t
dvmHeapSourceGetNumHeaps()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return hs->numHeaps;
}


/*
 * External allocation tracking
 *
 * In some situations, memory outside of the heap is tied to the
 * lifetime of objects in the heap.  Since that memory is kept alive
 * by heap objects, it should provide memory pressure that can influence
 * GCs.
 */


static bool
externalAllocPossible(const HeapSource *hs, size_t n)
{
    const Heap *heap;
    size_t currentHeapSize;

    /* Make sure that this allocation is even possible.
     * Don't let the external size plus the actual heap size
     * go over the absolute max.  This essentially treats
     * external allocations as part of the active heap.
     *
     * Note that this will fail "mysteriously" if there's
     * a small softLimit but a large heap footprint.
     */
    heap = hs2heap(hs);
    currentHeapSize = mspace_max_allowed_footprint(heap->msp);
    if (currentHeapSize + hs->externalBytesAllocated + n <=
            heap->absoluteMaxSize)
    {
        return true;
    }
    HSTRACE("externalAllocPossible(): "
            "footprint %zu + extAlloc %zu + n %zu >= max %zu (space for %zu)\n",
            currentHeapSize, hs->externalBytesAllocated, n,
            heap->absoluteMaxSize,
            heap->absoluteMaxSize -
                    (currentHeapSize + hs->externalBytesAllocated));
    return false;
}

#define EXTERNAL_TARGET_UTILIZATION 820  // 80%

/*
 * Tries to update the internal count of externally-allocated memory.
 * If there's enough room for that memory, returns true.  If not, returns
 * false and does not update the count.
 *
 * The caller must ensure externalAllocPossible(hs, n) == true.
 */
static bool
externalAlloc(HeapSource *hs, size_t n, bool grow)
{
    assert(hs->externalLimit >= hs->externalBytesAllocated);

    HSTRACE("externalAlloc(%zd%s)\n", n, grow ? ", grow" : "");
    assert(externalAllocPossible(hs, n));  // The caller must ensure this.

    /* External allocations have their own "free space" that they
     * can allocate from without causing a GC.
     */
    if (hs->externalBytesAllocated + n <= hs->externalLimit) {
        hs->externalBytesAllocated += n;
#if defined(WITH_PROFILER) && PROFILE_EXTERNAL_ALLOCATIONS
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.externalAllocCount++;
            gDvm.allocProf.externalAllocSize += n;
            if (self != NULL) {
                self->allocProf.externalAllocCount++;
                self->allocProf.externalAllocSize += n;
            }
        }
#endif
        return true;
    }
    if (!grow) {
        return false;
    }

    /* GROW */
    hs->externalBytesAllocated += n;
    hs->externalLimit = getUtilizationTarget(hs,
            hs->externalBytesAllocated, EXTERNAL_TARGET_UTILIZATION);
    HSTRACE("EXTERNAL grow limit to %zd\n", hs->externalLimit);
    return true;
}
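
/* Example of the growth path: with 1MB of external bytes tracked and
 * an exhausted limit, getUtilizationTarget() with a 1MB live size and
 * the 820/1024 target computes (1MB / 820) * 1024, about 1.25MB; that
 * falls below the liveSize + HEAP_MIN_FREE floor, so externalLimit
 * becomes 1MB + 512KB = 1.5MB.
 */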

static void
gcForExternalAlloc(bool collectSoftReferences)
{
#ifdef WITH_PROFILER  // even if !PROFILE_EXTERNAL_ALLOCATIONS
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.gcCount++;
        if (self != NULL) {
            self->allocProf.gcCount++;
        }
    }
#endif
    dvmCollectGarbageInternal(collectSoftReferences);
}

/*
 * Updates the internal count of externally-allocated memory.  If there's
 * enough room for that memory, returns true.  If not, returns false and
 * does not update the count.
 *
 * May cause a GC as a side-effect.
 */
bool
dvmTrackExternalAllocation(size_t n)
{
    HeapSource *hs = gHs;
    bool ret = false;

    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
     * heap lock if we're going to look at it.
     */
    dvmLockHeap();

    HS_BOILERPLATE();
    assert(hs->externalLimit >= hs->externalBytesAllocated);

    if (!externalAllocPossible(hs, n)) {
        LOGE_HEAP("%zd-byte external allocation "
                "too large for this process.\n", n);
        goto out;
    }

    /* Try "allocating" using the existing "free space".
     */
    HSTRACE("EXTERNAL alloc %zu (%zu < %zu)\n",
            n, hs->externalBytesAllocated, hs->externalLimit);
    if (externalAlloc(hs, n, false)) {
        ret = true;
        goto out;
    }

    /* The "allocation" failed.  Free up some space by doing
     * a full garbage collection.  This may grow the heap source
     * if the live set is sufficiently large.
     */
    HSTRACE("EXTERNAL alloc %zd: GC 1\n", n);
    gcForExternalAlloc(false);  // don't collect SoftReferences
    if (externalAlloc(hs, n, false)) {
        ret = true;
        goto out;
    }

    /* Even that didn't work;  this is an exceptional state.
     * Try harder, growing the heap source if necessary.
     */
    HSTRACE("EXTERNAL alloc %zd: frag\n", n);
    ret = externalAlloc(hs, n, true);
    dvmHeapSizeChanged();
    if (ret) {
        goto out;
    }

    /* We couldn't even grow enough to satisfy the request.
     * Try one last GC, collecting SoftReferences this time.
     */
    HSTRACE("EXTERNAL alloc %zd: GC 2\n", n);
    gcForExternalAlloc(true);  // collect SoftReferences
    ret = externalAlloc(hs, n, true);
    dvmHeapSizeChanged();
    if (!ret) {
        LOGE_HEAP("Out of external memory on a %zu-byte allocation.\n", n);
    }

#if defined(WITH_PROFILER) && PROFILE_EXTERNAL_ALLOCATIONS
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.failedExternalAllocCount++;
        gDvm.allocProf.failedExternalAllocSize += n;
        if (self != NULL) {
            self->allocProf.failedExternalAllocCount++;
            self->allocProf.failedExternalAllocSize += n;
        }
    }
#endif

out:
    dvmUnlockHeap();

    return ret;
}
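
/* The escalation order above: reuse the existing external headroom,
 * then a plain GC, then grow the external limit, and only as a last
 * resort a GC that also collects SoftReferences.
 */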

/*
 * Reduces the internal count of externally-allocated memory.
 */
void
dvmTrackExternalFree(size_t n)
{
    HeapSource *hs = gHs;
    size_t newExternalLimit;
    size_t oldExternalBytesAllocated;

    HSTRACE("EXTERNAL free %zu (%zu < %zu)\n",
            n, hs->externalBytesAllocated, hs->externalLimit);

    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
     * heap lock if we're going to look at it.
     */
    dvmLockHeap();

    HS_BOILERPLATE();
    assert(hs->externalLimit >= hs->externalBytesAllocated);

    oldExternalBytesAllocated = hs->externalBytesAllocated;
    if (n <= hs->externalBytesAllocated) {
        hs->externalBytesAllocated -= n;
    } else {
        n = hs->externalBytesAllocated;
        hs->externalBytesAllocated = 0;
    }

#if defined(WITH_PROFILER) && PROFILE_EXTERNAL_ALLOCATIONS
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.externalFreeCount++;
        gDvm.allocProf.externalFreeSize += n;
        if (self != NULL) {
            self->allocProf.externalFreeCount++;
            self->allocProf.externalFreeSize += n;
        }
    }
#endif

    /* Shrink as quickly as we can.
     */
    newExternalLimit = getUtilizationTarget(hs,
            hs->externalBytesAllocated, EXTERNAL_TARGET_UTILIZATION);
    if (newExternalLimit < oldExternalBytesAllocated) {
        /* Make sure that the remaining free space is at least
         * big enough to allocate something of the size that was
         * just freed.  This makes it more likely that
         *     externalFree(N); externalAlloc(N);
         * will work without causing a GC.
         */
        HSTRACE("EXTERNAL free preserved %zu extra free bytes\n",
                oldExternalBytesAllocated - newExternalLimit);
        newExternalLimit = oldExternalBytesAllocated;
    }
    if (newExternalLimit < hs->externalLimit) {
        hs->externalLimit = newExternalLimit;
    }

    dvmUnlockHeap();
}

/*
 * Returns the number of externally-allocated bytes being tracked by
 * dvmTrackExternalAllocation/Free().
 */
size_t
dvmGetExternalBytesAllocated()
{
    const HeapSource *hs = gHs;
    size_t ret;

    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
     * heap lock if we're going to look at it.
     */
    dvmLockHeap();
    HS_BOILERPLATE();
    ret = hs->externalBytesAllocated;
    dvmUnlockHeap();

    return ret;
}