1 /*
2  * Copyright (C) 2009 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include <errno.h>
18 #include <limits.h>
19 #include <sys/mman.h>
20 
21 #include "Dalvik.h"
22 #include "alloc/Heap.h"
23 #include "alloc/HeapBitmap.h"
24 #include "alloc/HeapInternal.h"
25 #include "alloc/HeapSource.h"
26 #include "alloc/Verify.h"
27 
28 /*
29  * A "mostly copying", generational, garbage collector.
30  *
31  * TODO: we allocate our own contiguous tract of page frames to back
32  * object allocations.  To cooperate with other heaps active in the
33  * virtual machine we need to move the responsibility of allocating
34  * pages someplace outside of this code.
35  *
36  * The other major data structures that maintain the state of the heap
37  * are the block space table and the block queue.
38  *
39  * The block space table records the state of a block.  We must track
40  * whether a block is:
41  *
42  * - Free or allocated in some space.
43  *
44  * - If the block holds part of a large object allocation, whether the
45  *   block is the initial or a continued block of the allocation.
46  *
47  * - Whether the block is pinned, that is to say whether at least one
48  *   object in the block must remain stationary.  Only needed during a
49  *   GC.
50  *
51  * - Which space the block belongs to.  At present this means
52  *   from-space or to-space.
53  *
54  * The block queue is used during garbage collection.  Unlike Cheney's
55  * algorithm, from-space and to-space are not contiguous.  Therefore,
56  * one cannot maintain the state of the copy with just two pointers.
57  * The block queue exists to thread lists of blocks from the various
58  * spaces together.
59  *
60  * Additionally, we record the free space frontier of the heap, as
61  * well as the address of the first object within a block, which is
62  * required to copy objects following a large object (not currently
63  * implemented).  This is stored in the heap source structure.  This
64  * should be moved elsewhere to support in-line allocations from Java
65  * threads.
66  *
67  * Allocation requests are satisfied by reserving storage from one or
68  * more contiguous blocks.  Objects that are small enough to fit
69  * inside a block are packed together within a block.  Objects that
70  * are larger than a block are allocated from contiguous sequences of
71  * blocks.  When half the available blocks are filled, a garbage
72  * collection occurs.  We "flip" spaces (exchange from- and to-space),
73  * copy live objects into to-space, and perform pointer adjustment.
74  *
75  * Copying is made more complicated by the requirement that some
76  * objects must not be moved.  This property is known as "pinning".
77  * These objects must be dealt with specially.  We use Bartlett's
78  * scheme; blocks containing such objects are grayed (promoted) at the
79  * start of a garbage collection.  By virtue of this trick, tracing
80  * from the roots proceeds as usual but all objects on those pages are
81  * considered promoted and therefore not moved.
82  *
83  * TODO: there is sufficient information within the garbage collector
84  * to implement Attardi's scheme for evacuating unpinned objects from
85  * a page that is otherwise pinned.  This would eliminate false
86  * retention caused by the large pinning granularity.
87  *
88  * We need a scheme for medium and large objects.  Ignore that for
89  * now; we can return to this later.
90  *
91  * Eventually we need to worry about promoting objects out of the
92  * copy-collected heap (tenuring) into a less volatile space.  Copying
93  * may not always be the best policy for such spaces.  We should
94  * consider a variant of mark, sweep, compact.
95  *
96  * The block scheme allows us to use VM page faults to maintain a
97  * write barrier.  Consider having a special leaf state for a page.
98  *
99  * Bibliography:
100  *
101  * C. J. Cheney. 1970. A non-recursive list compacting
102  * algorithm.  CACM 13(11), pp. 677-678.
103  *
104  * Joel F. Bartlett. 1988. Compacting Garbage Collection with
105  * Ambiguous Roots. Digital Equipment Corporation.
106  *
107  * Joel F. Bartlett. 1989. Mostly-Copying Garbage Collection Picks Up
108  * Generations and C++. Digital Equipment Corporation.
109  *
110  * G. May Yip. 1991. Incremental, Generational Mostly-Copying Garbage
111  * Collection in Uncooperative Environments. Digital Equipment
112  * Corporation.
113  *
114  * Giuseppe Attardi, Tito Flagella. 1994. A Customisable Memory
115  * Management Framework. TR-94-010
116  *
117  * Giuseppe Attardi, Tito Flagella, Pietro Iglio. 1998. A customisable
118  * memory management framework for C++. Software -- Practice and
119  * Experience. 28(11), 1143-1183.
120  *
121  */
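/*
 * Roughly, a collection proceeds as follows (see the routines
 * below): flip the spaces, pin the blocks holding objects that must
 * not move, scavenge the roots and then the block queue, process the
 * soft, weak, phantom, and finalizable references, and finally clear
 * from-space.
 */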
122 
123 #define ARRAYSIZE(x) (sizeof(x) / sizeof(x[0]))
124 
125 #if 0
126 #define LOG_ALLOC LOGI
127 #define LOG_PIN LOGI
128 #define LOG_PROM LOGI
129 #define LOG_REF LOGI
130 #define LOG_SCAV LOGI
131 #define LOG_TRAN LOGI
132 #define LOG_VER LOGI
133 #else
134 #define LOG_ALLOC(...) ((void)0)
135 #define LOG_PIN(...) ((void)0)
136 #define LOG_PROM(...) ((void)0)
137 #define LOG_REF(...) ((void)0)
138 #define LOG_SCAV(...) ((void)0)
139 #define LOG_TRAN(...) ((void)0)
140 #define LOG_VER(...) ((void)0)
141 #endif
142 
143 static void enqueueBlock(HeapSource *heapSource, size_t block);
144 static void scavengeReference(Object **obj);
145 static bool toSpaceContains(const void *addr);
146 static bool fromSpaceContains(const void *addr);
147 static size_t sumHeapBitmap(const HeapBitmap *bitmap);
148 static size_t objectSize(const Object *obj);
149 static void scavengeDataObject(Object *obj);
150 static void scavengeBlockQueue();
151 
152 /*
153  * We use 512-byte blocks.
154  */
155 enum { BLOCK_SHIFT = 9 };
156 enum { BLOCK_SIZE = 1 << BLOCK_SHIFT };
157 
158 /*
159  * Space identifiers, stored into the blockSpace array.
160  */
161 enum {
162     BLOCK_FREE = 0,
163     BLOCK_FROM_SPACE = 1,
164     BLOCK_TO_SPACE = 2,
165     BLOCK_CONTINUED = 7
166 };
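/*
 * For example, a large object spanning three blocks in to-space
 * shows up in the block space table as the byte sequence 2, 7, 7
 * (BLOCK_TO_SPACE followed by two BLOCK_CONTINUED entries).
 */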
167 
168 /*
169  * Alignment for all allocations, in bytes.
170  */
171 enum { ALLOC_ALIGNMENT = 8 };
172 
173 /*
174  * Sentinel value for the queue end.
175  */
176 #define QUEUE_TAIL (~(size_t)0)
177 
178 struct HeapSource {
179 
180     /* The base address of backing store. */
181     u1 *blockBase;
182 
183     /* Total number of blocks available for allocation. */
184     size_t totalBlocks;
185     size_t allocBlocks;
186 
187     /*
188      * The scavenger work queue.  Implemented as an array threaded
189      * by block index; blockQueue[block] holds the next queued block.
190      */
191     size_t *blockQueue;
192 
193     /*
194      * Base and limit blocks.  Basically the shifted start address of
195      * the block.  We convert blocks to a relative number when
196      * indexing in the block queue.  TODO: make the block queue base
197      * relative rather than the index into the block queue.
198      */
199     size_t baseBlock, limitBlock;
200 
201     size_t queueHead;
202     size_t queueTail;
203     size_t queueSize;
204 
205     /* The space of each block: free, from-space, to-space, or continued. */
206     char *blockSpace;
207 
208     /* Start of free space in the current block. */
209     u1 *allocPtr;
210     /* Exclusive limit of free space in the current block. */
211     u1 *allocLimit;
212 
213     HeapBitmap allocBits;
214 
215     /*
216      * The starting size of the heap.  This value is the same as the
217      * value provided to the -Xms flag.
218      */
219     size_t minimumSize;
220 
221     /*
222      * The maximum size of the heap.  This value is the same as the
223      * -Xmx flag.
224      */
225     size_t maximumSize;
226 
227     /*
228      * The current, committed size of the heap.  At present, this is
229      * equivalent to the maximumSize.
230      */
231     size_t currentSize;
232 
233     size_t bytesAllocated;
234 };
235 
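/*
 * Alignment helpers.  Both assume n is a power of two: alignDown
 * masks off the low-order bits (x & -n) and alignUp rounds up to the
 * next multiple, e.g. alignUp(13, 8) == 16.
 */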
236 static unsigned long alignDown(unsigned long x, unsigned long n)
237 {
238     return x & -n;
239 }
240 
241 static unsigned long alignUp(unsigned long x, unsigned long n)
242 {
243     return alignDown(x + (n - 1), n);
244 }
245 
246 static void describeBlocks(const HeapSource *heapSource)
247 {
248     for (size_t i = 0; i < heapSource->totalBlocks; ++i) {
249         if ((i % 32) == 0) putchar('\n');
250         printf("%d ", heapSource->blockSpace[i]);
251     }
252     putchar('\n');
253 }
254 
255 /*
256  * Virtual memory interface.
257  */
258 
259 static void *virtualAlloc(size_t length)
260 {
261     int flags = MAP_PRIVATE | MAP_ANONYMOUS;
262     int prot = PROT_READ | PROT_WRITE;
263     void *addr = mmap(NULL, length, prot, flags, -1, 0);
264     if (addr == MAP_FAILED) {
265         LOGE_HEAP("mmap: %s", strerror(errno));
266         addr = NULL;
267     }
268     return addr;
269 }
270 
271 static void virtualFree(void *addr, size_t length)
272 {
273     assert(addr != NULL);
274     assert((uintptr_t)addr % SYSTEM_PAGE_SIZE == 0);
275     int res = munmap(addr, length);
276     if (res == -1) {
277         LOGE_HEAP("munmap: %s", strerror(errno));
278     }
279 }
280 
281 #ifndef NDEBUG
282 static int isValidAddress(const HeapSource *heapSource, const u1 *addr)
283 {
284     size_t block;
285 
286     block = (uintptr_t)addr >> BLOCK_SHIFT;
287     return heapSource->baseBlock <= block &&
288            heapSource->limitBlock > block;
289 }
290 #endif
291 
292 /*
293  * Iterate over the block map looking for a contiguous run of free
294  * blocks.
295  */
296 static void *allocateBlocks(HeapSource *heapSource, size_t blocks)
297 {
298     size_t allocBlocks = heapSource->allocBlocks;
299     size_t totalBlocks = heapSource->totalBlocks;
300     /* Check underflow. */
301     assert(blocks != 0);
302     /* Check overflow. */
303     if (allocBlocks + blocks > totalBlocks / 2) {
304         return NULL;
305     }
306     /* Scan block map. */
307     for (size_t i = 0; i < totalBlocks; ++i) {
308         /* Check fit. */
309         size_t j;
            for (j = 0; j < blocks; ++j) { /* runs over totalBlocks */
310             if (heapSource->blockSpace[i+j] != BLOCK_FREE) {
311                 break;
312             }
313         }
314         /* No fit? */
315         if (j != blocks) {
316             i += j;
317             continue;
318         }
319         /* Fit, allocate. */
320         heapSource->blockSpace[i] = BLOCK_TO_SPACE; /* why to-space? */
321         for (size_t j = 1; j < blocks; ++j) {
322             heapSource->blockSpace[i+j] = BLOCK_CONTINUED;
323         }
324         heapSource->allocBlocks += blocks;
325         void *addr = &heapSource->blockBase[i*BLOCK_SIZE];
326         memset(addr, 0, blocks*BLOCK_SIZE);
327         /* Collecting? */
328         if (heapSource->queueHead != QUEUE_TAIL) {
329             LOG_ALLOC("allocateBlocks allocBlocks=%zu,block#=%zu", heapSource->allocBlocks, i);
330             /*
331              * This allocation was made on behalf of the transporter when it
332              * shaded a white object gray.  We enqueue the block so
333              * the scavenger can further shade the gray objects black.
334              */
335             enqueueBlock(heapSource, i);
336         }
337 
338         return addr;
339     }
340     /* Insufficient space, fail. */
341     LOGE("Insufficient space, %zu blocks, %zu blocks allocated and %zu bytes allocated",
342          heapSource->totalBlocks,
343          heapSource->allocBlocks,
344          heapSource->bytesAllocated);
345     return NULL;
346 }
347 
348 /* Converts an absolute address to a relative block number. */
349 static size_t addressToBlock(const HeapSource *heapSource, const void *addr)
350 {
351     assert(heapSource != NULL);
352     assert(isValidAddress(heapSource, addr));
353     return (((uintptr_t)addr) >> BLOCK_SHIFT) - heapSource->baseBlock;
354 }
355 
356 /* Converts a relative block number to an absolute address. */
357 static u1 *blockToAddress(const HeapSource *heapSource, size_t block)
358 {
359     u1 *addr;
360 
361     addr = (u1 *) (((uintptr_t) heapSource->baseBlock + block) * BLOCK_SIZE);
362     assert(isValidAddress(heapSource, addr));
363     return addr;
364 }
365 
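/*
 * Returns a block to its cleared state: the backing storage is
 * filled with 0xCC (presumably to make use of stale data obvious)
 * and the allocation bits covering the block are cleared.
 */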
366 static void clearBlock(HeapSource *heapSource, size_t block)
367 {
368     assert(heapSource != NULL);
369     assert(block < heapSource->totalBlocks);
370     u1 *addr = heapSource->blockBase + block*BLOCK_SIZE;
371     memset(addr, 0xCC, BLOCK_SIZE);
372     for (size_t i = 0; i < BLOCK_SIZE; i += 8) {
373         dvmHeapBitmapClearObjectBit(&heapSource->allocBits, addr + i);
374     }
375 }
376 
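/*
 * Sweeps from-space: each from-space block, along with any
 * continuation blocks that follow it, is returned to the free state
 * and cleared.
 */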
377 static void clearFromSpace(HeapSource *heapSource)
378 {
379     assert(heapSource != NULL);
380     size_t i = 0;
381     size_t count = 0;
382     while (i < heapSource->totalBlocks) {
383         if (heapSource->blockSpace[i] != BLOCK_FROM_SPACE) {
384             ++i;
385             continue;
386         }
387         heapSource->blockSpace[i] = BLOCK_FREE;
388         clearBlock(heapSource, i);
389         ++i;
390         ++count;
391         while (i < heapSource->totalBlocks &&
392                heapSource->blockSpace[i] == BLOCK_CONTINUED) {
393             heapSource->blockSpace[i] = BLOCK_FREE;
394             clearBlock(heapSource, i);
395             ++i;
396             ++count;
397         }
398     }
399     LOG_SCAV("freed %zu blocks (%zu bytes)", count, count*BLOCK_SIZE);
400 }
401 
402 /*
403  * Appends the given block to the block queue.  The block queue is
404  * processed in-order by the scavenger.
405  */
406 static void enqueueBlock(HeapSource *heapSource, size_t block)
407 {
408     assert(heapSource != NULL);
409     assert(block < heapSource->totalBlocks);
410     if (heapSource->queueHead != QUEUE_TAIL) {
411         heapSource->blockQueue[heapSource->queueTail] = block;
412     } else {
413         heapSource->queueHead = block;
414     }
415     heapSource->blockQueue[block] = QUEUE_TAIL;
416     heapSource->queueTail = block;
417     ++heapSource->queueSize;
418 }
419 
420 /*
421  * Grays all objects within the block corresponding to the given
422  * address.
423  */
424 static void promoteBlockByAddr(HeapSource *heapSource, const void *addr)
425 {
426     size_t block;
427 
428     block = addressToBlock(heapSource, (const u1 *)addr);
429     if (heapSource->blockSpace[block] != BLOCK_TO_SPACE) {
430         // LOG_PROM("promoting block %zu %d @ %p", block, heapSource->blockSpace[block], obj);
431         heapSource->blockSpace[block] = BLOCK_TO_SPACE;
432         enqueueBlock(heapSource, block);
433         /* TODO(cshapiro): count continued blocks?*/
434         heapSource->allocBlocks += 1;
435     } else {
436         // LOG_PROM("NOT promoting block %zu %d @ %p", block, heapSource->blockSpace[block], obj);
437     }
438 }
439 
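/*
 * Creates the heap source: mmaps the backing store for the block
 * heap, derives the base and limit block numbers from it, allocates
 * the block queue, block space table, and allocation bitmap, and
 * primes the allocation pointers with an initial block.
 */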
440 GcHeap *dvmHeapSourceStartup(size_t startSize, size_t absoluteMaxSize)
441 {
442     GcHeap* gcHeap;
443     HeapSource *heapSource;
444 
445     assert(startSize <= absoluteMaxSize);
446 
447     heapSource = malloc(sizeof(*heapSource));
448     assert(heapSource != NULL);
449     memset(heapSource, 0, sizeof(*heapSource));
450 
451     heapSource->minimumSize = alignUp(startSize, BLOCK_SIZE);
452     heapSource->maximumSize = alignUp(absoluteMaxSize, BLOCK_SIZE);
453 
454     heapSource->currentSize = heapSource->maximumSize;
455 
456     /* Allocate underlying storage for blocks. */
457     heapSource->blockBase = virtualAlloc(heapSource->maximumSize);
458     assert(heapSource->blockBase != NULL);
459     heapSource->baseBlock = (uintptr_t) heapSource->blockBase >> BLOCK_SHIFT;
460     heapSource->limitBlock = ((uintptr_t) heapSource->blockBase + heapSource->maximumSize) >> BLOCK_SHIFT;
461 
462     heapSource->allocBlocks = 0;
463     heapSource->totalBlocks = (heapSource->limitBlock - heapSource->baseBlock);
464 
465     assert(heapSource->totalBlocks == heapSource->maximumSize / BLOCK_SIZE);
466 
467     {
468         size_t size = sizeof(heapSource->blockQueue[0]);
469         heapSource->blockQueue = malloc(heapSource->totalBlocks*size);
470         assert(heapSource->blockQueue != NULL);
471         memset(heapSource->blockQueue, 0xCC, heapSource->totalBlocks*size);
472         heapSource->queueHead = QUEUE_TAIL;
473     }
474 
475     /* Byte indicating space residence or free status of block. */
476     {
477         size_t size = sizeof(heapSource->blockSpace[0]);
478         heapSource->blockSpace = malloc(heapSource->totalBlocks*size);
479         assert(heapSource->blockSpace != NULL);
480         memset(heapSource->blockSpace, 0, heapSource->totalBlocks*size);
481     }
482 
483     dvmHeapBitmapInit(&heapSource->allocBits,
484                       heapSource->blockBase,
485                       heapSource->maximumSize,
486                       "blockBase");
487 
488     /* Initialize allocation pointers. */
489     heapSource->allocPtr = allocateBlocks(heapSource, 1);
490     heapSource->allocLimit = heapSource->allocPtr + BLOCK_SIZE;
491 
492     gcHeap = malloc(sizeof(*gcHeap));
493     assert(gcHeap != NULL);
494     memset(gcHeap, 0, sizeof(*gcHeap));
495     gcHeap->heapSource = heapSource;
496 
497     return gcHeap;
498 }
499 
500 /*
501  * Perform any required heap initializations after forking from the
502  * zygote process.  This is a no-op for the time being.  Eventually
503  * this will demarcate the shared region of the heap.
504  */
505 bool dvmHeapSourceStartupAfterZygote()
506 {
507     return true;
508 }
509 
510 bool dvmHeapSourceStartupBeforeFork()
511 {
512     assert(!"implemented");
513     return false;
514 }
515 
516 void dvmHeapSourceShutdown(GcHeap **gcHeap)
517 {
518     if (*gcHeap == NULL || (*gcHeap)->heapSource == NULL)
519         return;
520     free((*gcHeap)->heapSource->blockQueue);
521     free((*gcHeap)->heapSource->blockSpace);
522     virtualFree((*gcHeap)->heapSource->blockBase,
523                 (*gcHeap)->heapSource->maximumSize);
524     free((*gcHeap)->heapSource);
525     (*gcHeap)->heapSource = NULL;
526     free(*gcHeap);
527     *gcHeap = NULL;
528 }
529 
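/*
 * Returns the statistic selected by spec.  When a per-heap array is
 * supplied, the same value is also stored into its first element.
 */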
530 size_t dvmHeapSourceGetValue(HeapSourceValueSpec spec,
531                              size_t perHeapStats[],
532                              size_t arrayLen)
533 {
534     HeapSource *heapSource;
535     size_t value;
536 
537     heapSource = gDvm.gcHeap->heapSource;
538     switch (spec) {
539     case HS_FOOTPRINT:
540         value = heapSource->maximumSize;
541         break;
542     case HS_ALLOWED_FOOTPRINT:
543         value = heapSource->maximumSize;
544         break;
545     case HS_BYTES_ALLOCATED:
546         value = heapSource->bytesAllocated;
547         break;
548     case HS_OBJECTS_ALLOCATED:
549         value = sumHeapBitmap(&heapSource->allocBits);
550         break;
551     default:
552         assert(!"implemented");
553         value = 0;
554     }
555     if (perHeapStats) {
556         *perHeapStats = value;
557     }
558     return value;
559 }
560 
561 /*
562  * Performs a shallow copy of the allocation bitmap into the given
563  * vector of heap bitmaps.
564  */
565 void dvmHeapSourceGetObjectBitmaps(HeapBitmap objBits[], HeapBitmap markBits[],
566                                    size_t numHeaps)
567 {
568     assert(!"implemented");
569 }
570 
571 HeapBitmap *dvmHeapSourceGetLiveBits()
572 {
573     return &gDvm.gcHeap->heapSource->allocBits;
574 }
575 
576 /*
577  * Allocate the specified number of bytes from the heap.  The
578  * allocation cursor points into a block of free storage.  If the
579  * given allocation fits in the remaining space of the block, we
580  * advance the cursor and return a pointer to the free storage.  If
581  * the allocation cannot fit in the current block but is smaller than
582  * a block we request a new block and allocate from it instead.  If
583  * the allocation is larger than a block we must allocate from a span
584  * of contiguous blocks.
585  */
586 void *dvmHeapSourceAlloc(size_t length)
587 {
588     HeapSource *heapSource;
589     unsigned char *addr;
590     size_t aligned, available, blocks;
591 
592     heapSource = gDvm.gcHeap->heapSource;
593     assert(heapSource != NULL);
594     assert(heapSource->allocPtr != NULL);
595     assert(heapSource->allocLimit != NULL);
596 
597     aligned = alignUp(length, ALLOC_ALIGNMENT);
598     available = heapSource->allocLimit - heapSource->allocPtr;
599 
600     /* Try allocating inside the current block. */
601     if (aligned <= available) {
602         addr = heapSource->allocPtr;
603         heapSource->allocPtr += aligned;
604         heapSource->bytesAllocated += aligned;
605         dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
606         return addr;
607     }
608 
609     /* Try allocating in a new block. */
610     if (aligned <= BLOCK_SIZE) {
611         addr =  allocateBlocks(heapSource, 1);
612         if (addr != NULL) {
613             heapSource->allocLimit = addr + BLOCK_SIZE;
614             heapSource->allocPtr = addr + aligned;
615             heapSource->bytesAllocated += aligned;
616             dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
617             /* TODO(cshapiro): pad out the current block. */
618         }
619         return addr;
620     }
621 
622     /* Try allocating in a span of blocks. */
623     blocks = alignUp(aligned, BLOCK_SIZE) / BLOCK_SIZE;
624 
625     addr = allocateBlocks(heapSource, blocks);
626     /* Propagate failure upward. */
627     if (addr != NULL) {
628         heapSource->bytesAllocated += aligned;
629         dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
630         /* TODO(cshapiro): pad out free space in the last block. */
631     }
632     return addr;
633 }
634 
635 void *dvmHeapSourceAllocAndGrow(size_t size)
636 {
637     return dvmHeapSourceAlloc(size);
638 }
639 
640 /* TODO: refactor along with dvmHeapSourceAlloc */
641 void *allocateGray(size_t size)
642 {
643     HeapSource *heapSource;
644     void *addr;
645     size_t block;
646 
647     /* TODO: add a check that we are in a GC. */
648     heapSource = gDvm.gcHeap->heapSource;
649     addr = dvmHeapSourceAlloc(size);
650     assert(addr != NULL);
651     block = addressToBlock(heapSource, (const u1 *)addr);
652     if (heapSource->queueHead == QUEUE_TAIL) {
653         /*
654          * Forcibly append the underlying block to the queue.  This
655          * condition occurs when referents are transported following
656          * the initial trace.
657          */
658         enqueueBlock(heapSource, block);
659         LOG_PROM("forced promoting block %zu %d @ %p", block, heapSource->blockSpace[block], addr);
660     }
661     return addr;
662 }
663 
664 bool dvmHeapSourceContainsAddress(const void *ptr)
665 {
666     HeapSource *heapSource = gDvm.gcHeap->heapSource;
667     return dvmHeapBitmapCoversAddress(&heapSource->allocBits, ptr);
668 }
669 
670 /*
671  * Returns true if the given address is within the heap and points to
672  * the header of a live object.
673  */
674 bool dvmHeapSourceContains(const void *addr)
675 {
676     HeapSource *heapSource;
677     HeapBitmap *bitmap;
678 
679     heapSource = gDvm.gcHeap->heapSource;
680     bitmap = &heapSource->allocBits;
681     if (!dvmHeapBitmapCoversAddress(bitmap, addr)) {
682         return false;
683     } else {
684         return dvmHeapBitmapIsObjectBitSet(bitmap, addr);
685     }
686 }
687 
688 bool dvmHeapSourceGetPtrFlag(const void *ptr, HeapSourcePtrFlag flag)
689 {
690     assert(!"implemented");
691     return false;
692 }
693 
694 size_t dvmHeapSourceChunkSize(const void *ptr)
695 {
696     assert(!"implemented");
697     return 0;
698 }
699 
700 size_t dvmHeapSourceFootprint()
701 {
702     assert(!"implemented");
703     return 0;
704 }
705 
706 /*
707  * Returns the "ideal footprint" which appears to be the number of
708  * bytes currently committed to the heap.  This starts out at the
709  * start size of the heap and grows toward the maximum size.
710  */
711 size_t dvmHeapSourceGetIdealFootprint()
712 {
713     return gDvm.gcHeap->heapSource->currentSize;
714 }
715 
716 float dvmGetTargetHeapUtilization()
717 {
718     return 0.5f;
719 }
720 
721 void dvmSetTargetHeapUtilization(float newTarget)
722 {
723     assert(newTarget > 0.0f && newTarget < 1.0f);
724 }
725 
726 /*
727  * Expands the size of the heap after a collection.  At present we
728  * commit the pages for maximum size of the heap so this routine is
729  * just a no-op.  Eventually, we will either allocate or commit pages
730  * on an as-needed basis.
731  */
732 void dvmHeapSourceGrowForUtilization()
733 {
734     /* do nothing */
735 }
736 
737 void dvmHeapSourceWalk(void (*callback)(const void *chunkptr, size_t chunklen,
738                                         const void *userptr, size_t userlen,
739                                         void *arg),
740                        void *arg)
741 {
742     assert(!"implemented");
743 }
744 
745 size_t dvmHeapSourceGetNumHeaps()
746 {
747     return 1;
748 }
749 
750 bool dvmTrackExternalAllocation(size_t n)
751 {
752     /* do nothing */
753     return true;
754 }
755 
756 void dvmTrackExternalFree(size_t n)
757 {
758     /* do nothing */
759 }
760 
761 size_t dvmGetExternalBytesAllocated()
762 {
763     assert(!"implemented");
764     return 0;
765 }
766 
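/*
 * Flips the spaces for a collection: the block queue and allocation
 * cursor are reset and every to-space block is demoted to from-space
 * (whitened).
 */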
767 void dvmHeapSourceFlip()
768 {
769     HeapSource *heapSource = gDvm.gcHeap->heapSource;
770 
771     /* Reset the block queue. */
772     heapSource->allocBlocks = 0;
773     heapSource->queueSize = 0;
774     heapSource->queueHead = QUEUE_TAIL;
775 
776     /* TODO(cshapiro): pad the current (prev) block. */
777 
778     heapSource->allocPtr = NULL;
779     heapSource->allocLimit = NULL;
780 
781     /* Whiten all allocated blocks. */
782     for (size_t i = 0; i < heapSource->totalBlocks; ++i) {
783         if (heapSource->blockSpace[i] == BLOCK_TO_SPACE) {
784             heapSource->blockSpace[i] = BLOCK_FROM_SPACE;
785         }
786     }
787 }
788 
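/* Reports the allocated, available, and total bytes of the block heap. */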
789 static void room(size_t *alloc, size_t *avail, size_t *total)
790 {
791     HeapSource *heapSource = gDvm.gcHeap->heapSource;
792     *total = heapSource->totalBlocks*BLOCK_SIZE;
793     *alloc = heapSource->allocBlocks*BLOCK_SIZE;
794     *avail = *total - *alloc;
795 }
796 
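/*
 * Returns true if addr falls within a block whose space tag matches
 * the given space.
 */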
797 static bool isSpaceInternal(u1 *addr, int space)
798 {
799     HeapSource *heapSource;
800     u1 *base, *limit;
801     size_t offset;
802     char space2;
803 
804     heapSource = gDvm.gcHeap->heapSource;
805     base = heapSource->blockBase;
806     assert(addr >= base);
807     limit = heapSource->blockBase + heapSource->maximumSize;
808     assert(addr < limit);
809     offset = addr - base;
810     space2 = heapSource->blockSpace[offset >> BLOCK_SHIFT];
811     return space == space2;
812 }
813 
814 static bool fromSpaceContains(const void *addr)
815 {
816     return isSpaceInternal((u1 *)addr, BLOCK_FROM_SPACE);
817 }
818 
819 static bool toSpaceContains(const void *addr)
820 {
821     return isSpaceInternal((u1 *)addr, BLOCK_TO_SPACE);
822 }
823 
824 /*
825  * Notifies the collector that the object at the given address must
826  * remain stationary during the current collection.
827  */
828 static void pinObject(const Object *obj)
829 {
830     promoteBlockByAddr(gDvm.gcHeap->heapSource, obj);
831 }
832 
833 static size_t sumHeapBitmap(const HeapBitmap *bitmap)
834 {
835     size_t sum = 0;
836     for (size_t i = 0; i < bitmap->bitsLen >> 2; ++i) {
837         sum += CLZ(bitmap->bits[i]);
838     }
839     return sum;
840 }
841 
842 /*
843  * Miscellaneous functionality.
844  */
845 
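/*
 * Forwarding pointers are written over the class pointer word of a
 * from-space object with the low bit set, which distinguishes them
 * from a real (aligned) class pointer; see setForward() and
 * scavengeReference().
 */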
846 static int isForward(const void *addr)
847 {
848     return (uintptr_t)addr & 0x1;
849 }
850 
851 static void setForward(const void *toObj, void *fromObj)
852 {
853     *(unsigned long *)fromObj = (uintptr_t)toObj | 0x1;
854 }
855 
856 static void* getForward(const void *fromObj)
857 {
858     return (void *)((uintptr_t)fromObj & ~0x1);
859 }
860 
861 /* Beware, uses the same encoding as forwarding pointers! */
862 static int isPermanentString(const StringObject *obj) {
863     return (uintptr_t)obj & 0x1;
864 }
865 
866 static void* getPermanentString(const StringObject *obj)
867 {
868     return (void *)((uintptr_t)obj & ~0x1);
869 }
870 
871 
872 /*
873  * Scavenging and transporting routines follow.  A transporter grays
874  * an object.  A scavenger blackens an object.  We define these
875  * routines for each fundamental object type.  Dispatch is performed
876  * in scavengeObject.
877  */
878 
879 /*
880  * Class object scavenging.
881  */
882 static void scavengeClassObject(ClassObject *obj)
883 {
884     LOG_SCAV("scavengeClassObject(obj=%p)", obj);
885     assert(obj != NULL);
886     assert(obj->obj.clazz != NULL);
887     assert(obj->obj.clazz->descriptor != NULL);
888     assert(!strcmp(obj->obj.clazz->descriptor, "Ljava/lang/Class;"));
889     assert(obj->descriptor != NULL);
890     LOG_SCAV("scavengeClassObject: descriptor='%s',vtableCount=%zu",
891              obj->descriptor, obj->vtableCount);
892     /* Delegate class object and instance field scavenging. */
893     scavengeDataObject((Object *)obj);
894     /* Scavenge the array element class object. */
895     if (IS_CLASS_FLAG_SET(obj, CLASS_ISARRAY)) {
896         scavengeReference((Object **)(void *)&obj->elementClass);
897     }
898     /* Scavenge the superclass. */
899     scavengeReference((Object **)(void *)&obj->super);
900     /* Scavenge the class loader. */
901     scavengeReference(&obj->classLoader);
902     /* Scavenge static fields. */
903     for (int i = 0; i < obj->sfieldCount; ++i) {
904         char ch = obj->sfields[i].field.signature[0];
905         if (ch == '[' || ch == 'L') {
906             scavengeReference((Object **)(void *)&obj->sfields[i].value.l);
907         }
908     }
909     /* Scavenge interface class objects. */
910     for (int i = 0; i < obj->interfaceCount; ++i) {
911         scavengeReference((Object **) &obj->interfaces[i]);
912     }
913 }
914 
915 /*
916  * Array object scavenging.
917  */
918 static size_t scavengeArrayObject(ArrayObject *array)
919 {
920     LOG_SCAV("scavengeArrayObject(array=%p)", array);
921     /* Scavenge the class object. */
922     assert(toSpaceContains(array));
923     assert(array != NULL);
924     assert(array->obj.clazz != NULL);
925     scavengeReference((Object **) array);
926     size_t length = dvmArrayObjectSize(array);
927     /* Scavenge the array contents. */
928     if (IS_CLASS_FLAG_SET(array->obj.clazz, CLASS_ISOBJECTARRAY)) {
929         Object **contents = (Object **)array->contents;
930         for (size_t i = 0; i < array->length; ++i) {
931             scavengeReference(&contents[i]);
932         }
933     }
934     return length;
935 }
936 
937 /*
938  * Reference object scavenging.
939  */
940 
941 static int getReferenceFlags(const Object *obj)
942 {
943     int flags;
944 
945     flags = CLASS_ISREFERENCE |
946             CLASS_ISWEAKREFERENCE |
947             CLASS_ISPHANTOMREFERENCE;
948     return GET_CLASS_FLAG_GROUP(obj->clazz, flags);
949 }
950 
951 static int isSoftReference(const Object *obj)
952 {
953     return getReferenceFlags(obj) == CLASS_ISREFERENCE;
954 }
955 
956 static int isWeakReference(const Object *obj)
957 {
958     return getReferenceFlags(obj) & CLASS_ISWEAKREFERENCE;
959 }
960 
961 #ifndef NDEBUG
962 static bool isPhantomReference(const Object *obj)
963 {
964     return getReferenceFlags(obj) & CLASS_ISPHANTOMREFERENCE;
965 }
966 #endif
967 
968 /*
969  * Returns true if the reference was registered with a reference queue
970  * but has not yet been appended to it.
971  */
972 static bool isReferenceEnqueuable(const Object *ref)
973 {
974     Object *queue, *queueNext;
975 
976     queue = dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queue);
977     queueNext = dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queueNext);
978     if (queue == NULL || queueNext != NULL) {
979         /*
980          * There is no queue, or the reference has already
981          * been enqueued.  The Reference.enqueue() method
982          * will do nothing even if we call it.
983          */
984         return false;
985     }
986 
987     /*
988      * We need to call enqueue(), but if we called it from
989      * here we'd probably deadlock.  Schedule a call.
990      */
991     return true;
992 }
993 
994 /*
995  * Schedules a reference to be appended to its reference queue.
996  */
997 static void enqueueReference(Object *ref)
998 {
999     assert(ref != NULL);
1000     assert(dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queue) != NULL);
1001     assert(dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queueNext) == NULL);
1002     if (!dvmHeapAddRefToLargeTable(&gDvm.gcHeap->referenceOperations, ref)) {
1003         LOGE("no room for any more reference operations");
1004         dvmAbort();
1005     }
1006 }
1007 
1008 /*
1009  * Sets the referent field of a reference object to NULL.
1010  */
1011 static void clearReference(Object *obj)
1012 {
1013     dvmSetFieldObject(obj, gDvm.offJavaLangRefReference_referent, NULL);
1014 }
1015 
1016 /*
1017  * Clears reference objects with white referents.
1018  */
1019 void clearWhiteReferences(Object **list)
1020 {
1021     size_t referentOffset, queueNextOffset;
1022     bool doSignal;
1023 
1024     queueNextOffset = gDvm.offJavaLangRefReference_queueNext;
1025     referentOffset = gDvm.offJavaLangRefReference_referent;
1026     doSignal = false;
1027     while (*list != NULL) {
1028         Object *ref = *list;
1029         JValue *field = dvmFieldPtr(ref, referentOffset);
1030         Object *referent = field->l;
1031         *list = dvmGetFieldObject(ref, queueNextOffset);
1032         dvmSetFieldObject(ref, queueNextOffset, NULL);
1033         assert(referent != NULL);
1034         if (isForward(referent->clazz)) {
1035             field->l = referent = getForward(referent->clazz);
1036             continue;
1037         }
1038         if (fromSpaceContains(referent)) {
1039             /* Referent is white, clear it. */
1040             clearReference(ref);
1041             if (isReferenceEnqueuable(ref)) {
1042                 enqueueReference(ref);
1043                 doSignal = true;
1044             }
1045         }
1046     }
1047     /*
1048      * If we cleared a reference with a reference queue we must notify
1049      * the heap worker to append the reference.
1050      */
1051     if (doSignal) {
1052         dvmSignalHeapWorker(false);
1053     }
1054     assert(*list == NULL);
1055 }
1056 
1057 /*
1058  * Blackens referents subject to the soft reference preservation
1059  * policy.
1060  */
1061 void preserveSoftReferences(Object **list)
1062 {
1063     Object *ref;
1064     Object *prev, *next;
1065     size_t referentOffset, queueNextOffset;
1066     unsigned counter;
1067     bool white;
1068 
1069     queueNextOffset = gDvm.offJavaLangRefReference_queueNext;
1070     referentOffset = gDvm.offJavaLangRefReference_referent;
1071     counter = 0;
1072     prev = next = NULL;
1073     ref = *list;
1074     while (ref != NULL) {
1075         JValue *field = dvmFieldPtr(ref, referentOffset);
1076         Object *referent = field->l;
1077         next = dvmGetFieldObject(ref, queueNextOffset);
1078         assert(referent != NULL);
1079         if (isForward(referent->clazz)) {
1080             /* Referent is black. */
1081             field->l = referent = getForward(referent->clazz);
1082             white = false;
1083         } else {
1084             white = fromSpaceContains(referent);
1085         }
1086         if (white && ((++counter) & 1)) {
1087             /* Referent is white and biased toward saving, gray it. */
1088             scavengeReference((Object **)(void *)&field->l);
1089             white = false;
1090         }
1091         if (!white) {
1092             /* Referent is black, unlink it. */
1093             if (prev != NULL) {
1094                 dvmSetFieldObject(ref, queueNextOffset, NULL);
1095                 dvmSetFieldObject(prev, queueNextOffset, next);
1096             }
1097         } else {
1098             /* Referent is white, skip over it. */
1099             prev = ref;
1100         }
1101         ref = next;
1102     }
1103     /*
1104      * Restart the trace with the newly gray references added to the
1105      * root set.
1106      */
1107     scavengeBlockQueue();
1108 }
1109 
1110 void processFinalizableReferences()
1111 {
1112     HeapRefTable newPendingRefs;
1113     LargeHeapRefTable *finRefs = gDvm.gcHeap->finalizableRefs;
1114     Object **ref;
1115     Object **lastRef;
1116     size_t totalPendCount;
1117 
1118     /*
1119      * All strongly reachable objects are black.
1120      * Any white finalizable objects need to be finalized.
1121      */
1122 
1123     /* Create a table that the new pending refs will
1124      * be added to.
1125      */
1126     if (!dvmHeapInitHeapRefTable(&newPendingRefs)) {
1127         //TODO: mark all finalizable refs and hope that
1128         //      we can schedule them next time.  Watch out,
1129         //      because we may be expecting to free up space
1130         //      by calling finalizers.
1131         LOG_REF("no room for pending finalizations");
1132         dvmAbort();
1133     }
1134 
1135     /*
1136      * Walk through finalizableRefs and move any white references to
1137      * the list of new pending refs.
1138      */
1139     totalPendCount = 0;
1140     while (finRefs != NULL) {
1141         Object **gapRef;
1142         size_t newPendCount = 0;
1143 
1144         gapRef = ref = finRefs->refs.table;
1145         lastRef = finRefs->refs.nextEntry;
1146         while (ref < lastRef) {
1147             if (fromSpaceContains(*ref)) {
1148                 if (!dvmHeapAddToHeapRefTable(&newPendingRefs, *ref)) {
1149                     //TODO: add the current table and allocate
1150                     //      a new, smaller one.
1151                     LOG_REF("no room for any more pending finalizations: %zd",
1152                             dvmHeapNumHeapRefTableEntries(&newPendingRefs));
1153                     dvmAbort();
1154                 }
1155                 newPendCount++;
1156             } else {
1157                 /* This ref is black, so will remain on finalizableRefs.
1158                  */
1159                 if (newPendCount > 0) {
1160                     /* Copy it up to fill the holes.
1161                      */
1162                     *gapRef++ = *ref;
1163                 } else {
1164                     /* No holes yet; don't bother copying.
1165                      */
1166                     gapRef++;
1167                 }
1168             }
1169             ref++;
1170         }
1171         finRefs->refs.nextEntry = gapRef;
1172         //TODO: if the table is empty when we're done, free it.
1173         totalPendCount += newPendCount;
1174         finRefs = finRefs->next;
1175     }
1176     LOG_REF("%zd finalizers triggered.", totalPendCount);
1177     if (totalPendCount == 0) {
1178         /* No objects required finalization.
1179          * Free the empty temporary table.
1180          */
1181         dvmClearReferenceTable(&newPendingRefs);
1182         return;
1183     }
1184 
1185     /* Add the new pending refs to the main list.
1186      */
1187     if (!dvmHeapAddTableToLargeTable(&gDvm.gcHeap->pendingFinalizationRefs,
1188                 &newPendingRefs))
1189     {
1190         LOG_REF("can't insert new pending finalizations");
1191         dvmAbort();
1192     }
1193 
1194     //TODO: try compacting the main list with a memcpy loop
1195 
1196     /* Blacken the refs we just moved; we don't want them or their
1197      * children to get swept yet.
1198      */
1199     ref = newPendingRefs.table;
1200     lastRef = newPendingRefs.nextEntry;
1201     assert(ref < lastRef);
1202     HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_FINALIZING, 0);
1203     while (ref < lastRef) {
1204         scavengeReference(ref);
1205         ref++;
1206     }
1207     HPROF_CLEAR_GC_SCAN_STATE();
1208     scavengeBlockQueue();
1209     dvmSignalHeapWorker(false);
1210 }
1211 
1212 /*
1213  * If a reference points to from-space and has been forwarded, we snap
1214  * the pointer to its new to-space address.  If the reference points
1215  * to an unforwarded from-space address we must enqueue the reference
1216  * for later processing.  TODO: implement proper reference processing
1217  * and move the referent scavenging elsewhere.
1218  */
1219 static void scavengeReferenceObject(Object *obj)
1220 {
1221     Object *referent;
1222     Object **queue;
1223     size_t referentOffset, queueNextOffset;
1224 
1225     assert(obj != NULL);
1226     LOG_SCAV("scavengeReferenceObject(obj=%p),'%s'", obj, obj->clazz->descriptor);
1227     scavengeDataObject(obj);
1228     referentOffset = gDvm.offJavaLangRefReference_referent;
1229     referent = dvmGetFieldObject(obj, referentOffset);
1230     if (referent == NULL || toSpaceContains(referent)) {
1231         return;
1232     }
1233     if (isSoftReference(obj)) {
1234         queue = &gDvm.gcHeap->softReferences;
1235     } else if (isWeakReference(obj)) {
1236         queue = &gDvm.gcHeap->weakReferences;
1237     } else {
1238         assert(isPhantomReference(obj));
1239         queue = &gDvm.gcHeap->phantomReferences;
1240     }
1241     queueNextOffset = gDvm.offJavaLangRefReference_queueNext;
1242     dvmSetFieldObject(obj, queueNextOffset, *queue);
1243     *queue = obj;
1244     LOG_SCAV("scavengeReferenceObject: enqueueing %p", obj);
1245 }
1246 
1247 /*
1248  * Data object scavenging.
1249  */
1250 static void scavengeDataObject(Object *obj)
1251 {
1252     // LOG_SCAV("scavengeDataObject(obj=%p)", obj);
1253     assert(obj != NULL);
1254     assert(obj->clazz != NULL);
1255     assert(obj->clazz->objectSize != 0);
1256     assert(toSpaceContains(obj));
1257     /* Scavenge the class object. */
1258     ClassObject *clazz = obj->clazz;
1259     scavengeReference((Object **) obj);
1260     /* Scavenge instance fields. */
1261     if (clazz->refOffsets != CLASS_WALK_SUPER) {
1262         size_t refOffsets = clazz->refOffsets;
1263         while (refOffsets != 0) {
1264             size_t rshift = CLZ(refOffsets);
1265             size_t offset = CLASS_OFFSET_FROM_CLZ(rshift);
1266             Object **ref = (Object **)((u1 *)obj + offset);
1267             scavengeReference(ref);
1268             refOffsets &= ~(CLASS_HIGH_BIT >> rshift);
1269         }
1270     } else {
1271         for (; clazz != NULL; clazz = clazz->super) {
1272             InstField *field = clazz->ifields;
1273             for (int i = 0; i < clazz->ifieldRefCount; ++i, ++field) {
1274                 size_t offset = field->byteOffset;
1275                 Object **ref = (Object **)((u1 *)obj + offset);
1276                 scavengeReference(ref);
1277             }
1278         }
1279     }
1280 }
1281 
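/*
 * Copies an object out of from-space into freshly allocated gray
 * storage in to-space.  If the object's hash code has been exposed,
 * an extra word is reserved (and copied when already present) so the
 * hash value survives the move.
 */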
1282 static Object *transportObject(const Object *fromObj)
1283 {
1284     Object *toObj;
1285     size_t allocSize, copySize;
1286 
1287     LOG_TRAN("transportObject(fromObj=%p) allocBlocks=%zu",
1288                   fromObj,
1289                   gDvm.gcHeap->heapSource->allocBlocks);
1290     assert(fromObj != NULL);
1291     assert(fromSpaceContains(fromObj));
1292     allocSize = copySize = objectSize(fromObj);
1293     if (LW_HASH_STATE(fromObj->lock) != LW_HASH_STATE_UNHASHED) {
1294         /*
1295          * The object has been hashed or hashed and moved.  We must
1296          * reserve an additional word for a hash code.
1297          */
1298         allocSize += sizeof(u4);
1299     }
1300     if (LW_HASH_STATE(fromObj->lock) == LW_HASH_STATE_HASHED_AND_MOVED) {
1301         /*
1302          * The object has its hash code allocated.  Ensure the hash
1303          * code is copied along with the instance data.
1304          */
1305         copySize += sizeof(u4);
1306     }
1307     /* TODO(cshapiro): don't copy, re-map large data objects. */
1308     assert(copySize <= allocSize);
1309     toObj = allocateGray(allocSize);
1310     assert(toObj != NULL);
1311     assert(toSpaceContains(toObj));
1312     memcpy(toObj, fromObj, copySize);
1313     if (LW_HASH_STATE(fromObj->lock) == LW_HASH_STATE_HASHED) {
1314         /*
1315          * The object has had its hash code exposed.  Append it to the
1316          * instance and set a bit so we know to look for it there.
1317          */
1318         *(u4 *)(((char *)toObj) + copySize) = (u4)fromObj >> 3;
1319         toObj->lock |= LW_HASH_STATE_HASHED_AND_MOVED << LW_HASH_STATE_SHIFT;
1320     }
1321     LOG_TRAN("transportObject: from %p/%zu to %p/%zu (%zu,%zu) %s",
1322              fromObj, addressToBlock(gDvm.gcHeap->heapSource,fromObj),
1323              toObj, addressToBlock(gDvm.gcHeap->heapSource,toObj),
1324              copySize, allocSize, copySize < allocSize ? "DIFFERENT" : "");
1325     return toObj;
1326 }
1327 
1328 /*
1329  * Generic reference scavenging.
1330  */
1331 
1332 /*
1333  * Given a reference to an object, the scavenge routine will gray the
1334  * reference.  Any objects pointed to by the scavenged object will be
1335  * transported to new space and a forwarding pointer will be installed
1336  * in the header of the object.
1337  */
1338 
1339 /*
1340  * Blacken the given pointer.  If the pointer is in from space, it is
1341  * transported to new space.  If the object has a forwarding pointer
1342  * installed it has already been transported and the referent is
1343  * snapped to the new address.
1344  */
1345 static void scavengeReference(Object **obj)
1346 {
1347     ClassObject *clazz;
1348     Object *fromObj, *toObj;
1349 
1350     assert(obj);
1351 
1352     if (*obj == NULL) return;
1353 
1354     assert(dvmIsValidObject(*obj));
1355 
1356     /* The entire block is black. */
1357     if (toSpaceContains(*obj)) {
1358         LOG_SCAV("scavengeReference skipping pinned object @ %p", *obj);
1359         return;
1360     }
1361     LOG_SCAV("scavengeReference(*obj=%p)", *obj);
1362 
1363     assert(fromSpaceContains(*obj));
1364 
1365     clazz = (*obj)->clazz;
1366 
1367     if (isForward(clazz)) {
1368         // LOG_SCAV("forwarding %p @ %p to %p", *obj, obj, (void *)((uintptr_t)clazz & ~0x1));
1369         *obj = (Object *)getForward(clazz);
1370         return;
1371     }
1372     fromObj = *obj;
1373     if (clazz == NULL) {
1374         // LOG_SCAV("scavengeReference %p has a NULL class object", fromObj);
1375         assert(!"implemented");
1376         toObj = NULL;
1377     } else {
1378         toObj = transportObject(fromObj);
1379     }
1380     setForward(toObj, fromObj);
1381     *obj = (Object *)toObj;
1382 }
1383 
1384 /*
1385  * Generic object scavenging.
1386  */
1387 static void scavengeObject(Object *obj)
1388 {
1389     ClassObject *clazz;
1390 
1391     assert(obj != NULL);
1392     assert(obj->clazz != NULL);
1393     assert(!((uintptr_t)obj->clazz & 0x1));
1394     clazz = obj->clazz;
1395     if (dvmIsTheClassClass(clazz)) {
1396         scavengeClassObject((ClassObject *)obj);
1397     } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
1398         scavengeArrayObject((ArrayObject *)obj);
1399     } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISREFERENCE)) {
1400         scavengeReferenceObject(obj);
1401     } else {
1402         scavengeDataObject(obj);
1403     }
1404 }
1405 
1406 /*
1407  * External root scavenging routines.
1408  */
1409 
1410 static void pinHashTableEntries(HashTable *table)
1411 {
1412     LOG_PIN(">>> pinHashTableEntries(table=%p)", table);
1413     if (table == NULL) {
1414         return;
1415     }
1416     dvmHashTableLock(table);
1417     for (int i = 0; i < table->tableSize; ++i) {
1418         HashEntry *entry = &table->pEntries[i];
1419         void *obj = entry->data;
1420         if (obj == NULL || obj == HASH_TOMBSTONE) {
1421             continue;
1422         }
1423         pinObject(entry->data);
1424     }
1425     dvmHashTableUnlock(table);
1426     LOG_PIN("<<< pinHashTableEntries(table=%p)", table);
1427 }
1428 
1429 static void pinPrimitiveClasses()
1430 {
1431     size_t length = ARRAYSIZE(gDvm.primitiveClass);
1432     for (size_t i = 0; i < length; i++) {
1433         if (gDvm.primitiveClass[i] != NULL) {
1434             pinObject((Object *)gDvm.primitiveClass[i]);
1435         }
1436     }
1437 }
1438 
1439 /*
1440  * Scavenge interned strings.  Permanent interned strings will have
1441  * been pinned and are therefore ignored.  Non-permanent strings that
1442  * have been forwarded are snapped.  All other entries are removed.
1443  */
1444 static void scavengeInternedStrings()
1445 {
1446     HashTable *table = gDvm.internedStrings;
1447     if (table == NULL) {
1448         return;
1449     }
1450     dvmHashTableLock(table);
1451     for (int i = 0; i < table->tableSize; ++i) {
1452         HashEntry *entry = &table->pEntries[i];
1453         Object *obj = (Object *)entry->data;
1454         if (obj == NULL || obj == HASH_TOMBSTONE) {
1455             continue;
1456         } else if (!isPermanentString((StringObject *)obj)) {
1457             // LOG_SCAV("entry->data=%p", entry->data);
1458             LOG_SCAV(">>> string obj=%p", entry->data);
1459             /* TODO(cshapiro): detach white string objects */
1460             scavengeReference((Object **)(void *)&entry->data);
1461             LOG_SCAV("<<< string obj=%p", entry->data);
1462         }
1463     }
1464     dvmHashTableUnlock(table);
1465 }
1466 
1467 static void pinInternedStrings()
1468 {
1469     HashTable *table = gDvm.internedStrings;
1470     if (table == NULL) {
1471         return;
1472     }
1473     dvmHashTableLock(table);
1474     for (int i = 0; i < table->tableSize; ++i) {
1475         HashEntry *entry = &table->pEntries[i];
1476         Object *obj = (Object *)entry->data;
1477         if (obj == NULL || obj == HASH_TOMBSTONE) {
1478             continue;
1479         } else if (isPermanentString((StringObject *)obj)) {
1480             obj = (Object *)getPermanentString((StringObject*)obj);
1481             LOG_PROM(">>> pin string obj=%p", obj);
1482             pinObject(obj);
1483             LOG_PROM("<<< pin string obj=%p", obj);
1484         }
1485      }
1486     dvmHashTableUnlock(table);
1487 }
1488 
1489 /*
1490  * At present, reference tables contain references that must not be
1491  * moved by the collector.  Instead of scavenging each reference in
1492  * the table we pin each referenced object.
1493  */
1494 static void pinReferenceTable(const ReferenceTable *table)
1495 {
1496     assert(table != NULL);
1497     assert(table->table != NULL);
1498     assert(table->nextEntry != NULL);
1499     for (Object **entry = table->table; entry < table->nextEntry; ++entry) {
1500         assert(entry != NULL);
1501         assert(!isForward(*entry));
1502         pinObject(*entry);
1503     }
1504 }
1505 
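/*
 * Scavenges every reference held in a chain of large heap reference
 * tables.
 */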
1506 static void scavengeLargeHeapRefTable(LargeHeapRefTable *table)
1507 {
1508     for (; table != NULL; table = table->next) {
1509         Object **ref = table->refs.table;
1510         for (; ref < table->refs.nextEntry; ++ref) {
1511             scavengeReference(ref);
1512         }
1513     }
1514 }
1515 
1516 /* This code was copied from Thread.c */
1517 static void scavengeThreadStack(Thread *thread)
1518 {
1519     const u4 *framePtr;
1520 #if WITH_EXTRA_GC_CHECKS > 1
1521     bool first = true;
1522 #endif
1523 
1524     framePtr = (const u4 *)thread->interpSave.curFrame;
1525     while (framePtr != NULL) {
1526         const StackSaveArea *saveArea;
1527         const Method *method;
1528 
1529         saveArea = SAVEAREA_FROM_FP(framePtr);
1530         method = saveArea->method;
1531         if (method != NULL && !dvmIsNativeMethod(method)) {
1532 #ifdef COUNT_PRECISE_METHODS
1533             /* the GC is running, so no lock required */
1534             if (dvmPointerSetAddEntry(gDvm.preciseMethods, method))
1535                 LOG_SCAV("PGC: added %s.%s %p",
1536                              method->clazz->descriptor, method->name, method);
1537 #endif
1538 #if WITH_EXTRA_GC_CHECKS > 1
1539             /*
1540              * May also want to enable the memset() in the "invokeMethod"
1541              * goto target in the portable interpreter.  That sets the stack
1542              * to a pattern that makes referring to uninitialized data
1543              * very obvious.
1544              */
1545 
1546             if (first) {
1547                 /*
1548                  * First frame, isn't native, check the "alternate" saved PC
1549                  * as a sanity check.
1550                  *
1551                  * It seems like we could check the second frame if the first
1552                  * is native, since the PCs should be the same.  It turns out
1553                  * this doesn't always work.  The problem is that we could
1554                  * have calls in the sequence:
1555                  *   interp method #2
1556                  *   native method
1557                  *   interp method #1
1558                  *
1559                  * and then GC while in the native method after returning
1560                  * from interp method #2.  The currentPc on the stack is
1561                  * for interp method #1, but thread->currentPc2 is still
1562                  * set for the last thing interp method #2 did.
1563                  *
1564                  * This can also happen in normal execution:
1565                  * - sget-object on not-yet-loaded class
1566                  * - class init updates currentPc2
1567                  * - static field init is handled by parsing annotations;
1568                  *   static String init requires creation of a String object,
1569                  *   which can cause a GC
1570                  *
1571                  * Essentially, any pattern that involves executing
1572                  * interpreted code and then causes an allocation without
1573                  * executing instructions in the original method will hit
1574                  * this.  These are rare enough that the test still has
1575                  * some value.
1576                  */
1577                 if (saveArea->xtra.currentPc != thread->currentPc2) {
1578                     LOGW("PGC: savedPC(%p) != current PC(%p), %s.%s ins=%p",
1579                         saveArea->xtra.currentPc, thread->currentPc2,
1580                         method->clazz->descriptor, method->name, method->insns);
1581                     if (saveArea->xtra.currentPc != NULL)
1582                         LOGE("  pc inst = 0x%04x", *saveArea->xtra.currentPc);
1583                     if (thread->currentPc2 != NULL)
1584                         LOGE("  pc2 inst = 0x%04x", *thread->currentPc2);
1585                     dvmDumpThread(thread, false);
1586                 }
1587             } else {
1588                 /*
1589                  * It's unusual, but not impossible, for a non-first frame
1590                  * to be at something other than a method invocation.  For
1591                  * example, if we do a new-instance on a nonexistent class,
1592                  * we'll have a lot of class loader activity on the stack
1593                  * above the frame with the "new" operation.  Could also
1594                  * happen while we initialize a Throwable when an instruction
1595                  * fails.
1596                  *
1597                  * So there's not much we can do here to verify the PC,
1598                  * except to verify that it's a GC point.
1599                  */
1600             }
1601             assert(saveArea->xtra.currentPc != NULL);
1602 #endif
1603 
1604             const RegisterMap* pMap;
1605             const u1* regVector;
1606 
1607             Method* nonConstMethod = (Method*) method;  // quiet gcc
1608             pMap = dvmGetExpandedRegisterMap(nonConstMethod);
1609 
1610             //LOG_SCAV("PGC: %s.%s", method->clazz->descriptor, method->name);
1611 
1612             if (pMap != NULL) {
1613                 /* found map, get registers for this address */
1614                 int addr = saveArea->xtra.currentPc - method->insns;
1615                 regVector = dvmRegisterMapGetLine(pMap, addr);
1616                 /*
1617                 if (regVector == NULL) {
1618                     LOG_SCAV("PGC: map but no entry for %s.%s addr=0x%04x",
1619                                  method->clazz->descriptor, method->name, addr);
1620                 } else {
1621                     LOG_SCAV("PGC: found map for %s.%s 0x%04x (t=%d)",
1622                                  method->clazz->descriptor, method->name, addr,
1623                                  thread->threadId);
1624                 }
1625                 */
1626             } else {
1627                 /*
1628                  * No map found.  If precise GC is disabled this is
1629                  * expected -- we don't create pointers to the map data even
1630                  * if it's present -- but if it's enabled it means we're
1631                  * unexpectedly falling back on a conservative scan, so it's
1632                  * worth yelling a little.
1633                  */
1634                 if (gDvm.preciseGc) {
1635                     LOG_SCAV("PGC: no map for %s.%s", method->clazz->descriptor, method->name);
1636                 }
1637                 regVector = NULL;
1638             }
1639             if (regVector == NULL) {
1640                 /*
1641                  * There are no roots to scavenge.  Skip over the entire frame.
1642                  */
1643                 framePtr += method->registersSize;
1644             } else {
1645                 /*
1646                  * Precise scan.  v0 is at the lowest address on the
1647                  * interpreted stack, and is the first bit in the register
1648                  * vector, so we can walk through the register map and
1649                  * memory in the same direction.
1650                  *
1651                  * A '1' bit indicates a live reference.
1652                  */
1653                 u2 bits = 1 << 1;
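                /*
                 * Initializing bits to binary 10 makes the first right
                 * shift below yield 1, which forces a byte of the register
                 * vector to be loaded on the first pass through the loop;
                 * the 0x0100 sentinel then triggers a reload after every
                 * eight registers have been consumed.
                 */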
1654                 for (int i = method->registersSize - 1; i >= 0; i--) {
1655                     u4 rval = *framePtr;
1656 
1657                     bits >>= 1;
1658                     if (bits == 1) {
1659                         /* set bit 9 so we can tell when we're empty */
1660                         bits = *regVector++ | 0x0100;
1661                     }
1662 
1663                     if (rval != 0 && (bits & 0x01) != 0) {
1664                         /*
1665                          * Non-null, register marked as live reference.  This
1666                          * should always be a valid object.
1667                          */
1668 #if WITH_EXTRA_GC_CHECKS > 0
1669                         if ((rval & 0x3) != 0 || !dvmIsValidObject((Object*) rval)) {
1670                             /* this is very bad */
1671                             LOGE("PGC: invalid ref in reg %d: 0x%08x",
1672                                 method->registersSize-1 - i, rval);
1673                         } else
1674 #endif
1675                         {
1676 
1677                             // LOG_SCAV("stack reference %u@%p", *framePtr, framePtr);
1678                             /* dvmMarkObjectNonNull((Object *)rval); */
1679                             scavengeReference((Object **) framePtr);
1680                         }
1681                     } else {
1682                         /*
1683                          * Null or non-reference, do nothing at all.
1684                          */
1685 #if WITH_EXTRA_GC_CHECKS > 1
1686                         if (dvmIsValidObject((Object*) rval)) {
1687                             /* this is normal, but we feel chatty */
1688                             LOGD("PGC: ignoring valid ref in reg %d: 0x%08x",
1689                                  method->registersSize-1 - i, rval);
1690                         }
1691 #endif
1692                     }
1693                     ++framePtr;
1694                 }
1695                 dvmReleaseRegisterMapLine(pMap, regVector);
1696             }
1697         }
1698         /* else this is a break frame and there is nothing to gray, or
1699          * this is a native method and the registers are just the "ins",
1700          * copied from various registers in the caller's set.
1701          */
1702 
1703 #if WITH_EXTRA_GC_CHECKS > 1
1704         first = false;
1705 #endif
1706 
1707         /* Don't fall into an infinite loop if things get corrupted.
1708          */
1709         assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
1710                saveArea->prevFrame == NULL);
1711         framePtr = saveArea->prevFrame;
1712     }
1713 }
1714 
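/*
 * Scavenges the roots held directly by a thread: the Thread object,
 * any pending exception, and the references on its interpreted stack.
 */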
1715 static void scavengeThread(Thread *thread)
1716 {
1717     // LOG_SCAV("scavengeThread(thread=%p)", thread);
1718 
1719     // LOG_SCAV("Scavenging threadObj=%p", thread->threadObj);
1720     scavengeReference(&thread->threadObj);
1721 
1722     // LOG_SCAV("Scavenging exception=%p", thread->exception);
1723     scavengeReference(&thread->exception);
1724 
1725     scavengeThreadStack(thread);
1726 }
1727 
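/*
 * Scavenges every thread on the global thread list while holding the
 * thread list lock.
 */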
1728 static void scavengeThreadList()
1729 {
1730     Thread *thread;
1731 
1732     dvmLockThreadList(dvmThreadSelf());
1733     thread = gDvm.threadList;
1734     while (thread) {
1735         scavengeThread(thread);
1736         thread = thread->next;
1737     }
1738     dvmUnlockThreadList();
1739 }
1740 
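/*
 * Walks a thread's interpreted stack and pins objects that must not
 * move: the arguments of native frames and, when no register map is
 * available for an interpreted frame, anything in that frame that
 * looks like a valid object (a conservative scan).
 */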
1741 static void pinThreadStack(const Thread *thread)
1742 {
1743     const u4 *framePtr;
1744     const StackSaveArea *saveArea;
1745     Method *method;
1746     const char *shorty;
1747     Object *obj;
1748 
1749     saveArea = NULL;
1750     framePtr = (const u4 *)thread->interpSave.curFrame;
1751     for (; framePtr != NULL; framePtr = saveArea->prevFrame) {
1752         saveArea = SAVEAREA_FROM_FP(framePtr);
1753         method = (Method *)saveArea->method;
1754         if (method != NULL && dvmIsNativeMethod(method)) {
1755             /*
1756              * This is a native method; pin its arguments.
1757              *
1758              * For purposes of graying references, we don't need to do
1759              * anything here, because all of the native "ins" were copied
1760              * from registers in the caller's stack frame and won't be
1761              * changed (an interpreted method can freely use registers
1762              * with parameters like any other register, but natives don't
1763              * work that way).
1764              *
1765              * However, we need to ensure that references visible to
1766              * native methods don't move around.  We can do a precise scan
1767              * of the arguments by examining the method signature.
1768              */
1769             LOG_PIN("+++ native scan %s.%s",
1770                     method->clazz->descriptor, method->name);
1771             assert(method->registersSize == method->insSize);
1772             if (!dvmIsStaticMethod(method)) {
1773                 /* grab the "this" pointer */
1774                 obj = (Object *)*framePtr++;
1775                 if (obj == NULL) {
1776                     /*
1777                      * This can happen for the "fake" entry frame inserted
1778                      * for threads created outside the VM.  There's no actual
1779                      * call so there's no object.  If we changed the fake
1780                      * entry method to be declared "static" then this
1781                      * situation should never occur.
1782                      */
1783                 } else {
1784                     assert(dvmIsValidObject(obj));
1785                     pinObject(obj);
1786                 }
1787             }
1788             shorty = method->shorty+1;      // skip return value
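            /*
             * The shorty lists the return type first, then one character
             * per argument ('L' for any reference, 'D'/'J' for wide
             * values).  For example, a method (ILjava/lang/String;D)V has
             * the shorty "VILD": we skip 'V', pin nothing for 'I', pin the
             * reference for 'L', and skip two stack slots for 'D'.
             */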
1789             for (int i = method->registersSize - 1; i >= 0; i--, framePtr++) {
1790                 switch (*shorty++) {
1791                 case 'L':
1792                     obj = (Object *)*framePtr;
1793                     if (obj != NULL) {
1794                         assert(dvmIsValidObject(obj));
1795                         pinObject(obj);
1796                     }
1797                     break;
1798                 case 'D':
1799                 case 'J':
1800                     framePtr++;
1801                     break;
1802                 default:
1803                     /* 32-bit non-reference value */
1804                     obj = (Object *)*framePtr;          // debug, remove
1805                     if (dvmIsValidObject(obj)) {        // debug, remove
1806                         /* if we see a lot of these, our scan might be off */
1807                         LOG_PIN("+++ did NOT pin obj %p", obj);
1808                     }
1809                     break;
1810                 }
1811             }
1812         } else if (method != NULL && !dvmIsNativeMethod(method)) {
1813             const RegisterMap* pMap = dvmGetExpandedRegisterMap(method);
1814             const u1* regVector = NULL;
1815 
1816             LOGI("conservative : %s.%s", method->clazz->descriptor, method->name);
1817 
1818             if (pMap != NULL) {
1819                 int addr = saveArea->xtra.currentPc - method->insns;
1820                 regVector = dvmRegisterMapGetLine(pMap, addr);
1821             }
1822             if (regVector == NULL) {
1823                 /*
1824                  * No register info for this frame, conservatively pin.
1825                  */
1826                 for (int i = 0; i < method->registersSize; ++i) {
1827                     u4 regValue = framePtr[i];
1828                     if (regValue != 0 && (regValue & 0x3) == 0 && dvmIsValidObject((Object *)regValue)) {
1829                         pinObject((Object *)regValue);
1830                     }
1831                 }
1832             }
1833         }
1834         /*
1835          * Don't fall into an infinite loop if things get corrupted.
1836          */
1837         assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
1838                saveArea->prevFrame == NULL);
1839     }
1840 }
1841 
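/*
 * Pins everything directly reachable from a single thread: native
 * stack arguments, the internal and JNI local reference tables, and,
 * if present, the JNI monitor reference table.
 */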
1842 static void pinThread(const Thread *thread)
1843 {
1844     assert(thread != NULL);
1845     LOG_PIN("pinThread(thread=%p)", thread);
1846 
1847     LOG_PIN("Pin native method arguments");
1848     pinThreadStack(thread);
1849 
1850     LOG_PIN("Pin internalLocalRefTable");
1851     pinReferenceTable(&thread->internalLocalRefTable);
1852 
1853     LOG_PIN("Pin jniLocalRefTable");
1854     pinReferenceTable(&thread->jniLocalRefTable);
1855 
1856     /* Can the check be pushed into the promote routine? */
1857     if (thread->jniMonitorRefTable.table) {
1858         LOG_PIN("Pin jniMonitorRefTable");
1859         pinReferenceTable(&thread->jniMonitorRefTable);
1860     }
1861 }
1862 
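/*
 * Pins the roots of every thread on the global thread list while
 * holding the thread list lock.
 */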
1863 static void pinThreadList()
1864 {
1865     Thread *thread;
1866 
1867     dvmLockThreadList(dvmThreadSelf());
1868     thread = gDvm.threadList;
1869     while (thread) {
1870         pinThread(thread);
1871         thread = thread->next;
1872     }
1873     dvmUnlockThreadList();
1874 }
1875 
1876 /*
1877  * Heap block scavenging.
1878  */
1879 
1880 /*
1881  * Scavenge objects in the current block.  Scavenging terminates when
1882  * the pointer reaches the highest address in the block or when a run
1883  * of zero words that continues to the highest address is reached.
1884  */
1885 static void scavengeBlock(HeapSource *heapSource, size_t block)
1886 {
1887     u1 *cursor;
1888     u1 *end;
1889     size_t size;
1890 
1891     LOG_SCAV("scavengeBlock(heapSource=%p,block=%zu)", heapSource, block);
1892 
1893     assert(heapSource != NULL);
1894     assert(block < heapSource->totalBlocks);
1895     assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);
1896 
1897     cursor = blockToAddress(heapSource, block);
1898     end = cursor + BLOCK_SIZE;
1899     LOG_SCAV("scavengeBlock start=%p, end=%p", cursor, end);
1900 
1901     /* Parse and scavenge the current block. */
1902     size = 0;
1903     while (cursor < end) {
1904         u4 word = *(u4 *)cursor;
1905         if (word != 0) {
1906             scavengeObject((Object *)cursor);
1907             size = objectSize((Object *)cursor);
1908             size = alignUp(size, ALLOC_ALIGNMENT);
1909             cursor += size;
1910         } else {
1911             /* Check for padding. */
1912             while (*(u4 *)cursor == 0) {
1913                 cursor += 4;
1914                 if (cursor == end) break;
1915             }
1916             /* Punt if something went wrong. */
1917             assert(cursor == end);
1918         }
1919     }
1920 }
1921 
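/*
 * Returns the size of an object in bytes, handling class objects and
 * arrays specially and adding a word for an object whose hash code was
 * stashed after it when it was moved.
 */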
1922 static size_t objectSize(const Object *obj)
1923 {
1924     size_t size;
1925 
1926     assert(obj != NULL);
1927     assert(obj->clazz != NULL);
1928     if (obj->clazz == gDvm.classJavaLangClass) {
1929         size = dvmClassObjectSize((ClassObject *)obj);
1930     } else if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) {
1931         size = dvmArrayObjectSize((ArrayObject *)obj);
1932     } else {
1933         assert(obj->clazz->objectSize != 0);
1934         size = obj->clazz->objectSize;
1935     }
1936     if (LW_HASH_STATE(obj->lock) == LW_HASH_STATE_HASHED_AND_MOVED) {
1937         size += sizeof(u4);
1938     }
1939     return size;
1940 }
1941 
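/*
 * Verifies each object in a to-space block.  The walk mirrors
 * scavengeBlock: a zero word is assumed to begin the padding that runs
 * to the end of the block.
 */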
1942 static void verifyBlock(HeapSource *heapSource, size_t block)
1943 {
1944     u1 *cursor;
1945     u1 *end;
1946     size_t size;
1947 
1948     // LOG_VER("verifyBlock(heapSource=%p,block=%zu)", heapSource, block);
1949 
1950     assert(heapSource != NULL);
1951     assert(block < heapSource->totalBlocks);
1952     assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);
1953 
1954     cursor = blockToAddress(heapSource, block);
1955     end = cursor + BLOCK_SIZE;
1956     // LOG_VER("verifyBlock start=%p, end=%p", cursor, end);
1957 
1958     /* Parse and verify the current block. */
1959     size = 0;
1960     while (cursor < end) {
1961         u4 word = *(u4 *)cursor;
1962         if (word != 0) {
1963             dvmVerifyObject((Object *)cursor);
1964             size = objectSize((Object *)cursor);
1965             size = alignUp(size, ALLOC_ALIGNMENT);
1966             cursor += size;
1967         } else {
1968             /* Check for padding. */
1969             while (*(u4 *)cursor == 0) {
1970                 cursor += 4;
1971                 if (cursor == end) break;
1972             }
1973             /* Punt if something went wrong. */
1974             assert(cursor == end);
1975         }
1976     }
1977 }
1978 
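/*
 * Logs the length and contents of the block queue for debugging.
 */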
1979 static void describeBlockQueue(const HeapSource *heapSource)
1980 {
1981     size_t block, count;
1982     char space;
1983 
1984     block = heapSource->queueHead;
1985     count = 0;
1986     LOG_SCAV(">>> describeBlockQueue(heapSource=%p)", heapSource);
1987     /* Count the number of blocks enqueued. */
1988     while (block != QUEUE_TAIL) {
1989         block = heapSource->blockQueue[block];
1990         ++count;
1991     }
1992     LOG_SCAV("blockQueue %zu elements, enqueued %zu",
1993                  count, heapSource->queueSize);
1994     block = heapSource->queueHead;
1995     while (block != QUEUE_TAIL) {
1996         space = heapSource->blockSpace[block];
1997         LOG_SCAV("block=%zu@%p,space=%d", block, blockToAddress(heapSource,block), space);
1998         block = heapSource->blockQueue[block];
1999     }
2000 
2001     LOG_SCAV("<<< describeBlockQueue(heapSource=%p)", heapSource);
2002 }
2003 
2004 /*
2005  * Blackens promoted objects.
2006  */
2007 static void scavengeBlockQueue()
2008 {
2009     HeapSource *heapSource;
2010     size_t block;
2011 
2012     LOG_SCAV(">>> scavengeBlockQueue()");
2013     heapSource = gDvm.gcHeap->heapSource;
2014     describeBlockQueue(heapSource);
2015     while (heapSource->queueHead != QUEUE_TAIL) {
2016         block = heapSource->queueHead;
2017         LOG_SCAV("Dequeueing block %zu", block);
2018         scavengeBlock(heapSource, block);
2019         heapSource->queueHead = heapSource->blockQueue[block];
2020         LOG_SCAV("New queue head is %zu", heapSource->queueHead);
2021     }
2022     LOG_SCAV("<<< scavengeBlockQueue()");
2023 }
2024 
2025 /*
2026  * Scan the block list and verify all blocks that are marked as being
2027  * in new space.  This should be parametrized so we can invoke this
2028  * routine outside of the context of a collection.
2029  */
2030 static void verifyNewSpace()
2031 {
2032     HeapSource *heapSource = gDvm.gcHeap->heapSource;
2033     size_t c0 = 0, c1 = 0, c2 = 0, c7 = 0;
2034     for (size_t i = 0; i < heapSource->totalBlocks; ++i) {
2035         switch (heapSource->blockSpace[i]) {
2036         case BLOCK_FREE: ++c0; break;
2037         case BLOCK_TO_SPACE: ++c1; break;
2038         case BLOCK_FROM_SPACE: ++c2; break;
2039         case BLOCK_CONTINUED: ++c7; break;
2040         default: assert(!"reached");
2041         }
2042     }
2043     LOG_VER("Block Demographics: "
2044             "Free=%zu,ToSpace=%zu,FromSpace=%zu,Continued=%zu",
2045             c0, c1, c2, c7);
2046     for (size_t i = 0; i < heapSource->totalBlocks; ++i) {
2047         if (heapSource->blockSpace[i] != BLOCK_TO_SPACE) {
2048             continue;
2049         }
2050         verifyBlock(heapSource, i);
2051     }
2052 }
2053 
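/*
 * Dumps the state of every block in the heap.  Exposed (non-static) so
 * it can be invoked externally, e.g. from a debugger.
 */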
2054 void describeHeap()
2055 {
2056     HeapSource *heapSource = gDvm.gcHeap->heapSource;
2057     describeBlocks(heapSource);
2058 }
2059 
2060 /*
2061  * The collection interface.  Collection has a few distinct phases.
2062  * The first is flipping (also known as condemning or whitening) the
2063  * heap.  The second is promoting all objects pointed to by pinned or
2064  * ambiguous references.  The third is tracing from the stacks,
2065  * registers, and various globals.  Lastly, a verification of the
2066  * heap is performed; this last phase should be optional.
2067  */
2068 void dvmScavengeRoots()  /* Needs a new name badly */
2069 {
2070     GcHeap *gcHeap;
2071 
2072     {
2073         size_t alloc, unused, total;
2074 
2075         room(&alloc, &unused, &total);
2076         LOG_SCAV("BEFORE GC: %zu alloc, %zu free, %zu total.",
2077                      alloc, unused, total);
2078     }
2079 
2080     gcHeap = gDvm.gcHeap;
2081     dvmHeapSourceFlip();
2082 
2083     /*
2084      * Promote blocks with stationary objects.
2085      */
2086     pinThreadList();
2087     pinReferenceTable(&gDvm.jniGlobalRefTable);
2088     pinReferenceTable(&gDvm.jniPinRefTable);
2089     pinHashTableEntries(gDvm.loadedClasses);
2090     pinHashTableEntries(gDvm.dbgRegistry);
2091     pinPrimitiveClasses();
2092     pinInternedStrings();
2093 
2094     // describeBlocks(gcHeap->heapSource);
2095 
2096     /*
2097      * Create the first open new-space page right here.
2098      */
2099 
2100     /* Reset allocation to an unallocated block. */
2101     gDvm.gcHeap->heapSource->allocPtr = allocateBlocks(gDvm.gcHeap->heapSource, 1);
2102     gDvm.gcHeap->heapSource->allocLimit = gDvm.gcHeap->heapSource->allocPtr + BLOCK_SIZE;
2103     /*
2104      * Hack: promote the empty block allocated above.  If the
2105      * promotions that occurred above did not actually gray any
2106      * objects, the block queue may be empty.  We must force a
2107      * promotion to be safe.
2108      */
2109     promoteBlockByAddr(gDvm.gcHeap->heapSource, gDvm.gcHeap->heapSource->allocPtr);
2110 
2111     /*
2112      * Scavenge blocks and relocate movable objects.
2113      */
2114 
2115     LOG_SCAV("Scavenging gDvm.threadList");
2116     scavengeThreadList();
2117 
2118     LOG_SCAV("Scavenging gDvm.gcHeap->referenceOperations");
2119     scavengeLargeHeapRefTable(gcHeap->referenceOperations);
2120 
2121     LOG_SCAV("Scavenging gDvm.gcHeap->pendingFinalizationRefs");
2122     scavengeLargeHeapRefTable(gcHeap->pendingFinalizationRefs);
2123 
2124     LOG_SCAV("Scavenging random global stuff");
2125     scavengeReference(&gDvm.outOfMemoryObj);
2126     scavengeReference(&gDvm.internalErrorObj);
2127     scavengeReference(&gDvm.noClassDefFoundErrorObj);
2128 
2129     // LOG_SCAV("Scavenging gDvm.internedString");
2130     scavengeInternedStrings();
2131 
2132     LOG_SCAV("Root scavenge has completed.");
2133 
2134     scavengeBlockQueue();
2135 
2136     // LOG_SCAV("Re-snap global class pointers.");
2137     // scavengeGlobals();
2138 
2139     LOG_SCAV("New space scavenge has completed.");
2140 
2141     /*
2142      * Process reference objects in strength order.
2143      */
2144 
2145     LOG_REF("Processing soft references...");
2146     preserveSoftReferences(&gDvm.gcHeap->softReferences);
2147     clearWhiteReferences(&gDvm.gcHeap->softReferences);
2148 
2149     LOG_REF("Processing weak references...");
2150     clearWhiteReferences(&gDvm.gcHeap->weakReferences);
2151 
2152     LOG_REF("Finding finalizations...");
2153     processFinalizableReferences();
2154 
2155     LOG_REF("Processing f-reachable soft references...");
2156     clearWhiteReferences(&gDvm.gcHeap->softReferences);
2157 
2158     LOG_REF("Processing f-reachable weak references...");
2159     clearWhiteReferences(&gDvm.gcHeap->weakReferences);
2160 
2161     LOG_REF("Processing phantom references...");
2162     clearWhiteReferences(&gDvm.gcHeap->phantomReferences);
2163 
2164     /*
2165      * Verify the stack and heap.
2166      */
2167     dvmVerifyRoots();
2168     verifyNewSpace();
2169 
2170     //describeBlocks(gcHeap->heapSource);
2171 
2172     clearFromSpace(gcHeap->heapSource);
2173 
2174     {
2175         size_t alloc, rem, total;
2176 
2177         room(&alloc, &rem, &total);
2178         LOG_SCAV("AFTER GC: %zu alloc, %zu free, %zu total.", alloc, rem, total);
2179     }
2180 }
2181 
2182 /*
2183  * Interface compatibility routines.
2184  */
2185 
2186 void dvmClearWhiteRefs(Object **list)
2187 {
2188     /* do nothing */
2189     assert(*list == NULL);
2190 }
2191 
2192 void dvmHandleSoftRefs(Object **list)
2193 {
2194     /* do nothing */
2195     assert(*list == NULL);
2196 }
2197 
2198 bool dvmHeapBeginMarkStep(GcMode mode)
2199 {
2200     /* do nothing */
2201     return true;
2202 }
2203 
2204 void dvmHeapFinishMarkStep()
2205 {
2206     /* do nothing */
2207 }
2208 
2209 void dvmHeapMarkRootSet()
2210 {
2211     /* do nothing */
2212 }
2213 
2214 void dvmHeapScanMarkedObjects()
2215 {
2216     dvmScavengeRoots();
2217 }
2218 
2219 void dvmHeapScheduleFinalizations()
2220 {
2221     /* do nothing */
2222 }
2223 
2224 void dvmHeapSweepUnmarkedObjects(GcMode mode, int *numFreed, size_t *sizeFreed)
2225 {
2226     *numFreed = 0;
2227     *sizeFreed = 0;
2228     /* do nothing */
2229 }
2230 
2231 void dvmMarkDirtyObjects()
2232 {
2233     assert(!"implemented");
2234 }
2235 
2236 void dvmHeapSourceThreadShutdown()
2237 {
2238     /* do nothing */
2239 }
2240