1 /*
2 * Copyright (C) 2009 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <errno.h>
18 #include <limits.h>
19 #include <sys/mman.h>
20
21 #include "Dalvik.h"
22 #include "alloc/Heap.h"
23 #include "alloc/HeapBitmap.h"
24 #include "alloc/HeapInternal.h"
25 #include "alloc/HeapSource.h"
26 #include "alloc/Verify.h"
27 #include "alloc/clz.h"
28
29 /*
30 * A "mostly copying", generational, garbage collector.
31 *
32 * TODO: we allocate our own contiguous tract of page frames to back
33 * object allocations. To cooperate with other heaps active in the
34 * virtual machine we need to move the responsibility of allocating
35 * pages someplace outside of this code.
36 *
37 * The other major data structures that maintain the state of the heap
38 * are the block space table and the block queue.
39 *
40 * The block space table records the state of a block. We must track
41 * whether a block is:
42 *
43 * - Free or allocated in some space.
44 *
45 * - If the block holds part of a large object allocation, whether the
46 * block is the initial or a continued block of the allocation.
47 *
48 * - Whether the block is pinned, that is to say whether at least one
49 * object in the block must remain stationary. Only needed during a
50 * GC.
51 *
52 * - Which space the object belongs to. At present this means
53 * from-space or to-space.
54 *
55 * The block queue is used during garbage collection. Unlike Cheney's
56 * algorithm, from-space and to-space are not contiguous. Therefore,
57 * one cannot maintain the state of the copy with just two pointers.
58 * The block queue exists to thread lists of blocks from the various
59 * spaces together.
60 *
61 * Additionally, we record the free space frontier of the heap, as
62 * well as the address of the first object within a block, which is
63 * required to copy objects following a large object (not currently
64 * implemented). This is stored in the heap source structure. This
65 * should be moved elsewhere to support in-line allocations from Java
66 * threads.
67 *
68 * Allocation requests are satisfied by reserving storage from one or
69 * more contiguous blocks. Objects that are small enough to fit
70 * inside a block are packed together within a block. Objects that
71 * are larger than a block are allocated from contiguous sequences of
72 * blocks. When half the available blocks are filled, a garbage
73 * collection occurs. We "flip" spaces (exchange from- and to-space),
* copy live objects into to-space, and perform pointer adjustment.
75 *
76 * Copying is made more complicated by the requirement that some
77 * objects must not be moved. This property is known as "pinning".
78 * These objects must be dealt with specially. We use Bartlett's
79 * scheme; blocks containing such objects are grayed (promoted) at the
80 * start of a garbage collection. By virtue of this trick, tracing
81 * from the roots proceeds as usual but all objects on those pages are
82 * considered promoted and therefore not moved.
83 *
84 * TODO: there is sufficient information within the garbage collector
85 * to implement Attardi's scheme for evacuating unpinned objects from
86 * a page that is otherwise pinned. This would eliminate false
87 * retention caused by the large pinning granularity.
88 *
* We need a scheme for medium and large objects. Ignore that for
* now; we can return to this later.
91 *
92 * Eventually we need to worry about promoting objects out of the
93 * copy-collected heap (tenuring) into a less volatile space. Copying
94 * may not always be the best policy for such spaces. We should
95 * consider a variant of mark, sweep, compact.
96 *
97 * The block scheme allows us to use VM page faults to maintain a
98 * write barrier. Consider having a special leaf state for a page.
99 *
100 * Bibliography:
101 *
* C. J. Cheney. 1970. A non-recursive list compacting
* algorithm. CACM 13(11), pp. 677-678.
104 *
105 * Joel F. Bartlett. 1988. Compacting Garbage Collection with
106 * Ambiguous Roots. Digital Equipment Corporation.
107 *
108 * Joel F. Bartlett. 1989. Mostly-Copying Garbage Collection Picks Up
109 * Generations and C++. Digital Equipment Corporation.
110 *
111 * G. May Yip. 1991. Incremental, Generational Mostly-Copying Garbage
112 * Collection in Uncooperative Environments. Digital Equipment
113 * Corporation.
114 *
115 * Giuseppe Attardi, Tito Flagella. 1994. A Customisable Memory
116 * Management Framework. TR-94-010
117 *
118 * Giuseppe Attardi, Tito Flagella, Pietro Iglio. 1998. A customisable
119 * memory management framework for C++. Software -- Practice and
120 * Experience. 28(11), 1143-1183.
121 *
122 */
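
/*
 * Illustrative sketch (not part of the collector): with 512-byte
 * blocks, the block space table for a heap holding a few small
 * objects plus one ~1.5 KB object might look like this during a
 * collection:
 *
 *   block index:  0  1  2  3  4  ...
 *   block state:  1  2  7  7  0  ...
 *
 * Block 0 is from-space (1); block 1 is to-space (2) and holds the
 * start of the large object; blocks 2 and 3 are its continuation
 * blocks (7); block 4 onward is free (0). Pinning an object in a
 * from-space block simply relabels that block to-space, so nothing
 * in it is moved.
 */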
123
124 #define ARRAYSIZE(x) (sizeof(x) / sizeof(x[0]))
125
126 #if 0
127 #define LOG_ALLOC LOGI
128 #define LOG_PIN LOGI
129 #define LOG_PROM LOGI
130 #define LOG_REF LOGI
131 #define LOG_SCAV LOGI
132 #define LOG_TRAN LOGI
133 #define LOG_VER LOGI
134 #else
135 #define LOG_ALLOC(...) ((void)0)
136 #define LOG_PIN(...) ((void)0)
137 #define LOG_PROM(...) ((void)0)
138 #define LOG_REF(...) ((void)0)
139 #define LOG_SCAV(...) ((void)0)
140 #define LOG_TRAN(...) ((void)0)
141 #define LOG_VER(...) ((void)0)
142 #endif
143
144 static void enqueueBlock(HeapSource *heapSource, size_t block);
145 static void scavengeReference(Object **obj);
146 static bool toSpaceContains(const void *addr);
147 static bool fromSpaceContains(const void *addr);
148 static size_t sumHeapBitmap(const HeapBitmap *bitmap);
149 static size_t objectSize(const Object *obj);
150 static void scavengeDataObject(Object *obj);
151 static void scavengeBlockQueue(void);
152
153 /*
154 * We use 512-byte blocks.
155 */
156 enum { BLOCK_SHIFT = 9 };
157 enum { BLOCK_SIZE = 1 << BLOCK_SHIFT };
158
159 /*
160 * Space identifiers, stored into the blockSpace array.
161 */
162 enum {
163 BLOCK_FREE = 0,
164 BLOCK_FROM_SPACE = 1,
165 BLOCK_TO_SPACE = 2,
166 BLOCK_CONTINUED = 7
167 };
168
169 /*
170 * Alignment for all allocations, in bytes.
171 */
172 enum { ALLOC_ALIGNMENT = 8 };
173
174 /*
175 * Sentinel value for the queue end.
176 */
177 #define QUEUE_TAIL (~(size_t)0)
178
179 struct HeapSource {
180
181 /* The base address of backing store. */
182 u1 *blockBase;
183
184 /* Total number of blocks available for allocation. */
185 size_t totalBlocks;
186 size_t allocBlocks;
187
188 /*
* The scavenger work queue, implemented as a linked list of block
* indices threaded through this array: each entry holds the index of
* the next block in the queue.
191 */
192 size_t *blockQueue;
193
194 /*
* Base and limit block numbers: the start and end addresses of the
* backing store, shifted right by BLOCK_SHIFT. Absolute block
* numbers are converted to base-relative numbers when indexing into
* the block queue. TODO: make the block queue store base-relative
* numbers directly.
199 */
200 size_t baseBlock, limitBlock;
201
202 size_t queueHead;
203 size_t queueTail;
204 size_t queueSize;
205
/* Per-block space identifier: 0 (free), 1 (from-space), 2 (to-space), 7 (continued). */
207 char *blockSpace;
208
209 /* Start of free space in the current block. */
210 u1 *allocPtr;
211 /* Exclusive limit of free space in the current block. */
212 u1 *allocLimit;
213
214 HeapBitmap allocBits;
215
216 /*
217 * The starting size of the heap. This value is the same as the
218 * value provided to the -Xms flag.
219 */
220 size_t minimumSize;
221
222 /*
223 * The maximum size of the heap. This value is the same as the
224 * -Xmx flag.
225 */
226 size_t maximumSize;
227
228 /*
229 * The current, committed size of the heap. At present, this is
230 * equivalent to the maximumSize.
231 */
232 size_t currentSize;
233
234 size_t bytesAllocated;
235 };
236
static unsigned long alignDown(unsigned long x, unsigned long n)
238 {
239 return x & -n;
240 }
241
static unsigned long alignUp(unsigned long x, unsigned long n)
243 {
244 return alignDown(x + (n - 1), n);
245 }
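
/*
 * Worked example (illustrative only): both helpers assume n is a
 * power of two, since alignDown masks with the two's complement -n:
 *
 *   alignDown(13, 8) == 8    (13 & ~7)
 *   alignUp(13, 8)   == 16   ((13 + 7) & ~7)
 *   alignUp(16, 8)   == 16   (aligned values are unchanged)
 */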
246
static void describeBlocks(const HeapSource *heapSource)
248 {
249 size_t i;
250
251 for (i = 0; i < heapSource->totalBlocks; ++i) {
252 if ((i % 32) == 0) putchar('\n');
253 printf("%d ", heapSource->blockSpace[i]);
254 }
255 putchar('\n');
256 }
257
258 /*
259 * Virtual memory interface.
260 */
261
static void *virtualAlloc(size_t length)
263 {
264 void *addr;
265 int flags, prot;
266
267 flags = MAP_PRIVATE | MAP_ANONYMOUS;
268 prot = PROT_READ | PROT_WRITE;
269 addr = mmap(NULL, length, prot, flags, -1, 0);
270 if (addr == MAP_FAILED) {
271 LOGE_HEAP("mmap: %s", strerror(errno));
272 addr = NULL;
273 }
274 return addr;
275 }
276
static void virtualFree(void *addr, size_t length)
278 {
279 int res;
280
281 assert(addr != NULL);
282 assert((uintptr_t)addr % SYSTEM_PAGE_SIZE == 0);
283 res = munmap(addr, length);
284 if (res == -1) {
285 LOGE_HEAP("munmap: %s", strerror(errno));
286 }
287 }
288
289 #ifndef NDEBUG
static int isValidAddress(const HeapSource *heapSource, const u1 *addr)
291 {
292 size_t block;
293
294 block = (uintptr_t)addr >> BLOCK_SHIFT;
295 return heapSource->baseBlock <= block &&
296 heapSource->limitBlock > block;
297 }
298 #endif
299
300 /*
301 * Iterate over the block map looking for a contiguous run of free
302 * blocks.
303 */
static void *allocateBlocks(HeapSource *heapSource, size_t blocks)
305 {
306 void *addr;
307 size_t allocBlocks, totalBlocks;
308 size_t i, j;
309
310 allocBlocks = heapSource->allocBlocks;
311 totalBlocks = heapSource->totalBlocks;
312 /* Check underflow. */
313 assert(blocks != 0);
314 /* Check overflow. */
315 if (allocBlocks + blocks > totalBlocks / 2) {
316 return NULL;
317 }
318 /* Scan block map. */
319 for (i = 0; i < totalBlocks; ++i) {
320 /* Check fit. */
321 for (j = 0; j < blocks; ++j) { /* runs over totalBlocks */
322 if (heapSource->blockSpace[i+j] != BLOCK_FREE) {
323 break;
324 }
325 }
326 /* No fit? */
327 if (j != blocks) {
328 i += j;
329 continue;
330 }
331 /* Fit, allocate. */
332 heapSource->blockSpace[i] = BLOCK_TO_SPACE; /* why to-space? */
333 for (j = 1; j < blocks; ++j) {
334 heapSource->blockSpace[i+j] = BLOCK_CONTINUED;
335 }
336 heapSource->allocBlocks += blocks;
337 addr = &heapSource->blockBase[i*BLOCK_SIZE];
338 memset(addr, 0, blocks*BLOCK_SIZE);
339 /* Collecting? */
340 if (heapSource->queueHead != QUEUE_TAIL) {
341 LOG_ALLOC("allocateBlocks allocBlocks=%zu,block#=%zu", heapSource->allocBlocks, i);
342 /*
* This allocation was on behalf of the transporter when it
344 * shaded a white object gray. We enqueue the block so
345 * the scavenger can further shade the gray objects black.
346 */
347 enqueueBlock(heapSource, i);
348 }
349
350 return addr;
351 }
352 /* Insufficient space, fail. */
353 LOGE("Insufficient space, %zu blocks, %zu blocks allocated and %zu bytes allocated",
354 heapSource->totalBlocks,
355 heapSource->allocBlocks,
356 heapSource->bytesAllocated);
357 return NULL;
358 }
359
360 /* Converts an absolute address to a relative block number. */
static size_t addressToBlock(const HeapSource *heapSource, const void *addr)
362 {
363 assert(heapSource != NULL);
364 assert(isValidAddress(heapSource, addr));
365 return (((uintptr_t)addr) >> BLOCK_SHIFT) - heapSource->baseBlock;
366 }
367
368 /* Converts a relative block number to an absolute address. */
static u1 *blockToAddress(const HeapSource *heapSource, size_t block)
370 {
371 u1 *addr;
372
373 addr = (u1 *) (((uintptr_t) heapSource->baseBlock + block) * BLOCK_SIZE);
374 assert(isValidAddress(heapSource, addr));
375 return addr;
376 }
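
/*
 * Illustrative round trip (addresses are hypothetical): if the
 * backing store starts at 0x40000000, baseBlock is
 * 0x40000000 >> 9 == 0x200000. An object at 0x40000a10 lies in
 * absolute block 0x200005, i.e. relative block 5, and
 * blockToAddress(heapSource, 5) recovers the block's start address,
 * 0x40000a00.
 */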
377
static void clearBlock(HeapSource *heapSource, size_t block)
379 {
380 u1 *addr;
381 size_t i;
382
383 assert(heapSource != NULL);
384 assert(block < heapSource->totalBlocks);
385 addr = heapSource->blockBase + block*BLOCK_SIZE;
386 memset(addr, 0xCC, BLOCK_SIZE);
387 for (i = 0; i < BLOCK_SIZE; i += 8) {
388 dvmHeapBitmapClearObjectBit(&heapSource->allocBits, addr + i);
389 }
390 }
391
static void clearFromSpace(HeapSource *heapSource)
393 {
394 size_t i, count;
395
396 assert(heapSource != NULL);
397 i = count = 0;
398 while (i < heapSource->totalBlocks) {
399 if (heapSource->blockSpace[i] != BLOCK_FROM_SPACE) {
400 ++i;
401 continue;
402 }
403 heapSource->blockSpace[i] = BLOCK_FREE;
404 clearBlock(heapSource, i);
405 ++i;
406 ++count;
407 while (i < heapSource->totalBlocks &&
408 heapSource->blockSpace[i] == BLOCK_CONTINUED) {
409 heapSource->blockSpace[i] = BLOCK_FREE;
410 clearBlock(heapSource, i);
411 ++i;
412 ++count;
413 }
414 }
415 LOG_SCAV("freed %zu blocks (%zu bytes)", count, count*BLOCK_SIZE);
416 }
417
418 /*
419 * Appends the given block to the block queue. The block queue is
420 * processed in-order by the scavenger.
421 */
static void enqueueBlock(HeapSource *heapSource, size_t block)
423 {
424 assert(heapSource != NULL);
425 assert(block < heapSource->totalBlocks);
426 if (heapSource->queueHead != QUEUE_TAIL) {
427 heapSource->blockQueue[heapSource->queueTail] = block;
428 } else {
429 heapSource->queueHead = block;
430 }
431 heapSource->blockQueue[block] = QUEUE_TAIL;
432 heapSource->queueTail = block;
433 ++heapSource->queueSize;
434 }
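
/*
 * Queue encoding sketch (illustrative): the queue is a singly linked
 * list threaded through blockQueue[], indexed by relative block
 * number. After enqueueing blocks 4, 9, and 2 in that order:
 *
 *   queueHead == 4, queueTail == 2, queueSize == 3
 *   blockQueue[4] == 9, blockQueue[9] == 2, blockQueue[2] == QUEUE_TAIL
 *
 * The scavenger starts at queueHead and follows blockQueue[] links
 * until it reads the QUEUE_TAIL sentinel.
 */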
435
436 /*
437 * Grays all objects within the block corresponding to the given
438 * address.
439 */
static void promoteBlockByAddr(HeapSource *heapSource, const void *addr)
441 {
442 size_t block;
443
444 block = addressToBlock(heapSource, (const u1 *)addr);
445 if (heapSource->blockSpace[block] != BLOCK_TO_SPACE) {
446 // LOG_PROM("promoting block %zu %d @ %p", block, heapSource->blockSpace[block], obj);
447 heapSource->blockSpace[block] = BLOCK_TO_SPACE;
448 enqueueBlock(heapSource, block);
449 /* TODO(cshapiro): count continued blocks?*/
450 heapSource->allocBlocks += 1;
451 } else {
452 // LOG_PROM("NOT promoting block %zu %d @ %p", block, heapSource->blockSpace[block], obj);
453 }
454 }
455
GcHeap *dvmHeapSourceStartup(size_t startSize, size_t absoluteMaxSize)
457 {
458 GcHeap* gcHeap;
459 HeapSource *heapSource;
460
461 assert(startSize <= absoluteMaxSize);
462
463 heapSource = malloc(sizeof(*heapSource));
464 assert(heapSource != NULL);
465 memset(heapSource, 0, sizeof(*heapSource));
466
467 heapSource->minimumSize = alignUp(startSize, BLOCK_SIZE);
468 heapSource->maximumSize = alignUp(absoluteMaxSize, BLOCK_SIZE);
469
470 heapSource->currentSize = heapSource->maximumSize;
471
472 /* Allocate underlying storage for blocks. */
473 heapSource->blockBase = virtualAlloc(heapSource->maximumSize);
474 assert(heapSource->blockBase != NULL);
475 heapSource->baseBlock = (uintptr_t) heapSource->blockBase >> BLOCK_SHIFT;
476 heapSource->limitBlock = ((uintptr_t) heapSource->blockBase + heapSource->maximumSize) >> BLOCK_SHIFT;
477
478 heapSource->allocBlocks = 0;
479 heapSource->totalBlocks = (heapSource->limitBlock - heapSource->baseBlock);
480
    assert(heapSource->totalBlocks == heapSource->maximumSize / BLOCK_SIZE);
482
483 {
484 size_t size = sizeof(heapSource->blockQueue[0]);
485 heapSource->blockQueue = malloc(heapSource->totalBlocks*size);
486 assert(heapSource->blockQueue != NULL);
487 memset(heapSource->blockQueue, 0xCC, heapSource->totalBlocks*size);
488 heapSource->queueHead = QUEUE_TAIL;
489 }
490
491 /* Byte indicating space residence or free status of block. */
492 {
493 size_t size = sizeof(heapSource->blockSpace[0]);
494 heapSource->blockSpace = malloc(heapSource->totalBlocks*size);
495 assert(heapSource->blockSpace != NULL);
496 memset(heapSource->blockSpace, 0, heapSource->totalBlocks*size);
497 }
498
499 dvmHeapBitmapInit(&heapSource->allocBits,
500 heapSource->blockBase,
501 heapSource->maximumSize,
502 "blockBase");
503
504 /* Initialize allocation pointers. */
505 heapSource->allocPtr = allocateBlocks(heapSource, 1);
506 heapSource->allocLimit = heapSource->allocPtr + BLOCK_SIZE;
507
508 gcHeap = malloc(sizeof(*gcHeap));
509 assert(gcHeap != NULL);
510 memset(gcHeap, 0, sizeof(*gcHeap));
511 gcHeap->heapSource = heapSource;
512
513 return gcHeap;
514 }
515
516 /*
517 * Perform any required heap initializations after forking from the
518 * zygote process. This is a no-op for the time being. Eventually
519 * this will demarcate the shared region of the heap.
520 */
bool dvmHeapSourceStartupAfterZygote(void)
522 {
523 return true;
524 }
525
bool dvmHeapSourceStartupBeforeFork(void)
527 {
528 assert(!"implemented");
529 return false;
530 }
531
void dvmHeapSourceShutdown(GcHeap **gcHeap)
533 {
534 if (*gcHeap == NULL || (*gcHeap)->heapSource == NULL)
535 return;
536 free((*gcHeap)->heapSource->blockQueue);
537 free((*gcHeap)->heapSource->blockSpace);
538 virtualFree((*gcHeap)->heapSource->blockBase,
539 (*gcHeap)->heapSource->maximumSize);
540 free((*gcHeap)->heapSource);
541 (*gcHeap)->heapSource = NULL;
542 free(*gcHeap);
543 *gcHeap = NULL;
544 }
545
size_t dvmHeapSourceGetValue(enum HeapSourceValueSpec spec,
547 size_t perHeapStats[],
548 size_t arrayLen)
549 {
550 HeapSource *heapSource;
551 size_t value;
552
553 heapSource = gDvm.gcHeap->heapSource;
554 switch (spec) {
555 case HS_EXTERNAL_BYTES_ALLOCATED:
556 value = 0;
557 break;
558 case HS_EXTERNAL_LIMIT:
559 value = 0;
560 break;
561 case HS_FOOTPRINT:
562 value = heapSource->maximumSize;
563 break;
564 case HS_ALLOWED_FOOTPRINT:
565 value = heapSource->maximumSize;
566 break;
567 case HS_BYTES_ALLOCATED:
568 value = heapSource->bytesAllocated;
569 break;
570 case HS_OBJECTS_ALLOCATED:
571 value = sumHeapBitmap(&heapSource->allocBits);
572 break;
573 default:
574 assert(!"implemented");
575 value = 0;
576 }
577 if (perHeapStats) {
578 *perHeapStats = value;
579 }
580 return value;
581 }
582
583 /*
584 * Performs a shallow copy of the allocation bitmap into the given
585 * vector of heap bitmaps.
586 */
void dvmHeapSourceGetObjectBitmaps(HeapBitmap objBits[], HeapBitmap markBits[],
588 size_t numHeaps)
589 {
590 assert(!"implemented");
591 }
592
HeapBitmap *dvmHeapSourceGetLiveBits(void)
594 {
595 return &gDvm.gcHeap->heapSource->allocBits;
596 }
597
598 /*
599 * Allocate the specified number of bytes from the heap. The
600 * allocation cursor points into a block of free storage. If the
601 * given allocation fits in the remaining space of the block, we
602 * advance the cursor and return a pointer to the free storage. If
603 * the allocation cannot fit in the current block but is smaller than
604 * a block we request a new block and allocate from it instead. If
605 * the allocation is larger than a block we must allocate from a span
606 * of contiguous blocks.
607 */
void *dvmHeapSourceAlloc(size_t length)
609 {
610 HeapSource *heapSource;
611 unsigned char *addr;
612 size_t aligned, available, blocks;
613
614 heapSource = gDvm.gcHeap->heapSource;
615 assert(heapSource != NULL);
616 assert(heapSource->allocPtr != NULL);
617 assert(heapSource->allocLimit != NULL);
618
619 aligned = alignUp(length, ALLOC_ALIGNMENT);
620 available = heapSource->allocLimit - heapSource->allocPtr;
621
622 /* Try allocating inside the current block. */
623 if (aligned <= available) {
624 addr = heapSource->allocPtr;
625 heapSource->allocPtr += aligned;
626 heapSource->bytesAllocated += aligned;
627 dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
628 return addr;
629 }
630
631 /* Try allocating in a new block. */
632 if (aligned <= BLOCK_SIZE) {
633 addr = allocateBlocks(heapSource, 1);
634 if (addr != NULL) {
635 heapSource->allocLimit = addr + BLOCK_SIZE;
636 heapSource->allocPtr = addr + aligned;
637 heapSource->bytesAllocated += aligned;
638 dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
639 /* TODO(cshapiro): pad out the current block. */
640 }
641 return addr;
642 }
643
644 /* Try allocating in a span of blocks. */
645 blocks = alignUp(aligned, BLOCK_SIZE) / BLOCK_SIZE;
646
647 addr = allocateBlocks(heapSource, blocks);
648 /* Propagate failure upward. */
649 if (addr != NULL) {
650 heapSource->bytesAllocated += aligned;
651 dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
652 /* TODO(cshapiro): pad out free space in the last block. */
653 }
654 return addr;
655 }
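
/*
 * Illustrative allocation paths (sizes are hypothetical): with
 * 512-byte blocks and 8-byte alignment, a 20-byte request rounds up
 * to 24 bytes and is bump-allocated from the current block when at
 * least 24 bytes remain before allocLimit. A 300-byte request that
 * no longer fits is satisfied from a fresh block, abandoning the old
 * block's tail. A 2000-byte request takes a span of
 * alignUp(2000, BLOCK_SIZE) / BLOCK_SIZE == 4 contiguous blocks.
 */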
656
void *dvmHeapSourceAllocAndGrow(size_t size)
658 {
659 return dvmHeapSourceAlloc(size);
660 }
661
662 /* TODO: refactor along with dvmHeapSourceAlloc */
void *allocateGray(size_t size)
664 {
665 HeapSource *heapSource;
666 void *addr;
667 size_t block;
668
669 /* TODO: add a check that we are in a GC. */
670 heapSource = gDvm.gcHeap->heapSource;
671 addr = dvmHeapSourceAlloc(size);
672 assert(addr != NULL);
673 block = addressToBlock(heapSource, (const u1 *)addr);
674 if (heapSource->queueHead == QUEUE_TAIL) {
675 /*
676 * Forcibly append the underlying block to the queue. This
677 * condition occurs when referents are transported following
678 * the initial trace.
679 */
680 enqueueBlock(heapSource, block);
681 LOG_PROM("forced promoting block %zu %d @ %p", block, heapSource->blockSpace[block], addr);
682 }
683 return addr;
684 }
685
bool dvmHeapSourceContainsAddress(const void *ptr)
687 {
688 HeapSource *heapSource = gDvm.gcHeap->heapSource;
689 return dvmHeapBitmapCoversAddress(&heapSource->allocBits, ptr);
690 }
691
692 /*
693 * Returns true if the given address is within the heap and points to
694 * the header of a live object.
695 */
bool dvmHeapSourceContains(const void *addr)
697 {
698 HeapSource *heapSource;
699 HeapBitmap *bitmap;
700
701 heapSource = gDvm.gcHeap->heapSource;
702 bitmap = &heapSource->allocBits;
703 if (!dvmHeapBitmapCoversAddress(bitmap, addr)) {
704 return false;
705 } else {
706 return dvmHeapBitmapIsObjectBitSet(bitmap, addr);
707 }
708 }
709
bool dvmHeapSourceGetPtrFlag(const void *ptr, enum HeapSourcePtrFlag flag)
711 {
712 assert(!"implemented");
713 return false;
714 }
715
size_t dvmHeapSourceChunkSize(const void *ptr)
717 {
718 assert(!"implemented");
719 return 0;
720 }
721
size_t dvmHeapSourceFootprint(void)
723 {
724 assert(!"implemented");
725 return 0;
726 }
727
728 /*
729 * Returns the "ideal footprint" which appears to be the number of
730 * bytes currently committed to the heap. This starts out at the
731 * start size of the heap and grows toward the maximum size.
732 */
size_t dvmHeapSourceGetIdealFootprint(void)
734 {
735 return gDvm.gcHeap->heapSource->currentSize;
736 }
737
float dvmGetTargetHeapUtilization(void)
739 {
740 return 0.5f;
741 }
742
void dvmSetTargetHeapUtilization(float newTarget)
744 {
745 assert(newTarget > 0.0f && newTarget < 1.0f);
746 }
747
size_t dvmMinimumHeapSize(size_t size, bool set)
749 {
750 return gDvm.gcHeap->heapSource->minimumSize;
751 }
752
753 /*
754 * Expands the size of the heap after a collection. At present we
755 * commit the pages for maximum size of the heap so this routine is
756 * just a no-op. Eventually, we will either allocate or commit pages
757 * on an as-need basis.
758 */
void dvmHeapSourceGrowForUtilization(void)
760 {
761 /* do nothing */
762 }
763
void dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen)
765 {
766 /* do nothing */
767 }
768
void dvmHeapSourceWalk(void (*callback)(const void *chunkptr, size_t chunklen,
770 const void *userptr, size_t userlen,
771 void *arg),
772 void *arg)
773 {
774 assert(!"implemented");
775 }
776
size_t dvmHeapSourceGetNumHeaps(void)
778 {
779 return 1;
780 }
781
bool dvmTrackExternalAllocation(size_t n)
783 {
784 /* do nothing */
785 return true;
786 }
787
void dvmTrackExternalFree(size_t n)
789 {
790 /* do nothing */
791 }
792
size_t dvmGetExternalBytesAllocated(void)
794 {
795 assert(!"implemented");
796 return 0;
797 }
798
void dvmHeapSourceFlip(void)
800 {
801 HeapSource *heapSource;
802 size_t i;
803
804 heapSource = gDvm.gcHeap->heapSource;
805
806 /* Reset the block queue. */
807 heapSource->allocBlocks = 0;
808 heapSource->queueSize = 0;
809 heapSource->queueHead = QUEUE_TAIL;
810
811 /* TODO(cshapiro): pad the current (prev) block. */
812
813 heapSource->allocPtr = NULL;
814 heapSource->allocLimit = NULL;
815
816 /* Whiten all allocated blocks. */
817 for (i = 0; i < heapSource->totalBlocks; ++i) {
818 if (heapSource->blockSpace[i] == BLOCK_TO_SPACE) {
819 heapSource->blockSpace[i] = BLOCK_FROM_SPACE;
820 }
821 }
822 }
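
/*
 * Flip sketch (illustrative): outside of a collection every
 * allocated block is labelled to-space. Flipping relabels those
 * blocks from-space and resets the bump pointers, so the copy starts
 * with an empty to-space and the first transported object grabs a
 * fresh block:
 *
 *   before flip:  blockSpace = { 2, 2, 7, 0, ... }, allocBlocks == 3
 *   after flip:   blockSpace = { 1, 1, 7, 0, ... }, allocBlocks == 0
 */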
823
static void room(size_t *alloc, size_t *avail, size_t *total)
825 {
826 HeapSource *heapSource;
827
828 heapSource = gDvm.gcHeap->heapSource;
829 *total = heapSource->totalBlocks*BLOCK_SIZE;
830 *alloc = heapSource->allocBlocks*BLOCK_SIZE;
831 *avail = *total - *alloc;
832 }
833
static bool isSpaceInternal(u1 *addr, int space)
835 {
836 HeapSource *heapSource;
837 u1 *base, *limit;
838 size_t offset;
839 char space2;
840
841 heapSource = gDvm.gcHeap->heapSource;
842 base = heapSource->blockBase;
843 assert(addr >= base);
844 limit = heapSource->blockBase + heapSource->maximumSize;
845 assert(addr < limit);
846 offset = addr - base;
847 space2 = heapSource->blockSpace[offset >> BLOCK_SHIFT];
848 return space == space2;
849 }
850
static bool fromSpaceContains(const void *addr)
852 {
853 return isSpaceInternal((u1 *)addr, BLOCK_FROM_SPACE);
854 }
855
static bool toSpaceContains(const void *addr)
857 {
858 return isSpaceInternal((u1 *)addr, BLOCK_TO_SPACE);
859 }
860
861 /*
862 * Notifies the collector that the object at the given address must
863 * remain stationary during the current collection.
864 */
static void pinObject(const Object *obj)
866 {
867 promoteBlockByAddr(gDvm.gcHeap->heapSource, obj);
868 }
869
static size_t sumHeapBitmap(const HeapBitmap *bitmap)
871 {
872 size_t i, sum;
873
874 sum = 0;
875 for (i = 0; i < bitmap->bitsLen >> 2; ++i) {
876 sum += CLZ(bitmap->bits[i]);
877 }
878 return sum;
879 }
880
881 /*
882 * Miscellaneous functionality.
883 */
884
static int isForward(const void *addr)
886 {
887 return (uintptr_t)addr & 0x1;
888 }
889
static void setForward(const void *toObj, void *fromObj)
891 {
892 *(unsigned long *)fromObj = (uintptr_t)toObj | 0x1;
893 }
894
static void* getForward(const void *fromObj)
896 {
897 return (void *)((uintptr_t)fromObj & ~0x1);
898 }
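
/*
 * Tagging sketch (illustrative addresses): objects are 8-byte
 * aligned, so the low bit of an object's first word (its clazz
 * pointer) is normally clear. After an object is transported to
 * 0x40001230, setForward() overwrites the from-space copy's first
 * word with 0x40001231; isForward() tests that low bit and
 * getForward() masks it off to recover 0x40001230.
 */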
899
/* Beware, uses the same encoding as forwarding pointers! */
static int isPermanentString(const StringObject *obj) {
902 return (uintptr_t)obj & 0x1;
903 }
904
static void* getPermanentString(const StringObject *obj)
906 {
907 return (void *)((uintptr_t)obj & ~0x1);
908 }
909
910
911 /*
912 * Scavenging and transporting routines follow. A transporter grays
913 * an object. A scavenger blackens an object. We define these
914 * routines for each fundamental object type. Dispatch is performed
915 * in scavengeObject.
916 */
917
918 /*
919 * Class object scavenging.
920 */
static void scavengeClassObject(ClassObject *obj)
922 {
923 int i;
924
925 LOG_SCAV("scavengeClassObject(obj=%p)", obj);
926 assert(obj != NULL);
927 assert(obj->obj.clazz != NULL);
928 assert(obj->obj.clazz->descriptor != NULL);
929 assert(!strcmp(obj->obj.clazz->descriptor, "Ljava/lang/Class;"));
930 assert(obj->descriptor != NULL);
931 LOG_SCAV("scavengeClassObject: descriptor='%s',vtableCount=%zu",
932 obj->descriptor, obj->vtableCount);
933 /* Delegate class object and instance field scavenging. */
934 scavengeDataObject((Object *)obj);
935 /* Scavenge the array element class object. */
936 if (IS_CLASS_FLAG_SET(obj, CLASS_ISARRAY)) {
937 scavengeReference((Object **)(void *)&obj->elementClass);
938 }
939 /* Scavenge the superclass. */
940 scavengeReference((Object **)(void *)&obj->super);
941 /* Scavenge the class loader. */
942 scavengeReference(&obj->classLoader);
943 /* Scavenge static fields. */
944 for (i = 0; i < obj->sfieldCount; ++i) {
945 char ch = obj->sfields[i].field.signature[0];
946 if (ch == '[' || ch == 'L') {
947 scavengeReference((Object **)(void *)&obj->sfields[i].value.l);
948 }
949 }
950 /* Scavenge interface class objects. */
951 for (i = 0; i < obj->interfaceCount; ++i) {
952 scavengeReference((Object **) &obj->interfaces[i]);
953 }
954 }
955
956 /*
957 * Array object scavenging.
958 */
static size_t scavengeArrayObject(ArrayObject *array)
960 {
961 size_t i, length;
962
963 LOG_SCAV("scavengeArrayObject(array=%p)", array);
964 /* Scavenge the class object. */
965 assert(toSpaceContains(array));
966 assert(array != NULL);
967 assert(array->obj.clazz != NULL);
968 scavengeReference((Object **) array);
969 length = dvmArrayObjectSize(array);
970 /* Scavenge the array contents. */
971 if (IS_CLASS_FLAG_SET(array->obj.clazz, CLASS_ISOBJECTARRAY)) {
972 Object **contents = (Object **)array->contents;
973 for (i = 0; i < array->length; ++i) {
974 scavengeReference(&contents[i]);
975 }
976 }
977 return length;
978 }
979
980 /*
981 * Reference object scavenging.
982 */
983
static int getReferenceFlags(const Object *obj)
985 {
986 int flags;
987
988 flags = CLASS_ISREFERENCE |
989 CLASS_ISWEAKREFERENCE |
990 CLASS_ISPHANTOMREFERENCE;
991 return GET_CLASS_FLAG_GROUP(obj->clazz, flags);
992 }
993
static int isSoftReference(const Object *obj)
995 {
996 return getReferenceFlags(obj) == CLASS_ISREFERENCE;
997 }
998
static int isWeakReference(const Object *obj)
1000 {
1001 return getReferenceFlags(obj) & CLASS_ISWEAKREFERENCE;
1002 }
1003
1004 #ifndef NDEBUG
static bool isPhantomReference(const Object *obj)
1006 {
1007 return getReferenceFlags(obj) & CLASS_ISPHANTOMREFERENCE;
1008 }
1009 #endif
1010
1011 /*
1012 * Returns true if the reference was registered with a reference queue
1013 * but has not yet been appended to it.
1014 */
static bool isReferenceEnqueuable(const Object *ref)
1016 {
1017 Object *queue, *queueNext;
1018
1019 queue = dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queue);
1020 queueNext = dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queueNext);
1021 if (queue == NULL || queueNext != NULL) {
1022 /*
1023 * There is no queue, or the reference has already
1024 * been enqueued. The Reference.enqueue() method
1025 * will do nothing even if we call it.
1026 */
1027 return false;
1028 }
1029
1030 /*
1031 * We need to call enqueue(), but if we called it from
1032 * here we'd probably deadlock. Schedule a call.
1033 */
1034 return true;
1035 }
1036
1037 /*
1038 * Schedules a reference to be appended to its reference queue.
1039 */
static void enqueueReference(Object *ref)
1041 {
1042 assert(ref != NULL);
1043 assert(dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queue) != NULL);
1044 assert(dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queueNext) == NULL);
1045 if (!dvmHeapAddRefToLargeTable(&gDvm.gcHeap->referenceOperations, ref)) {
1046 LOGE("no room for any more reference operations");
1047 dvmAbort();
1048 }
1049 }
1050
1051 /*
1052 * Sets the referent field of a reference object to NULL.
1053 */
static void clearReference(Object *obj)
1055 {
1056 dvmSetFieldObject(obj, gDvm.offJavaLangRefReference_referent, NULL);
1057 }
1058
1059 /*
1060 * Clears reference objects with white referents.
1061 */
void clearWhiteReferences(Object **list)
1063 {
1064 size_t referentOffset, queueNextOffset;
1065 bool doSignal;
1066
1067 queueNextOffset = gDvm.offJavaLangRefReference_queueNext;
1068 referentOffset = gDvm.offJavaLangRefReference_referent;
1069 doSignal = false;
1070 while (*list != NULL) {
1071 Object *ref = *list;
1072 JValue *field = dvmFieldPtr(ref, referentOffset);
1073 Object *referent = field->l;
1074 *list = dvmGetFieldObject(ref, queueNextOffset);
1075 dvmSetFieldObject(ref, queueNextOffset, NULL);
1076 assert(referent != NULL);
1077 if (isForward(referent->clazz)) {
1078 field->l = referent = getForward(referent->clazz);
1079 continue;
1080 }
1081 if (fromSpaceContains(referent)) {
1082 /* Referent is white, clear it. */
1083 clearReference(ref);
1084 if (isReferenceEnqueuable(ref)) {
1085 enqueueReference(ref);
1086 doSignal = true;
1087 }
1088 }
1089 }
1090 /*
1091 * If we cleared a reference with a reference queue we must notify
1092 * the heap worker to append the reference.
1093 */
1094 if (doSignal) {
1095 dvmSignalHeapWorker(false);
1096 }
1097 assert(*list == NULL);
1098 }
1099
1100 /*
1101 * Blackens referents subject to the soft reference preservation
1102 * policy.
1103 */
void preserveSoftReferences(Object **list)
1105 {
1106 Object *ref;
1107 Object *prev, *next;
1108 size_t referentOffset, queueNextOffset;
1109 unsigned counter;
1110 bool white;
1111
1112 queueNextOffset = gDvm.offJavaLangRefReference_queueNext;
1113 referentOffset = gDvm.offJavaLangRefReference_referent;
1114 counter = 0;
1115 prev = next = NULL;
1116 ref = *list;
1117 while (ref != NULL) {
1118 JValue *field = dvmFieldPtr(ref, referentOffset);
1119 Object *referent = field->l;
1120 next = dvmGetFieldObject(ref, queueNextOffset);
1121 assert(referent != NULL);
1122 if (isForward(referent->clazz)) {
1123 /* Referent is black. */
1124 field->l = referent = getForward(referent->clazz);
1125 white = false;
1126 } else {
1127 white = fromSpaceContains(referent);
1128 }
1129 if (!white && ((++counter) & 1)) {
1130 /* Referent is white and biased toward saving, gray it. */
1131 scavengeReference((Object **)(void *)&field->l);
1132 white = true;
1133 }
1134 if (white) {
1135 /* Referent is black, unlink it. */
1136 if (prev != NULL) {
1137 dvmSetFieldObject(ref, queueNextOffset, NULL);
1138 dvmSetFieldObject(prev, queueNextOffset, next);
1139 }
1140 } else {
1141 /* Referent is white, skip over it. */
1142 prev = ref;
1143 }
1144 ref = next;
1145 }
1146 /*
1147 * Restart the trace with the newly gray references added to the
1148 * root set.
1149 */
1150 scavengeBlockQueue();
1151 }
1152
void processFinalizableReferences(void)
1154 {
1155 HeapRefTable newPendingRefs;
1156 LargeHeapRefTable *finRefs = gDvm.gcHeap->finalizableRefs;
1157 Object **ref;
1158 Object **lastRef;
1159 size_t totalPendCount;
1160
1161 /*
* All strongly reachable objects are black.
1163 * Any white finalizable objects need to be finalized.
1164 */
1165
1166 /* Create a table that the new pending refs will
1167 * be added to.
1168 */
1169 if (!dvmHeapInitHeapRefTable(&newPendingRefs)) {
1170 //TODO: mark all finalizable refs and hope that
1171 // we can schedule them next time. Watch out,
1172 // because we may be expecting to free up space
1173 // by calling finalizers.
1174 LOG_REF("no room for pending finalizations\n");
1175 dvmAbort();
1176 }
1177
1178 /*
1179 * Walk through finalizableRefs and move any white references to
1180 * the list of new pending refs.
1181 */
1182 totalPendCount = 0;
1183 while (finRefs != NULL) {
1184 Object **gapRef;
1185 size_t newPendCount = 0;
1186
1187 gapRef = ref = finRefs->refs.table;
1188 lastRef = finRefs->refs.nextEntry;
1189 while (ref < lastRef) {
1190 if (fromSpaceContains(*ref)) {
1191 if (!dvmHeapAddToHeapRefTable(&newPendingRefs, *ref)) {
1192 //TODO: add the current table and allocate
1193 // a new, smaller one.
1194 LOG_REF("no room for any more pending finalizations: %zd\n",
1195 dvmHeapNumHeapRefTableEntries(&newPendingRefs));
1196 dvmAbort();
1197 }
1198 newPendCount++;
1199 } else {
1200 /* This ref is black, so will remain on finalizableRefs.
1201 */
1202 if (newPendCount > 0) {
1203 /* Copy it up to fill the holes.
1204 */
1205 *gapRef++ = *ref;
1206 } else {
1207 /* No holes yet; don't bother copying.
1208 */
1209 gapRef++;
1210 }
1211 }
1212 ref++;
1213 }
1214 finRefs->refs.nextEntry = gapRef;
1215 //TODO: if the table is empty when we're done, free it.
1216 totalPendCount += newPendCount;
1217 finRefs = finRefs->next;
1218 }
1219 LOG_REF("%zd finalizers triggered.\n", totalPendCount);
1220 if (totalPendCount == 0) {
1221 /* No objects required finalization.
1222 * Free the empty temporary table.
1223 */
1224 dvmClearReferenceTable(&newPendingRefs);
1225 return;
1226 }
1227
1228 /* Add the new pending refs to the main list.
1229 */
1230 if (!dvmHeapAddTableToLargeTable(&gDvm.gcHeap->pendingFinalizationRefs,
1231 &newPendingRefs))
1232 {
1233 LOG_REF("can't insert new pending finalizations\n");
1234 dvmAbort();
1235 }
1236
1237 //TODO: try compacting the main list with a memcpy loop
1238
1239 /* Blacken the refs we just moved; we don't want them or their
1240 * children to get swept yet.
1241 */
1242 ref = newPendingRefs.table;
1243 lastRef = newPendingRefs.nextEntry;
1244 assert(ref < lastRef);
1245 HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_FINALIZING, 0);
1246 while (ref < lastRef) {
1247 scavengeReference(ref);
1248 ref++;
1249 }
1250 HPROF_CLEAR_GC_SCAN_STATE();
1251 scavengeBlockQueue();
1252 dvmSignalHeapWorker(false);
1253 }
1254
1255 /*
1256 * If a reference points to from-space and has been forwarded, we snap
1257 * the pointer to its new to-space address. If the reference points
1258 * to an unforwarded from-space address we must enqueue the reference
1259 * for later processing. TODO: implement proper reference processing
1260 * and move the referent scavenging elsewhere.
1261 */
static void scavengeReferenceObject(Object *obj)
1263 {
1264 Object *referent;
1265 Object **queue;
1266 size_t referentOffset, queueNextOffset;
1267
1268 assert(obj != NULL);
1269 LOG_SCAV("scavengeReferenceObject(obj=%p),'%s'", obj, obj->clazz->descriptor);
1270 scavengeDataObject(obj);
1271 referentOffset = gDvm.offJavaLangRefReference_referent;
1272 referent = dvmGetFieldObject(obj, referentOffset);
1273 if (referent == NULL || toSpaceContains(referent)) {
1274 return;
1275 }
1276 if (isSoftReference(obj)) {
1277 queue = &gDvm.gcHeap->softReferences;
1278 } else if (isWeakReference(obj)) {
1279 queue = &gDvm.gcHeap->weakReferences;
1280 } else {
1281 assert(isPhantomReference(obj));
1282 queue = &gDvm.gcHeap->phantomReferences;
1283 }
1284 queueNextOffset = gDvm.offJavaLangRefReference_queueNext;
1285 dvmSetFieldObject(obj, queueNextOffset, *queue);
1286 *queue = obj;
1287 LOG_SCAV("scavengeReferenceObject: enqueueing %p", obj);
1288 }
1289
1290 /*
1291 * Data object scavenging.
1292 */
static void scavengeDataObject(Object *obj)
1294 {
1295 ClassObject *clazz;
1296 int i;
1297
1298 // LOG_SCAV("scavengeDataObject(obj=%p)", obj);
1299 assert(obj != NULL);
1300 assert(obj->clazz != NULL);
1301 assert(obj->clazz->objectSize != 0);
1302 assert(toSpaceContains(obj));
1303 /* Scavenge the class object. */
1304 clazz = obj->clazz;
1305 scavengeReference((Object **) obj);
1306 /* Scavenge instance fields. */
1307 if (clazz->refOffsets != CLASS_WALK_SUPER) {
1308 size_t refOffsets = clazz->refOffsets;
1309 while (refOffsets != 0) {
1310 size_t rshift = CLZ(refOffsets);
1311 size_t offset = CLASS_OFFSET_FROM_CLZ(rshift);
1312 Object **ref = (Object **)((u1 *)obj + offset);
1313 scavengeReference(ref);
1314 refOffsets &= ~(CLASS_HIGH_BIT >> rshift);
1315 }
1316 } else {
1317 for (; clazz != NULL; clazz = clazz->super) {
1318 InstField *field = clazz->ifields;
1319 for (i = 0; i < clazz->ifieldRefCount; ++i, ++field) {
1320 size_t offset = field->byteOffset;
1321 Object **ref = (Object **)((u1 *)obj + offset);
1322 scavengeReference(ref);
1323 }
1324 }
1325 }
1326 }
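
/*
 * refOffsets sketch (illustrative): for classes whose reference
 * fields all fit, clazz->refOffsets packs the field offsets into a
 * bitmap that is decoded above by leading-zero count, e.g. a bit
 * with CLZ(refOffsets) == 3 yields the byte offset
 * CLASS_OFFSET_FROM_CLZ(3). The CLASS_WALK_SUPER sentinel means the
 * bitmap could not describe every field, so the slow path walks the
 * ifields of the class and each superclass instead.
 */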
1327
static Object *transportObject(const Object *fromObj)
1329 {
1330 Object *toObj;
1331 size_t allocSize, copySize;
1332
1333 LOG_TRAN("transportObject(fromObj=%p) allocBlocks=%zu",
1334 fromObj,
1335 gDvm.gcHeap->heapSource->allocBlocks);
1336 assert(fromObj != NULL);
1337 assert(fromSpaceContains(fromObj));
1338 allocSize = copySize = objectSize(fromObj);
1339 if (LW_HASH_STATE(fromObj->lock) != LW_HASH_STATE_UNHASHED) {
1340 /*
1341 * The object has been hashed or hashed and moved. We must
1342 * reserve an additional word for a hash code.
1343 */
1344 allocSize += sizeof(u4);
1345 }
1346 if (LW_HASH_STATE(fromObj->lock) == LW_HASH_STATE_HASHED_AND_MOVED) {
1347 /*
1348 * The object has its hash code allocated. Ensure the hash
1349 * code is copied along with the instance data.
1350 */
1351 copySize += sizeof(u4);
1352 }
1353 /* TODO(cshapiro): don't copy, re-map large data objects. */
1354 assert(copySize <= allocSize);
1355 toObj = allocateGray(allocSize);
1356 assert(toObj != NULL);
1357 assert(toSpaceContains(toObj));
1358 memcpy(toObj, fromObj, copySize);
1359 if (LW_HASH_STATE(fromObj->lock) == LW_HASH_STATE_HASHED) {
1360 /*
1361 * The object has had its hash code exposed. Append it to the
1362 * instance and set a bit so we know to look for it there.
1363 */
1364 *(u4 *)(((char *)toObj) + copySize) = (u4)fromObj >> 3;
1365 toObj->lock |= LW_HASH_STATE_HASHED_AND_MOVED << LW_HASH_STATE_SHIFT;
1366 }
1367 LOG_TRAN("transportObject: from %p/%zu to %p/%zu (%zu,%zu) %s",
1368 fromObj, addressToBlock(gDvm.gcHeap->heapSource,fromObj),
1369 toObj, addressToBlock(gDvm.gcHeap->heapSource,toObj),
1370 copySize, allocSize, copySize < allocSize ? "DIFFERENT" : "");
1371 return toObj;
1372 }
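
/*
 * Hash slot sketch (illustrative): Dalvik derives an object's
 * identity hash from its address, so a moved object must carry the
 * old value with it. If an object at 0x40000a10 had exposed its
 * hash (LW_HASH_STATE_HASHED), its to-space copy gets
 * 0x40000a10 >> 3 == 0x08000142 appended after the instance data,
 * and the lock word is switched to LW_HASH_STATE_HASHED_AND_MOVED so
 * later hash lookups read the slot rather than the new address.
 */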
1373
1374 /*
1375 * Generic reference scavenging.
1376 */
1377
1378 /*
1379 * Given a reference to an object, the scavenge routine will gray the
* reference. Any objects pointed to by the scavenged object will be
1381 * transported to new space and a forwarding pointer will be installed
1382 * in the header of the object.
1383 */
1384
1385 /*
1386 * Blacken the given pointer. If the pointer is in from space, it is
1387 * transported to new space. If the object has a forwarding pointer
1388 * installed it has already been transported and the referent is
1389 * snapped to the new address.
1390 */
static void scavengeReference(Object **obj)
1392 {
1393 ClassObject *clazz;
1394 Object *fromObj, *toObj;
1395
1396 assert(obj);
1397
1398 if (*obj == NULL) return;
1399
1400 assert(dvmIsValidObject(*obj));
1401
1402 /* The entire block is black. */
1403 if (toSpaceContains(*obj)) {
1404 LOG_SCAV("scavengeReference skipping pinned object @ %p", *obj);
1405 return;
1406 }
1407 LOG_SCAV("scavengeReference(*obj=%p)", *obj);
1408
1409 assert(fromSpaceContains(*obj));
1410
1411 clazz = (*obj)->clazz;
1412
1413 if (isForward(clazz)) {
1414 // LOG_SCAV("forwarding %p @ %p to %p", *obj, obj, (void *)((uintptr_t)clazz & ~0x1));
1415 *obj = (Object *)getForward(clazz);
1416 return;
1417 }
1418 fromObj = *obj;
1419 if (clazz == NULL) {
1420 // LOG_SCAV("scavangeReference %p has a NULL class object", fromObj);
1421 assert(!"implemented");
1422 toObj = NULL;
1423 } else {
1424 toObj = transportObject(fromObj);
1425 }
1426 setForward(toObj, fromObj);
1427 *obj = (Object *)toObj;
1428 }
1429
1430 /*
1431 * Generic object scavenging.
1432 */
static void scavengeObject(Object *obj)
1434 {
1435 ClassObject *clazz;
1436
1437 assert(obj != NULL);
1438 assert(obj->clazz != NULL);
1439 assert(!((uintptr_t)obj->clazz & 0x1));
1440 clazz = obj->clazz;
1441 if (clazz == gDvm.classJavaLangClass) {
1442 scavengeClassObject((ClassObject *)obj);
1443 } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
1444 scavengeArrayObject((ArrayObject *)obj);
1445 } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISREFERENCE)) {
1446 scavengeReferenceObject(obj);
1447 } else {
1448 scavengeDataObject(obj);
1449 }
1450 }
1451
1452 /*
1453 * External root scavenging routines.
1454 */
1455
static void pinHashTableEntries(HashTable *table)
1457 {
1458 HashEntry *entry;
1459 void *obj;
1460 int i;
1461
1462 LOG_PIN(">>> pinHashTableEntries(table=%p)", table);
1463 if (table == NULL) {
1464 return;
1465 }
1466 dvmHashTableLock(table);
1467 for (i = 0; i < table->tableSize; ++i) {
1468 entry = &table->pEntries[i];
1469 obj = entry->data;
1470 if (obj == NULL || obj == HASH_TOMBSTONE) {
1471 continue;
1472 }
1473 pinObject(entry->data);
1474 }
1475 dvmHashTableUnlock(table);
1476 LOG_PIN("<<< pinHashTableEntries(table=%p)", table);
1477 }
1478
static void pinPrimitiveClasses(void)
1480 {
1481 size_t length;
1482 size_t i;
1483
1484 length = ARRAYSIZE(gDvm.primitiveClass);
1485 for (i = 0; i < length; i++) {
1486 if (gDvm.primitiveClass[i] != NULL) {
1487 pinObject((Object *)gDvm.primitiveClass[i]);
1488 }
1489 }
1490 }
1491
1492 /*
1493 * Scavenge interned strings. Permanent interned strings will have
1494 * been pinned and are therefore ignored. Non-permanent strings that
1495 * have been forwarded are snapped. All other entries are removed.
1496 */
static void scavengeInternedStrings(void)
1498 {
1499 HashTable *table;
1500 HashEntry *entry;
1501 Object *obj;
1502 int i;
1503
1504 table = gDvm.internedStrings;
1505 if (table == NULL) {
1506 return;
1507 }
1508 dvmHashTableLock(table);
1509 for (i = 0; i < table->tableSize; ++i) {
1510 entry = &table->pEntries[i];
1511 obj = (Object *)entry->data;
1512 if (obj == NULL || obj == HASH_TOMBSTONE) {
1513 continue;
1514 } else if (!isPermanentString((StringObject *)obj)) {
1515 // LOG_SCAV("entry->data=%p", entry->data);
1516 LOG_SCAV(">>> string obj=%p", entry->data);
1517 /* TODO(cshapiro): detach white string objects */
1518 scavengeReference((Object **)(void *)&entry->data);
1519 LOG_SCAV("<<< string obj=%p", entry->data);
1520 }
1521 }
1522 dvmHashTableUnlock(table);
1523 }
1524
static void pinInternedStrings(void)
1526 {
1527 HashTable *table;
1528 HashEntry *entry;
1529 Object *obj;
1530 int i;
1531
1532 table = gDvm.internedStrings;
1533 if (table == NULL) {
1534 return;
1535 }
1536 dvmHashTableLock(table);
1537 for (i = 0; i < table->tableSize; ++i) {
1538 entry = &table->pEntries[i];
1539 obj = (Object *)entry->data;
1540 if (obj == NULL || obj == HASH_TOMBSTONE) {
1541 continue;
1542 } else if (isPermanentString((StringObject *)obj)) {
1543 obj = (Object *)getPermanentString((StringObject*)obj);
1544 LOG_PROM(">>> pin string obj=%p", obj);
1545 pinObject(obj);
1546 LOG_PROM("<<< pin string obj=%p", obj);
1547 }
1548 }
1549 dvmHashTableUnlock(table);
1550 }
1551
1552 /*
1553 * At present, reference tables contain references that must not be
1554 * moved by the collector. Instead of scavenging each reference in
1555 * the table we pin each referenced object.
1556 */
static void pinReferenceTable(const ReferenceTable *table)
1558 {
1559 Object **entry;
1560
1561 assert(table != NULL);
1562 assert(table->table != NULL);
1563 assert(table->nextEntry != NULL);
1564 for (entry = table->table; entry < table->nextEntry; ++entry) {
1565 assert(entry != NULL);
1566 assert(!isForward(*entry));
1567 pinObject(*entry);
1568 }
1569 }
1570
static void scavengeLargeHeapRefTable(LargeHeapRefTable *table)
1572 {
1573 for (; table != NULL; table = table->next) {
1574 Object **ref = table->refs.table;
1575 for (; ref < table->refs.nextEntry; ++ref) {
1576 scavengeReference(ref);
1577 }
1578 }
1579 }
1580
1581 /* This code was copied from Thread.c */
static void scavengeThreadStack(Thread *thread)
1583 {
1584 const u4 *framePtr;
1585 #if WITH_EXTRA_GC_CHECKS > 1
1586 bool first = true;
1587 #endif
1588
1589 framePtr = (const u4 *)thread->curFrame;
1590 while (framePtr != NULL) {
1591 const StackSaveArea *saveArea;
1592 const Method *method;
1593
1594 saveArea = SAVEAREA_FROM_FP(framePtr);
1595 method = saveArea->method;
1596 if (method != NULL && !dvmIsNativeMethod(method)) {
1597 #ifdef COUNT_PRECISE_METHODS
1598 /* the GC is running, so no lock required */
1599 if (dvmPointerSetAddEntry(gDvm.preciseMethods, method))
1600 LOG_SCAV("PGC: added %s.%s %p\n",
1601 method->clazz->descriptor, method->name, method);
1602 #endif
1603 #if WITH_EXTRA_GC_CHECKS > 1
1604 /*
1605 * May also want to enable the memset() in the "invokeMethod"
1606 * goto target in the portable interpreter. That sets the stack
1607 * to a pattern that makes referring to uninitialized data
1608 * very obvious.
1609 */
1610
1611 if (first) {
1612 /*
1613 * First frame, isn't native, check the "alternate" saved PC
1614 * as a sanity check.
1615 *
1616 * It seems like we could check the second frame if the first
1617 * is native, since the PCs should be the same. It turns out
1618 * this doesn't always work. The problem is that we could
1619 * have calls in the sequence:
1620 * interp method #2
1621 * native method
1622 * interp method #1
1623 *
1624 * and then GC while in the native method after returning
1625 * from interp method #2. The currentPc on the stack is
1626 * for interp method #1, but thread->currentPc2 is still
1627 * set for the last thing interp method #2 did.
1628 *
1629 * This can also happen in normal execution:
1630 * - sget-object on not-yet-loaded class
1631 * - class init updates currentPc2
1632 * - static field init is handled by parsing annotations;
1633 * static String init requires creation of a String object,
1634 * which can cause a GC
1635 *
1636 * Essentially, any pattern that involves executing
1637 * interpreted code and then causes an allocation without
1638 * executing instructions in the original method will hit
1639 * this. These are rare enough that the test still has
1640 * some value.
1641 */
1642 if (saveArea->xtra.currentPc != thread->currentPc2) {
1643 LOGW("PGC: savedPC(%p) != current PC(%p), %s.%s ins=%p\n",
1644 saveArea->xtra.currentPc, thread->currentPc2,
1645 method->clazz->descriptor, method->name, method->insns);
1646 if (saveArea->xtra.currentPc != NULL)
1647 LOGE(" pc inst = 0x%04x\n", *saveArea->xtra.currentPc);
1648 if (thread->currentPc2 != NULL)
1649 LOGE(" pc2 inst = 0x%04x\n", *thread->currentPc2);
1650 dvmDumpThread(thread, false);
1651 }
1652 } else {
1653 /*
1654 * It's unusual, but not impossible, for a non-first frame
1655 * to be at something other than a method invocation. For
1656 * example, if we do a new-instance on a nonexistent class,
1657 * we'll have a lot of class loader activity on the stack
1658 * above the frame with the "new" operation. Could also
1659 * happen while we initialize a Throwable when an instruction
1660 * fails.
1661 *
1662 * So there's not much we can do here to verify the PC,
1663 * except to verify that it's a GC point.
1664 */
1665 }
1666 assert(saveArea->xtra.currentPc != NULL);
1667 #endif
1668
1669 const RegisterMap* pMap;
1670 const u1* regVector;
1671 int i;
1672
1673 Method* nonConstMethod = (Method*) method; // quiet gcc
1674 pMap = dvmGetExpandedRegisterMap(nonConstMethod);
1675
1676 //LOG_SCAV("PGC: %s.%s\n", method->clazz->descriptor, method->name);
1677
1678 if (pMap != NULL) {
1679 /* found map, get registers for this address */
1680 int addr = saveArea->xtra.currentPc - method->insns;
1681 regVector = dvmRegisterMapGetLine(pMap, addr);
1682 /*
1683 if (regVector == NULL) {
1684 LOG_SCAV("PGC: map but no entry for %s.%s addr=0x%04x\n",
1685 method->clazz->descriptor, method->name, addr);
1686 } else {
1687 LOG_SCAV("PGC: found map for %s.%s 0x%04x (t=%d)\n",
1688 method->clazz->descriptor, method->name, addr,
1689 thread->threadId);
1690 }
1691 */
1692 } else {
1693 /*
1694 * No map found. If precise GC is disabled this is
1695 * expected -- we don't create pointers to the map data even
1696 * if it's present -- but if it's enabled it means we're
1697 * unexpectedly falling back on a conservative scan, so it's
1698 * worth yelling a little.
1699 */
1700 if (gDvm.preciseGc) {
1701 LOG_SCAV("PGC: no map for %s.%s\n", method->clazz->descriptor, method->name);
1702 }
1703 regVector = NULL;
1704 }
1705 if (regVector == NULL) {
1706 /*
1707 * There are no roots to scavenge. Skip over the entire frame.
1708 */
1709 framePtr += method->registersSize;
1710 } else {
1711 /*
1712 * Precise scan. v0 is at the lowest address on the
1713 * interpreted stack, and is the first bit in the register
1714 * vector, so we can walk through the register map and
1715 * memory in the same direction.
1716 *
1717 * A '1' bit indicates a live reference.
1718 */
1719 u2 bits = 1 << 1;
1720 for (i = method->registersSize - 1; i >= 0; i--) {
1721 u4 rval = *framePtr;
1722
1723 bits >>= 1;
1724 if (bits == 1) {
1725 /* set bit 9 so we can tell when we're empty */
1726 bits = *regVector++ | 0x0100;
1727 }
1728
1729 if (rval != 0 && (bits & 0x01) != 0) {
1730 /*
1731 * Non-null, register marked as live reference. This
1732 * should always be a valid object.
1733 */
1734 #if WITH_EXTRA_GC_CHECKS > 0
1735 if ((rval & 0x3) != 0 || !dvmIsValidObject((Object*) rval)) {
1736 /* this is very bad */
1737 LOGE("PGC: invalid ref in reg %d: 0x%08x\n",
1738 method->registersSize-1 - i, rval);
1739 } else
1740 #endif
1741 {
1742
1743 // LOG_SCAV("stack reference %u@%p", *framePtr, framePtr);
1744 /* dvmMarkObjectNonNull((Object *)rval); */
1745 scavengeReference((Object **) framePtr);
1746 }
1747 } else {
1748 /*
1749 * Null or non-reference, do nothing at all.
1750 */
1751 #if WITH_EXTRA_GC_CHECKS > 1
1752 if (dvmIsValidObject((Object*) rval)) {
1753 /* this is normal, but we feel chatty */
1754 LOGD("PGC: ignoring valid ref in reg %d: 0x%08x\n",
1755 method->registersSize-1 - i, rval);
1756 }
1757 #endif
1758 }
1759 ++framePtr;
1760 }
1761 dvmReleaseRegisterMapLine(pMap, regVector);
1762 }
1763 }
1764 /* else this is a break frame and there is nothing to gray, or
1765 * this is a native method and the registers are just the "ins",
1766 * copied from various registers in the caller's set.
1767 */
1768
1769 #if WITH_EXTRA_GC_CHECKS > 1
1770 first = false;
1771 #endif
1772
1773 /* Don't fall into an infinite loop if things get corrupted.
1774 */
1775 assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
1776 saveArea->prevFrame == NULL);
1777 framePtr = saveArea->prevFrame;
1778 }
1779 }
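
/*
 * Register vector sketch (illustrative): each byte of a register map
 * line covers eight VM registers, least significant bit first, with
 * a '1' marking a live reference. With registersSize == 5 and a map
 * byte of 0x0b (binary 01011), v0, v1, and v3 hold live references
 * and are scavenged (when non-null); v2 and v4 are skipped. The
 * 0x0100 bit OR'd in above acts as a sentinel so the loop knows when
 * all eight bits of the current byte have been consumed.
 */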
1780
static void scavengeThread(Thread *thread)
1782 {
1783 // LOG_SCAV("scavengeThread(thread=%p)", thread);
1784
1785 // LOG_SCAV("Scavenging threadObj=%p", thread->threadObj);
1786 scavengeReference(&thread->threadObj);
1787
1788 // LOG_SCAV("Scavenging exception=%p", thread->exception);
1789 scavengeReference(&thread->exception);
1790
1791 scavengeThreadStack(thread);
1792 }
1793
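/*
 * Scavenges every thread on the global thread list, holding the
 * thread list lock for the duration of the walk.
 */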
1794 static void scavengeThreadList(void)
1795 {
1796 Thread *thread;
1797
1798 dvmLockThreadList(dvmThreadSelf());
1799 thread = gDvm.threadList;
1800 while (thread) {
1801 scavengeThread(thread);
1802 thread = thread->next;
1803 }
1804 dvmUnlockThreadList();
1805 }
1806
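/*
 * Walks a thread's stack and pins objects that must not move.  The
 * arguments of native frames are scanned precisely using the method
 * shorty; interpreted frames without register map data are scanned
 * conservatively, pinning anything that looks like a valid object.
 */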
1807 static void pinThreadStack(const Thread *thread)
1808 {
1809 const u4 *framePtr;
1810 const StackSaveArea *saveArea;
1811 Method *method;
1812 const char *shorty;
1813 Object *obj;
1814 int i;
1815
1816 saveArea = NULL;
1817 framePtr = (const u4 *)thread->curFrame;
1818 for (; framePtr != NULL; framePtr = saveArea->prevFrame) {
1819 saveArea = SAVEAREA_FROM_FP(framePtr);
1820 method = (Method *)saveArea->method;
1821 if (method != NULL && dvmIsNativeMethod(method)) {
1822 /*
1823 * This is native method, pin its arguments.
1824 *
1825 * For purposes of graying references, we don't need to do
1826 * anything here, because all of the native "ins" were copied
1827 * from registers in the caller's stack frame and won't be
1828 * changed (an interpreted method can freely use registers
1829 * with parameters like any other register, but natives don't
1830 * work that way).
1831 *
1832 * However, we need to ensure that references visible to
1833 * native methods don't move around. We can do a precise scan
1834 * of the arguments by examining the method signature.
1835 */
1836 LOG_PIN("+++ native scan %s.%s\n",
1837 method->clazz->descriptor, method->name);
1838 assert(method->registersSize == method->insSize);
1839 if (!dvmIsStaticMethod(method)) {
1840 /* grab the "this" pointer */
1841 obj = (Object *)*framePtr++;
1842 if (obj == NULL) {
1843 /*
1844 * This can happen for the "fake" entry frame inserted
1845 * for threads created outside the VM. There's no actual
1846 * call so there's no object. If we changed the fake
1847 * entry method to be declared "static" then this
1848 * situation should never occur.
1849 */
1850 } else {
1851 assert(dvmIsValidObject(obj));
1852 pinObject(obj);
1853 }
1854 }
1855 shorty = method->shorty+1; // skip return value
1856 for (i = method->registersSize - 1; i >= 0; i--, framePtr++) {
1857 switch (*shorty++) {
1858 case 'L':
1859 obj = (Object *)*framePtr;
1860 if (obj != NULL) {
1861 assert(dvmIsValidObject(obj));
1862 pinObject(obj);
1863 }
1864 break;
1865 case 'D':
1866 case 'J':
1867 framePtr++;
1868 break;
1869 default:
1870 /* 32-bit non-reference value */
1871 obj = (Object *)*framePtr; // debug, remove
1872 if (dvmIsValidObject(obj)) { // debug, remove
1873 /* if we see a lot of these, our scan might be off */
1874 LOG_PIN("+++ did NOT pin obj %p\n", obj);
1875 }
1876 break;
1877 }
1878 }
1879 } else if (method != NULL && !dvmIsNativeMethod(method)) {
1880 const RegisterMap* pMap = dvmGetExpandedRegisterMap(method);
1881 const u1* regVector = NULL;
1882
1883 LOGI("conservative : %s.%s\n", method->clazz->descriptor, method->name);
1884
1885 if (pMap != NULL) {
1886 int addr = saveArea->xtra.currentPc - method->insns;
1887 regVector = dvmRegisterMapGetLine(pMap, addr);
1888 }
1889 if (regVector == NULL) {
1890 /*
1891 * No register info for this frame, conservatively pin.
1892 */
1893 for (i = 0; i < method->registersSize; ++i) {
1894 u4 regValue = framePtr[i];
1895 if (regValue != 0 && (regValue & 0x3) == 0 && dvmIsValidObject((Object *)regValue)) {
1896 pinObject((Object *)regValue);
1897 }
1898 }
1899 }
1900 }
1901 /*
1902 * Don't fall into an infinite loop if things get corrupted.
1903 */
1904 assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
1905 saveArea->prevFrame == NULL);
1906 }
1907 }
1908
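/*
 * Pins the objects a thread exposes through non-movable references:
 * its stack, its internal and JNI local reference tables, and, when
 * present, its JNI monitor reference table.
 */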
1909 static void pinThread(const Thread *thread)
1910 {
1911 assert(thread != NULL);
1912 LOG_PIN("pinThread(thread=%p)", thread);
1913
1914 LOG_PIN("Pin native method arguments");
1915 pinThreadStack(thread);
1916
1917 LOG_PIN("Pin internalLocalRefTable");
1918 pinReferenceTable(&thread->internalLocalRefTable);
1919
1920 LOG_PIN("Pin jniLocalRefTable");
1921 pinReferenceTable(&thread->jniLocalRefTable);
1922
1923 /* Can the check be pushed into the promote routine? */
1924 if (thread->jniMonitorRefTable.table) {
1925 LOG_PIN("Pin jniMonitorRefTable");
1926 pinReferenceTable(&thread->jniMonitorRefTable);
1927 }
1928 }
1929
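/*
 * Pins every thread on the global thread list, holding the thread
 * list lock for the duration of the walk.
 */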
1930 static void pinThreadList(void)
1931 {
1932 Thread *thread;
1933
1934 dvmLockThreadList(dvmThreadSelf());
1935 thread = gDvm.threadList;
1936 while (thread) {
1937 pinThread(thread);
1938 thread = thread->next;
1939 }
1940 dvmUnlockThreadList();
1941 }
1942
1943 /*
1944 * Heap block scavenging.
1945 */
1946
1947 /*
1948 * Scavenge objects in the current block. Scavenging terminates when
1949 * the pointer reaches the highest address in the block or when a run
1950 * of zero words that continues to the highest address is reached.
1951 */
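/*
 * Illustrative block layout (a sketch only; exact sizes depend on
 * ALLOC_ALIGNMENT and the objects allocated):
 *
 *   | object | object | ... | object | 0 0 0 ... 0 |
 *   ^ block start                                   ^ block end
 *
 * Every object begins with a non-NULL class pointer word, so the
 * first zero word marks the start of padding that runs to the end of
 * the block.  For example, assuming ALLOC_ALIGNMENT is 8, a 14-byte
 * object advances the cursor by alignUp(14, 8) == 16 bytes.
 */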
1952 static void scavengeBlock(HeapSource *heapSource, size_t block)
1953 {
1954 u1 *cursor;
1955 u1 *end;
1956 size_t size;
1957
1958 LOG_SCAV("scavengeBlock(heapSource=%p,block=%zu)", heapSource, block);
1959
1960 assert(heapSource != NULL);
1961 assert(block < heapSource->totalBlocks);
1962 assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);
1963
1964 cursor = blockToAddress(heapSource, block);
1965 end = cursor + BLOCK_SIZE;
1966 LOG_SCAV("scavengeBlock start=%p, end=%p", cursor, end);
1967
1968 /* Parse and scavenge the current block. */
1969 size = 0;
1970 while (cursor < end) {
1971 u4 word = *(u4 *)cursor;
1972 if (word != 0) {
1973 scavengeObject((Object *)cursor);
1974 size = objectSize((Object *)cursor);
1975 size = alignUp(size, ALLOC_ALIGNMENT);
1976 cursor += size;
1977 } else {
1978 /* Check for padding. */
1979 while (*(u4 *)cursor == 0) {
1980 cursor += 4;
1981 if (cursor == end) break;
1982 }
1983 /* Punt if something went wrong. */
1984 assert(cursor == end);
1985 }
1986 }
1987 }
1988
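/*
 * Returns the size of an object in bytes, including the extra word
 * appended when a hashed object has been moved.
 */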
1989 static size_t objectSize(const Object *obj)
1990 {
1991 size_t size;
1992
1993 assert(obj != NULL);
1994 assert(obj->clazz != NULL);
1995 if (obj->clazz == gDvm.classJavaLangClass) {
1996 size = dvmClassObjectSize((ClassObject *)obj);
1997 } else if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) {
1998 size = dvmArrayObjectSize((ArrayObject *)obj);
1999 } else {
2000 assert(obj->clazz->objectSize != 0);
2001 size = obj->clazz->objectSize;
2002 }
2003 if (LW_HASH_STATE(obj->lock) == LW_HASH_STATE_HASHED_AND_MOVED) {
2004 size += sizeof(u4);
2005 }
2006 return size;
2007 }
2008
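/*
 * Walks a to-space block with the same parsing loop as scavengeBlock,
 * but verifies each object instead of scavenging it.
 */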
2009 static void verifyBlock(HeapSource *heapSource, size_t block)
2010 {
2011 u1 *cursor;
2012 u1 *end;
2013 size_t size;
2014
2015 // LOG_VER("verifyBlock(heapSource=%p,block=%zu)", heapSource, block);
2016
2017 assert(heapSource != NULL);
2018 assert(block < heapSource->totalBlocks);
2019 assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);
2020
2021 cursor = blockToAddress(heapSource, block);
2022 end = cursor + BLOCK_SIZE;
2023 // LOG_VER("verifyBlock start=%p, end=%p", cursor, end);
2024
2025 /* Parse and scavenge the current block. */
2026 size = 0;
2027 while (cursor < end) {
2028 u4 word = *(u4 *)cursor;
2029 if (word != 0) {
2030 dvmVerifyObject((Object *)cursor);
2031 size = objectSize((Object *)cursor);
2032 size = alignUp(size, ALLOC_ALIGNMENT);
2033 cursor += size;
2034 } else {
2035 /* Check for padding. */
2036 while (*(u4 *)cursor == 0) {
2037 cursor += 4;
2038 if (cursor == end) break;
2039 }
2040 /* Punt if something went wrong. */
2041 assert(cursor == end);
2042 }
2043 }
2044 }
2045
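/*
 * Logs the length of the block queue and the address and space of
 * each enqueued block.  Debugging aid only.
 */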
2046 static void describeBlockQueue(const HeapSource *heapSource)
2047 {
2048 size_t block, count;
2049 char space;
2050
2051 block = heapSource->queueHead;
2052 count = 0;
2053 LOG_SCAV(">>> describeBlockQueue(heapSource=%p)", heapSource);
2054 /* Count the number of blocks enqueued. */
2055 while (block != QUEUE_TAIL) {
2056 block = heapSource->blockQueue[block];
2057 ++count;
2058 }
2059 LOG_SCAV("blockQueue %zu elements, enqueued %zu",
2060 count, heapSource->queueSize);
2061 block = heapSource->queueHead;
2062 while (block != QUEUE_TAIL) {
2063 space = heapSource->blockSpace[block];
2064 LOG_SCAV("block=%zu@%p,space=%d", block, blockToAddress(heapSource,block), space);
2065 block = heapSource->blockQueue[block];
2066 }
2067
2068 LOG_SCAV("<<< describeBlockQueue(heapSource=%p)", heapSource);
2069 }
2070
2071 /*
2072 * Blackens promoted objects by scavenging every block on the block queue until the queue is empty.
2073 */
2074 static void scavengeBlockQueue(void)
2075 {
2076 HeapSource *heapSource;
2077 size_t block;
2078
2079 LOG_SCAV(">>> scavengeBlockQueue()");
2080 heapSource = gDvm.gcHeap->heapSource;
2081 describeBlockQueue(heapSource);
2082 while (heapSource->queueHead != QUEUE_TAIL) {
2083 block = heapSource->queueHead;
2084 LOG_SCAV("Dequeueing block %zu\n", block);
2085 scavengeBlock(heapSource, block);
2086 heapSource->queueHead = heapSource->blockQueue[block];
2087 LOG_SCAV("New queue head is %zu\n", heapSource->queueHead);
2088 }
2089 LOG_SCAV("<<< scavengeBlockQueue()");
2090 }
2091
2092 /*
2093 * Scan the block list and verify all blocks that are marked as being
2094 * in new space. This should be parametrized so we can invoke this
2095 * routine outside of the context of a collection.
2096 */
2097 static void verifyNewSpace(void)
2098 {
2099 HeapSource *heapSource;
2100 size_t i;
2101 size_t c0, c1, c2, c7;
2102
2103 c0 = c1 = c2 = c7 = 0;
2104 heapSource = gDvm.gcHeap->heapSource;
2105 for (i = 0; i < heapSource->totalBlocks; ++i) {
2106 switch (heapSource->blockSpace[i]) {
2107 case BLOCK_FREE: ++c0; break;
2108 case BLOCK_TO_SPACE: ++c1; break;
2109 case BLOCK_FROM_SPACE: ++c2; break;
2110 case BLOCK_CONTINUED: ++c7; break;
2111 default: assert(!"reached");
2112 }
2113 }
2114 LOG_VER("Block Demographics: "
2115 "Free=%zu,ToSpace=%zu,FromSpace=%zu,Continued=%zu",
2116 c0, c1, c2, c7);
2117 for (i = 0; i < heapSource->totalBlocks; ++i) {
2118 if (heapSource->blockSpace[i] != BLOCK_TO_SPACE) {
2119 continue;
2120 }
2121 verifyBlock(heapSource, i);
2122 }
2123 }
2124
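/*
 * Updates the well-known class pointers cached in gDvm so that they
 * point at the to-space copies of their referents.
 */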
2125 static void scavengeGlobals(void)
2126 {
2127 scavengeReference((Object **)(void *)&gDvm.classJavaLangClass);
2128 scavengeReference((Object **)(void *)&gDvm.classJavaLangClassArray);
2129 scavengeReference((Object **)(void *)&gDvm.classJavaLangError);
2130 scavengeReference((Object **)(void *)&gDvm.classJavaLangObject);
2131 scavengeReference((Object **)(void *)&gDvm.classJavaLangObjectArray);
2132 scavengeReference((Object **)(void *)&gDvm.classJavaLangRuntimeException);
2133 scavengeReference((Object **)(void *)&gDvm.classJavaLangString);
2134 scavengeReference((Object **)(void *)&gDvm.classJavaLangThread);
2135 scavengeReference((Object **)(void *)&gDvm.classJavaLangVMThread);
2136 scavengeReference((Object **)(void *)&gDvm.classJavaLangThreadGroup);
2137 scavengeReference((Object **)(void *)&gDvm.classJavaLangThrowable);
2138 scavengeReference((Object **)(void *)&gDvm.classJavaLangStackTraceElement);
2139 scavengeReference((Object **)(void *)&gDvm.classJavaLangStackTraceElementArray);
2140 scavengeReference((Object **)(void *)&gDvm.classJavaLangAnnotationAnnotationArray);
2141 scavengeReference((Object **)(void *)&gDvm.classJavaLangAnnotationAnnotationArrayArray);
2142 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectAccessibleObject);
2143 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectConstructor);
2144 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectConstructorArray);
2145 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectField);
2146 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectFieldArray);
2147 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectMethod);
2148 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectMethodArray);
2149 scavengeReference((Object **)(void *)&gDvm.classJavaLangReflectProxy);
2150 scavengeReference((Object **)(void *)&gDvm.classJavaLangExceptionInInitializerError);
2151 scavengeReference((Object **)(void *)&gDvm.classJavaLangRefReference);
2152 scavengeReference((Object **)(void *)&gDvm.classJavaNioReadWriteDirectByteBuffer);
2153 scavengeReference((Object **)(void *)&gDvm.classJavaSecurityAccessController);
2154 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory);
2155 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember);
2156 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMemberArray);
2157 scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyNioInternalDirectBuffer);
2158 scavengeReference((Object **)(void *)&gDvm.classArrayBoolean);
2159 scavengeReference((Object **)(void *)&gDvm.classArrayChar);
2160 scavengeReference((Object **)(void *)&gDvm.classArrayFloat);
2161 scavengeReference((Object **)(void *)&gDvm.classArrayDouble);
2162 scavengeReference((Object **)(void *)&gDvm.classArrayByte);
2163 scavengeReference((Object **)(void *)&gDvm.classArrayShort);
2164 scavengeReference((Object **)(void *)&gDvm.classArrayInt);
2165 scavengeReference((Object **)(void *)&gDvm.classArrayLong);
2166 }
2167
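/*
 * Logs a description of every block in the heap.  Debugging aid only.
 */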
2168 void describeHeap(void)
2169 {
2170 HeapSource *heapSource;
2171
2172 heapSource = gDvm.gcHeap->heapSource;
2173 describeBlocks(heapSource);
2174 }
2175
2176 /*
2177 * The collection interface. Collection has a few distinct phases.
2178 * The first is flipping AKA condemning AKA whitening the heap. The
2179 * second is to promote all objects which are pointed to by pinned or
2180 * ambiguous references. The third phase is tracing from the stacks,
2181 * registers and various globals. Lastly, a verification of the heap
2182 * is performed. The last phase should be optional.
2183 */
2184 void dvmScavengeRoots(void) /* Needs a new name badly */
2185 {
2186 GcHeap *gcHeap;
2187
2188 {
2189 size_t alloc, unused, total;
2190
2191 room(&alloc, &unused, &total);
2192 LOG_SCAV("BEFORE GC: %zu alloc, %zu free, %zu total.",
2193 alloc, unused, total);
2194 }
2195
2196 gcHeap = gDvm.gcHeap;
2197 dvmHeapSourceFlip();
2198
2199 /*
2200 * Promote blocks with stationary objects.
2201 */
2202 pinThreadList();
2203 pinReferenceTable(&gDvm.jniGlobalRefTable);
2204 pinReferenceTable(&gDvm.jniPinRefTable);
2205 pinHashTableEntries(gDvm.loadedClasses);
2206 pinHashTableEntries(gDvm.dbgRegistry);
2207 pinPrimitiveClasses();
2208 pinInternedStrings();
2209
2210 // describeBlocks(gcHeap->heapSource);
2211
2212 /*
2213 * Create the first open new-space page right here.
2214 */
2215
2216 /* Reset allocation to an unallocated block. */
2217 gDvm.gcHeap->heapSource->allocPtr = allocateBlocks(gDvm.gcHeap->heapSource, 1);
2218 gDvm.gcHeap->heapSource->allocLimit = gDvm.gcHeap->heapSource->allocPtr + BLOCK_SIZE;
2219 /*
2220 * Hack: promote the empty block allocated above. If the
2221 * promotions that occurred above did not actually gray any
2222 * objects, the block queue may be empty. We must force a
2223 * promotion to be safe.
2224 */
2225 promoteBlockByAddr(gDvm.gcHeap->heapSource, gDvm.gcHeap->heapSource->allocPtr);
2226
2227 /*
2228 * Scavenge blocks and relocate movable objects.
2229 */
2230
2231 LOG_SCAV("Scavenging gDvm.threadList");
2232 scavengeThreadList();
2233
2234 LOG_SCAV("Scavenging gDvm.gcHeap->referenceOperations");
2235 scavengeLargeHeapRefTable(gcHeap->referenceOperations);
2236
2237 LOG_SCAV("Scavenging gDvm.gcHeap->pendingFinalizationRefs");
2238 scavengeLargeHeapRefTable(gcHeap->pendingFinalizationRefs);
2239
2240 LOG_SCAV("Scavenging random global stuff");
2241 scavengeReference(&gDvm.outOfMemoryObj);
2242 scavengeReference(&gDvm.internalErrorObj);
2243 scavengeReference(&gDvm.noClassDefFoundErrorObj);
2244
2245 // LOG_SCAV("Scavenging gDvm.internedString");
2246 scavengeInternedStrings();
2247
2248 LOG_SCAV("Root scavenge has completed.");
2249
2250 scavengeBlockQueue();
2251
2252 LOG_SCAV("Re-snap global class pointers.");
2253 scavengeGlobals();
2254
2255 LOG_SCAV("New space scavenge has completed.");
2256
2257 /*
2258 * Process reference objects in strength order.
2259 */
2260
2261 LOG_REF("Processing soft references...");
2262 preserveSoftReferences(&gDvm.gcHeap->softReferences);
2263 clearWhiteReferences(&gDvm.gcHeap->softReferences);
2264
2265 LOG_REF("Processing weak references...");
2266 clearWhiteReferences(&gDvm.gcHeap->weakReferences);
2267
2268 LOG_REF("Finding finalizations...");
2269 processFinalizableReferences();
2270
2271 LOG_REF("Processing f-reachable soft references...");
2272 clearWhiteReferences(&gDvm.gcHeap->softReferences);
2273
2274 LOG_REF("Processing f-reachable weak references...");
2275 clearWhiteReferences(&gDvm.gcHeap->weakReferences);
2276
2277 LOG_REF("Processing phantom references...");
2278 clearWhiteReferences(&gDvm.gcHeap->phantomReferences);
2279
2280 /*
2281 * Verify the stack and heap.
2282 */
2283 dvmVerifyRoots();
2284 verifyNewSpace();
2285
2286 //describeBlocks(gcHeap->heapSource);
2287
2288 clearFromSpace(gcHeap->heapSource);
2289
2290 {
2291 size_t alloc, rem, total;
2292
2293 room(&alloc, &rem, &total);
2294 LOG_SCAV("AFTER GC: %zu alloc, %zu free, %zu total.", alloc, rem, total);
2295 }
2296 }
2297
2298 /*
2299 * Interface compatibility routines: stubs required by the common GC interface that are no-ops or trivial forwarders under this collector.
2300 */
2301
2302 void dvmClearWhiteRefs(Object **list)
2303 {
2304 /* do nothing */
2305 assert(*list == NULL);
2306 }
2307
2308 void dvmHandleSoftRefs(Object **list)
2309 {
2310 /* do nothing */
2311 assert(*list == NULL);
2312 }
2313
2314 bool dvmHeapBeginMarkStep(GcMode mode)
2315 {
2316 /* do nothing */
2317 return true;
2318 }
2319
2320 void dvmHeapFinishMarkStep(void)
2321 {
2322 /* do nothing */
2323 }
2324
2325 void dvmHeapMarkRootSet(void)
2326 {
2327 /* do nothing */
2328 }
2329
2330 void dvmHeapScanMarkedObjects(void)
2331 {
2332 dvmScavengeRoots();
2333 }
2334
2335 void dvmHeapScheduleFinalizations(void)
2336 {
2337 /* do nothing */
2338 }
2339
2340 void dvmHeapSweepUnmarkedObjects(GcMode mode, int *numFreed, size_t *sizeFreed)
2341 {
2342 *numFreed = 0;
2343 *sizeFreed = 0;
2344 /* do nothing */
2345 }
2346
2347 void dvmMarkObjectNonNull(const Object *obj)
2348 {
2349 assert(!"implemented");
2350 }
2351
2352 void dvmMarkDirtyObjects(void)
2353 {
2354 assert(!"implemented");
2355 }
2356
2357 void dvmHeapSourceThreadShutdown(void)
2358 {
2359 /* do nothing */
2360 }
2361