1  /*
2   * Copyright (C) 2009 The Android Open Source Project
3   *
4   * Licensed under the Apache License, Version 2.0 (the "License");
5   * you may not use this file except in compliance with the License.
6   * You may obtain a copy of the License at
7   *
8   *      http://www.apache.org/licenses/LICENSE-2.0
9   *
10   * Unless required by applicable law or agreed to in writing, software
11   * distributed under the License is distributed on an "AS IS" BASIS,
12   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13   * See the License for the specific language governing permissions and
14   * limitations under the License.
15   */
16  
17  #include <errno.h>
18  #include <limits.h>
19  #include <sys/mman.h>
20  
21  #include "Dalvik.h"
22  #include "alloc/Heap.h"
23  #include "alloc/HeapBitmap.h"
24  #include "alloc/HeapInternal.h"
25  #include "alloc/HeapSource.h"
26  #include "alloc/Verify.h"
27  
28  /*
29   * A "mostly copying", generational, garbage collector.
30   *
31   * TODO: we allocate our own contiguous tract of page frames to back
32   * object allocations.  To cooperate with other heaps active in the
33   * virtual machine we need to move the responsibility of allocating
34   * pages someplace outside of this code.
35   *
36   * The other major data structures that maintain the state of the heap
37   * are the block space table and the block queue.
38   *
39   * The block space table records the state of a block.  We must track
40   * whether a block is:
41   *
42   * - Free or allocated in some space.
43   *
44   * - If the block holds part of a large object allocation, whether the
45   *   block is the initial or a continued block of the allocation.
46   *
47   * - Whether the block is pinned, that is to say whether at least one
48   *   object in the block must remain stationary.  Only needed during a
49   *   GC.
50   *
51   * - Which space the block belongs to.  At present this means
52   *   from-space or to-space.
53   *
54   * The block queue is used during garbage collection.  Unlike Cheney's
55   * algorithm, from-space and to-space are not contiguous.  Therefore,
56   * one cannot maintain the state of the copy with just two pointers.
57   * The block queue exists to thread lists of blocks from the various
58   * spaces together.
59   *
60   * Additionally, we record the free space frontier of the heap, as
61   * well as the address of the first object within a block, which is
62   * required to copy objects following a large object (not currently
63   * implemented).  This is stored in the heap source structure.  This
64   * should be moved elsewhere to support in-line allocations from Java
65   * threads.
66   *
67   * Allocation requests are satisfied by reserving storage from one or
68   * more contiguous blocks.  Objects that are small enough to fit
69   * inside a block are packed together within a block.  Objects that
70   * are larger than a block are allocated from contiguous sequences of
71   * blocks.  When half the available blocks are filled, a garbage
72   * collection occurs.  We "flip" spaces (exchange from- and to-space),
73   * copy live objects into to-space, and perform pointer adjustment.
74   *
75   * Copying is made more complicated by the requirement that some
76   * objects must not be moved.  This property is known as "pinning".
77   * These objects must be dealt with specially.  We use Bartlett's
78   * scheme; blocks containing such objects are grayed (promoted) at the
79   * start of a garbage collection.  By virtue of this trick, tracing
80   * from the roots proceeds as usual but all objects on those pages are
81   * considered promoted and therefore not moved.
82   *
83   * TODO: there is sufficient information within the garbage collector
84   * to implement Attardi's scheme for evacuating unpinned objects from
85   * a page that is otherwise pinned.  This would eliminate false
86   * retention caused by the large pinning granularity.
87   *
88   * We need a scheme for medium and large objects.  Ignore that for
89   * now; we can return to this later.
90   *
91   * Eventually we need to worry about promoting objects out of the
92   * copy-collected heap (tenuring) into a less volatile space.  Copying
93   * may not always be the best policy for such spaces.  We should
94   * consider a variant of mark, sweep, compact.
95   *
96   * The block scheme allows us to use VM page faults to maintain a
97   * write barrier.  Consider having a special leaf state for a page.
98   *
99   * Bibliography:
100   *
101   * C. J. Cheney. 1970. A non-recursive list compacting
102   * algorithm. CACM. 13-11 pp677--678.
103   *
104   * Joel F. Bartlett. 1988. Compacting Garbage Collection with
105   * Ambiguous Roots. Digital Equipment Corporation.
106   *
107   * Joel F. Bartlett. 1989. Mostly-Copying Garbage Collection Picks Up
108   * Generations and C++. Digital Equipment Corporation.
109   *
110   * G. May Yip. 1991. Incremental, Generational Mostly-Copying Garbage
111   * Collection in Uncooperative Environments. Digital Equipment
112   * Corporation.
113   *
114   * Giuseppe Attardi, Tito Flagella. 1994. A Customisable Memory
115   * Management Framework. TR-94-010
116   *
117   * Giuseppe Attardi, Tito Flagella, Pietro Iglio. 1998. A customisable
118   * memory management framework for C++. Software -- Practice and
119   * Experience. 28(11), 1143-1183.
120   *
121   */
122  
123  #define ARRAYSIZE(x) (sizeof(x) / sizeof(x[0]))
124  
125  #if 0
126  #define LOG_ALLOC ALOGI
127  #define LOG_PIN ALOGI
128  #define LOG_PROM ALOGI
129  #define LOG_REF ALOGI
130  #define LOG_SCAV ALOGI
131  #define LOG_TRAN ALOGI
132  #define LOG_VER ALOGI
133  #else
134  #define LOG_ALLOC(...) ((void)0)
135  #define LOG_PIN(...) ((void)0)
136  #define LOG_PROM(...) ((void)0)
137  #define LOG_REF(...) ((void)0)
138  #define LOG_SCAV(...) ((void)0)
139  #define LOG_TRAN(...) ((void)0)
140  #define LOG_VER(...) ((void)0)
141  #endif
142  
143  static void enqueueBlock(HeapSource *heapSource, size_t block);
144  static void scavengeReference(Object **obj);
145  static bool toSpaceContains(const void *addr);
146  static bool fromSpaceContains(const void *addr);
147  static size_t sumHeapBitmap(const HeapBitmap *bitmap);
148  static size_t objectSize(const Object *obj);
149  static void scavengeDataObject(Object *obj);
150  static void scavengeBlockQueue();
151  
152  /*
153   * We use 512-byte blocks.
154   */
155  enum { BLOCK_SHIFT = 9 };
156  enum { BLOCK_SIZE = 1 << BLOCK_SHIFT };
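/*
 * Worked example of the block arithmetic: with BLOCK_SHIFT = 9,
 * BLOCK_SIZE is 1 << 9 = 512 bytes, and the block table index of an
 * address is computed as in addressToBlock() below:
 *
 *     block = ((uintptr_t)addr >> BLOCK_SHIFT) - heapSource->baseBlock;
 *
 * Two addresses therefore share a block exactly when they agree in all
 * but their low 9 bits.
 */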
157  
158  /*
159   * Space identifiers, stored into the blockSpace array.
160   */
161  enum {
162      BLOCK_FREE = 0,
163      BLOCK_FROM_SPACE = 1,
164      BLOCK_TO_SPACE = 2,
165      BLOCK_CONTINUED = 7
166  };
167  
168  /*
169   * Alignment for all allocations, in bytes.
170   */
171  enum { ALLOC_ALIGNMENT = 8 };
172  
173  /*
174   * Sentinel value for the queue end.
175   */
176  #define QUEUE_TAIL (~(size_t)0)
177  
178  struct HeapSource {
179  
180      /* The base address of backing store. */
181      u1 *blockBase;
182  
183      /* Total number of blocks available for allocation. */
184      size_t totalBlocks;
185      size_t allocBlocks;
186  
187      /*
188       * The scavenger work queue.  Implemented as an array in which
189       * each entry holds the index of the next block in the queue.
190       */
191      size_t *blockQueue;
192  
193      /*
194       * Base and limit blocks.  Basically the shifted start address of
195       * the block.  We convert blocks to a relative number when
196       * indexing in the block queue.  TODO: make the block queue base
197       * relative rather than the index into the block queue.
198       */
199      size_t baseBlock, limitBlock;
200  
201      size_t queueHead;
202      size_t queueTail;
203      size_t queueSize;
204  
205      /* The space of each block: 0 (free), 1 (from-space), 2 (to-space) or 7 (continued). */
206      char *blockSpace;
207  
208      /* Start of free space in the current block. */
209      u1 *allocPtr;
210      /* Exclusive limit of free space in the current block. */
211      u1 *allocLimit;
212  
213      HeapBitmap allocBits;
214  
215      /*
216       * The starting size of the heap.  This value is the same as the
217       * value provided to the -Xms flag.
218       */
219      size_t minimumSize;
220  
221      /*
222       * The maximum size of the heap.  This value is the same as the
223       * -Xmx flag.
224       */
225      size_t maximumSize;
226  
227      /*
228       * The current, committed size of the heap.  At present, this is
229       * equivalent to the maximumSize.
230       */
231      size_t currentSize;
232  
233      size_t bytesAllocated;
234  };
235  
236  static unsigned long alignDown(unsigned long x, unsigned long n)
237  {
238      return x & -n;
239  }
240  
241  static unsigned long alignUp(unsigned long x, unsigned long n)
242  {
243      return alignDown(x + (n - 1), n);
244  }
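/*
 * These helpers assume n is a power of two: -n is the two's-complement
 * mask that clears the low log2(n) bits.  For example, with the
 * 512-byte blocks above:
 *
 *     alignDown(1111, 512) == 1111 & ~511 == 1024
 *     alignUp(600, 512)    == alignDown(600 + 511, 512) == 1024
 *
 * so a 600-byte request rounds up to two blocks.
 */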
245  
246  static void describeBlocks(const HeapSource *heapSource)
247  {
248      for (size_t i = 0; i < heapSource->totalBlocks; ++i) {
249          if ((i % 32) == 0) putchar('\n');
250          printf("%d ", heapSource->blockSpace[i]);
251      }
252      putchar('\n');
253  }
254  
255  /*
256   * Virtual memory interface.
257   */
258  
259  static void *virtualAlloc(size_t length)
260  {
261      int flags = MAP_PRIVATE | MAP_ANONYMOUS;
262      int prot = PROT_READ | PROT_WRITE;
263      void *addr = mmap(NULL, length, prot, flags, -1, 0);
264      if (addr == MAP_FAILED) {
265          LOGE_HEAP("mmap: %s", strerror(errno));
266          addr = NULL;
267      }
268      return addr;
269  }
270  
271  static void virtualFree(void *addr, size_t length)
272  {
273      assert(addr != NULL);
274      assert((uintptr_t)addr % SYSTEM_PAGE_SIZE == 0);
275      int res = munmap(addr, length);
276      if (res == -1) {
277          LOGE_HEAP("munmap: %s", strerror(errno));
278      }
279  }
280  
281  #ifndef NDEBUG
282  static int isValidAddress(const HeapSource *heapSource, const u1 *addr)
283  {
284      size_t block;
285  
286      block = (uintptr_t)addr >> BLOCK_SHIFT;
287      return heapSource->baseBlock <= block &&
288             heapSource->limitBlock > block;
289  }
290  #endif
291  
292  /*
293   * Iterate over the block map looking for a contiguous run of free
294   * blocks.
295   */
296  static void *allocateBlocks(HeapSource *heapSource, size_t blocks)
297  {
298      size_t allocBlocks = heapSource->allocBlocks;
299      size_t totalBlocks = heapSource->totalBlocks;
300      /* Check underflow. */
301      assert(blocks != 0);
302      /* Check overflow. */
303      if (allocBlocks + blocks > totalBlocks / 2) {
304          return NULL;
305      }
306      /* Scan block map. */
307      for (size_t i = 0; i < totalBlocks; ++i) {
308          size_t j;  /* Check fit. */
309          for (j = 0; j < blocks; ++j) { /* runs over totalBlocks */
310              if (heapSource->blockSpace[i+j] != BLOCK_FREE) {
311                  break;
312              }
313          }
314          /* No fit? */
315          if (j != blocks) {
316              i += j;
317              continue;
318          }
319          /* Fit, allocate. */
320          heapSource->blockSpace[i] = BLOCK_TO_SPACE; /* why to-space? */
321          for (size_t j = 1; j < blocks; ++j) {
322              heapSource->blockSpace[i+j] = BLOCK_CONTINUED;
323          }
324          heapSource->allocBlocks += blocks;
325          void *addr = &heapSource->blockBase[i*BLOCK_SIZE];
326          memset(addr, 0, blocks*BLOCK_SIZE);
327          /* Collecting? */
328          if (heapSource->queueHead != QUEUE_TAIL) {
329              LOG_ALLOC("allocateBlocks allocBlocks=%zu,block#=%zu", heapSource->allocBlocks, i);
330              /*
331               * This allocation was on behalf of the transporter when it
332               * shaded a white object gray.  We enqueue the block so
333               * the scavenger can further shade the gray objects black.
334               */
335              enqueueBlock(heapSource, i);
336          }
337  
338          return addr;
339      }
340      /* Insufficient space, fail. */
341      ALOGE("Insufficient space, %zu blocks, %zu blocks allocated and %zu bytes allocated",
342           heapSource->totalBlocks,
343           heapSource->allocBlocks,
344           heapSource->bytesAllocated);
345      return NULL;
346  }
347  
348  /* Converts an absolute address to a relative block number. */
349  static size_t addressToBlock(const HeapSource *heapSource, const void *addr)
350  {
351      assert(heapSource != NULL);
352      assert(isValidAddress(heapSource, addr));
353      return (((uintptr_t)addr) >> BLOCK_SHIFT) - heapSource->baseBlock;
354  }
355  
356  /* Converts a relative block number to an absolute address. */
357  static u1 *blockToAddress(const HeapSource *heapSource, size_t block)
358  {
359      u1 *addr;
360  
361      addr = (u1 *) (((uintptr_t) heapSource->baseBlock + block) * BLOCK_SIZE);
362      assert(isValidAddress(heapSource, addr));
363      return addr;
364  }
365  
366  static void clearBlock(HeapSource *heapSource, size_t block)
367  {
368      assert(heapSource != NULL);
369      assert(block < heapSource->totalBlocks);
370      u1 *addr = heapSource->blockBase + block*BLOCK_SIZE;
371      memset(addr, 0xCC, BLOCK_SIZE);
372      for (size_t i = 0; i < BLOCK_SIZE; i += 8) {
373          dvmHeapBitmapClearObjectBit(&heapSource->allocBits, addr + i);
374      }
375  }
376  
377  static void clearFromSpace(HeapSource *heapSource)
378  {
379      assert(heapSource != NULL);
380      size_t i = 0;
381      size_t count = 0;
382      while (i < heapSource->totalBlocks) {
383          if (heapSource->blockSpace[i] != BLOCK_FROM_SPACE) {
384              ++i;
385              continue;
386          }
387          heapSource->blockSpace[i] = BLOCK_FREE;
388          clearBlock(heapSource, i);
389          ++i;
390          ++count;
391          while (i < heapSource->totalBlocks &&
392                 heapSource->blockSpace[i] == BLOCK_CONTINUED) {
393              heapSource->blockSpace[i] = BLOCK_FREE;
394              clearBlock(heapSource, i);
395              ++i;
396              ++count;
397          }
398      }
399      LOG_SCAV("freed %zu blocks (%zu bytes)", count, count*BLOCK_SIZE);
400  }
401  
402  /*
403   * Appends the given block to the block queue.  The block queue is
404   * processed in-order by the scavenger.
405   */
406  static void enqueueBlock(HeapSource *heapSource, size_t block)
407  {
408      assert(heapSource != NULL);
409      assert(block < heapSource->totalBlocks);
410      if (heapSource->queueHead != QUEUE_TAIL) {
411          heapSource->blockQueue[heapSource->queueTail] = block;
412      } else {
413          heapSource->queueHead = block;
414      }
415      heapSource->blockQueue[block] = QUEUE_TAIL;
416      heapSource->queueTail = block;
417      ++heapSource->queueSize;
418  }
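/*
 * Queue layout sketch: the queue is threaded through blockQueue, with
 * blockQueue[b] holding the block that follows b.  Starting from an
 * empty queue (queueHead == QUEUE_TAIL), enqueueBlock(hs, 3) followed
 * by enqueueBlock(hs, 7) leaves
 *
 *     queueHead == 3, blockQueue[3] == 7,
 *     queueTail == 7, blockQueue[7] == QUEUE_TAIL, queueSize == 2
 *
 * which the scavenger can walk from queueHead until it reads QUEUE_TAIL.
 */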
419  
420  /*
421   * Grays all objects within the block corresponding to the given
422   * address.
423   */
424  static void promoteBlockByAddr(HeapSource *heapSource, const void *addr)
425  {
426      size_t block;
427  
428      block = addressToBlock(heapSource, (const u1 *)addr);
429      if (heapSource->blockSpace[block] != BLOCK_TO_SPACE) {
430          // LOG_PROM("promoting block %zu %d @ %p", block, heapSource->blockSpace[block], obj);
431          heapSource->blockSpace[block] = BLOCK_TO_SPACE;
432          enqueueBlock(heapSource, block);
433          /* TODO(cshapiro): count continued blocks?*/
434          heapSource->allocBlocks += 1;
435      } else {
436          // LOG_PROM("NOT promoting block %zu %d @ %p", block, heapSource->blockSpace[block], obj);
437      }
438  }
439  
440  GcHeap *dvmHeapSourceStartup(size_t startSize, size_t absoluteMaxSize)
441  {
442      GcHeap* gcHeap;
443      HeapSource *heapSource;
444  
445      assert(startSize <= absoluteMaxSize);
446  
447      heapSource = calloc(1, sizeof(*heapSource));
448      assert(heapSource != NULL);
449  
450      heapSource->minimumSize = alignUp(startSize, BLOCK_SIZE);
451      heapSource->maximumSize = alignUp(absoluteMaxSize, BLOCK_SIZE);
452  
453      heapSource->currentSize = heapSource->maximumSize;
454  
455      /* Allocate underlying storage for blocks. */
456      heapSource->blockBase = virtualAlloc(heapSource->maximumSize);
457      assert(heapSource->blockBase != NULL);
458      heapSource->baseBlock = (uintptr_t) heapSource->blockBase >> BLOCK_SHIFT;
459      heapSource->limitBlock = ((uintptr_t) heapSource->blockBase + heapSource->maximumSize) >> BLOCK_SHIFT;
460  
461      heapSource->allocBlocks = 0;
462      heapSource->totalBlocks = (heapSource->limitBlock - heapSource->baseBlock);
463  
464      assert(heapSource->totalBlocks == heapSource->maximumSize / BLOCK_SIZE);
465  
466      {
467          size_t size = sizeof(heapSource->blockQueue[0]);
468          heapSource->blockQueue = malloc(heapSource->totalBlocks*size);
469          assert(heapSource->blockQueue != NULL);
470          memset(heapSource->blockQueue, 0xCC, heapSource->totalBlocks*size);
471          heapSource->queueHead = QUEUE_TAIL;
472      }
473  
474      /* Byte indicating space residence or free status of block. */
475      {
476          size_t size = sizeof(heapSource->blockSpace[0]);
477          heapSource->blockSpace = calloc(1, heapSource->totalBlocks*size);
478          assert(heapSource->blockSpace != NULL);
479      }
480  
481      dvmHeapBitmapInit(&heapSource->allocBits,
482                        heapSource->blockBase,
483                        heapSource->maximumSize,
484                        "blockBase");
485  
486      /* Initialize allocation pointers. */
487      heapSource->allocPtr = allocateBlocks(heapSource, 1);
488      heapSource->allocLimit = heapSource->allocPtr + BLOCK_SIZE;
489  
490      gcHeap = calloc(1, sizeof(*gcHeap));
491      assert(gcHeap != NULL);
492      gcHeap->heapSource = heapSource;
493  
494      return gcHeap;
495  }
496  
497  /*
498   * Perform any required heap initializations after forking from the
499   * zygote process.  This is a no-op for the time being.  Eventually
500   * this will demarcate the shared region of the heap.
501   */
502  bool dvmHeapSourceStartupAfterZygote()
503  {
504      return true;
505  }
506  
507  bool dvmHeapSourceStartupBeforeFork()
508  {
509      assert(!"implemented");
510      return false;
511  }
512  
513  void dvmHeapSourceShutdown(GcHeap **gcHeap)
514  {
515      if (*gcHeap == NULL || (*gcHeap)->heapSource == NULL)
516          return;
517      free((*gcHeap)->heapSource->blockQueue);
518      free((*gcHeap)->heapSource->blockSpace);
519      virtualFree((*gcHeap)->heapSource->blockBase,
520                  (*gcHeap)->heapSource->maximumSize);
521      free((*gcHeap)->heapSource);
522      (*gcHeap)->heapSource = NULL;
523      free(*gcHeap);
524      *gcHeap = NULL;
525  }
526  
527  size_t dvmHeapSourceGetValue(HeapSourceValueSpec spec,
528                               size_t perHeapStats[],
529                               size_t arrayLen)
530  {
531      HeapSource *heapSource;
532      size_t value;
533  
534      heapSource = gDvm.gcHeap->heapSource;
535      switch (spec) {
536      case HS_FOOTPRINT:
537          value = heapSource->maximumSize;
538          break;
539      case HS_ALLOWED_FOOTPRINT:
540          value = heapSource->maximumSize;
541          break;
542      case HS_BYTES_ALLOCATED:
543          value = heapSource->bytesAllocated;
544          break;
545      case HS_OBJECTS_ALLOCATED:
546          value = sumHeapBitmap(&heapSource->allocBits);
547          break;
548      default:
549          assert(!"implemented");
550          value = 0;
551      }
552      if (perHeapStats) {
553          *perHeapStats = value;
554      }
555      return value;
556  }
557  
558  /*
559   * Performs a shallow copy of the allocation bitmap into the given
560   * vector of heap bitmaps.
561   */
562  void dvmHeapSourceGetObjectBitmaps(HeapBitmap objBits[], HeapBitmap markBits[],
563                                     size_t numHeaps)
564  {
565      assert(!"implemented");
566  }
567  
568  HeapBitmap *dvmHeapSourceGetLiveBits()
569  {
570      return &gDvm.gcHeap->heapSource->allocBits;
571  }
572  
573  /*
574   * Allocate the specified number of bytes from the heap.  The
575   * allocation cursor points into a block of free storage.  If the
576   * given allocation fits in the remaining space of the block, we
577   * advance the cursor and return a pointer to the free storage.  If
578   * the allocation cannot fit in the current block but is smaller than
579   * a block we request a new block and allocate from it instead.  If
580   * the allocation is larger than a block we must allocate from a span
581   * of contiguous blocks.
582   */
583  void *dvmHeapSourceAlloc(size_t length)
584  {
585      HeapSource *heapSource;
586      unsigned char *addr;
587      size_t aligned, available, blocks;
588  
589      heapSource = gDvm.gcHeap->heapSource;
590      assert(heapSource != NULL);
591      assert(heapSource->allocPtr != NULL);
592      assert(heapSource->allocLimit != NULL);
593  
594      aligned = alignUp(length, ALLOC_ALIGNMENT);
595      available = heapSource->allocLimit - heapSource->allocPtr;
596  
597      /* Try allocating inside the current block. */
598      if (aligned <= available) {
599          addr = heapSource->allocPtr;
600          heapSource->allocPtr += aligned;
601          heapSource->bytesAllocated += aligned;
602          dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
603          return addr;
604      }
605  
606      /* Try allocating in a new block. */
607      if (aligned <= BLOCK_SIZE) {
608          addr =  allocateBlocks(heapSource, 1);
609          if (addr != NULL) {
610              heapSource->allocLimit = addr + BLOCK_SIZE;
611              heapSource->allocPtr = addr + aligned;
612              heapSource->bytesAllocated += aligned;
613              dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
614              /* TODO(cshapiro): pad out the current block. */
615          }
616          return addr;
617      }
618  
619      /* Try allocating in a span of blocks. */
620      blocks = alignUp(aligned, BLOCK_SIZE) / BLOCK_SIZE;
621  
622      addr = allocateBlocks(heapSource, blocks);
623      /* Propagate failure upward. */
624      if (addr != NULL) {
625          heapSource->bytesAllocated += aligned;
626          dvmHeapBitmapSetObjectBit(&heapSource->allocBits, addr);
627          /* TODO(cshapiro): pad out free space in the last block. */
628      }
629      return addr;
630  }
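/*
 * Allocation path examples (with ALLOC_ALIGNMENT = 8 and
 * BLOCK_SIZE = 512): a 20-byte request is padded to 24 bytes and, if
 * the current block has room, is bump-allocated from allocPtr; a
 * 500-byte request that does not fit in the current block gets a fresh
 * single block; a 1500-byte request is padded to 1504 bytes and takes
 * alignUp(1504, 512) / 512 == 3 contiguous blocks via allocateBlocks().
 */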
631  
632  void *dvmHeapSourceAllocAndGrow(size_t size)
633  {
634      return dvmHeapSourceAlloc(size);
635  }
636  
637  /* TODO: refactor along with dvmHeapSourceAlloc */
638  void *allocateGray(size_t size)
639  {
640      HeapSource *heapSource;
641      void *addr;
642      size_t block;
643  
644      /* TODO: add a check that we are in a GC. */
645      heapSource = gDvm.gcHeap->heapSource;
646      addr = dvmHeapSourceAlloc(size);
647      assert(addr != NULL);
648      block = addressToBlock(heapSource, (const u1 *)addr);
649      if (heapSource->queueHead == QUEUE_TAIL) {
650          /*
651           * Forcibly append the underlying block to the queue.  This
652           * condition occurs when referents are transported following
653           * the initial trace.
654           */
655          enqueueBlock(heapSource, block);
656          LOG_PROM("forced promoting block %zu %d @ %p", block, heapSource->blockSpace[block], addr);
657      }
658      return addr;
659  }
660  
661  bool dvmHeapSourceContainsAddress(const void *ptr)
662  {
663      HeapSource *heapSource = gDvm.gcHeap->heapSource;
664      return dvmHeapBitmapCoversAddress(&heapSource->allocBits, ptr);
665  }
666  
667  /*
668   * Returns true if the given address is within the heap and points to
669   * the header of a live object.
670   */
671  bool dvmHeapSourceContains(const void *addr)
672  {
673      HeapSource *heapSource;
674      HeapBitmap *bitmap;
675  
676      heapSource = gDvm.gcHeap->heapSource;
677      bitmap = &heapSource->allocBits;
678      if (!dvmHeapBitmapCoversAddress(bitmap, addr)) {
679          return false;
680      } else {
681          return dvmHeapBitmapIsObjectBitSet(bitmap, addr);
682      }
683  }
684  
685  bool dvmHeapSourceGetPtrFlag(const void *ptr, HeapSourcePtrFlag flag)
686  {
687      assert(!"implemented");
688      return false;
689  }
690  
691  size_t dvmHeapSourceChunkSize(const void *ptr)
692  {
693      assert(!"implemented");
694      return 0;
695  }
696  
697  size_t dvmHeapSourceFootprint()
698  {
699      assert(!"implemented");
700      return 0;
701  }
702  
703  /*
704   * Returns the "ideal footprint" which appears to be the number of
705   * bytes currently committed to the heap.  This starts out at the
706   * start size of the heap and grows toward the maximum size.
707   */
708  size_t dvmHeapSourceGetIdealFootprint()
709  {
710      return gDvm.gcHeap->heapSource->currentSize;
711  }
712  
713  float dvmGetTargetHeapUtilization()
714  {
715      return 0.5f;
716  }
717  
718  void dvmSetTargetHeapUtilization(float newTarget)
719  {
720      assert(newTarget > 0.0f && newTarget < 1.0f);
721  }
722  
723  /*
724   * Expands the size of the heap after a collection.  At present we
725   * commit the pages for maximum size of the heap so this routine is
726   * just a no-op.  Eventually, we will either allocate or commit pages
727   * on an as-need basis.
728   */
729  void dvmHeapSourceGrowForUtilization()
730  {
731      /* do nothing */
732  }
733  
734  void dvmHeapSourceWalk(void(*callback)(void* start, void* end,
735                                         size_t used_bytes, void* arg),
736                         void *arg)
737  {
738      assert(!"implemented");
739  }
740  
741  size_t dvmHeapSourceGetNumHeaps()
742  {
743      return 1;
744  }
745  
746  bool dvmTrackExternalAllocation(size_t n)
747  {
748      /* do nothing */
749      return true;
750  }
751  
752  void dvmTrackExternalFree(size_t n)
753  {
754      /* do nothing */
755  }
756  
757  size_t dvmGetExternalBytesAllocated()
758  {
759      assert(!"implemented");
760      return 0;
761  }
762  
763  void dvmHeapSourceFlip()
764  {
765      HeapSource *heapSource = gDvm.gcHeap->heapSource;
766  
767      /* Reset the block queue. */
768      heapSource->allocBlocks = 0;
769      heapSource->queueSize = 0;
770      heapSource->queueHead = QUEUE_TAIL;
771  
772      /* TODO(cshapiro): pad the current (prev) block. */
773  
774      heapSource->allocPtr = NULL;
775      heapSource->allocLimit = NULL;
776  
777      /* Whiten all allocated blocks. */
778      for (size_t i = 0; i < heapSource->totalBlocks; ++i) {
779          if (heapSource->blockSpace[i] == BLOCK_TO_SPACE) {
780              heapSource->blockSpace[i] = BLOCK_FROM_SPACE;
781          }
782      }
783  }
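/*
 * Collection cycle sketch: dvmHeapSourceFlip() relabels every to-space
 * block as from-space and resets the bump pointer and block queue.
 * Scavenging then transports live objects into freshly allocated
 * to-space blocks (allocateGray/transportObject), pinned blocks are
 * promoted in place, and clearFromSpace() finally reclaims whatever is
 * left in from-space.
 */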
784  
785  static void room(size_t *alloc, size_t *avail, size_t *total)
786  {
787      HeapSource *heapSource = gDvm.gcHeap->heapSource;
788      *total = heapSource->totalBlocks*BLOCK_SIZE;
789      *alloc = heapSource->allocBlocks*BLOCK_SIZE;
790      *avail = *total - *alloc;
791  }
792  
793  static bool isSpaceInternal(u1 *addr, int space)
794  {
795      HeapSource *heapSource;
796      u1 *base, *limit;
797      size_t offset;
798      char space2;
799  
800      heapSource = gDvm.gcHeap->heapSource;
801      base = heapSource->blockBase;
802      assert(addr >= base);
803      limit = heapSource->blockBase + heapSource->maximumSize;
804      assert(addr < limit);
805      offset = addr - base;
806      space2 = heapSource->blockSpace[offset >> BLOCK_SHIFT];
807      return space == space2;
808  }
809  
810  static bool fromSpaceContains(const void *addr)
811  {
812      return isSpaceInternal((u1 *)addr, BLOCK_FROM_SPACE);
813  }
814  
815  static bool toSpaceContains(const void *addr)
816  {
817      return isSpaceInternal((u1 *)addr, BLOCK_TO_SPACE);
818  }
819  
820  /*
821   * Notifies the collector that the object at the given address must
822   * remain stationary during the current collection.
823   */
824  static void pinObject(const Object *obj)
825  {
826      promoteBlockByAddr(gDvm.gcHeap->heapSource, obj);
827  }
828  
829  static size_t sumHeapBitmap(const HeapBitmap *bitmap)
830  {
831      size_t sum = 0;
832      for (size_t i = 0; i < bitmap->bitsLen >> 2; ++i) {
833          sum += CLZ(bitmap->bits[i]);
834      }
835      return sum;
836  }
837  
838  /*
839   * Miscellaneous functionality.
840   */
841  
842  static int isForward(const void *addr)
843  {
844      return (uintptr_t)addr & 0x1;
845  }
846  
847  static void setForward(const void *toObj, void *fromObj)
848  {
849      *(unsigned long *)fromObj = (uintptr_t)toObj | 0x1;
850  }
851  
852  static void* getForward(const void *fromObj)
853  {
854      return (void *)((uintptr_t)fromObj & ~0x1);
855  }
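/*
 * Forwarding encoding sketch: objects are 8-byte aligned, so the low
 * bit of a class pointer is normally clear.  After transportObject()
 * copies fromObj to toObj, setForward(toObj, fromObj) overwrites the
 * first word of fromObj (its clazz slot) with (uintptr_t)toObj | 0x1;
 * isForward() tests that bit and getForward() masks it off to recover
 * toObj.
 */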
856  
857  /* Beware, uses the same encoding as a forwarding pointer! */
858  static int isPermanentString(const StringObject *obj) {
859      return (uintptr_t)obj & 0x1;
860  }
861  
862  static void* getPermanentString(const StringObject *obj)
863  {
864      return (void *)((uintptr_t)obj & ~0x1);
865  }
866  
867  
868  /*
869   * Scavenging and transporting routines follow.  A transporter grays
870   * an object.  A scavenger blackens an object.  We define these
871   * routines for each fundamental object type.  Dispatch is performed
872   * in scavengeObject.
873   */
874  
875  /*
876   * Class object scavenging.
877   */
878  static void scavengeClassObject(ClassObject *obj)
879  {
880      LOG_SCAV("scavengeClassObject(obj=%p)", obj);
881      assert(obj != NULL);
882      assert(obj->obj.clazz != NULL);
883      assert(obj->obj.clazz->descriptor != NULL);
884      assert(!strcmp(obj->obj.clazz->descriptor, "Ljava/lang/Class;"));
885      assert(obj->descriptor != NULL);
886      LOG_SCAV("scavengeClassObject: descriptor='%s',vtableCount=%zu",
887               obj->descriptor, obj->vtableCount);
888      /* Delegate class object and instance field scavenging. */
889      scavengeDataObject((Object *)obj);
890      /* Scavenge the array element class object. */
891      if (IS_CLASS_FLAG_SET(obj, CLASS_ISARRAY)) {
892          scavengeReference((Object **)(void *)&obj->elementClass);
893      }
894      /* Scavenge the superclass. */
895      scavengeReference((Object **)(void *)&obj->super);
896      /* Scavenge the class loader. */
897      scavengeReference(&obj->classLoader);
898      /* Scavenge static fields. */
899      for (int i = 0; i < obj->sfieldCount; ++i) {
900          char ch = obj->sfields[i].field.signature[0];
901          if (ch == '[' || ch == 'L') {
902              scavengeReference((Object **)(void *)&obj->sfields[i].value.l);
903          }
904      }
905      /* Scavenge interface class objects. */
906      for (int i = 0; i < obj->interfaceCount; ++i) {
907          scavengeReference((Object **) &obj->interfaces[i]);
908      }
909  }
910  
911  /*
912   * Array object scavenging.
913   */
914  static size_t scavengeArrayObject(ArrayObject *array)
915  {
916      LOG_SCAV("scavengeArrayObject(array=%p)", array);
917      /* Scavenge the class object. */
918      assert(toSpaceContains(array));
919      assert(array != NULL);
920      assert(array->obj.clazz != NULL);
921      scavengeReference((Object **) array);
922      size_t length = dvmArrayObjectSize(array);
923      /* Scavenge the array contents. */
924      if (IS_CLASS_FLAG_SET(array->obj.clazz, CLASS_ISOBJECTARRAY)) {
925          Object **contents = (Object **)array->contents;
926          for (size_t i = 0; i < array->length; ++i) {
927              scavengeReference(&contents[i]);
928          }
929      }
930      return length;
931  }
932  
933  /*
934   * Reference object scavenging.
935   */
936  
937  static int getReferenceFlags(const Object *obj)
938  {
939      int flags;
940  
941      flags = CLASS_ISREFERENCE |
942              CLASS_ISWEAKREFERENCE |
943              CLASS_ISPHANTOMREFERENCE;
944      return GET_CLASS_FLAG_GROUP(obj->clazz, flags);
945  }
946  
947  static int isSoftReference(const Object *obj)
948  {
949      return getReferenceFlags(obj) == CLASS_ISREFERENCE;
950  }
951  
952  static int isWeakReference(const Object *obj)
953  {
954      return getReferenceFlags(obj) & CLASS_ISWEAKREFERENCE;
955  }
956  
957  #ifndef NDEBUG
958  static bool isPhantomReference(const Object *obj)
959  {
960      return getReferenceFlags(obj) & CLASS_ISPHANTOMREFERENCE;
961  }
962  #endif
963  
964  /*
965   * Returns true if the reference was registered with a reference queue
966   * but has not yet been appended to it.
967   */
968  static bool isReferenceEnqueuable(const Object *ref)
969  {
970      Object *queue, *queueNext;
971  
972      queue = dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queue);
973      queueNext = dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queueNext);
974      if (queue == NULL || queueNext != NULL) {
975          /*
976           * There is no queue, or the reference has already
977           * been enqueued.  The Reference.enqueue() method
978           * will do nothing even if we call it.
979           */
980          return false;
981      }
982  
983      /*
984       * We need to call enqueue(), but if we called it from
985       * here we'd probably deadlock.  Schedule a call.
986       */
987      return true;
988  }
989  
990  /*
991   * Schedules a reference to be appended to its reference queue.
992   */
993  static void enqueueReference(Object *ref)
994  {
995      assert(ref != NULL);
996      assert(dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queue) != NULL);
997      assert(dvmGetFieldObject(ref, gDvm.offJavaLangRefReference_queueNext) == NULL);
998      if (!dvmHeapAddRefToLargeTable(&gDvm.gcHeap->referenceOperations, ref)) {
999          ALOGE("no room for any more reference operations");
1000          dvmAbort();
1001      }
1002  }
1003  
1004  /*
1005   * Sets the referent field of a reference object to NULL.
1006   */
1007  static void clearReference(Object *obj)
1008  {
1009      dvmSetFieldObject(obj, gDvm.offJavaLangRefReference_referent, NULL);
1010  }
1011  
1012  /*
1013   * Clears reference objects with white referents.
1014   */
1015  void clearWhiteReferences(Object **list)
1016  {
1017      size_t referentOffset, queueNextOffset;
1018      bool doSignal;
1019  
1020      queueNextOffset = gDvm.offJavaLangRefReference_queueNext;
1021      referentOffset = gDvm.offJavaLangRefReference_referent;
1022      doSignal = false;
1023      while (*list != NULL) {
1024          Object *ref = *list;
1025          JValue *field = dvmFieldPtr(ref, referentOffset);
1026          Object *referent = field->l;
1027          *list = dvmGetFieldObject(ref, queueNextOffset);
1028          dvmSetFieldObject(ref, queueNextOffset, NULL);
1029          assert(referent != NULL);
1030          if (isForward(referent->clazz)) {
1031              field->l = referent = getForward(referent->clazz);
1032              continue;
1033          }
1034          if (fromSpaceContains(referent)) {
1035              /* Referent is white, clear it. */
1036              clearReference(ref);
1037              if (isReferenceEnqueuable(ref)) {
1038                  enqueueReference(ref);
1039                  doSignal = true;
1040              }
1041          }
1042      }
1043      /*
1044       * If we cleared a reference with a reference queue we must notify
1045       * the heap worker to append the reference.
1046       */
1047      if (doSignal) {
1048          dvmSignalHeapWorker(false);
1049      }
1050      assert(*list == NULL);
1051  }
1052  
1053  /*
1054   * Blackens referents subject to the soft reference preservation
1055   * policy.
1056   */
1057  void preserveSoftReferences(Object **list)
1058  {
1059      Object *ref;
1060      Object *prev, *next;
1061      size_t referentOffset, queueNextOffset;
1062      unsigned counter;
1063      bool white;
1064  
1065      queueNextOffset = gDvm.offJavaLangRefReference_queueNext;
1066      referentOffset = gDvm.offJavaLangRefReference_referent;
1067      counter = 0;
1068      prev = next = NULL;
1069      ref = *list;
1070      while (ref != NULL) {
1071          JValue *field = dvmFieldPtr(ref, referentOffset);
1072          Object *referent = field->l;
1073          next = dvmGetFieldObject(ref, queueNextOffset);
1074          assert(referent != NULL);
1075          if (isForward(referent->clazz)) {
1076              /* Referent is black. */
1077              field->l = referent = getForward(referent->clazz);
1078              white = false;
1079          } else {
1080              white = fromSpaceContains(referent);
1081          }
1082          if (white && ((++counter) & 1)) {
1083              /* Referent is white and biased toward saving, gray it. */
1084              scavengeReference((Object **)(void *)&field->l);
1085              white = false;
1086          }
1087          if (!white) {
1088              /* Referent is black, unlink it. */
1089              if (prev != NULL) {
1090                  dvmSetFieldObject(ref, queueNextOffset, NULL);
1091                  dvmSetFieldObject(prev, queueNextOffset, next);
1092              }
1093          } else {
1094              /* Referent is white, skip over it. */
1095              prev = ref;
1096          }
1097          ref = next;
1098      }
1099      /*
1100       * Restart the trace with the newly gray references added to the
1101       * root set.
1102       */
1103      scavengeBlockQueue();
1104  }
1105  
1106  void processFinalizableReferences()
1107  {
1108      HeapRefTable newPendingRefs;
1109      LargeHeapRefTable *finRefs = gDvm.gcHeap->finalizableRefs;
1110      Object **ref;
1111      Object **lastRef;
1112      size_t totalPendCount;
1113  
1114      /*
1115       * All strongly reachable objects are black.
1116       * Any white finalizable objects need to be finalized.
1117       */
1118  
1119      /* Create a table that the new pending refs will
1120       * be added to.
1121       */
1122      if (!dvmHeapInitHeapRefTable(&newPendingRefs)) {
1123          //TODO: mark all finalizable refs and hope that
1124          //      we can schedule them next time.  Watch out,
1125          //      because we may be expecting to free up space
1126          //      by calling finalizers.
1127          LOG_REF("no room for pending finalizations");
1128          dvmAbort();
1129      }
1130  
1131      /*
1132       * Walk through finalizableRefs and move any white references to
1133       * the list of new pending refs.
1134       */
1135      totalPendCount = 0;
1136      while (finRefs != NULL) {
1137          Object **gapRef;
1138          size_t newPendCount = 0;
1139  
1140          gapRef = ref = finRefs->refs.table;
1141          lastRef = finRefs->refs.nextEntry;
1142          while (ref < lastRef) {
1143              if (fromSpaceContains(*ref)) {
1144                  if (!dvmHeapAddToHeapRefTable(&newPendingRefs, *ref)) {
1145                      //TODO: add the current table and allocate
1146                      //      a new, smaller one.
1147                      LOG_REF("no room for any more pending finalizations: %zd",
1148                              dvmHeapNumHeapRefTableEntries(&newPendingRefs));
1149                      dvmAbort();
1150                  }
1151                  newPendCount++;
1152              } else {
1153                  /* This ref is black, so will remain on finalizableRefs.
1154                   */
1155                  if (newPendCount > 0) {
1156                      /* Copy it up to fill the holes.
1157                       */
1158                      *gapRef++ = *ref;
1159                  } else {
1160                      /* No holes yet; don't bother copying.
1161                       */
1162                      gapRef++;
1163                  }
1164              }
1165              ref++;
1166          }
1167          finRefs->refs.nextEntry = gapRef;
1168          //TODO: if the table is empty when we're done, free it.
1169          totalPendCount += newPendCount;
1170          finRefs = finRefs->next;
1171      }
1172      LOG_REF("%zd finalizers triggered.", totalPendCount);
1173      if (totalPendCount == 0) {
1174          /* No objects required finalization.
1175           * Free the empty temporary table.
1176           */
1177          dvmClearReferenceTable(&newPendingRefs);
1178          return;
1179      }
1180  
1181      /* Add the new pending refs to the main list.
1182       */
1183      if (!dvmHeapAddTableToLargeTable(&gDvm.gcHeap->pendingFinalizationRefs,
1184                  &newPendingRefs))
1185      {
1186          LOG_REF("can't insert new pending finalizations");
1187          dvmAbort();
1188      }
1189  
1190      //TODO: try compacting the main list with a memcpy loop
1191  
1192      /* Blacken the refs we just moved; we don't want them or their
1193       * children to get swept yet.
1194       */
1195      ref = newPendingRefs.table;
1196      lastRef = newPendingRefs.nextEntry;
1197      assert(ref < lastRef);
1198      HPROF_SET_GC_SCAN_STATE(HPROF_ROOT_FINALIZING, 0);
1199      while (ref < lastRef) {
1200          scavengeReference(ref);
1201          ref++;
1202      }
1203      HPROF_CLEAR_GC_SCAN_STATE();
1204      scavengeBlockQueue();
1205      dvmSignalHeapWorker(false);
1206  }
1207  
1208  /*
1209   * If a reference points to from-space and has been forwarded, we snap
1210   * the pointer to its new to-space address.  If the reference points
1211   * to an unforwarded from-space address we must enqueue the reference
1212   * for later processing.  TODO: implement proper reference processing
1213   * and move the referent scavenging elsewhere.
1214   */
1215  static void scavengeReferenceObject(Object *obj)
1216  {
1217      Object *referent;
1218      Object **queue;
1219      size_t referentOffset, queueNextOffset;
1220  
1221      assert(obj != NULL);
1222      LOG_SCAV("scavengeReferenceObject(obj=%p),'%s'", obj, obj->clazz->descriptor);
1223      scavengeDataObject(obj);
1224      referentOffset = gDvm.offJavaLangRefReference_referent;
1225      referent = dvmGetFieldObject(obj, referentOffset);
1226      if (referent == NULL || toSpaceContains(referent)) {
1227          return;
1228      }
1229      if (isSoftReference(obj)) {
1230          queue = &gDvm.gcHeap->softReferences;
1231      } else if (isWeakReference(obj)) {
1232          queue = &gDvm.gcHeap->weakReferences;
1233      } else {
1234          assert(isPhantomReference(obj));
1235          queue = &gDvm.gcHeap->phantomReferences;
1236      }
1237      queueNextOffset = gDvm.offJavaLangRefReference_queueNext;
1238      dvmSetFieldObject(obj, queueNextOffset, *queue);
1239      *queue = obj;
1240      LOG_SCAV("scavengeReferenceObject: enqueueing %p", obj);
1241  }
1242  
1243  /*
1244   * Data object scavenging.
1245   */
1246  static void scavengeDataObject(Object *obj)
1247  {
1248      // LOG_SCAV("scavengeDataObject(obj=%p)", obj);
1249      assert(obj != NULL);
1250      assert(obj->clazz != NULL);
1251      assert(obj->clazz->objectSize != 0);
1252      assert(toSpaceContains(obj));
1253      /* Scavenge the class object. */
1254      ClassObject *clazz = obj->clazz;
1255      scavengeReference((Object **) obj);
1256      /* Scavenge instance fields. */
1257      if (clazz->refOffsets != CLASS_WALK_SUPER) {
1258          size_t refOffsets = clazz->refOffsets;
1259          while (refOffsets != 0) {
1260              size_t rshift = CLZ(refOffsets);
1261              size_t offset = CLASS_OFFSET_FROM_CLZ(rshift);
1262              Object **ref = (Object **)((u1 *)obj + offset);
1263              scavengeReference(ref);
1264              refOffsets &= ~(CLASS_HIGH_BIT >> rshift);
1265          }
1266      } else {
1267          for (; clazz != NULL; clazz = clazz->super) {
1268              InstField *field = clazz->ifields;
1269              for (int i = 0; i < clazz->ifieldRefCount; ++i, ++field) {
1270                  size_t offset = field->byteOffset;
1271                  Object **ref = (Object **)((u1 *)obj + offset);
1272                  scavengeReference(ref);
1273              }
1274          }
1275      }
1276  }
1277  
1278  static Object *transportObject(const Object *fromObj)
1279  {
1280      Object *toObj;
1281      size_t allocSize, copySize;
1282  
1283      LOG_TRAN("transportObject(fromObj=%p) allocBlocks=%zu",
1284                    fromObj,
1285                    gDvm.gcHeap->heapSource->allocBlocks);
1286      assert(fromObj != NULL);
1287      assert(fromSpaceContains(fromObj));
1288      allocSize = copySize = objectSize(fromObj);
1289      if (LW_HASH_STATE(fromObj->lock) != LW_HASH_STATE_UNHASHED) {
1290          /*
1291           * The object has been hashed or hashed and moved.  We must
1292           * reserve an additional word for a hash code.
1293           */
1294          allocSize += sizeof(u4);
1295      }
1296      if (LW_HASH_STATE(fromObj->lock) == LW_HASH_STATE_HASHED_AND_MOVED) {
1297          /*
1298           * The object has its hash code allocated.  Ensure the hash
1299           * code is copied along with the instance data.
1300           */
1301          copySize += sizeof(u4);
1302      }
1303      /* TODO(cshapiro): don't copy, re-map large data objects. */
1304      assert(copySize <= allocSize);
1305      toObj = allocateGray(allocSize);
1306      assert(toObj != NULL);
1307      assert(toSpaceContains(toObj));
1308      memcpy(toObj, fromObj, copySize);
1309      if (LW_HASH_STATE(fromObj->lock) == LW_HASH_STATE_HASHED) {
1310          /*
1311           * The object has had its hash code exposed.  Append it to the
1312           * instance and set a bit so we know to look for it there.
1313           */
1314          *(u4 *)(((char *)toObj) + copySize) = (u4)fromObj >> 3;
1315          toObj->lock |= LW_HASH_STATE_HASHED_AND_MOVED << LW_HASH_STATE_SHIFT;
1316      }
1317      LOG_TRAN("transportObject: from %p/%zu to %p/%zu (%zu,%zu) %s",
1318               fromObj, addressToBlock(gDvm.gcHeap->heapSource,fromObj),
1319               toObj, addressToBlock(gDvm.gcHeap->heapSource,toObj),
1320               copySize, allocSize, copySize < allocSize ? "DIFFERENT" : "");
1321      return toObj;
1322  }
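/*
 * Hash state sketch for the copy above: an UNHASHED object is copied
 * verbatim; a HASHED object gets one extra word of to-space in which
 * the old address (shifted right by 3) is stored, and its lock word is
 * marked HASHED_AND_MOVED; a HASHED_AND_MOVED object already carries
 * that word, so copySize includes it and nothing more is appended.
 */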
1323  
1324  /*
1325   * Generic reference scavenging.
1326   */
1327  
1328  /*
1329   * Given a reference to an object, the scavenge routine will gray the
1330   * reference.  Any objects pointed to by the scavenged object will be
1331   * transported to new space and a forwarding pointer will be installed
1332   * in the header of the object.
1333   */
1334  
1335  /*
1336   * Blacken the given pointer.  If the pointer is in from space, it is
1337   * transported to new space.  If the object has a forwarding pointer
1338   * installed it has already been transported and the referent is
1339   * snapped to the new address.
1340   */
1341  static void scavengeReference(Object **obj)
1342  {
1343      ClassObject *clazz;
1344      Object *fromObj, *toObj;
1345  
1346      assert(obj);
1347  
1348      if (*obj == NULL) return;
1349  
1350      assert(dvmIsValidObject(*obj));
1351  
1352      /* The entire block is black. */
1353      if (toSpaceContains(*obj)) {
1354          LOG_SCAV("scavengeReference skipping pinned object @ %p", *obj);
1355          return;
1356      }
1357      LOG_SCAV("scavengeReference(*obj=%p)", *obj);
1358  
1359      assert(fromSpaceContains(*obj));
1360  
1361      clazz = (*obj)->clazz;
1362  
1363      if (isForward(clazz)) {
1364          // LOG_SCAV("forwarding %p @ %p to %p", *obj, obj, (void *)((uintptr_t)clazz & ~0x1));
1365          *obj = (Object *)getForward(clazz);
1366          return;
1367      }
1368      fromObj = *obj;
1369      if (clazz == NULL) {
1370          // LOG_SCAV("scavengeReference %p has a NULL class object", fromObj);
1371          assert(!"implemented");
1372          toObj = NULL;
1373      } else {
1374          toObj = transportObject(fromObj);
1375      }
1376      setForward(toObj, fromObj);
1377      *obj = (Object *)toObj;
1378  }
1379  
1380  /*
1381   * Generic object scavenging.
1382   */
1383  static void scavengeObject(Object *obj)
1384  {
1385      ClassObject *clazz;
1386  
1387      assert(obj != NULL);
1388      assert(obj->clazz != NULL);
1389      assert(!((uintptr_t)obj->clazz & 0x1));
1390      clazz = obj->clazz;
1391      if (dvmIsTheClassClass(clazz)) {
1392          scavengeClassObject((ClassObject *)obj);
1393      } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
1394          scavengeArrayObject((ArrayObject *)obj);
1395      } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISREFERENCE)) {
1396          scavengeReferenceObject(obj);
1397      } else {
1398          scavengeDataObject(obj);
1399      }
1400  }
1401  
1402  /*
1403   * External root scavenging routines.
1404   */
1405  
1406  static void pinHashTableEntries(HashTable *table)
1407  {
1408      LOG_PIN(">>> pinHashTableEntries(table=%p)", table);
1409      if (table == NULL) {
1410          return;
1411      }
1412      dvmHashTableLock(table);
1413      for (int i = 0; i < table->tableSize; ++i) {
1414          HashEntry *entry = &table->pEntries[i];
1415          void *obj = entry->data;
1416          if (obj == NULL || obj == HASH_TOMBSTONE) {
1417              continue;
1418          }
1419          pinObject(entry->data);
1420      }
1421      dvmHashTableUnlock(table);
1422      LOG_PIN("<<< pinHashTableEntries(table=%p)", table);
1423  }
1424  
1425  static void pinPrimitiveClasses()
1426  {
1427      size_t length = ARRAYSIZE(gDvm.primitiveClass);
1428      for (size_t i = 0; i < length; i++) {
1429          if (gDvm.primitiveClass[i] != NULL) {
1430              pinObject((Object *)gDvm.primitiveClass[i]);
1431          }
1432      }
1433  }
1434  
1435  /*
1436   * Scavenge interned strings.  Permanent interned strings will have
1437   * been pinned and are therefore ignored.  Non-permanent strings that
1438   * have been forwarded are snapped.  All other entries are removed.
1439   */
1440  static void scavengeInternedStrings()
1441  {
1442      HashTable *table = gDvm.internedStrings;
1443      if (table == NULL) {
1444          return;
1445      }
1446      dvmHashTableLock(table);
1447      for (int i = 0; i < table->tableSize; ++i) {
1448          HashEntry *entry = &table->pEntries[i];
1449          Object *obj = (Object *)entry->data;
1450          if (obj == NULL || obj == HASH_TOMBSTONE) {
1451              continue;
1452          } else if (!isPermanentString((StringObject *)obj)) {
1453              // LOG_SCAV("entry->data=%p", entry->data);
1454              LOG_SCAV(">>> string obj=%p", entry->data);
1455              /* TODO(cshapiro): detach white string objects */
1456              scavengeReference((Object **)(void *)&entry->data);
1457              LOG_SCAV("<<< string obj=%p", entry->data);
1458          }
1459      }
1460      dvmHashTableUnlock(table);
1461  }
1462  
1463  static void pinInternedStrings()
1464  {
1465      HashTable *table = gDvm.internedStrings;
1466      if (table == NULL) {
1467          return;
1468      }
1469      dvmHashTableLock(table);
1470      for (int i = 0; i < table->tableSize; ++i) {
1471          HashEntry *entry = &table->pEntries[i];
1472          Object *obj = (Object *)entry->data;
1473          if (obj == NULL || obj == HASH_TOMBSTONE) {
1474              continue;
1475          } else if (isPermanentString((StringObject *)obj)) {
1476              obj = (Object *)getPermanentString((StringObject*)obj);
1477              LOG_PROM(">>> pin string obj=%p", obj);
1478              pinObject(obj);
1479              LOG_PROM("<<< pin string obj=%p", obj);
1480          }
1481       }
1482      dvmHashTableUnlock(table);
1483  }
1484  
1485  /*
1486   * At present, reference tables contain references that must not be
1487   * moved by the collector.  Instead of scavenging each reference in
1488   * the table we pin each referenced object.
1489   */
1490  static void pinReferenceTable(const ReferenceTable *table)
1491  {
1492      assert(table != NULL);
1493      assert(table->table != NULL);
1494      assert(table->nextEntry != NULL);
1495      for (Object **entry = table->table; entry < table->nextEntry; ++entry) {
1496          assert(entry != NULL);
1497          assert(!isForward(*entry));
1498          pinObject(*entry);
1499      }
1500  }
1501  
1502  static void scavengeLargeHeapRefTable(LargeHeapRefTable *table)
1503  {
1504      for (; table != NULL; table = table->next) {
1505          Object **ref = table->refs.table;
1506          for (; ref < table->refs.nextEntry; ++ref) {
1507              scavengeReference(ref);
1508          }
1509      }
1510  }
1511  
1512  /* This code was copied from Thread.c */
1513  static void scavengeThreadStack(Thread *thread)
1514  {
1515      const u4 *framePtr;
1516  #if WITH_EXTRA_GC_CHECKS > 1
1517      bool first = true;
1518  #endif
1519  
1520      framePtr = (const u4 *)thread->interpSave.curFrame;
1521      while (framePtr != NULL) {
1522          const StackSaveArea *saveArea;
1523          const Method *method;
1524  
1525          saveArea = SAVEAREA_FROM_FP(framePtr);
1526          method = saveArea->method;
1527          if (method != NULL && !dvmIsNativeMethod(method)) {
1528  #ifdef COUNT_PRECISE_METHODS
1529              /* the GC is running, so no lock required */
1530              if (dvmPointerSetAddEntry(gDvm.preciseMethods, method))
1531                  LOG_SCAV("PGC: added %s.%s %p",
1532                               method->clazz->descriptor, method->name, method);
1533  #endif
1534  #if WITH_EXTRA_GC_CHECKS > 1
1535              /*
1536               * May also want to enable the memset() in the "invokeMethod"
1537               * goto target in the portable interpreter.  That sets the stack
1538               * to a pattern that makes referring to uninitialized data
1539               * very obvious.
1540               */
1541  
1542              if (first) {
1543                  /*
1544                   * First frame, isn't native, check the "alternate" saved PC
1545                   * as a sanity check.
1546                   *
1547                   * It seems like we could check the second frame if the first
1548                   * is native, since the PCs should be the same.  It turns out
1549                   * this doesn't always work.  The problem is that we could
1550                   * have calls in the sequence:
1551                   *   interp method #2
1552                   *   native method
1553                   *   interp method #1
1554                   *
1555                   * and then GC while in the native method after returning
1556                   * from interp method #2.  The currentPc on the stack is
1557                   * for interp method #1, but thread->currentPc2 is still
1558                   * set for the last thing interp method #2 did.
1559                   *
1560                   * This can also happen in normal execution:
1561                   * - sget-object on not-yet-loaded class
1562                   * - class init updates currentPc2
1563                   * - static field init is handled by parsing annotations;
1564                   *   static String init requires creation of a String object,
1565                   *   which can cause a GC
1566                   *
1567                   * Essentially, any pattern that involves executing
1568                   * interpreted code and then causes an allocation without
1569                   * executing instructions in the original method will hit
1570                   * this.  These are rare enough that the test still has
1571                   * some value.
1572                   */
1573                  if (saveArea->xtra.currentPc != thread->currentPc2) {
1574                      ALOGW("PGC: savedPC(%p) != current PC(%p), %s.%s ins=%p",
1575                          saveArea->xtra.currentPc, thread->currentPc2,
1576                          method->clazz->descriptor, method->name, method->insns);
1577                      if (saveArea->xtra.currentPc != NULL)
1578                          ALOGE("  pc inst = 0x%04x", *saveArea->xtra.currentPc);
1579                      if (thread->currentPc2 != NULL)
1580                          ALOGE("  pc2 inst = 0x%04x", *thread->currentPc2);
1581                      dvmDumpThread(thread, false);
1582                  }
1583              } else {
1584                  /*
1585                   * It's unusual, but not impossible, for a non-first frame
1586                   * to be at something other than a method invocation.  For
1587                   * example, if we do a new-instance on a nonexistent class,
1588                   * we'll have a lot of class loader activity on the stack
1589                   * above the frame with the "new" operation.  Could also
1590                   * happen while we initialize a Throwable when an instruction
1591                   * fails.
1592                   *
1593                   * So there's not much we can do here to verify the PC,
1594                   * except to verify that it's a GC point.
1595                   */
1596              }
1597              assert(saveArea->xtra.currentPc != NULL);
1598  #endif
1599  
1600              const RegisterMap* pMap;
1601              const u1* regVector;
1602  
1603              Method* nonConstMethod = (Method*) method;  // quiet gcc
1604              pMap = dvmGetExpandedRegisterMap(nonConstMethod);
1605  
1606              //LOG_SCAV("PGC: %s.%s", method->clazz->descriptor, method->name);
1607  
1608              if (pMap != NULL) {
1609                  /* found map, get registers for this address */
1610                  int addr = saveArea->xtra.currentPc - method->insns;
1611                  regVector = dvmRegisterMapGetLine(pMap, addr);
1612                  /*
1613                  if (regVector == NULL) {
1614                      LOG_SCAV("PGC: map but no entry for %s.%s addr=0x%04x",
1615                                   method->clazz->descriptor, method->name, addr);
1616                  } else {
1617                      LOG_SCAV("PGC: found map for %s.%s 0x%04x (t=%d)",
1618                                   method->clazz->descriptor, method->name, addr,
1619                                   thread->threadId);
1620                  }
1621                  */
1622              } else {
1623                  /*
1624                   * No map found.  If precise GC is disabled this is
1625                   * expected -- we don't create pointers to the map data even
1626                   * if it's present -- but if it's enabled it means we're
1627                   * unexpectedly falling back on a conservative scan, so it's
1628                   * worth yelling a little.
1629                   */
1630                  if (gDvm.preciseGc) {
1631                      LOG_SCAV("PGC: no map for %s.%s", method->clazz->descriptor, method->name);
1632                  }
1633                  regVector = NULL;
1634              }
1635              if (regVector == NULL) {
1636                  /*
1637                   * There are no roots to scavenge.  Skip over the entire frame.
1638                   */
1639                  framePtr += method->registersSize;
1640              } else {
1641                  /*
1642                   * Precise scan.  v0 is at the lowest address on the
1643                   * interpreted stack, and is the first bit in the register
1644                   * vector, so we can walk through the register map and
1645                   * memory in the same direction.
1646                   *
1647                   * A '1' bit indicates a live reference.
1648                   */
1649                  u2 bits = 1 << 1;
1650                  for (int i = method->registersSize - 1; i >= 0; i--) {
1651                      u4 rval = *framePtr;
1652  
1653                      bits >>= 1;
1654                      if (bits == 1) {
1655                          /* set bit 9 so we can tell when we're empty */
1656                          bits = *regVector++ | 0x0100;
1657                      }
1658  
1659                      if (rval != 0 && (bits & 0x01) != 0) {
1660                          /*
1661                           * Non-null, register marked as live reference.  This
1662                           * should always be a valid object.
1663                           */
1664  #if WITH_EXTRA_GC_CHECKS > 0
1665                          if ((rval & 0x3) != 0 || !dvmIsValidObject((Object*) rval)) {
1666                              /* this is very bad */
1667                              ALOGE("PGC: invalid ref in reg %d: 0x%08x",
1668                                  method->registersSize-1 - i, rval);
1669                          } else
1670  #endif
1671                          {
1672  
1673                              // LOG_SCAV("stack reference %u@%p", *framePtr, framePtr);
1674                              /* dvmMarkObjectNonNull((Object *)rval); */
1675                              scavengeReference((Object **) framePtr);
1676                          }
1677                      } else {
1678                          /*
1679                           * Null or non-reference, do nothing at all.
1680                           */
1681  #if WITH_EXTRA_GC_CHECKS > 1
1682                          if (dvmIsValidObject((Object*) rval)) {
1683                              /* this is normal, but we feel chatty */
1684                              ALOGD("PGC: ignoring valid ref in reg %d: 0x%08x",
1685                                   method->registersSize-1 - i, rval);
1686                          }
1687  #endif
1688                      }
1689                      ++framePtr;
1690                  }
1691                  dvmReleaseRegisterMapLine(pMap, regVector);
1692              }
1693          }
1694          /* else this is a break frame and there is nothing to gray, or
1695           * this is a native method and the registers are just the "ins",
1696           * copied from various registers in the caller's set.
1697           */
1698  
1699  #if WITH_EXTRA_GC_CHECKS > 1
1700          first = false;
1701  #endif
1702  
1703          /* Don't fall into an infinite loop if things get corrupted.
1704           */
1705          assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
1706                 saveArea->prevFrame == NULL);
1707          framePtr = saveArea->prevFrame;
1708      }
1709  }
1710  
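/*
 * Illustrative sketch, not part of this file: the register-map bit walk
 * used by the precise scan in scavengeThreadStack() above, isolated from
 * the VM.  One bit per register, v0 first, least-significant bit first
 * within each byte; the 0x0100 sentinel marks when the current byte of the
 * vector has been consumed.  The helper and its use of printf are
 * hypothetical.
 */
#if 0   /* sketch only */
#include <stdio.h>

static void dumpLiveRegisters(const unsigned char *regVector, int registersSize)
{
    unsigned int bits = 1 << 1;
    for (int reg = 0; reg < registersSize; ++reg) {
        bits >>= 1;
        if (bits == 1) {
            /* Only the sentinel is left: refill from the next map byte. */
            bits = *regVector++ | 0x0100;
        }
        if ((bits & 0x01) != 0) {
            printf("v%d holds a live reference\n", reg);
        }
    }
}
#endif
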
1711  static void scavengeThread(Thread *thread)
1712  {
1713      // LOG_SCAV("scavengeThread(thread=%p)", thread);
1714  
1715      // LOG_SCAV("Scavenging threadObj=%p", thread->threadObj);
1716      scavengeReference(&thread->threadObj);
1717  
1718      // LOG_SCAV("Scavenging exception=%p", thread->exception);
1719      scavengeReference(&thread->exception);
1720  
1721      scavengeThreadStack(thread);
1722  }
1723  
1724  static void scavengeThreadList()
1725  {
1726      Thread *thread;
1727  
1728      dvmLockThreadList(dvmThreadSelf());
1729      thread = gDvm.threadList;
1730      while (thread) {
1731          scavengeThread(thread);
1732          thread = thread->next;
1733      }
1734      dvmUnlockThreadList();
1735  }
1736  
1737  static void pinThreadStack(const Thread *thread)
1738  {
1739      const u4 *framePtr;
1740      const StackSaveArea *saveArea;
1741      Method *method;
1742      const char *shorty;
1743      Object *obj;
1744  
1745      saveArea = NULL;
1746      framePtr = (const u4 *)thread->interpSave.curFrame;
1747      for (; framePtr != NULL; framePtr = saveArea->prevFrame) {
1748          saveArea = SAVEAREA_FROM_FP(framePtr);
1749          method = (Method *)saveArea->method;
1750          if (method != NULL && dvmIsNativeMethod(method)) {
1751              /*
1752               * This is a native method; pin its arguments.
1753               *
1754               * For purposes of graying references, we don't need to do
1755               * anything here, because all of the native "ins" were copied
1756               * from registers in the caller's stack frame and won't be
1757               * changed (an interpreted method can freely use registers
1758               * with parameters like any other register, but natives don't
1759               * work that way).
1760               *
1761               * However, we need to ensure that references visible to
1762               * native methods don't move around.  We can do a precise scan
1763               * of the arguments by examining the method signature.
1764               */
1765              LOG_PIN("+++ native scan %s.%s",
1766                      method->clazz->descriptor, method->name);
1767              assert(method->registersSize == method->insSize);
1768              if (!dvmIsStaticMethod(method)) {
1769                  /* grab the "this" pointer */
1770                  obj = (Object *)*framePtr++;
1771                  if (obj == NULL) {
1772                      /*
1773                       * This can happen for the "fake" entry frame inserted
1774                       * for threads created outside the VM.  There's no actual
1775                       * call so there's no object.  If we changed the fake
1776                       * entry method to be declared "static" then this
1777                       * situation should never occur.
1778                       */
1779                  } else {
1780                      assert(dvmIsValidObject(obj));
1781                      pinObject(obj);
1782                  }
1783              }
1784              shorty = method->shorty+1;      // skip return value
1785              for (int i = method->registersSize - 1; i >= 0; i--, framePtr++) {
1786                  switch (*shorty++) {
1787                  case 'L':
1788                      obj = (Object *)*framePtr;
1789                      if (obj != NULL) {
1790                          assert(dvmIsValidObject(obj));
1791                          pinObject(obj);
1792                      }
1793                      break;
1794                  case 'D':
1795                  case 'J':
1796                      framePtr++;
1797                      break;
1798                  default:
1799                      /* 32-bit non-reference value */
1800                      obj = (Object *)*framePtr;          // debug, remove
1801                      if (dvmIsValidObject(obj)) {        // debug, remove
1802                          /* if we see a lot of these, our scan might be off */
1803                          LOG_PIN("+++ did NOT pin obj %p", obj);
1804                      }
1805                      break;
1806                  }
1807              }
1808          } else if (method != NULL && !dvmIsNativeMethod(method)) {
1809              const RegisterMap* pMap = dvmGetExpandedRegisterMap(method);
1810              const u1* regVector = NULL;
1811  
1812              ALOGI("conservative : %s.%s", method->clazz->descriptor, method->name);
1813  
1814              if (pMap != NULL) {
1815                  int addr = saveArea->xtra.currentPc - method->insns;
1816                  regVector = dvmRegisterMapGetLine(pMap, addr);
1817              }
1818              if (regVector == NULL) {
1819                  /*
1820                   * No register info for this frame, conservatively pin.
1821                   */
1822                  for (int i = 0; i < method->registersSize; ++i) {
1823                      u4 regValue = framePtr[i];
1824                      if (regValue != 0 && (regValue & 0x3) == 0 && dvmIsValidObject((Object *)regValue)) {
1825                          pinObject((Object *)regValue);
1826                      }
1827                  }
1828              }
1829          }
1830          /*
1831           * Don't fall into an infinite loop if things get corrupted.
1832           */
1833          assert((uintptr_t)saveArea->prevFrame > (uintptr_t)framePtr ||
1834                 saveArea->prevFrame == NULL);
1835      }
1836  }
1837  
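/*
 * Illustrative sketch, not part of this file: how the method shorty drives
 * the native argument scan in pinThreadStack() above.  The first shorty
 * character is the return type; each following character describes one
 * declared argument ('L' is a reference, 'J' and 'D' occupy two register
 * slots, everything else one).  The helper below is hypothetical and only
 * reports which "in" slots would be treated as references.
 */
#if 0   /* sketch only */
#include <stdbool.h>
#include <stdio.h>

static void describeNativeIns(const char *shorty, bool isStatic)
{
    int slot = 0;
    if (!isStatic) {
        printf("slot %d: this (reference)\n", slot++);
    }
    for (const char *p = shorty + 1; *p != '\0'; ++p) {   /* skip return type */
        switch (*p) {
        case 'L':
            printf("slot %d: reference argument\n", slot++);
            break;
        case 'J':
        case 'D':
            slot += 2;          /* wide primitive: two slots, no reference */
            break;
        default:
            slot += 1;          /* 32-bit non-reference value */
            break;
        }
    }
}

/*
 * Example: describeNativeIns("VLJI", false) reports "this" in slot 0 and a
 * reference argument in slot 1; the long then occupies slots 2-3 and the
 * int slot 4.
 */
#endif
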
1838  static void pinThread(const Thread *thread)
1839  {
1840      assert(thread != NULL);
1841      LOG_PIN("pinThread(thread=%p)", thread);
1842  
1843      LOG_PIN("Pin native method arguments");
1844      pinThreadStack(thread);
1845  
1846      LOG_PIN("Pin internalLocalRefTable");
1847      pinReferenceTable(&thread->internalLocalRefTable);
1848  
1849      LOG_PIN("Pin jniLocalRefTable");
1850      pinReferenceTable(&thread->jniLocalRefTable);
1851  
1852      /* Can the check be pushed into the promote routine? */
1853      if (thread->jniMonitorRefTable.table) {
1854          LOG_PIN("Pin jniMonitorRefTable");
1855          pinReferenceTable(&thread->jniMonitorRefTable);
1856      }
1857  }
1858  
1859  static void pinThreadList()
1860  {
1861      Thread *thread;
1862  
1863      dvmLockThreadList(dvmThreadSelf());
1864      thread = gDvm.threadList;
1865      while (thread) {
1866          pinThread(thread);
1867          thread = thread->next;
1868      }
1869      dvmUnlockThreadList();
1870  }
1871  
1872  /*
1873   * Heap block scavenging.
1874   */
1875  
1876  /*
1877   * Scavenge objects in the current block.  Scavenging terminates when
1878   * the pointer reaches the highest address in the block or when a run
1879   * of zero words that continues to the highest address is reached.
1880   */
1881  static void scavengeBlock(HeapSource *heapSource, size_t block)
1882  {
1883      u1 *cursor;
1884      u1 *end;
1885      size_t size;
1886  
1887      LOG_SCAV("scavengeBlock(heapSource=%p,block=%zu)", heapSource, block);
1888  
1889      assert(heapSource != NULL);
1890      assert(block < heapSource->totalBlocks);
1891      assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);
1892  
1893      cursor = blockToAddress(heapSource, block);
1894      end = cursor + BLOCK_SIZE;
1895      LOG_SCAV("scavengeBlock start=%p, end=%p", cursor, end);
1896  
1897      /* Parse and scavenge the current block. */
1898      size = 0;
1899      while (cursor < end) {
1900          u4 word = *(u4 *)cursor;
1901          if (word != 0) {
1902              scavengeObject((Object *)cursor);
1903              size = objectSize((Object *)cursor);
1904              size = alignUp(size, ALLOC_ALIGNMENT);
1905              cursor += size;
1906          } else {
1907              /* Check for padding. */
1908              while (*(u4 *)cursor == 0) {
1909                  cursor += 4;
1910                  if (cursor == end) break;
1911              }
1912              /* Punt if something went wrong. */
1913              assert(cursor == end);
1914          }
1915      }
1916  }
1917  
1918  static size_t objectSize(const Object *obj)
1919  {
1920      size_t size;
1921  
1922      assert(obj != NULL);
1923      assert(obj->clazz != NULL);
1924      if (obj->clazz == gDvm.classJavaLangClass) {
1925          size = dvmClassObjectSize((ClassObject *)obj);
1926      } else if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) {
1927          size = dvmArrayObjectSize((ArrayObject *)obj);
1928      } else {
1929          assert(obj->clazz->objectSize != 0);
1930          size = obj->clazz->objectSize;
1931      }
1932      if (LW_HASH_STATE(obj->lock) == LW_HASH_STATE_HASHED_AND_MOVED) {
1933          size += sizeof(u4);
1934      }
1935      return size;
1936  }
1937  
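/*
 * Illustrative sketch, not part of this file: the rounding that
 * scavengeBlock() and verifyBlock() apply to objectSize() before advancing
 * the cursor.  alignUp() is defined elsewhere in this file; the expression
 * below is the conventional power-of-two rounding and is shown only as an
 * assumption for the example.
 */
#if 0   /* sketch only */
static size_t exampleAlignUp(size_t size, size_t alignment)
{
    /* Assumes alignment is a power of two, e.g. 8. */
    return (size + alignment - 1) & ~(alignment - 1);
}

/*
 * Example: exampleAlignUp(13, 8) == 16, so a 13-byte object consumes 16
 * bytes of block storage before the next object header begins.
 */
#endif
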
1938  static void verifyBlock(HeapSource *heapSource, size_t block)
1939  {
1940      u1 *cursor;
1941      u1 *end;
1942      size_t size;
1943  
1944      // LOG_VER("verifyBlock(heapSource=%p,block=%zu)", heapSource, block);
1945  
1946      assert(heapSource != NULL);
1947      assert(block < heapSource->totalBlocks);
1948      assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);
1949  
1950      cursor = blockToAddress(heapSource, block);
1951      end = cursor + BLOCK_SIZE;
1952      // LOG_VER("verifyBlock start=%p, end=%p", cursor, end);
1953  
1954      /* Parse and verify the current block. */
1955      size = 0;
1956      while (cursor < end) {
1957          u4 word = *(u4 *)cursor;
1958          if (word != 0) {
1959              dvmVerifyObject((Object *)cursor);
1960              size = objectSize((Object *)cursor);
1961              size = alignUp(size, ALLOC_ALIGNMENT);
1962              cursor += size;
1963          } else {
1964              /* Check for padding. */
1965              while (*(u4 *)cursor == 0) {
1966                  cursor += 4;
1967                  if (cursor == end) break;
1968              }
1969              /* Punt if something went wrong. */
1970              assert(cursor == end);
1971          }
1972      }
1973  }
1974  
1975  static void describeBlockQueue(const HeapSource *heapSource)
1976  {
1977      size_t block, count;
1978      char space;
1979  
1980      block = heapSource->queueHead;
1981      count = 0;
1982      LOG_SCAV(">>> describeBlockQueue(heapSource=%p)", heapSource);
1983      /* Count the number of blocks enqueued. */
1984      while (block != QUEUE_TAIL) {
1985          block = heapSource->blockQueue[block];
1986          ++count;
1987      }
1988      LOG_SCAV("blockQueue %zu elements, enqueued %zu",
1989                   count, heapSource->queueSize);
1990      block = heapSource->queueHead;
1991      while (block != QUEUE_TAIL) {
1992          space = heapSource->blockSpace[block];
1993          LOG_SCAV("block=%zu@%p,space=%d", block, blockToAddress(heapSource,block), space);
1994          block = heapSource->blockQueue[block];
1995      }
1996  
1997      LOG_SCAV("<<< describeBlockQueue(heapSource=%p)", heapSource);
1998  }
1999  
2000  /*
2001   * Blackens promoted objects.
2002   */
2003  static void scavengeBlockQueue()
2004  {
2005      HeapSource *heapSource;
2006      size_t block;
2007  
2008      LOG_SCAV(">>> scavengeBlockQueue()");
2009      heapSource = gDvm.gcHeap->heapSource;
2010      describeBlockQueue(heapSource);
2011      while (heapSource->queueHead != QUEUE_TAIL) {
2012          block = heapSource->queueHead;
2013          LOG_SCAV("Dequeueing block %zu", block);
2014          scavengeBlock(heapSource, block);
2015          heapSource->queueHead = heapSource->blockQueue[block];
2016          LOG_SCAV("New queue head is %zu", heapSource->queueHead);
2017      }
2018      LOG_SCAV("<<< scavengeBlockQueue()");
2019  }
2020  
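/*
 * Illustrative sketch, not part of this file: the array-threaded queue that
 * scavengeBlockQueue() above drains.  blockQueue[i] holds the index of the
 * block that follows block i, and QUEUE_TAIL terminates the chain.  The toy
 * structure and helpers below are hypothetical; they only make the dequeue
 * step ("queueHead = blockQueue[block]") concrete.
 */
#if 0   /* sketch only */
#include <stddef.h>

#define TOY_QUEUE_TAIL ((size_t)-1)

struct ToyBlockQueue {
    size_t next[16];          /* next[i] = block enqueued after block i */
    size_t head;
    size_t tail;
};

static void toyEnqueue(struct ToyBlockQueue *q, size_t block)
{
    q->next[block] = TOY_QUEUE_TAIL;
    if (q->head == TOY_QUEUE_TAIL) {
        q->head = block;              /* queue was empty */
    } else {
        q->next[q->tail] = block;     /* thread onto the last block */
    }
    q->tail = block;
}

static size_t toyDequeue(struct ToyBlockQueue *q)
{
    size_t block = q->head;
    q->head = q->next[block];         /* same step as scavengeBlockQueue() */
    return block;
}
#endif
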
2021  /*
2022   * Scan the block list and verify all blocks that are marked as being
2023   * in new space.  This should be parametrized so we can invoke this
2024   * routine outside of the context of a collection.
2025   */
2026  static void verifyNewSpace()
2027  {
2028      HeapSource *heapSource = gDvm.gcHeap->heapSource;
2029      size_t c0 = 0, c1 = 0, c2 = 0, c7 = 0;
2030      for (size_t i = 0; i < heapSource->totalBlocks; ++i) {
2031          switch (heapSource->blockSpace[i]) {
2032          case BLOCK_FREE: ++c0; break;
2033          case BLOCK_TO_SPACE: ++c1; break;
2034          case BLOCK_FROM_SPACE: ++c2; break;
2035          case BLOCK_CONTINUED: ++c7; break;
2036          default: assert(!"reached");
2037          }
2038      }
2039      LOG_VER("Block Demographics: "
2040              "Free=%zu,ToSpace=%zu,FromSpace=%zu,Continued=%zu",
2041              c0, c1, c2, c7);
2042      for (size_t i = 0; i < heapSource->totalBlocks; ++i) {
2043          if (heapSource->blockSpace[i] != BLOCK_TO_SPACE) {
2044              continue;
2045          }
2046          verifyBlock(heapSource, i);
2047      }
2048  }
2049  
2050  void describeHeap()
2051  {
2052      HeapSource *heapSource = gDvm.gcHeap->heapSource;
2053      describeBlocks(heapSource);
2054  }
2055  
2056  /*
2057   * The collection interface.  Collection has a few distinct phases.
2058   * The first is flipping AKA condemning AKA whitening the heap.  The
2059   * second is to promote all objects which are pointed to by pinned or
2060   * ambiguous references.  The third phase is tracing from the stacks,
2061   * registers and various globals.  Lastly, a verification of the heap
2062   * is performed.  The last phase should be optional.
2063   */
2064  void dvmScavengeRoots()  /* Needs a new name badly */
2065  {
2066      GcHeap *gcHeap;
2067  
2068      {
2069          size_t alloc, unused, total;
2070  
2071          room(&alloc, &unused, &total);
2072          LOG_SCAV("BEFORE GC: %zu alloc, %zu free, %zu total.",
2073                       alloc, unused, total);
2074      }
2075  
2076      gcHeap = gDvm.gcHeap;
2077      dvmHeapSourceFlip();
2078  
2079      /*
2080       * Promote blocks with stationary objects.
2081       */
2082      pinThreadList();
2083      pinReferenceTable(&gDvm.jniGlobalRefTable);
2084      pinReferenceTable(&gDvm.jniPinRefTable);
2085      pinHashTableEntries(gDvm.loadedClasses);
2086      pinHashTableEntries(gDvm.dbgRegistry);
2087      pinPrimitiveClasses();
2088      pinInternedStrings();
2089  
2090      // describeBlocks(gcHeap->heapSource);
2091  
2092      /*
2093       * Create first, open new-space page right here.
2094       */
2095  
2096      /* Reset allocation to an unallocated block. */
2097      gDvm.gcHeap->heapSource->allocPtr = allocateBlocks(gDvm.gcHeap->heapSource, 1);
2098      gDvm.gcHeap->heapSource->allocLimit = gDvm.gcHeap->heapSource->allocPtr + BLOCK_SIZE;
2099      /*
2100       * Hack: promote the empty block allocated above.  If the
2101       * promotions that occurred above did not actually gray any
2102       * objects, the block queue may be empty.  We must force a
2103       * promotion to be safe.
2104       */
2105      promoteBlockByAddr(gDvm.gcHeap->heapSource, gDvm.gcHeap->heapSource->allocPtr);
2106  
2107      /*
2108       * Scavenge blocks and relocate movable objects.
2109       */
2110  
2111      LOG_SCAV("Scavenging gDvm.threadList");
2112      scavengeThreadList();
2113  
2114      LOG_SCAV("Scavenging gDvm.gcHeap->referenceOperations");
2115      scavengeLargeHeapRefTable(gcHeap->referenceOperations);
2116  
2117      LOG_SCAV("Scavenging gDvm.gcHeap->pendingFinalizationRefs");
2118      scavengeLargeHeapRefTable(gcHeap->pendingFinalizationRefs);
2119  
2120      LOG_SCAV("Scavenging random global stuff");
2121      scavengeReference(&gDvm.outOfMemoryObj);
2122      scavengeReference(&gDvm.internalErrorObj);
2123      scavengeReference(&gDvm.noClassDefFoundErrorObj);
2124  
2125      // LOG_SCAV("Scavenging gDvm.internedString");
2126      scavengeInternedStrings();
2127  
2128      LOG_SCAV("Root scavenge has completed.");
2129  
2130      scavengeBlockQueue();
2131  
2132      // LOG_SCAV("Re-snap global class pointers.");
2133      // scavengeGlobals();
2134  
2135      LOG_SCAV("New space scavenge has completed.");
2136  
2137      /*
2138       * Process reference objects in strength order.
2139       */
2140  
2141      LOG_REF("Processing soft references...");
2142      preserveSoftReferences(&gDvm.gcHeap->softReferences);
2143      clearWhiteReferences(&gDvm.gcHeap->softReferences);
2144  
2145      LOG_REF("Processing weak references...");
2146      clearWhiteReferences(&gDvm.gcHeap->weakReferences);
2147  
2148      LOG_REF("Finding finalizations...");
2149      processFinalizableReferences();
2150  
2151      LOG_REF("Processing f-reachable soft references...");
2152      clearWhiteReferences(&gDvm.gcHeap->softReferences);
2153  
2154      LOG_REF("Processing f-reachable weak references...");
2155      clearWhiteReferences(&gDvm.gcHeap->weakReferences);
2156  
2157      LOG_REF("Processing phantom references...");
2158      clearWhiteReferences(&gDvm.gcHeap->phantomReferences);
2159  
2160      /*
2161       * Verify the stack and heap.
2162       */
2163      dvmVerifyRoots();
2164      verifyNewSpace();
2165  
2166      //describeBlocks(gcHeap->heapSource);
2167  
2168      clearFromSpace(gcHeap->heapSource);
2169  
2170      {
2171          size_t alloc, rem, total;
2172  
2173          room(&alloc, &rem, &total);
2174          LOG_SCAV("AFTER GC: %zu alloc, %zu free, %zu total.", alloc, rem, total);
2175      }
2176  }
2177  
2178  /*
2179   * Interface compatibility routines.
2180   */
2181  
2182  void dvmClearWhiteRefs(Object **list)
2183  {
2184      /* do nothing */
2185      assert(*list == NULL);
2186  }
2187  
2188  void dvmHandleSoftRefs(Object **list)
2189  {
2190      /* do nothing */
2191      assert(*list == NULL);
2192  }
2193  
2194  bool dvmHeapBeginMarkStep(GcMode mode)
2195  {
2196      /* do nothing */
2197      return true;
2198  }
2199  
2200  void dvmHeapFinishMarkStep()
2201  {
2202      /* do nothing */
2203  }
2204  
2205  void dvmHeapMarkRootSet()
2206  {
2207      /* do nothing */
2208  }
2209  
2210  void dvmHeapScanMarkedObjects()
2211  {
2212      dvmScavengeRoots();
2213  }
2214  
2215  void dvmHeapScheduleFinalizations()
2216  {
2217      /* do nothing */
2218  }
2219  
2220  void dvmHeapSweepUnmarkedObjects(GcMode mode, int *numFreed, size_t *sizeFreed)
2221  {
2222      *numFreed = 0;
2223      *sizeFreed = 0;
2224      /* do nothing */
2225  }
2226  
2227  void dvmMarkDirtyObjects()
2228  {
2229      assert(!"implemented");
2230  }
2231  
2232  void dvmHeapSourceThreadShutdown()
2233  {
2234      /* do nothing */
2235  }
2236