/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Linear memory allocation, tied to class loaders.
 */
#include "Dalvik.h"

#include <sys/mman.h>
#include <limits.h>
#include <errno.h>

//#define DISABLE_LINEAR_ALLOC

// Use ashmem to name the LinearAlloc section
#define USE_ASHMEM 1

#ifdef USE_ASHMEM
#include <cutils/ashmem.h>
#endif /* USE_ASHMEM */

/*
Overview

This is intended to be a simple, fast allocator for "write-once" storage.
The expectation is that this will hold small allocations that don't change,
such as parts of classes (vtables, fields, methods, interfaces).  Because
the lifetime of these items is tied to classes, which in turn are tied
to class loaders, we associate the storage with a ClassLoader object.

[ We don't yet support class unloading, and our ClassLoader implementation
is in flux, so for now we just have a single global region and the
"classLoader" argument is ignored. ]

By storing the data here, rather than on the system heap, we reduce heap
clutter, speed class loading, reduce the memory footprint (reduced heap
structure overhead), and most importantly we increase the number of pages
that remain shared between processes launched in "Zygote mode".

The 4 bytes preceding each block contain the block length.  This allows us
to support "free" and "realloc" calls in a limited way.  We don't free
storage once it has been allocated, but in some circumstances it could be
useful to erase storage to garbage values after a "free" or "realloc".
(Bad idea if we're trying to share pages.)  We need to align to 8-byte
boundaries for some architectures, so we have a 50-50 chance of getting
this for free in a given block.

A NULL value for the "classLoader" argument refers to the bootstrap class
loader, which is never unloaded (until the VM shuts down).

Because the memory is not expected to be updated, we can use mprotect to
guard the pages on debug builds.  Handy when tracking down corruption.
*/
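
/*
 * Illustrative sketch (not part of the original file): the typical
 * lifetime of a block under ENFORCE_READ_ONLY, assuming a hypothetical
 * caller holding a ClassLoader* "loader".  The pages backing the block
 * are writable only between a set-read-write (or the initial alloc) and
 * the matching set-read-only call:
 *
 *   void* buf = dvmLinearAlloc(loader, 64);   // block starts writable
 *   memset(buf, 0xa5, 64);                    // ...fill it in...
 *   dvmLinearSetReadOnly(loader, buf);        // drop write access
 *   ...
 *   dvmLinearSetReadWrite(loader, buf);       // re-enable to update
 *   ((u1*) buf)[0] = 0;
 *   dvmLinearSetReadOnly(loader, buf);
 *   ...
 *   dvmLinearFree(loader, buf);               // mark free (not reclaimed)
 */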

/* alignment for allocations; must be power of 2, and currently >= HEADER_EXTRA */
#define BLOCK_ALIGN         8

/* default length of memory segment (worst case is probably "dexopt") */
#define DEFAULT_MAX_LENGTH  (5*1024*1024)

/* leave enough space for a length word */
#define HEADER_EXTRA        4

/* overload the length word */
#define LENGTHFLAG_FREE    0x80000000
#define LENGTHFLAG_RW      0x40000000
#define LENGTHFLAG_MASK    (~(LENGTHFLAG_FREE|LENGTHFLAG_RW))
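
/*
 * Worked example (added for clarity, not in the original): a live,
 * currently-writable 24-byte block stores 24 | LENGTHFLAG_RW =
 * 0x40000018 in its length word.  Once freed and returned to read-only,
 * the word is 24 | LENGTHFLAG_FREE = 0x80000018; masking recovers the
 * raw length:
 *
 *   u4 raw = 0x80000018;
 *   u4 len = raw & LENGTHFLAG_MASK;               // 24
 *   bool isFree = (raw & LENGTHFLAG_FREE) != 0;   // true
 */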


/* fwd */
static void checkAllFree(Object* classLoader);


/*
 * Someday, retrieve the linear alloc struct associated with a particular
 * class loader.  For now, always use the bootstrap loader's instance.
 */
static inline LinearAllocHdr* getHeader(Object* classLoader)
{
    return gDvm.pBootLoaderAlloc;
}

/*
 * Convert a pointer to memory to a pointer to the block header (which is
 * currently just a length word).
 */
static inline u4* getBlockHeader(void* mem)
{
    return ((u4*) mem) - 1;
}

/*
 * Create a new linear allocation block.
 */
LinearAllocHdr* dvmLinearAllocCreate(Object* classLoader)
{
#ifdef DISABLE_LINEAR_ALLOC
    return (LinearAllocHdr*) 0x12345;
#endif
    LinearAllocHdr* pHdr;

    pHdr = (LinearAllocHdr*) malloc(sizeof(*pHdr));
    if (pHdr == NULL)
        return NULL;

    /*
     * "curOffset" points to the location of the next pre-block header,
     * which means we have to advance to the next BLOCK_ALIGN address and
     * back up.
     *
     * Note we leave the first page empty (see below), and start the
     * first entry on the second page at an offset that ensures the next
     * chunk of data will be properly aligned.
     */
    assert(BLOCK_ALIGN >= HEADER_EXTRA);
    pHdr->curOffset = pHdr->firstOffset =
        (BLOCK_ALIGN-HEADER_EXTRA) + SYSTEM_PAGE_SIZE;
    pHdr->mapLength = DEFAULT_MAX_LENGTH;

#ifdef USE_ASHMEM
    int fd;

    fd = ashmem_create_region("dalvik-LinearAlloc", DEFAULT_MAX_LENGTH);
    if (fd < 0) {
        LOGE("ashmem LinearAlloc failed: %s\n", strerror(errno));
        free(pHdr);
        return NULL;
    }

    pHdr->mapAddr = mmap(NULL, pHdr->mapLength, PROT_READ | PROT_WRITE,
        MAP_PRIVATE, fd, 0);
    if (pHdr->mapAddr == MAP_FAILED) {
        LOGE("LinearAlloc mmap(%d) failed: %s\n", pHdr->mapLength,
            strerror(errno));
        free(pHdr);
        close(fd);
        return NULL;
    }

    close(fd);
#else /*USE_ASHMEM*/
    // MAP_ANON is listed as "deprecated" on Linux,
    // but MAP_ANONYMOUS is not defined under Mac OS X.
    pHdr->mapAddr = mmap(NULL, pHdr->mapLength, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANON, -1, 0);
    if (pHdr->mapAddr == MAP_FAILED) {
        LOGE("LinearAlloc mmap(%d) failed: %s\n", pHdr->mapLength,
            strerror(errno));
        free(pHdr);
        return NULL;
    }
#endif /*USE_ASHMEM*/

    /* region expected to begin on a page boundary */
    assert(((int) pHdr->mapAddr & (SYSTEM_PAGE_SIZE-1)) == 0);

    /* the system should initialize newly-mapped memory to zero */
    assert(*(u4*) (pHdr->mapAddr + pHdr->curOffset) == 0);

    /*
     * Disable access to all except the starting page.  We will enable
     * pages as we use them.  This helps prevent bad pointers from working.
     * The pages start out PROT_NONE, become read/write while we access
     * them, then go to read-only after we finish our changes.
     *
     * We have to make the first page readable because we have 4 pad bytes,
     * followed by 4 length bytes, giving an initial offset of 8.  The
     * generic code below assumes that there could have been a previous
     * allocation that wrote into those 4 pad bytes, therefore the page
     * must have been marked readable by the previous allocation.
     *
     * We insert an extra page in here to force a break in the memory map
     * so we can see ourselves more easily in "showmap".  Otherwise this
     * stuff blends into the neighboring pages.  [TODO: do we still need
     * the extra page now that we have ashmem?]
     */
    if (mprotect(pHdr->mapAddr, pHdr->mapLength, PROT_NONE) != 0) {
        LOGW("LinearAlloc init mprotect failed: %s\n", strerror(errno));
        munmap(pHdr->mapAddr, pHdr->mapLength);
        free(pHdr);
        return NULL;
    }
    if (mprotect(pHdr->mapAddr + SYSTEM_PAGE_SIZE, SYSTEM_PAGE_SIZE,
            ENFORCE_READ_ONLY ? PROT_READ : PROT_READ|PROT_WRITE) != 0)
    {
        LOGW("LinearAlloc init mprotect #2 failed: %s\n", strerror(errno));
        munmap(pHdr->mapAddr, pHdr->mapLength);
        free(pHdr);
        return NULL;
    }

    if (ENFORCE_READ_ONLY) {
        /* allocate the per-page ref count */
        int numPages = (pHdr->mapLength+SYSTEM_PAGE_SIZE-1) / SYSTEM_PAGE_SIZE;
        pHdr->writeRefCount = calloc(numPages, sizeof(short));
        if (pHdr->writeRefCount == NULL) {
            munmap(pHdr->mapAddr, pHdr->mapLength);
            free(pHdr);
            return NULL;
        }
    }

    dvmInitMutex(&pHdr->lock);

    LOGV("LinearAlloc: created region at %p-%p\n",
        pHdr->mapAddr, pHdr->mapAddr + pHdr->mapLength-1);

    return pHdr;
}
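
/*
 * Region layout after dvmLinearAllocCreate (illustrative, derived from
 * the code above; offsets assume SYSTEM_PAGE_SIZE == 4096 and the
 * default BLOCK_ALIGN/HEADER_EXTRA values):
 *
 *   offset 0    .. 4095 : guard page, PROT_NONE (break in "showmap")
 *   offset 4096 .. 4099 : pad bytes (curOffset/firstOffset start at 4100)
 *   offset 4100 .. 4103 : length word of the first block
 *   offset 4104         : first byte returned to a caller (8-aligned)
 */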

/*
 * Destroy a linear allocation area.
 *
 * We do a trivial "has everything been freed?" check before unmapping the
 * memory and freeing the LinearAllocHdr.
 */
void dvmLinearAllocDestroy(Object* classLoader)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    LinearAllocHdr* pHdr = getHeader(classLoader);
    if (pHdr == NULL)
        return;

    checkAllFree(classLoader);

    //dvmLinearAllocDump(classLoader);

    if (gDvm.verboseShutdown) {
        LOGV("Unmapping linear allocator base=%p\n", pHdr->mapAddr);
        LOGD("LinearAlloc %p used %d of %d (%d%%)\n",
            classLoader, pHdr->curOffset, pHdr->mapLength,
            (pHdr->curOffset * 100) / pHdr->mapLength);
    }

    if (munmap(pHdr->mapAddr, pHdr->mapLength) != 0) {
        LOGW("LinearAlloc munmap(%p, %d) failed: %s\n",
            pHdr->mapAddr, pHdr->mapLength, strerror(errno));
    }
    free(pHdr);
}

/*
 * Allocate "size" bytes of storage, associated with a particular class
 * loader.
 *
 * It's okay for size to be zero.
 *
 * We always leave "curOffset" pointing at the next place where we will
 * store the header that precedes the returned storage.
 *
 * This aborts the VM on failure, so it's not necessary to check for a
 * NULL return value.
 */
void* dvmLinearAlloc(Object* classLoader, size_t size)
{
    LinearAllocHdr* pHdr = getHeader(classLoader);
    int startOffset, nextOffset;
    int lastGoodOff, firstWriteOff, lastWriteOff;

#ifdef DISABLE_LINEAR_ALLOC
    return calloc(1, size);
#endif

    LOGVV("--- LinearAlloc(%p, %d)\n", classLoader, size);

    /*
     * What we'd like to do is just determine the new end-of-alloc size
     * and atomic-swap the updated value in.  The trouble is that the
     * first time we reach a new page, we need to call mprotect() to
     * make the page available, and we don't want to call mprotect() on
     * every allocation.  The problem scenario is:
     *  - thread A allocs across a page boundary, but gets preempted
     *    before mprotect() completes
     *  - thread B allocs within the new page, and doesn't call mprotect()
     */
    dvmLockMutex(&pHdr->lock);

    startOffset = pHdr->curOffset;
    assert(((startOffset + HEADER_EXTRA) & (BLOCK_ALIGN-1)) == 0);

    /*
     * Compute the new offset.  The old offset points at the address where
     * we will store the hidden block header, so we advance past that,
     * add the size of data they want, add another header's worth so we
     * know we have room for that, and round up to BLOCK_ALIGN.  That's
     * the next location where we'll put user data.  We then subtract the
     * chunk header size off so we're back to the header pointer.
     *
     * Examples:
     *   old=12 size=3 new=((12+(4*2)+3+7) & ~7)-4 = 24-4 --> 20
     *   old=12 size=5 new=((12+(4*2)+5+7) & ~7)-4 = 32-4 --> 28
     */
    nextOffset = ((startOffset + HEADER_EXTRA*2 + size + (BLOCK_ALIGN-1))
                    & ~(BLOCK_ALIGN-1)) - HEADER_EXTRA;
    LOGVV("--- old=%d size=%d new=%d\n", startOffset, size, nextOffset);

    if (nextOffset > pHdr->mapLength) {
        /*
         * We don't have to abort here.  We could fall back on the system
         * malloc(), and have our "free" call figure out what to do.  Only
         * works if the users of these functions actually free everything
         * they allocate.
         */
        LOGE("LinearAlloc exceeded capacity (%d), last=%d\n",
            pHdr->mapLength, (int) size);
        dvmAbort();
    }

    /*
     * Round up "size" to encompass the entire region, including the 0-7
     * pad bytes before the next chunk header.  This way we get maximum
     * utility out of "realloc", and when we're doing ENFORCE_READ_ONLY
     * stuff we always treat the full extent.
     */
    size = nextOffset - (startOffset + HEADER_EXTRA);
    LOGVV("--- (size now %d)\n", size);

    /*
     * See if we are starting on or have crossed into a new page.  If so,
     * call mprotect on the page(s) we're about to write to.  We have to
     * page-align the start address, but don't have to make the length a
     * SYSTEM_PAGE_SIZE multiple (but we do it anyway).
     *
     * Note that "startOffset" is not the last *allocated* byte, but rather
     * the offset of the first *unallocated* byte (which we are about to
     * write the chunk header to).  "nextOffset" is similar.
     *
     * If ENFORCE_READ_ONLY is enabled, we have to call mprotect even if
     * we've written to this page before, because it might be read-only.
     */
    lastGoodOff = (startOffset-1) & ~(SYSTEM_PAGE_SIZE-1);
    firstWriteOff = startOffset & ~(SYSTEM_PAGE_SIZE-1);
    lastWriteOff = (nextOffset-1) & ~(SYSTEM_PAGE_SIZE-1);
    LOGVV("---  lastGood=0x%04x firstWrite=0x%04x lastWrite=0x%04x\n",
        lastGoodOff, firstWriteOff, lastWriteOff);
    if (lastGoodOff != lastWriteOff || ENFORCE_READ_ONLY) {
        int cc, start, len;

        start = firstWriteOff;
        assert(start <= nextOffset);
        len = (lastWriteOff - firstWriteOff) + SYSTEM_PAGE_SIZE;

        LOGVV("---    calling mprotect(start=%d len=%d RW)\n", start, len);
        cc = mprotect(pHdr->mapAddr + start, len, PROT_READ | PROT_WRITE);
        if (cc != 0) {
            LOGE("LinearAlloc mprotect (+%d %d) failed: %s\n",
                start, len, strerror(errno));
            /* we're going to fail soon, might as well do it now */
            dvmAbort();
        }
    }

    /* update the ref counts on the now-writable pages */
    if (ENFORCE_READ_ONLY) {
        int i, start, end;

        start = firstWriteOff / SYSTEM_PAGE_SIZE;
        end = lastWriteOff / SYSTEM_PAGE_SIZE;

        LOGVV("---  marking pages %d-%d RW (alloc %d at %p)\n",
            start, end, size, pHdr->mapAddr + startOffset + HEADER_EXTRA);
        for (i = start; i <= end; i++)
            pHdr->writeRefCount[i]++;
    }

    /* stow the size in the header */
    if (ENFORCE_READ_ONLY)
        *(u4*)(pHdr->mapAddr + startOffset) = size | LENGTHFLAG_RW;
    else
        *(u4*)(pHdr->mapAddr + startOffset) = size;

    /*
     * Update data structure.
     */
    pHdr->curOffset = nextOffset;

    dvmUnlockMutex(&pHdr->lock);
    return pHdr->mapAddr + startOffset + HEADER_EXTRA;
}

/*
 * Helper function, replaces strdup().
 */
char* dvmLinearStrdup(Object* classLoader, const char* str)
{
#ifdef DISABLE_LINEAR_ALLOC
    return strdup(str);
#endif
    int len = strlen(str);
    void* mem = dvmLinearAlloc(classLoader, len+1);
    memcpy(mem, str, len+1);
    if (ENFORCE_READ_ONLY)
        dvmLinearSetReadOnly(classLoader, mem);
    return (char*) mem;
}

/*
 * "Reallocate" a piece of memory.
 *
 * If the new size is <= the old size, we return the original pointer
 * without doing anything.
 *
 * If the new size is > the old size, we allocate new storage, copy the
 * old data over, and mark the old storage as free.
 */
void* dvmLinearRealloc(Object* classLoader, void* mem, size_t newSize)
{
#ifdef DISABLE_LINEAR_ALLOC
    return realloc(mem, newSize);
#endif
    /* make sure we have the right region (and mem != NULL) */
    assert(mem != NULL);
    assert(mem >= (void*) getHeader(classLoader)->mapAddr &&
           mem < (void*) (getHeader(classLoader)->mapAddr +
                          getHeader(classLoader)->curOffset));

    const u4* pLen = getBlockHeader(mem);
    LOGV("--- LinearRealloc(%d) old=%d\n", newSize, *pLen);

    /* handle size reduction case */
    if (*pLen >= newSize) {
        if (ENFORCE_READ_ONLY)
            dvmLinearSetReadWrite(classLoader, mem);
        return mem;
    }

    void* newMem;

    newMem = dvmLinearAlloc(classLoader, newSize);
    assert(newMem != NULL);
    memcpy(newMem, mem, *pLen);
    dvmLinearFree(classLoader, mem);

    return newMem;
}
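
/*
 * Usage note (illustrative, not from the original source): because the
 * grow path copies into fresh storage and marks the old block free
 * without reclaiming it, repeatedly growing a buffer wastes linear-alloc
 * space.  Callers are expected to size allocations up front or tolerate
 * the waste, e.g. with a hypothetical "loader":
 *
 *   u2* vtable = (u2*) dvmLinearAlloc(loader, 4 * sizeof(u2));
 *   ...
 *   vtable = (u2*) dvmLinearRealloc(loader, vtable, 8 * sizeof(u2));
 *   // the original block remains allocated, merely flagged FREE
 */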


/*
 * Update the read/write status of one or more pages.
 */
static void updatePages(Object* classLoader, void* mem, int direction)
{
    LinearAllocHdr* pHdr = getHeader(classLoader);
    dvmLockMutex(&pHdr->lock);

    /* make sure we have the right region */
    assert(mem >= (void*) pHdr->mapAddr &&
           mem < (void*) (pHdr->mapAddr + pHdr->curOffset));

    u4* pLen = getBlockHeader(mem);
    u4 len = *pLen & LENGTHFLAG_MASK;
    int firstPage, lastPage;

    firstPage = ((u1*)pLen - (u1*)pHdr->mapAddr) / SYSTEM_PAGE_SIZE;
    lastPage = ((u1*)mem - (u1*)pHdr->mapAddr + (len-1)) / SYSTEM_PAGE_SIZE;
    LOGVV("--- updating pages %d-%d (%d)\n", firstPage, lastPage, direction);

    int i, cc;

    /*
     * Update individual pages.  We could do some sort of "lazy update" to
     * combine mprotect calls, but that's almost certainly more trouble
     * than it's worth.
     */
    for (i = firstPage; i <= lastPage; i++) {
        if (direction < 0) {
            /*
             * Trying to mark read-only.
             */
            if (i == firstPage) {
                if ((*pLen & LENGTHFLAG_RW) == 0) {
                    LOGW("Double RO on %p\n", mem);
                    dvmAbort();
                } else
                    *pLen &= ~LENGTHFLAG_RW;
            }

            if (pHdr->writeRefCount[i] == 0) {
                LOGE("Can't make page %d any less writable\n", i);
                dvmAbort();
            }
            pHdr->writeRefCount[i]--;
            if (pHdr->writeRefCount[i] == 0) {
                LOGVV("---  prot page %d RO\n", i);
                cc = mprotect(pHdr->mapAddr + SYSTEM_PAGE_SIZE * i,
                        SYSTEM_PAGE_SIZE, PROT_READ);
                assert(cc == 0);
            }
        } else {
            /*
             * Trying to mark writable.
             */
            if (pHdr->writeRefCount[i] >= 32767) {
                LOGE("Can't make page %d any more writable\n", i);
                dvmAbort();
            }
            if (pHdr->writeRefCount[i] == 0) {
                LOGVV("---  prot page %d RW\n", i);
                cc = mprotect(pHdr->mapAddr + SYSTEM_PAGE_SIZE * i,
                        SYSTEM_PAGE_SIZE, PROT_READ | PROT_WRITE);
                assert(cc == 0);
            }
            pHdr->writeRefCount[i]++;

            if (i == firstPage) {
                if ((*pLen & LENGTHFLAG_RW) != 0) {
                    LOGW("Double RW on %p\n", mem);
                    dvmAbort();
                } else
                    *pLen |= LENGTHFLAG_RW;
            }
        }
    }

    dvmUnlockMutex(&pHdr->lock);
}
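
/*
 * Example of the per-page ref count in action (illustrative; "loader",
 * "blockA", and "blockB" are hypothetical): if two blocks share a page,
 * the page only returns to read-only once both have been re-protected.
 *
 *   dvmLinearSetReadWrite(loader, blockA);  // refcount 0->1, page -> RW
 *   dvmLinearSetReadWrite(loader, blockB);  // refcount 1->2, no mprotect
 *   dvmLinearSetReadOnly(loader, blockA);   // refcount 2->1, still RW
 *   dvmLinearSetReadOnly(loader, blockB);   // refcount 1->0, page -> RO
 */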

/*
 * Try to mark the pages in which a chunk of memory lives as read-only.
 * Whether or not the pages actually change state depends on how many
 * others are trying to access the same pages.
 *
 * Only call here if ENFORCE_READ_ONLY is true.
 */
void dvmLinearSetReadOnly(Object* classLoader, void* mem)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    updatePages(classLoader, mem, -1);
}

/*
 * Make the pages on which "mem" sits read-write.
 *
 * This covers the header as well as the data itself.  (We could add a
 * "header-only" mode for dvmLinearFree.)
 *
 * Only call here if ENFORCE_READ_ONLY is true.
 */
void dvmLinearSetReadWrite(Object* classLoader, void* mem)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    updatePages(classLoader, mem, 1);
}

/*
 * Mark an allocation as free.
 */
void dvmLinearFree(Object* classLoader, void* mem)
{
#ifdef DISABLE_LINEAR_ALLOC
    free(mem);
    return;
#endif
    if (mem == NULL)
        return;

    /* make sure we have the right region */
    assert(mem >= (void*) getHeader(classLoader)->mapAddr &&
           mem < (void*) (getHeader(classLoader)->mapAddr +
                          getHeader(classLoader)->curOffset));

    if (ENFORCE_READ_ONLY)
        dvmLinearSetReadWrite(classLoader, mem);

    u4* pLen = getBlockHeader(mem);
    *pLen |= LENGTHFLAG_FREE;

    if (ENFORCE_READ_ONLY)
        dvmLinearSetReadOnly(classLoader, mem);
}

/*
 * For debugging, dump the contents of a linear alloc area.
 *
 * We grab the lock so that the header contents and list output are
 * consistent.
 */
void dvmLinearAllocDump(Object* classLoader)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    LinearAllocHdr* pHdr = getHeader(classLoader);

    dvmLockMutex(&pHdr->lock);

    LOGI("LinearAlloc classLoader=%p\n", classLoader);
    LOGI("  mapAddr=%p mapLength=%d firstOffset=%d\n",
        pHdr->mapAddr, pHdr->mapLength, pHdr->firstOffset);
    LOGI("  curOffset=%d\n", pHdr->curOffset);

    int off = pHdr->firstOffset;
    u4 rawLen, fullLen;

    while (off < pHdr->curOffset) {
        rawLen = *(u4*) (pHdr->mapAddr + off);
        fullLen = ((HEADER_EXTRA*2 + (rawLen & LENGTHFLAG_MASK))
                    & ~(BLOCK_ALIGN-1));

        LOGI("  %p (%3d): %clen=%d%s\n", pHdr->mapAddr + off + HEADER_EXTRA,
            (int) ((off + HEADER_EXTRA) / SYSTEM_PAGE_SIZE),
            (rawLen & LENGTHFLAG_FREE) != 0 ? '*' : ' ',
            rawLen & LENGTHFLAG_MASK,
            (rawLen & LENGTHFLAG_RW) != 0 ? " [RW]" : "");

        off += fullLen;
    }

    if (ENFORCE_READ_ONLY) {
        LOGI("writeRefCount map:\n");

        int numPages = (pHdr->mapLength+SYSTEM_PAGE_SIZE-1) / SYSTEM_PAGE_SIZE;
        int zstart = 0;
        int i;

        for (i = 0; i < numPages; i++) {
            int count = pHdr->writeRefCount[i];

            if (count != 0) {
                if (zstart < i-1)
                    printf(" %d-%d: zero\n", zstart, i-1);
                else if (zstart == i-1)
                    printf(" %d: zero\n", zstart);
                zstart = i+1;
                printf(" %d: %d\n", i, count);
            }
        }
        if (zstart < i)
            printf(" %d-%d: zero\n", zstart, i-1);
    }

    LOGD("LinearAlloc %p using %d of %d (%d%%)\n",
        classLoader, pHdr->curOffset, pHdr->mapLength,
        (pHdr->curOffset * 100) / pHdr->mapLength);

    dvmUnlockMutex(&pHdr->lock);
}

/*
 * Verify that all blocks are freed.
 *
 * This should only be done as we're shutting down, but there could be a
 * daemon thread that's still trying to do something, so we grab the locks.
 */
static void checkAllFree(Object* classLoader)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    LinearAllocHdr* pHdr = getHeader(classLoader);

    dvmLockMutex(&pHdr->lock);

    int off = pHdr->firstOffset;
    u4 rawLen, fullLen;

    while (off < pHdr->curOffset) {
        rawLen = *(u4*) (pHdr->mapAddr + off);
        fullLen = ((HEADER_EXTRA*2 + (rawLen & LENGTHFLAG_MASK))
                    & ~(BLOCK_ALIGN-1));

        if ((rawLen & LENGTHFLAG_FREE) == 0) {
            LOGW("LinearAlloc %p not freed: %p len=%d\n", classLoader,
                pHdr->mapAddr + off + HEADER_EXTRA, rawLen & LENGTHFLAG_MASK);
        }

        off += fullLen;
    }

    dvmUnlockMutex(&pHdr->lock);
}

/*
 * Determine if [start, start+length) is contained in the in-use area of
 * a single LinearAlloc.  The full set of linear allocators is scanned.
 *
 * [ Since we currently only have one region, this is pretty simple.  In
 * the future we'll need to traverse a table of class loaders. ]
 */
bool dvmLinearAllocContains(const void* start, size_t length)
{
    LinearAllocHdr* pHdr = getHeader(NULL);

    if (pHdr == NULL)
        return false;

    return (char*) start >= pHdr->mapAddr &&
           ((char*)start + length) <= (pHdr->mapAddr + pHdr->curOffset);
}
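
/*
 * Example (illustrative, not from the original source): callers can use
 * this predicate as a sanity check on a pointer expected to live in the
 * write-once region, e.g. with a hypothetical Method* "meth":
 *
 *   if (dvmLinearAllocContains(meth, sizeof(Method))) {
 *       // "meth" points into linear-alloc storage
 *   }
 */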