1 // Copyright 2006-2008 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_SPACES_H_
29 #define V8_SPACES_H_
30 
31 #include "list-inl.h"
32 #include "log.h"
33 
34 namespace v8 {
35 namespace internal {
36 
37 // -----------------------------------------------------------------------------
38 // Heap structures:
39 //
40 // A JS heap consists of a young generation, an old generation, and a large
41 // object space. The young generation is divided into two semispaces. A
42 // scavenger implements Cheney's copying algorithm. The old generation is
43 // separated into a map space and an old object space. The map space contains
44 // all (and only) map objects; the rest of the old objects go into the old space.
45 // The old generation is collected by a mark-sweep-compact collector.
46 //
47 // The semispaces of the young generation are contiguous.  The old and map
48 // spaces consist of a list of pages. A page has a page header, a remembered
49 // set area, and an object area. The page size is deliberately chosen to be 8K
50 // bytes. The first word of a page is an opaque page header that has the
51 // address of the next page and its ownership information. The second word may
52 // have the allocation top address of this page. The next 248 bytes are
53 // remembered sets. Heap objects are aligned to the pointer size (4 bytes). A
54 // remembered set bit corresponds to a pointer in the object area.
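// For example: 8 bytes of page header plus 248 bytes of remembered set put
// the object area at offset 256; the remaining 8192 - 256 = 7936 bytes hold
// 7936 / 4 = 1984 pointer-aligned slots, and one bit per slot is exactly the
// 248 bytes of remembered set reserved above.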
55 //
56 // There is a separate large object space for objects larger than
57 // Page::kMaxHeapObjectSize, so that they do not have to move during
58 // collection.  The large object space is paged and uses the same remembered
59 // set implementation.  Pages in large object space may be larger than 8K.
60 //
61 // NOTE: The mark-compact collector rebuilds the remembered set after a
62 // collection. It reuses the first few words of the remembered set for
63 // bookkeeping relocation information.
64 
65 
66 // Some assertion macros used in the debugging mode.
67 
68 #define ASSERT_PAGE_ALIGNED(address)                                           \
69   ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
70 
71 #define ASSERT_OBJECT_ALIGNED(address)                                         \
72   ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
73 
74 #define ASSERT_MAP_ALIGNED(address)                                            \
75   ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)
76 
77 #define ASSERT_OBJECT_SIZE(size)                                               \
78   ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))
79 
80 #define ASSERT_PAGE_OFFSET(offset)                                             \
81   ASSERT((Page::kObjectStartOffset <= offset)                                  \
82       && (offset <= Page::kPageSize))
83 
84 #define ASSERT_MAP_PAGE_INDEX(index)                                           \
85   ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
86 
87 
88 class PagedSpace;
89 class MemoryAllocator;
90 class AllocationInfo;
91 
92 // -----------------------------------------------------------------------------
93 // A page normally has 8K bytes. Large object pages may be larger.  A page
94 // address is always aligned to the 8K page size.  A page is divided into
95 // three areas: the first two words are used for bookkeeping, the next 248
96 // bytes are used as remembered set, and the rest of the page is the object
97 // area.
98 //
99 // Pointers are aligned to the pointer size (4 bytes), so only 1 bit is needed
100 // for a pointer in the remembered set. Given an address, its remembered set
101 // bit position (offset from the start of the page) is calculated by dividing
102 // its page offset by 32. Therefore, the object area in a page starts at the
103 // 256th byte (8K/32). Bytes 0 to 255 do not need the remembered set, so that
104 // the first two words (64 bits) in a page can be used for other purposes.
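// For example, the first object-area slot, at page offset 256, has its
// remembered set bit at byte 256 / 32 = 8 of the page (bit 0 of that byte),
// immediately after the two bookkeeping words.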
105 //
106 // On the 64-bit platform, we add an offset to the start of the remembered set,
107 // and pointers are aligned to 8-byte pointer size. This means that we need
108 // only 128 bytes for the RSet, and only get two bytes free in the RSet's RSet.
109 // For this reason we add an offset to get room for the Page data at the start.
110 //
111 // The mark-compact collector transforms a map pointer into a page index and a
112 // page offset. The exact encoding is described in the comments for
113 // class MapWord in objects.h.
114 //
115 // The only way to get a page pointer is by calling factory methods:
116 //   Page* p = Page::FromAddress(addr); or
117 //   Page* p = Page::FromAllocationTop(top);
118 class Page {
119  public:
120   // Returns the page containing a given address. The address ranges
121   // from [page_addr .. page_addr + kPageSize[
122   //
123   // Note that this function only works for addresses in normal paged
124   // spaces and addresses in the first 8K of large object pages (i.e.,
125   // the start of large objects but not necessarily derived pointers
126   // within them).
127   INLINE(static Page* FromAddress(Address a)) {
128     return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
129   }
130 
131   // Returns the page containing an allocation top. Because an allocation
132   // top address can be the upper bound of the page, we need to subtract
133   // kPointerSize from it first. The address ranges from
134   // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
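  // (If top is exactly page_addr + kPageSize, FromAddress(top) would name the
  // next page; backing up by one pointer keeps the result in the page where
  // the allocation actually happened.)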
135   INLINE(static Page* FromAllocationTop(Address top)) {
136     Page* p = FromAddress(top - kPointerSize);
137     ASSERT_PAGE_OFFSET(p->Offset(top));
138     return p;
139   }
140 
141   // Returns the start address of this page.
142   Address address() { return reinterpret_cast<Address>(this); }
143 
144   // Checks whether this is a valid page address.
145   bool is_valid() { return address() != NULL; }
146 
147   // Returns the next page of this page.
148   inline Page* next_page();
149 
150   // Return the end of allocation in this page. Undefined for unused pages.
151   inline Address AllocationTop();
152 
153   // Returns the start address of the object area in this page.
154   Address ObjectAreaStart() { return address() + kObjectStartOffset; }
155 
156   // Returns the end address (exclusive) of the object area in this page.
157   Address ObjectAreaEnd() { return address() + Page::kPageSize; }
158 
159   // Returns the start address of the remembered set area.
160   Address RSetStart() { return address() + kRSetStartOffset; }
161 
162   // Returns the end address of the remembered set area (exclusive).
163   Address RSetEnd() { return address() + kRSetEndOffset; }
164 
165   // Checks whether an address is page aligned.
166   static bool IsAlignedToPageSize(Address a) {
167     return 0 == (OffsetFrom(a) & kPageAlignmentMask);
168   }
169 
170   // True if this page is a large object page.
171   bool IsLargeObjectPage() { return (is_normal_page & 0x1) == 0; }
172 
173   // Returns the offset of a given address to this page.
174   INLINE(int Offset(Address a)) {
175     int offset = static_cast<int>(a - address());
176     ASSERT_PAGE_OFFSET(offset);
177     return offset;
178   }
179 
180   // Returns the address for a given offset in this page.
181   Address OffsetToAddress(int offset) {
182     ASSERT_PAGE_OFFSET(offset);
183     return address() + offset;
184   }
185 
186   // ---------------------------------------------------------------------
187   // Remembered set support
188 
189   // Clears remembered set in this page.
190   inline void ClearRSet();
191 
192   // Return the address of the remembered set word corresponding to an
193   // object address/offset pair, and the bit encoded as a single-bit
194   // mask in the output parameter 'bitmask'.
195   INLINE(static Address ComputeRSetBitPosition(Address address, int offset,
196                                                uint32_t* bitmask));
197 
198   // Sets the corresponding remembered set bit for a given address.
199   INLINE(static void SetRSet(Address address, int offset));
200 
201   // Clears the corresponding remembered set bit for a given address.
202   static inline void UnsetRSet(Address address, int offset);
203 
204   // Checks whether the remembered set bit for a given address is set.
205   static inline bool IsRSetSet(Address address, int offset);
206 
207 #ifdef DEBUG
208   // Use a state to mark whether remembered set space can be used for other
209   // purposes.
210   enum RSetState { IN_USE,  NOT_IN_USE };
211   static bool is_rset_in_use() { return rset_state_ == IN_USE; }
212   static void set_rset_state(RSetState state) { rset_state_ = state; }
213 #endif
214 
215   // Page size in bytes.  This must be a multiple of the OS page size.
216   static const int kPageSize = 1 << kPageSizeBits;
217 
218   // Page size mask.
219   static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
220 
221   // The offset of the remembered set in a page, in addition to the empty bytes
222   // formed as the remembered bits of the remembered set itself.
223 #ifdef V8_TARGET_ARCH_X64
224   static const int kRSetOffset = 4 * kPointerSize;  // Room for four pointers.
225 #else
226   static const int kRSetOffset = 0;
227 #endif
228   // The end offset of the remembered set in a page
229   // (heap objects are aligned to the pointer size).
230   static const int kRSetEndOffset = kRSetOffset + kPageSize / kBitsPerPointer;
231 
232   // The start offset of the object area in a page.
233   // This needs to be at least (bits per uint32_t) * kBitsPerPointer,
234   // to align start of rset to a uint32_t address.
235   static const int kObjectStartOffset = 256;
236 
237   // The start offset of the used part of the remembered set in a page.
238   static const int kRSetStartOffset = kRSetOffset +
239       kObjectStartOffset / kBitsPerPointer;
240 
241   // Object area size in bytes.
242   static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
243 
244   // Maximum object size that fits in a page.
245   static const int kMaxHeapObjectSize = kObjectAreaSize;
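  // For concreteness, with an 8K page and 4-byte pointers (32-bit targets):
  // kRSetEndOffset is 8192 / 32 = 256, kRSetStartOffset is 256 / 32 = 8, so
  // the used remembered set is the 248 bytes [8..256[, and the object area is
  // 8192 - 256 = 7936 bytes, which is also the largest object a page can
  // hold.  On x64 (kRSetOffset == 32, 8-byte pointers) the whole remembered
  // set occupies bytes [32..160[, i.e. the 128 bytes mentioned above.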
246 
247   //---------------------------------------------------------------------------
248   // Page header description.
249   //
250   // If a page is not in the large object space, the first word,
251   // opaque_header, encodes the next page address (aligned to kPageSize 8K)
252   // and the chunk number (0 ~ 8K-1).  Only MemoryAllocator should use
253   // opaque_header. The value range of the opaque_header is [0..kPageSize[,
254   // or [next_page_start, next_page_end[. It cannot point to a valid address
255   // in the current page.  If a page is in the large object space, the first
256   // word *may* (if the page start and large object chunk start are the
257   // same) contain the address of the next large object chunk.
258   intptr_t opaque_header;
259 
260   // If the page is not in the large object space, the low-order bit of the
261   // second word is set. If the page is in the large object space, the
262   // second word *may* (if the page start and large object chunk start are
263   // the same) contain the large object chunk size.  In either case, the
264   // low-order bit for large object pages will be cleared.
265   int is_normal_page;
266 
267   // The following fields may overlap with remembered set, they can only
268   // be used in the mark-compact collector when remembered set is not
269   // used.
270 
271   // The index of the page in its owner space.
272   int mc_page_index;
273 
274   // The allocation pointer after relocating objects to this page.
275   Address mc_relocation_top;
276 
277   // The forwarding address of the first live object in this page.
278   Address mc_first_forwarded;
279 
280 #ifdef DEBUG
281  private:
282   static RSetState rset_state_;  // state of the remembered set
283 #endif
284 };
285 
286 
287 // ----------------------------------------------------------------------------
288 // Space is the abstract superclass for all allocation spaces.
289 class Space : public Malloced {
290  public:
291   Space(AllocationSpace id, Executability executable)
292       : id_(id), executable_(executable) {}
293 
294   virtual ~Space() {}
295 
296   // Does the space need executable memory?
297   Executability executable() { return executable_; }
298 
299   // Identity used in error reporting.
300   AllocationSpace identity() { return id_; }
301 
302   virtual int Size() = 0;
303 
304 #ifdef DEBUG
305   virtual void Print() = 0;
306 #endif
307 
308   // After calling this we can allocate a certain number of bytes using only
309   // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
310   // without using freelists or causing a GC.  This is used by partial
311   // snapshots.  It returns true if space was reserved or false if a GC is
312   // needed.  For paged spaces the space requested must include the space wasted
313   // at the end of each page when allocating linearly.
314   virtual bool ReserveSpace(int bytes) = 0;
315 
316  private:
317   AllocationSpace id_;
318   Executability executable_;
319 };
320 
321 
322 // ----------------------------------------------------------------------------
323 // All heap objects containing executable code (code objects) must be allocated
324 // from a 2 GB range of memory, so that they can call each other using 32-bit
325 // displacements.  This happens automatically on 32-bit platforms, where 32-bit
326 // displacements cover the entire 4GB virtual address space.  On 64-bit
327 // platforms, we support this using the CodeRange object, which reserves and
328 // manages a range of virtual memory.
329 class CodeRange : public AllStatic {
330  public:
331   // Reserves a range of virtual memory, but does not commit any of it.
332   // Can only be called once, at heap initialization time.
333   // Returns false on failure.
334   static bool Setup(const size_t requested_size);
335 
336   // Frees the range of virtual memory, and frees the data structures used to
337   // manage it.
338   static void TearDown();
339 
340   static bool exists() { return code_range_ != NULL; }
341   static bool contains(Address address) {
342     if (code_range_ == NULL) return false;
343     Address start = static_cast<Address>(code_range_->address());
344     return start <= address && address < start + code_range_->size();
345   }
346 
347   // Allocates a chunk of memory from the large-object portion of
348   // the code range.  On platforms with no separate code range, it should
349   // not be called.
350   static void* AllocateRawMemory(const size_t requested, size_t* allocated);
351   static void FreeRawMemory(void* buf, size_t length);
352 
353  private:
354   // The reserved range of virtual memory that all code objects are put in.
355   static VirtualMemory* code_range_;
356   // Plain old data class, just a struct plus a constructor.
357   class FreeBlock {
358    public:
359     FreeBlock(Address start_arg, size_t size_arg)
360         : start(start_arg), size(size_arg) {}
361     FreeBlock(void* start_arg, size_t size_arg)
362         : start(static_cast<Address>(start_arg)), size(size_arg) {}
363 
364     Address start;
365     size_t size;
366   };
367 
368   // Freed blocks of memory are added to the free list.  When the allocation
369   // list is exhausted, the free list is sorted and merged to make the new
370   // allocation list.
371   static List<FreeBlock> free_list_;
372   // Memory is allocated from the free blocks on the allocation list.
373   // The block at current_allocation_block_index_ is the current block.
374   static List<FreeBlock> allocation_list_;
375   static int current_allocation_block_index_;
376 
377   // Finds a block on the allocation list that contains at least the
378   // requested amount of memory.  If none is found, sorts and merges
379   // the existing free memory blocks, and searches again.
380   // If none can be found, terminates V8 with FatalProcessOutOfMemory.
381   static void GetNextAllocationBlock(size_t requested);
382   // Compares the start addresses of two free blocks.
383   static int CompareFreeBlockAddress(const FreeBlock* left,
384                                      const FreeBlock* right);
385 };
386 
387 
388 // ----------------------------------------------------------------------------
389 // A space acquires chunks of memory from the operating system. The memory
390 // allocator manages chunks for the paged heap spaces (old space and map
391 // space).  A paged chunk consists of pages. Pages in a chunk have contiguous
392 // addresses and are linked as a list.
393 //
394 // The allocator keeps an initial chunk which is used for the new space.  The
395 // leftover regions of the initial chunk are used for the initial chunks of
396 // old space and map space if they are big enough to hold at least one page.
397 // The allocator assumes that there is one old space and one map space; each
398 // expansion allocates kPagesPerChunk pages, except possibly the last one
399 // (before running out of space).  The first chunk may contain fewer
400 // than kPagesPerChunk pages as well.
401 //
402 // The memory allocator also allocates chunks for the large object space, but
403 // they are managed by the space itself.  The new space does not expand.
404 
405 class MemoryAllocator : public AllStatic {
406  public:
407   // Initializes its internal bookkeeping structures.
408   // Max capacity of the total space.
409   static bool Setup(int max_capacity);
410 
411   // Deletes valid chunks.
412   static void TearDown();
413 
414   // Reserves an initial address range of virtual memory to be split between
415   // the two new space semispaces, the old space, and the map space.  The
416   // memory is not yet committed or assigned to spaces and split into pages.
417   // The initial chunk is unmapped when the memory allocator is torn down.
418   // This function should only be called when there is not already a reserved
419   // initial chunk (initial_chunk_ should be NULL).  It returns the start
420   // address of the initial chunk if successful, with the side effect of
421   // setting the initial chunk, or else NULL if unsuccessful, in which case
422   // the initial chunk is left NULL.
423   static void* ReserveInitialChunk(const size_t requested);
424 
425   // Commits pages from an as-yet-unmanaged block of virtual memory into a
426   // paged space.  The block should be part of the initial chunk reserved via
427   // a call to ReserveInitialChunk.  The number of pages is always returned in
428   // the output parameter num_pages.  This function assumes that the start
429   // address is non-null and that it is big enough to hold at least one
430   // page-aligned page.  The call always succeeds, and num_pages is always
431   // greater than zero.
432   static Page* CommitPages(Address start, size_t size, PagedSpace* owner,
433                            int* num_pages);
434 
435   // Commit a contiguous block of memory from the initial chunk.  Assumes that
436   // the address is not NULL, the size is greater than zero, and that the
437   // block is contained in the initial chunk.  Returns true if it succeeded
438   // and false otherwise.
439   static bool CommitBlock(Address start, size_t size, Executability executable);
440 
441   // Uncommit a contiguous block of memory [start..(start+size)[.
442   // start is not NULL, the size is greater than zero, and the
443   // block is contained in the initial chunk.  Returns true if it succeeded
444   // and false otherwise.
445   static bool UncommitBlock(Address start, size_t size);
446 
447   // Zaps a contiguous block of memory [start..(start+size)[ thus
448   // filling it up with a recognizable non-NULL bit pattern.
449   static void ZapBlock(Address start, size_t size);
450 
451   // Attempts to allocate the requested (non-zero) number of pages from the
452   // OS.  Fewer pages might be allocated than requested. If it fails to
453   // allocate memory from the OS or cannot allocate a single page, this
454   // function returns an invalid page pointer (NULL). The caller must check
455   // whether the returned page is valid (by calling Page::is_valid()).  It is
456   // guaranteed that allocated pages have contiguous addresses.  The actual
457   // number of allocated pages is returned in the output parameter
458   // allocated_pages.  If the PagedSpace owner is executable and there is
459   // a code range, the pages are allocated from the code range.
460   static Page* AllocatePages(int requested_pages, int* allocated_pages,
461                              PagedSpace* owner);
462 
463   // Frees pages from a given page and after. If 'p' is the first page
464   // of a chunk, pages from 'p' are freed and this function returns an
465   // invalid page pointer. Otherwise, the function searches a page
466   // after 'p' that is the first page of a chunk. Pages after the
467   // found page are freed and the function returns 'p'.
468   static Page* FreePages(Page* p);
469 
470   // Allocates and frees raw memory of certain size.
471   // These are just thin wrappers around OS::Allocate and OS::Free,
472   // but keep track of allocated bytes as part of heap.
473   // If the flag is EXECUTABLE and a code range exists, the requested
474   // memory is allocated from the code range.  If a code range exists
475   // and the freed memory is in it, the code range manages the freed memory.
476   static void* AllocateRawMemory(const size_t requested,
477                                  size_t* allocated,
478                                  Executability executable);
479   static void FreeRawMemory(void* buf, size_t length);
480 
481   // Returns the maximum available bytes of heaps.
482   static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
483 
484   // Returns allocated spaces in bytes.
485   static int Size() { return size_; }
486 
487   // Returns maximum available bytes that the old space can have.
488   static int MaxAvailable() {
489     return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
490   }
491 
492   // Links two pages.
493   static inline void SetNextPage(Page* prev, Page* next);
494 
495   // Returns the next page of a given page.
496   static inline Page* GetNextPage(Page* p);
497 
498   // Checks whether a page belongs to a space.
499   static inline bool IsPageInSpace(Page* p, PagedSpace* space);
500 
501   // Returns the space that owns the given page.
502   static inline PagedSpace* PageOwner(Page* page);
503 
504   // Finds the first/last page in the same chunk as a given page.
505   static Page* FindFirstPageInSameChunk(Page* p);
506   static Page* FindLastPageInSameChunk(Page* p);
507 
508 #ifdef ENABLE_HEAP_PROTECTION
509   // Protect/unprotect a block of memory by marking it read-only/writable.
510   static inline void Protect(Address start, size_t size);
511   static inline void Unprotect(Address start, size_t size,
512                                Executability executable);
513 
514   // Protect/unprotect a chunk given a page in the chunk.
515   static inline void ProtectChunkFromPage(Page* page);
516   static inline void UnprotectChunkFromPage(Page* page);
517 #endif
518 
519 #ifdef DEBUG
520   // Reports statistic info of the space.
521   static void ReportStatistics();
522 #endif
523 
524   // Due to an encoding limitation, we can have at most 8K chunks.
525   static const int kMaxNofChunks = 1 << kPageSizeBits;
526   // If a chunk has at least 16 pages, the maximum heap size is about
527   // 8K * 8K * 16 = 1G bytes.
528 #ifdef V8_TARGET_ARCH_X64
529   static const int kPagesPerChunk = 32;
530 #else
531   static const int kPagesPerChunk = 16;
532 #endif
533   static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
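  // For example, with 8K pages this gives kChunkSize = 16 * 8K = 128K bytes
  // (32 * 8K = 256K on x64), so with at most 8K chunks the paged part of the
  // heap tops out at roughly 8K * 128K = 1G bytes (2G on x64).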
534 
535  private:
536   // Maximum space size in bytes.
537   static int capacity_;
538 
539   // Allocated space size in bytes.
540   static int size_;
541 
542   // The initial chunk of virtual memory.
543   static VirtualMemory* initial_chunk_;
544 
545   // Allocated chunk info: chunk start address, chunk size, and owning space.
546   class ChunkInfo BASE_EMBEDDED {
547    public:
548     ChunkInfo() : address_(NULL), size_(0), owner_(NULL) {}
549     void init(Address a, size_t s, PagedSpace* o) {
550       address_ = a;
551       size_ = s;
552       owner_ = o;
553     }
554     Address address() { return address_; }
555     size_t size() { return size_; }
556     PagedSpace* owner() { return owner_; }
557 
558    private:
559     Address address_;
560     size_t size_;
561     PagedSpace* owner_;
562   };
563 
564   // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
565   static List<ChunkInfo> chunks_;
566   static List<int> free_chunk_ids_;
567   static int max_nof_chunks_;
568   static int top_;
569 
570   // Push/pop a free chunk id onto/from the stack.
571   static void Push(int free_chunk_id);
572   static int Pop();
573   static bool OutOfChunkIds() { return top_ == 0; }
574 
575   // Frees a chunk.
576   static void DeleteChunk(int chunk_id);
577 
578   // Basic check whether a chunk id is in the valid range.
579   static inline bool IsValidChunkId(int chunk_id);
580 
581   // Checks whether a chunk id identifies an allocated chunk.
582   static inline bool IsValidChunk(int chunk_id);
583 
584   // Returns the chunk id that a page belongs to.
585   static inline int GetChunkId(Page* p);
586 
587   // True if the address lies in the initial chunk.
588   static inline bool InInitialChunk(Address address);
589 
590   // Initializes pages in a chunk. Returns the first page address.
591   // This function and GetChunkId() are provided for the mark-compact
592   // collector to rebuild page headers in the from space, which is
593   // used as a marking stack and its page headers are destroyed.
594   static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
595                                       PagedSpace* owner);
596 };
597 
598 
599 // -----------------------------------------------------------------------------
600 // Interface for heap object iterator to be implemented by all object space
601 // object iterators.
602 //
603 // NOTE: The space-specific object iterators also implement their own next()
604 //       method, which is used to avoid virtual function calls when
605 //       iterating a specific space.
606 
607 class ObjectIterator : public Malloced {
608  public:
609   virtual ~ObjectIterator() { }
610 
611   virtual HeapObject* next_object() = 0;
612 };
613 
614 
615 // -----------------------------------------------------------------------------
616 // Heap object iterator in new/old/map spaces.
617 //
618 // A HeapObjectIterator iterates objects from a given address to the
619 // top of a space. The given address must be below the current
620 // allocation pointer (space top). There are some caveats.
621 //
622 // (1) If the space top changes upward during iteration (because of
623 //     allocating new objects), the iterator does not iterate objects
624 //     above the original space top. The caller must create a new
625 //     iterator starting from the old top in order to visit these new
626 //     objects.
627 //
628 // (2) If new objects are allocated below the original allocation top
629 //     (e.g., free-list allocation in paged spaces), the new objects
630 //     may or may not be iterated depending on their position with
631 //     respect to the current point of iteration.
632 //
633 // (3) The space top should not change downward during iteration,
634 //     otherwise the iterator will return not-necessarily-valid
635 //     objects.
636 
637 class HeapObjectIterator: public ObjectIterator {
638  public:
639   // Creates a new object iterator in a given space. If a start
640   // address is not given, the iterator starts from the space bottom.
641   // If the size function is not given, the iterator calls the default
642   // Object::Size().
643   explicit HeapObjectIterator(PagedSpace* space);
644   HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
645   HeapObjectIterator(PagedSpace* space, Address start);
646   HeapObjectIterator(PagedSpace* space,
647                      Address start,
648                      HeapObjectCallback size_func);
649 
650   inline HeapObject* next() {
651     return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
652   }
653 
654   // Implementation of ObjectIterator.
655   virtual HeapObject* next_object() { return next(); }
656 
657  private:
658   Address cur_addr_;  // current iteration point
659   Address end_addr_;  // end iteration point
660   Address cur_limit_;  // current page limit
661   HeapObjectCallback size_func_;  // size function
662   Page* end_page_;  // caches the page of the end address
663 
664   HeapObject* FromCurrentPage() {
665     ASSERT(cur_addr_ < cur_limit_);
666 
667     HeapObject* obj = HeapObject::FromAddress(cur_addr_);
668     int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
669     ASSERT_OBJECT_SIZE(obj_size);
670 
671     cur_addr_ += obj_size;
672     ASSERT(cur_addr_ <= cur_limit_);
673 
674     return obj;
675   }
676 
677   // Slow path of next, goes into the next page.
678   HeapObject* FromNextPage();
679 
680   // Initializes fields.
681   void Initialize(Address start, Address end, HeapObjectCallback size_func);
682 
683 #ifdef DEBUG
684   // Verifies whether fields have valid values.
685   void Verify();
686 #endif
687 };
688 
689 
690 // -----------------------------------------------------------------------------
691 // A PageIterator iterates the pages in a paged space.
692 //
693 // The PageIterator class provides three modes for iterating pages in a space:
694 //   PAGES_IN_USE iterates pages containing allocated objects.
695 //   PAGES_USED_BY_MC iterates pages that hold relocated objects during a
696 //                    mark-compact collection.
697 //   ALL_PAGES iterates all pages in the space.
698 //
699 // There are some caveats.
700 //
701 // (1) If the space expands during iteration, new pages will not be
702 //     returned by the iterator in any mode.
703 //
704 // (2) If new objects are allocated during iteration, they will appear
705 //     in pages returned by the iterator.  Allocation may cause the
706 //     allocation pointer or MC allocation pointer in the last page to
707 //     change between constructing the iterator and iterating the last
708 //     page.
709 //
710 // (3) The space should not shrink during iteration, otherwise the
711 //     iterator will return deallocated pages.
712 
713 class PageIterator BASE_EMBEDDED {
714  public:
715   enum Mode {
716     PAGES_IN_USE,
717     PAGES_USED_BY_MC,
718     ALL_PAGES
719   };
720 
721   PageIterator(PagedSpace* space, Mode mode);
722 
723   inline bool has_next();
724   inline Page* next();
725 
726  private:
727   PagedSpace* space_;
728   Page* prev_page_;  // Previous page returned.
729   Page* stop_page_;  // Page to stop at (last page returned by the iterator).
730 };
731 
732 
733 // -----------------------------------------------------------------------------
734 // A space has a list of pages. The next page can be accessed via
735 // Page::next_page() call. The next page of the last page is an
736 // invalid page pointer. A space can expand and shrink dynamically.
737 
738 // An abstraction of allocation and relocation pointers in a page-structured
739 // space.
740 class AllocationInfo {
741  public:
742   Address top;  // current allocation top
743   Address limit;  // current allocation limit
744 
745 #ifdef DEBUG
746   bool VerifyPagedAllocation() {
747     return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
748         && (top <= limit);
749   }
750 #endif
751 };
752 
753 
754 // An abstraction of the accounting statistics of a page-structured space.
755 // The 'capacity' of a space is the number of object-area bytes (ie, not
756 // including page bookkeeping structures) currently in the space. The 'size'
757 // of a space is the number of allocated bytes, the 'waste' in the space is
758 // the number of bytes that are not allocated and not available to
759 // allocation without reorganizing the space via a GC (eg, small blocks due
760 // to internal fragmentation, top of page areas in map space), and the bytes
761 // 'available' is the number of unallocated bytes that are not waste.  The
762 // capacity is the sum of size, waste, and available.
763 //
764 // The stats are only set by functions that ensure they stay balanced. These
765 // functions increase or decrease one of the non-capacity stats in
766 // conjunction with capacity, or else they always balance increases and
767 // decreases to the non-capacity stats.
768 class AllocationStats BASE_EMBEDDED {
769  public:
770   AllocationStats() { Clear(); }
771 
772   // Zero out all the allocation statistics (ie, no capacity).
773   void Clear() {
774     capacity_ = 0;
775     available_ = 0;
776     size_ = 0;
777     waste_ = 0;
778   }
779 
780   // Reset the allocation statistics (ie, available = capacity with no
781   // wasted or allocated bytes).
782   void Reset() {
783     available_ = capacity_;
784     size_ = 0;
785     waste_ = 0;
786   }
787 
788   // Accessors for the allocation statistics.
789   int Capacity() { return capacity_; }
790   int Available() { return available_; }
791   int Size() { return size_; }
792   int Waste() { return waste_; }
793 
794   // Grow the space by adding available bytes.
795   void ExpandSpace(int size_in_bytes) {
796     capacity_ += size_in_bytes;
797     available_ += size_in_bytes;
798   }
799 
800   // Shrink the space by removing available bytes.
801   void ShrinkSpace(int size_in_bytes) {
802     capacity_ -= size_in_bytes;
803     available_ -= size_in_bytes;
804   }
805 
806   // Allocate from available bytes (available -> size).
807   void AllocateBytes(int size_in_bytes) {
808     available_ -= size_in_bytes;
809     size_ += size_in_bytes;
810   }
811 
812   // Free allocated bytes, making them available (size -> available).
813   void DeallocateBytes(int size_in_bytes) {
814     size_ -= size_in_bytes;
815     available_ += size_in_bytes;
816   }
817 
818   // Waste free bytes (available -> waste).
819   void WasteBytes(int size_in_bytes) {
820     available_ -= size_in_bytes;
821     waste_ += size_in_bytes;
822   }
823 
824   // Consider the wasted bytes to be allocated, as they contain filler
825   // objects (waste -> size).
826   void FillWastedBytes(int size_in_bytes) {
827     waste_ -= size_in_bytes;
828     size_ += size_in_bytes;
829   }
830 
831  private:
832   int capacity_;
833   int available_;
834   int size_;
835   int waste_;
836 };
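// A minimal usage sketch of the balance described above (hypothetical
// values): after
//   AllocationStats stats;      // capacity 0, available 0, size 0, waste 0
//   stats.ExpandSpace(8192);    // capacity 8192, available 8192
//   stats.AllocateBytes(100);   // available 8092, size 100
//   stats.WasteBytes(92);       // available 8000, waste 92
// capacity (8192) still equals size (100) + waste (92) + available (8000).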
837 
838 
839 class PagedSpace : public Space {
840  public:
841   // Creates a space with a maximum capacity, and an id.
842   PagedSpace(int max_capacity, AllocationSpace id, Executability executable);
843 
844   virtual ~PagedSpace() {}
845 
846   // Set up the space using the given address range of virtual memory (from
847   // the memory allocator's initial chunk) if possible.  If the block of
848   // addresses is not big enough to contain a single page-aligned page, a
849   // fresh chunk will be allocated.
850   bool Setup(Address start, size_t size);
851 
852   // Returns true if the space has been successfully set up and not
853   // subsequently torn down.
854   bool HasBeenSetup();
855 
856   // Cleans up the space, frees all pages in this space except those belonging
857   // to the initial chunk, uncommits addresses in the initial chunk.
858   void TearDown();
859 
860   // Checks whether an object/address is in this space.
861   inline bool Contains(Address a);
862   bool Contains(HeapObject* o) { return Contains(o->address()); }
863 
864   // Given an address occupied by a live object, return that object if it is
865   // in this space, or Failure::Exception() if it is not. The implementation
866   // iterates over objects in the page containing the address, the cost is
867   // linear in the number of objects in the page. It may be slow.
868   Object* FindObject(Address addr);
869 
870   // Checks whether page is currently in use by this space.
871   bool IsUsed(Page* page);
872 
873   // Clears remembered sets of pages in this space.
874   void ClearRSet();
875 
876   // Prepares for a mark-compact GC.
877   virtual void PrepareForMarkCompact(bool will_compact) = 0;
878 
879   virtual Address PageAllocationTop(Page* page) = 0;
880 
881   // Current capacity without growing (Size() + Available() + Waste()).
882   int Capacity() { return accounting_stats_.Capacity(); }
883 
884   // Total amount of memory committed for this space.  For paged
885   // spaces this equals the capacity.
886   int CommittedMemory() { return Capacity(); }
887 
888   // Available bytes without growing.
889   int Available() { return accounting_stats_.Available(); }
890 
891   // Allocated bytes in this space.
892   virtual int Size() { return accounting_stats_.Size(); }
893 
894   // Wasted bytes due to fragmentation and not recoverable until the
895   // next GC of this space.
896   int Waste() { return accounting_stats_.Waste(); }
897 
898   // Returns the address of the first object in this space.
899   Address bottom() { return first_page_->ObjectAreaStart(); }
900 
901   // Returns the allocation pointer in this space.
902   Address top() { return allocation_info_.top; }
903 
904   // Allocate the requested number of bytes in the space if possible, return a
905   // failure object if not.
906   inline Object* AllocateRaw(int size_in_bytes);
907 
908   // Allocate the requested number of bytes for relocation during mark-compact
909   // collection.
910   inline Object* MCAllocateRaw(int size_in_bytes);
911 
912   virtual bool ReserveSpace(int bytes);
913 
914   // Used by ReserveSpace.
915   virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
916 
917   // ---------------------------------------------------------------------------
918   // Mark-compact collection support functions
919 
920   // Set the relocation point to the beginning of the space.
921   void MCResetRelocationInfo();
922 
923   // Writes relocation info to the top page.
924   void MCWriteRelocationInfoToPage() {
925     TopPageOf(mc_forwarding_info_)->mc_relocation_top = mc_forwarding_info_.top;
926   }
927 
928   // Computes the offset of a given address in this space to the beginning
929   // of the space.
930   int MCSpaceOffsetForAddress(Address addr);
931 
932   // Updates the allocation pointer to the relocation top after a mark-compact
933   // collection.
934   virtual void MCCommitRelocationInfo() = 0;
935 
936   // Releases half of unused pages.
937   void Shrink();
938 
939   // Ensures that the capacity is at least 'capacity'. Returns false on failure.
940   bool EnsureCapacity(int capacity);
941 
942 #ifdef ENABLE_HEAP_PROTECTION
943   // Protect/unprotect the space by marking it read-only/writable.
944   void Protect();
945   void Unprotect();
946 #endif
947 
948 #ifdef DEBUG
949   // Print meta info and objects in this space.
950   virtual void Print();
951 
952   // Verify integrity of this space.
953   virtual void Verify(ObjectVisitor* visitor);
954 
955   // Overridden by subclasses to verify space-specific object
956   // properties (e.g., only maps or free-list nodes are in map space).
957   virtual void VerifyObject(HeapObject* obj) {}
958 
959   // Report code object related statistics
960   void CollectCodeStatistics();
961   static void ReportCodeStatistics();
962   static void ResetCodeStatistics();
963 #endif
964 
965  protected:
966   // Maximum capacity of this space.
967   int max_capacity_;
968 
969   // Accounting information for this space.
970   AllocationStats accounting_stats_;
971 
972   // The first page in this space.
973   Page* first_page_;
974 
975   // The last page in this space.  Initially set in Setup, updated in
976   // Expand and Shrink.
977   Page* last_page_;
978 
979   // Normal allocation information.
980   AllocationInfo allocation_info_;
981 
982   // Relocation information during mark-compact collections.
983   AllocationInfo mc_forwarding_info_;
984 
985   // Bytes of each page that cannot be allocated.  Possibly non-zero
986   // for pages in spaces with only fixed-size objects.  Always zero
987   // for pages in spaces with variable sized objects (those pages are
988   // padded with free-list nodes).
989   int page_extra_;
990 
991   // Sets allocation pointer to a page bottom.
992   static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
993 
994   // Returns the top page specified by an allocation info structure.
995   static Page* TopPageOf(AllocationInfo alloc_info) {
996     return Page::FromAllocationTop(alloc_info.limit);
997   }
998 
999   int CountPagesToTop() {
1000     Page* p = Page::FromAllocationTop(allocation_info_.top);
1001     PageIterator it(this, PageIterator::ALL_PAGES);
1002     int counter = 1;
1003     while (it.has_next()) {
1004       if (it.next() == p) return counter;
1005       counter++;
1006     }
1007     UNREACHABLE();
1008     return -1;
1009   }
1010 
1011   // Expands the space by allocating a fixed number of pages. Returns false if
1012   // it cannot allocate the requested number of pages from the OS.  Newly
1013   // allocated pages are appended to last_page.
1014   bool Expand(Page* last_page);
1015 
1016   // Generic fast case allocation function that tries linear allocation in
1017   // the top page of 'alloc_info'.  Returns NULL on failure.
1018   inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
1019                                       int size_in_bytes);
1020 
1021   // During normal allocation or deserialization, roll to the next page in
1022   // the space (there is assumed to be one) and allocate there.  This
1023   // function is space-dependent.
1024   virtual HeapObject* AllocateInNextPage(Page* current_page,
1025                                          int size_in_bytes) = 0;
1026 
1027   // Slow path of AllocateRaw.  This function is space-dependent.
1028   virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
1029 
1030   // Slow path of MCAllocateRaw.
1031   HeapObject* SlowMCAllocateRaw(int size_in_bytes);
1032 
1033 #ifdef DEBUG
1034   // Returns the number of total pages in this space.
1035   int CountTotalPages();
1036 
1037   void DoPrintRSet(const char* space_name);
1038 #endif
1039  private:
1040   // Returns the page of the allocation pointer.
1041   Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
1042 
1043   // Returns a pointer to the page of the relocation pointer.
1044   Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
1045 
1046   friend class PageIterator;
1047 };
1048 
1049 
1050 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1051 class NumberAndSizeInfo BASE_EMBEDDED {
1052  public:
1053   NumberAndSizeInfo() : number_(0), bytes_(0) {}
1054 
1055   int number() const { return number_; }
1056   void increment_number(int num) { number_ += num; }
1057 
1058   int bytes() const { return bytes_; }
1059   void increment_bytes(int size) { bytes_ += size; }
1060 
1061   void clear() {
1062     number_ = 0;
1063     bytes_ = 0;
1064   }
1065 
1066  private:
1067   int number_;
1068   int bytes_;
1069 };
1070 
1071 
1072 // HistogramInfo class for recording a single "bar" of a histogram.  This
1073 // class is used for collecting statistics to print to stdout (when compiled
1074 // with DEBUG) or to the log file (when compiled with
1075 // ENABLE_LOGGING_AND_PROFILING).
1076 class HistogramInfo: public NumberAndSizeInfo {
1077  public:
1078   HistogramInfo() : NumberAndSizeInfo() {}
1079 
1080   const char* name() { return name_; }
1081   void set_name(const char* name) { name_ = name; }
1082 
1083  private:
1084   const char* name_;
1085 };
1086 #endif
1087 
1088 
1089 // -----------------------------------------------------------------------------
1090 // SemiSpace in young generation
1091 //
1092 // A semispace is a contiguous chunk of memory. The mark-compact collector
1093 // uses the memory in the from space as a marking stack when tracing live
1094 // objects.
1095 
1096 class SemiSpace : public Space {
1097  public:
1098   // Constructor.
1099   SemiSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {
1100     start_ = NULL;
1101     age_mark_ = NULL;
1102   }
1103 
1104   // Sets up the semispace using the given chunk.
1105   bool Setup(Address start, int initial_capacity, int maximum_capacity);
1106 
1107   // Tear down the space.  Heap memory was not allocated by the space, so it
1108   // is not deallocated here.
1109   void TearDown();
1110 
1111   // True if the space has been set up but not torn down.
1112   bool HasBeenSetup() { return start_ != NULL; }
1113 
1114   // Grow the size of the semispace by committing extra virtual memory.
1115   // Assumes that the caller has checked that the semispace has not reached
1116   // its maximum capacity (and thus there is space available in the reserved
1117   // address range to grow).
1118   bool Grow();
1119 
1120   // Grow the semispace to the new capacity.  The new capacity
1121   // requested must be larger than the current capacity.
1122   bool GrowTo(int new_capacity);
1123 
1124   // Shrinks the semispace to the new capacity.  The new capacity
1125   // requested must be more than the amount of used memory in the
1126   // semispace and less than the current capacity.
1127   bool ShrinkTo(int new_capacity);
1128 
1129   // Returns the start address of the space.
1130   Address low() { return start_; }
1131   // Returns one past the end address of the space.
1132   Address high() { return low() + capacity_; }
1133 
1134   // Age mark accessors.
1135   Address age_mark() { return age_mark_; }
1136   void set_age_mark(Address mark) { age_mark_ = mark; }
1137 
1138   // True if the address is in the address range of this semispace (not
1139   // necessarily below the allocation pointer).
1140   bool Contains(Address a) {
1141     return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1142            == reinterpret_cast<uintptr_t>(start_);
1143   }
1144 
1145   // True if the object is a heap object in the address range of this
1146   // semispace (not necessarily below the allocation pointer).
1147   bool Contains(Object* o) {
1148     return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1149   }
1150 
1151   // The offset of an address from the beginning of the space.
1152   int SpaceOffsetForAddress(Address addr) {
1153     return static_cast<int>(addr - low());
1154   }
1155 
1156   // If we don't have these here then SemiSpace will be abstract.  However
1157   // they should never be called.
1158   virtual int Size() {
1159     UNREACHABLE();
1160     return 0;
1161   }
1162 
1163   virtual bool ReserveSpace(int bytes) {
1164     UNREACHABLE();
1165     return false;
1166   }
1167 
1168   bool is_committed() { return committed_; }
1169   bool Commit();
1170   bool Uncommit();
1171 
1172 #ifdef DEBUG
1173   virtual void Print();
1174   virtual void Verify();
1175 #endif
1176 
1177   // Returns the current capacity of the semi space.
1178   int Capacity() { return capacity_; }
1179 
1180   // Returns the maximum capacity of the semi space.
1181   int MaximumCapacity() { return maximum_capacity_; }
1182 
1183   // Returns the initial capacity of the semi space.
1184   int InitialCapacity() { return initial_capacity_; }
1185 
1186  private:
1187   // The current and maximum capacity of the space.
1188   int capacity_;
1189   int maximum_capacity_;
1190   int initial_capacity_;
1191 
1192   // The start address of the space.
1193   Address start_;
1194   // Used to govern object promotion during mark-compact collection.
1195   Address age_mark_;
1196 
1197   // Masks and comparison values to test for containment in this semispace.
1198   uintptr_t address_mask_;
1199   uintptr_t object_mask_;
1200   uintptr_t object_expected_;
1201 
1202   bool committed_;
1203 
1204  public:
1205   TRACK_MEMORY("SemiSpace")
1206 };
1207 
1208 
1209 // A SemiSpaceIterator is an ObjectIterator that iterates over the active
1210 // semispace of the heap's new space.  It iterates over the objects in the
1211 // semispace from a given start address (defaulting to the bottom of the
1212 // semispace) to the top of the semispace.  New objects allocated after the
1213 // iterator is created are not iterated.
1214 class SemiSpaceIterator : public ObjectIterator {
1215  public:
1216   // Create an iterator over the objects in the given space.  If no start
1217   // address is given, the iterator starts from the bottom of the space.  If
1218   // no size function is given, the iterator calls Object::Size().
1219   explicit SemiSpaceIterator(NewSpace* space);
1220   SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
1221   SemiSpaceIterator(NewSpace* space, Address start);
1222 
1223   HeapObject* next() {
1224     if (current_ == limit_) return NULL;
1225 
1226     HeapObject* object = HeapObject::FromAddress(current_);
1227     int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
1228 
1229     current_ += size;
1230     return object;
1231   }
1232 
1233   // Implementation of the ObjectIterator functions.
1234   virtual HeapObject* next_object() { return next(); }
1235 
1236  private:
1237   void Initialize(NewSpace* space, Address start, Address end,
1238                   HeapObjectCallback size_func);
1239 
1240   // The semispace.
1241   SemiSpace* space_;
1242   // The current iteration point.
1243   Address current_;
1244   // The end of iteration.
1245   Address limit_;
1246   // The callback function.
1247   HeapObjectCallback size_func_;
1248 };
1249 
1250 
1251 // -----------------------------------------------------------------------------
1252 // The young generation space.
1253 //
1254 // The new space consists of a contiguous pair of semispaces.  It simply
1255 // forwards most functions to the appropriate semispace.
1256 
1257 class NewSpace : public Space {
1258  public:
1259   // Constructor.
1260   NewSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {}
1261 
1262   // Sets up the new space using the given chunk.
1263   bool Setup(Address start, int size);
1264 
1265   // Tears down the space.  Heap memory was not allocated by the space, so it
1266   // is not deallocated here.
1267   void TearDown();
1268 
1269   // True if the space has been set up but not torn down.
1270   bool HasBeenSetup() {
1271     return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
1272   }
1273 
1274   // Flip the pair of spaces.
1275   void Flip();
1276 
1277   // Grow the capacity of the semispaces.  Assumes that they are not at
1278   // their maximum capacity.
1279   void Grow();
1280 
1281   // Shrink the capacity of the semispaces.
1282   void Shrink();
1283 
1284   // True if the address or object lies in the address range of either
1285   // semispace (not necessarily below the allocation pointer).
1286   bool Contains(Address a) {
1287     return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1288         == reinterpret_cast<uintptr_t>(start_);
1289   }
1290   bool Contains(Object* o) {
1291     return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1292   }
1293 
1294   // Return the allocated bytes in the active semispace.
1295   virtual int Size() { return static_cast<int>(top() - bottom()); }
1296 
1297   // Return the current capacity of a semispace.
1298   int Capacity() {
1299     ASSERT(to_space_.Capacity() == from_space_.Capacity());
1300     return to_space_.Capacity();
1301   }
1302 
1303   // Return the total amount of memory committed for new space.
1304   int CommittedMemory() {
1305     if (from_space_.is_committed()) return 2 * Capacity();
1306     return Capacity();
1307   }
1308 
1309   // Return the available bytes without growing in the active semispace.
1310   int Available() { return Capacity() - Size(); }
1311 
1312   // Return the maximum capacity of a semispace.
1313   int MaximumCapacity() {
1314     ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
1315     return to_space_.MaximumCapacity();
1316   }
1317 
1318   // Returns the initial capacity of a semispace.
1319   int InitialCapacity() {
1320     ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
1321     return to_space_.InitialCapacity();
1322   }
1323 
1324   // Return the address of the allocation pointer in the active semispace.
1325   Address top() { return allocation_info_.top; }
1326   // Return the address of the first object in the active semispace.
1327   Address bottom() { return to_space_.low(); }
1328 
1329   // Get the age mark of the inactive semispace.
1330   Address age_mark() { return from_space_.age_mark(); }
1331   // Set the age mark in the active semispace.
1332   void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
1333 
1334   // The start address of the space and a bit mask. Anding an address in the
1335   // new space with the mask will result in the start address.
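  // (A note on why this works: it assumes the new space is allocated at an
  // address aligned to its power-of-two size, so that masking off the low
  // bits of any address inside the space recovers start_.)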
1336   Address start() { return start_; }
1337   uintptr_t mask() { return address_mask_; }

  // The allocation top and limit addresses.
  Address* allocation_top_address() { return &allocation_info_.top; }
  Address* allocation_limit_address() { return &allocation_info_.limit; }

  Object* AllocateRaw(int size_in_bytes) {
    return AllocateRawInternal(size_in_bytes, &allocation_info_);
  }
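
  // Illustrative sketch (hypothetical caller; hedged): like the other raw
  // allocators in this file, AllocateRaw is expected to return a failure
  // object rather than a heap object when the active semispace is full, so
  // callers check the result before using it:
  //
  //   Object* result = new_space->AllocateRaw(size_in_bytes);
  //   if (result->IsFailure()) {
  //     // Out of room in to space; the caller must collect garbage and retry.
  //   }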

  // Allocate the requested number of bytes for relocation during mark-compact
  // collection.
  Object* MCAllocateRaw(int size_in_bytes) {
    return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
  }

  // Reset the allocation pointer to the beginning of the active semispace.
  void ResetAllocationInfo();
  // Reset the relocation pointer to the bottom of the inactive semispace in
  // preparation for mark-compact collection.
  void MCResetRelocationInfo();
  // Update the allocation pointer in the active semispace after a
  // mark-compact collection.
  void MCCommitRelocationInfo();

  // Get the extent of the inactive semispace (for use as a marking stack).
  Address FromSpaceLow() { return from_space_.low(); }
  Address FromSpaceHigh() { return from_space_.high(); }

  // Get the extent of the active semispace (to sweep newly copied objects
  // during a scavenge collection).
  Address ToSpaceLow() { return to_space_.low(); }
  Address ToSpaceHigh() { return to_space_.high(); }

  // Offsets from the beginning of the semispaces.
  int ToSpaceOffsetForAddress(Address a) {
    return to_space_.SpaceOffsetForAddress(a);
  }
  int FromSpaceOffsetForAddress(Address a) {
    return from_space_.SpaceOffsetForAddress(a);
  }

  // True if the object is a heap object in the address range of the
  // respective semispace (not necessarily below the allocation pointer of the
  // semispace).
  bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
  bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }

  bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
  bool FromSpaceContains(Address a) { return from_space_.Contains(a); }

  virtual bool ReserveSpace(int bytes);

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  virtual void Protect();
  virtual void Unprotect();
#endif

#ifdef DEBUG
  // Verify the active semispace.
  virtual void Verify();
  // Print the active semispace.
  virtual void Print() { to_space_.Print(); }
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  // Iterates the active semispace to collect statistics.
  void CollectStatistics();
  // Reports previously collected statistics of the active semispace.
  void ReportStatistics();
  // Clears previously collected statistics.
  void ClearHistograms();

  // Record the allocation or promotion of a heap object.  Note that we don't
  // record every single allocation, but only those that happen in the
  // to space during a scavenge GC.
  void RecordAllocation(HeapObject* obj);
  void RecordPromotion(HeapObject* obj);
#endif

  // Return whether the operation succeeded.
  bool CommitFromSpaceIfNeeded() {
    if (from_space_.is_committed()) return true;
    return from_space_.Commit();
  }

  bool UncommitFromSpace() {
    if (!from_space_.is_committed()) return true;
    return from_space_.Uncommit();
  }
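
  // Illustrative sketch (assumed usage pattern, not mandated by this header):
  // the inactive semispace can be left uncommitted between collections to
  // save memory, as long as it is committed again before the next scavenge:
  //
  //   if (!new_space->CommitFromSpaceIfNeeded()) { /* treat as out of memory */ }
  //   // ... perform the scavenge ...
  //   new_space->UncommitFromSpace();  // optionally give the pages back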

 private:
  // The semispaces.
  SemiSpace to_space_;
  SemiSpace from_space_;

  // Start address and bit mask for containment testing.
  Address start_;
  uintptr_t address_mask_;
  uintptr_t object_mask_;
  uintptr_t object_expected_;

  // Allocation pointer and limit for normal allocation and allocation during
  // mark-compact collection.
  AllocationInfo allocation_info_;
  AllocationInfo mc_forwarding_info_;

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  HistogramInfo* allocated_histogram_;
  HistogramInfo* promoted_histogram_;
#endif

  // Implementation of AllocateRaw and MCAllocateRaw.
  inline Object* AllocateRawInternal(int size_in_bytes,
                                     AllocationInfo* alloc_info);

  friend class SemiSpaceIterator;

 public:
  TRACK_MEMORY("NewSpace")
};


// -----------------------------------------------------------------------------
// Free lists for old object spaces
//
// Free-list nodes are free blocks in the heap.  They look like heap objects
// (free-list node pointers have the heap object tag, and they have a map like
// a heap object).  They have a size and a next pointer.  The next pointer is
// the raw address of the next free list node (or NULL).
class FreeListNode: public HeapObject {
 public:
  // Obtain a free-list node from a raw address.  This is not a cast because
  // it does not check nor require that the first word at the address is a map
  // pointer.
  static FreeListNode* FromAddress(Address address) {
    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
  }

  static inline bool IsFreeListNode(HeapObject* object);

  // Set the size in bytes, which can be read with HeapObject::Size().  This
  // function also writes a map to the first word of the block so that it
  // looks like a heap object to the garbage collector and heap iteration
  // functions.
  void set_size(int size_in_bytes);

  // Accessors for the next field.
  inline Address next();
  inline void set_next(Address next);

 private:
  static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
};
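

// Illustrative sketch (hedged; mirrors the comments above): when a block is
// returned to a free list, it is first dressed up as a FreeListNode so that
// the garbage collector and heap iterators still see a valid heap object:
//
//   FreeListNode* node = FreeListNode::FromAddress(start);
//   node->set_size(size_in_bytes);  // also writes the map word
//   node->set_next(old_head);       // raw address of the next free block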


// The free list for the old space.
class OldSpaceFreeList BASE_EMBEDDED {
 public:
  explicit OldSpaceFreeList(AllocationSpace owner);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  int available() { return available_; }

  // Place a node on the free list.  The block of size 'size_in_bytes'
  // starting at 'start' is placed on the free list.  The return value is the
  // number of bytes that have been lost due to internal fragmentation by
  // freeing the block.  Bookkeeping information will be written to the block,
  // i.e., its contents will be destroyed.  The start address should be word
  // aligned, and the size should be a non-zero multiple of the word size.
  int Free(Address start, int size_in_bytes);

  // Allocate a block of size 'size_in_bytes' from the free list.  The block
  // is uninitialized.  A failure is returned if no block is available.  The
  // number of bytes lost to fragmentation is returned in the output parameter
  // 'wasted_bytes'.  The size should be a non-zero multiple of the word size.
  Object* Allocate(int size_in_bytes, int* wasted_bytes);
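
  // Illustrative sketch (hypothetical caller): the owning space asks the free
  // list for a block and records how many bytes were lost to internal
  // fragmentation via the output parameter:
  //
  //   int wasted_bytes = 0;
  //   Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
  //   if (!result->IsFailure()) {
  //     // Account size_in_bytes as allocated and wasted_bytes as waste.
  //   }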

 private:
  // The size range of blocks, in bytes. (Smaller allocations are allowed, but
  // will always result in waste.)
  static const int kMinBlockSize = 2 * kPointerSize;
  static const int kMaxBlockSize = Page::kMaxHeapObjectSize;

  // The identity of the owning space, for building allocation Failure
  // objects.
  AllocationSpace owner_;

  // Total available bytes in all blocks on this free list.
  int available_;

  // Blocks are put on exact free lists in an array, indexed by size in words.
  // The available sizes are kept in a list ordered by increasing size. Entries
  // corresponding to sizes < kMinBlockSize always have an empty free list
  // (but index kHead is used for the head of the size list).
  struct SizeNode {
    // Address of the head FreeListNode of the implied block size or NULL.
    Address head_node_;
    // Size (words) of the next larger available size if head_node_ != NULL.
    int next_size_;
  };
  static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
  SizeNode free_[kFreeListsLength];

  // Sentinel elements for the size list. Real elements are in the open
  // interval ]kHead..kEnd[.
  static const int kHead = kMinBlockSize / kPointerSize - 1;
  static const int kEnd = kMaxInt;

  // We keep a "finger" in the size list to speed up a common pattern:
  // repeated requests for the same or increasing sizes.
  int finger_;

  // Starting from *prev, find and return the smallest size >= index (words),
  // or kEnd. Update *prev to be the largest size < index, or kHead.
  int FindSize(int index, int* prev) {
    int cur = free_[*prev].next_size_;
    while (cur < index) {
      *prev = cur;
      cur = free_[cur].next_size_;
    }
    return cur;
  }

  // Remove an existing element from the size list.
  void RemoveSize(int index) {
    int prev = kHead;
    int cur = FindSize(index, &prev);
    ASSERT(cur == index);
    free_[prev].next_size_ = free_[cur].next_size_;
    finger_ = prev;
  }

  // Insert a new element into the size list.
  void InsertSize(int index) {
    int prev = kHead;
    int cur = FindSize(index, &prev);
    ASSERT(cur != index);
    free_[prev].next_size_ = index;
    free_[index].next_size_ = cur;
  }
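
  // Illustrative worked example (sizes are hypothetical): suppose blocks of
  // 3, 5 and 9 words are currently on the free list.  The size list is then
  // threaded through next_size_ as
  //
  //   free_[kHead].next_size_ == 3
  //   free_[3].next_size_     == 5
  //   free_[5].next_size_     == 9
  //   free_[9].next_size_     == kEnd
  //
  // so FindSize(4, &prev) returns 5 with *prev == 3, i.e. a 4-word request
  // would be satisfied from the 5-word blocks (the smallest size that fits),
  // and finger_ remembers a recent position so that repeated or increasing
  // requests do not have to restart the walk from kHead.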

  // The size list is not updated during a sequence of calls to Free, but is
  // rebuilt before the next allocation.
  void RebuildSizeList();
  bool needs_rebuild_;

#ifdef DEBUG
  // Does this free list contain a free block located at the address of 'node'?
  bool Contains(FreeListNode* node);
#endif

  DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
};


// The free list for the map space.
class FixedSizeFreeList BASE_EMBEDDED {
 public:
  FixedSizeFreeList(AllocationSpace owner, int object_size);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  int available() { return available_; }

  // Place a node on the free list.  The block starting at 'start' (assumed to
  // have size object_size_) is placed on the free list.  Bookkeeping
  // information will be written to the block, i.e., its contents will be
  // destroyed.  The start address should be word aligned.
  void Free(Address start);

  // Allocate a fixed-size block from the free list.  The block is
  // uninitialized.  A failure is returned if no block is available.
  Object* Allocate();

 private:
  // Available bytes on the free list.
  int available_;

  // The head of the free list.
  Address head_;

  // The identity of the owning space, for building allocation Failure
  // objects.
  AllocationSpace owner_;

  // The size of the objects in this space.
  int object_size_;

  DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
};


// -----------------------------------------------------------------------------
// Old object space (excluding map objects)

class OldSpace : public PagedSpace {
 public:
  // Creates an old space object with a given maximum capacity.
  // The constructor does not allocate pages from the OS.
  explicit OldSpace(int max_capacity,
                    AllocationSpace id,
                    Executability executable)
      : PagedSpace(max_capacity, id, executable), free_list_(id) {
    page_extra_ = 0;
  }

  // The bytes available on the free list (i.e., not above the linear
  // allocation pointer).
  int AvailableFree() { return free_list_.available(); }

  // The top of allocation in a page in this space. Undefined if page is unused.
  virtual Address PageAllocationTop(Page* page) {
    return page == TopPageOf(allocation_info_) ? top() : page->ObjectAreaEnd();
  }

  // Give a block of memory to the space's free list.  It might be added to
  // the free list or accounted as waste.
  void Free(Address start, int size_in_bytes) {
    int wasted_bytes = free_list_.Free(start, size_in_bytes);
    accounting_stats_.DeallocateBytes(size_in_bytes);
    accounting_stats_.WasteBytes(wasted_bytes);
  }

  // Prepare for full garbage collection.  Resets the relocation pointer and
  // clears the free list.
  virtual void PrepareForMarkCompact(bool will_compact);

  // Updates the allocation pointer to the relocation top after a mark-compact
  // collection.
  virtual void MCCommitRelocationInfo();

  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);

#ifdef DEBUG
  // Reports statistics for the space.
  void ReportStatistics();
  // Dump the remembered sets in the space to stdout.
  void PrintRSet();
#endif

 protected:
  // Virtual function in the superclass.  Slow path of AllocateRaw.
  HeapObject* SlowAllocateRaw(int size_in_bytes);

  // Virtual function in the superclass.  Allocate linearly at the start of
  // the page after current_page (there is assumed to be one).
  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);

 private:
  // The space's free list.
  OldSpaceFreeList free_list_;

 public:
  TRACK_MEMORY("OldSpace")
};


// -----------------------------------------------------------------------------
// Old space for objects of a fixed size

class FixedSpace : public PagedSpace {
 public:
  FixedSpace(int max_capacity,
             AllocationSpace id,
             int object_size_in_bytes,
             const char* name)
      : PagedSpace(max_capacity, id, NOT_EXECUTABLE),
        object_size_in_bytes_(object_size_in_bytes),
        name_(name),
        free_list_(id, object_size_in_bytes) {
    page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
  }
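
  // Illustrative arithmetic (numbers are hypothetical): page_extra_ is the
  // tail of the object area too small to hold a whole fixed-size object.
  // With, say, an 8000-byte object area and 88-byte objects,
  //
  //   page_extra_ = 8000 % 88 = 80
  //
  // so 80 bytes at the end of every page stay unused, and the usable top of a
  // full page is ObjectAreaEnd() - page_extra_ (see PageAllocationTop below).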

  // The top of allocation in a page in this space. Undefined if page is unused.
  virtual Address PageAllocationTop(Page* page) {
    return page == TopPageOf(allocation_info_) ? top()
        : page->ObjectAreaEnd() - page_extra_;
  }

  int object_size_in_bytes() { return object_size_in_bytes_; }

  // Give a fixed-size block of memory to the space's free list.
  void Free(Address start) {
    free_list_.Free(start);
    accounting_stats_.DeallocateBytes(object_size_in_bytes_);
  }

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact);

  // Updates the allocation pointer to the relocation top after a mark-compact
  // collection.
  virtual void MCCommitRelocationInfo();

  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);

#ifdef DEBUG
  // Reports statistics for the space.
  void ReportStatistics();

  // Dump the remembered sets in the space to stdout.
  void PrintRSet();
#endif

 protected:
  // Virtual function in the superclass.  Slow path of AllocateRaw.
  HeapObject* SlowAllocateRaw(int size_in_bytes);

  // Virtual function in the superclass.  Allocate linearly at the start of
  // the page after current_page (there is assumed to be one).
  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);

  void ResetFreeList() {
    free_list_.Reset();
  }

 private:
  // The size of objects in this space.
  int object_size_in_bytes_;

  // The name of this space.
  const char* name_;

  // The space's free list.
  FixedSizeFreeList free_list_;
};


// -----------------------------------------------------------------------------
// Old space for all map objects

class MapSpace : public FixedSpace {
 public:
  // Creates a map space object with a maximum capacity.
  MapSpace(int max_capacity, int max_map_space_pages, AllocationSpace id)
      : FixedSpace(max_capacity, id, Map::kSize, "map"),
        max_map_space_pages_(max_map_space_pages) {
    ASSERT(max_map_space_pages < kMaxMapPageIndex);
  }

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact);

  // Given an index, returns the page address.
  Address PageAddress(int page_index) { return page_addresses_[page_index]; }

  static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;

  // Are map pointers encodable into the map word?
  bool MapPointersEncodable() {
    if (!FLAG_use_big_map_space) {
      ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
      return true;
    }
    return CountPagesToTop() <= max_map_space_pages_;
  }

  // Should be called after a forced sweep to find out if the map space needs
  // compaction.
  bool NeedsCompaction(int live_maps) {
    return !MapPointersEncodable() && live_maps <= CompactionThreshold();
  }

  Address TopAfterCompaction(int live_maps) {
    ASSERT(NeedsCompaction(live_maps));

    int pages_left = live_maps / kMapsPerPage;
    PageIterator it(this, PageIterator::ALL_PAGES);
    while (pages_left-- > 0) {
      ASSERT(it.has_next());
      it.next()->ClearRSet();
    }
    ASSERT(it.has_next());
    Page* top_page = it.next();
    top_page->ClearRSet();
    ASSERT(top_page->is_valid());

    int offset = live_maps % kMapsPerPage * Map::kSize;
    Address top = top_page->ObjectAreaStart() + offset;
    ASSERT(top < top_page->ObjectAreaEnd());
    ASSERT(Contains(top));

    return top;
  }
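
  // Illustrative arithmetic (numbers are hypothetical): if kMapsPerPage were
  // 100 and live_maps == 250, the loop above clears the remembered sets of
  // 250 / 100 = 2 full pages, and the compacted allocation top lands on the
  // next page at
  //
  //   offset = (250 % 100) * Map::kSize = 50 * Map::kSize
  //
  // bytes past that page's ObjectAreaStart().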

  void FinishCompaction(Address new_top, int live_maps) {
    Page* top_page = Page::FromAddress(new_top);
    ASSERT(top_page->is_valid());

    SetAllocationInfo(&allocation_info_, top_page);
    allocation_info_.top = new_top;

    int new_size = live_maps * Map::kSize;
    accounting_stats_.DeallocateBytes(accounting_stats_.Size());
    accounting_stats_.AllocateBytes(new_size);

#ifdef DEBUG
    if (FLAG_enable_slow_asserts) {
      intptr_t actual_size = 0;
      for (Page* p = first_page_; p != top_page; p = p->next_page())
        actual_size += kMapsPerPage * Map::kSize;
      actual_size += (new_top - top_page->ObjectAreaStart());
      ASSERT(accounting_stats_.Size() == actual_size);
    }
#endif

    Shrink();
    ResetFreeList();
  }

 protected:
#ifdef DEBUG
  virtual void VerifyObject(HeapObject* obj);
#endif

 private:
  static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;

  // Do map space compaction if there is a page gap.
  int CompactionThreshold() {
    return kMapsPerPage * (max_map_space_pages_ - 1);
  }

  const int max_map_space_pages_;

  // An array of page start addresses in a map space.
  Address page_addresses_[kMaxMapPageIndex];

 public:
  TRACK_MEMORY("MapSpace")
};


// -----------------------------------------------------------------------------
// Old space for all global object property cell objects

class CellSpace : public FixedSpace {
 public:
  // Creates a property cell space object with a maximum capacity.
  CellSpace(int max_capacity, AllocationSpace id)
      : FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}

 protected:
#ifdef DEBUG
  virtual void VerifyObject(HeapObject* obj);
#endif

 public:
  TRACK_MEMORY("CellSpace")
};


// -----------------------------------------------------------------------------
// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
// the large object space. A large object is allocated from the OS heap with
// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
// A large object always starts at Page::kObjectStartOffset within its page.
// Large objects do not move during garbage collections.

// A LargeObjectChunk holds exactly one large object page with exactly one
// large object.
class LargeObjectChunk {
 public:
  // Allocates a new LargeObjectChunk that contains a large object page
  // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
  // object and possibly extra remembered set words) bytes after the object
  // area start of that page. The allocated chunk size is set in the output
  // parameter chunk_size.
  static LargeObjectChunk* New(int size_in_bytes,
                               size_t* chunk_size,
                               Executability executable);

  // Interpret a raw address as a large object chunk.
  static LargeObjectChunk* FromAddress(Address address) {
    return reinterpret_cast<LargeObjectChunk*>(address);
  }

  // Returns the address of this chunk.
  Address address() { return reinterpret_cast<Address>(this); }

  // Accessors for the fields of the chunk.
  LargeObjectChunk* next() { return next_; }
  void set_next(LargeObjectChunk* chunk) { next_ = chunk; }

  size_t size() { return size_; }
  void set_size(size_t size_in_bytes) { size_ = size_in_bytes; }

  // Returns the object in this chunk.
  inline HeapObject* GetObject();

  // Given a requested size (including any extra remembered set words),
  // returns the physical size of a chunk to be allocated.
  static int ChunkSizeFor(int size_in_bytes);

  // Given a chunk size, returns the object size it can accommodate (not
  // including any extra remembered set words).  Used by
  // LargeObjectSpace::Available.  Note that this can overestimate the size
  // of object that will fit in a chunk: if the object requires extra
  // remembered set words (e.g., for large fixed arrays), the actual object
  // size for the chunk will be smaller than reported by this function.
  static int ObjectSizeFor(int chunk_size) {
    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
  }
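
  // Illustrative arithmetic (the chunk size is hypothetical): with the 8K
  // pages described at the top of this file, a 40960-byte chunk could hold an
  // object of at most
  //
  //   40960 - Page::kPageSize - Page::kObjectStartOffset
  //     = 32768 - Page::kObjectStartOffset bytes,
  //
  // while any chunk no larger than that fixed overhead yields 0.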

 private:
  // A pointer to the next large object chunk in the space or NULL.
  LargeObjectChunk* next_;

  // The size of this chunk.
  size_t size_;

 public:
  TRACK_MEMORY("LargeObjectChunk")
};


class LargeObjectSpace : public Space {
 public:
  explicit LargeObjectSpace(AllocationSpace id);
  virtual ~LargeObjectSpace() {}

  // Initializes internal data structures.
  bool Setup();

  // Releases internal resources, frees objects in this space.
  void TearDown();

  // Allocates a (non-FixedArray, non-Code) large object.
  Object* AllocateRaw(int size_in_bytes);
  // Allocates a large Code object.
  Object* AllocateRawCode(int size_in_bytes);
  // Allocates a large FixedArray.
  Object* AllocateRawFixedArray(int size_in_bytes);

  // Available bytes for objects in this space, not including any extra
  // remembered set words.
  int Available() {
    return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
  }

  virtual int Size() {
    return size_;
  }

  int PageCount() {
    return page_count_;
  }

  // Finds an object for a given address, returns Failure::Exception()
  // if it is not found. The function iterates through all objects in this
  // space and may be slow.
  Object* FindObject(Address a);

  // Clears remembered sets.
  void ClearRSet();

  // Iterates objects whose remembered set bits are set.
  void IterateRSet(ObjectSlotCallback func);

  // Frees unmarked objects.
  void FreeUnmarkedObjects();

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject* obj);

  // Checks whether the space is empty.
  bool IsEmpty() { return first_chunk_ == NULL; }

  // See the comments for ReserveSpace in the Space class.  This has to be
  // called after ReserveSpace has been called on the paged spaces, since they
  // may use some memory, leaving less for large objects.
  virtual bool ReserveSpace(int bytes);

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  void Protect();
  void Unprotect();
#endif

#ifdef DEBUG
  virtual void Verify();
  virtual void Print();
  void ReportStatistics();
  void CollectCodeStatistics();
  // Dump the remembered sets in the space to stdout.
  void PrintRSet();
#endif

  // Checks whether an address is in the object area in this space.  It
  // iterates all objects in the space and may be slow.
  bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }

 private:
  // The head of the linked list of large object chunks.
  LargeObjectChunk* first_chunk_;
  int size_;  // allocated bytes
  int page_count_;  // number of chunks

  // Shared implementation of AllocateRaw, AllocateRawCode and
  // AllocateRawFixedArray.
  Object* AllocateRawInternal(int requested_size,
                              int object_size,
                              Executability executable);

  // Returns the number of extra bytes (rounded up to the nearest full word)
  // of remembered set needed to cover extra_object_bytes of pointer fields.
  static inline int ExtraRSetBytesFor(int extra_object_bytes);

  friend class LargeObjectIterator;

 public:
  TRACK_MEMORY("LargeObjectSpace")
};


class LargeObjectIterator: public ObjectIterator {
 public:
  explicit LargeObjectIterator(LargeObjectSpace* space);
  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);

  HeapObject* next();

  // Implementation of ObjectIterator.
  virtual HeapObject* next_object() { return next(); }

 private:
  LargeObjectChunk* current_;
  HeapObjectCallback size_func_;
};


} }  // namespace v8::internal

#endif  // V8_SPACES_H_