// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_H_
#define V8_SPACES_H_

#include "list-inl.h"
#include "log.h"

namespace v8 {
namespace internal {

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces; a
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; all other old objects go into the old space.
// The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous.  The old and map
// spaces each consist of a list of pages. A page has a page header, a
// remembered set area, and an object area. The page size is deliberately
// chosen to be 8K bytes. The first word of a page is an opaque page header
// holding the address of the next page and its ownership information. The
// second word may hold the allocation top address of this page. The next 248
// bytes are the remembered set. Heap objects are aligned to the pointer size
// (4 bytes), and a remembered set bit corresponds to a pointer in the object
// area.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection.  The large object space is paged and uses the same remembered
// set implementation.  Pages in the large object space may be larger than 8K.
//
// NOTE: The mark-compact collector rebuilds the remembered set after a
// collection. It temporarily reuses the first few words of the remembered
// set for bookkeeping relocation information.


// Some assertion macros used in debug mode.

#define ASSERT_PAGE_ALIGNED(address)                  \
  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define ASSERT_OBJECT_ALIGNED(address)                \
  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define ASSERT_OBJECT_SIZE(size)                      \
  ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))

#define ASSERT_PAGE_OFFSET(offset)                    \
  ASSERT((Page::kObjectStartOffset <= offset)         \
      && (offset <= Page::kPageSize))

#define ASSERT_MAP_PAGE_INDEX(index)                            \
  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))


class PagedSpace;
class MemoryAllocator;
class AllocationInfo;

// -----------------------------------------------------------------------------
// A page normally has 8K bytes. Large object pages may be larger.  A page
// address is always aligned to the 8K page size.  A page is divided into
// three areas: the first two words are used for bookkeeping, the next 248
// bytes are used as the remembered set, and the rest of the page is the
// object area.
//
// Pointers are aligned to the pointer size (4 bytes), so only one bit is
// needed per pointer in the remembered set. Given an address, its remembered
// set byte position (offset from the start of the page) is calculated by
// dividing its page offset by 32. Therefore, the object area in a page starts
// at the 256th byte (8K/32). Bytes 0 to 255 do not need remembered set bits,
// so the first two words (64 bits) in a page can be used for other purposes.
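//
// As a worked example of the 32-bit layout just described: the pointer slot
// at page offset 4096 is covered by remembered set bit 4096 / kPointerSize =
// 1024, which lives in byte 4096 / 32 = 128 from the start of the page.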
//
// On the 64-bit platform, pointers are aligned to the 8-byte pointer size,
// so we need only 128 bytes for the RSet. However, the part of the RSet that
// covers the RSet itself then frees up only two bytes, which is not enough
// room for the bookkeeping words. For this reason we add an offset to the
// start of the remembered set to make room for the Page data at the start.
//
// The mark-compact collector transforms a map pointer into a page index and
// a page offset. The map space can have up to 1024 pages, i.e. 8M bytes
// (1024 * 8K) in total.  Because a map pointer is aligned to the pointer
// size (4 bytes), 11 bits are enough to encode the page offset, so 21 bits
// (10 for the page index + 11 for the offset in the page) are required to
// encode a map pointer.
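//
// For illustration, one encoding consistent with the sizes above (the exact
// bit layout is an assumption here, not taken from this file) would pack a
// map on page 3 at page offset 0x40 as (3 << 11) | (0x40 >> 2) = 0x1810.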
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page {
 public:
  // Returns the page containing a given address. The address must lie in
  // the range [page_addr .. page_addr + kPageSize[.
  //
  // Note that this function only works for addresses in normal paged
  // spaces and addresses in the first 8K of large object pages (i.e.,
  // the start of large objects but not necessarily derived pointers
  // within them).
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we first subtract
  // kPointerSize so that the address is guaranteed to fall within the page.
  // The top address must lie in the range
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    ASSERT_PAGE_OFFSET(p->Offset(top));
    return p;
  }

  // Returns the start address of this page.
  Address address() { return reinterpret_cast<Address>(this); }

  // Checks whether this is a valid page address.
  bool is_valid() { return address() != NULL; }

  // Returns the next page of this page.
  inline Page* next_page();

  // Returns the end of allocation in this page. Undefined for unused pages.
  inline Address AllocationTop();

  // Returns the start address of the object area in this page.
  Address ObjectAreaStart() { return address() + kObjectStartOffset; }

  // Returns the end address (exclusive) of the object area in this page.
  Address ObjectAreaEnd() { return address() + Page::kPageSize; }

  // Returns the start address of the remembered set area.
  Address RSetStart() { return address() + kRSetStartOffset; }

  // Returns the end address of the remembered set area (exclusive).
  Address RSetEnd() { return address() + kRSetEndOffset; }

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // True if this page is a large object page.
  bool IsLargeObjectPage() { return (is_normal_page & 0x1) == 0; }

  // Returns the offset of a given address within this page.
  INLINE(int Offset(Address a)) {
    int offset = a - address();
    ASSERT_PAGE_OFFSET(offset);
    return offset;
  }

  // Returns the address for a given offset within this page.
  Address OffsetToAddress(int offset) {
    ASSERT_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // ---------------------------------------------------------------------
  // Remembered set support

  // Clears the remembered set in this page.
  inline void ClearRSet();

  // Returns the address of the remembered set word corresponding to an
  // object address/offset pair, and the bit encoded as a single-bit
  // mask in the output parameter 'bitmask'.
  INLINE(static Address ComputeRSetBitPosition(Address address, int offset,
                                               uint32_t* bitmask));

  // Sets the corresponding remembered set bit for a given address.
  INLINE(static void SetRSet(Address address, int offset));

  // Clears the corresponding remembered set bit for a given address.
  static inline void UnsetRSet(Address address, int offset);

  // Checks whether the remembered set bit for a given address is set.
  static inline bool IsRSetSet(Address address, int offset);

#ifdef DEBUG
  // Use a state to mark whether the remembered set space can be used for
  // other purposes.
  enum RSetState { IN_USE, NOT_IN_USE };
  static bool is_rset_in_use() { return rset_state_ == IN_USE; }
  static void set_rset_state(RSetState state) { rset_state_ = state; }
#endif

  // 8K bytes per page.
  static const int kPageSizeBits = 13;

  // Page size in bytes.  This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Page size mask.
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  // The offset at which the remembered set starts in a page, in addition to
  // the bytes freed up because the remembered set bits covering the
  // remembered set itself are unused.
#ifdef V8_TARGET_ARCH_X64
  static const int kRSetOffset = 4 * kPointerSize;  // Room for four pointers.
#else
  static const int kRSetOffset = 0;
#endif
  // The end offset of the remembered set in a page
  // (heaps are aligned to the pointer size).
  static const int kRSetEndOffset = kRSetOffset + kPageSize / kBitsPerPointer;

  // The start offset of the object area in a page.
  // This needs to be at least (bits per uint32_t) * kBitsPerPointer
  // to align the start of the RSet to a uint32_t address.
  static const int kObjectStartOffset = 256;

  // The start offset of the used part of the remembered set in a page.
  static const int kRSetStartOffset = kRSetOffset +
      kObjectStartOffset / kBitsPerPointer;

  // Object area size in bytes.
  static const int kObjectAreaSize = kPageSize - kObjectStartOffset;

  // Maximum object size that fits in a page.
  static const int kMaxHeapObjectSize = kObjectAreaSize;

  //---------------------------------------------------------------------------
  // Page header description.
  //
  // If a page is not in the large object space, the first word,
  // opaque_header, encodes the next page address (aligned to kPageSize 8K)
  // and the chunk number (0 ~ 8K-1).  Only MemoryAllocator should use
  // opaque_header. The value range of the opaque_header is [0..kPageSize[,
  // or [next_page_start, next_page_end[. It cannot point to a valid address
  // in the current page.  If a page is in the large object space, the first
  // word *may* (if the page start and large object chunk start are the
  // same) contain the address of the next large object chunk.
  intptr_t opaque_header;

  // If the page is not in the large object space, the low-order bit of the
  // second word is set. If the page is in the large object space, the
  // second word *may* (if the page start and large object chunk start are
  // the same) contain the large object chunk size.  In either case, the
  // low-order bit for large object pages will be cleared.
  int is_normal_page;

  // The following fields may overlap with the remembered set; they can only
  // be used in the mark-compact collector when the remembered set is not
  // used.

  // The index of the page in its owner space.
  int mc_page_index;

  // The allocation pointer after relocating objects to this page.
  Address mc_relocation_top;

  // The forwarding address of the first live object in this page.
  Address mc_first_forwarded;

#ifdef DEBUG
 private:
  static RSetState rset_state_;  // state of the remembered set
#endif
};


// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(AllocationSpace id, Executability executable)
      : id_(id), executable_(executable) {}

  virtual ~Space() {}

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  virtual int Size() = 0;

#ifdef DEBUG
  virtual void Print() = 0;
#endif

 private:
  AllocationSpace id_;
  Executability executable_;
};


// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator manages chunks for the paged heap spaces (old space and map
// space).  A paged chunk consists of pages. Pages in a chunk have contiguous
// addresses and are linked as a list.
//
// The allocator keeps an initial chunk which is used for the new space.  The
// leftover regions of the initial chunk are used for the initial chunks of
// the old space and map space if they are big enough to hold at least one
// page.  The allocator assumes that there is one old space and one map
// space; each expands by allocating kPagesPerChunk pages at a time, except
// possibly for the last expansion before running out of space.  The first
// chunk may also contain fewer than kPagesPerChunk pages.
//
// The memory allocator also allocates chunks for the large object space, but
// they are managed by the space itself.  The new space does not expand.
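//
// A sketch of a typical setup sequence (illustrative only; the argument
// names here are placeholders, not part of this interface):
//
//   MemoryAllocator::Setup(max_capacity);
//   void* chunk = MemoryAllocator::ReserveInitialChunk(requested_size);
//   int num_pages;
//   Page* first = MemoryAllocator::CommitPages(chunk_start, chunk_size,
//                                              owner_space, &num_pages);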

class MemoryAllocator : public AllStatic {
 public:
  // Initializes its internal bookkeeping structures.
  // Max capacity of the total space.
  static bool Setup(int max_capacity);

  // Deletes valid chunks.
  static void TearDown();

  // Reserves an initial address range of virtual memory to be split between
  // the two new space semispaces, the old space, and the map space.  The
  // memory is not yet committed or assigned to spaces and split into pages.
  // The initial chunk is unmapped when the memory allocator is torn down.
  // This function should only be called when there is not already a reserved
  // initial chunk (initial_chunk_ should be NULL).  On success it returns the
  // start address of the initial chunk, with the side effect of setting the
  // initial chunk; on failure it returns NULL and leaves the initial chunk
  // NULL.
  static void* ReserveInitialChunk(const size_t requested);

  // Commits pages from an as-yet-unmanaged block of virtual memory into a
  // paged space.  The block should be part of the initial chunk reserved via
  // a call to ReserveInitialChunk.  The number of pages is always returned in
  // the output parameter num_pages.  This function assumes that the start
  // address is non-null and that it is big enough to hold at least one
  // page-aligned page.  The call always succeeds, and num_pages is always
  // greater than zero.
  static Page* CommitPages(Address start, size_t size, PagedSpace* owner,
                           int* num_pages);

  // Commits a contiguous block of memory from the initial chunk.  Assumes
  // that the address is not NULL, the size is greater than zero, and that
  // the block is contained in the initial chunk.  Returns true if it
  // succeeded and false otherwise.
  static bool CommitBlock(Address start, size_t size, Executability executable);

  // Uncommits a contiguous block of memory [start..(start+size)[.
  // start is not NULL, the size is greater than zero, and the
  // block is contained in the initial chunk.  Returns true if it succeeded
  // and false otherwise.
  static bool UncommitBlock(Address start, size_t size);

  // Attempts to allocate the requested (non-zero) number of pages from the
  // OS.  Fewer pages might be allocated than requested. If it fails to
  // allocate memory from the OS or cannot allocate a single page, this
  // function returns an invalid page pointer (NULL). The caller must check
  // whether the returned page is valid (by calling Page::is_valid()).  It is
  // guaranteed that allocated pages have contiguous addresses.  The actual
  // number of allocated pages is returned in the output parameter
  // allocated_pages.
  static Page* AllocatePages(int requested_pages, int* allocated_pages,
                             PagedSpace* owner);

  // Frees pages from a given page and after. If 'p' is the first page
  // of a chunk, pages from 'p' are freed and this function returns an
  // invalid page pointer. Otherwise, the function searches for a page
  // after 'p' that is the first page of a chunk. Pages after the
  // found page are freed and the function returns 'p'.
  static Page* FreePages(Page* p);

  // Allocates and frees raw memory of a certain size.
  // These are just thin wrappers around OS::Allocate and OS::Free,
  // but they keep track of allocated bytes as part of the heap.
  static void* AllocateRawMemory(const size_t requested,
                                 size_t* allocated,
                                 Executability executable);
  static void FreeRawMemory(void* buf, size_t length);

  // Returns the maximum available bytes of heaps.
  static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

  // Returns allocated space in bytes.
  static int Size() { return size_; }

  // Returns the maximum available bytes that the old space can have.
  static int MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
  }

  // Links two pages.
  static inline void SetNextPage(Page* prev, Page* next);

  // Returns the next page of a given page.
  static inline Page* GetNextPage(Page* p);

  // Checks whether a page belongs to a space.
  static inline bool IsPageInSpace(Page* p, PagedSpace* space);

  // Returns the space that owns the given page.
  static inline PagedSpace* PageOwner(Page* page);

  // Finds the first/last page in the same chunk as a given page.
  static Page* FindFirstPageInSameChunk(Page* p);
  static Page* FindLastPageInSameChunk(Page* p);

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect a block of memory by marking it read-only/writable.
  static inline void Protect(Address start, size_t size);
  static inline void Unprotect(Address start, size_t size,
                               Executability executable);

  // Protect/unprotect a chunk given a page in the chunk.
  static inline void ProtectChunkFromPage(Page* page);
  static inline void UnprotectChunkFromPage(Page* page);
#endif

#ifdef DEBUG
  // Reports statistic info of the space.
  static void ReportStatistics();
#endif

  // Due to an encoding limitation, we can have at most 8K chunks.
  static const int kMaxNofChunks = 1 << Page::kPageSizeBits;
  // If a chunk has at least 32 pages, the maximum heap size is about
  // 8 * 1024 * 32 * 8K = 2G bytes.
#if defined(ANDROID)
  static const int kPagesPerChunk = 16;
#else
  static const int kPagesPerChunk = 64;
#endif
  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;

 private:
  // Maximum space size in bytes.
  static int capacity_;

  // Allocated space size in bytes.
  static int size_;

  // The initial chunk of virtual memory.
  static VirtualMemory* initial_chunk_;

  // Allocated chunk info: chunk start address, chunk size, and owning space.
  class ChunkInfo BASE_EMBEDDED {
   public:
    ChunkInfo() : address_(NULL), size_(0), owner_(NULL) {}
    void init(Address a, size_t s, PagedSpace* o) {
      address_ = a;
      size_ = s;
      owner_ = o;
    }
    Address address() { return address_; }
    size_t size() { return size_; }
    PagedSpace* owner() { return owner_; }

   private:
    Address address_;
    size_t size_;
    PagedSpace* owner_;
  };

  // chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
  static List<ChunkInfo> chunks_;
  static List<int> free_chunk_ids_;
  static int max_nof_chunks_;
  static int top_;

  // Push/pop a free chunk id onto/from the stack.
  static void Push(int free_chunk_id);
  static int Pop();
  static bool OutOfChunkIds() { return top_ == 0; }

  // Frees a chunk.
  static void DeleteChunk(int chunk_id);

  // Basic check whether a chunk id is in the valid range.
  static inline bool IsValidChunkId(int chunk_id);

  // Checks whether a chunk id identifies an allocated chunk.
  static inline bool IsValidChunk(int chunk_id);

  // Returns the chunk id that a page belongs to.
  static inline int GetChunkId(Page* p);

  // True if the address lies in the initial chunk.
  static inline bool InInitialChunk(Address address);

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and whose page headers are destroyed.
  static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                      PagedSpace* owner);
};


// -----------------------------------------------------------------------------
// Interface for heap object iterators, to be implemented by all object space
// object iterators.
//
// NOTE: The space-specific object iterators also implement their own
//       has_next() and next() methods, which are used to avoid virtual
//       function calls when iterating a specific space.

class ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() { }

  virtual bool has_next_object() = 0;
  virtual HeapObject* next_object() = 0;
};


// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A HeapObjectIterator iterates objects from a given address to the
// top of a space. The given address must be below the current
// allocation pointer (space top). There are some caveats.
//
// (1) If the space top changes upward during iteration (because of
//     allocating new objects), the iterator does not iterate objects
//     above the original space top. The caller must create a new
//     iterator starting from the old top in order to visit these new
//     objects.
//
// (2) If new objects are allocated below the original allocation top
//     (e.g., free-list allocation in paged spaces), the new objects
//     may or may not be iterated depending on their position with
//     respect to the current point of iteration.
//
// (3) The space top should not change downward during iteration,
//     otherwise the iterator will return objects that are not
//     necessarily valid.
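//
// A typical use, as a sketch (the loop body is up to the caller):
//
//   HeapObjectIterator it(space);
//   while (it.has_next()) {
//     HeapObject* obj = it.next();
//     // ... process obj ...
//   }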

class HeapObjectIterator: public ObjectIterator {
 public:
  // Creates a new object iterator in a given space. If a start
  // address is not given, the iterator starts from the space bottom.
  // If the size function is not given, the iterator calls the default
  // Object::Size().
  explicit HeapObjectIterator(PagedSpace* space);
  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
  HeapObjectIterator(PagedSpace* space, Address start);
  HeapObjectIterator(PagedSpace* space,
                     Address start,
                     HeapObjectCallback size_func);

  inline bool has_next();
  inline HeapObject* next();

  // Implementation of ObjectIterator.
  virtual bool has_next_object() { return has_next(); }
  virtual HeapObject* next_object() { return next(); }

 private:
  Address cur_addr_;  // current iteration point
  Address end_addr_;  // end iteration point
  Address cur_limit_;  // current page limit
  HeapObjectCallback size_func_;  // size function
  Page* end_page_;  // caches the page of the end address

  // Slow path of has_next, checks whether there are more objects in
  // the next page.
  bool HasNextInNextPage();

  // Initializes fields.
  void Initialize(Address start, Address end, HeapObjectCallback size_func);

#ifdef DEBUG
  // Verifies whether fields have valid values.
  void Verify();
#endif
};


// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a paged space.
//
// The PageIterator class provides three modes for iterating pages in a space:
//   PAGES_IN_USE iterates pages containing allocated objects.
//   PAGES_USED_BY_MC iterates pages that hold relocated objects during a
//                    mark-compact collection.
//   ALL_PAGES iterates all pages in the space.
//
// There are some caveats.
//
// (1) If the space expands during iteration, new pages will not be
//     returned by the iterator in any mode.
//
// (2) If new objects are allocated during iteration, they will appear
//     in pages returned by the iterator.  Allocation may cause the
//     allocation pointer or MC allocation pointer in the last page to
//     change between constructing the iterator and iterating the last
//     page.
//
// (3) The space should not shrink during iteration, otherwise the
//     iterator will return deallocated pages.
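//
// A typical use, as a sketch:
//
//   PageIterator it(space, PageIterator::PAGES_IN_USE);
//   while (it.has_next()) {
//     Page* p = it.next();
//     // ... process p ...
//   }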

class PageIterator BASE_EMBEDDED {
 public:
  enum Mode {
    PAGES_IN_USE,
    PAGES_USED_BY_MC,
    ALL_PAGES
  };

  PageIterator(PagedSpace* space, Mode mode);

  inline bool has_next();
  inline Page* next();

 private:
  PagedSpace* space_;
  Page* prev_page_;  // Previous page returned.
  Page* stop_page_;  // Page to stop at (last page returned by the iterator).
};


// -----------------------------------------------------------------------------
// A space has a list of pages. The next page can be accessed via a
// Page::next_page() call. The next page of the last page is an
// invalid page pointer. A space can expand and shrink dynamically.

// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
 public:
  Address top;  // current allocation top
  Address limit;  // current allocation limit

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
        && (top <= limit);
  }
#endif
};


// An abstraction of the accounting statistics of a page-structured space.
// The 'capacity' of a space is the number of object-area bytes (i.e., not
// including page bookkeeping structures) currently in the space. The 'size'
// of a space is the number of allocated bytes, the 'waste' in the space is
// the number of bytes that are not allocated and not available to
// allocation without reorganizing the space via a GC (e.g., small blocks due
// to internal fragmentation, top of page areas in map space), and the
// 'available' bytes is the number of unallocated bytes that are not waste.
// The capacity is the sum of size, waste, and available.
//
// The stats are only set by functions that ensure they stay balanced. These
// functions increase or decrease one of the non-capacity stats in
// conjunction with capacity, or else they always balance increases and
// decreases to the non-capacity stats.
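//
// For example, starting from Clear(): ExpandSpace(8192) gives capacity 8192
// and available 8192; AllocateBytes(100) then leaves available 8092 and size
// 100; WasteBytes(12) leaves available 8080 and waste 12. The invariant
// capacity == size + waste + available still holds: 100 + 12 + 8080 == 8192.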
class AllocationStats BASE_EMBEDDED {
 public:
  AllocationStats() { Clear(); }

  // Zero out all the allocation statistics (i.e., no capacity).
  void Clear() {
    capacity_ = 0;
    available_ = 0;
    size_ = 0;
    waste_ = 0;
  }

  // Reset the allocation statistics (i.e., available = capacity with no
  // wasted or allocated bytes).
  void Reset() {
    available_ = capacity_;
    size_ = 0;
    waste_ = 0;
  }

  // Accessors for the allocation statistics.
  int Capacity() { return capacity_; }
  int Available() { return available_; }
  int Size() { return size_; }
  int Waste() { return waste_; }

  // Grow the space by adding available bytes.
  void ExpandSpace(int size_in_bytes) {
    capacity_ += size_in_bytes;
    available_ += size_in_bytes;
  }

  // Shrink the space by removing available bytes.
  void ShrinkSpace(int size_in_bytes) {
    capacity_ -= size_in_bytes;
    available_ -= size_in_bytes;
  }

  // Allocate from available bytes (available -> size).
  void AllocateBytes(int size_in_bytes) {
    available_ -= size_in_bytes;
    size_ += size_in_bytes;
  }

  // Free allocated bytes, making them available (size -> available).
  void DeallocateBytes(int size_in_bytes) {
    size_ -= size_in_bytes;
    available_ += size_in_bytes;
  }

  // Waste free bytes (available -> waste).
  void WasteBytes(int size_in_bytes) {
    available_ -= size_in_bytes;
    waste_ += size_in_bytes;
  }

  // Consider the wasted bytes to be allocated, as they contain filler
  // objects (waste -> size).
  void FillWastedBytes(int size_in_bytes) {
    waste_ -= size_in_bytes;
    size_ += size_in_bytes;
  }

 private:
  int capacity_;
  int available_;
  int size_;
  int waste_;
};


class PagedSpace : public Space {
 public:
  // Creates a space with a maximum capacity, and an id.
  PagedSpace(int max_capacity, AllocationSpace id, Executability executable);

  virtual ~PagedSpace() {}

  // Sets up the space using the given address range of virtual memory (from
  // the memory allocator's initial chunk) if possible.  If the block of
  // addresses is not big enough to contain a single page-aligned page, a
  // fresh chunk will be allocated.
  bool Setup(Address start, size_t size);

  // Returns true if the space has been successfully set up and not
  // subsequently torn down.
  bool HasBeenSetup();

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, and uncommits addresses in the initial chunk.
  void TearDown();

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a);
  bool Contains(HeapObject* o) { return Contains(o->address()); }

  // Given an address occupied by a live object, return that object if it is
  // in this space, or Failure::Exception() if it is not. The implementation
  // iterates over objects in the page containing the address, so the cost is
  // linear in the number of objects in the page. It may be slow.
  Object* FindObject(Address addr);

  // Checks whether a page is currently in use by this space.
  bool IsUsed(Page* page);

  // Clears the remembered sets of pages in this space.
  void ClearRSet();

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact) = 0;

  virtual Address PageAllocationTop(Page* page) = 0;

  // Current capacity without growing (Size() + Available() + Waste()).
  int Capacity() { return accounting_stats_.Capacity(); }

  // Available bytes without growing.
  int Available() { return accounting_stats_.Available(); }

  // Allocated bytes in this space.
  virtual int Size() { return accounting_stats_.Size(); }

  // Wasted bytes due to fragmentation, not recoverable until the
  // next GC of this space.
  int Waste() { return accounting_stats_.Waste(); }

  // Returns the address of the first object in this space.
  Address bottom() { return first_page_->ObjectAreaStart(); }

  // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top; }

  // Allocates the requested number of bytes in the space if possible, and
  // returns a failure object if not.
  inline Object* AllocateRaw(int size_in_bytes);

  // Allocates the requested number of bytes for relocation during
  // mark-compact collection.
  inline Object* MCAllocateRaw(int size_in_bytes);


  // ---------------------------------------------------------------------------
  // Mark-compact collection support functions

  // Set the relocation point to the beginning of the space.
  void MCResetRelocationInfo();

  // Writes relocation info to the top page.
  void MCWriteRelocationInfoToPage() {
    TopPageOf(mc_forwarding_info_)->mc_relocation_top = mc_forwarding_info_.top;
  }

  // Computes the offset of a given address in this space from the beginning
  // of the space.
  int MCSpaceOffsetForAddress(Address addr);

  // Updates the allocation pointer to the relocation top after a mark-compact
  // collection.
  virtual void MCCommitRelocationInfo() = 0;

  // Releases half of the unused pages.
  void Shrink();

  // Ensures that the capacity is at least 'capacity'. Returns false on
  // failure.
  bool EnsureCapacity(int capacity);

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  void Protect();
  void Unprotect();
#endif

#ifdef DEBUG
  // Print meta info and objects in this space.
  virtual void Print();

  // Verify integrity of this space.
  virtual void Verify(ObjectVisitor* visitor);

  // Overridden by subclasses to verify space-specific object
  // properties (e.g., only maps or free-list nodes are in map space).
  virtual void VerifyObject(HeapObject* obj) {}

  // Report code object related statistics.
  void CollectCodeStatistics();
  static void ReportCodeStatistics();
  static void ResetCodeStatistics();
#endif

 protected:
  // Maximum capacity of this space.
  int max_capacity_;

  // Accounting information for this space.
  AllocationStats accounting_stats_;

  // The first page in this space.
  Page* first_page_;

  // The last page in this space.  Initially set in Setup, updated in
  // Expand and Shrink.
  Page* last_page_;

  // Normal allocation information.
  AllocationInfo allocation_info_;

  // Relocation information during mark-compact collections.
  AllocationInfo mc_forwarding_info_;

  // Bytes of each page that cannot be allocated.  Possibly non-zero
  // for pages in spaces with only fixed-size objects.  Always zero
  // for pages in spaces with variable sized objects (those pages are
  // padded with free-list nodes).
  int page_extra_;

  // Sets the allocation pointer to a page bottom.
  static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);

  // Returns the top page specified by an allocation info structure.
  static Page* TopPageOf(AllocationInfo alloc_info) {
    return Page::FromAllocationTop(alloc_info.limit);
  }

  // Expands the space by allocating a fixed number of pages. Returns false
  // if it cannot allocate the requested number of pages from the OS. Newly
  // allocated pages are appended to last_page.
  bool Expand(Page* last_page);

  // Generic fast case allocation function that tries linear allocation in
  // the top page of 'alloc_info'.  Returns NULL on failure.
  inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
                                      int size_in_bytes);

  // During normal allocation or deserialization, roll to the next page in
  // the space (there is assumed to be one) and allocate there.  This
  // function is space-dependent.
  virtual HeapObject* AllocateInNextPage(Page* current_page,
                                         int size_in_bytes) = 0;

  // Slow path of AllocateRaw.  This function is space-dependent.
  virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;

  // Slow path of MCAllocateRaw.
  HeapObject* SlowMCAllocateRaw(int size_in_bytes);

#ifdef DEBUG
  void DoPrintRSet(const char* space_name);
#endif

 private:
  // Returns the page of the allocation pointer.
  Page* AllocationTopPage() { return TopPageOf(allocation_info_); }

  // Returns a pointer to the page of the relocation pointer.
  Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }

#ifdef DEBUG
  // Returns the total number of pages in this space.
  int CountTotalPages();
#endif

  friend class PageIterator;
};


#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
class NumberAndSizeInfo BASE_EMBEDDED {
 public:
  NumberAndSizeInfo() : number_(0), bytes_(0) {}

  int number() const { return number_; }
  void increment_number(int num) { number_ += num; }

  int bytes() const { return bytes_; }
  void increment_bytes(int size) { bytes_ += size; }

  void clear() {
    number_ = 0;
    bytes_ = 0;
  }

 private:
  int number_;
  int bytes_;
};


// HistogramInfo class for recording a single "bar" of a histogram.  This
// class is used for collecting statistics to print to stdout (when compiled
// with DEBUG) or to the log file (when compiled with
// ENABLE_LOGGING_AND_PROFILING).
class HistogramInfo: public NumberAndSizeInfo {
 public:
  HistogramInfo() : NumberAndSizeInfo() {}

  const char* name() { return name_; }
  void set_name(const char* name) { name_ = name; }

 private:
  const char* name_;
};
#endif


// -----------------------------------------------------------------------------
// SemiSpace in young generation
//
// A semispace is a contiguous chunk of memory. The mark-compact collector
// uses the memory in the from space as a marking stack when tracing live
// objects.

class SemiSpace : public Space {
 public:
  // Constructor.
  SemiSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {
    start_ = NULL;
    age_mark_ = NULL;
  }

  // Sets up the semispace using the given chunk.
  bool Setup(Address start, int initial_capacity, int maximum_capacity);

  // Tear down the space.  Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetup() { return start_ != NULL; }

  // Grow the size of the semispace by committing extra virtual memory.
  // Assumes that the caller has checked that the semispace has not reached
  // its maximum capacity (and thus there is space available in the reserved
  // address range to grow).
  bool Grow();

  // Grow the semispace to the new capacity.  The new capacity
  // requested must be larger than the current capacity.
  bool GrowTo(int new_capacity);

  // Shrinks the semispace to the new capacity.  The new capacity
  // requested must be more than the amount of used memory in the
  // semispace and less than the current capacity.
  bool ShrinkTo(int new_capacity);

  // Returns the start address of the space.
  Address low() { return start_; }
  // Returns one past the end address of the space.
  Address high() { return low() + capacity_; }

  // Age mark accessors.
  Address age_mark() { return age_mark_; }
  void set_age_mark(Address mark) { age_mark_ = mark; }

  // True if the address is in the address range of this semispace (not
  // necessarily below the allocation pointer).
  bool Contains(Address a) {
    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
           == reinterpret_cast<uintptr_t>(start_);
  }

  // True if the object is a heap object in the address range of this
  // semispace (not necessarily below the allocation pointer).
  bool Contains(Object* o) {
    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
  }

  // The offset of an address from the beginning of the space.
  int SpaceOffsetForAddress(Address addr) { return addr - low(); }

  // If we didn't have this here then SemiSpace would be abstract.  However,
  // it should never be called.
  virtual int Size() {
    UNREACHABLE();
    return 0;
  }

  bool is_committed() { return committed_; }
  bool Commit();
  bool Uncommit();

#ifdef DEBUG
  virtual void Print();
  virtual void Verify();
#endif

  // Returns the current capacity of the semispace.
  int Capacity() { return capacity_; }

  // Returns the maximum capacity of the semispace.
  int MaximumCapacity() { return maximum_capacity_; }

  // Returns the initial capacity of the semispace.
  int InitialCapacity() { return initial_capacity_; }

 private:
  // The current, maximum, and initial capacity of the space.
  int capacity_;
  int maximum_capacity_;
  int initial_capacity_;

  // The start address of the space.
  Address start_;
  // Used to govern object promotion during mark-compact collection.
  Address age_mark_;

  // Masks and comparison values to test for containment in this semispace.
  uintptr_t address_mask_;
  uintptr_t object_mask_;
  uintptr_t object_expected_;

  bool committed_;

 public:
  TRACK_MEMORY("SemiSpace")
};


// A SemiSpaceIterator is an ObjectIterator that iterates over the active
// semispace of the heap's new space.  It iterates over the objects in the
// semispace from a given start address (defaulting to the bottom of the
// semispace) to the top of the semispace.  New objects allocated after the
// iterator is created are not iterated.
class SemiSpaceIterator : public ObjectIterator {
 public:
  // Create an iterator over the objects in the given space.  If no start
  // address is given, the iterator starts from the bottom of the space.  If
  // no size function is given, the iterator calls Object::Size().
  explicit SemiSpaceIterator(NewSpace* space);
  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
  SemiSpaceIterator(NewSpace* space, Address start);

  bool has_next() { return current_ < limit_; }

  HeapObject* next() {
    ASSERT(has_next());

    HeapObject* object = HeapObject::FromAddress(current_);
    int size = (size_func_ == NULL) ? object->Size() : size_func_(object);

    current_ += size;
    return object;
  }

  // Implementation of the ObjectIterator functions.
  virtual bool has_next_object() { return has_next(); }
  virtual HeapObject* next_object() { return next(); }

 private:
  void Initialize(NewSpace* space, Address start, Address end,
                  HeapObjectCallback size_func);

  // The semispace.
  SemiSpace* space_;
  // The current iteration point.
  Address current_;
  // The end of iteration.
  Address limit_;
  // The callback function.
  HeapObjectCallback size_func_;
};


// -----------------------------------------------------------------------------
// The young generation space.
//
// The new space consists of a contiguous pair of semispaces.  It simply
// forwards most functions to the appropriate semispace.

class NewSpace : public Space {
 public:
  // Constructor.
  NewSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {}

  // Sets up the new space using the given chunk.
  bool Setup(Address start, int size);

  // Tears down the space.  Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetup() {
    return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
  }

  // Flip the pair of spaces.
  void Flip();

  // Grow the capacity of the semispaces.  Assumes that they are not at
  // their maximum capacity.
  void Grow();

  // Shrink the capacity of the semispaces.
  void Shrink();

  // True if the address or object lies in the address range of either
  // semispace (not necessarily below the allocation pointer).
  bool Contains(Address a) {
    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
        == reinterpret_cast<uintptr_t>(start_);
  }
  bool Contains(Object* o) {
    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
  }

  // Returns the allocated bytes in the active semispace.
  virtual int Size() { return top() - bottom(); }
  // Returns the current capacity of a semispace.
  int Capacity() {
    ASSERT(to_space_.Capacity() == from_space_.Capacity());
    return to_space_.Capacity();
  }
  // Returns the available bytes without growing in the active semispace.
  int Available() { return Capacity() - Size(); }

  // Returns the maximum capacity of a semispace.
  int MaximumCapacity() {
    ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
    return to_space_.MaximumCapacity();
  }

  // Returns the initial capacity of a semispace.
  int InitialCapacity() {
    ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
    return to_space_.InitialCapacity();
  }

  // Returns the address of the allocation pointer in the active semispace.
  Address top() { return allocation_info_.top; }
  // Returns the address of the first object in the active semispace.
  Address bottom() { return to_space_.low(); }

  // Get the age mark of the inactive semispace.
  Address age_mark() { return from_space_.age_mark(); }
  // Set the age mark in the active semispace.
  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }

  // The start address of the space and a bit mask. ANDing an address in the
  // new space with the mask will result in the start address.
  Address start() { return start_; }
  uintptr_t mask() { return address_mask_; }
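
  // For illustration (the concrete numbers are assumed, not taken from this
  // file): if the new space were a 4M range starting at the 4M-aligned
  // address 0x08400000, mask() would be ~(4M - 1), and for any address a in
  // the space, (a & mask()) == start().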

  // The allocation top and limit addresses.
  Address* allocation_top_address() { return &allocation_info_.top; }
  Address* allocation_limit_address() { return &allocation_info_.limit; }

  Object* AllocateRaw(int size_in_bytes) {
    return AllocateRawInternal(size_in_bytes, &allocation_info_);
  }

  // Allocate the requested number of bytes for relocation during mark-compact
  // collection.
  Object* MCAllocateRaw(int size_in_bytes) {
    return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
  }

  // Reset the allocation pointer to the beginning of the active semispace.
  void ResetAllocationInfo();
  // Reset the relocation pointer to the bottom of the inactive semispace in
  // preparation for mark-compact collection.
  void MCResetRelocationInfo();
  // Update the allocation pointer in the active semispace after a
  // mark-compact collection.
  void MCCommitRelocationInfo();

  // Get the extent of the inactive semispace (for use as a marking stack).
  Address FromSpaceLow() { return from_space_.low(); }
  Address FromSpaceHigh() { return from_space_.high(); }

  // Get the extent of the active semispace (to sweep newly copied objects
  // during a scavenge collection).
  Address ToSpaceLow() { return to_space_.low(); }
  Address ToSpaceHigh() { return to_space_.high(); }

  // Offsets from the beginning of the semispaces.
  int ToSpaceOffsetForAddress(Address a) {
    return to_space_.SpaceOffsetForAddress(a);
  }
  int FromSpaceOffsetForAddress(Address a) {
    return from_space_.SpaceOffsetForAddress(a);
  }

  // True if the object is a heap object in the address range of the
  // respective semispace (not necessarily below the allocation pointer of the
  // semispace).
  bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
  bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }

  bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
  bool FromSpaceContains(Address a) { return from_space_.Contains(a); }

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  virtual void Protect();
  virtual void Unprotect();
#endif

#ifdef DEBUG
  // Verify the active semispace.
  virtual void Verify();
  // Print the active semispace.
  virtual void Print() { to_space_.Print(); }
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  // Iterates the active semispace to collect statistics.
  void CollectStatistics();
  // Reports previously collected statistics of the active semispace.
  void ReportStatistics();
  // Clears previously collected statistics.
  void ClearHistograms();

  // Record the allocation or promotion of a heap object.  Note that we don't
  // record every single allocation, but only those that happen in the
  // to space during a scavenge GC.
  void RecordAllocation(HeapObject* obj);
  void RecordPromotion(HeapObject* obj);
#endif

  // Returns whether the operation succeeded.
  bool CommitFromSpaceIfNeeded() {
    if (from_space_.is_committed()) return true;
    return from_space_.Commit();
  }

  bool UncommitFromSpace() {
    if (!from_space_.is_committed()) return true;
    return from_space_.Uncommit();
  }

 private:
  // The semispaces.
  SemiSpace to_space_;
  SemiSpace from_space_;

  // Start address and bit mask for containment testing.
  Address start_;
  uintptr_t address_mask_;
  uintptr_t object_mask_;
  uintptr_t object_expected_;

  // Allocation pointer and limit for normal allocation and allocation during
  // mark-compact collection.
  AllocationInfo allocation_info_;
  AllocationInfo mc_forwarding_info_;

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  HistogramInfo* allocated_histogram_;
  HistogramInfo* promoted_histogram_;
#endif

  // Implementation of AllocateRaw and MCAllocateRaw.
  inline Object* AllocateRawInternal(int size_in_bytes,
                                     AllocationInfo* alloc_info);

  friend class SemiSpaceIterator;

 public:
  TRACK_MEMORY("NewSpace")
};
1338 
1339 
// -----------------------------------------------------------------------------
// Free lists for old object spaces
//
// Free-list nodes are free blocks in the heap.  They look like heap objects
// (free-list node pointers have the heap object tag, and they have a map like
// a heap object).  They have a size and a next pointer.  The next pointer is
// the raw address of the next free-list node (or NULL).
class FreeListNode: public HeapObject {
 public:
  // Obtain a free-list node from a raw address.  This is not a cast because
  // it does not check nor require that the first word at the address is a map
  // pointer.
  static FreeListNode* FromAddress(Address address) {
    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
  }

  // Set the size in bytes, which can be read with HeapObject::Size().  This
  // function also writes a map to the first word of the block so that it
  // looks like a heap object to the garbage collector and heap iteration
  // functions.
  void set_size(int size_in_bytes);

  // Accessors for the next field.
  inline Address next();
  inline void set_next(Address next);

 private:
  static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
};
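
// A hedged sketch of how a freed block becomes a free-list node ('start',
// 'size_in_bytes', and 'old_head' are illustrative names; the free list
// classes below are the real entry points):
//
//   FreeListNode* node = FreeListNode::FromAddress(start);
//   node->set_size(size_in_bytes);  // writes a map; block now looks like an
//                                   // object to the GC and heap iterators
//   node->set_next(old_head);       // link into a free list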


// The free list for the old space.
class OldSpaceFreeList BASE_EMBEDDED {
 public:
  explicit OldSpaceFreeList(AllocationSpace owner);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  int available() { return available_; }

  // Place a node on the free list.  The block of size 'size_in_bytes'
  // starting at 'start' is placed on the free list.  The return value is the
  // number of bytes that have been lost due to internal fragmentation by
  // freeing the block.  Bookkeeping information will be written to the block,
  // i.e., its contents will be destroyed.  The start address should be word
  // aligned, and the size should be a non-zero multiple of the word size.
  int Free(Address start, int size_in_bytes);

  // Allocate a block of size 'size_in_bytes' from the free list.  The block
  // is uninitialized.  A failure is returned if no block is available.  The
  // number of bytes lost to fragmentation is returned in the output parameter
  // 'wasted_bytes'.  The size should be a non-zero multiple of the word size.
  Object* Allocate(int size_in_bytes, int* wasted_bytes);
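
  // An illustrative call sequence (a sketch only; 'free_list', 'start', and
  // the sizes are hypothetical):
  //
  //   int waste_on_free = free_list.Free(start, 24);  // return a 24-byte block
  //   int wasted_bytes;
  //   Object* result = free_list.Allocate(16, &wasted_bytes);
  //   if (result->IsFailure()) { /* no block available; consider a GC */ }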

 private:
  // The size range of blocks, in bytes.  (Smaller allocations are allowed, but
  // will always result in waste.)
  static const int kMinBlockSize = 2 * kPointerSize;
  static const int kMaxBlockSize = Page::kMaxHeapObjectSize;

  // The identity of the owning space, for building allocation Failure
  // objects.
  AllocationSpace owner_;

  // Total available bytes in all blocks on this free list.
  int available_;

  // Blocks are put on exact free lists in an array, indexed by size in words.
  // The available sizes are kept in an increasingly ordered list.  Entries
  // corresponding to sizes < kMinBlockSize always have an empty free list
  // (but index kHead is used for the head of the size list).
  struct SizeNode {
    // Address of the head FreeListNode of the implied block size or NULL.
    Address head_node_;
    // Size (words) of the next larger available size if head_node_ != NULL.
    int next_size_;
  };
  static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
  SizeNode free_[kFreeListsLength];
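
  // A worked example of the size list (assuming a 4-byte word, so
  // kMinBlockSize = 8 bytes and kHead = 1): if only 3-word and 5-word blocks
  // are currently free, the ordered size list threaded through free_ is
  //
  //   free_[kHead].next_size_ == 3
  //   free_[3].next_size_     == 5     // free_[3].head_node_ != NULL
  //   free_[5].next_size_     == kEnd  // sentinel: no larger size available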

  // Sentinel elements for the size list.  Real elements are in ]kHead..kEnd[.
  static const int kHead = kMinBlockSize / kPointerSize - 1;
  static const int kEnd = kMaxInt;

  // We keep a "finger" in the size list to speed up a common pattern:
  // repeated requests for the same or increasing sizes.
  int finger_;

  // Starting from *prev, find and return the smallest size >= index (words),
  // or kEnd.  Update *prev to be the largest size < index, or kHead.
  int FindSize(int index, int* prev) {
    int cur = free_[*prev].next_size_;
    while (cur < index) {
      *prev = cur;
      cur = free_[cur].next_size_;
    }
    return cur;
  }

  // Remove an existing element from the size list.
  void RemoveSize(int index) {
    int prev = kHead;
    int cur = FindSize(index, &prev);
    ASSERT(cur == index);
    free_[prev].next_size_ = free_[cur].next_size_;
    finger_ = prev;
  }

  // Insert a new element into the size list.
  void InsertSize(int index) {
    int prev = kHead;
    int cur = FindSize(index, &prev);
    ASSERT(cur != index);
    free_[prev].next_size_ = index;
    free_[index].next_size_ = cur;
  }

  // The size list is not updated during a sequence of calls to Free, but is
  // rebuilt before the next allocation.
  void RebuildSizeList();
  bool needs_rebuild_;

#ifdef DEBUG
  // Does this free list contain a free block located at the address of 'node'?
  bool Contains(FreeListNode* node);
#endif

  DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
};


// The free list for the map space.
class FixedSizeFreeList BASE_EMBEDDED {
 public:
  FixedSizeFreeList(AllocationSpace owner, int object_size);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  int available() { return available_; }

  // Place a node on the free list.  The block starting at 'start' (assumed to
  // have size object_size_) is placed on the free list.  Bookkeeping
  // information will be written to the block, i.e., its contents will be
  // destroyed.  The start address should be word aligned.
  void Free(Address start);

  // Allocate a fixed-size block from the free list.  The block is
  // uninitialized.  A failure is returned if no block is available.
  Object* Allocate();

 private:
  // Available bytes on the free list.
  int available_;

  // The head of the free list.
  Address head_;

  // The identity of the owning space, for building allocation Failure
  // objects.
  AllocationSpace owner_;

  // The size of the objects in this space.
  int object_size_;

  DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
};


// -----------------------------------------------------------------------------
// Old object space (excluding map objects)

class OldSpace : public PagedSpace {
 public:
  // Creates an old space object with a given maximum capacity.
  // The constructor does not allocate pages from the OS.
  explicit OldSpace(int max_capacity,
                    AllocationSpace id,
                    Executability executable)
      : PagedSpace(max_capacity, id, executable), free_list_(id) {
    page_extra_ = 0;
  }

  // The bytes available on the free list (i.e., not above the linear
  // allocation pointer).
  int AvailableFree() { return free_list_.available(); }

  // The top of allocation in a page in this space.  Undefined if the page is
  // unused.
  virtual Address PageAllocationTop(Page* page) {
    return page == TopPageOf(allocation_info_) ? top() : page->ObjectAreaEnd();
  }

  // Give a block of memory to the space's free list.  It might be added to
  // the free list or accounted as waste.
  void Free(Address start, int size_in_bytes) {
    int wasted_bytes = free_list_.Free(start, size_in_bytes);
    accounting_stats_.DeallocateBytes(size_in_bytes);
    accounting_stats_.WasteBytes(wasted_bytes);
  }
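
  // Illustrative accounting (hypothetical sizes, assuming a 4-byte word):
  // freeing a 24-byte block links all 24 bytes into the free list, so
  // wasted_bytes is 0 and the stats record 24 bytes deallocated, 0 wasted;
  // freeing a 4-byte block, below the free list's kMinBlockSize, presumably
  // cannot be linked in and is accounted entirely as waste.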

  // Prepare for full garbage collection.  Resets the relocation pointer and
  // clears the free list.
  virtual void PrepareForMarkCompact(bool will_compact);

  // Updates the allocation pointer to the relocation top after a mark-compact
  // collection.
  virtual void MCCommitRelocationInfo();

#ifdef DEBUG
  // Reports statistics for the space.
  void ReportStatistics();
  // Dump the remembered sets in the space to stdout.
  void PrintRSet();
#endif

 protected:
  // Virtual function in the superclass.  Slow path of AllocateRaw.
  HeapObject* SlowAllocateRaw(int size_in_bytes);

  // Virtual function in the superclass.  Allocate linearly at the start of
  // the page after current_page (there is assumed to be one).
  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);

 private:
  // The space's free list.
  OldSpaceFreeList free_list_;

 public:
  TRACK_MEMORY("OldSpace")
};


// -----------------------------------------------------------------------------
// Old space for objects of a fixed size

class FixedSpace : public PagedSpace {
 public:
  FixedSpace(int max_capacity,
             AllocationSpace id,
             int object_size_in_bytes,
             const char* name)
      : PagedSpace(max_capacity, id, NOT_EXECUTABLE),
        object_size_in_bytes_(object_size_in_bytes),
        name_(name),
        free_list_(id, object_size_in_bytes) {
    page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
  }
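
  // page_extra_ is the tail of each page's object area that a whole number of
  // fixed-size objects cannot cover.  Purely illustrative arithmetic (the
  // 7936-byte object area is an assumption, not a constant defined here):
  // with 40-byte objects, 7936 % 40 == 16, so the last 16 bytes of every page
  // in the space are never allocated.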

  // The top of allocation in a page in this space.  Undefined if the page is
  // unused.
  virtual Address PageAllocationTop(Page* page) {
    return page == TopPageOf(allocation_info_) ? top()
        : page->ObjectAreaEnd() - page_extra_;
  }

  int object_size_in_bytes() { return object_size_in_bytes_; }

  // Give a fixed-size block of memory to the space's free list.
  void Free(Address start) {
    free_list_.Free(start);
    accounting_stats_.DeallocateBytes(object_size_in_bytes_);
  }

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact);

  // Updates the allocation pointer to the relocation top after a mark-compact
  // collection.
  virtual void MCCommitRelocationInfo();

#ifdef DEBUG
  // Reports statistics for the space.
  void ReportStatistics();

  // Dump the remembered sets in the space to stdout.
  void PrintRSet();
#endif

 protected:
  // Virtual function in the superclass.  Slow path of AllocateRaw.
  HeapObject* SlowAllocateRaw(int size_in_bytes);

  // Virtual function in the superclass.  Allocate linearly at the start of
  // the page after current_page (there is assumed to be one).
  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);

 private:
  // The size of objects in this space.
  int object_size_in_bytes_;

  // The name of this space.
  const char* name_;

  // The space's free list.
  FixedSizeFreeList free_list_;
};


// -----------------------------------------------------------------------------
// Old space for all map objects

class MapSpace : public FixedSpace {
 public:
  // Creates a map space object with a maximum capacity.
  MapSpace(int max_capacity, AllocationSpace id)
      : FixedSpace(max_capacity, id, Map::kSize, "map") {}

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact);

  // Given an index, returns the page address.
  Address PageAddress(int page_index) { return page_addresses_[page_index]; }

  // Constants.
  static const int kMaxMapPageIndex = (1 << MapWord::kMapPageIndexBits) - 1;

 protected:
#ifdef DEBUG
  virtual void VerifyObject(HeapObject* obj);
#endif

 private:
  // An array of page start addresses in a map space.
  Address page_addresses_[kMaxMapPageIndex + 1];

 public:
  TRACK_MEMORY("MapSpace")
};


// -----------------------------------------------------------------------------
// Old space for all global object property cell objects

class CellSpace : public FixedSpace {
 public:
  // Creates a property cell space object with a maximum capacity.
  CellSpace(int max_capacity, AllocationSpace id)
      : FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}

 protected:
#ifdef DEBUG
  virtual void VerifyObject(HeapObject* obj);
#endif

 public:
  TRACK_MEMORY("CellSpace")
};


// -----------------------------------------------------------------------------
// Large objects (> Page::kMaxHeapObjectSize) are allocated and managed by
// the large object space.  A large object is allocated from the OS heap with
// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
// A large object always starts at offset Page::kObjectStartOffset within its
// page.  Large objects do not move during garbage collections.

// A LargeObjectChunk holds exactly one large object page with exactly one
// large object.
class LargeObjectChunk {
 public:
  // Allocates a new LargeObjectChunk that contains a large object page
  // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
  // object and possibly extra remembered set words) bytes after the object
  // area start of that page.  The allocated chunk size is set in the output
  // parameter chunk_size.
  static LargeObjectChunk* New(int size_in_bytes,
                               size_t* chunk_size,
                               Executability executable);

  // Interpret a raw address as a large object chunk.
  static LargeObjectChunk* FromAddress(Address address) {
    return reinterpret_cast<LargeObjectChunk*>(address);
  }

  // Returns the address of this chunk.
  Address address() { return reinterpret_cast<Address>(this); }

  // Accessors for the fields of the chunk.
  LargeObjectChunk* next() { return next_; }
  void set_next(LargeObjectChunk* chunk) { next_ = chunk; }

  size_t size() { return size_; }
  void set_size(size_t size_in_bytes) { size_ = size_in_bytes; }

  // Returns the object in this chunk.
  inline HeapObject* GetObject();

  // Given a requested size (including any extra remembered set words),
  // returns the physical size of a chunk to be allocated.
  static int ChunkSizeFor(int size_in_bytes);

  // Given a chunk size, returns the object size it can accommodate (not
  // including any extra remembered set words).  Used by
  // LargeObjectSpace::Available.  Note that this can overestimate the size
  // of the object that will fit in the chunk---if the object requires extra
  // remembered set words (e.g., for large fixed arrays), the actual object
  // size for the chunk will be smaller than reported by this function.
  static int ObjectSizeFor(int chunk_size) {
    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
  }
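
  // Worked example (assuming illustrative values Page::kPageSize == 8192 and
  // Page::kObjectStartOffset == 256, which are not guaranteed here): a
  // 65536-byte chunk accommodates at most 65536 - 8192 - 256 == 57088 bytes
  // of object, while any chunk of 8448 bytes or less accommodates none.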

 private:
  // A pointer to the next large object chunk in the space or NULL.
  LargeObjectChunk* next_;

  // The size of this chunk.
  size_t size_;

 public:
  TRACK_MEMORY("LargeObjectChunk")
};


class LargeObjectSpace : public Space {
 public:
  explicit LargeObjectSpace(AllocationSpace id);
  virtual ~LargeObjectSpace() {}

  // Initializes internal data structures.
  bool Setup();

  // Releases internal resources, frees objects in this space.
  void TearDown();

  // Allocates a (non-FixedArray, non-Code) large object.
  Object* AllocateRaw(int size_in_bytes);
  // Allocates a large Code object.
  Object* AllocateRawCode(int size_in_bytes);
  // Allocates a large FixedArray.
  Object* AllocateRawFixedArray(int size_in_bytes);

  // Available bytes for objects in this space, not including any extra
  // remembered set words.
  int Available() {
    return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
  }

  virtual int Size() {
    return size_;
  }

  int PageCount() {
    return page_count_;
  }

  // Finds an object for a given address; returns Failure::Exception() if it
  // is not found.  The function iterates through all objects in this space
  // and may be slow.
  Object* FindObject(Address a);

  // Clears remembered sets.
  void ClearRSet();

  // Iterates objects whose remembered set bits are set.
  void IterateRSet(ObjectSlotCallback func);

  // Frees unmarked objects.
  void FreeUnmarkedObjects();

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject* obj);

  // Checks whether the space is empty.
  bool IsEmpty() { return first_chunk_ == NULL; }

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  void Protect();
  void Unprotect();
#endif

#ifdef DEBUG
  virtual void Verify();
  virtual void Print();
  void ReportStatistics();
  void CollectCodeStatistics();
  // Dump the remembered sets in the space to stdout.
  void PrintRSet();
#endif

  // Checks whether an address is in the object area in this space.  It
  // iterates all objects in the space and may be slow.
  bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }

 private:
  // The head of the linked list of large object chunks.
  LargeObjectChunk* first_chunk_;
  int size_;        // allocated bytes
  int page_count_;  // number of chunks

  // Shared implementation of AllocateRaw, AllocateRawCode and
  // AllocateRawFixedArray.
  Object* AllocateRawInternal(int requested_size,
                              int object_size,
                              Executability executable);

  // Returns the number of extra remembered-set bytes (rounded up to the
  // nearest full word) required to cover extra_object_bytes of extra
  // pointers.
  static inline int ExtraRSetBytesFor(int extra_object_bytes);

  friend class LargeObjectIterator;

 public:
  TRACK_MEMORY("LargeObjectSpace")
};


class LargeObjectIterator: public ObjectIterator {
 public:
  explicit LargeObjectIterator(LargeObjectSpace* space);
  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);

  bool has_next() { return current_ != NULL; }
  HeapObject* next();

  // Implementation of ObjectIterator.
  virtual bool has_next_object() { return has_next(); }
  virtual HeapObject* next_object() { return next(); }

 private:
  LargeObjectChunk* current_;
  HeapObjectCallback size_func_;
};
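
// A minimal usage sketch (illustrative; 'space' stands for an existing
// LargeObjectSpace instance):
//
//   LargeObjectIterator it(space);
//   while (it.has_next()) {
//     HeapObject* obj = it.next();
//     // ... visit obj ...
//   }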


} }  // namespace v8::internal

#endif  // V8_SPACES_H_