// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_H_
#define V8_SPACES_H_

#include "allocation.h"
#include "hashmap.h"
#include "list.h"
#include "log.h"

namespace v8 {
namespace internal {

class Isolate;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; other old objects go into the old space.
// The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous.  The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
// A store-buffer based write barrier is used to keep track of intergenerational
// references.  See store-buffer.h.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps so if the page belongs to old pointer space or large object
// space it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
// new space. Thus objects in old pointer and large object spaces should have a
// special layout (e.g. no bare integer fields). This requirement does not
// apply to map space which is iterated in a special fashion. However we still
// require pointer fields of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage.  Garbage sections are marked with a special map.  These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries.  Garbage sections are chained
// together to form a free list after a GC.  Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain.  Very
// small free spaces are ignored, they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section.  The start of this
// section is denoted by the top field in the space.  The end of the section
// is denoted by the limit field in the space.  This special garbage section
// is not marked with a free space map in the data.  The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created.  The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one page
// has a special garbage section, and if the top and limit are equal then there
// is no special garbage section.

// Some assertion macros used in debug mode.

#define ASSERT_PAGE_ALIGNED(address)                                           \
  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define ASSERT_OBJECT_ALIGNED(address)                                         \
  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define ASSERT_MAP_ALIGNED(address)                                            \
  ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)

#define ASSERT_OBJECT_SIZE(size)                                               \
  ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))

#define ASSERT_PAGE_OFFSET(offset)                                             \
  ASSERT((Page::kObjectStartOffset <= offset)                                  \
      && (offset <= Page::kPageSize))

#define ASSERT_MAP_PAGE_INDEX(index)                                           \
  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))


class PagedSpace;
class MemoryAllocator;
class AllocationInfo;
class Space;
class FreeList;
class MemoryChunk;

class MarkBit {
 public:
  typedef uint32_t CellType;

  inline MarkBit(CellType* cell, CellType mask, bool data_only)
      : cell_(cell), mask_(mask), data_only_(data_only) { }

  inline CellType* cell() { return cell_; }
  inline CellType mask() { return mask_; }

#ifdef DEBUG
  bool operator==(const MarkBit& other) {
    return cell_ == other.cell_ && mask_ == other.mask_;
  }
#endif

  inline void Set() { *cell_ |= mask_; }
  inline bool Get() { return (*cell_ & mask_) != 0; }
  inline void Clear() { *cell_ &= ~mask_; }

  inline bool data_only() { return data_only_; }

  inline MarkBit Next() {
    CellType new_mask = mask_ << 1;
    if (new_mask == 0) {
      return MarkBit(cell_ + 1, 1, data_only_);
    } else {
      return MarkBit(cell_, new_mask, data_only_);
    }
  }

 private:
  CellType* cell_;
  CellType mask_;
  // This boolean indicates that the object is in a data-only space with no
  // pointers.  This enables some optimizations when marking.
  // It is expected that this field is inlined and turned into control flow
  // at the place where the MarkBit object is created.
  bool data_only_;
};


// Bitmap is a sequence of cells each containing a fixed number of bits.
class Bitmap {
 public:
  static const uint32_t kBitsPerCell = 32;
  static const uint32_t kBitsPerCellLog2 = 5;
  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;

  static const size_t kLength =
    (1 << kPageSizeBits) >> (kPointerSizeLog2);

  static const size_t kSize =
    (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);


  static int CellsForLength(int length) {
    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
  }

  int CellsCount() {
    return CellsForLength(kLength);
  }

  static int SizeFor(int cells_count) {
    return sizeof(MarkBit::CellType) * cells_count;
  }

  INLINE(static uint32_t IndexToCell(uint32_t index)) {
    return index >> kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellToIndex(uint32_t index)) {
    return index << kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
    return (index + kBitIndexMask) & ~kBitIndexMask;
  }

  INLINE(MarkBit::CellType* cells()) {
    return reinterpret_cast<MarkBit::CellType*>(this);
  }

  INLINE(Address address()) {
    return reinterpret_cast<Address>(this);
  }

  INLINE(static Bitmap* FromAddress(Address addr)) {
    return reinterpret_cast<Bitmap*>(addr);
  }

  inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
    MarkBit::CellType mask = 1 << (index & kBitIndexMask);
    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
    return MarkBit(cell, mask, data_only);
  }

  static inline void Clear(MemoryChunk* chunk);

  static void PrintWord(uint32_t word, uint32_t himask = 0) {
    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
      if ((mask & himask) != 0) PrintF("[");
      PrintF((mask & word) ? "1" : "0");
      if ((mask & himask) != 0) PrintF("]");
    }
  }

  class CellPrinter {
   public:
    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }

    void Print(uint32_t pos, uint32_t cell) {
      if (cell == seq_type) {
        seq_length++;
        return;
      }

      Flush();

      if (IsSeq(cell)) {
        seq_start = pos;
        seq_length = 0;
        seq_type = cell;
        return;
      }

      PrintF("%d: ", pos);
      PrintWord(cell);
      PrintF("\n");
    }

    void Flush() {
      if (seq_length > 0) {
        PrintF("%d: %dx%d\n",
               seq_start,
               seq_type == 0 ? 0 : 1,
               seq_length * kBitsPerCell);
        seq_length = 0;
      }
    }

    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }

   private:
    uint32_t seq_start;
    uint32_t seq_type;
    uint32_t seq_length;
  };

  void Print() {
    CellPrinter printer;
    for (int i = 0; i < CellsCount(); i++) {
      printer.Print(i, cells()[i]);
    }
    printer.Flush();
    PrintF("\n");
  }

  bool IsClean() {
    for (int i = 0; i < CellsCount(); i++) {
      if (cells()[i] != 0) return false;
    }
    return true;
  }
};
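
// Illustrative sketch, not part of the original header: how a mark-bit index
// decomposes into a cell pointer and a bit mask, mirroring MarkBitFromIndex()
// above.  The helper name is hypothetical.
inline MarkBit MarkBitForIndexSketch(Bitmap* bitmap, uint32_t index) {
  // The high bits of the index select the 32-bit cell ...
  MarkBit::CellType* cell = bitmap->cells() + Bitmap::IndexToCell(index);
  // ... and the low five bits select the bit within that cell.
  MarkBit::CellType mask = 1 << (index & Bitmap::kBitIndexMask);
  return MarkBit(cell, mask, false);
}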


class SkipList;
class SlotsBuffer;

// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk {
 public:
  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromAddress(Address a) {
    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
  }

  // Only works for addresses in pointer spaces, not data or code spaces.
  static inline MemoryChunk* FromAnyPointerAddress(Address addr);

  Address address() { return reinterpret_cast<Address>(this); }

  bool is_valid() { return address() != NULL; }

  MemoryChunk* next_chunk() const { return next_chunk_; }
  MemoryChunk* prev_chunk() const { return prev_chunk_; }

  void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; }
  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; }

  Space* owner() const {
    if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
        kFailureTag) {
      return reinterpret_cast<Space*>(owner_ - kFailureTag);
    } else {
      return NULL;
    }
  }

  void set_owner(Space* space) {
    ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0);
    owner_ = reinterpret_cast<Address>(space) + kFailureTag;
    ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
           kFailureTag);
  }

  VirtualMemory* reserved_memory() {
    return &reservation_;
  }

  void InitializeReservedMemory() {
    reservation_.Reset();
  }

  void set_reserved_memory(VirtualMemory* reservation) {
    ASSERT_NOT_NULL(reservation);
    reservation_.TakeControl(reservation);
  }

  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
  void initialize_scan_on_scavenge(bool scan) {
    if (scan) {
      SetFlag(SCAN_ON_SCAVENGE);
    } else {
      ClearFlag(SCAN_ON_SCAVENGE);
    }
  }
  inline void set_scan_on_scavenge(bool scan);

  int store_buffer_counter() { return store_buffer_counter_; }
  void set_store_buffer_counter(int counter) {
    store_buffer_counter_ = counter;
  }

  bool Contains(Address addr) {
    return addr >= area_start() && addr < area_end();
  }

  // Checks whether addr can be a limit of addresses in this page.
  // It's a limit if it's in the page, or if it's just after the
  // last byte of the page.
  bool ContainsLimit(Address addr) {
    return addr >= area_start() && addr <= area_end();
  }

  enum MemoryChunkFlags {
    IS_EXECUTABLE,
    ABOUT_TO_BE_FREED,
    POINTERS_TO_HERE_ARE_INTERESTING,
    POINTERS_FROM_HERE_ARE_INTERESTING,
    SCAN_ON_SCAVENGE,
    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
    IN_TO_SPACE,    // All pages in new space have one of these two set.
    NEW_SPACE_BELOW_AGE_MARK,
    CONTAINS_ONLY_DATA,
    EVACUATION_CANDIDATE,
    RESCAN_ON_EVACUATION,

    // Pages swept precisely can be iterated, hitting only the live objects,
    // whereas those swept conservatively cannot be iterated over. Both flags
    // indicate that marking bits have been cleared by the sweeper, otherwise
    // marking bits are still intact.
    WAS_SWEPT_PRECISELY,
    WAS_SWEPT_CONSERVATIVELY,

    // Last flag, keep at bottom.
    NUM_MEMORY_CHUNK_FLAGS
  };


  static const int kPointersToHereAreInterestingMask =
      1 << POINTERS_TO_HERE_ARE_INTERESTING;

  static const int kPointersFromHereAreInterestingMask =
      1 << POINTERS_FROM_HERE_ARE_INTERESTING;

  static const int kEvacuationCandidateMask =
      1 << EVACUATION_CANDIDATE;

  static const int kSkipEvacuationSlotsRecordingMask =
      (1 << EVACUATION_CANDIDATE) |
      (1 << RESCAN_ON_EVACUATION) |
      (1 << IN_FROM_SPACE) |
      (1 << IN_TO_SPACE);


  void SetFlag(int flag) {
    flags_ |= static_cast<uintptr_t>(1) << flag;
  }

  void ClearFlag(int flag) {
    flags_ &= ~(static_cast<uintptr_t>(1) << flag);
  }

  void SetFlagTo(int flag, bool value) {
    if (value) {
      SetFlag(flag);
    } else {
      ClearFlag(flag);
    }
  }

  bool IsFlagSet(int flag) {
    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
  }

  // Set or clear multiple flags at a time. The flags in the mask
  // are set to the value in "flags", the rest retain the current value
  // in flags_.
  void SetFlags(intptr_t flags, intptr_t mask) {
    flags_ = (flags_ & ~mask) | (flags & mask);
  }

  // Return all current flags.
  intptr_t GetFlags() { return flags_; }

  // Manage live byte count (count of bytes known to be live,
  // because they are marked black).
  void ResetLiveBytes() {
    if (FLAG_gc_verbose) {
      PrintF("ResetLiveBytes:%p:%x->0\n",
             static_cast<void*>(this), live_byte_count_);
    }
    live_byte_count_ = 0;
  }
  void IncrementLiveBytes(int by) {
    if (FLAG_gc_verbose) {
      printf("UpdateLiveBytes:%p:%x%c=%x->%x\n",
             static_cast<void*>(this), live_byte_count_,
             ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
             live_byte_count_ + by);
    }
    live_byte_count_ += by;
    ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
  }
  int LiveBytes() {
    ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
    return live_byte_count_;
  }

  static void IncrementLiveBytesFromGC(Address address, int by) {
    MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
  }

  static void IncrementLiveBytesFromMutator(Address address, int by);

  static const intptr_t kAlignment =
      (static_cast<uintptr_t>(1) << kPageSizeBits);

  static const intptr_t kAlignmentMask = kAlignment - 1;

  static const intptr_t kSizeOffset = kPointerSize + kPointerSize;

  static const intptr_t kLiveBytesOffset =
     kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
     kPointerSize + kPointerSize +
     kPointerSize + kPointerSize + kPointerSize + kIntSize;

  static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;

  static const size_t kHeaderSize =
      kSlotsBufferOffset + kPointerSize + kPointerSize;

  static const int kBodyOffset =
    CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both.  Also aligned to 32 words because
  // the marking bitmap is arranged in 32 bit chunks.
  static const int kObjectStartAlignment = 32 * kPointerSize;
  static const int kObjectStartOffset = kBodyOffset - 1 +
      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);

  size_t size() const { return size_; }

  void set_size(size_t size) {
    size_ = size;
  }

  void SetArea(Address area_start, Address area_end) {
    area_start_ = area_start;
    area_end_ = area_end;
  }

  Executability executable() {
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
  }

  bool ContainsOnlyData() {
    return IsFlagSet(CONTAINS_ONLY_DATA);
  }

  bool InNewSpace() {
    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
  }

  bool InToSpace() {
    return IsFlagSet(IN_TO_SPACE);
  }

  bool InFromSpace() {
    return IsFlagSet(IN_FROM_SPACE);
  }

  // ---------------------------------------------------------------------
  // Markbits support

  inline Bitmap* markbits() {
    return Bitmap::FromAddress(address() + kHeaderSize);
  }

  void PrintMarkbits() { markbits()->Print(); }

  inline uint32_t AddressToMarkbitIndex(Address addr) {
    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
  }

  inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
    const intptr_t offset =
        reinterpret_cast<intptr_t>(addr) & kAlignmentMask;

    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
  }

  inline Address MarkbitIndexToAddress(uint32_t index) {
    return this->address() + (index << kPointerSizeLog2);
  }

  void InsertAfter(MemoryChunk* other);
  void Unlink();

  inline Heap* heap() { return heap_; }

  static const int kFlagsOffset = kPointerSize * 3;

  bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }

  bool ShouldSkipEvacuationSlotRecording() {
    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
  }

  inline SkipList* skip_list() {
    return skip_list_;
  }

  inline void set_skip_list(SkipList* skip_list) {
    skip_list_ = skip_list;
  }

  inline SlotsBuffer* slots_buffer() {
    return slots_buffer_;
  }

  inline SlotsBuffer** slots_buffer_address() {
    return &slots_buffer_;
  }

  void MarkEvacuationCandidate() {
    ASSERT(slots_buffer_ == NULL);
    SetFlag(EVACUATION_CANDIDATE);
  }

  void ClearEvacuationCandidate() {
    ASSERT(slots_buffer_ == NULL);
    ClearFlag(EVACUATION_CANDIDATE);
  }

  Address area_start() { return area_start_; }
  Address area_end() { return area_end_; }
  int area_size() {
    return static_cast<int>(area_end() - area_start());
  }

 protected:
  MemoryChunk* next_chunk_;
  MemoryChunk* prev_chunk_;
  size_t size_;
  intptr_t flags_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;
  Address area_end_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  VirtualMemory reservation_;
  // The identity of the owning space.  This is tagged as a failure pointer, but
  // no failure can be in an object, so this can be distinguished from any entry
  // in a fixed array.
  Address owner_;
  Heap* heap_;
  // Used by the store buffer to keep track of which pages to mark scan-on-
  // scavenge.
  int store_buffer_counter_;
  // Count of bytes marked black on page.
  int live_byte_count_;
  SlotsBuffer* slots_buffer_;
  SkipList* skip_list_;

  static MemoryChunk* Initialize(Heap* heap,
                                 Address base,
                                 size_t size,
                                 Address area_start,
                                 Address area_end,
                                 Executability executable,
                                 Space* owner);

  friend class MemoryAllocator;
};

STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
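
// Illustrative sketch, not part of the original header: combining
// MemoryChunk::FromAddress() with the mark-bit accessors above to reach the
// mark bit that covers a given chunk-resident heap address.  The helper name
// is hypothetical.
inline MarkBit MarkBitForAddressSketch(Address addr) {
  // Round the address down to its 1MB-aligned chunk ...
  MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
  // ... then index the chunk's bitmap by the pointer-aligned offset.
  return chunk->markbits()->MarkBitFromIndex(
      chunk->AddressToMarkbitIndex(addr));
}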

// -----------------------------------------------------------------------------
// A page is a memory chunk of size 1MB. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page : public MemoryChunk {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[
  // This only works if the object is in fact in a page.  See also MemoryChunk::
  // FromAddress() and FromAnyAddress().
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // kPointerSize from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    return p;
  }

  // Returns the next page in the chain of pages owned by a space.
  inline Page* next_page();
  inline Page* prev_page();
  inline void set_next_page(Page* page);
  inline void set_prev_page(Page* page);

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // Returns the offset of a given address within this page.
  INLINE(int Offset(Address a)) {
    int offset = static_cast<int>(a - address());
    return offset;
  }

  // Returns the address for a given offset into this page.
  Address OffsetToAddress(int offset) {
    ASSERT_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // ---------------------------------------------------------------------

  // Page size in bytes.  This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Object area size in bytes.
  static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;

  // Maximum object size that fits in a page.
  static const int kMaxNonCodeHeapObjectSize = kNonCodeObjectAreaSize;

  // Page size mask.
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  inline void ClearGCFields();

  static inline Page* Initialize(Heap* heap,
                                 MemoryChunk* chunk,
                                 Executability executable,
                                 PagedSpace* owner);

  void InitializeAsAnchor(PagedSpace* owner);

  bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
  bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
  bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }

  void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
  void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }

  void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
  void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }

#ifdef DEBUG
  void Print();
#endif  // DEBUG

  friend class MemoryAllocator;
};


STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);


class LargePage : public MemoryChunk {
 public:
  HeapObject* GetObject() {
    return HeapObject::FromAddress(area_start());
  }

  inline LargePage* next_page() const {
    return static_cast<LargePage*>(next_chunk());
  }

  inline void set_next_page(LargePage* page) {
    set_next_chunk(page);
  }
 private:
  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);

  friend class MemoryAllocator;
};

STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize);

// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(Heap* heap, AllocationSpace id, Executability executable)
      : heap_(heap), id_(id), executable_(executable) {}

  virtual ~Space() {}

  Heap* heap() const { return heap_; }

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  // Returns allocated size.
  virtual intptr_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual intptr_t SizeOfObjects() { return Size(); }

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (id_ == CODE_SPACE) {
      return RoundDown(size, kCodeAlignment);
    } else {
      return RoundDown(size, kPointerSize);
    }
  }

#ifdef DEBUG
  virtual void Print() = 0;
#endif

  // After calling this we can allocate a certain number of bytes using only
  // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
  // without using freelists or causing a GC.  This is used by partial
  // snapshots.  It returns true if space was reserved or false if a GC is
  // needed.  For paged spaces the space requested must include the space wasted
  // at the end of each page when allocating linearly.
  virtual bool ReserveSpace(int bytes) = 0;

 private:
  Heap* heap_;
  AllocationSpace id_;
  Executability executable_;
};


// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements.  This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4GB virtual address space.  On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
class CodeRange {
 public:
  explicit CodeRange(Isolate* isolate);
  ~CodeRange() { TearDown(); }

  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  bool SetUp(const size_t requested_size);

  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  void TearDown();

  bool exists() { return this != NULL && code_range_ != NULL; }
  bool contains(Address address) {
    if (this == NULL || code_range_ == NULL) return false;
    Address start = static_cast<Address>(code_range_->address());
    return start <= address && address < start + code_range_->size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range.  On platforms with no separate code range, should
  // not be called.
  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested,
                                            size_t* allocated);
  void FreeRawMemory(Address buf, size_t length);

 private:
  Isolate* isolate_;

  // The reserved range of virtual memory that all code objects are put in.
  VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
  class FreeBlock {
   public:
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {
      ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
    }
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {
      ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
    }

    Address start;
    size_t size;
  };

  // Freed blocks of memory are added to the free list.  When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  List<FreeBlock> free_list_;
  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  List<FreeBlock> allocation_list_;
  int current_allocation_block_index_;

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory.  If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
  void GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);

  DISALLOW_COPY_AND_ASSIGN(CodeRange);
};


class SkipList {
 public:
  SkipList() {
    Clear();
  }

  void Clear() {
    for (int idx = 0; idx < kSize; idx++) {
      starts_[idx] = reinterpret_cast<Address>(-1);
    }
  }

  Address StartFor(Address addr) {
    return starts_[RegionNumber(addr)];
  }

  void AddObject(Address addr, int size) {
    int start_region = RegionNumber(addr);
    int end_region = RegionNumber(addr + size - kPointerSize);
    for (int idx = start_region; idx <= end_region; idx++) {
      if (starts_[idx] > addr) starts_[idx] = addr;
    }
  }

  static inline int RegionNumber(Address addr) {
    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
  }

  static void Update(Address addr, int size) {
    Page* page = Page::FromAddress(addr);
    SkipList* list = page->skip_list();
    if (list == NULL) {
      list = new SkipList();
      page->set_skip_list(list);
    }

    list->AddObject(addr, size);
  }

 private:
  static const int kRegionSizeLog2 = 13;
  static const int kRegionSize = 1 << kRegionSizeLog2;
  static const int kSize = Page::kPageSize / kRegionSize;

  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);

  Address starts_[kSize];
};
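
// Illustrative sketch, not part of the original header: typical use of the
// skip list above.  SkipList::Update() records the start of a newly written
// object; the lookup below returns the lowest recorded start address of an
// object overlapping the 8KB region (kRegionSizeLog2 == 13) that contains the
// queried address.  The helper name is hypothetical.
inline Address SkipListStartSketch(Address addr) {
  SkipList* list = Page::FromAddress(addr)->skip_list();
  // A page gets its SkipList lazily via SkipList::Update(); entries start out
  // as Address(-1), meaning "no object recorded for this region yet".
  return list != NULL ? list->StartFor(addr) : reinterpret_cast<Address>(-1);
}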


// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and
// large pages for large object space.
//
// Each space has to manage its own pages.
//
class MemoryAllocator {
 public:
  explicit MemoryAllocator(Isolate* isolate);

  // Initializes its internal bookkeeping structures.
  // Max capacity of the total space and executable memory limit.
  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);

  void TearDown();

  Page* AllocatePage(PagedSpace* owner, Executability executable);

  LargePage* AllocateLargePage(intptr_t object_size,
                                      Executability executable,
                                      Space* owner);

  void Free(MemoryChunk* chunk);

  // Returns the maximum available bytes of heaps.
  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

  // Returns allocated spaces in bytes.
  intptr_t Size() { return size_; }

  // Returns the maximum available executable bytes of heaps.
  intptr_t AvailableExecutable() {
    if (capacity_executable_ < size_executable_) return 0;
    return capacity_executable_ - size_executable_;
  }

  // Returns allocated executable spaces in bytes.
  intptr_t SizeExecutable() { return size_executable_; }

  // Returns maximum available bytes that the old space can have.
  intptr_t MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
  }

#ifdef DEBUG
  // Reports statistic info of the space.
  void ReportStatistics();
#endif

  MemoryChunk* AllocateChunk(intptr_t body_size,
                             Executability executable,
                             Space* space);

  Address ReserveAlignedMemory(size_t requested,
                               size_t alignment,
                               VirtualMemory* controller);
  Address AllocateAlignedMemory(size_t requested,
                                size_t alignment,
                                Executability executable,
                                VirtualMemory* controller);

  void FreeMemory(VirtualMemory* reservation, Executability executable);
  void FreeMemory(Address addr, size_t size, Executability executable);

  // Commit a contiguous block of memory from the initial chunk.  Assumes that
  // the address is not NULL, the size is greater than zero, and that the
  // block is contained in the initial chunk.  Returns true if it succeeded
  // and false otherwise.
  bool CommitBlock(Address start, size_t size, Executability executable);

  // Uncommit a contiguous block of memory [start..(start+size)[.
  // start is not NULL, the size is greater than zero, and the
  // block is contained in the initial chunk.  Returns true if it succeeded
  // and false otherwise.
  bool UncommitBlock(Address start, size_t size);

  // Zaps a contiguous block of memory [start..(start+size)[ thus
  // filling it up with a recognizable non-NULL bit pattern.
  void ZapBlock(Address start, size_t size);

  void PerformAllocationCallback(ObjectSpace space,
                                 AllocationAction action,
                                 size_t size);

  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
                                          ObjectSpace space,
                                          AllocationAction action);

  void RemoveMemoryAllocationCallback(
      MemoryAllocationCallback callback);

  bool MemoryAllocationCallbackRegistered(
      MemoryAllocationCallback callback);

  static int CodePageGuardStartOffset();

  static int CodePageGuardSize();

  static int CodePageAreaStartOffset();

  static int CodePageAreaEndOffset();

  static int CodePageAreaSize() {
    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
  }

  MUST_USE_RESULT static bool CommitCodePage(VirtualMemory* vm,
                                             Address start,
                                             size_t size);

 private:
  Isolate* isolate_;

  // Maximum space size in bytes.
  size_t capacity_;
  // Maximum subset of capacity_ that can be executable.
  size_t capacity_executable_;

  // Allocated space size in bytes.
  size_t size_;
  // Allocated executable space size in bytes.
  size_t size_executable_;

  struct MemoryAllocationCallbackRegistration {
    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
                                         ObjectSpace space,
                                         AllocationAction action)
        : callback(callback), space(space), action(action) {
    }
    MemoryAllocationCallback callback;
    ObjectSpace space;
    AllocationAction action;
  };

  // A list of callbacks that are triggered when memory is allocated or freed.
  List<MemoryAllocationCallbackRegistration>
      memory_allocation_callbacks_;

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                               PagedSpace* owner);

  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};


// -----------------------------------------------------------------------------
// Interface for heap object iterators, to be implemented by all object space
// object iterators.
//
// NOTE: The space-specific object iterators also implement their own next()
//       method, which is used to avoid virtual function calls when
//       iterating a specific space.

class ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() { }

  virtual HeapObject* next_object() = 0;
};


// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A HeapObjectIterator iterates objects from the bottom of the given space
// to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects.  The caller must create a new
// iterator in order to be sure to visit these new objects.
class HeapObjectIterator: public ObjectIterator {
 public:
  // Creates a new object iterator in a given space.
  // If the size function is not given, the iterator calls the default
  // Object::Size().
  explicit HeapObjectIterator(PagedSpace* space);
  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
  HeapObjectIterator(Page* page, HeapObjectCallback size_func);

  // Advance to the next object, skipping free spaces and other fillers and
  // skipping the special garbage section of which there is one per space.
  // Returns NULL when the iteration has ended.
  inline HeapObject* Next() {
    do {
      HeapObject* next_obj = FromCurrentPage();
      if (next_obj != NULL) return next_obj;
    } while (AdvanceToNextPage());
    return NULL;
  }

  virtual HeapObject* next_object() {
    return Next();
  }

 private:
  enum PageMode { kOnePageOnly, kAllPagesInSpace };

  Address cur_addr_;  // Current iteration point.
  Address cur_end_;   // End iteration point.
  HeapObjectCallback size_func_;  // Size function or NULL.
  PagedSpace* space_;
  PageMode page_mode_;

  // Fast (inlined) path of next().
  inline HeapObject* FromCurrentPage();

  // Slow path of next(), goes into the next page.  Returns false if the
  // iteration has ended.
  bool AdvanceToNextPage();

  // Initializes fields.
  inline void Initialize(PagedSpace* owner,
                         Address start,
                         Address end,
                         PageMode mode,
                         HeapObjectCallback size_func);
};
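
// Illustrative sketch, not part of the original header: the intended use of
// the iterator above.  Next() returns NULL once the space (or the single
// page) has been exhausted.  The function name is hypothetical.
inline void VisitAllObjectsSketch(PagedSpace* space) {
  HeapObjectIterator it(space);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    // Inspect obj here; objects allocated during iteration may be missed.
  }
}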


// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a paged space.

class PageIterator BASE_EMBEDDED {
 public:
  explicit inline PageIterator(PagedSpace* space);

  inline bool has_next();
  inline Page* next();

 private:
  PagedSpace* space_;
  Page* prev_page_;  // Previous page returned.
  // Next page that will be returned.  Cached here so that we can use this
  // iterator for operations that deallocate pages.
  Page* next_page_;
};


// -----------------------------------------------------------------------------
// A space has a circular list of pages. The next page can be accessed via
// a Page::next_page() call.

// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
 public:
  AllocationInfo() : top(NULL), limit(NULL) {
  }

  Address top;  // Current allocation top.
  Address limit;  // Current allocation limit.

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
        && (top <= limit);
  }
#endif
};
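
// Illustrative sketch, not part of the original header: the bump-pointer
// allocation that the top/limit pair above is meant for (see also the
// FreeList comment further down).  The helper is hypothetical and ignores the
// real allocator's failure and alignment handling.
inline Address LinearAllocateSketch(AllocationInfo* info, int size_in_bytes) {
  if (info->top + size_in_bytes > info->limit) {
    return NULL;  // Out of linear space; a real allocator consults free lists.
  }
  Address result = info->top;
  info->top += size_in_bytes;  // Bump the allocation top.
  return result;
}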


// An abstraction of the accounting statistics of a page-structured space.
// The 'capacity' of a space is the number of object-area bytes (i.e., not
// including page bookkeeping structures) currently in the space. The 'size'
// of a space is the number of allocated bytes, the 'waste' in the space is
// the number of bytes that are not allocated and not available to
// allocation without reorganizing the space via a GC (e.g. small blocks due
// to internal fragmentation, top of page areas in map space), and the bytes
// 'available' is the number of unallocated bytes that are not waste.  The
// capacity is the sum of size, waste, and available.
//
// The stats are only set by functions that ensure they stay balanced. These
// functions increase or decrease one of the non-capacity stats in
// conjunction with capacity, or else they always balance increases and
// decreases to the non-capacity stats.
class AllocationStats BASE_EMBEDDED {
 public:
  AllocationStats() { Clear(); }

  // Zero out all the allocation statistics (i.e., no capacity).
  void Clear() {
    capacity_ = 0;
    size_ = 0;
    waste_ = 0;
  }

  void ClearSizeWaste() {
    size_ = capacity_;
    waste_ = 0;
  }

  // Reset the allocation statistics (i.e., available = capacity with no
  // wasted or allocated bytes).
  void Reset() {
    size_ = 0;
    waste_ = 0;
  }

  // Accessors for the allocation statistics.
  intptr_t Capacity() { return capacity_; }
  intptr_t Size() { return size_; }
  intptr_t Waste() { return waste_; }

  // Grow the space by adding available bytes.  They are initially marked as
  // being in use (part of the size), but will normally be immediately freed,
  // putting them on the free list and removing them from size_.
  void ExpandSpace(int size_in_bytes) {
    capacity_ += size_in_bytes;
    size_ += size_in_bytes;
    ASSERT(size_ >= 0);
  }

  // Shrink the space by removing available bytes.  Since shrinking is done
  // during sweeping, bytes have been marked as being in use (part of the size)
  // and are hereby freed.
  void ShrinkSpace(int size_in_bytes) {
    capacity_ -= size_in_bytes;
    size_ -= size_in_bytes;
    ASSERT(size_ >= 0);
  }

  // Allocate from available bytes (available -> size).
  void AllocateBytes(intptr_t size_in_bytes) {
    size_ += size_in_bytes;
    ASSERT(size_ >= 0);
  }

  // Free allocated bytes, making them available (size -> available).
  void DeallocateBytes(intptr_t size_in_bytes) {
    size_ -= size_in_bytes;
    ASSERT(size_ >= 0);
  }

  // Waste free bytes (available -> waste).
  void WasteBytes(int size_in_bytes) {
    size_ -= size_in_bytes;
    waste_ += size_in_bytes;
    ASSERT(size_ >= 0);
  }

 private:
  intptr_t capacity_;
  intptr_t size_;
  intptr_t waste_;
};
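
// Illustrative sketch, not part of the original header: how the balanced
// accounting above is meant to be driven.  The byte counts are arbitrary; the
// point is that Size() + Waste() never exceeds Capacity().
inline void AllocationStatsSketch() {
  AllocationStats stats;
  stats.ExpandSpace(Page::kPageSize);  // capacity += N, size += N
  stats.DeallocateBytes(512);          // size -> available
  stats.WasteBytes(64);                // available -> waste
  ASSERT(stats.Size() + stats.Waste() <= stats.Capacity());
}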


// -----------------------------------------------------------------------------
// Free lists for old object spaces
//
// Free-list nodes are free blocks in the heap.  They look like heap objects
// (free-list node pointers have the heap object tag, and they have a map like
// a heap object).  They have a size and a next pointer.  The next pointer is
// the raw address of the next free list node (or NULL).
class FreeListNode: public HeapObject {
 public:
  // Obtain a free-list node from a raw address.  This is not a cast because
  // it does not check nor require that the first word at the address is a map
  // pointer.
  static FreeListNode* FromAddress(Address address) {
    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
  }

  static inline bool IsFreeListNode(HeapObject* object);

  // Set the size in bytes, which can be read with HeapObject::Size().  This
  // function also writes a map to the first word of the block so that it
  // looks like a heap object to the garbage collector and heap iteration
  // functions.
  void set_size(Heap* heap, int size_in_bytes);

  // Accessors for the next field.
  inline FreeListNode* next();
  inline FreeListNode** next_address();
  inline void set_next(FreeListNode* next);

  inline void Zap();

 private:
  static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
};


// The free list for the old space.  The free list is organized in such a way
// as to encourage objects allocated around the same time to be near each
// other.  The normal way to allocate is intended to be by bumping a 'top'
// pointer until it hits a 'limit' pointer.  When the limit is hit we need to
// find a new space to allocate from.  This is done with the free list, which
// is divided up into rough categories to cut down on waste.  Having finer
// categories would scatter allocation more.

// The old space free list is organized in categories.
// 1-31 words:  Such small free areas are discarded for efficiency reasons.
//     They can be reclaimed by the compactor.  However the distance between top
//     and limit may be this small.
// 32-255 words: There is a list of spaces this large.  It is used for top and
//     limit when the object we need to allocate is 1-31 words in size.  These
//     spaces are called small.
// 256-2047 words: There is a list of spaces this large.  It is used for top and
//     limit when the object we need to allocate is 32-255 words in size.  These
//     spaces are called medium.
// 2048-16383 words: There is a list of spaces this large.  It is used for top
//     and limit when the object we need to allocate is 256-2047 words in size.
//     These spaces are called large.
// At least 16384 words.  This list is for objects of 2048 words or larger.
//     Empty pages are added to this list.  These spaces are called huge.
class FreeList BASE_EMBEDDED {
 public:
  explicit FreeList(PagedSpace* owner);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  intptr_t available() { return available_; }

  // Place a node on the free list.  The block of size 'size_in_bytes'
  // starting at 'start' is placed on the free list.  The return value is the
  // number of bytes that have been lost due to internal fragmentation by
  // freeing the block.  Bookkeeping information will be written to the block,
  // i.e., its contents will be destroyed.  The start address should be word
  // aligned, and the size should be a non-zero multiple of the word size.
  int Free(Address start, int size_in_bytes);

  // Allocate a block of size 'size_in_bytes' from the free list.  The block
  // is uninitialized.  A failure is returned if no block is available.  The
  // number of bytes lost to fragmentation is returned in the output parameter
  // 'wasted_bytes'.  The size should be a non-zero multiple of the word size.
  MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);

#ifdef DEBUG
  void Zap();
  static intptr_t SumFreeList(FreeListNode* node);
  static int FreeListLength(FreeListNode* cur);
  intptr_t SumFreeLists();
  bool IsVeryLong();
#endif

  struct SizeStats {
    intptr_t Total() {
      return small_size_ + medium_size_ + large_size_ + huge_size_;
    }

    intptr_t small_size_;
    intptr_t medium_size_;
    intptr_t large_size_;
    intptr_t huge_size_;
  };

  void CountFreeListItems(Page* p, SizeStats* sizes);

  intptr_t EvictFreeListItems(Page* p);

 private:
  // The size range of blocks, in bytes.
  static const int kMinBlockSize = 3 * kPointerSize;
  static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;

  FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);

  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);

  PagedSpace* owner_;
  Heap* heap_;

  // Total available bytes in all blocks on this free list.
  int available_;

  static const int kSmallListMin = 0x20 * kPointerSize;
  static const int kSmallListMax = 0xff * kPointerSize;
  static const int kMediumListMax = 0x7ff * kPointerSize;
  static const int kLargeListMax = 0x3fff * kPointerSize;
  static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
  static const int kMediumAllocationMax = kSmallListMax;
  static const int kLargeAllocationMax = kMediumListMax;
  FreeListNode* small_list_;
  FreeListNode* medium_list_;
  FreeListNode* large_list_;
  FreeListNode* huge_list_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
};
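
// Illustrative sketch, not part of the original header: the size-to-category
// mapping spelled out in the comment above FreeList, with the word ranges
// repeated here.  The helper name is hypothetical.
inline const char* FreeListCategorySketch(int size_in_words) {
  if (size_in_words < 32) return "ignored (too small to keep on a free list)";
  if (size_in_words <= 255) return "small";
  if (size_in_words <= 2047) return "medium";
  if (size_in_words <= 16383) return "large";
  return "huge";
}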
1425 
1426 
1427 class PagedSpace : public Space {
1428  public:
1429   // Creates a space with a maximum capacity, and an id.
1430   PagedSpace(Heap* heap,
1431              intptr_t max_capacity,
1432              AllocationSpace id,
1433              Executability executable);
1434 
1435   virtual ~PagedSpace() {}
1436 
1437   // Set up the space using the given address range of virtual memory (from
1438   // the memory allocator's initial chunk) if possible.  If the block of
1439   // addresses is not big enough to contain a single page-aligned page, a
1440   // fresh chunk will be allocated.
1441   bool SetUp();
1442 
1443   // Returns true if the space has been successfully set up and not
1444   // subsequently torn down.
1445   bool HasBeenSetUp();
1446 
1447   // Cleans up the space, frees all pages in this space except those belonging
1448   // to the initial chunk, uncommits addresses in the initial chunk.
1449   void TearDown();
1450 
1451   // Checks whether an object/address is in this space.
1452   inline bool Contains(Address a);
1453   bool Contains(HeapObject* o) { return Contains(o->address()); }
1454 
1455   // Given an address occupied by a live object, return that object if it is
1456   // in this space, or Failure::Exception() if it is not. The implementation
1457   // iterates over objects in the page containing the address, the cost is
1458   // linear in the number of objects in the page. It may be slow.
1459   MUST_USE_RESULT MaybeObject* FindObject(Address addr);
1460 
1461   // Prepares for a mark-compact GC.
1462   virtual void PrepareForMarkCompact();
1463 
1464   // Current capacity without growing (Size() + Available()).
1465   intptr_t Capacity() { return accounting_stats_.Capacity(); }
1466 
1467   // Total amount of memory committed for this space.  For paged
1468   // spaces this equals the capacity.
1469   intptr_t CommittedMemory() { return Capacity(); }
1470 
1471   // Sets the capacity, the available space and the wasted space to zero.
1472   // The stats are rebuilt during sweeping by adding each page to the
1473   // capacity and the size when it is encountered.  As free spaces are
1474   // discovered during the sweeping they are subtracted from the size and added
1475   // to the available and wasted totals.
1476   void ClearStats() {
1477     accounting_stats_.ClearSizeWaste();
1478   }
1479 
1480   // Available bytes without growing.  These are the bytes on the free list.
1481   // The bytes in the linear allocation area are not included in this total
1482   // because updating the stats would slow down allocation.  New pages are
1483   // immediately added to the free list so they show up here.
1484   intptr_t Available() { return free_list_.available(); }
1485 
1486   // Allocated bytes in this space.  Garbage bytes that were not found due to
1487   // lazy sweeping are counted as being allocated!  The bytes in the current
1488   // linear allocation area (between top and limit) are also counted here.
1489   virtual intptr_t Size() { return accounting_stats_.Size(); }
1490 
1491   // As Size(), but the bytes in lazily swept pages are estimated and the bytes
1492   // in the current linear allocation area are not included.
1493   virtual intptr_t SizeOfObjects() {
1494     ASSERT(!IsSweepingComplete() || (unswept_free_bytes_ == 0));
1495     return Size() - unswept_free_bytes_ - (limit() - top());
1496   }
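  // Illustrative sketch (hypothetical numbers, formula taken from above):
  //   Size()               == 1048576   // 1 MB, includes the linear area
  //   unswept_free_bytes_  ==  102400   // estimate from lazily swept pages
  //   limit() - top()      ==    8192   // unused tail of the linear area
  //   SizeOfObjects()      ==  937984   // 1048576 - 102400 - 8192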
1497 
1498   // Wasted bytes in this space.  These are just the bytes that were thrown away
1499   // due to being too small to use for allocation.  They do not include the
1500   // free bytes that were not found at all due to lazy sweeping.
1501   virtual intptr_t Waste() { return accounting_stats_.Waste(); }
1502 
1503   // Returns the allocation pointer in this space.
1504   Address top() { return allocation_info_.top; }
1505   Address limit() { return allocation_info_.limit; }
1506 
1507   // Allocate the requested number of bytes in the space if possible, return a
1508   // failure object if not.
1509   MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
1510 
1511   virtual bool ReserveSpace(int bytes);
1512 
1513   // Give a block of memory to the space's free list.  It might be added to
1514   // the free list or accounted as waste.
1515   // Returns the number of bytes that were actually made available for later
1516   // allocation, i.e. the freed size minus any bytes accounted as waste.
1517   int Free(Address start, int size_in_bytes) {
1518     int wasted = free_list_.Free(start, size_in_bytes);
1519     accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
1520     return size_in_bytes - wasted;
1521   }
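  // Illustrative sketch (hypothetical sizes): returning a 4 KB swept block
  // that the free list accepts in full yields wasted == 0, so the allocated
  // byte count drops by 4096 and the call returns 4096.  A block smaller than
  // the smallest free-list category may instead be reported entirely as waste.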
1522 
1523   // Set space allocation info.
1524   void SetTop(Address top, Address limit) {
1525     ASSERT(top == limit ||
1526            Page::FromAddress(top) == Page::FromAddress(limit - 1));
1527     allocation_info_.top = top;
1528     allocation_info_.limit = limit;
1529   }
1530 
1531   void Allocate(int bytes) {
1532     accounting_stats_.AllocateBytes(bytes);
1533   }
1534 
1535   void IncreaseCapacity(int size) {
1536     accounting_stats_.ExpandSpace(size);
1537   }
1538 
1539   // Releases an unused page and shrinks the space.
1540   void ReleasePage(Page* page);
1541 
1542   // Releases all of the unused pages.
1543   void ReleaseAllUnusedPages();
1544 
1545   // The dummy page that anchors the linked list of pages.
1546   Page* anchor() { return &anchor_; }
1547 
1548 #ifdef DEBUG
1549   // Print meta info and objects in this space.
1550   virtual void Print();
1551 
1552   // Verify integrity of this space.
1553   virtual void Verify(ObjectVisitor* visitor);
1554 
1555   // Reports statistics for the space
1556   void ReportStatistics();
1557 
1558   // Overridden by subclasses to verify space-specific object
1559   // properties (e.g., only maps or free-list nodes are in map space).
1560   virtual void VerifyObject(HeapObject* obj) {}
1561 
1562   // Report code object related statistics
1563   void CollectCodeStatistics();
1564   static void ReportCodeStatistics();
1565   static void ResetCodeStatistics();
1566 #endif
1567 
1568   bool was_swept_conservatively() { return was_swept_conservatively_; }
1569   void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; }
1570 
1571   // Evacuation candidates are swept by evacuator.  Needs to return a valid
1572   // result before _and_ after evacuation has finished.
1573   static bool ShouldBeSweptLazily(Page* p) {
1574     return !p->IsEvacuationCandidate() &&
1575            !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
1576            !p->WasSweptPrecisely();
1577   }
1578 
1579   void SetPagesToSweep(Page* first) {
1580     ASSERT(unswept_free_bytes_ == 0);
1581     if (first == &anchor_) first = NULL;
1582     first_unswept_page_ = first;
1583   }
1584 
1585   void IncrementUnsweptFreeBytes(int by) {
1586     unswept_free_bytes_ += by;
1587   }
1588 
1589   void IncreaseUnsweptFreeBytes(Page* p) {
1590     ASSERT(ShouldBeSweptLazily(p));
1591     unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
1592   }
1593 
1594   void DecreaseUnsweptFreeBytes(Page* p) {
1595     ASSERT(ShouldBeSweptLazily(p));
1596     unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
1597   }
1598 
1599   bool AdvanceSweeper(intptr_t bytes_to_sweep);
1600 
1601   bool IsSweepingComplete() {
1602     return !first_unswept_page_->is_valid();
1603   }
1604 
1605   Page* FirstPage() { return anchor_.next_page(); }
1606   Page* LastPage() { return anchor_.prev_page(); }
1607 
1608   void CountFreeListItems(Page* p, FreeList::SizeStats* sizes) {
1609     free_list_.CountFreeListItems(p, sizes);
1610   }
1611 
1612   void EvictEvacuationCandidatesFromFreeLists();
1613 
1614   bool CanExpand();
1615 
1616   // Returns the number of total pages in this space.
1617   int CountTotalPages();
1618 
1619   // Return size of allocatable area on a page in this space.
1620   inline int AreaSize() {
1621     return area_size_;
1622   }
1623 
1624  protected:
1625   int area_size_;
1626 
1627   // Maximum capacity of this space.
1628   intptr_t max_capacity_;
1629 
1630   // Accounting information for this space.
1631   AllocationStats accounting_stats_;
1632 
1633   // The dummy page that anchors the double linked list of pages.
1634   Page anchor_;
1635 
1636   // The space's free list.
1637   FreeList free_list_;
1638 
1639   // Normal allocation information.
1640   AllocationInfo allocation_info_;
1641 
1642   // Bytes of each page that cannot be allocated.  Possibly non-zero
1643   // for pages in spaces with only fixed-size objects.  Always zero
1644   // for pages in spaces with variable sized objects (those pages are
1645   // padded with free-list nodes).
1646   int page_extra_;
1647 
1648   bool was_swept_conservatively_;
1649 
1650   // The first page to be swept when the lazy sweeper advances. Is set
1651   // to NULL when all pages have been swept.
1652   Page* first_unswept_page_;
1653 
1654   // The number of free bytes which could be reclaimed by advancing the
1655   // lazy sweeper.  This is only an estimation because lazy sweeping is
1656   // done conservatively.
1657   intptr_t unswept_free_bytes_;
1658 
1659   // Expands the space by allocating a fixed number of pages. Returns false if
1660   // it cannot allocate requested number of pages from OS, or if the hard heap
1661   // size limit has been hit.
1662   bool Expand();
1663 
1664   // Generic fast case allocation function that tries linear allocation at the
1665   // address denoted by top in allocation_info_.
1666   inline HeapObject* AllocateLinearly(int size_in_bytes);
1667 
1668   // Slow path of AllocateRaw.  This function is space-dependent.
1669   MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
1670 
1671   friend class PageIterator;
1672 };
1673 
1674 
1675 class NumberAndSizeInfo BASE_EMBEDDED {
1676  public:
1677   NumberAndSizeInfo() : number_(0), bytes_(0) {}
1678 
1679   int number() const { return number_; }
1680   void increment_number(int num) { number_ += num; }
1681 
1682   int bytes() const { return bytes_; }
1683   void increment_bytes(int size) { bytes_ += size; }
1684 
1685   void clear() {
1686     number_ = 0;
1687     bytes_ = 0;
1688   }
1689 
1690  private:
1691   int number_;
1692   int bytes_;
1693 };
1694 
1695 
1696 // HistogramInfo class for recording a single "bar" of a histogram.  This
1697 // class is used for collecting statistics to print to the log file.
1698 class HistogramInfo: public NumberAndSizeInfo {
1699  public:
1700   HistogramInfo() : NumberAndSizeInfo() {}
1701 
1702   const char* name() { return name_; }
1703   void set_name(const char* name) { name_ = name; }
1704 
1705  private:
1706   const char* name_;
1707 };
1708 
1709 
1710 enum SemiSpaceId {
1711   kFromSpace = 0,
1712   kToSpace = 1
1713 };
1714 
1715 
1716 class SemiSpace;
1717 
1718 
1719 class NewSpacePage : public MemoryChunk {
1720  public:
1721   // GC related flags copied from from-space to to-space when
1722   // flipping semispaces.
1723   static const intptr_t kCopyOnFlipFlagsMask =
1724     (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
1725     (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
1726     (1 << MemoryChunk::SCAN_ON_SCAVENGE);
1727 
1728   static const int kAreaSize = Page::kNonCodeObjectAreaSize;
1729 
1730   inline NewSpacePage* next_page() const {
1731     return static_cast<NewSpacePage*>(next_chunk());
1732   }
1733 
1734   inline void set_next_page(NewSpacePage* page) {
1735     set_next_chunk(page);
1736   }
1737 
1738   inline NewSpacePage* prev_page() const {
1739     return static_cast<NewSpacePage*>(prev_chunk());
1740   }
1741 
1742   inline void set_prev_page(NewSpacePage* page) {
1743     set_prev_chunk(page);
1744   }
1745 
1746   SemiSpace* semi_space() {
1747     return reinterpret_cast<SemiSpace*>(owner());
1748   }
1749 
1750   bool is_anchor() { return !this->InNewSpace(); }
1751 
1752   static bool IsAtStart(Address addr) {
1753     return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
1754         == kObjectStartOffset;
1755   }
1756 
1757   static bool IsAtEnd(Address addr) {
1758     return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
1759   }
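  // Illustrative sketch: for a hypothetical page-aligned address P,
  //   IsAtStart(P + kObjectStartOffset)  evaluates to true, and
  //   IsAtEnd(P)                         evaluates to true,
  // since both checks only look at an address's offset within its page.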
1760 
1761   Address address() {
1762     return reinterpret_cast<Address>(this);
1763   }
1764 
1765   // Finds the NewSpacePage containing the given address.
1766   static inline NewSpacePage* FromAddress(Address address_in_page) {
1767     Address page_start =
1768         reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
1769                                   ~Page::kPageAlignmentMask);
1770     NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
1771     return page;
1772   }
1773 
1774   // Find the page for a limit address. A limit address is either an address
1775   // inside a page, or the address right after the last byte of a page.
1776   static inline NewSpacePage* FromLimit(Address address_limit) {
1777     return NewSpacePage::FromAddress(address_limit - 1);
1778   }
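  // Illustrative sketch: a limit equal to the page-aligned start of the next
  // page (one past the last byte of this page) maps back to this page, because
  // FromLimit() steps back one byte before masking:
  //   FromLimit(P + Page::kPageSize) == FromAddress(P)   // for a page start P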
1779 
1780  private:
1781   // Create a NewSpacePage object that is only used as anchor
1782   // for the doubly-linked list of real pages.
1783   explicit NewSpacePage(SemiSpace* owner) {
1784     InitializeAsAnchor(owner);
1785   }
1786 
1787   static NewSpacePage* Initialize(Heap* heap,
1788                                   Address start,
1789                                   SemiSpace* semi_space);
1790 
1791   // Initialize a fake NewSpacePage used as sentinel at the ends
1792   // of a doubly-linked list of real NewSpacePages.
1793   // Only uses the prev/next links, and sets flags to not be in new-space.
1794   void InitializeAsAnchor(SemiSpace* owner);
1795 
1796   friend class SemiSpace;
1797   friend class SemiSpaceIterator;
1798 };
1799 
1800 
1801 // -----------------------------------------------------------------------------
1802 // SemiSpace in young generation
1803 //
1804 // A semispace is a contiguous chunk of memory holding page-like memory
1805 // chunks. The mark-compact collector  uses the memory of the first page in
1806 // the from space as a marking stack when tracing live objects.
1807 
1808 class SemiSpace : public Space {
1809  public:
1810   // Constructor.
1811   SemiSpace(Heap* heap, SemiSpaceId semispace)
1812     : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
1813       start_(NULL),
1814       age_mark_(NULL),
1815       id_(semispace),
1816       anchor_(this),
1817       current_page_(NULL) { }
1818 
1819   // Sets up the semispace using the given chunk.
1820   void SetUp(Address start, int initial_capacity, int maximum_capacity);
1821 
1822   // Tear down the space.  Heap memory was not allocated by the space, so it
1823   // is not deallocated here.
1824   void TearDown();
1825 
1826   // True if the space has been set up but not torn down.
1827   bool HasBeenSetUp() { return start_ != NULL; }
1828 
1829   // Grow the semispace to the new capacity.  The new capacity
1830   // requested must be larger than the current capacity and less than
1831   // the maximum capacity.
1832   bool GrowTo(int new_capacity);
1833 
1834   // Shrinks the semispace to the new capacity.  The new capacity
1835   // requested must be more than the amount of used memory in the
1836   // semispace and less than the current capacity.
1837   bool ShrinkTo(int new_capacity);
1838 
1839   // Returns the start address of the first page of the space.
1840   Address space_start() {
1841     ASSERT(anchor_.next_page() != &anchor_);
1842     return anchor_.next_page()->area_start();
1843   }
1844 
1845   // Returns the start address of the current page of the space.
1846   Address page_low() {
1847     return current_page_->area_start();
1848   }
1849 
1850   // Returns one past the end address of the space.
1851   Address space_end() {
1852     return anchor_.prev_page()->area_end();
1853   }
1854 
1855   // Returns one past the end address of the current page of the space.
1856   Address page_high() {
1857     return current_page_->area_end();
1858   }
1859 
1860   bool AdvancePage() {
1861     NewSpacePage* next_page = current_page_->next_page();
1862     if (next_page == anchor()) return false;
1863     current_page_ = next_page;
1864     return true;
1865   }
1866 
1867   // Resets the space to using the first page.
1868   void Reset();
1869 
1870   // Age mark accessors.
1871   Address age_mark() { return age_mark_; }
1872   void set_age_mark(Address mark);
1873 
1874   // True if the address is in the address range of this semispace (not
1875   // necessarily below the allocation pointer).
1876   bool Contains(Address a) {
1877     return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1878            == reinterpret_cast<uintptr_t>(start_);
1879   }
1880 
1881   // True if the object is a heap object in the address range of this
1882   // semispace (not necessarily below the allocation pointer).
1883   bool Contains(Object* o) {
1884     return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1885   }
1886 
1887   // If we don't have these here then SemiSpace will be abstract.  However
1888   // they should never be called.
1889   virtual intptr_t Size() {
1890     UNREACHABLE();
1891     return 0;
1892   }
1893 
1894   virtual bool ReserveSpace(int bytes) {
1895     UNREACHABLE();
1896     return false;
1897   }
1898 
1899   bool is_committed() { return committed_; }
1900   bool Commit();
1901   bool Uncommit();
1902 
1903   NewSpacePage* first_page() { return anchor_.next_page(); }
1904   NewSpacePage* current_page() { return current_page_; }
1905 
1906 #ifdef DEBUG
1907   virtual void Print();
1908   virtual void Verify();
1909   // Validate a range of addresses in a SemiSpace.
1910   // The "from" address must be on a page prior to the "to" address,
1911   // in the linked page order, or it must be earlier on the same page.
1912   static void AssertValidRange(Address from, Address to);
1913 #else
1914   // Do nothing.
1915   inline static void AssertValidRange(Address from, Address to) {}
1916 #endif
1917 
1918   // Returns the current capacity of the semi space.
1919   int Capacity() { return capacity_; }
1920 
1921   // Returns the maximum capacity of the semi space.
1922   int MaximumCapacity() { return maximum_capacity_; }
1923 
1924   // Returns the initial capacity of the semi space.
1925   int InitialCapacity() { return initial_capacity_; }
1926 
1927   SemiSpaceId id() { return id_; }
1928 
1929   static void Swap(SemiSpace* from, SemiSpace* to);
1930 
1931  private:
1932   // Flips the semispace between being from-space and to-space.
1933   // Copies the flags into the masked positions on all pages in the space.
1934   void FlipPages(intptr_t flags, intptr_t flag_mask);
1935 
1936   NewSpacePage* anchor() { return &anchor_; }
1937 
1938   // The current and maximum capacity of the space.
1939   int capacity_;
1940   int maximum_capacity_;
1941   int initial_capacity_;
1942 
1943   // The start address of the space.
1944   Address start_;
1945   // Used to govern object promotion during mark-compact collection.
1946   Address age_mark_;
1947 
1948   // Masks and comparison values to test for containment in this semispace.
1949   uintptr_t address_mask_;
1950   uintptr_t object_mask_;
1951   uintptr_t object_expected_;
1952 
1953   bool committed_;
1954   SemiSpaceId id_;
1955 
1956   NewSpacePage anchor_;
1957   NewSpacePage* current_page_;
1958 
1959   friend class SemiSpaceIterator;
1960   friend class NewSpacePageIterator;
1961  public:
1962   TRACK_MEMORY("SemiSpace")
1963 };
1964 
1965 
1966 // A SemiSpaceIterator is an ObjectIterator that iterates over the active
1967 // semispace of the heap's new space.  It iterates over the objects in the
1968 // semispace from a given start address (defaulting to the bottom of the
1969 // semispace) to the top of the semispace.  New objects allocated after the
1970 // iterator is created are not iterated.
1971 class SemiSpaceIterator : public ObjectIterator {
1972  public:
1973   // Create an iterator over the objects in the given space.  If no start
1974   // address is given, the iterator starts from the bottom of the space.  If
1975   // no size function is given, the iterator calls Object::Size().
1976 
1977   // Iterate over all of allocated to-space.
1978   explicit SemiSpaceIterator(NewSpace* space);
1979   // Iterate over all of allocated to-space, with a custom size function.
1980   SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
1981   // Iterate over part of allocated to-space, from start to the end
1982   // of allocation.
1983   SemiSpaceIterator(NewSpace* space, Address start);
1984   // Iterate from one address to another in the same semi-space.
1985   SemiSpaceIterator(Address from, Address to);
1986 
1987   HeapObject* Next() {
1988     if (current_ == limit_) return NULL;
1989     if (NewSpacePage::IsAtEnd(current_)) {
1990       NewSpacePage* page = NewSpacePage::FromLimit(current_);
1991       page = page->next_page();
1992       ASSERT(!page->is_anchor());
1993       current_ = page->area_start();
1994       if (current_ == limit_) return NULL;
1995     }
1996 
1997     HeapObject* object = HeapObject::FromAddress(current_);
1998     int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
1999 
2000     current_ += size;
2001     return object;
2002   }
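  // Illustrative usage sketch (the `new_space` pointer is hypothetical):
  //   SemiSpaceIterator it(new_space);
  //   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
  //     // ... inspect `object`; allocations made after the iterator was
  //     // created are not visited.
  //   }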
2003 
2004   // Implementation of the ObjectIterator functions.
2005   virtual HeapObject* next_object() { return Next(); }
2006 
2007  private:
2008   void Initialize(Address start,
2009                   Address end,
2010                   HeapObjectCallback size_func);
2011 
2012   // The current iteration point.
2013   Address current_;
2014   // The end of iteration.
2015   Address limit_;
2016   // The callback function.
2017   HeapObjectCallback size_func_;
2018 };
2019 
2020 
2021 // -----------------------------------------------------------------------------
2022 // A PageIterator iterates the pages in a semi-space.
2023 class NewSpacePageIterator BASE_EMBEDDED {
2024  public:
2025   // Make an iterator that runs over all pages in to-space.
2026   explicit inline NewSpacePageIterator(NewSpace* space);
2027 
2028   // Make an iterator that runs over all pages in the given semispace,
2029   // even those not used in allocation.
2030   explicit inline NewSpacePageIterator(SemiSpace* space);
2031 
2032   // Make iterator that iterates from the page containing start
2033   // to the page that contains limit in the same semispace.
2034   inline NewSpacePageIterator(Address start, Address limit);
2035 
2036   inline bool has_next();
2037   inline NewSpacePage* next();
2038 
2039  private:
2040   NewSpacePage* prev_page_;  // Previous page returned.
2041   // Next page that will be returned.  Cached here so that we can use this
2042   // iterator for operations that deallocate pages.
2043   NewSpacePage* next_page_;
2044   // Last page returned.
2045   NewSpacePage* last_page_;
2046 };
2047 
2048 
2049 // -----------------------------------------------------------------------------
2050 // The young generation space.
2051 //
2052 // The new space consists of a contiguous pair of semispaces.  It simply
2053 // forwards most functions to the appropriate semispace.
2054 
2055 class NewSpace : public Space {
2056  public:
2057   // Constructor.
2058   explicit NewSpace(Heap* heap)
2059     : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2060       to_space_(heap, kToSpace),
2061       from_space_(heap, kFromSpace),
2062       reservation_(),
2063       inline_allocation_limit_step_(0) {}
2064 
2065   // Sets up the new space using the given chunk.
2066   bool SetUp(int reserved_semispace_size_, int max_semispace_size);
2067 
2068   // Tears down the space.  Heap memory was not allocated by the space, so it
2069   // is not deallocated here.
2070   void TearDown();
2071 
2072   // True if the space has been set up but not torn down.
2073   bool HasBeenSetUp() {
2074     return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
2075   }
2076 
2077   // Flip the pair of spaces.
2078   void Flip();
2079 
2080   // Grow the capacity of the semispaces.  Assumes that they are not at
2081   // their maximum capacity.
2082   void Grow();
2083 
2084   // Shrink the capacity of the semispaces.
2085   void Shrink();
2086 
2087   // True if the address or object lies in the address range of either
2088   // semispace (not necessarily below the allocation pointer).
2089   bool Contains(Address a) {
2090     return (reinterpret_cast<uintptr_t>(a) & address_mask_)
2091         == reinterpret_cast<uintptr_t>(start_);
2092   }
2093 
2094   bool Contains(Object* o) {
2095     Address a = reinterpret_cast<Address>(o);
2096     return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
2097   }
2098 
2099   // Return the allocated bytes in the active semispace.
2100   virtual intptr_t Size() {
2101     return pages_used_ * NewSpacePage::kAreaSize +
2102         static_cast<int>(top() - to_space_.page_low());
2103   }
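  // Illustrative sketch (hypothetical numbers): with 3 completely used pages
  // and the allocation top 4096 bytes into the current page,
  //   Size() == 3 * NewSpacePage::kAreaSize + 4096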
2104 
2105   // The same, but returning an int.  We have to keep the variant that returns
2106   // intptr_t because it is inherited, but when we know we are dealing with the
2107   // new space, which cannot get as big as the other spaces, this one is useful:
2108   int SizeAsInt() { return static_cast<int>(Size()); }
2109 
2110   // Return the current capacity of a semispace.
2111   intptr_t EffectiveCapacity() {
2112     SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
2113     return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
2114   }
2115 
2116   // Return the current capacity of a semispace.
2117   intptr_t Capacity() {
2118     ASSERT(to_space_.Capacity() == from_space_.Capacity());
2119     return to_space_.Capacity();
2120   }
2121 
2122   // Return the total amount of memory committed for new space.
2123   intptr_t CommittedMemory() {
2124     if (from_space_.is_committed()) return 2 * Capacity();
2125     return Capacity();
2126   }
2127 
2128   // Return the available bytes without growing.
2129   intptr_t Available() {
2130     return Capacity() - Size();
2131   }
2132 
2133   // Return the maximum capacity of a semispace.
2134   int MaximumCapacity() {
2135     ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
2136     return to_space_.MaximumCapacity();
2137   }
2138 
2139   // Returns the initial capacity of a semispace.
2140   int InitialCapacity() {
2141     ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
2142     return to_space_.InitialCapacity();
2143   }
2144 
2145   // Return the address of the allocation pointer in the active semispace.
2146   Address top() {
2147     ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top));
2148     return allocation_info_.top;
2149   }
2150   // Return the address of the first object in the active semispace.
2151   Address bottom() { return to_space_.space_start(); }
2152 
2153   // Get the age mark of the inactive semispace.
2154   Address age_mark() { return from_space_.age_mark(); }
2155   // Set the age mark in the active semispace.
2156   void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2157 
2158   // The start address of the space and a bit mask. Anding an address in the
2159   // new space with the mask will result in the start address.
2160   Address start() { return start_; }
2161   uintptr_t mask() { return address_mask_; }
2162 
2163   INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
2164     ASSERT(Contains(addr));
2165     ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) ||
2166            IsAligned(OffsetFrom(addr) - 1, kPointerSize));
2167     return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
2168   }
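  // Illustrative sketch: assuming a 32-bit build (kPointerSizeLog2 == 2),
  // an address 256 bytes past start_ maps to mark-bit index 256 >> 2 == 64,
  // i.e. one mark-bit index per pointer-aligned word of new space.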
2169 
2170   INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
2171     return reinterpret_cast<Address>(index << kPointerSizeLog2);
2172   }
2173 
2174   // The allocation top and limit addresses.
2175   Address* allocation_top_address() { return &allocation_info_.top; }
2176   Address* allocation_limit_address() { return &allocation_info_.limit; }
2177 
2178   MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
2179 
2180   // Reset the allocation pointer to the beginning of the active semispace.
2181   void ResetAllocationInfo();
2182 
2183   void LowerInlineAllocationLimit(intptr_t step) {
2184     inline_allocation_limit_step_ = step;
2185     if (step == 0) {
2186       allocation_info_.limit = to_space_.page_high();
2187     } else {
2188       allocation_info_.limit = Min(
2189           allocation_info_.top + inline_allocation_limit_step_,
2190           allocation_info_.limit);
2191     }
2192     top_on_previous_step_ = allocation_info_.top;
2193   }
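  // Illustrative sketch (hypothetical step size): LowerInlineAllocationLimit(2048)
  // clamps allocation_info_.limit to at most top + 2048, so inline allocation
  // falls back to the runtime at least once per 2 KB allocated; calling it with
  // step == 0 restores the limit to the end of the current to-space page.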
2194 
2195   // Get the extent of the inactive semispace (for use as a marking stack,
2196   // or to zap it). Notice: space-addresses are not necessarily on the
2197   // same page, so FromSpaceStart() might be above FromSpaceEnd().
2198   Address FromSpacePageLow() { return from_space_.page_low(); }
2199   Address FromSpacePageHigh() { return from_space_.page_high(); }
2200   Address FromSpaceStart() { return from_space_.space_start(); }
2201   Address FromSpaceEnd() { return from_space_.space_end(); }
2202 
2203   // Get the extent of the active semispace's pages' memory.
2204   Address ToSpaceStart() { return to_space_.space_start(); }
2205   Address ToSpaceEnd() { return to_space_.space_end(); }
2206 
2207   inline bool ToSpaceContains(Address address) {
2208     return to_space_.Contains(address);
2209   }
2210   inline bool FromSpaceContains(Address address) {
2211     return from_space_.Contains(address);
2212   }
2213 
2214   // True if the object is a heap object in the address range of the
2215   // respective semispace (not necessarily below the allocation pointer of the
2216   // semispace).
2217   inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
2218   inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
2219 
2220   // Try to switch the active semispace to a new, empty, page.
2221   // Returns false if this isn't possible or reasonable (i.e., there
2222   // are no pages, or the current page is already empty), or true
2223   // if successful.
2224   bool AddFreshPage();
2225 
2226   virtual bool ReserveSpace(int bytes);
2227 
2228   // Resizes a sequential string which must be the most recent thing that was
2229   // allocated in new space.
2230   template <typename StringType>
2231   inline void ShrinkStringAtAllocationBoundary(String* string, int len);
2232 
2233 #ifdef DEBUG
2234   // Verify the active semispace.
2235   virtual void Verify();
2236   // Print the active semispace.
2237   virtual void Print() { to_space_.Print(); }
2238 #endif
2239 
2240   // Iterates the active semispace to collect statistics.
2241   void CollectStatistics();
2242   // Reports previously collected statistics of the active semispace.
2243   void ReportStatistics();
2244   // Clears previously collected statistics.
2245   void ClearHistograms();
2246 
2247   // Record the allocation or promotion of a heap object.  Note that we don't
2248   // record every single allocation, but only those that happen in the
2249   // to space during a scavenge GC.
2250   void RecordAllocation(HeapObject* obj);
2251   void RecordPromotion(HeapObject* obj);
2252 
2253   // Return whether the operation succeeded.
2254   bool CommitFromSpaceIfNeeded() {
2255     if (from_space_.is_committed()) return true;
2256     return from_space_.Commit();
2257   }
2258 
2259   bool UncommitFromSpace() {
2260     if (!from_space_.is_committed()) return true;
2261     return from_space_.Uncommit();
2262   }
2263 
2264   inline intptr_t inline_allocation_limit_step() {
2265     return inline_allocation_limit_step_;
2266   }
2267 
2268   SemiSpace* active_space() { return &to_space_; }
2269 
2270  private:
2271   // Update allocation info to match the current to-space page.
2272   void UpdateAllocationInfo();
2273 
2274   Address chunk_base_;
2275   uintptr_t chunk_size_;
2276 
2277   // The semispaces.
2278   SemiSpace to_space_;
2279   SemiSpace from_space_;
2280   VirtualMemory reservation_;
2281   int pages_used_;
2282 
2283   // Start address and bit mask for containment testing.
2284   Address start_;
2285   uintptr_t address_mask_;
2286   uintptr_t object_mask_;
2287   uintptr_t object_expected_;
2288 
2289   // Allocation pointer and limit for normal allocation and allocation during
2290   // mark-compact collection.
2291   AllocationInfo allocation_info_;
2292 
2293   // When incremental marking is active we will set allocation_info_.limit
2294   // to be lower than the actual limit and then will gradually increase it
2295   // in steps to guarantee that we do incremental marking steps even
2296   // when all allocation is performed from inlined generated code.
2297   intptr_t inline_allocation_limit_step_;
2298 
2299   Address top_on_previous_step_;
2300 
2301   HistogramInfo* allocated_histogram_;
2302   HistogramInfo* promoted_histogram_;
2303 
2304   MUST_USE_RESULT MaybeObject* SlowAllocateRaw(int size_in_bytes);
2305 
2306   friend class SemiSpaceIterator;
2307 
2308  public:
2309   TRACK_MEMORY("NewSpace")
2310 };
2311 
2312 
2313 // -----------------------------------------------------------------------------
2314 // Old object space (excluding map objects)
2315 
2316 class OldSpace : public PagedSpace {
2317  public:
2318   // Creates an old space object with a given maximum capacity.
2319   // The constructor does not allocate pages from OS.
2320   OldSpace(Heap* heap,
2321            intptr_t max_capacity,
2322            AllocationSpace id,
2323            Executability executable)
2324       : PagedSpace(heap, max_capacity, id, executable) {
2325     page_extra_ = 0;
2326   }
2327 
2328   // The limit of allocation for a page in this space.
2329   virtual Address PageAllocationLimit(Page* page) {
2330     return page->area_end();
2331   }
2332 
2333  public:
2334   TRACK_MEMORY("OldSpace")
2335 };
2336 
2337 
2338 // For contiguous spaces, top should be in the space (or at the end) and limit
2339 // should be the end of the space.
2340 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
2341   SLOW_ASSERT((space).page_low() <= (info).top             \
2342               && (info).top <= (space).page_high()         \
2343               && (info).limit <= (space).page_high())
2344 
2345 
2346 // -----------------------------------------------------------------------------
2347 // Old space for objects of a fixed size
2348 
2349 class FixedSpace : public PagedSpace {
2350  public:
2351   FixedSpace(Heap* heap,
2352              intptr_t max_capacity,
2353              AllocationSpace id,
2354              int object_size_in_bytes,
2355              const char* name)
2356       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
2357         object_size_in_bytes_(object_size_in_bytes),
2358         name_(name) {
2359     page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
2360   }
2361 
2362   // The limit of allocation for a page in this space.
2363   virtual Address PageAllocationLimit(Page* page) {
2364     return page->area_end() - page_extra_;
2365   }
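  // Illustrative sketch (hypothetical sizes): if the per-page object area were
  // 8000 bytes and objects were 88 bytes each, page_extra_ would be
  // 8000 % 88 == 80, so 90 objects fit on a page and PageAllocationLimit()
  // stops allocation 80 bytes short of area_end().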
2366 
2367   int object_size_in_bytes() { return object_size_in_bytes_; }
2368 
2369   // Prepares for a mark-compact GC.
2370   virtual void PrepareForMarkCompact();
2371 
2372  protected:
2373   void ResetFreeList() {
2374     free_list_.Reset();
2375   }
2376 
2377  private:
2378   // The size of objects in this space.
2379   int object_size_in_bytes_;
2380 
2381   // The name of this space.
2382   const char* name_;
2383 };
2384 
2385 
2386 // -----------------------------------------------------------------------------
2387 // Old space for all map objects
2388 
2389 class MapSpace : public FixedSpace {
2390  public:
2391   // Creates a map space object with a maximum capacity.
2392   MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2393       : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
2394         max_map_space_pages_(kMaxMapPageIndex - 1) {
2395   }
2396 
2397   // Given an index, returns the page address.
2398   // TODO(1600): this limit is artificial just to keep code compilable
2399   static const int kMaxMapPageIndex = 1 << 16;
2400 
2401   virtual int RoundSizeDownToObjectAlignment(int size) {
2402     if (IsPowerOf2(Map::kSize)) {
2403       return RoundDown(size, Map::kSize);
2404     } else {
2405       return (size / Map::kSize) * Map::kSize;
2406     }
2407   }
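  // Illustrative sketch (hypothetical size): if Map::kSize were 88 bytes,
  // RoundSizeDownToObjectAlignment(1000) would take the non-power-of-two
  // branch and return (1000 / 88) * 88 == 968, the largest multiple of the
  // object size that fits in the requested size.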
2408 
2409  protected:
2410 #ifdef DEBUG
2411   virtual void VerifyObject(HeapObject* obj);
2412 #endif
2413 
2414  private:
2415   static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;
2416 
2417   // Do map space compaction if there is a page gap.
2418   int CompactionThreshold() {
2419     return kMapsPerPage * (max_map_space_pages_ - 1);
2420   }
2421 
2422   const int max_map_space_pages_;
2423 
2424  public:
2425   TRACK_MEMORY("MapSpace")
2426 };
2427 
2428 
2429 // -----------------------------------------------------------------------------
2430 // Old space for all global object property cell objects
2431 
2432 class CellSpace : public FixedSpace {
2433  public:
2434   // Creates a property cell space object with a maximum capacity.
2435   CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2436       : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
2437   {}
2438 
2439   virtual int RoundSizeDownToObjectAlignment(int size) {
2440     if (IsPowerOf2(JSGlobalPropertyCell::kSize)) {
2441       return RoundDown(size, JSGlobalPropertyCell::kSize);
2442     } else {
2443       return (size / JSGlobalPropertyCell::kSize) * JSGlobalPropertyCell::kSize;
2444     }
2445   }
2446 
2447  protected:
2448 #ifdef DEBUG
2449   virtual void VerifyObject(HeapObject* obj);
2450 #endif
2451 
2452  public:
2453   TRACK_MEMORY("CellSpace")
2454 };
2455 
2456 
2457 // -----------------------------------------------------------------------------
2458 // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
2459 // the large object space. A large object is allocated from the OS heap with
2460 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2461 // A large object always starts at offset Page::kObjectStartOffset within a page.
2462 // Large objects do not move during garbage collections.
2463 
2464 class LargeObjectSpace : public Space {
2465  public:
2466   LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
2467   virtual ~LargeObjectSpace() {}
2468 
2469   // Initializes internal data structures.
2470   bool SetUp();
2471 
2472   // Releases internal resources, frees objects in this space.
2473   void TearDown();
2474 
2475   static intptr_t ObjectSizeFor(intptr_t chunk_size) {
2476     if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2477     return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
2478   }
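  // Illustrative sketch: for a chunk of
  //   Page::kPageSize + Page::kObjectStartOffset + 4096
  // bytes, ObjectSizeFor() returns 4096, while any chunk no larger than
  // Page::kPageSize + Page::kObjectStartOffset yields 0.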
2479 
2480   // Shared implementation of AllocateRaw, AllocateRawCode and
2481   // AllocateRawFixedArray.
2482   MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
2483                                            Executability executable);
2484 
2485   // Available bytes for objects in this space.
2486   inline intptr_t Available();
2487 
2488   virtual intptr_t Size() {
2489     return size_;
2490   }
2491 
2492   virtual intptr_t SizeOfObjects() {
2493     return objects_size_;
2494   }
2495 
2496   int PageCount() {
2497     return page_count_;
2498   }
2499 
2500   // Finds an object for a given address, returns Failure::Exception()
2501   // if it is not found. The function iterates through all objects in this
2502   // space, may be slow.
2503   MaybeObject* FindObject(Address a);
2504 
2505   // Finds a large object page containing the given address, returns NULL
2506   // if such a page doesn't exist.
2507   LargePage* FindPage(Address a);
2508 
2509   // Frees unmarked objects.
2510   void FreeUnmarkedObjects();
2511 
2512   // Checks whether a heap object is in this space; O(1).
2513   bool Contains(HeapObject* obj);
2514 
2515   // Checks whether the space is empty.
2516   bool IsEmpty() { return first_page_ == NULL; }
2517 
2518   // See the comments for ReserveSpace in the Space class.  This has to be
2519   // called after ReserveSpace has been called on the paged spaces, since they
2520   // may use some memory, leaving less for large objects.
2521   virtual bool ReserveSpace(int bytes);
2522 
2523   LargePage* first_page() { return first_page_; }
2524 
2525 #ifdef DEBUG
2526   virtual void Verify();
2527   virtual void Print();
2528   void ReportStatistics();
2529   void CollectCodeStatistics();
2530 #endif
2531   // Checks whether an address is in the object area in this space.  It
2532   // iterates all objects in the space. May be slow.
2533   bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
2534 
2535  private:
2536   intptr_t max_capacity_;
2537   // The head of the linked list of large object chunks.
2538   LargePage* first_page_;
2539   intptr_t size_;  // allocated bytes
2540   int page_count_;  // number of chunks
2541   intptr_t objects_size_;  // size of objects
2542   // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
2543   HashMap chunk_map_;
2544 
2545   friend class LargeObjectIterator;
2546 
2547  public:
2548   TRACK_MEMORY("LargeObjectSpace")
2549 };
2550 
2551 
2552 class LargeObjectIterator: public ObjectIterator {
2553  public:
2554   explicit LargeObjectIterator(LargeObjectSpace* space);
2555   LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
2556 
2557   HeapObject* Next();
2558 
2559   // implementation of ObjectIterator.
2560   virtual HeapObject* next_object() { return Next(); }
2561 
2562  private:
2563   LargePage* current_;
2564   HeapObjectCallback size_func_;
2565 };
2566 
2567 
2568 // Iterates over the chunks (pages and large object pages) that can contain
2569 // pointers to new space.
2570 class PointerChunkIterator BASE_EMBEDDED {
2571  public:
2572   inline explicit PointerChunkIterator(Heap* heap);
2573 
2574   // Return NULL when the iterator is done.
2575   MemoryChunk* next() {
2576     switch (state_) {
2577       case kOldPointerState: {
2578         if (old_pointer_iterator_.has_next()) {
2579           return old_pointer_iterator_.next();
2580         }
2581         state_ = kMapState;
2582         // Fall through.
2583       }
2584       case kMapState: {
2585         if (map_iterator_.has_next()) {
2586           return map_iterator_.next();
2587         }
2588         state_ = kLargeObjectState;
2589         // Fall through.
2590       }
2591       case kLargeObjectState: {
2592         HeapObject* heap_object;
2593         do {
2594           heap_object = lo_iterator_.Next();
2595           if (heap_object == NULL) {
2596             state_ = kFinishedState;
2597             return NULL;
2598           }
2599           // Fixed arrays are the only pointer-containing objects in large
2600           // object space.
2601         } while (!heap_object->IsFixedArray());
2602         MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
2603         return answer;
2604       }
2605       case kFinishedState:
2606         return NULL;
2607       default:
2608         break;
2609     }
2610     UNREACHABLE();
2611     return NULL;
2612   }
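  // Illustrative usage sketch (the `heap` pointer is hypothetical):
  //   PointerChunkIterator it(heap);
  //   MemoryChunk* chunk;
  //   while ((chunk = it.next()) != NULL) {
  //     // `chunk` is a page or large-object chunk that may hold pointers
  //     // into new space.
  //   }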
2613 
2614 
2615  private:
2616   enum State {
2617     kOldPointerState,
2618     kMapState,
2619     kLargeObjectState,
2620     kFinishedState
2621   };
2622   State state_;
2623   PageIterator old_pointer_iterator_;
2624   PageIterator map_iterator_;
2625   LargeObjectIterator lo_iterator_;
2626 };
2627 
2628 
2629 #ifdef DEBUG
2630 struct CommentStatistic {
2631   const char* comment;
2632   int size;
2633   int count;
2634   void Clear() {
2635     comment = NULL;
2636     size = 0;
2637     count = 0;
2638   }
2639   // Must be small, since an iteration is used for lookup.
2640   static const int kMaxComments = 64;
2641 };
2642 #endif
2643 
2644 
2645 } }  // namespace v8::internal
2646 
2647 #endif  // V8_SPACES_H_
2648