• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_HEAP_SPACES_H_
6 #define V8_HEAP_SPACES_H_
7 
8 #include "src/allocation.h"
9 #include "src/base/atomicops.h"
10 #include "src/base/bits.h"
11 #include "src/base/platform/mutex.h"
12 #include "src/hashmap.h"
13 #include "src/list.h"
14 #include "src/log.h"
15 #include "src/utils.h"
16 
17 namespace v8 {
18 namespace internal {
19 
20 class Isolate;
21 
22 // -----------------------------------------------------------------------------
23 // Heap structures:
24 //
25 // A JS heap consists of a young generation, an old generation, and a large
26 // object space. The young generation is divided into two semispaces. A
27 // scavenger implements Cheney's copying algorithm. The old generation is
28 // separated into a map space and an old object space. The map space contains
29 // all (and only) map objects, the rest of old objects go into the old space.
30 // The old generation is collected by a mark-sweep-compact collector.
31 //
32 // The semispaces of the young generation are contiguous.  The old and map
33 // spaces consists of a list of pages. A page has a page header and an object
34 // area.
35 //
36 // There is a separate large object space for objects larger than
37 // Page::kMaxHeapObjectSize, so that they do not have to move during
38 // collection. The large object space is paged. Pages in large object space
39 // may be larger than the page size.
40 //
41 // A store-buffer based write barrier is used to keep track of intergenerational
42 // references.  See heap/store-buffer.h.
43 //
44 // During scavenges and mark-sweep collections we sometimes (after a store
45 // buffer overflow) iterate intergenerational pointers without decoding heap
46 // object maps so if the page belongs to old pointer space or large object
47 // space it is essential to guarantee that the page does not contain any
48 // garbage pointers to new space: every pointer aligned word which satisfies
49 // the Heap::InNewSpace() predicate must be a pointer to a live heap object in
50 // new space. Thus objects in old pointer and large object spaces should have a
51 // special layout (e.g. no bare integer fields). This requirement does not
52 // apply to map space which is iterated in a special fashion. However we still
53 // require pointer fields of dead maps to be cleaned.
54 //
55 // To enable lazy cleaning of old space pages we can mark chunks of the page
56 // as being garbage.  Garbage sections are marked with a special map.  These
57 // sections are skipped when scanning the page, even if we are otherwise
58 // scanning without regard for object boundaries.  Garbage sections are chained
59 // together to form a free list after a GC.  Garbage sections created outside
60 // of GCs by object trunctation etc. may not be in the free list chain.  Very
61 // small free spaces are ignored, they need only be cleaned of bogus pointers
62 // into new space.
63 //
64 // Each page may have up to one special garbage section.  The start of this
65 // section is denoted by the top field in the space.  The end of the section
66 // is denoted by the limit field in the space.  This special garbage section
67 // is not marked with a free space map in the data.  The point of this section
68 // is to enable linear allocation without having to constantly update the byte
69 // array every time the top field is updated and a new object is created.  The
70 // special garbage section is not in the chain of garbage sections.
71 //
72 // Since the top and limit fields are in the space, not the page, only one page
73 // has a special garbage section, and if the top and limit are equal then there
74 // is no special garbage section.
75 
76 // Some assertion macros used in the debugging mode.
77 
78 #define DCHECK_PAGE_ALIGNED(address) \
79   DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
80 
81 #define DCHECK_OBJECT_ALIGNED(address) \
82   DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)
83 
84 #define DCHECK_OBJECT_SIZE(size) \
85   DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))
86 
87 #define DCHECK_PAGE_OFFSET(offset) \
88   DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
89 
90 #define DCHECK_MAP_PAGE_INDEX(index) \
91   DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
92 
93 
94 class PagedSpace;
95 class MemoryAllocator;
96 class AllocationInfo;
97 class Space;
98 class FreeList;
99 class MemoryChunk;
100 
101 class MarkBit {
102  public:
103   typedef uint32_t CellType;
104 
MarkBit(CellType * cell,CellType mask,bool data_only)105   inline MarkBit(CellType* cell, CellType mask, bool data_only)
106       : cell_(cell), mask_(mask), data_only_(data_only) {}
107 
cell()108   inline CellType* cell() { return cell_; }
mask()109   inline CellType mask() { return mask_; }
110 
111 #ifdef DEBUG
112   bool operator==(const MarkBit& other) {
113     return cell_ == other.cell_ && mask_ == other.mask_;
114   }
115 #endif
116 
Set()117   inline void Set() { *cell_ |= mask_; }
Get()118   inline bool Get() { return (*cell_ & mask_) != 0; }
Clear()119   inline void Clear() { *cell_ &= ~mask_; }
120 
data_only()121   inline bool data_only() { return data_only_; }
122 
Next()123   inline MarkBit Next() {
124     CellType new_mask = mask_ << 1;
125     if (new_mask == 0) {
126       return MarkBit(cell_ + 1, 1, data_only_);
127     } else {
128       return MarkBit(cell_, new_mask, data_only_);
129     }
130   }
131 
132  private:
133   CellType* cell_;
134   CellType mask_;
135   // This boolean indicates that the object is in a data-only space with no
136   // pointers.  This enables some optimizations when marking.
137   // It is expected that this field is inlined and turned into control flow
138   // at the place where the MarkBit object is created.
139   bool data_only_;
140 };
141 
142 
143 // Bitmap is a sequence of cells each containing fixed number of bits.
144 class Bitmap {
145  public:
146   static const uint32_t kBitsPerCell = 32;
147   static const uint32_t kBitsPerCellLog2 = 5;
148   static const uint32_t kBitIndexMask = kBitsPerCell - 1;
149   static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
150   static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
151 
152   static const size_t kLength = (1 << kPageSizeBits) >> (kPointerSizeLog2);
153 
154   static const size_t kSize =
155       (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
156 
157 
CellsForLength(int length)158   static int CellsForLength(int length) {
159     return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
160   }
161 
CellsCount()162   int CellsCount() { return CellsForLength(kLength); }
163 
SizeFor(int cells_count)164   static int SizeFor(int cells_count) {
165     return sizeof(MarkBit::CellType) * cells_count;
166   }
167 
INLINE(static uint32_t IndexToCell (uint32_t index))168   INLINE(static uint32_t IndexToCell(uint32_t index)) {
169     return index >> kBitsPerCellLog2;
170   }
171 
INLINE(static uint32_t CellToIndex (uint32_t index))172   INLINE(static uint32_t CellToIndex(uint32_t index)) {
173     return index << kBitsPerCellLog2;
174   }
175 
INLINE(static uint32_t CellAlignIndex (uint32_t index))176   INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
177     return (index + kBitIndexMask) & ~kBitIndexMask;
178   }
179 
INLINE(MarkBit::CellType * cells ())180   INLINE(MarkBit::CellType* cells()) {
181     return reinterpret_cast<MarkBit::CellType*>(this);
182   }
183 
INLINE(Address address ())184   INLINE(Address address()) { return reinterpret_cast<Address>(this); }
185 
INLINE(static Bitmap * FromAddress (Address addr))186   INLINE(static Bitmap* FromAddress(Address addr)) {
187     return reinterpret_cast<Bitmap*>(addr);
188   }
189 
190   inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
191     MarkBit::CellType mask = 1 << (index & kBitIndexMask);
192     MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
193     return MarkBit(cell, mask, data_only);
194   }
195 
196   static inline void Clear(MemoryChunk* chunk);
197 
198   static void PrintWord(uint32_t word, uint32_t himask = 0) {
199     for (uint32_t mask = 1; mask != 0; mask <<= 1) {
200       if ((mask & himask) != 0) PrintF("[");
201       PrintF((mask & word) ? "1" : "0");
202       if ((mask & himask) != 0) PrintF("]");
203     }
204   }
205 
206   class CellPrinter {
207    public:
CellPrinter()208     CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}
209 
Print(uint32_t pos,uint32_t cell)210     void Print(uint32_t pos, uint32_t cell) {
211       if (cell == seq_type) {
212         seq_length++;
213         return;
214       }
215 
216       Flush();
217 
218       if (IsSeq(cell)) {
219         seq_start = pos;
220         seq_length = 0;
221         seq_type = cell;
222         return;
223       }
224 
225       PrintF("%d: ", pos);
226       PrintWord(cell);
227       PrintF("\n");
228     }
229 
Flush()230     void Flush() {
231       if (seq_length > 0) {
232         PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
233                seq_length * kBitsPerCell);
234         seq_length = 0;
235       }
236     }
237 
IsSeq(uint32_t cell)238     static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
239 
240    private:
241     uint32_t seq_start;
242     uint32_t seq_type;
243     uint32_t seq_length;
244   };
245 
Print()246   void Print() {
247     CellPrinter printer;
248     for (int i = 0; i < CellsCount(); i++) {
249       printer.Print(i, cells()[i]);
250     }
251     printer.Flush();
252     PrintF("\n");
253   }
254 
IsClean()255   bool IsClean() {
256     for (int i = 0; i < CellsCount(); i++) {
257       if (cells()[i] != 0) {
258         return false;
259       }
260     }
261     return true;
262   }
263 };
264 
265 
266 class SkipList;
267 class SlotsBuffer;
268 
269 // MemoryChunk represents a memory region owned by a specific space.
270 // It is divided into the header and the body. Chunk start is always
271 // 1MB aligned. Start of the body is aligned so it can accommodate
272 // any heap object.
273 class MemoryChunk {
274  public:
275   // Only works if the pointer is in the first kPageSize of the MemoryChunk.
FromAddress(Address a)276   static MemoryChunk* FromAddress(Address a) {
277     return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
278   }
FromAddress(const byte * a)279   static const MemoryChunk* FromAddress(const byte* a) {
280     return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) &
281                                                 ~kAlignmentMask);
282   }
283 
284   // Only works for addresses in pointer spaces, not data or code spaces.
285   static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
286 
address()287   Address address() { return reinterpret_cast<Address>(this); }
288 
is_valid()289   bool is_valid() { return address() != NULL; }
290 
next_chunk()291   MemoryChunk* next_chunk() const {
292     return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&next_chunk_));
293   }
294 
prev_chunk()295   MemoryChunk* prev_chunk() const {
296     return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&prev_chunk_));
297   }
298 
set_next_chunk(MemoryChunk * next)299   void set_next_chunk(MemoryChunk* next) {
300     base::Release_Store(&next_chunk_, reinterpret_cast<base::AtomicWord>(next));
301   }
302 
set_prev_chunk(MemoryChunk * prev)303   void set_prev_chunk(MemoryChunk* prev) {
304     base::Release_Store(&prev_chunk_, reinterpret_cast<base::AtomicWord>(prev));
305   }
306 
owner()307   Space* owner() const {
308     if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
309         kPageHeaderTag) {
310       return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
311                                       kPageHeaderTag);
312     } else {
313       return NULL;
314     }
315   }
316 
set_owner(Space * space)317   void set_owner(Space* space) {
318     DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
319     owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
320     DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
321            kPageHeaderTag);
322   }
323 
reserved_memory()324   base::VirtualMemory* reserved_memory() { return &reservation_; }
325 
InitializeReservedMemory()326   void InitializeReservedMemory() { reservation_.Reset(); }
327 
set_reserved_memory(base::VirtualMemory * reservation)328   void set_reserved_memory(base::VirtualMemory* reservation) {
329     DCHECK_NOT_NULL(reservation);
330     reservation_.TakeControl(reservation);
331   }
332 
scan_on_scavenge()333   bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
initialize_scan_on_scavenge(bool scan)334   void initialize_scan_on_scavenge(bool scan) {
335     if (scan) {
336       SetFlag(SCAN_ON_SCAVENGE);
337     } else {
338       ClearFlag(SCAN_ON_SCAVENGE);
339     }
340   }
341   inline void set_scan_on_scavenge(bool scan);
342 
store_buffer_counter()343   int store_buffer_counter() { return store_buffer_counter_; }
set_store_buffer_counter(int counter)344   void set_store_buffer_counter(int counter) {
345     store_buffer_counter_ = counter;
346   }
347 
Contains(Address addr)348   bool Contains(Address addr) {
349     return addr >= area_start() && addr < area_end();
350   }
351 
352   // Checks whether addr can be a limit of addresses in this page.
353   // It's a limit if it's in the page, or if it's just after the
354   // last byte of the page.
ContainsLimit(Address addr)355   bool ContainsLimit(Address addr) {
356     return addr >= area_start() && addr <= area_end();
357   }
358 
359   // Every n write barrier invocations we go to runtime even though
360   // we could have handled it in generated code.  This lets us check
361   // whether we have hit the limit and should do some more marking.
362   static const int kWriteBarrierCounterGranularity = 500;
363 
364   enum MemoryChunkFlags {
365     IS_EXECUTABLE,
366     ABOUT_TO_BE_FREED,
367     POINTERS_TO_HERE_ARE_INTERESTING,
368     POINTERS_FROM_HERE_ARE_INTERESTING,
369     SCAN_ON_SCAVENGE,
370     IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
371     IN_TO_SPACE,    // All pages in new space has one of these two set.
372     NEW_SPACE_BELOW_AGE_MARK,
373     CONTAINS_ONLY_DATA,
374     EVACUATION_CANDIDATE,
375     RESCAN_ON_EVACUATION,
376 
377     // WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
378     // otherwise marking bits are still intact.
379     WAS_SWEPT,
380 
381     // Large objects can have a progress bar in their page header. These object
382     // are scanned in increments and will be kept black while being scanned.
383     // Even if the mutator writes to them they will be kept black and a white
384     // to grey transition is performed in the value.
385     HAS_PROGRESS_BAR,
386 
387     // Last flag, keep at bottom.
388     NUM_MEMORY_CHUNK_FLAGS
389   };
390 
391 
392   static const int kPointersToHereAreInterestingMask =
393       1 << POINTERS_TO_HERE_ARE_INTERESTING;
394 
395   static const int kPointersFromHereAreInterestingMask =
396       1 << POINTERS_FROM_HERE_ARE_INTERESTING;
397 
398   static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;
399 
400   static const int kSkipEvacuationSlotsRecordingMask =
401       (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) |
402       (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
403 
404 
SetFlag(int flag)405   void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }
406 
ClearFlag(int flag)407   void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }
408 
SetFlagTo(int flag,bool value)409   void SetFlagTo(int flag, bool value) {
410     if (value) {
411       SetFlag(flag);
412     } else {
413       ClearFlag(flag);
414     }
415   }
416 
IsFlagSet(int flag)417   bool IsFlagSet(int flag) {
418     return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
419   }
420 
421   // Set or clear multiple flags at a time. The flags in the mask
422   // are set to the value in "flags", the rest retain the current value
423   // in flags_.
SetFlags(intptr_t flags,intptr_t mask)424   void SetFlags(intptr_t flags, intptr_t mask) {
425     flags_ = (flags_ & ~mask) | (flags & mask);
426   }
427 
428   // Return all current flags.
GetFlags()429   intptr_t GetFlags() { return flags_; }
430 
431 
432   // SWEEPING_DONE - The page state when sweeping is complete or sweeping must
433   // not be performed on that page.
434   // SWEEPING_FINALIZE - A sweeper thread is done sweeping this page and will
435   // not touch the page memory anymore.
436   // SWEEPING_IN_PROGRESS - This page is currently swept by a sweeper thread.
437   // SWEEPING_PENDING - This page is ready for parallel sweeping.
438   enum ParallelSweepingState {
439     SWEEPING_DONE,
440     SWEEPING_FINALIZE,
441     SWEEPING_IN_PROGRESS,
442     SWEEPING_PENDING
443   };
444 
parallel_sweeping()445   ParallelSweepingState parallel_sweeping() {
446     return static_cast<ParallelSweepingState>(
447         base::Acquire_Load(&parallel_sweeping_));
448   }
449 
set_parallel_sweeping(ParallelSweepingState state)450   void set_parallel_sweeping(ParallelSweepingState state) {
451     base::Release_Store(&parallel_sweeping_, state);
452   }
453 
TryParallelSweeping()454   bool TryParallelSweeping() {
455     return base::Acquire_CompareAndSwap(&parallel_sweeping_, SWEEPING_PENDING,
456                                         SWEEPING_IN_PROGRESS) ==
457            SWEEPING_PENDING;
458   }
459 
SweepingCompleted()460   bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; }
461 
462   // Manage live byte count (count of bytes known to be live,
463   // because they are marked black).
ResetLiveBytes()464   void ResetLiveBytes() {
465     if (FLAG_gc_verbose) {
466       PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this),
467              live_byte_count_);
468     }
469     live_byte_count_ = 0;
470   }
IncrementLiveBytes(int by)471   void IncrementLiveBytes(int by) {
472     if (FLAG_gc_verbose) {
473       printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", static_cast<void*>(this),
474              live_byte_count_, ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
475              live_byte_count_ + by);
476     }
477     live_byte_count_ += by;
478     DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
479   }
LiveBytes()480   int LiveBytes() {
481     DCHECK(static_cast<unsigned>(live_byte_count_) <= size_);
482     return live_byte_count_;
483   }
484 
write_barrier_counter()485   int write_barrier_counter() {
486     return static_cast<int>(write_barrier_counter_);
487   }
488 
set_write_barrier_counter(int counter)489   void set_write_barrier_counter(int counter) {
490     write_barrier_counter_ = counter;
491   }
492 
progress_bar()493   int progress_bar() {
494     DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
495     return progress_bar_;
496   }
497 
set_progress_bar(int progress_bar)498   void set_progress_bar(int progress_bar) {
499     DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
500     progress_bar_ = progress_bar;
501   }
502 
ResetProgressBar()503   void ResetProgressBar() {
504     if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
505       set_progress_bar(0);
506       ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
507     }
508   }
509 
IsLeftOfProgressBar(Object ** slot)510   bool IsLeftOfProgressBar(Object** slot) {
511     Address slot_address = reinterpret_cast<Address>(slot);
512     DCHECK(slot_address > this->address());
513     return (slot_address - (this->address() + kObjectStartOffset)) <
514            progress_bar();
515   }
516 
IncrementLiveBytesFromGC(Address address,int by)517   static void IncrementLiveBytesFromGC(Address address, int by) {
518     MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
519   }
520 
521   static void IncrementLiveBytesFromMutator(Address address, int by);
522 
523   static const intptr_t kAlignment =
524       (static_cast<uintptr_t>(1) << kPageSizeBits);
525 
526   static const intptr_t kAlignmentMask = kAlignment - 1;
527 
528   static const intptr_t kSizeOffset = 0;
529 
530   static const intptr_t kLiveBytesOffset =
531       kSizeOffset + kPointerSize + kPointerSize + kPointerSize + kPointerSize +
532       kPointerSize + kPointerSize + kPointerSize + kPointerSize + kIntSize;
533 
534   static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
535 
536   static const size_t kWriteBarrierCounterOffset =
537       kSlotsBufferOffset + kPointerSize + kPointerSize;
538 
539   static const size_t kHeaderSize =
540       kWriteBarrierCounterOffset + kPointerSize + kIntSize + kIntSize +
541       kPointerSize + 5 * kPointerSize + kPointerSize + kPointerSize;
542 
543   static const int kBodyOffset =
544       CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
545 
546   // The start offset of the object area in a page. Aligned to both maps and
547   // code alignment to be suitable for both.  Also aligned to 32 words because
548   // the marking bitmap is arranged in 32 bit chunks.
549   static const int kObjectStartAlignment = 32 * kPointerSize;
550   static const int kObjectStartOffset =
551       kBodyOffset - 1 +
552       (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
553 
size()554   size_t size() const { return size_; }
555 
set_size(size_t size)556   void set_size(size_t size) { size_ = size; }
557 
SetArea(Address area_start,Address area_end)558   void SetArea(Address area_start, Address area_end) {
559     area_start_ = area_start;
560     area_end_ = area_end;
561   }
562 
executable()563   Executability executable() {
564     return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
565   }
566 
ContainsOnlyData()567   bool ContainsOnlyData() { return IsFlagSet(CONTAINS_ONLY_DATA); }
568 
InNewSpace()569   bool InNewSpace() {
570     return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
571   }
572 
InToSpace()573   bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
574 
InFromSpace()575   bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
576 
577   // ---------------------------------------------------------------------
578   // Markbits support
579 
markbits()580   inline Bitmap* markbits() {
581     return Bitmap::FromAddress(address() + kHeaderSize);
582   }
583 
PrintMarkbits()584   void PrintMarkbits() { markbits()->Print(); }
585 
AddressToMarkbitIndex(Address addr)586   inline uint32_t AddressToMarkbitIndex(Address addr) {
587     return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
588   }
589 
FastAddressToMarkbitIndex(Address addr)590   inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
591     const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
592 
593     return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
594   }
595 
MarkbitIndexToAddress(uint32_t index)596   inline Address MarkbitIndexToAddress(uint32_t index) {
597     return this->address() + (index << kPointerSizeLog2);
598   }
599 
600   void InsertAfter(MemoryChunk* other);
601   void Unlink();
602 
heap()603   inline Heap* heap() const { return heap_; }
604 
605   static const int kFlagsOffset = kPointerSize;
606 
IsEvacuationCandidate()607   bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
608 
ShouldSkipEvacuationSlotRecording()609   bool ShouldSkipEvacuationSlotRecording() {
610     return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
611   }
612 
skip_list()613   inline SkipList* skip_list() { return skip_list_; }
614 
set_skip_list(SkipList * skip_list)615   inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
616 
slots_buffer()617   inline SlotsBuffer* slots_buffer() { return slots_buffer_; }
618 
slots_buffer_address()619   inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }
620 
MarkEvacuationCandidate()621   void MarkEvacuationCandidate() {
622     DCHECK(slots_buffer_ == NULL);
623     SetFlag(EVACUATION_CANDIDATE);
624   }
625 
ClearEvacuationCandidate()626   void ClearEvacuationCandidate() {
627     DCHECK(slots_buffer_ == NULL);
628     ClearFlag(EVACUATION_CANDIDATE);
629   }
630 
area_start()631   Address area_start() { return area_start_; }
area_end()632   Address area_end() { return area_end_; }
area_size()633   int area_size() { return static_cast<int>(area_end() - area_start()); }
634   bool CommitArea(size_t requested);
635 
636   // Approximate amount of physical memory committed for this chunk.
CommittedPhysicalMemory()637   size_t CommittedPhysicalMemory() { return high_water_mark_; }
638 
639   static inline void UpdateHighWaterMark(Address mark);
640 
641  protected:
642   size_t size_;
643   intptr_t flags_;
644 
645   // Start and end of allocatable memory on this chunk.
646   Address area_start_;
647   Address area_end_;
648 
649   // If the chunk needs to remember its memory reservation, it is stored here.
650   base::VirtualMemory reservation_;
651   // The identity of the owning space.  This is tagged as a failure pointer, but
652   // no failure can be in an object, so this can be distinguished from any entry
653   // in a fixed array.
654   Address owner_;
655   Heap* heap_;
656   // Used by the store buffer to keep track of which pages to mark scan-on-
657   // scavenge.
658   int store_buffer_counter_;
659   // Count of bytes marked black on page.
660   int live_byte_count_;
661   SlotsBuffer* slots_buffer_;
662   SkipList* skip_list_;
663   intptr_t write_barrier_counter_;
664   // Used by the incremental marker to keep track of the scanning progress in
665   // large objects that have a progress bar and are scanned in increments.
666   int progress_bar_;
667   // Assuming the initial allocation on a page is sequential,
668   // count highest number of bytes ever allocated on the page.
669   int high_water_mark_;
670 
671   base::AtomicWord parallel_sweeping_;
672 
673   // PagedSpace free-list statistics.
674   intptr_t available_in_small_free_list_;
675   intptr_t available_in_medium_free_list_;
676   intptr_t available_in_large_free_list_;
677   intptr_t available_in_huge_free_list_;
678   intptr_t non_available_small_blocks_;
679 
680   static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
681                                  Address area_start, Address area_end,
682                                  Executability executable, Space* owner);
683 
684  private:
685   // next_chunk_ holds a pointer of type MemoryChunk
686   base::AtomicWord next_chunk_;
687   // prev_chunk_ holds a pointer of type MemoryChunk
688   base::AtomicWord prev_chunk_;
689 
690   friend class MemoryAllocator;
691 };
692 
693 
694 STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
695 
696 
697 // -----------------------------------------------------------------------------
698 // A page is a memory chunk of a size 1MB. Large object pages may be larger.
699 //
700 // The only way to get a page pointer is by calling factory methods:
701 //   Page* p = Page::FromAddress(addr); or
702 //   Page* p = Page::FromAllocationTop(top);
703 class Page : public MemoryChunk {
704  public:
705   // Returns the page containing a given address. The address ranges
706   // from [page_addr .. page_addr + kPageSize[
707   // This only works if the object is in fact in a page.  See also MemoryChunk::
708   // FromAddress() and FromAnyAddress().
INLINE(static Page * FromAddress (Address a))709   INLINE(static Page* FromAddress(Address a)) {
710     return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
711   }
712 
713   // Returns the page containing an allocation top. Because an allocation
714   // top address can be the upper bound of the page, we need to subtract
715   // it with kPointerSize first. The address ranges from
716   // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
INLINE(static Page * FromAllocationTop (Address top))717   INLINE(static Page* FromAllocationTop(Address top)) {
718     Page* p = FromAddress(top - kPointerSize);
719     return p;
720   }
721 
722   // Returns the next page in the chain of pages owned by a space.
723   inline Page* next_page();
724   inline Page* prev_page();
725   inline void set_next_page(Page* page);
726   inline void set_prev_page(Page* page);
727 
728   // Checks whether an address is page aligned.
IsAlignedToPageSize(Address a)729   static bool IsAlignedToPageSize(Address a) {
730     return 0 == (OffsetFrom(a) & kPageAlignmentMask);
731   }
732 
733   // Returns the offset of a given address to this page.
INLINE(int Offset (Address a))734   INLINE(int Offset(Address a)) {
735     int offset = static_cast<int>(a - address());
736     return offset;
737   }
738 
739   // Returns the address for a given offset to the this page.
OffsetToAddress(int offset)740   Address OffsetToAddress(int offset) {
741     DCHECK_PAGE_OFFSET(offset);
742     return address() + offset;
743   }
744 
745   // ---------------------------------------------------------------------
746 
747   // Page size in bytes.  This must be a multiple of the OS page size.
748   static const int kPageSize = 1 << kPageSizeBits;
749 
750   // Maximum object size that fits in a page. Objects larger than that size
751   // are allocated in large object space and are never moved in memory. This
752   // also applies to new space allocation, since objects are never migrated
753   // from new space to large object space.  Takes double alignment into account.
754   static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;
755 
756   // Page size mask.
757   static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
758 
759   inline void ClearGCFields();
760 
761   static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
762                                  Executability executable, PagedSpace* owner);
763 
764   void InitializeAsAnchor(PagedSpace* owner);
765 
WasSwept()766   bool WasSwept() { return IsFlagSet(WAS_SWEPT); }
SetWasSwept()767   void SetWasSwept() { SetFlag(WAS_SWEPT); }
ClearWasSwept()768   void ClearWasSwept() { ClearFlag(WAS_SWEPT); }
769 
770   void ResetFreeListStatistics();
771 
772 #define FRAGMENTATION_STATS_ACCESSORS(type, name) \
773   type name() { return name##_; }                 \
774   void set_##name(type name) { name##_ = name; }  \
775   void add_##name(type name) { name##_ += name; }
776 
777   FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
778   FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
779   FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
780   FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
781   FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)
782 
783 #undef FRAGMENTATION_STATS_ACCESSORS
784 
785 #ifdef DEBUG
786   void Print();
787 #endif  // DEBUG
788 
789   friend class MemoryAllocator;
790 };
791 
792 
793 STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
794 
795 
796 class LargePage : public MemoryChunk {
797  public:
GetObject()798   HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }
799 
next_page()800   inline LargePage* next_page() const {
801     return static_cast<LargePage*>(next_chunk());
802   }
803 
set_next_page(LargePage * page)804   inline void set_next_page(LargePage* page) { set_next_chunk(page); }
805 
806  private:
807   static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
808 
809   friend class MemoryAllocator;
810 };
811 
812 STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
813 
814 // ----------------------------------------------------------------------------
815 // Space is the abstract superclass for all allocation spaces.
816 class Space : public Malloced {
817  public:
Space(Heap * heap,AllocationSpace id,Executability executable)818   Space(Heap* heap, AllocationSpace id, Executability executable)
819       : heap_(heap), id_(id), executable_(executable) {}
820 
~Space()821   virtual ~Space() {}
822 
heap()823   Heap* heap() const { return heap_; }
824 
825   // Does the space need executable memory?
executable()826   Executability executable() { return executable_; }
827 
828   // Identity used in error reporting.
identity()829   AllocationSpace identity() { return id_; }
830 
831   // Returns allocated size.
832   virtual intptr_t Size() = 0;
833 
834   // Returns size of objects. Can differ from the allocated size
835   // (e.g. see LargeObjectSpace).
SizeOfObjects()836   virtual intptr_t SizeOfObjects() { return Size(); }
837 
RoundSizeDownToObjectAlignment(int size)838   virtual int RoundSizeDownToObjectAlignment(int size) {
839     if (id_ == CODE_SPACE) {
840       return RoundDown(size, kCodeAlignment);
841     } else {
842       return RoundDown(size, kPointerSize);
843     }
844   }
845 
846 #ifdef DEBUG
847   virtual void Print() = 0;
848 #endif
849 
850  private:
851   Heap* heap_;
852   AllocationSpace id_;
853   Executability executable_;
854 };
855 
856 
857 // ----------------------------------------------------------------------------
858 // All heap objects containing executable code (code objects) must be allocated
859 // from a 2 GB range of memory, so that they can call each other using 32-bit
860 // displacements.  This happens automatically on 32-bit platforms, where 32-bit
861 // displacements cover the entire 4GB virtual address space.  On 64-bit
862 // platforms, we support this using the CodeRange object, which reserves and
863 // manages a range of virtual memory.
864 class CodeRange {
865  public:
866   explicit CodeRange(Isolate* isolate);
~CodeRange()867   ~CodeRange() { TearDown(); }
868 
869   // Reserves a range of virtual memory, but does not commit any of it.
870   // Can only be called once, at heap initialization time.
871   // Returns false on failure.
872   bool SetUp(size_t requested_size);
873 
874   // Frees the range of virtual memory, and frees the data structures used to
875   // manage it.
876   void TearDown();
877 
valid()878   bool valid() { return code_range_ != NULL; }
start()879   Address start() {
880     DCHECK(valid());
881     return static_cast<Address>(code_range_->address());
882   }
contains(Address address)883   bool contains(Address address) {
884     if (!valid()) return false;
885     Address start = static_cast<Address>(code_range_->address());
886     return start <= address && address < start + code_range_->size();
887   }
888 
889   // Allocates a chunk of memory from the large-object portion of
890   // the code range.  On platforms with no separate code range, should
891   // not be called.
892   MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
893                                             const size_t commit_size,
894                                             size_t* allocated);
895   bool CommitRawMemory(Address start, size_t length);
896   bool UncommitRawMemory(Address start, size_t length);
897   void FreeRawMemory(Address buf, size_t length);
898 
899  private:
900   Isolate* isolate_;
901 
902   // The reserved range of virtual memory that all code objects are put in.
903   base::VirtualMemory* code_range_;
904   // Plain old data class, just a struct plus a constructor.
905   class FreeBlock {
906    public:
FreeBlock(Address start_arg,size_t size_arg)907     FreeBlock(Address start_arg, size_t size_arg)
908         : start(start_arg), size(size_arg) {
909       DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
910       DCHECK(size >= static_cast<size_t>(Page::kPageSize));
911     }
FreeBlock(void * start_arg,size_t size_arg)912     FreeBlock(void* start_arg, size_t size_arg)
913         : start(static_cast<Address>(start_arg)), size(size_arg) {
914       DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
915       DCHECK(size >= static_cast<size_t>(Page::kPageSize));
916     }
917 
918     Address start;
919     size_t size;
920   };
921 
922   // Freed blocks of memory are added to the free list.  When the allocation
923   // list is exhausted, the free list is sorted and merged to make the new
924   // allocation list.
925   List<FreeBlock> free_list_;
926   // Memory is allocated from the free blocks on the allocation list.
927   // The block at current_allocation_block_index_ is the current block.
928   List<FreeBlock> allocation_list_;
929   int current_allocation_block_index_;
930 
931   // Finds a block on the allocation list that contains at least the
932   // requested amount of memory.  If none is found, sorts and merges
933   // the existing free memory blocks, and searches again.
934   // If none can be found, returns false.
935   bool GetNextAllocationBlock(size_t requested);
936   // Compares the start addresses of two free blocks.
937   static int CompareFreeBlockAddress(const FreeBlock* left,
938                                      const FreeBlock* right);
939 
940   DISALLOW_COPY_AND_ASSIGN(CodeRange);
941 };
942 
943 
944 class SkipList {
945  public:
SkipList()946   SkipList() { Clear(); }
947 
Clear()948   void Clear() {
949     for (int idx = 0; idx < kSize; idx++) {
950       starts_[idx] = reinterpret_cast<Address>(-1);
951     }
952   }
953 
StartFor(Address addr)954   Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }
955 
AddObject(Address addr,int size)956   void AddObject(Address addr, int size) {
957     int start_region = RegionNumber(addr);
958     int end_region = RegionNumber(addr + size - kPointerSize);
959     for (int idx = start_region; idx <= end_region; idx++) {
960       if (starts_[idx] > addr) starts_[idx] = addr;
961     }
962   }
963 
RegionNumber(Address addr)964   static inline int RegionNumber(Address addr) {
965     return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
966   }
967 
Update(Address addr,int size)968   static void Update(Address addr, int size) {
969     Page* page = Page::FromAddress(addr);
970     SkipList* list = page->skip_list();
971     if (list == NULL) {
972       list = new SkipList();
973       page->set_skip_list(list);
974     }
975 
976     list->AddObject(addr, size);
977   }
978 
979  private:
980   static const int kRegionSizeLog2 = 13;
981   static const int kRegionSize = 1 << kRegionSizeLog2;
982   static const int kSize = Page::kPageSize / kRegionSize;
983 
984   STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
985 
986   Address starts_[kSize];
987 };
988 
989 
990 // ----------------------------------------------------------------------------
991 // A space acquires chunks of memory from the operating system. The memory
992 // allocator allocated and deallocates pages for the paged heap spaces and large
993 // pages for large object space.
994 //
995 // Each space has to manage it's own pages.
996 //
997 class MemoryAllocator {
998  public:
999   explicit MemoryAllocator(Isolate* isolate);
1000 
1001   // Initializes its internal bookkeeping structures.
1002   // Max capacity of the total space and executable memory limit.
1003   bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);
1004 
1005   void TearDown();
1006 
1007   Page* AllocatePage(intptr_t size, PagedSpace* owner,
1008                      Executability executable);
1009 
1010   LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
1011                                Executability executable);
1012 
1013   void Free(MemoryChunk* chunk);
1014 
1015   // Returns the maximum available bytes of heaps.
Available()1016   intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
1017 
1018   // Returns allocated spaces in bytes.
Size()1019   intptr_t Size() { return size_; }
1020 
1021   // Returns the maximum available executable bytes of heaps.
AvailableExecutable()1022   intptr_t AvailableExecutable() {
1023     if (capacity_executable_ < size_executable_) return 0;
1024     return capacity_executable_ - size_executable_;
1025   }
1026 
1027   // Returns allocated executable spaces in bytes.
SizeExecutable()1028   intptr_t SizeExecutable() { return size_executable_; }
1029 
1030   // Returns maximum available bytes that the old space can have.
MaxAvailable()1031   intptr_t MaxAvailable() {
1032     return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
1033   }
1034 
1035   // Returns an indication of whether a pointer is in a space that has
1036   // been allocated by this MemoryAllocator.
IsOutsideAllocatedSpace(const void * address)1037   V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
1038     return address < lowest_ever_allocated_ ||
1039            address >= highest_ever_allocated_;
1040   }
1041 
1042 #ifdef DEBUG
1043   // Reports statistic info of the space.
1044   void ReportStatistics();
1045 #endif
1046 
1047   // Returns a MemoryChunk in which the memory region from commit_area_size to
1048   // reserve_area_size of the chunk area is reserved but not committed, it
1049   // could be committed later by calling MemoryChunk::CommitArea.
1050   MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
1051                              intptr_t commit_area_size,
1052                              Executability executable, Space* space);
1053 
1054   Address ReserveAlignedMemory(size_t requested, size_t alignment,
1055                                base::VirtualMemory* controller);
1056   Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
1057                                 size_t alignment, Executability executable,
1058                                 base::VirtualMemory* controller);
1059 
1060   bool CommitMemory(Address addr, size_t size, Executability executable);
1061 
1062   void FreeMemory(base::VirtualMemory* reservation, Executability executable);
1063   void FreeMemory(Address addr, size_t size, Executability executable);
1064 
1065   // Commit a contiguous block of memory from the initial chunk.  Assumes that
1066   // the address is not NULL, the size is greater than zero, and that the
1067   // block is contained in the initial chunk.  Returns true if it succeeded
1068   // and false otherwise.
1069   bool CommitBlock(Address start, size_t size, Executability executable);
1070 
1071   // Uncommit a contiguous block of memory [start..(start+size)[.
1072   // start is not NULL, the size is greater than zero, and the
1073   // block is contained in the initial chunk.  Returns true if it succeeded
1074   // and false otherwise.
1075   bool UncommitBlock(Address start, size_t size);
1076 
1077   // Zaps a contiguous block of memory [start..(start+size)[ thus
1078   // filling it up with a recognizable non-NULL bit pattern.
1079   void ZapBlock(Address start, size_t size);
1080 
1081   void PerformAllocationCallback(ObjectSpace space, AllocationAction action,
1082                                  size_t size);
1083 
1084   void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
1085                                    ObjectSpace space, AllocationAction action);
1086 
1087   void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
1088 
1089   bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
1090 
1091   static int CodePageGuardStartOffset();
1092 
1093   static int CodePageGuardSize();
1094 
1095   static int CodePageAreaStartOffset();
1096 
1097   static int CodePageAreaEndOffset();
1098 
CodePageAreaSize()1099   static int CodePageAreaSize() {
1100     return CodePageAreaEndOffset() - CodePageAreaStartOffset();
1101   }
1102 
1103   MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
1104                                               Address start, size_t commit_size,
1105                                               size_t reserved_size);
1106 
1107  private:
1108   Isolate* isolate_;
1109 
1110   // Maximum space size in bytes.
1111   size_t capacity_;
1112   // Maximum subset of capacity_ that can be executable
1113   size_t capacity_executable_;
1114 
1115   // Allocated space size in bytes.
1116   size_t size_;
1117   // Allocated executable space size in bytes.
1118   size_t size_executable_;
1119 
1120   // We keep the lowest and highest addresses allocated as a quick way
1121   // of determining that pointers are outside the heap. The estimate is
1122   // conservative, i.e. not all addrsses in 'allocated' space are allocated
1123   // to our heap. The range is [lowest, highest[, inclusive on the low end
1124   // and exclusive on the high end.
1125   void* lowest_ever_allocated_;
1126   void* highest_ever_allocated_;
1127 
1128   struct MemoryAllocationCallbackRegistration {
MemoryAllocationCallbackRegistrationMemoryAllocationCallbackRegistration1129     MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
1130                                          ObjectSpace space,
1131                                          AllocationAction action)
1132         : callback(callback), space(space), action(action) {}
1133     MemoryAllocationCallback callback;
1134     ObjectSpace space;
1135     AllocationAction action;
1136   };
1137 
1138   // A List of callback that are triggered when memory is allocated or free'd
1139   List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_;
1140 
1141   // Initializes pages in a chunk. Returns the first page address.
1142   // This function and GetChunkId() are provided for the mark-compact
1143   // collector to rebuild page headers in the from space, which is
1144   // used as a marking stack and its page headers are destroyed.
1145   Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1146                                PagedSpace* owner);
1147 
UpdateAllocatedSpaceLimits(void * low,void * high)1148   void UpdateAllocatedSpaceLimits(void* low, void* high) {
1149     lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
1150     highest_ever_allocated_ = Max(highest_ever_allocated_, high);
1151   }
1152 
1153   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
1154 };
1155 
1156 
1157 // -----------------------------------------------------------------------------
1158 // Interface for heap object iterator to be implemented by all object space
1159 // object iterators.
1160 //
1161 // NOTE: The space specific object iterators also implements the own next()
1162 //       method which is used to avoid using virtual functions
1163 //       iterating a specific space.
1164 
1165 class ObjectIterator : public Malloced {
1166  public:
~ObjectIterator()1167   virtual ~ObjectIterator() {}
1168 
1169   virtual HeapObject* next_object() = 0;
1170 };
1171 
1172 
1173 // -----------------------------------------------------------------------------
1174 // Heap object iterator in new/old/map spaces.
1175 //
1176 // A HeapObjectIterator iterates objects from the bottom of the given space
1177 // to its top or from the bottom of the given page to its top.
1178 //
1179 // If objects are allocated in the page during iteration the iterator may
1180 // or may not iterate over those objects.  The caller must create a new
1181 // iterator in order to be sure to visit these new objects.
1182 class HeapObjectIterator : public ObjectIterator {
1183  public:
1184   // Creates a new object iterator in a given space.
1185   // If the size function is not given, the iterator calls the default
1186   // Object::Size().
1187   explicit HeapObjectIterator(PagedSpace* space);
1188   HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
1189   HeapObjectIterator(Page* page, HeapObjectCallback size_func);
1190 
1191   // Advance to the next object, skipping free spaces and other fillers and
1192   // skipping the special garbage section of which there is one per space.
1193   // Returns NULL when the iteration has ended.
Next()1194   inline HeapObject* Next() {
1195     do {
1196       HeapObject* next_obj = FromCurrentPage();
1197       if (next_obj != NULL) return next_obj;
1198     } while (AdvanceToNextPage());
1199     return NULL;
1200   }
1201 
next_object()1202   virtual HeapObject* next_object() { return Next(); }
1203 
1204  private:
1205   enum PageMode { kOnePageOnly, kAllPagesInSpace };
1206 
1207   Address cur_addr_;              // Current iteration point.
1208   Address cur_end_;               // End iteration point.
1209   HeapObjectCallback size_func_;  // Size function or NULL.
1210   PagedSpace* space_;
1211   PageMode page_mode_;
1212 
1213   // Fast (inlined) path of next().
1214   inline HeapObject* FromCurrentPage();
1215 
1216   // Slow path of next(), goes into the next page.  Returns false if the
1217   // iteration has ended.
1218   bool AdvanceToNextPage();
1219 
1220   // Initializes fields.
1221   inline void Initialize(PagedSpace* owner, Address start, Address end,
1222                          PageMode mode, HeapObjectCallback size_func);
1223 };
1224 
1225 
1226 // -----------------------------------------------------------------------------
1227 // A PageIterator iterates the pages in a paged space.
1228 
1229 class PageIterator BASE_EMBEDDED {
1230  public:
1231   explicit inline PageIterator(PagedSpace* space);
1232 
1233   inline bool has_next();
1234   inline Page* next();
1235 
1236  private:
1237   PagedSpace* space_;
1238   Page* prev_page_;  // Previous page returned.
1239   // Next page that will be returned.  Cached here so that we can use this
1240   // iterator for operations that deallocate pages.
1241   Page* next_page_;
1242 };
1243 
1244 
1245 // -----------------------------------------------------------------------------
1246 // A space has a circular list of pages. The next page can be accessed via
1247 // Page::next_page() call.
1248 
1249 // An abstraction of allocation and relocation pointers in a page-structured
1250 // space.
1251 class AllocationInfo {
1252  public:
AllocationInfo()1253   AllocationInfo() : top_(NULL), limit_(NULL) {}
1254 
INLINE(void set_top (Address top))1255   INLINE(void set_top(Address top)) {
1256     SLOW_DCHECK(top == NULL ||
1257                 (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
1258     top_ = top;
1259   }
1260 
INLINE(Address top ())1261   INLINE(Address top()) const {
1262     SLOW_DCHECK(top_ == NULL ||
1263                 (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
1264     return top_;
1265   }
1266 
top_address()1267   Address* top_address() { return &top_; }
1268 
INLINE(void set_limit (Address limit))1269   INLINE(void set_limit(Address limit)) {
1270     SLOW_DCHECK(limit == NULL ||
1271                 (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
1272     limit_ = limit;
1273   }
1274 
INLINE(Address limit ())1275   INLINE(Address limit()) const {
1276     SLOW_DCHECK(limit_ == NULL ||
1277                 (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) ==
1278                     0);
1279     return limit_;
1280   }
1281 
limit_address()1282   Address* limit_address() { return &limit_; }
1283 
1284 #ifdef DEBUG
VerifyPagedAllocation()1285   bool VerifyPagedAllocation() {
1286     return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) &&
1287            (top_ <= limit_);
1288   }
1289 #endif
1290 
1291  private:
1292   // Current allocation top.
1293   Address top_;
1294   // Current allocation limit.
1295   Address limit_;
1296 };
1297 
1298 
1299 // An abstraction of the accounting statistics of a page-structured space.
1300 // The 'capacity' of a space is the number of object-area bytes (i.e., not
1301 // including page bookkeeping structures) currently in the space. The 'size'
1302 // of a space is the number of allocated bytes, the 'waste' in the space is
1303 // the number of bytes that are not allocated and not available to
1304 // allocation without reorganizing the space via a GC (e.g. small blocks due
1305 // to internal fragmentation, top of page areas in map space), and the bytes
1306 // 'available' is the number of unallocated bytes that are not waste.  The
1307 // capacity is the sum of size, waste, and available.
1308 //
1309 // The stats are only set by functions that ensure they stay balanced. These
1310 // functions increase or decrease one of the non-capacity stats in
1311 // conjunction with capacity, or else they always balance increases and
1312 // decreases to the non-capacity stats.
1313 class AllocationStats BASE_EMBEDDED {
1314  public:
AllocationStats()1315   AllocationStats() { Clear(); }
1316 
1317   // Zero out all the allocation statistics (i.e., no capacity).
Clear()1318   void Clear() {
1319     capacity_ = 0;
1320     max_capacity_ = 0;
1321     size_ = 0;
1322     waste_ = 0;
1323   }
1324 
ClearSizeWaste()1325   void ClearSizeWaste() {
1326     size_ = capacity_;
1327     waste_ = 0;
1328   }
1329 
1330   // Reset the allocation statistics (i.e., available = capacity with no
1331   // wasted or allocated bytes).
Reset()1332   void Reset() {
1333     size_ = 0;
1334     waste_ = 0;
1335   }
1336 
1337   // Accessors for the allocation statistics.
Capacity()1338   intptr_t Capacity() { return capacity_; }
MaxCapacity()1339   intptr_t MaxCapacity() { return max_capacity_; }
Size()1340   intptr_t Size() { return size_; }
Waste()1341   intptr_t Waste() { return waste_; }
1342 
1343   // Grow the space by adding available bytes.  They are initially marked as
1344   // being in use (part of the size), but will normally be immediately freed,
1345   // putting them on the free list and removing them from size_.
ExpandSpace(int size_in_bytes)1346   void ExpandSpace(int size_in_bytes) {
1347     capacity_ += size_in_bytes;
1348     size_ += size_in_bytes;
1349     if (capacity_ > max_capacity_) {
1350       max_capacity_ = capacity_;
1351     }
1352     DCHECK(size_ >= 0);
1353   }
1354 
1355   // Shrink the space by removing available bytes.  Since shrinking is done
1356   // during sweeping, bytes have been marked as being in use (part of the size)
1357   // and are hereby freed.
ShrinkSpace(int size_in_bytes)1358   void ShrinkSpace(int size_in_bytes) {
1359     capacity_ -= size_in_bytes;
1360     size_ -= size_in_bytes;
1361     DCHECK(size_ >= 0);
1362   }
1363 
1364   // Allocate from available bytes (available -> size).
AllocateBytes(intptr_t size_in_bytes)1365   void AllocateBytes(intptr_t size_in_bytes) {
1366     size_ += size_in_bytes;
1367     DCHECK(size_ >= 0);
1368   }
1369 
1370   // Free allocated bytes, making them available (size -> available).
DeallocateBytes(intptr_t size_in_bytes)1371   void DeallocateBytes(intptr_t size_in_bytes) {
1372     size_ -= size_in_bytes;
1373     DCHECK(size_ >= 0);
1374   }
1375 
1376   // Waste free bytes (available -> waste).
WasteBytes(int size_in_bytes)1377   void WasteBytes(int size_in_bytes) {
1378     DCHECK(size_in_bytes >= 0);
1379     waste_ += size_in_bytes;
1380   }
1381 
1382  private:
1383   intptr_t capacity_;
1384   intptr_t max_capacity_;
1385   intptr_t size_;
1386   intptr_t waste_;
1387 };
1388 
1389 
1390 // -----------------------------------------------------------------------------
1391 // Free lists for old object spaces
1392 //
1393 // Free-list nodes are free blocks in the heap.  They look like heap objects
1394 // (free-list node pointers have the heap object tag, and they have a map like
1395 // a heap object).  They have a size and a next pointer.  The next pointer is
1396 // the raw address of the next free list node (or NULL).
1397 class FreeListNode : public HeapObject {
1398  public:
1399   // Obtain a free-list node from a raw address.  This is not a cast because
1400   // it does not check nor require that the first word at the address is a map
1401   // pointer.
FromAddress(Address address)1402   static FreeListNode* FromAddress(Address address) {
1403     return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
1404   }
1405 
1406   static inline bool IsFreeListNode(HeapObject* object);
1407 
1408   // Set the size in bytes, which can be read with HeapObject::Size().  This
1409   // function also writes a map to the first word of the block so that it
1410   // looks like a heap object to the garbage collector and heap iteration
1411   // functions.
1412   void set_size(Heap* heap, int size_in_bytes);
1413 
1414   // Accessors for the next field.
1415   inline FreeListNode* next();
1416   inline FreeListNode** next_address();
1417   inline void set_next(FreeListNode* next);
1418 
1419   inline void Zap();
1420 
cast(Object * object)1421   static inline FreeListNode* cast(Object* object) {
1422     return reinterpret_cast<FreeListNode*>(object);
1423   }
1424 
1425  private:
1426   static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
1427 
1428   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
1429 };
1430 
1431 
1432 // The free list category holds a pointer to the top element and a pointer to
1433 // the end element of the linked list of free memory blocks.
1434 class FreeListCategory {
1435  public:
FreeListCategory()1436   FreeListCategory() : top_(0), end_(NULL), available_(0) {}
1437 
1438   intptr_t Concatenate(FreeListCategory* category);
1439 
1440   void Reset();
1441 
1442   void Free(FreeListNode* node, int size_in_bytes);
1443 
1444   FreeListNode* PickNodeFromList(int* node_size);
1445   FreeListNode* PickNodeFromList(int size_in_bytes, int* node_size);
1446 
1447   intptr_t EvictFreeListItemsInList(Page* p);
1448   bool ContainsPageFreeListItemsInList(Page* p);
1449 
1450   void RepairFreeList(Heap* heap);
1451 
top()1452   FreeListNode* top() const {
1453     return reinterpret_cast<FreeListNode*>(base::NoBarrier_Load(&top_));
1454   }
1455 
set_top(FreeListNode * top)1456   void set_top(FreeListNode* top) {
1457     base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top));
1458   }
1459 
GetEndAddress()1460   FreeListNode** GetEndAddress() { return &end_; }
end()1461   FreeListNode* end() const { return end_; }
set_end(FreeListNode * end)1462   void set_end(FreeListNode* end) { end_ = end; }
1463 
GetAvailableAddress()1464   int* GetAvailableAddress() { return &available_; }
available()1465   int available() const { return available_; }
set_available(int available)1466   void set_available(int available) { available_ = available; }
1467 
mutex()1468   base::Mutex* mutex() { return &mutex_; }
1469 
IsEmpty()1470   bool IsEmpty() { return top() == 0; }
1471 
1472 #ifdef DEBUG
1473   intptr_t SumFreeList();
1474   int FreeListLength();
1475 #endif
1476 
1477  private:
1478   // top_ points to the top FreeListNode* in the free list category.
1479   base::AtomicWord top_;
1480   FreeListNode* end_;
1481   base::Mutex mutex_;
1482 
1483   // Total available bytes in all blocks of this free list category.
1484   int available_;
1485 };
1486 
1487 
1488 // The free list for the old space.  The free list is organized in such a way
1489 // as to encourage objects allocated around the same time to be near each
1490 // other.  The normal way to allocate is intended to be by bumping a 'top'
1491 // pointer until it hits a 'limit' pointer.  When the limit is hit we need to
1492 // find a new space to allocate from.  This is done with the free list, which
1493 // is divided up into rough categories to cut down on waste.  Having finer
1494 // categories would scatter allocation more.
1495 
1496 // The old space free list is organized in categories.
1497 // 1-31 words:  Such small free areas are discarded for efficiency reasons.
1498 //     They can be reclaimed by the compactor.  However the distance between top
1499 //     and limit may be this small.
1500 // 32-255 words: There is a list of spaces this large.  It is used for top and
1501 //     limit when the object we need to allocate is 1-31 words in size.  These
1502 //     spaces are called small.
1503 // 256-2047 words: There is a list of spaces this large.  It is used for top and
1504 //     limit when the object we need to allocate is 32-255 words in size.  These
1505 //     spaces are called medium.
1506 // 1048-16383 words: There is a list of spaces this large.  It is used for top
1507 //     and limit when the object we need to allocate is 256-2047 words in size.
1508 //     These spaces are call large.
1509 // At least 16384 words.  This list is for objects of 2048 words or larger.
1510 //     Empty pages are added to this list.  These spaces are called huge.
1511 class FreeList {
1512  public:
1513   explicit FreeList(PagedSpace* owner);
1514 
1515   intptr_t Concatenate(FreeList* free_list);
1516 
1517   // Clear the free list.
1518   void Reset();
1519 
1520   // Return the number of bytes available on the free list.
available()1521   intptr_t available() {
1522     return small_list_.available() + medium_list_.available() +
1523            large_list_.available() + huge_list_.available();
1524   }
1525 
1526   // Place a node on the free list.  The block of size 'size_in_bytes'
1527   // starting at 'start' is placed on the free list.  The return value is the
1528   // number of bytes that have been lost due to internal fragmentation by
1529   // freeing the block.  Bookkeeping information will be written to the block,
1530   // i.e., its contents will be destroyed.  The start address should be word
1531   // aligned, and the size should be a non-zero multiple of the word size.
1532   int Free(Address start, int size_in_bytes);
1533 
1534   // This method returns how much memory can be allocated after freeing
1535   // maximum_freed memory.
GuaranteedAllocatable(int maximum_freed)1536   static inline int GuaranteedAllocatable(int maximum_freed) {
1537     if (maximum_freed < kSmallListMin) {
1538       return 0;
1539     } else if (maximum_freed <= kSmallListMax) {
1540       return kSmallAllocationMax;
1541     } else if (maximum_freed <= kMediumListMax) {
1542       return kMediumAllocationMax;
1543     } else if (maximum_freed <= kLargeListMax) {
1544       return kLargeAllocationMax;
1545     }
1546     return maximum_freed;
1547   }
1548 
1549   // Allocate a block of size 'size_in_bytes' from the free list.  The block
1550   // is unitialized.  A failure is returned if no block is available.  The
1551   // number of bytes lost to fragmentation is returned in the output parameter
1552   // 'wasted_bytes'.  The size should be a non-zero multiple of the word size.
1553   MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
1554 
IsEmpty()1555   bool IsEmpty() {
1556     return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
1557            large_list_.IsEmpty() && huge_list_.IsEmpty();
1558   }
1559 
1560 #ifdef DEBUG
1561   void Zap();
1562   intptr_t SumFreeLists();
1563   bool IsVeryLong();
1564 #endif
1565 
1566   // Used after booting the VM.
1567   void RepairLists(Heap* heap);
1568 
1569   intptr_t EvictFreeListItems(Page* p);
1570   bool ContainsPageFreeListItems(Page* p);
1571 
small_list()1572   FreeListCategory* small_list() { return &small_list_; }
medium_list()1573   FreeListCategory* medium_list() { return &medium_list_; }
large_list()1574   FreeListCategory* large_list() { return &large_list_; }
huge_list()1575   FreeListCategory* huge_list() { return &huge_list_; }
1576 
1577  private:
1578   // The size range of blocks, in bytes.
1579   static const int kMinBlockSize = 3 * kPointerSize;
1580   static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
1581 
1582   FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
1583 
1584   PagedSpace* owner_;
1585   Heap* heap_;
1586 
1587   static const int kSmallListMin = 0x20 * kPointerSize;
1588   static const int kSmallListMax = 0xff * kPointerSize;
1589   static const int kMediumListMax = 0x7ff * kPointerSize;
1590   static const int kLargeListMax = 0x3fff * kPointerSize;
1591   static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
1592   static const int kMediumAllocationMax = kSmallListMax;
1593   static const int kLargeAllocationMax = kMediumListMax;
1594   FreeListCategory small_list_;
1595   FreeListCategory medium_list_;
1596   FreeListCategory large_list_;
1597   FreeListCategory huge_list_;
1598 
1599   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
1600 };
1601 
1602 
1603 class AllocationResult {
1604  public:
1605   // Implicit constructor from Object*.
AllocationResult(Object * object)1606   AllocationResult(Object* object)  // NOLINT
1607       : object_(object),
1608         retry_space_(INVALID_SPACE) {}
1609 
AllocationResult()1610   AllocationResult() : object_(NULL), retry_space_(INVALID_SPACE) {}
1611 
1612   static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
1613     return AllocationResult(space);
1614   }
1615 
IsRetry()1616   inline bool IsRetry() { return retry_space_ != INVALID_SPACE; }
1617 
1618   template <typename T>
To(T ** obj)1619   bool To(T** obj) {
1620     if (IsRetry()) return false;
1621     *obj = T::cast(object_);
1622     return true;
1623   }
1624 
ToObjectChecked()1625   Object* ToObjectChecked() {
1626     CHECK(!IsRetry());
1627     return object_;
1628   }
1629 
RetrySpace()1630   AllocationSpace RetrySpace() {
1631     DCHECK(IsRetry());
1632     return retry_space_;
1633   }
1634 
1635  private:
AllocationResult(AllocationSpace space)1636   explicit AllocationResult(AllocationSpace space)
1637       : object_(NULL), retry_space_(space) {}
1638 
1639   Object* object_;
1640   AllocationSpace retry_space_;
1641 };
1642 
1643 
1644 class PagedSpace : public Space {
1645  public:
1646   // Creates a space with a maximum capacity, and an id.
1647   PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
1648              Executability executable);
1649 
~PagedSpace()1650   virtual ~PagedSpace() {}
1651 
1652   // Set up the space using the given address range of virtual memory (from
1653   // the memory allocator's initial chunk) if possible.  If the block of
1654   // addresses is not big enough to contain a single page-aligned page, a
1655   // fresh chunk will be allocated.
1656   bool SetUp();
1657 
1658   // Returns true if the space has been successfully set up and not
1659   // subsequently torn down.
1660   bool HasBeenSetUp();
1661 
1662   // Cleans up the space, frees all pages in this space except those belonging
1663   // to the initial chunk, uncommits addresses in the initial chunk.
1664   void TearDown();
1665 
1666   // Checks whether an object/address is in this space.
1667   inline bool Contains(Address a);
Contains(HeapObject * o)1668   bool Contains(HeapObject* o) { return Contains(o->address()); }
1669 
1670   // Given an address occupied by a live object, return that object if it is
1671   // in this space, or a Smi if it is not.  The implementation iterates over
1672   // objects in the page containing the address, the cost is linear in the
1673   // number of objects in the page.  It may be slow.
1674   Object* FindObject(Address addr);
1675 
1676   // During boot the free_space_map is created, and afterwards we may need
1677   // to write it into the free list nodes that were already created.
1678   void RepairFreeListsAfterBoot();
1679 
1680   // Prepares for a mark-compact GC.
1681   void PrepareForMarkCompact();
1682 
1683   // Current capacity without growing (Size() + Available()).
Capacity()1684   intptr_t Capacity() { return accounting_stats_.Capacity(); }
1685 
1686   // Total amount of memory committed for this space.  For paged
1687   // spaces this equals the capacity.
CommittedMemory()1688   intptr_t CommittedMemory() { return Capacity(); }
1689 
1690   // The maximum amount of memory ever committed for this space.
MaximumCommittedMemory()1691   intptr_t MaximumCommittedMemory() { return accounting_stats_.MaxCapacity(); }
1692 
1693   // Approximate amount of physical memory committed for this space.
1694   size_t CommittedPhysicalMemory();
1695 
1696   struct SizeStats {
TotalSizeStats1697     intptr_t Total() {
1698       return small_size_ + medium_size_ + large_size_ + huge_size_;
1699     }
1700 
1701     intptr_t small_size_;
1702     intptr_t medium_size_;
1703     intptr_t large_size_;
1704     intptr_t huge_size_;
1705   };
1706 
1707   void ObtainFreeListStatistics(Page* p, SizeStats* sizes);
1708   void ResetFreeListStatistics();
1709 
1710   // Sets the capacity, the available space and the wasted space to zero.
1711   // The stats are rebuilt during sweeping by adding each page to the
1712   // capacity and the size when it is encountered.  As free spaces are
1713   // discovered during the sweeping they are subtracted from the size and added
1714   // to the available and wasted totals.
ClearStats()1715   void ClearStats() {
1716     accounting_stats_.ClearSizeWaste();
1717     ResetFreeListStatistics();
1718   }
1719 
1720   // Increases the number of available bytes of that space.
AddToAccountingStats(intptr_t bytes)1721   void AddToAccountingStats(intptr_t bytes) {
1722     accounting_stats_.DeallocateBytes(bytes);
1723   }
1724 
1725   // Available bytes without growing.  These are the bytes on the free list.
1726   // The bytes in the linear allocation area are not included in this total
1727   // because updating the stats would slow down allocation.  New pages are
1728   // immediately added to the free list so they show up here.
Available()1729   intptr_t Available() { return free_list_.available(); }
1730 
1731   // Allocated bytes in this space.  Garbage bytes that were not found due to
1732   // concurrent sweeping are counted as being allocated!  The bytes in the
1733   // current linear allocation area (between top and limit) are also counted
1734   // here.
Size()1735   virtual intptr_t Size() { return accounting_stats_.Size(); }
1736 
1737   // As size, but the bytes in lazily swept pages are estimated and the bytes
1738   // in the current linear allocation area are not included.
1739   virtual intptr_t SizeOfObjects();
1740 
1741   // Wasted bytes in this space.  These are just the bytes that were thrown away
1742   // due to being too small to use for allocation.  They do not include the
1743   // free bytes that were not found at all due to lazy sweeping.
Waste()1744   virtual intptr_t Waste() { return accounting_stats_.Waste(); }
1745 
1746   // Returns the allocation pointer in this space.
top()1747   Address top() { return allocation_info_.top(); }
limit()1748   Address limit() { return allocation_info_.limit(); }
1749 
1750   // The allocation top address.
allocation_top_address()1751   Address* allocation_top_address() { return allocation_info_.top_address(); }
1752 
1753   // The allocation limit address.
allocation_limit_address()1754   Address* allocation_limit_address() {
1755     return allocation_info_.limit_address();
1756   }
1757 
1758   // Allocate the requested number of bytes in the space if possible, return a
1759   // failure object if not.
1760   MUST_USE_RESULT inline AllocationResult AllocateRaw(int size_in_bytes);
1761 
1762   // Give a block of memory to the space's free list.  It might be added to
1763   // the free list or accounted as waste.
1764   // If add_to_freelist is false then just accounting stats are updated and
1765   // no attempt to add area to free list is made.
Free(Address start,int size_in_bytes)1766   int Free(Address start, int size_in_bytes) {
1767     int wasted = free_list_.Free(start, size_in_bytes);
1768     accounting_stats_.DeallocateBytes(size_in_bytes);
1769     accounting_stats_.WasteBytes(wasted);
1770     return size_in_bytes - wasted;
1771   }
1772 
ResetFreeList()1773   void ResetFreeList() { free_list_.Reset(); }
1774 
1775   // Set space allocation info.
SetTopAndLimit(Address top,Address limit)1776   void SetTopAndLimit(Address top, Address limit) {
1777     DCHECK(top == limit ||
1778            Page::FromAddress(top) == Page::FromAddress(limit - 1));
1779     MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1780     allocation_info_.set_top(top);
1781     allocation_info_.set_limit(limit);
1782   }
1783 
1784   // Empty space allocation info, returning unused area to free list.
EmptyAllocationInfo()1785   void EmptyAllocationInfo() {
1786     // Mark the old linear allocation area with a free space map so it can be
1787     // skipped when scanning the heap.
1788     int old_linear_size = static_cast<int>(limit() - top());
1789     Free(top(), old_linear_size);
1790     SetTopAndLimit(NULL, NULL);
1791   }
1792 
Allocate(int bytes)1793   void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); }
1794 
1795   void IncreaseCapacity(int size);
1796 
1797   // Releases an unused page and shrinks the space.
1798   void ReleasePage(Page* page);
1799 
1800   // The dummy page that anchors the linked list of pages.
anchor()1801   Page* anchor() { return &anchor_; }
1802 
1803 #ifdef VERIFY_HEAP
1804   // Verify integrity of this space.
1805   virtual void Verify(ObjectVisitor* visitor);
1806 
1807   // Overridden by subclasses to verify space-specific object
1808   // properties (e.g., only maps or free-list nodes are in map space).
VerifyObject(HeapObject * obj)1809   virtual void VerifyObject(HeapObject* obj) {}
1810 #endif
1811 
1812 #ifdef DEBUG
1813   // Print meta info and objects in this space.
1814   virtual void Print();
1815 
1816   // Reports statistics for the space
1817   void ReportStatistics();
1818 
1819   // Report code object related statistics
1820   void CollectCodeStatistics();
1821   static void ReportCodeStatistics(Isolate* isolate);
1822   static void ResetCodeStatistics(Isolate* isolate);
1823 #endif
1824 
1825   // Evacuation candidates are swept by evacuator.  Needs to return a valid
1826   // result before _and_ after evacuation has finished.
ShouldBeSweptBySweeperThreads(Page * p)1827   static bool ShouldBeSweptBySweeperThreads(Page* p) {
1828     return !p->IsEvacuationCandidate() &&
1829            !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept();
1830   }
1831 
IncrementUnsweptFreeBytes(intptr_t by)1832   void IncrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ += by; }
1833 
IncreaseUnsweptFreeBytes(Page * p)1834   void IncreaseUnsweptFreeBytes(Page* p) {
1835     DCHECK(ShouldBeSweptBySweeperThreads(p));
1836     unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
1837   }
1838 
DecrementUnsweptFreeBytes(intptr_t by)1839   void DecrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ -= by; }
1840 
DecreaseUnsweptFreeBytes(Page * p)1841   void DecreaseUnsweptFreeBytes(Page* p) {
1842     DCHECK(ShouldBeSweptBySweeperThreads(p));
1843     unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
1844   }
1845 
ResetUnsweptFreeBytes()1846   void ResetUnsweptFreeBytes() { unswept_free_bytes_ = 0; }
1847 
1848   // This function tries to steal size_in_bytes memory from the sweeper threads
1849   // free-lists. If it does not succeed stealing enough memory, it will wait
1850   // for the sweeper threads to finish sweeping.
1851   // It returns true when sweeping is completed and false otherwise.
1852   bool EnsureSweeperProgress(intptr_t size_in_bytes);
1853 
set_end_of_unswept_pages(Page * page)1854   void set_end_of_unswept_pages(Page* page) { end_of_unswept_pages_ = page; }
1855 
end_of_unswept_pages()1856   Page* end_of_unswept_pages() { return end_of_unswept_pages_; }
1857 
FirstPage()1858   Page* FirstPage() { return anchor_.next_page(); }
LastPage()1859   Page* LastPage() { return anchor_.prev_page(); }
1860 
1861   void EvictEvacuationCandidatesFromFreeLists();
1862 
1863   bool CanExpand();
1864 
1865   // Returns the number of total pages in this space.
1866   int CountTotalPages();
1867 
1868   // Return size of allocatable area on a page in this space.
AreaSize()1869   inline int AreaSize() { return area_size_; }
1870 
1871   void CreateEmergencyMemory();
1872   void FreeEmergencyMemory();
1873   void UseEmergencyMemory();
1874 
HasEmergencyMemory()1875   bool HasEmergencyMemory() { return emergency_memory_ != NULL; }
1876 
1877  protected:
free_list()1878   FreeList* free_list() { return &free_list_; }
1879 
1880   int area_size_;
1881 
1882   // Maximum capacity of this space.
1883   intptr_t max_capacity_;
1884 
1885   intptr_t SizeOfFirstPage();
1886 
1887   // Accounting information for this space.
1888   AllocationStats accounting_stats_;
1889 
1890   // The dummy page that anchors the double linked list of pages.
1891   Page anchor_;
1892 
1893   // The space's free list.
1894   FreeList free_list_;
1895 
1896   // Normal allocation information.
1897   AllocationInfo allocation_info_;
1898 
1899   // The number of free bytes which could be reclaimed by advancing the
1900   // concurrent sweeper threads.
1901   intptr_t unswept_free_bytes_;
1902 
1903   // The sweeper threads iterate over the list of pointer and data space pages
1904   // and sweep these pages concurrently. They will stop sweeping after the
1905   // end_of_unswept_pages_ page.
1906   Page* end_of_unswept_pages_;
1907 
1908   // Emergency memory is the memory of a full page for a given space, allocated
1909   // conservatively before evacuating a page. If compaction fails due to out
1910   // of memory error the emergency memory can be used to complete compaction.
1911   // If not used, the emergency memory is released after compaction.
1912   MemoryChunk* emergency_memory_;
1913 
1914   // Expands the space by allocating a fixed number of pages. Returns false if
1915   // it cannot allocate requested number of pages from OS, or if the hard heap
1916   // size limit has been hit.
1917   bool Expand();
1918 
1919   // Generic fast case allocation function that tries linear allocation at the
1920   // address denoted by top in allocation_info_.
1921   inline HeapObject* AllocateLinearly(int size_in_bytes);
1922 
1923   // If sweeping is still in progress try to sweep unswept pages. If that is
1924   // not successful, wait for the sweeper threads and re-try free-list
1925   // allocation.
1926   MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation(
1927       int size_in_bytes);
1928 
1929   // Slow path of AllocateRaw.  This function is space-dependent.
1930   MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
1931 
1932   friend class PageIterator;
1933   friend class MarkCompactCollector;
1934 };
1935 
1936 
1937 class NumberAndSizeInfo BASE_EMBEDDED {
1938  public:
NumberAndSizeInfo()1939   NumberAndSizeInfo() : number_(0), bytes_(0) {}
1940 
number()1941   int number() const { return number_; }
increment_number(int num)1942   void increment_number(int num) { number_ += num; }
1943 
bytes()1944   int bytes() const { return bytes_; }
increment_bytes(int size)1945   void increment_bytes(int size) { bytes_ += size; }
1946 
clear()1947   void clear() {
1948     number_ = 0;
1949     bytes_ = 0;
1950   }
1951 
1952  private:
1953   int number_;
1954   int bytes_;
1955 };
1956 
1957 
1958 // HistogramInfo class for recording a single "bar" of a histogram.  This
1959 // class is used for collecting statistics to print to the log file.
1960 class HistogramInfo : public NumberAndSizeInfo {
1961  public:
HistogramInfo()1962   HistogramInfo() : NumberAndSizeInfo() {}
1963 
name()1964   const char* name() { return name_; }
set_name(const char * name)1965   void set_name(const char* name) { name_ = name; }
1966 
1967  private:
1968   const char* name_;
1969 };
1970 
1971 
1972 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
1973 
1974 
1975 class SemiSpace;
1976 
1977 
1978 class NewSpacePage : public MemoryChunk {
1979  public:
1980   // GC related flags copied from from-space to to-space when
1981   // flipping semispaces.
1982   static const intptr_t kCopyOnFlipFlagsMask =
1983       (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
1984       (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
1985       (1 << MemoryChunk::SCAN_ON_SCAVENGE);
1986 
1987   static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
1988 
next_page()1989   inline NewSpacePage* next_page() const {
1990     return static_cast<NewSpacePage*>(next_chunk());
1991   }
1992 
set_next_page(NewSpacePage * page)1993   inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
1994 
prev_page()1995   inline NewSpacePage* prev_page() const {
1996     return static_cast<NewSpacePage*>(prev_chunk());
1997   }
1998 
set_prev_page(NewSpacePage * page)1999   inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }
2000 
semi_space()2001   SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }
2002 
is_anchor()2003   bool is_anchor() { return !this->InNewSpace(); }
2004 
IsAtStart(Address addr)2005   static bool IsAtStart(Address addr) {
2006     return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
2007            kObjectStartOffset;
2008   }
2009 
IsAtEnd(Address addr)2010   static bool IsAtEnd(Address addr) {
2011     return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
2012   }
2013 
address()2014   Address address() { return reinterpret_cast<Address>(this); }
2015 
2016   // Finds the NewSpacePage containing the given address.
FromAddress(Address address_in_page)2017   static inline NewSpacePage* FromAddress(Address address_in_page) {
2018     Address page_start =
2019         reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
2020                                   ~Page::kPageAlignmentMask);
2021     NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
2022     return page;
2023   }
2024 
2025   // Find the page for a limit address. A limit address is either an address
2026   // inside a page, or the address right after the last byte of a page.
FromLimit(Address address_limit)2027   static inline NewSpacePage* FromLimit(Address address_limit) {
2028     return NewSpacePage::FromAddress(address_limit - 1);
2029   }
2030 
2031   // Checks if address1 and address2 are on the same new space page.
OnSamePage(Address address1,Address address2)2032   static inline bool OnSamePage(Address address1, Address address2) {
2033     return NewSpacePage::FromAddress(address1) ==
2034            NewSpacePage::FromAddress(address2);
2035   }
2036 
2037  private:
2038   // Create a NewSpacePage object that is only used as anchor
2039   // for the doubly-linked list of real pages.
NewSpacePage(SemiSpace * owner)2040   explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }
2041 
2042   static NewSpacePage* Initialize(Heap* heap, Address start,
2043                                   SemiSpace* semi_space);
2044 
2045   // Intialize a fake NewSpacePage used as sentinel at the ends
2046   // of a doubly-linked list of real NewSpacePages.
2047   // Only uses the prev/next links, and sets flags to not be in new-space.
2048   void InitializeAsAnchor(SemiSpace* owner);
2049 
2050   friend class SemiSpace;
2051   friend class SemiSpaceIterator;
2052 };
2053 
2054 
2055 // -----------------------------------------------------------------------------
2056 // SemiSpace in young generation
2057 //
2058 // A semispace is a contiguous chunk of memory holding page-like memory
2059 // chunks. The mark-compact collector  uses the memory of the first page in
2060 // the from space as a marking stack when tracing live objects.
2061 
2062 class SemiSpace : public Space {
2063  public:
2064   // Constructor.
SemiSpace(Heap * heap,SemiSpaceId semispace)2065   SemiSpace(Heap* heap, SemiSpaceId semispace)
2066       : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2067         start_(NULL),
2068         age_mark_(NULL),
2069         id_(semispace),
2070         anchor_(this),
2071         current_page_(NULL) {}
2072 
2073   // Sets up the semispace using the given chunk.
2074   void SetUp(Address start, int initial_capacity, int maximum_capacity);
2075 
2076   // Tear down the space.  Heap memory was not allocated by the space, so it
2077   // is not deallocated here.
2078   void TearDown();
2079 
2080   // True if the space has been set up but not torn down.
HasBeenSetUp()2081   bool HasBeenSetUp() { return start_ != NULL; }
2082 
2083   // Grow the semispace to the new capacity.  The new capacity
2084   // requested must be larger than the current capacity and less than
2085   // the maximum capacity.
2086   bool GrowTo(int new_capacity);
2087 
2088   // Shrinks the semispace to the new capacity.  The new capacity
2089   // requested must be more than the amount of used memory in the
2090   // semispace and less than the current capacity.
2091   bool ShrinkTo(int new_capacity);
2092 
2093   // Returns the start address of the first page of the space.
space_start()2094   Address space_start() {
2095     DCHECK(anchor_.next_page() != &anchor_);
2096     return anchor_.next_page()->area_start();
2097   }
2098 
2099   // Returns the start address of the current page of the space.
page_low()2100   Address page_low() { return current_page_->area_start(); }
2101 
2102   // Returns one past the end address of the space.
space_end()2103   Address space_end() { return anchor_.prev_page()->area_end(); }
2104 
2105   // Returns one past the end address of the current page of the space.
page_high()2106   Address page_high() { return current_page_->area_end(); }
2107 
AdvancePage()2108   bool AdvancePage() {
2109     NewSpacePage* next_page = current_page_->next_page();
2110     if (next_page == anchor()) return false;
2111     current_page_ = next_page;
2112     return true;
2113   }
2114 
2115   // Resets the space to using the first page.
2116   void Reset();
2117 
2118   // Age mark accessors.
age_mark()2119   Address age_mark() { return age_mark_; }
2120   void set_age_mark(Address mark);
2121 
2122   // True if the address is in the address range of this semispace (not
2123   // necessarily below the allocation pointer).
Contains(Address a)2124   bool Contains(Address a) {
2125     return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
2126            reinterpret_cast<uintptr_t>(start_);
2127   }
2128 
2129   // True if the object is a heap object in the address range of this
2130   // semispace (not necessarily below the allocation pointer).
Contains(Object * o)2131   bool Contains(Object* o) {
2132     return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
2133   }
2134 
2135   // If we don't have these here then SemiSpace will be abstract.  However
2136   // they should never be called.
Size()2137   virtual intptr_t Size() {
2138     UNREACHABLE();
2139     return 0;
2140   }
2141 
is_committed()2142   bool is_committed() { return committed_; }
2143   bool Commit();
2144   bool Uncommit();
2145 
first_page()2146   NewSpacePage* first_page() { return anchor_.next_page(); }
current_page()2147   NewSpacePage* current_page() { return current_page_; }
2148 
2149 #ifdef VERIFY_HEAP
2150   virtual void Verify();
2151 #endif
2152 
2153 #ifdef DEBUG
2154   virtual void Print();
2155   // Validate a range of of addresses in a SemiSpace.
2156   // The "from" address must be on a page prior to the "to" address,
2157   // in the linked page order, or it must be earlier on the same page.
2158   static void AssertValidRange(Address from, Address to);
2159 #else
2160   // Do nothing.
AssertValidRange(Address from,Address to)2161   inline static void AssertValidRange(Address from, Address to) {}
2162 #endif
2163 
2164   // Returns the current total capacity of the semispace.
TotalCapacity()2165   int TotalCapacity() { return total_capacity_; }
2166 
2167   // Returns the maximum total capacity of the semispace.
MaximumTotalCapacity()2168   int MaximumTotalCapacity() { return maximum_total_capacity_; }
2169 
2170   // Returns the initial capacity of the semispace.
InitialTotalCapacity()2171   int InitialTotalCapacity() { return initial_total_capacity_; }
2172 
id()2173   SemiSpaceId id() { return id_; }
2174 
2175   static void Swap(SemiSpace* from, SemiSpace* to);
2176 
2177   // Returns the maximum amount of memory ever committed by the semi space.
MaximumCommittedMemory()2178   size_t MaximumCommittedMemory() { return maximum_committed_; }
2179 
2180   // Approximate amount of physical memory committed for this space.
2181   size_t CommittedPhysicalMemory();
2182 
2183  private:
2184   // Flips the semispace between being from-space and to-space.
2185   // Copies the flags into the masked positions on all pages in the space.
2186   void FlipPages(intptr_t flags, intptr_t flag_mask);
2187 
2188   // Updates Capacity and MaximumCommitted based on new capacity.
2189   void SetCapacity(int new_capacity);
2190 
anchor()2191   NewSpacePage* anchor() { return &anchor_; }
2192 
2193   // The current and maximum total capacity of the space.
2194   int total_capacity_;
2195   int maximum_total_capacity_;
2196   int initial_total_capacity_;
2197 
2198   intptr_t maximum_committed_;
2199 
2200   // The start address of the space.
2201   Address start_;
2202   // Used to govern object promotion during mark-compact collection.
2203   Address age_mark_;
2204 
2205   // Masks and comparison values to test for containment in this semispace.
2206   uintptr_t address_mask_;
2207   uintptr_t object_mask_;
2208   uintptr_t object_expected_;
2209 
2210   bool committed_;
2211   SemiSpaceId id_;
2212 
2213   NewSpacePage anchor_;
2214   NewSpacePage* current_page_;
2215 
2216   friend class SemiSpaceIterator;
2217   friend class NewSpacePageIterator;
2218 
2219  public:
2220   TRACK_MEMORY("SemiSpace")
2221 };
2222 
2223 
2224 // A SemiSpaceIterator is an ObjectIterator that iterates over the active
2225 // semispace of the heap's new space.  It iterates over the objects in the
2226 // semispace from a given start address (defaulting to the bottom of the
2227 // semispace) to the top of the semispace.  New objects allocated after the
2228 // iterator is created are not iterated.
2229 class SemiSpaceIterator : public ObjectIterator {
2230  public:
2231   // Create an iterator over the objects in the given space.  If no start
2232   // address is given, the iterator starts from the bottom of the space.  If
2233   // no size function is given, the iterator calls Object::Size().
2234 
2235   // Iterate over all of allocated to-space.
2236   explicit SemiSpaceIterator(NewSpace* space);
2237   // Iterate over all of allocated to-space, with a custome size function.
2238   SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
2239   // Iterate over part of allocated to-space, from start to the end
2240   // of allocation.
2241   SemiSpaceIterator(NewSpace* space, Address start);
2242   // Iterate from one address to another in the same semi-space.
2243   SemiSpaceIterator(Address from, Address to);
2244 
Next()2245   HeapObject* Next() {
2246     if (current_ == limit_) return NULL;
2247     if (NewSpacePage::IsAtEnd(current_)) {
2248       NewSpacePage* page = NewSpacePage::FromLimit(current_);
2249       page = page->next_page();
2250       DCHECK(!page->is_anchor());
2251       current_ = page->area_start();
2252       if (current_ == limit_) return NULL;
2253     }
2254 
2255     HeapObject* object = HeapObject::FromAddress(current_);
2256     int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
2257 
2258     current_ += size;
2259     return object;
2260   }
2261 
2262   // Implementation of the ObjectIterator functions.
next_object()2263   virtual HeapObject* next_object() { return Next(); }
2264 
2265  private:
2266   void Initialize(Address start, Address end, HeapObjectCallback size_func);
2267 
2268   // The current iteration point.
2269   Address current_;
2270   // The end of iteration.
2271   Address limit_;
2272   // The callback function.
2273   HeapObjectCallback size_func_;
2274 };
2275 
2276 
2277 // -----------------------------------------------------------------------------
2278 // A PageIterator iterates the pages in a semi-space.
2279 class NewSpacePageIterator BASE_EMBEDDED {
2280  public:
2281   // Make an iterator that runs over all pages in to-space.
2282   explicit inline NewSpacePageIterator(NewSpace* space);
2283 
2284   // Make an iterator that runs over all pages in the given semispace,
2285   // even those not used in allocation.
2286   explicit inline NewSpacePageIterator(SemiSpace* space);
2287 
2288   // Make iterator that iterates from the page containing start
2289   // to the page that contains limit in the same semispace.
2290   inline NewSpacePageIterator(Address start, Address limit);
2291 
2292   inline bool has_next();
2293   inline NewSpacePage* next();
2294 
2295  private:
2296   NewSpacePage* prev_page_;  // Previous page returned.
2297   // Next page that will be returned.  Cached here so that we can use this
2298   // iterator for operations that deallocate pages.
2299   NewSpacePage* next_page_;
2300   // Last page returned.
2301   NewSpacePage* last_page_;
2302 };
2303 
2304 
2305 // -----------------------------------------------------------------------------
2306 // The young generation space.
2307 //
2308 // The new space consists of a contiguous pair of semispaces.  It simply
2309 // forwards most functions to the appropriate semispace.
2310 
2311 class NewSpace : public Space {
2312  public:
2313   // Constructor.
NewSpace(Heap * heap)2314   explicit NewSpace(Heap* heap)
2315       : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2316         to_space_(heap, kToSpace),
2317         from_space_(heap, kFromSpace),
2318         reservation_(),
2319         inline_allocation_limit_step_(0) {}
2320 
2321   // Sets up the new space using the given chunk.
2322   bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
2323 
2324   // Tears down the space.  Heap memory was not allocated by the space, so it
2325   // is not deallocated here.
2326   void TearDown();
2327 
2328   // True if the space has been set up but not torn down.
HasBeenSetUp()2329   bool HasBeenSetUp() {
2330     return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
2331   }
2332 
2333   // Flip the pair of spaces.
2334   void Flip();
2335 
2336   // Grow the capacity of the semispaces.  Assumes that they are not at
2337   // their maximum capacity.
2338   void Grow();
2339 
2340   // Shrink the capacity of the semispaces.
2341   void Shrink();
2342 
2343   // True if the address or object lies in the address range of either
2344   // semispace (not necessarily below the allocation pointer).
Contains(Address a)2345   bool Contains(Address a) {
2346     return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
2347            reinterpret_cast<uintptr_t>(start_);
2348   }
2349 
Contains(Object * o)2350   bool Contains(Object* o) {
2351     Address a = reinterpret_cast<Address>(o);
2352     return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
2353   }
2354 
2355   // Return the allocated bytes in the active semispace.
Size()2356   virtual intptr_t Size() {
2357     return pages_used_ * NewSpacePage::kAreaSize +
2358            static_cast<int>(top() - to_space_.page_low());
2359   }
2360 
2361   // The same, but returning an int.  We have to have the one that returns
2362   // intptr_t because it is inherited, but if we know we are dealing with the
2363   // new space, which can't get as big as the other spaces then this is useful:
SizeAsInt()2364   int SizeAsInt() { return static_cast<int>(Size()); }
2365 
2366   // Return the allocatable capacity of a semispace.
Capacity()2367   intptr_t Capacity() {
2368     SLOW_DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
2369     return (to_space_.TotalCapacity() / Page::kPageSize) *
2370            NewSpacePage::kAreaSize;
2371   }
2372 
2373   // Return the current size of a semispace, allocatable and non-allocatable
2374   // memory.
TotalCapacity()2375   intptr_t TotalCapacity() {
2376     DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
2377     return to_space_.TotalCapacity();
2378   }
2379 
2380   // Return the total amount of memory committed for new space.
CommittedMemory()2381   intptr_t CommittedMemory() {
2382     if (from_space_.is_committed()) return 2 * Capacity();
2383     return TotalCapacity();
2384   }
2385 
2386   // Return the total amount of memory committed for new space.
MaximumCommittedMemory()2387   intptr_t MaximumCommittedMemory() {
2388     return to_space_.MaximumCommittedMemory() +
2389            from_space_.MaximumCommittedMemory();
2390   }
2391 
2392   // Approximate amount of physical memory committed for this space.
2393   size_t CommittedPhysicalMemory();
2394 
2395   // Return the available bytes without growing.
Available()2396   intptr_t Available() { return Capacity() - Size(); }
2397 
2398   // Return the maximum capacity of a semispace.
MaximumCapacity()2399   int MaximumCapacity() {
2400     DCHECK(to_space_.MaximumTotalCapacity() ==
2401            from_space_.MaximumTotalCapacity());
2402     return to_space_.MaximumTotalCapacity();
2403   }
2404 
IsAtMaximumCapacity()2405   bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
2406 
2407   // Returns the initial capacity of a semispace.
InitialTotalCapacity()2408   int InitialTotalCapacity() {
2409     DCHECK(to_space_.InitialTotalCapacity() ==
2410            from_space_.InitialTotalCapacity());
2411     return to_space_.InitialTotalCapacity();
2412   }
2413 
2414   // Return the address of the allocation pointer in the active semispace.
top()2415   Address top() {
2416     DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
2417     return allocation_info_.top();
2418   }
2419 
set_top(Address top)2420   void set_top(Address top) {
2421     DCHECK(to_space_.current_page()->ContainsLimit(top));
2422     allocation_info_.set_top(top);
2423   }
2424 
2425   // Return the address of the allocation pointer limit in the active semispace.
limit()2426   Address limit() {
2427     DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
2428     return allocation_info_.limit();
2429   }
2430 
2431   // Return the address of the first object in the active semispace.
bottom()2432   Address bottom() { return to_space_.space_start(); }
2433 
2434   // Get the age mark of the inactive semispace.
age_mark()2435   Address age_mark() { return from_space_.age_mark(); }
2436   // Set the age mark in the active semispace.
set_age_mark(Address mark)2437   void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2438 
2439   // The start address of the space and a bit mask. Anding an address in the
2440   // new space with the mask will result in the start address.
start()2441   Address start() { return start_; }
mask()2442   uintptr_t mask() { return address_mask_; }
2443 
INLINE(uint32_t AddressToMarkbitIndex (Address addr))2444   INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
2445     DCHECK(Contains(addr));
2446     DCHECK(IsAligned(OffsetFrom(addr), kPointerSize) ||
2447            IsAligned(OffsetFrom(addr) - 1, kPointerSize));
2448     return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
2449   }
2450 
INLINE(Address MarkbitIndexToAddress (uint32_t index))2451   INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
2452     return reinterpret_cast<Address>(index << kPointerSizeLog2);
2453   }
2454 
2455   // The allocation top and limit address.
allocation_top_address()2456   Address* allocation_top_address() { return allocation_info_.top_address(); }
2457 
2458   // The allocation limit address.
allocation_limit_address()2459   Address* allocation_limit_address() {
2460     return allocation_info_.limit_address();
2461   }
2462 
2463   MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes));
2464 
2465   // Reset the allocation pointer to the beginning of the active semispace.
2466   void ResetAllocationInfo();
2467 
2468   void UpdateInlineAllocationLimit(int size_in_bytes);
LowerInlineAllocationLimit(intptr_t step)2469   void LowerInlineAllocationLimit(intptr_t step) {
2470     inline_allocation_limit_step_ = step;
2471     UpdateInlineAllocationLimit(0);
2472     top_on_previous_step_ = allocation_info_.top();
2473   }
2474 
2475   // Get the extent of the inactive semispace (for use as a marking stack,
2476   // or to zap it). Notice: space-addresses are not necessarily on the
2477   // same page, so FromSpaceStart() might be above FromSpaceEnd().
FromSpacePageLow()2478   Address FromSpacePageLow() { return from_space_.page_low(); }
FromSpacePageHigh()2479   Address FromSpacePageHigh() { return from_space_.page_high(); }
FromSpaceStart()2480   Address FromSpaceStart() { return from_space_.space_start(); }
FromSpaceEnd()2481   Address FromSpaceEnd() { return from_space_.space_end(); }
2482 
2483   // Get the extent of the active semispace's pages' memory.
ToSpaceStart()2484   Address ToSpaceStart() { return to_space_.space_start(); }
ToSpaceEnd()2485   Address ToSpaceEnd() { return to_space_.space_end(); }
2486 
ToSpaceContains(Address address)2487   inline bool ToSpaceContains(Address address) {
2488     return to_space_.Contains(address);
2489   }
FromSpaceContains(Address address)2490   inline bool FromSpaceContains(Address address) {
2491     return from_space_.Contains(address);
2492   }
2493 
2494   // True if the object is a heap object in the address range of the
2495   // respective semispace (not necessarily below the allocation pointer of the
2496   // semispace).
ToSpaceContains(Object * o)2497   inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
FromSpaceContains(Object * o)2498   inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
2499 
2500   // Try to switch the active semispace to a new, empty, page.
2501   // Returns false if this isn't possible or reasonable (i.e., there
2502   // are no pages, or the current page is already empty), or true
2503   // if successful.
2504   bool AddFreshPage();
2505 
2506 #ifdef VERIFY_HEAP
2507   // Verify the active semispace.
2508   virtual void Verify();
2509 #endif
2510 
2511 #ifdef DEBUG
2512   // Print the active semispace.
Print()2513   virtual void Print() { to_space_.Print(); }
2514 #endif
2515 
2516   // Iterates the active semispace to collect statistics.
2517   void CollectStatistics();
2518   // Reports previously collected statistics of the active semispace.
2519   void ReportStatistics();
2520   // Clears previously collected statistics.
2521   void ClearHistograms();
2522 
2523   // Record the allocation or promotion of a heap object.  Note that we don't
2524   // record every single allocation, but only those that happen in the
2525   // to space during a scavenge GC.
2526   void RecordAllocation(HeapObject* obj);
2527   void RecordPromotion(HeapObject* obj);
2528 
2529   // Return whether the operation succeded.
CommitFromSpaceIfNeeded()2530   bool CommitFromSpaceIfNeeded() {
2531     if (from_space_.is_committed()) return true;
2532     return from_space_.Commit();
2533   }
2534 
UncommitFromSpace()2535   bool UncommitFromSpace() {
2536     if (!from_space_.is_committed()) return true;
2537     return from_space_.Uncommit();
2538   }
2539 
inline_allocation_limit_step()2540   inline intptr_t inline_allocation_limit_step() {
2541     return inline_allocation_limit_step_;
2542   }
2543 
active_space()2544   SemiSpace* active_space() { return &to_space_; }
2545 
2546  private:
2547   // Update allocation info to match the current to-space page.
2548   void UpdateAllocationInfo();
2549 
2550   Address chunk_base_;
2551   uintptr_t chunk_size_;
2552 
2553   // The semispaces.
2554   SemiSpace to_space_;
2555   SemiSpace from_space_;
2556   base::VirtualMemory reservation_;
2557   int pages_used_;
2558 
2559   // Start address and bit mask for containment testing.
2560   Address start_;
2561   uintptr_t address_mask_;
2562   uintptr_t object_mask_;
2563   uintptr_t object_expected_;
2564 
2565   // Allocation pointer and limit for normal allocation and allocation during
2566   // mark-compact collection.
2567   AllocationInfo allocation_info_;
2568 
2569   // When incremental marking is active we will set allocation_info_.limit
2570   // to be lower than actual limit and then will gradually increase it
2571   // in steps to guarantee that we do incremental marking steps even
2572   // when all allocation is performed from inlined generated code.
2573   intptr_t inline_allocation_limit_step_;
2574 
2575   Address top_on_previous_step_;
2576 
2577   HistogramInfo* allocated_histogram_;
2578   HistogramInfo* promoted_histogram_;
2579 
2580   MUST_USE_RESULT AllocationResult SlowAllocateRaw(int size_in_bytes);
2581 
2582   friend class SemiSpaceIterator;
2583 
2584  public:
2585   TRACK_MEMORY("NewSpace")
2586 };
2587 
2588 
2589 // -----------------------------------------------------------------------------
2590 // Old object space (excluding map objects)
2591 
2592 class OldSpace : public PagedSpace {
2593  public:
2594   // Creates an old space object with a given maximum capacity.
2595   // The constructor does not allocate pages from OS.
OldSpace(Heap * heap,intptr_t max_capacity,AllocationSpace id,Executability executable)2596   OldSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
2597            Executability executable)
2598       : PagedSpace(heap, max_capacity, id, executable) {}
2599 
2600  public:
2601   TRACK_MEMORY("OldSpace")
2602 };
2603 
2604 
2605 // For contiguous spaces, top should be in the space (or at the end) and limit
2606 // should be the end of the space.
2607 #define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
2608   SLOW_DCHECK((space).page_low() <= (info).top() &&   \
2609               (info).top() <= (space).page_high() &&  \
2610               (info).limit() <= (space).page_high())
2611 
2612 
2613 // -----------------------------------------------------------------------------
2614 // Old space for all map objects
2615 
2616 class MapSpace : public PagedSpace {
2617  public:
2618   // Creates a map space object with a maximum capacity.
MapSpace(Heap * heap,intptr_t max_capacity,AllocationSpace id)2619   MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2620       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
2621         max_map_space_pages_(kMaxMapPageIndex - 1) {}
2622 
2623   // Given an index, returns the page address.
2624   // TODO(1600): this limit is artifical just to keep code compilable
2625   static const int kMaxMapPageIndex = 1 << 16;
2626 
RoundSizeDownToObjectAlignment(int size)2627   virtual int RoundSizeDownToObjectAlignment(int size) {
2628     if (base::bits::IsPowerOfTwo32(Map::kSize)) {
2629       return RoundDown(size, Map::kSize);
2630     } else {
2631       return (size / Map::kSize) * Map::kSize;
2632     }
2633   }
2634 
2635  protected:
2636   virtual void VerifyObject(HeapObject* obj);
2637 
2638  private:
2639   static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;
2640 
2641   // Do map space compaction if there is a page gap.
CompactionThreshold()2642   int CompactionThreshold() {
2643     return kMapsPerPage * (max_map_space_pages_ - 1);
2644   }
2645 
2646   const int max_map_space_pages_;
2647 
2648  public:
2649   TRACK_MEMORY("MapSpace")
2650 };
2651 
2652 
2653 // -----------------------------------------------------------------------------
2654 // Old space for simple property cell objects
2655 
2656 class CellSpace : public PagedSpace {
2657  public:
2658   // Creates a property cell space object with a maximum capacity.
CellSpace(Heap * heap,intptr_t max_capacity,AllocationSpace id)2659   CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2660       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {}
2661 
RoundSizeDownToObjectAlignment(int size)2662   virtual int RoundSizeDownToObjectAlignment(int size) {
2663     if (base::bits::IsPowerOfTwo32(Cell::kSize)) {
2664       return RoundDown(size, Cell::kSize);
2665     } else {
2666       return (size / Cell::kSize) * Cell::kSize;
2667     }
2668   }
2669 
2670  protected:
2671   virtual void VerifyObject(HeapObject* obj);
2672 
2673  public:
2674   TRACK_MEMORY("CellSpace")
2675 };
2676 
2677 
2678 // -----------------------------------------------------------------------------
2679 // Old space for all global object property cell objects
2680 
2681 class PropertyCellSpace : public PagedSpace {
2682  public:
2683   // Creates a property cell space object with a maximum capacity.
PropertyCellSpace(Heap * heap,intptr_t max_capacity,AllocationSpace id)2684   PropertyCellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2685       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {}
2686 
RoundSizeDownToObjectAlignment(int size)2687   virtual int RoundSizeDownToObjectAlignment(int size) {
2688     if (base::bits::IsPowerOfTwo32(PropertyCell::kSize)) {
2689       return RoundDown(size, PropertyCell::kSize);
2690     } else {
2691       return (size / PropertyCell::kSize) * PropertyCell::kSize;
2692     }
2693   }
2694 
2695  protected:
2696   virtual void VerifyObject(HeapObject* obj);
2697 
2698  public:
2699   TRACK_MEMORY("PropertyCellSpace")
2700 };
2701 
2702 
2703 // -----------------------------------------------------------------------------
2704 // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
2705 // the large object space. A large object is allocated from OS heap with
2706 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2707 // A large object always starts at Page::kObjectStartOffset to a page.
2708 // Large objects do not move during garbage collections.
2709 
2710 class LargeObjectSpace : public Space {
2711  public:
2712   LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
~LargeObjectSpace()2713   virtual ~LargeObjectSpace() {}
2714 
2715   // Initializes internal data structures.
2716   bool SetUp();
2717 
2718   // Releases internal resources, frees objects in this space.
2719   void TearDown();
2720 
ObjectSizeFor(intptr_t chunk_size)2721   static intptr_t ObjectSizeFor(intptr_t chunk_size) {
2722     if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2723     return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
2724   }
2725 
2726   // Shared implementation of AllocateRaw, AllocateRawCode and
2727   // AllocateRawFixedArray.
2728   MUST_USE_RESULT AllocationResult
2729       AllocateRaw(int object_size, Executability executable);
2730 
2731   // Available bytes for objects in this space.
2732   inline intptr_t Available();
2733 
Size()2734   virtual intptr_t Size() { return size_; }
2735 
SizeOfObjects()2736   virtual intptr_t SizeOfObjects() { return objects_size_; }
2737 
MaximumCommittedMemory()2738   intptr_t MaximumCommittedMemory() { return maximum_committed_; }
2739 
CommittedMemory()2740   intptr_t CommittedMemory() { return Size(); }
2741 
2742   // Approximate amount of physical memory committed for this space.
2743   size_t CommittedPhysicalMemory();
2744 
PageCount()2745   int PageCount() { return page_count_; }
2746 
2747   // Finds an object for a given address, returns a Smi if it is not found.
2748   // The function iterates through all objects in this space, may be slow.
2749   Object* FindObject(Address a);
2750 
2751   // Finds a large object page containing the given address, returns NULL
2752   // if such a page doesn't exist.
2753   LargePage* FindPage(Address a);
2754 
2755   // Frees unmarked objects.
2756   void FreeUnmarkedObjects();
2757 
2758   // Checks whether a heap object is in this space; O(1).
2759   bool Contains(HeapObject* obj);
2760 
2761   // Checks whether the space is empty.
IsEmpty()2762   bool IsEmpty() { return first_page_ == NULL; }
2763 
first_page()2764   LargePage* first_page() { return first_page_; }
2765 
2766 #ifdef VERIFY_HEAP
2767   virtual void Verify();
2768 #endif
2769 
2770 #ifdef DEBUG
2771   virtual void Print();
2772   void ReportStatistics();
2773   void CollectCodeStatistics();
2774 #endif
2775   // Checks whether an address is in the object area in this space.  It
2776   // iterates all objects in the space. May be slow.
SlowContains(Address addr)2777   bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); }
2778 
2779  private:
2780   intptr_t max_capacity_;
2781   intptr_t maximum_committed_;
2782   // The head of the linked list of large object chunks.
2783   LargePage* first_page_;
2784   intptr_t size_;          // allocated bytes
2785   int page_count_;         // number of chunks
2786   intptr_t objects_size_;  // size of objects
2787   // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
2788   HashMap chunk_map_;
2789 
2790   friend class LargeObjectIterator;
2791 
2792  public:
2793   TRACK_MEMORY("LargeObjectSpace")
2794 };
2795 
2796 
2797 class LargeObjectIterator : public ObjectIterator {
2798  public:
2799   explicit LargeObjectIterator(LargeObjectSpace* space);
2800   LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
2801 
2802   HeapObject* Next();
2803 
2804   // implementation of ObjectIterator.
next_object()2805   virtual HeapObject* next_object() { return Next(); }
2806 
2807  private:
2808   LargePage* current_;
2809   HeapObjectCallback size_func_;
2810 };
2811 
2812 
2813 // Iterates over the chunks (pages and large object pages) that can contain
2814 // pointers to new space.
2815 class PointerChunkIterator BASE_EMBEDDED {
2816  public:
2817   inline explicit PointerChunkIterator(Heap* heap);
2818 
2819   // Return NULL when the iterator is done.
next()2820   MemoryChunk* next() {
2821     switch (state_) {
2822       case kOldPointerState: {
2823         if (old_pointer_iterator_.has_next()) {
2824           return old_pointer_iterator_.next();
2825         }
2826         state_ = kMapState;
2827         // Fall through.
2828       }
2829       case kMapState: {
2830         if (map_iterator_.has_next()) {
2831           return map_iterator_.next();
2832         }
2833         state_ = kLargeObjectState;
2834         // Fall through.
2835       }
2836       case kLargeObjectState: {
2837         HeapObject* heap_object;
2838         do {
2839           heap_object = lo_iterator_.Next();
2840           if (heap_object == NULL) {
2841             state_ = kFinishedState;
2842             return NULL;
2843           }
2844           // Fixed arrays are the only pointer-containing objects in large
2845           // object space.
2846         } while (!heap_object->IsFixedArray());
2847         MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
2848         return answer;
2849       }
2850       case kFinishedState:
2851         return NULL;
2852       default:
2853         break;
2854     }
2855     UNREACHABLE();
2856     return NULL;
2857   }
2858 
2859 
2860  private:
2861   enum State { kOldPointerState, kMapState, kLargeObjectState, kFinishedState };
2862   State state_;
2863   PageIterator old_pointer_iterator_;
2864   PageIterator map_iterator_;
2865   LargeObjectIterator lo_iterator_;
2866 };
2867 
2868 
2869 #ifdef DEBUG
2870 struct CommentStatistic {
2871   const char* comment;
2872   int size;
2873   int count;
ClearCommentStatistic2874   void Clear() {
2875     comment = NULL;
2876     size = 0;
2877     count = 0;
2878   }
2879   // Must be small, since an iteration is used for lookup.
2880   static const int kMaxComments = 64;
2881 };
2882 #endif
2883 }
2884 }  // namespace v8::internal
2885 
2886 #endif  // V8_HEAP_SPACES_H_
2887